-rw-r--r--.mailmap1
-rw-r--r--Documentation/admin-guide/kernel-parameters.txt6
-rw-r--r--Documentation/cgroup-v2.txt11
-rw-r--r--Documentation/devicetree/bindings/arm/marvell/cp110-system-controller0.txt6
-rw-r--r--Documentation/devicetree/bindings/display/exynos/exynos_dsim.txt1
-rw-r--r--Documentation/devicetree/bindings/display/exynos/samsung-fimd.txt1
-rw-r--r--Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt2
-rw-r--r--Documentation/devicetree/bindings/phy/brcm,nsp-usb3-phy.txt39
-rw-r--r--Documentation/devicetree/bindings/powerpc/4xx/emac.txt62
-rw-r--r--Documentation/devicetree/bindings/rng/omap_rng.txt3
-rw-r--r--Documentation/extcon/intel-int3496.txt5
-rw-r--r--Documentation/filesystems/Locking3
-rw-r--r--Documentation/filesystems/porting6
-rw-r--r--Documentation/filesystems/vfs.txt3
-rw-r--r--Documentation/gcc-plugins.txt4
-rw-r--r--Documentation/networking/ip-sysctl.txt3
-rw-r--r--Documentation/pinctrl.txt8
-rw-r--r--Documentation/process/stable-kernel-rules.rst2
-rw-r--r--Documentation/virtual/kvm/api.txt63
-rw-r--r--Documentation/virtual/kvm/devices/arm-vgic.txt6
-rw-r--r--MAINTAINERS43
-rw-r--r--Makefile16
-rw-r--r--arch/alpha/kernel/osf_sys.c2
-rw-r--r--arch/arc/boot/dts/skeleton.dtsi1
-rw-r--r--arch/arc/boot/dts/skeleton_hs.dtsi1
-rw-r--r--arch/arc/boot/dts/skeleton_hs_idu.dtsi21
-rw-r--r--arch/arc/boot/dts/vdk_axs10x_mb.dtsi20
-rw-r--r--arch/arc/include/asm/kprobes.h4
-rw-r--r--arch/arc/kernel/entry-arcv2.S12
-rw-r--r--arch/arc/kernel/setup.c16
-rw-r--r--arch/arc/mm/cache.c3
-rw-r--r--arch/arm/boot/dts/am335x-pcm-953.dtsi4
-rw-r--r--arch/arm/boot/dts/am57xx-idk-common.dtsi14
-rw-r--r--arch/arm/boot/dts/bcm5301x.dtsi4
-rw-r--r--arch/arm/boot/dts/bcm953012k.dts5
-rw-r--r--arch/arm/boot/dts/bcm958522er.dts1
-rw-r--r--arch/arm/boot/dts/bcm958525er.dts1
-rw-r--r--arch/arm/boot/dts/bcm958525xmc.dts1
-rw-r--r--arch/arm/boot/dts/bcm958622hr.dts1
-rw-r--r--arch/arm/boot/dts/bcm958623hr.dts1
-rw-r--r--arch/arm/boot/dts/bcm958625hr.dts1
-rw-r--r--arch/arm/boot/dts/bcm988312hr.dts1
-rw-r--r--arch/arm/boot/dts/imx6sx-udoo-neo.dtsi5
-rw-r--r--arch/arm/boot/dts/sama5d2.dtsi2
-rw-r--r--arch/arm/boot/dts/ste-dbx5x0.dtsi24
-rw-r--r--arch/arm/boot/dts/ste-href.dtsi9
-rw-r--r--arch/arm/boot/dts/ste-snowball.dts9
-rw-r--r--arch/arm/boot/dts/sun7i-a20-lamobo-r1.dts2
-rw-r--r--arch/arm/boot/dts/sun8i-a23-a33.dtsi2
-rw-r--r--arch/arm/boot/dts/sun8i-a33.dtsi4
-rw-r--r--arch/arm/boot/dts/sun8i-reference-design-tablet.dtsi7
-rw-r--r--arch/arm/configs/omap2plus_defconfig1
-rw-r--r--arch/arm/kvm/arm.c3
-rw-r--r--arch/arm/kvm/mmu.c23
-rw-r--r--arch/arm/mach-at91/pm.c18
-rw-r--r--arch/arm/mach-omap2/Makefile3
-rw-r--r--arch/arm/mach-omap2/gpmc-nand.c154
-rw-r--r--arch/arm/mach-omap2/gpmc-onenand.c10
-rw-r--r--arch/arm/mach-omap2/omap-headsmp.S3
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_3xxx_data.c45
-rw-r--r--arch/arm/mm/dma-mapping.c20
-rw-r--r--arch/arm/mm/nommu.c5
-rw-r--r--arch/arm/probes/kprobes/core.c49
-rw-r--r--arch/arm/probes/kprobes/test-core.c11
-rw-r--r--arch/arm/tools/syscall.tbl1
-rw-r--r--arch/arm64/Kconfig4
-rw-r--r--arch/arm64/boot/dts/broadcom/ns2.dtsi11
-rw-r--r--arch/arm64/include/asm/cpufeature.h2
-rw-r--r--arch/arm64/include/asm/current.h2
-rw-r--r--arch/arm64/include/asm/unistd.h2
-rw-r--r--arch/arm64/include/asm/unistd32.h8
-rw-r--r--arch/arm64/kernel/cpuidle.c2
-rw-r--r--arch/arm64/kernel/kaslr.c10
-rw-r--r--arch/arm64/kernel/probes/kprobes.c6
-rw-r--r--arch/arm64/kernel/smp.c2
-rw-r--r--arch/arm64/kernel/vdso/.gitignore1
-rw-r--r--arch/arm64/mm/fault.c42
-rw-r--r--arch/arm64/mm/hugetlbpage.c14
-rw-r--r--arch/arm64/mm/kasan_init.c2
-rw-r--r--arch/c6x/kernel/ptrace.c41
-rw-r--r--arch/h8300/kernel/ptrace.c8
-rw-r--r--arch/m68k/configs/amiga_defconfig14
-rw-r--r--arch/m68k/configs/apollo_defconfig14
-rw-r--r--arch/m68k/configs/atari_defconfig14
-rw-r--r--arch/m68k/configs/bvme6000_defconfig14
-rw-r--r--arch/m68k/configs/hp300_defconfig14
-rw-r--r--arch/m68k/configs/mac_defconfig14
-rw-r--r--arch/m68k/configs/multi_defconfig14
-rw-r--r--arch/m68k/configs/mvme147_defconfig14
-rw-r--r--arch/m68k/configs/mvme16x_defconfig14
-rw-r--r--arch/m68k/configs/q40_defconfig14
-rw-r--r--arch/m68k/configs/sun3_defconfig14
-rw-r--r--arch/m68k/configs/sun3x_defconfig14
-rw-r--r--arch/m68k/include/asm/bitops.h2
-rw-r--r--arch/m68k/include/asm/unistd.h2
-rw-r--r--arch/m68k/include/uapi/asm/unistd.h1
-rw-r--r--arch/m68k/kernel/syscalltable.S1
-rw-r--r--arch/metag/include/asm/uaccess.h15
-rw-r--r--arch/metag/kernel/ptrace.c19
-rw-r--r--arch/metag/lib/usercopy.c312
-rw-r--r--arch/mips/Kconfig2
-rw-r--r--arch/mips/include/asm/fpu.h1
-rw-r--r--arch/mips/include/asm/irq.h15
-rw-r--r--arch/mips/include/asm/spinlock.h8
-rw-r--r--arch/mips/include/uapi/asm/unistd.h15
-rw-r--r--arch/mips/kernel/asm-offsets.c1
-rw-r--r--arch/mips/kernel/cps-vec.S2
-rw-r--r--arch/mips/kernel/cpu-probe.c2
-rw-r--r--arch/mips/kernel/genex.S12
-rw-r--r--arch/mips/kernel/process.c56
-rw-r--r--arch/mips/kernel/ptrace.c3
-rw-r--r--arch/mips/kernel/scall32-o32.S1
-rw-r--r--arch/mips/kernel/scall64-64.S1
-rw-r--r--arch/mips/kernel/scall64-n32.S1
-rw-r--r--arch/mips/kernel/scall64-o32.S1
-rw-r--r--arch/mips/kernel/traps.c17
-rw-r--r--arch/mips/lantiq/xway/sysctrl.c2
-rw-r--r--arch/mips/mm/c-r4k.c2
-rw-r--r--arch/mips/mm/tlbex.c25
-rw-r--r--arch/mips/ralink/rt3883.c4
-rw-r--r--arch/nios2/kernel/prom.c7
-rw-r--r--arch/nios2/kernel/setup.c3
-rw-r--r--arch/openrisc/include/asm/cmpxchg.h8
-rw-r--r--arch/openrisc/include/asm/uaccess.h2
-rw-r--r--arch/openrisc/kernel/or32_ksyms.c4
-rw-r--r--arch/openrisc/kernel/process.c1
-rw-r--r--arch/parisc/include/asm/cacheflush.h23
-rw-r--r--arch/parisc/include/asm/uaccess.h62
-rw-r--r--arch/parisc/include/uapi/asm/unistd.h3
-rw-r--r--arch/parisc/kernel/cache.c22
-rw-r--r--arch/parisc/kernel/module.c8
-rw-r--r--arch/parisc/kernel/parisc_ksyms.c10
-rw-r--r--arch/parisc/kernel/perf.c94
-rw-r--r--arch/parisc/kernel/process.c4
-rw-r--r--arch/parisc/kernel/syscall_table.S1
-rw-r--r--arch/parisc/lib/Makefile2
-rw-r--r--arch/parisc/lib/fixup.S98
-rw-r--r--arch/parisc/lib/lusercopy.S318
-rw-r--r--arch/parisc/lib/memcpy.c461
-rw-r--r--arch/parisc/mm/fault.c17
-rw-r--r--arch/powerpc/boot/zImage.lds.S1
-rw-r--r--arch/powerpc/crypto/crc32c-vpmsum_glue.c5
-rw-r--r--arch/powerpc/include/asm/bitops.h4
-rw-r--r--arch/powerpc/include/asm/mce.h108
-rw-r--r--arch/powerpc/include/asm/systbl.h1
-rw-r--r--arch/powerpc/include/asm/unistd.h2
-rw-r--r--arch/powerpc/include/uapi/asm/unistd.h1
-rw-r--r--arch/powerpc/kernel/align.c27
-rw-r--r--arch/powerpc/kernel/cputable.c3
-rw-r--r--arch/powerpc/kernel/idle_book3s.S20
-rw-r--r--arch/powerpc/kernel/mce.c88
-rw-r--r--arch/powerpc/kernel/mce_power.c237
-rw-r--r--arch/powerpc/kernel/misc_64.S4
-rw-r--r--arch/powerpc/kernel/setup_64.c9
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_hv.c4
-rw-r--r--arch/powerpc/mm/hash_native_64.c7
-rw-r--r--arch/powerpc/mm/init_64.c3
-rw-r--r--arch/powerpc/perf/core-book3s.c2
-rw-r--r--arch/powerpc/perf/isa207-common.c43
-rw-r--r--arch/powerpc/perf/isa207-common.h1
-rw-r--r--arch/powerpc/platforms/powernv/opal.c21
-rw-r--r--arch/powerpc/platforms/powernv/pci-ioda.c20
-rw-r--r--arch/powerpc/platforms/pseries/lpar.c4
-rw-r--r--arch/s390/boot/compressed/misc.c35
-rw-r--r--arch/s390/include/asm/sections.h1
-rw-r--r--arch/s390/include/asm/uaccess.h2
-rw-r--r--arch/s390/kernel/smp.c5
-rw-r--r--arch/s390/kernel/vmlinux.lds.S2
-rw-r--r--arch/s390/kvm/gaccess.c7
-rw-r--r--arch/sparc/include/asm/page_64.h3
-rw-r--r--arch/sparc/include/asm/pgtable_64.h15
-rw-r--r--arch/sparc/include/asm/processor_32.h6
-rw-r--r--arch/sparc/include/asm/processor_64.h4
-rw-r--r--arch/sparc/kernel/head_64.S4
-rw-r--r--arch/sparc/kernel/misctrap.S1
-rw-r--r--arch/sparc/kernel/ptrace_64.c2
-rw-r--r--arch/sparc/kernel/rtrap_64.S1
-rw-r--r--arch/sparc/kernel/spiterrs.S1
-rw-r--r--arch/sparc/kernel/sun4v_tlb_miss.S1
-rw-r--r--arch/sparc/kernel/urtt_fill.S1
-rw-r--r--arch/sparc/kernel/winfixup.S2
-rw-r--r--arch/sparc/lib/NG2memcpy.S4
-rw-r--r--arch/sparc/lib/NG4memcpy.S1
-rw-r--r--arch/sparc/lib/NG4memset.S1
-rw-r--r--arch/sparc/lib/NGmemcpy.S1
-rw-r--r--arch/sparc/mm/hugetlbpage.c9
-rw-r--r--arch/sparc/mm/init_64.c6
-rw-r--r--arch/sparc/mm/srmmu.c1
-rw-r--r--arch/sparc/mm/tlb.c6
-rw-r--r--arch/sparc/mm/tsb.c4
-rw-r--r--arch/x86/Makefile35
-rw-r--r--arch/x86/Makefile_32.cpu18
-rw-r--r--arch/x86/boot/compressed/error.c1
-rw-r--r--arch/x86/events/core.c25
-rw-r--r--arch/x86/include/asm/kvm_page_track.h1
-rw-r--r--arch/x86/include/asm/pgtable-3level.h3
-rw-r--r--arch/x86/include/asm/pgtable.h2
-rw-r--r--arch/x86/include/asm/timer.h2
-rw-r--r--arch/x86/include/asm/uv/uv_hub.h8
-rw-r--r--arch/x86/kernel/acpi/boot.c9
-rw-r--r--arch/x86/kernel/apic/apic.c26
-rw-r--r--arch/x86/kernel/apic/x2apic_uv_x.c3
-rw-r--r--arch/x86/kernel/cpu/intel_rdt_rdtgroup.c2
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce.c7
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce_amd.c2
-rw-r--r--arch/x86/kernel/ftrace.c6
-rw-r--r--arch/x86/kernel/head64.c1
-rw-r--r--arch/x86/kernel/nmi.c6
-rw-r--r--arch/x86/kernel/tsc.c6
-rw-r--r--arch/x86/kernel/unwind_frame.c36
-rw-r--r--arch/x86/kvm/i8259.c3
-rw-r--r--arch/x86/kvm/ioapic.c3
-rw-r--r--arch/x86/kvm/page_track.c8
-rw-r--r--arch/x86/kvm/svm.c3
-rw-r--r--arch/x86/kvm/vmx.c59
-rw-r--r--arch/x86/kvm/x86.c7
-rw-r--r--arch/x86/lib/memcpy_64.S2
-rw-r--r--arch/x86/mm/kasan_init_64.c1
-rw-r--r--arch/x86/mm/kaslr.c4
-rw-r--r--arch/x86/mm/mpx.c2
-rw-r--r--arch/x86/platform/intel-mid/device_libs/Makefile1
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_mrfld_power_btn.c82
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c2
-rw-r--r--arch/x86/platform/intel-mid/mfld.c15
-rw-r--r--arch/x86/purgatory/Makefile1
-rw-r--r--arch/xtensa/include/asm/page.h13
-rw-r--r--arch/xtensa/include/uapi/asm/unistd.h5
-rw-r--r--arch/xtensa/kernel/traps.c6
-rw-r--r--block/bio.c12
-rw-r--r--block/blk-core.c30
-rw-r--r--block/blk-mq-sched.c181
-rw-r--r--block/blk-mq-sched.h25
-rw-r--r--block/blk-mq-tag.c3
-rw-r--r--block/blk-mq.c112
-rw-r--r--block/blk-mq.h2
-rw-r--r--block/blk-stat.c4
-rw-r--r--block/blk-sysfs.c2
-rw-r--r--block/elevator.c114
-rw-r--r--crypto/af_alg.c9
-rw-r--r--crypto/algif_hash.c9
-rw-r--r--crypto/lrw.c7
-rw-r--r--crypto/xts.c7
-rw-r--r--drivers/acpi/Makefile1
-rw-r--r--drivers/acpi/acpi_platform.c8
-rw-r--r--drivers/acpi/acpi_processor.c57
-rw-r--r--drivers/acpi/apei/ghes.c1
-rw-r--r--drivers/acpi/bus.c1
-rw-r--r--drivers/acpi/glue.c12
-rw-r--r--drivers/acpi/ioapic.c6
-rw-r--r--drivers/acpi/processor_core.c133
-rw-r--r--drivers/acpi/spcr.c2
-rw-r--r--drivers/ata/ahci_qoriq.c6
-rw-r--r--drivers/ata/libata-sff.c1
-rw-r--r--drivers/ata/libata-transport.c9
-rw-r--r--drivers/auxdisplay/img-ascii-lcd.c1
-rw-r--r--drivers/base/core.c5
-rw-r--r--drivers/block/nbd.c136
-rw-r--r--drivers/bluetooth/Kconfig3
-rw-r--r--drivers/char/hw_random/amd-rng.c42
-rw-r--r--drivers/char/hw_random/geode-rng.c50
-rw-r--r--drivers/char/hw_random/omap-rng.c16
-rw-r--r--drivers/char/ppdev.c11
-rw-r--r--drivers/clk/clk.c3
-rw-r--r--drivers/clk/rockchip/clk-rk3036.c9
-rw-r--r--drivers/clk/sunxi-ng/Kconfig1
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-a64.c2
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun6i-a31.c2
-rw-r--r--drivers/clk/sunxi-ng/ccu_mp.c8
-rw-r--r--drivers/clk/sunxi-ng/ccu_nkmp.c2
-rw-r--r--drivers/clocksource/clkevt-probe.c2
-rw-r--r--drivers/clocksource/tcb_clksrc.c16
-rw-r--r--drivers/cpufreq/Kconfig.arm6
-rw-r--r--drivers/cpufreq/Makefile1
-rw-r--r--drivers/cpufreq/cpufreq.c67
-rw-r--r--drivers/cpufreq/dbx500-cpufreq.c20
-rw-r--r--drivers/cpufreq/imx6q-cpufreq.c17
-rw-r--r--drivers/cpufreq/mt8173-cpufreq.c23
-rw-r--r--drivers/cpufreq/qoriq-cpufreq.c24
-rw-r--r--drivers/cpufreq/tegra186-cpufreq.c275
-rw-r--r--drivers/cpuidle/cpuidle-powernv.c18
-rw-r--r--drivers/cpuidle/sysfs.c12
-rw-r--r--drivers/crypto/ccp/ccp-dev-v5.c1
-rw-r--r--drivers/crypto/ccp/ccp-dev.c5
-rw-r--r--drivers/crypto/ccp/ccp-dev.h5
-rw-r--r--drivers/crypto/ccp/ccp-dmaengine.c42
-rw-r--r--drivers/crypto/s5p-sss.c132
-rw-r--r--drivers/dax/dax.c33
-rw-r--r--drivers/dma/bcm2835-dma.c5
-rw-r--r--drivers/dma/dmaengine.c2
-rw-r--r--drivers/edac/Kconfig10
-rw-r--r--drivers/edac/Makefile1
-rw-r--r--drivers/edac/i5000_edac.c2
-rw-r--r--drivers/edac/i5400_edac.c5
-rw-r--r--drivers/edac/pnd2_edac.c1546
-rw-r--r--drivers/edac/pnd2_edac.h301
-rw-r--r--drivers/edac/xgene_edac.c2
-rw-r--r--drivers/extcon/Kconfig2
-rw-r--r--drivers/extcon/extcon-intel-int3496.c39
-rw-r--r--drivers/firmware/efi/efi.c1
-rw-r--r--drivers/firmware/efi/esrt.c2
-rw-r--r--drivers/gpio/gpio-altera-a10sr.c2
-rw-r--r--drivers/gpio/gpio-altera.c26
-rw-r--r--drivers/gpio/gpio-mcp23s08.c65
-rw-r--r--drivers/gpio/gpio-mockup.c7
-rw-r--r--drivers/gpio/gpio-xgene.c13
-rw-r--r--drivers/gpio/gpiolib-acpi.c10
-rw-r--r--drivers/gpu/drm/amd/acp/Makefile2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_dpm.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.c2
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c2
-rw-r--r--drivers/gpu/drm/arm/malidp_crtc.c3
-rw-r--r--drivers/gpu/drm/arm/malidp_hw.c2
-rw-r--r--drivers/gpu/drm/arm/malidp_planes.c18
-rw-r--r--drivers/gpu/drm/arm/malidp_regs.h1
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c6
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos5433_drm_decon.c87
-rw-r--r--drivers/gpu/drm/exynos/exynos7_drm_decon.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.c29
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.h2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dsi.c24
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimc.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c22
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gsc.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_ipp.c22
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_rotator.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_vidi.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c1
-rw-r--r--drivers/gpu/drm/i915/gvt/aperture_gm.c8
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.c109
-rw-r--r--drivers/gpu/drm/i915/gvt/debug.h8
-rw-r--r--drivers/gpu/drm/i915/gvt/edid.c16
-rw-r--r--drivers/gpu/drm/i915/gvt/execlist.c29
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.c82
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.h2
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c55
-rw-r--r--drivers/gpu/drm/i915/gvt/kvmgt.c39
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio.c38
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio.h3
-rw-r--r--drivers/gpu/drm/i915/gvt/opregion.c10
-rw-r--r--drivers/gpu/drm/i915/gvt/render.c4
-rw-r--r--drivers/gpu/drm/i915/gvt/sched_policy.c4
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.c113
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c7
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h6
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c121
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c1
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.h3
-rw-r--r--drivers/gpu/drm/i915/i915_gem_evict.c8
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c52
-rw-r--r--drivers/gpu/drm/i915/i915_gem_object.h3
-rw-r--r--drivers/gpu/drm/i915/i915_gem_shrinker.c2
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c83
-rw-r--r--drivers/gpu/drm/i915/i915_vma.c57
-rw-r--r--drivers/gpu/drm/i915/intel_csr.c1
-rw-r--r--drivers/gpu/drm/i915/intel_display.c67
-rw-r--r--drivers/gpu/drm/i915/intel_engine_cs.c2
-rw-r--r--drivers/gpu/drm/i915/intel_fbdev.c10
-rw-r--r--drivers/gpu/drm/i915/intel_gvt.c5
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c24
-rw-r--r--drivers/gpu/drm/i915/intel_hotplug.c14
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c3
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c23
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c2
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h3
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.c3
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c13
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.c6
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c29
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_manager.c2
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_audio.c7
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h3
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c6
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c3
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c4
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c10
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_crtc.c37
-rw-r--r--drivers/gpu/drm/ttm/ttm_object.c14
-rw-r--r--drivers/gpu/drm/vc4/vc4_crtc.c13
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.c79
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_surface.c31
-rw-r--r--drivers/hid/Kconfig5
-rw-r--r--drivers/hid/hid-chicony.c1
-rw-r--r--drivers/hid/hid-core.c3
-rw-r--r--drivers/hid/hid-corsair.c47
-rw-r--r--drivers/hid/hid-ids.h5
-rw-r--r--drivers/hid/hid-sony.c2
-rw-r--r--drivers/hid/hid-xinmo.c1
-rw-r--r--drivers/hid/usbhid/hid-quirks.c3
-rw-r--r--drivers/hid/wacom_sys.c22
-rw-r--r--drivers/hid/wacom_wac.c10
-rw-r--r--drivers/hv/channel.c25
-rw-r--r--drivers/hv/channel_mgmt.c27
-rw-r--r--drivers/hv/hv_fcopy.c4
-rw-r--r--drivers/hv/hv_kvp.c4
-rw-r--r--drivers/hv/hv_snapshot.c4
-rw-r--r--drivers/hv/hv_util.c2
-rw-r--r--drivers/hv/hv_utils_transport.c12
-rw-r--r--drivers/hv/hv_utils_transport.h1
-rw-r--r--drivers/hv/vmbus_drv.c6
-rw-r--r--drivers/hwmon/asus_atk0110.c3
-rw-r--r--drivers/hwmon/it87.c24
-rw-r--r--drivers/hwmon/max31790.c2
-rw-r--r--drivers/hwtracing/intel_th/core.c4
-rw-r--r--drivers/hwtracing/intel_th/pci.c10
-rw-r--r--drivers/i2c/muxes/i2c-mux-pca954x.c34
-rw-r--r--drivers/iio/accel/hid-sensor-accel-3d.c3
-rw-r--r--drivers/iio/adc/ti_am335x_adc.c13
-rw-r--r--drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c4
-rw-r--r--drivers/iio/common/hid-sensors/hid-sensor-attributes.c10
-rw-r--r--drivers/iio/common/hid-sensors/hid-sensor-trigger.c6
-rw-r--r--drivers/iio/gyro/bmg160_core.c12
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c4
-rw-r--r--drivers/iio/industrialio-core.c7
-rw-r--r--drivers/iio/magnetometer/ak8974.c4
-rw-r--r--drivers/iio/pressure/st_pressure_core.c1
-rw-r--r--drivers/infiniband/core/cq.c10
-rw-r--r--drivers/infiniband/core/device.c29
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_utils.c8
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7322.c2
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma.h3
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h2
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c17
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c42
-rw-r--r--drivers/infiniband/sw/rdmavt/mmap.c4
-rw-r--r--drivers/infiniband/sw/rxe/Kconfig2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mmap.c4
-rw-r--r--drivers/infiniband/sw/rxe/rxe_req.c2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_resp.c9
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.h2
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c8
-rw-r--r--drivers/input/joystick/iforce/iforce-usb.c3
-rw-r--r--drivers/input/misc/cm109.c4
-rw-r--r--drivers/input/misc/ims-pcu.c4
-rw-r--r--drivers/input/misc/yealink.c4
-rw-r--r--drivers/input/mouse/alps.c72
-rw-r--r--drivers/input/mouse/alps.h11
-rw-r--r--drivers/input/mouse/elan_i2c_core.c20
-rw-r--r--drivers/input/rmi4/rmi_f30.c4
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h14
-rw-r--r--drivers/input/tablet/hanwang.c3
-rw-r--r--drivers/input/tablet/kbtab.c3
-rw-r--r--drivers/input/touchscreen/sur40.c3
-rw-r--r--drivers/iommu/amd_iommu.c2
-rw-r--r--drivers/iommu/arm-smmu-v3.c2
-rw-r--r--drivers/iommu/arm-smmu.c2
-rw-r--r--drivers/iommu/exynos-iommu.c8
-rw-r--r--drivers/iommu/intel-iommu.c4
-rw-r--r--drivers/iommu/io-pgtable-arm-v7s.c6
-rw-r--r--drivers/iommu/io-pgtable-arm.c6
-rw-r--r--drivers/iommu/iommu.c5
-rw-r--r--drivers/irqchip/Kconfig1
-rw-r--r--drivers/irqchip/irq-mips-gic.c4
-rw-r--r--drivers/isdn/capi/kcapi.c1
-rw-r--r--drivers/isdn/gigaset/bas-gigaset.c3
-rw-r--r--drivers/macintosh/macio_asic.c1
-rw-r--r--drivers/md/dm-cache-metadata.c8
-rw-r--r--drivers/md/dm-raid.c2
-rw-r--r--drivers/md/dm-rq.c1
-rw-r--r--drivers/md/dm-verity-fec.c18
-rw-r--r--drivers/md/dm-verity-fec.h4
-rw-r--r--drivers/md/dm.c29
-rw-r--r--drivers/md/md-cluster.c2
-rw-r--r--drivers/md/md.c27
-rw-r--r--drivers/md/md.h6
-rw-r--r--drivers/md/raid1.c29
-rw-r--r--drivers/md/raid10.c44
-rw-r--r--drivers/md/raid5.c5
-rw-r--r--drivers/media/platform/coda/imx-vdoa.c2
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-core.c2
-rw-r--r--drivers/media/platform/sti/bdisp/bdisp-v4l2.c2
-rw-r--r--drivers/media/usb/dvb-usb/dvb-usb-firmware.c22
-rw-r--r--drivers/memory/omap-gpmc.c4
-rw-r--r--drivers/misc/cxl/pci.c13
-rw-r--r--drivers/misc/mei/bus-fixup.c14
-rw-r--r--drivers/misc/mei/init.c8
-rw-r--r--drivers/misc/vmw_vmci/vmci_guest.c4
-rw-r--r--drivers/mmc/core/block.c7
-rw-r--r--drivers/mmc/core/mmc.c2
-rw-r--r--drivers/mmc/host/mtk-sd.c4
-rw-r--r--drivers/mmc/host/sdhci-of-arasan.c14
-rw-r--r--drivers/mmc/host/sdhci-of-at91.c30
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c4
-rw-r--r--drivers/mmc/host/sdhci.c10
-rw-r--r--drivers/mmc/host/ushc.c3
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-common.h30
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-dev.c20
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c112
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_main.c6
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.c23
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ring.c1
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ring.h3
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c4
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h1
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c4
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h6
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c54
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c24
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c40
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c55
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c2
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c212
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.h16
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c15
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc.c10
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad_debugfs.c2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_main.c110
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_vf_main.c104
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_config.h6
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_droq.c17
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_droq.h4
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_main.h42
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_network.h43
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic.h1
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c12
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.c184
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.h4
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.c64
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.h1
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c9
-rw-r--r--drivers/net/ethernet/ezchip/nps_enet.c4
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c11
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c6
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c28
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c25
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c45
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.h1
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Kconfig1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c105
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c4
-rw-r--r--drivers/net/ethernet/moxa/moxart_ether.c20
-rw-r--r--drivers/net/ethernet/moxa/moxart_ether.h1
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c3
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_cxt.c3
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c5
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_iscsi.c31
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ll2.c13
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ooo.c2
-rw-r--r--drivers/net/ethernet/rocker/rocker_ofdpa.c11
-rw-r--r--drivers/net/ethernet/sfc/efx.c2
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c47
-rw-r--r--drivers/net/ethernet/ti/Kconfig10
-rw-r--r--drivers/net/ethernet/ti/Makefile2
-rw-r--r--drivers/net/ethernet/ti/cpsw.c16
-rw-r--r--drivers/net/fjes/fjes_main.c78
-rw-r--r--drivers/net/hyperv/hyperv_net.h3
-rw-r--r--drivers/net/hyperv/netvsc.c15
-rw-r--r--drivers/net/hyperv/netvsc_drv.c11
-rw-r--r--drivers/net/irda/vlsi_ir.c8
-rw-r--r--drivers/net/phy/marvell.c15
-rw-r--r--drivers/net/phy/mdio-boardinfo.c1
-rw-r--r--drivers/net/phy/phy.c2
-rw-r--r--drivers/net/phy/phy_device.c2
-rw-r--r--drivers/net/phy/spi_ks8995.c3
-rw-r--r--drivers/net/team/team.c1
-rw-r--r--drivers/net/tun.c21
-rw-r--r--drivers/net/usb/cdc_ether.c15
-rw-r--r--drivers/net/usb/qmi_wwan.c6
-rw-r--r--drivers/net/usb/r8152.c45
-rw-r--r--drivers/net/vrf.c7
-rw-r--r--drivers/net/vxlan.c73
-rw-r--r--drivers/net/wan/fsl_ucc_hdlc.c4
-rw-r--r--drivers/net/wimax/i2400m/usb.c3
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c20
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c48
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.c11
-rw-r--r--drivers/net/wireless/marvell/mwifiex/pcie.c38
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/base.c6
-rw-r--r--drivers/net/xen-netback/interface.c26
-rw-r--r--drivers/net/xen-netback/netback.c2
-rw-r--r--drivers/net/xen-netback/xenbus.c20
-rw-r--r--drivers/nvme/host/core.c2
-rw-r--r--drivers/nvme/host/rdma.c28
-rw-r--r--drivers/nvme/target/admin-cmd.c2
-rw-r--r--drivers/nvme/target/core.c11
-rw-r--r--drivers/nvme/target/io-cmd.c4
-rw-r--r--drivers/nvme/target/loop.c90
-rw-r--r--drivers/nvme/target/nvmet.h1
-rw-r--r--drivers/nvme/target/rdma.c8
-rw-r--r--drivers/parport/share.c6
-rw-r--r--drivers/pci/dwc/Kconfig1
-rw-r--r--drivers/pci/dwc/pcie-artpec6.c4
-rw-r--r--drivers/pci/dwc/pcie-designware-plat.c4
-rw-r--r--drivers/pci/host/pci-thunder-pem.c64
-rw-r--r--drivers/pci/host/pcie-iproc-bcma.c24
-rw-r--r--drivers/pci/host/pcie-iproc-platform.c19
-rw-r--r--drivers/pci/host/pcie-iproc.h1
-rw-r--r--drivers/phy/Kconfig9
-rw-r--r--drivers/phy/Makefile1
-rw-r--r--drivers/phy/phy-bcm-nsp-usb3.c177
-rw-r--r--drivers/phy/phy-exynos-pcie.c4
-rw-r--r--drivers/pinctrl/core.c97
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx.c2
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-gxbb.c4
-rw-r--r--drivers/pinctrl/pinctrl-single.c2
-rw-r--r--drivers/pinctrl/pinctrl-st.c30
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq4019.c30
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.c4
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.c13
-rw-r--r--drivers/pinctrl/sh-pfc/pinctrl.c11
-rw-r--r--drivers/pinctrl/ti/Kconfig2
-rw-r--r--drivers/pinctrl/ti/pinctrl-ti-iodelay.c2
-rw-r--r--drivers/platform/x86/asus-nb-wmi.c49
-rw-r--r--drivers/platform/x86/asus-wmi.c22
-rw-r--r--drivers/platform/x86/asus-wmi.h1
-rw-r--r--drivers/platform/x86/fujitsu-laptop.c451
-rw-r--r--drivers/ptp/ptp_kvm.c5
-rw-r--r--drivers/rapidio/devices/tsi721.c4
-rw-r--r--drivers/rapidio/devices/tsi721.h4
-rw-r--r--drivers/remoteproc/Kconfig6
-rw-r--r--drivers/s390/crypto/pkey_api.c53
-rw-r--r--drivers/s390/net/qeth_core.h3
-rw-r--r--drivers/s390/net/qeth_core_main.c5
-rw-r--r--drivers/s390/net/qeth_l2_main.c5
-rw-r--r--drivers/s390/net/qeth_l3_main.c20
-rw-r--r--drivers/scsi/Kconfig5
-rw-r--r--drivers/scsi/aacraid/commsup.c14
-rw-r--r--drivers/scsi/aacraid/src.c2
-rw-r--r--drivers/scsi/device_handler/scsi_dh_alua.c38
-rw-r--r--drivers/scsi/hpsa.c54
-rw-r--r--drivers/scsi/hpsa.h1
-rw-r--r--drivers/scsi/hpsa_cmd.h2
-rw-r--r--drivers/scsi/libiscsi.c26
-rw-r--r--drivers/scsi/libsas/sas_ata.c2
-rw-r--r--drivers/scsi/lpfc/lpfc.h4
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c13
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h4
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c22
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.h22
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c25
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c24
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h4
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c135
-rw-r--r--drivers/scsi/lpfc/lpfc_mem.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.c107
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.h1
-rw-r--r--drivers/scsi/lpfc/lpfc_nvmet.c47
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c4
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c68
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h6
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h4
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c17
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c4
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.h3
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c19
-rw-r--r--drivers/scsi/qedf/qedf_dbg.h13
-rw-r--r--drivers/scsi/qedf/qedf_fip.c2
-rw-r--r--drivers/scsi/qedf/qedf_io.c4
-rw-r--r--drivers/scsi/qedf/qedf_main.c4
-rw-r--r--drivers/scsi/qedi/qedi_debugfs.c16
-rw-r--r--drivers/scsi/qedi/qedi_fw.c4
-rw-r--r--drivers/scsi/qedi/qedi_gbl.h8
-rw-r--r--drivers/scsi/qedi/qedi_iscsi.c8
-rw-r--r--drivers/scsi/qedi/qedi_main.c3
-rw-r--r--drivers/scsi/qla2xxx/Kconfig1
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c12
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.h1
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h56
-rw-r--r--drivers/scsi/qla2xxx/qla_dfs.c107
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h18
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c85
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c13
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c41
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c304
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c14
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c26
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c748
-rw-r--r--drivers/scsi/qla2xxx/qla_target.h39
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h6
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c49
-rw-r--r--drivers/scsi/scsi_lib.c20
-rw-r--r--drivers/scsi/scsi_priv.h3
-rw-r--r--drivers/scsi/sd.c17
-rw-r--r--drivers/scsi/sg.c2
-rw-r--r--drivers/scsi/storvsc_drv.c27
-rw-r--r--drivers/scsi/ufs/ufs.h22
-rw-r--r--drivers/scsi/ufs/ufshcd-pltfrm.c4
-rw-r--r--drivers/scsi/ufs/ufshcd.c235
-rw-r--r--drivers/scsi/ufs/ufshcd.h15
-rw-r--r--drivers/scsi/vmw_pvscsi.c2
-rw-r--r--drivers/staging/android/ashmem.c1
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-socket.c4
-rw-r--r--drivers/target/target_core_alua.c82
-rw-r--r--drivers/target/target_core_configfs.c4
-rw-r--r--drivers/target/target_core_pscsi.c50
-rw-r--r--drivers/target/target_core_sbc.c10
-rw-r--r--drivers/target/target_core_tpg.c3
-rw-r--r--drivers/target/target_core_transport.c3
-rw-r--r--drivers/target/target_core_user.c152
-rw-r--r--drivers/thermal/Kconfig12
-rw-r--r--drivers/thermal/Makefile1
-rw-r--r--drivers/thermal/cpu_cooling.c39
-rw-r--r--drivers/thermal/db8500_cpufreq_cooling.c105
-rw-r--r--drivers/thermal/devfreq_cooling.c14
-rw-r--r--drivers/tty/serial/8250/8250_dw.c9
-rw-r--r--drivers/tty/serial/8250/Kconfig8
-rw-r--r--drivers/tty/serial/amba-pl011.c25
-rw-r--r--drivers/tty/serial/atmel_serial.c8
-rw-r--r--drivers/tty/serial/mxs-auart.c2
-rw-r--r--drivers/tty/serial/st-asc.c11
-rw-r--r--drivers/tty/tty_ldisc.c92
-rw-r--r--drivers/tty/vt/keyboard.c1
-rw-r--r--drivers/usb/class/usbtmc.c18
-rw-r--r--drivers/usb/core/config.c10
-rw-r--r--drivers/usb/core/hcd.c7
-rw-r--r--drivers/usb/core/hub.c2
-rw-r--r--drivers/usb/core/quirks.c8
-rw-r--r--drivers/usb/dwc3/gadget.c21
-rw-r--r--drivers/usb/gadget/function/f_acm.c4
-rw-r--r--drivers/usb/gadget/function/f_hid.c8
-rw-r--r--drivers/usb/gadget/function/f_uvc.c10
-rw-r--r--drivers/usb/gadget/udc/pch_udc.c1
-rw-r--r--drivers/usb/host/xhci-plat.c1
-rw-r--r--drivers/usb/host/xhci-ring.c3
-rw-r--r--drivers/usb/host/xhci.c43
-rw-r--r--drivers/usb/misc/idmouse.c3
-rw-r--r--drivers/usb/misc/lvstest.c4
-rw-r--r--drivers/usb/misc/uss720.c5
-rw-r--r--drivers/usb/musb/musb_core.c2
-rw-r--r--drivers/usb/musb/musb_cppi41.c23
-rw-r--r--drivers/usb/musb/musb_dsps.c5
-rw-r--r--drivers/usb/phy/phy-isp1301.c2
-rw-r--r--drivers/usb/serial/option.c17
-rw-r--r--drivers/usb/serial/qcserial.c2
-rw-r--r--drivers/usb/wusbcore/wa-hc.c3
-rw-r--r--drivers/uwb/hwa-rc.c3
-rw-r--r--drivers/uwb/i1480/dfu/usb.c3
-rw-r--r--drivers/vfio/vfio.c8
-rw-r--r--drivers/vfio/vfio_iommu_type1.c7
-rw-r--r--drivers/vhost/vsock.c41
-rw-r--r--drivers/virtio/virtio_balloon.c19
-rw-r--r--drivers/virtio/virtio_pci_common.c9
-rw-r--r--drivers/xen/gntdev.c11
-rw-r--r--drivers/xen/xen-acpi-processor.c36
-rw-r--r--drivers/xen/xenbus/xenbus_dev_frontend.c4
-rw-r--r--fs/afs/callback.c7
-rw-r--r--fs/afs/cmservice.c11
-rw-r--r--fs/afs/file.c20
-rw-r--r--fs/afs/fsclient.c77
-rw-r--r--fs/afs/inode.c42
-rw-r--r--fs/afs/internal.h23
-rw-r--r--fs/afs/misc.c2
-rw-r--r--fs/afs/mntpt.c53
-rw-r--r--fs/afs/rxrpc.c149
-rw-r--r--fs/afs/security.c9
-rw-r--r--fs/afs/server.c6
-rw-r--r--fs/afs/vlocation.c16
-rw-r--r--fs/afs/write.c76
-rw-r--r--fs/btrfs/ctree.h2
-rw-r--r--fs/btrfs/disk-io.c2
-rw-r--r--fs/btrfs/extent_io.c49
-rw-r--r--fs/btrfs/inode.c20
-rw-r--r--fs/btrfs/qgroup.c10
-rw-r--r--fs/btrfs/send.c7
-rw-r--r--fs/cifs/cifsfs.c87
-rw-r--r--fs/cifs/cifsfs.h5
-rw-r--r--fs/cifs/cifsglob.h18
-rw-r--r--fs/cifs/cifsproto.h3
-rw-r--r--fs/cifs/cifssmb.c11
-rw-r--r--fs/cifs/connect.c13
-rw-r--r--fs/cifs/ioctl.c66
-rw-r--r--fs/cifs/smb2misc.c46
-rw-r--r--fs/cifs/smb2ops.c41
-rw-r--r--fs/cifs/smb2pdu.c4
-rw-r--r--fs/cifs/smb2proto.h7
-rw-r--r--fs/cifs/smb2transport.c55
-rw-r--r--fs/cifs/transport.c2
-rw-r--r--fs/crypto/crypto.c10
-rw-r--r--fs/crypto/fname.c2
-rw-r--r--fs/crypto/fscrypt_private.h4
-rw-r--r--fs/crypto/keyinfo.c52
-rw-r--r--fs/crypto/policy.c7
-rw-r--r--fs/dax.c35
-rw-r--r--fs/dlm/lowcomms.c2
-rw-r--r--fs/ext4/ext4.h1
-rw-r--r--fs/ext4/file.c2
-rw-r--r--fs/ext4/inline.c5
-rw-r--r--fs/ext4/inode.c43
-rw-r--r--fs/ext4/move_extent.c2
-rw-r--r--fs/ext4/namei.c2
-rw-r--r--fs/ext4/super.c10
-rw-r--r--fs/ext4/symlink.c3
-rw-r--r--fs/ext4/xattr.c65
-rw-r--r--fs/f2fs/debug.c1
-rw-r--r--fs/f2fs/dir.c2
-rw-r--r--fs/f2fs/f2fs.h2
-rw-r--r--fs/f2fs/node.c163
-rw-r--r--fs/f2fs/segment.c6
-rw-r--r--fs/fs-writeback.c35
-rw-r--r--fs/gfs2/incore.h2
-rw-r--r--fs/hugetlbfs/inode.c25
-rw-r--r--fs/jbd2/journal.c22
-rw-r--r--fs/jbd2/revoke.c1
-rw-r--r--fs/kernfs/file.c3
-rw-r--r--fs/nfs/callback.c4
-rw-r--r--fs/nfs/client.c25
-rw-r--r--fs/nfs/dir.c9
-rw-r--r--fs/nfs/filelayout/filelayout.c151
-rw-r--r--fs/nfs/filelayout/filelayout.h19
-rw-r--r--fs/nfs/filelayout/filelayoutdev.c8
-rw-r--r--fs/nfs/flexfilelayout/flexfilelayout.h14
-rw-r--r--fs/nfs/flexfilelayout/flexfilelayoutdev.c9
-rw-r--r--fs/nfs/internal.h2
-rw-r--r--fs/nfs/nfs4client.c4
-rw-r--r--fs/nfs/nfs4proc.c20
-rw-r--r--fs/nfs/nfs4xdr.c2
-rw-r--r--fs/nfs/pnfs.h2
-rw-r--r--fs/nfs/pnfs_nfs.c31
-rw-r--r--fs/nfs/write.c6
-rw-r--r--fs/nfsd/nfsctl.c43
-rw-r--r--fs/nfsd/nfsproc.c1
-rw-r--r--fs/nfsd/nfssvc.c28
-rw-r--r--fs/ocfs2/cluster/tcp.c2
-rw-r--r--fs/orangefs/super.c9
-rw-r--r--fs/proc/proc_sysctl.c1
-rw-r--r--fs/stat.c86
-rw-r--r--fs/sysfs/file.c6
-rw-r--r--fs/userfaultfd.c2
-rw-r--r--fs/xfs/libxfs/xfs_dir2_priv.h1
-rw-r--r--fs/xfs/libxfs/xfs_dir2_sf.c106
-rw-r--r--fs/xfs/libxfs/xfs_inode_fork.c13
-rw-r--r--fs/xfs/xfs_bmap_util.c10
-rw-r--r--fs/xfs/xfs_dir2_readdir.c11
-rw-r--r--fs/xfs/xfs_inode.c7
-rw-r--r--fs/xfs/xfs_iops.c14
-rw-r--r--fs/xfs/xfs_itable.c2
-rw-r--r--include/asm-generic/sections.h6
-rw-r--r--include/asm-generic/vmlinux.lds.h6
-rw-r--r--include/crypto/if_alg.h2
-rw-r--r--include/drm/ttm/ttm_object.h5
-rw-r--r--include/kvm/arm_vgic.h1
-rw-r--r--include/linux/acpi.h5
-rw-r--r--include/linux/blk-mq.h2
-rw-r--r--include/linux/blkdev.h1
-rw-r--r--include/linux/ccp.h2
-rw-r--r--include/linux/clockchips.h2
-rw-r--r--include/linux/dccp.h1
-rw-r--r--include/linux/device.h1
-rw-r--r--include/linux/elevator.h2
-rw-r--r--include/linux/errqueue.h2
-rw-r--r--include/linux/filter.h16
-rw-r--r--include/linux/fscrypt_common.h1
-rw-r--r--include/linux/gpio/consumer.h16
-rw-r--r--include/linux/hwmon.h1
-rw-r--r--include/linux/hyperv.h10
-rw-r--r--include/linux/iio/sw_device.h2
-rw-r--r--include/linux/iommu.h18
-rw-r--r--include/linux/irqchip/arm-gic.h3
-rw-r--r--include/linux/kasan.h4
-rw-r--r--include/linux/kvm_host.h4
-rw-r--r--include/linux/list_nulls.h5
-rw-r--r--include/linux/memcontrol.h6
-rw-r--r--include/linux/mfd/cros_ec.h3
-rw-r--r--include/linux/mlx4/device.h1
-rw-r--r--include/linux/mm.h2
-rw-r--r--include/linux/net.h2
-rw-r--r--include/linux/nvme.h16
-rw-r--r--include/linux/omap-gpmc.h16
-rw-r--r--include/linux/phy.h4
-rw-r--r--include/linux/pinctrl/pinctrl.h3
-rw-r--r--include/linux/rculist_nulls.h14
-rw-r--r--include/linux/reset.h9
-rw-r--r--include/linux/sched/clock.h13
-rw-r--r--include/linux/stat.h1
-rw-r--r--include/linux/usb/quirks.h6
-rw-r--r--include/linux/virtio_vsock.h3
-rw-r--r--include/net/af_vsock.h3
-rw-r--r--include/net/inet_common.h3
-rw-r--r--include/net/inet_connection_sock.h2
-rw-r--r--include/net/netfilter/nf_conntrack.h2
-rw-r--r--include/net/netfilter/nf_tables.h30
-rw-r--r--include/net/netfilter/nf_tables_ipv6.h6
-rw-r--r--include/net/sctp/sctp.h22
-rw-r--r--include/net/sctp/structs.h19
-rw-r--r--include/net/sock.h9
-rw-r--r--include/rdma/ib_verbs.h30
-rw-r--r--include/scsi/libiscsi.h1
-rw-r--r--include/scsi/scsi_device.h4
-rw-r--r--include/target/target_core_backend.h7
-rw-r--r--include/target/target_core_base.h2
-rw-r--r--include/uapi/asm-generic/unistd.h4
-rw-r--r--include/uapi/drm/omap_drm.h38
-rw-r--r--include/uapi/linux/btrfs.h27
-rw-r--r--include/uapi/linux/packet_diag.h2
-rw-r--r--include/uapi/linux/stat.h5
-rw-r--r--include/uapi/rdma/mlx5-abi.h3
-rw-r--r--include/video/exynos5433_decon.h12
-rw-r--r--init/main.c2
-rw-r--r--kernel/audit.c639
-rw-r--r--kernel/audit.h15
-rw-r--r--kernel/auditsc.c29
-rw-r--r--kernel/bpf/hashtab.c253
-rw-r--r--kernel/bpf/lpm_trie.c6
-rw-r--r--kernel/bpf/verifier.c64
-rw-r--r--kernel/cgroup/cgroup-v1.c2
-rw-r--r--kernel/cgroup/pids.c2
-rw-r--r--kernel/cpu.c28
-rw-r--r--kernel/events/core.c64
-rw-r--r--kernel/futex.c22
-rw-r--r--kernel/locking/rwsem-spinlock.c16
-rw-r--r--kernel/memremap.c4
-rw-r--r--kernel/padata.c5
-rw-r--r--kernel/ptrace.c14
-rw-r--r--kernel/sched/clock.c46
-rw-r--r--kernel/sched/cpufreq_schedutil.c20
-rw-r--r--kernel/sched/deadline.c63
-rw-r--r--kernel/sched/loadavg.c20
-rw-r--r--kernel/sysctl.c3
-rw-r--r--kernel/trace/ring_buffer.c8
-rw-r--r--kernel/workqueue.c1
-rw-r--r--lib/syscall.c1
-rw-r--r--lib/test_kasan.c10
-rw-r--r--mm/gup.c2
-rw-r--r--mm/huge_memory.c12
-rw-r--r--mm/hugetlb.c10
-rw-r--r--mm/internal.h7
-rw-r--r--mm/kasan/kasan.h5
-rw-r--r--mm/kasan/report.c36
-rw-r--r--mm/kmemleak.c2
-rw-r--r--mm/memory_hotplug.c6
-rw-r--r--mm/mempolicy.c20
-rw-r--r--mm/migrate.c7
-rw-r--r--mm/page_alloc.c11
-rw-r--r--mm/page_vma_mapped.c15
-rw-r--r--mm/percpu-vm.c7
-rw-r--r--mm/percpu.c5
-rw-r--r--mm/rmap.c4
-rw-r--r--mm/swap.c27
-rw-r--r--mm/swap_cgroup.c2
-rw-r--r--mm/swap_slots.c2
-rw-r--r--mm/vmalloc.c3
-rw-r--r--mm/vmstat.c19
-rw-r--r--mm/workingset.c2
-rw-r--r--mm/z3fold.c1
-rw-r--r--net/atm/svc.c5
-rw-r--r--net/ax25/af_ax25.c3
-rw-r--r--net/batman-adv/bat_iv_ogm.c11
-rw-r--r--net/batman-adv/bat_v.c14
-rw-r--r--net/batman-adv/fragmentation.c20
-rw-r--r--net/batman-adv/gateway_common.c5
-rw-r--r--net/batman-adv/soft-interface.c1
-rw-r--r--net/batman-adv/types.h2
-rw-r--r--net/bluetooth/l2cap_sock.c2
-rw-r--r--net/bluetooth/rfcomm/sock.c3
-rw-r--r--net/bluetooth/sco.c2
-rw-r--r--net/bridge/br_fdb.c2
-rw-r--r--net/bridge/br_input.c1
-rw-r--r--net/bridge/br_netfilter_hooks.c33
-rw-r--r--net/bridge/br_private.h9
-rw-r--r--net/ceph/messenger.c6
-rw-r--r--net/core/dev.c1
-rw-r--r--net/core/flow_dissector.c2
-rw-r--r--net/core/neighbour.c3
-rw-r--r--net/core/net-sysfs.c6
-rw-r--r--net/core/netclassid_cgroup.c32
-rw-r--r--net/core/secure_seq.c31
-rw-r--r--net/core/skbuff.c55
-rw-r--r--net/core/sock.c122
-rw-r--r--net/core/sysctl_net_core.c6
-rw-r--r--net/dccp/ccids/ccid2.c1
-rw-r--r--net/dccp/ipv4.c3
-rw-r--r--net/dccp/ipv6.c8
-rw-r--r--net/dccp/minisocks.c24
-rw-r--r--net/decnet/af_decnet.c5
-rw-r--r--net/ipv4/af_inet.c9
-rw-r--r--net/ipv4/fib_frontend.c3
-rw-r--r--net/ipv4/inet_connection_sock.c2
-rw-r--r--net/ipv4/ip_fragment.c25
-rw-r--r--net/ipv4/ip_output.c2
-rw-r--r--net/ipv4/ipconfig.c2
-rw-r--r--net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c4
-rw-r--r--net/ipv4/netfilter/nf_nat_l3proto_ipv4.c5
-rw-r--r--net/ipv4/netfilter/nf_nat_snmp_basic.c20
-rw-r--r--net/ipv4/netfilter/nft_masq_ipv4.c8
-rw-r--r--net/ipv4/netfilter/nft_redir_ipv4.c8
-rw-r--r--net/ipv4/ping.c5
-rw-r--r--net/ipv4/tcp.c3
-rw-r--r--net/ipv4/tcp_input.c43
-rw-r--r--net/ipv4/tcp_ipv4.c10
-rw-r--r--net/ipv4/tcp_minisocks.c1
-rw-r--r--net/ipv4/tcp_recovery.c3
-rw-r--r--net/ipv4/tcp_timer.c6
-rw-r--r--net/ipv6/af_inet6.c10
-rw-r--r--net/ipv6/ip6_fib.c2
-rw-r--r--net/ipv6/ip6_offload.c4
-rw-r--r--net/ipv6/ip6_output.c9
-rw-r--r--net/ipv6/ip6_vti.c8
-rw-r--r--net/ipv6/netfilter/nft_masq_ipv6.c8
-rw-r--r--net/ipv6/netfilter/nft_redir_ipv6.c8
-rw-r--r--net/ipv6/route.c13
-rw-r--r--net/ipv6/tcp_ipv6.c8
-rw-r--r--net/ipv6/udp.c2
-rw-r--r--net/irda/af_irda.c5
-rw-r--r--net/iucv/af_iucv.c2
-rw-r--r--net/kcm/kcmsock.c6
-rw-r--r--net/l2tp/l2tp_core.c160
-rw-r--r--net/l2tp/l2tp_core.h9
-rw-r--r--net/l2tp/l2tp_debugfs.c10
-rw-r--r--net/l2tp/l2tp_eth.c10
-rw-r--r--net/l2tp/l2tp_ip.c22
-rw-r--r--net/l2tp/l2tp_ip6.c23
-rw-r--r--net/l2tp/l2tp_netlink.c52
-rw-r--r--net/l2tp/l2tp_ppp.c94
-rw-r--r--net/llc/af_llc.c4
-rw-r--r--net/mac80211/iface.c3
-rw-r--r--net/mpls/af_mpls.c13
-rw-r--r--net/netfilter/nf_conntrack_core.c6
-rw-r--r--net/netfilter/nf_conntrack_ecache.c2
-rw-r--r--net/netfilter/nf_conntrack_extend.c13
-rw-r--r--net/netfilter/nf_conntrack_netlink.c1
-rw-r--r--net/netfilter/nf_nat_core.c2
-rw-r--r--net/netfilter/nf_nat_proto_sctp.c13
-rw-r--r--net/netfilter/nf_tables_api.c4
-rw-r--r--net/netfilter/nfnetlink_cthelper.c287
-rw-r--r--net/netfilter/nfnetlink_cttimeout.c2
-rw-r--r--net/netfilter/nfnetlink_queue.c9
-rw-r--r--net/netfilter/nft_ct.c21
-rw-r--r--net/netfilter/nft_meta.c40
-rw-r--r--net/netfilter/nft_nat.c8
-rw-r--r--net/netfilter/nft_set_bitmap.c165
-rw-r--r--net/netlink/af_netlink.c41
-rw-r--r--net/netlink/genetlink.c4
-rw-r--r--net/netrom/af_netrom.c3
-rw-r--r--net/nfc/llcp_sock.c2
-rw-r--r--net/openvswitch/conntrack.c4
-rw-r--r--net/openvswitch/flow.c10
-rw-r--r--net/openvswitch/flow_netlink.c4
-rw-r--r--net/packet/af_packet.c8
-rw-r--r--net/phonet/pep.c6
-rw-r--r--net/phonet/socket.c4
-rw-r--r--net/rds/connection.c1
-rw-r--r--net/rds/ib_cm.c47
-rw-r--r--net/rds/rds.h6
-rw-r--r--net/rds/tcp.c38
-rw-r--r--net/rds/tcp.h2
-rw-r--r--net/rds/tcp_listen.c11
-rw-r--r--net/rose/af_rose.c3
-rw-r--r--net/rxrpc/conn_event.c4
-rw-r--r--net/rxrpc/input.c27
-rw-r--r--net/rxrpc/recvmsg.c4
-rw-r--r--net/rxrpc/sendmsg.c49
-rw-r--r--net/sched/act_connmark.c3
-rw-r--r--net/sched/act_skbmod.c1
-rw-r--r--net/sched/sch_dsmark.c10
-rw-r--r--net/sctp/associola.c19
-rw-r--r--net/sctp/input.c4
-rw-r--r--net/sctp/ipv6.c5
-rw-r--r--net/sctp/output.c76
-rw-r--r--net/sctp/outqueue.c14
-rw-r--r--net/sctp/proc.c4
-rw-r--r--net/sctp/protocol.c5
-rw-r--r--net/sctp/sm_make_chunk.c9
-rw-r--r--net/sctp/sm_statefuns.c6
-rw-r--r--net/sctp/socket.c22
-rw-r--r--net/sctp/stream.c43
-rw-r--r--net/sctp/transport.c19
-rw-r--r--net/smc/af_smc.c2
-rw-r--r--net/socket.c18
-rw-r--r--net/sunrpc/svcsock.c1
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_transport.c1
-rw-r--r--net/sunrpc/xprtrdma/verbs.c3
-rw-r--r--net/tipc/socket.c8
-rw-r--r--net/tipc/subscr.c7
-rw-r--r--net/unix/af_unix.c5
-rw-r--r--net/unix/garbage.c17
-rw-r--r--net/vmw_vsock/af_vsock.c17
-rw-r--r--net/vmw_vsock/virtio_transport.c42
-rw-r--r--net/vmw_vsock/virtio_transport_common.c7
-rw-r--r--net/wireless/nl80211.c127
-rw-r--r--net/wireless/sysfs.c10
-rw-r--r--net/x25/af_x25.c3
-rw-r--r--net/xfrm/xfrm_policy.c19
-rw-r--r--net/xfrm/xfrm_user.c9
-rw-r--r--samples/statx/test-statx.c12
-rw-r--r--scripts/Kbuild.include4
-rw-r--r--scripts/Makefile.lib2
-rw-r--r--scripts/kconfig/gconf.c2
-rw-r--r--sound/core/seq/seq_clientmgr.c1
-rw-r--r--sound/core/seq/seq_fifo.c7
-rw-r--r--sound/core/seq/seq_memory.c17
-rw-r--r--sound/core/seq/seq_memory.h1
-rw-r--r--sound/pci/ctxfi/cthw20k1.c2
-rw-r--r--sound/pci/hda/patch_conexant.c11
-rw-r--r--sound/pci/hda/patch_realtek.c28
-rw-r--r--sound/soc/atmel/atmel-classd.c2
-rw-r--r--sound/soc/codecs/hdac_hdmi.c16
-rw-r--r--sound/soc/codecs/rt5665.c10
-rw-r--r--sound/soc/codecs/rt5665.h2
-rw-r--r--sound/soc/codecs/wm_adsp.c9
-rw-r--r--sound/soc/generic/simple-card-utils.c1
-rw-r--r--sound/soc/intel/skylake/skl-topology.c2
-rw-r--r--sound/soc/mediatek/Kconfig2
-rw-r--r--sound/soc/sh/rcar/cmd.c36
-rw-r--r--sound/soc/sh/rcar/dma.c18
-rw-r--r--sound/soc/sh/rcar/ssiu.c6
-rw-r--r--sound/soc/soc-core.c8
-rw-r--r--sound/soc/sti/uniperif_reader.c3
-rw-r--r--sound/soc/sunxi/sun8i-codec.c67
-rw-r--r--sound/x86/Kconfig3
-rw-r--r--tools/include/linux/filter.h10
-rw-r--r--tools/include/uapi/linux/bpf_perf_event.h18
-rw-r--r--tools/perf/util/symbol.c2
-rw-r--r--tools/testing/selftests/bpf/Makefile26
-rw-r--r--tools/testing/selftests/bpf/test_maps.c29
-rw-r--r--tools/testing/selftests/bpf/test_verifier.c294
-rw-r--r--tools/testing/selftests/powerpc/Makefile10
-rw-r--r--tools/testing/selftests/powerpc/include/vsx_asm.h48
-rw-r--r--virt/kvm/arm/vgic/vgic-init.c19
-rw-r--r--virt/kvm/arm/vgic/vgic-mmio-v2.c20
-rw-r--r--virt/kvm/arm/vgic/vgic-v2.c23
-rw-r--r--virt/kvm/arm/vgic/vgic.h11
-rw-r--r--virt/kvm/eventfd.c3
-rw-r--r--virt/kvm/kvm_main.c44
1153 files changed, 16432 insertions, 8373 deletions
diff --git a/.mailmap b/.mailmap
index 67dc22ffc9a8..e229922dc7f0 100644
--- a/.mailmap
+++ b/.mailmap
@@ -171,6 +171,7 @@ Vlad Dogaru <ddvlad@gmail.com> <vlad.dogaru@intel.com>
 Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@virtuozzo.com>
 Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@parallels.com>
 Takashi YOSHII <takashi.yoshii.zj@renesas.com>
+Yakir Yang <kuankuan.y@gmail.com> <ykk@rock-chips.com>
 Yusuke Goda <goda.yusuke@renesas.com>
 Gustavo Padovan <gustavo@las.ic.unicamp.br>
 Gustavo Padovan <padovan@profusion.mobi>
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 2ba45caabada..facc20a3f962 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -1725,6 +1725,12 @@
 			kernel and module base offset ASLR (Address Space
 			Layout Randomization).
 
+	kasan_multi_shot
+			[KNL] Enforce KASAN (Kernel Address Sanitizer) to print
+			report on every invalid memory access. Without this
+			parameter KASAN will print report only for the first
+			invalid access.
+
 	keepinitrd	[HW,ARM]
 
 	kernelcore=	[KNL,X86,IA-64,PPC]
diff --git a/Documentation/cgroup-v2.txt b/Documentation/cgroup-v2.txt
index 3b8449f8ac7e..49d7c997fa1e 100644
--- a/Documentation/cgroup-v2.txt
+++ b/Documentation/cgroup-v2.txt
@@ -1142,16 +1142,17 @@ used by the kernel.
 
   pids.max
 
-	A read-write single value file which exists on non-root cgroups.  The
-	default is "max".
+	A read-write single value file which exists on non-root
+	cgroups.  The default is "max".
 
 	Hard limit of number of processes.
 
   pids.current
 
 	A read-only single value file which exists on all cgroups.
 
-	The number of processes currently in the cgroup and its descendants.
+	The number of processes currently in the cgroup and its
+	descendants.
 
 Organisational operations are not blocked by cgroup policies, so it is
 possible to have pids.current > pids.max.  This can be done by either
diff --git a/Documentation/devicetree/bindings/arm/marvell/cp110-system-controller0.txt b/Documentation/devicetree/bindings/arm/marvell/cp110-system-controller0.txt
index 30c546900b60..07dbb358182c 100644
--- a/Documentation/devicetree/bindings/arm/marvell/cp110-system-controller0.txt
+++ b/Documentation/devicetree/bindings/arm/marvell/cp110-system-controller0.txt
@@ -45,7 +45,7 @@ The following clocks are available:
 - 1 15	SATA
 - 1 16	SATA USB
 - 1 17	Main
-- 1 18	SD/MMC
+- 1 18	SD/MMC/GOP
 - 1 21	Slow IO (SPI, NOR, BootROM, I2C, UART)
 - 1 22	USB3H0
 - 1 23	USB3H1
@@ -65,7 +65,7 @@ Required properties:
65 "cpm-audio", "cpm-communit", "cpm-nand", "cpm-ppv2", "cpm-sdio", 65 "cpm-audio", "cpm-communit", "cpm-nand", "cpm-ppv2", "cpm-sdio",
66 "cpm-mg-domain", "cpm-mg-core", "cpm-xor1", "cpm-xor0", "cpm-gop-dp", "none", 66 "cpm-mg-domain", "cpm-mg-core", "cpm-xor1", "cpm-xor0", "cpm-gop-dp", "none",
67 "cpm-pcie_x10", "cpm-pcie_x11", "cpm-pcie_x4", "cpm-pcie-xor", "cpm-sata", 67 "cpm-pcie_x10", "cpm-pcie_x11", "cpm-pcie_x4", "cpm-pcie-xor", "cpm-sata",
68 "cpm-sata-usb", "cpm-main", "cpm-sd-mmc", "none", "none", "cpm-slow-io", 68 "cpm-sata-usb", "cpm-main", "cpm-sd-mmc-gop", "none", "none", "cpm-slow-io",
69 "cpm-usb3h0", "cpm-usb3h1", "cpm-usb3dev", "cpm-eip150", "cpm-eip197"; 69 "cpm-usb3h0", "cpm-usb3h1", "cpm-usb3dev", "cpm-eip150", "cpm-eip197";
70 70
71Example: 71Example:
@@ -78,6 +78,6 @@ Example:
 	gate-clock-output-names = "cpm-audio", "cpm-communit", "cpm-nand", "cpm-ppv2", "cpm-sdio",
 		"cpm-mg-domain", "cpm-mg-core", "cpm-xor1", "cpm-xor0", "cpm-gop-dp", "none",
 		"cpm-pcie_x10", "cpm-pcie_x11", "cpm-pcie_x4", "cpm-pcie-xor", "cpm-sata",
-		"cpm-sata-usb", "cpm-main", "cpm-sd-mmc", "none", "none", "cpm-slow-io",
+		"cpm-sata-usb", "cpm-main", "cpm-sd-mmc-gop", "none", "none", "cpm-slow-io",
 		"cpm-usb3h0", "cpm-usb3h1", "cpm-usb3dev", "cpm-eip150", "cpm-eip197";
 	};
diff --git a/Documentation/devicetree/bindings/display/exynos/exynos_dsim.txt b/Documentation/devicetree/bindings/display/exynos/exynos_dsim.txt
index a78265993665..ca5204b3bc21 100644
--- a/Documentation/devicetree/bindings/display/exynos/exynos_dsim.txt
+++ b/Documentation/devicetree/bindings/display/exynos/exynos_dsim.txt
@@ -4,7 +4,6 @@ Required properties:
   - compatible: value should be one of the following
 		"samsung,exynos3250-mipi-dsi" /* for Exynos3250/3472 SoCs */
 		"samsung,exynos4210-mipi-dsi" /* for Exynos4 SoCs */
-		"samsung,exynos4415-mipi-dsi" /* for Exynos4415 SoC */
 		"samsung,exynos5410-mipi-dsi" /* for Exynos5410/5420/5440 SoCs */
 		"samsung,exynos5422-mipi-dsi" /* for Exynos5422/5800 SoCs */
 		"samsung,exynos5433-mipi-dsi" /* for Exynos5433 SoCs */
diff --git a/Documentation/devicetree/bindings/display/exynos/samsung-fimd.txt b/Documentation/devicetree/bindings/display/exynos/samsung-fimd.txt
index 18645e0228b0..5837402c3ade 100644
--- a/Documentation/devicetree/bindings/display/exynos/samsung-fimd.txt
+++ b/Documentation/devicetree/bindings/display/exynos/samsung-fimd.txt
@@ -11,7 +11,6 @@ Required properties:
11 "samsung,s5pv210-fimd"; /* for S5PV210 SoC */ 11 "samsung,s5pv210-fimd"; /* for S5PV210 SoC */
12 "samsung,exynos3250-fimd"; /* for Exynos3250/3472 SoCs */ 12 "samsung,exynos3250-fimd"; /* for Exynos3250/3472 SoCs */
13 "samsung,exynos4210-fimd"; /* for Exynos4 SoCs */ 13 "samsung,exynos4210-fimd"; /* for Exynos4 SoCs */
14 "samsung,exynos4415-fimd"; /* for Exynos4415 SoC */
15 "samsung,exynos5250-fimd"; /* for Exynos5250 SoCs */ 14 "samsung,exynos5250-fimd"; /* for Exynos5250 SoCs */
16 "samsung,exynos5420-fimd"; /* for Exynos5420/5422/5800 SoCs */ 15 "samsung,exynos5420-fimd"; /* for Exynos5420/5422/5800 SoCs */
17 16
diff --git a/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt b/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt
index ea9c1c9607f6..520d61dad6dd 100644
--- a/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt
+++ b/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt
@@ -13,7 +13,7 @@ Required Properties:
13 - "rockchip,rk2928-dw-mshc": for Rockchip RK2928 and following, 13 - "rockchip,rk2928-dw-mshc": for Rockchip RK2928 and following,
14 before RK3288 14 before RK3288
15 - "rockchip,rk3288-dw-mshc": for Rockchip RK3288 15 - "rockchip,rk3288-dw-mshc": for Rockchip RK3288
16 - "rockchip,rk1108-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK1108 16 - "rockchip,rv1108-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RV1108
17 - "rockchip,rk3036-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK3036 17 - "rockchip,rk3036-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK3036
18 - "rockchip,rk3368-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK3368 18 - "rockchip,rk3368-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK3368
19 - "rockchip,rk3399-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK3399 19 - "rockchip,rk3399-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK3399
diff --git a/Documentation/devicetree/bindings/phy/brcm,nsp-usb3-phy.txt b/Documentation/devicetree/bindings/phy/brcm,nsp-usb3-phy.txt
deleted file mode 100644
index e68ae5dec9c9..000000000000
--- a/Documentation/devicetree/bindings/phy/brcm,nsp-usb3-phy.txt
+++ /dev/null
@@ -1,39 +0,0 @@
-Broadcom USB3 phy binding for northstar plus SoC
-The USB3 phy is internal to the SoC and is accessed using mdio interface.
-
-Required mdio bus properties:
-- reg: Should be 0x0 for SoC internal USB3 phy
-- #address-cells: must be 1
-- #size-cells: must be 0
-
-Required USB3 PHY properties:
-- compatible: should be "brcm,nsp-usb3-phy"
-- reg: USB3 Phy address on SoC internal MDIO bus and it should be 0x10.
-- usb3-ctrl-syscon: handler of syscon node defining physical address
-  of usb3 control register.
-- #phy-cells: must be 0
-
-Required usb3 control properties:
-- compatible: should be "brcm,nsp-usb3-ctrl"
-- reg: offset and length of the control registers
-
-Example:
-
-	mdio@0 {
-		reg = <0x0>;
-		#address-cells = <1>;
-		#size-cells = <0>;
-
-		usb3_phy: usb-phy@10 {
-			compatible = "brcm,nsp-usb3-phy";
-			reg = <0x10>;
-			usb3-ctrl-syscon = <&usb3_ctrl>;
-			#phy-cells = <0>;
-			status = "disabled";
-		};
-	};
-
-	usb3_ctrl: syscon@104408 {
-		compatible = "brcm,nsp-usb3-ctrl", "syscon";
-		reg = <0x104408 0x3fc>;
-	};
diff --git a/Documentation/devicetree/bindings/powerpc/4xx/emac.txt b/Documentation/devicetree/bindings/powerpc/4xx/emac.txt
index 712baf6c3e24..44b842b6ca15 100644
--- a/Documentation/devicetree/bindings/powerpc/4xx/emac.txt
+++ b/Documentation/devicetree/bindings/powerpc/4xx/emac.txt
@@ -71,6 +71,9 @@
 			  For Axon it can be absent, though my current driver
 			  doesn't handle phy-address yet so for now, keep
 			  0x00ffffff in it.
+- phy-handle		: Used to describe configurations where an external PHY
+			  is used. Please refer to:
+			  Documentation/devicetree/bindings/net/ethernet.txt
 - rx-fifo-size-gige	: 1 cell, Rx fifo size in bytes for 1000 Mb/sec
 			  operations (if absent the value is the same as
 			  rx-fifo-size).  For Axon, either absent or 2048.
@@ -81,8 +84,22 @@
 			  offload, phandle of the TAH device node.
 - tah-channel		: 1 cell, optional. If appropriate, channel used on the
 			  TAH engine.
+- fixed-link		: Fixed-link subnode describing a link to a non-MDIO
+			  managed entity. See
+			  Documentation/devicetree/bindings/net/fixed-link.txt
+			  for details.
+- mdio subnode		: When the EMAC has a phy connected to its local
+			  mdio, which is supported by the kernel's network
+			  PHY library in drivers/net/phy, there must be a device
+			  tree subnode with the following required properties:
+			  - #address-cells: Must be <1>.
+			  - #size-cells: Must be <0>.
 
-Example:
+For PHY definitions: Please refer to
+Documentation/devicetree/bindings/net/phy.txt and
+Documentation/devicetree/bindings/net/ethernet.txt
+
+Examples:
 
87 EMAC0: ethernet@40000800 { 104 EMAC0: ethernet@40000800 {
88 device_type = "network"; 105 device_type = "network";
@@ -104,6 +121,48 @@
 		zmii-channel = <0>;
 	};
 
+	EMAC1: ethernet@ef600c00 {
+		device_type = "network";
+		compatible = "ibm,emac-apm821xx", "ibm,emac4sync";
+		interrupt-parent = <&EMAC1>;
+		interrupts = <0 1>;
+		#interrupt-cells = <1>;
+		#address-cells = <0>;
+		#size-cells = <0>;
+		interrupt-map = <0 &UIC2 0x10 IRQ_TYPE_LEVEL_HIGH /* Status */
+				 1 &UIC2 0x14 IRQ_TYPE_LEVEL_HIGH /* Wake */>;
+		reg = <0xef600c00 0x000000c4>;
+		local-mac-address = [000000000000]; /* Filled in by U-Boot */
+		mal-device = <&MAL0>;
+		mal-tx-channel = <0>;
+		mal-rx-channel = <0>;
+		cell-index = <0>;
+		max-frame-size = <9000>;
+		rx-fifo-size = <16384>;
+		tx-fifo-size = <2048>;
+		fifo-entry-size = <10>;
+		phy-mode = "rgmii";
+		phy-handle = <&phy0>;
+		phy-map = <0x00000000>;
+		rgmii-device = <&RGMII0>;
+		rgmii-channel = <0>;
+		tah-device = <&TAH0>;
+		tah-channel = <0>;
+		has-inverted-stacr-oc;
+		has-new-stacr-staopc;
+
+		mdio {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			phy0: ethernet-phy@0 {
+				compatible = "ethernet-phy-ieee802.3-c22";
+				reg = <0>;
+			};
+		};
+	};
+
+
 ii) McMAL node
 
 Required properties:
@@ -145,4 +204,3 @@
 - revision		: as provided by the RGMII new version register if
 			  available.
 			  For Axon: 0x0000012a
-
diff --git a/Documentation/devicetree/bindings/rng/omap_rng.txt b/Documentation/devicetree/bindings/rng/omap_rng.txt
index 471477299ece..9cf7876ab434 100644
--- a/Documentation/devicetree/bindings/rng/omap_rng.txt
+++ b/Documentation/devicetree/bindings/rng/omap_rng.txt
@@ -12,7 +12,8 @@ Required properties:
 - reg : Offset and length of the register set for the module
 - interrupts : the interrupt number for the RNG module.
 		Used for "ti,omap4-rng" and "inside-secure,safexcel-eip76"
-- clocks: the trng clock source
+- clocks: the trng clock source. Only mandatory for the
+  "inside-secure,safexcel-eip76" compatible.
 
 Example:
 /* AM335x */
diff --git a/Documentation/extcon/intel-int3496.txt b/Documentation/extcon/intel-int3496.txt
index af0b366c25b7..8155dbc7fad3 100644
--- a/Documentation/extcon/intel-int3496.txt
+++ b/Documentation/extcon/intel-int3496.txt
@@ -20,3 +20,8 @@ Index 1: The output gpio for enabling Vbus output from the device to the otg
 Index 2: The output gpio for muxing of the data pins between the USB host and
          the USB peripheral controller, write 1 to mux to the peripheral
          controller
+
+There is a mapping between indices and GPIO connection IDs as follows:
+	id	index 0
+	vbus	index 1
+	mux	index 2
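
For illustration, a minimal sketch of how a driver could consume this
mapping through the standard ACPI GPIO lookup API (the table layout and
helper calls follow include/linux/acpi.h and linux/gpio/consumer.h; the
identifiers below are illustrative, not the in-tree driver):

	#include <linux/acpi.h>
	#include <linux/err.h>
	#include <linux/gpio/consumer.h>
	#include <linux/platform_device.h>

	/* _CRS GpioIo entry 0 -> "id", 1 -> "vbus", 2 -> "mux" */
	static const struct acpi_gpio_params id_gpios   = { 0, 0, false };
	static const struct acpi_gpio_params vbus_gpios = { 1, 0, false };
	static const struct acpi_gpio_params mux_gpios  = { 2, 0, false };

	static const struct acpi_gpio_mapping int3496_gpios[] = {
		{ "id-gpios",   &id_gpios,   1 },
		{ "vbus-gpios", &vbus_gpios, 1 },
		{ "mux-gpios",  &mux_gpios,  1 },
		{ },
	};

	static int int3496_probe_sketch(struct platform_device *pdev)
	{
		struct device *dev = &pdev->dev;
		struct gpio_desc *id;
		int ret;

		/* register the index -> connection-ID mapping ... */
		ret = acpi_dev_add_driver_gpios(ACPI_COMPANION(dev),
						int3496_gpios);
		if (ret)
			return ret;

		/* ... then look GPIOs up by connection ID */
		id = gpiod_get(dev, "id", GPIOD_IN);
		return PTR_ERR_OR_ZERO(id);
	}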
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index fdcfdd79682a..fe25787ff6d4 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -58,8 +58,7 @@ prototypes:
 	int (*permission) (struct inode *, int, unsigned int);
 	int (*get_acl)(struct inode *, int);
 	int (*setattr) (struct dentry *, struct iattr *);
-	int (*getattr) (const struct path *, struct dentry *, struct kstat *,
-			u32, unsigned int);
+	int (*getattr) (const struct path *, struct kstat *, u32, unsigned int);
 	ssize_t (*listxattr) (struct dentry *, char *, size_t);
 	int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start, u64 len);
 	void (*update_time)(struct inode *, struct timespec *, int);
diff --git a/Documentation/filesystems/porting b/Documentation/filesystems/porting
index 95280079c0b3..5fb17f49f7a2 100644
--- a/Documentation/filesystems/porting
+++ b/Documentation/filesystems/porting
@@ -600,3 +600,9 @@ in your dentry operations instead.
 [recommended]
 	->readlink is optional for symlinks.  Don't set, unless filesystem needs
 	to fake something for readlink(2).
+--
+[mandatory]
+	->getattr() is now passed a struct path rather than a vfsmount and
+	dentry separately, and it now has request_mask and query_flags arguments
+	to specify the fields and sync type requested by statx.  Filesystems not
+	supporting any statx-specific features may ignore the new arguments.
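
A minimal sketch of a ->getattr() instance under the new prototype,
assuming a filesystem with no statx-specific features (so request_mask
and query_flags are simply ignored; "foo_getattr" is illustrative):

	#include <linux/fs.h>
	#include <linux/stat.h>

	static int foo_getattr(const struct path *path, struct kstat *stat,
			       u32 request_mask, unsigned int query_flags)
	{
		struct inode *inode = d_inode(path->dentry);

		/* fill in the basic fields; no statx extensions supported */
		generic_fillattr(inode, stat);
		return 0;
	}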
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index 569211703721..94dd27ef4a76 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -382,8 +382,7 @@ struct inode_operations {
 	int (*permission) (struct inode *, int);
 	int (*get_acl)(struct inode *, int);
 	int (*setattr) (struct dentry *, struct iattr *);
-	int (*getattr) (const struct path *, struct dentry *, struct kstat *,
-			u32, unsigned int);
+	int (*getattr) (const struct path *, struct kstat *, u32, unsigned int);
 	ssize_t (*listxattr) (struct dentry *, char *, size_t);
 	void (*update_time)(struct inode *, struct timespec *, int);
 	int (*atomic_open)(struct inode *, struct dentry *, struct file *,
diff --git a/Documentation/gcc-plugins.txt b/Documentation/gcc-plugins.txt
index 891c69464434..433eaefb4aa1 100644
--- a/Documentation/gcc-plugins.txt
+++ b/Documentation/gcc-plugins.txt
@@ -18,8 +18,8 @@ because gcc versions 4.5 and 4.6 are compiled by a C compiler,
 gcc-4.7 can be compiled by a C or a C++ compiler,
 and versions 4.8+ can only be compiled by a C++ compiler.
 
-Currently the GCC plugin infrastructure supports only the x86, arm and arm64
-architectures.
+Currently the GCC plugin infrastructure supports only the x86, arm, arm64 and
+powerpc architectures.
 
 This infrastructure was ported from grsecurity [6] and PaX [7].
 
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index fc73eeb7b3b8..ab0230461377 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -1006,7 +1006,8 @@ accept_redirects - BOOLEAN
 	FALSE (router)
 
 forwarding - BOOLEAN
-	Enable IP forwarding on this interface.
+	Enable IP forwarding on this interface.  This controls whether packets
+	received _on_ this interface can be forwarded.
 
 mc_forwarding - BOOLEAN
 	Do multicast routing. The kernel needs to be compiled with CONFIG_MROUTE
diff --git a/Documentation/pinctrl.txt b/Documentation/pinctrl.txt
index 54bd5faa8782..f2af35f6d6b2 100644
--- a/Documentation/pinctrl.txt
+++ b/Documentation/pinctrl.txt
@@ -77,9 +77,15 @@ static struct pinctrl_desc foo_desc = {
 
 int __init foo_probe(void)
 {
+	int error;
+
 	struct pinctrl_dev *pctl;
 
-	return pinctrl_register_and_init(&foo_desc, <PARENT>, NULL, &pctl);
+	error = pinctrl_register_and_init(&foo_desc, <PARENT>, NULL, &pctl);
+	if (error)
+		return error;
+
+	return pinctrl_enable(pctl);
 }
 
 To enable the pinctrl subsystem and the subgroups for PINMUX and PINCONF and
diff --git a/Documentation/process/stable-kernel-rules.rst b/Documentation/process/stable-kernel-rules.rst
index 11ec2d93a5e0..61e9c78bd6d1 100644
--- a/Documentation/process/stable-kernel-rules.rst
+++ b/Documentation/process/stable-kernel-rules.rst
@@ -124,7 +124,7 @@ specified in the following format in the sign-off area:
 
 .. code-block:: none
 
-     Cc: <stable@vger.kernel.org> # 3.3.x-
+     Cc: <stable@vger.kernel.org> # 3.3.x
 
 The tag has the meaning of:
 
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 3c248f772ae6..fd106899afd1 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -3377,6 +3377,69 @@ struct kvm_ppc_resize_hpt {
 	__u32 pad;
 };
 
+4.104 KVM_X86_GET_MCE_CAP_SUPPORTED
+
+Capability: KVM_CAP_MCE
+Architectures: x86
+Type: system ioctl
+Parameters: u64 mce_cap (out)
+Returns: 0 on success, -1 on error
+
+Returns supported MCE capabilities. The u64 mce_cap parameter
+has the same format as the MSR_IA32_MCG_CAP register. Supported
+capabilities will have the corresponding bits set.
+
+4.105 KVM_X86_SETUP_MCE
+
+Capability: KVM_CAP_MCE
+Architectures: x86
+Type: vcpu ioctl
+Parameters: u64 mcg_cap (in)
+Returns: 0 on success,
+         -EFAULT if u64 mcg_cap cannot be read,
+         -EINVAL if the requested number of banks is invalid,
+         -EINVAL if requested MCE capability is not supported.
+
+Initializes MCE support for use. The u64 mcg_cap parameter
+has the same format as the MSR_IA32_MCG_CAP register and
+specifies which capabilities should be enabled. The maximum
+supported number of error-reporting banks can be retrieved when
+checking for KVM_CAP_MCE. The supported capabilities can be
+retrieved with KVM_X86_GET_MCE_CAP_SUPPORTED.
+
+4.106 KVM_X86_SET_MCE
+
+Capability: KVM_CAP_MCE
+Architectures: x86
+Type: vcpu ioctl
+Parameters: struct kvm_x86_mce (in)
+Returns: 0 on success,
+         -EFAULT if struct kvm_x86_mce cannot be read,
+         -EINVAL if the bank number is invalid,
+         -EINVAL if VAL bit is not set in status field.
+
+Inject a machine check error (MCE) into the guest. The input
+parameter is:
+
+struct kvm_x86_mce {
+	__u64 status;
+	__u64 addr;
+	__u64 misc;
+	__u64 mcg_status;
+	__u8 bank;
+	__u8 pad1[7];
+	__u64 pad2[3];
+};
+
+If the MCE being reported is an uncorrected error, KVM will
+inject it as an MCE exception into the guest. If the guest
+MCG_STATUS register reports that an MCE is in progress, KVM
+causes a KVM_EXIT_SHUTDOWN vmexit.
+
+Otherwise, if the MCE is a corrected error, KVM will just
+store it in the corresponding bank (provided this bank is
+not holding a previously reported uncorrected error).
+
 5. The kvm_run structure
 ------------------------
 
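A hedged userspace sketch tying the three ioctls together (error
handling elided; kvm_fd is an open /dev/kvm descriptor and vcpu_fd a
vcpu descriptor, both assumed to exist):

	#include <linux/kvm.h>
	#include <sys/ioctl.h>

	void mce_example(int kvm_fd, int vcpu_fd)
	{
		__u64 mce_cap;
		struct kvm_x86_mce mce = {
			.status = 1ULL << 63,	/* VAL bit must be set */
			.bank = 0,
		};

		/* system ioctl: query supported MCE capabilities */
		ioctl(kvm_fd, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap);

		/* vcpu ioctl: enable MCE support before injecting */
		ioctl(vcpu_fd, KVM_X86_SETUP_MCE, &mce_cap);

		/* vcpu ioctl: inject a corrected error into bank 0 */
		ioctl(vcpu_fd, KVM_X86_SET_MCE, &mce);
	}
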
diff --git a/Documentation/virtual/kvm/devices/arm-vgic.txt b/Documentation/virtual/kvm/devices/arm-vgic.txt
index 76e61c883347..b2f60ca8b60c 100644
--- a/Documentation/virtual/kvm/devices/arm-vgic.txt
+++ b/Documentation/virtual/kvm/devices/arm-vgic.txt
@@ -83,6 +83,12 @@ Groups:
 
     Bits for undefined preemption levels are RAZ/WI.
 
+    For historical reasons and to provide ABI compatibility with userspace we
+    export the GICC_PMR register in the format of the GICH_VMCR.VMPriMask
+    field in the lower 5 bits of a word, meaning that userspace must always
+    use the lower 5 bits to communicate with the KVM device and must shift the
+    value left by 3 places to obtain the actual priority mask level.
+
   Limitations:
   - Priorities are not implemented, and registers are RAZ/WI
   - Currently only implemented for KVM_DEV_TYPE_ARM_VGIC_V2.
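
The 5-bit encoding amounts to the following conversion (illustrative
helpers, not KVM code):

	/* userspace word -> architectural GICC_PMR priority mask */
	static inline u32 kvm_pmr_to_gicc_pmr(u32 val)
	{
		return (val & 0x1f) << 3;	/* shift left by 3 places */
	}

	/* architectural GICC_PMR -> the 5-bit field KVM expects */
	static inline u32 gicc_pmr_to_kvm_pmr(u32 pmr)
	{
		return (pmr >> 3) & 0x1f;	/* keep the lower 5 bits */
	}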
diff --git a/MAINTAINERS b/MAINTAINERS
index c776906f67a9..2be6e991271b 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3216,7 +3216,6 @@ F: drivers/platform/chrome/
 
 CISCO VIC ETHERNET NIC DRIVER
 M:	Christian Benvenuti <benve@cisco.com>
-M:	Sujith Sankar <ssujith@cisco.com>
 M:	Govindarajulu Varadarajan <_govind@gmx.com>
 M:	Neel Patel <neepatel@cisco.com>
 S:	Supported
@@ -3450,6 +3449,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
 T:	git git://git.linaro.org/people/vireshk/linux.git (For ARM Updates)
 B:	https://bugzilla.kernel.org
 F:	Documentation/cpu-freq/
+F:	Documentation/devicetree/bindings/cpufreq/
 F:	drivers/cpufreq/
 F:	include/linux/cpufreq.h
 F:	tools/testing/selftests/cpufreq/
@@ -4118,14 +4118,13 @@ F: drivers/block/drbd/
 F:	lib/lru_cache.c
 F:	Documentation/blockdev/drbd/
 
-DRIVER CORE, KOBJECTS, DEBUGFS, KERNFS AND SYSFS
+DRIVER CORE, KOBJECTS, DEBUGFS AND SYSFS
 M:	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core.git
 S:	Supported
 F:	Documentation/kobject.txt
 F:	drivers/base/
 F:	fs/debugfs/
-F:	fs/kernfs/
 F:	fs/sysfs/
 F:	include/linux/debugfs.h
 F:	include/linux/kobj*
@@ -4776,6 +4775,12 @@ L: linux-edac@vger.kernel.org
 S:	Maintained
 F:	drivers/edac/mpc85xx_edac.[ch]
 
+EDAC-PND2
+M:	Tony Luck <tony.luck@intel.com>
+L:	linux-edac@vger.kernel.org
+S:	Maintained
+F:	drivers/edac/pnd2_edac.[ch]
+
 EDAC-PASEMI
 M:	Egor Martovetsky <egor@pasemi.com>
 L:	linux-edac@vger.kernel.org
@@ -4923,6 +4928,7 @@ F: include/linux/netfilter_bridge/
 F:	net/bridge/
 
 ETHERNET PHY LIBRARY
+M:	Andrew Lunn <andrew@lunn.ch>
 M:	Florian Fainelli <f.fainelli@gmail.com>
 L:	netdev@vger.kernel.org
 S:	Maintained
@@ -7084,9 +7090,9 @@ S: Maintained
 F:	fs/autofs4/
 
 KERNEL BUILD + files below scripts/ (unless maintained elsewhere)
+M:	Masahiro Yamada <yamada.masahiro@socionext.com>
 M:	Michal Marek <mmarek@suse.com>
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/mmarek/kbuild.git for-next
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/mmarek/kbuild.git rc-fixes
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/masahiroy/linux-kbuild.git
 L:	linux-kbuild@vger.kernel.org
 S:	Maintained
 F:	Documentation/kbuild/
@@ -7203,6 +7209,14 @@ F: arch/mips/include/uapi/asm/kvm*
 F:	arch/mips/include/asm/kvm*
 F:	arch/mips/kvm/
 
+KERNFS
+M:	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+M:	Tejun Heo <tj@kernel.org>
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core.git
+S:	Supported
+F:	include/linux/kernfs.h
+F:	fs/kernfs/
+
 KEXEC
 M:	Eric Biederman <ebiederm@xmission.com>
 W:	http://kernel.org/pub/linux/utils/kernel/kexec/
@@ -7774,13 +7788,6 @@ F: include/net/mac80211.h
 F:	net/mac80211/
 F:	drivers/net/wireless/mac80211_hwsim.[ch]
 
-MACVLAN DRIVER
-M:	Patrick McHardy <kaber@trash.net>
-L:	netdev@vger.kernel.org
-S:	Maintained
-F:	drivers/net/macvlan.c
-F:	include/linux/if_macvlan.h
-
 MAILBOX API
 M:	Jassi Brar <jassisinghbrar@gmail.com>
 L:	linux-kernel@vger.kernel.org
@@ -7853,6 +7860,8 @@ F: drivers/net/ethernet/marvell/mvneta.*
 MARVELL MWIFIEX WIRELESS DRIVER
 M:	Amitkumar Karwar <akarwar@marvell.com>
 M:	Nishant Sarmukadam <nishants@marvell.com>
+M:	Ganapathi Bhat <gbhat@marvell.com>
+M:	Xinming Hu <huxm@marvell.com>
 L:	linux-wireless@vger.kernel.org
 S:	Maintained
 F:	drivers/net/wireless/marvell/mwifiex/
@@ -10814,6 +10823,7 @@ F: drivers/s390/block/dasd*
 F:	block/partitions/ibm.c
 
 S390 NETWORK DRIVERS
+M:	Julian Wiedmann <jwi@linux.vnet.ibm.com>
 M:	Ursula Braun <ubraun@linux.vnet.ibm.com>
 L:	linux-s390@vger.kernel.org
 W:	http://www.ibm.com/developerworks/linux/linux390/
@@ -10844,6 +10854,7 @@ S: Supported
 F:	drivers/s390/scsi/zfcp_*
 
 S390 IUCV NETWORK LAYER
+M:	Julian Wiedmann <jwi@linux.vnet.ibm.com>
 M:	Ursula Braun <ubraun@linux.vnet.ibm.com>
 L:	linux-s390@vger.kernel.org
 W:	http://www.ibm.com/developerworks/linux/linux390/
@@ -13383,14 +13394,6 @@ W: https://linuxtv.org
 S:	Maintained
 F:	drivers/media/platform/vivid/*
 
-VLAN (802.1Q)
-M:	Patrick McHardy <kaber@trash.net>
-L:	netdev@vger.kernel.org
-S:	Maintained
-F:	drivers/net/macvlan.c
-F:	include/linux/if_*vlan.h
-F:	net/8021q/
-
 VLYNQ BUS
 M:	Florian Fainelli <f.fainelli@gmail.com>
 L:	openwrt-devel@lists.openwrt.org (subscribers-only)
diff --git a/Makefile b/Makefile
index b841fb36beb2..efa267a92ba6 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 11
 SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc6
 NAME = Fearless Coyote
 
 # *DOCUMENTATION*
@@ -372,7 +372,7 @@ LDFLAGS_MODULE =
 CFLAGS_KERNEL	=
 AFLAGS_KERNEL	=
 LDFLAGS_vmlinux =
-CFLAGS_GCOV	= -fprofile-arcs -ftest-coverage -fno-tree-loop-im -Wno-maybe-uninitialized
+CFLAGS_GCOV	:= -fprofile-arcs -ftest-coverage -fno-tree-loop-im $(call cc-disable-warning,maybe-uninitialized,)
 CFLAGS_KCOV	:= $(call cc-option,-fsanitize-coverage=trace-pc,)
 
 
@@ -653,6 +653,12 @@ KBUILD_CFLAGS += $(call cc-ifversion, -lt, 0409, \
 # Tell gcc to never replace conditional load with a non-conditional one
 KBUILD_CFLAGS	+= $(call cc-option,--param=allow-store-data-races=0)
 
+# check for 'asm goto'
+ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y)
+	KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO
+	KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO
+endif
+
 include scripts/Makefile.gcc-plugins
 
 ifdef CONFIG_READABLE_ASM
@@ -798,12 +804,6 @@ KBUILD_CFLAGS += $(call cc-option,-Werror=incompatible-pointer-types)
 # use the deterministic mode of AR if available
 KBUILD_ARFLAGS := $(call ar-option,D)
 
-# check for 'asm goto'
-ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y)
-	KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO
-	KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO
-endif
-
 include scripts/Makefile.kasan
 include scripts/Makefile.extrawarn
 include scripts/Makefile.ubsan
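These hunks move the 'asm goto' probe earlier, presumably so that
subsequently included makefiles already see CC_HAVE_ASM_GOTO. A hedged
sketch of the construct being probed (illustrative, not the kernel's
actual static-key implementation):

	/* inline assembly that may transfer control to a C label */
	static inline int asm_goto_example(void)
	{
	#ifdef CC_HAVE_ASM_GOTO
		asm goto("" : : : : l_yes);	/* a patchable jump site */
		return 0;
	l_yes:
		return 1;
	#else
		return 0;
	#endif
	}
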
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index 0b961093ca5c..6d76e528ab8f 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -1290,7 +1290,7 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
 	/* copy relevant bits of struct timex. */
 	if (copy_from_user(&txc, txc_p, offsetof(struct timex32, time)) ||
 	    copy_from_user(&txc.tick, &txc_p->tick, sizeof(struct timex32) -
-			   offsetof(struct timex32, time)))
+			   offsetof(struct timex32, tick)))
 	  return -EFAULT;
 
 	ret = do_adjtimex(&txc);
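The fix matters because struct timex32 is copied in two pieces: the
fields before 'time', then everything from 'tick' onward. Measuring the
second piece from 'time' over-reads past the end of the structure by
the size of the skipped span; measuring from 'tick' is exact. As a
fragment (assuming, per the code above, that 'time' precedes 'tick'):

	size_t head = offsetof(struct timex32, time);	/* bytes before 'time' */
	size_t tail = sizeof(struct timex32) -
		      offsetof(struct timex32, tick);	/* 'tick' to the end */
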
diff --git a/arch/arc/boot/dts/skeleton.dtsi b/arch/arc/boot/dts/skeleton.dtsi
index 65808fe0a290..2891cb266cf0 100644
--- a/arch/arc/boot/dts/skeleton.dtsi
+++ b/arch/arc/boot/dts/skeleton.dtsi
@@ -26,6 +26,7 @@
 			device_type = "cpu";
 			compatible = "snps,arc770d";
 			reg = <0>;
+			clocks = <&core_clk>;
 		};
 	};
 
diff --git a/arch/arc/boot/dts/skeleton_hs.dtsi b/arch/arc/boot/dts/skeleton_hs.dtsi
index 2dfe8037dfbb..5e944d3e5b74 100644
--- a/arch/arc/boot/dts/skeleton_hs.dtsi
+++ b/arch/arc/boot/dts/skeleton_hs.dtsi
@@ -21,6 +21,7 @@
 			device_type = "cpu";
 			compatible = "snps,archs38";
 			reg = <0>;
+			clocks = <&core_clk>;
 		};
 	};
 
diff --git a/arch/arc/boot/dts/skeleton_hs_idu.dtsi b/arch/arc/boot/dts/skeleton_hs_idu.dtsi
index 4c11079f3565..54b277d7dea0 100644
--- a/arch/arc/boot/dts/skeleton_hs_idu.dtsi
+++ b/arch/arc/boot/dts/skeleton_hs_idu.dtsi
@@ -19,8 +19,27 @@
 
 	cpu@0 {
 		device_type = "cpu";
-		compatible = "snps,archs38xN";
+		compatible = "snps,archs38";
 		reg = <0>;
+		clocks = <&core_clk>;
+	};
+	cpu@1 {
+		device_type = "cpu";
+		compatible = "snps,archs38";
+		reg = <1>;
+		clocks = <&core_clk>;
+	};
+	cpu@2 {
+		device_type = "cpu";
+		compatible = "snps,archs38";
+		reg = <2>;
+		clocks = <&core_clk>;
+	};
+	cpu@3 {
+		device_type = "cpu";
+		compatible = "snps,archs38";
+		reg = <3>;
+		clocks = <&core_clk>;
 	};
 };
 
diff --git a/arch/arc/boot/dts/vdk_axs10x_mb.dtsi b/arch/arc/boot/dts/vdk_axs10x_mb.dtsi
index f0df59b23e21..459fc656b759 100644
--- a/arch/arc/boot/dts/vdk_axs10x_mb.dtsi
+++ b/arch/arc/boot/dts/vdk_axs10x_mb.dtsi
@@ -112,13 +112,19 @@
 			interrupts = <7>;
 			bus-width = <4>;
 		};
+	};
 
-	/* Embedded Vision subsystem UIO mappings; only relevant for EV VDK */
-	uio_ev: uio@0xD0000000 {
-		compatible = "generic-uio";
-		reg = <0xD0000000 0x2000 0xD1000000 0x2000 0x90000000 0x10000000 0xC0000000 0x10000000>;
-		reg-names = "ev_gsa", "ev_ctrl", "ev_shared_mem", "ev_code_mem";
-		interrupts = <23>;
+	/*
+	 * Embedded Vision subsystem UIO mappings; only relevant for EV VDK
+	 *
+	 * This node is intentionally put outside of MB above because
+	 * it maps areas outside of MB's 0xEz-0xFz.
+	 */
+	uio_ev: uio@0xD0000000 {
+		compatible = "generic-uio";
+		reg = <0xD0000000 0x2000 0xD1000000 0x2000 0x90000000 0x10000000 0xC0000000 0x10000000>;
+		reg-names = "ev_gsa", "ev_ctrl", "ev_shared_mem", "ev_code_mem";
+		interrupt-parent = <&mb_intc>;
+		interrupts = <23>;
 	};
 };
diff --git a/arch/arc/include/asm/kprobes.h b/arch/arc/include/asm/kprobes.h
index 00bdbe167615..2e52d18e6bc7 100644
--- a/arch/arc/include/asm/kprobes.h
+++ b/arch/arc/include/asm/kprobes.h
@@ -54,9 +54,7 @@ int kprobe_fault_handler(struct pt_regs *regs, unsigned long cause);
 void kretprobe_trampoline(void);
 void trap_is_kprobe(unsigned long address, struct pt_regs *regs);
 #else
-static void trap_is_kprobe(unsigned long address, struct pt_regs *regs)
-{
-}
+#define trap_is_kprobe(address, regs)
 #endif /* CONFIG_KPROBES */
 
 #endif /* _ARC_KPROBES_H */
diff --git a/arch/arc/kernel/entry-arcv2.S b/arch/arc/kernel/entry-arcv2.S
index 2585632eaa68..cc558a25b8fa 100644
--- a/arch/arc/kernel/entry-arcv2.S
+++ b/arch/arc/kernel/entry-arcv2.S
@@ -100,15 +100,21 @@ END(handle_interrupt)
 ;################### Non TLB Exception Handling #############################
 
 ENTRY(EV_SWI)
-	flag 1
+	; TODO: implement this
+	EXCEPTION_PROLOGUE
+	b       ret_from_exception
 END(EV_SWI)
 
 ENTRY(EV_DivZero)
-	flag 1
+	; TODO: implement this
+	EXCEPTION_PROLOGUE
+	b       ret_from_exception
 END(EV_DivZero)
 
 ENTRY(EV_DCError)
-	flag 1
+	; TODO: implement this
+	EXCEPTION_PROLOGUE
+	b       ret_from_exception
 END(EV_DCError)
 
 ; ---------------------------------------------
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index 3093fa898a23..fa62404ba58f 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -10,6 +10,7 @@
 #include <linux/fs.h>
 #include <linux/delay.h>
 #include <linux/root_dev.h>
+#include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/clocksource.h>
 #include <linux/console.h>
@@ -488,8 +489,9 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 {
 	char *str;
 	int cpu_id = ptr_to_cpu(v);
-	struct device_node *core_clk = of_find_node_by_name(NULL, "core_clk");
-	u32 freq = 0;
+	struct device *cpu_dev = get_cpu_device(cpu_id);
+	struct clk *cpu_clk;
+	unsigned long freq = 0;
 
 	if (!cpu_online(cpu_id)) {
 		seq_printf(m, "processor [%d]\t: Offline\n", cpu_id);
@@ -502,9 +504,15 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 
 	seq_printf(m, arc_cpu_mumbojumbo(cpu_id, str, PAGE_SIZE));
 
-	of_property_read_u32(core_clk, "clock-frequency", &freq);
+	cpu_clk = clk_get(cpu_dev, NULL);
+	if (IS_ERR(cpu_clk)) {
+		seq_printf(m, "CPU speed \t: Cannot get clock for processor [%d]\n",
+			   cpu_id);
+	} else {
+		freq = clk_get_rate(cpu_clk);
+	}
 	if (freq)
-		seq_printf(m, "CPU speed\t: %u.%02u Mhz\n",
+		seq_printf(m, "CPU speed\t: %lu.%02lu Mhz\n",
 			   freq / 1000000, (freq / 10000) % 100);
 
 	seq_printf(m, "Bogo MIPS\t: %lu.%02lu\n",
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index d408fa21a07c..928562967f3c 100644
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -633,6 +633,9 @@ noinline static void slc_entire_op(const int op)
 
 	write_aux_reg(ARC_REG_SLC_INVALIDATE, 1);
 
+	/* Make sure "busy" bit reports correct status, see STAR 9001165532 */
+	read_aux_reg(r);
+
 	/* Important to wait for flush to complete */
 	while (read_aux_reg(r) & SLC_CTRL_BUSY);
 }
diff --git a/arch/arm/boot/dts/am335x-pcm-953.dtsi b/arch/arm/boot/dts/am335x-pcm-953.dtsi
index 02981eae96b9..1ec8e0d80191 100644
--- a/arch/arm/boot/dts/am335x-pcm-953.dtsi
+++ b/arch/arm/boot/dts/am335x-pcm-953.dtsi
@@ -63,14 +63,14 @@
 			label = "home";
 			linux,code = <KEY_HOME>;
 			gpios = <&gpio3 7 GPIO_ACTIVE_HIGH>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 
 		button@1 {
 			label = "menu";
 			linux,code = <KEY_MENU>;
 			gpios = <&gpio3 8 GPIO_ACTIVE_HIGH>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 
 	};
diff --git a/arch/arm/boot/dts/am57xx-idk-common.dtsi b/arch/arm/boot/dts/am57xx-idk-common.dtsi
index 0d341c545b01..e5ac1d81d15c 100644
--- a/arch/arm/boot/dts/am57xx-idk-common.dtsi
+++ b/arch/arm/boot/dts/am57xx-idk-common.dtsi
@@ -315,6 +315,13 @@
 		/* ID & VBUS GPIOs provided in board dts */
 		};
 	};
+
+	tpic2810: tpic2810@60 {
+		compatible = "ti,tpic2810";
+		reg = <0x60>;
+		gpio-controller;
+		#gpio-cells = <2>;
+	};
 };
 
 &mcspi3 {
@@ -330,13 +337,6 @@
 		spi-max-frequency = <1000000>;
 		spi-cpol;
 	};
-
-	tpic2810: tpic2810@60 {
-		compatible = "ti,tpic2810";
-		reg = <0x60>;
-		gpio-controller;
-		#gpio-cells = <2>;
-	};
 };
 
 &uart3 {
diff --git a/arch/arm/boot/dts/bcm5301x.dtsi b/arch/arm/boot/dts/bcm5301x.dtsi
index 4fbb089cf5ad..00de62dc0042 100644
--- a/arch/arm/boot/dts/bcm5301x.dtsi
+++ b/arch/arm/boot/dts/bcm5301x.dtsi
@@ -66,14 +66,14 @@
 		timer@20200 {
 			compatible = "arm,cortex-a9-global-timer";
 			reg = <0x20200 0x100>;
-			interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_PPI 11 IRQ_TYPE_EDGE_RISING>;
 			clocks = <&periph_clk>;
 		};
 
 		local-timer@20600 {
 			compatible = "arm,cortex-a9-twd-timer";
 			reg = <0x20600 0x100>;
-			interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_PPI 13 IRQ_TYPE_EDGE_RISING>;
 			clocks = <&periph_clk>;
 		};
 
diff --git a/arch/arm/boot/dts/bcm953012k.dts b/arch/arm/boot/dts/bcm953012k.dts
index bfd923096a8c..ae31a5826e91 100644
--- a/arch/arm/boot/dts/bcm953012k.dts
+++ b/arch/arm/boot/dts/bcm953012k.dts
@@ -48,15 +48,14 @@
 	};
 
 	memory {
-		reg = <0x00000000 0x10000000>;
+		reg = <0x80000000 0x10000000>;
 	};
 };
 
 &uart0 {
-	clock-frequency = <62499840>;
+	status = "okay";
 };
 
 &uart1 {
-	clock-frequency = <62499840>;
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/bcm958522er.dts b/arch/arm/boot/dts/bcm958522er.dts
index 3f04a40eb90c..df05e7f568af 100644
--- a/arch/arm/boot/dts/bcm958522er.dts
+++ b/arch/arm/boot/dts/bcm958522er.dts
@@ -55,6 +55,7 @@
 	gpio-restart {
 		compatible = "gpio-restart";
 		gpios = <&gpioa 15 GPIO_ACTIVE_LOW>;
+		open-source;
 		priority = <200>;
 	};
 };
diff --git a/arch/arm/boot/dts/bcm958525er.dts b/arch/arm/boot/dts/bcm958525er.dts
index 9fd542200d3d..4a3ab19c6281 100644
--- a/arch/arm/boot/dts/bcm958525er.dts
+++ b/arch/arm/boot/dts/bcm958525er.dts
@@ -55,6 +55,7 @@
 	gpio-restart {
 		compatible = "gpio-restart";
 		gpios = <&gpioa 15 GPIO_ACTIVE_LOW>;
+		open-source;
 		priority = <200>;
 	};
 };
diff --git a/arch/arm/boot/dts/bcm958525xmc.dts b/arch/arm/boot/dts/bcm958525xmc.dts
index 41e7fd350fcd..81f78435d8c7 100644
--- a/arch/arm/boot/dts/bcm958525xmc.dts
+++ b/arch/arm/boot/dts/bcm958525xmc.dts
@@ -55,6 +55,7 @@
 	gpio-restart {
 		compatible = "gpio-restart";
 		gpios = <&gpioa 31 GPIO_ACTIVE_LOW>;
+		open-source;
 		priority = <200>;
 	};
 };
diff --git a/arch/arm/boot/dts/bcm958622hr.dts b/arch/arm/boot/dts/bcm958622hr.dts
index 477c4860db52..c88b8fefcb2f 100644
--- a/arch/arm/boot/dts/bcm958622hr.dts
+++ b/arch/arm/boot/dts/bcm958622hr.dts
@@ -55,6 +55,7 @@
 	gpio-restart {
 		compatible = "gpio-restart";
 		gpios = <&gpioa 15 GPIO_ACTIVE_LOW>;
+		open-source;
 		priority = <200>;
 	};
 };
diff --git a/arch/arm/boot/dts/bcm958623hr.dts b/arch/arm/boot/dts/bcm958623hr.dts
index c0a499d5ba44..d503fa0dde31 100644
--- a/arch/arm/boot/dts/bcm958623hr.dts
+++ b/arch/arm/boot/dts/bcm958623hr.dts
@@ -55,6 +55,7 @@
 	gpio-restart {
 		compatible = "gpio-restart";
 		gpios = <&gpioa 15 GPIO_ACTIVE_LOW>;
+		open-source;
 		priority = <200>;
 	};
 };
diff --git a/arch/arm/boot/dts/bcm958625hr.dts b/arch/arm/boot/dts/bcm958625hr.dts
index f7eb5854a224..cc0363b843c1 100644
--- a/arch/arm/boot/dts/bcm958625hr.dts
+++ b/arch/arm/boot/dts/bcm958625hr.dts
@@ -55,6 +55,7 @@
 	gpio-restart {
 		compatible = "gpio-restart";
 		gpios = <&gpioa 15 GPIO_ACTIVE_LOW>;
+		open-source;
 		priority = <200>;
 	};
 };
diff --git a/arch/arm/boot/dts/bcm988312hr.dts b/arch/arm/boot/dts/bcm988312hr.dts
index 16666324fda8..74e15a3cd9f8 100644
--- a/arch/arm/boot/dts/bcm988312hr.dts
+++ b/arch/arm/boot/dts/bcm988312hr.dts
@@ -55,6 +55,7 @@
 	gpio-restart {
 		compatible = "gpio-restart";
 		gpios = <&gpioa 15 GPIO_ACTIVE_LOW>;
+		open-source;
 		priority = <200>;
 	};
 };
diff --git a/arch/arm/boot/dts/imx6sx-udoo-neo.dtsi b/arch/arm/boot/dts/imx6sx-udoo-neo.dtsi
index 49f466fe0b1d..dcfc97591433 100644
--- a/arch/arm/boot/dts/imx6sx-udoo-neo.dtsi
+++ b/arch/arm/boot/dts/imx6sx-udoo-neo.dtsi
@@ -121,11 +121,6 @@
 	};
 };
 
-&cpu0 {
-	arm-supply = <&sw1a_reg>;
-	soc-supply = <&sw1c_reg>;
-};
-
 &fec1 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_enet1>;
diff --git a/arch/arm/boot/dts/sama5d2.dtsi b/arch/arm/boot/dts/sama5d2.dtsi
index 22332be72140..528b4e9c6d3d 100644
--- a/arch/arm/boot/dts/sama5d2.dtsi
+++ b/arch/arm/boot/dts/sama5d2.dtsi
@@ -266,7 +266,7 @@
 			};
 
 			usb1: ohci@00400000 {
-				compatible = "atmel,sama5d2-ohci", "usb-ohci";
+				compatible = "atmel,at91rm9200-ohci", "usb-ohci";
 				reg = <0x00400000 0x100000>;
 				interrupts = <41 IRQ_TYPE_LEVEL_HIGH 2>;
 				clocks = <&uhphs_clk>, <&uhphs_clk>, <&uhpck>;
diff --git a/arch/arm/boot/dts/ste-dbx5x0.dtsi b/arch/arm/boot/dts/ste-dbx5x0.dtsi
index 82d8c4771293..6c5affe2d0f5 100644
--- a/arch/arm/boot/dts/ste-dbx5x0.dtsi
+++ b/arch/arm/boot/dts/ste-dbx5x0.dtsi
@@ -14,6 +14,7 @@
 #include <dt-bindings/mfd/dbx500-prcmu.h>
 #include <dt-bindings/arm/ux500_pm_domains.h>
 #include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/clock/ste-ab8500.h>
 #include "skeleton.dtsi"
 
 / {
@@ -603,6 +604,11 @@
 			interrupt-controller;
 			#interrupt-cells = <2>;
 
+			ab8500_clock: clock-controller {
+				compatible = "stericsson,ab8500-clk";
+				#clock-cells = <1>;
+			};
+
 			ab8500_gpio: ab8500-gpio {
 				compatible = "stericsson,ab8500-gpio";
 				gpio-controller;
@@ -686,6 +692,8 @@
 
 			ab8500-pwm {
 				compatible = "stericsson,ab8500-pwm";
+				clocks = <&ab8500_clock AB8500_SYSCLK_INT>;
+				clock-names = "intclk";
 			};
 
 			ab8500-debugfs {
@@ -700,6 +708,9 @@
 				V-AMIC2-supply = <&ab8500_ldo_anamic2_reg>;
 				V-DMIC-supply = <&ab8500_ldo_dmic_reg>;
 
+				clocks = <&ab8500_clock AB8500_SYSCLK_AUDIO>;
+				clock-names = "audioclk";
+
 				stericsson,earpeice-cmv = <950>; /* Units in mV. */
 			};
 
@@ -1095,6 +1106,14 @@
 			status = "disabled";
 		};
 
+		sound {
+			compatible = "stericsson,snd-soc-mop500";
+			stericsson,cpu-dai = <&msp1 &msp3>;
+			stericsson,audio-codec = <&codec>;
+			clocks = <&prcmu_clk PRCMU_SYSCLK>, <&ab8500_clock AB8500_SYSCLK_ULP>, <&ab8500_clock AB8500_SYSCLK_INT>;
+			clock-names = "sysclk", "ulpclk", "intclk";
+		};
+
 		msp0: msp@80123000 {
 			compatible = "stericsson,ux500-msp-i2s";
 			reg = <0x80123000 0x1000>;
@@ -1170,11 +1189,6 @@
 			status = "disabled";
 		};
 
-		cpufreq-cooling {
-			compatible = "stericsson,db8500-cpufreq-cooling";
-			status = "disabled";
-		};
-
 		mcde@a0350000 {
 			compatible = "stericsson,mcde";
 			reg = <0xa0350000 0x1000>, /* MCDE */
diff --git a/arch/arm/boot/dts/ste-href.dtsi b/arch/arm/boot/dts/ste-href.dtsi
index f37f9e10713c..9e359e4f342e 100644
--- a/arch/arm/boot/dts/ste-href.dtsi
+++ b/arch/arm/boot/dts/ste-href.dtsi
@@ -186,15 +186,6 @@
 		status = "okay";
 	};
 
-	sound {
-		compatible = "stericsson,snd-soc-mop500";
-
-		stericsson,cpu-dai = <&msp1 &msp3>;
-		stericsson,audio-codec = <&codec>;
-		clocks = <&prcmu_clk PRCMU_SYSCLK>;
-		clock-names = "sysclk";
-	};
-
 	msp0: msp@80123000 {
 		pinctrl-names = "default";
 		pinctrl-0 = <&msp0_default_mode>;
diff --git a/arch/arm/boot/dts/ste-snowball.dts b/arch/arm/boot/dts/ste-snowball.dts
index dd5514def604..ade1d0d4e5f4 100644
--- a/arch/arm/boot/dts/ste-snowball.dts
+++ b/arch/arm/boot/dts/ste-snowball.dts
@@ -159,15 +159,6 @@
159 "", "", "", "", "", "", "", ""; 159 "", "", "", "", "", "", "", "";
160 }; 160 };
161 161
162 sound {
163 compatible = "stericsson,snd-soc-mop500";
164
165 stericsson,cpu-dai = <&msp1 &msp3>;
166 stericsson,audio-codec = <&codec>;
167 clocks = <&prcmu_clk PRCMU_SYSCLK>;
168 clock-names = "sysclk";
169 };
170
171 msp0: msp@80123000 { 162 msp0: msp@80123000 {
172 pinctrl-names = "default"; 163 pinctrl-names = "default";
173 pinctrl-0 = <&msp0_default_mode>; 164 pinctrl-0 = <&msp0_default_mode>;
diff --git a/arch/arm/boot/dts/sun7i-a20-lamobo-r1.dts b/arch/arm/boot/dts/sun7i-a20-lamobo-r1.dts
index 72ec0d5ae052..bbf1c8cbaac6 100644
--- a/arch/arm/boot/dts/sun7i-a20-lamobo-r1.dts
+++ b/arch/arm/boot/dts/sun7i-a20-lamobo-r1.dts
@@ -167,7 +167,7 @@
 				reg = <8>;
 				label = "cpu";
 				ethernet = <&gmac>;
-				phy-mode = "rgmii";
+				phy-mode = "rgmii-txid";
 				fixed-link {
 					speed = <1000>;
 					full-duplex;
diff --git a/arch/arm/boot/dts/sun8i-a23-a33.dtsi b/arch/arm/boot/dts/sun8i-a23-a33.dtsi
index a952cc0703cc..8a3ed21cb7bc 100644
--- a/arch/arm/boot/dts/sun8i-a23-a33.dtsi
+++ b/arch/arm/boot/dts/sun8i-a23-a33.dtsi
@@ -495,7 +495,7 @@
 			resets = <&ccu RST_BUS_GPU>;
 
 			assigned-clocks = <&ccu CLK_GPU>;
-			assigned-clock-rates = <408000000>;
+			assigned-clock-rates = <384000000>;
 		};
 
 		gic: interrupt-controller@01c81000 {
diff --git a/arch/arm/boot/dts/sun8i-a33.dtsi b/arch/arm/boot/dts/sun8i-a33.dtsi
index 18c174fef84f..0467fb365bfc 100644
--- a/arch/arm/boot/dts/sun8i-a33.dtsi
+++ b/arch/arm/boot/dts/sun8i-a33.dtsi
@@ -113,8 +113,8 @@
 		simple-audio-card,mclk-fs = <512>;
 		simple-audio-card,aux-devs = <&codec_analog>;
 		simple-audio-card,routing =
-			"Left DAC", "Digital Left DAC",
-			"Right DAC", "Digital Right DAC";
+			"Left DAC", "AIF1 Slot 0 Left",
+			"Right DAC", "AIF1 Slot 0 Right";
 		status = "disabled";
 
 		simple-audio-card,cpu {
index 7097c18ff487..d6bd15898db6 100644
--- a/arch/arm/boot/dts/sun8i-reference-design-tablet.dtsi
+++ b/arch/arm/boot/dts/sun8i-reference-design-tablet.dtsi
@@ -50,8 +50,6 @@
 
 	backlight: backlight {
 		compatible = "pwm-backlight";
-		pinctrl-names = "default";
-		pinctrl-0 = <&bl_en_pin>;
 		pwms = <&pwm 0 50000 PWM_POLARITY_INVERTED>;
 		brightness-levels = <0 10 20 30 40 50 60 70 80 90 100>;
 		default-brightness-level = <8>;
@@ -93,11 +91,6 @@
 };
 
 &pio {
-	bl_en_pin: bl_en_pin@0 {
-		pins = "PH6";
-		function = "gpio_in";
-	};
-
 	mmc0_cd_pin: mmc0_cd_pin@0 {
 		pins = "PB4";
 		function = "gpio_in";
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index f2462a6bdba6..decd388d613d 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -188,6 +188,7 @@ CONFIG_WL12XX=m
 CONFIG_WL18XX=m
 CONFIG_WLCORE_SPI=m
 CONFIG_WLCORE_SDIO=m
+CONFIG_INPUT_MOUSEDEV=m
 CONFIG_INPUT_JOYDEV=m
 CONFIG_INPUT_EVDEV=m
 CONFIG_KEYBOARD_ATKBD=m
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 96dba7cd8be7..314eb6abe1ff 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -1124,6 +1124,9 @@ static void cpu_hyp_reinit(void)
 		if (__hyp_get_vectors() == hyp_default_vectors)
 			cpu_init_hyp_mode(NULL);
 	}
+
+	if (vgic_present)
+		kvm_vgic_init_cpu_hardware();
 }
 
 static void cpu_hyp_reset(void)
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 962616fd4ddd..582a972371cf 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -292,11 +292,18 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
 	phys_addr_t addr = start, end = start + size;
 	phys_addr_t next;
 
+	assert_spin_locked(&kvm->mmu_lock);
 	pgd = kvm->arch.pgd + stage2_pgd_index(addr);
 	do {
 		next = stage2_pgd_addr_end(addr, end);
 		if (!stage2_pgd_none(*pgd))
 			unmap_stage2_puds(kvm, pgd, addr, next);
+		/*
+		 * If the range is too large, release the kvm->mmu_lock
+		 * to prevent starvation and lockup detector warnings.
+		 */
+		if (next != end)
+			cond_resched_lock(&kvm->mmu_lock);
 	} while (pgd++, addr = next, addr != end);
 }
 
@@ -803,6 +810,7 @@ void stage2_unmap_vm(struct kvm *kvm)
 	int idx;
 
 	idx = srcu_read_lock(&kvm->srcu);
+	down_read(&current->mm->mmap_sem);
 	spin_lock(&kvm->mmu_lock);
 
 	slots = kvm_memslots(kvm);
@@ -810,6 +818,7 @@ void stage2_unmap_vm(struct kvm *kvm)
 		stage2_unmap_memslot(kvm, memslot);
 
 	spin_unlock(&kvm->mmu_lock);
+	up_read(&current->mm->mmap_sem);
 	srcu_read_unlock(&kvm->srcu, idx);
 }
 
@@ -829,7 +838,10 @@ void kvm_free_stage2_pgd(struct kvm *kvm)
 	if (kvm->arch.pgd == NULL)
 		return;
 
+	spin_lock(&kvm->mmu_lock);
 	unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
+	spin_unlock(&kvm->mmu_lock);
+
 	/* Free the HW pgd, one page at a time */
 	free_pages_exact(kvm->arch.pgd, S2_PGD_SIZE);
 	kvm->arch.pgd = NULL;
@@ -1801,6 +1813,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 	    (KVM_PHYS_SIZE >> PAGE_SHIFT))
 		return -EFAULT;
 
+	down_read(&current->mm->mmap_sem);
 	/*
 	 * A memory region could potentially cover multiple VMAs, and any holes
 	 * between them, so iterate over all of them to find out if we can map
@@ -1844,8 +1857,10 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 			pa += vm_start - vma->vm_start;
 
 			/* IO region dirty page logging not allowed */
-			if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES)
-				return -EINVAL;
+			if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) {
+				ret = -EINVAL;
+				goto out;
+			}
 
 			ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
 						    vm_end - vm_start,
@@ -1857,7 +1872,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 	} while (hva < reg_end);
 
 	if (change == KVM_MR_FLAGS_ONLY)
-		return ret;
+		goto out;
 
 	spin_lock(&kvm->mmu_lock);
 	if (ret)
@@ -1865,6 +1880,8 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 	else
 		stage2_flush_memslot(kvm, memslot);
 	spin_unlock(&kvm->mmu_lock);
+out:
+	up_read(&current->mm->mmap_sem);
 	return ret;
 }
 
diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
index 3d89b7905bd9..a277981f414d 100644
--- a/arch/arm/mach-at91/pm.c
+++ b/arch/arm/mach-at91/pm.c
@@ -289,6 +289,22 @@ static void at91_ddr_standby(void)
 	at91_ramc_write(1, AT91_DDRSDRC_LPR, saved_lpr1);
 }
 
+static void sama5d3_ddr_standby(void)
+{
+	u32 lpr0;
+	u32 saved_lpr0;
+
+	saved_lpr0 = at91_ramc_read(0, AT91_DDRSDRC_LPR);
+	lpr0 = saved_lpr0 & ~AT91_DDRSDRC_LPCB;
+	lpr0 |= AT91_DDRSDRC_LPCB_POWER_DOWN;
+
+	at91_ramc_write(0, AT91_DDRSDRC_LPR, lpr0);
+
+	cpu_do_idle();
+
+	at91_ramc_write(0, AT91_DDRSDRC_LPR, saved_lpr0);
+}
+
 /* We manage both DDRAM/SDRAM controllers, we need more than one value to
  * remember.
  */
@@ -323,7 +339,7 @@ static const struct of_device_id const ramc_ids[] __initconst = {
 	{ .compatible = "atmel,at91rm9200-sdramc", .data = at91rm9200_standby },
 	{ .compatible = "atmel,at91sam9260-sdramc", .data = at91sam9_sdram_standby },
 	{ .compatible = "atmel,at91sam9g45-ddramc", .data = at91_ddr_standby },
-	{ .compatible = "atmel,sama5d3-ddramc", .data = at91_ddr_standby },
+	{ .compatible = "atmel,sama5d3-ddramc", .data = sama5d3_ddr_standby },
 	{ /*sentinel*/ }
 };
 
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index 093458b62c8d..c89757abb0ae 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -241,6 +241,3 @@ obj-$(CONFIG_MACH_OMAP2_TUSB6010) += usb-tusb6010.o
 
 onenand-$(CONFIG_MTD_ONENAND_OMAP2)	:= gpmc-onenand.o
 obj-y					+= $(onenand-m) $(onenand-y)
-
-nand-$(CONFIG_MTD_NAND_OMAP2)		:= gpmc-nand.o
-obj-y					+= $(nand-m) $(nand-y)
diff --git a/arch/arm/mach-omap2/gpmc-nand.c b/arch/arm/mach-omap2/gpmc-nand.c
deleted file mode 100644
index f6ac027f3c3b..000000000000
--- a/arch/arm/mach-omap2/gpmc-nand.c
+++ /dev/null
@@ -1,154 +0,0 @@
-/*
- * gpmc-nand.c
- *
- * Copyright (C) 2009 Texas Instruments
- * Vimal Singh <vimalsingh@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-#include <linux/omap-gpmc.h>
-#include <linux/mtd/nand.h>
-#include <linux/platform_data/mtd-nand-omap2.h>
-
-#include <asm/mach/flash.h>
-
-#include "soc.h"
-
-/* minimum size for IO mapping */
-#define	NAND_IO_SIZE	4
-
-static bool gpmc_hwecc_bch_capable(enum omap_ecc ecc_opt)
-{
-	/* platforms which support all ECC schemes */
-	if (soc_is_am33xx() || soc_is_am43xx() || cpu_is_omap44xx() ||
-		 soc_is_omap54xx() || soc_is_dra7xx())
-		return 1;
-
-	if (ecc_opt == OMAP_ECC_BCH4_CODE_HW_DETECTION_SW ||
-		ecc_opt == OMAP_ECC_BCH8_CODE_HW_DETECTION_SW) {
-		if (cpu_is_omap24xx())
-			return 0;
-		else if (cpu_is_omap3630() && (GET_OMAP_REVISION() == 0))
-			return 0;
-		else
-			return 1;
-	}
-
-	/* OMAP3xxx do not have ELM engine, so cannot support ECC schemes
-	 * which require H/W based ECC error detection */
-	if ((cpu_is_omap34xx() || cpu_is_omap3630()) &&
-	    ((ecc_opt == OMAP_ECC_BCH4_CODE_HW) ||
-	     (ecc_opt == OMAP_ECC_BCH8_CODE_HW)))
-		return 0;
-
-	/* legacy platforms support only HAM1 (1-bit Hamming) ECC scheme */
-	if (ecc_opt == OMAP_ECC_HAM1_CODE_HW ||
-	    ecc_opt == OMAP_ECC_HAM1_CODE_SW)
-		return 1;
-	else
-		return 0;
-}
-
-/* This function will go away once the device-tree convertion is complete */
-static void gpmc_set_legacy(struct omap_nand_platform_data *gpmc_nand_data,
-			    struct gpmc_settings *s)
-{
-	/* Enable RD PIN Monitoring Reg */
-	if (gpmc_nand_data->dev_ready) {
-		s->wait_on_read = true;
-		s->wait_on_write = true;
-	}
-
-	if (gpmc_nand_data->devsize == NAND_BUSWIDTH_16)
-		s->device_width = GPMC_DEVWIDTH_16BIT;
-	else
-		s->device_width = GPMC_DEVWIDTH_8BIT;
-}
-
-int gpmc_nand_init(struct omap_nand_platform_data *gpmc_nand_data,
-		   struct gpmc_timings *gpmc_t)
-{
-	int err	= 0;
-	struct gpmc_settings s;
-	struct platform_device *pdev;
-	struct resource gpmc_nand_res[] = {
-		{ .flags = IORESOURCE_MEM, },
-		{ .flags = IORESOURCE_IRQ, },
-		{ .flags = IORESOURCE_IRQ, },
-	};
-
-	BUG_ON(gpmc_nand_data->cs >= GPMC_CS_NUM);
-
-	err = gpmc_cs_request(gpmc_nand_data->cs, NAND_IO_SIZE,
-			      (unsigned long *)&gpmc_nand_res[0].start);
-	if (err < 0) {
-		pr_err("omap2-gpmc: Cannot request GPMC CS %d, error %d\n",
-		       gpmc_nand_data->cs, err);
-		return err;
-	}
-	gpmc_nand_res[0].end = gpmc_nand_res[0].start + NAND_IO_SIZE - 1;
-	gpmc_nand_res[1].start = gpmc_get_client_irq(GPMC_IRQ_FIFOEVENTENABLE);
-	gpmc_nand_res[2].start = gpmc_get_client_irq(GPMC_IRQ_COUNT_EVENT);
-
-	memset(&s, 0, sizeof(struct gpmc_settings));
-	gpmc_set_legacy(gpmc_nand_data, &s);
-
-	s.device_nand = true;
-
-	if (gpmc_t) {
-		err = gpmc_cs_set_timings(gpmc_nand_data->cs, gpmc_t, &s);
-		if (err < 0) {
-			pr_err("omap2-gpmc: Unable to set gpmc timings: %d\n",
-			       err);
-			return err;
-		}
-	}
-
-	err = gpmc_cs_program_settings(gpmc_nand_data->cs, &s);
-	if (err < 0)
-		goto out_free_cs;
-
-	err = gpmc_configure(GPMC_CONFIG_WP, 0);
-	if (err < 0)
-		goto out_free_cs;
-
-	if (!gpmc_hwecc_bch_capable(gpmc_nand_data->ecc_opt)) {
-		pr_err("omap2-nand: Unsupported NAND ECC scheme selected\n");
-		err = -EINVAL;
-		goto out_free_cs;
-	}
-
-
-	pdev = platform_device_alloc("omap2-nand", gpmc_nand_data->cs);
-	if (pdev) {
-		err = platform_device_add_resources(pdev, gpmc_nand_res,
-						    ARRAY_SIZE(gpmc_nand_res));
-		if (!err)
-			pdev->dev.platform_data = gpmc_nand_data;
-	} else {
-		err = -ENOMEM;
-	}
-	if (err)
-		goto out_free_pdev;
-
-	err = platform_device_add(pdev);
-	if (err) {
-		dev_err(&pdev->dev, "Unable to register NAND device\n");
-		goto out_free_pdev;
-	}
-
-	return 0;
-
-out_free_pdev:
-	platform_device_put(pdev);
-out_free_cs:
-	gpmc_cs_free(gpmc_nand_data->cs);
-
-	return err;
-}
diff --git a/arch/arm/mach-omap2/gpmc-onenand.c b/arch/arm/mach-omap2/gpmc-onenand.c
index 8633c703546a..2944af820558 100644
--- a/arch/arm/mach-omap2/gpmc-onenand.c
+++ b/arch/arm/mach-omap2/gpmc-onenand.c
@@ -367,7 +367,7 @@ static int gpmc_onenand_setup(void __iomem *onenand_base, int *freq_ptr)
 	return ret;
 }
 
-void gpmc_onenand_init(struct omap_onenand_platform_data *_onenand_data)
+int gpmc_onenand_init(struct omap_onenand_platform_data *_onenand_data)
 {
 	int err;
 	struct device *dev = &gpmc_onenand_device.dev;
@@ -393,15 +393,17 @@ void gpmc_onenand_init(struct omap_onenand_platform_data *_onenand_data)
 	if (err < 0) {
 		dev_err(dev, "Cannot request GPMC CS %d, error %d\n",
 			gpmc_onenand_data->cs, err);
-		return;
+		return err;
 	}
 
 	gpmc_onenand_resource.end = gpmc_onenand_resource.start +
 					ONENAND_IO_SIZE - 1;
 
-	if (platform_device_register(&gpmc_onenand_device) < 0) {
+	err = platform_device_register(&gpmc_onenand_device);
+	if (err) {
 		dev_err(dev, "Unable to register OneNAND device\n");
 		gpmc_cs_free(gpmc_onenand_data->cs);
-		return;
 	}
+
+	return err;
 }
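
With gpmc_onenand_init() returning int, board code can finally react to failures. A hypothetical caller (names illustrative, assuming only the new return type above):

	if (gpmc_onenand_init(&board_onenand_data) < 0)
		pr_warn("gpmc: OneNAND setup failed, continuing without it\n");
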
diff --git a/arch/arm/mach-omap2/omap-headsmp.S b/arch/arm/mach-omap2/omap-headsmp.S
index fe36ce2734d4..4c6f14cf92a8 100644
--- a/arch/arm/mach-omap2/omap-headsmp.S
+++ b/arch/arm/mach-omap2/omap-headsmp.S
@@ -17,6 +17,7 @@
 
 #include <linux/linkage.h>
 #include <linux/init.h>
+#include <asm/assembler.h>
 
 #include "omap44xx.h"
 
@@ -66,7 +67,7 @@ wait_2: ldr r2, =AUX_CORE_BOOT0_PA @ read from AuxCoreBoot0
 	cmp	r0, r4
 	bne	wait_2
 	ldr	r12, =API_HYP_ENTRY
-	adr	r0, hyp_boot
+	badr	r0, hyp_boot
 	smc	#0
 hyp_boot:
 	b	omap_secondary_startup
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
index 56f917ec8621..1435fee39a89 100644
--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
@@ -2112,11 +2112,20 @@ static struct omap_hwmod_ocp_if omap3_l4_core__i2c3 = {
 };
 
 /* L4 CORE -> SR1 interface */
+static struct omap_hwmod_addr_space omap3_sr1_addr_space[] = {
+	{
+		.pa_start	= OMAP34XX_SR1_BASE,
+		.pa_end		= OMAP34XX_SR1_BASE + SZ_1K - 1,
+		.flags		= ADDR_TYPE_RT,
+	},
+	{ },
+};
 
 static struct omap_hwmod_ocp_if omap34xx_l4_core__sr1 = {
 	.master		= &omap3xxx_l4_core_hwmod,
 	.slave		= &omap34xx_sr1_hwmod,
 	.clk		= "sr_l4_ick",
+	.addr		= omap3_sr1_addr_space,
 	.user		= OCP_USER_MPU,
 };
 
@@ -2124,15 +2133,25 @@ static struct omap_hwmod_ocp_if omap36xx_l4_core__sr1 = {
 	.master		= &omap3xxx_l4_core_hwmod,
 	.slave		= &omap36xx_sr1_hwmod,
 	.clk		= "sr_l4_ick",
+	.addr		= omap3_sr1_addr_space,
 	.user		= OCP_USER_MPU,
 };
 
 /* L4 CORE -> SR1 interface */
+static struct omap_hwmod_addr_space omap3_sr2_addr_space[] = {
+	{
+		.pa_start	= OMAP34XX_SR2_BASE,
+		.pa_end		= OMAP34XX_SR2_BASE + SZ_1K - 1,
+		.flags		= ADDR_TYPE_RT,
+	},
+	{ },
+};
 
 static struct omap_hwmod_ocp_if omap34xx_l4_core__sr2 = {
 	.master		= &omap3xxx_l4_core_hwmod,
 	.slave		= &omap34xx_sr2_hwmod,
 	.clk		= "sr_l4_ick",
+	.addr		= omap3_sr2_addr_space,
 	.user		= OCP_USER_MPU,
 };
 
@@ -2140,6 +2159,7 @@ static struct omap_hwmod_ocp_if omap36xx_l4_core__sr2 = {
 	.master		= &omap3xxx_l4_core_hwmod,
 	.slave		= &omap36xx_sr2_hwmod,
 	.clk		= "sr_l4_ick",
+	.addr		= omap3_sr2_addr_space,
 	.user		= OCP_USER_MPU,
 };
 
@@ -3111,16 +3131,20 @@ static struct omap_hwmod_ocp_if *omap3xxx_dss_hwmod_ocp_ifs[] __initdata = {
  * Return: 0 if device named @dev_name is not likely to be accessible,
  * or 1 if it is likely to be accessible.
  */
-static int __init omap3xxx_hwmod_is_hs_ip_block_usable(struct device_node *bus,
+static bool __init omap3xxx_hwmod_is_hs_ip_block_usable(struct device_node *bus,
 						       const char *dev_name)
 {
+	struct device_node *node;
+	bool available;
+
 	if (!bus)
-		return (omap_type() == OMAP2_DEVICE_TYPE_GP) ? 1 : 0;
+		return omap_type() == OMAP2_DEVICE_TYPE_GP;
 
-	if (of_device_is_available(of_find_node_by_name(bus, dev_name)))
-		return 1;
+	node = of_get_child_by_name(bus, dev_name);
+	available = of_device_is_available(node);
+	of_node_put(node);
 
-	return 0;
+	return available;
 }
 
 int __init omap3xxx_hwmod_init(void)
@@ -3189,15 +3213,20 @@ int __init omap3xxx_hwmod_init(void)
 
 	if (h_sham && omap3xxx_hwmod_is_hs_ip_block_usable(bus, "sham")) {
 		r = omap_hwmod_register_links(h_sham);
-		if (r < 0)
+		if (r < 0) {
+			of_node_put(bus);
 			return r;
+		}
 	}
 
 	if (h_aes && omap3xxx_hwmod_is_hs_ip_block_usable(bus, "aes")) {
 		r = omap_hwmod_register_links(h_aes);
-		if (r < 0)
+		if (r < 0) {
+			of_node_put(bus);
 			return r;
+		}
 	}
+	of_node_put(bus);
 
 	/*
 	 * Register hwmod links specific to certain ES levels of a
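
The lookup pattern above is worth spelling out: of_find_node_by_name() searches the whole tree and consumes the caller's reference, while of_get_child_by_name() searches direct children only and returns a counted reference that must be dropped. A standalone sketch of the leak-free form:

	struct device_node *node;
	bool available;

	node = of_get_child_by_name(bus, "sham");  /* ref taken, or NULL */
	available = of_device_is_available(node);  /* NULL-safe "status" check */
	of_node_put(node);                         /* balance the reference */
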
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 63eabb06f9f1..475811f5383a 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -935,13 +935,31 @@ static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_add
 	__arm_dma_free(dev, size, cpu_addr, handle, attrs, true);
 }
 
+/*
+ * The whole dma_get_sgtable() idea is fundamentally unsafe - it seems
+ * that the intention is to allow exporting memory allocated via the
+ * coherent DMA APIs through the dma_buf API, which only accepts a
+ * scattertable.  This presents a couple of problems:
+ * 1. Not all memory allocated via the coherent DMA APIs is backed by
+ *    a struct page
+ * 2. Passing coherent DMA memory into the streaming APIs is not allowed
+ *    as we will try to flush the memory through a different alias to that
+ *    actually being used (and the flushes are redundant.)
+ */
 int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
 		 void *cpu_addr, dma_addr_t handle, size_t size,
 		 unsigned long attrs)
 {
-	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
+	unsigned long pfn = dma_to_pfn(dev, handle);
+	struct page *page;
 	int ret;
 
+	/* If the PFN is not valid, we do not have a struct page */
+	if (!pfn_valid(pfn))
+		return -ENXIO;
+
+	page = pfn_to_page(pfn);
+
 	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
 	if (unlikely(ret))
 		return ret;
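
Callers of dma_get_sgtable() must now be prepared for -ENXIO when the coherent buffer has no struct page backing. A hypothetical dma-buf-style caller fragment (names illustrative):

	struct sg_table *sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	int ret = -ENOMEM;

	if (sgt) {
		ret = dma_get_sgtable(dev, sgt, vaddr, dma_handle, size);
		if (ret < 0)	/* now -ENXIO when no struct page backs it */
			kfree(sgt);
	}
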
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index 3b5c7aaf9c76..33a45bd96860 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -303,7 +303,10 @@ static inline void set_vbar(unsigned long val)
  */
 static inline bool security_extensions_enabled(void)
 {
-	return !!cpuid_feature_extract(CPUID_EXT_PFR1, 4);
+	/* Check CPUID Identification Scheme before ID_PFR1 read */
+	if ((read_cpuid_id() & 0x000f0000) == 0x000f0000)
+		return !!cpuid_feature_extract(CPUID_EXT_PFR1, 4);
+	return 0;
 }
 
 static unsigned long __init setup_vectors_base(void)
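
A worked example of the MIDR guard: bits [19:16] of the Main ID Register hold the architecture field, and only the value 0xF means the CPUID identification scheme, and hence ID_PFR1, is architecturally defined (the register value below is from the ARM926EJ-S TRM; treat it as illustrative):

	u32 midr = read_cpuid_id();
	/* ARM926EJ-S reports 0x41069265: bits [19:16] = 0x6 (ARMv5TEJ),
	 * so ID_PFR1 must not be read; CPUID-scheme cores have 0xF there. */
	bool has_cpuid_scheme = (midr & 0x000f0000) == 0x000f0000;
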
diff --git a/arch/arm/probes/kprobes/core.c b/arch/arm/probes/kprobes/core.c
index b6dc9d838a9a..ad1f4e6a9e33 100644
--- a/arch/arm/probes/kprobes/core.c
+++ b/arch/arm/probes/kprobes/core.c
@@ -266,11 +266,20 @@ void __kprobes kprobe_handler(struct pt_regs *regs)
 #endif
 
 	if (p) {
-		if (cur) {
+		if (!p->ainsn.insn_check_cc(regs->ARM_cpsr)) {
+			/*
+			 * Probe hit but conditional execution check failed,
+			 * so just skip the instruction and continue as if
+			 * nothing had happened.
+			 * In this case, we can skip recursing check too.
+			 */
+			singlestep_skip(p, regs);
+		} else if (cur) {
 			/* Kprobe is pending, so we're recursing. */
 			switch (kcb->kprobe_status) {
 			case KPROBE_HIT_ACTIVE:
 			case KPROBE_HIT_SSDONE:
+			case KPROBE_HIT_SS:
 				/* A pre- or post-handler probe got us here. */
 				kprobes_inc_nmissed_count(p);
 				save_previous_kprobe(kcb);
@@ -279,11 +288,16 @@ void __kprobes kprobe_handler(struct pt_regs *regs)
 				singlestep(p, regs, kcb);
 				restore_previous_kprobe(kcb);
 				break;
+			case KPROBE_REENTER:
+				/* A nested probe was hit in FIQ, it is a BUG */
+				pr_warn("Unrecoverable kprobe detected at %p.\n",
+					p->addr);
+				/* fall through */
 			default:
 				/* impossible cases */
 				BUG();
 			}
-		} else if (p->ainsn.insn_check_cc(regs->ARM_cpsr)) {
+		} else {
 			/* Probe hit and conditional execution check ok. */
 			set_current_kprobe(p);
 			kcb->kprobe_status = KPROBE_HIT_ACTIVE;
@@ -304,13 +318,6 @@ void __kprobes kprobe_handler(struct pt_regs *regs)
 			}
 			reset_current_kprobe();
 		}
-	} else {
-		/*
-		 * Probe hit but conditional execution check failed,
-		 * so just skip the instruction and continue as if
-		 * nothing had happened.
-		 */
-		singlestep_skip(p, regs);
 	}
 	} else if (cur) {
 		/* We probably hit a jprobe.  Call its break handler. */
@@ -434,6 +441,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
 	struct hlist_node *tmp;
 	unsigned long flags, orig_ret_address = 0;
 	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
+	kprobe_opcode_t *correct_ret_addr = NULL;
 
 	INIT_HLIST_HEAD(&empty_rp);
 	kretprobe_hash_lock(current, &head, &flags);
@@ -456,14 +464,34 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
 			/* another task is sharing our hash bucket */
 			continue;
 
+		orig_ret_address = (unsigned long)ri->ret_addr;
+
+		if (orig_ret_address != trampoline_address)
+			/*
+			 * This is the real return address. Any other
+			 * instances associated with this task are for
+			 * other calls deeper on the call stack
+			 */
+			break;
+	}
+
+	kretprobe_assert(ri, orig_ret_address, trampoline_address);
+
+	correct_ret_addr = ri->ret_addr;
+	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
+		if (ri->task != current)
+			/* another task is sharing our hash bucket */
+			continue;
+
+		orig_ret_address = (unsigned long)ri->ret_addr;
 		if (ri->rp && ri->rp->handler) {
 			__this_cpu_write(current_kprobe, &ri->rp->kp);
 			get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
+			ri->ret_addr = correct_ret_addr;
 			ri->rp->handler(ri, regs);
 			__this_cpu_write(current_kprobe, NULL);
 		}
 
-		orig_ret_address = (unsigned long)ri->ret_addr;
 		recycle_rp_inst(ri, &empty_rp);
 
 		if (orig_ret_address != trampoline_address)
@@ -475,7 +503,6 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
 			break;
 	}
 
-	kretprobe_assert(ri, orig_ret_address, trampoline_address);
 	kretprobe_hash_unlock(current, &flags);
 
 	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
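
For context, a minimal kretprobe user in the usual samples/kprobes style (the probed symbol is arbitrary); with the two-pass fix above, the ri->ret_addr seen by the handler is correct even when several kretprobes nest on one task:

	static int ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
	{
		pr_info("%s returned to %p\n",
			ri->rp->kp.symbol_name, ri->ret_addr);
		return 0;
	}

	static struct kretprobe my_rp = {
		.handler	= ret_handler,
		.kp.symbol_name	= "do_fork",
	};

	/* register_kretprobe(&my_rp); ... unregister_kretprobe(&my_rp); */
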
diff --git a/arch/arm/probes/kprobes/test-core.c b/arch/arm/probes/kprobes/test-core.c
index c893726aa52d..1c98a87786ca 100644
--- a/arch/arm/probes/kprobes/test-core.c
+++ b/arch/arm/probes/kprobes/test-core.c
@@ -977,7 +977,10 @@ static void coverage_end(void)
 void __naked __kprobes_test_case_start(void)
 {
 	__asm__ __volatile__ (
-	"stmdb	sp!, {r4-r11}				\n\t"
+	"mov	r2, sp					\n\t"
+	"bic	r3, r2, #7				\n\t"
+	"mov	sp, r3					\n\t"
+	"stmdb	sp!, {r2-r11}				\n\t"
 	"sub	sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t"
 	"bic	r0, lr, #1  @ r0 = inline data		\n\t"
 	"mov	r1, sp					\n\t"
@@ -997,7 +1000,8 @@ void __naked __kprobes_test_case_end_32(void)
 	"movne	pc, r0					\n\t"
 	"mov	r0, r4					\n\t"
 	"add	sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t"
-	"ldmia	sp!, {r4-r11}				\n\t"
+	"ldmia	sp!, {r2-r11}				\n\t"
+	"mov	sp, r2					\n\t"
 	"mov	pc, r0					\n\t"
 	);
 }
@@ -1013,7 +1017,8 @@ void __naked __kprobes_test_case_end_16(void)
 	"bxne	r0					\n\t"
 	"mov	r0, r4					\n\t"
 	"add	sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t"
-	"ldmia	sp!, {r4-r11}				\n\t"
+	"ldmia	sp!, {r2-r11}				\n\t"
+	"mov	sp, r2					\n\t"
 	"bx	r0					\n\t"
 	);
 }
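
The new prologue exists because AAPCS requires an 8-byte-aligned stack at public interfaces, and the test stack may arrive only 4-byte aligned. In C terms (values illustrative):

	unsigned long sp      = 0xbeffffec;	/* only 4-byte aligned */
	unsigned long aligned = sp & ~7UL;	/* 0xbeffffe8: what "bic r3, r2, #7" computes */

The original sp is saved in r2 and restored on exit, which is why the stm/ldm ranges grew to {r2-r11}.
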
diff --git a/arch/arm/tools/syscall.tbl b/arch/arm/tools/syscall.tbl
index 3c2cb5d5adfa..0bb0e9c6376c 100644
--- a/arch/arm/tools/syscall.tbl
+++ b/arch/arm/tools/syscall.tbl
@@ -411,3 +411,4 @@
 394	common	pkey_mprotect		sys_pkey_mprotect
 395	common	pkey_alloc		sys_pkey_alloc
 396	common	pkey_free		sys_pkey_free
+397	common	statx			sys_statx
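
Syscall 397 is usable from userspace immediately; a minimal sketch that invokes it directly (glibc had no statx() wrapper at the time), assuming kernel uapi headers that ship struct statx:

	#include <stdio.h>
	#include <unistd.h>
	#include <fcntl.h>          /* AT_FDCWD */
	#include <sys/syscall.h>
	#include <linux/stat.h>     /* struct statx, STATX_* */

	#ifndef __NR_statx
	#define __NR_statx 397      /* ARM number added above */
	#endif

	int main(void)
	{
		struct statx stx;

		if (syscall(__NR_statx, AT_FDCWD, "/etc/hostname", 0,
			    STATX_BASIC_STATS, &stx) != 0) {
			perror("statx");
			return 1;
		}
		printf("size=%llu\n", (unsigned long long)stx.stx_size);
		return 0;
	}
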
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 8c7c244247b6..3741859765cf 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1073,6 +1073,10 @@ config SYSVIPC_COMPAT
 	def_bool y
 	depends on COMPAT && SYSVIPC
 
+config KEYS_COMPAT
+	def_bool y
+	depends on COMPAT && KEYS
+
 endmenu
 
 menu "Power management options"
diff --git a/arch/arm64/boot/dts/broadcom/ns2.dtsi b/arch/arm64/boot/dts/broadcom/ns2.dtsi
index 9f9e203c09c5..bcb03fc32665 100644
--- a/arch/arm64/boot/dts/broadcom/ns2.dtsi
+++ b/arch/arm64/boot/dts/broadcom/ns2.dtsi
@@ -114,6 +114,7 @@
 		pcie0: pcie@20020000 {
 			compatible = "brcm,iproc-pcie";
 			reg = <0 0x20020000 0 0x1000>;
+			dma-coherent;
 
 			#interrupt-cells = <1>;
 			interrupt-map-mask = <0 0 0 0>;
@@ -144,6 +145,7 @@
 		pcie4: pcie@50020000 {
 			compatible = "brcm,iproc-pcie";
 			reg = <0 0x50020000 0 0x1000>;
+			dma-coherent;
 
 			#interrupt-cells = <1>;
 			interrupt-map-mask = <0 0 0 0>;
@@ -174,6 +176,7 @@
 		pcie8: pcie@60c00000 {
 			compatible = "brcm,iproc-pcie-paxc";
 			reg = <0 0x60c00000 0 0x1000>;
+			dma-coherent;
 			linux,pci-domain = <8>;
 
 			bus-range = <0x0 0x1>;
@@ -203,6 +206,7 @@
 			      <0x61030000 0x100>;
 			reg-names = "amac_base", "idm_base", "nicpm_base";
 			interrupts = <GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH>;
+			dma-coherent;
 			phy-handle = <&gphy0>;
 			phy-mode = "rgmii";
 			status = "disabled";
@@ -213,6 +217,7 @@
 			reg = <0x612c0000 0x445>;	/* PDC FS0 regs */
 			interrupts = <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>;
 			#mbox-cells = <1>;
+			dma-coherent;
 			brcm,rx-status-len = <32>;
 			brcm,use-bcm-hdr;
 		};
@@ -222,6 +227,7 @@
 			reg = <0x612e0000 0x445>;	/* PDC FS1 regs */
 			interrupts = <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>;
 			#mbox-cells = <1>;
+			dma-coherent;
 			brcm,rx-status-len = <32>;
 			brcm,use-bcm-hdr;
 		};
@@ -231,6 +237,7 @@
 			reg = <0x61300000 0x445>;	/* PDC FS2 regs */
 			interrupts = <GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>;
 			#mbox-cells = <1>;
+			dma-coherent;
 			brcm,rx-status-len = <32>;
 			brcm,use-bcm-hdr;
 		};
@@ -240,6 +247,7 @@
 			reg = <0x61320000 0x445>;	/* PDC FS3 regs */
 			interrupts = <GIC_SPI 193 IRQ_TYPE_LEVEL_HIGH>;
 			#mbox-cells = <1>;
+			dma-coherent;
 			brcm,rx-status-len = <32>;
 			brcm,use-bcm-hdr;
 		};
@@ -644,6 +652,7 @@
 		sata: ahci@663f2000 {
 			compatible = "brcm,iproc-ahci", "generic-ahci";
 			reg = <0x663f2000 0x1000>;
+			dma-coherent;
 			reg-names = "ahci";
 			interrupts = <GIC_SPI 438 IRQ_TYPE_LEVEL_HIGH>;
 			#address-cells = <1>;
@@ -667,6 +676,7 @@
 			compatible = "brcm,sdhci-iproc-cygnus";
 			reg = <0x66420000 0x100>;
 			interrupts = <GIC_SPI 421 IRQ_TYPE_LEVEL_HIGH>;
+			dma-coherent;
 			bus-width = <8>;
 			clocks = <&genpll_sw BCM_NS2_GENPLL_SW_SDIO_CLK>;
 			status = "disabled";
@@ -676,6 +686,7 @@
 			compatible = "brcm,sdhci-iproc-cygnus";
 			reg = <0x66430000 0x100>;
 			interrupts = <GIC_SPI 422 IRQ_TYPE_LEVEL_HIGH>;
+			dma-coherent;
 			bus-width = <8>;
 			clocks = <&genpll_sw BCM_NS2_GENPLL_SW_SDIO_CLK>;
 			status = "disabled";
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 05310ad8c5ab..f31c48d0cd68 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -251,7 +251,7 @@ static inline bool system_supports_fpsimd(void)
 static inline bool system_uses_ttbr0_pan(void)
 {
 	return IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN) &&
-		!cpus_have_cap(ARM64_HAS_PAN);
+		!cpus_have_const_cap(ARM64_HAS_PAN);
 }
 
 #endif /* __ASSEMBLY__ */
diff --git a/arch/arm64/include/asm/current.h b/arch/arm64/include/asm/current.h
index 86c404171305..f6580d4afb0e 100644
--- a/arch/arm64/include/asm/current.h
+++ b/arch/arm64/include/asm/current.h
@@ -3,8 +3,6 @@
 
 #include <linux/compiler.h>
 
-#include <asm/sysreg.h>
-
 #ifndef __ASSEMBLY__
 
 struct task_struct;
diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h
index e78ac26324bd..bdbeb06dc11e 100644
--- a/arch/arm64/include/asm/unistd.h
+++ b/arch/arm64/include/asm/unistd.h
@@ -44,7 +44,7 @@
 #define __ARM_NR_compat_cacheflush	(__ARM_NR_COMPAT_BASE+2)
 #define __ARM_NR_compat_set_tls	(__ARM_NR_COMPAT_BASE+5)
 
-#define __NR_compat_syscalls		394
+#define __NR_compat_syscalls		398
 #endif
 
 #define __ARCH_WANT_SYS_CLONE
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
index b7e8ef16ff0d..c66b51aab195 100644
--- a/arch/arm64/include/asm/unistd32.h
+++ b/arch/arm64/include/asm/unistd32.h
@@ -809,6 +809,14 @@ __SYSCALL(__NR_copy_file_range, sys_copy_file_range)
 __SYSCALL(__NR_preadv2, compat_sys_preadv2)
 #define __NR_pwritev2 393
 __SYSCALL(__NR_pwritev2, compat_sys_pwritev2)
+#define __NR_pkey_mprotect 394
+__SYSCALL(__NR_pkey_mprotect, sys_pkey_mprotect)
+#define __NR_pkey_alloc 395
+__SYSCALL(__NR_pkey_alloc, sys_pkey_alloc)
+#define __NR_pkey_free 396
+__SYSCALL(__NR_pkey_free, sys_pkey_free)
+#define __NR_statx 397
+__SYSCALL(__NR_statx, sys_statx)
 
 /*
  * Please add new compat syscalls above this comment and update
diff --git a/arch/arm64/kernel/cpuidle.c b/arch/arm64/kernel/cpuidle.c
index 75a0f8acef66..fd691087dc9a 100644
--- a/arch/arm64/kernel/cpuidle.c
+++ b/arch/arm64/kernel/cpuidle.c
@@ -30,7 +30,7 @@ int arm_cpuidle_init(unsigned int cpu)
 }
 
 /**
- * cpu_suspend() - function to enter a low-power idle state
+ * arm_cpuidle_suspend() - function to enter a low-power idle state
  * @arg: argument to pass to CPU suspend operations
  *
  * Return: 0 on success, -EOPNOTSUPP if CPU suspend hook not initialized, CPU
diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
index 769f24ef628c..d7e90d97f5c4 100644
--- a/arch/arm64/kernel/kaslr.c
+++ b/arch/arm64/kernel/kaslr.c
@@ -131,11 +131,15 @@ u64 __init kaslr_early_init(u64 dt_phys, u64 modulo_offset)
 	/*
 	 * The kernel Image should not extend across a 1GB/32MB/512MB alignment
 	 * boundary (for 4KB/16KB/64KB granule kernels, respectively). If this
-	 * happens, increase the KASLR offset by the size of the kernel image.
+	 * happens, increase the KASLR offset by the size of the kernel image
+	 * rounded up by SWAPPER_BLOCK_SIZE.
 	 */
 	if ((((u64)_text + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT) !=
-	    (((u64)_end + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT))
-		offset = (offset + (u64)(_end - _text)) & mask;
+	    (((u64)_end + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT)) {
+		u64 kimg_sz = _end - _text;
+		offset = (offset + round_up(kimg_sz, SWAPPER_BLOCK_SIZE))
+				& mask;
+	}
 
 	if (IS_ENABLED(CONFIG_KASAN))
 		/*
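
A worked example of the new bump (numbers illustrative; 4 KB granule assumed, where SWAPPER_BLOCK_SIZE is the 2 MB section size): a 14.5 MB image is not a whole number of blocks, so adding the raw size and then masking back to block alignment could round down into the same block, leaving the image still straddling the boundary. Rounding the size up to whole blocks first avoids that:

	u64 kimg_sz = 0xe80000;			/* 14.5 MB */
	u64 bump    = round_up(kimg_sz, SZ_2M);	/* 0x1000000 = 16 MB */
	offset      = (offset + bump) & mask;	/* stays block-aligned */
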
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
index 2a07aae5b8a2..c5c45942fb6e 100644
--- a/arch/arm64/kernel/probes/kprobes.c
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -372,12 +372,6 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr)
 	return 0;
 }
 
-int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
-				       unsigned long val, void *data)
-{
-	return NOTIFY_DONE;
-}
-
 static void __kprobes kprobe_handler(struct pt_regs *regs)
 {
 	struct kprobe *p, *cur_kprobe;
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index ef1caae02110..9b1036570586 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -944,7 +944,7 @@ static bool have_cpu_die(void)
 #ifdef CONFIG_HOTPLUG_CPU
 	int any_cpu = raw_smp_processor_id();
 
-	if (cpu_ops[any_cpu]->cpu_die)
+	if (cpu_ops[any_cpu] && cpu_ops[any_cpu]->cpu_die)
 		return true;
 #endif
 	return false;
diff --git a/arch/arm64/kernel/vdso/.gitignore b/arch/arm64/kernel/vdso/.gitignore
index b8cc94e9698b..f8b69d84238e 100644
--- a/arch/arm64/kernel/vdso/.gitignore
+++ b/arch/arm64/kernel/vdso/.gitignore
@@ -1,2 +1 @@
 vdso.lds
-vdso-offsets.h
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 4bf899fb451b..1b35b8bddbfb 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -42,7 +42,20 @@
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 
-static const char *fault_name(unsigned int esr);
+struct fault_info {
+	int	(*fn)(unsigned long addr, unsigned int esr,
+		      struct pt_regs *regs);
+	int	sig;
+	int	code;
+	const char *name;
+};
+
+static const struct fault_info fault_info[];
+
+static inline const struct fault_info *esr_to_fault_info(unsigned int esr)
+{
+	return fault_info + (esr & 63);
+}
 
 #ifdef CONFIG_KPROBES
 static inline int notify_page_fault(struct pt_regs *regs, unsigned int esr)
@@ -197,10 +210,12 @@ static void __do_user_fault(struct task_struct *tsk, unsigned long addr,
 			    struct pt_regs *regs)
 {
 	struct siginfo si;
+	const struct fault_info *inf;
 
 	if (unhandled_signal(tsk, sig) && show_unhandled_signals_ratelimited()) {
+		inf = esr_to_fault_info(esr);
 		pr_info("%s[%d]: unhandled %s (%d) at 0x%08lx, esr 0x%03x\n",
-			tsk->comm, task_pid_nr(tsk), fault_name(esr), sig,
+			tsk->comm, task_pid_nr(tsk), inf->name, sig,
 			addr, esr);
 		show_pte(tsk->mm, addr);
 		show_regs(regs);
@@ -219,14 +234,16 @@ static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *re
 {
 	struct task_struct *tsk = current;
 	struct mm_struct *mm = tsk->active_mm;
+	const struct fault_info *inf;
 
 	/*
 	 * If we are in kernel mode at this point, we have no context to
 	 * handle this fault with.
 	 */
-	if (user_mode(regs))
-		__do_user_fault(tsk, addr, esr, SIGSEGV, SEGV_MAPERR, regs);
-	else
+	if (user_mode(regs)) {
+		inf = esr_to_fault_info(esr);
+		__do_user_fault(tsk, addr, esr, inf->sig, inf->code, regs);
+	} else
 		__do_kernel_fault(mm, addr, esr, regs);
 }
 
@@ -488,12 +505,7 @@ static int do_bad(unsigned long addr, unsigned int esr, struct pt_regs *regs)
 	return 1;
 }
 
-static const struct fault_info {
-	int	(*fn)(unsigned long addr, unsigned int esr, struct pt_regs *regs);
-	int	sig;
-	int	code;
-	const char *name;
-} fault_info[] = {
+static const struct fault_info fault_info[] = {
 	{ do_bad,		SIGBUS,  0,		"ttbr address size fault" },
 	{ do_bad,		SIGBUS,  0,		"level 1 address size fault" },
 	{ do_bad,		SIGBUS,  0,		"level 2 address size fault" },
@@ -560,19 +572,13 @@ static const struct fault_info {
 	{ do_bad,		SIGBUS,  0,		"unknown 63" },
 };
 
-static const char *fault_name(unsigned int esr)
-{
-	const struct fault_info *inf = fault_info + (esr & 63);
-	return inf->name;
-}
-
 /*
  * Dispatch a data abort to the relevant handler.
  */
 asmlinkage void __exception do_mem_abort(unsigned long addr, unsigned int esr,
 					 struct pt_regs *regs)
 {
-	const struct fault_info *inf = fault_info + (esr & 63);
+	const struct fault_info *inf = esr_to_fault_info(esr);
 	struct siginfo info;
 
 	if (!inf->fn(addr, esr, regs))
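
An illustration of the lookup (the ESR value here is hypothetical): the low six bits of ESR_EL1 are the fault status code, used directly as an index into the 64-entry table.

	unsigned int esr = 0x92000007;	/* data abort, DFSC = 0b000111 */
	const struct fault_info *inf = esr_to_fault_info(esr);
	/* esr & 63 == 7: the "level 3 translation fault" entry */
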
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index e25584d72396..7514a000e361 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -294,10 +294,6 @@ static __init int setup_hugepagesz(char *opt)
 		hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
 	} else if (ps == PUD_SIZE) {
 		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
-	} else if (ps == (PAGE_SIZE * CONT_PTES)) {
-		hugetlb_add_hstate(CONT_PTE_SHIFT);
-	} else if (ps == (PMD_SIZE * CONT_PMDS)) {
-		hugetlb_add_hstate((PMD_SHIFT + CONT_PMD_SHIFT) - PAGE_SHIFT);
 	} else {
 		hugetlb_bad_size();
 		pr_err("hugepagesz: Unsupported page size %lu K\n", ps >> 10);
@@ -306,13 +302,3 @@ static __init int setup_hugepagesz(char *opt)
 	return 1;
 }
 __setup("hugepagesz=", setup_hugepagesz);
-
-#ifdef CONFIG_ARM64_64K_PAGES
-static __init int add_default_hugepagesz(void)
-{
-	if (size_to_hstate(CONT_PTES * PAGE_SIZE) == NULL)
-		hugetlb_add_hstate(CONT_PTE_SHIFT);
-	return 0;
-}
-arch_initcall(add_default_hugepagesz);
-#endif
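
In practice this changes what the hugepagesz= boot parameter accepts. On a 4 KB granule kernel after this change (illustrative):

	hugepagesz=2M hugepages=16	# still valid (PMD_SIZE)
	hugepagesz=64K hugepages=16	# now rejected: "hugepagesz: Unsupported page size 64 K"

The contiguous-hint sizes (64 KB via CONT_PTES and 32 MB via CONT_PMDS on a 4 KB granule) are no longer registered.
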
diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
index 55d1e9205543..687a358a3733 100644
--- a/arch/arm64/mm/kasan_init.c
+++ b/arch/arm64/mm/kasan_init.c
@@ -162,7 +162,7 @@ void __init kasan_init(void)
 	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
 
 	vmemmap_populate(kimg_shadow_start, kimg_shadow_end,
-			 pfn_to_nid(virt_to_pfn(_text)));
+			 pfn_to_nid(virt_to_pfn(lm_alias(_text))));
 
 	/*
 	 * vmemmap_populate() has populated the shadow region that covers the
diff --git a/arch/c6x/kernel/ptrace.c b/arch/c6x/kernel/ptrace.c
index a27e1f02ce18..8801dc98fd44 100644
--- a/arch/c6x/kernel/ptrace.c
+++ b/arch/c6x/kernel/ptrace.c
@@ -70,46 +70,6 @@ static int gpr_get(struct task_struct *target,
 				   0, sizeof(*regs));
 }
 
-static int gpr_set(struct task_struct *target,
-		   const struct user_regset *regset,
-		   unsigned int pos, unsigned int count,
-		   const void *kbuf, const void __user *ubuf)
-{
-	int ret;
-	struct pt_regs *regs = task_pt_regs(target);
-
-	/* Don't copyin TSR or CSR */
-	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
-				 &regs,
-				 0, PT_TSR * sizeof(long));
-	if (ret)
-		return ret;
-
-	ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
-					PT_TSR * sizeof(long),
-					(PT_TSR + 1) * sizeof(long));
-	if (ret)
-		return ret;
-
-	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
-				 &regs,
-				 (PT_TSR + 1) * sizeof(long),
-				 PT_CSR * sizeof(long));
-	if (ret)
-		return ret;
-
-	ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
-					PT_CSR * sizeof(long),
-					(PT_CSR + 1) * sizeof(long));
-	if (ret)
-		return ret;
-
-	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
-				 &regs,
-				 (PT_CSR + 1) * sizeof(long), -1);
-	return ret;
-}
-
 enum c6x_regset {
 	REGSET_GPR,
 };
@@ -121,7 +81,6 @@ static const struct user_regset c6x_regsets[] = {
 		.size = sizeof(u32),
 		.align = sizeof(u32),
 		.get = gpr_get,
-		.set = gpr_set
 	},
 };
 
diff --git a/arch/h8300/kernel/ptrace.c b/arch/h8300/kernel/ptrace.c
index 92075544a19a..0dc1c8f622bc 100644
--- a/arch/h8300/kernel/ptrace.c
+++ b/arch/h8300/kernel/ptrace.c
@@ -95,7 +95,8 @@ static int regs_get(struct task_struct *target,
 	long *reg = (long *)&regs;
 
 	/* build user regs in buffer */
-	for (r = 0; r < ARRAY_SIZE(register_offset); r++)
+	BUILD_BUG_ON(sizeof(regs) % sizeof(long) != 0);
+	for (r = 0; r < sizeof(regs) / sizeof(long); r++)
 		*reg++ = h8300_get_reg(target, r);
 
 	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
@@ -113,7 +114,8 @@ static int regs_set(struct task_struct *target,
 	long *reg;
 
 	/* build user regs in buffer */
-	for (reg = (long *)&regs, r = 0; r < ARRAY_SIZE(register_offset); r++)
+	BUILD_BUG_ON(sizeof(regs) % sizeof(long) != 0);
+	for (reg = (long *)&regs, r = 0; r < sizeof(regs) / sizeof(long); r++)
 		*reg++ = h8300_get_reg(target, r);
 
 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
@@ -122,7 +124,7 @@ static int regs_set(struct task_struct *target,
 		return ret;
 
 	/* write back to pt_regs */
-	for (reg = (long *)&regs, r = 0; r < ARRAY_SIZE(register_offset); r++)
+	for (reg = (long *)&regs, r = 0; r < sizeof(regs) / sizeof(long); r++)
 		h8300_put_reg(target, r, *reg++);
 	return 0;
 }
diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig
index 048bf076f7df..531cb9eb3319 100644
--- a/arch/m68k/configs/amiga_defconfig
+++ b/arch/m68k/configs/amiga_defconfig
@@ -25,6 +25,7 @@ CONFIG_SUN_PARTITION=y
 # CONFIG_EFI_PARTITION is not set
 CONFIG_SYSV68_PARTITION=y
 CONFIG_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_DEADLINE=m
 CONFIG_KEXEC=y
 CONFIG_BOOTINFO_PROC=y
 CONFIG_M68020=y
@@ -60,6 +61,7 @@ CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
+CONFIG_INET_ESP_OFFLOAD=m
 CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
@@ -71,6 +73,7 @@ CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
+CONFIG_INET6_ESP_OFFLOAD=m
 CONFIG_INET6_IPCOMP=m
 CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
@@ -101,6 +104,7 @@ CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_SET_RBTREE=m
 CONFIG_NFT_SET_HASH=m
+CONFIG_NFT_SET_BITMAP=m
 CONFIG_NFT_COUNTER=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
@@ -298,6 +302,8 @@ CONFIG_MPLS_IPTUNNEL=m
 CONFIG_NET_L3_MASTER_DEV=y
 CONFIG_AF_KCM=m
 # CONFIG_WIRELESS is not set
+CONFIG_PSAMPLE=m
+CONFIG_NET_IFE=m
 CONFIG_NET_DEVLINK=m
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -371,6 +377,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m
 CONFIG_MACVLAN=m
 CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
+CONFIG_IPVTAP=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
 CONFIG_GTP=m
@@ -383,6 +390,7 @@ CONFIG_VETH=m
 # CONFIG_NET_VENDOR_AMAZON is not set
 CONFIG_A2065=y
 CONFIG_ARIADNE=y
+# CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
 # CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
@@ -404,7 +412,6 @@ CONFIG_ZORRO8390=y
 # CONFIG_NET_VENDOR_SOLARFLARE is not set
 # CONFIG_NET_VENDOR_SMSC is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -564,6 +571,8 @@ CONFIG_NLS_MAC_TURKISH=m
 CONFIG_DLM=m
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
+CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_ATOMIC64_SELFTEST=m
 CONFIG_ASYNC_RAID6_TEST=m
 CONFIG_TEST_HEXDUMP=m
 CONFIG_TEST_STRING_HELPERS=m
@@ -594,6 +603,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -605,6 +615,7 @@ CONFIG_CRYPTO_SHA512=m
 CONFIG_CRYPTO_SHA3=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
 CONFIG_CRYPTO_ANUBIS=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
@@ -629,4 +640,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_CRYPTO_USER_API_AEAD=m
 # CONFIG_CRYPTO_HW is not set
+CONFIG_CRC32_SELFTEST=m
 CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig
index d4de24963f5f..ca91d39555da 100644
--- a/arch/m68k/configs/apollo_defconfig
+++ b/arch/m68k/configs/apollo_defconfig
@@ -26,6 +26,7 @@ CONFIG_SUN_PARTITION=y
 # CONFIG_EFI_PARTITION is not set
 CONFIG_SYSV68_PARTITION=y
 CONFIG_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_DEADLINE=m
 CONFIG_KEXEC=y
 CONFIG_BOOTINFO_PROC=y
 CONFIG_M68020=y
@@ -58,6 +59,7 @@ CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
+CONFIG_INET_ESP_OFFLOAD=m
 CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
@@ -69,6 +71,7 @@ CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
+CONFIG_INET6_ESP_OFFLOAD=m
 CONFIG_INET6_IPCOMP=m
 CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
@@ -99,6 +102,7 @@ CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_SET_RBTREE=m
 CONFIG_NFT_SET_HASH=m
+CONFIG_NFT_SET_BITMAP=m
 CONFIG_NFT_COUNTER=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
@@ -296,6 +300,8 @@ CONFIG_MPLS_IPTUNNEL=m
 CONFIG_NET_L3_MASTER_DEV=y
 CONFIG_AF_KCM=m
 # CONFIG_WIRELESS is not set
+CONFIG_PSAMPLE=m
+CONFIG_NET_IFE=m
 CONFIG_NET_DEVLINK=m
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -353,6 +359,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m
 CONFIG_MACVLAN=m
 CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
+CONFIG_IPVTAP=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
 CONFIG_GTP=m
@@ -362,6 +369,7 @@ CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
 # CONFIG_NET_VENDOR_ALACRITECH is not set
 # CONFIG_NET_VENDOR_AMAZON is not set
+# CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
 # CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
@@ -378,7 +386,6 @@ CONFIG_VETH=m
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_SOLARFLARE is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -523,6 +530,8 @@ CONFIG_NLS_MAC_TURKISH=m
 CONFIG_DLM=m
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
+CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_ATOMIC64_SELFTEST=m
 CONFIG_ASYNC_RAID6_TEST=m
 CONFIG_TEST_HEXDUMP=m
 CONFIG_TEST_STRING_HELPERS=m
@@ -553,6 +562,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -564,6 +574,7 @@ CONFIG_CRYPTO_SHA512=m
 CONFIG_CRYPTO_SHA3=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
 CONFIG_CRYPTO_ANUBIS=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
@@ -588,4 +599,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_CRYPTO_USER_API_AEAD=m
 # CONFIG_CRYPTO_HW is not set
+CONFIG_CRC32_SELFTEST=m
 CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig
index fc0fd3f871f3..23a3d8a691e2 100644
--- a/arch/m68k/configs/atari_defconfig
+++ b/arch/m68k/configs/atari_defconfig
@@ -25,6 +25,7 @@ CONFIG_SUN_PARTITION=y
 # CONFIG_EFI_PARTITION is not set
 CONFIG_SYSV68_PARTITION=y
 CONFIG_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_DEADLINE=m
 CONFIG_KEXEC=y
 CONFIG_BOOTINFO_PROC=y
 CONFIG_M68020=y
@@ -58,6 +59,7 @@ CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
+CONFIG_INET_ESP_OFFLOAD=m
 CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
@@ -69,6 +71,7 @@ CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
+CONFIG_INET6_ESP_OFFLOAD=m
 CONFIG_INET6_IPCOMP=m
 CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
@@ -99,6 +102,7 @@ CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_SET_RBTREE=m
 CONFIG_NFT_SET_HASH=m
+CONFIG_NFT_SET_BITMAP=m
 CONFIG_NFT_COUNTER=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
@@ -296,6 +300,8 @@ CONFIG_MPLS_IPTUNNEL=m
 CONFIG_NET_L3_MASTER_DEV=y
 CONFIG_AF_KCM=m
 # CONFIG_WIRELESS is not set
+CONFIG_PSAMPLE=m
+CONFIG_NET_IFE=m
 CONFIG_NET_DEVLINK=m
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -362,6 +368,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m
 CONFIG_MACVLAN=m
 CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
+CONFIG_IPVTAP=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
 CONFIG_GTP=m
@@ -372,6 +379,7 @@ CONFIG_VETH=m
 # CONFIG_NET_VENDOR_ALACRITECH is not set
 # CONFIG_NET_VENDOR_AMAZON is not set
 CONFIG_ATARILANCE=y
+# CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
 # CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
@@ -389,7 +397,6 @@ CONFIG_NE2000=y
 # CONFIG_NET_VENDOR_SOLARFLARE is not set
 CONFIG_SMC91X=y
 # CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -544,6 +551,8 @@ CONFIG_NLS_MAC_TURKISH=m
 CONFIG_DLM=m
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
+CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_ATOMIC64_SELFTEST=m
 CONFIG_ASYNC_RAID6_TEST=m
 CONFIG_TEST_HEXDUMP=m
 CONFIG_TEST_STRING_HELPERS=m
@@ -574,6 +583,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -585,6 +595,7 @@ CONFIG_CRYPTO_SHA512=m
 CONFIG_CRYPTO_SHA3=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
 CONFIG_CRYPTO_ANUBIS=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
@@ -609,4 +620,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_CRYPTO_USER_API_AEAD=m
 # CONFIG_CRYPTO_HW is not set
+CONFIG_CRC32_SELFTEST=m
 CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig
index 52e984a0aa69..95deb95140fe 100644
--- a/arch/m68k/configs/bvme6000_defconfig
+++ b/arch/m68k/configs/bvme6000_defconfig
@@ -25,6 +25,7 @@ CONFIG_UNIXWARE_DISKLABEL=y
 CONFIG_SUN_PARTITION=y
 # CONFIG_EFI_PARTITION is not set
 CONFIG_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_DEADLINE=m
 CONFIG_KEXEC=y
 CONFIG_BOOTINFO_PROC=y
 CONFIG_M68040=y
@@ -56,6 +57,7 @@ CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
+CONFIG_INET_ESP_OFFLOAD=m
 CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
@@ -67,6 +69,7 @@ CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
+CONFIG_INET6_ESP_OFFLOAD=m
 CONFIG_INET6_IPCOMP=m
 CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
@@ -97,6 +100,7 @@ CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_SET_RBTREE=m
 CONFIG_NFT_SET_HASH=m
+CONFIG_NFT_SET_BITMAP=m
 CONFIG_NFT_COUNTER=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
@@ -294,6 +298,8 @@ CONFIG_MPLS_IPTUNNEL=m
 CONFIG_NET_L3_MASTER_DEV=y
 CONFIG_AF_KCM=m
 # CONFIG_WIRELESS is not set
+CONFIG_PSAMPLE=m
+CONFIG_NET_IFE=m
 CONFIG_NET_DEVLINK=m
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -352,6 +358,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m
 CONFIG_MACVLAN=m
 CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
+CONFIG_IPVTAP=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
 CONFIG_GTP=m
@@ -361,6 +368,7 @@ CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
 # CONFIG_NET_VENDOR_ALACRITECH is not set
 # CONFIG_NET_VENDOR_AMAZON is not set
+# CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
 # CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
@@ -377,7 +385,6 @@ CONFIG_BVME6000_NET=y
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_SOLARFLARE is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -515,6 +522,8 @@ CONFIG_NLS_MAC_TURKISH=m
 CONFIG_DLM=m
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
+CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_ATOMIC64_SELFTEST=m
 CONFIG_ASYNC_RAID6_TEST=m
 CONFIG_TEST_HEXDUMP=m
 CONFIG_TEST_STRING_HELPERS=m
@@ -545,6 +554,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -556,6 +566,7 @@ CONFIG_CRYPTO_SHA512=m
 CONFIG_CRYPTO_SHA3=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
 CONFIG_CRYPTO_ANUBIS=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
@@ -580,4 +591,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_CRYPTO_USER_API_AEAD=m
 # CONFIG_CRYPTO_HW is not set
+CONFIG_CRC32_SELFTEST=m
 CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig
index aaeed4422cc9..afae6958db2d 100644
--- a/arch/m68k/configs/hp300_defconfig
+++ b/arch/m68k/configs/hp300_defconfig
@@ -26,6 +26,7 @@ CONFIG_SUN_PARTITION=y
 # CONFIG_EFI_PARTITION is not set
 CONFIG_SYSV68_PARTITION=y
 CONFIG_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_DEADLINE=m
 CONFIG_KEXEC=y
 CONFIG_BOOTINFO_PROC=y
 CONFIG_M68020=y
@@ -58,6 +59,7 @@ CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
+CONFIG_INET_ESP_OFFLOAD=m
 CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
@@ -69,6 +71,7 @@ CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
+CONFIG_INET6_ESP_OFFLOAD=m
 CONFIG_INET6_IPCOMP=m
 CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
@@ -99,6 +102,7 @@ CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_SET_RBTREE=m
 CONFIG_NFT_SET_HASH=m
+CONFIG_NFT_SET_BITMAP=m
 CONFIG_NFT_COUNTER=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
@@ -296,6 +300,8 @@ CONFIG_MPLS_IPTUNNEL=m
 CONFIG_NET_L3_MASTER_DEV=y
 CONFIG_AF_KCM=m
 # CONFIG_WIRELESS is not set
+CONFIG_PSAMPLE=m
+CONFIG_NET_IFE=m
 CONFIG_NET_DEVLINK=m
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -353,6 +359,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m
 CONFIG_MACVLAN=m
 CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
+CONFIG_IPVTAP=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
 CONFIG_GTP=m
@@ -363,6 +370,7 @@ CONFIG_VETH=m
 # CONFIG_NET_VENDOR_ALACRITECH is not set
 # CONFIG_NET_VENDOR_AMAZON is not set
 CONFIG_HPLANCE=y
+# CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
 # CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
@@ -379,7 +387,6 @@ CONFIG_HPLANCE=y
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_SOLARFLARE is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -525,6 +532,8 @@ CONFIG_NLS_MAC_TURKISH=m
 CONFIG_DLM=m
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
+CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_ATOMIC64_SELFTEST=m
 CONFIG_ASYNC_RAID6_TEST=m
 CONFIG_TEST_HEXDUMP=m
 CONFIG_TEST_STRING_HELPERS=m
@@ -555,6 +564,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -566,6 +576,7 @@ CONFIG_CRYPTO_SHA512=m
 CONFIG_CRYPTO_SHA3=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
 CONFIG_CRYPTO_ANUBIS=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
@@ -590,4 +601,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_CRYPTO_USER_API_AEAD=m
 # CONFIG_CRYPTO_HW is not set
+CONFIG_CRC32_SELFTEST=m
 CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig
index 3bbc9b2f0dac..b010734729a7 100644
--- a/arch/m68k/configs/mac_defconfig
+++ b/arch/m68k/configs/mac_defconfig
@@ -25,6 +25,7 @@ CONFIG_SUN_PARTITION=y
 # CONFIG_EFI_PARTITION is not set
 CONFIG_SYSV68_PARTITION=y
 CONFIG_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_DEADLINE=m
 CONFIG_KEXEC=y
 CONFIG_BOOTINFO_PROC=y
 CONFIG_M68020=y
@@ -57,6 +58,7 @@ CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
+CONFIG_INET_ESP_OFFLOAD=m
 CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
@@ -68,6 +70,7 @@ CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
+CONFIG_INET6_ESP_OFFLOAD=m
 CONFIG_INET6_IPCOMP=m
 CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
@@ -98,6 +101,7 @@ CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_SET_RBTREE=m
 CONFIG_NFT_SET_HASH=m
+CONFIG_NFT_SET_BITMAP=m
 CONFIG_NFT_COUNTER=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
@@ -298,6 +302,8 @@ CONFIG_MPLS_IPTUNNEL=m
 CONFIG_NET_L3_MASTER_DEV=y
 CONFIG_AF_KCM=m
 # CONFIG_WIRELESS is not set
+CONFIG_PSAMPLE=m
+CONFIG_NET_IFE=m
 CONFIG_NET_DEVLINK=m
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -369,6 +375,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m
 CONFIG_MACVLAN=m
 CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
+CONFIG_IPVTAP=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
 CONFIG_GTP=m
@@ -379,6 +386,7 @@ CONFIG_VETH=m
 # CONFIG_NET_VENDOR_ALACRITECH is not set
 # CONFIG_NET_VENDOR_AMAZON is not set
 CONFIG_MACMACE=y
+# CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
 # CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
@@ -398,7 +406,6 @@ CONFIG_MAC8390=y
 # CONFIG_NET_VENDOR_SOLARFLARE is not set
 # CONFIG_NET_VENDOR_SMSC is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -547,6 +554,8 @@ CONFIG_NLS_MAC_TURKISH=m
 CONFIG_DLM=m
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
+CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_ATOMIC64_SELFTEST=m
 CONFIG_ASYNC_RAID6_TEST=m
 CONFIG_TEST_HEXDUMP=m
 CONFIG_TEST_STRING_HELPERS=m
@@ -577,6 +586,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -588,6 +598,7 @@ CONFIG_CRYPTO_SHA512=m
 CONFIG_CRYPTO_SHA3=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
 CONFIG_CRYPTO_ANUBIS=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
@@ -612,4 +623,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_CRYPTO_USER_API_AEAD=m
 # CONFIG_CRYPTO_HW is not set
+CONFIG_CRC32_SELFTEST=m
 CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig
index 8f2c0decb2f8..0e414549b235 100644
--- a/arch/m68k/configs/multi_defconfig
+++ b/arch/m68k/configs/multi_defconfig
@@ -21,6 +21,7 @@ CONFIG_SOLARIS_X86_PARTITION=y
 CONFIG_UNIXWARE_DISKLABEL=y
 # CONFIG_EFI_PARTITION is not set
 CONFIG_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_DEADLINE=m
 CONFIG_KEXEC=y
 CONFIG_BOOTINFO_PROC=y
 CONFIG_M68020=y
@@ -67,6 +68,7 @@ CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
+CONFIG_INET_ESP_OFFLOAD=m
 CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
@@ -78,6 +80,7 @@ CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
+CONFIG_INET6_ESP_OFFLOAD=m
 CONFIG_INET6_IPCOMP=m
 CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
@@ -108,6 +111,7 @@ CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_SET_RBTREE=m
 CONFIG_NFT_SET_HASH=m
+CONFIG_NFT_SET_BITMAP=m
 CONFIG_NFT_COUNTER=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
@@ -308,6 +312,8 @@ CONFIG_MPLS_IPTUNNEL=m
 CONFIG_NET_L3_MASTER_DEV=y
 CONFIG_AF_KCM=m
 # CONFIG_WIRELESS is not set
+CONFIG_PSAMPLE=m
+CONFIG_NET_IFE=m
 CONFIG_NET_DEVLINK=m
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -402,6 +408,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m
 CONFIG_MACVLAN=m
 CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
+CONFIG_IPVTAP=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
 CONFIG_GTP=m
@@ -419,6 +426,7 @@ CONFIG_HPLANCE=y
 CONFIG_MVME147_NET=y
 CONFIG_SUN3LANCE=y
 CONFIG_MACMACE=y
+# CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
 # CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
@@ -444,7 +452,6 @@ CONFIG_ZORRO8390=y
 # CONFIG_NET_VENDOR_SOLARFLARE is not set
 CONFIG_SMC91X=y
 # CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PLIP=m
@@ -627,6 +634,8 @@ CONFIG_NLS_MAC_TURKISH=m
 CONFIG_DLM=m
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
+CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_ATOMIC64_SELFTEST=m
 CONFIG_ASYNC_RAID6_TEST=m
 CONFIG_TEST_HEXDUMP=m
 CONFIG_TEST_STRING_HELPERS=m
@@ -657,6 +666,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -668,6 +678,7 @@ CONFIG_CRYPTO_SHA512=m
 CONFIG_CRYPTO_SHA3=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
 CONFIG_CRYPTO_ANUBIS=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
@@ -692,4 +703,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_CRYPTO_USER_API_AEAD=m
 # CONFIG_CRYPTO_HW is not set
+CONFIG_CRC32_SELFTEST=m
 CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig
index c743dd22e96f..b2e687a0ec3d 100644
--- a/arch/m68k/configs/mvme147_defconfig
+++ b/arch/m68k/configs/mvme147_defconfig
@@ -25,6 +25,7 @@ CONFIG_UNIXWARE_DISKLABEL=y
 CONFIG_SUN_PARTITION=y
 # CONFIG_EFI_PARTITION is not set
 CONFIG_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_DEADLINE=m
 CONFIG_KEXEC=y
 CONFIG_BOOTINFO_PROC=y
 CONFIG_M68030=y
@@ -55,6 +56,7 @@ CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
+CONFIG_INET_ESP_OFFLOAD=m
 CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
@@ -66,6 +68,7 @@ CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
+CONFIG_INET6_ESP_OFFLOAD=m
 CONFIG_INET6_IPCOMP=m
 CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
@@ -96,6 +99,7 @@ CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_SET_RBTREE=m
 CONFIG_NFT_SET_HASH=m
+CONFIG_NFT_SET_BITMAP=m
 CONFIG_NFT_COUNTER=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
@@ -293,6 +297,8 @@ CONFIG_MPLS_IPTUNNEL=m
 CONFIG_NET_L3_MASTER_DEV=y
 CONFIG_AF_KCM=m
 # CONFIG_WIRELESS is not set
+CONFIG_PSAMPLE=m
+CONFIG_NET_IFE=m
 CONFIG_NET_DEVLINK=m
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -351,6 +357,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m
 CONFIG_MACVLAN=m
 CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
+CONFIG_IPVTAP=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
 CONFIG_GTP=m
@@ -361,6 +368,7 @@ CONFIG_VETH=m
 # CONFIG_NET_VENDOR_ALACRITECH is not set
 # CONFIG_NET_VENDOR_AMAZON is not set
 CONFIG_MVME147_NET=y
+# CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
 # CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
@@ -377,7 +385,6 @@ CONFIG_MVME147_NET=y
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_SOLARFLARE is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -515,6 +522,8 @@ CONFIG_NLS_MAC_TURKISH=m
 CONFIG_DLM=m
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
+CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_ATOMIC64_SELFTEST=m
 CONFIG_ASYNC_RAID6_TEST=m
 CONFIG_TEST_HEXDUMP=m
 CONFIG_TEST_STRING_HELPERS=m
@@ -545,6 +554,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -556,6 +566,7 @@ CONFIG_CRYPTO_SHA512=m
 CONFIG_CRYPTO_SHA3=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
 CONFIG_CRYPTO_ANUBIS=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
@@ -580,4 +591,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_CRYPTO_USER_API_AEAD=m
 # CONFIG_CRYPTO_HW is not set
+CONFIG_CRC32_SELFTEST=m
 CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig
index 2ccaca858f05..cbd8ee24d1bc 100644
--- a/arch/m68k/configs/mvme16x_defconfig
+++ b/arch/m68k/configs/mvme16x_defconfig
@@ -25,6 +25,7 @@ CONFIG_UNIXWARE_DISKLABEL=y
 CONFIG_SUN_PARTITION=y
 # CONFIG_EFI_PARTITION is not set
 CONFIG_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_DEADLINE=m
 CONFIG_KEXEC=y
 CONFIG_BOOTINFO_PROC=y
 CONFIG_M68040=y
@@ -56,6 +57,7 @@ CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
+CONFIG_INET_ESP_OFFLOAD=m
 CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
@@ -67,6 +69,7 @@ CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
+CONFIG_INET6_ESP_OFFLOAD=m
 CONFIG_INET6_IPCOMP=m
 CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
@@ -97,6 +100,7 @@ CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_SET_RBTREE=m
 CONFIG_NFT_SET_HASH=m
+CONFIG_NFT_SET_BITMAP=m
 CONFIG_NFT_COUNTER=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
@@ -294,6 +298,8 @@ CONFIG_MPLS_IPTUNNEL=m
 CONFIG_NET_L3_MASTER_DEV=y
 CONFIG_AF_KCM=m
 # CONFIG_WIRELESS is not set
+CONFIG_PSAMPLE=m
+CONFIG_NET_IFE=m
 CONFIG_NET_DEVLINK=m
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -352,6 +358,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m
 CONFIG_MACVLAN=m
 CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
+CONFIG_IPVTAP=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
 CONFIG_GTP=m
@@ -361,6 +368,7 @@ CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
 # CONFIG_NET_VENDOR_ALACRITECH is not set
 # CONFIG_NET_VENDOR_AMAZON is not set
+# CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
 # CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
@@ -377,7 +385,6 @@ CONFIG_MVME16x_NET=y
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_SOLARFLARE is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -515,6 +522,8 @@ CONFIG_NLS_MAC_TURKISH=m
 CONFIG_DLM=m
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
+CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_ATOMIC64_SELFTEST=m
 CONFIG_ASYNC_RAID6_TEST=m
 CONFIG_TEST_HEXDUMP=m
 CONFIG_TEST_STRING_HELPERS=m
@@ -545,6 +554,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -556,6 +566,7 @@ CONFIG_CRYPTO_SHA512=m
 CONFIG_CRYPTO_SHA3=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
 CONFIG_CRYPTO_ANUBIS=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
@@ -580,4 +591,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_CRYPTO_USER_API_AEAD=m
 # CONFIG_CRYPTO_HW is not set
+CONFIG_CRC32_SELFTEST=m
 CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig
index 5599f3fd5fcd..1e82cc944339 100644
--- a/arch/m68k/configs/q40_defconfig
+++ b/arch/m68k/configs/q40_defconfig
@@ -26,6 +26,7 @@ CONFIG_SUN_PARTITION=y
 # CONFIG_EFI_PARTITION is not set
 CONFIG_SYSV68_PARTITION=y
 CONFIG_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_DEADLINE=m
 CONFIG_KEXEC=y
 CONFIG_BOOTINFO_PROC=y
 CONFIG_M68040=y
@@ -56,6 +57,7 @@ CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
+CONFIG_INET_ESP_OFFLOAD=m
 CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
@@ -67,6 +69,7 @@ CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
+CONFIG_INET6_ESP_OFFLOAD=m
 CONFIG_INET6_IPCOMP=m
 CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
@@ -97,6 +100,7 @@ CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_SET_RBTREE=m
 CONFIG_NFT_SET_HASH=m
+CONFIG_NFT_SET_BITMAP=m
 CONFIG_NFT_COUNTER=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
@@ -294,6 +298,8 @@ CONFIG_MPLS_IPTUNNEL=m
 CONFIG_NET_L3_MASTER_DEV=y
 CONFIG_AF_KCM=m
 # CONFIG_WIRELESS is not set
+CONFIG_PSAMPLE=m
+CONFIG_NET_IFE=m
 CONFIG_NET_DEVLINK=m
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -358,6 +364,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m
 CONFIG_MACVLAN=m
 CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
+CONFIG_IPVTAP=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
 CONFIG_GTP=m
@@ -369,6 +376,7 @@ CONFIG_VETH=m
 # CONFIG_NET_VENDOR_ALACRITECH is not set
 # CONFIG_NET_VENDOR_AMAZON is not set
 # CONFIG_NET_VENDOR_AMD is not set
+# CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
 # CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
@@ -388,7 +396,6 @@ CONFIG_NE2000=y
 # CONFIG_NET_VENDOR_SOLARFLARE is not set
 # CONFIG_NET_VENDOR_SMSC is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PLIP=m
@@ -538,6 +545,8 @@ CONFIG_NLS_MAC_TURKISH=m
 CONFIG_DLM=m
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
+CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_ATOMIC64_SELFTEST=m
 CONFIG_ASYNC_RAID6_TEST=m
 CONFIG_TEST_HEXDUMP=m
 CONFIG_TEST_STRING_HELPERS=m
@@ -568,6 +577,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -579,6 +589,7 @@ CONFIG_CRYPTO_SHA512=m
 CONFIG_CRYPTO_SHA3=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
 CONFIG_CRYPTO_ANUBIS=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
@@ -603,4 +614,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_CRYPTO_USER_API_AEAD=m
 # CONFIG_CRYPTO_HW is not set
+CONFIG_CRC32_SELFTEST=m
 CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig
index 313bf0a562ad..f9e77f57a972 100644
--- a/arch/m68k/configs/sun3_defconfig
+++ b/arch/m68k/configs/sun3_defconfig
@@ -25,6 +25,7 @@ CONFIG_UNIXWARE_DISKLABEL=y
 # CONFIG_EFI_PARTITION is not set
 CONFIG_SYSV68_PARTITION=y
 CONFIG_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_DEADLINE=m
 CONFIG_KEXEC=y
 CONFIG_BOOTINFO_PROC=y
 CONFIG_SUN3=y
@@ -53,6 +54,7 @@ CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
+CONFIG_INET_ESP_OFFLOAD=m
 CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
@@ -64,6 +66,7 @@ CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
+CONFIG_INET6_ESP_OFFLOAD=m
 CONFIG_INET6_IPCOMP=m
 CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
@@ -94,6 +97,7 @@ CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_SET_RBTREE=m
 CONFIG_NFT_SET_HASH=m
+CONFIG_NFT_SET_BITMAP=m
 CONFIG_NFT_COUNTER=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
@@ -291,6 +295,8 @@ CONFIG_MPLS_IPTUNNEL=m
 CONFIG_NET_L3_MASTER_DEV=y
 CONFIG_AF_KCM=m
 # CONFIG_WIRELESS is not set
+CONFIG_PSAMPLE=m
+CONFIG_NET_IFE=m
 CONFIG_NET_DEVLINK=m
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -349,6 +355,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m
 CONFIG_MACVLAN=m
 CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
+CONFIG_IPVTAP=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
 CONFIG_GTP=m
@@ -359,6 +366,7 @@ CONFIG_VETH=m
 # CONFIG_NET_VENDOR_ALACRITECH is not set
 # CONFIG_NET_VENDOR_AMAZON is not set
 CONFIG_SUN3LANCE=y
+# CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
 # CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_EZCHIP is not set
@@ -375,7 +383,6 @@ CONFIG_SUN3_82586=y
 # CONFIG_NET_VENDOR_SOLARFLARE is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
 # CONFIG_NET_VENDOR_SUN is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -517,6 +524,8 @@ CONFIG_NLS_MAC_TURKISH=m
 CONFIG_DLM=m
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
+CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_ATOMIC64_SELFTEST=m
 CONFIG_ASYNC_RAID6_TEST=m
 CONFIG_TEST_HEXDUMP=m
 CONFIG_TEST_STRING_HELPERS=m
@@ -546,6 +555,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -557,6 +567,7 @@ CONFIG_CRYPTO_SHA512=m
 CONFIG_CRYPTO_SHA3=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
 CONFIG_CRYPTO_ANUBIS=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
@@ -581,4 +592,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_CRYPTO_USER_API_AEAD=m
 # CONFIG_CRYPTO_HW is not set
+CONFIG_CRC32_SELFTEST=m
 CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig
index 38b61365f769..3c394fcfb368 100644
--- a/arch/m68k/configs/sun3x_defconfig
+++ b/arch/m68k/configs/sun3x_defconfig
@@ -25,6 +25,7 @@ CONFIG_UNIXWARE_DISKLABEL=y
 # CONFIG_EFI_PARTITION is not set
 CONFIG_SYSV68_PARTITION=y
 CONFIG_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_DEADLINE=m
 CONFIG_KEXEC=y
 CONFIG_BOOTINFO_PROC=y
 CONFIG_SUN3X=y
@@ -53,6 +54,7 @@ CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
+CONFIG_INET_ESP_OFFLOAD=m
 CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
@@ -64,6 +66,7 @@ CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
+CONFIG_INET6_ESP_OFFLOAD=m
 CONFIG_INET6_IPCOMP=m
 CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
@@ -94,6 +97,7 @@ CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_SET_RBTREE=m
 CONFIG_NFT_SET_HASH=m
+CONFIG_NFT_SET_BITMAP=m
 CONFIG_NFT_COUNTER=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
@@ -291,6 +295,8 @@ CONFIG_MPLS_IPTUNNEL=m
 CONFIG_NET_L3_MASTER_DEV=y
 CONFIG_AF_KCM=m
 # CONFIG_WIRELESS is not set
+CONFIG_PSAMPLE=m
+CONFIG_NET_IFE=m
 CONFIG_NET_DEVLINK=m
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -349,6 +355,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m
 CONFIG_MACVLAN=m
 CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
+CONFIG_IPVTAP=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
 CONFIG_GTP=m
@@ -359,6 +366,7 @@ CONFIG_VETH=m
 # CONFIG_NET_VENDOR_ALACRITECH is not set
 # CONFIG_NET_VENDOR_AMAZON is not set
 CONFIG_SUN3LANCE=y
+# CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
 # CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
@@ -375,7 +383,6 @@ CONFIG_SUN3LANCE=y
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_SOLARFLARE is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -517,6 +524,8 @@ CONFIG_NLS_MAC_TURKISH=m
 CONFIG_DLM=m
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
+CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_ATOMIC64_SELFTEST=m
 CONFIG_ASYNC_RAID6_TEST=m
 CONFIG_TEST_HEXDUMP=m
 CONFIG_TEST_STRING_HELPERS=m
@@ -547,6 +556,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -558,6 +568,7 @@ CONFIG_CRYPTO_SHA512=m
 CONFIG_CRYPTO_SHA3=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
 CONFIG_CRYPTO_ANUBIS=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
@@ -582,4 +593,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_CRYPTO_USER_API_AEAD=m
 # CONFIG_CRYPTO_HW is not set
+CONFIG_CRC32_SELFTEST=m
 CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/include/asm/bitops.h b/arch/m68k/include/asm/bitops.h
index b4a9b0d5928d..dda58cfe8c22 100644
--- a/arch/m68k/include/asm/bitops.h
+++ b/arch/m68k/include/asm/bitops.h
@@ -148,7 +148,7 @@ static inline void bfchg_mem_change_bit(int nr, volatile unsigned long *vaddr)
 #define __change_bit(nr, vaddr) change_bit(nr, vaddr)
 
 
-static inline int test_bit(int nr, const unsigned long *vaddr)
+static inline int test_bit(int nr, const volatile unsigned long *vaddr)
 {
 	return (vaddr[nr >> 5] & (1UL << (nr & 31))) != 0;
 }
diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h
index a857d82ec509..aab1edd0d4ba 100644
--- a/arch/m68k/include/asm/unistd.h
+++ b/arch/m68k/include/asm/unistd.h
@@ -4,7 +4,7 @@
 #include <uapi/asm/unistd.h>
 
 
-#define NR_syscalls 379
+#define NR_syscalls 380
 
 #define __ARCH_WANT_OLD_READDIR
 #define __ARCH_WANT_OLD_STAT
diff --git a/arch/m68k/include/uapi/asm/unistd.h b/arch/m68k/include/uapi/asm/unistd.h
index 9fe674bf911f..25589f5b8669 100644
--- a/arch/m68k/include/uapi/asm/unistd.h
+++ b/arch/m68k/include/uapi/asm/unistd.h
@@ -384,5 +384,6 @@
 #define __NR_copy_file_range 376
 #define __NR_preadv2 377
 #define __NR_pwritev2 378
+#define __NR_statx 379
 
 #endif /* _UAPI_ASM_M68K_UNISTD_H_ */
diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S
index d6fd6d9ced24..8c9fcfafe0dd 100644
--- a/arch/m68k/kernel/syscalltable.S
+++ b/arch/m68k/kernel/syscalltable.S
@@ -399,3 +399,4 @@ ENTRY(sys_call_table)
 	.long sys_copy_file_range
 	.long sys_preadv2
 	.long sys_pwritev2
+	.long sys_statx
diff --git a/arch/metag/include/asm/uaccess.h b/arch/metag/include/asm/uaccess.h
index 273e61225c27..07238b39638c 100644
--- a/arch/metag/include/asm/uaccess.h
+++ b/arch/metag/include/asm/uaccess.h
@@ -197,20 +197,21 @@ extern long __must_check strnlen_user(const char __user *src, long count);
 
 #define strlen_user(str) strnlen_user(str, 32767)
 
-extern unsigned long __must_check __copy_user_zeroing(void *to,
-						      const void __user *from,
-						      unsigned long n);
+extern unsigned long raw_copy_from_user(void *to, const void __user *from,
+					unsigned long n);
 
 static inline unsigned long
 copy_from_user(void *to, const void __user *from, unsigned long n)
 {
+	unsigned long res = n;
 	if (likely(access_ok(VERIFY_READ, from, n)))
-		return __copy_user_zeroing(to, from, n);
-	memset(to, 0, n);
-	return n;
+		res = raw_copy_from_user(to, from, n);
+	if (unlikely(res))
+		memset(to + (n - res), 0, res);
+	return res;
 }
 
-#define __copy_from_user(to, from, n) __copy_user_zeroing(to, from, n)
+#define __copy_from_user(to, from, n) raw_copy_from_user(to, from, n)
 #define __copy_from_user_inatomic __copy_from_user
 
 extern unsigned long __must_check __copy_user(void __user *to,
diff --git a/arch/metag/kernel/ptrace.c b/arch/metag/kernel/ptrace.c
index 5fd16ee5280c..e615603a4b0a 100644
--- a/arch/metag/kernel/ptrace.c
+++ b/arch/metag/kernel/ptrace.c
@@ -26,6 +26,16 @@
  * user_regset definitions.
  */
 
+static unsigned long user_txstatus(const struct pt_regs *regs)
+{
+	unsigned long data = (unsigned long)regs->ctx.Flags;
+
+	if (regs->ctx.SaveMask & TBICTX_CBUF_BIT)
+		data |= USER_GP_REGS_STATUS_CATCH_BIT;
+
+	return data;
+}
+
 int metag_gp_regs_copyout(const struct pt_regs *regs,
 			  unsigned int pos, unsigned int count,
 			  void *kbuf, void __user *ubuf)
@@ -64,9 +74,7 @@ int metag_gp_regs_copyout(const struct pt_regs *regs,
 	if (ret)
 		goto out;
 	/* TXSTATUS */
-	data = (unsigned long)regs->ctx.Flags;
-	if (regs->ctx.SaveMask & TBICTX_CBUF_BIT)
-		data |= USER_GP_REGS_STATUS_CATCH_BIT;
+	data = user_txstatus(regs);
 	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
 				  &data, 4*25, 4*26);
 	if (ret)
@@ -121,6 +129,7 @@ int metag_gp_regs_copyin(struct pt_regs *regs,
 	if (ret)
 		goto out;
 	/* TXSTATUS */
+	data = user_txstatus(regs);
 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
 				  &data, 4*25, 4*26);
 	if (ret)
@@ -246,6 +255,8 @@ int metag_rp_state_copyin(struct pt_regs *regs,
 	unsigned long long *ptr;
 	int ret, i;
 
+	if (count < 4*13)
+		return -EINVAL;
 	/* Read the entire pipeline before making any changes */
 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
 				  &rp, 0, 4*13);
@@ -305,7 +316,7 @@ static int metag_tls_set(struct task_struct *target,
 			 const void *kbuf, const void __user *ubuf)
 {
 	int ret;
-	void __user *tls;
+	void __user *tls = target->thread.tls_ptr;
 
 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
 	if (ret)
diff --git a/arch/metag/lib/usercopy.c b/arch/metag/lib/usercopy.c
index b3ebfe9c8e88..2792fc621088 100644
--- a/arch/metag/lib/usercopy.c
+++ b/arch/metag/lib/usercopy.c
@@ -29,7 +29,6 @@
 		COPY \
 		"1:\n" \
 		"	.section .fixup,\"ax\"\n" \
-		"	MOV D1Ar1,#0\n" \
 		FIXUP \
 		"	MOVT D1Ar1,#HI(1b)\n" \
 		"	JUMP D1Ar1,#LO(1b)\n" \
@@ -260,27 +259,31 @@
 		"MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
 		"22:\n" \
 		"MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
-		"SUB %3, %3, #32\n" \
 		"23:\n" \
-		"MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
+		"SUB %3, %3, #32\n" \
 		"24:\n" \
+		"MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
+		"25:\n" \
 		"MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+		"26:\n" \
 		"SUB %3, %3, #32\n" \
 		"DCACHE [%1+#-64], D0Ar6\n" \
 		"BR $Lloop"id"\n" \
 		\
 		"MOV RAPF, %1\n" \
-		"25:\n" \
+		"27:\n" \
 		"MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
-		"26:\n" \
+		"28:\n" \
 		"MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+		"29:\n" \
 		"SUB %3, %3, #32\n" \
-		"27:\n" \
+		"30:\n" \
 		"MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
-		"28:\n" \
+		"31:\n" \
 		"MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+		"32:\n" \
 		"SUB %0, %0, #8\n" \
-		"29:\n" \
+		"33:\n" \
 		"SETL [%0++], D0.7, D1.7\n" \
 		"SUB %3, %3, #32\n" \
 		"1:" \
@@ -312,11 +315,15 @@
 		" .long 26b,3b\n" \
 		" .long 27b,3b\n" \
 		" .long 28b,3b\n" \
-		" .long 29b,4b\n" \
+		" .long 29b,3b\n" \
+		" .long 30b,3b\n" \
+		" .long 31b,3b\n" \
+		" .long 32b,3b\n" \
+		" .long 33b,4b\n" \
 		" .previous\n" \
 		: "=r" (to), "=r" (from), "=r" (ret), "=d" (n) \
 		: "0" (to), "1" (from), "2" (ret), "3" (n) \
-		: "D1Ar1", "D0Ar2", "memory")
+		: "D1Ar1", "D0Ar2", "cc", "memory")
 
 /* rewind 'to' and 'from' pointers when a fault occurs
  *
@@ -342,7 +349,7 @@
 #define __asm_copy_to_user_64bit_rapf_loop(to, from, ret, n, id)\
 	__asm_copy_user_64bit_rapf_loop(to, from, ret, n, id, \
 		"LSR D0Ar2, D0Ar2, #8\n" \
-		"AND D0Ar2, D0Ar2, #0x7\n" \
+		"ANDS D0Ar2, D0Ar2, #0x7\n" \
 		"ADDZ D0Ar2, D0Ar2, #4\n" \
 		"SUB D0Ar2, D0Ar2, #1\n" \
 		"MOV D1Ar1, #4\n" \
@@ -403,47 +410,55 @@
 		"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
 		"22:\n" \
 		"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
-		"SUB %3, %3, #16\n" \
 		"23:\n" \
-		"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
-		"24:\n" \
-		"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
 		"SUB %3, %3, #16\n" \
-		"25:\n" \
+		"24:\n" \
 		"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
-		"26:\n" \
+		"25:\n" \
 		"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+		"26:\n" \
 		"SUB %3, %3, #16\n" \
 		"27:\n" \
 		"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
 		"28:\n" \
 		"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+		"29:\n" \
+		"SUB %3, %3, #16\n" \
+		"30:\n" \
+		"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
+		"31:\n" \
+		"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+		"32:\n" \
 		"SUB %3, %3, #16\n" \
 		"DCACHE [%1+#-64], D0Ar6\n" \
 		"BR $Lloop"id"\n" \
 		\
 		"MOV RAPF, %1\n" \
-		"29:\n" \
+		"33:\n" \
 		"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
-		"30:\n" \
+		"34:\n" \
 		"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+		"35:\n" \
 		"SUB %3, %3, #16\n" \
-		"31:\n" \
+		"36:\n" \
 		"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
-		"32:\n" \
+		"37:\n" \
 		"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+		"38:\n" \
 		"SUB %3, %3, #16\n" \
-		"33:\n" \
+		"39:\n" \
 		"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
-		"34:\n" \
+		"40:\n" \
 		"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+		"41:\n" \
 		"SUB %3, %3, #16\n" \
-		"35:\n" \
+		"42:\n" \
 		"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
-		"36:\n" \
+		"43:\n" \
 		"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+		"44:\n" \
 		"SUB %0, %0, #4\n" \
-		"37:\n" \
+		"45:\n" \
 		"SETD [%0++], D0.7\n" \
 		"SUB %3, %3, #16\n" \
 		"1:" \
@@ -483,11 +498,19 @@
 		" .long 34b,3b\n" \
 		" .long 35b,3b\n" \
 		" .long 36b,3b\n" \
-		" .long 37b,4b\n" \
+		" .long 37b,3b\n" \
+		" .long 38b,3b\n" \
+		" .long 39b,3b\n" \
+		" .long 40b,3b\n" \
+		" .long 41b,3b\n" \
+		" .long 42b,3b\n" \
+		" .long 43b,3b\n" \
+		" .long 44b,3b\n" \
+		" .long 45b,4b\n" \
 		" .previous\n" \
 		: "=r" (to), "=r" (from), "=r" (ret), "=d" (n) \
 		: "0" (to), "1" (from), "2" (ret), "3" (n) \
-		: "D1Ar1", "D0Ar2", "memory")
+		: "D1Ar1", "D0Ar2", "cc", "memory")
 
 /* rewind 'to' and 'from' pointers when a fault occurs
 *
@@ -513,7 +536,7 @@
 #define __asm_copy_to_user_32bit_rapf_loop(to, from, ret, n, id)\
 	__asm_copy_user_32bit_rapf_loop(to, from, ret, n, id, \
 		"LSR D0Ar2, D0Ar2, #8\n" \
-		"AND D0Ar2, D0Ar2, #0x7\n" \
+		"ANDS D0Ar2, D0Ar2, #0x7\n" \
 		"ADDZ D0Ar2, D0Ar2, #4\n" \
 		"SUB D0Ar2, D0Ar2, #1\n" \
 		"MOV D1Ar1, #4\n" \
@@ -538,23 +561,31 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
 	if ((unsigned long) src & 1) {
 		__asm_copy_to_user_1(dst, src, retn);
 		n--;
+		if (retn)
+			return retn + n;
 	}
 	if ((unsigned long) dst & 1) {
 		/* Worst case - byte copy */
 		while (n > 0) {
 			__asm_copy_to_user_1(dst, src, retn);
 			n--;
+			if (retn)
+				return retn + n;
 		}
 	}
 	if (((unsigned long) src & 2) && n >= 2) {
 		__asm_copy_to_user_2(dst, src, retn);
 		n -= 2;
+		if (retn)
+			return retn + n;
 	}
 	if ((unsigned long) dst & 2) {
 		/* Second worst case - word copy */
 		while (n >= 2) {
 			__asm_copy_to_user_2(dst, src, retn);
 			n -= 2;
+			if (retn)
+				return retn + n;
 		}
 	}
 
@@ -569,6 +600,8 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
 		while (n >= 8) {
 			__asm_copy_to_user_8x64(dst, src, retn);
 			n -= 8;
+			if (retn)
+				return retn + n;
 		}
 	}
 	if (n >= RAPF_MIN_BUF_SIZE) {
@@ -581,6 +614,8 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
 		while (n >= 8) {
 			__asm_copy_to_user_8x64(dst, src, retn);
 			n -= 8;
+			if (retn)
+				return retn + n;
 		}
 	}
 #endif
@@ -588,11 +623,15 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
 	while (n >= 16) {
 		__asm_copy_to_user_16(dst, src, retn);
 		n -= 16;
+		if (retn)
+			return retn + n;
 	}
 
 	while (n >= 4) {
 		__asm_copy_to_user_4(dst, src, retn);
 		n -= 4;
+		if (retn)
+			return retn + n;
 	}
 
 	switch (n) {
@@ -609,6 +648,10 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
 		break;
 	}
 
+	/*
+	 * If we get here, retn correctly reflects the number of failing
+	 * bytes.
+	 */
 	return retn;
 }
 EXPORT_SYMBOL(__copy_user);
@@ -617,16 +660,14 @@ EXPORT_SYMBOL(__copy_user);
 	__asm_copy_user_cont(to, from, ret, \
 		" GETB D1Ar1,[%1++]\n" \
 		"2: SETB [%0++],D1Ar1\n", \
-		"3: ADD %2,%2,#1\n" \
-		" SETB [%0++],D1Ar1\n", \
+		"3: ADD %2,%2,#1\n", \
 		" .long 2b,3b\n")
 
 #define __asm_copy_from_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
 	__asm_copy_user_cont(to, from, ret, \
 		" GETW D1Ar1,[%1++]\n" \
 		"2: SETW [%0++],D1Ar1\n" COPY, \
-		"3: ADD %2,%2,#2\n" \
-		" SETW [%0++],D1Ar1\n" FIXUP, \
+		"3: ADD %2,%2,#2\n" FIXUP, \
 		" .long 2b,3b\n" TENTRY)
 
 #define __asm_copy_from_user_2(to, from, ret) \
@@ -636,145 +677,26 @@ EXPORT_SYMBOL(__copy_user);
 	__asm_copy_from_user_2x_cont(to, from, ret, \
 		" GETB D1Ar1,[%1++]\n" \
638 "4: SETB [%0++],D1Ar1\n", \ 679 "4: SETB [%0++],D1Ar1\n", \
639 "5: ADD %2,%2,#1\n" \ 680 "5: ADD %2,%2,#1\n", \
640 " SETB [%0++],D1Ar1\n", \
641 " .long 4b,5b\n") 681 " .long 4b,5b\n")
642 682
643#define __asm_copy_from_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ 683#define __asm_copy_from_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
644 __asm_copy_user_cont(to, from, ret, \ 684 __asm_copy_user_cont(to, from, ret, \
645 " GETD D1Ar1,[%1++]\n" \ 685 " GETD D1Ar1,[%1++]\n" \
646 "2: SETD [%0++],D1Ar1\n" COPY, \ 686 "2: SETD [%0++],D1Ar1\n" COPY, \
647 "3: ADD %2,%2,#4\n" \ 687 "3: ADD %2,%2,#4\n" FIXUP, \
648 " SETD [%0++],D1Ar1\n" FIXUP, \
649 " .long 2b,3b\n" TENTRY) 688 " .long 2b,3b\n" TENTRY)
650 689
651#define __asm_copy_from_user_4(to, from, ret) \ 690#define __asm_copy_from_user_4(to, from, ret) \
652 __asm_copy_from_user_4x_cont(to, from, ret, "", "", "") 691 __asm_copy_from_user_4x_cont(to, from, ret, "", "", "")
653 692
654#define __asm_copy_from_user_5(to, from, ret) \
655 __asm_copy_from_user_4x_cont(to, from, ret, \
656 " GETB D1Ar1,[%1++]\n" \
657 "4: SETB [%0++],D1Ar1\n", \
658 "5: ADD %2,%2,#1\n" \
659 " SETB [%0++],D1Ar1\n", \
660 " .long 4b,5b\n")
661
662#define __asm_copy_from_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
663 __asm_copy_from_user_4x_cont(to, from, ret, \
664 " GETW D1Ar1,[%1++]\n" \
665 "4: SETW [%0++],D1Ar1\n" COPY, \
666 "5: ADD %2,%2,#2\n" \
667 " SETW [%0++],D1Ar1\n" FIXUP, \
668 " .long 4b,5b\n" TENTRY)
669
670#define __asm_copy_from_user_6(to, from, ret) \
671 __asm_copy_from_user_6x_cont(to, from, ret, "", "", "")
672
673#define __asm_copy_from_user_7(to, from, ret) \
674 __asm_copy_from_user_6x_cont(to, from, ret, \
675 " GETB D1Ar1,[%1++]\n" \
676 "6: SETB [%0++],D1Ar1\n", \
677 "7: ADD %2,%2,#1\n" \
678 " SETB [%0++],D1Ar1\n", \
679 " .long 6b,7b\n")
680
681#define __asm_copy_from_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
682 __asm_copy_from_user_4x_cont(to, from, ret, \
683 " GETD D1Ar1,[%1++]\n" \
684 "4: SETD [%0++],D1Ar1\n" COPY, \
685 "5: ADD %2,%2,#4\n" \
686 " SETD [%0++],D1Ar1\n" FIXUP, \
687 " .long 4b,5b\n" TENTRY)
688
689#define __asm_copy_from_user_8(to, from, ret) \
690 __asm_copy_from_user_8x_cont(to, from, ret, "", "", "")
691
692#define __asm_copy_from_user_9(to, from, ret) \
693 __asm_copy_from_user_8x_cont(to, from, ret, \
694 " GETB D1Ar1,[%1++]\n" \
695 "6: SETB [%0++],D1Ar1\n", \
696 "7: ADD %2,%2,#1\n" \
697 " SETB [%0++],D1Ar1\n", \
698 " .long 6b,7b\n")
699
700#define __asm_copy_from_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
701 __asm_copy_from_user_8x_cont(to, from, ret, \
702 " GETW D1Ar1,[%1++]\n" \
703 "6: SETW [%0++],D1Ar1\n" COPY, \
704 "7: ADD %2,%2,#2\n" \
705 " SETW [%0++],D1Ar1\n" FIXUP, \
706 " .long 6b,7b\n" TENTRY)
707
708#define __asm_copy_from_user_10(to, from, ret) \
709 __asm_copy_from_user_10x_cont(to, from, ret, "", "", "")
710
711#define __asm_copy_from_user_11(to, from, ret) \
712 __asm_copy_from_user_10x_cont(to, from, ret, \
713 " GETB D1Ar1,[%1++]\n" \
714 "8: SETB [%0++],D1Ar1\n", \
715 "9: ADD %2,%2,#1\n" \
716 " SETB [%0++],D1Ar1\n", \
717 " .long 8b,9b\n")
718
719#define __asm_copy_from_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
720 __asm_copy_from_user_8x_cont(to, from, ret, \
721 " GETD D1Ar1,[%1++]\n" \
722 "6: SETD [%0++],D1Ar1\n" COPY, \
723 "7: ADD %2,%2,#4\n" \
724 " SETD [%0++],D1Ar1\n" FIXUP, \
725 " .long 6b,7b\n" TENTRY)
726
727#define __asm_copy_from_user_12(to, from, ret) \
728 __asm_copy_from_user_12x_cont(to, from, ret, "", "", "")
729
730#define __asm_copy_from_user_13(to, from, ret) \
731 __asm_copy_from_user_12x_cont(to, from, ret, \
732 " GETB D1Ar1,[%1++]\n" \
733 "8: SETB [%0++],D1Ar1\n", \
734 "9: ADD %2,%2,#1\n" \
735 " SETB [%0++],D1Ar1\n", \
736 " .long 8b,9b\n")
737
738#define __asm_copy_from_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
739 __asm_copy_from_user_12x_cont(to, from, ret, \
740 " GETW D1Ar1,[%1++]\n" \
741 "8: SETW [%0++],D1Ar1\n" COPY, \
742 "9: ADD %2,%2,#2\n" \
743 " SETW [%0++],D1Ar1\n" FIXUP, \
744 " .long 8b,9b\n" TENTRY)
745
746#define __asm_copy_from_user_14(to, from, ret) \
747 __asm_copy_from_user_14x_cont(to, from, ret, "", "", "")
748
749#define __asm_copy_from_user_15(to, from, ret) \
750 __asm_copy_from_user_14x_cont(to, from, ret, \
751 " GETB D1Ar1,[%1++]\n" \
752 "10: SETB [%0++],D1Ar1\n", \
753 "11: ADD %2,%2,#1\n" \
754 " SETB [%0++],D1Ar1\n", \
755 " .long 10b,11b\n")
756
757#define __asm_copy_from_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
758 __asm_copy_from_user_12x_cont(to, from, ret, \
759 " GETD D1Ar1,[%1++]\n" \
760 "8: SETD [%0++],D1Ar1\n" COPY, \
761 "9: ADD %2,%2,#4\n" \
762 " SETD [%0++],D1Ar1\n" FIXUP, \
763 " .long 8b,9b\n" TENTRY)
764
765#define __asm_copy_from_user_16(to, from, ret) \
766 __asm_copy_from_user_16x_cont(to, from, ret, "", "", "")
767
768#define __asm_copy_from_user_8x64(to, from, ret) \ 693#define __asm_copy_from_user_8x64(to, from, ret) \
769 asm volatile ( \ 694 asm volatile ( \
770 " GETL D0Ar2,D1Ar1,[%1++]\n" \ 695 " GETL D0Ar2,D1Ar1,[%1++]\n" \
771 "2: SETL [%0++],D0Ar2,D1Ar1\n" \ 696 "2: SETL [%0++],D0Ar2,D1Ar1\n" \
772 "1:\n" \ 697 "1:\n" \
773 " .section .fixup,\"ax\"\n" \ 698 " .section .fixup,\"ax\"\n" \
774 " MOV D1Ar1,#0\n" \
775 " MOV D0Ar2,#0\n" \
776 "3: ADD %2,%2,#8\n" \ 699 "3: ADD %2,%2,#8\n" \
777 " SETL [%0++],D0Ar2,D1Ar1\n" \
778 " MOVT D0Ar2,#HI(1b)\n" \ 700 " MOVT D0Ar2,#HI(1b)\n" \
779 " JUMP D0Ar2,#LO(1b)\n" \ 701 " JUMP D0Ar2,#LO(1b)\n" \
780 " .previous\n" \ 702 " .previous\n" \
@@ -789,36 +711,57 @@ EXPORT_SYMBOL(__copy_user);
789 * 711 *
790 * Rationale: 712 * Rationale:
791 * A fault occurs while reading from user buffer, which is the 713 * A fault occurs while reading from user buffer, which is the
792 * source. Since the fault is at a single address, we only 714 * source.
793 * need to rewind by 8 bytes.
794 * Since we don't write to kernel buffer until we read first, 715 * Since we don't write to kernel buffer until we read first,
795 * the kernel buffer is at the right state and needn't be 716 * the kernel buffer is at the right state and needn't be
796 * corrected. 717 * corrected, but the source must be rewound to the beginning of
718 * the block, which is LSM_STEP*8 bytes.
 719 * LSM_STEP is bits 10:8 in TXSTATUS, which is already read
 720 * and stored in D0Ar2.
 721 *
 722 * NOTE: If a fault occurs at the last operation in M{G,S}ETL,
 723 * LSM_STEP will be 0, i.e. we do 4 writes in our case; if
 724 * a fault happens at the 4th write, LSM_STEP will be 0
 725 * instead of 4. The code copes with that.
797 */ 726 */
798#define __asm_copy_from_user_64bit_rapf_loop(to, from, ret, n, id) \ 727#define __asm_copy_from_user_64bit_rapf_loop(to, from, ret, n, id) \
799 __asm_copy_user_64bit_rapf_loop(to, from, ret, n, id, \ 728 __asm_copy_user_64bit_rapf_loop(to, from, ret, n, id, \
800 "SUB %1, %1, #8\n") 729 "LSR D0Ar2, D0Ar2, #5\n" \
730 "ANDS D0Ar2, D0Ar2, #0x38\n" \
731 "ADDZ D0Ar2, D0Ar2, #32\n" \
732 "SUB %1, %1, D0Ar2\n")
801 733
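A C restatement of the rewind arithmetic in the macro above, as a sketch only (txstatus stands for the TXSTATUS value the comment says is held in D0Ar2):

static unsigned long rapf64_src_rewind(unsigned long txstatus)
{
	/* bits 10:8 are LSM_STEP; >>5 then &0x38 yields LSM_STEP*8 */
	unsigned long bytes = (txstatus >> 5) & 0x38;

	/* LSM_STEP reads 0 when the fault hit the last of the 4
	 * operations, which really means a full block of 4*8 bytes */
	if (bytes == 0)
		bytes = 32;
	return bytes;		/* amount to subtract from 'from' */
}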
802/* rewind 'from' pointer when a fault occurs 734/* rewind 'from' pointer when a fault occurs
803 * 735 *
804 * Rationale: 736 * Rationale:
805 * A fault occurs while reading from user buffer, which is the 737 * A fault occurs while reading from user buffer, which is the
806 * source. Since the fault is at a single address, we only 738 * source.
807 * need to rewind by 4 bytes.
808 * Since we don't write to kernel buffer until we read first, 739 * Since we don't write to kernel buffer until we read first,
809 * the kernel buffer is at the right state and needn't be 740 * the kernel buffer is at the right state and needn't be
810 * corrected. 741 * corrected, but the source must be rewound to the beginning of
742 * the block, which is LSM_STEP*4 bytes.
 743 * LSM_STEP is bits 10:8 in TXSTATUS, which is already read
 744 * and stored in D0Ar2.
 745 *
 746 * NOTE: If a fault occurs at the last operation in M{G,S}ETD,
 747 * LSM_STEP will be 0, i.e. we do 4 writes in our case; if
 748 * a fault happens at the 4th write, LSM_STEP will be 0
 749 * instead of 4. The code copes with that.
811 */ 750 */
812#define __asm_copy_from_user_32bit_rapf_loop(to, from, ret, n, id) \ 751#define __asm_copy_from_user_32bit_rapf_loop(to, from, ret, n, id) \
813 __asm_copy_user_32bit_rapf_loop(to, from, ret, n, id, \ 752 __asm_copy_user_32bit_rapf_loop(to, from, ret, n, id, \
814 "SUB %1, %1, #4\n") 753 "LSR D0Ar2, D0Ar2, #6\n" \
754 "ANDS D0Ar2, D0Ar2, #0x1c\n" \
755 "ADDZ D0Ar2, D0Ar2, #16\n" \
756 "SUB %1, %1, D0Ar2\n")
815 757
816 758
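The 32-bit loop applies the same idea with word-sized steps; again as a sketch:

static unsigned long rapf32_src_rewind(unsigned long txstatus)
{
	/* >>6 then &0x1c yields LSM_STEP*4; 0 means a full 16 bytes */
	unsigned long bytes = (txstatus >> 6) & 0x1c;

	return bytes ? bytes : 16;
}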
817/* Copy from user to kernel, zeroing the bytes that were inaccessible in 759/*
818 userland. The return-value is the number of bytes that were 760 * Copy from user to kernel. The return-value is the number of bytes that were
819 inaccessible. */ 761 * inaccessible.
820unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc, 762 */
821 unsigned long n) 763unsigned long raw_copy_from_user(void *pdst, const void __user *psrc,
764 unsigned long n)
822{ 765{
823 register char *dst asm ("A0.2") = pdst; 766 register char *dst asm ("A0.2") = pdst;
824 register const char __user *src asm ("A1.2") = psrc; 767 register const char __user *src asm ("A1.2") = psrc;
@@ -830,6 +773,8 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
830 if ((unsigned long) src & 1) { 773 if ((unsigned long) src & 1) {
831 __asm_copy_from_user_1(dst, src, retn); 774 __asm_copy_from_user_1(dst, src, retn);
832 n--; 775 n--;
776 if (retn)
777 return retn + n;
833 } 778 }
834 if ((unsigned long) dst & 1) { 779 if ((unsigned long) dst & 1) {
835 /* Worst case - byte copy */ 780 /* Worst case - byte copy */
@@ -837,12 +782,14 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
837 __asm_copy_from_user_1(dst, src, retn); 782 __asm_copy_from_user_1(dst, src, retn);
838 n--; 783 n--;
839 if (retn) 784 if (retn)
840 goto copy_exception_bytes; 785 return retn + n;
841 } 786 }
842 } 787 }
843 if (((unsigned long) src & 2) && n >= 2) { 788 if (((unsigned long) src & 2) && n >= 2) {
844 __asm_copy_from_user_2(dst, src, retn); 789 __asm_copy_from_user_2(dst, src, retn);
845 n -= 2; 790 n -= 2;
791 if (retn)
792 return retn + n;
846 } 793 }
847 if ((unsigned long) dst & 2) { 794 if ((unsigned long) dst & 2) {
848 /* Second worst case - word copy */ 795 /* Second worst case - word copy */
@@ -850,16 +797,10 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
850 __asm_copy_from_user_2(dst, src, retn); 797 __asm_copy_from_user_2(dst, src, retn);
851 n -= 2; 798 n -= 2;
852 if (retn) 799 if (retn)
853 goto copy_exception_bytes; 800 return retn + n;
854 } 801 }
855 } 802 }
856 803
857 /* We only need one check after the unalignment-adjustments,
858 because if both adjustments were done, either both or
859 neither reference had an exception. */
860 if (retn != 0)
861 goto copy_exception_bytes;
862
863#ifdef USE_RAPF 804#ifdef USE_RAPF
864 /* 64 bit copy loop */ 805 /* 64 bit copy loop */
865 if (!(((unsigned long) src | (unsigned long) dst) & 7)) { 806 if (!(((unsigned long) src | (unsigned long) dst) & 7)) {
@@ -872,7 +813,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
872 __asm_copy_from_user_8x64(dst, src, retn); 813 __asm_copy_from_user_8x64(dst, src, retn);
873 n -= 8; 814 n -= 8;
874 if (retn) 815 if (retn)
875 goto copy_exception_bytes; 816 return retn + n;
876 } 817 }
877 } 818 }
878 819
@@ -888,7 +829,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
888 __asm_copy_from_user_8x64(dst, src, retn); 829 __asm_copy_from_user_8x64(dst, src, retn);
889 n -= 8; 830 n -= 8;
890 if (retn) 831 if (retn)
891 goto copy_exception_bytes; 832 return retn + n;
892 } 833 }
893 } 834 }
894#endif 835#endif
@@ -898,7 +839,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
898 n -= 4; 839 n -= 4;
899 840
900 if (retn) 841 if (retn)
901 goto copy_exception_bytes; 842 return retn + n;
902 } 843 }
903 844
904 /* If we get here, there were no memory read faults. */ 845 /* If we get here, there were no memory read faults. */
@@ -924,21 +865,8 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
924 /* If we get here, retn correctly reflects the number of failing 865 /* If we get here, retn correctly reflects the number of failing
925 bytes. */ 866 bytes. */
926 return retn; 867 return retn;
927
928 copy_exception_bytes:
929 /* We already have "retn" bytes cleared, and need to clear the
930 remaining "n" bytes. A non-optimized simple byte-for-byte in-line
931 memset is preferred here, since this isn't speed-critical code and
932 we'd rather have this a leaf-function than calling memset. */
933 {
934 char *endp;
935 for (endp = dst + n; dst < endp; dst++)
936 *dst = 0;
937 }
938
939 return retn + n;
940} 868}
941EXPORT_SYMBOL(__copy_user_zeroing); 869EXPORT_SYMBOL(raw_copy_from_user);
942 870
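With the zeroing path gone, raw_copy_from_user() just reports how many bytes could not be copied; clearing the destination tail becomes the generic caller's job. A sketch of that caller-side pattern (function name hypothetical; the generic uaccess code does the equivalent):

static unsigned long copy_from_user_sketch(void *to,
					   const void __user *from,
					   unsigned long n)
{
	unsigned long res = raw_copy_from_user(to, from, n);

	/* res bytes at the end were not copied; zero them so the
	 * caller never sees stale kernel memory */
	if (res)
		memset(to + (n - res), 0, res);
	return res;
}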
943#define __asm_clear_8x64(to, ret) \ 871#define __asm_clear_8x64(to, ret) \
944 asm volatile ( \ 872 asm volatile ( \
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index a008a9f03072..e0bb576410bb 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1531,7 +1531,7 @@ config CPU_MIPS64_R6
1531 select CPU_SUPPORTS_HIGHMEM 1531 select CPU_SUPPORTS_HIGHMEM
1532 select CPU_SUPPORTS_MSA 1532 select CPU_SUPPORTS_MSA
1533 select GENERIC_CSUM 1533 select GENERIC_CSUM
1534 select MIPS_O32_FP64_SUPPORT if MIPS32_O32 1534 select MIPS_O32_FP64_SUPPORT if 32BIT || MIPS32_O32
1535 select HAVE_KVM 1535 select HAVE_KVM
1536 help 1536 help
1537 Choose this option to build a kernel for release 6 or later of the 1537 Choose this option to build a kernel for release 6 or later of the
diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h
index f94455f964ec..a2813fe381cf 100644
--- a/arch/mips/include/asm/fpu.h
+++ b/arch/mips/include/asm/fpu.h
@@ -21,6 +21,7 @@
21#include <asm/cpu-features.h> 21#include <asm/cpu-features.h>
22#include <asm/fpu_emulator.h> 22#include <asm/fpu_emulator.h>
23#include <asm/hazards.h> 23#include <asm/hazards.h>
24#include <asm/ptrace.h>
24#include <asm/processor.h> 25#include <asm/processor.h>
25#include <asm/current.h> 26#include <asm/current.h>
26#include <asm/msa.h> 27#include <asm/msa.h>
diff --git a/arch/mips/include/asm/irq.h b/arch/mips/include/asm/irq.h
index 956db6e201d1..ddd1c918103b 100644
--- a/arch/mips/include/asm/irq.h
+++ b/arch/mips/include/asm/irq.h
@@ -18,9 +18,24 @@
18#include <irq.h> 18#include <irq.h>
19 19
20#define IRQ_STACK_SIZE THREAD_SIZE 20#define IRQ_STACK_SIZE THREAD_SIZE
21#define IRQ_STACK_START (IRQ_STACK_SIZE - sizeof(unsigned long))
21 22
22extern void *irq_stack[NR_CPUS]; 23extern void *irq_stack[NR_CPUS];
23 24
25/*
26 * The highest address on the IRQ stack contains a dummy frame put down in
27 * genex.S (handle_int & except_vec_vi_handler) which is structured as follows:
28 *
29 * top ------------
30 * | task sp | <- irq_stack[cpu] + IRQ_STACK_START
31 * ------------
32 * | | <- First frame of IRQ context
33 * ------------
34 *
35 * task sp holds a copy of the task stack pointer where the struct pt_regs
36 * from exception entry can be found.
37 */
38
24static inline bool on_irq_stack(int cpu, unsigned long sp) 39static inline bool on_irq_stack(int cpu, unsigned long sp)
25{ 40{
26 unsigned long low = (unsigned long)irq_stack[cpu]; 41 unsigned long low = (unsigned long)irq_stack[cpu];
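Given the layout in the comment above, the unwinder can recover the interrupted task's stack pointer from the top of the IRQ stack; as a sketch (helper name hypothetical):

static inline unsigned long irq_stack_task_sp(int cpu)
{
	/* genex.S stores the interrupted task's sp at the very top
	 * of the IRQ stack, i.e. at irq_stack[cpu] + IRQ_STACK_START */
	return *(unsigned long *)((unsigned long)irq_stack[cpu] +
				  IRQ_STACK_START);
}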
diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h
index f485afe51514..a8df44d60607 100644
--- a/arch/mips/include/asm/spinlock.h
+++ b/arch/mips/include/asm/spinlock.h
@@ -127,7 +127,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
127 " andi %[ticket], %[ticket], 0xffff \n" 127 " andi %[ticket], %[ticket], 0xffff \n"
128 " bne %[ticket], %[my_ticket], 4f \n" 128 " bne %[ticket], %[my_ticket], 4f \n"
129 " subu %[ticket], %[my_ticket], %[ticket] \n" 129 " subu %[ticket], %[my_ticket], %[ticket] \n"
130 "2: \n" 130 "2: .insn \n"
131 " .subsection 2 \n" 131 " .subsection 2 \n"
132 "4: andi %[ticket], %[ticket], 0xffff \n" 132 "4: andi %[ticket], %[ticket], 0xffff \n"
133 " sll %[ticket], 5 \n" 133 " sll %[ticket], 5 \n"
@@ -202,7 +202,7 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
202 " sc %[ticket], %[ticket_ptr] \n" 202 " sc %[ticket], %[ticket_ptr] \n"
203 " beqz %[ticket], 1b \n" 203 " beqz %[ticket], 1b \n"
204 " li %[ticket], 1 \n" 204 " li %[ticket], 1 \n"
205 "2: \n" 205 "2: .insn \n"
206 " .subsection 2 \n" 206 " .subsection 2 \n"
207 "3: b 2b \n" 207 "3: b 2b \n"
208 " li %[ticket], 0 \n" 208 " li %[ticket], 0 \n"
@@ -382,7 +382,7 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
382 " .set reorder \n" 382 " .set reorder \n"
383 __WEAK_LLSC_MB 383 __WEAK_LLSC_MB
384 " li %2, 1 \n" 384 " li %2, 1 \n"
385 "2: \n" 385 "2: .insn \n"
386 : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret) 386 : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
387 : GCC_OFF_SMALL_ASM() (rw->lock) 387 : GCC_OFF_SMALL_ASM() (rw->lock)
388 : "memory"); 388 : "memory");
@@ -422,7 +422,7 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
422 " lui %1, 0x8000 \n" 422 " lui %1, 0x8000 \n"
423 " sc %1, %0 \n" 423 " sc %1, %0 \n"
424 " li %2, 1 \n" 424 " li %2, 1 \n"
425 "2: \n" 425 "2: .insn \n"
426 : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), 426 : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp),
427 "=&r" (ret) 427 "=&r" (ret)
428 : GCC_OFF_SMALL_ASM() (rw->lock) 428 : GCC_OFF_SMALL_ASM() (rw->lock)
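All four hunks above append `.insn` to a label that is otherwise the last thing in the asm body, marking it as an instruction location so that branches to it resolve correctly (this matters for microMIPS builds; that motivation is my reading, not stated in the hunks). A minimal sketch of the pattern:

static inline void insn_label_sketch(unsigned long val)
{
	/* Without .insn the assembler may treat label 2, which no
	 * instruction follows, as a data label. */
	asm volatile(
	"	beqz	%0, 2f		\n"
	"	 nop			\n"
	"2:	.insn			\n"
	: : "r" (val));
}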
diff --git a/arch/mips/include/uapi/asm/unistd.h b/arch/mips/include/uapi/asm/unistd.h
index 3e940dbe0262..78faf4292e90 100644
--- a/arch/mips/include/uapi/asm/unistd.h
+++ b/arch/mips/include/uapi/asm/unistd.h
@@ -386,17 +386,18 @@
386#define __NR_pkey_mprotect (__NR_Linux + 363) 386#define __NR_pkey_mprotect (__NR_Linux + 363)
387#define __NR_pkey_alloc (__NR_Linux + 364) 387#define __NR_pkey_alloc (__NR_Linux + 364)
388#define __NR_pkey_free (__NR_Linux + 365) 388#define __NR_pkey_free (__NR_Linux + 365)
389#define __NR_statx (__NR_Linux + 366)
389 390
390 391
391/* 392/*
392 * Offset of the last Linux o32 flavoured syscall 393 * Offset of the last Linux o32 flavoured syscall
393 */ 394 */
394#define __NR_Linux_syscalls 365 395#define __NR_Linux_syscalls 366
395 396
396#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ 397#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
397 398
398#define __NR_O32_Linux 4000 399#define __NR_O32_Linux 4000
399#define __NR_O32_Linux_syscalls 365 400#define __NR_O32_Linux_syscalls 366
400 401
401#if _MIPS_SIM == _MIPS_SIM_ABI64 402#if _MIPS_SIM == _MIPS_SIM_ABI64
402 403
@@ -730,16 +731,17 @@
730#define __NR_pkey_mprotect (__NR_Linux + 323) 731#define __NR_pkey_mprotect (__NR_Linux + 323)
731#define __NR_pkey_alloc (__NR_Linux + 324) 732#define __NR_pkey_alloc (__NR_Linux + 324)
732#define __NR_pkey_free (__NR_Linux + 325) 733#define __NR_pkey_free (__NR_Linux + 325)
734#define __NR_statx (__NR_Linux + 326)
733 735
734/* 736/*
735 * Offset of the last Linux 64-bit flavoured syscall 737 * Offset of the last Linux 64-bit flavoured syscall
736 */ 738 */
737#define __NR_Linux_syscalls 325 739#define __NR_Linux_syscalls 326
738 740
739#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */ 741#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
740 742
741#define __NR_64_Linux 5000 743#define __NR_64_Linux 5000
742#define __NR_64_Linux_syscalls 325 744#define __NR_64_Linux_syscalls 326
743 745
744#if _MIPS_SIM == _MIPS_SIM_NABI32 746#if _MIPS_SIM == _MIPS_SIM_NABI32
745 747
@@ -1077,15 +1079,16 @@
1077#define __NR_pkey_mprotect (__NR_Linux + 327) 1079#define __NR_pkey_mprotect (__NR_Linux + 327)
1078#define __NR_pkey_alloc (__NR_Linux + 328) 1080#define __NR_pkey_alloc (__NR_Linux + 328)
1079#define __NR_pkey_free (__NR_Linux + 329) 1081#define __NR_pkey_free (__NR_Linux + 329)
1082#define __NR_statx (__NR_Linux + 330)
1080 1083
1081/* 1084/*
1082 * Offset of the last N32 flavoured syscall 1085 * Offset of the last N32 flavoured syscall
1083 */ 1086 */
1084#define __NR_Linux_syscalls 329 1087#define __NR_Linux_syscalls 330
1085 1088
1086#endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */ 1089#endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
1087 1090
1088#define __NR_N32_Linux 6000 1091#define __NR_N32_Linux 6000
1089#define __NR_N32_Linux_syscalls 329 1092#define __NR_N32_Linux_syscalls 330
1090 1093
1091#endif /* _UAPI_ASM_UNISTD_H */ 1094#endif /* _UAPI_ASM_UNISTD_H */
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index bb5c5d34ba81..a670c0c11875 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -102,6 +102,7 @@ void output_thread_info_defines(void)
102 DEFINE(_THREAD_SIZE, THREAD_SIZE); 102 DEFINE(_THREAD_SIZE, THREAD_SIZE);
103 DEFINE(_THREAD_MASK, THREAD_MASK); 103 DEFINE(_THREAD_MASK, THREAD_MASK);
104 DEFINE(_IRQ_STACK_SIZE, IRQ_STACK_SIZE); 104 DEFINE(_IRQ_STACK_SIZE, IRQ_STACK_SIZE);
105 DEFINE(_IRQ_STACK_START, IRQ_STACK_START);
105 BLANK(); 106 BLANK();
106} 107}
107 108
diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S
index 59476a607add..a00e87b0256d 100644
--- a/arch/mips/kernel/cps-vec.S
+++ b/arch/mips/kernel/cps-vec.S
@@ -361,7 +361,7 @@ LEAF(mips_cps_get_bootcfg)
361 END(mips_cps_get_bootcfg) 361 END(mips_cps_get_bootcfg)
362 362
363LEAF(mips_cps_boot_vpes) 363LEAF(mips_cps_boot_vpes)
364 PTR_L ta2, COREBOOTCFG_VPEMASK(a0) 364 lw ta2, COREBOOTCFG_VPEMASK(a0)
365 PTR_L ta3, COREBOOTCFG_VPECONFIG(a0) 365 PTR_L ta3, COREBOOTCFG_VPECONFIG(a0)
366 366
367#if defined(CONFIG_CPU_MIPSR6) 367#if defined(CONFIG_CPU_MIPSR6)
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 07718bb5fc9d..12422fd4af23 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -1824,7 +1824,7 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
1824 } 1824 }
1825 1825
1826 decode_configs(c); 1826 decode_configs(c);
1827 c->options |= MIPS_CPU_TLBINV | MIPS_CPU_LDPTE; 1827 c->options |= MIPS_CPU_FTLB | MIPS_CPU_TLBINV | MIPS_CPU_LDPTE;
1828 c->writecombine = _CACHE_UNCACHED_ACCELERATED; 1828 c->writecombine = _CACHE_UNCACHED_ACCELERATED;
1829 break; 1829 break;
1830 default: 1830 default:
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index 7ec9612cb007..ae810da4d499 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -215,9 +215,11 @@ NESTED(handle_int, PT_SIZE, sp)
215 beq t0, t1, 2f 215 beq t0, t1, 2f
216 216
217 /* Switch to IRQ stack */ 217 /* Switch to IRQ stack */
218 li t1, _IRQ_STACK_SIZE 218 li t1, _IRQ_STACK_START
219 PTR_ADD sp, t0, t1 219 PTR_ADD sp, t0, t1
220 220
221 /* Save task's sp on IRQ stack so that unwinding can follow it */
222 LONG_S s1, 0(sp)
2212: 2232:
222 jal plat_irq_dispatch 224 jal plat_irq_dispatch
223 225
@@ -325,9 +327,11 @@ NESTED(except_vec_vi_handler, 0, sp)
325 beq t0, t1, 2f 327 beq t0, t1, 2f
326 328
327 /* Switch to IRQ stack */ 329 /* Switch to IRQ stack */
328 li t1, _IRQ_STACK_SIZE 330 li t1, _IRQ_STACK_START
329 PTR_ADD sp, t0, t1 331 PTR_ADD sp, t0, t1
330 332
333 /* Save task's sp on IRQ stack so that unwinding can follow it */
334 LONG_S s1, 0(sp)
3312: 3352:
332 jalr v0 336 jalr v0
333 337
@@ -519,7 +523,7 @@ NESTED(nmi_handler, PT_SIZE, sp)
519 BUILD_HANDLER reserved reserved sti verbose /* others */ 523 BUILD_HANDLER reserved reserved sti verbose /* others */
520 524
521 .align 5 525 .align 5
522 LEAF(handle_ri_rdhwr_vivt) 526 LEAF(handle_ri_rdhwr_tlbp)
523 .set push 527 .set push
524 .set noat 528 .set noat
525 .set noreorder 529 .set noreorder
@@ -538,7 +542,7 @@ NESTED(nmi_handler, PT_SIZE, sp)
538 .set pop 542 .set pop
539 bltz k1, handle_ri /* slow path */ 543 bltz k1, handle_ri /* slow path */
540 /* fall thru */ 544 /* fall thru */
541 END(handle_ri_rdhwr_vivt) 545 END(handle_ri_rdhwr_tlbp)
542 546
543 LEAF(handle_ri_rdhwr) 547 LEAF(handle_ri_rdhwr)
544 .set push 548 .set push
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index fb6b6b650719..b68e10fc453d 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -488,31 +488,52 @@ unsigned long notrace unwind_stack_by_address(unsigned long stack_page,
488 unsigned long pc, 488 unsigned long pc,
489 unsigned long *ra) 489 unsigned long *ra)
490{ 490{
491 unsigned long low, high, irq_stack_high;
491 struct mips_frame_info info; 492 struct mips_frame_info info;
492 unsigned long size, ofs; 493 unsigned long size, ofs;
494 struct pt_regs *regs;
493 int leaf; 495 int leaf;
494 extern void ret_from_irq(void);
495 extern void ret_from_exception(void);
496 496
497 if (!stack_page) 497 if (!stack_page)
498 return 0; 498 return 0;
499 499
500 /* 500 /*
501 * If we reached the bottom of interrupt context, 501 * IRQ stacks start at IRQ_STACK_START
502 * return saved pc in pt_regs. 502 * task stacks at THREAD_SIZE - 32
503 */ 503 */
504 if (pc == (unsigned long)ret_from_irq || 504 low = stack_page;
505 pc == (unsigned long)ret_from_exception) { 505 if (!preemptible() && on_irq_stack(raw_smp_processor_id(), *sp)) {
506 struct pt_regs *regs; 506 high = stack_page + IRQ_STACK_START;
507 if (*sp >= stack_page && 507 irq_stack_high = high;
508 *sp + sizeof(*regs) <= stack_page + THREAD_SIZE - 32) { 508 } else {
509 regs = (struct pt_regs *)*sp; 509 high = stack_page + THREAD_SIZE - 32;
510 pc = regs->cp0_epc; 510 irq_stack_high = 0;
511 if (!user_mode(regs) && __kernel_text_address(pc)) { 511 }
512 *sp = regs->regs[29]; 512
513 *ra = regs->regs[31]; 513 /*
514 return pc; 514 * If we reached the top of the interrupt stack, start unwinding
515 } 515 * the interrupted task stack.
516 */
517 if (unlikely(*sp == irq_stack_high)) {
518 unsigned long task_sp = *(unsigned long *)*sp;
519
520 /*
521 * Check that the pointer saved in the IRQ stack head points to
 522 * something within the stack of the current task.
523 */
524 if (!object_is_on_stack((void *)task_sp))
525 return 0;
526
527 /*
 528 * Follow the pointer to the task's kernel stack frame where the interrupted
529 * state was saved.
530 */
531 regs = (struct pt_regs *)task_sp;
532 pc = regs->cp0_epc;
533 if (!user_mode(regs) && __kernel_text_address(pc)) {
534 *sp = regs->regs[29];
535 *ra = regs->regs[31];
536 return pc;
516 } 537 }
517 return 0; 538 return 0;
518 } 539 }
@@ -533,8 +554,7 @@ unsigned long notrace unwind_stack_by_address(unsigned long stack_page,
533 if (leaf < 0) 554 if (leaf < 0)
534 return 0; 555 return 0;
535 556
536 if (*sp < stack_page || 557 if (*sp < low || *sp + info.frame_size > high)
537 *sp + info.frame_size > stack_page + THREAD_SIZE - 32)
538 return 0; 558 return 0;
539 559
540 if (leaf) 560 if (leaf)
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 339601267265..6931fe722a0b 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -456,7 +456,8 @@ static int fpr_set(struct task_struct *target,
456 &target->thread.fpu, 456 &target->thread.fpu,
457 0, sizeof(elf_fpregset_t)); 457 0, sizeof(elf_fpregset_t));
458 458
459 for (i = 0; i < NUM_FPU_REGS; i++) { 459 BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
460 for (i = 0; i < NUM_FPU_REGS && count >= sizeof(elf_fpreg_t); i++) {
460 err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, 461 err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
461 &fpr_val, i * sizeof(elf_fpreg_t), 462 &fpr_val, i * sizeof(elf_fpreg_t),
462 (i + 1) * sizeof(elf_fpreg_t)); 463 (i + 1) * sizeof(elf_fpreg_t));
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index c29d397eee86..80ed68b2c95e 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -600,3 +600,4 @@ EXPORT(sys_call_table)
600 PTR sys_pkey_mprotect 600 PTR sys_pkey_mprotect
601 PTR sys_pkey_alloc 601 PTR sys_pkey_alloc
602 PTR sys_pkey_free /* 4365 */ 602 PTR sys_pkey_free /* 4365 */
603 PTR sys_statx
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index 0687f96ee912..49765b44aa9b 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -438,4 +438,5 @@ EXPORT(sys_call_table)
438 PTR sys_pkey_mprotect 438 PTR sys_pkey_mprotect
439 PTR sys_pkey_alloc 439 PTR sys_pkey_alloc
440 PTR sys_pkey_free /* 5325 */ 440 PTR sys_pkey_free /* 5325 */
441 PTR sys_statx
441 .size sys_call_table,.-sys_call_table 442 .size sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 0331ba39a065..90bad2d1b2d3 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -433,4 +433,5 @@ EXPORT(sysn32_call_table)
433 PTR sys_pkey_mprotect 433 PTR sys_pkey_mprotect
434 PTR sys_pkey_alloc 434 PTR sys_pkey_alloc
435 PTR sys_pkey_free 435 PTR sys_pkey_free
436 PTR sys_statx /* 6330 */
436 .size sysn32_call_table,.-sysn32_call_table 437 .size sysn32_call_table,.-sysn32_call_table
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index 5a47042dd25f..2dd70bd104e1 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -588,4 +588,5 @@ EXPORT(sys32_call_table)
588 PTR sys_pkey_mprotect 588 PTR sys_pkey_mprotect
589 PTR sys_pkey_alloc 589 PTR sys_pkey_alloc
590 PTR sys_pkey_free /* 4365 */ 590 PTR sys_pkey_free /* 4365 */
591 PTR sys_statx
591 .size sys32_call_table,.-sys32_call_table 592 .size sys32_call_table,.-sys32_call_table
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index c7d17cfb32f6..b49e7bf9f950 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -83,7 +83,7 @@ extern asmlinkage void handle_dbe(void);
83extern asmlinkage void handle_sys(void); 83extern asmlinkage void handle_sys(void);
84extern asmlinkage void handle_bp(void); 84extern asmlinkage void handle_bp(void);
85extern asmlinkage void handle_ri(void); 85extern asmlinkage void handle_ri(void);
86extern asmlinkage void handle_ri_rdhwr_vivt(void); 86extern asmlinkage void handle_ri_rdhwr_tlbp(void);
87extern asmlinkage void handle_ri_rdhwr(void); 87extern asmlinkage void handle_ri_rdhwr(void);
88extern asmlinkage void handle_cpu(void); 88extern asmlinkage void handle_cpu(void);
89extern asmlinkage void handle_ov(void); 89extern asmlinkage void handle_ov(void);
@@ -2408,9 +2408,18 @@ void __init trap_init(void)
2408 2408
2409 set_except_vector(EXCCODE_SYS, handle_sys); 2409 set_except_vector(EXCCODE_SYS, handle_sys);
2410 set_except_vector(EXCCODE_BP, handle_bp); 2410 set_except_vector(EXCCODE_BP, handle_bp);
2411 set_except_vector(EXCCODE_RI, rdhwr_noopt ? handle_ri : 2411
2412 (cpu_has_vtag_icache ? 2412 if (rdhwr_noopt)
2413 handle_ri_rdhwr_vivt : handle_ri_rdhwr)); 2413 set_except_vector(EXCCODE_RI, handle_ri);
2414 else {
2415 if (cpu_has_vtag_icache)
2416 set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp);
2417 else if (current_cpu_type() == CPU_LOONGSON3)
2418 set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp);
2419 else
2420 set_except_vector(EXCCODE_RI, handle_ri_rdhwr);
2421 }
2422
2414 set_except_vector(EXCCODE_CPU, handle_cpu); 2423 set_except_vector(EXCCODE_CPU, handle_cpu);
2415 set_except_vector(EXCCODE_OV, handle_ov); 2424 set_except_vector(EXCCODE_OV, handle_ov);
2416 set_except_vector(EXCCODE_TR, handle_tr); 2425 set_except_vector(EXCCODE_TR, handle_tr);
diff --git a/arch/mips/lantiq/xway/sysctrl.c b/arch/mips/lantiq/xway/sysctrl.c
index 3c3aa05891dd..95bec460b651 100644
--- a/arch/mips/lantiq/xway/sysctrl.c
+++ b/arch/mips/lantiq/xway/sysctrl.c
@@ -467,7 +467,7 @@ void __init ltq_soc_init(void)
467 467
468 if (!np_xbar) 468 if (!np_xbar)
469 panic("Failed to load xbar nodes from devicetree"); 469 panic("Failed to load xbar nodes from devicetree");
470 if (of_address_to_resource(np_pmu, 0, &res_xbar)) 470 if (of_address_to_resource(np_xbar, 0, &res_xbar))
471 panic("Failed to get xbar resources"); 471 panic("Failed to get xbar resources");
472 if (!request_mem_region(res_xbar.start, resource_size(&res_xbar), 472 if (!request_mem_region(res_xbar.start, resource_size(&res_xbar),
473 res_xbar.name)) 473 res_xbar.name))
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index e7f798d55fbc..3fe99cb271a9 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -1562,6 +1562,7 @@ static void probe_vcache(void)
1562 vcache_size = c->vcache.sets * c->vcache.ways * c->vcache.linesz; 1562 vcache_size = c->vcache.sets * c->vcache.ways * c->vcache.linesz;
1563 1563
1564 c->vcache.waybit = 0; 1564 c->vcache.waybit = 0;
1565 c->vcache.waysize = vcache_size / c->vcache.ways;
1565 1566
1566 pr_info("Unified victim cache %ldkB %s, linesize %d bytes.\n", 1567 pr_info("Unified victim cache %ldkB %s, linesize %d bytes.\n",
1567 vcache_size >> 10, way_string[c->vcache.ways], c->vcache.linesz); 1568 vcache_size >> 10, way_string[c->vcache.ways], c->vcache.linesz);
@@ -1664,6 +1665,7 @@ static void __init loongson3_sc_init(void)
1664 /* Loongson-3 has 4 cores, 1MB scache for each. scaches are shared */ 1665 /* Loongson-3 has 4 cores, 1MB scache for each. scaches are shared */
1665 scache_size *= 4; 1666 scache_size *= 4;
1666 c->scache.waybit = 0; 1667 c->scache.waybit = 0;
1668 c->scache.waysize = scache_size / c->scache.ways;
1667 pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n", 1669 pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
1668 scache_size >> 10, way_string[c->scache.ways], c->scache.linesz); 1670 scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);
1669 if (scache_size) 1671 if (scache_size)
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 9bfee8988eaf..4f642e07c2b1 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -760,7 +760,8 @@ static void build_huge_update_entries(u32 **p, unsigned int pte,
760static void build_huge_handler_tail(u32 **p, struct uasm_reloc **r, 760static void build_huge_handler_tail(u32 **p, struct uasm_reloc **r,
761 struct uasm_label **l, 761 struct uasm_label **l,
762 unsigned int pte, 762 unsigned int pte,
763 unsigned int ptr) 763 unsigned int ptr,
764 unsigned int flush)
764{ 765{
765#ifdef CONFIG_SMP 766#ifdef CONFIG_SMP
766 UASM_i_SC(p, pte, 0, ptr); 767 UASM_i_SC(p, pte, 0, ptr);
@@ -769,6 +770,22 @@ static void build_huge_handler_tail(u32 **p, struct uasm_reloc **r,
769#else 770#else
770 UASM_i_SW(p, pte, 0, ptr); 771 UASM_i_SW(p, pte, 0, ptr);
771#endif 772#endif
773 if (cpu_has_ftlb && flush) {
774 BUG_ON(!cpu_has_tlbinv);
775
776 UASM_i_MFC0(p, ptr, C0_ENTRYHI);
777 uasm_i_ori(p, ptr, ptr, MIPS_ENTRYHI_EHINV);
778 UASM_i_MTC0(p, ptr, C0_ENTRYHI);
779 build_tlb_write_entry(p, l, r, tlb_indexed);
780
781 uasm_i_xori(p, ptr, ptr, MIPS_ENTRYHI_EHINV);
782 UASM_i_MTC0(p, ptr, C0_ENTRYHI);
783 build_huge_update_entries(p, pte, ptr);
784 build_huge_tlb_write_entry(p, l, r, pte, tlb_random, 0);
785
786 return;
787 }
788
772 build_huge_update_entries(p, pte, ptr); 789 build_huge_update_entries(p, pte, ptr);
773 build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed, 0); 790 build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed, 0);
774} 791}
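In pseudo-C, the extra sequence emitted when `flush` is set does the following (a paraphrase of the uasm calls above, with register shuffling omitted):

static void huge_tail_flush_sketch(unsigned long pte)
{
	unsigned long entryhi = read_c0_entryhi();

	/* Invalidate the stale entry at the probed index first: the
	 * old mapping may sit in the FTLB while an indexed write of
	 * the new huge-page entry would land in the VTLB. */
	write_c0_entryhi(entryhi | MIPS_ENTRYHI_EHINV);
	tlb_write_indexed();

	write_c0_entryhi(entryhi);	/* restore the VPN */
	/* ... reload EntryLo0/1 from pte for the huge page ... */
	tlb_write_random();		/* insert the new entry */
}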
@@ -2199,7 +2216,7 @@ static void build_r4000_tlb_load_handler(void)
2199 uasm_l_tlbl_goaround2(&l, p); 2216 uasm_l_tlbl_goaround2(&l, p);
2200 } 2217 }
2201 uasm_i_ori(&p, wr.r1, wr.r1, (_PAGE_ACCESSED | _PAGE_VALID)); 2218 uasm_i_ori(&p, wr.r1, wr.r1, (_PAGE_ACCESSED | _PAGE_VALID));
2202 build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2); 2219 build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 1);
2203#endif 2220#endif
2204 2221
2205 uasm_l_nopage_tlbl(&l, p); 2222 uasm_l_nopage_tlbl(&l, p);
@@ -2254,7 +2271,7 @@ static void build_r4000_tlb_store_handler(void)
2254 build_tlb_probe_entry(&p); 2271 build_tlb_probe_entry(&p);
2255 uasm_i_ori(&p, wr.r1, wr.r1, 2272 uasm_i_ori(&p, wr.r1, wr.r1,
2256 _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY); 2273 _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
2257 build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2); 2274 build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 1);
2258#endif 2275#endif
2259 2276
2260 uasm_l_nopage_tlbs(&l, p); 2277 uasm_l_nopage_tlbs(&l, p);
@@ -2310,7 +2327,7 @@ static void build_r4000_tlb_modify_handler(void)
2310 build_tlb_probe_entry(&p); 2327 build_tlb_probe_entry(&p);
2311 uasm_i_ori(&p, wr.r1, wr.r1, 2328 uasm_i_ori(&p, wr.r1, wr.r1,
2312 _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY); 2329 _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
2313 build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2); 2330 build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 0);
2314#endif 2331#endif
2315 2332
2316 uasm_l_nopage_tlbm(&l, p); 2333 uasm_l_nopage_tlbm(&l, p);
diff --git a/arch/mips/ralink/rt3883.c b/arch/mips/ralink/rt3883.c
index c4ffd43d3996..48ce701557a4 100644
--- a/arch/mips/ralink/rt3883.c
+++ b/arch/mips/ralink/rt3883.c
@@ -35,7 +35,7 @@ static struct rt2880_pmx_func uartlite_func[] = { FUNC("uartlite", 0, 15, 2) };
35static struct rt2880_pmx_func jtag_func[] = { FUNC("jtag", 0, 17, 5) }; 35static struct rt2880_pmx_func jtag_func[] = { FUNC("jtag", 0, 17, 5) };
36static struct rt2880_pmx_func mdio_func[] = { FUNC("mdio", 0, 22, 2) }; 36static struct rt2880_pmx_func mdio_func[] = { FUNC("mdio", 0, 22, 2) };
37static struct rt2880_pmx_func lna_a_func[] = { FUNC("lna a", 0, 32, 3) }; 37static struct rt2880_pmx_func lna_a_func[] = { FUNC("lna a", 0, 32, 3) };
38static struct rt2880_pmx_func lna_g_func[] = { FUNC("lna a", 0, 35, 3) }; 38static struct rt2880_pmx_func lna_g_func[] = { FUNC("lna g", 0, 35, 3) };
39static struct rt2880_pmx_func pci_func[] = { 39static struct rt2880_pmx_func pci_func[] = {
40 FUNC("pci-dev", 0, 40, 32), 40 FUNC("pci-dev", 0, 40, 32),
41 FUNC("pci-host2", 1, 40, 32), 41 FUNC("pci-host2", 1, 40, 32),
@@ -43,7 +43,7 @@ static struct rt2880_pmx_func pci_func[] = {
43 FUNC("pci-fnc", 3, 40, 32) 43 FUNC("pci-fnc", 3, 40, 32)
44}; 44};
45static struct rt2880_pmx_func ge1_func[] = { FUNC("ge1", 0, 72, 12) }; 45static struct rt2880_pmx_func ge1_func[] = { FUNC("ge1", 0, 72, 12) };
46static struct rt2880_pmx_func ge2_func[] = { FUNC("ge1", 0, 84, 12) }; 46static struct rt2880_pmx_func ge2_func[] = { FUNC("ge2", 0, 84, 12) };
47 47
48static struct rt2880_pmx_group rt3883_pinmux_data[] = { 48static struct rt2880_pmx_group rt3883_pinmux_data[] = {
49 GRP("i2c", i2c_func, 1, RT3883_GPIO_MODE_I2C), 49 GRP("i2c", i2c_func, 1, RT3883_GPIO_MODE_I2C),
diff --git a/arch/nios2/kernel/prom.c b/arch/nios2/kernel/prom.c
index 367c5426157b..3901b80d4420 100644
--- a/arch/nios2/kernel/prom.c
+++ b/arch/nios2/kernel/prom.c
@@ -48,6 +48,13 @@ void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
48 return alloc_bootmem_align(size, align); 48 return alloc_bootmem_align(size, align);
49} 49}
50 50
51int __init early_init_dt_reserve_memory_arch(phys_addr_t base, phys_addr_t size,
52 bool nomap)
53{
54 reserve_bootmem(base, size, BOOTMEM_DEFAULT);
55 return 0;
56}
57
51void __init early_init_devtree(void *params) 58void __init early_init_devtree(void *params)
52{ 59{
53 __be32 *dtb = (u32 *)__dtb_start; 60 __be32 *dtb = (u32 *)__dtb_start;
diff --git a/arch/nios2/kernel/setup.c b/arch/nios2/kernel/setup.c
index 6e57ffa5db27..6044d9be28b4 100644
--- a/arch/nios2/kernel/setup.c
+++ b/arch/nios2/kernel/setup.c
@@ -201,6 +201,9 @@ void __init setup_arch(char **cmdline_p)
201 } 201 }
202#endif /* CONFIG_BLK_DEV_INITRD */ 202#endif /* CONFIG_BLK_DEV_INITRD */
203 203
204 early_init_fdt_reserve_self();
205 early_init_fdt_scan_reserved_mem();
206
204 unflatten_and_copy_device_tree(); 207 unflatten_and_copy_device_tree();
205 208
206 setup_cpuinfo(); 209 setup_cpuinfo();
diff --git a/arch/openrisc/include/asm/cmpxchg.h b/arch/openrisc/include/asm/cmpxchg.h
index 5fcb9ac72693..f0a5d8b844d6 100644
--- a/arch/openrisc/include/asm/cmpxchg.h
+++ b/arch/openrisc/include/asm/cmpxchg.h
@@ -77,7 +77,11 @@ static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
77 return val; 77 return val;
78} 78}
79 79
80#define xchg(ptr, with) \ 80#define xchg(ptr, with) \
81 ((typeof(*(ptr)))__xchg((unsigned long)(with), (ptr), sizeof(*(ptr)))) 81 ({ \
82 (__typeof__(*(ptr))) __xchg((unsigned long)(with), \
83 (ptr), \
84 sizeof(*(ptr))); \
85 })
82 86
83#endif /* __ASM_OPENRISC_CMPXCHG_H */ 87#endif /* __ASM_OPENRISC_CMPXCHG_H */
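Wrapping the cast in a GCC statement expression keeps the result typed as *(ptr) while, presumably, letting callers discard it cleanly; that motivation is my inference, not stated in the hunk. Usage sketch:

static void xchg_usage_sketch(void)
{
	int lock = 1, old;

	old = xchg(&lock, 0);	/* result consumed, typed as int */
	xchg(&lock, 1);		/* result discarded without a
				 * "value computed is not used"
				 * style warning (my inference) */
	(void)old;
}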
diff --git a/arch/openrisc/include/asm/uaccess.h b/arch/openrisc/include/asm/uaccess.h
index 140faa16685a..1311e6b13991 100644
--- a/arch/openrisc/include/asm/uaccess.h
+++ b/arch/openrisc/include/asm/uaccess.h
@@ -211,7 +211,7 @@ do { \
211 case 1: __get_user_asm(x, ptr, retval, "l.lbz"); break; \ 211 case 1: __get_user_asm(x, ptr, retval, "l.lbz"); break; \
212 case 2: __get_user_asm(x, ptr, retval, "l.lhz"); break; \ 212 case 2: __get_user_asm(x, ptr, retval, "l.lhz"); break; \
213 case 4: __get_user_asm(x, ptr, retval, "l.lwz"); break; \ 213 case 4: __get_user_asm(x, ptr, retval, "l.lwz"); break; \
214 case 8: __get_user_asm2(x, ptr, retval); \ 214 case 8: __get_user_asm2(x, ptr, retval); break; \
215 default: (x) = __get_user_bad(); \ 215 default: (x) = __get_user_bad(); \
216 } \ 216 } \
217} while (0) 217} while (0)
diff --git a/arch/openrisc/kernel/or32_ksyms.c b/arch/openrisc/kernel/or32_ksyms.c
index 5c4695d13542..ee3e604959e1 100644
--- a/arch/openrisc/kernel/or32_ksyms.c
+++ b/arch/openrisc/kernel/or32_ksyms.c
@@ -30,6 +30,7 @@
30#include <asm/hardirq.h> 30#include <asm/hardirq.h>
31#include <asm/delay.h> 31#include <asm/delay.h>
32#include <asm/pgalloc.h> 32#include <asm/pgalloc.h>
33#include <asm/pgtable.h>
33 34
34#define DECLARE_EXPORT(name) extern void name(void); EXPORT_SYMBOL(name) 35#define DECLARE_EXPORT(name) extern void name(void); EXPORT_SYMBOL(name)
35 36
@@ -42,6 +43,9 @@ DECLARE_EXPORT(__muldi3);
42DECLARE_EXPORT(__ashrdi3); 43DECLARE_EXPORT(__ashrdi3);
43DECLARE_EXPORT(__ashldi3); 44DECLARE_EXPORT(__ashldi3);
44DECLARE_EXPORT(__lshrdi3); 45DECLARE_EXPORT(__lshrdi3);
46DECLARE_EXPORT(__ucmpdi2);
45 47
48EXPORT_SYMBOL(empty_zero_page);
46EXPORT_SYMBOL(__copy_tofrom_user); 49EXPORT_SYMBOL(__copy_tofrom_user);
50EXPORT_SYMBOL(__clear_user);
47EXPORT_SYMBOL(memset); 51EXPORT_SYMBOL(memset);
diff --git a/arch/openrisc/kernel/process.c b/arch/openrisc/kernel/process.c
index 828a29110459..f8da545854f9 100644
--- a/arch/openrisc/kernel/process.c
+++ b/arch/openrisc/kernel/process.c
@@ -90,6 +90,7 @@ void arch_cpu_idle(void)
90} 90}
91 91
92void (*pm_power_off) (void) = machine_power_off; 92void (*pm_power_off) (void) = machine_power_off;
93EXPORT_SYMBOL(pm_power_off);
93 94
94/* 95/*
95 * When a process does an "exec", machine state like FPU and debug 96 * When a process does an "exec", machine state like FPU and debug
diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
index 19c9c3c5f267..c7e15cc5c668 100644
--- a/arch/parisc/include/asm/cacheflush.h
+++ b/arch/parisc/include/asm/cacheflush.h
@@ -43,28 +43,9 @@ static inline void flush_kernel_dcache_page(struct page *page)
43 43
44#define flush_kernel_dcache_range(start,size) \ 44#define flush_kernel_dcache_range(start,size) \
45 flush_kernel_dcache_range_asm((start), (start)+(size)); 45 flush_kernel_dcache_range_asm((start), (start)+(size));
46/* vmap range flushes and invalidates. Architecturally, we don't need
47 * the invalidate, because the CPU should refuse to speculate once an
48 * area has been flushed, so invalidate is left empty */
49static inline void flush_kernel_vmap_range(void *vaddr, int size)
50{
51 unsigned long start = (unsigned long)vaddr;
52
53 flush_kernel_dcache_range_asm(start, start + size);
54}
55static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
56{
57 unsigned long start = (unsigned long)vaddr;
58 void *cursor = vaddr;
59 46
60 for ( ; cursor < vaddr + size; cursor += PAGE_SIZE) { 47void flush_kernel_vmap_range(void *vaddr, int size);
61 struct page *page = vmalloc_to_page(cursor); 48void invalidate_kernel_vmap_range(void *vaddr, int size);
62
63 if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
64 flush_kernel_dcache_page(page);
65 }
66 flush_kernel_dcache_range_asm(start, start + size);
67}
68 49
69#define flush_cache_vmap(start, end) flush_cache_all() 50#define flush_cache_vmap(start, end) flush_cache_all()
70#define flush_cache_vunmap(start, end) flush_cache_all() 51#define flush_cache_vunmap(start, end) flush_cache_all()
diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
index fb4382c28259..8442727f28d2 100644
--- a/arch/parisc/include/asm/uaccess.h
+++ b/arch/parisc/include/asm/uaccess.h
@@ -32,7 +32,8 @@
32 * that put_user is the same as __put_user, etc. 32 * that put_user is the same as __put_user, etc.
33 */ 33 */
34 34
35#define access_ok(type, uaddr, size) (1) 35#define access_ok(type, uaddr, size) \
36 ( (uaddr) == (uaddr) )
36 37
37#define put_user __put_user 38#define put_user __put_user
38#define get_user __get_user 39#define get_user __get_user
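Both bodies are constant-true, but `( (uaddr) == (uaddr) )` actually references its argument; the likely point (an assumption on my part) is to keep call sites that compute uaddr only for access_ok() free of unused-variable warnings. As a sketch:

static long check_ptr_sketch(void __user *p)	/* hypothetical */
{
	/* With the old "(1)" body, p was never evaluated and could
	 * trip set-but-unused warnings; the new body references it
	 * while still being constant-true. */
	if (!access_ok(VERIFY_READ, p, sizeof(long)))
		return -EFAULT;
	return 0;
}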
@@ -64,6 +65,15 @@ struct exception_table_entry {
64 ".previous\n" 65 ".previous\n"
65 66
66/* 67/*
68 * ASM_EXCEPTIONTABLE_ENTRY_EFAULT() creates a special exception table entry
69 * (with lowest bit set) for which the fault handler in fixup_exception() will
 70 * load -EFAULT into %r8 for a read or write fault, and zero the target
71 * register in case of a read fault in get_user().
72 */
73#define ASM_EXCEPTIONTABLE_ENTRY_EFAULT( fault_addr, except_addr )\
74 ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr + 1)
75
76/*
67 * The page fault handler stores, in a per-cpu area, the following information 77 * The page fault handler stores, in a per-cpu area, the following information
68 * if a fixup routine is available. 78 * if a fixup routine is available.
69 */ 79 */
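On the fault side, fixup_exception() is described as keying off that low address bit; a sketch of the check (field names and address arithmetic simplified, an assumption):

	unsigned long target = fixup->fixup;	/* hypothetical field */

	if (target & 1) {
		regs->gr[8] = -EFAULT;	/* %r8 is the err register */
		target &= ~1UL;		/* strip the marker bit */
	}
	regs->iaoq[0] = target;		/* resume at the fixup code */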
@@ -90,7 +100,7 @@ struct exception_data {
90#define __get_user(x, ptr) \ 100#define __get_user(x, ptr) \
91({ \ 101({ \
92 register long __gu_err __asm__ ("r8") = 0; \ 102 register long __gu_err __asm__ ("r8") = 0; \
93 register long __gu_val __asm__ ("r9") = 0; \ 103 register long __gu_val; \
94 \ 104 \
95 load_sr2(); \ 105 load_sr2(); \
96 switch (sizeof(*(ptr))) { \ 106 switch (sizeof(*(ptr))) { \
@@ -106,22 +116,23 @@ struct exception_data {
106}) 116})
107 117
108#define __get_user_asm(ldx, ptr) \ 118#define __get_user_asm(ldx, ptr) \
109 __asm__("\n1:\t" ldx "\t0(%%sr2,%2),%0\n\t" \ 119 __asm__("1: " ldx " 0(%%sr2,%2),%0\n" \
110 ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_get_user_skip_1)\ 120 "9:\n" \
121 ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
111 : "=r"(__gu_val), "=r"(__gu_err) \ 122 : "=r"(__gu_val), "=r"(__gu_err) \
112 : "r"(ptr), "1"(__gu_err) \ 123 : "r"(ptr), "1"(__gu_err));
113 : "r1");
114 124
115#if !defined(CONFIG_64BIT) 125#if !defined(CONFIG_64BIT)
116 126
117#define __get_user_asm64(ptr) \ 127#define __get_user_asm64(ptr) \
118 __asm__("\n1:\tldw 0(%%sr2,%2),%0" \ 128 __asm__(" copy %%r0,%R0\n" \
119 "\n2:\tldw 4(%%sr2,%2),%R0\n\t" \ 129 "1: ldw 0(%%sr2,%2),%0\n" \
120 ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_get_user_skip_2)\ 130 "2: ldw 4(%%sr2,%2),%R0\n" \
121 ASM_EXCEPTIONTABLE_ENTRY(2b, fixup_get_user_skip_1)\ 131 "9:\n" \
132 ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
133 ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b) \
122 : "=r"(__gu_val), "=r"(__gu_err) \ 134 : "=r"(__gu_val), "=r"(__gu_err) \
123 : "r"(ptr), "1"(__gu_err) \ 135 : "r"(ptr), "1"(__gu_err));
124 : "r1");
125 136
126#endif /* !defined(CONFIG_64BIT) */ 137#endif /* !defined(CONFIG_64BIT) */
127 138
@@ -147,32 +158,31 @@ struct exception_data {
147 * The "__put_user/kernel_asm()" macros tell gcc they read from memory 158 * The "__put_user/kernel_asm()" macros tell gcc they read from memory
148 * instead of writing. This is because they do not write to any memory 159 * instead of writing. This is because they do not write to any memory
149 * gcc knows about, so there are no aliasing issues. These macros must 160 * gcc knows about, so there are no aliasing issues. These macros must
150 * also be aware that "fixup_put_user_skip_[12]" are executed in the 161 * also be aware that fixups are executed in the context of the fault,
151 * context of the fault, and any registers used there must be listed 162 * and any registers used there must be listed as clobbers.
152 * as clobbers. In this case only "r1" is used by the current routines. 163 * r8 is already listed as err.
153 * r8/r9 are already listed as err/val.
154 */ 164 */
155 165
156#define __put_user_asm(stx, x, ptr) \ 166#define __put_user_asm(stx, x, ptr) \
157 __asm__ __volatile__ ( \ 167 __asm__ __volatile__ ( \
158 "\n1:\t" stx "\t%2,0(%%sr2,%1)\n\t" \ 168 "1: " stx " %2,0(%%sr2,%1)\n" \
159 ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_1)\ 169 "9:\n" \
170 ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
160 : "=r"(__pu_err) \ 171 : "=r"(__pu_err) \
161 : "r"(ptr), "r"(x), "0"(__pu_err) \ 172 : "r"(ptr), "r"(x), "0"(__pu_err))
162 : "r1")
163 173
164 174
165#if !defined(CONFIG_64BIT) 175#if !defined(CONFIG_64BIT)
166 176
167#define __put_user_asm64(__val, ptr) do { \ 177#define __put_user_asm64(__val, ptr) do { \
168 __asm__ __volatile__ ( \ 178 __asm__ __volatile__ ( \
169 "\n1:\tstw %2,0(%%sr2,%1)" \ 179 "1: stw %2,0(%%sr2,%1)\n" \
170 "\n2:\tstw %R2,4(%%sr2,%1)\n\t" \ 180 "2: stw %R2,4(%%sr2,%1)\n" \
171 ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_2)\ 181 "9:\n" \
172 ASM_EXCEPTIONTABLE_ENTRY(2b, fixup_put_user_skip_1)\ 182 ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
183 ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b) \
173 : "=r"(__pu_err) \ 184 : "=r"(__pu_err) \
174 : "r"(ptr), "r"(__val), "0"(__pu_err) \ 185 : "r"(ptr), "r"(__val), "0"(__pu_err)); \
175 : "r1"); \
176} while (0) 186} while (0)
177 187
178#endif /* !defined(CONFIG_64BIT) */ 188#endif /* !defined(CONFIG_64BIT) */
diff --git a/arch/parisc/include/uapi/asm/unistd.h b/arch/parisc/include/uapi/asm/unistd.h
index 6b0741e7a7ed..667c99421003 100644
--- a/arch/parisc/include/uapi/asm/unistd.h
+++ b/arch/parisc/include/uapi/asm/unistd.h
@@ -362,8 +362,9 @@
362#define __NR_copy_file_range (__NR_Linux + 346) 362#define __NR_copy_file_range (__NR_Linux + 346)
363#define __NR_preadv2 (__NR_Linux + 347) 363#define __NR_preadv2 (__NR_Linux + 347)
364#define __NR_pwritev2 (__NR_Linux + 348) 364#define __NR_pwritev2 (__NR_Linux + 348)
365#define __NR_statx (__NR_Linux + 349)
365 366
366#define __NR_Linux_syscalls (__NR_pwritev2 + 1) 367#define __NR_Linux_syscalls (__NR_statx + 1)
367 368
368 369
369#define __IGNORE_select /* newselect */ 370#define __IGNORE_select /* newselect */
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index 0dc72d5de861..c32a09095216 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -616,3 +616,25 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
616 __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn)); 616 __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
617 } 617 }
618} 618}
619
620void flush_kernel_vmap_range(void *vaddr, int size)
621{
622 unsigned long start = (unsigned long)vaddr;
623
624 if ((unsigned long)size > parisc_cache_flush_threshold)
625 flush_data_cache();
626 else
627 flush_kernel_dcache_range_asm(start, start + size);
628}
629EXPORT_SYMBOL(flush_kernel_vmap_range);
630
631void invalidate_kernel_vmap_range(void *vaddr, int size)
632{
633 unsigned long start = (unsigned long)vaddr;
634
635 if ((unsigned long)size > parisc_cache_flush_threshold)
636 flush_data_cache();
637 else
638 flush_kernel_dcache_range_asm(start, start + size);
639}
640EXPORT_SYMBOL(invalidate_kernel_vmap_range);
diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
index a0ecdb4abcc8..c66c943d9322 100644
--- a/arch/parisc/kernel/module.c
+++ b/arch/parisc/kernel/module.c
@@ -620,6 +620,10 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
620 */ 620 */
621 *loc = fsel(val, addend); 621 *loc = fsel(val, addend);
622 break; 622 break;
623 case R_PARISC_SECREL32:
624 /* 32-bit section relative address. */
625 *loc = fsel(val, addend);
626 break;
623 case R_PARISC_DPREL21L: 627 case R_PARISC_DPREL21L:
624 /* left 21 bit of relative address */ 628 /* left 21 bit of relative address */
625 val = lrsel(val - dp, addend); 629 val = lrsel(val - dp, addend);
@@ -807,6 +811,10 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
807 */ 811 */
808 *loc = fsel(val, addend); 812 *loc = fsel(val, addend);
809 break; 813 break;
814 case R_PARISC_SECREL32:
815 /* 32-bit section relative address. */
816 *loc = fsel(val, addend);
817 break;
810 case R_PARISC_FPTR64: 818 case R_PARISC_FPTR64:
811 /* 64-bit function address */ 819 /* 64-bit function address */
812 if(in_local(me, (void *)(val + addend))) { 820 if(in_local(me, (void *)(val + addend))) {
diff --git a/arch/parisc/kernel/parisc_ksyms.c b/arch/parisc/kernel/parisc_ksyms.c
index 7484b3d11e0d..c6d6272a934f 100644
--- a/arch/parisc/kernel/parisc_ksyms.c
+++ b/arch/parisc/kernel/parisc_ksyms.c
@@ -47,16 +47,6 @@ EXPORT_SYMBOL(__cmpxchg_u64);
 EXPORT_SYMBOL(lclear_user);
 EXPORT_SYMBOL(lstrnlen_user);
 
-/* Global fixups - defined as int to avoid creation of function pointers */
-extern int fixup_get_user_skip_1;
-extern int fixup_get_user_skip_2;
-extern int fixup_put_user_skip_1;
-extern int fixup_put_user_skip_2;
-EXPORT_SYMBOL(fixup_get_user_skip_1);
-EXPORT_SYMBOL(fixup_get_user_skip_2);
-EXPORT_SYMBOL(fixup_put_user_skip_1);
-EXPORT_SYMBOL(fixup_put_user_skip_2);
-
 #ifndef CONFIG_64BIT
 /* Needed so insmod can set dp value */
 extern int $global$;
diff --git a/arch/parisc/kernel/perf.c b/arch/parisc/kernel/perf.c
index e282a5131d77..6017a5af2e6e 100644
--- a/arch/parisc/kernel/perf.c
+++ b/arch/parisc/kernel/perf.c
@@ -39,7 +39,7 @@
  * the PDC INTRIGUE calls. This is done to eliminate bugs introduced
  * in various PDC revisions. The code is much more maintainable
  * and reliable this way vs having to debug on every version of PDC
  * on every box.
  */
 
 #include <linux/capability.h>
@@ -195,8 +195,8 @@ static int perf_config(uint32_t *image_ptr);
 static int perf_release(struct inode *inode, struct file *file);
 static int perf_open(struct inode *inode, struct file *file);
 static ssize_t perf_read(struct file *file, char __user *buf, size_t cnt, loff_t *ppos);
-static ssize_t perf_write(struct file *file, const char __user *buf, size_t count,
-	loff_t *ppos);
+static ssize_t perf_write(struct file *file, const char __user *buf,
+	size_t count, loff_t *ppos);
 static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
 static void perf_start_counters(void);
 static int perf_stop_counters(uint32_t *raddr);
@@ -222,7 +222,7 @@ extern void perf_intrigue_disable_perf_counters (void);
 /*
  * configure:
  *
  * Configure the cpu with a given data image. First turn off the counters,
  * then download the image, then turn the counters back on.
  */
 static int perf_config(uint32_t *image_ptr)
@@ -234,7 +234,7 @@ static int perf_config(uint32_t *image_ptr)
 	error = perf_stop_counters(raddr);
 	if (error != 0) {
 		printk("perf_config: perf_stop_counters = %ld\n", error);
 		return -EINVAL;
 	}
 
 printk("Preparing to write image\n");
@@ -242,7 +242,7 @@ printk("Preparing to write image\n");
 	error = perf_write_image((uint64_t *)image_ptr);
 	if (error != 0) {
 		printk("perf_config: DOWNLOAD = %ld\n", error);
 		return -EINVAL;
 	}
 
 printk("Preparing to start counters\n");
@@ -254,7 +254,7 @@ printk("Preparing to start counters\n");
 }
 
 /*
  * Open the device and initialize all of its memory. The device is only
  * opened once, but can be "queried" by multiple processes that know its
  * file descriptor.
  */
@@ -298,19 +298,19 @@ static ssize_t perf_read(struct file *file, char __user *buf, size_t cnt, loff_t
  * called on the processor that the download should happen
  * on.
  */
-static ssize_t perf_write(struct file *file, const char __user *buf, size_t count,
-	loff_t *ppos)
+static ssize_t perf_write(struct file *file, const char __user *buf,
+	size_t count, loff_t *ppos)
 {
 	size_t image_size;
 	uint32_t image_type;
 	uint32_t interface_type;
 	uint32_t test;
 
 	if (perf_processor_interface == ONYX_INTF)
 		image_size = PCXU_IMAGE_SIZE;
 	else if (perf_processor_interface == CUDA_INTF)
 		image_size = PCXW_IMAGE_SIZE;
 	else
 		return -EFAULT;
 
 	if (!capable(CAP_SYS_ADMIN))
@@ -330,22 +330,22 @@ static ssize_t perf_write(struct file *file, const char __user *buf, size_t coun
 
 	/* First check the machine type is correct for
 	   the requested image */
 	if (((perf_processor_interface == CUDA_INTF) &&
 	     (interface_type != CUDA_INTF)) ||
 	    ((perf_processor_interface == ONYX_INTF) &&
 	     (interface_type != ONYX_INTF)))
 		return -EINVAL;
 
 	/* Next check to make sure the requested image
 	   is valid */
 	if (((interface_type == CUDA_INTF) &&
 	    (test >= MAX_CUDA_IMAGES)) ||
 	    ((interface_type == ONYX_INTF) &&
 	    (test >= MAX_ONYX_IMAGES)))
 		return -EINVAL;
 
 	/* Copy the image into the processor */
 	if (interface_type == CUDA_INTF)
 		return perf_config(cuda_images[test]);
 	else
 		return perf_config(onyx_images[test]);
@@ -359,7 +359,7 @@ static ssize_t perf_write(struct file *file, const char __user *buf, size_t coun
 static void perf_patch_images(void)
 {
 #if 0 /* FIXME!! */
 /*
  * NOTE: this routine is VERY specific to the current TLB image.
  * If the image is changed, this routine might also need to be changed.
  */
@@ -367,9 +367,9 @@ static void perf_patch_images(void)
 	extern void $i_dtlb_miss_2_0();
 	extern void PA2_0_iva();
 
 	/*
 	 * We can only use the lower 32-bits, the upper 32-bits should be 0
 	 * anyway given this is in the kernel
 	 */
 	uint32_t itlb_addr = (uint32_t)&($i_itlb_miss_2_0);
 	uint32_t dtlb_addr = (uint32_t)&($i_dtlb_miss_2_0);
@@ -377,21 +377,21 @@ static void perf_patch_images(void)
 
 	if (perf_processor_interface == ONYX_INTF) {
 		/* clear last 2 bytes */
 		onyx_images[TLBMISS][15] &= 0xffffff00;
 		/* set 2 bytes */
 		onyx_images[TLBMISS][15] |= (0x000000ff&((dtlb_addr) >> 24));
 		onyx_images[TLBMISS][16] = (dtlb_addr << 8)&0xffffff00;
 		onyx_images[TLBMISS][17] = itlb_addr;
 
 		/* clear last 2 bytes */
 		onyx_images[TLBHANDMISS][15] &= 0xffffff00;
 		/* set 2 bytes */
 		onyx_images[TLBHANDMISS][15] |= (0x000000ff&((dtlb_addr) >> 24));
 		onyx_images[TLBHANDMISS][16] = (dtlb_addr << 8)&0xffffff00;
 		onyx_images[TLBHANDMISS][17] = itlb_addr;
 
 		/* clear last 2 bytes */
 		onyx_images[BIG_CPI][15] &= 0xffffff00;
 		/* set 2 bytes */
 		onyx_images[BIG_CPI][15] |= (0x000000ff&((dtlb_addr) >> 24));
 		onyx_images[BIG_CPI][16] = (dtlb_addr << 8)&0xffffff00;
@@ -404,24 +404,24 @@ static void perf_patch_images(void)
 
 	} else if (perf_processor_interface == CUDA_INTF) {
 		/* Cuda interface */
 		cuda_images[TLBMISS][16] =
 			(cuda_images[TLBMISS][16]&0xffff0000) |
 			((dtlb_addr >> 8)&0x0000ffff);
 		cuda_images[TLBMISS][17] =
 			((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff);
 		cuda_images[TLBMISS][18] = (itlb_addr << 16)&0xffff0000;
 
 		cuda_images[TLBHANDMISS][16] =
 			(cuda_images[TLBHANDMISS][16]&0xffff0000) |
 			((dtlb_addr >> 8)&0x0000ffff);
 		cuda_images[TLBHANDMISS][17] =
 			((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff);
 		cuda_images[TLBHANDMISS][18] = (itlb_addr << 16)&0xffff0000;
 
 		cuda_images[BIG_CPI][16] =
 			(cuda_images[BIG_CPI][16]&0xffff0000) |
 			((dtlb_addr >> 8)&0x0000ffff);
 		cuda_images[BIG_CPI][17] =
 			((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff);
 		cuda_images[BIG_CPI][18] = (itlb_addr << 16)&0xffff0000;
 	} else {
@@ -433,7 +433,7 @@ static void perf_patch_images(void)
 
 /*
  * ioctl routine
  * All routines effect the processor that they are executed on. Thus you
  * must be running on the processor that you wish to change.
  */
 
@@ -459,7 +459,7 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 	}
 
 	/* copy out the Counters */
 	if (copy_to_user((void __user *)arg, raddr,
 			sizeof (raddr)) != 0) {
 		error = -EFAULT;
 		break;
@@ -487,7 +487,7 @@ static const struct file_operations perf_fops = {
 	.open = perf_open,
 	.release = perf_release
 };
 
 static struct miscdevice perf_dev = {
 	MISC_DYNAMIC_MINOR,
 	PA_PERF_DEV,
@@ -595,7 +595,7 @@ static int perf_stop_counters(uint32_t *raddr)
 		/* OR sticky2 (bit 1496) to counter2 bit 32 */
 		tmp64 |= (userbuf[23] >> 8) & 0x0000000080000000;
 		raddr[2] = (uint32_t)tmp64;
 
 		/* Counter3 is bits 1497 to 1528 */
 		tmp64 = (userbuf[23] >> 7) & 0x00000000ffffffff;
 		/* OR sticky3 (bit 1529) to counter3 bit 32 */
@@ -617,7 +617,7 @@ static int perf_stop_counters(uint32_t *raddr)
 		userbuf[22] = 0;
 		userbuf[23] = 0;
 
 		/*
 		 * Write back the zeroed bytes + the image given
 		 * the read was destructive.
 		 */
@@ -625,13 +625,13 @@ static int perf_stop_counters(uint32_t *raddr)
 	} else {
 
 		/*
 		 * Read RDR-15 which contains the counters and sticky bits
 		 */
 		if (!perf_rdr_read_ubuf(15, userbuf)) {
 			return -13;
 		}
 
 		/*
 		 * Clear out the counters
 		 */
 		perf_rdr_clear(15);
@@ -644,7 +644,7 @@ static int perf_stop_counters(uint32_t *raddr)
 		raddr[2] = (uint32_t)((userbuf[1] >> 32) & 0x00000000ffffffffUL);
 		raddr[3] = (uint32_t)(userbuf[1] & 0x00000000ffffffffUL);
 	}
 
 	return 0;
 }
 
@@ -682,7 +682,7 @@ static int perf_rdr_read_ubuf(uint32_t rdr_num, uint64_t *buffer)
 	i = tentry->num_words;
 	while (i--) {
 		buffer[i] = 0;
 	}
 
 	/* Check for bits an even number of 64 */
 	if ((xbits = width & 0x03f) != 0) {
@@ -808,18 +808,22 @@ static int perf_write_image(uint64_t *memaddr)
 	}
 
 	runway = ioremap_nocache(cpu_device->hpa.start, 4096);
+	if (!runway) {
+		pr_err("perf_write_image: ioremap failed!\n");
+		return -ENOMEM;
+	}
 
 	/* Merge intrigue bits into Runway STATUS 0 */
 	tmp64 = __raw_readq(runway + RUNWAY_STATUS) & 0xffecfffffffffffful;
 	__raw_writeq(tmp64 | (*memaddr++ & 0x0013000000000000ul),
 		     runway + RUNWAY_STATUS);
 
 	/* Write RUNWAY DEBUG registers */
 	for (i = 0; i < 8; i++) {
 		__raw_writeq(*memaddr++, runway + RUNWAY_DEBUG);
 	}
 
 	return 0;
 }
 
 /*
@@ -843,7 +847,7 @@ printk("perf_rdr_write\n");
 			perf_rdr_shift_out_U(rdr_num, buffer[i]);
 		} else {
 			perf_rdr_shift_out_W(rdr_num, buffer[i]);
 		}
 	}
 printk("perf_rdr_write done\n");
 }
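Apart from whitespace cleanup, the one functional change in this file is the new NULL check after ioremap_nocache(). It follows the standard pattern for mapping a fixed-size MMIO window, sketched here in isolation; the function name and the hpa parameter are illustrative, not from the patch:

	/* Sketch of the error-handling pattern the hunk above adds:
	 * ioremap can fail, so never dereference its result unchecked. */
	static int write_one_reg(unsigned long hpa, u64 value)
	{
		void __iomem *base = ioremap_nocache(hpa, 4096);

		if (!base)
			return -ENOMEM;

		__raw_writeq(value, base);
		iounmap(base);	/* drop the temporary mapping again */
		return 0;
	}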
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
index 06f7ca7fe70b..4516a5b53f38 100644
--- a/arch/parisc/kernel/process.c
+++ b/arch/parisc/kernel/process.c
@@ -142,6 +142,10 @@ void machine_power_off(void)
 
 	printk(KERN_EMERG "System shut down completed.\n"
 	       "Please power this system off now.");
+
+	/* prevent soft lockup/stalled CPU messages for endless loop. */
+	rcu_sysrq_start();
+	for (;;);
 }
 
 void (*pm_power_off)(void) = machine_power_off;
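machine_power_off() now parks the CPU instead of returning into a caller that expects never to resume; rcu_sysrq_start() tells RCU to expect a quiescent CPU so the stall detector stays silent. The same pattern in isolation, as a hedged sketch (the function name is made up for illustration):

	/* Sketch: halting a CPU without tripping RCU stall reports.
	 * The key step is informing RCU before entering the dead loop. */
	static void __noreturn park_this_cpu(void)
	{
		rcu_sysrq_start();	/* suppress RCU stall warnings */
		for (;;)
			cpu_relax();	/* low-power spin, never returns */
	}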
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
index 3cfef1de8061..44aeaa9c039f 100644
--- a/arch/parisc/kernel/syscall_table.S
+++ b/arch/parisc/kernel/syscall_table.S
@@ -444,6 +444,7 @@
 	ENTRY_SAME(copy_file_range)
 	ENTRY_COMP(preadv2)
 	ENTRY_COMP(pwritev2)
+	ENTRY_SAME(statx)
 
 
 .ifne (. - 90b) - (__NR_Linux_syscalls * (91b - 90b))
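With the table entry above and __NR_statx defined in unistd.h, userspace can reach the new system call even before libc grows a wrapper. An illustrative caller using glibc's syscall(2); the path, flags and mask are chosen arbitrarily:

	#include <linux/stat.h>		/* struct statx, STATX_SIZE */
	#include <sys/syscall.h>	/* __NR_statx */
	#include <fcntl.h>		/* AT_FDCWD */
	#include <unistd.h>
	#include <stdio.h>

	int main(void)
	{
		struct statx stx;

		/* statx(dirfd, pathname, flags, mask, buf) */
		if (syscall(__NR_statx, AT_FDCWD, "/etc/hostname", 0,
			    STATX_SIZE, &stx) != 0)
			return 1;
		printf("size = %llu bytes\n",
		       (unsigned long long)stx.stx_size);
		return 0;
	}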
diff --git a/arch/parisc/lib/Makefile b/arch/parisc/lib/Makefile
index 8fa92b8d839a..f2dac4d73b1b 100644
--- a/arch/parisc/lib/Makefile
+++ b/arch/parisc/lib/Makefile
@@ -2,7 +2,7 @@
 # Makefile for parisc-specific library files
 #
 
-lib-y	:= lusercopy.o bitops.o checksum.o io.o memset.o fixup.o memcpy.o \
+lib-y	:= lusercopy.o bitops.o checksum.o io.o memset.o memcpy.o \
 	   ucmpdi2.o delay.o
 
 obj-y	:= iomap.o
diff --git a/arch/parisc/lib/fixup.S b/arch/parisc/lib/fixup.S
deleted file mode 100644
index a5b72f22c7a6..000000000000
--- a/arch/parisc/lib/fixup.S
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * Linux/PA-RISC Project (http://www.parisc-linux.org/)
- *
- * Copyright (C) 2004 Randolph Chung <tausq@debian.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * Fixup routines for kernel exception handling.
- */
-#include <asm/asm-offsets.h>
-#include <asm/assembly.h>
-#include <asm/errno.h>
-#include <linux/linkage.h>
-
-#ifdef CONFIG_SMP
-	.macro  get_fault_ip t1 t2
-	loadgp
-	addil LT%__per_cpu_offset,%r27
-	LDREG RT%__per_cpu_offset(%r1),\t1
-	/* t2 = smp_processor_id() */
-	mfctl 30,\t2
-	ldw TI_CPU(\t2),\t2
-#ifdef CONFIG_64BIT
-	extrd,u \t2,63,32,\t2
-#endif
-	/* t2 = &__per_cpu_offset[smp_processor_id()]; */
-	LDREGX \t2(\t1),\t2
-	addil LT%exception_data,%r27
-	LDREG RT%exception_data(%r1),\t1
-	/* t1 = this_cpu_ptr(&exception_data) */
-	add,l \t1,\t2,\t1
-	/* %r27 = t1->fault_gp - restore gp */
-	LDREG EXCDATA_GP(\t1), %r27
-	/* t1 = t1->fault_ip */
-	LDREG EXCDATA_IP(\t1), \t1
-	.endm
-#else
-	.macro  get_fault_ip t1 t2
-	loadgp
-	/* t1 = this_cpu_ptr(&exception_data) */
-	addil LT%exception_data,%r27
-	LDREG RT%exception_data(%r1),\t2
-	/* %r27 = t2->fault_gp - restore gp */
-	LDREG EXCDATA_GP(\t2), %r27
-	/* t1 = t2->fault_ip */
-	LDREG EXCDATA_IP(\t2), \t1
-	.endm
-#endif
-
-	.level LEVEL
-
-	.text
-	.section .fixup, "ax"
-
-	/* get_user() fixups, store -EFAULT in r8, and 0 in r9 */
-ENTRY_CFI(fixup_get_user_skip_1)
-	get_fault_ip %r1,%r8
-	ldo 4(%r1), %r1
-	ldi -EFAULT, %r8
-	bv %r0(%r1)
-	copy %r0, %r9
-ENDPROC_CFI(fixup_get_user_skip_1)
-
-ENTRY_CFI(fixup_get_user_skip_2)
-	get_fault_ip %r1,%r8
-	ldo 8(%r1), %r1
-	ldi -EFAULT, %r8
-	bv %r0(%r1)
-	copy %r0, %r9
-ENDPROC_CFI(fixup_get_user_skip_2)
-
-	/* put_user() fixups, store -EFAULT in r8 */
-ENTRY_CFI(fixup_put_user_skip_1)
-	get_fault_ip %r1,%r8
-	ldo 4(%r1), %r1
-	bv %r0(%r1)
-	ldi -EFAULT, %r8
-ENDPROC_CFI(fixup_put_user_skip_1)
-
-ENTRY_CFI(fixup_put_user_skip_2)
-	get_fault_ip %r1,%r8
-	ldo 8(%r1), %r1
-	bv %r0(%r1)
-	ldi -EFAULT, %r8
-ENDPROC_CFI(fixup_put_user_skip_2)
-
diff --git a/arch/parisc/lib/lusercopy.S b/arch/parisc/lib/lusercopy.S
index 56845de6b5df..f01188c044ee 100644
--- a/arch/parisc/lib/lusercopy.S
+++ b/arch/parisc/lib/lusercopy.S
@@ -5,6 +5,8 @@
  *    Copyright (C) 2000 Richard Hirst <rhirst with parisc-linux.org>
  *    Copyright (C) 2001 Matthieu Delahaye <delahaym at esiee.fr>
  *    Copyright (C) 2003 Randolph Chung <tausq with parisc-linux.org>
+ *    Copyright (C) 2017 Helge Deller <deller@gmx.de>
+ *    Copyright (C) 2017 John David Anglin <dave.anglin@bell.net>
  *
  *
  *    This program is free software; you can redistribute it and/or modify
@@ -132,4 +134,320 @@ ENDPROC_CFI(lstrnlen_user)
 
 	.procend
 
+
+
+/*
+ * unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len)
+ *
+ * Inputs:
+ * - sr1 already contains space of source region
+ * - sr2 already contains space of destination region
+ *
+ * Returns:
+ * - number of bytes that could not be copied.
+ *   On success, this will be zero.
+ *
+ * This code is based on a C-implementation of a copy routine written by
+ * Randolph Chung, which in turn was derived from the glibc.
+ *
+ * Several strategies are tried to try to get the best performance for various
+ * conditions. In the optimal case, we copy by loops that copy 32- or 16-bytes
+ * at a time using general registers. Unaligned copies are handled either by
+ * aligning the destination and then using shift-and-write method, or in a few
+ * cases by falling back to a byte-at-a-time copy.
+ *
+ * Testing with various alignments and buffer sizes shows that this code is
+ * often >10x faster than a simple byte-at-a-time copy, even for strangely
+ * aligned operands. It is interesting to note that the glibc version of memcpy
+ * (written in C) is actually quite fast already. This routine is able to beat
+ * it by 30-40% for aligned copies because of the loop unrolling, but in some
+ * cases the glibc version is still slightly faster. This lends more
+ * credibility that gcc can generate very good code as long as we are careful.
+ *
+ * Possible optimizations:
+ * - add cache prefetching
+ * - try not to use the post-increment address modifiers; they may create
+ *   additional interlocks. Assumption is that those were only efficient on old
+ *   machines (pre PA8000 processors)
+ */
+
+	dst = arg0
+	src = arg1
+	len = arg2
+	end = arg3
+	t1  = r19
+	t2  = r20
+	t3  = r21
+	t4  = r22
+	srcspc = sr1
+	dstspc = sr2
+
+	t0 = r1
+	a1 = t1
+	a2 = t2
+	a3 = t3
+	a0 = t4
+
+	save_src = ret0
+	save_dst = ret1
+	save_len = r31
+
+ENTRY_CFI(pa_memcpy)
+	.proc
+	.callinfo NO_CALLS
+	.entry
+
+	/* Last destination address */
+	add	dst,len,end
+
+	/* short copy with less than 16 bytes? */
+	cmpib,>>=,n 15,len,.Lbyte_loop
+
+	/* same alignment? */
+	xor	src,dst,t0
+	extru	t0,31,2,t1
+	cmpib,<>,n 0,t1,.Lunaligned_copy
+
+#ifdef CONFIG_64BIT
+	/* only do 64-bit copies if we can get aligned. */
+	extru	t0,31,3,t1
+	cmpib,<>,n 0,t1,.Lalign_loop32
+
+	/* loop until we are 64-bit aligned */
+.Lalign_loop64:
+	extru	dst,31,3,t1
+	cmpib,=,n	0,t1,.Lcopy_loop_16
+20:	ldb,ma	1(srcspc,src),t1
+21:	stb,ma	t1,1(dstspc,dst)
+	b	.Lalign_loop64
+	ldo	-1(len),len
+
+	ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
+	ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
+
+	ldi	31,t0
+.Lcopy_loop_16:
+	cmpb,COND(>>=),n t0,len,.Lword_loop
+
+10:	ldd	0(srcspc,src),t1
+11:	ldd	8(srcspc,src),t2
+	ldo	16(src),src
+12:	std,ma	t1,8(dstspc,dst)
+13:	std,ma	t2,8(dstspc,dst)
+14:	ldd	0(srcspc,src),t1
+15:	ldd	8(srcspc,src),t2
+	ldo	16(src),src
+16:	std,ma	t1,8(dstspc,dst)
+17:	std,ma	t2,8(dstspc,dst)
+
+	ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
+	ASM_EXCEPTIONTABLE_ENTRY(11b,.Lcopy16_fault)
+	ASM_EXCEPTIONTABLE_ENTRY(12b,.Lcopy_done)
+	ASM_EXCEPTIONTABLE_ENTRY(13b,.Lcopy_done)
+	ASM_EXCEPTIONTABLE_ENTRY(14b,.Lcopy_done)
+	ASM_EXCEPTIONTABLE_ENTRY(15b,.Lcopy16_fault)
+	ASM_EXCEPTIONTABLE_ENTRY(16b,.Lcopy_done)
+	ASM_EXCEPTIONTABLE_ENTRY(17b,.Lcopy_done)
+
+	b	.Lcopy_loop_16
+	ldo	-32(len),len
+
+.Lword_loop:
+	cmpib,COND(>>=),n 3,len,.Lbyte_loop
+20:	ldw,ma	4(srcspc,src),t1
+21:	stw,ma	t1,4(dstspc,dst)
+	b	.Lword_loop
+	ldo	-4(len),len
+
+	ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
+	ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
+
+#endif /* CONFIG_64BIT */
+
+	/* loop until we are 32-bit aligned */
+.Lalign_loop32:
+	extru	dst,31,2,t1
+	cmpib,=,n	0,t1,.Lcopy_loop_4
+20:	ldb,ma	1(srcspc,src),t1
+21:	stb,ma	t1,1(dstspc,dst)
+	b	.Lalign_loop32
+	ldo	-1(len),len
+
+	ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
+	ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
+
+
+.Lcopy_loop_4:
+	cmpib,COND(>>=),n 15,len,.Lbyte_loop
+
+10:	ldw	0(srcspc,src),t1
+11:	ldw	4(srcspc,src),t2
+12:	stw,ma	t1,4(dstspc,dst)
+13:	stw,ma	t2,4(dstspc,dst)
+14:	ldw	8(srcspc,src),t1
+15:	ldw	12(srcspc,src),t2
+	ldo	16(src),src
+16:	stw,ma	t1,4(dstspc,dst)
+17:	stw,ma	t2,4(dstspc,dst)
+
+	ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
+	ASM_EXCEPTIONTABLE_ENTRY(11b,.Lcopy8_fault)
+	ASM_EXCEPTIONTABLE_ENTRY(12b,.Lcopy_done)
+	ASM_EXCEPTIONTABLE_ENTRY(13b,.Lcopy_done)
+	ASM_EXCEPTIONTABLE_ENTRY(14b,.Lcopy_done)
+	ASM_EXCEPTIONTABLE_ENTRY(15b,.Lcopy8_fault)
+	ASM_EXCEPTIONTABLE_ENTRY(16b,.Lcopy_done)
+	ASM_EXCEPTIONTABLE_ENTRY(17b,.Lcopy_done)
+
+	b	.Lcopy_loop_4
+	ldo	-16(len),len
+
+.Lbyte_loop:
+	cmpclr,COND(<>) len,%r0,%r0
+	b,n	.Lcopy_done
+20:	ldb	0(srcspc,src),t1
+	ldo	1(src),src
+21:	stb,ma	t1,1(dstspc,dst)
+	b	.Lbyte_loop
+	ldo	-1(len),len
+
+	ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
+	ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
+
+.Lcopy_done:
+	bv	%r0(%r2)
+	sub	end,dst,ret0
+
+
+	/* src and dst are not aligned the same way. */
+	/* need to go the hard way */
+.Lunaligned_copy:
+	/* align until dst is 32bit-word-aligned */
+	extru	dst,31,2,t1
+	cmpib,COND(=),n	0,t1,.Lcopy_dstaligned
+20:	ldb	0(srcspc,src),t1
+	ldo	1(src),src
+21:	stb,ma	t1,1(dstspc,dst)
+	b	.Lunaligned_copy
+	ldo	-1(len),len
+
+	ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
+	ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
+
+.Lcopy_dstaligned:
+
+	/* store src, dst and len in safe place */
+	copy	src,save_src
+	copy	dst,save_dst
+	copy	len,save_len
+
+	/* len now needs give number of words to copy */
+	SHRREG	len,2,len
+
+	/*
+	 * Copy from a not-aligned src to an aligned dst using shifts.
+	 * Handles 4 words per loop.
+	 */
+
+	depw,z src,28,2,t0
+	subi 32,t0,t0
+	mtsar t0
+	extru len,31,2,t0
+	cmpib,= 2,t0,.Lcase2
+	/* Make src aligned by rounding it down. */
+	depi 0,31,2,src
+
+	cmpiclr,<> 3,t0,%r0
+	b,n .Lcase3
+	cmpiclr,<> 1,t0,%r0
+	b,n .Lcase1
+.Lcase0:
+	cmpb,= %r0,len,.Lcda_finish
+	nop
+
+1:	ldw,ma 4(srcspc,src), a3
+	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+1:	ldw,ma 4(srcspc,src), a0
+	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+	b,n .Ldo3
+.Lcase1:
+1:	ldw,ma 4(srcspc,src), a2
+	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+1:	ldw,ma 4(srcspc,src), a3
+	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+	ldo -1(len),len
+	cmpb,=,n %r0,len,.Ldo0
+.Ldo4:
+1:	ldw,ma 4(srcspc,src), a0
+	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+	shrpw a2, a3, %sar, t0
+1:	stw,ma t0, 4(dstspc,dst)
+	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
+.Ldo3:
+1:	ldw,ma 4(srcspc,src), a1
+	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+	shrpw a3, a0, %sar, t0
+1:	stw,ma t0, 4(dstspc,dst)
+	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
+.Ldo2:
+1:	ldw,ma 4(srcspc,src), a2
+	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+	shrpw a0, a1, %sar, t0
+1:	stw,ma t0, 4(dstspc,dst)
+	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
+.Ldo1:
+1:	ldw,ma 4(srcspc,src), a3
+	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+	shrpw a1, a2, %sar, t0
+1:	stw,ma t0, 4(dstspc,dst)
+	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
+	ldo -4(len),len
+	cmpb,<> %r0,len,.Ldo4
+	nop
+.Ldo0:
+	shrpw a2, a3, %sar, t0
+1:	stw,ma t0, 4(dstspc,dst)
+	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
+
+.Lcda_rdfault:
+.Lcda_finish:
+	/* calculate new src, dst and len and jump to byte-copy loop */
+	sub	dst,save_dst,t0
+	add	save_src,t0,src
+	b	.Lbyte_loop
+	sub	save_len,t0,len
+
+.Lcase3:
+1:	ldw,ma 4(srcspc,src), a0
+	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+1:	ldw,ma 4(srcspc,src), a1
+	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+	b .Ldo2
+	ldo 1(len),len
+.Lcase2:
+1:	ldw,ma 4(srcspc,src), a1
+	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+1:	ldw,ma 4(srcspc,src), a2
+	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+	b .Ldo1
+	ldo 2(len),len
+
+
+	/* fault exception fixup handlers: */
+#ifdef CONFIG_64BIT
+.Lcopy16_fault:
+10:	b	.Lcopy_done
+	std,ma	t1,8(dstspc,dst)
+	ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
+#endif
+
+.Lcopy8_fault:
+10:	b	.Lcopy_done
+	stw,ma	t1,4(dstspc,dst)
+	ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
+
+	.exit
+ENDPROC_CFI(pa_memcpy)
+	.procend
+
 	.end
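For readers who do not speak PA-RISC assembler, the control flow above maps onto roughly this C skeleton. It is a sketch only: no space registers, no exception tables, and the separate 64-bit and 32-bit unrolled loops are collapsed into a single word loop.

	#include <stddef.h>
	#include <stdint.h>

	/* C model of pa_memcpy's strategy: if src and dst can reach
	 * mutual word alignment, align dst bytewise, bulk-copy words,
	 * then finish the tail bytewise; otherwise the real code uses
	 * shift-and-merge, modeled here by the plain byte loop.
	 * Returns bytes NOT copied -- always 0 here, since a C model
	 * cannot take the faults the assembler version recovers from. */
	static size_t pa_memcpy_model(void *dstp, const void *srcp,
				      size_t len)
	{
		unsigned char *d = dstp;
		const unsigned char *s = srcp;

		if ((((uintptr_t)d ^ (uintptr_t)s) & (sizeof(long) - 1)) == 0) {
			while (len && ((uintptr_t)d & (sizeof(long) - 1))) {
				*d++ = *s++;
				len--;
			}
			while (len >= sizeof(long)) {
				*(long *)(void *)d = *(const long *)(const void *)s;
				d += sizeof(long);
				s += sizeof(long);
				len -= sizeof(long);
			}
		}
		while (len--)
			*d++ = *s++;
		return 0;
	}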
diff --git a/arch/parisc/lib/memcpy.c b/arch/parisc/lib/memcpy.c
index f82ff10ed974..b3d47ec1d80a 100644
--- a/arch/parisc/lib/memcpy.c
+++ b/arch/parisc/lib/memcpy.c
@@ -2,7 +2,7 @@
  *    Optimized memory copy routines.
  *
  *    Copyright (C) 2004 Randolph Chung <tausq@debian.org>
- *    Copyright (C) 2013 Helge Deller <deller@gmx.de>
+ *    Copyright (C) 2013-2017 Helge Deller <deller@gmx.de>
  *
  *    This program is free software; you can redistribute it and/or modify
  *    it under the terms of the GNU General Public License as published by
@@ -21,474 +21,21 @@
  *    Portions derived from the GNU C Library
  *    Copyright (C) 1991, 1997, 2003 Free Software Foundation, Inc.
  *
- * Several strategies are tried to try to get the best performance for various
- * conditions. In the optimal case, we copy 64-bytes in an unrolled loop using
- * fp regs. This is followed by loops that copy 32- or 16-bytes at a time using
- * general registers.  Unaligned copies are handled either by aligning the
- * destination and then using shift-and-write method, or in a few cases by
- * falling back to a byte-at-a-time copy.
- *
- * I chose to implement this in C because it is easier to maintain and debug,
- * and in my experiments it appears that the C code generated by gcc (3.3/3.4
- * at the time of writing) is fairly optimal. Unfortunately some of the
- * semantics of the copy routine (exception handling) is difficult to express
- * in C, so we have to play some tricks to get it to work.
- *
- * All the loads and stores are done via explicit asm() code in order to use
- * the right space registers.
- *
- * Testing with various alignments and buffer sizes shows that this code is
- * often >10x faster than a simple byte-at-a-time copy, even for strangely
- * aligned operands. It is interesting to note that the glibc version
- * of memcpy (written in C) is actually quite fast already. This routine is
- * able to beat it by 30-40% for aligned copies because of the loop unrolling,
- * but in some cases the glibc version is still slightly faster. This lends
- * more credibility that gcc can generate very good code as long as we are
- * careful.
- *
- * TODO:
- * - cache prefetching needs more experimentation to get optimal settings
- * - try not to use the post-increment address modifiers; they create additional
- *   interlocks
- * - replace byte-copy loops with stybs sequences
  */
 
-#ifdef __KERNEL__
 #include <linux/module.h>
 #include <linux/compiler.h>
 #include <linux/uaccess.h>
-#define s_space "%%sr1"
-#define d_space "%%sr2"
-#else
-#include "memcpy.h"
-#define s_space "%%sr0"
-#define d_space "%%sr0"
-#define pa_memcpy new2_copy
-#endif
 
 DECLARE_PER_CPU(struct exception_data, exception_data);
 
-#define preserve_branch(label)	do {					\
-	volatile int dummy = 0;						\
-	/* The following branch is never taken, it's just here to */	\
-	/* prevent gcc from optimizing away our exception code. */	\
-	if (unlikely(dummy != dummy))					\
-		goto label;						\
-} while (0)
-
 #define get_user_space() (segment_eq(get_fs(), KERNEL_DS) ? 0 : mfsp(3))
 #define get_kernel_space() (0)
 
-#define MERGE(w0, sh_1, w1, sh_2) ({					\
-	unsigned int _r;						\
-	asm volatile (							\
-	"mtsar %3\n"							\
-	"shrpw %1, %2, %%sar, %0\n"					\
-	: "=r"(_r)							\
-	: "r"(w0), "r"(w1), "r"(sh_2)					\
-	);								\
-	_r;								\
-})
-#define THRESHOLD	16
-
-#ifdef DEBUG_MEMCPY
-#define DPRINTF(fmt, args...) do { printk(KERN_DEBUG "%s:%d:%s ", __FILE__, __LINE__, __func__ ); printk(KERN_DEBUG fmt, ##args ); } while (0)
-#else
-#define DPRINTF(fmt, args...)
-#endif
-
-#define def_load_ai_insn(_insn,_sz,_tt,_s,_a,_t,_e)	\
-	__asm__ __volatile__ (				\
-	"1:\t" #_insn ",ma " #_sz "(" _s ",%1), %0\n\t"	\
-	ASM_EXCEPTIONTABLE_ENTRY(1b,_e)			\
-	: _tt(_t), "+r"(_a)				\
-	:						\
-	: "r8")
-
-#define def_store_ai_insn(_insn,_sz,_tt,_s,_a,_t,_e)	\
-	__asm__ __volatile__ (				\
-	"1:\t" #_insn ",ma %1, " #_sz "(" _s ",%0)\n\t"	\
-	ASM_EXCEPTIONTABLE_ENTRY(1b,_e)			\
-	: "+r"(_a)					\
-	: _tt(_t)					\
-	: "r8")
-
-#define ldbma(_s, _a, _t, _e) def_load_ai_insn(ldbs,1,"=r",_s,_a,_t,_e)
-#define stbma(_s, _t, _a, _e) def_store_ai_insn(stbs,1,"r",_s,_a,_t,_e)
-#define ldwma(_s, _a, _t, _e) def_load_ai_insn(ldw,4,"=r",_s,_a,_t,_e)
-#define stwma(_s, _t, _a, _e) def_store_ai_insn(stw,4,"r",_s,_a,_t,_e)
-#define flddma(_s, _a, _t, _e) def_load_ai_insn(fldd,8,"=f",_s,_a,_t,_e)
-#define fstdma(_s, _t, _a, _e) def_store_ai_insn(fstd,8,"f",_s,_a,_t,_e)
-
-#define def_load_insn(_insn,_tt,_s,_o,_a,_t,_e)	\
-	__asm__ __volatile__ (				\
-	"1:\t" #_insn " " #_o "(" _s ",%1), %0\n\t"	\
-	ASM_EXCEPTIONTABLE_ENTRY(1b,_e)			\
-	: _tt(_t)					\
-	: "r"(_a)					\
-	: "r8")
-
-#define def_store_insn(_insn,_tt,_s,_t,_o,_a,_e)	\
-	__asm__ __volatile__ (				\
-	"1:\t" #_insn " %0, " #_o "(" _s ",%1)\n\t"	\
-	ASM_EXCEPTIONTABLE_ENTRY(1b,_e)			\
-	:						\
-	: _tt(_t), "r"(_a)				\
-	: "r8")
-
-#define ldw(_s,_o,_a,_t,_e)	def_load_insn(ldw,"=r",_s,_o,_a,_t,_e)
-#define stw(_s,_t,_o,_a,_e)	def_store_insn(stw,"r",_s,_t,_o,_a,_e)
-
-#ifdef CONFIG_PREFETCH
-static inline void prefetch_src(const void *addr)
-{
-	__asm__("ldw 0(" s_space ",%0), %%r0" : : "r" (addr));
-}
-
-static inline void prefetch_dst(const void *addr)
-{
-	__asm__("ldd 0(" d_space ",%0), %%r0" : : "r" (addr));
-}
-#else
-#define prefetch_src(addr) do { } while(0)
-#define prefetch_dst(addr) do { } while(0)
-#endif
-
-#define PA_MEMCPY_OK		0
-#define PA_MEMCPY_LOAD_ERROR	1
-#define PA_MEMCPY_STORE_ERROR	2
-
-/* Copy from a not-aligned src to an aligned dst, using shifts. Handles 4 words
- * per loop.  This code is derived from glibc.
- */
-static noinline unsigned long copy_dstaligned(unsigned long dst,
-					unsigned long src, unsigned long len)
-{
-	/* gcc complains that a2 and a3 may be uninitialized, but actually
-	 * they cannot be.  Initialize a2/a3 to shut gcc up.
-	 */
-	register unsigned int a0, a1, a2 = 0, a3 = 0;
-	int sh_1, sh_2;
-
-	/* prefetch_src((const void *)src); */
-
-	/* Calculate how to shift a word read at the memory operation
-	   aligned srcp to make it aligned for copy. */
-	sh_1 = 8 * (src % sizeof(unsigned int));
-	sh_2 = 8 * sizeof(unsigned int) - sh_1;
-
-	/* Make src aligned by rounding it down. */
-	src &= -sizeof(unsigned int);
-
-	switch (len % 4)
-	{
-		case 2:
-			/* a1 = ((unsigned int *) src)[0];
-			   a2 = ((unsigned int *) src)[1]; */
-			ldw(s_space, 0, src, a1, cda_ldw_exc);
-			ldw(s_space, 4, src, a2, cda_ldw_exc);
-			src -= 1 * sizeof(unsigned int);
-			dst -= 3 * sizeof(unsigned int);
-			len += 2;
-			goto do1;
-		case 3:
-			/* a0 = ((unsigned int *) src)[0];
-			   a1 = ((unsigned int *) src)[1]; */
-			ldw(s_space, 0, src, a0, cda_ldw_exc);
-			ldw(s_space, 4, src, a1, cda_ldw_exc);
-			src -= 0 * sizeof(unsigned int);
-			dst -= 2 * sizeof(unsigned int);
-			len += 1;
-			goto do2;
-		case 0:
-			if (len == 0)
-				return PA_MEMCPY_OK;
-			/* a3 = ((unsigned int *) src)[0];
-			   a0 = ((unsigned int *) src)[1]; */
-			ldw(s_space, 0, src, a3, cda_ldw_exc);
-			ldw(s_space, 4, src, a0, cda_ldw_exc);
-			src -=-1 * sizeof(unsigned int);
-			dst -= 1 * sizeof(unsigned int);
-			len += 0;
-			goto do3;
-		case 1:
-			/* a2 = ((unsigned int *) src)[0];
-			   a3 = ((unsigned int *) src)[1]; */
-			ldw(s_space, 0, src, a2, cda_ldw_exc);
-			ldw(s_space, 4, src, a3, cda_ldw_exc);
-			src -=-2 * sizeof(unsigned int);
-			dst -= 0 * sizeof(unsigned int);
-			len -= 1;
-			if (len == 0)
-				goto do0;
-			goto do4;			/* No-op. */
-	}
-
-	do
-	{
-		/* prefetch_src((const void *)(src + 4 * sizeof(unsigned int))); */
-do4:
-		/* a0 = ((unsigned int *) src)[0]; */
-		ldw(s_space, 0, src, a0, cda_ldw_exc);
-		/* ((unsigned int *) dst)[0] = MERGE (a2, sh_1, a3, sh_2); */
-		stw(d_space, MERGE (a2, sh_1, a3, sh_2), 0, dst, cda_stw_exc);
-do3:
-		/* a1 = ((unsigned int *) src)[1]; */
-		ldw(s_space, 4, src, a1, cda_ldw_exc);
-		/* ((unsigned int *) dst)[1] = MERGE (a3, sh_1, a0, sh_2); */
-		stw(d_space, MERGE (a3, sh_1, a0, sh_2), 4, dst, cda_stw_exc);
-do2:
-		/* a2 = ((unsigned int *) src)[2]; */
-		ldw(s_space, 8, src, a2, cda_ldw_exc);
-		/* ((unsigned int *) dst)[2] = MERGE (a0, sh_1, a1, sh_2); */
-		stw(d_space, MERGE (a0, sh_1, a1, sh_2), 8, dst, cda_stw_exc);
-do1:
-		/* a3 = ((unsigned int *) src)[3]; */
-		ldw(s_space, 12, src, a3, cda_ldw_exc);
-		/* ((unsigned int *) dst)[3] = MERGE (a1, sh_1, a2, sh_2); */
-		stw(d_space, MERGE (a1, sh_1, a2, sh_2), 12, dst, cda_stw_exc);
-
-		src += 4 * sizeof(unsigned int);
-		dst += 4 * sizeof(unsigned int);
-		len -= 4;
-	}
-	while (len != 0);
-
-do0:
-	/* ((unsigned int *) dst)[0] = MERGE (a2, sh_1, a3, sh_2); */
-	stw(d_space, MERGE (a2, sh_1, a3, sh_2), 0, dst, cda_stw_exc);
-
-	preserve_branch(handle_load_error);
-	preserve_branch(handle_store_error);
-
-	return PA_MEMCPY_OK;
-
-handle_load_error:
-	__asm__ __volatile__ ("cda_ldw_exc:\n");
-	return PA_MEMCPY_LOAD_ERROR;
-
-handle_store_error:
-	__asm__ __volatile__ ("cda_stw_exc:\n");
-	return PA_MEMCPY_STORE_ERROR;
-}
-
-
-/* Returns PA_MEMCPY_OK, PA_MEMCPY_LOAD_ERROR or PA_MEMCPY_STORE_ERROR.
- * In case of an access fault the faulty address can be read from the per_cpu
- * exception data struct. */
-static noinline unsigned long pa_memcpy_internal(void *dstp, const void *srcp,
-					unsigned long len)
-{
-	register unsigned long src, dst, t1, t2, t3;
-	register unsigned char *pcs, *pcd;
-	register unsigned int *pws, *pwd;
-	register double *pds, *pdd;
-	unsigned long ret;
-
-	src = (unsigned long)srcp;
-	dst = (unsigned long)dstp;
-	pcs = (unsigned char *)srcp;
-	pcd = (unsigned char *)dstp;
-
-	/* prefetch_src((const void *)srcp); */
-
-	if (len < THRESHOLD)
-		goto byte_copy;
-
-	/* Check alignment */
-	t1 = (src ^ dst);
-	if (unlikely(t1 & (sizeof(double)-1)))
-		goto unaligned_copy;
-
-	/* src and dst have same alignment. */
-
-	/* Copy bytes till we are double-aligned. */
-	t2 = src & (sizeof(double) - 1);
-	if (unlikely(t2 != 0)) {
-		t2 = sizeof(double) - t2;
-		while (t2 && len) {
-			/* *pcd++ = *pcs++; */
-			ldbma(s_space, pcs, t3, pmc_load_exc);
-			len--;
-			stbma(d_space, t3, pcd, pmc_store_exc);
-			t2--;
-		}
-	}
-
-	pds = (double *)pcs;
-	pdd = (double *)pcd;
-
-#if 0
-	/* Copy 8 doubles at a time */
-	while (len >= 8*sizeof(double)) {
-		register double r1, r2, r3, r4, r5, r6, r7, r8;
-		/* prefetch_src((char *)pds + L1_CACHE_BYTES); */
-		flddma(s_space, pds, r1, pmc_load_exc);
-		flddma(s_space, pds, r2, pmc_load_exc);
-		flddma(s_space, pds, r3, pmc_load_exc);
-		flddma(s_space, pds, r4, pmc_load_exc);
-		fstdma(d_space, r1, pdd, pmc_store_exc);
-		fstdma(d_space, r2, pdd, pmc_store_exc);
-		fstdma(d_space, r3, pdd, pmc_store_exc);
-		fstdma(d_space, r4, pdd, pmc_store_exc);
-
-#if 0
-		if (L1_CACHE_BYTES <= 32)
-			prefetch_src((char *)pds + L1_CACHE_BYTES);
-#endif
-		flddma(s_space, pds, r5, pmc_load_exc);
-		flddma(s_space, pds, r6, pmc_load_exc);
-		flddma(s_space, pds, r7, pmc_load_exc);
-		flddma(s_space, pds, r8, pmc_load_exc);
-		fstdma(d_space, r5, pdd, pmc_store_exc);
-		fstdma(d_space, r6, pdd, pmc_store_exc);
-		fstdma(d_space, r7, pdd, pmc_store_exc);
-		fstdma(d_space, r8, pdd, pmc_store_exc);
-		len -= 8*sizeof(double);
-	}
-#endif
-
-	pws = (unsigned int *)pds;
-	pwd = (unsigned int *)pdd;
-
-word_copy:
-	while (len >= 8*sizeof(unsigned int)) {
-		register unsigned int r1,r2,r3,r4,r5,r6,r7,r8;
-		/* prefetch_src((char *)pws + L1_CACHE_BYTES); */
-		ldwma(s_space, pws, r1, pmc_load_exc);
-		ldwma(s_space, pws, r2, pmc_load_exc);
-		ldwma(s_space, pws, r3, pmc_load_exc);
-		ldwma(s_space, pws, r4, pmc_load_exc);
-		stwma(d_space, r1, pwd, pmc_store_exc);
-		stwma(d_space, r2, pwd, pmc_store_exc);
-		stwma(d_space, r3, pwd, pmc_store_exc);
-		stwma(d_space, r4, pwd, pmc_store_exc);
-
-		ldwma(s_space, pws, r5, pmc_load_exc);
-		ldwma(s_space, pws, r6, pmc_load_exc);
-		ldwma(s_space, pws, r7, pmc_load_exc);
-		ldwma(s_space, pws, r8, pmc_load_exc);
-		stwma(d_space, r5, pwd, pmc_store_exc);
-		stwma(d_space, r6, pwd, pmc_store_exc);
-		stwma(d_space, r7, pwd, pmc_store_exc);
-		stwma(d_space, r8, pwd, pmc_store_exc);
-		len -= 8*sizeof(unsigned int);
-	}
-
-	while (len >= 4*sizeof(unsigned int)) {
-		register unsigned int r1,r2,r3,r4;
-		ldwma(s_space, pws, r1, pmc_load_exc);
-		ldwma(s_space, pws, r2, pmc_load_exc);
-		ldwma(s_space, pws, r3, pmc_load_exc);
-		ldwma(s_space, pws, r4, pmc_load_exc);
-		stwma(d_space, r1, pwd, pmc_store_exc);
-		stwma(d_space, r2, pwd, pmc_store_exc);
-		stwma(d_space, r3, pwd, pmc_store_exc);
-		stwma(d_space, r4, pwd, pmc_store_exc);
-		len -= 4*sizeof(unsigned int);
-	}
-
-	pcs = (unsigned char *)pws;
-	pcd = (unsigned char *)pwd;
-
-byte_copy:
-	while (len) {
-		/* *pcd++ = *pcs++; */
-		ldbma(s_space, pcs, t3, pmc_load_exc);
-		stbma(d_space, t3, pcd, pmc_store_exc);
-		len--;
-	}
-
-	return PA_MEMCPY_OK;
-
-unaligned_copy:
-	/* possibly we are aligned on a word, but not on a double... */
-	if (likely((t1 & (sizeof(unsigned int)-1)) == 0)) {
-		t2 = src & (sizeof(unsigned int) - 1);
-
-		if (unlikely(t2 != 0)) {
-			t2 = sizeof(unsigned int) - t2;
-			while (t2) {
-				/* *pcd++ = *pcs++; */
-				ldbma(s_space, pcs, t3, pmc_load_exc);
-				stbma(d_space, t3, pcd, pmc_store_exc);
-				len--;
-				t2--;
-			}
-		}
-
-		pws = (unsigned int *)pcs;
-		pwd = (unsigned int *)pcd;
-		goto word_copy;
-	}
-
-	/* Align the destination. */
-	if (unlikely((dst & (sizeof(unsigned int) - 1)) != 0)) {
-		t2 = sizeof(unsigned int) - (dst & (sizeof(unsigned int) - 1));
-		while (t2) {
-			/* *pcd++ = *pcs++; */
-			ldbma(s_space, pcs, t3, pmc_load_exc);
-			stbma(d_space, t3, pcd, pmc_store_exc);
-			len--;
-			t2--;
-		}
-		dst = (unsigned long)pcd;
-		src = (unsigned long)pcs;
-	}
-
-	ret = copy_dstaligned(dst, src, len / sizeof(unsigned int));
-	if (ret)
-		return ret;
-
-	pcs += (len & -sizeof(unsigned int));
-	pcd += (len & -sizeof(unsigned int));
-	len %= sizeof(unsigned int);
-
-	preserve_branch(handle_load_error);
-	preserve_branch(handle_store_error);
-
-	goto byte_copy;
-
-handle_load_error:
-	__asm__ __volatile__ ("pmc_load_exc:\n");
-	return PA_MEMCPY_LOAD_ERROR;
-
-handle_store_error:
-	__asm__ __volatile__ ("pmc_store_exc:\n");
-	return PA_MEMCPY_STORE_ERROR;
-}
-
-
 /* Returns 0 for success, otherwise, returns number of bytes not transferred. */
-static unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len)
-{
-	unsigned long ret, fault_addr, reference;
-	struct exception_data *d;
-
-	ret = pa_memcpy_internal(dstp, srcp, len);
-	if (likely(ret == PA_MEMCPY_OK))
-		return 0;
-
-	/* if a load or store fault occured we can get the faulty addr */
-	d = this_cpu_ptr(&exception_data);
-	fault_addr = d->fault_addr;
-
-	/* error in load or store? */
-	if (ret == PA_MEMCPY_LOAD_ERROR)
-		reference = (unsigned long) srcp;
-	else
-		reference = (unsigned long) dstp;
+extern unsigned long pa_memcpy(void *dst, const void *src,
+		unsigned long len);
 
-	DPRINTF("pa_memcpy: fault type = %lu, len=%lu fault_addr=%lu ref=%lu\n",
-		ret, len, fault_addr, reference);
-
-	if (fault_addr >= reference)
-		return len - (fault_addr - reference);
-	else
-		return len;
-}
-
-#ifdef __KERNEL__
 unsigned long __copy_to_user(void __user *dst, const void *src,
 		unsigned long len)
 {
@@ -537,5 +84,3 @@ long probe_kernel_read(void *dst, const void *src, size_t size)
 
 	return __probe_kernel_read(dst, src, size);
 }
-
-#endif
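What survives in this file is the glue: pa_memcpy() (now implemented in lusercopy.S above) still returns the number of bytes it could not copy, and every wrapper forwards that convention. Typical consumption at a call site looks like this sketch; the function and buffer names are illustrative:

	/* Sketch: callers of the usercopy wrappers treat any non-zero
	 * return as a short copy and report -EFAULT. */
	static int send_to_user(void __user *ubuf, const void *kbuf,
				unsigned long len)
	{
		unsigned long not_copied = __copy_to_user(ubuf, kbuf, len);

		return not_copied ? -EFAULT : 0;
	}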
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index deab89a8915a..32ec22146141 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -150,6 +150,23 @@ int fixup_exception(struct pt_regs *regs)
 	d->fault_space = regs->isr;
 	d->fault_addr = regs->ior;
 
+	/*
+	 * Fix up get_user() and put_user().
+	 * ASM_EXCEPTIONTABLE_ENTRY_EFAULT() sets the least-significant
+	 * bit in the relative address of the fixup routine to indicate
+	 * that %r8 should be loaded with -EFAULT to report a userspace
+	 * access error.
+	 */
+	if (fix->fixup & 1) {
+		regs->gr[8] = -EFAULT;
+
+		/* zero target register for get_user() */
+		if (parisc_acctyp(0, regs->iir) == VM_READ) {
+			int treg = regs->iir & 0x1f;
+			regs->gr[treg] = 0;
+		}
+	}
+
 	regs->iaoq[0] = (unsigned long)&fix->fixup + fix->fixup;
 	regs->iaoq[0] &= ~3;
 	/*
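The flag consumed here is produced on the assembler side: because fixup targets are at least word aligned, the low bit of the relative fixup address is free to carry the "load %r8 with -EFAULT" request. The producer half plausibly reduces to a one-line macro; the exact definition lives in the parisc uaccess headers, so treat this as an illustrative reconstruction:

	/* Sketch of the encoding decoded above: an exception-table
	 * entry whose fixup address has bit 0 set. The +1 is safe
	 * because fixup code is word-aligned. */
	#define ASM_EXCEPTIONTABLE_ENTRY_EFAULT(fault_addr, except_addr) \
		ASM_EXCEPTIONTABLE_ENTRY(fault_addr, except_addr + 1)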
diff --git a/arch/powerpc/boot/zImage.lds.S b/arch/powerpc/boot/zImage.lds.S
index 861e72109df2..f080abfc2f83 100644
--- a/arch/powerpc/boot/zImage.lds.S
+++ b/arch/powerpc/boot/zImage.lds.S
@@ -68,6 +68,7 @@ SECTIONS
 	}
 
 #ifdef CONFIG_PPC64_BOOT_WRAPPER
+	. = ALIGN(256);
 	.got :
 	{
 		__toc_start = .;
diff --git a/arch/powerpc/crypto/crc32c-vpmsum_glue.c b/arch/powerpc/crypto/crc32c-vpmsum_glue.c
index 9fa046d56eba..f058e0c3e4d4 100644
--- a/arch/powerpc/crypto/crc32c-vpmsum_glue.c
+++ b/arch/powerpc/crypto/crc32c-vpmsum_glue.c
@@ -33,10 +33,13 @@ static u32 crc32c_vpmsum(u32 crc, unsigned char const *p, size_t len)
 	}
 
 	if (len & ~VMX_ALIGN_MASK) {
+		preempt_disable();
 		pagefault_disable();
 		enable_kernel_altivec();
 		crc = __crc32c_vpmsum(crc, p, len & ~VMX_ALIGN_MASK);
+		disable_kernel_altivec();
 		pagefault_enable();
+		preempt_enable();
 	}
 
 	tail = len & VMX_ALIGN_MASK;
@@ -52,7 +55,7 @@ static int crc32c_vpmsum_cra_init(struct crypto_tfm *tfm)
 {
 	u32 *key = crypto_tfm_ctx(tfm);
 
-	*key = 0;
+	*key = ~0;
 
 	return 0;
 }
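The *key = ~0 change matters because CRC32C is specified with an all-ones initial value and a final inversion; seeding with 0 makes leading zero bytes invisible to the checksum. A self-contained bitwise reference, handy for cross-checking the vector implementation:

	#include <stdint.h>
	#include <stddef.h>

	/* Reference CRC32C (Castagnoli), reflected polynomial
	 * 0x82f63b78. Seed with ~0 and invert at the end, matching
	 * the convention the fix above restores;
	 * crc32c_ref("123456789", 9) == 0xe3069283. */
	static uint32_t crc32c_update(uint32_t crc, const unsigned char *p,
				      size_t len)
	{
		while (len--) {
			crc ^= *p++;
			for (int i = 0; i < 8; i++)
				crc = (crc >> 1) ^ (0x82f63b78u & -(crc & 1u));
		}
		return crc;
	}

	static uint32_t crc32c_ref(const void *buf, size_t len)
	{
		return ~crc32c_update(~0u, buf, len);
	}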
diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h
index 73eb794d6163..bc5fdfd22788 100644
--- a/arch/powerpc/include/asm/bitops.h
+++ b/arch/powerpc/include/asm/bitops.h
@@ -51,6 +51,10 @@
 #define PPC_BIT(bit)		(1UL << PPC_BITLSHIFT(bit))
 #define PPC_BITMASK(bs, be)	((PPC_BIT(bs) - PPC_BIT(be)) | PPC_BIT(bs))
 
+/* Put a PPC bit into a "normal" bit position */
+#define PPC_BITEXTRACT(bits, ppc_bit, dst_bit)			\
+	((((bits) >> PPC_BITLSHIFT(ppc_bit)) & 1) << (dst_bit))
+
 #include <asm/barrier.h>
 
 /* Macro for generating the ***_bits() functions */
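PPC_BITEXTRACT exists because POWER documentation numbers bits from the MSB (bit 0 is the top of the 64-bit word), while C code wants LSB-relative positions. A small standalone demonstration, mirroring how mce.h composes P9_SRR1_MC_IFETCH in the next file; the SRR1 value here is hypothetical:

	#include <stdint.h>
	#include <stdio.h>

	/* IBM bit numbering: bit 0 is the most significant of 64. */
	#define PPC_BITLSHIFT(be)	(63 - (be))
	#define PPC_BIT(bit)		(1UL << PPC_BITLSHIFT(bit))
	#define PPC_BITEXTRACT(bits, ppc_bit, dst_bit) \
		((((bits) >> PPC_BITLSHIFT(ppc_bit)) & 1) << (dst_bit))

	int main(void)
	{
		/* hypothetical SRR1 with IBM bits 36 and 45 set */
		uint64_t srr1 = PPC_BIT(36) | PPC_BIT(45);

		/* gather scattered bits 45,44,43,36 into a 4-bit code */
		unsigned int code = PPC_BITEXTRACT(srr1, 45, 0) |
				    PPC_BITEXTRACT(srr1, 44, 1) |
				    PPC_BITEXTRACT(srr1, 43, 2) |
				    PPC_BITEXTRACT(srr1, 36, 3);

		printf("mc ifetch code = %u\n", code);	/* prints 9 */
		return 0;
	}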
diff --git a/arch/powerpc/include/asm/mce.h b/arch/powerpc/include/asm/mce.h
index f97d8cb6bdf6..ed62efe01e49 100644
--- a/arch/powerpc/include/asm/mce.h
+++ b/arch/powerpc/include/asm/mce.h
@@ -66,6 +66,55 @@
 
 #define P8_DSISR_MC_SLB_ERRORS		(P7_DSISR_MC_SLB_ERRORS | \
 					 P8_DSISR_MC_ERAT_MULTIHIT_SEC)
+
+/*
+ * Machine Check bits on power9
+ */
+#define P9_SRR1_MC_LOADSTORE(srr1)	(((srr1) >> PPC_BITLSHIFT(42)) & 1)
+
+#define P9_SRR1_MC_IFETCH(srr1)	(	\
+	PPC_BITEXTRACT(srr1, 45, 0) |	\
+	PPC_BITEXTRACT(srr1, 44, 1) |	\
+	PPC_BITEXTRACT(srr1, 43, 2) |	\
+	PPC_BITEXTRACT(srr1, 36, 3) )
+
+/* 0 is reserved */
+#define P9_SRR1_MC_IFETCH_UE				1
+#define P9_SRR1_MC_IFETCH_SLB_PARITY			2
+#define P9_SRR1_MC_IFETCH_SLB_MULTIHIT			3
+#define P9_SRR1_MC_IFETCH_ERAT_MULTIHIT			4
+#define P9_SRR1_MC_IFETCH_TLB_MULTIHIT			5
+#define P9_SRR1_MC_IFETCH_UE_TLB_RELOAD			6
+/* 7 is reserved */
+#define P9_SRR1_MC_IFETCH_LINK_TIMEOUT			8
+#define P9_SRR1_MC_IFETCH_LINK_TABLEWALK_TIMEOUT	9
+/* 10 ? */
+#define P9_SRR1_MC_IFETCH_RA				11
+#define P9_SRR1_MC_IFETCH_RA_TABLEWALK			12
+#define P9_SRR1_MC_IFETCH_RA_ASYNC_STORE		13
+#define P9_SRR1_MC_IFETCH_LINK_ASYNC_STORE_TIMEOUT	14
+#define P9_SRR1_MC_IFETCH_RA_TABLEWALK_FOREIGN		15
+
+/* DSISR bits for machine check (On Power9) */
+#define P9_DSISR_MC_UE					(PPC_BIT(48))
+#define P9_DSISR_MC_UE_TABLEWALK			(PPC_BIT(49))
+#define P9_DSISR_MC_LINK_LOAD_TIMEOUT			(PPC_BIT(50))
+#define P9_DSISR_MC_LINK_TABLEWALK_TIMEOUT		(PPC_BIT(51))
+#define P9_DSISR_MC_ERAT_MULTIHIT			(PPC_BIT(52))
+#define P9_DSISR_MC_TLB_MULTIHIT_MFTLB			(PPC_BIT(53))
+#define P9_DSISR_MC_USER_TLBIE				(PPC_BIT(54))
+#define P9_DSISR_MC_SLB_PARITY_MFSLB			(PPC_BIT(55))
+#define P9_DSISR_MC_SLB_MULTIHIT_MFSLB			(PPC_BIT(56))
+#define P9_DSISR_MC_RA_LOAD				(PPC_BIT(57))
+#define P9_DSISR_MC_RA_TABLEWALK			(PPC_BIT(58))
+#define P9_DSISR_MC_RA_TABLEWALK_FOREIGN		(PPC_BIT(59))
+#define P9_DSISR_MC_RA_FOREIGN				(PPC_BIT(60))
+
+/* SLB error bits */
+#define P9_DSISR_MC_SLB_ERRORS		(P9_DSISR_MC_ERAT_MULTIHIT | \
+					 P9_DSISR_MC_SLB_PARITY_MFSLB | \
+					 P9_DSISR_MC_SLB_MULTIHIT_MFSLB)
+
 enum MCE_Version {
 	MCE_V1 = 1,
 };
@@ -93,6 +142,9 @@ enum MCE_ErrorType {
 	MCE_ERROR_TYPE_SLB = 2,
 	MCE_ERROR_TYPE_ERAT = 3,
 	MCE_ERROR_TYPE_TLB = 4,
+	MCE_ERROR_TYPE_USER = 5,
+	MCE_ERROR_TYPE_RA = 6,
+	MCE_ERROR_TYPE_LINK = 7,
 };
 
 enum MCE_UeErrorType {
98enum MCE_UeErrorType { 150enum MCE_UeErrorType {
@@ -121,6 +173,32 @@ enum MCE_TlbErrorType {
121 MCE_TLB_ERROR_MULTIHIT = 2, 173 MCE_TLB_ERROR_MULTIHIT = 2,
122}; 174};
123 175
176enum MCE_UserErrorType {
177 MCE_USER_ERROR_INDETERMINATE = 0,
178 MCE_USER_ERROR_TLBIE = 1,
179};
180
181enum MCE_RaErrorType {
182 MCE_RA_ERROR_INDETERMINATE = 0,
183 MCE_RA_ERROR_IFETCH = 1,
184 MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH = 2,
185 MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH_FOREIGN = 3,
186 MCE_RA_ERROR_LOAD = 4,
187 MCE_RA_ERROR_STORE = 5,
188 MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE = 6,
189 MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE_FOREIGN = 7,
190 MCE_RA_ERROR_LOAD_STORE_FOREIGN = 8,
191};
192
193enum MCE_LinkErrorType {
194 MCE_LINK_ERROR_INDETERMINATE = 0,
195 MCE_LINK_ERROR_IFETCH_TIMEOUT = 1,
196 MCE_LINK_ERROR_PAGE_TABLE_WALK_IFETCH_TIMEOUT = 2,
197 MCE_LINK_ERROR_LOAD_TIMEOUT = 3,
198 MCE_LINK_ERROR_STORE_TIMEOUT = 4,
199 MCE_LINK_ERROR_PAGE_TABLE_WALK_LOAD_STORE_TIMEOUT = 5,
200};
201
124struct machine_check_event { 202struct machine_check_event {
125 enum MCE_Version version:8; /* 0x00 */ 203 enum MCE_Version version:8; /* 0x00 */
126 uint8_t in_use; /* 0x01 */ 204 uint8_t in_use; /* 0x01 */
@@ -166,6 +244,30 @@ struct machine_check_event {
 			uint64_t	effective_address;
 			uint8_t		reserved_2[16];
 		} tlb_error;
+
+		struct {
+			enum MCE_UserErrorType user_error_type:8;
+			uint8_t		effective_address_provided;
+			uint8_t		reserved_1[6];
+			uint64_t	effective_address;
+			uint8_t		reserved_2[16];
+		} user_error;
+
+		struct {
+			enum MCE_RaErrorType ra_error_type:8;
+			uint8_t		effective_address_provided;
+			uint8_t		reserved_1[6];
+			uint64_t	effective_address;
+			uint8_t		reserved_2[16];
+		} ra_error;
+
+		struct {
+			enum MCE_LinkErrorType link_error_type:8;
+			uint8_t		effective_address_provided;
+			uint8_t		reserved_1[6];
+			uint64_t	effective_address;
+			uint8_t		reserved_2[16];
+		} link_error;
 	} u;
 };
 
@@ -176,8 +278,12 @@ struct mce_error_info {
 		enum MCE_SlbErrorType slb_error_type:8;
 		enum MCE_EratErrorType erat_error_type:8;
 		enum MCE_TlbErrorType tlb_error_type:8;
+		enum MCE_UserErrorType user_error_type:8;
+		enum MCE_RaErrorType ra_error_type:8;
+		enum MCE_LinkErrorType link_error_type:8;
 	} u;
-	uint8_t		reserved[2];
+	enum MCE_Severity severity:8;
+	enum MCE_Initiator initiator:8;
 };
 
 #define MAX_MC_EVT	100
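These enums and DSISR masks are consumed by the POWER9 early machine-check handler added elsewhere in this series. A hedged sketch of the kind of classification they enable; this is not the actual handler, and the real code also fills in severity, initiator and the effective address:

	/* Illustrative classifier for a POWER9 load/store machine
	 * check, using only the masks defined above. */
	static enum MCE_ErrorType p9_classify_dsisr(uint64_t dsisr)
	{
		if (dsisr & (P9_DSISR_MC_UE | P9_DSISR_MC_UE_TABLEWALK))
			return MCE_ERROR_TYPE_UE;
		if (dsisr & P9_DSISR_MC_SLB_ERRORS)
			return MCE_ERROR_TYPE_SLB;
		if (dsisr & P9_DSISR_MC_TLB_MULTIHIT_MFTLB)
			return MCE_ERROR_TYPE_TLB;
		if (dsisr & P9_DSISR_MC_USER_TLBIE)
			return MCE_ERROR_TYPE_USER;
		if (dsisr & (P9_DSISR_MC_RA_LOAD | P9_DSISR_MC_RA_TABLEWALK |
			     P9_DSISR_MC_RA_TABLEWALK_FOREIGN |
			     P9_DSISR_MC_RA_FOREIGN))
			return MCE_ERROR_TYPE_RA;
		if (dsisr & (P9_DSISR_MC_LINK_LOAD_TIMEOUT |
			     P9_DSISR_MC_LINK_TABLEWALK_TIMEOUT))
			return MCE_ERROR_TYPE_LINK;
		return MCE_ERROR_TYPE_UNKNOWN;
	}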
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index 4b369d83fe9c..1c9470881c4a 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -387,3 +387,4 @@ SYSCALL(copy_file_range)
 COMPAT_SYS_SPU(preadv2)
 COMPAT_SYS_SPU(pwritev2)
 SYSCALL(kexec_file_load)
+SYSCALL(statx)
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index eb1acee91a20..9ba11dbcaca9 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -12,7 +12,7 @@
 #include <uapi/asm/unistd.h>
 
 
-#define NR_syscalls		383
+#define NR_syscalls		384
 
 #define __NR__exit __NR_exit
 
diff --git a/arch/powerpc/include/uapi/asm/unistd.h b/arch/powerpc/include/uapi/asm/unistd.h
index 2f26335a3c42..b85f14228857 100644
--- a/arch/powerpc/include/uapi/asm/unistd.h
+++ b/arch/powerpc/include/uapi/asm/unistd.h
@@ -393,5 +393,6 @@
393#define __NR_preadv2 380 393#define __NR_preadv2 380
394#define __NR_pwritev2 381 394#define __NR_pwritev2 381
395#define __NR_kexec_file_load 382 395#define __NR_kexec_file_load 382
396#define __NR_statx 383
396 397
397#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */ 398#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
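Taken together, the three hunks above are the complete powerpc wiring for a new syscall: the systbl.h dispatch entry, the NR_syscalls bump, and the uapi number. A minimal userspace sketch, assuming a 4.11-era <linux/stat.h> that defines struct statx (glibc had no wrapper yet, hence the raw syscall by the number assigned above; the path is arbitrary):

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <fcntl.h>        /* AT_FDCWD */
#include <linux/stat.h>   /* struct statx, STATX_BASIC_STATS */

int main(void)
{
	struct statx stx;

	/* 383 is __NR_statx on powerpc, as added in the hunk above. */
	if (syscall(383, AT_FDCWD, "/etc/hostname", 0,
		    STATX_BASIC_STATS, &stx) != 0) {
		perror("statx");
		return 1;
	}
	printf("size=%llu\n", (unsigned long long)stx.stx_size);
	return 0;
}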
diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
index cbc7c42cdb74..ec7a8b099dd9 100644
--- a/arch/powerpc/kernel/align.c
+++ b/arch/powerpc/kernel/align.c
@@ -807,14 +807,25 @@ int fix_alignment(struct pt_regs *regs)
807 nb = aligninfo[instr].len; 807 nb = aligninfo[instr].len;
808 flags = aligninfo[instr].flags; 808 flags = aligninfo[instr].flags;
809 809
810 /* ldbrx/stdbrx overlap lfs/stfs in the DSISR unfortunately */ 810 /*
811 if (IS_XFORM(instruction) && ((instruction >> 1) & 0x3ff) == 532) { 811 * Handle some cases which give overlaps in the DSISR values.
812 nb = 8; 812 */
813 flags = LD+SW; 813 if (IS_XFORM(instruction)) {
814 } else if (IS_XFORM(instruction) && 814 switch (get_xop(instruction)) {
815 ((instruction >> 1) & 0x3ff) == 660) { 815 case 532: /* ldbrx */
816 nb = 8; 816 nb = 8;
817 flags = ST+SW; 817 flags = LD+SW;
818 break;
819 case 660: /* stdbrx */
820 nb = 8;
821 flags = ST+SW;
822 break;
823 case 20: /* lwarx */
824 case 84: /* ldarx */
825 case 116: /* lharx */
826 case 276: /* lqarx */
 827 return 0; /* never emulated */
828 }
818 } 829 }
819 830
820 /* Byteswap little endian loads and stores */ 831 /* Byteswap little endian loads and stores */
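The rewritten branch keys off a get_xop() helper rather than repeating the open-coded mask. A sketch of what such a helper extracts, assuming the usual X-form encoding (the old code's `(instruction >> 1) & 0x3ff` made the same field explicit):

/* X-form PowerPC instructions carry a 10-bit extended opcode in bits
 * 1-10; pulling it out once lets the switch above match ldbrx (532),
 * stdbrx (660) and the lwarx/ldarx/lharx/lqarx family directly. */
static inline unsigned int get_xop(unsigned int instruction)
{
	return (instruction >> 1) & 0x3ff;
}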
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index bb7a1890aeb7..e79b9daa873c 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -77,6 +77,7 @@ extern void __flush_tlb_power8(unsigned int action);
77extern void __flush_tlb_power9(unsigned int action); 77extern void __flush_tlb_power9(unsigned int action);
78extern long __machine_check_early_realmode_p7(struct pt_regs *regs); 78extern long __machine_check_early_realmode_p7(struct pt_regs *regs);
79extern long __machine_check_early_realmode_p8(struct pt_regs *regs); 79extern long __machine_check_early_realmode_p8(struct pt_regs *regs);
80extern long __machine_check_early_realmode_p9(struct pt_regs *regs);
80#endif /* CONFIG_PPC64 */ 81#endif /* CONFIG_PPC64 */
81#if defined(CONFIG_E500) 82#if defined(CONFIG_E500)
82extern void __setup_cpu_e5500(unsigned long offset, struct cpu_spec* spec); 83extern void __setup_cpu_e5500(unsigned long offset, struct cpu_spec* spec);
@@ -540,6 +541,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
540 .cpu_setup = __setup_cpu_power9, 541 .cpu_setup = __setup_cpu_power9,
541 .cpu_restore = __restore_cpu_power9, 542 .cpu_restore = __restore_cpu_power9,
542 .flush_tlb = __flush_tlb_power9, 543 .flush_tlb = __flush_tlb_power9,
544 .machine_check_early = __machine_check_early_realmode_p9,
543 .platform = "power9", 545 .platform = "power9",
544 }, 546 },
545 { /* Power9 */ 547 { /* Power9 */
@@ -559,6 +561,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
559 .cpu_setup = __setup_cpu_power9, 561 .cpu_setup = __setup_cpu_power9,
560 .cpu_restore = __restore_cpu_power9, 562 .cpu_restore = __restore_cpu_power9,
561 .flush_tlb = __flush_tlb_power9, 563 .flush_tlb = __flush_tlb_power9,
564 .machine_check_early = __machine_check_early_realmode_p9,
562 .platform = "power9", 565 .platform = "power9",
563 }, 566 },
564 { /* Cell Broadband Engine */ 567 { /* Cell Broadband Engine */
diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S
index 995728736677..6fd08219248d 100644
--- a/arch/powerpc/kernel/idle_book3s.S
+++ b/arch/powerpc/kernel/idle_book3s.S
@@ -449,9 +449,23 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
449_GLOBAL(pnv_wakeup_tb_loss) 449_GLOBAL(pnv_wakeup_tb_loss)
450 ld r1,PACAR1(r13) 450 ld r1,PACAR1(r13)
451 /* 451 /*
452 * Before entering any idle state, the NVGPRs are saved in the stack 452 * Before entering any idle state, the NVGPRs are saved in the stack.
453 * and they are restored before switching to the process context. Hence 453 * If there was a state loss, or PACA_NAPSTATELOST was set, then the
454 * until they are restored, they are free to be used. 454 * NVGPRs are restored. If we are here, it is likely that state is lost,
 455 * but not guaranteed -- neither the ISA207 nor the ISA300 test to reach
 456 * here is the same as the test that restores NVGPRs:
457 * PACA_THREAD_IDLE_STATE test for ISA207, PSSCR test for ISA300,
458 * and SRR1 test for restoring NVGPRs.
459 *
460 * We are about to clobber NVGPRs now, so set NAPSTATELOST to
461 * guarantee they will always be restored. This might be tightened
462 * with careful reading of specs (particularly for ISA300) but this
463 * is already a slow wakeup path and it's simpler to be safe.
464 */
465 li r0,1
466 stb r0,PACA_NAPSTATELOST(r13)
467
468 /*
455 * 469 *
456 * Save SRR1 and LR in NVGPRs as they might be clobbered in 470 * Save SRR1 and LR in NVGPRs as they might be clobbered in
457 * opal_call() (called in CHECK_HMI_INTERRUPT). SRR1 is required 471 * opal_call() (called in CHECK_HMI_INTERRUPT). SRR1 is required
diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c
index c6923ff45131..a1475e6aef3a 100644
--- a/arch/powerpc/kernel/mce.c
+++ b/arch/powerpc/kernel/mce.c
@@ -58,6 +58,15 @@ static void mce_set_error_info(struct machine_check_event *mce,
58 case MCE_ERROR_TYPE_TLB: 58 case MCE_ERROR_TYPE_TLB:
59 mce->u.tlb_error.tlb_error_type = mce_err->u.tlb_error_type; 59 mce->u.tlb_error.tlb_error_type = mce_err->u.tlb_error_type;
60 break; 60 break;
61 case MCE_ERROR_TYPE_USER:
62 mce->u.user_error.user_error_type = mce_err->u.user_error_type;
63 break;
64 case MCE_ERROR_TYPE_RA:
65 mce->u.ra_error.ra_error_type = mce_err->u.ra_error_type;
66 break;
67 case MCE_ERROR_TYPE_LINK:
68 mce->u.link_error.link_error_type = mce_err->u.link_error_type;
69 break;
61 case MCE_ERROR_TYPE_UNKNOWN: 70 case MCE_ERROR_TYPE_UNKNOWN:
62 default: 71 default:
63 break; 72 break;
@@ -90,13 +99,14 @@ void save_mce_event(struct pt_regs *regs, long handled,
90 mce->gpr3 = regs->gpr[3]; 99 mce->gpr3 = regs->gpr[3];
91 mce->in_use = 1; 100 mce->in_use = 1;
92 101
93 mce->initiator = MCE_INITIATOR_CPU;
94 /* Mark it recovered if we have handled it and MSR(RI=1). */ 102 /* Mark it recovered if we have handled it and MSR(RI=1). */
95 if (handled && (regs->msr & MSR_RI)) 103 if (handled && (regs->msr & MSR_RI))
96 mce->disposition = MCE_DISPOSITION_RECOVERED; 104 mce->disposition = MCE_DISPOSITION_RECOVERED;
97 else 105 else
98 mce->disposition = MCE_DISPOSITION_NOT_RECOVERED; 106 mce->disposition = MCE_DISPOSITION_NOT_RECOVERED;
99 mce->severity = MCE_SEV_ERROR_SYNC; 107
108 mce->initiator = mce_err->initiator;
109 mce->severity = mce_err->severity;
100 110
101 /* 111 /*
102 * Populate the mce error_type and type-specific error_type. 112 * Populate the mce error_type and type-specific error_type.
@@ -115,6 +125,15 @@ void save_mce_event(struct pt_regs *regs, long handled,
115 } else if (mce->error_type == MCE_ERROR_TYPE_ERAT) { 125 } else if (mce->error_type == MCE_ERROR_TYPE_ERAT) {
116 mce->u.erat_error.effective_address_provided = true; 126 mce->u.erat_error.effective_address_provided = true;
117 mce->u.erat_error.effective_address = addr; 127 mce->u.erat_error.effective_address = addr;
128 } else if (mce->error_type == MCE_ERROR_TYPE_USER) {
129 mce->u.user_error.effective_address_provided = true;
130 mce->u.user_error.effective_address = addr;
131 } else if (mce->error_type == MCE_ERROR_TYPE_RA) {
132 mce->u.ra_error.effective_address_provided = true;
133 mce->u.ra_error.effective_address = addr;
134 } else if (mce->error_type == MCE_ERROR_TYPE_LINK) {
135 mce->u.link_error.effective_address_provided = true;
136 mce->u.link_error.effective_address = addr;
118 } else if (mce->error_type == MCE_ERROR_TYPE_UE) { 137 } else if (mce->error_type == MCE_ERROR_TYPE_UE) {
119 mce->u.ue_error.effective_address_provided = true; 138 mce->u.ue_error.effective_address_provided = true;
120 mce->u.ue_error.effective_address = addr; 139 mce->u.ue_error.effective_address = addr;
@@ -239,6 +258,29 @@ void machine_check_print_event_info(struct machine_check_event *evt)
239 "Parity", 258 "Parity",
240 "Multihit", 259 "Multihit",
241 }; 260 };
261 static const char *mc_user_types[] = {
262 "Indeterminate",
263 "tlbie(l) invalid",
264 };
265 static const char *mc_ra_types[] = {
266 "Indeterminate",
267 "Instruction fetch (bad)",
268 "Page table walk ifetch (bad)",
269 "Page table walk ifetch (foreign)",
270 "Load (bad)",
271 "Store (bad)",
272 "Page table walk Load/Store (bad)",
273 "Page table walk Load/Store (foreign)",
274 "Load/Store (foreign)",
275 };
276 static const char *mc_link_types[] = {
277 "Indeterminate",
278 "Instruction fetch (timeout)",
279 "Page table walk ifetch (timeout)",
280 "Load (timeout)",
281 "Store (timeout)",
282 "Page table walk Load/Store (timeout)",
283 };
242 284
243 /* Print things out */ 285 /* Print things out */
244 if (evt->version != MCE_V1) { 286 if (evt->version != MCE_V1) {
@@ -315,6 +357,36 @@ void machine_check_print_event_info(struct machine_check_event *evt)
315 printk("%s Effective address: %016llx\n", 357 printk("%s Effective address: %016llx\n",
316 level, evt->u.tlb_error.effective_address); 358 level, evt->u.tlb_error.effective_address);
317 break; 359 break;
360 case MCE_ERROR_TYPE_USER:
361 subtype = evt->u.user_error.user_error_type <
362 ARRAY_SIZE(mc_user_types) ?
363 mc_user_types[evt->u.user_error.user_error_type]
364 : "Unknown";
365 printk("%s Error type: User [%s]\n", level, subtype);
366 if (evt->u.user_error.effective_address_provided)
367 printk("%s Effective address: %016llx\n",
368 level, evt->u.user_error.effective_address);
369 break;
370 case MCE_ERROR_TYPE_RA:
371 subtype = evt->u.ra_error.ra_error_type <
372 ARRAY_SIZE(mc_ra_types) ?
373 mc_ra_types[evt->u.ra_error.ra_error_type]
374 : "Unknown";
375 printk("%s Error type: Real address [%s]\n", level, subtype);
376 if (evt->u.ra_error.effective_address_provided)
377 printk("%s Effective address: %016llx\n",
378 level, evt->u.ra_error.effective_address);
379 break;
380 case MCE_ERROR_TYPE_LINK:
381 subtype = evt->u.link_error.link_error_type <
382 ARRAY_SIZE(mc_link_types) ?
383 mc_link_types[evt->u.link_error.link_error_type]
384 : "Unknown";
385 printk("%s Error type: Link [%s]\n", level, subtype);
386 if (evt->u.link_error.effective_address_provided)
387 printk("%s Effective address: %016llx\n",
388 level, evt->u.link_error.effective_address);
389 break;
318 default: 390 default:
319 case MCE_ERROR_TYPE_UNKNOWN: 391 case MCE_ERROR_TYPE_UNKNOWN:
320 printk("%s Error type: Unknown\n", level); 392 printk("%s Error type: Unknown\n", level);
@@ -341,6 +413,18 @@ uint64_t get_mce_fault_addr(struct machine_check_event *evt)
341 if (evt->u.tlb_error.effective_address_provided) 413 if (evt->u.tlb_error.effective_address_provided)
342 return evt->u.tlb_error.effective_address; 414 return evt->u.tlb_error.effective_address;
343 break; 415 break;
416 case MCE_ERROR_TYPE_USER:
417 if (evt->u.user_error.effective_address_provided)
418 return evt->u.user_error.effective_address;
419 break;
420 case MCE_ERROR_TYPE_RA:
421 if (evt->u.ra_error.effective_address_provided)
422 return evt->u.ra_error.effective_address;
423 break;
424 case MCE_ERROR_TYPE_LINK:
425 if (evt->u.link_error.effective_address_provided)
426 return evt->u.link_error.effective_address;
427 break;
344 default: 428 default:
345 case MCE_ERROR_TYPE_UNKNOWN: 429 case MCE_ERROR_TYPE_UNKNOWN:
346 break; 430 break;
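Each new case in machine_check_print_event_info() repeats the same guarded table lookup, so an out-of-range subtype reported by firmware prints as "Unknown" instead of indexing past the string array. The pattern as a standalone sketch (the helper name is illustrative, not part of the patch):

/* Guarded subtype lookup: anything outside the table degrades to
 * "Unknown" rather than reading past the end of the array. */
static const char *mc_subtype_str(unsigned int type,
				  const char *const *table, size_t n)
{
	return (type < n) ? table[type] : "Unknown";
}

With it, the MCE_ERROR_TYPE_RA case would reduce to mc_subtype_str(evt->u.ra_error.ra_error_type, mc_ra_types, ARRAY_SIZE(mc_ra_types)).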
diff --git a/arch/powerpc/kernel/mce_power.c b/arch/powerpc/kernel/mce_power.c
index 7353991c4ece..763d6f58caa8 100644
--- a/arch/powerpc/kernel/mce_power.c
+++ b/arch/powerpc/kernel/mce_power.c
@@ -116,6 +116,51 @@ static void flush_and_reload_slb(void)
116} 116}
117#endif 117#endif
118 118
119static void flush_erat(void)
120{
121 asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
122}
123
124#define MCE_FLUSH_SLB 1
125#define MCE_FLUSH_TLB 2
126#define MCE_FLUSH_ERAT 3
127
128static int mce_flush(int what)
129{
130#ifdef CONFIG_PPC_STD_MMU_64
131 if (what == MCE_FLUSH_SLB) {
132 flush_and_reload_slb();
133 return 1;
134 }
135#endif
136 if (what == MCE_FLUSH_ERAT) {
137 flush_erat();
138 return 1;
139 }
140 if (what == MCE_FLUSH_TLB) {
141 if (cur_cpu_spec && cur_cpu_spec->flush_tlb) {
142 cur_cpu_spec->flush_tlb(TLB_INVAL_SCOPE_GLOBAL);
143 return 1;
144 }
145 }
146
147 return 0;
148}
149
150static int mce_handle_flush_derrors(uint64_t dsisr, uint64_t slb, uint64_t tlb, uint64_t erat)
151{
152 if ((dsisr & slb) && mce_flush(MCE_FLUSH_SLB))
153 dsisr &= ~slb;
154 if ((dsisr & erat) && mce_flush(MCE_FLUSH_ERAT))
155 dsisr &= ~erat;
156 if ((dsisr & tlb) && mce_flush(MCE_FLUSH_TLB))
157 dsisr &= ~tlb;
158 /* Any other errors we don't understand? */
159 if (dsisr)
160 return 0;
161 return 1;
162}
163
119static long mce_handle_derror(uint64_t dsisr, uint64_t slb_error_bits) 164static long mce_handle_derror(uint64_t dsisr, uint64_t slb_error_bits)
120{ 165{
121 long handled = 1; 166 long handled = 1;
@@ -281,6 +326,9 @@ long __machine_check_early_realmode_p7(struct pt_regs *regs)
281 long handled = 1; 326 long handled = 1;
282 struct mce_error_info mce_error_info = { 0 }; 327 struct mce_error_info mce_error_info = { 0 };
283 328
329 mce_error_info.severity = MCE_SEV_ERROR_SYNC;
330 mce_error_info.initiator = MCE_INITIATOR_CPU;
331
284 srr1 = regs->msr; 332 srr1 = regs->msr;
285 nip = regs->nip; 333 nip = regs->nip;
286 334
@@ -352,6 +400,9 @@ long __machine_check_early_realmode_p8(struct pt_regs *regs)
352 long handled = 1; 400 long handled = 1;
353 struct mce_error_info mce_error_info = { 0 }; 401 struct mce_error_info mce_error_info = { 0 };
354 402
403 mce_error_info.severity = MCE_SEV_ERROR_SYNC;
404 mce_error_info.initiator = MCE_INITIATOR_CPU;
405
355 srr1 = regs->msr; 406 srr1 = regs->msr;
356 nip = regs->nip; 407 nip = regs->nip;
357 408
@@ -372,3 +423,189 @@ long __machine_check_early_realmode_p8(struct pt_regs *regs)
372 save_mce_event(regs, handled, &mce_error_info, nip, addr); 423 save_mce_event(regs, handled, &mce_error_info, nip, addr);
373 return handled; 424 return handled;
374} 425}
426
427static int mce_handle_derror_p9(struct pt_regs *regs)
428{
429 uint64_t dsisr = regs->dsisr;
430
431 return mce_handle_flush_derrors(dsisr,
432 P9_DSISR_MC_SLB_PARITY_MFSLB |
433 P9_DSISR_MC_SLB_MULTIHIT_MFSLB,
434
435 P9_DSISR_MC_TLB_MULTIHIT_MFTLB,
436
437 P9_DSISR_MC_ERAT_MULTIHIT);
438}
439
440static int mce_handle_ierror_p9(struct pt_regs *regs)
441{
442 uint64_t srr1 = regs->msr;
443
444 switch (P9_SRR1_MC_IFETCH(srr1)) {
445 case P9_SRR1_MC_IFETCH_SLB_PARITY:
446 case P9_SRR1_MC_IFETCH_SLB_MULTIHIT:
447 return mce_flush(MCE_FLUSH_SLB);
448 case P9_SRR1_MC_IFETCH_TLB_MULTIHIT:
449 return mce_flush(MCE_FLUSH_TLB);
450 case P9_SRR1_MC_IFETCH_ERAT_MULTIHIT:
451 return mce_flush(MCE_FLUSH_ERAT);
452 default:
453 return 0;
454 }
455}
456
457static void mce_get_derror_p9(struct pt_regs *regs,
458 struct mce_error_info *mce_err, uint64_t *addr)
459{
460 uint64_t dsisr = regs->dsisr;
461
462 mce_err->severity = MCE_SEV_ERROR_SYNC;
463 mce_err->initiator = MCE_INITIATOR_CPU;
464
465 if (dsisr & P9_DSISR_MC_USER_TLBIE)
466 *addr = regs->nip;
467 else
468 *addr = regs->dar;
469
470 if (dsisr & P9_DSISR_MC_UE) {
471 mce_err->error_type = MCE_ERROR_TYPE_UE;
472 mce_err->u.ue_error_type = MCE_UE_ERROR_LOAD_STORE;
473 } else if (dsisr & P9_DSISR_MC_UE_TABLEWALK) {
474 mce_err->error_type = MCE_ERROR_TYPE_UE;
475 mce_err->u.ue_error_type = MCE_UE_ERROR_PAGE_TABLE_WALK_LOAD_STORE;
476 } else if (dsisr & P9_DSISR_MC_LINK_LOAD_TIMEOUT) {
477 mce_err->error_type = MCE_ERROR_TYPE_LINK;
478 mce_err->u.link_error_type = MCE_LINK_ERROR_LOAD_TIMEOUT;
479 } else if (dsisr & P9_DSISR_MC_LINK_TABLEWALK_TIMEOUT) {
480 mce_err->error_type = MCE_ERROR_TYPE_LINK;
481 mce_err->u.link_error_type = MCE_LINK_ERROR_PAGE_TABLE_WALK_LOAD_STORE_TIMEOUT;
482 } else if (dsisr & P9_DSISR_MC_ERAT_MULTIHIT) {
483 mce_err->error_type = MCE_ERROR_TYPE_ERAT;
484 mce_err->u.erat_error_type = MCE_ERAT_ERROR_MULTIHIT;
485 } else if (dsisr & P9_DSISR_MC_TLB_MULTIHIT_MFTLB) {
486 mce_err->error_type = MCE_ERROR_TYPE_TLB;
487 mce_err->u.tlb_error_type = MCE_TLB_ERROR_MULTIHIT;
488 } else if (dsisr & P9_DSISR_MC_USER_TLBIE) {
489 mce_err->error_type = MCE_ERROR_TYPE_USER;
490 mce_err->u.user_error_type = MCE_USER_ERROR_TLBIE;
491 } else if (dsisr & P9_DSISR_MC_SLB_PARITY_MFSLB) {
492 mce_err->error_type = MCE_ERROR_TYPE_SLB;
493 mce_err->u.slb_error_type = MCE_SLB_ERROR_PARITY;
494 } else if (dsisr & P9_DSISR_MC_SLB_MULTIHIT_MFSLB) {
495 mce_err->error_type = MCE_ERROR_TYPE_SLB;
496 mce_err->u.slb_error_type = MCE_SLB_ERROR_MULTIHIT;
497 } else if (dsisr & P9_DSISR_MC_RA_LOAD) {
498 mce_err->error_type = MCE_ERROR_TYPE_RA;
499 mce_err->u.ra_error_type = MCE_RA_ERROR_LOAD;
500 } else if (dsisr & P9_DSISR_MC_RA_TABLEWALK) {
501 mce_err->error_type = MCE_ERROR_TYPE_RA;
502 mce_err->u.ra_error_type = MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE;
503 } else if (dsisr & P9_DSISR_MC_RA_TABLEWALK_FOREIGN) {
504 mce_err->error_type = MCE_ERROR_TYPE_RA;
505 mce_err->u.ra_error_type = MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE_FOREIGN;
506 } else if (dsisr & P9_DSISR_MC_RA_FOREIGN) {
507 mce_err->error_type = MCE_ERROR_TYPE_RA;
508 mce_err->u.ra_error_type = MCE_RA_ERROR_LOAD_STORE_FOREIGN;
509 }
510}
511
512static void mce_get_ierror_p9(struct pt_regs *regs,
513 struct mce_error_info *mce_err, uint64_t *addr)
514{
515 uint64_t srr1 = regs->msr;
516
517 switch (P9_SRR1_MC_IFETCH(srr1)) {
518 case P9_SRR1_MC_IFETCH_RA_ASYNC_STORE:
519 case P9_SRR1_MC_IFETCH_LINK_ASYNC_STORE_TIMEOUT:
520 mce_err->severity = MCE_SEV_FATAL;
521 break;
522 default:
523 mce_err->severity = MCE_SEV_ERROR_SYNC;
524 break;
525 }
526
527 mce_err->initiator = MCE_INITIATOR_CPU;
528
529 *addr = regs->nip;
530
531 switch (P9_SRR1_MC_IFETCH(srr1)) {
532 case P9_SRR1_MC_IFETCH_UE:
533 mce_err->error_type = MCE_ERROR_TYPE_UE;
534 mce_err->u.ue_error_type = MCE_UE_ERROR_IFETCH;
535 break;
536 case P9_SRR1_MC_IFETCH_SLB_PARITY:
537 mce_err->error_type = MCE_ERROR_TYPE_SLB;
538 mce_err->u.slb_error_type = MCE_SLB_ERROR_PARITY;
539 break;
540 case P9_SRR1_MC_IFETCH_SLB_MULTIHIT:
541 mce_err->error_type = MCE_ERROR_TYPE_SLB;
542 mce_err->u.slb_error_type = MCE_SLB_ERROR_MULTIHIT;
543 break;
544 case P9_SRR1_MC_IFETCH_ERAT_MULTIHIT:
545 mce_err->error_type = MCE_ERROR_TYPE_ERAT;
546 mce_err->u.erat_error_type = MCE_ERAT_ERROR_MULTIHIT;
547 break;
548 case P9_SRR1_MC_IFETCH_TLB_MULTIHIT:
549 mce_err->error_type = MCE_ERROR_TYPE_TLB;
550 mce_err->u.tlb_error_type = MCE_TLB_ERROR_MULTIHIT;
551 break;
552 case P9_SRR1_MC_IFETCH_UE_TLB_RELOAD:
553 mce_err->error_type = MCE_ERROR_TYPE_UE;
554 mce_err->u.ue_error_type = MCE_UE_ERROR_PAGE_TABLE_WALK_IFETCH;
555 break;
556 case P9_SRR1_MC_IFETCH_LINK_TIMEOUT:
557 mce_err->error_type = MCE_ERROR_TYPE_LINK;
558 mce_err->u.link_error_type = MCE_LINK_ERROR_IFETCH_TIMEOUT;
559 break;
560 case P9_SRR1_MC_IFETCH_LINK_TABLEWALK_TIMEOUT:
561 mce_err->error_type = MCE_ERROR_TYPE_LINK;
562 mce_err->u.link_error_type = MCE_LINK_ERROR_PAGE_TABLE_WALK_IFETCH_TIMEOUT;
563 break;
564 case P9_SRR1_MC_IFETCH_RA:
565 mce_err->error_type = MCE_ERROR_TYPE_RA;
566 mce_err->u.ra_error_type = MCE_RA_ERROR_IFETCH;
567 break;
568 case P9_SRR1_MC_IFETCH_RA_TABLEWALK:
569 mce_err->error_type = MCE_ERROR_TYPE_RA;
570 mce_err->u.ra_error_type = MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH;
571 break;
572 case P9_SRR1_MC_IFETCH_RA_ASYNC_STORE:
573 mce_err->error_type = MCE_ERROR_TYPE_RA;
574 mce_err->u.ra_error_type = MCE_RA_ERROR_STORE;
575 break;
576 case P9_SRR1_MC_IFETCH_LINK_ASYNC_STORE_TIMEOUT:
577 mce_err->error_type = MCE_ERROR_TYPE_LINK;
578 mce_err->u.link_error_type = MCE_LINK_ERROR_STORE_TIMEOUT;
579 break;
580 case P9_SRR1_MC_IFETCH_RA_TABLEWALK_FOREIGN:
581 mce_err->error_type = MCE_ERROR_TYPE_RA;
582 mce_err->u.ra_error_type = MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH_FOREIGN;
583 break;
584 default:
585 break;
586 }
587}
588
589long __machine_check_early_realmode_p9(struct pt_regs *regs)
590{
591 uint64_t nip, addr;
592 long handled;
593 struct mce_error_info mce_error_info = { 0 };
594
595 nip = regs->nip;
596
597 if (P9_SRR1_MC_LOADSTORE(regs->msr)) {
598 handled = mce_handle_derror_p9(regs);
599 mce_get_derror_p9(regs, &mce_error_info, &addr);
600 } else {
601 handled = mce_handle_ierror_p9(regs);
602 mce_get_ierror_p9(regs, &mce_error_info, &addr);
603 }
604
605 /* Handle UE error. */
606 if (mce_error_info.error_type == MCE_ERROR_TYPE_UE)
607 handled = mce_handle_ue_error(regs);
608
609 save_mce_event(regs, handled, &mce_error_info, nip, addr);
610 return handled;
611}
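mce_handle_flush_derrors() treats a D-side machine check as handled only if every set DSISR bit was matched by a successful flush. A worked example as comments (the bit values are illustrative, not the real P9 masks):

/*
 * Suppose dsisr = 0x05 with slb = 0x01, tlb = 0x02, erat = 0x04:
 *
 *   dsisr & slb  (0x01) -> mce_flush(MCE_FLUSH_SLB),  dsisr = 0x04
 *   dsisr & erat (0x04) -> mce_flush(MCE_FLUSH_ERAT), dsisr = 0x00
 *   dsisr & tlb  (0x00) -> skipped
 *
 * dsisr is 0 at the end, so the error is reported handled; any
 * residual (unrecognized) bit would make the function return 0.
 */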
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index ae179cb1bb3c..c119044cad0d 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -67,7 +67,7 @@ PPC64_CACHES:
67 * flush all bytes from start through stop-1 inclusive 67 * flush all bytes from start through stop-1 inclusive
68 */ 68 */
69 69
70_GLOBAL(flush_icache_range) 70_GLOBAL_TOC(flush_icache_range)
71BEGIN_FTR_SECTION 71BEGIN_FTR_SECTION
72 PURGE_PREFETCHED_INS 72 PURGE_PREFETCHED_INS
73 blr 73 blr
@@ -120,7 +120,7 @@ EXPORT_SYMBOL(flush_icache_range)
120 * 120 *
121 * flush all bytes from start to stop-1 inclusive 121 * flush all bytes from start to stop-1 inclusive
122 */ 122 */
123_GLOBAL(flush_dcache_range) 123_GLOBAL_TOC(flush_dcache_range)
124 124
125/* 125/*
126 * Flush the data cache to memory 126 * Flush the data cache to memory
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 9cfaa8b69b5f..f997154dfc41 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -236,6 +236,15 @@ static void cpu_ready_for_interrupts(void)
236 mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3); 236 mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
237 } 237 }
238 238
239 /*
240 * Fixup HFSCR:TM based on CPU features. The bit is set by our
241 * early asm init because at that point we haven't updated our
242 * CPU features from firmware and device-tree. Here we have,
243 * so let's do it.
244 */
245 if (cpu_has_feature(CPU_FTR_HVMODE) && !cpu_has_feature(CPU_FTR_TM_COMP))
246 mtspr(SPRN_HFSCR, mfspr(SPRN_HFSCR) & ~HFSCR_TM);
247
239 /* Set IR and DR in PACA MSR */ 248 /* Set IR and DR in PACA MSR */
240 get_paca()->kernel_msr = MSR_KERNEL; 249 get_paca()->kernel_msr = MSR_KERNEL;
241} 250}
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 8c68145ba1bd..710e491206ed 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -1487,6 +1487,10 @@ long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,
1487 /* start new resize */ 1487 /* start new resize */
1488 1488
1489 resize = kzalloc(sizeof(*resize), GFP_KERNEL); 1489 resize = kzalloc(sizeof(*resize), GFP_KERNEL);
1490 if (!resize) {
1491 ret = -ENOMEM;
1492 goto out;
1493 }
1490 resize->order = shift; 1494 resize->order = shift;
1491 resize->kvm = kvm; 1495 resize->kvm = kvm;
1492 INIT_WORK(&resize->work, resize_hpt_prepare_work); 1496 INIT_WORK(&resize->work, resize_hpt_prepare_work);
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index cc332608e656..65bb8f33b399 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -638,6 +638,10 @@ static void native_flush_hash_range(unsigned long number, int local)
638 unsigned long psize = batch->psize; 638 unsigned long psize = batch->psize;
639 int ssize = batch->ssize; 639 int ssize = batch->ssize;
640 int i; 640 int i;
641 unsigned int use_local;
642
643 use_local = local && mmu_has_feature(MMU_FTR_TLBIEL) &&
644 mmu_psize_defs[psize].tlbiel && !cxl_ctx_in_use();
641 645
642 local_irq_save(flags); 646 local_irq_save(flags);
643 647
@@ -667,8 +671,7 @@ static void native_flush_hash_range(unsigned long number, int local)
667 } pte_iterate_hashed_end(); 671 } pte_iterate_hashed_end();
668 } 672 }
669 673
670 if (mmu_has_feature(MMU_FTR_TLBIEL) && 674 if (use_local) {
671 mmu_psize_defs[psize].tlbiel && local) {
672 asm volatile("ptesync":::"memory"); 675 asm volatile("ptesync":::"memory");
673 for (i = 0; i < number; i++) { 676 for (i = 0; i < number; i++) {
674 vpn = batch->vpn[i]; 677 vpn = batch->vpn[i];
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 9be992083d2a..c22f207aa656 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -397,8 +397,7 @@ static void early_check_vec5(void)
397void __init mmu_early_init_devtree(void) 397void __init mmu_early_init_devtree(void)
398{ 398{
399 /* Disable radix mode based on kernel command line. */ 399 /* Disable radix mode based on kernel command line. */
400 /* We don't yet have the machinery to do radix as a guest. */ 400 if (disable_radix)
401 if (disable_radix || !(mfmsr() & MSR_HV))
402 cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX; 401 cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
403 402
404 /* 403 /*
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 595dd718ea87..2ff13249f87a 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -188,6 +188,8 @@ static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp)
188 sdsync = POWER7P_MMCRA_SDAR_VALID; 188 sdsync = POWER7P_MMCRA_SDAR_VALID;
189 else if (ppmu->flags & PPMU_ALT_SIPR) 189 else if (ppmu->flags & PPMU_ALT_SIPR)
190 sdsync = POWER6_MMCRA_SDSYNC; 190 sdsync = POWER6_MMCRA_SDSYNC;
191 else if (ppmu->flags & PPMU_NO_SIAR)
192 sdsync = MMCRA_SAMPLE_ENABLE;
191 else 193 else
192 sdsync = MMCRA_SDSYNC; 194 sdsync = MMCRA_SDSYNC;
193 195
diff --git a/arch/powerpc/perf/isa207-common.c b/arch/powerpc/perf/isa207-common.c
index e79fb5fb817d..cd951fd231c4 100644
--- a/arch/powerpc/perf/isa207-common.c
+++ b/arch/powerpc/perf/isa207-common.c
@@ -65,12 +65,41 @@ static bool is_event_valid(u64 event)
65 return !(event & ~valid_mask); 65 return !(event & ~valid_mask);
66} 66}
67 67
68static u64 mmcra_sdar_mode(u64 event) 68static inline bool is_event_marked(u64 event)
69{ 69{
70 if (cpu_has_feature(CPU_FTR_ARCH_300) && !cpu_has_feature(CPU_FTR_POWER9_DD1)) 70 if (event & EVENT_IS_MARKED)
71 return p9_SDAR_MODE(event) << MMCRA_SDAR_MODE_SHIFT; 71 return true;
72
73 return false;
74}
72 75
73 return MMCRA_SDAR_MODE_TLB; 76static void mmcra_sdar_mode(u64 event, unsigned long *mmcra)
77{
78 /*
 79 * MMCRA[SDAR_MODE] specifies how the SDAR should be updated in
 80 * continuous sampling mode.
 81 *
 82 * In case of Power8:
 83 * MMCRA[SDAR_MODE] will be programmed as "0b01" for continuous sampling
 84 * mode and will be unchanged when setting MMCRA[63] (Marked events).
 85 *
 86 * In case of Power9:
 87 * Marked event: MMCRA[SDAR_MODE] will be set to 0b00 ('No Updates'),
 88 * also when the group already has any marked events.
 89 * Non-marked events (for DD1):
 90 * MMCRA[SDAR_MODE] will be set to 0b01.
 91 * For the rest:
 92 * MMCRA[SDAR_MODE] will be set from the event code.
93 */
94 if (cpu_has_feature(CPU_FTR_ARCH_300)) {
95 if (is_event_marked(event) || (*mmcra & MMCRA_SAMPLE_ENABLE))
96 *mmcra &= MMCRA_SDAR_MODE_NO_UPDATES;
97 else if (!cpu_has_feature(CPU_FTR_POWER9_DD1))
98 *mmcra |= p9_SDAR_MODE(event) << MMCRA_SDAR_MODE_SHIFT;
99 else if (cpu_has_feature(CPU_FTR_POWER9_DD1))
100 *mmcra |= MMCRA_SDAR_MODE_TLB;
101 } else
102 *mmcra |= MMCRA_SDAR_MODE_TLB;
74} 103}
75 104
76static u64 thresh_cmp_val(u64 value) 105static u64 thresh_cmp_val(u64 value)
@@ -180,7 +209,7 @@ int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
180 value |= CNST_L1_QUAL_VAL(cache); 209 value |= CNST_L1_QUAL_VAL(cache);
181 } 210 }
182 211
183 if (event & EVENT_IS_MARKED) { 212 if (is_event_marked(event)) {
184 mask |= CNST_SAMPLE_MASK; 213 mask |= CNST_SAMPLE_MASK;
185 value |= CNST_SAMPLE_VAL(event >> EVENT_SAMPLE_SHIFT); 214 value |= CNST_SAMPLE_VAL(event >> EVENT_SAMPLE_SHIFT);
186 } 215 }
@@ -276,7 +305,7 @@ int isa207_compute_mmcr(u64 event[], int n_ev,
276 } 305 }
277 306
278 /* In continuous sampling mode, update SDAR on TLB miss */ 307 /* In continuous sampling mode, update SDAR on TLB miss */
279 mmcra |= mmcra_sdar_mode(event[i]); 308 mmcra_sdar_mode(event[i], &mmcra);
280 309
281 if (event[i] & EVENT_IS_L1) { 310 if (event[i] & EVENT_IS_L1) {
282 cache = event[i] >> EVENT_CACHE_SEL_SHIFT; 311 cache = event[i] >> EVENT_CACHE_SEL_SHIFT;
@@ -285,7 +314,7 @@ int isa207_compute_mmcr(u64 event[], int n_ev,
285 mmcr1 |= (cache & 1) << MMCR1_DC_QUAL_SHIFT; 314 mmcr1 |= (cache & 1) << MMCR1_DC_QUAL_SHIFT;
286 } 315 }
287 316
288 if (event[i] & EVENT_IS_MARKED) { 317 if (is_event_marked(event[i])) {
289 mmcra |= MMCRA_SAMPLE_ENABLE; 318 mmcra |= MMCRA_SAMPLE_ENABLE;
290 319
291 val = (event[i] >> EVENT_SAMPLE_SHIFT) & EVENT_SAMPLE_MASK; 320 val = (event[i] >> EVENT_SAMPLE_SHIFT) & EVENT_SAMPLE_MASK;
diff --git a/arch/powerpc/perf/isa207-common.h b/arch/powerpc/perf/isa207-common.h
index cf9bd8990159..899210f14ee4 100644
--- a/arch/powerpc/perf/isa207-common.h
+++ b/arch/powerpc/perf/isa207-common.h
@@ -246,6 +246,7 @@
246#define MMCRA_THR_CMP_SHIFT 32 246#define MMCRA_THR_CMP_SHIFT 32
247#define MMCRA_SDAR_MODE_SHIFT 42 247#define MMCRA_SDAR_MODE_SHIFT 42
248#define MMCRA_SDAR_MODE_TLB (1ull << MMCRA_SDAR_MODE_SHIFT) 248#define MMCRA_SDAR_MODE_TLB (1ull << MMCRA_SDAR_MODE_SHIFT)
249#define MMCRA_SDAR_MODE_NO_UPDATES ~(0x3ull << MMCRA_SDAR_MODE_SHIFT)
249#define MMCRA_IFM_SHIFT 30 250#define MMCRA_IFM_SHIFT 30
250 251
251/* MMCR1 Threshold Compare bit constant for power9 */ 252/* MMCR1 Threshold Compare bit constant for power9 */
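MMCRA[SDAR_MODE] is a 2-bit field at bit 42, which is why the header pairs an OR constant (MMCRA_SDAR_MODE_TLB) with an AND mask (MMCRA_SDAR_MODE_NO_UPDATES). A sketch of the two operations on a scratch value:

unsigned long mmcra = 0;

/* Clear the 2-bit field: ~(0x3ull << 42) leaves every other bit alone. */
mmcra &= MMCRA_SDAR_MODE_NO_UPDATES;	/* SDAR_MODE = 0b00, "no updates" */

/* Select TLB mode: OR in 0b01 at the same bit position. */
mmcra |= MMCRA_SDAR_MODE_TLB;		/* SDAR_MODE = 0b01 */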
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index 86d9fde93c17..e0f856bfbfe8 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -395,7 +395,6 @@ static int opal_recover_mce(struct pt_regs *regs,
395 struct machine_check_event *evt) 395 struct machine_check_event *evt)
396{ 396{
397 int recovered = 0; 397 int recovered = 0;
398 uint64_t ea = get_mce_fault_addr(evt);
399 398
400 if (!(regs->msr & MSR_RI)) { 399 if (!(regs->msr & MSR_RI)) {
401 /* If MSR_RI isn't set, we cannot recover */ 400 /* If MSR_RI isn't set, we cannot recover */
@@ -404,26 +403,18 @@ static int opal_recover_mce(struct pt_regs *regs,
404 } else if (evt->disposition == MCE_DISPOSITION_RECOVERED) { 403 } else if (evt->disposition == MCE_DISPOSITION_RECOVERED) {
405 /* Platform corrected itself */ 404 /* Platform corrected itself */
406 recovered = 1; 405 recovered = 1;
407 } else if (ea && !is_kernel_addr(ea)) { 406 } else if (evt->severity == MCE_SEV_FATAL) {
407 /* Fatal machine check */
408 pr_err("Machine check interrupt is fatal\n");
409 recovered = 0;
410 } else if ((evt->severity == MCE_SEV_ERROR_SYNC) &&
411 (user_mode(regs) && !is_global_init(current))) {
408 /* 412 /*
409 * Faulting address is not in kernel text. We should be fine.
410 * We need to find which process uses this address.
411 * For now, kill the task if we have received exception when 413 * For now, kill the task if we have received exception when
412 * in userspace. 414 * in userspace.
413 * 415 *
414 * TODO: Queue up this address for hwpoisioning later. 416 * TODO: Queue up this address for hwpoisioning later.
415 */ 417 */
416 if (user_mode(regs) && !is_global_init(current)) {
417 _exception(SIGBUS, regs, BUS_MCEERR_AR, regs->nip);
418 recovered = 1;
419 } else
420 recovered = 0;
421 } else if (user_mode(regs) && !is_global_init(current) &&
422 evt->severity == MCE_SEV_ERROR_SYNC) {
423 /*
424 * If we have received a synchronous error when in userspace
425 * kill the task.
426 */
427 _exception(SIGBUS, regs, BUS_MCEERR_AR, regs->nip); 418 _exception(SIGBUS, regs, BUS_MCEERR_AR, regs->nip);
428 recovered = 1; 419 recovered = 1;
429 } 420 }
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 6901a06da2f9..e36738291c32 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -1775,17 +1775,20 @@ static u64 pnv_pci_ioda_dma_get_required_mask(struct pci_dev *pdev)
1775} 1775}
1776 1776
1777static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, 1777static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe,
1778 struct pci_bus *bus) 1778 struct pci_bus *bus,
1779 bool add_to_group)
1779{ 1780{
1780 struct pci_dev *dev; 1781 struct pci_dev *dev;
1781 1782
1782 list_for_each_entry(dev, &bus->devices, bus_list) { 1783 list_for_each_entry(dev, &bus->devices, bus_list) {
1783 set_iommu_table_base(&dev->dev, pe->table_group.tables[0]); 1784 set_iommu_table_base(&dev->dev, pe->table_group.tables[0]);
1784 set_dma_offset(&dev->dev, pe->tce_bypass_base); 1785 set_dma_offset(&dev->dev, pe->tce_bypass_base);
1785 iommu_add_device(&dev->dev); 1786 if (add_to_group)
1787 iommu_add_device(&dev->dev);
1786 1788
1787 if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate) 1789 if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate)
1788 pnv_ioda_setup_bus_dma(pe, dev->subordinate); 1790 pnv_ioda_setup_bus_dma(pe, dev->subordinate,
1791 add_to_group);
1789 } 1792 }
1790} 1793}
1791 1794
@@ -2191,7 +2194,7 @@ found:
2191 set_iommu_table_base(&pe->pdev->dev, tbl); 2194 set_iommu_table_base(&pe->pdev->dev, tbl);
2192 iommu_add_device(&pe->pdev->dev); 2195 iommu_add_device(&pe->pdev->dev);
2193 } else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)) 2196 } else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))
2194 pnv_ioda_setup_bus_dma(pe, pe->pbus); 2197 pnv_ioda_setup_bus_dma(pe, pe->pbus, true);
2195 2198
2196 return; 2199 return;
2197 fail: 2200 fail:
@@ -2426,6 +2429,8 @@ static void pnv_ioda2_take_ownership(struct iommu_table_group *table_group)
2426 2429
2427 pnv_pci_ioda2_set_bypass(pe, false); 2430 pnv_pci_ioda2_set_bypass(pe, false);
2428 pnv_pci_ioda2_unset_window(&pe->table_group, 0); 2431 pnv_pci_ioda2_unset_window(&pe->table_group, 0);
2432 if (pe->pbus)
2433 pnv_ioda_setup_bus_dma(pe, pe->pbus, false);
2429 pnv_ioda2_table_free(tbl); 2434 pnv_ioda2_table_free(tbl);
2430} 2435}
2431 2436
@@ -2435,6 +2440,8 @@ static void pnv_ioda2_release_ownership(struct iommu_table_group *table_group)
2435 table_group); 2440 table_group);
2436 2441
2437 pnv_pci_ioda2_setup_default_config(pe); 2442 pnv_pci_ioda2_setup_default_config(pe);
2443 if (pe->pbus)
2444 pnv_ioda_setup_bus_dma(pe, pe->pbus, false);
2438} 2445}
2439 2446
2440static struct iommu_table_group_ops pnv_pci_ioda2_ops = { 2447static struct iommu_table_group_ops pnv_pci_ioda2_ops = {
@@ -2624,6 +2631,9 @@ static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
2624 level_shift = entries_shift + 3; 2631 level_shift = entries_shift + 3;
2625 level_shift = max_t(unsigned, level_shift, PAGE_SHIFT); 2632 level_shift = max_t(unsigned, level_shift, PAGE_SHIFT);
2626 2633
2634 if ((level_shift - 3) * levels + page_shift >= 60)
2635 return -EINVAL;
2636
2627 /* Allocate TCE table */ 2637 /* Allocate TCE table */
2628 addr = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift, 2638 addr = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift,
2629 levels, tce_table_size, &offset, &total_allocated); 2639 levels, tce_table_size, &offset, &total_allocated);
@@ -2728,7 +2738,7 @@ static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
2728 if (pe->flags & PNV_IODA_PE_DEV) 2738 if (pe->flags & PNV_IODA_PE_DEV)
2729 iommu_add_device(&pe->pdev->dev); 2739 iommu_add_device(&pe->pdev->dev);
2730 else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)) 2740 else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))
2731 pnv_ioda_setup_bus_dma(pe, pe->pbus); 2741 pnv_ioda_setup_bus_dma(pe, pe->pbus, true);
2732} 2742}
2733 2743
2734#ifdef CONFIG_PCI_MSI 2744#ifdef CONFIG_PCI_MSI
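The new bound in the TCE table allocator caps the addressable window: each level of the table holds 2^(level_shift - 3) eight-byte TCEs, so a multi-level table reaches (level_shift - 3) * levels + page_shift bits of DMA space. A worked example of the check (values illustrative):

/*
 * level_shift = 16  -> 2^(16 - 3) = 8192 TCEs per level
 * levels = 3, page_shift = 12:
 *   (16 - 3) * 3 + 12 = 51 bits  -> below 60, accepted
 * levels = 5:
 *   (16 - 3) * 5 + 12 = 77 bits  -> >= 60, now rejected with -EINVAL
 */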
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 251060cf1713..8b1fe895daa3 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -751,7 +751,9 @@ void __init hpte_init_pseries(void)
751 mmu_hash_ops.flush_hash_range = pSeries_lpar_flush_hash_range; 751 mmu_hash_ops.flush_hash_range = pSeries_lpar_flush_hash_range;
752 mmu_hash_ops.hpte_clear_all = pseries_hpte_clear_all; 752 mmu_hash_ops.hpte_clear_all = pseries_hpte_clear_all;
753 mmu_hash_ops.hugepage_invalidate = pSeries_lpar_hugepage_invalidate; 753 mmu_hash_ops.hugepage_invalidate = pSeries_lpar_hugepage_invalidate;
754 mmu_hash_ops.resize_hpt = pseries_lpar_resize_hpt; 754
755 if (firmware_has_feature(FW_FEATURE_HPT_RESIZE))
756 mmu_hash_ops.resize_hpt = pseries_lpar_resize_hpt;
755} 757}
756 758
757void radix_init_pseries(void) 759void radix_init_pseries(void)
diff --git a/arch/s390/boot/compressed/misc.c b/arch/s390/boot/compressed/misc.c
index fa95041fa9f6..33ca29333e18 100644
--- a/arch/s390/boot/compressed/misc.c
+++ b/arch/s390/boot/compressed/misc.c
@@ -141,31 +141,34 @@ static void check_ipl_parmblock(void *start, unsigned long size)
141 141
142unsigned long decompress_kernel(void) 142unsigned long decompress_kernel(void)
143{ 143{
144 unsigned long output_addr; 144 void *output, *kernel_end;
145 unsigned char *output;
146 145
147 output_addr = ((unsigned long) &_end + HEAP_SIZE + 4095UL) & -4096UL; 146 output = (void *) ALIGN((unsigned long) &_end + HEAP_SIZE, PAGE_SIZE);
148 check_ipl_parmblock((void *) 0, output_addr + SZ__bss_start); 147 kernel_end = output + SZ__bss_start;
149 memset(&_bss, 0, &_ebss - &_bss); 148 check_ipl_parmblock((void *) 0, (unsigned long) kernel_end);
150 free_mem_ptr = (unsigned long)&_end;
151 free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
152 output = (unsigned char *) output_addr;
153 149
154#ifdef CONFIG_BLK_DEV_INITRD 150#ifdef CONFIG_BLK_DEV_INITRD
155 /* 151 /*
156 * Move the initrd right behind the end of the decompressed 152 * Move the initrd right behind the end of the decompressed
157 * kernel image. 153 * kernel image. This also prevents initrd corruption caused by
154 * bss clearing since kernel_end will always be located behind the
 155 * current bss section.
158 */ 156 */
159 if (INITRD_START && INITRD_SIZE && 157 if (INITRD_START && INITRD_SIZE && kernel_end > (void *) INITRD_START) {
160 INITRD_START < (unsigned long) output + SZ__bss_start) { 158 check_ipl_parmblock(kernel_end, INITRD_SIZE);
161 check_ipl_parmblock(output + SZ__bss_start, 159 memmove(kernel_end, (void *) INITRD_START, INITRD_SIZE);
162 INITRD_START + INITRD_SIZE); 160 INITRD_START = (unsigned long) kernel_end;
163 memmove(output + SZ__bss_start,
164 (void *) INITRD_START, INITRD_SIZE);
165 INITRD_START = (unsigned long) output + SZ__bss_start;
166 } 161 }
167#endif 162#endif
168 163
164 /*
165 * Clear bss section. free_mem_ptr and free_mem_end_ptr need to be
166 * initialized afterwards since they reside in bss.
167 */
168 memset(&_bss, 0, &_ebss - &_bss);
169 free_mem_ptr = (unsigned long) &_end;
170 free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
171
169 puts("Uncompressing Linux... "); 172 puts("Uncompressing Linux... ");
170 __decompress(input_data, input_len, NULL, NULL, output, 0, NULL, error); 173 __decompress(input_data, input_len, NULL, NULL, output, 0, NULL, error);
171 puts("Ok, booting the kernel.\n"); 174 puts("Ok, booting the kernel.\n");
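The open-coded rounding `(x + 4095UL) & -4096UL` becomes ALIGN(x, PAGE_SIZE); for a power-of-two alignment the two are identical:

/*
 * ALIGN(x, a) rounds x up to the next multiple of a (a power of two):
 *   ALIGN(x, 4096) == (x + 4095) & ~4095 == (x + 4095UL) & -4096UL
 * e.g. ALIGN(0x10001, 0x1000) == 0x11000
 *      ALIGN(0x10000, 0x1000) == 0x10000 (already aligned)
 */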
diff --git a/arch/s390/include/asm/sections.h b/arch/s390/include/asm/sections.h
index 5ce29fe100ba..fbd9116eb17b 100644
--- a/arch/s390/include/asm/sections.h
+++ b/arch/s390/include/asm/sections.h
@@ -4,6 +4,5 @@
4#include <asm-generic/sections.h> 4#include <asm-generic/sections.h>
5 5
6extern char _eshared[], _ehead[]; 6extern char _eshared[], _ehead[];
7extern char __start_ro_after_init[], __end_ro_after_init[];
8 7
9#endif 8#endif
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index 136932ff4250..3ea1554d04b3 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -147,7 +147,7 @@ unsigned long __must_check __copy_to_user(void __user *to, const void *from,
147 " jg 2b\n" \ 147 " jg 2b\n" \
148 ".popsection\n" \ 148 ".popsection\n" \
149 EX_TABLE(0b,3b) EX_TABLE(1b,3b) \ 149 EX_TABLE(0b,3b) EX_TABLE(1b,3b) \
150 : "=d" (__rc), "=Q" (*(to)) \ 150 : "=d" (__rc), "+Q" (*(to)) \
151 : "d" (size), "Q" (*(from)), \ 151 : "d" (size), "Q" (*(from)), \
152 "d" (__reg0), "K" (-EFAULT) \ 152 "d" (__reg0), "K" (-EFAULT) \
153 : "cc"); \ 153 : "cc"); \
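The one-character constraint change matters for GCC inline asm: "=Q" declares *(to) write-only, so the compiler may treat its previous contents as dead, while "+Q" marks it read-write. Since this asm can fault and leave the destination untouched, the old value must not be discarded. A minimal sketch of the distinction (generic, not the s390 macro itself):

/*
 *   asm("..." : "=Q" (*to) : ...);  // compiler: old *to is dead, may
 *                                   // elide an earlier store to it
 *   asm("..." : "+Q" (*to) : ...);  // compiler: *to is read and written
 *
 * With "=Q", if the asm faults before writing, *to can end up holding
 * whatever the optimizer left there; "+Q" keeps the prior value live.
 */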
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 47a973b5b4f1..5dab859b0d54 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -909,13 +909,11 @@ void __init smp_prepare_boot_cpu(void)
909{ 909{
910 struct pcpu *pcpu = pcpu_devices; 910 struct pcpu *pcpu = pcpu_devices;
911 911
912 WARN_ON(!cpu_present(0) || !cpu_online(0));
912 pcpu->state = CPU_STATE_CONFIGURED; 913 pcpu->state = CPU_STATE_CONFIGURED;
913 pcpu->address = stap();
914 pcpu->lowcore = (struct lowcore *)(unsigned long) store_prefix(); 914 pcpu->lowcore = (struct lowcore *)(unsigned long) store_prefix();
915 S390_lowcore.percpu_offset = __per_cpu_offset[0]; 915 S390_lowcore.percpu_offset = __per_cpu_offset[0];
916 smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN); 916 smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN);
917 set_cpu_present(0, true);
918 set_cpu_online(0, true);
919} 917}
920 918
921void __init smp_cpus_done(unsigned int max_cpus) 919void __init smp_cpus_done(unsigned int max_cpus)
@@ -924,6 +922,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
924 922
925void __init smp_setup_processor_id(void) 923void __init smp_setup_processor_id(void)
926{ 924{
925 pcpu_devices[0].address = stap();
927 S390_lowcore.cpu_nr = 0; 926 S390_lowcore.cpu_nr = 0;
928 S390_lowcore.spinlock_lockval = arch_spin_lockval(0); 927 S390_lowcore.spinlock_lockval = arch_spin_lockval(0);
929} 928}
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 5ccf95396251..72307f108c40 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -63,11 +63,9 @@ SECTIONS
63 63
64 . = ALIGN(PAGE_SIZE); 64 . = ALIGN(PAGE_SIZE);
65 __start_ro_after_init = .; 65 __start_ro_after_init = .;
66 __start_data_ro_after_init = .;
67 .data..ro_after_init : { 66 .data..ro_after_init : {
68 *(.data..ro_after_init) 67 *(.data..ro_after_init)
69 } 68 }
70 __end_data_ro_after_init = .;
71 EXCEPTION_TABLE(16) 69 EXCEPTION_TABLE(16)
72 . = ALIGN(PAGE_SIZE); 70 . = ALIGN(PAGE_SIZE);
73 __end_ro_after_init = .; 71 __end_ro_after_init = .;
diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
index d55c829a5944..ddbffb715b40 100644
--- a/arch/s390/kvm/gaccess.c
+++ b/arch/s390/kvm/gaccess.c
@@ -168,8 +168,7 @@ union page_table_entry {
168 unsigned long z : 1; /* Zero Bit */ 168 unsigned long z : 1; /* Zero Bit */
169 unsigned long i : 1; /* Page-Invalid Bit */ 169 unsigned long i : 1; /* Page-Invalid Bit */
170 unsigned long p : 1; /* DAT-Protection Bit */ 170 unsigned long p : 1; /* DAT-Protection Bit */
171 unsigned long co : 1; /* Change-Recording Override */ 171 unsigned long : 9;
172 unsigned long : 8;
173 }; 172 };
174}; 173};
175 174
@@ -745,8 +744,6 @@ static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
745 return PGM_PAGE_TRANSLATION; 744 return PGM_PAGE_TRANSLATION;
746 if (pte.z) 745 if (pte.z)
747 return PGM_TRANSLATION_SPEC; 746 return PGM_TRANSLATION_SPEC;
748 if (pte.co && !edat1)
749 return PGM_TRANSLATION_SPEC;
750 dat_protection |= pte.p; 747 dat_protection |= pte.p;
751 raddr.pfra = pte.pfra; 748 raddr.pfra = pte.pfra;
752real_address: 749real_address:
@@ -1182,7 +1179,7 @@ int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg,
1182 rc = gmap_read_table(sg->parent, pgt + vaddr.px * 8, &pte.val); 1179 rc = gmap_read_table(sg->parent, pgt + vaddr.px * 8, &pte.val);
1183 if (!rc && pte.i) 1180 if (!rc && pte.i)
1184 rc = PGM_PAGE_TRANSLATION; 1181 rc = PGM_PAGE_TRANSLATION;
1185 if (!rc && (pte.z || (pte.co && sg->edat_level < 1))) 1182 if (!rc && pte.z)
1186 rc = PGM_TRANSLATION_SPEC; 1183 rc = PGM_TRANSLATION_SPEC;
1187shadow_page: 1184shadow_page:
1188 pte.p |= dat_protection; 1185 pte.p |= dat_protection;
diff --git a/arch/sparc/include/asm/page_64.h b/arch/sparc/include/asm/page_64.h
index f294dd42fc7d..5961b2d8398a 100644
--- a/arch/sparc/include/asm/page_64.h
+++ b/arch/sparc/include/asm/page_64.h
@@ -17,6 +17,7 @@
17 17
18#define HPAGE_SHIFT 23 18#define HPAGE_SHIFT 23
19#define REAL_HPAGE_SHIFT 22 19#define REAL_HPAGE_SHIFT 22
20#define HPAGE_2GB_SHIFT 31
20#define HPAGE_256MB_SHIFT 28 21#define HPAGE_256MB_SHIFT 28
21#define HPAGE_64K_SHIFT 16 22#define HPAGE_64K_SHIFT 16
22#define REAL_HPAGE_SIZE (_AC(1,UL) << REAL_HPAGE_SHIFT) 23#define REAL_HPAGE_SIZE (_AC(1,UL) << REAL_HPAGE_SHIFT)
@@ -27,7 +28,7 @@
27#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) 28#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
28#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA 29#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
29#define REAL_HPAGE_PER_HPAGE (_AC(1,UL) << (HPAGE_SHIFT - REAL_HPAGE_SHIFT)) 30#define REAL_HPAGE_PER_HPAGE (_AC(1,UL) << (HPAGE_SHIFT - REAL_HPAGE_SHIFT))
30#define HUGE_MAX_HSTATE 3 31#define HUGE_MAX_HSTATE 4
31#endif 32#endif
32 33
33#ifndef __ASSEMBLY__ 34#ifndef __ASSEMBLY__
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 8a598528ec1f..6fbd931f0570 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -679,26 +679,27 @@ static inline unsigned long pmd_pfn(pmd_t pmd)
679 return pte_pfn(pte); 679 return pte_pfn(pte);
680} 680}
681 681
682#ifdef CONFIG_TRANSPARENT_HUGEPAGE 682#define __HAVE_ARCH_PMD_WRITE
683static inline unsigned long pmd_dirty(pmd_t pmd) 683static inline unsigned long pmd_write(pmd_t pmd)
684{ 684{
685 pte_t pte = __pte(pmd_val(pmd)); 685 pte_t pte = __pte(pmd_val(pmd));
686 686
687 return pte_dirty(pte); 687 return pte_write(pte);
688} 688}
689 689
690static inline unsigned long pmd_young(pmd_t pmd) 690#ifdef CONFIG_TRANSPARENT_HUGEPAGE
691static inline unsigned long pmd_dirty(pmd_t pmd)
691{ 692{
692 pte_t pte = __pte(pmd_val(pmd)); 693 pte_t pte = __pte(pmd_val(pmd));
693 694
694 return pte_young(pte); 695 return pte_dirty(pte);
695} 696}
696 697
697static inline unsigned long pmd_write(pmd_t pmd) 698static inline unsigned long pmd_young(pmd_t pmd)
698{ 699{
699 pte_t pte = __pte(pmd_val(pmd)); 700 pte_t pte = __pte(pmd_val(pmd));
700 701
701 return pte_write(pte); 702 return pte_young(pte);
702} 703}
703 704
704static inline unsigned long pmd_trans_huge(pmd_t pmd) 705static inline unsigned long pmd_trans_huge(pmd_t pmd)
diff --git a/arch/sparc/include/asm/processor_32.h b/arch/sparc/include/asm/processor_32.h
index 365d4cb267b4..dd27159819eb 100644
--- a/arch/sparc/include/asm/processor_32.h
+++ b/arch/sparc/include/asm/processor_32.h
@@ -18,12 +18,6 @@
18#include <asm/signal.h> 18#include <asm/signal.h>
19#include <asm/page.h> 19#include <asm/page.h>
20 20
21/*
22 * The sparc has no problems with write protection
23 */
24#define wp_works_ok 1
25#define wp_works_ok__is_a_macro /* for versions in ksyms.c */
26
27/* Whee, this is STACK_TOP + PAGE_SIZE and the lowest kernel address too... 21/* Whee, this is STACK_TOP + PAGE_SIZE and the lowest kernel address too...
28 * That one page is used to protect kernel from intruders, so that 22 * That one page is used to protect kernel from intruders, so that
29 * we can make our access_ok test faster 23 * we can make our access_ok test faster
diff --git a/arch/sparc/include/asm/processor_64.h b/arch/sparc/include/asm/processor_64.h
index 6448cfc8292f..b58ee9018433 100644
--- a/arch/sparc/include/asm/processor_64.h
+++ b/arch/sparc/include/asm/processor_64.h
@@ -18,10 +18,6 @@
18#include <asm/ptrace.h> 18#include <asm/ptrace.h>
19#include <asm/page.h> 19#include <asm/page.h>
20 20
21/* The sparc has no problems with write protection */
22#define wp_works_ok 1
23#define wp_works_ok__is_a_macro /* for versions in ksyms.c */
24
25/* 21/*
26 * User lives in his very own context, and cannot reference us. Note 22 * User lives in his very own context, and cannot reference us. Note
27 * that TASK_SIZE is a misnomer, it really gives maximum user virtual 23 * that TASK_SIZE is a misnomer, it really gives maximum user virtual
diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S
index 6aa3da152c20..44101196d02b 100644
--- a/arch/sparc/kernel/head_64.S
+++ b/arch/sparc/kernel/head_64.S
@@ -96,6 +96,7 @@ sparc64_boot:
96 andn %g1, PSTATE_AM, %g1 96 andn %g1, PSTATE_AM, %g1
97 wrpr %g1, 0x0, %pstate 97 wrpr %g1, 0x0, %pstate
98 ba,a,pt %xcc, 1f 98 ba,a,pt %xcc, 1f
99 nop
99 100
100 .globl prom_finddev_name, prom_chosen_path, prom_root_node 101 .globl prom_finddev_name, prom_chosen_path, prom_root_node
101 .globl prom_getprop_name, prom_mmu_name, prom_peer_name 102 .globl prom_getprop_name, prom_mmu_name, prom_peer_name
@@ -613,6 +614,7 @@ niagara_tlb_fixup:
613 nop 614 nop
614 615
615 ba,a,pt %xcc, 80f 616 ba,a,pt %xcc, 80f
617 nop
616niagara4_patch: 618niagara4_patch:
617 call niagara4_patch_copyops 619 call niagara4_patch_copyops
618 nop 620 nop
@@ -622,6 +624,7 @@ niagara4_patch:
622 nop 624 nop
623 625
624 ba,a,pt %xcc, 80f 626 ba,a,pt %xcc, 80f
627 nop
625 628
626niagara2_patch: 629niagara2_patch:
627 call niagara2_patch_copyops 630 call niagara2_patch_copyops
@@ -632,6 +635,7 @@ niagara2_patch:
632 nop 635 nop
633 636
634 ba,a,pt %xcc, 80f 637 ba,a,pt %xcc, 80f
638 nop
635 639
636niagara_patch: 640niagara_patch:
637 call niagara_patch_copyops 641 call niagara_patch_copyops
diff --git a/arch/sparc/kernel/misctrap.S b/arch/sparc/kernel/misctrap.S
index 34b4933900bf..9276d2f0dd86 100644
--- a/arch/sparc/kernel/misctrap.S
+++ b/arch/sparc/kernel/misctrap.S
@@ -82,6 +82,7 @@ do_stdfmna:
82 call handle_stdfmna 82 call handle_stdfmna
83 add %sp, PTREGS_OFF, %o0 83 add %sp, PTREGS_OFF, %o0
84 ba,a,pt %xcc, rtrap 84 ba,a,pt %xcc, rtrap
85 nop
85 .size do_stdfmna,.-do_stdfmna 86 .size do_stdfmna,.-do_stdfmna
86 87
87 .type breakpoint_trap,#function 88 .type breakpoint_trap,#function
diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
index df9e731a76f5..fc5124ccdb53 100644
--- a/arch/sparc/kernel/ptrace_64.c
+++ b/arch/sparc/kernel/ptrace_64.c
@@ -351,7 +351,7 @@ static int genregs64_set(struct task_struct *target,
351 } 351 }
352 352
353 if (!ret) { 353 if (!ret) {
354 unsigned long y; 354 unsigned long y = regs->y;
355 355
356 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, 356 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
357 &y, 357 &y,
diff --git a/arch/sparc/kernel/rtrap_64.S b/arch/sparc/kernel/rtrap_64.S
index 216948ca4382..709a82ebd294 100644
--- a/arch/sparc/kernel/rtrap_64.S
+++ b/arch/sparc/kernel/rtrap_64.S
@@ -237,6 +237,7 @@ rt_continue: ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1
237 bne,pt %xcc, user_rtt_fill_32bit 237 bne,pt %xcc, user_rtt_fill_32bit
238 wrpr %g1, %cwp 238 wrpr %g1, %cwp
239 ba,a,pt %xcc, user_rtt_fill_64bit 239 ba,a,pt %xcc, user_rtt_fill_64bit
240 nop
240 241
241user_rtt_fill_fixup_dax: 242user_rtt_fill_fixup_dax:
242 ba,pt %xcc, user_rtt_fill_fixup_common 243 ba,pt %xcc, user_rtt_fill_fixup_common
diff --git a/arch/sparc/kernel/spiterrs.S b/arch/sparc/kernel/spiterrs.S
index 4a73009f66a5..d7e540842809 100644
--- a/arch/sparc/kernel/spiterrs.S
+++ b/arch/sparc/kernel/spiterrs.S
@@ -86,6 +86,7 @@ __spitfire_cee_trap_continue:
86 rd %pc, %g7 86 rd %pc, %g7
87 87
88 ba,a,pt %xcc, 2f 88 ba,a,pt %xcc, 2f
89 nop
89 90
901: ba,pt %xcc, etrap_irq 911: ba,pt %xcc, etrap_irq
91 rd %pc, %g7 92 rd %pc, %g7
diff --git a/arch/sparc/kernel/sun4v_tlb_miss.S b/arch/sparc/kernel/sun4v_tlb_miss.S
index 6179e19bc9b9..c19f352f46c7 100644
--- a/arch/sparc/kernel/sun4v_tlb_miss.S
+++ b/arch/sparc/kernel/sun4v_tlb_miss.S
@@ -352,6 +352,7 @@ sun4v_mna:
352 call sun4v_do_mna 352 call sun4v_do_mna
353 add %sp, PTREGS_OFF, %o0 353 add %sp, PTREGS_OFF, %o0
354 ba,a,pt %xcc, rtrap 354 ba,a,pt %xcc, rtrap
355 nop
355 356
356 /* Privileged Action. */ 357 /* Privileged Action. */
357sun4v_privact: 358sun4v_privact:
diff --git a/arch/sparc/kernel/urtt_fill.S b/arch/sparc/kernel/urtt_fill.S
index 5604a2b051d4..364af3250646 100644
--- a/arch/sparc/kernel/urtt_fill.S
+++ b/arch/sparc/kernel/urtt_fill.S
@@ -92,6 +92,7 @@ user_rtt_fill_fixup_common:
92 call sun4v_data_access_exception 92 call sun4v_data_access_exception
93 nop 93 nop
94 ba,a,pt %xcc, rtrap 94 ba,a,pt %xcc, rtrap
95 nop
95 96
961: call spitfire_data_access_exception 971: call spitfire_data_access_exception
97 nop 98 nop
diff --git a/arch/sparc/kernel/winfixup.S b/arch/sparc/kernel/winfixup.S
index 855019a8590e..1ee173cc3c39 100644
--- a/arch/sparc/kernel/winfixup.S
+++ b/arch/sparc/kernel/winfixup.S
@@ -152,6 +152,8 @@ fill_fixup_dax:
152 call sun4v_data_access_exception 152 call sun4v_data_access_exception
153 nop 153 nop
154 ba,a,pt %xcc, rtrap 154 ba,a,pt %xcc, rtrap
155 nop
1551: call spitfire_data_access_exception 1561: call spitfire_data_access_exception
156 nop 157 nop
157 ba,a,pt %xcc, rtrap 158 ba,a,pt %xcc, rtrap
159 nop
diff --git a/arch/sparc/lib/NG2memcpy.S b/arch/sparc/lib/NG2memcpy.S
index c629dbd121b6..64dcd6cdb606 100644
--- a/arch/sparc/lib/NG2memcpy.S
+++ b/arch/sparc/lib/NG2memcpy.S
@@ -326,11 +326,13 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
326 blu 170f 326 blu 170f
327 nop 327 nop
328 ba,a,pt %xcc, 180f 328 ba,a,pt %xcc, 180f
329 nop
329 330
3304: /* 32 <= low bits < 48 */ 3314: /* 32 <= low bits < 48 */
331 blu 150f 332 blu 150f
332 nop 333 nop
333 ba,a,pt %xcc, 160f 334 ba,a,pt %xcc, 160f
335 nop
3345: /* 0 < low bits < 32 */ 3365: /* 0 < low bits < 32 */
335 blu,a 6f 337 blu,a 6f
336 cmp %g2, 8 338 cmp %g2, 8
@@ -338,6 +340,7 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
338 blu 130f 340 blu 130f
339 nop 341 nop
340 ba,a,pt %xcc, 140f 342 ba,a,pt %xcc, 140f
343 nop
3416: /* 0 < low bits < 16 */ 3446: /* 0 < low bits < 16 */
342 bgeu 120f 345 bgeu 120f
343 nop 346 nop
@@ -475,6 +478,7 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
475 brz,pt %o2, 85f 478 brz,pt %o2, 85f
476 sub %o0, %o1, GLOBAL_SPARE 479 sub %o0, %o1, GLOBAL_SPARE
477 ba,a,pt %XCC, 90f 480 ba,a,pt %XCC, 90f
481 nop
478 482
479 .align 64 483 .align 64
48075: /* 16 < len <= 64 */ 48475: /* 16 < len <= 64 */
diff --git a/arch/sparc/lib/NG4memcpy.S b/arch/sparc/lib/NG4memcpy.S
index 75bb93b1437f..78ea962edcbe 100644
--- a/arch/sparc/lib/NG4memcpy.S
+++ b/arch/sparc/lib/NG4memcpy.S
@@ -530,4 +530,5 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
530 bne,pt %icc, 1b 530 bne,pt %icc, 1b
531 EX_ST(STORE(stb, %g1, %o0 - 0x01), NG4_retl_o2_plus_1) 531 EX_ST(STORE(stb, %g1, %o0 - 0x01), NG4_retl_o2_plus_1)
532 ba,a,pt %icc, .Lexit 532 ba,a,pt %icc, .Lexit
533 nop
533 .size FUNC_NAME, .-FUNC_NAME 534 .size FUNC_NAME, .-FUNC_NAME
diff --git a/arch/sparc/lib/NG4memset.S b/arch/sparc/lib/NG4memset.S
index 41da4bdd95cb..7c0c81f18837 100644
--- a/arch/sparc/lib/NG4memset.S
+++ b/arch/sparc/lib/NG4memset.S
@@ -102,4 +102,5 @@ NG4bzero:
102 bne,pt %icc, 1b 102 bne,pt %icc, 1b
103 add %o0, 0x30, %o0 103 add %o0, 0x30, %o0
104 ba,a,pt %icc, .Lpostloop 104 ba,a,pt %icc, .Lpostloop
105 nop
105 .size NG4bzero,.-NG4bzero 106 .size NG4bzero,.-NG4bzero
diff --git a/arch/sparc/lib/NGmemcpy.S b/arch/sparc/lib/NGmemcpy.S
index d88c4ed50a00..cd654a719b27 100644
--- a/arch/sparc/lib/NGmemcpy.S
+++ b/arch/sparc/lib/NGmemcpy.S
@@ -394,6 +394,7 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */
394 brz,pt %i2, 85f 394 brz,pt %i2, 85f
395 sub %o0, %i1, %i3 395 sub %o0, %i1, %i3
396 ba,a,pt %XCC, 90f 396 ba,a,pt %XCC, 90f
397 nop
397 398
398 .align 64 399 .align 64
39970: /* 16 < len <= 64 */ 40070: /* 16 < len <= 64 */
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
index 323bc6b6e3ad..ee5273ad918d 100644
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -143,6 +143,10 @@ static pte_t sun4v_hugepage_shift_to_tte(pte_t entry, unsigned int shift)
 	pte_val(entry) = pte_val(entry) & ~_PAGE_SZALL_4V;
 
 	switch (shift) {
+	case HPAGE_2GB_SHIFT:
+		hugepage_size = _PAGE_SZ2GB_4V;
+		pte_val(entry) |= _PAGE_PMD_HUGE;
+		break;
 	case HPAGE_256MB_SHIFT:
 		hugepage_size = _PAGE_SZ256MB_4V;
 		pte_val(entry) |= _PAGE_PMD_HUGE;
@@ -183,6 +187,9 @@ static unsigned int sun4v_huge_tte_to_shift(pte_t entry)
 	unsigned int shift;
 
 	switch (tte_szbits) {
+	case _PAGE_SZ2GB_4V:
+		shift = HPAGE_2GB_SHIFT;
+		break;
 	case _PAGE_SZ256MB_4V:
 		shift = HPAGE_256MB_SHIFT;
 		break;
@@ -261,7 +268,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
 	if (!pmd)
 		return NULL;
 
-	if (sz == PMD_SHIFT)
+	if (sz >= PMD_SIZE)
 		pte = (pte_t *)pmd;
 	else
 		pte = pte_alloc_map(mm, pmd, addr);
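
The switch from `sz == PMD_SHIFT` to `sz >= PMD_SIZE` is what routes the new
2GB pages (and the existing PMD-sized ones) to the PMD slot; the old test
compared a byte count against a shift count. A standalone sketch with
illustrative sparc64 constants (the values are assumptions, not taken from
this patch):

#include <stdio.h>

#define PMD_SHIFT	23UL			/* assumed sparc64 value */
#define PMD_SIZE	(1UL << PMD_SHIFT)	/* 8 MB                  */
#define HPAGE_2GB	(1UL << 31)		/* the new 2 GB page     */

int main(void)
{
	unsigned long sz = HPAGE_2GB;

	/* old test: a byte count is never equal to a shift count */
	printf("sz == PMD_SHIFT: %d\n", sz == (unsigned long)PMD_SHIFT);
	/* new test: anything PMD-sized or larger maps at the pmd slot */
	printf("sz >= PMD_SIZE : %d\n", sz >= PMD_SIZE);
	return 0;
}
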
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index ccd455328989..0cda653ae007 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -337,6 +337,10 @@ static int __init setup_hugepagesz(char *string)
 	hugepage_shift = ilog2(hugepage_size);
 
 	switch (hugepage_shift) {
+	case HPAGE_2GB_SHIFT:
+		hv_pgsz_mask = HV_PGSZ_MASK_2GB;
+		hv_pgsz_idx = HV_PGSZ_IDX_2GB;
+		break;
 	case HPAGE_256MB_SHIFT:
 		hv_pgsz_mask = HV_PGSZ_MASK_256MB;
 		hv_pgsz_idx = HV_PGSZ_IDX_256MB;
@@ -1563,7 +1567,7 @@ bool kern_addr_valid(unsigned long addr)
 	if ((long)addr < 0L) {
 		unsigned long pa = __pa(addr);
 
-		if ((addr >> max_phys_bits) != 0UL)
+		if ((pa >> max_phys_bits) != 0UL)
 			return false;
 
 		return pfn_valid(pa >> PAGE_SHIFT);
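
The shift has to be applied to the physical address: kernel virtual
addresses are sign-extended, so `addr >> max_phys_bits` is nonzero for every
address reaching this branch and the old test rejected all of them. Worked
numbers, assuming max_phys_bits = 47 (an illustrative value):

#include <stdio.h>

int main(void)
{
	unsigned long max_phys_bits = 47;		/* assumed      */
	unsigned long addr = 0xfffff80000001000UL;	/* a kernel VA  */
	unsigned long pa   = 0x0000000000001000UL;	/* its __pa()   */

	printf("addr >> 47 = %#lx (nonzero for any sign-extended VA)\n",
	       addr >> max_phys_bits);
	printf("pa   >> 47 = %#lx (zero iff pa is addressable)\n",
	       pa >> max_phys_bits);
	return 0;
}
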
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index def82f6d626f..8e76ebba2986 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -54,6 +54,7 @@
 enum mbus_module srmmu_modtype;
 static unsigned int hwbug_bitmask;
 int vac_cache_size;
+EXPORT_SYMBOL(vac_cache_size);
 int vac_line_size;
 
 extern struct resource sparc_iomap;
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
index afda3bbf7854..ee8066c3d96c 100644
--- a/arch/sparc/mm/tlb.c
+++ b/arch/sparc/mm/tlb.c
@@ -154,7 +154,7 @@ static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
 		if (pte_val(*pte) & _PAGE_VALID) {
 			bool exec = pte_exec(*pte);
 
-			tlb_batch_add_one(mm, vaddr, exec, false);
+			tlb_batch_add_one(mm, vaddr, exec, PAGE_SHIFT);
 		}
 		pte++;
 		vaddr += PAGE_SIZE;
@@ -209,9 +209,9 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 			pte_t orig_pte = __pte(pmd_val(orig));
 			bool exec = pte_exec(orig_pte);
 
-			tlb_batch_add_one(mm, addr, exec, true);
-			tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec,
-					  true);
+			tlb_batch_add_one(mm, addr, exec, REAL_HPAGE_SHIFT);
+			tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec,
+					  REAL_HPAGE_SHIFT);
 		} else {
 			tlb_batch_pmd_scan(mm, addr, orig);
 		}
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
index 0a04811f06b7..bedf08b22a47 100644
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -122,7 +122,7 @@ void flush_tsb_user(struct tlb_batch *tb)
 
 	spin_lock_irqsave(&mm->context.lock, flags);
 
-	if (tb->hugepage_shift < HPAGE_SHIFT) {
+	if (tb->hugepage_shift < REAL_HPAGE_SHIFT) {
 		base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
 		nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
 		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
@@ -155,7 +155,7 @@ void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr,
 
 	spin_lock_irqsave(&mm->context.lock, flags);
 
-	if (hugepage_shift < HPAGE_SHIFT) {
+	if (hugepage_shift < REAL_HPAGE_SHIFT) {
 		base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
 		nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
 		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
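
Together with the tlb.c change above, flushes are now tagged with a
page-size shift rather than a bool, and the TSB is chosen by comparing
against REAL_HPAGE_SHIFT. A worked sketch under assumed sparc64 shifts
(8K base pages; the constants are illustrative, not from this patch):

#include <stdio.h>

#define PAGE_SHIFT		13	/* assumed: 8K base pages     */
#define REAL_HPAGE_SHIFT	22	/* assumed: one 4MB TLB entry */
#define HPAGE_SHIFT		23	/* assumed: full 8MB page     */

static const char *tsb_for(unsigned int hugepage_shift)
{
	/* the new test in flush_tsb_user()/flush_tsb_user_page() */
	return hugepage_shift < REAL_HPAGE_SHIFT ? "MM_TSB_BASE"
						 : "MM_TSB_HUGE";
}

int main(void)
{
	printf("shift %2d -> %s\n", PAGE_SHIFT, tsb_for(PAGE_SHIFT));
	/* with the old '< HPAGE_SHIFT' test, shift 22 (< 23) would have
	 * selected the base TSB even though huge mappings live in the
	 * huge TSB */
	printf("shift %2d -> %s\n", REAL_HPAGE_SHIFT,
	       tsb_for(REAL_HPAGE_SHIFT));
	return 0;
}
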
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 2d449337a360..a94a4d10f2df 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -120,10 +120,6 @@ else
         # -funit-at-a-time shrinks the kernel .text considerably
         # unfortunately it makes reading oopses harder.
         KBUILD_CFLAGS += $(call cc-option,-funit-at-a-time)
-
-        # this works around some issues with generating unwind tables in older gccs
-        # newer gccs do it by default
-        KBUILD_CFLAGS += $(call cc-option,-maccumulate-outgoing-args)
 endif
 
 ifdef CONFIG_X86_X32
@@ -147,6 +143,37 @@ ifeq ($(CONFIG_KMEMCHECK),y)
 	KBUILD_CFLAGS += $(call cc-option,-fno-builtin-memcpy)
 endif
 
+#
+# If the function graph tracer is used with mcount instead of fentry,
+# '-maccumulate-outgoing-args' is needed to prevent a GCC bug
+# (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=42109)
+#
+ifdef CONFIG_FUNCTION_GRAPH_TRACER
+  ifndef CONFIG_HAVE_FENTRY
+	ACCUMULATE_OUTGOING_ARGS := 1
+  else
+    ifeq ($(call cc-option-yn, -mfentry), n)
+	ACCUMULATE_OUTGOING_ARGS := 1
+    endif
+  endif
+endif
+
+#
+# Jump labels need '-maccumulate-outgoing-args' for gcc < 4.5.2 to prevent a
+# GCC bug (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=46226). There's no way
+# to test for this bug at compile-time because the test case needs to execute,
+# which is a no-go for cross compilers. So check the GCC version instead.
+#
+ifdef CONFIG_JUMP_LABEL
+  ifneq ($(ACCUMULATE_OUTGOING_ARGS), 1)
+	ACCUMULATE_OUTGOING_ARGS = $(call cc-if-fullversion, -lt, 040502, 1)
+  endif
+endif
+
+ifeq ($(ACCUMULATE_OUTGOING_ARGS), 1)
+	KBUILD_CFLAGS += -maccumulate-outgoing-args
+endif
+
 # Stackpointer is addressed different for 32 bit and 64 bit x86
 sp-$(CONFIG_X86_32) := esp
 sp-$(CONFIG_X86_64) := rsp
diff --git a/arch/x86/Makefile_32.cpu b/arch/x86/Makefile_32.cpu
index 6647ed49c66c..a45eb15b7cf2 100644
--- a/arch/x86/Makefile_32.cpu
+++ b/arch/x86/Makefile_32.cpu
@@ -45,24 +45,6 @@ cflags-$(CONFIG_MGEODE_LX) += $(call cc-option,-march=geode,-march=pentium-mmx)
 # cpu entries
 cflags-$(CONFIG_X86_GENERIC) += $(call tune,generic,$(call tune,i686))
 
-# Work around the pentium-mmx code generator madness of gcc4.4.x which
-# does stack alignment by generating horrible code _before_ the mcount
-# prologue (push %ebp, mov %esp, %ebp) which breaks the function graph
-# tracer assumptions. For i686, generic, core2 this is set by the
-# compiler anyway
-ifeq ($(CONFIG_FUNCTION_GRAPH_TRACER), y)
-ADD_ACCUMULATE_OUTGOING_ARGS := y
-endif
-
-# Work around to a bug with asm goto with first implementations of it
-# in gcc causing gcc to mess up the push and pop of the stack in some
-# uses of asm goto.
-ifeq ($(CONFIG_JUMP_LABEL), y)
-ADD_ACCUMULATE_OUTGOING_ARGS := y
-endif
-
-cflags-$(ADD_ACCUMULATE_OUTGOING_ARGS) += $(call cc-option,-maccumulate-outgoing-args)
-
 # Bug fix for binutils: this option is required in order to keep
 # binutils from generating NOPL instructions against our will.
 ifneq ($(CONFIG_X86_P6_NOP),y)
diff --git a/arch/x86/boot/compressed/error.c b/arch/x86/boot/compressed/error.c
index 6248740b68b5..31922023de49 100644
--- a/arch/x86/boot/compressed/error.c
+++ b/arch/x86/boot/compressed/error.c
@@ -4,6 +4,7 @@
  * memcpy() and memmove() are defined for the compressed boot environment.
  */
 #include "misc.h"
+#include "error.h"
 
 void warn(char *m)
 {
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 349d4d17aa7f..580b60f5ac83 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2101,8 +2101,8 @@ static int x86_pmu_event_init(struct perf_event *event)
 
 static void refresh_pce(void *ignored)
 {
-	if (current->mm)
-		load_mm_cr4(current->mm);
+	if (current->active_mm)
+		load_mm_cr4(current->active_mm);
 }
 
 static void x86_pmu_event_mapped(struct perf_event *event)
@@ -2110,6 +2110,18 @@ static void x86_pmu_event_mapped(struct perf_event *event)
 	if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
 		return;
 
+	/*
+	 * This function relies on not being called concurrently in two
+	 * tasks in the same mm.  Otherwise one task could observe
+	 * perf_rdpmc_allowed > 1 and return all the way back to
+	 * userspace with CR4.PCE clear while another task is still
+	 * doing on_each_cpu_mask() to propagate CR4.PCE.
+	 *
+	 * For now, this can't happen because all callers hold mmap_sem
+	 * for write.  If this changes, we'll need a different solution.
+	 */
+	lockdep_assert_held_exclusive(&current->mm->mmap_sem);
+
 	if (atomic_inc_return(&current->mm->context.perf_rdpmc_allowed) == 1)
 		on_each_cpu_mask(mm_cpumask(current->mm), refresh_pce, NULL, 1);
 }
@@ -2244,6 +2256,7 @@ void arch_perf_update_userpage(struct perf_event *event,
 			       struct perf_event_mmap_page *userpg, u64 now)
 {
 	struct cyc2ns_data *data;
+	u64 offset;
 
 	userpg->cap_user_time = 0;
 	userpg->cap_user_time_zero = 0;
@@ -2251,11 +2264,13 @@ void arch_perf_update_userpage(struct perf_event *event,
 		!!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED);
 	userpg->pmc_width = x86_pmu.cntval_bits;
 
-	if (!sched_clock_stable())
+	if (!using_native_sched_clock() || !sched_clock_stable())
 		return;
 
 	data = cyc2ns_read_begin();
 
+	offset = data->cyc2ns_offset + __sched_clock_offset;
+
 	/*
 	 * Internal timekeeping for enabled/running/stopped times
 	 * is always in the local_clock domain.
@@ -2263,7 +2278,7 @@ void arch_perf_update_userpage(struct perf_event *event,
 	userpg->cap_user_time = 1;
 	userpg->time_mult = data->cyc2ns_mul;
 	userpg->time_shift = data->cyc2ns_shift;
-	userpg->time_offset = data->cyc2ns_offset - now;
+	userpg->time_offset = offset - now;
 
 	/*
 	 * cap_user_time_zero doesn't make sense when we're using a different
@@ -2271,7 +2286,7 @@ void arch_perf_update_userpage(struct perf_event *event,
 	 */
 	if (!event->attr.use_clockid) {
 		userpg->cap_user_time_zero = 1;
-		userpg->time_zero = data->cyc2ns_offset;
+		userpg->time_zero = offset;
 	}
 
 	cyc2ns_read_end(data);
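
Folding __sched_clock_offset into the exported offset keeps the mmap'ed
page consistent with local_clock(). For reference, a userspace-side sketch
of the conversion; the recipe follows the one documented in
include/uapi/linux/perf_event.h, and the struct is a trimmed stand-in for
perf_event_mmap_page:

#include <stdint.h>

struct mmap_page_sketch {	/* subset of perf_event_mmap_page */
	uint16_t time_shift;
	uint32_t time_mult;
	uint64_t time_offset;
};

static uint64_t tsc_delta_to_ns(const struct mmap_page_sketch *pc,
				uint64_t cyc)
{
	uint64_t quot = cyc >> pc->time_shift;
	uint64_t rem  = cyc & (((uint64_t)1 << pc->time_shift) - 1);

	/* time_offset now folds in __sched_clock_offset, so the result
	 * lands in the same domain as local_clock() */
	return pc->time_offset + quot * pc->time_mult +
	       ((rem * pc->time_mult) >> pc->time_shift);
}
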
diff --git a/arch/x86/include/asm/kvm_page_track.h b/arch/x86/include/asm/kvm_page_track.h
index d74747b031ec..c4eda791f877 100644
--- a/arch/x86/include/asm/kvm_page_track.h
+++ b/arch/x86/include/asm/kvm_page_track.h
@@ -46,6 +46,7 @@ struct kvm_page_track_notifier_node {
 };
 
 void kvm_page_track_init(struct kvm *kvm);
+void kvm_page_track_cleanup(struct kvm *kvm);
 
 void kvm_page_track_free_memslot(struct kvm_memory_slot *free,
 				 struct kvm_memory_slot *dont);
diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
index 72277b1028a5..50d35e3185f5 100644
--- a/arch/x86/include/asm/pgtable-3level.h
+++ b/arch/x86/include/asm/pgtable-3level.h
@@ -121,12 +121,9 @@ static inline void native_pmd_clear(pmd_t *pmd)
 	*(tmp + 1) = 0;
 }
 
-#if !defined(CONFIG_SMP) || (defined(CONFIG_HIGHMEM64G) && \
-			     defined(CONFIG_PARAVIRT))
 static inline void native_pud_clear(pud_t *pudp)
 {
 }
-#endif
 
 static inline void pud_clear(pud_t *pudp)
 {
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 1cfb36b8c024..585ee0d42d18 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -62,7 +62,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
 # define set_pud(pudp, pud)		native_set_pud(pudp, pud)
 #endif
 
-#ifndef __PAGETABLE_PMD_FOLDED
+#ifndef __PAGETABLE_PUD_FOLDED
 #define pud_clear(pud)			native_pud_clear(pud)
 #endif
 
diff --git a/arch/x86/include/asm/timer.h b/arch/x86/include/asm/timer.h
index a04eabd43d06..27e9f9d769b8 100644
--- a/arch/x86/include/asm/timer.h
+++ b/arch/x86/include/asm/timer.h
@@ -12,6 +12,8 @@ extern int recalibrate_cpu_khz(void);
 
 extern int no_timer_check;
 
+extern bool using_native_sched_clock(void);
+
 /*
  * We use the full linear equation: f(x) = a + b*x, in order to allow
  * a continuous function in the face of dynamic freq changes.
diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h
index 72e8300b1e8a..9cffb44a3cf5 100644
--- a/arch/x86/include/asm/uv/uv_hub.h
+++ b/arch/x86/include/asm/uv/uv_hub.h
@@ -485,15 +485,17 @@ static inline unsigned long uv_soc_phys_ram_to_gpa(unsigned long paddr)
 
 	if (paddr < uv_hub_info->lowmem_remap_top)
 		paddr |= uv_hub_info->lowmem_remap_base;
-	paddr |= uv_hub_info->gnode_upper;
-	if (m_val)
+
+	if (m_val) {
+		paddr |= uv_hub_info->gnode_upper;
 		paddr = ((paddr << uv_hub_info->m_shift)
 			>> uv_hub_info->m_shift) |
 			((paddr >> uv_hub_info->m_val)
 			<< uv_hub_info->n_lshift);
-	else
+	} else {
 		paddr |= uv_soc_phys_ram_to_nasid(paddr)
 			<< uv_hub_info->gpa_shift;
+	}
 	return paddr;
 }
 
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index ae32838cac5f..b2879cc23db4 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -179,10 +179,15 @@ static int acpi_register_lapic(int id, u32 acpiid, u8 enabled)
 		return -EINVAL;
 	}
 
+	if (!enabled) {
+		++disabled_cpus;
+		return -EINVAL;
+	}
+
 	if (boot_cpu_physical_apicid != -1U)
 		ver = boot_cpu_apic_version;
 
-	cpu = __generic_processor_info(id, ver, enabled);
+	cpu = generic_processor_info(id, ver);
 	if (cpu >= 0)
 		early_per_cpu(x86_cpu_to_acpiid, cpu) = acpiid;
 
@@ -710,7 +715,7 @@ static void __init acpi_set_irq_model_ioapic(void)
 #ifdef CONFIG_ACPI_HOTPLUG_CPU
 #include <acpi/processor.h>
 
-int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
+static int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
 {
 #ifdef CONFIG_ACPI_NUMA
 	int nid;
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index aee7deddabd0..8ccb7ef512e0 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -2063,7 +2063,7 @@ static int allocate_logical_cpuid(int apicid)
 	return nr_logical_cpuids++;
 }
 
-int __generic_processor_info(int apicid, int version, bool enabled)
+int generic_processor_info(int apicid, int version)
 {
 	int cpu, max = nr_cpu_ids;
 	bool boot_cpu_detected = physid_isset(boot_cpu_physical_apicid,
@@ -2121,11 +2121,9 @@ int __generic_processor_info(int apicid, int version, bool enabled)
 	if (num_processors >= nr_cpu_ids) {
 		int thiscpu = max + disabled_cpus;
 
-		if (enabled) {
-			pr_warning("APIC: NR_CPUS/possible_cpus limit of %i "
-				   "reached. Processor %d/0x%x ignored.\n",
-				   max, thiscpu, apicid);
-		}
+		pr_warning("APIC: NR_CPUS/possible_cpus limit of %i "
+			   "reached. Processor %d/0x%x ignored.\n",
+			   max, thiscpu, apicid);
 
 		disabled_cpus++;
 		return -EINVAL;
@@ -2177,23 +2175,13 @@ int __generic_processor_info(int apicid, int version, bool enabled)
 		apic->x86_32_early_logical_apicid(cpu);
 #endif
 	set_cpu_possible(cpu, true);
-
-	if (enabled) {
-		num_processors++;
-		physid_set(apicid, phys_cpu_present_map);
-		set_cpu_present(cpu, true);
-	} else {
-		disabled_cpus++;
-	}
+	physid_set(apicid, phys_cpu_present_map);
+	set_cpu_present(cpu, true);
+	num_processors++;
 
 	return cpu;
 }
 
-int generic_processor_info(int apicid, int version)
-{
-	return __generic_processor_info(apicid, version, true);
-}
-
 int hard_smp_processor_id(void)
 {
 	return read_apic_id();
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index e9f8f8cdd570..86f20cc0a65e 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -1105,7 +1105,8 @@ void __init uv_init_hub_info(struct uv_hub_info_s *hi)
 	node_id.v = uv_read_local_mmr(UVH_NODE_ID);
 	uv_cpuid.gnode_shift = max_t(unsigned int, uv_cpuid.gnode_shift, mn.n_val);
 	hi->gnode_extra = (node_id.s.node_id & ~((1 << uv_cpuid.gnode_shift) - 1)) >> 1;
-	hi->gnode_upper = (unsigned long)hi->gnode_extra << mn.m_val;
+	if (mn.m_val)
+		hi->gnode_upper = (u64)hi->gnode_extra << mn.m_val;
 
 	if (uv_gp_table) {
 		hi->global_mmr_base = uv_gp_table->mmr_base;
diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
index c05509d38b1f..9ac2a5cdd9c2 100644
--- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
+++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
@@ -727,7 +727,7 @@ void rdtgroup_kn_unlock(struct kernfs_node *kn)
 	if (atomic_dec_and_test(&rdtgrp->waitcount) &&
 	    (rdtgrp->flags & RDT_DELETED)) {
 		kernfs_unbreak_active_protection(kn);
-		kernfs_put(kn);
+		kernfs_put(rdtgrp->kn);
 		kfree(rdtgrp);
 	} else {
 		kernfs_unbreak_active_protection(kn);
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 8e9725c607ea..5accfbdee3f0 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -54,6 +54,8 @@
 
 static DEFINE_MUTEX(mce_chrdev_read_mutex);
 
+static int mce_chrdev_open_count;	/* #times opened */
+
 #define mce_log_get_idx_check(p) \
 ({ \
 	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \
@@ -598,6 +600,10 @@ static int mce_default_notifier(struct notifier_block *nb, unsigned long val,
 	if (atomic_read(&num_notifiers) > 2)
 		return NOTIFY_DONE;
 
+	/* Don't print when mcelog is running */
+	if (mce_chrdev_open_count > 0)
+		return NOTIFY_DONE;
+
 	__print_mce(m);
 
 	return NOTIFY_DONE;
@@ -1828,7 +1834,6 @@ void mcheck_cpu_clear(struct cpuinfo_x86 *c)
  */
 
 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
-static int mce_chrdev_open_count;	/* #times opened */
 static int mce_chrdev_open_exclu;	/* already open exclusive? */
 
 static int mce_chrdev_open(struct inode *inode, struct file *file)
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index 524cc5780a77..6e4a047e4b68 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -60,7 +60,7 @@ static const char * const th_names[] = {
 	"load_store",
 	"insn_fetch",
 	"combined_unit",
-	"",
+	"decode_unit",
 	"northbridge",
 	"execution_unit",
 };
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 8f3d9cf26ff9..cbd73eb42170 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -29,6 +29,12 @@
 #include <asm/ftrace.h>
 #include <asm/nops.h>
 
+#if defined(CONFIG_FUNCTION_GRAPH_TRACER) && \
+	!defined(CC_USING_FENTRY) && \
+	!defined(CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE)
+# error The following combination is not supported: ((compiler missing -mfentry) || (CONFIG_X86_32 and !CONFIG_DYNAMIC_FTRACE)) && CONFIG_FUNCTION_GRAPH_TRACER && CONFIG_CC_OPTIMIZE_FOR_SIZE
+#endif
+
 #ifdef CONFIG_DYNAMIC_FTRACE
 
 int ftrace_arch_code_modify_prepare(void)
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 54a2372f5dbb..b5785c197e53 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -4,6 +4,7 @@
  *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
  */
 
+#define DISABLE_BRANCH_PROFILING
 #include <linux/init.h>
 #include <linux/linkage.h>
 #include <linux/types.h>
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index f088ea4c66e7..a723ae9440ab 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -166,11 +166,9 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
 	spin_lock_irqsave(&desc->lock, flags);
 
 	/*
-	 * most handlers of type NMI_UNKNOWN never return because
-	 * they just assume the NMI is theirs.  Just a sanity check
-	 * to manage expectations
+	 * Indicate if there are multiple registrations on the
+	 * internal NMI handler call chains (SERR and IO_CHECK).
 	 */
-	WARN_ON_ONCE(type == NMI_UNKNOWN && !list_empty(&desc->head));
 	WARN_ON_ONCE(type == NMI_SERR && !list_empty(&desc->head));
 	WARN_ON_ONCE(type == NMI_IO_CHECK && !list_empty(&desc->head));
 
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 4f7a9833d8e5..714dfba6a1e7 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -328,7 +328,7 @@ unsigned long long sched_clock(void)
 	return paravirt_sched_clock();
 }
 
-static inline bool using_native_sched_clock(void)
+bool using_native_sched_clock(void)
 {
 	return pv_time_ops.sched_clock == native_sched_clock;
 }
@@ -336,7 +336,7 @@ static inline bool using_native_sched_clock(void)
 unsigned long long
 sched_clock(void) __attribute__((alias("native_sched_clock")));
 
-static inline bool using_native_sched_clock(void) { return true; }
+bool using_native_sched_clock(void) { return true; }
 #endif
 
 int check_tsc_unstable(void)
@@ -1333,6 +1333,8 @@ static int __init init_tsc_clocksource(void)
 	 * the refined calibration and directly register it as a clocksource.
 	 */
 	if (boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ)) {
+		if (boot_cpu_has(X86_FEATURE_ART))
+			art_related_clocksource = &clocksource_tsc;
 		clocksource_register_khz(&clocksource_tsc, tsc_khz);
 		return 0;
 	}
diff --git a/arch/x86/kernel/unwind_frame.c b/arch/x86/kernel/unwind_frame.c
index 478d15dbaee4..08339262b666 100644
--- a/arch/x86/kernel/unwind_frame.c
+++ b/arch/x86/kernel/unwind_frame.c
@@ -82,19 +82,43 @@ static size_t regs_size(struct pt_regs *regs)
 	return sizeof(*regs);
 }
 
+#ifdef CONFIG_X86_32
+#define GCC_REALIGN_WORDS 3
+#else
+#define GCC_REALIGN_WORDS 1
+#endif
+
 static bool is_last_task_frame(struct unwind_state *state)
 {
-	unsigned long bp = (unsigned long)state->bp;
-	unsigned long regs = (unsigned long)task_pt_regs(state->task);
+	unsigned long *last_bp = (unsigned long *)task_pt_regs(state->task) - 2;
+	unsigned long *aligned_bp = last_bp - GCC_REALIGN_WORDS;
 
 	/*
 	 * We have to check for the last task frame at two different locations
 	 * because gcc can occasionally decide to realign the stack pointer and
-	 * change the offset of the stack frame by a word in the prologue of a
-	 * function called by head/entry code.
+	 * change the offset of the stack frame in the prologue of a function
+	 * called by head/entry code.  Examples:
+	 *
+	 * <start_secondary>:
+	 *      push   %edi
+	 *      lea    0x8(%esp),%edi
+	 *      and    $0xfffffff8,%esp
+	 *      pushl  -0x4(%edi)
+	 *      push   %ebp
+	 *      mov    %esp,%ebp
+	 *
+	 * <x86_64_start_kernel>:
+	 *      lea    0x8(%rsp),%r10
+	 *      and    $0xfffffffffffffff0,%rsp
+	 *      pushq  -0x8(%r10)
+	 *      push   %rbp
+	 *      mov    %rsp,%rbp
+	 *
+	 * Note that after aligning the stack, it pushes a duplicate copy of
+	 * the return address before pushing the frame pointer.
 	 */
-	return bp == regs - FRAME_HEADER_SIZE ||
-	       bp == regs - FRAME_HEADER_SIZE - sizeof(long);
+	return (state->bp == last_bp ||
+		(state->bp == aligned_bp && *(aligned_bp+1) == *(last_bp+1)));
 }
 
 /*
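
A condensed, compilable model of the new check; last_bp stands in for
`(unsigned long *)task_pt_regs(task) - 2`, so treat it as a sketch rather
than the kernel code:

#include <stdbool.h>

#define GCC_REALIGN_WORDS 1	/* x86_64 case; 3 on 32-bit */

static bool is_last_task_frame_sketch(unsigned long *bp,
				      unsigned long *last_bp)
{
	unsigned long *aligned_bp = last_bp - GCC_REALIGN_WORDS;

	if (bp == last_bp)		/* plain prologue */
		return true;
	/* realigned prologue: gcc pushed a duplicate return address,
	 * so the copies at aligned_bp[1] and last_bp[1] must agree */
	return bp == aligned_bp && aligned_bp[1] == last_bp[1];
}
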
diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c
index 73ea24d4f119..047b17a26269 100644
--- a/arch/x86/kvm/i8259.c
+++ b/arch/x86/kvm/i8259.c
@@ -657,6 +657,9 @@ void kvm_pic_destroy(struct kvm *kvm)
 {
 	struct kvm_pic *vpic = kvm->arch.vpic;
 
+	if (!vpic)
+		return;
+
 	kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_master);
 	kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_slave);
 	kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_eclr);
diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c
index 6e219e5c07d2..289270a6aecb 100644
--- a/arch/x86/kvm/ioapic.c
+++ b/arch/x86/kvm/ioapic.c
@@ -635,6 +635,9 @@ void kvm_ioapic_destroy(struct kvm *kvm)
 {
 	struct kvm_ioapic *ioapic = kvm->arch.vioapic;
 
+	if (!ioapic)
+		return;
+
 	cancel_delayed_work_sync(&ioapic->eoi_inject);
 	kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &ioapic->dev);
 	kvm->arch.vioapic = NULL;
diff --git a/arch/x86/kvm/page_track.c b/arch/x86/kvm/page_track.c
index 37942e419c32..60168cdd0546 100644
--- a/arch/x86/kvm/page_track.c
+++ b/arch/x86/kvm/page_track.c
@@ -160,6 +160,14 @@ bool kvm_page_track_is_active(struct kvm_vcpu *vcpu, gfn_t gfn,
 	return !!ACCESS_ONCE(slot->arch.gfn_track[mode][index]);
 }
 
+void kvm_page_track_cleanup(struct kvm *kvm)
+{
+	struct kvm_page_track_notifier_head *head;
+
+	head = &kvm->arch.track_notifier_head;
+	cleanup_srcu_struct(&head->track_srcu);
+}
+
 void kvm_page_track_init(struct kvm *kvm)
 {
 	struct kvm_page_track_notifier_head *head;
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index d1efe2c62b3f..5fba70646c32 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1379,6 +1379,9 @@ static void avic_vm_destroy(struct kvm *kvm)
 	unsigned long flags;
 	struct kvm_arch *vm_data = &kvm->arch;
 
+	if (!avic)
+		return;
+
 	avic_free_vm_id(vm_data->avic_vm_id);
 
 	if (vm_data->avic_logical_id_table_page)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 98e82ee1e699..259e9b28ccf8 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1239,6 +1239,11 @@ static inline bool cpu_has_vmx_invvpid_global(void)
 	return vmx_capability.vpid & VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT;
 }
 
+static inline bool cpu_has_vmx_invvpid(void)
+{
+	return vmx_capability.vpid & VMX_VPID_INVVPID_BIT;
+}
+
 static inline bool cpu_has_vmx_ept(void)
 {
 	return vmcs_config.cpu_based_2nd_exec_ctrl &
@@ -2753,7 +2758,6 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
 		SECONDARY_EXEC_RDTSCP |
 		SECONDARY_EXEC_DESC |
 		SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
-		SECONDARY_EXEC_ENABLE_VPID |
 		SECONDARY_EXEC_APIC_REGISTER_VIRT |
 		SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
 		SECONDARY_EXEC_WBINVD_EXITING |
@@ -2781,10 +2785,12 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
 	 * though it is treated as global context.  The alternative is
 	 * not failing the single-context invvpid, and it is worse.
 	 */
-	if (enable_vpid)
+	if (enable_vpid) {
+		vmx->nested.nested_vmx_secondary_ctls_high |=
+			SECONDARY_EXEC_ENABLE_VPID;
 		vmx->nested.nested_vmx_vpid_caps = VMX_VPID_INVVPID_BIT |
 			VMX_VPID_EXTENT_SUPPORTED_MASK;
-	else
+	} else
 		vmx->nested.nested_vmx_vpid_caps = 0;
 
 	if (enable_unrestricted_guest)
@@ -4024,6 +4030,12 @@ static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
 	__vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid);
 }
 
+static void vmx_flush_tlb_ept_only(struct kvm_vcpu *vcpu)
+{
+	if (enable_ept)
+		vmx_flush_tlb(vcpu);
+}
+
 static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
 {
 	ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits;
@@ -6517,8 +6529,10 @@ static __init int hardware_setup(void)
 	if (boot_cpu_has(X86_FEATURE_NX))
 		kvm_enable_efer_bits(EFER_NX);
 
-	if (!cpu_has_vmx_vpid())
+	if (!cpu_has_vmx_vpid() || !cpu_has_vmx_invvpid() ||
+	    !(cpu_has_vmx_invvpid_single() || cpu_has_vmx_invvpid_global()))
 		enable_vpid = 0;
+
 	if (!cpu_has_vmx_shadow_vmcs())
 		enable_shadow_vmcs = 0;
 	if (enable_shadow_vmcs)
@@ -8184,6 +8198,9 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
 		return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES);
 	case EXIT_REASON_PREEMPTION_TIMER:
 		return false;
+	case EXIT_REASON_PML_FULL:
+		/* We don't expose PML support to L1. */
+		return false;
 	default:
 		return true;
 	}
@@ -8501,7 +8518,8 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
 	    && kvm_vmx_exit_handlers[exit_reason])
 		return kvm_vmx_exit_handlers[exit_reason](vcpu);
 	else {
-		WARN_ONCE(1, "vmx: unexpected exit reason 0x%x\n", exit_reason);
+		vcpu_unimpl(vcpu, "vmx: unexpected exit reason 0x%x\n",
+				exit_reason);
 		kvm_queue_exception(vcpu, UD_VECTOR);
 		return 1;
 	}
@@ -8547,6 +8565,7 @@ static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
 	} else {
 		sec_exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
 		sec_exec_control |= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
+		vmx_flush_tlb_ept_only(vcpu);
 	}
 	vmcs_write32(SECONDARY_VM_EXEC_CONTROL, sec_exec_control);
 
@@ -8572,8 +8591,10 @@ static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu, hpa_t hpa)
 	 */
 	if (!is_guest_mode(vcpu) ||
 	    !nested_cpu_has2(get_vmcs12(&vmx->vcpu),
-			     SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
+			     SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
 		vmcs_write64(APIC_ACCESS_ADDR, hpa);
+		vmx_flush_tlb_ept_only(vcpu);
+	}
 }
 
 static void vmx_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr)
@@ -9974,7 +9995,6 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	u32 exec_control;
-	bool nested_ept_enabled = false;
 
 	vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector);
 	vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector);
@@ -10121,8 +10141,6 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
 				vmcs12->guest_intr_status);
 	}
 
-	nested_ept_enabled = (exec_control & SECONDARY_EXEC_ENABLE_EPT) != 0;
-
 	/*
 	 * Write an illegal value to APIC_ACCESS_ADDR. Later,
 	 * nested_get_vmcs12_pages will either fix it up or
@@ -10252,9 +10270,24 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
 
 	}
 
+	if (enable_pml) {
+		/*
+		 * Conceptually we want to copy the PML address and index from
+		 * vmcs01 here, and then back to vmcs01 on nested vmexit. But,
+		 * since we always flush the log on each vmexit, this happens
+		 * to be equivalent to simply resetting the fields in vmcs02.
+		 */
+		ASSERT(vmx->pml_pg);
+		vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
+		vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
+	}
+
 	if (nested_cpu_has_ept(vmcs12)) {
 		kvm_mmu_unload(vcpu);
 		nested_ept_init_mmu_context(vcpu);
+	} else if (nested_cpu_has2(vmcs12,
+			SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
+		vmx_flush_tlb_ept_only(vcpu);
 	}
 
 	/*
@@ -10282,12 +10315,10 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
 	vmx_set_efer(vcpu, vcpu->arch.efer);
 
 	/* Shadow page tables on either EPT or shadow page tables. */
-	if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_ept_enabled,
+	if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_cpu_has_ept(vmcs12),
 				entry_failure_code))
 		return 1;
 
-	kvm_mmu_reset_context(vcpu);
-
 	if (!enable_ept)
 		vcpu->arch.walk_mmu->inject_page_fault = vmx_inject_page_fault_nested;
 
@@ -11056,6 +11087,10 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
 		vmx->nested.change_vmcs01_virtual_x2apic_mode = false;
 		vmx_set_virtual_x2apic_mode(vcpu,
 					vcpu->arch.apic_base & X2APIC_ENABLE);
+	} else if (!nested_cpu_has_ept(vmcs12) &&
+		   nested_cpu_has2(vmcs12,
+				   SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
+		vmx_flush_tlb_ept_only(vcpu);
 	}
 
 	/* This is needed for same reason as it was needed in prepare_vmcs02 */
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 1faf620a6fdc..ccbd45ecd41a 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -8153,11 +8153,12 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 	if (kvm_x86_ops->vm_destroy)
 		kvm_x86_ops->vm_destroy(kvm);
 	kvm_iommu_unmap_guest(kvm);
-	kfree(kvm->arch.vpic);
-	kfree(kvm->arch.vioapic);
+	kvm_pic_destroy(kvm);
+	kvm_ioapic_destroy(kvm);
 	kvm_free_vcpus(kvm);
 	kvfree(rcu_dereference_check(kvm->arch.apic_map, 1));
 	kvm_mmu_uninit_vm(kvm);
+	kvm_page_track_cleanup(kvm);
 }
 
 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
@@ -8566,11 +8567,11 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
 {
 	struct x86_exception fault;
 
-	trace_kvm_async_pf_ready(work->arch.token, work->gva);
 	if (work->wakeup_all)
 		work->arch.token = ~0; /* broadcast wakeup */
 	else
 		kvm_del_async_pf_gfn(vcpu, work->arch.gfn);
+	trace_kvm_async_pf_ready(work->arch.token, work->gva);
 
 	if ((vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) &&
 	    !apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) {
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index 779782f58324..9a53a06e5a3e 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -290,7 +290,7 @@ EXPORT_SYMBOL_GPL(memcpy_mcsafe_unrolled)
 	_ASM_EXTABLE_FAULT(.L_copy_leading_bytes, .L_memcpy_mcsafe_fail)
 	_ASM_EXTABLE_FAULT(.L_cache_w0, .L_memcpy_mcsafe_fail)
 	_ASM_EXTABLE_FAULT(.L_cache_w1, .L_memcpy_mcsafe_fail)
-	_ASM_EXTABLE_FAULT(.L_cache_w3, .L_memcpy_mcsafe_fail)
+	_ASM_EXTABLE_FAULT(.L_cache_w2, .L_memcpy_mcsafe_fail)
 	_ASM_EXTABLE_FAULT(.L_cache_w3, .L_memcpy_mcsafe_fail)
 	_ASM_EXTABLE_FAULT(.L_cache_w4, .L_memcpy_mcsafe_fail)
 	_ASM_EXTABLE_FAULT(.L_cache_w5, .L_memcpy_mcsafe_fail)
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index 8d63d7a104c3..4c90cfdc128b 100644
--- a/arch/x86/mm/kasan_init_64.c
+++ b/arch/x86/mm/kasan_init_64.c
@@ -1,3 +1,4 @@
+#define DISABLE_BRANCH_PROFILING
 #define pr_fmt(fmt) "kasan: " fmt
 #include <linux/bootmem.h>
 #include <linux/kasan.h>
diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c
index 887e57182716..aed206475aa7 100644
--- a/arch/x86/mm/kaslr.c
+++ b/arch/x86/mm/kaslr.c
@@ -48,7 +48,7 @@ static const unsigned long vaddr_start = __PAGE_OFFSET_BASE;
 #if defined(CONFIG_X86_ESPFIX64)
 static const unsigned long vaddr_end = ESPFIX_BASE_ADDR;
 #elif defined(CONFIG_EFI)
-static const unsigned long vaddr_end = EFI_VA_START;
+static const unsigned long vaddr_end = EFI_VA_END;
 #else
 static const unsigned long vaddr_end = __START_KERNEL_map;
 #endif
@@ -105,7 +105,7 @@ void __init kernel_randomize_memory(void)
 	 */
 	BUILD_BUG_ON(vaddr_start >= vaddr_end);
 	BUILD_BUG_ON(IS_ENABLED(CONFIG_X86_ESPFIX64) &&
-		     vaddr_end >= EFI_VA_START);
+		     vaddr_end >= EFI_VA_END);
 	BUILD_BUG_ON((IS_ENABLED(CONFIG_X86_ESPFIX64) ||
 		      IS_ENABLED(CONFIG_EFI)) &&
 		     vaddr_end >= __START_KERNEL_map);
diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
index 5126dfd52b18..cd44ae727df7 100644
--- a/arch/x86/mm/mpx.c
+++ b/arch/x86/mm/mpx.c
@@ -590,7 +590,7 @@ static unsigned long mpx_bd_entry_to_bt_addr(struct mm_struct *mm,
  * we might run off the end of the bounds table if we are on
  * a 64-bit kernel and try to get 8 bytes.
  */
-int get_user_bd_entry(struct mm_struct *mm, unsigned long *bd_entry_ret,
+static int get_user_bd_entry(struct mm_struct *mm, unsigned long *bd_entry_ret,
 		long __user *bd_entry_ptr)
 {
 	u32 bd_entry_32;
diff --git a/arch/x86/platform/intel-mid/device_libs/Makefile b/arch/x86/platform/intel-mid/device_libs/Makefile
index a7dbec4dce27..3dbde04febdc 100644
--- a/arch/x86/platform/intel-mid/device_libs/Makefile
+++ b/arch/x86/platform/intel-mid/device_libs/Makefile
@@ -26,5 +26,6 @@ obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_pcal9555a.o
 obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_tca6416.o
 # MISC Devices
 obj-$(subst m,y,$(CONFIG_KEYBOARD_GPIO)) += platform_gpio_keys.o
+obj-$(subst m,y,$(CONFIG_INTEL_MID_POWER_BUTTON)) += platform_mrfld_power_btn.o
 obj-$(subst m,y,$(CONFIG_RTC_DRV_CMOS)) += platform_mrfld_rtc.o
 obj-$(subst m,y,$(CONFIG_INTEL_MID_WATCHDOG)) += platform_mrfld_wdt.o
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_power_btn.c b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_power_btn.c
new file mode 100644
index 000000000000..a6c3705a28ad
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_power_btn.c
@@ -0,0 +1,82 @@
+/*
+ * Intel Merrifield power button support
+ *
+ * (C) Copyright 2017 Intel Corporation
+ *
+ * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/sfi.h>
+
+#include <asm/intel-mid.h>
+#include <asm/intel_scu_ipc.h>
+
+static struct resource mrfld_power_btn_resources[] = {
+	{
+		.flags		= IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device mrfld_power_btn_dev = {
+	.name		= "msic_power_btn",
+	.id		= PLATFORM_DEVID_NONE,
+	.num_resources	= ARRAY_SIZE(mrfld_power_btn_resources),
+	.resource	= mrfld_power_btn_resources,
+};
+
+static int mrfld_power_btn_scu_status_change(struct notifier_block *nb,
+					     unsigned long code, void *data)
+{
+	if (code == SCU_DOWN) {
+		platform_device_unregister(&mrfld_power_btn_dev);
+		return 0;
+	}
+
+	return platform_device_register(&mrfld_power_btn_dev);
+}
+
+static struct notifier_block mrfld_power_btn_scu_notifier = {
+	.notifier_call	= mrfld_power_btn_scu_status_change,
+};
+
+static int __init register_mrfld_power_btn(void)
+{
+	if (intel_mid_identify_cpu() != INTEL_MID_CPU_CHIP_TANGIER)
+		return -ENODEV;
+
+	/*
+	 * We need to be sure that the SCU IPC is ready before
+	 * PMIC power button device can be registered:
+	 */
+	intel_scu_notifier_add(&mrfld_power_btn_scu_notifier);
+
+	return 0;
+}
+arch_initcall(register_mrfld_power_btn);
+
+static void __init *mrfld_power_btn_platform_data(void *info)
+{
+	struct resource *res = mrfld_power_btn_resources;
+	struct sfi_device_table_entry *pentry = info;
+
+	res->start = res->end = pentry->irq;
+	return NULL;
+}
+
+static const struct devs_id mrfld_power_btn_dev_id __initconst = {
+	.name			= "bcove_power_btn",
+	.type			= SFI_DEV_TYPE_IPC,
+	.delay			= 1,
+	.msic			= 1,
+	.get_platform_data	= &mrfld_power_btn_platform_data,
+};
+
+sfi_device(mrfld_power_btn_dev_id);
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c
index 86edd1e941eb..9e304e2ea4f5 100644
--- a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c
+++ b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c
@@ -19,7 +19,7 @@
 #include <asm/intel_scu_ipc.h>
 #include <asm/io_apic.h>
 
-#define TANGIER_EXT_TIMER0_MSI 15
+#define TANGIER_EXT_TIMER0_MSI 12
 
 static struct platform_device wdt_dev = {
 	.name = "intel_mid_wdt",
diff --git a/arch/x86/platform/intel-mid/mfld.c b/arch/x86/platform/intel-mid/mfld.c
index e793fe509971..e42978d4deaf 100644
--- a/arch/x86/platform/intel-mid/mfld.c
+++ b/arch/x86/platform/intel-mid/mfld.c
@@ -17,16 +17,6 @@
 
 #include "intel_mid_weak_decls.h"
 
-static void penwell_arch_setup(void);
-/* penwell arch ops */
-static struct intel_mid_ops penwell_ops = {
-	.arch_setup = penwell_arch_setup,
-};
-
-static void mfld_power_off(void)
-{
-}
-
 static unsigned long __init mfld_calibrate_tsc(void)
 {
 	unsigned long fast_calibrate;
@@ -63,9 +53,12 @@ static unsigned long __init mfld_calibrate_tsc(void)
 static void __init penwell_arch_setup(void)
 {
 	x86_platform.calibrate_tsc = mfld_calibrate_tsc;
-	pm_power_off = mfld_power_off;
 }
 
+static struct intel_mid_ops penwell_ops = {
+	.arch_setup = penwell_arch_setup,
+};
+
 void *get_penwell_ops(void)
 {
 	return &penwell_ops;
diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile
index 555b9fa0ad43..7dbdb780264d 100644
--- a/arch/x86/purgatory/Makefile
+++ b/arch/x86/purgatory/Makefile
@@ -8,6 +8,7 @@ PURGATORY_OBJS = $(addprefix $(obj)/,$(purgatory-y))
 LDFLAGS_purgatory.ro := -e purgatory_start -r --no-undefined -nostdlib -z nodefaultlib
 targets += purgatory.ro
 
+KASAN_SANITIZE	:= n
 KCOV_INSTRUMENT := n
 
 # Default KBUILD_CFLAGS can have -pg option set when FTRACE is enabled. That
diff --git a/arch/xtensa/include/asm/page.h b/arch/xtensa/include/asm/page.h
index 976b1d70edbc..4ddbfd57a7c8 100644
--- a/arch/xtensa/include/asm/page.h
+++ b/arch/xtensa/include/asm/page.h
@@ -164,8 +164,21 @@ void copy_user_highpage(struct page *to, struct page *from,
 
 #define ARCH_PFN_OFFSET		(PHYS_OFFSET >> PAGE_SHIFT)
 
+#ifdef CONFIG_MMU
+static inline unsigned long ___pa(unsigned long va)
+{
+	unsigned long off = va - PAGE_OFFSET;
+
+	if (off >= XCHAL_KSEG_SIZE)
+		off -= XCHAL_KSEG_SIZE;
+
+	return off + PHYS_OFFSET;
+}
+#define __pa(x)	___pa((unsigned long)(x))
+#else
 #define __pa(x)	\
 	((unsigned long) (x) - PAGE_OFFSET + PHYS_OFFSET)
+#endif
 #define __va(x)	\
 	((void *)((unsigned long) (x) - PHYS_OFFSET + PAGE_OFFSET))
 #define pfn_valid(pfn) \
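
___pa() folds the second (bypass) KSEG alias back onto the first mapping
before translating. A standalone model with constants picked to illustrate
a common xtensa layout (assumptions, not taken from the patch):

#include <stdio.h>

#define PAGE_OFFSET	0xd0000000UL	/* cached KSEG mapping */
#define XCHAL_KSEG_SIZE	0x08000000UL	/* 128 MB per mapping  */
#define PHYS_OFFSET	0x00000000UL

static unsigned long pa_sketch(unsigned long va)
{
	unsigned long off = va - PAGE_OFFSET;

	if (off >= XCHAL_KSEG_SIZE)	/* second (bypass) alias */
		off -= XCHAL_KSEG_SIZE;
	return off + PHYS_OFFSET;
}

int main(void)
{
	/* both aliases of the same page resolve to one physical address */
	printf("%#lx\n", pa_sketch(0xd0001000UL));	/* 0x1000 */
	printf("%#lx\n", pa_sketch(0xd8001000UL));	/* 0x1000 */
	return 0;
}
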
diff --git a/arch/xtensa/include/uapi/asm/unistd.h b/arch/xtensa/include/uapi/asm/unistd.h
index cd400af4a6b2..6be7eb27fd29 100644
--- a/arch/xtensa/include/uapi/asm/unistd.h
+++ b/arch/xtensa/include/uapi/asm/unistd.h
@@ -774,7 +774,10 @@ __SYSCALL(349, sys_pkey_alloc, 2)
 #define __NR_pkey_free				350
 __SYSCALL(350, sys_pkey_free, 1)
 
-#define __NR_syscall_count			351
+#define __NR_statx				351
+__SYSCALL(351, sys_statx, 5)
+
+#define __NR_syscall_count			352
 
 /*
  * sysxtensa syscall handler
diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c
index c82c43bff296..bae697a06a98 100644
--- a/arch/xtensa/kernel/traps.c
+++ b/arch/xtensa/kernel/traps.c
@@ -483,10 +483,8 @@ void show_regs(struct pt_regs * regs)
 
 static int show_trace_cb(struct stackframe *frame, void *data)
 {
-	if (kernel_text_address(frame->pc)) {
-		pr_cont(" [<%08lx>]", frame->pc);
-		print_symbol(" %s\n", frame->pc);
-	}
+	if (kernel_text_address(frame->pc))
+		pr_cont(" [<%08lx>] %pB\n", frame->pc, (void *)frame->pc);
 	return 0;
 }
 
diff --git a/block/bio.c b/block/bio.c
index 5eec5e08417f..e75878f8b14a 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -376,10 +376,14 @@ static void punt_bios_to_rescuer(struct bio_set *bs)
 	bio_list_init(&punt);
 	bio_list_init(&nopunt);
 
-	while ((bio = bio_list_pop(current->bio_list)))
+	while ((bio = bio_list_pop(&current->bio_list[0])))
 		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
+	current->bio_list[0] = nopunt;
 
-	*current->bio_list = nopunt;
+	bio_list_init(&nopunt);
+	while ((bio = bio_list_pop(&current->bio_list[1])))
+		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
+	current->bio_list[1] = nopunt;
 
 	spin_lock(&bs->rescue_lock);
 	bio_list_merge(&bs->rescue_list, &punt);
@@ -466,7 +470,9 @@ struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
 	 * we retry with the original gfp_flags.
 	 */
 
-	if (current->bio_list && !bio_list_empty(current->bio_list))
+	if (current->bio_list &&
+	    (!bio_list_empty(&current->bio_list[0]) ||
+	     !bio_list_empty(&current->bio_list[1])))
 		gfp_mask &= ~__GFP_DIRECT_RECLAIM;
 
 	p = mempool_alloc(bs->bio_pool, gfp_mask);
diff --git a/block/blk-core.c b/block/blk-core.c
index 0eeb99ef654f..d772c221cc17 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1973,7 +1973,14 @@ end_io:
  */
 blk_qc_t generic_make_request(struct bio *bio)
 {
-	struct bio_list bio_list_on_stack;
+	/*
+	 * bio_list_on_stack[0] contains bios submitted by the current
+	 * make_request_fn.
+	 * bio_list_on_stack[1] contains bios that were submitted before
+	 * the current make_request_fn, but that haven't been processed
+	 * yet.
+	 */
+	struct bio_list bio_list_on_stack[2];
 	blk_qc_t ret = BLK_QC_T_NONE;
 
 	if (!generic_make_request_checks(bio))
@@ -1990,7 +1997,7 @@ blk_qc_t generic_make_request(struct bio *bio)
 	 * should be added at the tail
 	 */
 	if (current->bio_list) {
-		bio_list_add(current->bio_list, bio);
+		bio_list_add(&current->bio_list[0], bio);
 		goto out;
 	}
 
@@ -2009,18 +2016,17 @@ blk_qc_t generic_make_request(struct bio *bio)
 	 * bio_list, and call into ->make_request() again.
 	 */
 	BUG_ON(bio->bi_next);
-	bio_list_init(&bio_list_on_stack);
-	current->bio_list = &bio_list_on_stack;
+	bio_list_init(&bio_list_on_stack[0]);
+	current->bio_list = bio_list_on_stack;
 	do {
 		struct request_queue *q = bdev_get_queue(bio->bi_bdev);
 
 		if (likely(blk_queue_enter(q, false) == 0)) {
-			struct bio_list hold;
 			struct bio_list lower, same;
 
 			/* Create a fresh bio_list for all subordinate requests */
-			hold = bio_list_on_stack;
-			bio_list_init(&bio_list_on_stack);
+			bio_list_on_stack[1] = bio_list_on_stack[0];
+			bio_list_init(&bio_list_on_stack[0]);
 			ret = q->make_request_fn(q, bio);
 
 			blk_queue_exit(q);
@@ -2030,19 +2036,19 @@ blk_qc_t generic_make_request(struct bio *bio)
 			 */
 			bio_list_init(&lower);
 			bio_list_init(&same);
-			while ((bio = bio_list_pop(&bio_list_on_stack)) != NULL)
+			while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL)
 				if (q == bdev_get_queue(bio->bi_bdev))
 					bio_list_add(&same, bio);
 				else
 					bio_list_add(&lower, bio);
 			/* now assemble so we handle the lowest level first */
-			bio_list_merge(&bio_list_on_stack, &lower);
-			bio_list_merge(&bio_list_on_stack, &same);
-			bio_list_merge(&bio_list_on_stack, &hold);
+			bio_list_merge(&bio_list_on_stack[0], &lower);
+			bio_list_merge(&bio_list_on_stack[0], &same);
+			bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);
 		} else {
 			bio_io_error(bio);
 		}
-		bio = bio_list_pop(current->bio_list);
+		bio = bio_list_pop(&bio_list_on_stack[0]);
 	} while (bio);
 	current->bio_list = NULL; /* deactivate */
 
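
The two-element bio_list_on_stack exists to keep bio submission iterative: a ->make_request_fn that generates child bios only queues them, and the single loop above drains the queue, so stacked drivers cannot overflow the kernel stack through recursion. The sketch below shows only that queue-instead-of-recurse idea in isolation (the real code additionally keeps the second list so bios queued earlier are sorted behind the children of the current bio, which this simplification drops):

	#include <stdio.h>
	#include <stdlib.h>

	struct work {
		int level;		/* how many more children to spawn */
		struct work *next;
	};

	static struct work *pending;	/* stands in for current->bio_list */
	static int active;		/* non-zero while a handler runs */

	static void submit(struct work *w)
	{
		w->next = NULL;
		if (active) {		/* like bio_list_add() + goto out */
			w->next = pending;
			pending = w;
			return;
		}

		active = 1;
		pending = w;
		while (pending) {
			struct work *cur = pending;

			pending = cur->next;
			if (cur->level > 0) {
				struct work *child = malloc(sizeof(*child));

				child->level = cur->level - 1;
				submit(child);	/* queued, not recursed into */
			}
			printf("handled level %d\n", cur->level);
			free(cur);
		}
		active = 0;
	}

	int main(void)
	{
		struct work *w = malloc(sizeof(*w));

		w->level = 3;
		submit(w);
		return 0;
	}
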
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 09af8ff18719..c974a1bbf4cb 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -171,7 +171,8 @@ void blk_mq_sched_put_request(struct request *rq)
 
 void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
 {
-	struct elevator_queue *e = hctx->queue->elevator;
+	struct request_queue *q = hctx->queue;
+	struct elevator_queue *e = q->elevator;
 	const bool has_sched_dispatch = e && e->type->ops.mq.dispatch_request;
 	bool did_work = false;
 	LIST_HEAD(rq_list);
@@ -203,10 +204,10 @@ void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
 	 */
 	if (!list_empty(&rq_list)) {
 		blk_mq_sched_mark_restart_hctx(hctx);
-		did_work = blk_mq_dispatch_rq_list(hctx, &rq_list);
+		did_work = blk_mq_dispatch_rq_list(q, &rq_list);
 	} else if (!has_sched_dispatch) {
 		blk_mq_flush_busy_ctxs(hctx, &rq_list);
-		blk_mq_dispatch_rq_list(hctx, &rq_list);
+		blk_mq_dispatch_rq_list(q, &rq_list);
 	}
 
 	/*
@@ -222,7 +223,7 @@ void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
 			if (!rq)
 				break;
 			list_add(&rq->queuelist, &rq_list);
-		} while (blk_mq_dispatch_rq_list(hctx, &rq_list));
+		} while (blk_mq_dispatch_rq_list(q, &rq_list));
 	}
 }
 
@@ -317,25 +318,68 @@ static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
 	return true;
 }
 
-static void blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx)
+static bool blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx)
 {
 	if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) {
 		clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
-		if (blk_mq_hctx_has_pending(hctx))
+		if (blk_mq_hctx_has_pending(hctx)) {
 			blk_mq_run_hw_queue(hctx, true);
+			return true;
+		}
 	}
+	return false;
 }
 
-void blk_mq_sched_restart_queues(struct blk_mq_hw_ctx *hctx)
-{
-	struct request_queue *q = hctx->queue;
-	unsigned int i;
+/**
+ * list_for_each_entry_rcu_rr - iterate in a round-robin fashion over rcu list
+ * @pos:    loop cursor.
+ * @skip:   the list element that will not be examined. Iteration starts at
+ *          @skip->next.
+ * @head:   head of the list to examine. This list must have at least one
+ *          element, namely @skip.
+ * @member: name of the list_head structure within typeof(*pos).
+ */
+#define list_for_each_entry_rcu_rr(pos, skip, head, member)		\
+	for ((pos) = (skip);						\
+	     (pos = (pos)->member.next != (head) ? list_entry_rcu(	\
+			(pos)->member.next, typeof(*pos), member) :	\
+		list_entry_rcu((pos)->member.next->next, typeof(*pos), member)), \
+	     (pos) != (skip); )
 
-	if (test_bit(QUEUE_FLAG_RESTART, &q->queue_flags)) {
-		if (test_and_clear_bit(QUEUE_FLAG_RESTART, &q->queue_flags)) {
-			queue_for_each_hw_ctx(q, hctx, i)
-				blk_mq_sched_restart_hctx(hctx);
+/*
+ * Called after a driver tag has been freed to check whether a hctx needs to
+ * be restarted. Restarts @hctx if its tag set is not shared. Restarts hardware
+ * queues in a round-robin fashion if the tag set of @hctx is shared with other
+ * hardware queues.
+ */
+void blk_mq_sched_restart(struct blk_mq_hw_ctx *const hctx)
+{
+	struct blk_mq_tags *const tags = hctx->tags;
+	struct blk_mq_tag_set *const set = hctx->queue->tag_set;
+	struct request_queue *const queue = hctx->queue, *q;
+	struct blk_mq_hw_ctx *hctx2;
+	unsigned int i, j;
+
+	if (set->flags & BLK_MQ_F_TAG_SHARED) {
+		rcu_read_lock();
+		list_for_each_entry_rcu_rr(q, queue, &set->tag_list,
+					   tag_set_list) {
+			queue_for_each_hw_ctx(q, hctx2, i)
+				if (hctx2->tags == tags &&
+				    blk_mq_sched_restart_hctx(hctx2))
+					goto done;
+		}
+		j = hctx->queue_num + 1;
+		for (i = 0; i < queue->nr_hw_queues; i++, j++) {
+			if (j == queue->nr_hw_queues)
+				j = 0;
+			hctx2 = queue->queue_hw_ctx[j];
+			if (hctx2->tags == tags &&
+			    blk_mq_sched_restart_hctx(hctx2))
+				break;
 		}
+done:
+		rcu_read_unlock();
 	} else {
 		blk_mq_sched_restart_hctx(hctx);
 	}
@@ -431,11 +475,67 @@ static void blk_mq_sched_free_tags(struct blk_mq_tag_set *set,
 	}
 }
 
-int blk_mq_sched_setup(struct request_queue *q)
+static int blk_mq_sched_alloc_tags(struct request_queue *q,
+				   struct blk_mq_hw_ctx *hctx,
+				   unsigned int hctx_idx)
+{
+	struct blk_mq_tag_set *set = q->tag_set;
+	int ret;
+
+	hctx->sched_tags = blk_mq_alloc_rq_map(set, hctx_idx, q->nr_requests,
+					       set->reserved_tags);
+	if (!hctx->sched_tags)
+		return -ENOMEM;
+
+	ret = blk_mq_alloc_rqs(set, hctx->sched_tags, hctx_idx, q->nr_requests);
+	if (ret)
+		blk_mq_sched_free_tags(set, hctx, hctx_idx);
+
+	return ret;
+}
+
+static void blk_mq_sched_tags_teardown(struct request_queue *q)
 {
 	struct blk_mq_tag_set *set = q->tag_set;
 	struct blk_mq_hw_ctx *hctx;
-	int ret, i;
+	int i;
+
+	queue_for_each_hw_ctx(q, hctx, i)
+		blk_mq_sched_free_tags(set, hctx, i);
+}
+
+int blk_mq_sched_init_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
+			   unsigned int hctx_idx)
+{
+	struct elevator_queue *e = q->elevator;
+
+	if (!e)
+		return 0;
+
+	return blk_mq_sched_alloc_tags(q, hctx, hctx_idx);
+}
+
+void blk_mq_sched_exit_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
+			    unsigned int hctx_idx)
+{
+	struct elevator_queue *e = q->elevator;
+
+	if (!e)
+		return;
+
+	blk_mq_sched_free_tags(q->tag_set, hctx, hctx_idx);
+}
+
+int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
+{
+	struct blk_mq_hw_ctx *hctx;
+	unsigned int i;
+	int ret;
+
+	if (!e) {
+		q->elevator = NULL;
+		return 0;
+	}
 
 	/*
 	 * Default to 256, since we don't split into sync/async like the
@@ -443,49 +543,30 @@ int blk_mq_sched_setup(struct request_queue *q)
 	 */
 	q->nr_requests = 2 * BLKDEV_MAX_RQ;
 
-	/*
-	 * We're switching to using an IO scheduler, so setup the hctx
-	 * scheduler tags and switch the request map from the regular
-	 * tags to scheduler tags. First allocate what we need, so we
-	 * can safely fail and fallback, if needed.
-	 */
-	ret = 0;
 	queue_for_each_hw_ctx(q, hctx, i) {
-		hctx->sched_tags = blk_mq_alloc_rq_map(set, i,
-				q->nr_requests, set->reserved_tags);
-		if (!hctx->sched_tags) {
-			ret = -ENOMEM;
-			break;
-		}
-		ret = blk_mq_alloc_rqs(set, hctx->sched_tags, i, q->nr_requests);
+		ret = blk_mq_sched_alloc_tags(q, hctx, i);
 		if (ret)
-			break;
+			goto err;
 	}
 
-	/*
-	 * If we failed, free what we did allocate
-	 */
-	if (ret) {
-		queue_for_each_hw_ctx(q, hctx, i) {
-			if (!hctx->sched_tags)
-				continue;
-			blk_mq_sched_free_tags(set, hctx, i);
-		}
-
-		return ret;
-	}
+	ret = e->ops.mq.init_sched(q, e);
+	if (ret)
+		goto err;
 
 	return 0;
+
+err:
+	blk_mq_sched_tags_teardown(q);
+	q->elevator = NULL;
+	return ret;
 }
 
-void blk_mq_sched_teardown(struct request_queue *q)
+void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
 {
-	struct blk_mq_tag_set *set = q->tag_set;
-	struct blk_mq_hw_ctx *hctx;
-	int i;
-
-	queue_for_each_hw_ctx(q, hctx, i)
-		blk_mq_sched_free_tags(set, hctx, i);
+	if (e->type->ops.mq.exit_sched)
+		e->type->ops.mq.exit_sched(e);
+	blk_mq_sched_tags_teardown(q);
+	q->elevator = NULL;
 }
 
 int blk_mq_sched_init(struct request_queue *q)
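
list_for_each_entry_rcu_rr() above starts at @skip->next and steps silently over the list head, so each freed tag restarts a different queue first instead of always favouring the front of tag_list. The same traversal over a plain circular list, with the RCU protection of the real macro deliberately left out of the sketch:

	#include <stdio.h>

	struct node {
		int id;
		struct node *next;	/* circular list with a head sentinel */
	};

	/* Visit every entry once, starting at skip->next and skipping the
	 * sentinel head, as the round-robin macro does. */
	static void visit_round_robin(struct node *head, struct node *skip)
	{
		struct node *pos = skip->next;

		while (pos != skip) {
			if (pos != head)	/* head is not an entry */
				printf("visiting %d\n", pos->id);
			pos = pos->next;
		}
	}

	int main(void)
	{
		struct node head = { .id = -1 };
		struct node a = { .id = 1 }, b = { .id = 2 }, c = { .id = 3 };

		head.next = &a; a.next = &b; b.next = &c; c.next = &head;
		visit_round_robin(&head, &b);	/* prints 3, then 1 */
		return 0;
	}
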
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index a75b16b123f7..3a9e6e40558b 100644
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -19,7 +19,7 @@ bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
 			 struct request **merged_request);
 bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio);
 bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq);
-void blk_mq_sched_restart_queues(struct blk_mq_hw_ctx *hctx);
+void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx);
 
 void blk_mq_sched_insert_request(struct request *rq, bool at_head,
 				 bool run_queue, bool async, bool can_block);
@@ -32,8 +32,13 @@ void blk_mq_sched_move_to_dispatch(struct blk_mq_hw_ctx *hctx,
 				   struct list_head *rq_list,
 				   struct request *(*get_rq)(struct blk_mq_hw_ctx *));
 
-int blk_mq_sched_setup(struct request_queue *q);
-void blk_mq_sched_teardown(struct request_queue *q);
+int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e);
+void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e);
+
+int blk_mq_sched_init_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
+			   unsigned int hctx_idx);
+void blk_mq_sched_exit_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
+			    unsigned int hctx_idx);
 
 int blk_mq_sched_init(struct request_queue *q);
 
@@ -131,20 +136,6 @@ static inline void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
 		set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
 }
 
-/*
- * Mark a hardware queue and the request queue it belongs to as needing a
- * restart.
- */
-static inline void blk_mq_sched_mark_restart_queue(struct blk_mq_hw_ctx *hctx)
-{
-	struct request_queue *q = hctx->queue;
-
-	if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
-		set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
-	if (!test_bit(QUEUE_FLAG_RESTART, &q->queue_flags))
-		set_bit(QUEUE_FLAG_RESTART, &q->queue_flags);
-}
-
 static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx)
 {
 	return test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index e48bc2c72615..9d97bfc4d465 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -295,6 +295,9 @@ int blk_mq_reinit_tagset(struct blk_mq_tag_set *set)
 	for (i = 0; i < set->nr_hw_queues; i++) {
 		struct blk_mq_tags *tags = set->tags[i];
 
+		if (!tags)
+			continue;
+
 		for (j = 0; j < tags->nr_tags; j++) {
 			if (!tags->static_rqs[j])
 				continue;
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 159187a28d66..572966f49596 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -321,7 +321,6 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int rw,
 
 	rq = blk_mq_sched_get_request(q, NULL, rw, &alloc_data);
 
-	blk_mq_put_ctx(alloc_data.ctx);
 	blk_queue_exit(q);
 
 	if (!rq)
@@ -349,7 +348,7 @@ void __blk_mq_finish_request(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
 		blk_mq_put_tag(hctx, hctx->tags, ctx, rq->tag);
 	if (sched_tag != -1)
 		blk_mq_sched_completed_request(hctx, rq);
-	blk_mq_sched_restart_queues(hctx);
+	blk_mq_sched_restart(hctx);
 	blk_queue_exit(q);
 }
 
@@ -697,17 +696,8 @@ static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
 {
 	struct blk_mq_timeout_data *data = priv;
 
-	if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
-		/*
-		 * If a request wasn't started before the queue was
-		 * marked dying, kill it here or it'll go unnoticed.
-		 */
-		if (unlikely(blk_queue_dying(rq->q))) {
-			rq->errors = -EIO;
-			blk_mq_end_request(rq, rq->errors);
-		}
+	if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
 		return;
-	}
 
 	if (time_after_eq(jiffies, rq->deadline)) {
 		if (!blk_mark_rq_complete(rq))
@@ -855,12 +845,8 @@ bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
 		.flags = wait ? 0 : BLK_MQ_REQ_NOWAIT,
 	};
 
-	if (rq->tag != -1) {
-done:
-		if (hctx)
-			*hctx = data.hctx;
-		return true;
-	}
+	if (rq->tag != -1)
+		goto done;
 
 	if (blk_mq_tag_is_reserved(data.hctx->sched_tags, rq->internal_tag))
 		data.flags |= BLK_MQ_REQ_RESERVED;
@@ -872,10 +858,12 @@ done:
 			atomic_inc(&data.hctx->nr_active);
 		}
 		data.hctx->tags->rqs[rq->tag] = rq;
-		goto done;
 	}
 
-	return false;
+done:
+	if (hctx)
+		*hctx = data.hctx;
+	return rq->tag != -1;
 }
 
 static void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
@@ -972,13 +960,16 @@ static bool blk_mq_dispatch_wait_add(struct blk_mq_hw_ctx *hctx)
 	return true;
 }
 
-bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
+bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list)
 {
-	struct request_queue *q = hctx->queue;
+	struct blk_mq_hw_ctx *hctx;
 	struct request *rq;
 	LIST_HEAD(driver_list);
 	struct list_head *dptr;
-	int queued, ret = BLK_MQ_RQ_QUEUE_OK;
+	int errors, queued, ret = BLK_MQ_RQ_QUEUE_OK;
+
+	if (list_empty(list))
+		return false;
 
 	/*
 	 * Start off with dptr being NULL, so we start the first request
@@ -989,8 +980,8 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
 	/*
 	 * Now process all the entries, sending them to the driver.
 	 */
-	queued = 0;
-	while (!list_empty(list)) {
+	errors = queued = 0;
+	do {
 		struct blk_mq_queue_data bd;
 
 		rq = list_first_entry(list, struct request, queuelist);
@@ -1046,6 +1037,7 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
 		default:
 			pr_err("blk-mq: bad return on queue: %d\n", ret);
 		case BLK_MQ_RQ_QUEUE_ERROR:
+			errors++;
 			rq->errors = -EIO;
 			blk_mq_end_request(rq, rq->errors);
 			break;
@@ -1060,7 +1052,7 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
 		 */
 		if (!dptr && list->next != list->prev)
 			dptr = &driver_list;
-	}
+	} while (!list_empty(list));
 
 	hctx->dispatched[queued_to_index(queued)]++;
 
@@ -1097,7 +1089,7 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
 		blk_mq_run_hw_queue(hctx, true);
 	}
 
-	return queued != 0;
+	return (queued + errors) != 0;
 }
 
 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
@@ -1143,7 +1135,8 @@ static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
 	return hctx->next_cpu;
 }
 
-void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
+static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
+					unsigned long msecs)
 {
 	if (unlikely(blk_mq_hctx_stopped(hctx) ||
 	    !blk_mq_hw_queue_mapped(hctx)))
@@ -1160,7 +1153,24 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
 		put_cpu();
 	}
 
-	kblockd_schedule_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work);
+	if (msecs == 0)
+		kblockd_schedule_work_on(blk_mq_hctx_next_cpu(hctx),
+					 &hctx->run_work);
+	else
+		kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
+						 &hctx->delayed_run_work,
+						 msecs_to_jiffies(msecs));
+}
+
+void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
+{
+	__blk_mq_delay_run_hw_queue(hctx, true, msecs);
+}
+EXPORT_SYMBOL(blk_mq_delay_run_hw_queue);
+
+void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
+{
+	__blk_mq_delay_run_hw_queue(hctx, async, 0);
 }
 
 void blk_mq_run_hw_queues(struct request_queue *q, bool async)
@@ -1263,6 +1273,15 @@ static void blk_mq_run_work_fn(struct work_struct *work)
 	__blk_mq_run_hw_queue(hctx);
 }
 
+static void blk_mq_delayed_run_work_fn(struct work_struct *work)
+{
+	struct blk_mq_hw_ctx *hctx;
+
+	hctx = container_of(work, struct blk_mq_hw_ctx, delayed_run_work.work);
+
+	__blk_mq_run_hw_queue(hctx);
+}
+
 static void blk_mq_delay_work_fn(struct work_struct *work)
 {
 	struct blk_mq_hw_ctx *hctx;
@@ -1434,7 +1453,8 @@ static blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, struct request *rq)
 	return blk_tag_to_qc_t(rq->internal_tag, hctx->queue_num, true);
 }
 
-static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie)
+static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie,
+				      bool may_sleep)
 {
 	struct request_queue *q = rq->q;
 	struct blk_mq_queue_data bd = {
@@ -1475,7 +1495,7 @@ static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie)
 	}
 
 insert:
-	blk_mq_sched_insert_request(rq, false, true, true, false);
+	blk_mq_sched_insert_request(rq, false, true, false, may_sleep);
 }
 
 /*
@@ -1569,11 +1589,11 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 
 		if (!(data.hctx->flags & BLK_MQ_F_BLOCKING)) {
 			rcu_read_lock();
-			blk_mq_try_issue_directly(old_rq, &cookie);
+			blk_mq_try_issue_directly(old_rq, &cookie, false);
 			rcu_read_unlock();
 		} else {
 			srcu_idx = srcu_read_lock(&data.hctx->queue_rq_srcu);
-			blk_mq_try_issue_directly(old_rq, &cookie);
+			blk_mq_try_issue_directly(old_rq, &cookie, true);
 			srcu_read_unlock(&data.hctx->queue_rq_srcu, srcu_idx);
 		}
 		goto done;
@@ -1931,6 +1951,8 @@ static void blk_mq_exit_hctx(struct request_queue *q,
 				       hctx->fq->flush_rq, hctx_idx,
 				       flush_start_tag + hctx_idx);
 
+	blk_mq_sched_exit_hctx(q, hctx, hctx_idx);
+
 	if (set->ops->exit_hctx)
 		set->ops->exit_hctx(hctx, hctx_idx);
 
@@ -1967,6 +1989,7 @@ static int blk_mq_init_hctx(struct request_queue *q,
 		node = hctx->numa_node = set->numa_node;
 
 	INIT_WORK(&hctx->run_work, blk_mq_run_work_fn);
+	INIT_DELAYED_WORK(&hctx->delayed_run_work, blk_mq_delayed_run_work_fn);
 	INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn);
 	spin_lock_init(&hctx->lock);
 	INIT_LIST_HEAD(&hctx->dispatch);
@@ -1997,9 +2020,12 @@ static int blk_mq_init_hctx(struct request_queue *q,
 	    set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
 		goto free_bitmap;
 
+	if (blk_mq_sched_init_hctx(q, hctx, hctx_idx))
+		goto exit_hctx;
+
 	hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size);
 	if (!hctx->fq)
-		goto exit_hctx;
+		goto sched_exit_hctx;
 
 	if (set->ops->init_request &&
 	    set->ops->init_request(set->driver_data,
@@ -2014,6 +2040,8 @@ static int blk_mq_init_hctx(struct request_queue *q,
 
  free_fq:
 	kfree(hctx->fq);
+ sched_exit_hctx:
+	blk_mq_sched_exit_hctx(q, hctx, hctx_idx);
  exit_hctx:
 	if (set->ops->exit_hctx)
 		set->ops->exit_hctx(hctx, hctx_idx);
@@ -2240,8 +2268,6 @@ void blk_mq_release(struct request_queue *q)
 	struct blk_mq_hw_ctx *hctx;
 	unsigned int i;
 
-	blk_mq_sched_teardown(q);
-
 	/* hctx kobj stays in hctx */
 	queue_for_each_hw_ctx(q, hctx, i) {
 		if (!hctx)
@@ -2572,6 +2598,14 @@ static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
 	return 0;
 }
 
+static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
+{
+	if (set->ops->map_queues)
+		return set->ops->map_queues(set);
+	else
+		return blk_mq_map_queues(set);
+}
+
 /*
  * Alloc a tag set to be associated with one or more request queues.
  * May fail with EINVAL for various error conditions. May adjust the
@@ -2626,10 +2660,7 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
 	if (!set->mq_map)
 		goto out_free_tags;
 
-	if (set->ops->map_queues)
-		ret = set->ops->map_queues(set);
-	else
-		ret = blk_mq_map_queues(set);
+	ret = blk_mq_update_queue_map(set);
 	if (ret)
 		goto out_free_mq_map;
 
@@ -2721,6 +2752,7 @@ void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
 	blk_mq_freeze_queue(q);
 
 	set->nr_hw_queues = nr_hw_queues;
+	blk_mq_update_queue_map(set);
 	list_for_each_entry(q, &set->tag_list, tag_set_list) {
 		blk_mq_realloc_hw_ctxs(set, q);
 
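
blk_mq_delay_run_hw_queue() is exported above precisely so a driver that hits a transient resource shortage can pair it with a BUSY return instead of rerunning the queue immediately. A hedged kernel-style sketch of that intended pairing; demo_device_has_resources() and demo_issue() are hypothetical placeholders, and only the two blk-mq symbols come from this tree:

	/* Hypothetical ->queue_rq() showing the intended use of the new
	 * blk_mq_delay_run_hw_queue(): back off on a transient shortage
	 * and retry the hardware queue 100 ms later. */
	static int demo_queue_rq(struct blk_mq_hw_ctx *hctx,
				 const struct blk_mq_queue_data *bd)
	{
		if (!demo_device_has_resources(hctx->driver_data)) {
			blk_mq_delay_run_hw_queue(hctx, 100 /* ms */);
			return BLK_MQ_RQ_QUEUE_BUSY;
		}

		demo_issue(bd->rq);
		return BLK_MQ_RQ_QUEUE_OK;
	}
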
diff --git a/block/blk-mq.h b/block/blk-mq.h
index b79f9a7d8cf6..660a17e1d033 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -31,7 +31,7 @@ void blk_mq_freeze_queue(struct request_queue *q);
 void blk_mq_free_queue(struct request_queue *q);
 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
 void blk_mq_wake_waiters(struct request_queue *q);
-bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *, struct list_head *);
+bool blk_mq_dispatch_rq_list(struct request_queue *, struct list_head *);
 void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
 bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx);
 bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
diff --git a/block/blk-stat.c b/block/blk-stat.c
index 9b43efb8933f..186fcb981e9b 100644
--- a/block/blk-stat.c
+++ b/block/blk-stat.c
@@ -30,11 +30,11 @@ static void blk_stat_flush_batch(struct blk_rq_stat *stat)
 
 static void blk_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src)
 {
+	blk_stat_flush_batch(src);
+
 	if (!src->nr_samples)
 		return;
 
-	blk_stat_flush_batch(src);
-
 	dst->min = min(dst->min, src->min);
 	dst->max = max(dst->max, src->max);
 
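
The reordering above matters because a source whose samples all still sit in its batch buffer reports nr_samples == 0 until flushed; testing first would silently drop those samples. A small standalone illustration of the same batched-accumulator shape:

	#include <stdio.h>

	struct stat_acc {
		unsigned long long sum;
		unsigned int nr_samples;
		unsigned int batch[4];
		unsigned int nr_batch;
	};

	static void flush_batch(struct stat_acc *s)
	{
		unsigned int i;

		for (i = 0; i < s->nr_batch; i++) {
			s->sum += s->batch[i];
			s->nr_samples++;
		}
		s->nr_batch = 0;
	}

	static void stat_sum(struct stat_acc *dst, struct stat_acc *src)
	{
		flush_batch(src);	/* must happen first ... */

		if (!src->nr_samples)	/* ... or this test loses data */
			return;

		dst->sum += src->sum;
		dst->nr_samples += src->nr_samples;
	}

	int main(void)
	{
		struct stat_acc dst = { 0 }, src = { 0 };

		src.batch[src.nr_batch++] = 42;	/* batched, not yet counted */
		stat_sum(&dst, &src);
		printf("%u samples, sum %llu\n", dst.nr_samples, dst.sum);
		return 0;
	}
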
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index c44b321335f3..37f0b3ad635e 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -816,7 +816,7 @@ static void blk_release_queue(struct kobject *kobj)
 
 	if (q->elevator) {
 		ioc_clear_queue(q);
-		elevator_exit(q->elevator);
+		elevator_exit(q, q->elevator);
 	}
 
 	blk_exit_rl(&q->root_rl);
diff --git a/block/elevator.c b/block/elevator.c
index 01139f549b5b..dbeecf7be719 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -242,26 +242,21 @@ int elevator_init(struct request_queue *q, char *name)
 		}
 	}
 
-	if (e->uses_mq) {
-		err = blk_mq_sched_setup(q);
-		if (!err)
-			err = e->ops.mq.init_sched(q, e);
-	} else
+	if (e->uses_mq)
+		err = blk_mq_init_sched(q, e);
+	else
 		err = e->ops.sq.elevator_init_fn(q, e);
-	if (err) {
-		if (e->uses_mq)
-			blk_mq_sched_teardown(q);
+	if (err)
 		elevator_put(e);
-	}
 	return err;
 }
 EXPORT_SYMBOL(elevator_init);
 
-void elevator_exit(struct elevator_queue *e)
+void elevator_exit(struct request_queue *q, struct elevator_queue *e)
 {
 	mutex_lock(&e->sysfs_lock);
 	if (e->uses_mq && e->type->ops.mq.exit_sched)
-		e->type->ops.mq.exit_sched(e);
+		blk_mq_exit_sched(q, e);
 	else if (!e->uses_mq && e->type->ops.sq.elevator_exit_fn)
 		e->type->ops.sq.elevator_exit_fn(e);
 	mutex_unlock(&e->sysfs_lock);
@@ -946,6 +941,45 @@ void elv_unregister(struct elevator_type *e)
 }
 EXPORT_SYMBOL_GPL(elv_unregister);
 
+static int elevator_switch_mq(struct request_queue *q,
+			      struct elevator_type *new_e)
+{
+	int ret;
+
+	blk_mq_freeze_queue(q);
+	blk_mq_quiesce_queue(q);
+
+	if (q->elevator) {
+		if (q->elevator->registered)
+			elv_unregister_queue(q);
+		ioc_clear_queue(q);
+		elevator_exit(q, q->elevator);
+	}
+
+	ret = blk_mq_init_sched(q, new_e);
+	if (ret)
+		goto out;
+
+	if (new_e) {
+		ret = elv_register_queue(q);
+		if (ret) {
+			elevator_exit(q, q->elevator);
+			goto out;
+		}
+	}
+
+	if (new_e)
+		blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);
+	else
+		blk_add_trace_msg(q, "elv switch: none");
+
+out:
+	blk_mq_unfreeze_queue(q);
+	blk_mq_start_stopped_hw_queues(q, true);
+	return ret;
+
+}
+
 /*
  * switch to new_e io scheduler. be careful not to introduce deadlocks -
  * we don't free the old io scheduler, before we have allocated what we
@@ -958,10 +992,8 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
 	bool old_registered = false;
 	int err;
 
-	if (q->mq_ops) {
-		blk_mq_freeze_queue(q);
-		blk_mq_quiesce_queue(q);
-	}
+	if (q->mq_ops)
+		return elevator_switch_mq(q, new_e);
 
 	/*
 	 * Turn on BYPASS and drain all requests w/ elevator private data.
@@ -973,11 +1005,7 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
 	if (old) {
 		old_registered = old->registered;
 
-		if (old->uses_mq)
-			blk_mq_sched_teardown(q);
-
-		if (!q->mq_ops)
-			blk_queue_bypass_start(q);
+		blk_queue_bypass_start(q);
 
 		/* unregister and clear all auxiliary data of the old elevator */
 		if (old_registered)
@@ -987,56 +1015,32 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
 	}
 
 	/* allocate, init and register new elevator */
-	if (new_e) {
-		if (new_e->uses_mq) {
-			err = blk_mq_sched_setup(q);
-			if (!err)
-				err = new_e->ops.mq.init_sched(q, new_e);
-		} else
-			err = new_e->ops.sq.elevator_init_fn(q, new_e);
-		if (err)
-			goto fail_init;
+	err = new_e->ops.sq.elevator_init_fn(q, new_e);
+	if (err)
+		goto fail_init;
 
 	err = elv_register_queue(q);
 	if (err)
 		goto fail_register;
-	} else
-		q->elevator = NULL;
 
 	/* done, kill the old one and finish */
 	if (old) {
-		elevator_exit(old);
-		if (!q->mq_ops)
-			blk_queue_bypass_end(q);
+		elevator_exit(q, old);
+		blk_queue_bypass_end(q);
 	}
 
-	if (q->mq_ops) {
-		blk_mq_unfreeze_queue(q);
-		blk_mq_start_stopped_hw_queues(q, true);
-	}
-
-	if (new_e)
-		blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);
-	else
-		blk_add_trace_msg(q, "elv switch: none");
+	blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);
 
 	return 0;
 
 fail_register:
-	if (q->mq_ops)
-		blk_mq_sched_teardown(q);
-	elevator_exit(q->elevator);
+	elevator_exit(q, q->elevator);
fail_init:
 	/* switch failed, restore and re-register old elevator */
 	if (old) {
 		q->elevator = old;
 		elv_register_queue(q);
-		if (!q->mq_ops)
-			blk_queue_bypass_end(q);
-	}
-	if (q->mq_ops) {
-		blk_mq_unfreeze_queue(q);
-		blk_mq_start_stopped_hw_queues(q, true);
+		blk_queue_bypass_end(q);
 	}
 
 	return err;
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index f5e18c2a4852..690deca17c35 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -266,7 +266,7 @@ unlock:
 	return err;
 }
 
-int af_alg_accept(struct sock *sk, struct socket *newsock)
+int af_alg_accept(struct sock *sk, struct socket *newsock, bool kern)
 {
 	struct alg_sock *ask = alg_sk(sk);
 	const struct af_alg_type *type;
@@ -281,7 +281,7 @@ int af_alg_accept(struct sock *sk, struct socket *newsock)
 	if (!type)
 		goto unlock;
 
-	sk2 = sk_alloc(sock_net(sk), PF_ALG, GFP_KERNEL, &alg_proto, 0);
+	sk2 = sk_alloc(sock_net(sk), PF_ALG, GFP_KERNEL, &alg_proto, kern);
 	err = -ENOMEM;
 	if (!sk2)
 		goto unlock;
@@ -323,9 +323,10 @@ unlock:
 }
 EXPORT_SYMBOL_GPL(af_alg_accept);
 
-static int alg_accept(struct socket *sock, struct socket *newsock, int flags)
+static int alg_accept(struct socket *sock, struct socket *newsock, int flags,
+		      bool kern)
 {
-	return af_alg_accept(sock->sk, newsock);
+	return af_alg_accept(sock->sk, newsock, kern);
 }
 
 static const struct proto_ops alg_proto_ops = {
diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
index 54fc90e8339c..5e92bd275ef3 100644
--- a/crypto/algif_hash.c
+++ b/crypto/algif_hash.c
@@ -239,7 +239,8 @@ unlock:
 	return err ?: len;
 }
 
-static int hash_accept(struct socket *sock, struct socket *newsock, int flags)
+static int hash_accept(struct socket *sock, struct socket *newsock, int flags,
+		       bool kern)
 {
 	struct sock *sk = sock->sk;
 	struct alg_sock *ask = alg_sk(sk);
@@ -260,7 +261,7 @@ static int hash_accept(struct socket *sock, struct socket *newsock, int flags)
 	if (err)
 		return err;
 
-	err = af_alg_accept(ask->parent, newsock);
+	err = af_alg_accept(ask->parent, newsock, kern);
 	if (err)
 		return err;
 
@@ -378,7 +379,7 @@ static int hash_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
 }
 
 static int hash_accept_nokey(struct socket *sock, struct socket *newsock,
-			     int flags)
+			     int flags, bool kern)
 {
 	int err;
 
@@ -386,7 +387,7 @@ static int hash_accept_nokey(struct socket *sock, struct socket *newsock,
 	if (err)
 		return err;
 
-	return hash_accept(sock, newsock, flags);
+	return hash_accept(sock, newsock, flags, kern);
 }
 
 static struct proto_ops algif_hash_ops_nokey = {
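
The accept() path being re-plumbed here is the one user space hits when opening an algif_hash operation socket. A minimal SHA-256 digest over AF_ALG for context (a kernel with CONFIG_CRYPTO_USER_API_HASH is assumed; error checking trimmed to keep the sketch short):

	#include <stdio.h>
	#include <unistd.h>
	#include <sys/socket.h>
	#include <linux/if_alg.h>

	int main(void)
	{
		struct sockaddr_alg sa = {
			.salg_family = AF_ALG,
			.salg_type   = "hash",
			.salg_name   = "sha256",
		};
		unsigned char digest[32];
		int tfm, op, i;

		tfm = socket(AF_ALG, SOCK_SEQPACKET, 0);
		bind(tfm, (struct sockaddr *)&sa, sizeof(sa));
		op = accept(tfm, NULL, 0);	/* lands in hash_accept() */

		write(op, "abc", 3);
		read(op, digest, sizeof(digest));

		for (i = 0; i < 32; i++)	/* "abc" -> ba7816bf... */
			printf("%02x", digest[i]);
		printf("\n");

		close(op);
		close(tfm);
		return 0;
	}
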
diff --git a/crypto/lrw.c b/crypto/lrw.c
index ecd8474018e3..3ea095adafd9 100644
--- a/crypto/lrw.c
+++ b/crypto/lrw.c
@@ -286,8 +286,11 @@ static int init_crypt(struct skcipher_request *req, crypto_completion_t done)
 
 	subreq->cryptlen = LRW_BUFFER_SIZE;
 	if (req->cryptlen > LRW_BUFFER_SIZE) {
-		subreq->cryptlen = min(req->cryptlen, (unsigned)PAGE_SIZE);
-		rctx->ext = kmalloc(subreq->cryptlen, gfp);
+		unsigned int n = min(req->cryptlen, (unsigned int)PAGE_SIZE);
+
+		rctx->ext = kmalloc(n, gfp);
+		if (rctx->ext)
+			subreq->cryptlen = n;
 	}
 
 	rctx->src = req->src;
diff --git a/crypto/xts.c b/crypto/xts.c
index baeb34dd8582..c976bfac29da 100644
--- a/crypto/xts.c
+++ b/crypto/xts.c
@@ -230,8 +230,11 @@ static int init_crypt(struct skcipher_request *req, crypto_completion_t done)
 
 	subreq->cryptlen = XTS_BUFFER_SIZE;
 	if (req->cryptlen > XTS_BUFFER_SIZE) {
-		subreq->cryptlen = min(req->cryptlen, (unsigned)PAGE_SIZE);
-		rctx->ext = kmalloc(subreq->cryptlen, gfp);
+		unsigned int n = min(req->cryptlen, (unsigned int)PAGE_SIZE);
+
+		rctx->ext = kmalloc(n, gfp);
+		if (rctx->ext)
+			subreq->cryptlen = n;
 	}
 
 	rctx->src = req->src;
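
The lrw and xts fixes above are the same bug and the same cure: the per-request length must only grow after the larger buffer has actually been allocated, otherwise a failed kmalloc() leaves cryptlen pointing past the fixed-size buffer. The pattern in isolation, as a standalone sketch:

	#include <stdio.h>
	#include <stdlib.h>

	#define SMALL_BUF 128

	/* Returns the working buffer and sets *len to the usable chunk
	 * size; mirrors the corrected init_crypt() logic. */
	static void *pick_buffer(char *small, size_t request, size_t *len)
	{
		*len = SMALL_BUF;

		if (request > SMALL_BUF) {
			size_t n = request < 4096 ? request : 4096;
			void *ext = malloc(n);

			if (ext) {
				*len = n;	/* grow only on success */
				return ext;
			}
			/* allocation failed: keep the small buffer */
		}
		return small;
	}

	int main(void)
	{
		char small[SMALL_BUF];
		size_t len;
		void *buf = pick_buffer(small, 1000, &len);

		printf("chunk len = %zu\n", len);
		if (buf != small)
			free(buf);
		return 0;
	}
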
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index a391bbc48105..d94f92f88ca1 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -2,7 +2,6 @@
 # Makefile for the Linux ACPI interpreter
 #
 
-ccflags-y			:= -Os
 ccflags-$(CONFIG_ACPI_DEBUG)	+= -DACPI_DEBUG_OUTPUT
 
 #
diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c
index b4c1a6a51da4..03250e1f1103 100644
--- a/drivers/acpi/acpi_platform.c
+++ b/drivers/acpi/acpi_platform.c
@@ -25,9 +25,11 @@
 ACPI_MODULE_NAME("platform");
 
 static const struct acpi_device_id forbidden_id_list[] = {
 	{"PNP0000",  0},	/* PIC */
 	{"PNP0100",  0},	/* Timer */
 	{"PNP0200",  0},	/* AT DMA Controller */
+	{"ACPI0009", 0},	/* IOxAPIC */
+	{"ACPI000A", 0},	/* IOAPIC */
 	{"", 0},
 };
 
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index 4467a8089ab8..0143135b3abe 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -182,11 +182,6 @@ int __weak arch_register_cpu(int cpu)
 
 void __weak arch_unregister_cpu(int cpu) {}
 
-int __weak acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
-{
-	return -ENODEV;
-}
-
 static int acpi_processor_hotadd_init(struct acpi_processor *pr)
 {
 	unsigned long long sta;
@@ -285,6 +280,13 @@ static int acpi_processor_get_info(struct acpi_device *device)
 		pr->acpi_id = value;
 	}
 
+	if (acpi_duplicate_processor_id(pr->acpi_id)) {
+		dev_err(&device->dev,
+			"Failed to get unique processor _UID (0x%x)\n",
+			pr->acpi_id);
+		return -ENODEV;
+	}
+
 	pr->phys_id = acpi_get_phys_id(pr->handle, device_declaration,
 					pr->acpi_id);
 	if (invalid_phys_cpuid(pr->phys_id))
@@ -585,7 +587,7 @@ static struct acpi_scan_handler processor_container_handler = {
 static int nr_unique_ids __initdata;
 
 /* The number of the duplicate processor IDs */
-static int nr_duplicate_ids __initdata;
+static int nr_duplicate_ids;
 
 /* Used to store the unique processor IDs */
 static int unique_processor_ids[] __initdata = {
@@ -593,7 +595,7 @@ static int unique_processor_ids[] __initdata = {
 };
 
 /* Used to store the duplicate processor IDs */
-static int duplicate_processor_ids[] __initdata = {
+static int duplicate_processor_ids[] = {
 	[0 ... NR_CPUS - 1] = -1,
 };
 
@@ -638,28 +640,53 @@ static acpi_status __init acpi_processor_ids_walk(acpi_handle handle,
 				      void **rv)
 {
 	acpi_status status;
+	acpi_object_type acpi_type;
+	unsigned long long uid;
 	union acpi_object object = { 0 };
 	struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
 
-	status = acpi_evaluate_object(handle, NULL, NULL, &buffer);
+	status = acpi_get_type(handle, &acpi_type);
 	if (ACPI_FAILURE(status))
-		acpi_handle_info(handle, "Not get the processor object\n");
-	else
-		processor_validated_ids_update(object.processor.proc_id);
+		return false;
+
+	switch (acpi_type) {
+	case ACPI_TYPE_PROCESSOR:
+		status = acpi_evaluate_object(handle, NULL, NULL, &buffer);
+		if (ACPI_FAILURE(status))
+			goto err;
+		uid = object.processor.proc_id;
+		break;
+
+	case ACPI_TYPE_DEVICE:
+		status = acpi_evaluate_integer(handle, "_UID", NULL, &uid);
+		if (ACPI_FAILURE(status))
+			goto err;
+		break;
+	default:
+		goto err;
+	}
+
+	processor_validated_ids_update(uid);
+	return true;
+
+err:
+	acpi_handle_info(handle, "Invalid processor object\n");
+	return false;
 
-	return AE_OK;
 }
 
-static void __init acpi_processor_check_duplicates(void)
+void __init acpi_processor_check_duplicates(void)
 {
-	/* Search all processor nodes in ACPI namespace */
+	/* check the correctness for all processors in ACPI namespace */
 	acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
 						ACPI_UINT32_MAX,
 						acpi_processor_ids_walk,
 						NULL, NULL, NULL);
+	acpi_get_devices(ACPI_PROCESSOR_DEVICE_HID, acpi_processor_ids_walk,
+						NULL, NULL);
 }
 
-bool __init acpi_processor_validate_proc_id(int proc_id)
+bool acpi_duplicate_processor_id(int proc_id)
 {
 	int i;
 
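
The walk above records every processor _UID once and demotes repeats to a duplicate table, which acpi_duplicate_processor_id() then consults at hot-add time. A condensed standalone version of that bookkeeping (the real code keeps fixed NR_CPUS-sized tables; bounds handling is simplified here):

	#include <stdio.h>
	#include <stdbool.h>

	#define MAX_IDS 8

	static int unique_ids[MAX_IDS], nr_unique;
	static int duplicate_ids[MAX_IDS], nr_duplicate;

	/* First sighting goes to the unique table; any repeat is recorded
	 * as a duplicate, roughly as processor_validated_ids_update() does. */
	static void validated_ids_update(int uid)
	{
		int i;

		for (i = 0; i < nr_duplicate; i++)
			if (duplicate_ids[i] == uid)
				return;
		for (i = 0; i < nr_unique; i++) {
			if (unique_ids[i] == uid) {
				duplicate_ids[nr_duplicate++] = uid;
				return;
			}
		}
		unique_ids[nr_unique++] = uid;
	}

	static bool is_duplicate(int uid)
	{
		int i;

		for (i = 0; i < nr_duplicate; i++)
			if (duplicate_ids[i] == uid)
				return true;
		return false;
	}

	int main(void)
	{
		int uids[] = { 0, 1, 1, 2 }, i;

		for (i = 0; i < 4; i++)
			validated_ids_update(uids[i]);
		printf("uid 1 duplicated: %d\n", is_duplicate(1));	/* 1 */
		printf("uid 2 duplicated: %d\n", is_duplicate(2));	/* 0 */
		return 0;
	}
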
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index b192b42a8351..79b3c9c5a3bc 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -1073,6 +1073,7 @@ static int ghes_remove(struct platform_device *ghes_dev)
 		if (list_empty(&ghes_sci))
 			unregister_acpi_hed_notifier(&ghes_notifier_sci);
 		mutex_unlock(&ghes_list_mutex);
+		synchronize_rcu();
 		break;
 	case ACPI_HEST_NOTIFY_NMI:
 		ghes_nmi_remove(ghes);
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 80cb5eb75b63..34fbe027e73a 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -1249,7 +1249,6 @@ static int __init acpi_init(void)
 	acpi_wakeup_device_init();
 	acpi_debugger_init();
 	acpi_setup_sb_notify_handler();
-	acpi_set_processor_mapping();
 	return 0;
 }
 
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index fb19e1cdb641..edc8663b5db3 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -99,13 +99,13 @@ static int find_child_checks(struct acpi_device *adev, bool check_children)
 		return -ENODEV;
 
 	/*
-	 * If the device has a _HID (or _CID) returning a valid ACPI/PNP
-	 * device ID, it is better to make it look less attractive here, so that
-	 * the other device with the same _ADR value (that may not have a valid
-	 * device ID) can be matched going forward.  [This means a second spec
-	 * violation in a row, so whatever we do here is best effort anyway.]
+	 * If the device has a _HID returning a valid ACPI/PNP device ID, it is
+	 * better to make it look less attractive here, so that the other device
+	 * with the same _ADR value (that may not have a valid device ID) can be
+	 * matched going forward.  [This means a second spec violation in a row,
+	 * so whatever we do here is best effort anyway.]
 	 */
-	return sta_present && list_empty(&adev->pnp.ids) ?
+	return sta_present && !adev->pnp.type.platform_id ?
 		FIND_CHILD_MAX_SCORE : FIND_CHILD_MIN_SCORE;
 }
 
diff --git a/drivers/acpi/ioapic.c b/drivers/acpi/ioapic.c
index 1120dfd625b8..7e4fbf9a53a3 100644
--- a/drivers/acpi/ioapic.c
+++ b/drivers/acpi/ioapic.c
@@ -45,6 +45,12 @@ static acpi_status setup_res(struct acpi_resource *acpi_res, void *data)
 	struct resource *res = data;
 	struct resource_win win;
 
+	/*
+	 * We might assign this to 'res' later, make sure all pointers are
+	 * cleared before the resource is added to the global list
+	 */
+	memset(&win, 0, sizeof(win));
+
 	res->flags = 0;
 	if (acpi_dev_filter_resource_type(acpi_res, IORESOURCE_MEM))
 		return AE_OK;
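
Zeroing win up front matters because only some of its fields are written on each path, yet the whole structure, embedded pointers included, may later be copied into a long-lived resource. The hazard in miniature:

	#include <stdio.h>
	#include <string.h>

	struct win {
		long start, end;
		void *parent;	/* must not leak a stale stack value */
	};

	static void setup(struct win *out, int use_window)
	{
		struct win win;

		/* As in the ioapic.c fix: clear everything before any path
		 * that publishes the struct, so no field escapes
		 * uninitialized. */
		memset(&win, 0, sizeof(win));

		if (use_window) {
			win.start = 0x1000;
			win.end = 0x1fff;
		}
		*out = win;	/* parent is guaranteed NULL here */
	}

	int main(void)
	{
		struct win w;

		setup(&w, 1);
		printf("parent=%p\n", w.parent);
		return 0;
	}
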
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 611a5585a902..b933061b6b60 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -32,12 +32,12 @@ static struct acpi_table_madt *get_madt_table(void)
 }
 
 static int map_lapic_id(struct acpi_subtable_header *entry,
-		 u32 acpi_id, phys_cpuid_t *apic_id, bool ignore_disabled)
+		 u32 acpi_id, phys_cpuid_t *apic_id)
 {
 	struct acpi_madt_local_apic *lapic =
 		container_of(entry, struct acpi_madt_local_apic, header);
 
-	if (ignore_disabled && !(lapic->lapic_flags & ACPI_MADT_ENABLED))
+	if (!(lapic->lapic_flags & ACPI_MADT_ENABLED))
 		return -ENODEV;
 
 	if (lapic->processor_id != acpi_id)
@@ -48,13 +48,12 @@ static int map_lapic_id(struct acpi_subtable_header *entry,
 }
 
 static int map_x2apic_id(struct acpi_subtable_header *entry,
-		int device_declaration, u32 acpi_id, phys_cpuid_t *apic_id,
-		bool ignore_disabled)
+		int device_declaration, u32 acpi_id, phys_cpuid_t *apic_id)
 {
 	struct acpi_madt_local_x2apic *apic =
 		container_of(entry, struct acpi_madt_local_x2apic, header);
 
-	if (ignore_disabled && !(apic->lapic_flags & ACPI_MADT_ENABLED))
+	if (!(apic->lapic_flags & ACPI_MADT_ENABLED))
 		return -ENODEV;
 
 	if (device_declaration && (apic->uid == acpi_id)) {
@@ -66,13 +65,12 @@ static int map_x2apic_id(struct acpi_subtable_header *entry,
 }
 
 static int map_lsapic_id(struct acpi_subtable_header *entry,
-	int device_declaration, u32 acpi_id, phys_cpuid_t *apic_id,
-	bool ignore_disabled)
+	int device_declaration, u32 acpi_id, phys_cpuid_t *apic_id)
 {
 	struct acpi_madt_local_sapic *lsapic =
 		container_of(entry, struct acpi_madt_local_sapic, header);
 
-	if (ignore_disabled && !(lsapic->lapic_flags & ACPI_MADT_ENABLED))
+	if (!(lsapic->lapic_flags & ACPI_MADT_ENABLED))
 		return -ENODEV;
 
 	if (device_declaration) {
@@ -89,13 +87,12 @@ static int map_lsapic_id(struct acpi_subtable_header *entry,
  * Retrieve the ARM CPU physical identifier (MPIDR)
  */
 static int map_gicc_mpidr(struct acpi_subtable_header *entry,
-	int device_declaration, u32 acpi_id, phys_cpuid_t *mpidr,
-	bool ignore_disabled)
+	int device_declaration, u32 acpi_id, phys_cpuid_t *mpidr)
 {
 	struct acpi_madt_generic_interrupt *gicc =
 		container_of(entry, struct acpi_madt_generic_interrupt, header);
 
-	if (ignore_disabled && !(gicc->flags & ACPI_MADT_ENABLED))
+	if (!(gicc->flags & ACPI_MADT_ENABLED))
 		return -ENODEV;
 
 	/* device_declaration means Device object in DSDT, in the
@@ -112,7 +109,7 @@ static int map_gicc_mpidr(struct acpi_subtable_header *entry,
 }
 
 static phys_cpuid_t map_madt_entry(struct acpi_table_madt *madt,
-				   int type, u32 acpi_id, bool ignore_disabled)
+				   int type, u32 acpi_id)
 {
 	unsigned long madt_end, entry;
 	phys_cpuid_t phys_id = PHYS_CPUID_INVALID;	/* CPU hardware ID */
@@ -130,20 +127,16 @@ static phys_cpuid_t map_madt_entry(struct acpi_table_madt *madt,
 		struct acpi_subtable_header *header =
 			(struct acpi_subtable_header *)entry;
 		if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
-			if (!map_lapic_id(header, acpi_id, &phys_id,
-					  ignore_disabled))
+			if (!map_lapic_id(header, acpi_id, &phys_id))
 				break;
 		} else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC) {
-			if (!map_x2apic_id(header, type, acpi_id, &phys_id,
-					   ignore_disabled))
+			if (!map_x2apic_id(header, type, acpi_id, &phys_id))
 				break;
 		} else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
-			if (!map_lsapic_id(header, type, acpi_id, &phys_id,
-					   ignore_disabled))
+			if (!map_lsapic_id(header, type, acpi_id, &phys_id))
 				break;
 		} else if (header->type == ACPI_MADT_TYPE_GENERIC_INTERRUPT) {
-			if (!map_gicc_mpidr(header, type, acpi_id, &phys_id,
-					    ignore_disabled))
+			if (!map_gicc_mpidr(header, type, acpi_id, &phys_id))
 				break;
 		}
 		entry += header->length;
@@ -161,15 +154,14 @@ phys_cpuid_t __init acpi_map_madt_entry(u32 acpi_id)
 	if (!madt)
 		return PHYS_CPUID_INVALID;
 
-	rv = map_madt_entry(madt, 1, acpi_id, true);
+	rv = map_madt_entry(madt, 1, acpi_id);
 
 	acpi_put_table((struct acpi_table_header *)madt);
 
 	return rv;
 }
 
-static phys_cpuid_t map_mat_entry(acpi_handle handle, int type, u32 acpi_id,
-				  bool ignore_disabled)
+static phys_cpuid_t map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
 {
 	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
 	union acpi_object *obj;
@@ -190,38 +182,30 @@ static phys_cpuid_t map_mat_entry(acpi_handle handle, int type, u32 acpi_id,
 
 	header = (struct acpi_subtable_header *)obj->buffer.pointer;
 	if (header->type == ACPI_MADT_TYPE_LOCAL_APIC)
-		map_lapic_id(header, acpi_id, &phys_id, ignore_disabled);
+		map_lapic_id(header, acpi_id, &phys_id);
 	else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC)
-		map_lsapic_id(header, type, acpi_id, &phys_id, ignore_disabled);
+		map_lsapic_id(header, type, acpi_id, &phys_id);
 	else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC)
-		map_x2apic_id(header, type, acpi_id, &phys_id, ignore_disabled);
+		map_x2apic_id(header, type, acpi_id, &phys_id);
 	else if (header->type == ACPI_MADT_TYPE_GENERIC_INTERRUPT)
-		map_gicc_mpidr(header, type, acpi_id, &phys_id,
-			       ignore_disabled);
+		map_gicc_mpidr(header, type, acpi_id, &phys_id);
 
 exit:
 	kfree(buffer.pointer);
 	return phys_id;
 }
 
-static phys_cpuid_t __acpi_get_phys_id(acpi_handle handle, int type,
-				       u32 acpi_id, bool ignore_disabled)
+phys_cpuid_t acpi_get_phys_id(acpi_handle handle, int type, u32 acpi_id)
 {
 	phys_cpuid_t phys_id;
 
-	phys_id = map_mat_entry(handle, type, acpi_id, ignore_disabled);
+	phys_id = map_mat_entry(handle, type, acpi_id);
 	if (invalid_phys_cpuid(phys_id))
-		phys_id = map_madt_entry(get_madt_table(), type, acpi_id,
-					 ignore_disabled);
+		phys_id = map_madt_entry(get_madt_table(), type, acpi_id);
 
 	return phys_id;
 }
 
-phys_cpuid_t acpi_get_phys_id(acpi_handle handle, int type, u32 acpi_id)
-{
-	return __acpi_get_phys_id(handle, type, acpi_id, true);
-}
-
 int acpi_map_cpuid(phys_cpuid_t phys_id, u32 acpi_id)
 {
 #ifdef CONFIG_SMP
@@ -278,79 +262,6 @@ int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
 }
 EXPORT_SYMBOL_GPL(acpi_get_cpuid);
 
-#ifdef CONFIG_ACPI_HOTPLUG_CPU
-static bool __init
-map_processor(acpi_handle handle, phys_cpuid_t *phys_id, int *cpuid)
-{
-	int type, id;
-	u32 acpi_id;
-	acpi_status status;
-	acpi_object_type acpi_type;
-	unsigned long long tmp;
-	union acpi_object object = { 0 };
-	struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
-
-	status = acpi_get_type(handle, &acpi_type);
-	if (ACPI_FAILURE(status))
-		return false;
-
-	switch (acpi_type) {
-	case ACPI_TYPE_PROCESSOR:
-		status = acpi_evaluate_object(handle, NULL, NULL, &buffer);
-		if (ACPI_FAILURE(status))
-			return false;
-		acpi_id = object.processor.proc_id;
-
-		/* validate the acpi_id */
-		if(acpi_processor_validate_proc_id(acpi_id))
-			return false;
-		break;
-	case ACPI_TYPE_DEVICE:
-		status = acpi_evaluate_integer(handle, "_UID", NULL, &tmp);
-		if (ACPI_FAILURE(status))
-			return false;
-		acpi_id = tmp;
-		break;
-	default:
-		return false;
-	}
-
-	type = (acpi_type == ACPI_TYPE_DEVICE) ? 1 : 0;
-
-	*phys_id = __acpi_get_phys_id(handle, type, acpi_id, false);
321 id = acpi_map_cpuid(*phys_id, acpi_id);
322
323 if (id < 0)
324 return false;
325 *cpuid = id;
326 return true;
327}
328
329static acpi_status __init
330set_processor_node_mapping(acpi_handle handle, u32 lvl, void *context,
331 void **rv)
332{
333 phys_cpuid_t phys_id;
334 int cpu_id;
335
336 if (!map_processor(handle, &phys_id, &cpu_id))
337 return AE_ERROR;
338
339 acpi_map_cpu2node(handle, cpu_id, phys_id);
340 return AE_OK;
341}
342
343void __init acpi_set_processor_mapping(void)
344{
345 /* Set persistent cpu <-> node mapping for all processors. */
346 acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
347 ACPI_UINT32_MAX, set_processor_node_mapping,
348 NULL, NULL, NULL);
349}
350#else
351void __init acpi_set_processor_mapping(void) {}
352#endif /* CONFIG_ACPI_HOTPLUG_CPU */
353
354#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC 265#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
355static int get_ioapic_id(struct acpi_subtable_header *entry, u32 gsi_base, 266static int get_ioapic_id(struct acpi_subtable_header *entry, u32 gsi_base,
356 u64 *phys_addr, int *ioapic_id) 267 u64 *phys_addr, int *ioapic_id)
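
Note on the pattern above: map_madt_entry() iterates the MADT's variable-length subtables, dispatching on each header type until one entry yields a mapping. A minimal sketch of that walk (struct names as in the hunk; the per-type dispatch is elided):

	static void walk_madt_subtables(struct acpi_table_madt *madt)
	{
		unsigned long entry = (unsigned long)madt + sizeof(*madt);
		unsigned long madt_end = (unsigned long)madt + madt->header.length;

		while (entry + sizeof(struct acpi_subtable_header) < madt_end) {
			struct acpi_subtable_header *header =
					(struct acpi_subtable_header *)entry;

			/* dispatch on header->type (LOCAL_APIC, X2APIC, GICC, ...) */

			entry += header->length;	/* subtables are variable length */
		}
	}

With the hotplug-only ignore_disabled plumbing removed, every caller now skips entries whose ACPI_MADT_ENABLED flag is clear.
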
diff --git a/drivers/acpi/spcr.c b/drivers/acpi/spcr.c
index 01c94669a2b0..3afa8c1fa127 100644
--- a/drivers/acpi/spcr.c
+++ b/drivers/acpi/spcr.c
@@ -30,7 +30,7 @@ static bool qdf2400_erratum_44_present(struct acpi_table_header *h)
30 return true; 30 return true;
31 31
32 if (!memcmp(h->oem_table_id, "QDF2400 ", ACPI_OEM_TABLE_ID_SIZE) && 32 if (!memcmp(h->oem_table_id, "QDF2400 ", ACPI_OEM_TABLE_ID_SIZE) &&
33 h->oem_revision == 0) 33 h->oem_revision == 1)
34 return true; 34 return true;
35 35
36 return false; 36 return false;
diff --git a/drivers/ata/ahci_qoriq.c b/drivers/ata/ahci_qoriq.c
index 85d833289f28..4c96f3ac4976 100644
--- a/drivers/ata/ahci_qoriq.c
+++ b/drivers/ata/ahci_qoriq.c
@@ -177,7 +177,8 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv)
177 case AHCI_LS1043A: 177 case AHCI_LS1043A:
178 if (!qpriv->ecc_addr) 178 if (!qpriv->ecc_addr)
179 return -EINVAL; 179 return -EINVAL;
180 writel(ECC_DIS_ARMV8_CH2, qpriv->ecc_addr); 180 writel(readl(qpriv->ecc_addr) | ECC_DIS_ARMV8_CH2,
181 qpriv->ecc_addr);
181 writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1); 182 writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
182 writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS); 183 writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
183 if (qpriv->is_dmacoherent) 184 if (qpriv->is_dmacoherent)
@@ -194,7 +195,8 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv)
194 case AHCI_LS1046A: 195 case AHCI_LS1046A:
195 if (!qpriv->ecc_addr) 196 if (!qpriv->ecc_addr)
196 return -EINVAL; 197 return -EINVAL;
197 writel(ECC_DIS_ARMV8_CH2, qpriv->ecc_addr); 198 writel(readl(qpriv->ecc_addr) | ECC_DIS_ARMV8_CH2,
199 qpriv->ecc_addr);
198 writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1); 200 writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
199 writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS); 201 writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
200 if (qpriv->is_dmacoherent) 202 if (qpriv->is_dmacoherent)
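
The ECC fix above is a plain read-modify-write: set the disable bit without clobbering whatever else the register holds. In isolation (names as in the hunk):

	u32 val = readl(qpriv->ecc_addr);		/* keep the other bits */
	writel(val | ECC_DIS_ARMV8_CH2, qpriv->ecc_addr);

The original blind writel() would have zeroed every other bit in the ECC register.
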
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 2bd92dca3e62..274d6d7193d7 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -1482,7 +1482,6 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
1482 break; 1482 break;
1483 1483
1484 default: 1484 default:
1485 WARN_ON_ONCE(1);
1486 return AC_ERR_SYSTEM; 1485 return AC_ERR_SYSTEM;
1487 } 1486 }
1488 1487
diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c
index 46698232e6bf..19e6e539a061 100644
--- a/drivers/ata/libata-transport.c
+++ b/drivers/ata/libata-transport.c
@@ -224,7 +224,6 @@ static DECLARE_TRANSPORT_CLASS(ata_port_class,
224 224
225static void ata_tport_release(struct device *dev) 225static void ata_tport_release(struct device *dev)
226{ 226{
227 put_device(dev->parent);
228} 227}
229 228
230/** 229/**
@@ -284,7 +283,7 @@ int ata_tport_add(struct device *parent,
284 device_initialize(dev); 283 device_initialize(dev);
285 dev->type = &ata_port_type; 284 dev->type = &ata_port_type;
286 285
287 dev->parent = get_device(parent); 286 dev->parent = parent;
288 dev->release = ata_tport_release; 287 dev->release = ata_tport_release;
289 dev_set_name(dev, "ata%d", ap->print_id); 288 dev_set_name(dev, "ata%d", ap->print_id);
290 transport_setup_device(dev); 289 transport_setup_device(dev);
@@ -348,7 +347,6 @@ static DECLARE_TRANSPORT_CLASS(ata_link_class,
348 347
349static void ata_tlink_release(struct device *dev) 348static void ata_tlink_release(struct device *dev)
350{ 349{
351 put_device(dev->parent);
352} 350}
353 351
354/** 352/**
@@ -410,7 +408,7 @@ int ata_tlink_add(struct ata_link *link)
410 int error; 408 int error;
411 409
412 device_initialize(dev); 410 device_initialize(dev);
413 dev->parent = get_device(&ap->tdev); 411 dev->parent = &ap->tdev;
414 dev->release = ata_tlink_release; 412 dev->release = ata_tlink_release;
415 if (ata_is_host_link(link)) 413 if (ata_is_host_link(link))
416 dev_set_name(dev, "link%d", ap->print_id); 414 dev_set_name(dev, "link%d", ap->print_id);
@@ -589,7 +587,6 @@ static DECLARE_TRANSPORT_CLASS(ata_dev_class,
589 587
590static void ata_tdev_release(struct device *dev) 588static void ata_tdev_release(struct device *dev)
591{ 589{
592 put_device(dev->parent);
593} 590}
594 591
595/** 592/**
@@ -662,7 +659,7 @@ static int ata_tdev_add(struct ata_device *ata_dev)
662 int error; 659 int error;
663 660
664 device_initialize(dev); 661 device_initialize(dev);
665 dev->parent = get_device(&link->tdev); 662 dev->parent = &link->tdev;
666 dev->release = ata_tdev_release; 663 dev->release = ata_tdev_release;
667 if (ata_is_host_link(link)) 664 if (ata_is_host_link(link))
668 dev_set_name(dev, "dev%d.%d", ap->print_id,ata_dev->devno); 665 dev_set_name(dev, "dev%d.%d", ap->print_id,ata_dev->devno);
diff --git a/drivers/auxdisplay/img-ascii-lcd.c b/drivers/auxdisplay/img-ascii-lcd.c
index bf43b5d2aafc..83f1439e57fd 100644
--- a/drivers/auxdisplay/img-ascii-lcd.c
+++ b/drivers/auxdisplay/img-ascii-lcd.c
@@ -218,6 +218,7 @@ static const struct of_device_id img_ascii_lcd_matches[] = {
218 { .compatible = "img,boston-lcd", .data = &boston_config }, 218 { .compatible = "img,boston-lcd", .data = &boston_config },
219 { .compatible = "mti,malta-lcd", .data = &malta_config }, 219 { .compatible = "mti,malta-lcd", .data = &malta_config },
220 { .compatible = "mti,sead3-lcd", .data = &sead3_config }, 220 { .compatible = "mti,sead3-lcd", .data = &sead3_config },
221 { /* sentinel */ }
221}; 222};
222 223
223/** 224/**
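
The added line matters because of_match_node() and friends scan an of_device_id array until they hit an all-zero entry; without one, the scan walks off the end of the table. The shape every match table needs (hypothetical example):

	static const struct of_device_id example_matches[] = {
		{ .compatible = "vendor,example-lcd", .data = &example_config },
		{ /* sentinel */ }	/* terminates the scan */
	};
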
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 684bda4d14a1..6bb60fb6a30b 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -639,11 +639,6 @@ int lock_device_hotplug_sysfs(void)
639 return restart_syscall(); 639 return restart_syscall();
640} 640}
641 641
642void assert_held_device_hotplug(void)
643{
644 lockdep_assert_held(&device_hotplug_lock);
645}
646
647#ifdef CONFIG_BLOCK 642#ifdef CONFIG_BLOCK
648static inline int device_is_not_partition(struct device *dev) 643static inline int device_is_not_partition(struct device *dev)
649{ 644{
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 7e4287bc19e5..d8a23561b4cb 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -47,6 +47,8 @@ static DEFINE_MUTEX(nbd_index_mutex);
47struct nbd_sock { 47struct nbd_sock {
48 struct socket *sock; 48 struct socket *sock;
49 struct mutex tx_lock; 49 struct mutex tx_lock;
50 struct request *pending;
51 int sent;
50}; 52};
51 53
52#define NBD_TIMEDOUT 0 54#define NBD_TIMEDOUT 0
@@ -124,7 +126,8 @@ static const char *nbdcmd_to_ascii(int cmd)
124 126
125static int nbd_size_clear(struct nbd_device *nbd, struct block_device *bdev) 127static int nbd_size_clear(struct nbd_device *nbd, struct block_device *bdev)
126{ 128{
127 bd_set_size(bdev, 0); 129 if (bdev->bd_openers <= 1)
130 bd_set_size(bdev, 0);
128 set_capacity(nbd->disk, 0); 131 set_capacity(nbd->disk, 0);
129 kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE); 132 kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
130 133
@@ -190,7 +193,7 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
190 193
191 dev_err(nbd_to_dev(nbd), "Connection timed out, shutting down connection\n"); 194 dev_err(nbd_to_dev(nbd), "Connection timed out, shutting down connection\n");
192 set_bit(NBD_TIMEDOUT, &nbd->runtime_flags); 195 set_bit(NBD_TIMEDOUT, &nbd->runtime_flags);
193 req->errors++; 196 req->errors = -EIO;
194 197
195 mutex_lock(&nbd->config_lock); 198 mutex_lock(&nbd->config_lock);
196 sock_shutdown(nbd); 199 sock_shutdown(nbd);
@@ -202,7 +205,7 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
202 * Send or receive packet. 205 * Send or receive packet.
203 */ 206 */
204static int sock_xmit(struct nbd_device *nbd, int index, int send, 207static int sock_xmit(struct nbd_device *nbd, int index, int send,
205 struct iov_iter *iter, int msg_flags) 208 struct iov_iter *iter, int msg_flags, int *sent)
206{ 209{
207 struct socket *sock = nbd->socks[index]->sock; 210 struct socket *sock = nbd->socks[index]->sock;
208 int result; 211 int result;
@@ -237,6 +240,8 @@ static int sock_xmit(struct nbd_device *nbd, int index, int send,
237 result = -EPIPE; /* short read */ 240 result = -EPIPE; /* short read */
238 break; 241 break;
239 } 242 }
243 if (sent)
244 *sent += result;
240 } while (msg_data_left(&msg)); 245 } while (msg_data_left(&msg));
241 246
242 tsk_restore_flags(current, pflags, PF_MEMALLOC); 247 tsk_restore_flags(current, pflags, PF_MEMALLOC);
@@ -248,6 +253,7 @@ static int sock_xmit(struct nbd_device *nbd, int index, int send,
248static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index) 253static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
249{ 254{
250 struct request *req = blk_mq_rq_from_pdu(cmd); 255 struct request *req = blk_mq_rq_from_pdu(cmd);
256 struct nbd_sock *nsock = nbd->socks[index];
251 int result; 257 int result;
252 struct nbd_request request = {.magic = htonl(NBD_REQUEST_MAGIC)}; 258 struct nbd_request request = {.magic = htonl(NBD_REQUEST_MAGIC)};
253 struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)}; 259 struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
@@ -256,6 +262,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
256 struct bio *bio; 262 struct bio *bio;
257 u32 type; 263 u32 type;
258 u32 tag = blk_mq_unique_tag(req); 264 u32 tag = blk_mq_unique_tag(req);
265 int sent = nsock->sent, skip = 0;
259 266
260 iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request)); 267 iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request));
261 268
@@ -283,6 +290,17 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
283 return -EIO; 290 return -EIO;
284 } 291 }
285 292
293 /* We did a partial send previously, and we at least sent the whole
294 * request struct, so just go and send the rest of the pages in the
295 * request.
296 */
297 if (sent) {
298 if (sent >= sizeof(request)) {
299 skip = sent - sizeof(request);
300 goto send_pages;
301 }
302 iov_iter_advance(&from, sent);
303 }
286 request.type = htonl(type); 304 request.type = htonl(type);
287 if (type != NBD_CMD_FLUSH) { 305 if (type != NBD_CMD_FLUSH) {
288 request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9); 306 request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
@@ -294,15 +312,27 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
294 cmd, nbdcmd_to_ascii(type), 312 cmd, nbdcmd_to_ascii(type),
295 (unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req)); 313 (unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
296 result = sock_xmit(nbd, index, 1, &from, 314 result = sock_xmit(nbd, index, 1, &from,
297 (type == NBD_CMD_WRITE) ? MSG_MORE : 0); 315 (type == NBD_CMD_WRITE) ? MSG_MORE : 0, &sent);
298 if (result <= 0) { 316 if (result <= 0) {
317 if (result == -ERESTARTSYS) {
318 /* If we haven't sent anything we can just return BUSY;
319 * however, if we have sent something we need to make
320 * sure we only allow this req to be sent until we are
321 * completely done.
322 */
323 if (sent) {
324 nsock->pending = req;
325 nsock->sent = sent;
326 }
327 return BLK_MQ_RQ_QUEUE_BUSY;
328 }
299 dev_err_ratelimited(disk_to_dev(nbd->disk), 329 dev_err_ratelimited(disk_to_dev(nbd->disk),
300 "Send control failed (result %d)\n", result); 330 "Send control failed (result %d)\n", result);
301 return -EIO; 331 return -EIO;
302 } 332 }
303 333send_pages:
304 if (type != NBD_CMD_WRITE) 334 if (type != NBD_CMD_WRITE)
305 return 0; 335 goto out;
306 336
307 bio = req->bio; 337 bio = req->bio;
308 while (bio) { 338 while (bio) {
@@ -318,8 +348,25 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
318 cmd, bvec.bv_len); 348 cmd, bvec.bv_len);
319 iov_iter_bvec(&from, ITER_BVEC | WRITE, 349 iov_iter_bvec(&from, ITER_BVEC | WRITE,
320 &bvec, 1, bvec.bv_len); 350 &bvec, 1, bvec.bv_len);
321 result = sock_xmit(nbd, index, 1, &from, flags); 351 if (skip) {
352 if (skip >= iov_iter_count(&from)) {
353 skip -= iov_iter_count(&from);
354 continue;
355 }
356 iov_iter_advance(&from, skip);
357 skip = 0;
358 }
359 result = sock_xmit(nbd, index, 1, &from, flags, &sent);
322 if (result <= 0) { 360 if (result <= 0) {
361 if (result == -ERESTARTSYS) {
362 /* We've already sent the header, so we
363 * have no choice but to set pending and
364 * return BUSY.
365 */
366 nsock->pending = req;
367 nsock->sent = sent;
368 return BLK_MQ_RQ_QUEUE_BUSY;
369 }
323 dev_err(disk_to_dev(nbd->disk), 370 dev_err(disk_to_dev(nbd->disk),
324 "Send data failed (result %d)\n", 371 "Send data failed (result %d)\n",
325 result); 372 result);
@@ -336,6 +383,9 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
336 } 383 }
337 bio = next; 384 bio = next;
338 } 385 }
386out:
387 nsock->pending = NULL;
388 nsock->sent = 0;
339 return 0; 389 return 0;
340} 390}
341 391
@@ -353,7 +403,7 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
353 403
354 reply.magic = 0; 404 reply.magic = 0;
355 iov_iter_kvec(&to, READ | ITER_KVEC, &iov, 1, sizeof(reply)); 405 iov_iter_kvec(&to, READ | ITER_KVEC, &iov, 1, sizeof(reply));
356 result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL); 406 result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
357 if (result <= 0) { 407 if (result <= 0) {
358 if (!test_bit(NBD_DISCONNECTED, &nbd->runtime_flags) && 408 if (!test_bit(NBD_DISCONNECTED, &nbd->runtime_flags) &&
359 !test_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags)) 409 !test_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags))
@@ -383,7 +433,7 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
383 if (ntohl(reply.error)) { 433 if (ntohl(reply.error)) {
384 dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n", 434 dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
385 ntohl(reply.error)); 435 ntohl(reply.error));
386 req->errors++; 436 req->errors = -EIO;
387 return cmd; 437 return cmd;
388 } 438 }
389 439
@@ -395,11 +445,11 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
395 rq_for_each_segment(bvec, req, iter) { 445 rq_for_each_segment(bvec, req, iter) {
396 iov_iter_bvec(&to, ITER_BVEC | READ, 446 iov_iter_bvec(&to, ITER_BVEC | READ,
397 &bvec, 1, bvec.bv_len); 447 &bvec, 1, bvec.bv_len);
398 result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL); 448 result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
399 if (result <= 0) { 449 if (result <= 0) {
400 dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n", 450 dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
401 result); 451 result);
402 req->errors++; 452 req->errors = -EIO;
403 return cmd; 453 return cmd;
404 } 454 }
405 dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n", 455 dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
@@ -469,7 +519,7 @@ static void nbd_clear_req(struct request *req, void *data, bool reserved)
469 if (!blk_mq_request_started(req)) 519 if (!blk_mq_request_started(req))
470 return; 520 return;
471 cmd = blk_mq_rq_to_pdu(req); 521 cmd = blk_mq_rq_to_pdu(req);
472 req->errors++; 522 req->errors = -EIO;
473 nbd_end_request(cmd); 523 nbd_end_request(cmd);
474} 524}
475 525
@@ -482,22 +532,23 @@ static void nbd_clear_que(struct nbd_device *nbd)
482} 532}
483 533
484 534
485static void nbd_handle_cmd(struct nbd_cmd *cmd, int index) 535static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
486{ 536{
487 struct request *req = blk_mq_rq_from_pdu(cmd); 537 struct request *req = blk_mq_rq_from_pdu(cmd);
488 struct nbd_device *nbd = cmd->nbd; 538 struct nbd_device *nbd = cmd->nbd;
489 struct nbd_sock *nsock; 539 struct nbd_sock *nsock;
540 int ret;
490 541
491 if (index >= nbd->num_connections) { 542 if (index >= nbd->num_connections) {
492 dev_err_ratelimited(disk_to_dev(nbd->disk), 543 dev_err_ratelimited(disk_to_dev(nbd->disk),
493 "Attempted send on invalid socket\n"); 544 "Attempted send on invalid socket\n");
494 goto error_out; 545 return -EINVAL;
495 } 546 }
496 547
497 if (test_bit(NBD_DISCONNECTED, &nbd->runtime_flags)) { 548 if (test_bit(NBD_DISCONNECTED, &nbd->runtime_flags)) {
498 dev_err_ratelimited(disk_to_dev(nbd->disk), 549 dev_err_ratelimited(disk_to_dev(nbd->disk),
499 "Attempted send on closed socket\n"); 550 "Attempted send on closed socket\n");
500 goto error_out; 551 return -EINVAL;
501 } 552 }
502 553
503 req->errors = 0; 554 req->errors = 0;
@@ -508,29 +559,30 @@ static void nbd_handle_cmd(struct nbd_cmd *cmd, int index)
508 mutex_unlock(&nsock->tx_lock); 559 mutex_unlock(&nsock->tx_lock);
509 dev_err_ratelimited(disk_to_dev(nbd->disk), 560 dev_err_ratelimited(disk_to_dev(nbd->disk),
510 "Attempted send on closed socket\n"); 561 "Attempted send on closed socket\n");
511 goto error_out; 562 return -EINVAL;
512 } 563 }
513 564
514 if (nbd_send_cmd(nbd, cmd, index) != 0) { 565 /* Handle the case that we have a pending request that was partially
515 dev_err_ratelimited(disk_to_dev(nbd->disk), 566 * transmitted and _has_ to be serviced first. We need to call requeue
516 "Request send failed\n"); 567 * here so that it gets put _after_ the request that is already on the
517 req->errors++; 568 * dispatch list.
518 nbd_end_request(cmd); 569 */
570 if (unlikely(nsock->pending && nsock->pending != req)) {
571 blk_mq_requeue_request(req, true);
572 ret = 0;
573 goto out;
519 } 574 }
520 575 ret = nbd_send_cmd(nbd, cmd, index);
576out:
521 mutex_unlock(&nsock->tx_lock); 577 mutex_unlock(&nsock->tx_lock);
522 578 return ret;
523 return;
524
525error_out:
526 req->errors++;
527 nbd_end_request(cmd);
528} 579}
529 580
530static int nbd_queue_rq(struct blk_mq_hw_ctx *hctx, 581static int nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
531 const struct blk_mq_queue_data *bd) 582 const struct blk_mq_queue_data *bd)
532{ 583{
533 struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq); 584 struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
585 int ret;
534 586
535 /* 587 /*
536 * Since we look at the bio's to send the request over the network we 588 * Since we look at the bio's to send the request over the network we
@@ -543,10 +595,20 @@ static int nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
543 */ 595 */
544 init_completion(&cmd->send_complete); 596 init_completion(&cmd->send_complete);
545 blk_mq_start_request(bd->rq); 597 blk_mq_start_request(bd->rq);
546 nbd_handle_cmd(cmd, hctx->queue_num); 598
599 /* We can be called directly from the user space process, which means we
600 * could possibly have signals pending, so our sendmsg may fail. In
601 * that case we need to report that we are busy; otherwise, error out as
602 * appropriate.
603 */
604 ret = nbd_handle_cmd(cmd, hctx->queue_num);
605 if (ret < 0)
606 ret = BLK_MQ_RQ_QUEUE_ERROR;
607 if (!ret)
608 ret = BLK_MQ_RQ_QUEUE_OK;
547 complete(&cmd->send_complete); 609 complete(&cmd->send_complete);
548 610
549 return BLK_MQ_RQ_QUEUE_OK; 611 return ret;
550} 612}
551 613
552static int nbd_add_socket(struct nbd_device *nbd, struct block_device *bdev, 614static int nbd_add_socket(struct nbd_device *nbd, struct block_device *bdev,
@@ -581,6 +643,8 @@ static int nbd_add_socket(struct nbd_device *nbd, struct block_device *bdev,
581 643
582 mutex_init(&nsock->tx_lock); 644 mutex_init(&nsock->tx_lock);
583 nsock->sock = sock; 645 nsock->sock = sock;
646 nsock->pending = NULL;
647 nsock->sent = 0;
584 socks[nbd->num_connections++] = nsock; 648 socks[nbd->num_connections++] = nsock;
585 649
586 if (max_part) 650 if (max_part)
@@ -602,6 +666,8 @@ static void nbd_reset(struct nbd_device *nbd)
602 666
603static void nbd_bdev_reset(struct block_device *bdev) 667static void nbd_bdev_reset(struct block_device *bdev)
604{ 668{
669 if (bdev->bd_openers > 1)
670 return;
605 set_device_ro(bdev, false); 671 set_device_ro(bdev, false);
606 bdev->bd_inode->i_size = 0; 672 bdev->bd_inode->i_size = 0;
607 if (max_part > 0) { 673 if (max_part > 0) {
@@ -634,7 +700,7 @@ static void send_disconnects(struct nbd_device *nbd)
634 700
635 for (i = 0; i < nbd->num_connections; i++) { 701 for (i = 0; i < nbd->num_connections; i++) {
636 iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request)); 702 iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request));
637 ret = sock_xmit(nbd, i, 1, &from, 0); 703 ret = sock_xmit(nbd, i, 1, &from, 0, NULL);
638 if (ret <= 0) 704 if (ret <= 0)
639 dev_err(disk_to_dev(nbd->disk), 705 dev_err(disk_to_dev(nbd->disk),
640 "Send disconnect failed %d\n", ret); 706 "Send disconnect failed %d\n", ret);
@@ -665,7 +731,8 @@ static int nbd_clear_sock(struct nbd_device *nbd, struct block_device *bdev)
665{ 731{
666 sock_shutdown(nbd); 732 sock_shutdown(nbd);
667 nbd_clear_que(nbd); 733 nbd_clear_que(nbd);
668 kill_bdev(bdev); 734
735 __invalidate_device(bdev, true);
669 nbd_bdev_reset(bdev); 736 nbd_bdev_reset(bdev);
670 /* 737 /*
671 * We want to give the run thread a chance to wait for everybody 738 * We want to give the run thread a chance to wait for everybody
@@ -781,7 +848,10 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
781 nbd_size_set(nbd, bdev, nbd->blksize, arg); 848 nbd_size_set(nbd, bdev, nbd->blksize, arg);
782 return 0; 849 return 0;
783 case NBD_SET_TIMEOUT: 850 case NBD_SET_TIMEOUT:
784 nbd->tag_set.timeout = arg * HZ; 851 if (arg) {
852 nbd->tag_set.timeout = arg * HZ;
853 blk_queue_rq_timeout(nbd->disk->queue, arg * HZ);
854 }
785 return 0; 855 return 0;
786 856
787 case NBD_SET_FLAGS: 857 case NBD_SET_FLAGS:
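
The nbd changes above all serve one idea: when sendmsg() is interrupted by a signal (-ERESTARTSYS) after some bytes have hit the socket, the request can be neither failed nor restarted from scratch, because the server has already seen part of it. The driver therefore records the byte count in nsock->sent, returns BLK_MQ_RQ_QUEUE_BUSY, and resumes from that offset on the next dispatch; the requeue in nbd_handle_cmd() keeps the half-sent request ahead of everything else on that socket. A minimal sketch of the resume step, assuming the header-then-payload layout used above:

	/* resume a partially transmitted request from 'sent' bytes */
	if (sent >= sizeof(request)) {
		skip = sent - sizeof(request);	/* header done; skip into payload */
	} else {
		iov_iter_advance(&from, sent);	/* finish the header first */
	}
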
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index c2c14a12713b..08e054507d0b 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -344,7 +344,8 @@ config BT_WILINK
344 344
345config BT_QCOMSMD 345config BT_QCOMSMD
346 tristate "Qualcomm SMD based HCI support" 346 tristate "Qualcomm SMD based HCI support"
347 depends on (QCOM_SMD && QCOM_WCNSS_CTRL) || COMPILE_TEST 347 depends on QCOM_SMD || (COMPILE_TEST && QCOM_SMD=n)
348 depends on QCOM_WCNSS_CTRL || (COMPILE_TEST && QCOM_WCNSS_CTRL=n)
348 select BT_QCA 349 select BT_QCA
349 help 350 help
350 Qualcomm SMD based HCI driver. 351 Qualcomm SMD based HCI driver.
diff --git a/drivers/char/hw_random/amd-rng.c b/drivers/char/hw_random/amd-rng.c
index 4a99ac756f08..9959c762da2f 100644
--- a/drivers/char/hw_random/amd-rng.c
+++ b/drivers/char/hw_random/amd-rng.c
@@ -55,6 +55,7 @@ MODULE_DEVICE_TABLE(pci, pci_tbl);
55struct amd768_priv { 55struct amd768_priv {
56 void __iomem *iobase; 56 void __iomem *iobase;
57 struct pci_dev *pcidev; 57 struct pci_dev *pcidev;
58 u32 pmbase;
58}; 59};
59 60
60static int amd_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait) 61static int amd_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
@@ -148,33 +149,58 @@ found:
148 if (pmbase == 0) 149 if (pmbase == 0)
149 return -EIO; 150 return -EIO;
150 151
151 priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); 152 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
152 if (!priv) 153 if (!priv)
153 return -ENOMEM; 154 return -ENOMEM;
154 155
155 if (!devm_request_region(&pdev->dev, pmbase + PMBASE_OFFSET, 156 if (!request_region(pmbase + PMBASE_OFFSET, PMBASE_SIZE, DRV_NAME)) {
156 PMBASE_SIZE, DRV_NAME)) {
157 dev_err(&pdev->dev, DRV_NAME " region 0x%x already in use!\n", 157 dev_err(&pdev->dev, DRV_NAME " region 0x%x already in use!\n",
158 pmbase + 0xF0); 158 pmbase + 0xF0);
159 return -EBUSY; 159 err = -EBUSY;
160 goto out;
160 } 161 }
161 162
162 priv->iobase = devm_ioport_map(&pdev->dev, pmbase + PMBASE_OFFSET, 163 priv->iobase = ioport_map(pmbase + PMBASE_OFFSET, PMBASE_SIZE);
163 PMBASE_SIZE);
164 if (!priv->iobase) { 164 if (!priv->iobase) {
165 pr_err(DRV_NAME "Cannot map ioport\n"); 165 pr_err(DRV_NAME "Cannot map ioport\n");
166 return -ENOMEM; 166 err = -EINVAL;
167 goto err_iomap;
167 } 168 }
168 169
169 amd_rng.priv = (unsigned long)priv; 170 amd_rng.priv = (unsigned long)priv;
171 priv->pmbase = pmbase;
170 priv->pcidev = pdev; 172 priv->pcidev = pdev;
171 173
172 pr_info(DRV_NAME " detected\n"); 174 pr_info(DRV_NAME " detected\n");
173 return devm_hwrng_register(&pdev->dev, &amd_rng); 175 err = hwrng_register(&amd_rng);
176 if (err) {
177 pr_err(DRV_NAME " registering failed (%d)\n", err);
178 goto err_hwrng;
179 }
180 return 0;
181
182err_hwrng:
183 ioport_unmap(priv->iobase);
184err_iomap:
185 release_region(pmbase + PMBASE_OFFSET, PMBASE_SIZE);
186out:
187 kfree(priv);
188 return err;
174} 189}
175 190
176static void __exit mod_exit(void) 191static void __exit mod_exit(void)
177{ 192{
193 struct amd768_priv *priv;
194
195 priv = (struct amd768_priv *)amd_rng.priv;
196
197 hwrng_unregister(&amd_rng);
198
199 ioport_unmap(priv->iobase);
200
201 release_region(priv->pmbase + PMBASE_OFFSET, PMBASE_SIZE);
202
203 kfree(priv);
178} 204}
179 205
180module_init(mod_init); 206module_init(mod_init);
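
The amd-rng conversion runs in the opposite direction from most cleanups: devm_*-managed resources are released when the device is unbound, which is the wrong lifetime for a module-scoped hwrng torn down in mod_exit(). Explicit management brings back the classic goto-unwind ladder; a condensed sketch of the probe path, with hypothetical names:

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	if (!request_region(base, size, DRV_NAME)) {
		err = -EBUSY;
		goto out_free;
	}
	err = hwrng_register(&my_rng);
	if (err)
		goto out_release;
	return 0;

out_release:
	release_region(base, size);	/* undo in reverse order of acquisition */
out_free:
	kfree(priv);
	return err;
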
diff --git a/drivers/char/hw_random/geode-rng.c b/drivers/char/hw_random/geode-rng.c
index e7a245942029..e1d421a36a13 100644
--- a/drivers/char/hw_random/geode-rng.c
+++ b/drivers/char/hw_random/geode-rng.c
@@ -31,6 +31,9 @@
31#include <linux/module.h> 31#include <linux/module.h>
32#include <linux/pci.h> 32#include <linux/pci.h>
33 33
34
35#define PFX KBUILD_MODNAME ": "
36
34#define GEODE_RNG_DATA_REG 0x50 37#define GEODE_RNG_DATA_REG 0x50
35#define GEODE_RNG_STATUS_REG 0x54 38#define GEODE_RNG_STATUS_REG 0x54
36 39
@@ -82,6 +85,7 @@ static struct hwrng geode_rng = {
82 85
83static int __init mod_init(void) 86static int __init mod_init(void)
84{ 87{
88 int err = -ENODEV;
85 struct pci_dev *pdev = NULL; 89 struct pci_dev *pdev = NULL;
86 const struct pci_device_id *ent; 90 const struct pci_device_id *ent;
87 void __iomem *mem; 91 void __iomem *mem;
@@ -89,27 +93,43 @@ static int __init mod_init(void)
89 93
90 for_each_pci_dev(pdev) { 94 for_each_pci_dev(pdev) {
91 ent = pci_match_id(pci_tbl, pdev); 95 ent = pci_match_id(pci_tbl, pdev);
92 if (ent) { 96 if (ent)
93 rng_base = pci_resource_start(pdev, 0); 97 goto found;
94 if (rng_base == 0)
95 return -ENODEV;
96
97 mem = devm_ioremap(&pdev->dev, rng_base, 0x58);
98 if (!mem)
99 return -ENOMEM;
100 geode_rng.priv = (unsigned long)mem;
101
102 pr_info("AMD Geode RNG detected\n");
103 return devm_hwrng_register(&pdev->dev, &geode_rng);
104 }
105 } 98 }
106
107 /* Device not found. */ 99 /* Device not found. */
108 return -ENODEV; 100 goto out;
101
102found:
103 rng_base = pci_resource_start(pdev, 0);
104 if (rng_base == 0)
105 goto out;
106 err = -ENOMEM;
107 mem = ioremap(rng_base, 0x58);
108 if (!mem)
109 goto out;
110 geode_rng.priv = (unsigned long)mem;
111
112 pr_info("AMD Geode RNG detected\n");
113 err = hwrng_register(&geode_rng);
114 if (err) {
115 pr_err(PFX "RNG registering failed (%d)\n",
116 err);
117 goto err_unmap;
118 }
119out:
120 return err;
121
122err_unmap:
123 iounmap(mem);
124 goto out;
109} 125}
110 126
111static void __exit mod_exit(void) 127static void __exit mod_exit(void)
112{ 128{
129 void __iomem *mem = (void __iomem *)geode_rng.priv;
130
131 hwrng_unregister(&geode_rng);
132 iounmap(mem);
113} 133}
114 134
115module_init(mod_init); 135module_init(mod_init);
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index 3ad86fdf954e..b1ad12552b56 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -397,9 +397,8 @@ static int of_get_omap_rng_device_details(struct omap_rng_dev *priv,
397 irq, err); 397 irq, err);
398 return err; 398 return err;
399 } 399 }
400 omap_rng_write(priv, RNG_INTMASK_REG, RNG_SHUTDOWN_OFLO_MASK);
401 400
402 priv->clk = of_clk_get(pdev->dev.of_node, 0); 401 priv->clk = devm_clk_get(&pdev->dev, NULL);
403 if (IS_ERR(priv->clk) && PTR_ERR(priv->clk) == -EPROBE_DEFER) 402 if (IS_ERR(priv->clk) && PTR_ERR(priv->clk) == -EPROBE_DEFER)
404 return -EPROBE_DEFER; 403 return -EPROBE_DEFER;
405 if (!IS_ERR(priv->clk)) { 404 if (!IS_ERR(priv->clk)) {
@@ -408,6 +407,19 @@ static int of_get_omap_rng_device_details(struct omap_rng_dev *priv,
408 dev_err(&pdev->dev, "unable to enable the clk, " 407 dev_err(&pdev->dev, "unable to enable the clk, "
409 "err = %d\n", err); 408 "err = %d\n", err);
410 } 409 }
410
411 /*
412 * On OMAP4, enabling the shutdown_oflo interrupt is
413 * done in the interrupt mask register. There is no
414 * such register on EIP76, and it's enabled by the
415 * same bit in the control register.
416 */
417 if (priv->pdata->regs[RNG_INTMASK_REG])
418 omap_rng_write(priv, RNG_INTMASK_REG,
419 RNG_SHUTDOWN_OFLO_MASK);
420 else
421 omap_rng_write(priv, RNG_CONTROL_REG,
422 RNG_SHUTDOWN_OFLO_MASK);
411 } 423 }
412 return 0; 424 return 0;
413} 425}
diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c
index 2a558c706581..3e73bcdf9e65 100644
--- a/drivers/char/ppdev.c
+++ b/drivers/char/ppdev.c
@@ -84,11 +84,14 @@ struct pp_struct {
84 struct ieee1284_info state; 84 struct ieee1284_info state;
85 struct ieee1284_info saved_state; 85 struct ieee1284_info saved_state;
86 long default_inactivity; 86 long default_inactivity;
87 int index;
87}; 88};
88 89
89/* should we use PARDEVICE_MAX here? */ 90/* should we use PARDEVICE_MAX here? */
90static struct device *devices[PARPORT_MAX]; 91static struct device *devices[PARPORT_MAX];
91 92
93static DEFINE_IDA(ida_index);
94
92/* pp_struct.flags bitfields */ 95/* pp_struct.flags bitfields */
93#define PP_CLAIMED (1<<0) 96#define PP_CLAIMED (1<<0)
94#define PP_EXCL (1<<1) 97#define PP_EXCL (1<<1)
@@ -290,7 +293,7 @@ static int register_device(int minor, struct pp_struct *pp)
290 struct pardevice *pdev = NULL; 293 struct pardevice *pdev = NULL;
291 char *name; 294 char *name;
292 struct pardev_cb ppdev_cb; 295 struct pardev_cb ppdev_cb;
293 int rc = 0; 296 int rc = 0, index;
294 297
295 name = kasprintf(GFP_KERNEL, CHRDEV "%x", minor); 298 name = kasprintf(GFP_KERNEL, CHRDEV "%x", minor);
296 if (name == NULL) 299 if (name == NULL)
@@ -303,20 +306,23 @@ static int register_device(int minor, struct pp_struct *pp)
303 goto err; 306 goto err;
304 } 307 }
305 308
309 index = ida_simple_get(&ida_index, 0, 0, GFP_KERNEL);
306 memset(&ppdev_cb, 0, sizeof(ppdev_cb)); 310 memset(&ppdev_cb, 0, sizeof(ppdev_cb));
307 ppdev_cb.irq_func = pp_irq; 311 ppdev_cb.irq_func = pp_irq;
308 ppdev_cb.flags = (pp->flags & PP_EXCL) ? PARPORT_FLAG_EXCL : 0; 312 ppdev_cb.flags = (pp->flags & PP_EXCL) ? PARPORT_FLAG_EXCL : 0;
309 ppdev_cb.private = pp; 313 ppdev_cb.private = pp;
310 pdev = parport_register_dev_model(port, name, &ppdev_cb, minor); 314 pdev = parport_register_dev_model(port, name, &ppdev_cb, index);
311 parport_put_port(port); 315 parport_put_port(port);
312 316
313 if (!pdev) { 317 if (!pdev) {
314 pr_warn("%s: failed to register device!\n", name); 318 pr_warn("%s: failed to register device!\n", name);
315 rc = -ENXIO; 319 rc = -ENXIO;
320 ida_simple_remove(&ida_index, index);
316 goto err; 321 goto err;
317 } 322 }
318 323
319 pp->pdev = pdev; 324 pp->pdev = pdev;
325 pp->index = index;
320 dev_dbg(&pdev->dev, "registered pardevice\n"); 326 dev_dbg(&pdev->dev, "registered pardevice\n");
321err: 327err:
322 kfree(name); 328 kfree(name);
@@ -755,6 +761,7 @@ static int pp_release(struct inode *inode, struct file *file)
755 761
756 if (pp->pdev) { 762 if (pp->pdev) {
757 parport_unregister_device(pp->pdev); 763 parport_unregister_device(pp->pdev);
764 ida_simple_remove(&ida_index, pp->index);
758 pp->pdev = NULL; 765 pp->pdev = NULL;
759 pr_debug(CHRDEV "%x: unregistered pardevice\n", minor); 766 pr_debug(CHRDEV "%x: unregistered pardevice\n", minor);
760 } 767 }
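
The ppdev change decouples the pardevice number from the chardev minor by drawing it from an IDA, which always hands out the smallest free integer. The general pattern, as a standalone sketch:

	static DEFINE_IDA(example_ida);

	static int example_get_slot(void)
	{
		int index = ida_simple_get(&example_ida, 0, 0, GFP_KERNEL);

		if (index < 0)
			return index;	/* -ENOMEM or -ENOSPC */
		return index;		/* smallest free id */
	}

	static void example_put_slot(int index)
	{
		ida_simple_remove(&example_ida, index);	/* must pair with _get */
	}

As in the hunk, the id must also be released on every error path after allocation, or the low numbers leak.
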
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 0fb39fe217d1..67201f67a14a 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -2502,7 +2502,7 @@ struct clk *__clk_create_clk(struct clk_hw *hw, const char *dev_id,
2502 2502
2503 clk->core = hw->core; 2503 clk->core = hw->core;
2504 clk->dev_id = dev_id; 2504 clk->dev_id = dev_id;
2505 clk->con_id = con_id; 2505 clk->con_id = kstrdup_const(con_id, GFP_KERNEL);
2506 clk->max_rate = ULONG_MAX; 2506 clk->max_rate = ULONG_MAX;
2507 2507
2508 clk_prepare_lock(); 2508 clk_prepare_lock();
@@ -2518,6 +2518,7 @@ void __clk_free_clk(struct clk *clk)
2518 hlist_del(&clk->clks_node); 2518 hlist_del(&clk->clks_node);
2519 clk_prepare_unlock(); 2519 clk_prepare_unlock();
2520 2520
2521 kfree_const(clk->con_id);
2521 kfree(clk); 2522 kfree(clk);
2522} 2523}
2523 2524
diff --git a/drivers/clk/rockchip/clk-rk3036.c b/drivers/clk/rockchip/clk-rk3036.c
index 924f560dcf80..00d4150e33c3 100644
--- a/drivers/clk/rockchip/clk-rk3036.c
+++ b/drivers/clk/rockchip/clk-rk3036.c
@@ -127,7 +127,7 @@ PNAME(mux_ddrphy_p) = { "dpll_ddr", "gpll_ddr" };
127PNAME(mux_pll_src_3plls_p) = { "apll", "dpll", "gpll" }; 127PNAME(mux_pll_src_3plls_p) = { "apll", "dpll", "gpll" };
128PNAME(mux_timer_p) = { "xin24m", "pclk_peri_src" }; 128PNAME(mux_timer_p) = { "xin24m", "pclk_peri_src" };
129 129
130PNAME(mux_pll_src_apll_dpll_gpll_usb480m_p) = { "apll", "dpll", "gpll" "usb480m" }; 130PNAME(mux_pll_src_apll_dpll_gpll_usb480m_p) = { "apll", "dpll", "gpll", "usb480m" };
131 131
132PNAME(mux_mmc_src_p) = { "apll", "dpll", "gpll", "xin24m" }; 132PNAME(mux_mmc_src_p) = { "apll", "dpll", "gpll", "xin24m" };
133PNAME(mux_i2s_pre_p) = { "i2s_src", "i2s_frac", "ext_i2s", "xin12m" }; 133PNAME(mux_i2s_pre_p) = { "i2s_src", "i2s_frac", "ext_i2s", "xin12m" };
@@ -450,6 +450,13 @@ static void __init rk3036_clk_init(struct device_node *np)
450 return; 450 return;
451 } 451 }
452 452
453 /*
454 * Make uart_pll_clk a child of the gpll, as the other sources are
455 * not as usable or stable.
456 */
457 writel_relaxed(HIWORD_UPDATE(0x2, 0x3, 10),
458 reg_base + RK2928_CLKSEL_CON(13));
459
453 ctx = rockchip_clk_init(np, reg_base, CLK_NR_CLKS); 460 ctx = rockchip_clk_init(np, reg_base, CLK_NR_CLKS);
454 if (IS_ERR(ctx)) { 461 if (IS_ERR(ctx)) {
455 pr_err("%s: rockchip clk init failed\n", __func__); 462 pr_err("%s: rockchip clk init failed\n", __func__);
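
The new write in rk3036_clk_init() uses Rockchip's write-mask register convention: the upper 16 bits of the word select which of the lower 16 bits the write may change, so a field can be set without a read-modify-write. A sketch of the idiom (the macro body here is reconstructed from how the Rockchip clock drivers use it; treat it as illustrative):

	#define HIWORD_UPDATE(val, mask, shift) \
		(((val) << (shift)) | ((mask) << ((shift) + 16)))

	/* set bits [11:10] to 0x2; every other bit is left untouched */
	writel_relaxed(HIWORD_UPDATE(0x2, 0x3, 10),
		       reg_base + RK2928_CLKSEL_CON(13));
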
diff --git a/drivers/clk/sunxi-ng/Kconfig b/drivers/clk/sunxi-ng/Kconfig
index 695bbf9ef428..72109d2cf41b 100644
--- a/drivers/clk/sunxi-ng/Kconfig
+++ b/drivers/clk/sunxi-ng/Kconfig
@@ -80,6 +80,7 @@ config SUN6I_A31_CCU
80 select SUNXI_CCU_DIV 80 select SUNXI_CCU_DIV
81 select SUNXI_CCU_NK 81 select SUNXI_CCU_NK
82 select SUNXI_CCU_NKM 82 select SUNXI_CCU_NKM
83 select SUNXI_CCU_NKMP
83 select SUNXI_CCU_NM 84 select SUNXI_CCU_NM
84 select SUNXI_CCU_MP 85 select SUNXI_CCU_MP
85 select SUNXI_CCU_PHASE 86 select SUNXI_CCU_PHASE
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
index e3c084cc6da5..f54114c607df 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
@@ -566,7 +566,7 @@ static SUNXI_CCU_M_WITH_GATE(gpu_clk, "gpu", "pll-gpu",
566 0x1a0, 0, 3, BIT(31), CLK_SET_RATE_PARENT); 566 0x1a0, 0, 3, BIT(31), CLK_SET_RATE_PARENT);
567 567
568/* Fixed Factor clocks */ 568/* Fixed Factor clocks */
569static CLK_FIXED_FACTOR(osc12M_clk, "osc12M", "osc24M", 1, 2, 0); 569static CLK_FIXED_FACTOR(osc12M_clk, "osc12M", "osc24M", 2, 1, 0);
570 570
571/* We hardcode the divider to 4 for now */ 571/* We hardcode the divider to 4 for now */
572static CLK_FIXED_FACTOR(pll_audio_clk, "pll-audio", 572static CLK_FIXED_FACTOR(pll_audio_clk, "pll-audio",
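
The osc12M fix above is an argument-order bug: CLK_FIXED_FACTOR() takes the divider before the multiplier, and the resulting rate is parent_rate * mult / div. Spelled out:

	/* CLK_FIXED_FACTOR(_struct, _name, _parent, _div, _mult, _flags)
	 * old:  24 MHz * 2 / 1 = 48 MHz   (divider and multiplier swapped)
	 * new:  24 MHz * 1 / 2 = 12 MHz   (the osc12M the name promises)
	 */
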
diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
index 4c9a920ff4ab..89e68d29bf45 100644
--- a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
+++ b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
@@ -608,7 +608,7 @@ static SUNXI_CCU_M_WITH_MUX_GATE(hdmi_clk, "hdmi", lcd_ch1_parents,
608 0x150, 0, 4, 24, 2, BIT(31), 608 0x150, 0, 4, 24, 2, BIT(31),
609 CLK_SET_RATE_PARENT); 609 CLK_SET_RATE_PARENT);
610 610
611static SUNXI_CCU_GATE(hdmi_ddc_clk, "hdmi-ddc", "osc24M", 0x150, BIT(31), 0); 611static SUNXI_CCU_GATE(hdmi_ddc_clk, "hdmi-ddc", "osc24M", 0x150, BIT(30), 0);
612 612
613static SUNXI_CCU_GATE(ps_clk, "ps", "lcd1-ch1", 0x140, BIT(31), 0); 613static SUNXI_CCU_GATE(ps_clk, "ps", "lcd1-ch1", 0x140, BIT(31), 0);
614 614
diff --git a/drivers/clk/sunxi-ng/ccu_mp.c b/drivers/clk/sunxi-ng/ccu_mp.c
index 22c2ca7a2a22..b583f186a804 100644
--- a/drivers/clk/sunxi-ng/ccu_mp.c
+++ b/drivers/clk/sunxi-ng/ccu_mp.c
@@ -85,6 +85,10 @@ static unsigned long ccu_mp_recalc_rate(struct clk_hw *hw,
85 unsigned int m, p; 85 unsigned int m, p;
86 u32 reg; 86 u32 reg;
87 87
88 /* Adjust parent_rate according to pre-dividers */
89 ccu_mux_helper_adjust_parent_for_prediv(&cmp->common, &cmp->mux,
90 -1, &parent_rate);
91
88 reg = readl(cmp->common.base + cmp->common.reg); 92 reg = readl(cmp->common.base + cmp->common.reg);
89 93
90 m = reg >> cmp->m.shift; 94 m = reg >> cmp->m.shift;
@@ -117,6 +121,10 @@ static int ccu_mp_set_rate(struct clk_hw *hw, unsigned long rate,
117 unsigned int m, p; 121 unsigned int m, p;
118 u32 reg; 122 u32 reg;
119 123
124 /* Adjust parent_rate according to pre-dividers */
125 ccu_mux_helper_adjust_parent_for_prediv(&cmp->common, &cmp->mux,
126 -1, &parent_rate);
127
120 max_m = cmp->m.max ?: 1 << cmp->m.width; 128 max_m = cmp->m.max ?: 1 << cmp->m.width;
121 max_p = cmp->p.max ?: 1 << ((1 << cmp->p.width) - 1); 129 max_p = cmp->p.max ?: 1 << ((1 << cmp->p.width) - 1);
122 130
diff --git a/drivers/clk/sunxi-ng/ccu_nkmp.c b/drivers/clk/sunxi-ng/ccu_nkmp.c
index a2b40a000157..488055ed944f 100644
--- a/drivers/clk/sunxi-ng/ccu_nkmp.c
+++ b/drivers/clk/sunxi-ng/ccu_nkmp.c
@@ -107,7 +107,7 @@ static unsigned long ccu_nkmp_recalc_rate(struct clk_hw *hw,
107 p = reg >> nkmp->p.shift; 107 p = reg >> nkmp->p.shift;
108 p &= (1 << nkmp->p.width) - 1; 108 p &= (1 << nkmp->p.width) - 1;
109 109
110 return parent_rate * n * k >> p / m; 110 return (parent_rate * n * k >> p) / m;
111} 111}
112 112
113static long ccu_nkmp_round_rate(struct clk_hw *hw, unsigned long rate, 113static long ccu_nkmp_round_rate(struct clk_hw *hw, unsigned long rate,
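
The parenthesization fix in ccu_nkmp_recalc_rate() is pure C precedence: '/' binds tighter than '>>', so the old expression shifted by p / m instead of dividing the shifted value by m. A worked example with made-up factors:

	/* parent_rate = 24000000, n = 25, k = 1, m = 2, p = 1
	 *
	 * old: parent_rate * n * k >> (p / m) = 600000000 >> 0 = 600 MHz
	 * new: (parent_rate * n * k >> p) / m = 300000000 / 2 = 150 MHz
	 */
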
diff --git a/drivers/clocksource/clkevt-probe.c b/drivers/clocksource/clkevt-probe.c
index 8c30fec86094..eb89b502acbd 100644
--- a/drivers/clocksource/clkevt-probe.c
+++ b/drivers/clocksource/clkevt-probe.c
@@ -17,7 +17,7 @@
17 17
18#include <linux/init.h> 18#include <linux/init.h>
19#include <linux/of.h> 19#include <linux/of.h>
20#include <linux/clockchip.h> 20#include <linux/clockchips.h>
21 21
22extern struct of_device_id __clkevt_of_table[]; 22extern struct of_device_id __clkevt_of_table[];
23 23
diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c
index 745844ee973e..d4ca9962a759 100644
--- a/drivers/clocksource/tcb_clksrc.c
+++ b/drivers/clocksource/tcb_clksrc.c
@@ -10,7 +10,6 @@
10#include <linux/io.h> 10#include <linux/io.h>
11#include <linux/platform_device.h> 11#include <linux/platform_device.h>
12#include <linux/atmel_tc.h> 12#include <linux/atmel_tc.h>
13#include <linux/sched_clock.h>
14 13
15 14
16/* 15/*
@@ -57,14 +56,9 @@ static u64 tc_get_cycles(struct clocksource *cs)
57 return (upper << 16) | lower; 56 return (upper << 16) | lower;
58} 57}
59 58
60static u32 tc_get_cv32(void)
61{
62 return __raw_readl(tcaddr + ATMEL_TC_REG(0, CV));
63}
64
65static u64 tc_get_cycles32(struct clocksource *cs) 59static u64 tc_get_cycles32(struct clocksource *cs)
66{ 60{
67 return tc_get_cv32(); 61 return __raw_readl(tcaddr + ATMEL_TC_REG(0, CV));
68} 62}
69 63
70static struct clocksource clksrc = { 64static struct clocksource clksrc = {
@@ -75,11 +69,6 @@ static struct clocksource clksrc = {
75 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 69 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
76}; 70};
77 71
78static u64 notrace tc_read_sched_clock(void)
79{
80 return tc_get_cv32();
81}
82
83#ifdef CONFIG_GENERIC_CLOCKEVENTS 72#ifdef CONFIG_GENERIC_CLOCKEVENTS
84 73
85struct tc_clkevt_device { 74struct tc_clkevt_device {
@@ -350,9 +339,6 @@ static int __init tcb_clksrc_init(void)
350 clksrc.read = tc_get_cycles32; 339 clksrc.read = tc_get_cycles32;
351 /* setup only channel 0 */ 340
352 tcb_setup_single_chan(tc, best_divisor_idx); 341 tcb_setup_single_chan(tc, best_divisor_idx);
353
354 /* register sched_clock on chips with single 32 bit counter */
355 sched_clock_register(tc_read_sched_clock, 32, divided_rate);
356 } else { 342 } else {
357 /* tclib will give us three clocks no matter what the 343 /* tclib will give us three clocks no matter what the
358 * underlying platform supports. 344 * underlying platform supports.
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 74fa5c5904d3..74ed7e9a7f27 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -247,6 +247,12 @@ config ARM_TEGRA124_CPUFREQ
247 help 247 help
248 This adds the CPUFreq driver support for Tegra124 SOCs. 248 This adds the CPUFreq driver support for Tegra124 SOCs.
249 249
250config ARM_TEGRA186_CPUFREQ
251 tristate "Tegra186 CPUFreq support"
252 depends on ARCH_TEGRA && TEGRA_BPMP
253 help
254 This adds the CPUFreq driver support for Tegra186 SOCs.
255
250config ARM_TI_CPUFREQ 256config ARM_TI_CPUFREQ
251 bool "Texas Instruments CPUFreq support" 257 bool "Texas Instruments CPUFreq support"
252 depends on ARCH_OMAP2PLUS 258 depends on ARCH_OMAP2PLUS
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 9f5a8045f36d..b7e78f063c4f 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -77,6 +77,7 @@ obj-$(CONFIG_ARM_SPEAR_CPUFREQ) += spear-cpufreq.o
77obj-$(CONFIG_ARM_STI_CPUFREQ) += sti-cpufreq.o 77obj-$(CONFIG_ARM_STI_CPUFREQ) += sti-cpufreq.o
78obj-$(CONFIG_ARM_TEGRA20_CPUFREQ) += tegra20-cpufreq.o 78obj-$(CONFIG_ARM_TEGRA20_CPUFREQ) += tegra20-cpufreq.o
79obj-$(CONFIG_ARM_TEGRA124_CPUFREQ) += tegra124-cpufreq.o 79obj-$(CONFIG_ARM_TEGRA124_CPUFREQ) += tegra124-cpufreq.o
80obj-$(CONFIG_ARM_TEGRA186_CPUFREQ) += tegra186-cpufreq.o
80obj-$(CONFIG_ARM_TI_CPUFREQ) += ti-cpufreq.o 81obj-$(CONFIG_ARM_TI_CPUFREQ) += ti-cpufreq.o
81obj-$(CONFIG_ARM_VEXPRESS_SPC_CPUFREQ) += vexpress-spc-cpufreq.o 82obj-$(CONFIG_ARM_VEXPRESS_SPC_CPUFREQ) += vexpress-spc-cpufreq.o
82obj-$(CONFIG_ACPI_CPPC_CPUFREQ) += cppc_cpufreq.o 83obj-$(CONFIG_ACPI_CPPC_CPUFREQ) += cppc_cpufreq.o
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 38b9fdf854a4..0e3f6496524d 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -680,9 +680,11 @@ static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
680 char *buf) 680 char *buf)
681{ 681{
682 unsigned int cur_freq = __cpufreq_get(policy); 682 unsigned int cur_freq = __cpufreq_get(policy);
683 if (!cur_freq) 683
684 return sprintf(buf, "<unknown>"); 684 if (cur_freq)
685 return sprintf(buf, "%u\n", cur_freq); 685 return sprintf(buf, "%u\n", cur_freq);
686
687 return sprintf(buf, "<unknown>\n");
686} 688}
687 689
688/** 690/**
@@ -916,11 +918,19 @@ static struct kobj_type ktype_cpufreq = {
916 .release = cpufreq_sysfs_release, 918 .release = cpufreq_sysfs_release,
917}; 919};
918 920
919static int add_cpu_dev_symlink(struct cpufreq_policy *policy, 921static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu)
920 struct device *dev)
921{ 922{
923 struct device *dev = get_cpu_device(cpu);
924
925 if (!dev)
926 return;
927
928 if (cpumask_test_and_set_cpu(cpu, policy->real_cpus))
929 return;
930
922 dev_dbg(dev, "%s: Adding symlink\n", __func__); 931 dev_dbg(dev, "%s: Adding symlink\n", __func__);
923 return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq"); 932 if (sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq"))
933 dev_err(dev, "cpufreq symlink creation failed\n");
924} 934}
925 935
926static void remove_cpu_dev_symlink(struct cpufreq_policy *policy, 936static void remove_cpu_dev_symlink(struct cpufreq_policy *policy,
@@ -1178,10 +1188,13 @@ static int cpufreq_online(unsigned int cpu)
1178 policy->user_policy.min = policy->min; 1188 policy->user_policy.min = policy->min;
1179 policy->user_policy.max = policy->max; 1189 policy->user_policy.max = policy->max;
1180 1190
1181 write_lock_irqsave(&cpufreq_driver_lock, flags); 1191 for_each_cpu(j, policy->related_cpus) {
1182 for_each_cpu(j, policy->related_cpus)
1183 per_cpu(cpufreq_cpu_data, j) = policy; 1192 per_cpu(cpufreq_cpu_data, j) = policy;
1184 write_unlock_irqrestore(&cpufreq_driver_lock, flags); 1193 add_cpu_dev_symlink(policy, j);
1194 }
1195 } else {
1196 policy->min = policy->user_policy.min;
1197 policy->max = policy->user_policy.max;
1185 } 1198 }
1186 1199
1187 if (cpufreq_driver->get && !cpufreq_driver->setpolicy) { 1200 if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
@@ -1270,13 +1283,15 @@ out_exit_policy:
1270 1283
1271 if (cpufreq_driver->exit) 1284 if (cpufreq_driver->exit)
1272 cpufreq_driver->exit(policy); 1285 cpufreq_driver->exit(policy);
1286
1287 for_each_cpu(j, policy->real_cpus)
1288 remove_cpu_dev_symlink(policy, get_cpu_device(j));
1289
1273out_free_policy: 1290out_free_policy:
1274 cpufreq_policy_free(policy); 1291 cpufreq_policy_free(policy);
1275 return ret; 1292 return ret;
1276} 1293}
1277 1294
1278static int cpufreq_offline(unsigned int cpu);
1279
1280/** 1295/**
1281 * cpufreq_add_dev - the cpufreq interface for a CPU device. 1296 * cpufreq_add_dev - the cpufreq interface for a CPU device.
1282 * @dev: CPU device. 1297 * @dev: CPU device.
@@ -1298,16 +1313,10 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1298 1313
1299 /* Create sysfs link on CPU registration */ 1314 /* Create sysfs link on CPU registration */
1300 policy = per_cpu(cpufreq_cpu_data, cpu); 1315 policy = per_cpu(cpufreq_cpu_data, cpu);
1301 if (!policy || cpumask_test_and_set_cpu(cpu, policy->real_cpus)) 1316 if (policy)
1302 return 0; 1317 add_cpu_dev_symlink(policy, cpu);
1303 1318
1304 ret = add_cpu_dev_symlink(policy, dev); 1319 return 0;
1305 if (ret) {
1306 cpumask_clear_cpu(cpu, policy->real_cpus);
1307 cpufreq_offline(cpu);
1308 }
1309
1310 return ret;
1311} 1320}
1312 1321
1313static int cpufreq_offline(unsigned int cpu) 1322static int cpufreq_offline(unsigned int cpu)
@@ -2389,6 +2398,20 @@ EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
2389 *********************************************************************/ 2398 *********************************************************************/
2390static enum cpuhp_state hp_online; 2399static enum cpuhp_state hp_online;
2391 2400
2401static int cpuhp_cpufreq_online(unsigned int cpu)
2402{
2403 cpufreq_online(cpu);
2404
2405 return 0;
2406}
2407
2408static int cpuhp_cpufreq_offline(unsigned int cpu)
2409{
2410 cpufreq_offline(cpu);
2411
2412 return 0;
2413}
2414
2392/** 2415/**
2393 * cpufreq_register_driver - register a CPU Frequency driver 2416 * cpufreq_register_driver - register a CPU Frequency driver
2394 * @driver_data: A struct cpufreq_driver containing the values 2417
@@ -2451,8 +2474,8 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
2451 } 2474 }
2452 2475
2453 ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "cpufreq:online", 2476 ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "cpufreq:online",
2454 cpufreq_online, 2477 cpuhp_cpufreq_online,
2455 cpufreq_offline); 2478 cpuhp_cpufreq_offline);
2456 if (ret < 0) 2479 if (ret < 0)
2457 goto err_if_unreg; 2480 goto err_if_unreg;
2458 hp_online = ret; 2481 hp_online = ret;
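
The wrapper pair added above exists because cpuhp_setup_state_nocalls() treats a non-zero return from its callbacks as a hotplug failure and unwinds the CPU bring-up; cpufreq_online()/cpufreq_offline() can return non-zero for reasons that should not take the CPU down, so the wrappers swallow the result. The registration itself follows the usual dynamic-state pattern:

	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "cpufreq:online",
					cpuhp_cpufreq_online,	/* int (*)(unsigned int) */
					cpuhp_cpufreq_offline);
	if (ret < 0)
		return ret;
	hp_online = ret;	/* with _DYN, the allocated state id is returned */
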
diff --git a/drivers/cpufreq/dbx500-cpufreq.c b/drivers/cpufreq/dbx500-cpufreq.c
index 5c3ec1dd4921..3575b82210ba 100644
--- a/drivers/cpufreq/dbx500-cpufreq.c
+++ b/drivers/cpufreq/dbx500-cpufreq.c
@@ -11,6 +11,7 @@
11#include <linux/module.h> 11#include <linux/module.h>
12#include <linux/kernel.h> 12#include <linux/kernel.h>
13#include <linux/cpufreq.h> 13#include <linux/cpufreq.h>
14#include <linux/cpu_cooling.h>
14#include <linux/delay.h> 15#include <linux/delay.h>
15#include <linux/slab.h> 16#include <linux/slab.h>
16#include <linux/platform_device.h> 17#include <linux/platform_device.h>
@@ -18,6 +19,7 @@
18 19
19static struct cpufreq_frequency_table *freq_table; 20static struct cpufreq_frequency_table *freq_table;
20static struct clk *armss_clk; 21static struct clk *armss_clk;
22static struct thermal_cooling_device *cdev;
21 23
22static int dbx500_cpufreq_target(struct cpufreq_policy *policy, 24static int dbx500_cpufreq_target(struct cpufreq_policy *policy,
23 unsigned int index) 25 unsigned int index)
@@ -32,6 +34,22 @@ static int dbx500_cpufreq_init(struct cpufreq_policy *policy)
32 return cpufreq_generic_init(policy, freq_table, 20 * 1000); 34 return cpufreq_generic_init(policy, freq_table, 20 * 1000);
33} 35}
34 36
37static int dbx500_cpufreq_exit(struct cpufreq_policy *policy)
38{
39 if (!IS_ERR(cdev))
40 cpufreq_cooling_unregister(cdev);
41 return 0;
42}
43
44static void dbx500_cpufreq_ready(struct cpufreq_policy *policy)
45{
46 cdev = cpufreq_cooling_register(policy->cpus);
47 if (IS_ERR(cdev))
48 pr_err("Failed to register cooling device %ld\n", PTR_ERR(cdev));
49 else
50 pr_info("Cooling device registered: %s\n", cdev->type);
51}
52
35static struct cpufreq_driver dbx500_cpufreq_driver = { 53static struct cpufreq_driver dbx500_cpufreq_driver = {
36 .flags = CPUFREQ_STICKY | CPUFREQ_CONST_LOOPS | 54 .flags = CPUFREQ_STICKY | CPUFREQ_CONST_LOOPS |
37 CPUFREQ_NEED_INITIAL_FREQ_CHECK, 55 CPUFREQ_NEED_INITIAL_FREQ_CHECK,
@@ -39,6 +57,8 @@ static struct cpufreq_driver dbx500_cpufreq_driver = {
39 .target_index = dbx500_cpufreq_target, 57 .target_index = dbx500_cpufreq_target,
40 .get = cpufreq_generic_get, 58 .get = cpufreq_generic_get,
41 .init = dbx500_cpufreq_init, 59 .init = dbx500_cpufreq_init,
60 .exit = dbx500_cpufreq_exit,
61 .ready = dbx500_cpufreq_ready,
42 .name = "DBX500", 62 .name = "DBX500",
43 .attr = cpufreq_generic_attr, 63 .attr = cpufreq_generic_attr,
44}; 64};
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index 7719b02e04f5..9c13f097fd8c 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -161,8 +161,13 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
161 161
162static int imx6q_cpufreq_init(struct cpufreq_policy *policy) 162static int imx6q_cpufreq_init(struct cpufreq_policy *policy)
163{ 163{
164 int ret;
165
164 policy->clk = arm_clk; 166 policy->clk = arm_clk;
165 return cpufreq_generic_init(policy, freq_table, transition_latency); 167 ret = cpufreq_generic_init(policy, freq_table, transition_latency);
168 policy->suspend_freq = policy->max;
169
170 return ret;
166} 171}
167 172
168static struct cpufreq_driver imx6q_cpufreq_driver = { 173static struct cpufreq_driver imx6q_cpufreq_driver = {
@@ -173,6 +178,7 @@ static struct cpufreq_driver imx6q_cpufreq_driver = {
173 .init = imx6q_cpufreq_init, 178 .init = imx6q_cpufreq_init,
174 .name = "imx6q-cpufreq", 179 .name = "imx6q-cpufreq",
175 .attr = cpufreq_generic_attr, 180 .attr = cpufreq_generic_attr,
181 .suspend = cpufreq_generic_suspend,
176}; 182};
177 183
178static int imx6q_cpufreq_probe(struct platform_device *pdev) 184static int imx6q_cpufreq_probe(struct platform_device *pdev)
@@ -222,6 +228,13 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
222 arm_reg = regulator_get(cpu_dev, "arm"); 228 arm_reg = regulator_get(cpu_dev, "arm");
223 pu_reg = regulator_get_optional(cpu_dev, "pu"); 229 pu_reg = regulator_get_optional(cpu_dev, "pu");
224 soc_reg = regulator_get(cpu_dev, "soc"); 230 soc_reg = regulator_get(cpu_dev, "soc");
231 if (PTR_ERR(arm_reg) == -EPROBE_DEFER ||
232 PTR_ERR(soc_reg) == -EPROBE_DEFER ||
233 PTR_ERR(pu_reg) == -EPROBE_DEFER) {
234 ret = -EPROBE_DEFER;
235 dev_dbg(cpu_dev, "regulators not ready, defer\n");
236 goto put_reg;
237 }
225 if (IS_ERR(arm_reg) || IS_ERR(soc_reg)) { 238 if (IS_ERR(arm_reg) || IS_ERR(soc_reg)) {
226 dev_err(cpu_dev, "failed to get regulators\n"); 239 dev_err(cpu_dev, "failed to get regulators\n");
227 ret = -ENOENT; 240 ret = -ENOENT;
@@ -255,7 +268,7 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
255 ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table); 268 ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
256 if (ret) { 269 if (ret) {
257 dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret); 270 dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
258 goto put_reg; 271 goto out_free_opp;
259 } 272 }
260 273
261 /* Make imx6_soc_volt array's size same as arm opp number */ 274 /* Make imx6_soc_volt array's size same as arm opp number */
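
The placement of the new -EPROBE_DEFER check matters: it has to run before the existing IS_ERR()/-ENOENT path, otherwise a regulator that is merely not ready yet would fail the probe for good instead of being retried later. A minimal userspace sketch of that ordering, with the ERR_PTR() helpers re-implemented locally (all names below are simplified stand-ins, not the kernel versions):

#include <stdio.h>

#define EPROBE_DEFER 517
#define ENOENT 2

/* simplified stand-ins for the kernel's ERR_PTR()/PTR_ERR()/IS_ERR() */
static void *ERR_PTR(long err) { return (void *)err; }
static long PTR_ERR(const void *p) { return (long)p; }
static int IS_ERR(const void *p) { return (unsigned long)p >= (unsigned long)-4095; }

static int probe(void *arm_reg, void *pu_reg, void *soc_reg)
{
	/* defer first: "not ready yet" must not be treated as "missing" */
	if (PTR_ERR(arm_reg) == -EPROBE_DEFER ||
	    PTR_ERR(soc_reg) == -EPROBE_DEFER ||
	    PTR_ERR(pu_reg) == -EPROBE_DEFER)
		return -EPROBE_DEFER;

	/* only now is a missing required regulator a hard error */
	if (IS_ERR(arm_reg) || IS_ERR(soc_reg))
		return -ENOENT;

	return 0;
}

int main(void)
{
	void *ok = (void *)0x1000;	/* pretend valid regulator */

	/* arm not ready yet: probe is deferred, not failed */
	printf("%d\n", probe(ERR_PTR(-EPROBE_DEFER), ok, ok));	/* -517 */
	/* arm genuinely absent: hard error */
	printf("%d\n", probe(ERR_PTR(-ENOENT), ok, ok));	/* -2 */
	return 0;
}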
diff --git a/drivers/cpufreq/mt8173-cpufreq.c b/drivers/cpufreq/mt8173-cpufreq.c
index ab25b1235a5e..fd1886faf33a 100644
--- a/drivers/cpufreq/mt8173-cpufreq.c
+++ b/drivers/cpufreq/mt8173-cpufreq.c
@@ -573,14 +573,33 @@ static struct platform_driver mt8173_cpufreq_platdrv = {
573 .probe = mt8173_cpufreq_probe, 573 .probe = mt8173_cpufreq_probe,
574}; 574};
575 575
576static int mt8173_cpufreq_driver_init(void) 576/* List of machines supported by this driver */
577static const struct of_device_id mt8173_cpufreq_machines[] __initconst = {
578 { .compatible = "mediatek,mt817x", },
579 { .compatible = "mediatek,mt8173", },
580 { .compatible = "mediatek,mt8176", },
581
582 { }
583};
584
585static int __init mt8173_cpufreq_driver_init(void)
577{ 586{
587 struct device_node *np;
588 const struct of_device_id *match;
578 struct platform_device *pdev; 589 struct platform_device *pdev;
579 int err; 590 int err;
580 591
581 if (!of_machine_is_compatible("mediatek,mt8173")) 592 np = of_find_node_by_path("/");
593 if (!np)
582 return -ENODEV; 594 return -ENODEV;
583 595
596 match = of_match_node(mt8173_cpufreq_machines, np);
597 of_node_put(np);
598 if (!match) {
599 pr_warn("Machine is not compatible with mt8173-cpufreq\n");
600 return -ENODEV;
601 }
602
584 err = platform_driver_register(&mt8173_cpufreq_platdrv); 603 err = platform_driver_register(&mt8173_cpufreq_platdrv);
585 if (err) 604 if (err)
586 return err; 605 return err;
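
Switching from a single of_machine_is_compatible() call to a sentinel-terminated match table is what lets one init path cover mt817x, mt8173 and mt8176. A userspace sketch of the table walk that of_match_node() performs against the root node's compatible string (deliberately simplified; real nodes can carry several compatibles):

#include <stdio.h>
#include <string.h>

struct of_device_id { const char *compatible; };

static const struct of_device_id machines[] = {
	{ .compatible = "mediatek,mt817x" },
	{ .compatible = "mediatek,mt8173" },
	{ .compatible = "mediatek,mt8176" },
	{ }	/* sentinel terminates the walk */
};

static const struct of_device_id *match_node(const char *compat)
{
	const struct of_device_id *id;

	for (id = machines; id->compatible; id++)
		if (!strcmp(id->compatible, compat))
			return id;
	return NULL;	/* not one of ours: the driver returns -ENODEV */
}

int main(void)
{
	printf("%s\n", match_node("mediatek,mt8176") ? "match" : "no match");
	printf("%s\n", match_node("mediatek,mt6589") ? "match" : "no match");
	return 0;
}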
diff --git a/drivers/cpufreq/qoriq-cpufreq.c b/drivers/cpufreq/qoriq-cpufreq.c
index bfec1bcd3835..e2ea433a5f9c 100644
--- a/drivers/cpufreq/qoriq-cpufreq.c
+++ b/drivers/cpufreq/qoriq-cpufreq.c
@@ -52,17 +52,27 @@ static u32 get_bus_freq(void)
52{ 52{
53 struct device_node *soc; 53 struct device_node *soc;
54 u32 sysfreq; 54 u32 sysfreq;
55 struct clk *pltclk;
56 int ret;
55 57
58 /* get platform freq by searching bus-frequency property */
56 soc = of_find_node_by_type(NULL, "soc"); 59 soc = of_find_node_by_type(NULL, "soc");
57 if (!soc) 60 if (soc) {
58 return 0; 61 ret = of_property_read_u32(soc, "bus-frequency", &sysfreq);
59 62 of_node_put(soc);
60 if (of_property_read_u32(soc, "bus-frequency", &sysfreq)) 63 if (!ret)
61 sysfreq = 0; 64 return sysfreq;
65 }
62 66
63 of_node_put(soc); 67 /* get platform freq by its clock name */
68 pltclk = clk_get(NULL, "cg-pll0-div1");
69 if (IS_ERR(pltclk)) {
70 pr_err("%s: can't get bus frequency %ld\n",
71 __func__, PTR_ERR(pltclk));
72 return PTR_ERR(pltclk);
73 }
64 74
65 return sysfreq; 75 return clk_get_rate(pltclk);
66} 76}
67 77
68static struct clk *cpu_to_clk(int cpu) 78static struct clk *cpu_to_clk(int cpu)
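
The reworked get_bus_freq() is a two-stage lookup: the DT "bus-frequency" property wins when present, and only otherwise is the platform clock queried by name. A standalone sketch of that fallback shape, with dt_bus_frequency() and clk_rate() as hypothetical stand-ins for the OF and clk APIs:

#include <stdio.h>

/* stand-in for of_property_read_u32(soc, "bus-frequency", ...) */
static int dt_bus_frequency(unsigned int *out)
{
	(void)out;
	return -1;			/* pretend the property is absent */
}

/* stand-in for clk_get() + clk_get_rate() */
static long clk_rate(const char *name)
{
	(void)name;
	return 600000000;		/* pretend cg-pll0-div1 runs at 600 MHz */
}

static long get_bus_freq(void)
{
	unsigned int sysfreq;

	if (!dt_bus_frequency(&sysfreq))	/* first choice: DT property */
		return sysfreq;
	return clk_rate("cg-pll0-div1");	/* fallback: named clock */
}

int main(void)
{
	printf("bus freq = %ld Hz\n", get_bus_freq());
	return 0;
}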
diff --git a/drivers/cpufreq/tegra186-cpufreq.c b/drivers/cpufreq/tegra186-cpufreq.c
new file mode 100644
index 000000000000..fe7875311d62
--- /dev/null
+++ b/drivers/cpufreq/tegra186-cpufreq.c
@@ -0,0 +1,275 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 */
13
14#include <linux/cpufreq.h>
15#include <linux/dma-mapping.h>
16#include <linux/module.h>
17#include <linux/of.h>
18#include <linux/platform_device.h>
19
20#include <soc/tegra/bpmp.h>
21#include <soc/tegra/bpmp-abi.h>
22
23#define EDVD_CORE_VOLT_FREQ(core) (0x20 + (core) * 0x4)
24#define EDVD_CORE_VOLT_FREQ_F_SHIFT 0
25#define EDVD_CORE_VOLT_FREQ_V_SHIFT 16
26
27struct tegra186_cpufreq_cluster_info {
28 unsigned long offset;
29 int cpus[4];
30 unsigned int bpmp_cluster_id;
31};
32
33#define NO_CPU -1
34static const struct tegra186_cpufreq_cluster_info tegra186_clusters[] = {
35 /* Denver cluster */
36 {
37 .offset = SZ_64K * 7,
38 .cpus = { 1, 2, NO_CPU, NO_CPU },
39 .bpmp_cluster_id = 0,
40 },
41 /* A57 cluster */
42 {
43 .offset = SZ_64K * 6,
44 .cpus = { 0, 3, 4, 5 },
45 .bpmp_cluster_id = 1,
46 },
47};
48
49struct tegra186_cpufreq_cluster {
50 const struct tegra186_cpufreq_cluster_info *info;
51 struct cpufreq_frequency_table *table;
52};
53
54struct tegra186_cpufreq_data {
55 void __iomem *regs;
56
57 size_t num_clusters;
58 struct tegra186_cpufreq_cluster *clusters;
59};
60
61static int tegra186_cpufreq_init(struct cpufreq_policy *policy)
62{
63 struct tegra186_cpufreq_data *data = cpufreq_get_driver_data();
64 unsigned int i;
65
66 for (i = 0; i < data->num_clusters; i++) {
67 struct tegra186_cpufreq_cluster *cluster = &data->clusters[i];
68 const struct tegra186_cpufreq_cluster_info *info =
69 cluster->info;
70 int core;
71
72 for (core = 0; core < ARRAY_SIZE(info->cpus); core++) {
73 if (info->cpus[core] == policy->cpu)
74 break;
75 }
76 if (core == ARRAY_SIZE(info->cpus))
77 continue;
78
79 policy->driver_data =
80 data->regs + info->offset + EDVD_CORE_VOLT_FREQ(core);
81 cpufreq_table_validate_and_show(policy, cluster->table);
82 }
83
84 policy->cpuinfo.transition_latency = 300 * 1000;
85
86 return 0;
87}
88
89static int tegra186_cpufreq_set_target(struct cpufreq_policy *policy,
90 unsigned int index)
91{
92 struct cpufreq_frequency_table *tbl = policy->freq_table + index;
93 void __iomem *edvd_reg = policy->driver_data;
94 u32 edvd_val = tbl->driver_data;
95
96 writel(edvd_val, edvd_reg);
97
98 return 0;
99}
100
101static struct cpufreq_driver tegra186_cpufreq_driver = {
102 .name = "tegra186",
103 .flags = CPUFREQ_STICKY | CPUFREQ_HAVE_GOVERNOR_PER_POLICY,
104 .verify = cpufreq_generic_frequency_table_verify,
105 .target_index = tegra186_cpufreq_set_target,
106 .init = tegra186_cpufreq_init,
107 .attr = cpufreq_generic_attr,
108};
109
110static struct cpufreq_frequency_table *init_vhint_table(
111 struct platform_device *pdev, struct tegra_bpmp *bpmp,
112 unsigned int cluster_id)
113{
114 struct cpufreq_frequency_table *table;
115 struct mrq_cpu_vhint_request req;
116 struct tegra_bpmp_message msg;
117 struct cpu_vhint_data *data;
118 int err, i, j, num_rates = 0;
119 dma_addr_t phys;
120 void *virt;
121
122 virt = dma_alloc_coherent(bpmp->dev, sizeof(*data), &phys,
123 GFP_KERNEL | GFP_DMA32);
124 if (!virt)
125 return ERR_PTR(-ENOMEM);
126
127 data = (struct cpu_vhint_data *)virt;
128
129 memset(&req, 0, sizeof(req));
130 req.addr = phys;
131 req.cluster_id = cluster_id;
132
133 memset(&msg, 0, sizeof(msg));
134 msg.mrq = MRQ_CPU_VHINT;
135 msg.tx.data = &req;
136 msg.tx.size = sizeof(req);
137
138 err = tegra_bpmp_transfer(bpmp, &msg);
139 if (err) {
140 table = ERR_PTR(err);
141 goto free;
142 }
143
144 for (i = data->vfloor; i <= data->vceil; i++) {
145 u16 ndiv = data->ndiv[i];
146
147 if (ndiv < data->ndiv_min || ndiv > data->ndiv_max)
148 continue;
149
150 /* Only store lowest voltage index for each rate */
151 if (i > 0 && ndiv == data->ndiv[i - 1])
152 continue;
153
154 num_rates++;
155 }
156
157 table = devm_kcalloc(&pdev->dev, num_rates + 1, sizeof(*table),
158 GFP_KERNEL);
159 if (!table) {
160 table = ERR_PTR(-ENOMEM);
161 goto free;
162 }
163
164 for (i = data->vfloor, j = 0; i <= data->vceil; i++) {
165 struct cpufreq_frequency_table *point;
166 u16 ndiv = data->ndiv[i];
167 u32 edvd_val = 0;
168
169 if (ndiv < data->ndiv_min || ndiv > data->ndiv_max)
170 continue;
171
172 /* Only store lowest voltage index for each rate */
173 if (i > 0 && ndiv == data->ndiv[i - 1])
174 continue;
175
176 edvd_val |= i << EDVD_CORE_VOLT_FREQ_V_SHIFT;
177 edvd_val |= ndiv << EDVD_CORE_VOLT_FREQ_F_SHIFT;
178
179 point = &table[j++];
180 point->driver_data = edvd_val;
181 point->frequency = data->ref_clk_hz * ndiv / data->pdiv /
182 data->mdiv / 1000;
183 }
184
185 table[j].frequency = CPUFREQ_TABLE_END;
186
187free:
188 dma_free_coherent(bpmp->dev, sizeof(*data), virt, phys);
189
190 return table;
191}
192
193static int tegra186_cpufreq_probe(struct platform_device *pdev)
194{
195 struct tegra186_cpufreq_data *data;
196 struct tegra_bpmp *bpmp;
197 struct resource *res;
198 unsigned int i = 0; int err;
199
200 data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
201 if (!data)
202 return -ENOMEM;
203
204 data->clusters = devm_kcalloc(&pdev->dev, ARRAY_SIZE(tegra186_clusters),
205 sizeof(*data->clusters), GFP_KERNEL);
206 if (!data->clusters)
207 return -ENOMEM;
208
209 data->num_clusters = ARRAY_SIZE(tegra186_clusters);
210
211 bpmp = tegra_bpmp_get(&pdev->dev);
212 if (IS_ERR(bpmp))
213 return PTR_ERR(bpmp);
214
215 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
216 data->regs = devm_ioremap_resource(&pdev->dev, res);
217 if (IS_ERR(data->regs)) {
218 err = PTR_ERR(data->regs);
219 goto put_bpmp;
220 }
221
222 for (i = 0; i < data->num_clusters; i++) {
223 struct tegra186_cpufreq_cluster *cluster = &data->clusters[i];
224
225 cluster->info = &tegra186_clusters[i];
226 cluster->table = init_vhint_table(
227 pdev, bpmp, cluster->info->bpmp_cluster_id);
228 if (IS_ERR(cluster->table)) {
229 err = PTR_ERR(cluster->table);
230 goto put_bpmp;
231 }
232 }
233
234 tegra_bpmp_put(bpmp);
235
236 tegra186_cpufreq_driver.driver_data = data;
237
238 err = cpufreq_register_driver(&tegra186_cpufreq_driver);
239 if (err)
240 return err;
241
242 return 0;
243
244put_bpmp:
245 tegra_bpmp_put(bpmp);
246
247 return err;
248}
249
250static int tegra186_cpufreq_remove(struct platform_device *pdev)
251{
252 cpufreq_unregister_driver(&tegra186_cpufreq_driver);
253
254 return 0;
255}
256
257static const struct of_device_id tegra186_cpufreq_of_match[] = {
258 { .compatible = "nvidia,tegra186-ccplex-cluster", },
259 { }
260};
261MODULE_DEVICE_TABLE(of, tegra186_cpufreq_of_match);
262
263static struct platform_driver tegra186_cpufreq_platform_driver = {
264 .driver = {
265 .name = "tegra186-cpufreq",
266 .of_match_table = tegra186_cpufreq_of_match,
267 },
268 .probe = tegra186_cpufreq_probe,
269 .remove = tegra186_cpufreq_remove,
270};
271module_platform_driver(tegra186_cpufreq_platform_driver);
272
273MODULE_AUTHOR("Mikko Perttunen <mperttunen@nvidia.com>");
274MODULE_DESCRIPTION("NVIDIA Tegra186 cpufreq driver");
275MODULE_LICENSE("GPL v2");
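
Each table entry above packs the voltage index and ndiv into one EDVD register value and scales the rate down to cpufreq's kHz. A standalone sketch of that arithmetic for one made-up operating point (the ref_clk_hz/pdiv/mdiv/ndiv values are illustrative only; real ones come from the BPMP vhint data):

#include <stdint.h>
#include <stdio.h>

#define EDVD_CORE_VOLT_FREQ_F_SHIFT 0
#define EDVD_CORE_VOLT_FREQ_V_SHIFT 16

int main(void)
{
	uint64_t ref_clk_hz = 408000000;	/* hypothetical reference */
	uint16_t pdiv = 1, mdiv = 1, ndiv = 5;	/* hypothetical dividers */
	unsigned int vindex = 10;		/* voltage index from vhint data */

	uint32_t edvd_val = (vindex << EDVD_CORE_VOLT_FREQ_V_SHIFT) |
			    (ndiv << EDVD_CORE_VOLT_FREQ_F_SHIFT);
	/* cpufreq frequencies are in kHz, hence the final /1000 */
	uint64_t freq_khz = ref_clk_hz * ndiv / pdiv / mdiv / 1000;

	printf("edvd_val=0x%08x freq=%llu kHz\n", (unsigned)edvd_val,
	       (unsigned long long)freq_khz);	/* 0x000a0005, 2040000 kHz */
	return 0;
}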
diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c
index 370593006f5f..cda8f62d555b 100644
--- a/drivers/cpuidle/cpuidle-powernv.c
+++ b/drivers/cpuidle/cpuidle-powernv.c
@@ -175,6 +175,24 @@ static int powernv_cpuidle_driver_init(void)
175 drv->state_count += 1; 175 drv->state_count += 1;
176 } 176 }
177 177
178 /*
 179 * On the PowerNV platform cpu_present may be less than cpu_possible in
 180 * cases where firmware detects a CPU that is not available to the
 181 * OS. If CONFIG_HOTPLUG_CPU=n, such CPUs are not hotpluggable at
182 * run time and hence cpu_devices are not created for those CPUs by the
183 * generic topology_init().
184 *
185 * drv->cpumask defaults to cpu_possible_mask in
186 * __cpuidle_driver_init(). This breaks cpuidle on PowerNV where
187 * cpu_devices are not created for CPUs in cpu_possible_mask that
188 * cannot be hot-added later at run time.
189 *
190 * Trying cpuidle_register_device() on a CPU without a cpu_device is
191 * incorrect, so pass a correct CPU mask to the generic cpuidle driver.
192 */
193
194 drv->cpumask = (struct cpumask *)cpu_present_mask;
195
178 return 0; 196 return 0;
179} 197}
180 198
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
index c5adc8c9ac43..ae948b1da93a 100644
--- a/drivers/cpuidle/sysfs.c
+++ b/drivers/cpuidle/sysfs.c
@@ -615,6 +615,18 @@ int cpuidle_add_sysfs(struct cpuidle_device *dev)
615 struct device *cpu_dev = get_cpu_device((unsigned long)dev->cpu); 615 struct device *cpu_dev = get_cpu_device((unsigned long)dev->cpu);
616 int error; 616 int error;
617 617
618 /*
 619 * Return if cpu_device is not set up for this CPU.
620 *
621 * This could happen if the arch did not set up cpu_device
622 * since this CPU is not in cpu_present mask and the
623 * driver did not send a correct CPU mask during registration.
 624 * Without this check we would end up passing a bogus
 625 * value for &cpu_dev->kobj in kobject_init_and_add().
626 */
627 if (!cpu_dev)
628 return -ENODEV;
629
618 kdev = kzalloc(sizeof(*kdev), GFP_KERNEL); 630 kdev = kzalloc(sizeof(*kdev), GFP_KERNEL);
619 if (!kdev) 631 if (!kdev)
620 return -ENOMEM; 632 return -ENOMEM;
diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c
index 41cc853f8569..fc08b4ed69d9 100644
--- a/drivers/crypto/ccp/ccp-dev-v5.c
+++ b/drivers/crypto/ccp/ccp-dev-v5.c
@@ -1015,6 +1015,7 @@ const struct ccp_vdata ccpv5a = {
1015 1015
1016const struct ccp_vdata ccpv5b = { 1016const struct ccp_vdata ccpv5b = {
1017 .version = CCP_VERSION(5, 0), 1017 .version = CCP_VERSION(5, 0),
1018 .dma_chan_attr = DMA_PRIVATE,
1018 .setup = ccp5other_config, 1019 .setup = ccp5other_config,
1019 .perform = &ccp5_actions, 1020 .perform = &ccp5_actions,
1020 .bar = 2, 1021 .bar = 2,
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index 511ab042b5e7..92d1c6959f08 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -283,11 +283,14 @@ EXPORT_SYMBOL_GPL(ccp_version);
283 */ 283 */
284int ccp_enqueue_cmd(struct ccp_cmd *cmd) 284int ccp_enqueue_cmd(struct ccp_cmd *cmd)
285{ 285{
286 struct ccp_device *ccp = ccp_get_device(); 286 struct ccp_device *ccp;
287 unsigned long flags; 287 unsigned long flags;
288 unsigned int i; 288 unsigned int i;
289 int ret; 289 int ret;
290 290
291 /* Some commands might need to be sent to a specific device */
292 ccp = cmd->ccp ? cmd->ccp : ccp_get_device();
293
291 if (!ccp) 294 if (!ccp)
292 return -ENODEV; 295 return -ENODEV;
293 296
diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
index 2b5c01fade05..aa36f3f81860 100644
--- a/drivers/crypto/ccp/ccp-dev.h
+++ b/drivers/crypto/ccp/ccp-dev.h
@@ -179,6 +179,10 @@
179 179
180/* ------------------------ General CCP Defines ------------------------ */ 180/* ------------------------ General CCP Defines ------------------------ */
181 181
182#define CCP_DMA_DFLT 0x0
183#define CCP_DMA_PRIV 0x1
184#define CCP_DMA_PUB 0x2
185
182#define CCP_DMAPOOL_MAX_SIZE 64 186#define CCP_DMAPOOL_MAX_SIZE 64
183#define CCP_DMAPOOL_ALIGN BIT(5) 187#define CCP_DMAPOOL_ALIGN BIT(5)
184 188
@@ -636,6 +640,7 @@ struct ccp_actions {
636/* Structure to hold CCP version-specific values */ 640/* Structure to hold CCP version-specific values */
637struct ccp_vdata { 641struct ccp_vdata {
638 const unsigned int version; 642 const unsigned int version;
643 const unsigned int dma_chan_attr;
639 void (*setup)(struct ccp_device *); 644 void (*setup)(struct ccp_device *);
640 const struct ccp_actions *perform; 645 const struct ccp_actions *perform;
641 const unsigned int bar; 646 const unsigned int bar;
diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c
index e5d9278f4019..e00be01fbf5a 100644
--- a/drivers/crypto/ccp/ccp-dmaengine.c
+++ b/drivers/crypto/ccp/ccp-dmaengine.c
@@ -10,6 +10,7 @@
10 * published by the Free Software Foundation. 10 * published by the Free Software Foundation.
11 */ 11 */
12 12
13#include <linux/module.h>
13#include <linux/kernel.h> 14#include <linux/kernel.h>
14#include <linux/dmaengine.h> 15#include <linux/dmaengine.h>
15#include <linux/spinlock.h> 16#include <linux/spinlock.h>
@@ -25,6 +26,37 @@
25 (mask == 0) ? 64 : fls64(mask); \ 26 (mask == 0) ? 64 : fls64(mask); \
26}) 27})
27 28
29/* The CCP as a DMA provider can be configured for public or private
30 * channels. Default is specified in the vdata for the device (PCI ID).
31 * This module parameter will override for all channels on all devices:
32 * dma_chan_attr = 0x2 to force all channels public
33 * = 0x1 to force all channels private
34 * = 0x0 to defer to the vdata setting
35 * = any other value: warning, revert to 0x0
36 */
37static unsigned int dma_chan_attr = CCP_DMA_DFLT;
38module_param(dma_chan_attr, uint, 0444);
39MODULE_PARM_DESC(dma_chan_attr, "Set DMA channel visibility: 0 (default) = device defaults, 1 = make private, 2 = make public");
40
41unsigned int ccp_get_dma_chan_attr(struct ccp_device *ccp)
42{
43 switch (dma_chan_attr) {
44 case CCP_DMA_DFLT:
45 return ccp->vdata->dma_chan_attr;
46
47 case CCP_DMA_PRIV:
48 return DMA_PRIVATE;
49
50 case CCP_DMA_PUB:
51 return 0;
52
53 default:
 54 dev_info_once(ccp->dev, "Invalid value for dma_chan_attr: %u\n",
55 dma_chan_attr);
56 return ccp->vdata->dma_chan_attr;
57 }
58}
59
28static void ccp_free_cmd_resources(struct ccp_device *ccp, 60static void ccp_free_cmd_resources(struct ccp_device *ccp,
29 struct list_head *list) 61 struct list_head *list)
30{ 62{
@@ -390,6 +422,7 @@ static struct ccp_dma_desc *ccp_create_desc(struct dma_chan *dma_chan,
390 goto err; 422 goto err;
391 423
392 ccp_cmd = &cmd->ccp_cmd; 424 ccp_cmd = &cmd->ccp_cmd;
425 ccp_cmd->ccp = chan->ccp;
393 ccp_pt = &ccp_cmd->u.passthru_nomap; 426 ccp_pt = &ccp_cmd->u.passthru_nomap;
394 ccp_cmd->flags = CCP_CMD_MAY_BACKLOG; 427 ccp_cmd->flags = CCP_CMD_MAY_BACKLOG;
395 ccp_cmd->flags |= CCP_CMD_PASSTHRU_NO_DMA_MAP; 428 ccp_cmd->flags |= CCP_CMD_PASSTHRU_NO_DMA_MAP;
@@ -674,6 +707,15 @@ int ccp_dmaengine_register(struct ccp_device *ccp)
674 dma_cap_set(DMA_SG, dma_dev->cap_mask); 707 dma_cap_set(DMA_SG, dma_dev->cap_mask);
675 dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask); 708 dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);
676 709
710 /* The DMA channels for this device can be set to public or private,
711 * and overridden by the module parameter dma_chan_attr.
712 * Default: according to the value in vdata (dma_chan_attr=0)
713 * dma_chan_attr=0x1: all channels private (override vdata)
714 * dma_chan_attr=0x2: all channels public (override vdata)
715 */
716 if (ccp_get_dma_chan_attr(ccp) == DMA_PRIVATE)
717 dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);
718
677 INIT_LIST_HEAD(&dma_dev->channels); 719 INIT_LIST_HEAD(&dma_dev->channels);
678 for (i = 0; i < ccp->cmd_q_count; i++) { 720 for (i = 0; i < ccp->cmd_q_count; i++) {
679 chan = ccp->ccp_dma_chan + i; 721 chan = ccp->ccp_dma_chan + i;
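
The override policy introduced above is a three-way switch: the module parameter beats the per-device vdata default, and an invalid value warns once and falls back to the default. A userspace sketch of that policy (DMA_PRIVATE is given a stand-in value here; 0/1/2 correspond to CCP_DMA_DFLT/CCP_DMA_PRIV/CCP_DMA_PUB in the driver):

#include <stdio.h>

#define DMA_PRIVATE 0x20	/* hypothetical stand-in for the dmaengine bit */

static unsigned int dma_chan_attr;	/* module parameter, default 0 */

static unsigned int get_dma_chan_attr(unsigned int vdata_default)
{
	switch (dma_chan_attr) {
	case 0: return vdata_default;	/* defer to the per-device vdata */
	case 1: return DMA_PRIVATE;	/* force all channels private */
	case 2: return 0;		/* force all channels public */
	default:
		fprintf(stderr, "invalid dma_chan_attr=%u, using default\n",
			dma_chan_attr);
		return vdata_default;
	}
}

int main(void)
{
	dma_chan_attr = 2;
	/* v5b parts default to DMA_PRIVATE; the parameter makes them public */
	printf("attr=%#x\n", get_dma_chan_attr(DMA_PRIVATE));	/* attr=0 */
	return 0;
}

Being a read-only (0444) module parameter, the override would normally be given at load time, e.g. ccp.dma_chan_attr=1 on the kernel command line or via a modprobe option.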
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index dce1af0ce85c..1b9da3dc799b 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -270,7 +270,7 @@ static void s5p_sg_copy_buf(void *buf, struct scatterlist *sg,
270 scatterwalk_done(&walk, out, 0); 270 scatterwalk_done(&walk, out, 0);
271} 271}
272 272
273static void s5p_aes_complete(struct s5p_aes_dev *dev, int err) 273static void s5p_sg_done(struct s5p_aes_dev *dev)
274{ 274{
275 if (dev->sg_dst_cpy) { 275 if (dev->sg_dst_cpy) {
276 dev_dbg(dev->dev, 276 dev_dbg(dev->dev,
@@ -281,8 +281,11 @@ static void s5p_aes_complete(struct s5p_aes_dev *dev, int err)
281 } 281 }
282 s5p_free_sg_cpy(dev, &dev->sg_src_cpy); 282 s5p_free_sg_cpy(dev, &dev->sg_src_cpy);
283 s5p_free_sg_cpy(dev, &dev->sg_dst_cpy); 283 s5p_free_sg_cpy(dev, &dev->sg_dst_cpy);
284}
284 285
285 /* holding a lock outside */ 286/* Calls the completion. Cannot be called with dev->lock held. */
287static void s5p_aes_complete(struct s5p_aes_dev *dev, int err)
288{
286 dev->req->base.complete(&dev->req->base, err); 289 dev->req->base.complete(&dev->req->base, err);
287 dev->busy = false; 290 dev->busy = false;
288} 291}
@@ -368,51 +371,44 @@ exit:
368} 371}
369 372
370/* 373/*
371 * Returns true if new transmitting (output) data is ready and its 374 * Returns -ERRNO on error (mapping of new data failed).
372 * address+length have to be written to device (by calling 375 * On success returns:
373 * s5p_set_dma_outdata()). False otherwise. 376 * - 0 if there is no more data,
377 * - 1 if new transmitting (output) data is ready and its address+length
378 * have to be written to device (by calling s5p_set_dma_outdata()).
374 */ 379 */
375static bool s5p_aes_tx(struct s5p_aes_dev *dev) 380static int s5p_aes_tx(struct s5p_aes_dev *dev)
376{ 381{
377 int err = 0; 382 int ret = 0;
378 bool ret = false;
379 383
380 s5p_unset_outdata(dev); 384 s5p_unset_outdata(dev);
381 385
382 if (!sg_is_last(dev->sg_dst)) { 386 if (!sg_is_last(dev->sg_dst)) {
383 err = s5p_set_outdata(dev, sg_next(dev->sg_dst)); 387 ret = s5p_set_outdata(dev, sg_next(dev->sg_dst));
384 if (err) 388 if (!ret)
385 s5p_aes_complete(dev, err); 389 ret = 1;
386 else
387 ret = true;
388 } else {
389 s5p_aes_complete(dev, err);
390
391 dev->busy = true;
392 tasklet_schedule(&dev->tasklet);
393 } 390 }
394 391
395 return ret; 392 return ret;
396} 393}
397 394
398/* 395/*
399 * Returns true if new receiving (input) data is ready and its 396 * Returns -ERRNO on error (mapping of new data failed).
400 * address+length have to be written to device (by calling 397 * On success returns:
401 * s5p_set_dma_indata()). False otherwise. 398 * - 0 if there is no more data,
399 * - 1 if new receiving (input) data is ready and its address+length
400 * have to be written to device (by calling s5p_set_dma_indata()).
402 */ 401 */
403static bool s5p_aes_rx(struct s5p_aes_dev *dev) 402static int s5p_aes_rx(struct s5p_aes_dev *dev)
404{ 403{
405 int err; 404 int ret = 0;
406 bool ret = false;
407 405
408 s5p_unset_indata(dev); 406 s5p_unset_indata(dev);
409 407
410 if (!sg_is_last(dev->sg_src)) { 408 if (!sg_is_last(dev->sg_src)) {
411 err = s5p_set_indata(dev, sg_next(dev->sg_src)); 409 ret = s5p_set_indata(dev, sg_next(dev->sg_src));
412 if (err) 410 if (!ret)
413 s5p_aes_complete(dev, err); 411 ret = 1;
414 else
415 ret = true;
416 } 412 }
417 413
418 return ret; 414 return ret;
@@ -422,33 +418,73 @@ static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
422{ 418{
423 struct platform_device *pdev = dev_id; 419 struct platform_device *pdev = dev_id;
424 struct s5p_aes_dev *dev = platform_get_drvdata(pdev); 420 struct s5p_aes_dev *dev = platform_get_drvdata(pdev);
425 bool set_dma_tx = false; 421 int err_dma_tx = 0;
426 bool set_dma_rx = false; 422 int err_dma_rx = 0;
423 bool tx_end = false;
427 unsigned long flags; 424 unsigned long flags;
428 uint32_t status; 425 uint32_t status;
426 int err;
429 427
430 spin_lock_irqsave(&dev->lock, flags); 428 spin_lock_irqsave(&dev->lock, flags);
431 429
430 /*
 431 * Handle an rx or tx interrupt. If there is still data (the scatterlist
 432 * has not reached its end), map the next scatterlist entry.
 433 * If that mapping fails, s5p_aes_complete() should be called.
 434 *
 435 * If there is no more data in the tx scatterlist, call s5p_aes_complete()
 436 * and schedule a new tasklet.
437 */
432 status = SSS_READ(dev, FCINTSTAT); 438 status = SSS_READ(dev, FCINTSTAT);
433 if (status & SSS_FCINTSTAT_BRDMAINT) 439 if (status & SSS_FCINTSTAT_BRDMAINT)
434 set_dma_rx = s5p_aes_rx(dev); 440 err_dma_rx = s5p_aes_rx(dev);
435 if (status & SSS_FCINTSTAT_BTDMAINT) 441
436 set_dma_tx = s5p_aes_tx(dev); 442 if (status & SSS_FCINTSTAT_BTDMAINT) {
443 if (sg_is_last(dev->sg_dst))
444 tx_end = true;
445 err_dma_tx = s5p_aes_tx(dev);
446 }
437 447
438 SSS_WRITE(dev, FCINTPEND, status); 448 SSS_WRITE(dev, FCINTPEND, status);
439 449
440 /* 450 if (err_dma_rx < 0) {
441 * Writing length of DMA block (either receiving or transmitting) 451 err = err_dma_rx;
442 * will start the operation immediately, so this should be done 452 goto error;
443 * at the end (even after clearing pending interrupts to not miss the 453 }
444 * interrupt). 454 if (err_dma_tx < 0) {
445 */ 455 err = err_dma_tx;
446 if (set_dma_tx) 456 goto error;
447 s5p_set_dma_outdata(dev, dev->sg_dst); 457 }
448 if (set_dma_rx) 458
449 s5p_set_dma_indata(dev, dev->sg_src); 459 if (tx_end) {
460 s5p_sg_done(dev);
461
462 spin_unlock_irqrestore(&dev->lock, flags);
463
464 s5p_aes_complete(dev, 0);
465 dev->busy = true;
466 tasklet_schedule(&dev->tasklet);
467 } else {
468 /*
469 * Writing length of DMA block (either receiving or
470 * transmitting) will start the operation immediately, so this
471 * should be done at the end (even after clearing pending
472 * interrupts to not miss the interrupt).
473 */
474 if (err_dma_tx == 1)
475 s5p_set_dma_outdata(dev, dev->sg_dst);
476 if (err_dma_rx == 1)
477 s5p_set_dma_indata(dev, dev->sg_src);
450 478
479 spin_unlock_irqrestore(&dev->lock, flags);
480 }
481
482 return IRQ_HANDLED;
483
484error:
485 s5p_sg_done(dev);
451 spin_unlock_irqrestore(&dev->lock, flags); 486 spin_unlock_irqrestore(&dev->lock, flags);
487 s5p_aes_complete(dev, err);
452 488
453 return IRQ_HANDLED; 489 return IRQ_HANDLED;
454} 490}
@@ -597,8 +633,9 @@ outdata_error:
597 s5p_unset_indata(dev); 633 s5p_unset_indata(dev);
598 634
599indata_error: 635indata_error:
600 s5p_aes_complete(dev, err); 636 s5p_sg_done(dev);
601 spin_unlock_irqrestore(&dev->lock, flags); 637 spin_unlock_irqrestore(&dev->lock, flags);
638 s5p_aes_complete(dev, err);
602} 639}
603 640
604static void s5p_tasklet_cb(unsigned long data) 641static void s5p_tasklet_cb(unsigned long data)
@@ -805,8 +842,9 @@ static int s5p_aes_probe(struct platform_device *pdev)
805 dev_warn(dev, "feed control interrupt is not available.\n"); 842 dev_warn(dev, "feed control interrupt is not available.\n");
806 goto err_irq; 843 goto err_irq;
807 } 844 }
808 err = devm_request_irq(dev, pdata->irq_fc, s5p_aes_interrupt, 845 err = devm_request_threaded_irq(dev, pdata->irq_fc, NULL,
809 IRQF_SHARED, pdev->name, pdev); 846 s5p_aes_interrupt, IRQF_ONESHOT,
847 pdev->name, pdev);
810 if (err < 0) { 848 if (err < 0) {
811 dev_warn(dev, "feed control interrupt is not available.\n"); 849 dev_warn(dev, "feed control interrupt is not available.\n");
812 goto err_irq; 850 goto err_irq;
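
s5p_aes_rx()/s5p_aes_tx() now share a tri-state return convention: a negative errno aborts the request, 1 asks the interrupt handler to program the next DMA block, and 0 means the scatterlist is exhausted. A small sketch of how a caller branches on it (map_next() is a hypothetical stand-in for the set_indata/set_outdata helpers):

#include <stdio.h>

#define ENOMEM 12

/* stand-in for mapping the next scatterlist entry */
static int map_next(int remaining, int mapping_fails)
{
	if (!remaining)
		return 0;		/* scatterlist exhausted */
	if (mapping_fails)
		return -ENOMEM;		/* propagate the mapping error */
	return 1;			/* new block ready: program addr+len */
}

int main(void)
{
	int ret = map_next(1, 0);

	if (ret < 0)
		printf("finish request with error %d\n", ret);
	else if (ret == 1)
		printf("write next DMA block to the engine\n");
	else
		printf("transfer complete, schedule tasklet\n");
	return 0;
}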
diff --git a/drivers/dax/dax.c b/drivers/dax/dax.c
index 8d9829ff2a78..80c6db279ae1 100644
--- a/drivers/dax/dax.c
+++ b/drivers/dax/dax.c
@@ -427,6 +427,7 @@ static int __dax_dev_pte_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
427 int rc = VM_FAULT_SIGBUS; 427 int rc = VM_FAULT_SIGBUS;
428 phys_addr_t phys; 428 phys_addr_t phys;
429 pfn_t pfn; 429 pfn_t pfn;
430 unsigned int fault_size = PAGE_SIZE;
430 431
431 if (check_vma(dax_dev, vmf->vma, __func__)) 432 if (check_vma(dax_dev, vmf->vma, __func__))
432 return VM_FAULT_SIGBUS; 433 return VM_FAULT_SIGBUS;
@@ -437,9 +438,12 @@ static int __dax_dev_pte_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
437 return VM_FAULT_SIGBUS; 438 return VM_FAULT_SIGBUS;
438 } 439 }
439 440
441 if (fault_size != dax_region->align)
442 return VM_FAULT_SIGBUS;
443
440 phys = pgoff_to_phys(dax_dev, vmf->pgoff, PAGE_SIZE); 444 phys = pgoff_to_phys(dax_dev, vmf->pgoff, PAGE_SIZE);
441 if (phys == -1) { 445 if (phys == -1) {
442 dev_dbg(dev, "%s: phys_to_pgoff(%#lx) failed\n", __func__, 446 dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__,
443 vmf->pgoff); 447 vmf->pgoff);
444 return VM_FAULT_SIGBUS; 448 return VM_FAULT_SIGBUS;
445 } 449 }
@@ -464,6 +468,7 @@ static int __dax_dev_pmd_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
464 phys_addr_t phys; 468 phys_addr_t phys;
465 pgoff_t pgoff; 469 pgoff_t pgoff;
466 pfn_t pfn; 470 pfn_t pfn;
471 unsigned int fault_size = PMD_SIZE;
467 472
468 if (check_vma(dax_dev, vmf->vma, __func__)) 473 if (check_vma(dax_dev, vmf->vma, __func__))
469 return VM_FAULT_SIGBUS; 474 return VM_FAULT_SIGBUS;
@@ -480,10 +485,20 @@ static int __dax_dev_pmd_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
480 return VM_FAULT_SIGBUS; 485 return VM_FAULT_SIGBUS;
481 } 486 }
482 487
488 if (fault_size < dax_region->align)
489 return VM_FAULT_SIGBUS;
490 else if (fault_size > dax_region->align)
491 return VM_FAULT_FALLBACK;
492
493 /* if we are outside of the VMA */
494 if (pmd_addr < vmf->vma->vm_start ||
495 (pmd_addr + PMD_SIZE) > vmf->vma->vm_end)
496 return VM_FAULT_SIGBUS;
497
483 pgoff = linear_page_index(vmf->vma, pmd_addr); 498 pgoff = linear_page_index(vmf->vma, pmd_addr);
484 phys = pgoff_to_phys(dax_dev, pgoff, PMD_SIZE); 499 phys = pgoff_to_phys(dax_dev, pgoff, PMD_SIZE);
485 if (phys == -1) { 500 if (phys == -1) {
486 dev_dbg(dev, "%s: phys_to_pgoff(%#lx) failed\n", __func__, 501 dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__,
487 pgoff); 502 pgoff);
488 return VM_FAULT_SIGBUS; 503 return VM_FAULT_SIGBUS;
489 } 504 }
@@ -503,6 +518,8 @@ static int __dax_dev_pud_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
503 phys_addr_t phys; 518 phys_addr_t phys;
504 pgoff_t pgoff; 519 pgoff_t pgoff;
505 pfn_t pfn; 520 pfn_t pfn;
521 unsigned int fault_size = PUD_SIZE;
522
506 523
507 if (check_vma(dax_dev, vmf->vma, __func__)) 524 if (check_vma(dax_dev, vmf->vma, __func__))
508 return VM_FAULT_SIGBUS; 525 return VM_FAULT_SIGBUS;
@@ -519,10 +536,20 @@ static int __dax_dev_pud_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
519 return VM_FAULT_SIGBUS; 536 return VM_FAULT_SIGBUS;
520 } 537 }
521 538
539 if (fault_size < dax_region->align)
540 return VM_FAULT_SIGBUS;
541 else if (fault_size > dax_region->align)
542 return VM_FAULT_FALLBACK;
543
544 /* if we are outside of the VMA */
545 if (pud_addr < vmf->vma->vm_start ||
546 (pud_addr + PUD_SIZE) > vmf->vma->vm_end)
547 return VM_FAULT_SIGBUS;
548
522 pgoff = linear_page_index(vmf->vma, pud_addr); 549 pgoff = linear_page_index(vmf->vma, pud_addr);
523 phys = pgoff_to_phys(dax_dev, pgoff, PUD_SIZE); 550 phys = pgoff_to_phys(dax_dev, pgoff, PUD_SIZE);
524 if (phys == -1) { 551 if (phys == -1) {
525 dev_dbg(dev, "%s: phys_to_pgoff(%#lx) failed\n", __func__, 552 dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__,
526 pgoff); 553 pgoff);
527 return VM_FAULT_SIGBUS; 554 return VM_FAULT_SIGBUS;
528 } 555 }
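
The three handlers apply the same size policy against the region alignment: a PTE fault has nothing smaller to fall back to, so any mismatch is SIGBUS, while PMD/PUD faults that are too small get SIGBUS and too-large ones are retried at a smaller size via FALLBACK. A standalone sketch of that decision (the VM_FAULT_* values are stand-ins, not the kernel's):

#include <stdio.h>

#define VM_FAULT_SIGBUS   1	/* stand-in values for illustration */
#define VM_FAULT_FALLBACK 2

/* policy for huge (PMD/PUD) faults; a PTE fault simply SIGBUSes on != */
static int check_fault_size(unsigned long fault_size, unsigned long align)
{
	if (fault_size < align)
		return VM_FAULT_SIGBUS;		/* can never satisfy it */
	if (fault_size > align)
		return VM_FAULT_FALLBACK;	/* retry at a smaller size */
	return 0;				/* exact match: map it */
}

int main(void)
{
	unsigned long align = 2UL << 20;	/* 2 MiB dax region alignment */

	printf("%d\n", check_fault_size(4096, align));		/* 1: SIGBUS */
	printf("%d\n", check_fault_size(2UL << 20, align));	/* 0: map */
	printf("%d\n", check_fault_size(1UL << 30, align));	/* 2: fallback */
	return 0;
}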
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index e18dc596cf24..6204cc32d09c 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -251,8 +251,11 @@ static void bcm2835_dma_create_cb_set_length(
251 */ 251 */
252 252
253 /* have we filled in period_length yet? */ 253 /* have we filled in period_length yet? */
254 if (*total_len + control_block->length < period_len) 254 if (*total_len + control_block->length < period_len) {
255 /* update number of bytes in this period so far */
256 *total_len += control_block->length;
255 return; 257 return;
258 }
256 259
257 /* calculate the length that remains to reach period_length */ 260 /* calculate the length that remains to reach period_length */
258 control_block->length = period_len - *total_len; 261 control_block->length = period_len - *total_len;
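
The one-line fix is easiest to see with numbers: without accumulating *total_len for control blocks that do not yet fill a period, every block is compared against period_len as if it were the first, so the boundary (where the cyclic transfer must interrupt) lands in the wrong place. A standalone sketch of the corrected accumulation (simplified: the real code also splits the remainder into a following control block):

#include <stdio.h>

static void set_cb_length(unsigned int *cb_len, unsigned int *total_len,
			  unsigned int period_len)
{
	if (*total_len + *cb_len < period_len) {
		/* the added line: remember what this period already holds */
		*total_len += *cb_len;
		return;
	}
	*cb_len = period_len - *total_len;	/* trim to the boundary */
	*total_len = 0;				/* next period starts fresh */
}

int main(void)
{
	unsigned int total = 0, a = 100, b = 100, period = 150;

	set_cb_length(&a, &total, period);	/* a=100, total becomes 100 */
	set_cb_length(&b, &total, period);	/* b trimmed to 50 */
	printf("a=%u b=%u\n", a, b);		/* a=100 b=50 */
	return 0;
}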
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 24e0221fd66d..d9118ec23025 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -1108,12 +1108,14 @@ static struct dmaengine_unmap_pool *__get_unmap_pool(int nr)
1108 switch (order) { 1108 switch (order) {
1109 case 0 ... 1: 1109 case 0 ... 1:
1110 return &unmap_pool[0]; 1110 return &unmap_pool[0];
1111#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
1111 case 2 ... 4: 1112 case 2 ... 4:
1112 return &unmap_pool[1]; 1113 return &unmap_pool[1];
1113 case 5 ... 7: 1114 case 5 ... 7:
1114 return &unmap_pool[2]; 1115 return &unmap_pool[2];
1115 case 8: 1116 case 8:
1116 return &unmap_pool[3]; 1117 return &unmap_pool[3];
1118#endif
1117 default: 1119 default:
1118 BUG(); 1120 BUG();
1119 return NULL; 1121 return NULL;
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 82d85cce81f8..4773f2867234 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -43,6 +43,7 @@ config EDAC_LEGACY_SYSFS
43 43
44config EDAC_DEBUG 44config EDAC_DEBUG
45 bool "Debugging" 45 bool "Debugging"
46 select DEBUG_FS
46 help 47 help
47 This turns on debugging information for the entire EDAC subsystem. 48 This turns on debugging information for the entire EDAC subsystem.
48 You do so by inserting edac_module with "edac_debug_level=x." Valid 49 You do so by inserting edac_module with "edac_debug_level=x." Valid
@@ -259,6 +260,15 @@ config EDAC_SKX
259 Support for error detection and correction the Intel 260 Support for error detection and correction the Intel
260 Skylake server Integrated Memory Controllers. 261 Skylake server Integrated Memory Controllers.
261 262
263config EDAC_PND2
264 tristate "Intel Pondicherry2"
265 depends on EDAC_MM_EDAC && PCI && X86_64 && X86_MCE_INTEL
266 help
267 Support for error detection and correction on the Intel
268 Pondicherry2 Integrated Memory Controller. This SoC IP is
269 first used on the Apollo Lake platform and Denverton
270 micro-server but may appear on others in the future.
271
262config EDAC_MPC85XX 272config EDAC_MPC85XX
263 tristate "Freescale MPC83xx / MPC85xx" 273 tristate "Freescale MPC83xx / MPC85xx"
264 depends on EDAC_MM_EDAC && FSL_SOC 274 depends on EDAC_MM_EDAC && FSL_SOC
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index 88e472e8b9a9..587107e90996 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -32,6 +32,7 @@ obj-$(CONFIG_EDAC_I7300) += i7300_edac.o
32obj-$(CONFIG_EDAC_I7CORE) += i7core_edac.o 32obj-$(CONFIG_EDAC_I7CORE) += i7core_edac.o
33obj-$(CONFIG_EDAC_SBRIDGE) += sb_edac.o 33obj-$(CONFIG_EDAC_SBRIDGE) += sb_edac.o
34obj-$(CONFIG_EDAC_SKX) += skx_edac.o 34obj-$(CONFIG_EDAC_SKX) += skx_edac.o
35obj-$(CONFIG_EDAC_PND2) += pnd2_edac.o
35obj-$(CONFIG_EDAC_E7XXX) += e7xxx_edac.o 36obj-$(CONFIG_EDAC_E7XXX) += e7xxx_edac.o
36obj-$(CONFIG_EDAC_E752X) += e752x_edac.o 37obj-$(CONFIG_EDAC_E752X) += e752x_edac.o
37obj-$(CONFIG_EDAC_I82443BXGX) += i82443bxgx_edac.o 38obj-$(CONFIG_EDAC_I82443BXGX) += i82443bxgx_edac.o
diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
index 1670d27bcac8..f683919981b0 100644
--- a/drivers/edac/i5000_edac.c
+++ b/drivers/edac/i5000_edac.c
@@ -1293,7 +1293,7 @@ static int i5000_init_csrows(struct mem_ctl_info *mci)
1293 dimm->mtype = MEM_FB_DDR2; 1293 dimm->mtype = MEM_FB_DDR2;
1294 1294
1295 /* ask what device type on this row */ 1295 /* ask what device type on this row */
1296 if (MTR_DRAM_WIDTH(mtr)) 1296 if (MTR_DRAM_WIDTH(mtr) == 8)
1297 dimm->dtype = DEV_X8; 1297 dimm->dtype = DEV_X8;
1298 else 1298 else
1299 dimm->dtype = DEV_X4; 1299 dimm->dtype = DEV_X4;
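
Both this fix and the i5400 one below stem from MTR_DRAM_WIDTH() evaluating to the device width in bits rather than to a boolean, so using it as a condition was always true and every DIMM was reported as x8. A sketch of the difference, assuming a simplified macro definition (hypothetical; the real one decodes the MTR register):

#include <stdio.h>

/* hypothetical simplification: width bit set means x8, clear means x4 */
#define MTR_DRAM_WIDTH(mtr) (((mtr) & 0x8) ? 8 : 4)

int main(void)
{
	unsigned int mtr_x4 = 0x0;

	/* old test: 4 is truthy, so even an x4 part was labeled DEV_X8 */
	printf("old: %s\n", MTR_DRAM_WIDTH(mtr_x4) ? "DEV_X8" : "DEV_X4");
	/* new test: compare against the actual width */
	printf("new: %s\n", MTR_DRAM_WIDTH(mtr_x4) == 8 ? "DEV_X8" : "DEV_X4");
	return 0;
}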
diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
index abf6ef22e220..37a9ba71da44 100644
--- a/drivers/edac/i5400_edac.c
+++ b/drivers/edac/i5400_edac.c
@@ -1207,13 +1207,14 @@ static int i5400_init_dimms(struct mem_ctl_info *mci)
1207 1207
1208 dimm->nr_pages = size_mb << 8; 1208 dimm->nr_pages = size_mb << 8;
1209 dimm->grain = 8; 1209 dimm->grain = 8;
1210 dimm->dtype = MTR_DRAM_WIDTH(mtr) ? DEV_X8 : DEV_X4; 1210 dimm->dtype = MTR_DRAM_WIDTH(mtr) == 8 ?
1211 DEV_X8 : DEV_X4;
1211 dimm->mtype = MEM_FB_DDR2; 1212 dimm->mtype = MEM_FB_DDR2;
1212 /* 1213 /*
1213 * The ECC mechanism is SDDC (aka SECC), which 1214 * The ECC mechanism is SDDC (aka SECC), which
1214 * is similar to Chipkill. 1215 * is similar to Chipkill.
1215 */ 1216 */
1216 dimm->edac_mode = MTR_DRAM_WIDTH(mtr) ? 1217 dimm->edac_mode = MTR_DRAM_WIDTH(mtr) == 8 ?
1217 EDAC_S8ECD8ED : EDAC_S4ECD4ED; 1218 EDAC_S8ECD8ED : EDAC_S4ECD4ED;
1218 ndimms++; 1219 ndimms++;
1219 } 1220 }
diff --git a/drivers/edac/pnd2_edac.c b/drivers/edac/pnd2_edac.c
new file mode 100644
index 000000000000..928e0dba41fc
--- /dev/null
+++ b/drivers/edac/pnd2_edac.c
@@ -0,0 +1,1546 @@
1/*
2 * Driver for Pondicherry2 memory controller.
3 *
4 * Copyright (c) 2016, Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * [Derived from sb_edac.c]
16 *
17 * Translation of system physical addresses to DIMM addresses
18 * is a two stage process:
19 *
20 * First the Pondicherry 2 memory controller handles slice and channel interleaving
21 * in "sys2pmi()". This is (almost) completley common between platforms.
22 *
23 * Then a platform specific dunit (DIMM unit) completes the process to provide DIMM,
24 * rank, bank, row and column using the appropriate "dunit_ops" functions/parameters.
25 */
26
27#include <linux/module.h>
28#include <linux/init.h>
29#include <linux/pci.h>
30#include <linux/pci_ids.h>
31#include <linux/slab.h>
32#include <linux/delay.h>
33#include <linux/edac.h>
34#include <linux/mmzone.h>
35#include <linux/smp.h>
36#include <linux/bitmap.h>
37#include <linux/math64.h>
38#include <linux/mod_devicetable.h>
39#include <asm/cpu_device_id.h>
40#include <asm/intel-family.h>
41#include <asm/processor.h>
42#include <asm/mce.h>
43
44#include "edac_mc.h"
45#include "edac_module.h"
46#include "pnd2_edac.h"
47
48#define APL_NUM_CHANNELS 4
49#define DNV_NUM_CHANNELS 2
50#define DNV_MAX_DIMMS 2 /* Max DIMMs per channel */
51
52enum type {
53 APL,
54 DNV, /* All requests go to PMI CH0 on each slice (CH1 disabled) */
55};
56
57struct dram_addr {
58 int chan;
59 int dimm;
60 int rank;
61 int bank;
62 int row;
63 int col;
64};
65
66struct pnd2_pvt {
67 int dimm_geom[APL_NUM_CHANNELS];
68 u64 tolm, tohm;
69};
70
71/*
72 * System address space is divided into multiple regions with
73 * different interleave rules in each. The as0/as1 regions
74 * have no interleaving at all. The as2 region is interleaved
75 * between two channels. The mot region is magic and may overlap
76 * other regions, with its interleave rules taking precedence.
77 * Addresses not in any of these regions are interleaved across
78 * all four channels.
79 */
80static struct region {
81 u64 base;
82 u64 limit;
83 u8 enabled;
84} mot, as0, as1, as2;
85
86static struct dunit_ops {
87 char *name;
88 enum type type;
89 int pmiaddr_shift;
90 int pmiidx_shift;
91 int channels;
92 int dimms_per_channel;
93 int (*rd_reg)(int port, int off, int op, void *data, size_t sz, char *name);
94 int (*get_registers)(void);
95 int (*check_ecc)(void);
96 void (*mk_region)(char *name, struct region *rp, void *asym);
97 void (*get_dimm_config)(struct mem_ctl_info *mci);
98 int (*pmi2mem)(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
99 struct dram_addr *daddr, char *msg);
100} *ops;
101
102static struct mem_ctl_info *pnd2_mci;
103
104#define PND2_MSG_SIZE 256
105
106/* Debug macros */
107#define pnd2_printk(level, fmt, arg...) \
108 edac_printk(level, "pnd2", fmt, ##arg)
109
110#define pnd2_mc_printk(mci, level, fmt, arg...) \
111 edac_mc_chipset_printk(mci, level, "pnd2", fmt, ##arg)
112
113#define MOT_CHAN_INTLV_BIT_1SLC_2CH 12
114#define MOT_CHAN_INTLV_BIT_2SLC_2CH 13
115#define SELECTOR_DISABLED (-1)
116#define _4GB (1ul << 32)
117
118#define PMI_ADDRESS_WIDTH 31
119#define PND_MAX_PHYS_BIT 39
120
121#define APL_ASYMSHIFT 28
122#define DNV_ASYMSHIFT 31
123#define CH_HASH_MASK_LSB 6
124#define SLICE_HASH_MASK_LSB 6
125#define MOT_SLC_INTLV_BIT 12
126#define LOG2_PMI_ADDR_GRANULARITY 5
127#define MOT_SHIFT 24
128
129#define GET_BITFIELD(v, lo, hi) (((v) & GENMASK_ULL(hi, lo)) >> (lo))
130#define U64_LSHIFT(val, s) ((u64)(val) << (s))
131
132#ifdef CONFIG_X86_INTEL_SBI_APL
133#include "linux/platform_data/sbi_apl.h"
134int sbi_send(int port, int off, int op, u32 *data)
135{
136 struct sbi_apl_message sbi_arg;
137 int ret, read = 0;
138
139 memset(&sbi_arg, 0, sizeof(sbi_arg));
140
141 if (op == 0 || op == 4 || op == 6)
142 read = 1;
143 else
144 sbi_arg.data = *data;
145
146 sbi_arg.opcode = op;
147 sbi_arg.port_address = port;
148 sbi_arg.register_offset = off;
149 ret = sbi_apl_commit(&sbi_arg);
150 if (ret || sbi_arg.status)
151 edac_dbg(2, "sbi_send status=%d ret=%d data=%x\n",
152 sbi_arg.status, ret, sbi_arg.data);
153
154 if (ret == 0)
155 ret = sbi_arg.status;
156
157 if (ret == 0 && read)
158 *data = sbi_arg.data;
159
160 return ret;
161}
162#else
163int sbi_send(int port, int off, int op, u32 *data)
164{
165 return -EUNATCH;
166}
167#endif
168
169static int apl_rd_reg(int port, int off, int op, void *data, size_t sz, char *name)
170{
171 int ret = 0;
172
173 edac_dbg(2, "Read %s port=%x off=%x op=%x\n", name, port, off, op);
174 switch (sz) {
175 case 8:
176 ret = sbi_send(port, off + 4, op, (u32 *)(data + 4));
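		/* fall through - a 64-bit read also fetches the low dword */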
177 case 4:
178 ret = sbi_send(port, off, op, (u32 *)data);
179 pnd2_printk(KERN_DEBUG, "%s=%x%08x ret=%d\n", name,
180 sz == 8 ? *((u32 *)(data + 4)) : 0, *((u32 *)data), ret);
181 break;
182 }
183
184 return ret;
185}
186
187static u64 get_mem_ctrl_hub_base_addr(void)
188{
189 struct b_cr_mchbar_lo_pci lo;
190 struct b_cr_mchbar_hi_pci hi;
191 struct pci_dev *pdev;
192
193 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL);
194 if (pdev) {
195 pci_read_config_dword(pdev, 0x48, (u32 *)&lo);
196 pci_read_config_dword(pdev, 0x4c, (u32 *)&hi);
197 pci_dev_put(pdev);
198 } else {
199 return 0;
200 }
201
202 if (!lo.enable) {
203 edac_dbg(2, "MMIO via memory controller hub base address is disabled!\n");
204 return 0;
205 }
206
207 return U64_LSHIFT(hi.base, 32) | U64_LSHIFT(lo.base, 15);
208}
209
210static u64 get_sideband_reg_base_addr(void)
211{
212 struct pci_dev *pdev;
213 u32 hi, lo;
214
215 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x19dd, NULL);
216 if (pdev) {
217 pci_read_config_dword(pdev, 0x10, &lo);
218 pci_read_config_dword(pdev, 0x14, &hi);
219 pci_dev_put(pdev);
220 return (U64_LSHIFT(hi, 32) | U64_LSHIFT(lo, 0));
221 } else {
222 return 0xfd000000;
223 }
224}
225
226static int dnv_rd_reg(int port, int off, int op, void *data, size_t sz, char *name)
227{
228 struct pci_dev *pdev;
229 char *base;
230 u64 addr;
231
232 if (op == 4) {
233 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL);
234 if (!pdev)
235 return -ENODEV;
236
237 pci_read_config_dword(pdev, off, data);
238 pci_dev_put(pdev);
239 } else {
240 /* MMIO via memory controller hub base address */
241 if (op == 0 && port == 0x4c) {
242 addr = get_mem_ctrl_hub_base_addr();
243 if (!addr)
244 return -ENODEV;
245 } else {
246 /* MMIO via sideband register base address */
247 addr = get_sideband_reg_base_addr();
248 if (!addr)
249 return -ENODEV;
250 addr += (port << 16);
251 }
252
253 base = ioremap((resource_size_t)addr, 0x10000);
254 if (!base)
255 return -ENODEV;
256
257 if (sz == 8)
258 *(u32 *)(data + 4) = *(u32 *)(base + off + 4);
259 *(u32 *)data = *(u32 *)(base + off);
260
261 iounmap(base);
262 }
263
264 edac_dbg(2, "Read %s=%.8x_%.8x\n", name,
265 (sz == 8) ? *(u32 *)(data + 4) : 0, *(u32 *)data);
266
267 return 0;
268}
269
270#define RD_REGP(regp, regname, port) \
271 ops->rd_reg(port, \
272 regname##_offset, \
273 regname##_r_opcode, \
274 regp, sizeof(struct regname), \
275 #regname)
276
277#define RD_REG(regp, regname) \
278 ops->rd_reg(regname ## _port, \
279 regname##_offset, \
280 regname##_r_opcode, \
281 regp, sizeof(struct regname), \
282 #regname)
283
284static u64 top_lm, top_hm;
285static bool two_slices;
286static bool two_channels; /* Both PMI channels in one slice enabled */
287
288static u8 sym_chan_mask;
289static u8 asym_chan_mask;
290static u8 chan_mask;
291
292static int slice_selector = -1;
293static int chan_selector = -1;
294static u64 slice_hash_mask;
295static u64 chan_hash_mask;
296
297static void mk_region(char *name, struct region *rp, u64 base, u64 limit)
298{
299 rp->enabled = 1;
300 rp->base = base;
301 rp->limit = limit;
302 edac_dbg(2, "Region:%s [%llx, %llx]\n", name, base, limit);
303}
304
305static void mk_region_mask(char *name, struct region *rp, u64 base, u64 mask)
306{
307 if (mask == 0) {
308 pr_info(FW_BUG "MOT mask cannot be zero\n");
309 return;
310 }
311 if (mask != GENMASK_ULL(PND_MAX_PHYS_BIT, __ffs(mask))) {
312 pr_info(FW_BUG "MOT mask not power of two\n");
313 return;
314 }
315 if (base & ~mask) {
316 pr_info(FW_BUG "MOT region base/mask alignment error\n");
317 return;
318 }
319 rp->base = base;
320 rp->limit = (base | ~mask) & GENMASK_ULL(PND_MAX_PHYS_BIT, 0);
321 rp->enabled = 1;
322 edac_dbg(2, "Region:%s [%llx, %llx]\n", name, base, rp->limit);
323}
324
325static bool in_region(struct region *rp, u64 addr)
326{
327 if (!rp->enabled)
328 return false;
329
330 return rp->base <= addr && addr <= rp->limit;
331}
332
333static int gen_sym_mask(struct b_cr_slice_channel_hash *p)
334{
335 int mask = 0;
336
337 if (!p->slice_0_mem_disabled)
338 mask |= p->sym_slice0_channel_enabled;
339
340 if (!p->slice_1_disabled)
341 mask |= p->sym_slice1_channel_enabled << 2;
342
343 if (p->ch_1_disabled || p->enable_pmi_dual_data_mode)
344 mask &= 0x5;
345
346 return mask;
347}
348
349static int gen_asym_mask(struct b_cr_slice_channel_hash *p,
350 struct b_cr_asym_mem_region0_mchbar *as0,
351 struct b_cr_asym_mem_region1_mchbar *as1,
352 struct b_cr_asym_2way_mem_region_mchbar *as2way)
353{
354 const int intlv[] = { 0x5, 0xA, 0x3, 0xC };
355 int mask = 0;
356
357 if (as2way->asym_2way_interleave_enable)
358 mask = intlv[as2way->asym_2way_intlv_mode];
359 if (as0->slice0_asym_enable)
360 mask |= (1 << as0->slice0_asym_channel_select);
361 if (as1->slice1_asym_enable)
362 mask |= (4 << as1->slice1_asym_channel_select);
363 if (p->slice_0_mem_disabled)
364 mask &= 0xc;
365 if (p->slice_1_disabled)
366 mask &= 0x3;
367 if (p->ch_1_disabled || p->enable_pmi_dual_data_mode)
368 mask &= 0x5;
369
370 return mask;
371}
372
373static struct b_cr_tolud_pci tolud;
374static struct b_cr_touud_lo_pci touud_lo;
375static struct b_cr_touud_hi_pci touud_hi;
376static struct b_cr_asym_mem_region0_mchbar asym0;
377static struct b_cr_asym_mem_region1_mchbar asym1;
378static struct b_cr_asym_2way_mem_region_mchbar asym_2way;
379static struct b_cr_mot_out_base_mchbar mot_base;
380static struct b_cr_mot_out_mask_mchbar mot_mask;
381static struct b_cr_slice_channel_hash chash;
382
383/* Apollo Lake dunit */
384/*
385 * Validated on board with just two DIMMs in the [0] and [2] positions
386 * in this array. Other port number matches documentation, but caution
387 * advised.
388 */
389static const int apl_dports[APL_NUM_CHANNELS] = { 0x18, 0x10, 0x11, 0x19 };
390static struct d_cr_drp0 drp0[APL_NUM_CHANNELS];
391
392/* Denverton dunit */
393static const int dnv_dports[DNV_NUM_CHANNELS] = { 0x10, 0x12 };
394static struct d_cr_dsch dsch;
395static struct d_cr_ecc_ctrl ecc_ctrl[DNV_NUM_CHANNELS];
396static struct d_cr_drp drp[DNV_NUM_CHANNELS];
397static struct d_cr_dmap dmap[DNV_NUM_CHANNELS];
398static struct d_cr_dmap1 dmap1[DNV_NUM_CHANNELS];
399static struct d_cr_dmap2 dmap2[DNV_NUM_CHANNELS];
400static struct d_cr_dmap3 dmap3[DNV_NUM_CHANNELS];
401static struct d_cr_dmap4 dmap4[DNV_NUM_CHANNELS];
402static struct d_cr_dmap5 dmap5[DNV_NUM_CHANNELS];
403
404static void apl_mk_region(char *name, struct region *rp, void *asym)
405{
406 struct b_cr_asym_mem_region0_mchbar *a = asym;
407
408 mk_region(name, rp,
409 U64_LSHIFT(a->slice0_asym_base, APL_ASYMSHIFT),
410 U64_LSHIFT(a->slice0_asym_limit, APL_ASYMSHIFT) +
411 GENMASK_ULL(APL_ASYMSHIFT - 1, 0));
412}
413
414static void dnv_mk_region(char *name, struct region *rp, void *asym)
415{
416 struct b_cr_asym_mem_region_denverton *a = asym;
417
418 mk_region(name, rp,
419 U64_LSHIFT(a->slice_asym_base, DNV_ASYMSHIFT),
420 U64_LSHIFT(a->slice_asym_limit, DNV_ASYMSHIFT) +
421 GENMASK_ULL(DNV_ASYMSHIFT - 1, 0));
422}
423
424static int apl_get_registers(void)
425{
426 int i;
427
428 if (RD_REG(&asym_2way, b_cr_asym_2way_mem_region_mchbar))
429 return -ENODEV;
430
431 for (i = 0; i < APL_NUM_CHANNELS; i++)
432 if (RD_REGP(&drp0[i], d_cr_drp0, apl_dports[i]))
433 return -ENODEV;
434
435 return 0;
436}
437
438static int dnv_get_registers(void)
439{
440 int i;
441
442 if (RD_REG(&dsch, d_cr_dsch))
443 return -ENODEV;
444
445 for (i = 0; i < DNV_NUM_CHANNELS; i++)
446 if (RD_REGP(&ecc_ctrl[i], d_cr_ecc_ctrl, dnv_dports[i]) ||
447 RD_REGP(&drp[i], d_cr_drp, dnv_dports[i]) ||
448 RD_REGP(&dmap[i], d_cr_dmap, dnv_dports[i]) ||
449 RD_REGP(&dmap1[i], d_cr_dmap1, dnv_dports[i]) ||
450 RD_REGP(&dmap2[i], d_cr_dmap2, dnv_dports[i]) ||
451 RD_REGP(&dmap3[i], d_cr_dmap3, dnv_dports[i]) ||
452 RD_REGP(&dmap4[i], d_cr_dmap4, dnv_dports[i]) ||
453 RD_REGP(&dmap5[i], d_cr_dmap5, dnv_dports[i]))
454 return -ENODEV;
455
456 return 0;
457}
458
459/*
460 * Read all the h/w config registers once here (they don't
461 * change at run time. Figure out which address ranges have
462 * which interleave characteristics.
463 */
464static int get_registers(void)
465{
466 const int intlv[] = { 10, 11, 12, 12 };
467
468 if (RD_REG(&tolud, b_cr_tolud_pci) ||
469 RD_REG(&touud_lo, b_cr_touud_lo_pci) ||
470 RD_REG(&touud_hi, b_cr_touud_hi_pci) ||
471 RD_REG(&asym0, b_cr_asym_mem_region0_mchbar) ||
472 RD_REG(&asym1, b_cr_asym_mem_region1_mchbar) ||
473 RD_REG(&mot_base, b_cr_mot_out_base_mchbar) ||
474 RD_REG(&mot_mask, b_cr_mot_out_mask_mchbar) ||
475 RD_REG(&chash, b_cr_slice_channel_hash))
476 return -ENODEV;
477
478 if (ops->get_registers())
479 return -ENODEV;
480
481 if (ops->type == DNV) {
482 /* PMI channel idx (always 0) for asymmetric region */
483 asym0.slice0_asym_channel_select = 0;
484 asym1.slice1_asym_channel_select = 0;
485 /* PMI channel bitmap (always 1) for symmetric region */
486 chash.sym_slice0_channel_enabled = 0x1;
487 chash.sym_slice1_channel_enabled = 0x1;
488 }
489
490 if (asym0.slice0_asym_enable)
491 ops->mk_region("as0", &as0, &asym0);
492
493 if (asym1.slice1_asym_enable)
494 ops->mk_region("as1", &as1, &asym1);
495
496 if (asym_2way.asym_2way_interleave_enable) {
497 mk_region("as2way", &as2,
498 U64_LSHIFT(asym_2way.asym_2way_base, APL_ASYMSHIFT),
499 U64_LSHIFT(asym_2way.asym_2way_limit, APL_ASYMSHIFT) +
500 GENMASK_ULL(APL_ASYMSHIFT - 1, 0));
501 }
502
503 if (mot_base.imr_en) {
504 mk_region_mask("mot", &mot,
505 U64_LSHIFT(mot_base.mot_out_base, MOT_SHIFT),
506 U64_LSHIFT(mot_mask.mot_out_mask, MOT_SHIFT));
507 }
508
509 top_lm = U64_LSHIFT(tolud.tolud, 20);
510 top_hm = U64_LSHIFT(touud_hi.touud, 32) | U64_LSHIFT(touud_lo.touud, 20);
511
512 two_slices = !chash.slice_1_disabled &&
513 !chash.slice_0_mem_disabled &&
514 (chash.sym_slice0_channel_enabled != 0) &&
515 (chash.sym_slice1_channel_enabled != 0);
516 two_channels = !chash.ch_1_disabled &&
517 !chash.enable_pmi_dual_data_mode &&
518 ((chash.sym_slice0_channel_enabled == 3) ||
519 (chash.sym_slice1_channel_enabled == 3));
520
521 sym_chan_mask = gen_sym_mask(&chash);
522 asym_chan_mask = gen_asym_mask(&chash, &asym0, &asym1, &asym_2way);
523 chan_mask = sym_chan_mask | asym_chan_mask;
524
525 if (two_slices && !two_channels) {
526 if (chash.hvm_mode)
527 slice_selector = 29;
528 else
529 slice_selector = intlv[chash.interleave_mode];
530 } else if (!two_slices && two_channels) {
531 if (chash.hvm_mode)
532 chan_selector = 29;
533 else
534 chan_selector = intlv[chash.interleave_mode];
535 } else if (two_slices && two_channels) {
536 if (chash.hvm_mode) {
537 slice_selector = 29;
538 chan_selector = 30;
539 } else {
540 slice_selector = intlv[chash.interleave_mode];
541 chan_selector = intlv[chash.interleave_mode] + 1;
542 }
543 }
544
545 if (two_slices) {
546 if (!chash.hvm_mode)
547 slice_hash_mask = chash.slice_hash_mask << SLICE_HASH_MASK_LSB;
548 if (!two_channels)
549 slice_hash_mask |= BIT_ULL(slice_selector);
550 }
551
552 if (two_channels) {
553 if (!chash.hvm_mode)
554 chan_hash_mask = chash.ch_hash_mask << CH_HASH_MASK_LSB;
555 if (!two_slices)
556 chan_hash_mask |= BIT_ULL(chan_selector);
557 }
558
559 return 0;
560}
561
562/* Get a contiguous memory address (remove the MMIO gap) */
563static u64 remove_mmio_gap(u64 sys)
564{
565 return (sys < _4GB) ? sys : sys - (_4GB - top_lm);
566}
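/*
 * Example: with top_lm = 2 GiB, a system address of 5 GiB maps to
 * 5 GiB - (4 GiB - 2 GiB) = 3 GiB into contiguous DRAM.
 */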
567
568/* Squeeze out one address bit, shift upper part down to fill gap */
569static void remove_addr_bit(u64 *addr, int bitidx)
570{
571 u64 mask;
572
573 if (bitidx == -1)
574 return;
575
576 mask = (1ull << bitidx) - 1;
577 *addr = ((*addr >> 1) & ~mask) | (*addr & mask);
578}
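/*
 * Example: remove_addr_bit(&a, 2) on a = 0b101101 keeps bits 0-1 and
 * shifts the upper bits down one, yielding 0b10101 - the interleave
 * bit is squeezed out of the address.
 */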
579
580/* XOR all the bits from addr specified in mask */
581static int hash_by_mask(u64 addr, u64 mask)
582{
583 u64 result = addr & mask;
584
585 result = (result >> 32) ^ result;
586 result = (result >> 16) ^ result;
587 result = (result >> 8) ^ result;
588 result = (result >> 4) ^ result;
589 result = (result >> 2) ^ result;
590 result = (result >> 1) ^ result;
591
592 return (int)result & 1;
593}
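/*
 * Example: with mask = 0b1111000000 and addr = 0b1011000000, the
 * selected bits are 1,0,1,1 whose XOR (parity) is 1, so the address
 * hashes to slice/channel 1.
 */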
594
595/*
596 * First stage decode. Take the system address and figure out which
597 * second stage will deal with it based on interleave modes.
598 */
599static int sys2pmi(const u64 addr, u32 *pmiidx, u64 *pmiaddr, char *msg)
600{
601 u64 contig_addr, contig_base, contig_offset, contig_base_adj;
602 int mot_intlv_bit = two_slices ? MOT_CHAN_INTLV_BIT_2SLC_2CH :
603 MOT_CHAN_INTLV_BIT_1SLC_2CH;
604 int slice_intlv_bit_rm = SELECTOR_DISABLED;
605 int chan_intlv_bit_rm = SELECTOR_DISABLED;
606 /* Determine if address is in the MOT region. */
607 bool mot_hit = in_region(&mot, addr);
608 /* Calculate the number of symmetric regions enabled. */
609 int sym_channels = hweight8(sym_chan_mask);
610
611 /*
612 * The amount we need to shift the asym base can be determined by the
613 * number of enabled symmetric channels.
614 * NOTE: This can only work because symmetric memory is not supposed
615 * to do a 3-way interleave.
616 */
617 int sym_chan_shift = sym_channels >> 1;
618
619 /* Give up if address is out of range, or in MMIO gap */
620 if (addr >= (1ul << PND_MAX_PHYS_BIT) ||
621 (addr >= top_lm && addr < _4GB) || addr >= top_hm) {
622 snprintf(msg, PND2_MSG_SIZE, "Error address 0x%llx is not DRAM", addr);
623 return -EINVAL;
624 }
625
626 /* Get a contiguous memory address (remove the MMIO gap) */
627 contig_addr = remove_mmio_gap(addr);
628
629 if (in_region(&as0, addr)) {
630 *pmiidx = asym0.slice0_asym_channel_select;
631
632 contig_base = remove_mmio_gap(as0.base);
633 contig_offset = contig_addr - contig_base;
634 contig_base_adj = (contig_base >> sym_chan_shift) *
635 ((chash.sym_slice0_channel_enabled >> (*pmiidx & 1)) & 1);
636 contig_addr = contig_offset + ((sym_channels > 0) ? contig_base_adj : 0ull);
637 } else if (in_region(&as1, addr)) {
638 *pmiidx = 2u + asym1.slice1_asym_channel_select;
639
640 contig_base = remove_mmio_gap(as1.base);
641 contig_offset = contig_addr - contig_base;
642 contig_base_adj = (contig_base >> sym_chan_shift) *
643 ((chash.sym_slice1_channel_enabled >> (*pmiidx & 1)) & 1);
644 contig_addr = contig_offset + ((sym_channels > 0) ? contig_base_adj : 0ull);
645 } else if (in_region(&as2, addr) && (asym_2way.asym_2way_intlv_mode == 0x3ul)) {
646 bool channel1;
647
648 mot_intlv_bit = MOT_CHAN_INTLV_BIT_1SLC_2CH;
649 *pmiidx = (asym_2way.asym_2way_intlv_mode & 1) << 1;
650 channel1 = mot_hit ? ((bool)((addr >> mot_intlv_bit) & 1)) :
651 hash_by_mask(contig_addr, chan_hash_mask);
652 *pmiidx |= (u32)channel1;
653
654 contig_base = remove_mmio_gap(as2.base);
655 chan_intlv_bit_rm = mot_hit ? mot_intlv_bit : chan_selector;
656 contig_offset = contig_addr - contig_base;
657 remove_addr_bit(&contig_offset, chan_intlv_bit_rm);
658 contig_addr = (contig_base >> sym_chan_shift) + contig_offset;
659 } else {
660 /* Otherwise we're in normal, boring symmetric mode. */
661 *pmiidx = 0u;
662
663 if (two_slices) {
664 bool slice1;
665
666 if (mot_hit) {
667 slice_intlv_bit_rm = MOT_SLC_INTLV_BIT;
668 slice1 = (addr >> MOT_SLC_INTLV_BIT) & 1;
669 } else {
670 slice_intlv_bit_rm = slice_selector;
671 slice1 = hash_by_mask(addr, slice_hash_mask);
672 }
673
674 *pmiidx = (u32)slice1 << 1;
675 }
676
677 if (two_channels) {
678 bool channel1;
679
680 mot_intlv_bit = two_slices ? MOT_CHAN_INTLV_BIT_2SLC_2CH :
681 MOT_CHAN_INTLV_BIT_1SLC_2CH;
682
683 if (mot_hit) {
684 chan_intlv_bit_rm = mot_intlv_bit;
685 channel1 = (addr >> mot_intlv_bit) & 1;
686 } else {
687 chan_intlv_bit_rm = chan_selector;
688 channel1 = hash_by_mask(contig_addr, chan_hash_mask);
689 }
690
691 *pmiidx |= (u32)channel1;
692 }
693 }
694
695 /* Remove the chan_selector bit first */
696 remove_addr_bit(&contig_addr, chan_intlv_bit_rm);
 697 /* Remove the slice bit (we remove it second because it must be lower) */
698 remove_addr_bit(&contig_addr, slice_intlv_bit_rm);
699 *pmiaddr = contig_addr;
700
701 return 0;
702}
703
704/* Translate PMI address to memory (rank, row, bank, column) */
705#define C(n) (0x10 | (n)) /* column */
706#define B(n) (0x20 | (n)) /* bank */
707#define R(n) (0x40 | (n)) /* row */
708#define RS (0x80) /* rank */
709
710/* addrdec values */
711#define AMAP_1KB 0
712#define AMAP_2KB 1
713#define AMAP_4KB 2
714#define AMAP_RSVD 3
715
716/* dden values */
717#define DEN_4Gb 0
718#define DEN_8Gb 2
719
720/* dwid values */
721#define X8 0
722#define X16 1
723
724static struct dimm_geometry {
725 u8 addrdec;
726 u8 dden;
727 u8 dwid;
728 u8 rowbits, colbits;
729 u16 bits[PMI_ADDRESS_WIDTH];
730} dimms[] = {
731 {
732 .addrdec = AMAP_1KB, .dden = DEN_4Gb, .dwid = X16,
733 .rowbits = 15, .colbits = 10,
734 .bits = {
735 C(2), C(3), C(4), C(5), C(6), B(0), B(1), B(2), R(0),
736 R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), R(9),
737 R(10), C(7), C(8), C(9), R(11), RS, R(12), R(13), R(14),
738 0, 0, 0, 0
739 }
740 },
741 {
742 .addrdec = AMAP_1KB, .dden = DEN_4Gb, .dwid = X8,
743 .rowbits = 16, .colbits = 10,
744 .bits = {
745 C(2), C(3), C(4), C(5), C(6), B(0), B(1), B(2), R(0),
746 R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), R(9),
747 R(10), C(7), C(8), C(9), R(11), RS, R(12), R(13), R(14),
748 R(15), 0, 0, 0
749 }
750 },
751 {
752 .addrdec = AMAP_1KB, .dden = DEN_8Gb, .dwid = X16,
753 .rowbits = 16, .colbits = 10,
754 .bits = {
755 C(2), C(3), C(4), C(5), C(6), B(0), B(1), B(2), R(0),
756 R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), R(9),
757 R(10), C(7), C(8), C(9), R(11), RS, R(12), R(13), R(14),
758 R(15), 0, 0, 0
759 }
760 },
761 {
762 .addrdec = AMAP_1KB, .dden = DEN_8Gb, .dwid = X8,
763 .rowbits = 16, .colbits = 11,
764 .bits = {
765 C(2), C(3), C(4), C(5), C(6), B(0), B(1), B(2), R(0),
766 R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), R(9),
767 R(10), C(7), C(8), C(9), R(11), RS, C(11), R(12), R(13),
768 R(14), R(15), 0, 0
769 }
770 },
771 {
772 .addrdec = AMAP_2KB, .dden = DEN_4Gb, .dwid = X16,
773 .rowbits = 15, .colbits = 10,
774 .bits = {
775 C(2), C(3), C(4), C(5), C(6), C(7), B(0), B(1), B(2),
776 R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8),
777 R(9), R(10), C(8), C(9), R(11), RS, R(12), R(13), R(14),
778 0, 0, 0, 0
779 }
780 },
781 {
782 .addrdec = AMAP_2KB, .dden = DEN_4Gb, .dwid = X8,
783 .rowbits = 16, .colbits = 10,
784 .bits = {
785 C(2), C(3), C(4), C(5), C(6), C(7), B(0), B(1), B(2),
786 R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8),
787 R(9), R(10), C(8), C(9), R(11), RS, R(12), R(13), R(14),
788 R(15), 0, 0, 0
789 }
790 },
791 {
792 .addrdec = AMAP_2KB, .dden = DEN_8Gb, .dwid = X16,
793 .rowbits = 16, .colbits = 10,
794 .bits = {
795 C(2), C(3), C(4), C(5), C(6), C(7), B(0), B(1), B(2),
796 R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8),
797 R(9), R(10), C(8), C(9), R(11), RS, R(12), R(13), R(14),
798 R(15), 0, 0, 0
799 }
800 },
801 {
802 .addrdec = AMAP_2KB, .dden = DEN_8Gb, .dwid = X8,
803 .rowbits = 16, .colbits = 11,
804 .bits = {
805 C(2), C(3), C(4), C(5), C(6), C(7), B(0), B(1), B(2),
806 R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8),
807 R(9), R(10), C(8), C(9), R(11), RS, C(11), R(12), R(13),
808 R(14), R(15), 0, 0
809 }
810 },
811 {
812 .addrdec = AMAP_4KB, .dden = DEN_4Gb, .dwid = X16,
813 .rowbits = 15, .colbits = 10,
814 .bits = {
815 C(2), C(3), C(4), C(5), C(6), C(7), C(8), B(0), B(1),
816 B(2), R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7),
817 R(8), R(9), R(10), C(9), R(11), RS, R(12), R(13), R(14),
818 0, 0, 0, 0
819 }
820 },
821 {
822 .addrdec = AMAP_4KB, .dden = DEN_4Gb, .dwid = X8,
823 .rowbits = 16, .colbits = 10,
824 .bits = {
825 C(2), C(3), C(4), C(5), C(6), C(7), C(8), B(0), B(1),
826 B(2), R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7),
827 R(8), R(9), R(10), C(9), R(11), RS, R(12), R(13), R(14),
828 R(15), 0, 0, 0
829 }
830 },
831 {
832 .addrdec = AMAP_4KB, .dden = DEN_8Gb, .dwid = X16,
833 .rowbits = 16, .colbits = 10,
834 .bits = {
835 C(2), C(3), C(4), C(5), C(6), C(7), C(8), B(0), B(1),
836 B(2), R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7),
837 R(8), R(9), R(10), C(9), R(11), RS, R(12), R(13), R(14),
838 R(15), 0, 0, 0
839 }
840 },
841 {
842 .addrdec = AMAP_4KB, .dden = DEN_8Gb, .dwid = X8,
843 .rowbits = 16, .colbits = 11,
844 .bits = {
845 C(2), C(3), C(4), C(5), C(6), C(7), C(8), B(0), B(1),
846 B(2), R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7),
847 R(8), R(9), R(10), C(9), R(11), RS, C(11), R(12), R(13),
848 R(14), R(15), 0, 0
849 }
850 }
851};
852
853static int bank_hash(u64 pmiaddr, int idx, int shft)
854{
855 int bhash = 0;
856
857 switch (idx) {
858 case 0:
859 bhash ^= ((pmiaddr >> (12 + shft)) ^ (pmiaddr >> (9 + shft))) & 1;
860 break;
861 case 1:
862 bhash ^= (((pmiaddr >> (10 + shft)) ^ (pmiaddr >> (8 + shft))) & 1) << 1;
863 bhash ^= ((pmiaddr >> 22) & 1) << 1;
864 break;
865 case 2:
866 bhash ^= (((pmiaddr >> (13 + shft)) ^ (pmiaddr >> (11 + shft))) & 1) << 2;
867 break;
868 }
869
870 return bhash;
871}
872
873static int rank_hash(u64 pmiaddr)
874{
875 return ((pmiaddr >> 16) ^ (pmiaddr >> 10)) & 1;
876}
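
/*
 * Example (editorial): with AMAP_1KB (shft = 0) and bahen set, bank bit 0 is
 * XORed with PMI address bits 12 and 9, bank bit 1 with bits 10, 8 and 22,
 * and bank bit 2 with bits 13 and 11. With rsien set, the rank select bit is
 * XORed with PMI address bits 16 and 10.
 */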
877
878/* Second stage decode. Compute rank, bank, row & column. */
879static int apl_pmi2mem(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
880 struct dram_addr *daddr, char *msg)
881{
882 struct d_cr_drp0 *cr_drp0 = &drp0[pmiidx];
883 struct pnd2_pvt *pvt = mci->pvt_info;
884 int g = pvt->dimm_geom[pmiidx];
885 struct dimm_geometry *d = &dimms[g];
886 int column = 0, bank = 0, row = 0, rank = 0;
887 int i, idx, type, skiprs = 0;
888
889 for (i = 0; i < PMI_ADDRESS_WIDTH; i++) {
890 int bit = (pmiaddr >> i) & 1;
891
892 if (i + skiprs >= PMI_ADDRESS_WIDTH) {
893 snprintf(msg, PND2_MSG_SIZE, "Bad dimm_geometry[] table\n");
894 return -EINVAL;
895 }
896
897 type = d->bits[i + skiprs] & ~0xf;
898 idx = d->bits[i + skiprs] & 0xf;
899
900 /*
901 * On single rank DIMMs ignore the rank select bit
902 * and shift remainder of "bits[]" down one place.
903 */
904 if (type == RS && (cr_drp0->rken0 + cr_drp0->rken1) == 1) {
905 skiprs = 1;
906 type = d->bits[i + skiprs] & ~0xf;
907 idx = d->bits[i + skiprs] & 0xf;
908 }
909
910 switch (type) {
911 case C(0):
912 column |= (bit << idx);
913 break;
914 case B(0):
915 bank |= (bit << idx);
916 if (cr_drp0->bahen)
917 bank ^= bank_hash(pmiaddr, idx, d->addrdec);
918 break;
919 case R(0):
920 row |= (bit << idx);
921 break;
922 case RS:
923 rank = bit;
924 if (cr_drp0->rsien)
925 rank ^= rank_hash(pmiaddr);
926 break;
927 default:
928 if (bit) {
929 snprintf(msg, PND2_MSG_SIZE, "Bad translation\n");
930 return -EINVAL;
931 }
932 goto done;
933 }
934 }
935
936done:
937 daddr->col = column;
938 daddr->bank = bank;
939 daddr->row = row;
940 daddr->rank = rank;
941 daddr->dimm = 0;
942
943 return 0;
944}
945
946/* Pluck bit "in" from pmiaddr and return value shifted to bit "out" */
947#define dnv_get_bit(pmi, in, out) ((int)(((pmi) >> (in)) & 1u) << (out))
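/* Example: dnv_get_bit(0x40, 6, 2) reads bit 6 (set) and returns 1 << 2 = 4. */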
948
949static int dnv_pmi2mem(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
950 struct dram_addr *daddr, char *msg)
951{
952 /* Rank 0 or 1 */
953 daddr->rank = dnv_get_bit(pmiaddr, dmap[pmiidx].rs0 + 13, 0);
954 /* Rank 2 or 3 */
955 daddr->rank |= dnv_get_bit(pmiaddr, dmap[pmiidx].rs1 + 13, 1);
956
957 /*
958 * Normally ranks 0,1 are DIMM0, and 2,3 are DIMM1, but we
959 * flip them if DIMM1 is larger than DIMM0.
960 */
961 daddr->dimm = (daddr->rank >= 2) ^ drp[pmiidx].dimmflip;
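	/* e.g. rank 2 with dimmflip clear maps to DIMM1; with dimmflip set, DIMM0 */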
962
963 daddr->bank = dnv_get_bit(pmiaddr, dmap[pmiidx].ba0 + 6, 0);
964 daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].ba1 + 6, 1);
965 daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].bg0 + 6, 2);
966 if (dsch.ddr4en)
967 daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].bg1 + 6, 3);
968 if (dmap1[pmiidx].bxor) {
969 if (dsch.ddr4en) {
970 daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 0);
971 daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row7 + 6, 1);
972 if (dsch.chan_width == 0)
973 /* 64/72 bit dram channel width */
974 daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 2);
975 else
976 /* 32/40 bit dram channel width */
977 daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 2);
978 daddr->bank ^= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 3);
979 } else {
980 daddr->bank ^= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 0);
981 daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 1);
982 if (dsch.chan_width == 0)
983 daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 2);
984 else
985 daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 2);
986 }
987 }
988
989 daddr->row = dnv_get_bit(pmiaddr, dmap2[pmiidx].row0 + 6, 0);
990 daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row1 + 6, 1);
991 daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 2);
992 daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row3 + 6, 3);
993 daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row4 + 6, 4);
994 daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row5 + 6, 5);
995 daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 6);
996 daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row7 + 6, 7);
997 daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row8 + 6, 8);
998 daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row9 + 6, 9);
999 daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row10 + 6, 10);
1000 daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row11 + 6, 11);
1001 daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row12 + 6, 12);
1002 daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row13 + 6, 13);
1003 if (dmap4[pmiidx].row14 != 31)
1004 daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row14 + 6, 14);
1005 if (dmap4[pmiidx].row15 != 31)
1006 daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row15 + 6, 15);
1007 if (dmap4[pmiidx].row16 != 31)
1008 daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row16 + 6, 16);
1009 if (dmap4[pmiidx].row17 != 31)
1010 daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row17 + 6, 17);
1011
1012 daddr->col = dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 3);
1013 daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 4);
1014 daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca5 + 6, 5);
1015 daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca6 + 6, 6);
1016 daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca7 + 6, 7);
1017 daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca8 + 6, 8);
1018 daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca9 + 6, 9);
1019 if (!dsch.ddr4en && dmap1[pmiidx].ca11 != 0x3f)
1020 daddr->col |= dnv_get_bit(pmiaddr, dmap1[pmiidx].ca11 + 13, 11);
1021
1022 return 0;
1023}
1024
1025static int check_channel(int ch)
1026{
1027 if (drp0[ch].dramtype != 0) {
1028 pnd2_printk(KERN_INFO, "Unsupported DIMM in channel %d\n", ch);
1029 return 1;
1030 } else if (drp0[ch].eccen == 0) {
1031 pnd2_printk(KERN_INFO, "ECC disabled on channel %d\n", ch);
1032 return 1;
1033 }
1034 return 0;
1035}
1036
1037static int apl_check_ecc_active(void)
1038{
1039 int i, ret = 0;
1040
1041 /* Check dramtype and ECC mode for each present DIMM */
1042 for (i = 0; i < APL_NUM_CHANNELS; i++)
1043 if (chan_mask & BIT(i))
1044 ret += check_channel(i);
1045 return ret ? -EINVAL : 0;
1046}
1047
1048#define DIMMS_PRESENT(d) ((d)->rken0 + (d)->rken1 + (d)->rken2 + (d)->rken3)
1049
1050static int check_unit(int ch)
1051{
1052 struct d_cr_drp *d = &drp[ch];
1053
1054 if (DIMMS_PRESENT(d) && !ecc_ctrl[ch].eccen) {
1055 pnd2_printk(KERN_INFO, "ECC disabled on channel %d\n", ch);
1056 return 1;
1057 }
1058 return 0;
1059}
1060
1061static int dnv_check_ecc_active(void)
1062{
1063 int i, ret = 0;
1064
1065 for (i = 0; i < DNV_NUM_CHANNELS; i++)
1066 ret += check_unit(i);
1067 return ret ? -EINVAL : 0;
1068}
1069
1070static int get_memory_error_data(struct mem_ctl_info *mci, u64 addr,
1071 struct dram_addr *daddr, char *msg)
1072{
1073 u64 pmiaddr;
1074 u32 pmiidx;
1075 int ret;
1076
1077 ret = sys2pmi(addr, &pmiidx, &pmiaddr, msg);
1078 if (ret)
1079 return ret;
1080
1081 pmiaddr >>= ops->pmiaddr_shift;
1082 /* pmi channel idx to dimm channel idx */
1083 pmiidx >>= ops->pmiidx_shift;
1084 daddr->chan = pmiidx;
1085
1086 ret = ops->pmi2mem(mci, pmiaddr, pmiidx, daddr, msg);
1087 if (ret)
1088 return ret;
1089
1090 edac_dbg(0, "SysAddr=%llx PmiAddr=%llx Channel=%d DIMM=%d Rank=%d Bank=%d Row=%d Column=%d\n",
1091 addr, pmiaddr, daddr->chan, daddr->dimm, daddr->rank, daddr->bank, daddr->row, daddr->col);
1092
1093 return 0;
1094}
1095
1096static void pnd2_mce_output_error(struct mem_ctl_info *mci, const struct mce *m,
1097 struct dram_addr *daddr)
1098{
1099 enum hw_event_mc_err_type tp_event;
1100 char *optype, msg[PND2_MSG_SIZE];
1101 bool ripv = m->mcgstatus & MCG_STATUS_RIPV;
1102 bool overflow = m->status & MCI_STATUS_OVER;
1103 bool uc_err = m->status & MCI_STATUS_UC;
1104 bool recov = m->status & MCI_STATUS_S;
1105 u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52);
1106 u32 mscod = GET_BITFIELD(m->status, 16, 31);
1107 u32 errcode = GET_BITFIELD(m->status, 0, 15);
1108 u32 optypenum = GET_BITFIELD(m->status, 4, 6);
1109 int rc;
1110
1111 tp_event = uc_err ? (ripv ? HW_EVENT_ERR_FATAL : HW_EVENT_ERR_UNCORRECTED) :
1112 HW_EVENT_ERR_CORRECTED;
1113
1114 /*
1115	 * According to Table 15-9 of the Intel Architecture spec vol 3A,
1116 * memory errors should fit in this mask:
1117 * 000f 0000 1mmm cccc (binary)
1118 * where:
1119 * f = Correction Report Filtering Bit. If 1, subsequent errors
1120 * won't be shown
1121 * mmm = error type
1122 * cccc = channel
1123 * If the mask doesn't match, report an error to the parsing logic
1124 */
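	/*
	 * Example (editorial): errcode 0x0091 passes the check below
	 * ((0x0091 & 0xef80) == 0x0080) and decodes as a memory read error
	 * (optypenum = 1) on channel 1.
	 */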
1125 if (!((errcode & 0xef80) == 0x80)) {
1126 optype = "Can't parse: it is not a mem";
1127 } else {
1128 switch (optypenum) {
1129 case 0:
1130 optype = "generic undef request error";
1131 break;
1132 case 1:
1133 optype = "memory read error";
1134 break;
1135 case 2:
1136 optype = "memory write error";
1137 break;
1138 case 3:
1139 optype = "addr/cmd error";
1140 break;
1141 case 4:
1142 optype = "memory scrubbing error";
1143 break;
1144 default:
1145 optype = "reserved";
1146 break;
1147 }
1148 }
1149
1150	/* Only decode errors with a valid address (ADDRV) */
1151 if (!(m->status & MCI_STATUS_ADDRV))
1152 return;
1153
1154 rc = get_memory_error_data(mci, m->addr, daddr, msg);
1155 if (rc)
1156 goto address_error;
1157
1158 snprintf(msg, sizeof(msg),
1159 "%s%s err_code:%04x:%04x channel:%d DIMM:%d rank:%d row:%d bank:%d col:%d",
1160 overflow ? " OVERFLOW" : "", (uc_err && recov) ? " recoverable" : "", mscod,
1161 errcode, daddr->chan, daddr->dimm, daddr->rank, daddr->row, daddr->bank, daddr->col);
1162
1163 edac_dbg(0, "%s\n", msg);
1164
1165 /* Call the helper to output message */
1166 edac_mc_handle_error(tp_event, mci, core_err_cnt, m->addr >> PAGE_SHIFT,
1167 m->addr & ~PAGE_MASK, 0, daddr->chan, daddr->dimm, -1, optype, msg);
1168
1169 return;
1170
1171address_error:
1172 edac_mc_handle_error(tp_event, mci, core_err_cnt, 0, 0, 0, -1, -1, -1, msg, "");
1173}
1174
1175static void apl_get_dimm_config(struct mem_ctl_info *mci)
1176{
1177 struct pnd2_pvt *pvt = mci->pvt_info;
1178 struct dimm_info *dimm;
1179 struct d_cr_drp0 *d;
1180 u64 capacity;
1181 int i, g;
1182
1183 for (i = 0; i < APL_NUM_CHANNELS; i++) {
1184 if (!(chan_mask & BIT(i)))
1185 continue;
1186
1187 dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, i, 0, 0);
1188 if (!dimm) {
1189 edac_dbg(0, "No allocated DIMM for channel %d\n", i);
1190 continue;
1191 }
1192
1193 d = &drp0[i];
1194 for (g = 0; g < ARRAY_SIZE(dimms); g++)
1195 if (dimms[g].addrdec == d->addrdec &&
1196 dimms[g].dden == d->dden &&
1197 dimms[g].dwid == d->dwid)
1198 break;
1199
1200 if (g == ARRAY_SIZE(dimms)) {
1201 edac_dbg(0, "Channel %d: unrecognized DIMM\n", i);
1202 continue;
1203 }
1204
1205 pvt->dimm_geom[i] = g;
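		/*
		 * capacity appears to count 8-byte locations: ranks x 8 banks
		 * x rows x columns; ">> (20 - 3)" then converts to MiB, since
		 * each location holds 2^3 bytes.
		 */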
1206 capacity = (d->rken0 + d->rken1) * 8 * (1ul << dimms[g].rowbits) *
1207 (1ul << dimms[g].colbits);
1208 edac_dbg(0, "Channel %d: %lld MByte DIMM\n", i, capacity >> (20 - 3));
1209 dimm->nr_pages = MiB_TO_PAGES(capacity >> (20 - 3));
1210 dimm->grain = 32;
1211 dimm->dtype = (d->dwid == 0) ? DEV_X8 : DEV_X16;
1212 dimm->mtype = MEM_DDR3;
1213 dimm->edac_mode = EDAC_SECDED;
1214 snprintf(dimm->label, sizeof(dimm->label), "Slice#%d_Chan#%d", i / 2, i % 2);
1215 }
1216}
1217
1218static const int dnv_dtypes[] = {
1219 DEV_X8, DEV_X4, DEV_X16, DEV_UNKNOWN
1220};
1221
1222static void dnv_get_dimm_config(struct mem_ctl_info *mci)
1223{
1224 int i, j, ranks_of_dimm[DNV_MAX_DIMMS], banks, rowbits, colbits, memtype;
1225 struct dimm_info *dimm;
1226 struct d_cr_drp *d;
1227 u64 capacity;
1228
1229 if (dsch.ddr4en) {
1230 memtype = MEM_DDR4;
1231 banks = 16;
1232 colbits = 10;
1233 } else {
1234 memtype = MEM_DDR3;
1235 banks = 8;
1236 }
1237
1238 for (i = 0; i < DNV_NUM_CHANNELS; i++) {
1239 if (dmap4[i].row14 == 31)
1240 rowbits = 14;
1241 else if (dmap4[i].row15 == 31)
1242 rowbits = 15;
1243 else if (dmap4[i].row16 == 31)
1244 rowbits = 16;
1245 else if (dmap4[i].row17 == 31)
1246 rowbits = 17;
1247 else
1248 rowbits = 18;
1249
1250 if (memtype == MEM_DDR3) {
1251 if (dmap1[i].ca11 != 0x3f)
1252 colbits = 12;
1253 else
1254 colbits = 10;
1255 }
1256
1257 d = &drp[i];
1258 /* DIMM0 is present if rank0 and/or rank1 is enabled */
1259 ranks_of_dimm[0] = d->rken0 + d->rken1;
1260 /* DIMM1 is present if rank2 and/or rank3 is enabled */
1261 ranks_of_dimm[1] = d->rken2 + d->rken3;
1262
1263 for (j = 0; j < DNV_MAX_DIMMS; j++) {
1264 if (!ranks_of_dimm[j])
1265 continue;
1266
1267 dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, i, j, 0);
1268 if (!dimm) {
1269 edac_dbg(0, "No allocated DIMM for channel %d DIMM %d\n", i, j);
1270 continue;
1271 }
1272
1273 capacity = ranks_of_dimm[j] * banks * (1ul << rowbits) * (1ul << colbits);
1274 edac_dbg(0, "Channel %d DIMM %d: %lld MByte DIMM\n", i, j, capacity >> (20 - 3));
1275 dimm->nr_pages = MiB_TO_PAGES(capacity >> (20 - 3));
1276 dimm->grain = 32;
1277			dimm->dtype = dnv_dtypes[j ? d->dimmdwid1 : d->dimmdwid0]; /* dimmdwid<j> is DIMM j's width */
1278 dimm->mtype = memtype;
1279 dimm->edac_mode = EDAC_SECDED;
1280 snprintf(dimm->label, sizeof(dimm->label), "Chan#%d_DIMM#%d", i, j);
1281 }
1282 }
1283}
1284
1285static int pnd2_register_mci(struct mem_ctl_info **ppmci)
1286{
1287 struct edac_mc_layer layers[2];
1288 struct mem_ctl_info *mci;
1289 struct pnd2_pvt *pvt;
1290 int rc;
1291
1292 rc = ops->check_ecc();
1293 if (rc < 0)
1294 return rc;
1295
1296 /* Allocate a new MC control structure */
1297 layers[0].type = EDAC_MC_LAYER_CHANNEL;
1298 layers[0].size = ops->channels;
1299 layers[0].is_virt_csrow = false;
1300 layers[1].type = EDAC_MC_LAYER_SLOT;
1301 layers[1].size = ops->dimms_per_channel;
1302 layers[1].is_virt_csrow = true;
1303 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
1304 if (!mci)
1305 return -ENOMEM;
1306
1307 pvt = mci->pvt_info;
1308 memset(pvt, 0, sizeof(*pvt));
1309
1310 mci->mod_name = "pnd2_edac.c";
1311 mci->dev_name = ops->name;
1312 mci->ctl_name = "Pondicherry2";
1313
1314 /* Get dimm basic config and the memory layout */
1315 ops->get_dimm_config(mci);
1316
1317 if (edac_mc_add_mc(mci)) {
1318 edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
1319 edac_mc_free(mci);
1320 return -EINVAL;
1321 }
1322
1323 *ppmci = mci;
1324
1325 return 0;
1326}
1327
1328static void pnd2_unregister_mci(struct mem_ctl_info *mci)
1329{
1330 if (unlikely(!mci || !mci->pvt_info)) {
1331 pnd2_printk(KERN_ERR, "Couldn't find mci handler\n");
1332 return;
1333 }
1334
1335 /* Remove MC sysfs nodes */
1336 edac_mc_del_mc(NULL);
1337 edac_dbg(1, "%s: free mci struct\n", mci->ctl_name);
1338 edac_mc_free(mci);
1339}
1340
1341/*
1342 * Callback function registered with core kernel mce code.
1343 * Called once for each logged error.
1344 */
1345static int pnd2_mce_check_error(struct notifier_block *nb, unsigned long val, void *data)
1346{
1347 struct mce *mce = (struct mce *)data;
1348 struct mem_ctl_info *mci;
1349 struct dram_addr daddr;
1350 char *type;
1351
1352 if (get_edac_report_status() == EDAC_REPORTING_DISABLED)
1353 return NOTIFY_DONE;
1354
1355 mci = pnd2_mci;
1356 if (!mci)
1357 return NOTIFY_DONE;
1358
1359 /*
1360 * Just let mcelog handle it if the error is
1361 * outside the memory controller. A memory error
1362	 * is indicated by bit 7 = 1 and bits 8-11 and 13-15 = 0.
1363	 * Bit 12 has a special meaning.
1364 */
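	/*
	 * e.g. status low bits 0x0090: (0x0090 & 0xefff) >> 7 == 1, so it is
	 * decoded here; 0x0010 (a cache error) gives 0 and goes to mcelog.
	 */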
1365 if ((mce->status & 0xefff) >> 7 != 1)
1366 return NOTIFY_DONE;
1367
1368 if (mce->mcgstatus & MCG_STATUS_MCIP)
1369 type = "Exception";
1370 else
1371 type = "Event";
1372
1373 pnd2_mc_printk(mci, KERN_INFO, "HANDLING MCE MEMORY ERROR\n");
1374 pnd2_mc_printk(mci, KERN_INFO, "CPU %u: Machine Check %s: %llx Bank %u: %llx\n",
1375 mce->extcpu, type, mce->mcgstatus, mce->bank, mce->status);
1376 pnd2_mc_printk(mci, KERN_INFO, "TSC %llx ", mce->tsc);
1377 pnd2_mc_printk(mci, KERN_INFO, "ADDR %llx ", mce->addr);
1378 pnd2_mc_printk(mci, KERN_INFO, "MISC %llx ", mce->misc);
1379 pnd2_mc_printk(mci, KERN_INFO, "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x\n",
1380 mce->cpuvendor, mce->cpuid, mce->time, mce->socketid, mce->apicid);
1381
1382 pnd2_mce_output_error(mci, mce, &daddr);
1383
1384	/* Advise mcelog that the error was handled */
1385 return NOTIFY_STOP;
1386}
1387
1388static struct notifier_block pnd2_mce_dec = {
1389 .notifier_call = pnd2_mce_check_error,
1390};
1391
1392#ifdef CONFIG_EDAC_DEBUG
1393/*
1394 * Write an address to this file to exercise the address decode
1395 * logic in this driver.
1396 */
1397static u64 pnd2_fake_addr;
1398#define PND2_BLOB_SIZE 1024
1399static char pnd2_result[PND2_BLOB_SIZE];
1400static struct dentry *pnd2_test;
1401static struct debugfs_blob_wrapper pnd2_blob = {
1402 .data = pnd2_result,
1403 .size = 0
1404};
1405
1406static int debugfs_u64_set(void *data, u64 val)
1407{
1408 struct dram_addr daddr;
1409 struct mce m;
1410
1411 *(u64 *)data = val;
1412 m.mcgstatus = 0;
1413 /* ADDRV + MemRd + Unknown channel */
1414 m.status = MCI_STATUS_ADDRV + 0x9f;
1415 m.addr = val;
1416 pnd2_mce_output_error(pnd2_mci, &m, &daddr);
1417 snprintf(pnd2_blob.data, PND2_BLOB_SIZE,
1418 "SysAddr=%llx Channel=%d DIMM=%d Rank=%d Bank=%d Row=%d Column=%d\n",
1419 m.addr, daddr.chan, daddr.dimm, daddr.rank, daddr.bank, daddr.row, daddr.col);
1420 pnd2_blob.size = strlen(pnd2_blob.data);
1421
1422 return 0;
1423}
1424DEFINE_DEBUGFS_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n");
1425
1426static void setup_pnd2_debug(void)
1427{
1428 pnd2_test = edac_debugfs_create_dir("pnd2_test");
1429 edac_debugfs_create_file("pnd2_debug_addr", 0200, pnd2_test,
1430 &pnd2_fake_addr, &fops_u64_wo);
1431 debugfs_create_blob("pnd2_debug_results", 0400, pnd2_test, &pnd2_blob);
1432}
1433
1434static void teardown_pnd2_debug(void)
1435{
1436 debugfs_remove_recursive(pnd2_test);
1437}
1438#else
1439static void setup_pnd2_debug(void) {}
1440static void teardown_pnd2_debug(void) {}
1441#endif /* CONFIG_EDAC_DEBUG */
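
/*
 * Usage sketch for the debugfs hook above (editorial; paths assume debugfs
 * is mounted at /sys/kernel/debug and that the EDAC debugfs root is "edac"):
 *
 *	# echo 0x12345678 > /sys/kernel/debug/edac/pnd2_test/pnd2_debug_addr
 *	# cat /sys/kernel/debug/edac/pnd2_test/pnd2_debug_results
 *	SysAddr=12345678 Channel=... DIMM=... Rank=... Bank=... Row=... Column=...
 */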
1442
1443
1444static int pnd2_probe(void)
1445{
1446 int rc;
1447
1448 edac_dbg(2, "\n");
1449 rc = get_registers();
1450 if (rc)
1451 return rc;
1452
1453 return pnd2_register_mci(&pnd2_mci);
1454}
1455
1456static void pnd2_remove(void)
1457{
1458 edac_dbg(0, "\n");
1459 pnd2_unregister_mci(pnd2_mci);
1460}
1461
1462static struct dunit_ops apl_ops = {
1463 .name = "pnd2/apl",
1464 .type = APL,
1465 .pmiaddr_shift = LOG2_PMI_ADDR_GRANULARITY,
1466 .pmiidx_shift = 0,
1467 .channels = APL_NUM_CHANNELS,
1468 .dimms_per_channel = 1,
1469 .rd_reg = apl_rd_reg,
1470 .get_registers = apl_get_registers,
1471 .check_ecc = apl_check_ecc_active,
1472 .mk_region = apl_mk_region,
1473 .get_dimm_config = apl_get_dimm_config,
1474 .pmi2mem = apl_pmi2mem,
1475};
1476
1477static struct dunit_ops dnv_ops = {
1478 .name = "pnd2/dnv",
1479 .type = DNV,
1480 .pmiaddr_shift = 0,
1481 .pmiidx_shift = 1,
1482 .channels = DNV_NUM_CHANNELS,
1483 .dimms_per_channel = 2,
1484 .rd_reg = dnv_rd_reg,
1485 .get_registers = dnv_get_registers,
1486 .check_ecc = dnv_check_ecc_active,
1487 .mk_region = dnv_mk_region,
1488 .get_dimm_config = dnv_get_dimm_config,
1489 .pmi2mem = dnv_pmi2mem,
1490};
1491
1492static const struct x86_cpu_id pnd2_cpuids[] = {
1493 { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT, 0, (kernel_ulong_t)&apl_ops },
1494 { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_DENVERTON, 0, (kernel_ulong_t)&dnv_ops },
1495 { }
1496};
1497MODULE_DEVICE_TABLE(x86cpu, pnd2_cpuids);
1498
1499static int __init pnd2_init(void)
1500{
1501 const struct x86_cpu_id *id;
1502 int rc;
1503
1504 edac_dbg(2, "\n");
1505
1506 id = x86_match_cpu(pnd2_cpuids);
1507 if (!id)
1508 return -ENODEV;
1509
1510 ops = (struct dunit_ops *)id->driver_data;
1511
1512 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
1513 opstate_init();
1514
1515 rc = pnd2_probe();
1516 if (rc < 0) {
1517 pnd2_printk(KERN_ERR, "Failed to register device with error %d.\n", rc);
1518 return rc;
1519 }
1520
1521 if (!pnd2_mci)
1522 return -ENODEV;
1523
1524 mce_register_decode_chain(&pnd2_mce_dec);
1525 setup_pnd2_debug();
1526
1527 return 0;
1528}
1529
1530static void __exit pnd2_exit(void)
1531{
1532 edac_dbg(2, "\n");
1533 teardown_pnd2_debug();
1534 mce_unregister_decode_chain(&pnd2_mce_dec);
1535 pnd2_remove();
1536}
1537
1538module_init(pnd2_init);
1539module_exit(pnd2_exit);
1540
1541module_param(edac_op_state, int, 0444);
1542MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
1543
1544MODULE_LICENSE("GPL v2");
1545MODULE_AUTHOR("Tony Luck");
1546MODULE_DESCRIPTION("MC Driver for Intel SoC using Pondicherry memory controller");
diff --git a/drivers/edac/pnd2_edac.h b/drivers/edac/pnd2_edac.h
new file mode 100644
index 000000000000..61b6e79492bb
--- /dev/null
+++ b/drivers/edac/pnd2_edac.h
@@ -0,0 +1,301 @@
1/*
2 * Register bitfield descriptions for Pondicherry2 memory controller.
3 *
4 * Copyright (c) 2016, Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 */
15
16#ifndef _PND2_REGS_H
17#define _PND2_REGS_H
18
19struct b_cr_touud_lo_pci {
20 u32 lock : 1;
21 u32 reserved_1 : 19;
22 u32 touud : 12;
23};
24
25#define b_cr_touud_lo_pci_port 0x4c
26#define b_cr_touud_lo_pci_offset 0xa8
27#define b_cr_touud_lo_pci_r_opcode 0x04
28
29struct b_cr_touud_hi_pci {
30 u32 touud : 7;
31 u32 reserved_0 : 25;
32};
33
34#define b_cr_touud_hi_pci_port 0x4c
35#define b_cr_touud_hi_pci_offset 0xac
36#define b_cr_touud_hi_pci_r_opcode 0x04
37
38struct b_cr_tolud_pci {
39 u32 lock : 1;
40 u32 reserved_0 : 19;
41 u32 tolud : 12;
42};
43
44#define b_cr_tolud_pci_port 0x4c
45#define b_cr_tolud_pci_offset 0xbc
46#define b_cr_tolud_pci_r_opcode 0x04
47
48struct b_cr_mchbar_lo_pci {
49 u32 enable : 1;
50 u32 pad_3_1 : 3;
51 u32 pad_14_4: 11;
52 u32 base: 17;
53};
54
55struct b_cr_mchbar_hi_pci {
56 u32 base : 7;
57 u32 pad_31_7 : 25;
58};
59
60/* Symmetric region */
61struct b_cr_slice_channel_hash {
62 u64 slice_1_disabled : 1;
63 u64 hvm_mode : 1;
64 u64 interleave_mode : 2;
65 u64 slice_0_mem_disabled : 1;
66 u64 reserved_0 : 1;
67 u64 slice_hash_mask : 14;
68 u64 reserved_1 : 11;
69 u64 enable_pmi_dual_data_mode : 1;
70 u64 ch_1_disabled : 1;
71 u64 reserved_2 : 1;
72 u64 sym_slice0_channel_enabled : 2;
73 u64 sym_slice1_channel_enabled : 2;
74 u64 ch_hash_mask : 14;
75 u64 reserved_3 : 11;
76 u64 lock : 1;
77};
78
79#define b_cr_slice_channel_hash_port 0x4c
80#define b_cr_slice_channel_hash_offset 0x4c58
81#define b_cr_slice_channel_hash_r_opcode 0x06
82
83struct b_cr_mot_out_base_mchbar {
84 u32 reserved_0 : 14;
85 u32 mot_out_base : 15;
86 u32 reserved_1 : 1;
87 u32 tr_en : 1;
88 u32 imr_en : 1;
89};
90
91#define b_cr_mot_out_base_mchbar_port 0x4c
92#define b_cr_mot_out_base_mchbar_offset 0x6af0
93#define b_cr_mot_out_base_mchbar_r_opcode 0x00
94
95struct b_cr_mot_out_mask_mchbar {
96 u32 reserved_0 : 14;
97 u32 mot_out_mask : 15;
98 u32 reserved_1 : 1;
99 u32 ia_iwb_en : 1;
100 u32 gt_iwb_en : 1;
101};
102
103#define b_cr_mot_out_mask_mchbar_port 0x4c
104#define b_cr_mot_out_mask_mchbar_offset 0x6af4
105#define b_cr_mot_out_mask_mchbar_r_opcode 0x00
106
107struct b_cr_asym_mem_region0_mchbar {
108 u32 pad : 4;
109 u32 slice0_asym_base : 11;
110 u32 pad_18_15 : 4;
111 u32 slice0_asym_limit : 11;
112 u32 slice0_asym_channel_select : 1;
113 u32 slice0_asym_enable : 1;
114};
115
116#define b_cr_asym_mem_region0_mchbar_port 0x4c
117#define b_cr_asym_mem_region0_mchbar_offset 0x6e40
118#define b_cr_asym_mem_region0_mchbar_r_opcode 0x00
119
120struct b_cr_asym_mem_region1_mchbar {
121 u32 pad : 4;
122 u32 slice1_asym_base : 11;
123 u32 pad_18_15 : 4;
124 u32 slice1_asym_limit : 11;
125 u32 slice1_asym_channel_select : 1;
126 u32 slice1_asym_enable : 1;
127};
128
129#define b_cr_asym_mem_region1_mchbar_port 0x4c
130#define b_cr_asym_mem_region1_mchbar_offset 0x6e44
131#define b_cr_asym_mem_region1_mchbar_r_opcode 0x00
132
133/* Some bit fields moved in the above two structs on Denverton */
134struct b_cr_asym_mem_region_denverton {
135 u32 pad : 4;
136 u32 slice_asym_base : 8;
137 u32 pad_19_12 : 8;
138 u32 slice_asym_limit : 8;
139 u32 pad_28_30 : 3;
140 u32 slice_asym_enable : 1;
141};
142
143struct b_cr_asym_2way_mem_region_mchbar {
144 u32 pad : 2;
145 u32 asym_2way_intlv_mode : 2;
146 u32 asym_2way_base : 11;
147 u32 pad_16_15 : 2;
148 u32 asym_2way_limit : 11;
149 u32 pad_30_28 : 3;
150 u32 asym_2way_interleave_enable : 1;
151};
152
153#define b_cr_asym_2way_mem_region_mchbar_port 0x4c
154#define b_cr_asym_2way_mem_region_mchbar_offset 0x6e50
155#define b_cr_asym_2way_mem_region_mchbar_r_opcode 0x00
156
157/* Apollo Lake d-unit */
158
159struct d_cr_drp0 {
160 u32 rken0 : 1;
161 u32 rken1 : 1;
162 u32 ddmen : 1;
163 u32 rsvd3 : 1;
164 u32 dwid : 2;
165 u32 dden : 3;
166 u32 rsvd13_9 : 5;
167 u32 rsien : 1;
168 u32 bahen : 1;
169 u32 rsvd18_16 : 3;
170 u32 caswizzle : 2;
171 u32 eccen : 1;
172 u32 dramtype : 3;
173 u32 blmode : 3;
174 u32 addrdec : 2;
175 u32 dramdevice_pr : 2;
176};
177
178#define d_cr_drp0_offset 0x1400
179#define d_cr_drp0_r_opcode 0x00
180
181/* Denverton d-unit */
182
183struct d_cr_dsch {
184 u32 ch0en : 1;
185 u32 ch1en : 1;
186 u32 ddr4en : 1;
187 u32 coldwake : 1;
188 u32 newbypdis : 1;
189 u32 chan_width : 1;
190 u32 rsvd6_6 : 1;
191 u32 ooodis : 1;
192 u32 rsvd18_8 : 11;
193 u32 ic : 1;
194 u32 rsvd31_20 : 12;
195};
196
197#define d_cr_dsch_port 0x16
198#define d_cr_dsch_offset 0x0
199#define d_cr_dsch_r_opcode 0x0
200
201struct d_cr_ecc_ctrl {
202 u32 eccen : 1;
203 u32 rsvd31_1 : 31;
204};
205
206#define d_cr_ecc_ctrl_offset 0x180
207#define d_cr_ecc_ctrl_r_opcode 0x0
208
209struct d_cr_drp {
210 u32 rken0 : 1;
211 u32 rken1 : 1;
212 u32 rken2 : 1;
213 u32 rken3 : 1;
214 u32 dimmdwid0 : 2;
215 u32 dimmdden0 : 2;
216 u32 dimmdwid1 : 2;
217 u32 dimmdden1 : 2;
218 u32 rsvd15_12 : 4;
219 u32 dimmflip : 1;
220 u32 rsvd31_17 : 15;
221};
222
223#define d_cr_drp_offset 0x158
224#define d_cr_drp_r_opcode 0x0
225
226struct d_cr_dmap {
227 u32 ba0 : 5;
228 u32 ba1 : 5;
229 u32 bg0 : 5; /* if ddr3, ba2 = bg0 */
230 u32 bg1 : 5; /* if ddr3, ba3 = bg1 */
231 u32 rs0 : 5;
232 u32 rs1 : 5;
233 u32 rsvd : 2;
234};
235
236#define d_cr_dmap_offset 0x174
237#define d_cr_dmap_r_opcode 0x0
238
239struct d_cr_dmap1 {
240 u32 ca11 : 6;
241 u32 bxor : 1;
242 u32 rsvd : 25;
243};
244
245#define d_cr_dmap1_offset 0xb4
246#define d_cr_dmap1_r_opcode 0x0
247
248struct d_cr_dmap2 {
249 u32 row0 : 5;
250 u32 row1 : 5;
251 u32 row2 : 5;
252 u32 row3 : 5;
253 u32 row4 : 5;
254 u32 row5 : 5;
255 u32 rsvd : 2;
256};
257
258#define d_cr_dmap2_offset 0x148
259#define d_cr_dmap2_r_opcode 0x0
260
261struct d_cr_dmap3 {
262 u32 row6 : 5;
263 u32 row7 : 5;
264 u32 row8 : 5;
265 u32 row9 : 5;
266 u32 row10 : 5;
267 u32 row11 : 5;
268 u32 rsvd : 2;
269};
270
271#define d_cr_dmap3_offset 0x14c
272#define d_cr_dmap3_r_opcode 0x0
273
274struct d_cr_dmap4 {
275 u32 row12 : 5;
276 u32 row13 : 5;
277 u32 row14 : 5;
278 u32 row15 : 5;
279 u32 row16 : 5;
280 u32 row17 : 5;
281 u32 rsvd : 2;
282};
283
284#define d_cr_dmap4_offset 0x150
285#define d_cr_dmap4_r_opcode 0x0
286
287struct d_cr_dmap5 {
288 u32 ca3 : 4;
289 u32 ca4 : 4;
290 u32 ca5 : 4;
291 u32 ca6 : 4;
292 u32 ca7 : 4;
293 u32 ca8 : 4;
294 u32 ca9 : 4;
295 u32 rsvd : 4;
296};
297
298#define d_cr_dmap5_offset 0x154
299#define d_cr_dmap5_r_opcode 0x0
300
301#endif /* _PND2_REGS_H */
diff --git a/drivers/edac/xgene_edac.c b/drivers/edac/xgene_edac.c
index 6c270d9d304a..669246056812 100644
--- a/drivers/edac/xgene_edac.c
+++ b/drivers/edac/xgene_edac.c
@@ -1596,7 +1596,7 @@ static void xgene_edac_pa_report(struct edac_device_ctl_info *edac_dev)
 	reg = readl(ctx->dev_csr + IOBPATRANSERRINTSTS);
 	if (!reg)
 		goto chk_iob_axi0;
-	dev_err(edac_dev->dev, "IOB procesing agent (PA) transaction error\n");
+	dev_err(edac_dev->dev, "IOB processing agent (PA) transaction error\n");
 	if (reg & IOBPA_RDATA_CORRUPT_MASK)
 		dev_err(edac_dev->dev, "IOB PA read data RAM error\n");
 	if (reg & IOBPA_M_RDATA_CORRUPT_MASK)
diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig
index 96bbae579c0b..fc09c76248b4 100644
--- a/drivers/extcon/Kconfig
+++ b/drivers/extcon/Kconfig
@@ -44,7 +44,7 @@ config EXTCON_GPIO
 
 config EXTCON_INTEL_INT3496
 	tristate "Intel INT3496 ACPI device extcon driver"
-	depends on GPIOLIB && ACPI
+	depends on GPIOLIB && ACPI && (X86 || COMPILE_TEST)
 	help
 	  Say Y here to enable extcon support for USB OTG ports controlled by
 	  an Intel INT3496 ACPI device.
diff --git a/drivers/extcon/extcon-intel-int3496.c b/drivers/extcon/extcon-intel-int3496.c
index a3131b036de6..9d17984bbbd4 100644
--- a/drivers/extcon/extcon-intel-int3496.c
+++ b/drivers/extcon/extcon-intel-int3496.c
@@ -45,6 +45,17 @@ static const unsigned int int3496_cable[] = {
 	EXTCON_NONE,
 };
 
+static const struct acpi_gpio_params id_gpios = { INT3496_GPIO_USB_ID, 0, false };
+static const struct acpi_gpio_params vbus_gpios = { INT3496_GPIO_VBUS_EN, 0, false };
+static const struct acpi_gpio_params mux_gpios = { INT3496_GPIO_USB_MUX, 0, false };
+
+static const struct acpi_gpio_mapping acpi_int3496_default_gpios[] = {
+	{ "id-gpios", &id_gpios, 1 },
+	{ "vbus-gpios", &vbus_gpios, 1 },
+	{ "mux-gpios", &mux_gpios, 1 },
+	{ },
+};
+
 static void int3496_do_usb_id(struct work_struct *work)
 {
 	struct int3496_data *data =
@@ -83,6 +94,13 @@ static int int3496_probe(struct platform_device *pdev)
 	struct int3496_data *data;
 	int ret;
 
+	ret = acpi_dev_add_driver_gpios(ACPI_COMPANION(dev),
+					acpi_int3496_default_gpios);
+	if (ret) {
+		dev_err(dev, "can't add GPIO ACPI mapping\n");
+		return ret;
+	}
+
 	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
 	if (!data)
 		return -ENOMEM;
@@ -90,30 +108,27 @@ static int int3496_probe(struct platform_device *pdev)
 	data->dev = dev;
 	INIT_DELAYED_WORK(&data->work, int3496_do_usb_id);
 
-	data->gpio_usb_id = devm_gpiod_get_index(dev, "id",
-						 INT3496_GPIO_USB_ID,
-						 GPIOD_IN);
+	data->gpio_usb_id = devm_gpiod_get(dev, "id", GPIOD_IN);
 	if (IS_ERR(data->gpio_usb_id)) {
 		ret = PTR_ERR(data->gpio_usb_id);
 		dev_err(dev, "can't request USB ID GPIO: %d\n", ret);
 		return ret;
+	} else if (gpiod_get_direction(data->gpio_usb_id) != GPIOF_DIR_IN) {
+		dev_warn(dev, FW_BUG "USB ID GPIO not in input mode, fixing\n");
+		gpiod_direction_input(data->gpio_usb_id);
 	}
 
 	data->usb_id_irq = gpiod_to_irq(data->gpio_usb_id);
-	if (data->usb_id_irq <= 0) {
+	if (data->usb_id_irq < 0) {
 		dev_err(dev, "can't get USB ID IRQ: %d\n", data->usb_id_irq);
-		return -EINVAL;
+		return data->usb_id_irq;
 	}
 
-	data->gpio_vbus_en = devm_gpiod_get_index(dev, "vbus en",
-						  INT3496_GPIO_VBUS_EN,
-						  GPIOD_ASIS);
+	data->gpio_vbus_en = devm_gpiod_get(dev, "vbus", GPIOD_ASIS);
 	if (IS_ERR(data->gpio_vbus_en))
 		dev_info(dev, "can't request VBUS EN GPIO\n");
 
-	data->gpio_usb_mux = devm_gpiod_get_index(dev, "usb mux",
-						  INT3496_GPIO_USB_MUX,
-						  GPIOD_ASIS);
+	data->gpio_usb_mux = devm_gpiod_get(dev, "mux", GPIOD_ASIS);
 	if (IS_ERR(data->gpio_usb_mux))
 		dev_info(dev, "can't request USB MUX GPIO\n");
 
@@ -154,6 +169,8 @@ static int int3496_remove(struct platform_device *pdev)
 	devm_free_irq(&pdev->dev, data->usb_id_irq, data);
 	cancel_delayed_work_sync(&data->work);
 
+	acpi_dev_remove_driver_gpios(ACPI_COMPANION(&pdev->dev));
+
 	return 0;
 }
 
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index e7d404059b73..b372aad3b449 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -389,7 +389,6 @@ int __init efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
 			return 0;
 		}
 	}
-	pr_err_once("requested map not found.\n");
 	return -ENOENT;
 }
 
diff --git a/drivers/firmware/efi/esrt.c b/drivers/firmware/efi/esrt.c
index 08b026864d4e..8554d7aec31c 100644
--- a/drivers/firmware/efi/esrt.c
+++ b/drivers/firmware/efi/esrt.c
@@ -254,7 +254,7 @@ void __init efi_esrt_init(void)
 
 	rc = efi_mem_desc_lookup(efi.esrt, &md);
 	if (rc < 0) {
-		pr_err("ESRT header is not in the memory map.\n");
+		pr_warn("ESRT header is not in the memory map.\n");
 		return;
 	}
 
diff --git a/drivers/gpio/gpio-altera-a10sr.c b/drivers/gpio/gpio-altera-a10sr.c
index 9e1a138fed53..16a8951b2bed 100644
--- a/drivers/gpio/gpio-altera-a10sr.c
+++ b/drivers/gpio/gpio-altera-a10sr.c
@@ -96,7 +96,7 @@ static int altr_a10sr_gpio_probe(struct platform_device *pdev)
 	gpio->regmap = a10sr->regmap;
 
 	gpio->gp = altr_a10sr_gc;
-
+	gpio->gp.parent = pdev->dev.parent;
 	gpio->gp.of_node = pdev->dev.of_node;
 
 	ret = devm_gpiochip_add_data(&pdev->dev, &gpio->gp, gpio);
diff --git a/drivers/gpio/gpio-altera.c b/drivers/gpio/gpio-altera.c
index 5bddbd507ca9..3fe6a21e05a5 100644
--- a/drivers/gpio/gpio-altera.c
+++ b/drivers/gpio/gpio-altera.c
@@ -90,21 +90,18 @@ static int altera_gpio_irq_set_type(struct irq_data *d,
 
 	altera_gc = gpiochip_get_data(irq_data_get_irq_chip_data(d));
 
-	if (type == IRQ_TYPE_NONE)
+	if (type == IRQ_TYPE_NONE) {
+		irq_set_handler_locked(d, handle_bad_irq);
 		return 0;
-	if (type == IRQ_TYPE_LEVEL_HIGH &&
-	    altera_gc->interrupt_trigger == IRQ_TYPE_LEVEL_HIGH)
-		return 0;
-	if (type == IRQ_TYPE_EDGE_RISING &&
-	    altera_gc->interrupt_trigger == IRQ_TYPE_EDGE_RISING)
-		return 0;
-	if (type == IRQ_TYPE_EDGE_FALLING &&
-	    altera_gc->interrupt_trigger == IRQ_TYPE_EDGE_FALLING)
-		return 0;
-	if (type == IRQ_TYPE_EDGE_BOTH &&
-	    altera_gc->interrupt_trigger == IRQ_TYPE_EDGE_BOTH)
+	}
+	if (type == altera_gc->interrupt_trigger) {
+		if (type == IRQ_TYPE_LEVEL_HIGH)
+			irq_set_handler_locked(d, handle_level_irq);
+		else
+			irq_set_handler_locked(d, handle_simple_irq);
 		return 0;
-
+	}
+	irq_set_handler_locked(d, handle_bad_irq);
 	return -EINVAL;
 }
 
@@ -230,7 +227,6 @@ static void altera_gpio_irq_edge_handler(struct irq_desc *desc)
 	chained_irq_exit(chip, desc);
 }
 
-
 static void altera_gpio_irq_leveL_high_handler(struct irq_desc *desc)
 {
 	struct altera_gpio_chip *altera_gc;
@@ -310,7 +306,7 @@ static int altera_gpio_probe(struct platform_device *pdev)
 	altera_gc->interrupt_trigger = reg;
 
 	ret = gpiochip_irqchip_add(&altera_gc->mmchip.gc, &altera_irq_chip, 0,
-				   handle_simple_irq, IRQ_TYPE_NONE);
+				   handle_bad_irq, IRQ_TYPE_NONE);
 
 	if (ret) {
 		dev_err(&pdev->dev, "could not add irqchip\n");
diff --git a/drivers/gpio/gpio-mcp23s08.c b/drivers/gpio/gpio-mcp23s08.c
index bdb692345428..2a57d024481d 100644
--- a/drivers/gpio/gpio-mcp23s08.c
+++ b/drivers/gpio/gpio-mcp23s08.c
@@ -270,8 +270,10 @@ mcp23s08_direction_output(struct gpio_chip *chip, unsigned offset, int value)
 static irqreturn_t mcp23s08_irq(int irq, void *data)
 {
 	struct mcp23s08 *mcp = data;
-	int intcap, intf, i;
+	int intcap, intf, i, gpio, gpio_orig, intcap_mask;
 	unsigned int child_irq;
+	bool intf_set, intcap_changed, gpio_bit_changed,
+		defval_changed, gpio_set;
 
 	mutex_lock(&mcp->lock);
 	if (mcp_read(mcp, MCP_INTF, &intf) < 0) {
@@ -287,14 +289,67 @@ static irqreturn_t mcp23s08_irq(int irq, void *data)
 	}
 
 	mcp->cache[MCP_INTCAP] = intcap;
+
+	/* This clears the interrupt(configurable on S18) */
+	if (mcp_read(mcp, MCP_GPIO, &gpio) < 0) {
+		mutex_unlock(&mcp->lock);
+		return IRQ_HANDLED;
+	}
+	gpio_orig = mcp->cache[MCP_GPIO];
+	mcp->cache[MCP_GPIO] = gpio;
 	mutex_unlock(&mcp->lock);
 
+	if (mcp->cache[MCP_INTF] == 0) {
+		/* There is no interrupt pending */
+		return IRQ_HANDLED;
+	}
+
+	dev_dbg(mcp->chip.parent,
+		"intcap 0x%04X intf 0x%04X gpio_orig 0x%04X gpio 0x%04X\n",
+		intcap, intf, gpio_orig, gpio);
 
 	for (i = 0; i < mcp->chip.ngpio; i++) {
-		if ((BIT(i) & mcp->cache[MCP_INTF]) &&
-		    ((BIT(i) & intcap & mcp->irq_rise) ||
-		     (mcp->irq_fall & ~intcap & BIT(i)) ||
-		     (BIT(i) & mcp->cache[MCP_INTCON]))) {
+		/* We must check all of the inputs on the chip,
+		 * otherwise we may not notice a change on >=2 pins.
+		 *
+		 * On at least the mcp23s17, INTCAP is only updated
+		 * one byte at a time(INTCAPA and INTCAPB are
+		 * not written to at the same time - only on a per-bank
+		 * basis).
+		 *
+		 * INTF only contains the single bit that caused the
+		 * interrupt per-bank.  On the mcp23s17, there is
+		 * INTFA and INTFB.  If two pins are changed on the A
+		 * side at the same time, INTF will only have one bit
+		 * set.  If one pin on the A side and one pin on the B
+		 * side are changed at the same time, INTF will have
+		 * two bits set.  Thus, INTF can't be the only check
+		 * to see if the input has changed.
+		 */
+
+		intf_set = BIT(i) & mcp->cache[MCP_INTF];
+		if (i < 8 && intf_set)
+			intcap_mask = 0x00FF;
+		else if (i >= 8 && intf_set)
+			intcap_mask = 0xFF00;
+		else
+			intcap_mask = 0x00;
+
+		intcap_changed = (intcap_mask &
+			(BIT(i) & mcp->cache[MCP_INTCAP])) !=
+			(intcap_mask & (BIT(i) & gpio_orig));
+		gpio_set = BIT(i) & mcp->cache[MCP_GPIO];
+		gpio_bit_changed = (BIT(i) & gpio_orig) !=
+			(BIT(i) & mcp->cache[MCP_GPIO]);
+		defval_changed = (BIT(i) & mcp->cache[MCP_INTCON]) &&
+			((BIT(i) & mcp->cache[MCP_GPIO]) !=
+			(BIT(i) & mcp->cache[MCP_DEFVAL]));
+
+		if (((gpio_bit_changed || intcap_changed) &&
+			(BIT(i) & mcp->irq_rise) && gpio_set) ||
+		    ((gpio_bit_changed || intcap_changed) &&
+			(BIT(i) & mcp->irq_fall) && !gpio_set) ||
+		    defval_changed) {
 			child_irq = irq_find_mapping(mcp->chip.irqdomain, i);
 			handle_nested_irq(child_irq);
 		}
diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
index 06dac72cb69c..d99338689213 100644
--- a/drivers/gpio/gpio-mockup.c
+++ b/drivers/gpio/gpio-mockup.c
@@ -197,7 +197,7 @@ static ssize_t gpio_mockup_event_write(struct file *file,
 	struct seq_file *sfile;
 	struct gpio_desc *desc;
 	struct gpio_chip *gc;
-	int status, val;
+	int val;
 	char buf;
 
 	sfile = file->private_data;
@@ -206,9 +206,8 @@ static ssize_t gpio_mockup_event_write(struct file *file,
 	chip = priv->chip;
 	gc = &chip->gc;
 
-	status = copy_from_user(&buf, usr_buf, 1);
-	if (status)
-		return status;
+	if (copy_from_user(&buf, usr_buf, 1))
+		return -EFAULT;
 
 	if (buf == '0')
 		val = 0;
diff --git a/drivers/gpio/gpio-xgene.c b/drivers/gpio/gpio-xgene.c
index 40a8881c2ce8..f1c6ec17b90a 100644
--- a/drivers/gpio/gpio-xgene.c
+++ b/drivers/gpio/gpio-xgene.c
@@ -42,9 +42,7 @@ struct xgene_gpio {
 	struct gpio_chip	chip;
 	void __iomem		*base;
 	spinlock_t		lock;
-#ifdef CONFIG_PM
 	u32			set_dr_val[XGENE_MAX_GPIO_BANKS];
-#endif
 };
 
 static int xgene_gpio_get(struct gpio_chip *gc, unsigned int offset)
@@ -138,8 +136,7 @@ static int xgene_gpio_dir_out(struct gpio_chip *gc,
 	return 0;
 }
 
-#ifdef CONFIG_PM
-static int xgene_gpio_suspend(struct device *dev)
+static __maybe_unused int xgene_gpio_suspend(struct device *dev)
 {
 	struct xgene_gpio *gpio = dev_get_drvdata(dev);
 	unsigned long bank_offset;
@@ -152,7 +149,7 @@ static int xgene_gpio_suspend(struct device *dev)
 	return 0;
 }
 
-static int xgene_gpio_resume(struct device *dev)
+static __maybe_unused int xgene_gpio_resume(struct device *dev)
 {
 	struct xgene_gpio *gpio = dev_get_drvdata(dev);
 	unsigned long bank_offset;
@@ -166,10 +163,6 @@ static int xgene_gpio_resume(struct device *dev)
 }
 
 static SIMPLE_DEV_PM_OPS(xgene_gpio_pm, xgene_gpio_suspend, xgene_gpio_resume);
-#define XGENE_GPIO_PM_OPS	(&xgene_gpio_pm)
-#else
-#define XGENE_GPIO_PM_OPS	NULL
-#endif
 
 static int xgene_gpio_probe(struct platform_device *pdev)
 {
@@ -241,7 +234,7 @@ static struct platform_driver xgene_gpio_driver = {
 		.name = "xgene-gpio",
 		.of_match_table = xgene_gpio_of_match,
 		.acpi_match_table = ACPI_PTR(xgene_gpio_acpi_match),
-		.pm     = XGENE_GPIO_PM_OPS,
+		.pm     = &xgene_gpio_pm,
 	},
 	.probe = xgene_gpio_probe,
 };
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 9b37a3692b3f..2bd683e2be02 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -266,6 +266,9 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
 		goto fail_free_event;
 	}
 
+	if (agpio->wake_capable == ACPI_WAKE_CAPABLE)
+		enable_irq_wake(irq);
+
 	list_add_tail(&event->node, &acpi_gpio->events);
 	return AE_OK;
 
@@ -339,6 +342,9 @@ void acpi_gpiochip_free_interrupts(struct gpio_chip *chip)
 	list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) {
 		struct gpio_desc *desc;
 
+		if (irqd_is_wakeup_set(irq_get_irq_data(event->irq)))
+			disable_irq_wake(event->irq);
+
 		free_irq(event->irq, event);
 		desc = event->desc;
 		if (WARN_ON(IS_ERR(desc)))
@@ -571,8 +577,10 @@ struct gpio_desc *acpi_find_gpio(struct device *dev,
 		}
 
 		desc = acpi_get_gpiod_by_index(adev, propname, idx, &info);
-		if (!IS_ERR(desc) || (PTR_ERR(desc) == -EPROBE_DEFER))
+		if (!IS_ERR(desc))
 			break;
+		if (PTR_ERR(desc) == -EPROBE_DEFER)
+			return ERR_CAST(desc);
 	}
 
 	/* Then from plain _CRS GPIOs */
diff --git a/drivers/gpu/drm/amd/acp/Makefile b/drivers/gpu/drm/amd/acp/Makefile
index 8363cb57915b..8a08e81ee90d 100644
--- a/drivers/gpu/drm/amd/acp/Makefile
+++ b/drivers/gpu/drm/amd/acp/Makefile
@@ -3,6 +3,4 @@
 # of AMDSOC/AMDGPU drm driver.
 # It provides the HW control for ACP related functionalities.
 
-subdir-ccflags-y += -I$(AMDACPPATH)/ -I$(AMDACPPATH)/include
-
 AMD_ACP_FILES := $(AMDACPPATH)/acp_hw.o
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index d2d0f60ff36d..99424cb8020b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -240,6 +240,8 @@ free_partial_kdata:
 	for (; i >= 0; i--)
 		drm_free_large(p->chunks[i].kdata);
 	kfree(p->chunks);
+	p->chunks = NULL;
+	p->nchunks = 0;
 put_ctx:
 	amdgpu_ctx_put(p->ctx);
 free_chunk:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 4120b351a8e5..de0cf3315484 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -475,7 +475,7 @@ static int amdgpu_wb_init(struct amdgpu_device *adev)
 	int r;
 
 	if (adev->wb.wb_obj == NULL) {
-		r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * 4,
+		r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t),
 					    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
 					    &adev->wb.wb_obj, &adev->wb.gpu_addr,
 					    (void **)&adev->wb.wb);
@@ -488,7 +488,7 @@ static int amdgpu_wb_init(struct amdgpu_device *adev)
 		memset(&adev->wb.used, 0, sizeof(adev->wb.used));
 
 		/* clear wb memory */
-		memset((char *)adev->wb.wb, 0, AMDGPU_GPU_PAGE_SIZE);
+		memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t));
 	}
 
 	return 0;
@@ -2590,7 +2590,7 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
 		use_bank = 0;
 	}
 
-	*pos &= 0x3FFFF;
+	*pos &= (1UL << 22) - 1;
 
 	if (use_bank) {
 		if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
@@ -2666,7 +2666,7 @@ static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
 		use_bank = 0;
 	}
 
-	*pos &= 0x3FFFF;
+	*pos &= (1UL << 22) - 1;
 
 	if (use_bank) {
 		if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index f7adbace428a..b76cd699eb0d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -421,6 +421,7 @@ static const struct pci_device_id pciidlist[] = {
 	{0x1002, 0x6985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
 	{0x1002, 0x6986, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
 	{0x1002, 0x6987, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
+	{0x1002, 0x6995, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
 	{0x1002, 0x699F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
 
 	{0, 0, 0}
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index f55e45b52fbc..c5dec210d529 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -3464,6 +3464,16 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
 		    (adev->pdev->device == 0x6667)) {
 			max_sclk = 75000;
 		}
+	} else if (adev->asic_type == CHIP_OLAND) {
+		if ((adev->pdev->revision == 0xC7) ||
+		    (adev->pdev->revision == 0x80) ||
+		    (adev->pdev->revision == 0x81) ||
+		    (adev->pdev->revision == 0x83) ||
+		    (adev->pdev->revision == 0x87) ||
+		    (adev->pdev->device == 0x6604) ||
+		    (adev->pdev->device == 0x6605)) {
+			max_sclk = 75000;
+		}
 	}
 
 	if (rps->vce_active) {
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 50bdb24ef8d6..4a785d6acfb9 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -1051,7 +1051,7 @@ static int vi_common_early_init(void *handle)
 		/* rev0 hardware requires workarounds to support PG */
 		adev->pg_flags = 0;
 		if (adev->rev_id != 0x00) {
-			adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
+			adev->pg_flags |=
 				AMD_PG_SUPPORT_GFX_SMG |
 				AMD_PG_SUPPORT_GFX_PIPELINE |
 				AMD_PG_SUPPORT_CP |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
index 8cf71f3c6d0e..261b828ad590 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
@@ -178,7 +178,7 @@ int smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
 	if (bgate) {
 		cgs_set_powergating_state(hwmgr->device,
 				AMD_IP_BLOCK_TYPE_VCE,
-				AMD_PG_STATE_UNGATE);
+				AMD_PG_STATE_GATE);
 		cgs_set_clockgating_state(hwmgr->device,
 				AMD_IP_BLOCK_TYPE_VCE,
 				AMD_CG_STATE_GATE);
diff --git a/drivers/gpu/drm/arm/malidp_crtc.c b/drivers/gpu/drm/arm/malidp_crtc.c
index 08e6a71f5d05..294b53697334 100644
--- a/drivers/gpu/drm/arm/malidp_crtc.c
+++ b/drivers/gpu/drm/arm/malidp_crtc.c
@@ -63,8 +63,7 @@ static void malidp_crtc_enable(struct drm_crtc *crtc)
63 63
64 clk_prepare_enable(hwdev->pxlclk); 64 clk_prepare_enable(hwdev->pxlclk);
65 65
66 /* mclk needs to be set to the same or higher rate than pxlclk */ 66 /* We rely on firmware to set mclk to a sensible level. */
67 clk_set_rate(hwdev->mclk, crtc->state->adjusted_mode.crtc_clock * 1000);
68 clk_set_rate(hwdev->pxlclk, crtc->state->adjusted_mode.crtc_clock * 1000); 67 clk_set_rate(hwdev->pxlclk, crtc->state->adjusted_mode.crtc_clock * 1000);
69 68
70 hwdev->modeset(hwdev, &vm); 69 hwdev->modeset(hwdev, &vm);
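
Two common-clock-framework details are load-bearing in the hunk above: clk_prepare_enable() must succeed before the clock is used, and clk_set_rate() takes Hz, which is why the kHz mode clock is multiplied by 1000. A minimal sketch of the same sequence, with a hypothetical helper name:

#include <linux/clk.h>

/* Hypothetical helper: enable a pixel clock and program it for a
 * display mode whose clock is given in kHz.
 */
static int set_pixel_clock(struct clk *pxlclk, unsigned int mode_khz)
{
	int ret = clk_prepare_enable(pxlclk);

	if (ret)
		return ret;

	/* clk_set_rate() expects Hz */
	return clk_set_rate(pxlclk, mode_khz * 1000UL);
}
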
diff --git a/drivers/gpu/drm/arm/malidp_hw.c b/drivers/gpu/drm/arm/malidp_hw.c
index 488aedf5b58d..9f5513006eee 100644
--- a/drivers/gpu/drm/arm/malidp_hw.c
+++ b/drivers/gpu/drm/arm/malidp_hw.c
@@ -83,7 +83,7 @@ static const struct malidp_layer malidp550_layers[] = {
83 { DE_VIDEO1, MALIDP550_DE_LV1_BASE, MALIDP550_DE_LV1_PTR_BASE, MALIDP_DE_LV_STRIDE0 }, 83 { DE_VIDEO1, MALIDP550_DE_LV1_BASE, MALIDP550_DE_LV1_PTR_BASE, MALIDP_DE_LV_STRIDE0 },
84 { DE_GRAPHICS1, MALIDP550_DE_LG_BASE, MALIDP550_DE_LG_PTR_BASE, MALIDP_DE_LG_STRIDE }, 84 { DE_GRAPHICS1, MALIDP550_DE_LG_BASE, MALIDP550_DE_LG_PTR_BASE, MALIDP_DE_LG_STRIDE },
85 { DE_VIDEO2, MALIDP550_DE_LV2_BASE, MALIDP550_DE_LV2_PTR_BASE, MALIDP_DE_LV_STRIDE0 }, 85 { DE_VIDEO2, MALIDP550_DE_LV2_BASE, MALIDP550_DE_LV2_PTR_BASE, MALIDP_DE_LV_STRIDE0 },
86 { DE_SMART, MALIDP550_DE_LS_BASE, MALIDP550_DE_LS_PTR_BASE, 0 }, 86 { DE_SMART, MALIDP550_DE_LS_BASE, MALIDP550_DE_LS_PTR_BASE, MALIDP550_DE_LS_R1_STRIDE },
87}; 87};
88 88
89#define MALIDP_DE_DEFAULT_PREFETCH_START 5 89#define MALIDP_DE_DEFAULT_PREFETCH_START 5
diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c
index 414aada10fe5..d5aec082294c 100644
--- a/drivers/gpu/drm/arm/malidp_planes.c
+++ b/drivers/gpu/drm/arm/malidp_planes.c
@@ -37,6 +37,8 @@
37#define LAYER_V_VAL(x) (((x) & 0x1fff) << 16) 37#define LAYER_V_VAL(x) (((x) & 0x1fff) << 16)
38#define MALIDP_LAYER_COMP_SIZE 0x010 38#define MALIDP_LAYER_COMP_SIZE 0x010
39#define MALIDP_LAYER_OFFSET 0x014 39#define MALIDP_LAYER_OFFSET 0x014
40#define MALIDP550_LS_ENABLE 0x01c
41#define MALIDP550_LS_R1_IN_SIZE 0x020
40 42
41/* 43/*
42 * This 4-entry look-up-table is used to determine the full 8-bit alpha value 44 * This 4-entry look-up-table is used to determine the full 8-bit alpha value
@@ -242,6 +244,11 @@ static void malidp_de_plane_update(struct drm_plane *plane,
242 LAYER_V_VAL(plane->state->crtc_y), 244 LAYER_V_VAL(plane->state->crtc_y),
243 mp->layer->base + MALIDP_LAYER_OFFSET); 245 mp->layer->base + MALIDP_LAYER_OFFSET);
244 246
247 if (mp->layer->id == DE_SMART)
248 malidp_hw_write(mp->hwdev,
249 LAYER_H_VAL(src_w) | LAYER_V_VAL(src_h),
250 mp->layer->base + MALIDP550_LS_R1_IN_SIZE);
251
245 /* first clear the rotation bits */ 252 /* first clear the rotation bits */
246 val = malidp_hw_read(mp->hwdev, mp->layer->base + MALIDP_LAYER_CONTROL); 253 val = malidp_hw_read(mp->hwdev, mp->layer->base + MALIDP_LAYER_CONTROL);
247 val &= ~LAYER_ROT_MASK; 254 val &= ~LAYER_ROT_MASK;
@@ -330,9 +337,16 @@ int malidp_de_planes_init(struct drm_device *drm)
330 plane->hwdev = malidp->dev; 337 plane->hwdev = malidp->dev;
331 plane->layer = &map->layers[i]; 338 plane->layer = &map->layers[i];
332 339
333 /* Skip the features which the SMART layer doesn't have */ 340 if (id == DE_SMART) {
334 if (id == DE_SMART) 341 /*
342 * Enable the first rectangle in the SMART layer to be
343 * able to use it as a drm plane.
344 */
345 malidp_hw_write(malidp->dev, 1,
346 plane->layer->base + MALIDP550_LS_ENABLE);
347 /* Skip the features which the SMART layer doesn't have. */
335 continue; 348 continue;
349 }
336 350
337 drm_plane_create_rotation_property(&plane->base, DRM_ROTATE_0, flags); 351 drm_plane_create_rotation_property(&plane->base, DRM_ROTATE_0, flags);
338 malidp_hw_write(malidp->dev, MALIDP_ALPHA_LUT, 352 malidp_hw_write(malidp->dev, MALIDP_ALPHA_LUT,
diff --git a/drivers/gpu/drm/arm/malidp_regs.h b/drivers/gpu/drm/arm/malidp_regs.h
index aff6d4a84e99..b816067a65c5 100644
--- a/drivers/gpu/drm/arm/malidp_regs.h
+++ b/drivers/gpu/drm/arm/malidp_regs.h
@@ -84,6 +84,7 @@
84/* Stride register offsets relative to Lx_BASE */ 84/* Stride register offsets relative to Lx_BASE */
85#define MALIDP_DE_LG_STRIDE 0x18 85#define MALIDP_DE_LG_STRIDE 0x18
86#define MALIDP_DE_LV_STRIDE0 0x18 86#define MALIDP_DE_LV_STRIDE0 0x18
87#define MALIDP550_DE_LS_R1_STRIDE 0x28
87 88
88/* macros to set values into registers */ 89/* macros to set values into registers */
89#define MALIDP_DE_H_FRONTPORCH(x) (((x) & 0xfff) << 0) 90#define MALIDP_DE_H_FRONTPORCH(x) (((x) & 0xfff) << 0)
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index f6d4d9700734..324a688b3f30 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -1260,9 +1260,9 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
1260 * to KMS, hence fail if different settings are requested. 1260 * to KMS, hence fail if different settings are requested.
1261 */ 1261 */
1262 if (var->bits_per_pixel != fb->format->cpp[0] * 8 || 1262 if (var->bits_per_pixel != fb->format->cpp[0] * 8 ||
1263 var->xres != fb->width || var->yres != fb->height || 1263 var->xres > fb->width || var->yres > fb->height ||
1264 var->xres_virtual != fb->width || var->yres_virtual != fb->height) { 1264 var->xres_virtual > fb->width || var->yres_virtual > fb->height) {
1265 DRM_DEBUG("fb userspace requested width/height/bpp different than current fb " 1265 DRM_DEBUG("fb requested width/height/bpp can't fit in current fb "
1266 "request %dx%d-%d (virtual %dx%d) > %dx%d-%d\n", 1266 "request %dx%d-%d (virtual %dx%d) > %dx%d-%d\n",
1267 var->xres, var->yres, var->bits_per_pixel, 1267 var->xres, var->yres, var->bits_per_pixel,
1268 var->xres_virtual, var->yres_virtual, 1268 var->xres_virtual, var->yres_virtual,
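
The operator change is the substance of this fix: strict equality rejected any requested mode smaller than the allocated framebuffer, while `>` rejects only requests that genuinely cannot fit. The relaxed test, isolated into a sketch:

#include <linux/fb.h>

/* Sketch: accept any geometry that fits inside the currently
 * allocated framebuffer, at the framebuffer's own depth.
 */
static bool var_fits_fb(const struct fb_var_screeninfo *var,
			unsigned int fb_width, unsigned int fb_height,
			unsigned int fb_bpp)
{
	return var->bits_per_pixel == fb_bpp &&
	       var->xres <= fb_width && var->yres <= fb_height &&
	       var->xres_virtual <= fb_width &&
	       var->yres_virtual <= fb_height;
}
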
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index 130d7d517a19..da48819ff2e6 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -1311,6 +1311,8 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
1311 goto out_pm_put; 1311 goto out_pm_put;
1312 } 1312 }
1313 1313
1314 mutex_lock(&gpu->lock);
1315
1314 fence = etnaviv_gpu_fence_alloc(gpu); 1316 fence = etnaviv_gpu_fence_alloc(gpu);
1315 if (!fence) { 1317 if (!fence) {
1316 event_free(gpu, event); 1318 event_free(gpu, event);
@@ -1318,8 +1320,6 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
1318 goto out_pm_put; 1320 goto out_pm_put;
1319 } 1321 }
1320 1322
1321 mutex_lock(&gpu->lock);
1322
1323 gpu->event[event].fence = fence; 1323 gpu->event[event].fence = fence;
1324 submit->fence = fence->seqno; 1324 submit->fence = fence->seqno;
1325 gpu->active_fence = submit->fence; 1325 gpu->active_fence = submit->fence;
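
Moving mutex_lock() above the allocation is a lock-ordering fix: the fence's sequence number comes from per-GPU state, and allocating it before taking gpu->lock opened a window in which two submitters could be granted out-of-order seqnos. The safe shape, as a sketch with hypothetical labels (the driver's actual error unwinding differs):

mutex_lock(&gpu->lock);

fence = etnaviv_gpu_fence_alloc(gpu);
if (!fence) {
	event_free(gpu, event);
	ret = -ENOMEM;
	goto out_unlock;	/* every error path below the lock must drop it */
}

/* ... record the fence, kick the ring, then mutex_unlock(&gpu->lock) ... */
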
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index 0fd6f7a18364..c0e8d3302292 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -68,6 +68,8 @@ struct decon_context {
68 unsigned long flags; 68 unsigned long flags;
69 unsigned long out_type; 69 unsigned long out_type;
70 int first_win; 70 int first_win;
71 spinlock_t vblank_lock;
72 u32 frame_id;
71}; 73};
72 74
73static const uint32_t decon_formats[] = { 75static const uint32_t decon_formats[] = {
@@ -103,7 +105,7 @@ static int decon_enable_vblank(struct exynos_drm_crtc *crtc)
103 if (ctx->out_type & IFTYPE_I80) 105 if (ctx->out_type & IFTYPE_I80)
104 val |= VIDINTCON0_FRAMEDONE; 106 val |= VIDINTCON0_FRAMEDONE;
105 else 107 else
106 val |= VIDINTCON0_INTFRMEN; 108 val |= VIDINTCON0_INTFRMEN | VIDINTCON0_FRAMESEL_FP;
107 109
108 writel(val, ctx->addr + DECON_VIDINTCON0); 110 writel(val, ctx->addr + DECON_VIDINTCON0);
109 } 111 }
@@ -122,14 +124,56 @@ static void decon_disable_vblank(struct exynos_drm_crtc *crtc)
122 writel(0, ctx->addr + DECON_VIDINTCON0); 124 writel(0, ctx->addr + DECON_VIDINTCON0);
123} 125}
124 126
127/* return number of starts/ends of frame transmissions since reset */
128static u32 decon_get_frame_count(struct decon_context *ctx, bool end)
129{
130 u32 frm, pfrm, status, cnt = 2;
131
 132	/* To get a consistent result, repeat the read until the frame id is
 133	 * stable. The loop usually executes once; a second pass is needed
 134	 * only in the rare case that it runs at frame-change time.
 135	 */
136 frm = readl(ctx->addr + DECON_CRFMID);
137 do {
138 status = readl(ctx->addr + DECON_VIDCON1);
139 pfrm = frm;
140 frm = readl(ctx->addr + DECON_CRFMID);
141 } while (frm != pfrm && --cnt);
142
 143	/* CRFMID is incremented on BPORCH in the I80 case and on VSYNC in
 144	 * the RGB case; the adjustment below takes this into account.
 145	 */
146 if (!frm)
147 return 0;
148
149 switch (status & (VIDCON1_VSTATUS_MASK | VIDCON1_I80_ACTIVE)) {
150 case VIDCON1_VSTATUS_VS:
151 if (!(ctx->out_type & IFTYPE_I80))
152 --frm;
153 break;
154 case VIDCON1_VSTATUS_BP:
155 --frm;
156 break;
157 case VIDCON1_I80_ACTIVE:
158 case VIDCON1_VSTATUS_AC:
159 if (end)
160 --frm;
161 break;
162 default:
163 break;
164 }
165
166 return frm;
167}
168
125static void decon_setup_trigger(struct decon_context *ctx) 169static void decon_setup_trigger(struct decon_context *ctx)
126{ 170{
127 if (!(ctx->out_type & (IFTYPE_I80 | I80_HW_TRG))) 171 if (!(ctx->out_type & (IFTYPE_I80 | I80_HW_TRG)))
128 return; 172 return;
129 173
130 if (!(ctx->out_type & I80_HW_TRG)) { 174 if (!(ctx->out_type & I80_HW_TRG)) {
131 writel(TRIGCON_TE_AUTO_MASK | TRIGCON_SWTRIGEN 175 writel(TRIGCON_TRIGEN_PER_F | TRIGCON_TRIGEN_F |
132 | TRIGCON_TE_AUTO_MASK | TRIGCON_SWTRIGEN, 176 TRIGCON_TE_AUTO_MASK | TRIGCON_SWTRIGEN,
133 ctx->addr + DECON_TRIGCON); 177 ctx->addr + DECON_TRIGCON);
134 return; 178 return;
135 } 179 }
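
decon_get_frame_count() above opens with a standard defence for reading a counter that can tick mid-read: sample it, re-sample, and retry while the two samples disagree, with a bound so a fast counter cannot spin forever. Stripped of the DECON status handling, the idiom is:

#include <linux/io.h>

/* Sketch: read a free-running hardware counter register until two
 * consecutive samples agree. cnt bounds the retries, matching the
 * driver's observation that a second pass is rarely needed.
 */
static u32 read_stable_counter(void __iomem *reg)
{
	u32 prev, cur = readl(reg);
	unsigned int cnt = 2;

	do {
		prev = cur;
		cur = readl(reg);
	} while (cur != prev && --cnt);

	return cur;
}
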
@@ -365,11 +409,14 @@ static void decon_disable_plane(struct exynos_drm_crtc *crtc,
365static void decon_atomic_flush(struct exynos_drm_crtc *crtc) 409static void decon_atomic_flush(struct exynos_drm_crtc *crtc)
366{ 410{
367 struct decon_context *ctx = crtc->ctx; 411 struct decon_context *ctx = crtc->ctx;
412 unsigned long flags;
368 int i; 413 int i;
369 414
370 if (test_bit(BIT_SUSPENDED, &ctx->flags)) 415 if (test_bit(BIT_SUSPENDED, &ctx->flags))
371 return; 416 return;
372 417
418 spin_lock_irqsave(&ctx->vblank_lock, flags);
419
373 for (i = ctx->first_win; i < WINDOWS_NR; i++) 420 for (i = ctx->first_win; i < WINDOWS_NR; i++)
374 decon_shadow_protect_win(ctx, i, false); 421 decon_shadow_protect_win(ctx, i, false);
375 422
@@ -378,11 +425,18 @@ static void decon_atomic_flush(struct exynos_drm_crtc *crtc)
378 425
379 if (ctx->out_type & IFTYPE_I80) 426 if (ctx->out_type & IFTYPE_I80)
380 set_bit(BIT_WIN_UPDATED, &ctx->flags); 427 set_bit(BIT_WIN_UPDATED, &ctx->flags);
428
429 ctx->frame_id = decon_get_frame_count(ctx, true);
430
431 exynos_crtc_handle_event(crtc);
432
433 spin_unlock_irqrestore(&ctx->vblank_lock, flags);
381} 434}
382 435
383static void decon_swreset(struct decon_context *ctx) 436static void decon_swreset(struct decon_context *ctx)
384{ 437{
385 unsigned int tries; 438 unsigned int tries;
439 unsigned long flags;
386 440
387 writel(0, ctx->addr + DECON_VIDCON0); 441 writel(0, ctx->addr + DECON_VIDCON0);
388 for (tries = 2000; tries; --tries) { 442 for (tries = 2000; tries; --tries) {
@@ -400,6 +454,10 @@ static void decon_swreset(struct decon_context *ctx)
400 454
401 WARN(tries == 0, "failed to software reset DECON\n"); 455 WARN(tries == 0, "failed to software reset DECON\n");
402 456
457 spin_lock_irqsave(&ctx->vblank_lock, flags);
458 ctx->frame_id = 0;
459 spin_unlock_irqrestore(&ctx->vblank_lock, flags);
460
403 if (!(ctx->out_type & IFTYPE_HDMI)) 461 if (!(ctx->out_type & IFTYPE_HDMI))
404 return; 462 return;
405 463
@@ -578,6 +636,24 @@ static const struct component_ops decon_component_ops = {
578 .unbind = decon_unbind, 636 .unbind = decon_unbind,
579}; 637};
580 638
639static void decon_handle_vblank(struct decon_context *ctx)
640{
641 u32 frm;
642
643 spin_lock(&ctx->vblank_lock);
644
645 frm = decon_get_frame_count(ctx, true);
646
647 if (frm != ctx->frame_id) {
 648	/* handle only if the counter advanced; the s32 cast handles wrap-around */
649 if ((s32)(frm - ctx->frame_id) > 0)
650 drm_crtc_handle_vblank(&ctx->crtc->base);
651 ctx->frame_id = frm;
652 }
653
654 spin_unlock(&ctx->vblank_lock);
655}
656
581static irqreturn_t decon_irq_handler(int irq, void *dev_id) 657static irqreturn_t decon_irq_handler(int irq, void *dev_id)
582{ 658{
583 struct decon_context *ctx = dev_id; 659 struct decon_context *ctx = dev_id;
@@ -598,7 +674,7 @@ static irqreturn_t decon_irq_handler(int irq, void *dev_id)
598 (VIDOUT_INTERLACE_EN_F | VIDOUT_INTERLACE_FIELD_F)) 674 (VIDOUT_INTERLACE_EN_F | VIDOUT_INTERLACE_FIELD_F))
599 return IRQ_HANDLED; 675 return IRQ_HANDLED;
600 } 676 }
601 drm_crtc_handle_vblank(&ctx->crtc->base); 677 decon_handle_vblank(ctx);
602 } 678 }
603 679
604out: 680out:
@@ -671,6 +747,7 @@ static int exynos5433_decon_probe(struct platform_device *pdev)
671 __set_bit(BIT_SUSPENDED, &ctx->flags); 747 __set_bit(BIT_SUSPENDED, &ctx->flags);
672 ctx->dev = dev; 748 ctx->dev = dev;
673 ctx->out_type = (unsigned long)of_device_get_match_data(dev); 749 ctx->out_type = (unsigned long)of_device_get_match_data(dev);
750 spin_lock_init(&ctx->vblank_lock);
674 751
675 if (ctx->out_type & IFTYPE_HDMI) { 752 if (ctx->out_type & IFTYPE_HDMI) {
676 ctx->first_win = 1; 753 ctx->first_win = 1;
@@ -678,7 +755,7 @@ static int exynos5433_decon_probe(struct platform_device *pdev)
678 ctx->out_type |= IFTYPE_I80; 755 ctx->out_type |= IFTYPE_I80;
679 } 756 }
680 757
681 if (ctx->out_type | I80_HW_TRG) { 758 if (ctx->out_type & I80_HW_TRG) {
682 ctx->sysreg = syscon_regmap_lookup_by_phandle(dev->of_node, 759 ctx->sysreg = syscon_regmap_lookup_by_phandle(dev->of_node,
683 "samsung,disp-sysreg"); 760 "samsung,disp-sysreg");
684 if (IS_ERR(ctx->sysreg)) { 761 if (IS_ERR(ctx->sysreg)) {
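
The final hunk is a classic one-character bug worth a call-out: `ctx->out_type | I80_HW_TRG` OR-s the flag word with a non-zero constant, so the condition was always true and the sysreg lookup ran even for boards without hardware trigger support. Testing a flag needs bitwise AND, as this sketch with hypothetical flag names shows:

#include <linux/bitops.h>

#define FLAG_I80	BIT(0)	/* hypothetical flag bits */
#define FLAG_HW_TRG	BIT(1)

static void flag_test_demo(unsigned long out_type)
{
	if (out_type | FLAG_HW_TRG) {
		/* BUG: x | non-zero constant is never zero, so this
		 * branch is taken no matter what out_type holds.
		 */
	}

	if (out_type & FLAG_HW_TRG) {
		/* correct: taken only when the flag is actually set */
	}
}
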
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index f9ab19e205e2..48811806fa27 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -526,6 +526,7 @@ static void decon_atomic_flush(struct exynos_drm_crtc *crtc)
526 526
527 for (i = 0; i < WINDOWS_NR; i++) 527 for (i = 0; i < WINDOWS_NR; i++)
528 decon_shadow_protect_win(ctx, i, false); 528 decon_shadow_protect_win(ctx, i, false);
529 exynos_crtc_handle_event(crtc);
529} 530}
530 531
531static void decon_init(struct decon_context *ctx) 532static void decon_init(struct decon_context *ctx)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index 5367b6664fe3..c65f4509932c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -85,16 +85,28 @@ static void exynos_crtc_atomic_flush(struct drm_crtc *crtc,
85 struct drm_crtc_state *old_crtc_state) 85 struct drm_crtc_state *old_crtc_state)
86{ 86{
87 struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); 87 struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
88 struct drm_pending_vblank_event *event;
89 unsigned long flags;
90 88
91 if (exynos_crtc->ops->atomic_flush) 89 if (exynos_crtc->ops->atomic_flush)
92 exynos_crtc->ops->atomic_flush(exynos_crtc); 90 exynos_crtc->ops->atomic_flush(exynos_crtc);
91}
92
93static const struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = {
94 .enable = exynos_drm_crtc_enable,
95 .disable = exynos_drm_crtc_disable,
96 .mode_set_nofb = exynos_drm_crtc_mode_set_nofb,
97 .atomic_check = exynos_crtc_atomic_check,
98 .atomic_begin = exynos_crtc_atomic_begin,
99 .atomic_flush = exynos_crtc_atomic_flush,
100};
101
102void exynos_crtc_handle_event(struct exynos_drm_crtc *exynos_crtc)
103{
104 struct drm_crtc *crtc = &exynos_crtc->base;
105 struct drm_pending_vblank_event *event = crtc->state->event;
106 unsigned long flags;
93 107
94 event = crtc->state->event;
95 if (event) { 108 if (event) {
96 crtc->state->event = NULL; 109 crtc->state->event = NULL;
97
98 spin_lock_irqsave(&crtc->dev->event_lock, flags); 110 spin_lock_irqsave(&crtc->dev->event_lock, flags);
99 if (drm_crtc_vblank_get(crtc) == 0) 111 if (drm_crtc_vblank_get(crtc) == 0)
100 drm_crtc_arm_vblank_event(crtc, event); 112 drm_crtc_arm_vblank_event(crtc, event);
@@ -105,15 +117,6 @@ static void exynos_crtc_atomic_flush(struct drm_crtc *crtc,
105 117
106} 118}
107 119
108static const struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = {
109 .enable = exynos_drm_crtc_enable,
110 .disable = exynos_drm_crtc_disable,
111 .mode_set_nofb = exynos_drm_crtc_mode_set_nofb,
112 .atomic_check = exynos_crtc_atomic_check,
113 .atomic_begin = exynos_crtc_atomic_begin,
114 .atomic_flush = exynos_crtc_atomic_flush,
115};
116
117static void exynos_drm_crtc_destroy(struct drm_crtc *crtc) 120static void exynos_drm_crtc_destroy(struct drm_crtc *crtc)
118{ 121{
119 struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); 122 struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.h b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
index 6a581a8af465..abd5d6ceac0c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
@@ -40,4 +40,6 @@ int exynos_drm_crtc_get_pipe_from_type(struct drm_device *drm_dev,
40 */ 40 */
41void exynos_drm_crtc_te_handler(struct drm_crtc *crtc); 41void exynos_drm_crtc_te_handler(struct drm_crtc *crtc);
42 42
43void exynos_crtc_handle_event(struct exynos_drm_crtc *exynos_crtc);
44
43#endif 45#endif
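
The refactor above extracts the standard DRM page-flip event hand-off into exynos_crtc_handle_event() so each backend can call it once its shadow registers are committed: detach crtc->state->event, then, under event_lock, either arm it to fire on the next vblank or send it immediately when vblank interrupts cannot be obtained. The core of that pattern, as a sketch:

#include <drm/drm_crtc.h>

/* Sketch of the vblank-event hand-off used by the helper above. */
static void handle_flip_event(struct drm_crtc *crtc)
{
	struct drm_pending_vblank_event *event = crtc->state->event;
	unsigned long flags;

	if (!event)
		return;
	crtc->state->event = NULL;

	spin_lock_irqsave(&crtc->dev->event_lock, flags);
	if (drm_crtc_vblank_get(crtc) == 0)
		drm_crtc_arm_vblank_event(crtc, event);	 /* fires at vblank */
	else
		drm_crtc_send_vblank_event(crtc, event); /* deliver right away */
	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
}
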
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index 812e2ec0761d..d7ef26370e67 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -86,7 +86,7 @@
86#define DSIM_SYNC_INFORM (1 << 27) 86#define DSIM_SYNC_INFORM (1 << 27)
87#define DSIM_EOT_DISABLE (1 << 28) 87#define DSIM_EOT_DISABLE (1 << 28)
88#define DSIM_MFLUSH_VS (1 << 29) 88#define DSIM_MFLUSH_VS (1 << 29)
89/* This flag is valid only for exynos3250/3472/4415/5260/5430 */ 89/* This flag is valid only for exynos3250/3472/5260/5430 */
90#define DSIM_CLKLANE_STOP (1 << 30) 90#define DSIM_CLKLANE_STOP (1 << 30)
91 91
92/* DSIM_ESCMODE */ 92/* DSIM_ESCMODE */
@@ -473,17 +473,6 @@ static const struct exynos_dsi_driver_data exynos4_dsi_driver_data = {
473 .reg_values = reg_values, 473 .reg_values = reg_values,
474}; 474};
475 475
476static const struct exynos_dsi_driver_data exynos4415_dsi_driver_data = {
477 .reg_ofs = exynos_reg_ofs,
478 .plltmr_reg = 0x58,
479 .has_clklane_stop = 1,
480 .num_clks = 2,
481 .max_freq = 1000,
482 .wait_for_reset = 1,
483 .num_bits_resol = 11,
484 .reg_values = reg_values,
485};
486
487static const struct exynos_dsi_driver_data exynos5_dsi_driver_data = { 476static const struct exynos_dsi_driver_data exynos5_dsi_driver_data = {
488 .reg_ofs = exynos_reg_ofs, 477 .reg_ofs = exynos_reg_ofs,
489 .plltmr_reg = 0x58, 478 .plltmr_reg = 0x58,
@@ -521,8 +510,6 @@ static const struct of_device_id exynos_dsi_of_match[] = {
521 .data = &exynos3_dsi_driver_data }, 510 .data = &exynos3_dsi_driver_data },
522 { .compatible = "samsung,exynos4210-mipi-dsi", 511 { .compatible = "samsung,exynos4210-mipi-dsi",
523 .data = &exynos4_dsi_driver_data }, 512 .data = &exynos4_dsi_driver_data },
524 { .compatible = "samsung,exynos4415-mipi-dsi",
525 .data = &exynos4415_dsi_driver_data },
526 { .compatible = "samsung,exynos5410-mipi-dsi", 513 { .compatible = "samsung,exynos5410-mipi-dsi",
527 .data = &exynos5_dsi_driver_data }, 514 .data = &exynos5_dsi_driver_data },
528 { .compatible = "samsung,exynos5422-mipi-dsi", 515 { .compatible = "samsung,exynos5422-mipi-dsi",
@@ -979,7 +966,7 @@ static void exynos_dsi_send_to_fifo(struct exynos_dsi *dsi,
979 bool first = !xfer->tx_done; 966 bool first = !xfer->tx_done;
980 u32 reg; 967 u32 reg;
981 968
982 dev_dbg(dev, "< xfer %p: tx len %u, done %u, rx len %u, done %u\n", 969 dev_dbg(dev, "< xfer %pK: tx len %u, done %u, rx len %u, done %u\n",
983 xfer, length, xfer->tx_done, xfer->rx_len, xfer->rx_done); 970 xfer, length, xfer->tx_done, xfer->rx_len, xfer->rx_done);
984 971
985 if (length > DSI_TX_FIFO_SIZE) 972 if (length > DSI_TX_FIFO_SIZE)
@@ -1177,7 +1164,7 @@ static bool exynos_dsi_transfer_finish(struct exynos_dsi *dsi)
1177 spin_unlock_irqrestore(&dsi->transfer_lock, flags); 1164 spin_unlock_irqrestore(&dsi->transfer_lock, flags);
1178 1165
1179 dev_dbg(dsi->dev, 1166 dev_dbg(dsi->dev,
1180 "> xfer %p, tx_len %zu, tx_done %u, rx_len %u, rx_done %u\n", 1167 "> xfer %pK, tx_len %zu, tx_done %u, rx_len %u, rx_done %u\n",
1181 xfer, xfer->packet.payload_length, xfer->tx_done, xfer->rx_len, 1168 xfer, xfer->packet.payload_length, xfer->tx_done, xfer->rx_len,
1182 xfer->rx_done); 1169 xfer->rx_done);
1183 1170
@@ -1348,9 +1335,12 @@ static int exynos_dsi_register_te_irq(struct exynos_dsi *dsi)
1348 int te_gpio_irq; 1335 int te_gpio_irq;
1349 1336
1350 dsi->te_gpio = of_get_named_gpio(dsi->panel_node, "te-gpios", 0); 1337 dsi->te_gpio = of_get_named_gpio(dsi->panel_node, "te-gpios", 0);
1338 if (dsi->te_gpio == -ENOENT)
1339 return 0;
1340
1351 if (!gpio_is_valid(dsi->te_gpio)) { 1341 if (!gpio_is_valid(dsi->te_gpio)) {
1352 dev_err(dsi->dev, "no te-gpios specified\n");
1353 ret = dsi->te_gpio; 1342 ret = dsi->te_gpio;
1343 dev_err(dsi->dev, "cannot get te-gpios, %d\n", ret);
1354 goto out; 1344 goto out;
1355 } 1345 }
1356 1346
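
The te-gpios hunk encodes the usual optional-property convention: -ENOENT from of_get_named_gpio() means the property is simply absent, which is acceptable here, so the function returns success without registering a TE interrupt; any other invalid value is a genuine error and is propagated with a message. In outline, with a hypothetical helper:

#include <linux/gpio.h>
#include <linux/of_gpio.h>

/* Sketch: fetch an optional "te-gpios" property; absence disables
 * the feature rather than failing the probe.
 */
static int request_optional_te_gpio(struct device *dev,
				    struct device_node *np, int *gpio_out)
{
	int gpio = of_get_named_gpio(np, "te-gpios", 0);

	if (gpio == -ENOENT) {
		*gpio_out = -1;	/* feature not wired up: not an error */
		return 0;
	}
	if (!gpio_is_valid(gpio)) {
		dev_err(dev, "cannot get te-gpios, %d\n", gpio);
		return gpio;	/* real failure: propagate the code */
	}
	*gpio_out = gpio;
	return 0;
}
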
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 95871577015d..5b18b5c5fdf2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -1695,7 +1695,7 @@ static int fimc_probe(struct platform_device *pdev)
1695 goto err_put_clk; 1695 goto err_put_clk;
1696 } 1696 }
1697 1697
1698 DRM_DEBUG_KMS("id[%d]ippdrv[%p]\n", ctx->id, ippdrv); 1698 DRM_DEBUG_KMS("id[%d]ippdrv[%pK]\n", ctx->id, ippdrv);
1699 1699
1700 spin_lock_init(&ctx->lock); 1700 spin_lock_init(&ctx->lock);
1701 platform_set_drvdata(pdev, ctx); 1701 platform_set_drvdata(pdev, ctx);
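
This %p to %pK churn, repeated across the exynos debug prints that follow, is a hardening sweep rather than a functional change: plain %p prints the raw kernel address into the log, while %pK consults the kptr_restrict sysctl and shows zeros to readers who should not learn the kernel's memory layout. For instance:

#include <linux/printk.h>

static void log_node(const void *node)
{
	pr_debug("node[%p]\n", node);	/* raw kernel address in the log */
	pr_debug("node[%pK]\n", node);	/* masked according to
					 * /proc/sys/kernel/kptr_restrict */
}
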
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index a9fa444c6053..3f04d72c448d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -71,10 +71,10 @@
71#define TRIGCON 0x1A4 71#define TRIGCON 0x1A4
72#define TRGMODE_ENABLE (1 << 0) 72#define TRGMODE_ENABLE (1 << 0)
73#define SWTRGCMD_ENABLE (1 << 1) 73#define SWTRGCMD_ENABLE (1 << 1)
 74/* Exynos3250, 3472, 4415, 5260 5410, 5420 and 5422 only supported. */ 74/* Exynos3250, 3472, 5260, 5410, 5420 and 5422 only supported. */
75#define HWTRGEN_ENABLE (1 << 3) 75#define HWTRGEN_ENABLE (1 << 3)
76#define HWTRGMASK_ENABLE (1 << 4) 76#define HWTRGMASK_ENABLE (1 << 4)
77/* Exynos3250, 3472, 4415, 5260, 5420 and 5422 only supported. */ 77/* Exynos3250, 3472, 5260, 5420 and 5422 only supported. */
78#define HWTRIGEN_PER_ENABLE (1 << 31) 78#define HWTRIGEN_PER_ENABLE (1 << 31)
79 79
80/* display mode change control register except exynos4 */ 80/* display mode change control register except exynos4 */
@@ -138,18 +138,6 @@ static struct fimd_driver_data exynos4_fimd_driver_data = {
138 .has_vtsel = 1, 138 .has_vtsel = 1,
139}; 139};
140 140
141static struct fimd_driver_data exynos4415_fimd_driver_data = {
142 .timing_base = 0x20000,
143 .lcdblk_offset = 0x210,
144 .lcdblk_vt_shift = 10,
145 .lcdblk_bypass_shift = 1,
146 .trg_type = I80_HW_TRG,
147 .has_shadowcon = 1,
148 .has_vidoutcon = 1,
149 .has_vtsel = 1,
150 .has_trigger_per_te = 1,
151};
152
153static struct fimd_driver_data exynos5_fimd_driver_data = { 141static struct fimd_driver_data exynos5_fimd_driver_data = {
154 .timing_base = 0x20000, 142 .timing_base = 0x20000,
155 .lcdblk_offset = 0x214, 143 .lcdblk_offset = 0x214,
@@ -210,8 +198,6 @@ static const struct of_device_id fimd_driver_dt_match[] = {
210 .data = &exynos3_fimd_driver_data }, 198 .data = &exynos3_fimd_driver_data },
211 { .compatible = "samsung,exynos4210-fimd", 199 { .compatible = "samsung,exynos4210-fimd",
212 .data = &exynos4_fimd_driver_data }, 200 .data = &exynos4_fimd_driver_data },
213 { .compatible = "samsung,exynos4415-fimd",
214 .data = &exynos4415_fimd_driver_data },
215 { .compatible = "samsung,exynos5250-fimd", 201 { .compatible = "samsung,exynos5250-fimd",
216 .data = &exynos5_fimd_driver_data }, 202 .data = &exynos5_fimd_driver_data },
217 { .compatible = "samsung,exynos5420-fimd", 203 { .compatible = "samsung,exynos5420-fimd",
@@ -257,7 +243,7 @@ static int fimd_enable_vblank(struct exynos_drm_crtc *crtc)
257 val |= VIDINTCON0_INT_FRAME; 243 val |= VIDINTCON0_INT_FRAME;
258 244
259 val &= ~VIDINTCON0_FRAMESEL0_MASK; 245 val &= ~VIDINTCON0_FRAMESEL0_MASK;
260 val |= VIDINTCON0_FRAMESEL0_VSYNC; 246 val |= VIDINTCON0_FRAMESEL0_FRONTPORCH;
261 val &= ~VIDINTCON0_FRAMESEL1_MASK; 247 val &= ~VIDINTCON0_FRAMESEL1_MASK;
262 val |= VIDINTCON0_FRAMESEL1_NONE; 248 val |= VIDINTCON0_FRAMESEL1_NONE;
263 } 249 }
@@ -723,6 +709,8 @@ static void fimd_atomic_flush(struct exynos_drm_crtc *crtc)
723 709
724 for (i = 0; i < WINDOWS_NR; i++) 710 for (i = 0; i < WINDOWS_NR; i++)
725 fimd_shadow_protect_win(ctx, i, false); 711 fimd_shadow_protect_win(ctx, i, false);
712
713 exynos_crtc_handle_event(crtc);
726} 714}
727 715
728static void fimd_update_plane(struct exynos_drm_crtc *crtc, 716static void fimd_update_plane(struct exynos_drm_crtc *crtc,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 4c28f7ffcc4d..55a1579d11b3 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -218,7 +218,7 @@ static struct exynos_drm_gem *exynos_drm_gem_init(struct drm_device *dev,
218 return ERR_PTR(ret); 218 return ERR_PTR(ret);
219 } 219 }
220 220
221 DRM_DEBUG_KMS("created file object = %p\n", obj->filp); 221 DRM_DEBUG_KMS("created file object = %pK\n", obj->filp);
222 222
223 return exynos_gem; 223 return exynos_gem;
224} 224}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index bef57987759d..0506b2b17ac1 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -1723,7 +1723,7 @@ static int gsc_probe(struct platform_device *pdev)
1723 return ret; 1723 return ret;
1724 } 1724 }
1725 1725
1726 DRM_DEBUG_KMS("id[%d]ippdrv[%p]\n", ctx->id, ippdrv); 1726 DRM_DEBUG_KMS("id[%d]ippdrv[%pK]\n", ctx->id, ippdrv);
1727 1727
1728 mutex_init(&ctx->lock); 1728 mutex_init(&ctx->lock);
1729 platform_set_drvdata(pdev, ctx); 1729 platform_set_drvdata(pdev, ctx);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index 9c84ee76f18a..3edda18cc2d2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -208,7 +208,7 @@ static struct exynos_drm_ippdrv *ipp_find_drv_by_handle(u32 prop_id)
 208 * e.g. PAUSE state, queue buf, command control. 208 * e.g. PAUSE state, queue buf, command control.
209 */ 209 */
210 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) { 210 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
211 DRM_DEBUG_KMS("count[%d]ippdrv[%p]\n", count++, ippdrv); 211 DRM_DEBUG_KMS("count[%d]ippdrv[%pK]\n", count++, ippdrv);
212 212
213 mutex_lock(&ippdrv->cmd_lock); 213 mutex_lock(&ippdrv->cmd_lock);
214 list_for_each_entry(c_node, &ippdrv->cmd_list, list) { 214 list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
@@ -388,7 +388,7 @@ int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
388 } 388 }
389 property->prop_id = ret; 389 property->prop_id = ret;
390 390
391 DRM_DEBUG_KMS("created prop_id[%d]cmd[%d]ippdrv[%p]\n", 391 DRM_DEBUG_KMS("created prop_id[%d]cmd[%d]ippdrv[%pK]\n",
392 property->prop_id, property->cmd, ippdrv); 392 property->prop_id, property->cmd, ippdrv);
393 393
394 /* stored property information and ippdrv in private data */ 394 /* stored property information and ippdrv in private data */
@@ -518,7 +518,7 @@ static int ipp_put_mem_node(struct drm_device *drm_dev,
518{ 518{
519 int i; 519 int i;
520 520
521 DRM_DEBUG_KMS("node[%p]\n", m_node); 521 DRM_DEBUG_KMS("node[%pK]\n", m_node);
522 522
523 if (!m_node) { 523 if (!m_node) {
524 DRM_ERROR("invalid dequeue node.\n"); 524 DRM_ERROR("invalid dequeue node.\n");
@@ -562,7 +562,7 @@ static struct drm_exynos_ipp_mem_node
562 m_node->buf_id = qbuf->buf_id; 562 m_node->buf_id = qbuf->buf_id;
563 INIT_LIST_HEAD(&m_node->list); 563 INIT_LIST_HEAD(&m_node->list);
564 564
565 DRM_DEBUG_KMS("m_node[%p]ops_id[%d]\n", m_node, qbuf->ops_id); 565 DRM_DEBUG_KMS("m_node[%pK]ops_id[%d]\n", m_node, qbuf->ops_id);
566 DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]\n", qbuf->prop_id, m_node->buf_id); 566 DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]\n", qbuf->prop_id, m_node->buf_id);
567 567
568 for_each_ipp_planar(i) { 568 for_each_ipp_planar(i) {
@@ -659,7 +659,7 @@ static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node,
659 659
660 mutex_lock(&c_node->event_lock); 660 mutex_lock(&c_node->event_lock);
661 list_for_each_entry_safe(e, te, &c_node->event_list, base.link) { 661 list_for_each_entry_safe(e, te, &c_node->event_list, base.link) {
662 DRM_DEBUG_KMS("count[%d]e[%p]\n", count++, e); 662 DRM_DEBUG_KMS("count[%d]e[%pK]\n", count++, e);
663 663
664 /* 664 /*
665 * qbuf == NULL condition means all event deletion. 665 * qbuf == NULL condition means all event deletion.
@@ -750,7 +750,7 @@ static struct drm_exynos_ipp_mem_node
750 750
751 /* find memory node from memory list */ 751 /* find memory node from memory list */
752 list_for_each_entry(m_node, head, list) { 752 list_for_each_entry(m_node, head, list) {
753 DRM_DEBUG_KMS("count[%d]m_node[%p]\n", count++, m_node); 753 DRM_DEBUG_KMS("count[%d]m_node[%pK]\n", count++, m_node);
754 754
755 /* compare buffer id */ 755 /* compare buffer id */
756 if (m_node->buf_id == qbuf->buf_id) 756 if (m_node->buf_id == qbuf->buf_id)
@@ -767,7 +767,7 @@ static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv,
767 struct exynos_drm_ipp_ops *ops = NULL; 767 struct exynos_drm_ipp_ops *ops = NULL;
768 int ret = 0; 768 int ret = 0;
769 769
770 DRM_DEBUG_KMS("node[%p]\n", m_node); 770 DRM_DEBUG_KMS("node[%pK]\n", m_node);
771 771
772 if (!m_node) { 772 if (!m_node) {
773 DRM_ERROR("invalid queue node.\n"); 773 DRM_ERROR("invalid queue node.\n");
@@ -1232,7 +1232,7 @@ static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
1232 m_node = list_first_entry(head, 1232 m_node = list_first_entry(head,
1233 struct drm_exynos_ipp_mem_node, list); 1233 struct drm_exynos_ipp_mem_node, list);
1234 1234
1235 DRM_DEBUG_KMS("m_node[%p]\n", m_node); 1235 DRM_DEBUG_KMS("m_node[%pK]\n", m_node);
1236 1236
1237 ret = ipp_set_mem_node(ippdrv, c_node, m_node); 1237 ret = ipp_set_mem_node(ippdrv, c_node, m_node);
1238 if (ret) { 1238 if (ret) {
@@ -1601,7 +1601,7 @@ static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
1601 } 1601 }
1602 ippdrv->prop_list.ipp_id = ret; 1602 ippdrv->prop_list.ipp_id = ret;
1603 1603
1604 DRM_DEBUG_KMS("count[%d]ippdrv[%p]ipp_id[%d]\n", 1604 DRM_DEBUG_KMS("count[%d]ippdrv[%pK]ipp_id[%d]\n",
1605 count++, ippdrv, ret); 1605 count++, ippdrv, ret);
1606 1606
1607 /* store parent device for node */ 1607 /* store parent device for node */
@@ -1659,7 +1659,7 @@ static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev,
1659 1659
1660 file_priv->ipp_dev = dev; 1660 file_priv->ipp_dev = dev;
1661 1661
1662 DRM_DEBUG_KMS("done priv[%p]\n", dev); 1662 DRM_DEBUG_KMS("done priv[%pK]\n", dev);
1663 1663
1664 return 0; 1664 return 0;
1665} 1665}
@@ -1676,7 +1676,7 @@ static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
1676 mutex_lock(&ippdrv->cmd_lock); 1676 mutex_lock(&ippdrv->cmd_lock);
1677 list_for_each_entry_safe(c_node, tc_node, 1677 list_for_each_entry_safe(c_node, tc_node,
1678 &ippdrv->cmd_list, list) { 1678 &ippdrv->cmd_list, list) {
1679 DRM_DEBUG_KMS("count[%d]ippdrv[%p]\n", 1679 DRM_DEBUG_KMS("count[%d]ippdrv[%pK]\n",
1680 count++, ippdrv); 1680 count++, ippdrv);
1681 1681
1682 if (c_node->filp == file) { 1682 if (c_node->filp == file) {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index 6591e406084c..79282a820ecc 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -748,7 +748,7 @@ static int rotator_probe(struct platform_device *pdev)
748 goto err_ippdrv_register; 748 goto err_ippdrv_register;
749 } 749 }
750 750
751 DRM_DEBUG_KMS("ippdrv[%p]\n", ippdrv); 751 DRM_DEBUG_KMS("ippdrv[%pK]\n", ippdrv);
752 752
753 platform_set_drvdata(pdev, rot); 753 platform_set_drvdata(pdev, rot);
754 754
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 57fe514d5c5b..5d9a62a87eec 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -170,6 +170,7 @@ static const struct exynos_drm_crtc_ops vidi_crtc_ops = {
170 .enable_vblank = vidi_enable_vblank, 170 .enable_vblank = vidi_enable_vblank,
171 .disable_vblank = vidi_disable_vblank, 171 .disable_vblank = vidi_disable_vblank,
172 .update_plane = vidi_update_plane, 172 .update_plane = vidi_update_plane,
173 .atomic_flush = exynos_crtc_handle_event,
173}; 174};
174 175
175static void vidi_fake_vblank_timer(unsigned long arg) 176static void vidi_fake_vblank_timer(unsigned long arg)
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 72143ac10525..25edb635a197 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -1012,6 +1012,7 @@ static void mixer_atomic_flush(struct exynos_drm_crtc *crtc)
1012 return; 1012 return;
1013 1013
1014 mixer_vsync_set_update(mixer_ctx, true); 1014 mixer_vsync_set_update(mixer_ctx, true);
1015 exynos_crtc_handle_event(crtc);
1015} 1016}
1016 1017
1017static void mixer_enable(struct exynos_drm_crtc *crtc) 1018static void mixer_enable(struct exynos_drm_crtc *crtc)
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
index 3b6caaca9751..325618d969fe 100644
--- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -242,7 +242,7 @@ static int alloc_resource(struct intel_vgpu *vgpu,
242 const char *item; 242 const char *item;
243 243
244 if (!param->low_gm_sz || !param->high_gm_sz || !param->fence_sz) { 244 if (!param->low_gm_sz || !param->high_gm_sz || !param->fence_sz) {
245 gvt_err("Invalid vGPU creation params\n"); 245 gvt_vgpu_err("Invalid vGPU creation params\n");
246 return -EINVAL; 246 return -EINVAL;
247 } 247 }
248 248
@@ -285,9 +285,9 @@ static int alloc_resource(struct intel_vgpu *vgpu,
285 return 0; 285 return 0;
286 286
287no_enough_resource: 287no_enough_resource:
288 gvt_err("vgpu%d: fail to allocate resource %s\n", vgpu->id, item); 288 gvt_vgpu_err("fail to allocate resource %s\n", item);
289 gvt_err("vgpu%d: request %luMB avail %luMB max %luMB taken %luMB\n", 289 gvt_vgpu_err("request %luMB avail %luMB max %luMB taken %luMB\n",
290 vgpu->id, BYTES_TO_MB(request), BYTES_TO_MB(avail), 290 BYTES_TO_MB(request), BYTES_TO_MB(avail),
291 BYTES_TO_MB(max), BYTES_TO_MB(taken)); 291 BYTES_TO_MB(max), BYTES_TO_MB(taken));
292 return -ENOSPC; 292 return -ENOSPC;
293} 293}
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 7ae6e2b241c8..2b92cc8a7d1a 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -817,6 +817,25 @@ static bool is_shadowed_mmio(unsigned int offset)
817 return ret; 817 return ret;
818} 818}
819 819
820static inline bool is_force_nonpriv_mmio(unsigned int offset)
821{
822 return (offset >= 0x24d0 && offset < 0x2500);
823}
824
825static int force_nonpriv_reg_handler(struct parser_exec_state *s,
826 unsigned int offset, unsigned int index)
827{
828 struct intel_gvt *gvt = s->vgpu->gvt;
829 unsigned int data = cmd_val(s, index + 1);
830
831 if (!intel_gvt_in_force_nonpriv_whitelist(gvt, data)) {
832 gvt_err("Unexpected forcenonpriv 0x%x LRI write, value=0x%x\n",
833 offset, data);
834 return -EINVAL;
835 }
836 return 0;
837}
838
 820static int cmd_reg_handler(struct parser_exec_state *s, 839static int cmd_reg_handler(struct parser_exec_state *s,
 821 unsigned int offset, unsigned int index, char *cmd) 840 unsigned int offset, unsigned int index, char *cmd)
822{ 841{
@@ -824,23 +843,26 @@ static int cmd_reg_handler(struct parser_exec_state *s,
824 struct intel_gvt *gvt = vgpu->gvt; 843 struct intel_gvt *gvt = vgpu->gvt;
825 844
826 if (offset + 4 > gvt->device_info.mmio_size) { 845 if (offset + 4 > gvt->device_info.mmio_size) {
827 gvt_err("%s access to (%x) outside of MMIO range\n", 846 gvt_vgpu_err("%s access to (%x) outside of MMIO range\n",
828 cmd, offset); 847 cmd, offset);
829 return -EINVAL; 848 return -EINVAL;
830 } 849 }
831 850
832 if (!intel_gvt_mmio_is_cmd_access(gvt, offset)) { 851 if (!intel_gvt_mmio_is_cmd_access(gvt, offset)) {
833 gvt_err("vgpu%d: %s access to non-render register (%x)\n", 852 gvt_vgpu_err("%s access to non-render register (%x)\n",
834 s->vgpu->id, cmd, offset); 853 cmd, offset);
835 return 0; 854 return 0;
836 } 855 }
837 856
838 if (is_shadowed_mmio(offset)) { 857 if (is_shadowed_mmio(offset)) {
839 gvt_err("vgpu%d: found access of shadowed MMIO %x\n", 858 gvt_vgpu_err("found access of shadowed MMIO %x\n", offset);
840 s->vgpu->id, offset);
841 return 0; 859 return 0;
842 } 860 }
843 861
862 if (is_force_nonpriv_mmio(offset) &&
863 force_nonpriv_reg_handler(s, offset, index))
864 return -EINVAL;
865
844 if (offset == i915_mmio_reg_offset(DERRMR) || 866 if (offset == i915_mmio_reg_offset(DERRMR) ||
845 offset == i915_mmio_reg_offset(FORCEWAKE_MT)) { 867 offset == i915_mmio_reg_offset(FORCEWAKE_MT)) {
846 /* Writing to HW VGT_PVINFO_PAGE offset will be discarded */ 868 /* Writing to HW VGT_PVINFO_PAGE offset will be discarded */
@@ -1008,7 +1030,7 @@ static int cmd_handler_pipe_control(struct parser_exec_state *s)
1008 ret = cmd_reg_handler(s, 0x2358, 1, "pipe_ctrl"); 1030 ret = cmd_reg_handler(s, 0x2358, 1, "pipe_ctrl");
1009 else if (post_sync == 1) { 1031 else if (post_sync == 1) {
1010 /* check ggtt*/ 1032 /* check ggtt*/
1011 if ((cmd_val(s, 2) & (1 << 2))) { 1033 if ((cmd_val(s, 1) & PIPE_CONTROL_GLOBAL_GTT_IVB)) {
1012 gma = cmd_val(s, 2) & GENMASK(31, 3); 1034 gma = cmd_val(s, 2) & GENMASK(31, 3);
1013 if (gmadr_bytes == 8) 1035 if (gmadr_bytes == 8)
1014 gma |= (cmd_gma_hi(s, 3)) << 32; 1036 gma |= (cmd_gma_hi(s, 3)) << 32;
@@ -1129,6 +1151,7 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s,
1129 struct mi_display_flip_command_info *info) 1151 struct mi_display_flip_command_info *info)
1130{ 1152{
1131 struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv; 1153 struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
1154 struct intel_vgpu *vgpu = s->vgpu;
1132 u32 dword0 = cmd_val(s, 0); 1155 u32 dword0 = cmd_val(s, 0);
1133 u32 dword1 = cmd_val(s, 1); 1156 u32 dword1 = cmd_val(s, 1);
1134 u32 dword2 = cmd_val(s, 2); 1157 u32 dword2 = cmd_val(s, 2);
@@ -1167,7 +1190,7 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s,
1167 break; 1190 break;
1168 1191
1169 default: 1192 default:
1170 gvt_err("unknown plane code %d\n", plane); 1193 gvt_vgpu_err("unknown plane code %d\n", plane);
1171 return -EINVAL; 1194 return -EINVAL;
1172 } 1195 }
1173 1196
@@ -1274,25 +1297,26 @@ static int update_plane_mmio_from_mi_display_flip(
1274static int cmd_handler_mi_display_flip(struct parser_exec_state *s) 1297static int cmd_handler_mi_display_flip(struct parser_exec_state *s)
1275{ 1298{
1276 struct mi_display_flip_command_info info; 1299 struct mi_display_flip_command_info info;
1300 struct intel_vgpu *vgpu = s->vgpu;
1277 int ret; 1301 int ret;
1278 int i; 1302 int i;
1279 int len = cmd_length(s); 1303 int len = cmd_length(s);
1280 1304
1281 ret = decode_mi_display_flip(s, &info); 1305 ret = decode_mi_display_flip(s, &info);
1282 if (ret) { 1306 if (ret) {
1283 gvt_err("fail to decode MI display flip command\n"); 1307 gvt_vgpu_err("fail to decode MI display flip command\n");
1284 return ret; 1308 return ret;
1285 } 1309 }
1286 1310
1287 ret = check_mi_display_flip(s, &info); 1311 ret = check_mi_display_flip(s, &info);
1288 if (ret) { 1312 if (ret) {
1289 gvt_err("invalid MI display flip command\n"); 1313 gvt_vgpu_err("invalid MI display flip command\n");
1290 return ret; 1314 return ret;
1291 } 1315 }
1292 1316
1293 ret = update_plane_mmio_from_mi_display_flip(s, &info); 1317 ret = update_plane_mmio_from_mi_display_flip(s, &info);
1294 if (ret) { 1318 if (ret) {
1295 gvt_err("fail to update plane mmio\n"); 1319 gvt_vgpu_err("fail to update plane mmio\n");
1296 return ret; 1320 return ret;
1297 } 1321 }
1298 1322
@@ -1350,7 +1374,8 @@ static inline int cmd_address_audit(struct parser_exec_state *s,
1350 int ret; 1374 int ret;
1351 1375
1352 if (op_size > max_surface_size) { 1376 if (op_size > max_surface_size) {
1353 gvt_err("command address audit fail name %s\n", s->info->name); 1377 gvt_vgpu_err("command address audit fail name %s\n",
1378 s->info->name);
1354 return -EINVAL; 1379 return -EINVAL;
1355 } 1380 }
1356 1381
@@ -1367,7 +1392,7 @@ static inline int cmd_address_audit(struct parser_exec_state *s,
1367 } 1392 }
1368 return 0; 1393 return 0;
1369err: 1394err:
1370 gvt_err("cmd_parser: Malicious %s detected, addr=0x%lx, len=%d!\n", 1395 gvt_vgpu_err("cmd_parser: Malicious %s detected, addr=0x%lx, len=%d!\n",
1371 s->info->name, guest_gma, op_size); 1396 s->info->name, guest_gma, op_size);
1372 1397
1373 pr_err("cmd dump: "); 1398 pr_err("cmd dump: ");
@@ -1412,8 +1437,10 @@ static int cmd_handler_mi_store_data_imm(struct parser_exec_state *s)
1412 1437
1413static inline int unexpected_cmd(struct parser_exec_state *s) 1438static inline int unexpected_cmd(struct parser_exec_state *s)
1414{ 1439{
1415 gvt_err("vgpu%d: Unexpected %s in command buffer!\n", 1440 struct intel_vgpu *vgpu = s->vgpu;
1416 s->vgpu->id, s->info->name); 1441
1442 gvt_vgpu_err("Unexpected %s in command buffer!\n", s->info->name);
1443
1417 return -EINVAL; 1444 return -EINVAL;
1418} 1445}
1419 1446
@@ -1516,7 +1543,7 @@ static int copy_gma_to_hva(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm,
1516 while (gma != end_gma) { 1543 while (gma != end_gma) {
1517 gpa = intel_vgpu_gma_to_gpa(mm, gma); 1544 gpa = intel_vgpu_gma_to_gpa(mm, gma);
1518 if (gpa == INTEL_GVT_INVALID_ADDR) { 1545 if (gpa == INTEL_GVT_INVALID_ADDR) {
1519 gvt_err("invalid gma address: %lx\n", gma); 1546 gvt_vgpu_err("invalid gma address: %lx\n", gma);
1520 return -EFAULT; 1547 return -EFAULT;
1521 } 1548 }
1522 1549
@@ -1557,6 +1584,7 @@ static uint32_t find_bb_size(struct parser_exec_state *s)
1557 uint32_t bb_size = 0; 1584 uint32_t bb_size = 0;
1558 uint32_t cmd_len = 0; 1585 uint32_t cmd_len = 0;
1559 bool met_bb_end = false; 1586 bool met_bb_end = false;
1587 struct intel_vgpu *vgpu = s->vgpu;
1560 u32 cmd; 1588 u32 cmd;
1561 1589
1562 /* get the start gm address of the batch buffer */ 1590 /* get the start gm address of the batch buffer */
@@ -1565,7 +1593,7 @@ static uint32_t find_bb_size(struct parser_exec_state *s)
1565 1593
1566 info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id); 1594 info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
1567 if (info == NULL) { 1595 if (info == NULL) {
1568 gvt_err("unknown cmd 0x%x, opcode=0x%x\n", 1596 gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
1569 cmd, get_opcode(cmd, s->ring_id)); 1597 cmd, get_opcode(cmd, s->ring_id));
1570 return -EINVAL; 1598 return -EINVAL;
1571 } 1599 }
@@ -1574,7 +1602,7 @@ static uint32_t find_bb_size(struct parser_exec_state *s)
1574 gma, gma + 4, &cmd); 1602 gma, gma + 4, &cmd);
1575 info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id); 1603 info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
1576 if (info == NULL) { 1604 if (info == NULL) {
1577 gvt_err("unknown cmd 0x%x, opcode=0x%x\n", 1605 gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
1578 cmd, get_opcode(cmd, s->ring_id)); 1606 cmd, get_opcode(cmd, s->ring_id));
1579 return -EINVAL; 1607 return -EINVAL;
1580 } 1608 }
@@ -1599,6 +1627,7 @@ static uint32_t find_bb_size(struct parser_exec_state *s)
1599static int perform_bb_shadow(struct parser_exec_state *s) 1627static int perform_bb_shadow(struct parser_exec_state *s)
1600{ 1628{
1601 struct intel_shadow_bb_entry *entry_obj; 1629 struct intel_shadow_bb_entry *entry_obj;
1630 struct intel_vgpu *vgpu = s->vgpu;
1602 unsigned long gma = 0; 1631 unsigned long gma = 0;
1603 uint32_t bb_size; 1632 uint32_t bb_size;
1604 void *dst = NULL; 1633 void *dst = NULL;
@@ -1633,7 +1662,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
1633 1662
1634 ret = i915_gem_object_set_to_cpu_domain(entry_obj->obj, false); 1663 ret = i915_gem_object_set_to_cpu_domain(entry_obj->obj, false);
1635 if (ret) { 1664 if (ret) {
1636 gvt_err("failed to set shadow batch to CPU\n"); 1665 gvt_vgpu_err("failed to set shadow batch to CPU\n");
1637 goto unmap_src; 1666 goto unmap_src;
1638 } 1667 }
1639 1668
@@ -1645,7 +1674,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
1645 gma, gma + bb_size, 1674 gma, gma + bb_size,
1646 dst); 1675 dst);
1647 if (ret) { 1676 if (ret) {
1648 gvt_err("fail to copy guest ring buffer\n"); 1677 gvt_vgpu_err("fail to copy guest ring buffer\n");
1649 goto unmap_src; 1678 goto unmap_src;
1650 } 1679 }
1651 1680
@@ -1676,15 +1705,16 @@ static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s)
1676{ 1705{
1677 bool second_level; 1706 bool second_level;
1678 int ret = 0; 1707 int ret = 0;
1708 struct intel_vgpu *vgpu = s->vgpu;
1679 1709
1680 if (s->buf_type == BATCH_BUFFER_2ND_LEVEL) { 1710 if (s->buf_type == BATCH_BUFFER_2ND_LEVEL) {
1681 gvt_err("Found MI_BATCH_BUFFER_START in 2nd level BB\n"); 1711 gvt_vgpu_err("Found MI_BATCH_BUFFER_START in 2nd level BB\n");
1682 return -EINVAL; 1712 return -EINVAL;
1683 } 1713 }
1684 1714
1685 second_level = BATCH_BUFFER_2ND_LEVEL_BIT(cmd_val(s, 0)) == 1; 1715 second_level = BATCH_BUFFER_2ND_LEVEL_BIT(cmd_val(s, 0)) == 1;
1686 if (second_level && (s->buf_type != BATCH_BUFFER_INSTRUCTION)) { 1716 if (second_level && (s->buf_type != BATCH_BUFFER_INSTRUCTION)) {
1687 gvt_err("Jumping to 2nd level BB from RB is not allowed\n"); 1717 gvt_vgpu_err("Jumping to 2nd level BB from RB is not allowed\n");
1688 return -EINVAL; 1718 return -EINVAL;
1689 } 1719 }
1690 1720
@@ -1702,7 +1732,7 @@ static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s)
1702 if (batch_buffer_needs_scan(s)) { 1732 if (batch_buffer_needs_scan(s)) {
1703 ret = perform_bb_shadow(s); 1733 ret = perform_bb_shadow(s);
1704 if (ret < 0) 1734 if (ret < 0)
1705 gvt_err("invalid shadow batch buffer\n"); 1735 gvt_vgpu_err("invalid shadow batch buffer\n");
1706 } else { 1736 } else {
 1707 /* emulate a batch buffer end so the return is handled correctly */ 1737 /* emulate a batch buffer end so the return is handled correctly */
1708 ret = cmd_handler_mi_batch_buffer_end(s); 1738 ret = cmd_handler_mi_batch_buffer_end(s);
@@ -2429,6 +2459,7 @@ static int cmd_parser_exec(struct parser_exec_state *s)
2429 int ret = 0; 2459 int ret = 0;
2430 cycles_t t0, t1, t2; 2460 cycles_t t0, t1, t2;
2431 struct parser_exec_state s_before_advance_custom; 2461 struct parser_exec_state s_before_advance_custom;
2462 struct intel_vgpu *vgpu = s->vgpu;
2432 2463
2433 t0 = get_cycles(); 2464 t0 = get_cycles();
2434 2465
@@ -2436,7 +2467,7 @@ static int cmd_parser_exec(struct parser_exec_state *s)
2436 2467
2437 info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id); 2468 info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
2438 if (info == NULL) { 2469 if (info == NULL) {
2439 gvt_err("unknown cmd 0x%x, opcode=0x%x\n", 2470 gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
2440 cmd, get_opcode(cmd, s->ring_id)); 2471 cmd, get_opcode(cmd, s->ring_id));
2441 return -EINVAL; 2472 return -EINVAL;
2442 } 2473 }
@@ -2452,7 +2483,7 @@ static int cmd_parser_exec(struct parser_exec_state *s)
2452 if (info->handler) { 2483 if (info->handler) {
2453 ret = info->handler(s); 2484 ret = info->handler(s);
2454 if (ret < 0) { 2485 if (ret < 0) {
2455 gvt_err("%s handler error\n", info->name); 2486 gvt_vgpu_err("%s handler error\n", info->name);
2456 return ret; 2487 return ret;
2457 } 2488 }
2458 } 2489 }
@@ -2463,7 +2494,7 @@ static int cmd_parser_exec(struct parser_exec_state *s)
2463 if (!(info->flag & F_IP_ADVANCE_CUSTOM)) { 2494 if (!(info->flag & F_IP_ADVANCE_CUSTOM)) {
2464 ret = cmd_advance_default(s); 2495 ret = cmd_advance_default(s);
2465 if (ret) { 2496 if (ret) {
2466 gvt_err("%s IP advance error\n", info->name); 2497 gvt_vgpu_err("%s IP advance error\n", info->name);
2467 return ret; 2498 return ret;
2468 } 2499 }
2469 } 2500 }
@@ -2486,6 +2517,7 @@ static int command_scan(struct parser_exec_state *s,
2486 2517
2487 unsigned long gma_head, gma_tail, gma_bottom; 2518 unsigned long gma_head, gma_tail, gma_bottom;
2488 int ret = 0; 2519 int ret = 0;
2520 struct intel_vgpu *vgpu = s->vgpu;
2489 2521
2490 gma_head = rb_start + rb_head; 2522 gma_head = rb_start + rb_head;
2491 gma_tail = rb_start + rb_tail; 2523 gma_tail = rb_start + rb_tail;
@@ -2497,7 +2529,7 @@ static int command_scan(struct parser_exec_state *s,
2497 if (s->buf_type == RING_BUFFER_INSTRUCTION) { 2529 if (s->buf_type == RING_BUFFER_INSTRUCTION) {
2498 if (!(s->ip_gma >= rb_start) || 2530 if (!(s->ip_gma >= rb_start) ||
2499 !(s->ip_gma < gma_bottom)) { 2531 !(s->ip_gma < gma_bottom)) {
2500 gvt_err("ip_gma %lx out of ring scope." 2532 gvt_vgpu_err("ip_gma %lx out of ring scope."
2501 "(base:0x%lx, bottom: 0x%lx)\n", 2533 "(base:0x%lx, bottom: 0x%lx)\n",
2502 s->ip_gma, rb_start, 2534 s->ip_gma, rb_start,
2503 gma_bottom); 2535 gma_bottom);
@@ -2505,7 +2537,7 @@ static int command_scan(struct parser_exec_state *s,
2505 return -EINVAL; 2537 return -EINVAL;
2506 } 2538 }
2507 if (gma_out_of_range(s->ip_gma, gma_head, gma_tail)) { 2539 if (gma_out_of_range(s->ip_gma, gma_head, gma_tail)) {
2508 gvt_err("ip_gma %lx out of range." 2540 gvt_vgpu_err("ip_gma %lx out of range."
2509 "base 0x%lx head 0x%lx tail 0x%lx\n", 2541 "base 0x%lx head 0x%lx tail 0x%lx\n",
2510 s->ip_gma, rb_start, 2542 s->ip_gma, rb_start,
2511 rb_head, rb_tail); 2543 rb_head, rb_tail);
@@ -2515,7 +2547,7 @@ static int command_scan(struct parser_exec_state *s,
2515 } 2547 }
2516 ret = cmd_parser_exec(s); 2548 ret = cmd_parser_exec(s);
2517 if (ret) { 2549 if (ret) {
2518 gvt_err("cmd parser error\n"); 2550 gvt_vgpu_err("cmd parser error\n");
2519 parser_exec_state_dump(s); 2551 parser_exec_state_dump(s);
2520 break; 2552 break;
2521 } 2553 }
@@ -2639,7 +2671,7 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
2639 gma_head, gma_top, 2671 gma_head, gma_top,
2640 workload->shadow_ring_buffer_va); 2672 workload->shadow_ring_buffer_va);
2641 if (ret) { 2673 if (ret) {
2642 gvt_err("fail to copy guest ring buffer\n"); 2674 gvt_vgpu_err("fail to copy guest ring buffer\n");
2643 return ret; 2675 return ret;
2644 } 2676 }
2645 copy_len = gma_top - gma_head; 2677 copy_len = gma_top - gma_head;
@@ -2651,7 +2683,7 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
2651 gma_head, gma_tail, 2683 gma_head, gma_tail,
2652 workload->shadow_ring_buffer_va + copy_len); 2684 workload->shadow_ring_buffer_va + copy_len);
2653 if (ret) { 2685 if (ret) {
2654 gvt_err("fail to copy guest ring buffer\n"); 2686 gvt_vgpu_err("fail to copy guest ring buffer\n");
2655 return ret; 2687 return ret;
2656 } 2688 }
2657 ring->tail += workload->rb_len; 2689 ring->tail += workload->rb_len;
@@ -2662,16 +2694,17 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
2662int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload) 2694int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
2663{ 2695{
2664 int ret; 2696 int ret;
2697 struct intel_vgpu *vgpu = workload->vgpu;
2665 2698
2666 ret = shadow_workload_ring_buffer(workload); 2699 ret = shadow_workload_ring_buffer(workload);
2667 if (ret) { 2700 if (ret) {
2668 gvt_err("fail to shadow workload ring_buffer\n"); 2701 gvt_vgpu_err("fail to shadow workload ring_buffer\n");
2669 return ret; 2702 return ret;
2670 } 2703 }
2671 2704
2672 ret = scan_workload(workload); 2705 ret = scan_workload(workload);
2673 if (ret) { 2706 if (ret) {
2674 gvt_err("scan workload error\n"); 2707 gvt_vgpu_err("scan workload error\n");
2675 return ret; 2708 return ret;
2676 } 2709 }
2677 return 0; 2710 return 0;
@@ -2681,6 +2714,7 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
2681{ 2714{
2682 int ctx_size = wa_ctx->indirect_ctx.size; 2715 int ctx_size = wa_ctx->indirect_ctx.size;
2683 unsigned long guest_gma = wa_ctx->indirect_ctx.guest_gma; 2716 unsigned long guest_gma = wa_ctx->indirect_ctx.guest_gma;
2717 struct intel_vgpu *vgpu = wa_ctx->workload->vgpu;
2684 struct drm_i915_gem_object *obj; 2718 struct drm_i915_gem_object *obj;
2685 int ret = 0; 2719 int ret = 0;
2686 void *map; 2720 void *map;
@@ -2694,14 +2728,14 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
2694 /* get the va of the shadow batch buffer */ 2728 /* get the va of the shadow batch buffer */
2695 map = i915_gem_object_pin_map(obj, I915_MAP_WB); 2729 map = i915_gem_object_pin_map(obj, I915_MAP_WB);
2696 if (IS_ERR(map)) { 2730 if (IS_ERR(map)) {
2697 gvt_err("failed to vmap shadow indirect ctx\n"); 2731 gvt_vgpu_err("failed to vmap shadow indirect ctx\n");
2698 ret = PTR_ERR(map); 2732 ret = PTR_ERR(map);
2699 goto put_obj; 2733 goto put_obj;
2700 } 2734 }
2701 2735
2702 ret = i915_gem_object_set_to_cpu_domain(obj, false); 2736 ret = i915_gem_object_set_to_cpu_domain(obj, false);
2703 if (ret) { 2737 if (ret) {
2704 gvt_err("failed to set shadow indirect ctx to CPU\n"); 2738 gvt_vgpu_err("failed to set shadow indirect ctx to CPU\n");
2705 goto unmap_src; 2739 goto unmap_src;
2706 } 2740 }
2707 2741
@@ -2710,7 +2744,7 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
2710 guest_gma, guest_gma + ctx_size, 2744 guest_gma, guest_gma + ctx_size,
2711 map); 2745 map);
2712 if (ret) { 2746 if (ret) {
2713 gvt_err("fail to copy guest indirect ctx\n"); 2747 gvt_vgpu_err("fail to copy guest indirect ctx\n");
2714 goto unmap_src; 2748 goto unmap_src;
2715 } 2749 }
2716 2750
@@ -2744,13 +2778,14 @@ static int combine_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
2744int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) 2778int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
2745{ 2779{
2746 int ret; 2780 int ret;
2781 struct intel_vgpu *vgpu = wa_ctx->workload->vgpu;
2747 2782
2748 if (wa_ctx->indirect_ctx.size == 0) 2783 if (wa_ctx->indirect_ctx.size == 0)
2749 return 0; 2784 return 0;
2750 2785
2751 ret = shadow_indirect_ctx(wa_ctx); 2786 ret = shadow_indirect_ctx(wa_ctx);
2752 if (ret) { 2787 if (ret) {
2753 gvt_err("fail to shadow indirect ctx\n"); 2788 gvt_vgpu_err("fail to shadow indirect ctx\n");
2754 return ret; 2789 return ret;
2755 } 2790 }
2756 2791
@@ -2758,7 +2793,7 @@ int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
2758 2793
2759 ret = scan_wa_ctx(wa_ctx); 2794 ret = scan_wa_ctx(wa_ctx);
2760 if (ret) { 2795 if (ret) {
2761 gvt_err("scan wa ctx error\n"); 2796 gvt_vgpu_err("scan wa ctx error\n");
2762 return ret; 2797 return ret;
2763 } 2798 }
2764 2799
diff --git a/drivers/gpu/drm/i915/gvt/debug.h b/drivers/gpu/drm/i915/gvt/debug.h
index 68cba7bd980a..b0cff4dc2684 100644
--- a/drivers/gpu/drm/i915/gvt/debug.h
+++ b/drivers/gpu/drm/i915/gvt/debug.h
@@ -27,6 +27,14 @@
27#define gvt_err(fmt, args...) \ 27#define gvt_err(fmt, args...) \
28 DRM_ERROR("gvt: "fmt, ##args) 28 DRM_ERROR("gvt: "fmt, ##args)
29 29
30#define gvt_vgpu_err(fmt, args...) \
31do { \
32 if (IS_ERR_OR_NULL(vgpu)) \
33 DRM_DEBUG_DRIVER("gvt: "fmt, ##args); \
34 else \
35 DRM_DEBUG_DRIVER("gvt: vgpu %d: "fmt, vgpu->id, ##args);\
36} while (0)
37
30#define gvt_dbg_core(fmt, args...) \ 38#define gvt_dbg_core(fmt, args...) \
31 DRM_DEBUG_DRIVER("gvt: core: "fmt, ##args) 39 DRM_DEBUG_DRIVER("gvt: core: "fmt, ##args)
32 40
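
A subtlety of the new gvt_vgpu_err() above: it expands to a reference to an identifier named vgpu, which must exist in the caller's scope; that is why so many functions in this series grow a local 'struct intel_vgpu *vgpu = ...' line. Macros that capture a surrounding variable keep call sites terse but make the contract invisible. In miniature, with hypothetical names:

#include <linux/err.h>
#include <linux/printk.h>

struct my_vgpu { int id; };

/* Sketch: the macro silently requires a local called 'vgpu';
 * callers that lack one fail to compile.
 */
#define my_vgpu_err(fmt, args...) \
do { \
	if (IS_ERR_OR_NULL(vgpu)) \
		pr_debug("gvt: " fmt, ##args); \
	else \
		pr_debug("gvt: vgpu %d: " fmt, vgpu->id, ##args); \
} while (0)

static void report(struct my_vgpu *owner)
{
	struct my_vgpu *vgpu = owner;	/* required by my_vgpu_err() */

	my_vgpu_err("unexpected command\n");
}
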
diff --git a/drivers/gpu/drm/i915/gvt/edid.c b/drivers/gpu/drm/i915/gvt/edid.c
index bda85dff7b2a..42cd09ec63fa 100644
--- a/drivers/gpu/drm/i915/gvt/edid.c
+++ b/drivers/gpu/drm/i915/gvt/edid.c
@@ -52,16 +52,16 @@ static unsigned char edid_get_byte(struct intel_vgpu *vgpu)
52 unsigned char chr = 0; 52 unsigned char chr = 0;
53 53
54 if (edid->state == I2C_NOT_SPECIFIED || !edid->slave_selected) { 54 if (edid->state == I2C_NOT_SPECIFIED || !edid->slave_selected) {
55 gvt_err("Driver tries to read EDID without proper sequence!\n"); 55 gvt_vgpu_err("Driver tries to read EDID without proper sequence!\n");
56 return 0; 56 return 0;
57 } 57 }
58 if (edid->current_edid_read >= EDID_SIZE) { 58 if (edid->current_edid_read >= EDID_SIZE) {
59 gvt_err("edid_get_byte() exceeds the size of EDID!\n"); 59 gvt_vgpu_err("edid_get_byte() exceeds the size of EDID!\n");
60 return 0; 60 return 0;
61 } 61 }
62 62
63 if (!edid->edid_available) { 63 if (!edid->edid_available) {
64 gvt_err("Reading EDID but EDID is not available!\n"); 64 gvt_vgpu_err("Reading EDID but EDID is not available!\n");
65 return 0; 65 return 0;
66 } 66 }
67 67
@@ -72,7 +72,7 @@ static unsigned char edid_get_byte(struct intel_vgpu *vgpu)
72 chr = edid_data->edid_block[edid->current_edid_read]; 72 chr = edid_data->edid_block[edid->current_edid_read];
73 edid->current_edid_read++; 73 edid->current_edid_read++;
74 } else { 74 } else {
75 gvt_err("No EDID available during the reading?\n"); 75 gvt_vgpu_err("No EDID available during the reading?\n");
76 } 76 }
77 return chr; 77 return chr;
78} 78}
@@ -223,7 +223,7 @@ static int gmbus1_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
223 vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_ACTIVE; 223 vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_ACTIVE;
224 break; 224 break;
225 default: 225 default:
226 gvt_err("Unknown/reserved GMBUS cycle detected!\n"); 226 gvt_vgpu_err("Unknown/reserved GMBUS cycle detected!\n");
227 break; 227 break;
228 } 228 }
229 /* 229 /*
@@ -292,8 +292,7 @@ static int gmbus3_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
292 */ 292 */
293 } else { 293 } else {
294 memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes); 294 memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);
295 gvt_err("vgpu%d: warning: gmbus3 read with nothing returned\n", 295 gvt_vgpu_err("warning: gmbus3 read with nothing returned\n");
296 vgpu->id);
297 } 296 }
298 return 0; 297 return 0;
299} 298}
@@ -496,7 +495,8 @@ void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu,
496 unsigned char val = edid_get_byte(vgpu); 495 unsigned char val = edid_get_byte(vgpu);
497 496
498 aux_data_for_write = (val << 16); 497 aux_data_for_write = (val << 16);
499 } 498 } else
499 aux_data_for_write = (0xff << 16);
500 } 500 }
501 /* write the return value in AUX_CH_DATA reg which includes: 501 /* write the return value in AUX_CH_DATA reg which includes:
502 * ACK of I2C_WRITE 502 * ACK of I2C_WRITE
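
The else branch added above keeps the AUX read well defined when no EDID byte can be produced: an idle, pulled-up I2C data line reads back as all ones, so 0xff is the conventional "no data" answer instead of a default 0. A sketch, assuming the returned byte rides in bits 23:16 of AUX_CH_DATA as in the surrounding code (edid_readable is a hypothetical condition):

        if (edid_readable)
                aux_data_for_write = (u32)edid_get_byte(vgpu) << 16;
        else
                aux_data_for_write = 0xffu << 16; /* idle I2C line reads 0xff */
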
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index 46eb9fd3c03f..f1f426a97aa9 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -172,6 +172,7 @@ static int emulate_execlist_ctx_schedule_out(
172 struct intel_vgpu_execlist *execlist, 172 struct intel_vgpu_execlist *execlist,
173 struct execlist_ctx_descriptor_format *ctx) 173 struct execlist_ctx_descriptor_format *ctx)
174{ 174{
175 struct intel_vgpu *vgpu = execlist->vgpu;
175 struct intel_vgpu_execlist_slot *running = execlist->running_slot; 176 struct intel_vgpu_execlist_slot *running = execlist->running_slot;
176 struct intel_vgpu_execlist_slot *pending = execlist->pending_slot; 177 struct intel_vgpu_execlist_slot *pending = execlist->pending_slot;
177 struct execlist_ctx_descriptor_format *ctx0 = &running->ctx[0]; 178 struct execlist_ctx_descriptor_format *ctx0 = &running->ctx[0];
@@ -183,7 +184,7 @@ static int emulate_execlist_ctx_schedule_out(
183 gvt_dbg_el("schedule out context id %x\n", ctx->context_id); 184 gvt_dbg_el("schedule out context id %x\n", ctx->context_id);
184 185
185 if (WARN_ON(!same_context(ctx, execlist->running_context))) { 186 if (WARN_ON(!same_context(ctx, execlist->running_context))) {
186 gvt_err("schedule out context is not running context," 187 gvt_vgpu_err("schedule out context is not running context,"
187 "ctx id %x running ctx id %x\n", 188 "ctx id %x running ctx id %x\n",
188 ctx->context_id, 189 ctx->context_id,
189 execlist->running_context->context_id); 190 execlist->running_context->context_id);
@@ -254,7 +255,7 @@ static struct intel_vgpu_execlist_slot *get_next_execlist_slot(
254 status.udw = vgpu_vreg(vgpu, status_reg + 4); 255 status.udw = vgpu_vreg(vgpu, status_reg + 4);
255 256
256 if (status.execlist_queue_full) { 257 if (status.execlist_queue_full) {
257 gvt_err("virtual execlist slots are full\n"); 258 gvt_vgpu_err("virtual execlist slots are full\n");
258 return NULL; 259 return NULL;
259 } 260 }
260 261
@@ -270,11 +271,12 @@ static int emulate_execlist_schedule_in(struct intel_vgpu_execlist *execlist,
270 271
271 struct execlist_ctx_descriptor_format *ctx0, *ctx1; 272 struct execlist_ctx_descriptor_format *ctx0, *ctx1;
272 struct execlist_context_status_format status; 273 struct execlist_context_status_format status;
274 struct intel_vgpu *vgpu = execlist->vgpu;
273 275
274 gvt_dbg_el("emulate schedule-in\n"); 276 gvt_dbg_el("emulate schedule-in\n");
275 277
276 if (!slot) { 278 if (!slot) {
277 gvt_err("no available execlist slot\n"); 279 gvt_vgpu_err("no available execlist slot\n");
278 return -EINVAL; 280 return -EINVAL;
279 } 281 }
280 282
@@ -375,7 +377,6 @@ static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
375 377
376 vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0, 4, 0); 378 vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0, 4, 0);
377 if (IS_ERR(vma)) { 379 if (IS_ERR(vma)) {
378 gvt_err("Cannot pin\n");
379 return; 380 return;
380 } 381 }
381 382
@@ -428,7 +429,6 @@ static void prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
428 vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL, 429 vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL,
429 0, CACHELINE_BYTES, 0); 430 0, CACHELINE_BYTES, 0);
430 if (IS_ERR(vma)) { 431 if (IS_ERR(vma)) {
431 gvt_err("Cannot pin indirect ctx obj\n");
432 return; 432 return;
433 } 433 }
434 434
@@ -561,6 +561,7 @@ static int prepare_mm(struct intel_vgpu_workload *workload)
561{ 561{
562 struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc; 562 struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc;
563 struct intel_vgpu_mm *mm; 563 struct intel_vgpu_mm *mm;
564 struct intel_vgpu *vgpu = workload->vgpu;
564 int page_table_level; 565 int page_table_level;
565 u32 pdp[8]; 566 u32 pdp[8];
566 567
@@ -569,7 +570,7 @@ static int prepare_mm(struct intel_vgpu_workload *workload)
569 } else if (desc->addressing_mode == 3) { /* legacy 64 bit */ 570 } else if (desc->addressing_mode == 3) { /* legacy 64 bit */
570 page_table_level = 4; 571 page_table_level = 4;
571 } else { 572 } else {
572 gvt_err("Advanced Context mode(SVM) is not supported!\n"); 573 gvt_vgpu_err("Advanced Context mode(SVM) is not supported!\n");
573 return -EINVAL; 574 return -EINVAL;
574 } 575 }
575 576
@@ -583,7 +584,7 @@ static int prepare_mm(struct intel_vgpu_workload *workload)
583 mm = intel_vgpu_create_mm(workload->vgpu, INTEL_GVT_MM_PPGTT, 584 mm = intel_vgpu_create_mm(workload->vgpu, INTEL_GVT_MM_PPGTT,
584 pdp, page_table_level, 0); 585 pdp, page_table_level, 0);
585 if (IS_ERR(mm)) { 586 if (IS_ERR(mm)) {
586 gvt_err("fail to create mm object.\n"); 587 gvt_vgpu_err("fail to create mm object.\n");
587 return PTR_ERR(mm); 588 return PTR_ERR(mm);
588 } 589 }
589 } 590 }
@@ -609,7 +610,7 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
609 ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, 610 ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
610 (u32)((desc->lrca + 1) << GTT_PAGE_SHIFT)); 611 (u32)((desc->lrca + 1) << GTT_PAGE_SHIFT));
611 if (ring_context_gpa == INTEL_GVT_INVALID_ADDR) { 612 if (ring_context_gpa == INTEL_GVT_INVALID_ADDR) {
612 gvt_err("invalid guest context LRCA: %x\n", desc->lrca); 613 gvt_vgpu_err("invalid guest context LRCA: %x\n", desc->lrca);
613 return -EINVAL; 614 return -EINVAL;
614 } 615 }
615 616
@@ -724,8 +725,7 @@ int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
724 continue; 725 continue;
725 726
726 if (!desc[i]->privilege_access) { 727 if (!desc[i]->privilege_access) {
727 gvt_err("vgpu%d: unexpected GGTT elsp submission\n", 728 gvt_vgpu_err("unexpected GGTT elsp submission\n");
728 vgpu->id);
729 return -EINVAL; 729 return -EINVAL;
730 } 730 }
731 731
@@ -735,15 +735,13 @@ int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
735 } 735 }
736 736
737 if (!valid_desc_bitmap) { 737 if (!valid_desc_bitmap) {
738 gvt_err("vgpu%d: no valid desc in a elsp submission\n", 738 gvt_vgpu_err("no valid desc in a elsp submission\n");
739 vgpu->id);
740 return -EINVAL; 739 return -EINVAL;
741 } 740 }
742 741
743 if (!test_bit(0, (void *)&valid_desc_bitmap) && 742 if (!test_bit(0, (void *)&valid_desc_bitmap) &&
744 test_bit(1, (void *)&valid_desc_bitmap)) { 743 test_bit(1, (void *)&valid_desc_bitmap)) {
745 gvt_err("vgpu%d: weird elsp submission, desc 0 is not valid\n", 744 gvt_vgpu_err("weird elsp submission, desc 0 is not valid\n");
746 vgpu->id);
747 return -EINVAL; 745 return -EINVAL;
748 } 746 }
749 747
@@ -752,8 +750,7 @@ int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
752 ret = submit_context(vgpu, ring_id, &valid_desc[i], 750 ret = submit_context(vgpu, ring_id, &valid_desc[i],
753 emulate_schedule_in); 751 emulate_schedule_in);
754 if (ret) { 752 if (ret) {
755 gvt_err("vgpu%d: fail to schedule workload\n", 753 gvt_vgpu_err("fail to schedule workload\n");
756 vgpu->id);
757 return ret; 754 return ret;
758 } 755 }
759 emulate_schedule_in = false; 756 emulate_schedule_in = false;
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 6a5ff23ded90..b832bea64e03 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -49,8 +49,8 @@ bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size)
49{ 49{
50 if ((!vgpu_gmadr_is_valid(vgpu, addr)) || (size 50 if ((!vgpu_gmadr_is_valid(vgpu, addr)) || (size
51 && !vgpu_gmadr_is_valid(vgpu, addr + size - 1))) { 51 && !vgpu_gmadr_is_valid(vgpu, addr + size - 1))) {
52 gvt_err("vgpu%d: invalid range gmadr 0x%llx size 0x%x\n", 52 gvt_vgpu_err("invalid range gmadr 0x%llx size 0x%x\n",
53 vgpu->id, addr, size); 53 addr, size);
54 return false; 54 return false;
55 } 55 }
56 return true; 56 return true;
@@ -430,7 +430,7 @@ static int gtt_entry_p2m(struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *p,
430 430
431 mfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, gfn); 431 mfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, gfn);
432 if (mfn == INTEL_GVT_INVALID_ADDR) { 432 if (mfn == INTEL_GVT_INVALID_ADDR) {
433 gvt_err("fail to translate gfn: 0x%lx\n", gfn); 433 gvt_vgpu_err("fail to translate gfn: 0x%lx\n", gfn);
434 return -ENXIO; 434 return -ENXIO;
435 } 435 }
436 436
@@ -611,7 +611,7 @@ static inline int init_shadow_page(struct intel_vgpu *vgpu,
611 611
612 daddr = dma_map_page(kdev, p->page, 0, 4096, PCI_DMA_BIDIRECTIONAL); 612 daddr = dma_map_page(kdev, p->page, 0, 4096, PCI_DMA_BIDIRECTIONAL);
613 if (dma_mapping_error(kdev, daddr)) { 613 if (dma_mapping_error(kdev, daddr)) {
614 gvt_err("fail to map dma addr\n"); 614 gvt_vgpu_err("fail to map dma addr\n");
615 return -EINVAL; 615 return -EINVAL;
616 } 616 }
617 617
@@ -735,7 +735,7 @@ retry:
735 if (reclaim_one_mm(vgpu->gvt)) 735 if (reclaim_one_mm(vgpu->gvt))
736 goto retry; 736 goto retry;
737 737
738 gvt_err("fail to allocate ppgtt shadow page\n"); 738 gvt_vgpu_err("fail to allocate ppgtt shadow page\n");
739 return ERR_PTR(-ENOMEM); 739 return ERR_PTR(-ENOMEM);
740 } 740 }
741 741
@@ -750,14 +750,14 @@ retry:
750 */ 750 */
751 ret = init_shadow_page(vgpu, &spt->shadow_page, type); 751 ret = init_shadow_page(vgpu, &spt->shadow_page, type);
752 if (ret) { 752 if (ret) {
753 gvt_err("fail to initialize shadow page for spt\n"); 753 gvt_vgpu_err("fail to initialize shadow page for spt\n");
754 goto err; 754 goto err;
755 } 755 }
756 756
757 ret = intel_vgpu_init_guest_page(vgpu, &spt->guest_page, 757 ret = intel_vgpu_init_guest_page(vgpu, &spt->guest_page,
758 gfn, ppgtt_write_protection_handler, NULL); 758 gfn, ppgtt_write_protection_handler, NULL);
759 if (ret) { 759 if (ret) {
760 gvt_err("fail to initialize guest page for spt\n"); 760 gvt_vgpu_err("fail to initialize guest page for spt\n");
761 goto err; 761 goto err;
762 } 762 }
763 763
@@ -776,8 +776,7 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_find_shadow_page(
776 if (p) 776 if (p)
777 return shadow_page_to_ppgtt_spt(p); 777 return shadow_page_to_ppgtt_spt(p);
778 778
779 gvt_err("vgpu%d: fail to find ppgtt shadow page: 0x%lx\n", 779 gvt_vgpu_err("fail to find ppgtt shadow page: 0x%lx\n", mfn);
780 vgpu->id, mfn);
781 return NULL; 780 return NULL;
782} 781}
783 782
@@ -827,8 +826,8 @@ static int ppgtt_invalidate_shadow_page_by_shadow_entry(struct intel_vgpu *vgpu,
827 } 826 }
828 s = ppgtt_find_shadow_page(vgpu, ops->get_pfn(e)); 827 s = ppgtt_find_shadow_page(vgpu, ops->get_pfn(e));
829 if (!s) { 828 if (!s) {
830 gvt_err("vgpu%d: fail to find shadow page: mfn: 0x%lx\n", 829 gvt_vgpu_err("fail to find shadow page: mfn: 0x%lx\n",
831 vgpu->id, ops->get_pfn(e)); 830 ops->get_pfn(e));
832 return -ENXIO; 831 return -ENXIO;
833 } 832 }
834 return ppgtt_invalidate_shadow_page(s); 833 return ppgtt_invalidate_shadow_page(s);
@@ -836,6 +835,7 @@ static int ppgtt_invalidate_shadow_page_by_shadow_entry(struct intel_vgpu *vgpu,
836 835
837static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt) 836static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
838{ 837{
838 struct intel_vgpu *vgpu = spt->vgpu;
839 struct intel_gvt_gtt_entry e; 839 struct intel_gvt_gtt_entry e;
840 unsigned long index; 840 unsigned long index;
841 int ret; 841 int ret;
@@ -854,7 +854,7 @@ static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
854 854
855 for_each_present_shadow_entry(spt, &e, index) { 855 for_each_present_shadow_entry(spt, &e, index) {
856 if (!gtt_type_is_pt(get_next_pt_type(e.type))) { 856 if (!gtt_type_is_pt(get_next_pt_type(e.type))) {
857 gvt_err("GVT doesn't support pse bit for now\n"); 857 gvt_vgpu_err("GVT doesn't support pse bit for now\n");
858 return -EINVAL; 858 return -EINVAL;
859 } 859 }
860 ret = ppgtt_invalidate_shadow_page_by_shadow_entry( 860 ret = ppgtt_invalidate_shadow_page_by_shadow_entry(
@@ -868,8 +868,8 @@ release:
868 ppgtt_free_shadow_page(spt); 868 ppgtt_free_shadow_page(spt);
869 return 0; 869 return 0;
870fail: 870fail:
871 gvt_err("vgpu%d: fail: shadow page %p shadow entry 0x%llx type %d\n", 871 gvt_vgpu_err("fail: shadow page %p shadow entry 0x%llx type %d\n",
872 spt->vgpu->id, spt, e.val64, e.type); 872 spt, e.val64, e.type);
873 return ret; 873 return ret;
874} 874}
875 875
@@ -914,8 +914,8 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_populate_shadow_page_by_guest_entry(
914 } 914 }
915 return s; 915 return s;
916fail: 916fail:
917 gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n", 917 gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
918 vgpu->id, s, we->val64, we->type); 918 s, we->val64, we->type);
919 return ERR_PTR(ret); 919 return ERR_PTR(ret);
920} 920}
921 921
@@ -953,7 +953,7 @@ static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
953 953
954 for_each_present_guest_entry(spt, &ge, i) { 954 for_each_present_guest_entry(spt, &ge, i) {
955 if (!gtt_type_is_pt(get_next_pt_type(ge.type))) { 955 if (!gtt_type_is_pt(get_next_pt_type(ge.type))) {
956 gvt_err("GVT doesn't support pse bit now\n"); 956 gvt_vgpu_err("GVT doesn't support pse bit now\n");
957 ret = -EINVAL; 957 ret = -EINVAL;
958 goto fail; 958 goto fail;
959 } 959 }
@@ -969,8 +969,8 @@ static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
969 } 969 }
970 return 0; 970 return 0;
971fail: 971fail:
972 gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n", 972 gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
973 vgpu->id, spt, ge.val64, ge.type); 973 spt, ge.val64, ge.type);
974 return ret; 974 return ret;
975} 975}
976 976
@@ -999,7 +999,7 @@ static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_guest_page *gpt,
999 struct intel_vgpu_ppgtt_spt *s = 999 struct intel_vgpu_ppgtt_spt *s =
1000 ppgtt_find_shadow_page(vgpu, ops->get_pfn(&e)); 1000 ppgtt_find_shadow_page(vgpu, ops->get_pfn(&e));
1001 if (!s) { 1001 if (!s) {
1002 gvt_err("fail to find guest page\n"); 1002 gvt_vgpu_err("fail to find guest page\n");
1003 ret = -ENXIO; 1003 ret = -ENXIO;
1004 goto fail; 1004 goto fail;
1005 } 1005 }
@@ -1011,8 +1011,8 @@ static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_guest_page *gpt,
1011 ppgtt_set_shadow_entry(spt, &e, index); 1011 ppgtt_set_shadow_entry(spt, &e, index);
1012 return 0; 1012 return 0;
1013fail: 1013fail:
1014 gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n", 1014 gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
1015 vgpu->id, spt, e.val64, e.type); 1015 spt, e.val64, e.type);
1016 return ret; 1016 return ret;
1017} 1017}
1018 1018
@@ -1046,8 +1046,8 @@ static int ppgtt_handle_guest_entry_add(struct intel_vgpu_guest_page *gpt,
1046 } 1046 }
1047 return 0; 1047 return 0;
1048fail: 1048fail:
1049 gvt_err("vgpu%d: fail: spt %p guest entry 0x%llx type %d\n", vgpu->id, 1049 gvt_vgpu_err("fail: spt %p guest entry 0x%llx type %d\n",
1050 spt, we->val64, we->type); 1050 spt, we->val64, we->type);
1051 return ret; 1051 return ret;
1052} 1052}
1053 1053
@@ -1250,8 +1250,8 @@ static int ppgtt_handle_guest_write_page_table(
1250 } 1250 }
1251 return 0; 1251 return 0;
1252fail: 1252fail:
1253 gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d.\n", 1253 gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d.\n",
1254 vgpu->id, spt, we->val64, we->type); 1254 spt, we->val64, we->type);
1255 return ret; 1255 return ret;
1256} 1256}
1257 1257
@@ -1493,7 +1493,7 @@ static int shadow_mm(struct intel_vgpu_mm *mm)
1493 1493
1494 spt = ppgtt_populate_shadow_page_by_guest_entry(vgpu, &ge); 1494 spt = ppgtt_populate_shadow_page_by_guest_entry(vgpu, &ge);
1495 if (IS_ERR(spt)) { 1495 if (IS_ERR(spt)) {
1496 gvt_err("fail to populate guest root pointer\n"); 1496 gvt_vgpu_err("fail to populate guest root pointer\n");
1497 ret = PTR_ERR(spt); 1497 ret = PTR_ERR(spt);
1498 goto fail; 1498 goto fail;
1499 } 1499 }
@@ -1566,7 +1566,7 @@ struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu,
1566 1566
1567 ret = gtt->mm_alloc_page_table(mm); 1567 ret = gtt->mm_alloc_page_table(mm);
1568 if (ret) { 1568 if (ret) {
1569 gvt_err("fail to allocate page table for mm\n"); 1569 gvt_vgpu_err("fail to allocate page table for mm\n");
1570 goto fail; 1570 goto fail;
1571 } 1571 }
1572 1572
@@ -1584,7 +1584,7 @@ struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu,
1584 } 1584 }
1585 return mm; 1585 return mm;
1586fail: 1586fail:
1587 gvt_err("fail to create mm\n"); 1587 gvt_vgpu_err("fail to create mm\n");
1588 if (mm) 1588 if (mm)
1589 intel_gvt_mm_unreference(mm); 1589 intel_gvt_mm_unreference(mm);
1590 return ERR_PTR(ret); 1590 return ERR_PTR(ret);
@@ -1760,7 +1760,7 @@ unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
1760 mm->page_table_level, gma, gpa); 1760 mm->page_table_level, gma, gpa);
1761 return gpa; 1761 return gpa;
1762err: 1762err:
1763 gvt_err("invalid mm type: %d gma %lx\n", mm->type, gma); 1763 gvt_vgpu_err("invalid mm type: %d gma %lx\n", mm->type, gma);
1764 return INTEL_GVT_INVALID_ADDR; 1764 return INTEL_GVT_INVALID_ADDR;
1765} 1765}
1766 1766
@@ -1836,13 +1836,16 @@ static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
1836 if (ops->test_present(&e)) { 1836 if (ops->test_present(&e)) {
1837 ret = gtt_entry_p2m(vgpu, &e, &m); 1837 ret = gtt_entry_p2m(vgpu, &e, &m);
1838 if (ret) { 1838 if (ret) {
1839 gvt_err("vgpu%d: fail to translate guest gtt entry\n", 1839 gvt_vgpu_err("fail to translate guest gtt entry\n");
1840 vgpu->id); 1840 /* the guest driver may read/write the entry when
1841 return ret; 1841 * partially updating it; in that case p2m fails, so
1842 * set the shadow entry to point at a scratch page
1843 */
1844 ops->set_pfn(&m, gvt->gtt.scratch_ggtt_mfn);
1842 } 1845 }
1843 } else { 1846 } else {
1844 m = e; 1847 m = e;
1845 m.val64 = 0; 1848 ops->set_pfn(&m, gvt->gtt.scratch_ggtt_mfn);
1846 } 1849 }
1847 1850
1848 ggtt_set_shadow_entry(ggtt_mm, &m, g_gtt_index); 1851 ggtt_set_shadow_entry(ggtt_mm, &m, g_gtt_index);
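
Both paths of this hunk now park the shadow entry on a scratch page rather than failing the write or leaving a zeroed entry, so a stray GPU access lands on harmless memory instead of faulting. A condensed sketch of the fallback, assuming the pte ops live at gvt->gtt.pte_ops as elsewhere in this file (shadow_entry_or_scratch is illustrative):

        static void shadow_entry_or_scratch(struct intel_vgpu *vgpu,
                                            struct intel_gvt_gtt_entry *e,
                                            struct intel_gvt_gtt_entry *m)
        {
                struct intel_gvt *gvt = vgpu->gvt;
                struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;

                if (!ops->test_present(e) || gtt_entry_p2m(vgpu, e, m)) {
                        /* not present, or a partial update made p2m fail:
                         * point the shadow entry at the scratch page */
                        *m = *e;
                        ops->set_pfn(m, gvt->gtt.scratch_ggtt_mfn);
                }
        }
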
@@ -1893,14 +1896,14 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
1893 1896
1894 scratch_pt = (void *)get_zeroed_page(GFP_KERNEL); 1897 scratch_pt = (void *)get_zeroed_page(GFP_KERNEL);
1895 if (!scratch_pt) { 1898 if (!scratch_pt) {
1896 gvt_err("fail to allocate scratch page\n"); 1899 gvt_vgpu_err("fail to allocate scratch page\n");
1897 return -ENOMEM; 1900 return -ENOMEM;
1898 } 1901 }
1899 1902
1900 daddr = dma_map_page(dev, virt_to_page(scratch_pt), 0, 1903 daddr = dma_map_page(dev, virt_to_page(scratch_pt), 0,
1901 4096, PCI_DMA_BIDIRECTIONAL); 1904 4096, PCI_DMA_BIDIRECTIONAL);
1902 if (dma_mapping_error(dev, daddr)) { 1905 if (dma_mapping_error(dev, daddr)) {
1903 gvt_err("fail to dmamap scratch_pt\n"); 1906 gvt_vgpu_err("fail to dmamap scratch_pt\n");
1904 __free_page(virt_to_page(scratch_pt)); 1907 __free_page(virt_to_page(scratch_pt));
1905 return -ENOMEM; 1908 return -ENOMEM;
1906 } 1909 }
@@ -2003,7 +2006,7 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
2003 ggtt_mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_GGTT, 2006 ggtt_mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_GGTT,
2004 NULL, 1, 0); 2007 NULL, 1, 0);
2005 if (IS_ERR(ggtt_mm)) { 2008 if (IS_ERR(ggtt_mm)) {
2006 gvt_err("fail to create mm for ggtt.\n"); 2009 gvt_vgpu_err("fail to create mm for ggtt.\n");
2007 return PTR_ERR(ggtt_mm); 2010 return PTR_ERR(ggtt_mm);
2008 } 2011 }
2009 2012
@@ -2076,7 +2079,6 @@ static int setup_spt_oos(struct intel_gvt *gvt)
2076 for (i = 0; i < preallocated_oos_pages; i++) { 2079 for (i = 0; i < preallocated_oos_pages; i++) {
2077 oos_page = kzalloc(sizeof(*oos_page), GFP_KERNEL); 2080 oos_page = kzalloc(sizeof(*oos_page), GFP_KERNEL);
2078 if (!oos_page) { 2081 if (!oos_page) {
2079 gvt_err("fail to pre-allocate oos page\n");
2080 ret = -ENOMEM; 2082 ret = -ENOMEM;
2081 goto fail; 2083 goto fail;
2082 } 2084 }
@@ -2166,7 +2168,7 @@ int intel_vgpu_g2v_create_ppgtt_mm(struct intel_vgpu *vgpu,
2166 mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_PPGTT, 2168 mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_PPGTT,
2167 pdp, page_table_level, 0); 2169 pdp, page_table_level, 0);
2168 if (IS_ERR(mm)) { 2170 if (IS_ERR(mm)) {
2169 gvt_err("fail to create mm\n"); 2171 gvt_vgpu_err("fail to create mm\n");
2170 return PTR_ERR(mm); 2172 return PTR_ERR(mm);
2171 } 2173 }
2172 } 2174 }
@@ -2196,7 +2198,7 @@ int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu,
2196 2198
2197 mm = intel_vgpu_find_ppgtt_mm(vgpu, page_table_level, pdp); 2199 mm = intel_vgpu_find_ppgtt_mm(vgpu, page_table_level, pdp);
2198 if (!mm) { 2200 if (!mm) {
2199 gvt_err("fail to find ppgtt instance.\n"); 2201 gvt_vgpu_err("fail to find ppgtt instance.\n");
2200 return -EINVAL; 2202 return -EINVAL;
2201 } 2203 }
2202 intel_gvt_mm_unreference(mm); 2204 intel_gvt_mm_unreference(mm);
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 23791920ced1..6dfc48b63b71 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -162,7 +162,6 @@ struct intel_vgpu {
162 atomic_t running_workload_num; 162 atomic_t running_workload_num;
163 DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES); 163 DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
164 struct i915_gem_context *shadow_ctx; 164 struct i915_gem_context *shadow_ctx;
165 struct notifier_block shadow_ctx_notifier_block;
166 165
167#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT) 166#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
168 struct { 167 struct {
@@ -233,6 +232,7 @@ struct intel_gvt {
233 struct intel_gvt_gtt gtt; 232 struct intel_gvt_gtt gtt;
234 struct intel_gvt_opregion opregion; 233 struct intel_gvt_opregion opregion;
235 struct intel_gvt_workload_scheduler scheduler; 234 struct intel_gvt_workload_scheduler scheduler;
235 struct notifier_block shadow_ctx_notifier_block[I915_NUM_ENGINES];
236 DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS); 236 DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS);
237 struct intel_vgpu_type *types; 237 struct intel_vgpu_type *types;
238 unsigned int num_types; 238 unsigned int num_types;
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 8e43395c748a..6da9ae1618e3 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -181,11 +181,9 @@ static int sanitize_fence_mmio_access(struct intel_vgpu *vgpu,
181 GVT_FAILSAFE_UNSUPPORTED_GUEST); 181 GVT_FAILSAFE_UNSUPPORTED_GUEST);
182 182
183 if (!vgpu->mmio.disable_warn_untrack) { 183 if (!vgpu->mmio.disable_warn_untrack) {
184 gvt_err("vgpu%d: found oob fence register access\n", 184 gvt_vgpu_err("found oob fence register access\n");
185 vgpu->id); 185 gvt_vgpu_err("total fence %d, access fence %d\n",
186 gvt_err("vgpu%d: total fence %d, access fence %d\n", 186 vgpu_fence_sz(vgpu), fence_num);
187 vgpu->id, vgpu_fence_sz(vgpu),
188 fence_num);
189 } 187 }
190 memset(p_data, 0, bytes); 188 memset(p_data, 0, bytes);
191 return -EINVAL; 189 return -EINVAL;
@@ -249,7 +247,7 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu,
249 break; 247 break;
250 default: 248 default:
251 /*should not hit here*/ 249 /*should not hit here*/
252 gvt_err("invalid forcewake offset 0x%x\n", offset); 250 gvt_vgpu_err("invalid forcewake offset 0x%x\n", offset);
253 return -EINVAL; 251 return -EINVAL;
254 } 252 }
255 } else { 253 } else {
@@ -530,7 +528,7 @@ static int check_fdi_rx_train_status(struct intel_vgpu *vgpu,
530 fdi_tx_train_bits = FDI_LINK_TRAIN_PATTERN_2; 528 fdi_tx_train_bits = FDI_LINK_TRAIN_PATTERN_2;
531 fdi_iir_check_bits = FDI_RX_SYMBOL_LOCK; 529 fdi_iir_check_bits = FDI_RX_SYMBOL_LOCK;
532 } else { 530 } else {
533 gvt_err("Invalid train pattern %d\n", train_pattern); 531 gvt_vgpu_err("Invalid train pattern %d\n", train_pattern);
534 return -EINVAL; 532 return -EINVAL;
535 } 533 }
536 534
@@ -588,7 +586,7 @@ static int update_fdi_rx_iir_status(struct intel_vgpu *vgpu,
588 else if (FDI_RX_IMR_TO_PIPE(offset) != INVALID_INDEX) 586 else if (FDI_RX_IMR_TO_PIPE(offset) != INVALID_INDEX)
589 index = FDI_RX_IMR_TO_PIPE(offset); 587 index = FDI_RX_IMR_TO_PIPE(offset);
590 else { 588 else {
591 gvt_err("Unsupport registers %x\n", offset); 589 gvt_vgpu_err("Unsupport registers %x\n", offset);
592 return -EINVAL; 590 return -EINVAL;
593 } 591 }
594 592
@@ -818,7 +816,7 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu,
818 u32 data; 816 u32 data;
819 817
820 if (!dpy_is_valid_port(port_index)) { 818 if (!dpy_is_valid_port(port_index)) {
821 gvt_err("GVT(%d): Unsupported DP port access!\n", vgpu->id); 819 gvt_vgpu_err("Unsupported DP port access!\n");
822 return 0; 820 return 0;
823 } 821 }
824 822
@@ -972,6 +970,14 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu,
972 return 0; 970 return 0;
973} 971}
974 972
973static int mbctl_write(struct intel_vgpu *vgpu, unsigned int offset,
974 void *p_data, unsigned int bytes)
975{
976 *(u32 *)p_data &= (~GEN6_MBCTL_ENABLE_BOOT_FETCH);
977 write_vreg(vgpu, offset, p_data, bytes);
978 return 0;
979}
980
975static int vga_control_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, 981static int vga_control_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
976 void *p_data, unsigned int bytes) 982 void *p_data, unsigned int bytes)
977{ 983{
@@ -1016,8 +1022,7 @@ static void write_virtual_sbi_register(struct intel_vgpu *vgpu,
1016 1022
1017 if (i == num) { 1023 if (i == num) {
1018 if (num == SBI_REG_MAX) { 1024 if (num == SBI_REG_MAX) {
1019 gvt_err("vgpu%d: SBI caching meets maximum limits\n", 1025 gvt_vgpu_err("SBI caching meets maximum limits\n");
1020 vgpu->id);
1021 return; 1026 return;
1022 } 1027 }
1023 display->sbi.number++; 1028 display->sbi.number++;
@@ -1097,7 +1102,7 @@ static int pvinfo_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
1097 break; 1102 break;
1098 } 1103 }
1099 if (invalid_read) 1104 if (invalid_read)
1100 gvt_err("invalid pvinfo read: [%x:%x] = %x\n", 1105 gvt_vgpu_err("invalid pvinfo read: [%x:%x] = %x\n",
1101 offset, bytes, *(u32 *)p_data); 1106 offset, bytes, *(u32 *)p_data);
1102 vgpu->pv_notified = true; 1107 vgpu->pv_notified = true;
1103 return 0; 1108 return 0;
@@ -1125,7 +1130,7 @@ static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification)
1125 case 1: /* Remove this in guest driver. */ 1130 case 1: /* Remove this in guest driver. */
1126 break; 1131 break;
1127 default: 1132 default:
1128 gvt_err("Invalid PV notification %d\n", notification); 1133 gvt_vgpu_err("Invalid PV notification %d\n", notification);
1129 } 1134 }
1130 return ret; 1135 return ret;
1131} 1136}
@@ -1181,7 +1186,7 @@ static int pvinfo_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
1181 enter_failsafe_mode(vgpu, GVT_FAILSAFE_INSUFFICIENT_RESOURCE); 1186 enter_failsafe_mode(vgpu, GVT_FAILSAFE_INSUFFICIENT_RESOURCE);
1182 break; 1187 break;
1183 default: 1188 default:
1184 gvt_err("invalid pvinfo write offset %x bytes %x data %x\n", 1189 gvt_vgpu_err("invalid pvinfo write offset %x bytes %x data %x\n",
1185 offset, bytes, data); 1190 offset, bytes, data);
1186 break; 1191 break;
1187 } 1192 }
@@ -1415,7 +1420,8 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
1415 if (execlist->elsp_dwords.index == 3) { 1420 if (execlist->elsp_dwords.index == 3) {
1416 ret = intel_vgpu_submit_execlist(vgpu, ring_id); 1421 ret = intel_vgpu_submit_execlist(vgpu, ring_id);
1417 if(ret) 1422 if(ret)
1418 gvt_err("fail submit workload on ring %d\n", ring_id); 1423 gvt_vgpu_err("fail submit workload on ring %d\n",
1424 ring_id);
1419 } 1425 }
1420 1426
1421 ++execlist->elsp_dwords.index; 1427 ++execlist->elsp_dwords.index;
@@ -2240,7 +2246,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
2240 MMIO_D(0x7180, D_ALL); 2246 MMIO_D(0x7180, D_ALL);
2241 MMIO_D(0x7408, D_ALL); 2247 MMIO_D(0x7408, D_ALL);
2242 MMIO_D(0x7c00, D_ALL); 2248 MMIO_D(0x7c00, D_ALL);
2243 MMIO_D(GEN6_MBCTL, D_ALL); 2249 MMIO_DH(GEN6_MBCTL, D_ALL, NULL, mbctl_write);
2244 MMIO_D(0x911c, D_ALL); 2250 MMIO_D(0x911c, D_ALL);
2245 MMIO_D(0x9120, D_ALL); 2251 MMIO_D(0x9120, D_ALL);
2246 MMIO_DFH(GEN7_UCGCTL4, D_ALL, F_CMD_ACCESS, NULL, NULL); 2252 MMIO_DFH(GEN7_UCGCTL4, D_ALL, F_CMD_ACCESS, NULL, NULL);
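
The MMIO_DH line above pairs with mbctl_write() earlier in this file: MMIO_D registers a register with default read/write emulation only, while MMIO_DH additionally attaches custom hooks, here a NULL read handler and a write handler that sanitizes guest-written bits before they reach the virtual register file. The pattern, sketched with a hypothetical handler name:

        /* strip bits the guest must never set, then store as usual */
        static int sanitizing_write(struct intel_vgpu *vgpu, unsigned int offset,
                                    void *p_data, unsigned int bytes)
        {
                *(u32 *)p_data &= ~GEN6_MBCTL_ENABLE_BOOT_FETCH;
                write_vreg(vgpu, offset, p_data, bytes);
                return 0;
        }
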
@@ -2988,3 +2994,20 @@ int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
2988 write_vreg(vgpu, offset, p_data, bytes); 2994 write_vreg(vgpu, offset, p_data, bytes);
2989 return 0; 2995 return 0;
2990} 2996}
2997
2998/**
2999 * intel_gvt_in_force_nonpriv_whitelist - check whether an MMIO register
3000 * is in the force-nonpriv whitelist
3001 *
3002 * @gvt: a GVT device
3003 * @offset: register offset
3004 *
3005 * Returns:
3006 * True if the register is in the force-nonpriv whitelist;
3007 * False otherwise.
3008 */
3009bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt,
3010 unsigned int offset)
3011{
3012 return in_whitelist(offset);
3013}
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 84d801638ede..d641214578a7 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -426,7 +426,7 @@ static void kvmgt_protect_table_del(struct kvmgt_guest_info *info,
426 426
427static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev) 427static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
428{ 428{
429 struct intel_vgpu *vgpu; 429 struct intel_vgpu *vgpu = NULL;
430 struct intel_vgpu_type *type; 430 struct intel_vgpu_type *type;
431 struct device *pdev; 431 struct device *pdev;
432 void *gvt; 432 void *gvt;
@@ -437,7 +437,7 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
437 437
438 type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj)); 438 type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
439 if (!type) { 439 if (!type) {
440 gvt_err("failed to find type %s to create\n", 440 gvt_vgpu_err("failed to find type %s to create\n",
441 kobject_name(kobj)); 441 kobject_name(kobj));
442 ret = -EINVAL; 442 ret = -EINVAL;
443 goto out; 443 goto out;
@@ -446,7 +446,7 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
446 vgpu = intel_gvt_ops->vgpu_create(gvt, type); 446 vgpu = intel_gvt_ops->vgpu_create(gvt, type);
447 if (IS_ERR_OR_NULL(vgpu)) { 447 if (IS_ERR_OR_NULL(vgpu)) {
448 ret = vgpu == NULL ? -EFAULT : PTR_ERR(vgpu); 448 ret = vgpu == NULL ? -EFAULT : PTR_ERR(vgpu);
449 gvt_err("failed to create intel vgpu: %d\n", ret); 449 gvt_vgpu_err("failed to create intel vgpu: %d\n", ret);
450 goto out; 450 goto out;
451 } 451 }
452 452
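
The = NULL initializer above is load-bearing: intel_vgpu_create()'s early error paths call gvt_vgpu_err() before any vgpu exists, and the macro's IS_ERR_OR_NULL(vgpu) branch must see NULL rather than uninitialized stack contents. A sketch of the path it protects (create_sketch is hypothetical):

        static int create_sketch(struct kobject *kobj, struct intel_gvt *gvt)
        {
                struct intel_vgpu *vgpu = NULL; /* read by gvt_vgpu_err() */
                struct intel_vgpu_type *type;

                type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
                if (!type) {
                        /* vgpu is still NULL: the macro takes its
                         * IS_ERR_OR_NULL() branch instead of dereferencing */
                        gvt_vgpu_err("failed to find type %s to create\n",
                                     kobject_name(kobj));
                        return -EINVAL;
                }
                return 0;
        }
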
@@ -526,7 +526,8 @@ static int intel_vgpu_open(struct mdev_device *mdev)
526 ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, &events, 526 ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, &events,
527 &vgpu->vdev.iommu_notifier); 527 &vgpu->vdev.iommu_notifier);
528 if (ret != 0) { 528 if (ret != 0) {
529 gvt_err("vfio_register_notifier for iommu failed: %d\n", ret); 529 gvt_vgpu_err("vfio_register_notifier for iommu failed: %d\n",
530 ret);
530 goto out; 531 goto out;
531 } 532 }
532 533
@@ -534,7 +535,8 @@ static int intel_vgpu_open(struct mdev_device *mdev)
534 ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, &events, 535 ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, &events,
535 &vgpu->vdev.group_notifier); 536 &vgpu->vdev.group_notifier);
536 if (ret != 0) { 537 if (ret != 0) {
537 gvt_err("vfio_register_notifier for group failed: %d\n", ret); 538 gvt_vgpu_err("vfio_register_notifier for group failed: %d\n",
539 ret);
538 goto undo_iommu; 540 goto undo_iommu;
539 } 541 }
540 542
@@ -635,7 +637,7 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
635 637
636 638
637 if (index >= VFIO_PCI_NUM_REGIONS) { 639 if (index >= VFIO_PCI_NUM_REGIONS) {
638 gvt_err("invalid index: %u\n", index); 640 gvt_vgpu_err("invalid index: %u\n", index);
639 return -EINVAL; 641 return -EINVAL;
640 } 642 }
641 643
@@ -669,7 +671,7 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
669 case VFIO_PCI_VGA_REGION_INDEX: 671 case VFIO_PCI_VGA_REGION_INDEX:
670 case VFIO_PCI_ROM_REGION_INDEX: 672 case VFIO_PCI_ROM_REGION_INDEX:
671 default: 673 default:
672 gvt_err("unsupported region: %u\n", index); 674 gvt_vgpu_err("unsupported region: %u\n", index);
673 } 675 }
674 676
675 return ret == 0 ? count : ret; 677 return ret == 0 ? count : ret;
@@ -861,7 +863,7 @@ static int intel_vgpu_set_msi_trigger(struct intel_vgpu *vgpu,
861 863
862 trigger = eventfd_ctx_fdget(fd); 864 trigger = eventfd_ctx_fdget(fd);
863 if (IS_ERR(trigger)) { 865 if (IS_ERR(trigger)) {
864 gvt_err("eventfd_ctx_fdget failed\n"); 866 gvt_vgpu_err("eventfd_ctx_fdget failed\n");
865 return PTR_ERR(trigger); 867 return PTR_ERR(trigger);
866 } 868 }
867 vgpu->vdev.msi_trigger = trigger; 869 vgpu->vdev.msi_trigger = trigger;
@@ -1120,7 +1122,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
1120 ret = vfio_set_irqs_validate_and_prepare(&hdr, max, 1122 ret = vfio_set_irqs_validate_and_prepare(&hdr, max,
1121 VFIO_PCI_NUM_IRQS, &data_size); 1123 VFIO_PCI_NUM_IRQS, &data_size);
1122 if (ret) { 1124 if (ret) {
1123 gvt_err("intel:vfio_set_irqs_validate_and_prepare failed\n"); 1125 gvt_vgpu_err("intel:vfio_set_irqs_validate_and_prepare failed\n");
1124 return -EINVAL; 1126 return -EINVAL;
1125 } 1127 }
1126 if (data_size) { 1128 if (data_size) {
@@ -1310,7 +1312,7 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
1310 1312
1311 kvm = vgpu->vdev.kvm; 1313 kvm = vgpu->vdev.kvm;
1312 if (!kvm || kvm->mm != current->mm) { 1314 if (!kvm || kvm->mm != current->mm) {
1313 gvt_err("KVM is required to use Intel vGPU\n"); 1315 gvt_vgpu_err("KVM is required to use Intel vGPU\n");
1314 return -ESRCH; 1316 return -ESRCH;
1315 } 1317 }
1316 1318
@@ -1324,6 +1326,7 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
1324 vgpu->handle = (unsigned long)info; 1326 vgpu->handle = (unsigned long)info;
1325 info->vgpu = vgpu; 1327 info->vgpu = vgpu;
1326 info->kvm = kvm; 1328 info->kvm = kvm;
1329 kvm_get_kvm(info->kvm);
1327 1330
1328 kvmgt_protect_table_init(info); 1331 kvmgt_protect_table_init(info);
1329 gvt_cache_init(vgpu); 1332 gvt_cache_init(vgpu);
@@ -1337,12 +1340,15 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
1337 1340
1338static bool kvmgt_guest_exit(struct kvmgt_guest_info *info) 1341static bool kvmgt_guest_exit(struct kvmgt_guest_info *info)
1339{ 1342{
1343 struct intel_vgpu *vgpu = info->vgpu;
1344
1340 if (!info) { 1345 if (!info) {
1341 gvt_err("kvmgt_guest_info invalid\n"); 1346 gvt_vgpu_err("kvmgt_guest_info invalid\n");
1342 return false; 1347 return false;
1343 } 1348 }
1344 1349
1345 kvm_page_track_unregister_notifier(info->kvm, &info->track_node); 1350 kvm_page_track_unregister_notifier(info->kvm, &info->track_node);
1351 kvm_put_kvm(info->kvm);
1346 kvmgt_protect_table_destroy(info); 1352 kvmgt_protect_table_destroy(info);
1347 gvt_cache_destroy(info->vgpu); 1353 gvt_cache_destroy(info->vgpu);
1348 vfree(info); 1354 vfree(info);
@@ -1383,12 +1389,14 @@ static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
1383 unsigned long iova, pfn; 1389 unsigned long iova, pfn;
1384 struct kvmgt_guest_info *info; 1390 struct kvmgt_guest_info *info;
1385 struct device *dev; 1391 struct device *dev;
1392 struct intel_vgpu *vgpu;
1386 int rc; 1393 int rc;
1387 1394
1388 if (!handle_valid(handle)) 1395 if (!handle_valid(handle))
1389 return INTEL_GVT_INVALID_ADDR; 1396 return INTEL_GVT_INVALID_ADDR;
1390 1397
1391 info = (struct kvmgt_guest_info *)handle; 1398 info = (struct kvmgt_guest_info *)handle;
1399 vgpu = info->vgpu;
1392 iova = gvt_cache_find(info->vgpu, gfn); 1400 iova = gvt_cache_find(info->vgpu, gfn);
1393 if (iova != INTEL_GVT_INVALID_ADDR) 1401 if (iova != INTEL_GVT_INVALID_ADDR)
1394 return iova; 1402 return iova;
@@ -1397,13 +1405,14 @@ static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
1397 dev = mdev_dev(info->vgpu->vdev.mdev); 1405 dev = mdev_dev(info->vgpu->vdev.mdev);
1398 rc = vfio_pin_pages(dev, &gfn, 1, IOMMU_READ | IOMMU_WRITE, &pfn); 1406 rc = vfio_pin_pages(dev, &gfn, 1, IOMMU_READ | IOMMU_WRITE, &pfn);
1399 if (rc != 1) { 1407 if (rc != 1) {
1400 gvt_err("vfio_pin_pages failed for gfn 0x%lx: %d\n", gfn, rc); 1408 gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx: %d\n",
1409 gfn, rc);
1401 return INTEL_GVT_INVALID_ADDR; 1410 return INTEL_GVT_INVALID_ADDR;
1402 } 1411 }
1403 /* transfer to host iova for GFX to use DMA */ 1412 /* transfer to host iova for GFX to use DMA */
1404 rc = gvt_dma_map_iova(info->vgpu, pfn, &iova); 1413 rc = gvt_dma_map_iova(info->vgpu, pfn, &iova);
1405 if (rc) { 1414 if (rc) {
1406 gvt_err("gvt_dma_map_iova failed for gfn: 0x%lx\n", gfn); 1415 gvt_vgpu_err("gvt_dma_map_iova failed for gfn: 0x%lx\n", gfn);
1407 vfio_unpin_pages(dev, &gfn, 1); 1416 vfio_unpin_pages(dev, &gfn, 1);
1408 return INTEL_GVT_INVALID_ADDR; 1417 return INTEL_GVT_INVALID_ADDR;
1409 } 1418 }
@@ -1417,7 +1426,7 @@ static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
1417{ 1426{
1418 struct kvmgt_guest_info *info; 1427 struct kvmgt_guest_info *info;
1419 struct kvm *kvm; 1428 struct kvm *kvm;
1420 int ret; 1429 int idx, ret;
1421 bool kthread = current->mm == NULL; 1430 bool kthread = current->mm == NULL;
1422 1431
1423 if (!handle_valid(handle)) 1432 if (!handle_valid(handle))
@@ -1429,8 +1438,10 @@ static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
1429 if (kthread) 1438 if (kthread)
1430 use_mm(kvm->mm); 1439 use_mm(kvm->mm);
1431 1440
1441 idx = srcu_read_lock(&kvm->srcu);
1432 ret = write ? kvm_write_guest(kvm, gpa, buf, len) : 1442 ret = write ? kvm_write_guest(kvm, gpa, buf, len) :
1433 kvm_read_guest(kvm, gpa, buf, len); 1443 kvm_read_guest(kvm, gpa, buf, len);
1444 srcu_read_unlock(&kvm->srcu, idx);
1434 1445
1435 if (kthread) 1446 if (kthread)
1436 unuse_mm(kvm->mm); 1447 unuse_mm(kvm->mm);
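
The srcu_read_lock()/srcu_read_unlock() pair added around the guest access closes a race: kvm_read_guest() and kvm_write_guest() walk the memslot array, which KVM protects with kvm->srcu, and kvmgt_rw_gpa() can run from a host kthread outside any vcpu context where no such lock is held. The rule, as a minimal sketch (read_guest_locked is a hypothetical wrapper):

        static int read_guest_locked(struct kvm *kvm, gpa_t gpa,
                                     void *buf, unsigned long len)
        {
                int idx, ret;

                idx = srcu_read_lock(&kvm->srcu);       /* pin memslots */
                ret = kvm_read_guest(kvm, gpa, buf, len);
                srcu_read_unlock(&kvm->srcu, idx);
                return ret;
        }
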
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
index 60b698cb8365..1ba3bdb09341 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -142,10 +142,10 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
142 ret = intel_gvt_hypervisor_read_gpa(vgpu, pa, 142 ret = intel_gvt_hypervisor_read_gpa(vgpu, pa,
143 p_data, bytes); 143 p_data, bytes);
144 if (ret) { 144 if (ret) {
145 gvt_err("vgpu%d: guest page read error %d, " 145 gvt_vgpu_err("guest page read error %d, "
146 "gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n", 146 "gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n",
147 vgpu->id, ret, 147 ret, gp->gfn, pa, *(u32 *)p_data,
148 gp->gfn, pa, *(u32 *)p_data, bytes); 148 bytes);
149 } 149 }
150 mutex_unlock(&gvt->lock); 150 mutex_unlock(&gvt->lock);
151 return ret; 151 return ret;
@@ -200,14 +200,13 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
200 ret = intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes); 200 ret = intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
201 201
202 if (!vgpu->mmio.disable_warn_untrack) { 202 if (!vgpu->mmio.disable_warn_untrack) {
203 gvt_err("vgpu%d: read untracked MMIO %x(%dB) val %x\n", 203 gvt_vgpu_err("read untracked MMIO %x(%dB) val %x\n",
204 vgpu->id, offset, bytes, *(u32 *)p_data); 204 offset, bytes, *(u32 *)p_data);
205 205
206 if (offset == 0x206c) { 206 if (offset == 0x206c) {
207 gvt_err("------------------------------------------\n"); 207 gvt_vgpu_err("------------------------------------------\n");
208 gvt_err("vgpu%d: likely triggers a gfx reset\n", 208 gvt_vgpu_err("likely triggers a gfx reset\n");
209 vgpu->id); 209 gvt_vgpu_err("------------------------------------------\n");
210 gvt_err("------------------------------------------\n");
211 vgpu->mmio.disable_warn_untrack = true; 210 vgpu->mmio.disable_warn_untrack = true;
212 } 211 }
213 } 212 }
@@ -220,8 +219,8 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
220 mutex_unlock(&gvt->lock); 219 mutex_unlock(&gvt->lock);
221 return 0; 220 return 0;
222err: 221err:
223 gvt_err("vgpu%d: fail to emulate MMIO read %08x len %d\n", 222 gvt_vgpu_err("fail to emulate MMIO read %08x len %d\n",
224 vgpu->id, offset, bytes); 223 offset, bytes);
225 mutex_unlock(&gvt->lock); 224 mutex_unlock(&gvt->lock);
226 return ret; 225 return ret;
227} 226}
@@ -259,10 +258,11 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
259 if (gp) { 258 if (gp) {
260 ret = gp->handler(gp, pa, p_data, bytes); 259 ret = gp->handler(gp, pa, p_data, bytes);
261 if (ret) { 260 if (ret) {
262 gvt_err("vgpu%d: guest page write error %d, " 261 gvt_err("guest page write error %d, "
263 "gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n", 262 "gfn 0x%lx, pa 0x%llx, "
264 vgpu->id, ret, 263 "var 0x%x, len %d\n",
265 gp->gfn, pa, *(u32 *)p_data, bytes); 264 ret, gp->gfn, pa,
265 *(u32 *)p_data, bytes);
266 } 266 }
267 mutex_unlock(&gvt->lock); 267 mutex_unlock(&gvt->lock);
268 return ret; 268 return ret;
@@ -329,8 +329,8 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
329 329
330 /* all register bits are RO. */ 330 /* all register bits are RO. */
331 if (ro_mask == ~(u64)0) { 331 if (ro_mask == ~(u64)0) {
332 gvt_err("vgpu%d: try to write RO reg %x\n", 332 gvt_vgpu_err("try to write RO reg %x\n",
333 vgpu->id, offset); 333 offset);
334 ret = 0; 334 ret = 0;
335 goto out; 335 goto out;
336 } 336 }
@@ -360,8 +360,8 @@ out:
360 mutex_unlock(&gvt->lock); 360 mutex_unlock(&gvt->lock);
361 return 0; 361 return 0;
362err: 362err:
363 gvt_err("vgpu%d: fail to emulate MMIO write %08x len %d\n", 363 gvt_vgpu_err("fail to emulate MMIO write %08x len %d\n", offset,
364 vgpu->id, offset, bytes); 364 bytes);
365 mutex_unlock(&gvt->lock); 365 mutex_unlock(&gvt->lock);
366 return ret; 366 return ret;
367} 367}
diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h
index 3bc620f56f35..a3a027025cd0 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.h
+++ b/drivers/gpu/drm/i915/gvt/mmio.h
@@ -107,4 +107,7 @@ int intel_vgpu_default_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
107 void *p_data, unsigned int bytes); 107 void *p_data, unsigned int bytes);
108int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, 108int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
109 void *p_data, unsigned int bytes); 109 void *p_data, unsigned int bytes);
110
111bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt,
112 unsigned int offset);
110#endif 113#endif
diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c
index 5d1caf9daba9..311799136d7f 100644
--- a/drivers/gpu/drm/i915/gvt/opregion.c
+++ b/drivers/gpu/drm/i915/gvt/opregion.c
@@ -67,14 +67,15 @@ static int map_vgpu_opregion(struct intel_vgpu *vgpu, bool map)
67 mfn = intel_gvt_hypervisor_virt_to_mfn(vgpu_opregion(vgpu)->va 67 mfn = intel_gvt_hypervisor_virt_to_mfn(vgpu_opregion(vgpu)->va
68 + i * PAGE_SIZE); 68 + i * PAGE_SIZE);
69 if (mfn == INTEL_GVT_INVALID_ADDR) { 69 if (mfn == INTEL_GVT_INVALID_ADDR) {
70 gvt_err("fail to get MFN from VA\n"); 70 gvt_vgpu_err("fail to get MFN from VA\n");
71 return -EINVAL; 71 return -EINVAL;
72 } 72 }
73 ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu, 73 ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu,
74 vgpu_opregion(vgpu)->gfn[i], 74 vgpu_opregion(vgpu)->gfn[i],
75 mfn, 1, map); 75 mfn, 1, map);
76 if (ret) { 76 if (ret) {
77 gvt_err("fail to map GFN to MFN, errno: %d\n", ret); 77 gvt_vgpu_err("fail to map GFN to MFN, errno: %d\n",
78 ret);
78 return ret; 79 return ret;
79 } 80 }
80 } 81 }
@@ -287,7 +288,7 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci)
287 parm = vgpu_opregion(vgpu)->va + INTEL_GVT_OPREGION_PARM; 288 parm = vgpu_opregion(vgpu)->va + INTEL_GVT_OPREGION_PARM;
288 289
289 if (!(swsci & SWSCI_SCI_SELECT)) { 290 if (!(swsci & SWSCI_SCI_SELECT)) {
290 gvt_err("vgpu%d: requesting SMI service\n", vgpu->id); 291 gvt_vgpu_err("requesting SMI service\n");
291 return 0; 292 return 0;
292 } 293 }
293 /* ignore non 0->1 transitions */ 294 /* ignore non 0->1 transitions */
@@ -300,9 +301,8 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci)
300 func = GVT_OPREGION_FUNC(*scic); 301 func = GVT_OPREGION_FUNC(*scic);
301 subfunc = GVT_OPREGION_SUBFUNC(*scic); 302 subfunc = GVT_OPREGION_SUBFUNC(*scic);
302 if (!querying_capabilities(*scic)) { 303 if (!querying_capabilities(*scic)) {
303 gvt_err("vgpu%d: requesting runtime service: func \"%s\"," 304 gvt_vgpu_err("requesting runtime service: func \"%s\","
304 " subfunc \"%s\"\n", 305 " subfunc \"%s\"\n",
305 vgpu->id,
306 opregion_func_name(func), 306 opregion_func_name(func),
307 opregion_subfunc_name(subfunc)); 307 opregion_subfunc_name(subfunc));
308 /* 308 /*
diff --git a/drivers/gpu/drm/i915/gvt/render.c b/drivers/gpu/drm/i915/gvt/render.c
index 73f052a4f424..0beb83563b08 100644
--- a/drivers/gpu/drm/i915/gvt/render.c
+++ b/drivers/gpu/drm/i915/gvt/render.c
@@ -167,7 +167,7 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
167 I915_WRITE_FW(reg, 0x1); 167 I915_WRITE_FW(reg, 0x1);
168 168
169 if (wait_for_atomic((I915_READ_FW(reg) == 0), 50)) 169 if (wait_for_atomic((I915_READ_FW(reg) == 0), 50))
170 gvt_err("timeout in invalidate ring (%d) tlb\n", ring_id); 170 gvt_vgpu_err("timeout in invalidate ring (%d) tlb\n", ring_id);
171 else 171 else
172 vgpu_vreg(vgpu, regs[ring_id]) = 0; 172 vgpu_vreg(vgpu, regs[ring_id]) = 0;
173 173
@@ -207,7 +207,7 @@ static void load_mocs(struct intel_vgpu *vgpu, int ring_id)
207 l3_offset.reg = 0xb020; 207 l3_offset.reg = 0xb020;
208 for (i = 0; i < 32; i++) { 208 for (i = 0; i < 32; i++) {
209 gen9_render_mocs_L3[i] = I915_READ(l3_offset); 209 gen9_render_mocs_L3[i] = I915_READ(l3_offset);
210 I915_WRITE(l3_offset, vgpu_vreg(vgpu, offset)); 210 I915_WRITE(l3_offset, vgpu_vreg(vgpu, l3_offset));
211 POSTING_READ(l3_offset); 211 POSTING_READ(l3_offset);
212 l3_offset.reg += 4; 212 l3_offset.reg += 4;
213 } 213 }
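
The load_mocs() change is a one-identifier bug fix: the loop copied the guest's L3 MOCS values but indexed the virtual register file with the stale offset left over from the earlier per-ring MOCS copy loop instead of l3_offset, so every L3 entry was programmed from the same guest register.

        /* before: I915_WRITE(l3_offset, vgpu_vreg(vgpu, offset));    stale index
         * after:  I915_WRITE(l3_offset, vgpu_vreg(vgpu, l3_offset)); per entry */
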
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index 06c9584ac5f0..34b9acdf3479 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -101,7 +101,7 @@ struct tbs_sched_data {
101 struct list_head runq_head; 101 struct list_head runq_head;
102}; 102};
103 103
104#define GVT_DEFAULT_TIME_SLICE (1 * HZ / 1000) 104#define GVT_DEFAULT_TIME_SLICE (msecs_to_jiffies(1))
105 105
106static void tbs_sched_func(struct work_struct *work) 106static void tbs_sched_func(struct work_struct *work)
107{ 107{
@@ -223,7 +223,7 @@ static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
223 return; 223 return;
224 224
225 list_add_tail(&vgpu_data->list, &sched_data->runq_head); 225 list_add_tail(&vgpu_data->list, &sched_data->runq_head);
226 schedule_delayed_work(&sched_data->work, sched_data->period); 226 schedule_delayed_work(&sched_data->work, 0);
227} 227}
228 228
229static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu) 229static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu)
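
Two details here: GVT_DEFAULT_TIME_SLICE avoids integer-division truncation, since (1 * HZ / 1000) is 0 for any HZ below 1000 (HZ=250 and HZ=100 are common configs), which would re-arm the scheduling work with no delay at all, while msecs_to_jiffies(1) rounds up to at least one jiffy; and tbs_sched_start_schedule() now kicks the work immediately instead of waiting a full period before the first scheduling pass.

        /* HZ=1000: 1*HZ/1000 == 1, msecs_to_jiffies(1) == 1
         * HZ=250:  1*HZ/1000 == 0, msecs_to_jiffies(1) == 1 (rounded up)
         * HZ=100:  1*HZ/1000 == 0, msecs_to_jiffies(1) == 1 (rounded up) */
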
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index d3a56c949025..a44782412f2c 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -84,7 +84,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
84 (u32)((workload->ctx_desc.lrca + i) << 84 (u32)((workload->ctx_desc.lrca + i) <<
85 GTT_PAGE_SHIFT)); 85 GTT_PAGE_SHIFT));
86 if (context_gpa == INTEL_GVT_INVALID_ADDR) { 86 if (context_gpa == INTEL_GVT_INVALID_ADDR) {
87 gvt_err("Invalid guest context descriptor\n"); 87 gvt_vgpu_err("Invalid guest context descriptor\n");
88 return -EINVAL; 88 return -EINVAL;
89 } 89 }
90 90
@@ -127,19 +127,22 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
127 return 0; 127 return 0;
128} 128}
129 129
130static inline bool is_gvt_request(struct drm_i915_gem_request *req)
131{
132 return i915_gem_context_force_single_submission(req->ctx);
133}
134
130static int shadow_context_status_change(struct notifier_block *nb, 135static int shadow_context_status_change(struct notifier_block *nb,
131 unsigned long action, void *data) 136 unsigned long action, void *data)
132{ 137{
133 struct intel_vgpu *vgpu = container_of(nb, 138 struct drm_i915_gem_request *req = (struct drm_i915_gem_request *)data;
134 struct intel_vgpu, shadow_ctx_notifier_block); 139 struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
135 struct drm_i915_gem_request *req = 140 shadow_ctx_notifier_block[req->engine->id]);
136 (struct drm_i915_gem_request *)data; 141 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
137 struct intel_gvt_workload_scheduler *scheduler =
138 &vgpu->gvt->scheduler;
139 struct intel_vgpu_workload *workload = 142 struct intel_vgpu_workload *workload =
140 scheduler->current_workload[req->engine->id]; 143 scheduler->current_workload[req->engine->id];
141 144
142 if (unlikely(!workload)) 145 if (!is_gvt_request(req) || unlikely(!workload))
143 return NOTIFY_OK; 146 return NOTIFY_OK;
144 147
145 switch (action) { 148 switch (action) {
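
The reworked callback recovers its intel_gvt from a per-engine array of notifier blocks: container_of() accepts an indexed array member as the designator, so the block registered on req->engine->id maps back to the single enclosing structure, and is_gvt_request() then filters out i915's own requests, which traverse the same context-status notifier chain. Reduced sketch (gvt_like is a stand-in type):

        struct gvt_like {
                struct notifier_block nb[I915_NUM_ENGINES];
        };

        static int status_change(struct notifier_block *nb,
                                 unsigned long action, void *data)
        {
                struct drm_i915_gem_request *req = data;
                /* nb points at nb[engine->id]; subtracting that element's
                 * offset yields the enclosing structure */
                struct gvt_like *g = container_of(nb, struct gvt_like,
                                                  nb[req->engine->id]);
                (void)g;
                return NOTIFY_OK;
        }
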
@@ -175,7 +178,9 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
175 int ring_id = workload->ring_id; 178 int ring_id = workload->ring_id;
176 struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx; 179 struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
177 struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv; 180 struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
181 struct intel_engine_cs *engine = dev_priv->engine[ring_id];
178 struct drm_i915_gem_request *rq; 182 struct drm_i915_gem_request *rq;
183 struct intel_vgpu *vgpu = workload->vgpu;
179 int ret; 184 int ret;
180 185
181 gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n", 186 gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
@@ -187,9 +192,24 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
187 192
188 mutex_lock(&dev_priv->drm.struct_mutex); 193 mutex_lock(&dev_priv->drm.struct_mutex);
189 194
195 /* pin the shadow context by gvt even though it will also be pinned
196 * when i915 allocates a request. That is because gvt updates the guest
197 * context from the shadow context when a workload completes, and at
198 * that moment i915 may already have unpinned the shadow context,
199 * making the shadow_ctx pages invalid. So gvt needs its own pin; after
200 * updating the guest context, gvt can unpin the shadow_ctx safely.
201 */
202 ret = engine->context_pin(engine, shadow_ctx);
203 if (ret) {
204 gvt_vgpu_err("fail to pin shadow context\n");
205 workload->status = ret;
206 mutex_unlock(&dev_priv->drm.struct_mutex);
207 return ret;
208 }
209
190 rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx); 210 rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
191 if (IS_ERR(rq)) { 211 if (IS_ERR(rq)) {
192 gvt_err("fail to allocate gem request\n"); 212 gvt_vgpu_err("fail to allocate gem request\n");
193 ret = PTR_ERR(rq); 213 ret = PTR_ERR(rq);
194 goto out; 214 goto out;
195 } 215 }
@@ -202,9 +222,12 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
202 if (ret) 222 if (ret)
203 goto out; 223 goto out;
204 224
205 ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx); 225 if ((workload->ring_id == RCS) &&
206 if (ret) 226 (workload->wa_ctx.indirect_ctx.size != 0)) {
207 goto out; 227 ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
228 if (ret)
229 goto out;
230 }
208 231
209 ret = populate_shadow_context(workload); 232 ret = populate_shadow_context(workload);
210 if (ret) 233 if (ret)
@@ -227,6 +250,9 @@ out:
227 250
228 if (!IS_ERR_OR_NULL(rq)) 251 if (!IS_ERR_OR_NULL(rq))
229 i915_add_request_no_flush(rq); 252 i915_add_request_no_flush(rq);
253 else
254 engine->context_unpin(engine, shadow_ctx);
255
230 mutex_unlock(&dev_priv->drm.struct_mutex); 256 mutex_unlock(&dev_priv->drm.struct_mutex);
231 return ret; 257 return ret;
232} 258}
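
The pin taken here and the unpin added to complete_current_workload() further down form one acquire/release pair; the only asymmetric case is a failed request allocation, where no completion will ever run, so the error path above releases the pin itself. Condensed lifecycle sketch (dispatch_sketch is illustrative, not the patch's code):

        static int dispatch_sketch(struct intel_engine_cs *engine,
                                   struct i915_gem_context *shadow_ctx)
        {
                struct drm_i915_gem_request *rq;
                int ret;

                ret = engine->context_pin(engine, shadow_ctx); /* keep pages live */
                if (ret)
                        return ret;

                rq = i915_gem_request_alloc(engine, shadow_ctx);
                if (IS_ERR(rq)) {
                        /* no request, so no completion path: release here */
                        engine->context_unpin(engine, shadow_ctx);
                        return PTR_ERR(rq);
                }
                i915_add_request_no_flush(rq);
                /* matching context_unpin() runs in complete_current_workload(),
                 * after the guest context has been copied back */
                return 0;
        }
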
@@ -322,7 +348,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
322 (u32)((workload->ctx_desc.lrca + i) << 348 (u32)((workload->ctx_desc.lrca + i) <<
323 GTT_PAGE_SHIFT)); 349 GTT_PAGE_SHIFT));
324 if (context_gpa == INTEL_GVT_INVALID_ADDR) { 350 if (context_gpa == INTEL_GVT_INVALID_ADDR) {
325 gvt_err("invalid guest context descriptor\n"); 351 gvt_vgpu_err("invalid guest context descriptor\n");
326 return; 352 return;
327 } 353 }
328 354
@@ -376,6 +402,10 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
376 * For the workload w/o request, directly complete the workload. 402 * For the workload w/o request, directly complete the workload.
377 */ 403 */
378 if (workload->req) { 404 if (workload->req) {
405 struct drm_i915_private *dev_priv =
406 workload->vgpu->gvt->dev_priv;
407 struct intel_engine_cs *engine =
408 dev_priv->engine[workload->ring_id];
379 wait_event(workload->shadow_ctx_status_wq, 409 wait_event(workload->shadow_ctx_status_wq,
380 !atomic_read(&workload->shadow_ctx_active)); 410 !atomic_read(&workload->shadow_ctx_active));
381 411
@@ -388,6 +418,10 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
388 INTEL_GVT_EVENT_MAX) 418 INTEL_GVT_EVENT_MAX)
389 intel_vgpu_trigger_virtual_event(vgpu, event); 419 intel_vgpu_trigger_virtual_event(vgpu, event);
390 } 420 }
421 mutex_lock(&dev_priv->drm.struct_mutex);
422 /* unpin shadow ctx as the shadow_ctx update is done */
423 engine->context_unpin(engine, workload->vgpu->shadow_ctx);
424 mutex_unlock(&dev_priv->drm.struct_mutex);
391 } 425 }
392 426
393 gvt_dbg_sched("ring id %d complete workload %p status %d\n", 427 gvt_dbg_sched("ring id %d complete workload %p status %d\n",
@@ -417,6 +451,7 @@ static int workload_thread(void *priv)
417 int ring_id = p->ring_id; 451 int ring_id = p->ring_id;
418 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; 452 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
419 struct intel_vgpu_workload *workload = NULL; 453 struct intel_vgpu_workload *workload = NULL;
454 struct intel_vgpu *vgpu = NULL;
420 int ret; 455 int ret;
421 bool need_force_wake = IS_SKYLAKE(gvt->dev_priv); 456 bool need_force_wake = IS_SKYLAKE(gvt->dev_priv);
422 DEFINE_WAIT_FUNC(wait, woken_wake_function); 457 DEFINE_WAIT_FUNC(wait, woken_wake_function);
@@ -459,25 +494,14 @@ static int workload_thread(void *priv)
459 mutex_unlock(&gvt->lock); 494 mutex_unlock(&gvt->lock);
460 495
461 if (ret) { 496 if (ret) {
462 gvt_err("fail to dispatch workload, skip\n"); 497 vgpu = workload->vgpu;
498 gvt_vgpu_err("fail to dispatch workload, skip\n");
463 goto complete; 499 goto complete;
464 } 500 }
465 501
466 gvt_dbg_sched("ring id %d wait workload %p\n", 502 gvt_dbg_sched("ring id %d wait workload %p\n",
467 workload->ring_id, workload); 503 workload->ring_id, workload);
468retry: 504 i915_wait_request(workload->req, 0, MAX_SCHEDULE_TIMEOUT);
469 i915_wait_request(workload->req,
470 0, MAX_SCHEDULE_TIMEOUT);
471 /* I915 has replay mechanism and a request will be replayed
472 * if there is i915 reset. So the seqno will be updated anyway.
473 * If the seqno is not updated yet after waiting, which means
474 * the replay may still be in progress and we can wait again.
475 */
476 if (!i915_gem_request_completed(workload->req)) {
477 gvt_dbg_sched("workload %p not completed, wait again\n",
478 workload);
479 goto retry;
480 }
481 505
482complete: 506complete:
483 gvt_dbg_sched("will complete workload %p, status: %d\n", 507 gvt_dbg_sched("will complete workload %p, status: %d\n",
@@ -513,15 +537,16 @@ void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu)
513void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt) 537void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt)
514{ 538{
515 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; 539 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
516 int i; 540 struct intel_engine_cs *engine;
541 enum intel_engine_id i;
517 542
518 gvt_dbg_core("clean workload scheduler\n"); 543 gvt_dbg_core("clean workload scheduler\n");
519 544
520 for (i = 0; i < I915_NUM_ENGINES; i++) { 545 for_each_engine(engine, gvt->dev_priv, i) {
521 if (scheduler->thread[i]) { 546 atomic_notifier_chain_unregister(
522 kthread_stop(scheduler->thread[i]); 547 &engine->context_status_notifier,
523 scheduler->thread[i] = NULL; 548 &gvt->shadow_ctx_notifier_block[i]);
524 } 549 kthread_stop(scheduler->thread[i]);
525 } 550 }
526} 551}
527 552
@@ -529,18 +554,15 @@ int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
529{ 554{
530 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; 555 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
531 struct workload_thread_param *param = NULL; 556 struct workload_thread_param *param = NULL;
557 struct intel_engine_cs *engine;
558 enum intel_engine_id i;
532 int ret; 559 int ret;
533 int i;
534 560
535 gvt_dbg_core("init workload scheduler\n"); 561 gvt_dbg_core("init workload scheduler\n");
536 562
537 init_waitqueue_head(&scheduler->workload_complete_wq); 563 init_waitqueue_head(&scheduler->workload_complete_wq);
538 564
539 for (i = 0; i < I915_NUM_ENGINES; i++) { 565 for_each_engine(engine, gvt->dev_priv, i) {
540 /* check ring mask at init time */
541 if (!HAS_ENGINE(gvt->dev_priv, i))
542 continue;
543
544 init_waitqueue_head(&scheduler->waitq[i]); 566 init_waitqueue_head(&scheduler->waitq[i]);
545 567
546 param = kzalloc(sizeof(*param), GFP_KERNEL); 568 param = kzalloc(sizeof(*param), GFP_KERNEL);
@@ -559,6 +581,11 @@ int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
559 ret = PTR_ERR(scheduler->thread[i]); 581 ret = PTR_ERR(scheduler->thread[i]);
560 goto err; 582 goto err;
561 } 583 }
584
585 gvt->shadow_ctx_notifier_block[i].notifier_call =
586 shadow_context_status_change;
587 atomic_notifier_chain_register(&engine->context_status_notifier,
588 &gvt->shadow_ctx_notifier_block[i]);
562 } 589 }
563 return 0; 590 return 0;
564err: 591err:
@@ -570,9 +597,6 @@ err:
570 597
571void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu) 598void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu)
572{ 599{
573 atomic_notifier_chain_unregister(&vgpu->shadow_ctx->status_notifier,
574 &vgpu->shadow_ctx_notifier_block);
575
576 i915_gem_context_put_unlocked(vgpu->shadow_ctx); 600 i915_gem_context_put_unlocked(vgpu->shadow_ctx);
577} 601}
578 602
@@ -587,10 +611,5 @@ int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu)
587 611
588 vgpu->shadow_ctx->engine[RCS].initialised = true; 612 vgpu->shadow_ctx->engine[RCS].initialised = true;
589 613
590 vgpu->shadow_ctx_notifier_block.notifier_call =
591 shadow_context_status_change;
592
593 atomic_notifier_chain_register(&vgpu->shadow_ctx->status_notifier,
594 &vgpu->shadow_ctx_notifier_block);
595 return 0; 614 return 0;
596} 615}
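
The scheduler hunks above replace the per-context status_notifier with a per-engine context_status_notifier: each engine registers one notifier block at scheduler init and unregisters it during cleanup. A minimal sketch of the atomic-notifier pattern involved; all identifiers below are illustrative, not the driver's own:

#include <linux/notifier.h>

static ATOMIC_NOTIFIER_HEAD(example_status_notifier);

static int example_status_change(struct notifier_block *nb,
                                 unsigned long action, void *data)
{
        /* called via atomic_notifier_call_chain(); must not sleep */
        return NOTIFY_OK;
}

static struct notifier_block example_nb = {
        .notifier_call = example_status_change,
};

/* producer side: publish a status change to every registered callback */
static void example_publish(unsigned long status, void *payload)
{
        atomic_notifier_call_chain(&example_status_notifier, status, payload);
}

/* consumer side: pair every register with an unregister on teardown */
static void example_setup(void)
{
        atomic_notifier_chain_register(&example_status_notifier, &example_nb);
}

static void example_teardown(void)
{
        atomic_notifier_chain_unregister(&example_status_notifier, &example_nb);
}
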
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index e703556eba99..1c75402a59c1 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -248,6 +248,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
248 case I915_PARAM_IRQ_ACTIVE: 248 case I915_PARAM_IRQ_ACTIVE:
249 case I915_PARAM_ALLOW_BATCHBUFFER: 249 case I915_PARAM_ALLOW_BATCHBUFFER:
250 case I915_PARAM_LAST_DISPATCH: 250 case I915_PARAM_LAST_DISPATCH:
251 case I915_PARAM_HAS_EXEC_CONSTANTS:
251 /* Reject all old ums/dri params. */ 252 /* Reject all old ums/dri params. */
252 return -ENODEV; 253 return -ENODEV;
253 case I915_PARAM_CHIPSET_ID: 254 case I915_PARAM_CHIPSET_ID:
@@ -274,9 +275,6 @@ static int i915_getparam(struct drm_device *dev, void *data,
274 case I915_PARAM_HAS_BSD2: 275 case I915_PARAM_HAS_BSD2:
275 value = !!dev_priv->engine[VCS2]; 276 value = !!dev_priv->engine[VCS2];
276 break; 277 break;
277 case I915_PARAM_HAS_EXEC_CONSTANTS:
278 value = INTEL_GEN(dev_priv) >= 4;
279 break;
280 case I915_PARAM_HAS_LLC: 278 case I915_PARAM_HAS_LLC:
281 value = HAS_LLC(dev_priv); 279 value = HAS_LLC(dev_priv);
282 break; 280 break;
@@ -1788,7 +1786,7 @@ void i915_reset(struct drm_i915_private *dev_priv)
1788 goto error; 1786 goto error;
1789 } 1787 }
1790 1788
1791 i915_gem_reset_finish(dev_priv); 1789 i915_gem_reset(dev_priv);
1792 intel_overlay_reset(dev_priv); 1790 intel_overlay_reset(dev_priv);
1793 1791
1794 /* Ok, now get things going again... */ 1792 /* Ok, now get things going again... */
@@ -1814,6 +1812,7 @@ void i915_reset(struct drm_i915_private *dev_priv)
1814 i915_queue_hangcheck(dev_priv); 1812 i915_queue_hangcheck(dev_priv);
1815 1813
1816wakeup: 1814wakeup:
1815 i915_gem_reset_finish(dev_priv);
1817 enable_irq(dev_priv->drm.irq); 1816 enable_irq(dev_priv->drm.irq);
1818 wake_up_bit(&error->flags, I915_RESET_IN_PROGRESS); 1817 wake_up_bit(&error->flags, I915_RESET_IN_PROGRESS);
1819 return; 1818 return;
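
One detail worth noting in the i915_reset() hunks: the engine fixup now happens via i915_gem_reset() during the reset proper, while i915_gem_reset_finish() moves to the shared wakeup label, so the finish step (re-enabling the irq tasklets disabled in prepare) runs on the error path as well. A hedged sketch of that prepare/act/finish bracket, with placeholder names:

#include <stdbool.h>

struct dev { bool wedged; };

static void reset_prepare(struct dev *d)  { /* quiesce submission */ }
static bool reset_hardware(struct dev *d) { return false; /* false = ok */ }
static void reset_fixup(struct dev *d)    { /* fix up state for replay */ }
static void reset_finish(struct dev *d)   { /* undo reset_prepare() */ }

static void do_reset(struct dev *d)
{
        reset_prepare(d);

        if (reset_hardware(d))
                goto error;

        reset_fixup(d);         /* plays the role of i915_gem_reset() */

wakeup:
        reset_finish(d);        /* always runs, success or failure */
        return;

error:
        d->wedged = true;       /* give up, but still run finish */
        goto wakeup;
}
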
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 0a4b42d31391..1e53c31b6826 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -293,6 +293,7 @@ enum plane_id {
293 PLANE_PRIMARY, 293 PLANE_PRIMARY,
294 PLANE_SPRITE0, 294 PLANE_SPRITE0,
295 PLANE_SPRITE1, 295 PLANE_SPRITE1,
296 PLANE_SPRITE2,
296 PLANE_CURSOR, 297 PLANE_CURSOR,
297 I915_MAX_PLANES, 298 I915_MAX_PLANES,
298}; 299};
@@ -1324,7 +1325,7 @@ struct intel_gen6_power_mgmt {
1324 unsigned boosts; 1325 unsigned boosts;
1325 1326
1326 /* manual wa residency calculations */ 1327 /* manual wa residency calculations */
1327 struct intel_rps_ei up_ei, down_ei; 1328 struct intel_rps_ei ei;
1328 1329
1329 /* 1330 /*
1330 * Protects RPS/RC6 register access and PCU communication. 1331 * Protects RPS/RC6 register access and PCU communication.
@@ -2063,8 +2064,6 @@ struct drm_i915_private {
2063 2064
2064 const struct intel_device_info info; 2065 const struct intel_device_info info;
2065 2066
2066 int relative_constants_mode;
2067
2068 void __iomem *regs; 2067 void __iomem *regs;
2069 2068
2070 struct intel_uncore uncore; 2069 struct intel_uncore uncore;
@@ -3341,6 +3340,7 @@ static inline u32 i915_reset_count(struct i915_gpu_error *error)
3341} 3340}
3342 3341
3343int i915_gem_reset_prepare(struct drm_i915_private *dev_priv); 3342int i915_gem_reset_prepare(struct drm_i915_private *dev_priv);
3343void i915_gem_reset(struct drm_i915_private *dev_priv);
3344void i915_gem_reset_finish(struct drm_i915_private *dev_priv); 3344void i915_gem_reset_finish(struct drm_i915_private *dev_priv);
3345void i915_gem_set_wedged(struct drm_i915_private *dev_priv); 3345void i915_gem_set_wedged(struct drm_i915_private *dev_priv);
3346void i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force); 3346void i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 6908123162d1..67b1fc5a0331 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1434,6 +1434,12 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
1434 1434
1435 trace_i915_gem_object_pwrite(obj, args->offset, args->size); 1435 trace_i915_gem_object_pwrite(obj, args->offset, args->size);
1436 1436
1437 ret = -ENODEV;
1438 if (obj->ops->pwrite)
1439 ret = obj->ops->pwrite(obj, args);
1440 if (ret != -ENODEV)
1441 goto err;
1442
1437 ret = i915_gem_object_wait(obj, 1443 ret = i915_gem_object_wait(obj,
1438 I915_WAIT_INTERRUPTIBLE | 1444 I915_WAIT_INTERRUPTIBLE |
1439 I915_WAIT_ALL, 1445 I915_WAIT_ALL,
@@ -2119,6 +2125,7 @@ i915_gem_object_truncate(struct drm_i915_gem_object *obj)
2119 */ 2125 */
2120 shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1); 2126 shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
2121 obj->mm.madv = __I915_MADV_PURGED; 2127 obj->mm.madv = __I915_MADV_PURGED;
2128 obj->mm.pages = ERR_PTR(-EFAULT);
2122} 2129}
2123 2130
2124/* Try to discard unwanted pages */ 2131/* Try to discard unwanted pages */
@@ -2218,7 +2225,9 @@ void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
2218 2225
2219 __i915_gem_object_reset_page_iter(obj); 2226 __i915_gem_object_reset_page_iter(obj);
2220 2227
2221 obj->ops->put_pages(obj, pages); 2228 if (!IS_ERR(pages))
2229 obj->ops->put_pages(obj, pages);
2230
2222unlock: 2231unlock:
2223 mutex_unlock(&obj->mm.lock); 2232 mutex_unlock(&obj->mm.lock);
2224} 2233}
@@ -2437,7 +2446,7 @@ int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
2437 if (err) 2446 if (err)
2438 return err; 2447 return err;
2439 2448
2440 if (unlikely(!obj->mm.pages)) { 2449 if (unlikely(IS_ERR_OR_NULL(obj->mm.pages))) {
2441 err = ____i915_gem_object_get_pages(obj); 2450 err = ____i915_gem_object_get_pages(obj);
2442 if (err) 2451 if (err)
2443 goto unlock; 2452 goto unlock;
@@ -2515,7 +2524,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
2515 2524
2516 pinned = true; 2525 pinned = true;
2517 if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) { 2526 if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
2518 if (unlikely(!obj->mm.pages)) { 2527 if (unlikely(IS_ERR_OR_NULL(obj->mm.pages))) {
2519 ret = ____i915_gem_object_get_pages(obj); 2528 ret = ____i915_gem_object_get_pages(obj);
2520 if (ret) 2529 if (ret)
2521 goto err_unlock; 2530 goto err_unlock;
@@ -2563,6 +2572,75 @@ err_unlock:
2563 goto out_unlock; 2572 goto out_unlock;
2564} 2573}
2565 2574
2575static int
2576i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
2577 const struct drm_i915_gem_pwrite *arg)
2578{
2579 struct address_space *mapping = obj->base.filp->f_mapping;
2580 char __user *user_data = u64_to_user_ptr(arg->data_ptr);
2581 u64 remain, offset;
2582 unsigned int pg;
2583
2584 /* Before we instantiate/pin the backing store for our use, we
2585 * can prepopulate the shmemfs filp efficiently using a write into
2586 * the pagecache. We avoid the penalty of instantiating all the
2587 * pages, important if the user is just writing to a few and never
2588 * uses the object on the GPU, and using a direct write into shmemfs
2589 * allows it to avoid the cost of retrieving a page (either swapin
2590 * or clearing-before-use) before it is overwritten.
2591 */
2592 if (READ_ONCE(obj->mm.pages))
2593 return -ENODEV;
2594
2595 /* Before the pages are instantiated the object is treated as being
2596 * in the CPU domain. The pages will be clflushed as required before
2597 * use, and we can freely write into the pages directly. If userspace
2598 * races pwrite with any other operation, corruption will ensue -
2599 * that is userspace's prerogative!
2600 */
2601
2602 remain = arg->size;
2603 offset = arg->offset;
2604 pg = offset_in_page(offset);
2605
2606 do {
2607 unsigned int len, unwritten;
2608 struct page *page;
2609 void *data, *vaddr;
2610 int err;
2611
2612 len = PAGE_SIZE - pg;
2613 if (len > remain)
2614 len = remain;
2615
2616 err = pagecache_write_begin(obj->base.filp, mapping,
2617 offset, len, 0,
2618 &page, &data);
2619 if (err < 0)
2620 return err;
2621
2622 vaddr = kmap(page);
2623 unwritten = copy_from_user(vaddr + pg, user_data, len);
2624 kunmap(page);
2625
2626 err = pagecache_write_end(obj->base.filp, mapping,
2627 offset, len, len - unwritten,
2628 page, data);
2629 if (err < 0)
2630 return err;
2631
2632 if (unwritten)
2633 return -EFAULT;
2634
2635 remain -= len;
2636 user_data += len;
2637 offset += len;
2638 pg = 0;
2639 } while (remain);
2640
2641 return 0;
2642}
2643
2566static bool ban_context(const struct i915_gem_context *ctx) 2644static bool ban_context(const struct i915_gem_context *ctx)
2567{ 2645{
2568 return (i915_gem_context_is_bannable(ctx) && 2646 return (i915_gem_context_is_bannable(ctx) &&
@@ -2641,7 +2719,16 @@ int i915_gem_reset_prepare(struct drm_i915_private *dev_priv)
2641 for_each_engine(engine, dev_priv, id) { 2719 for_each_engine(engine, dev_priv, id) {
2642 struct drm_i915_gem_request *request; 2720 struct drm_i915_gem_request *request;
2643 2721
2722 /* Prevent request submission to the hardware until we have
2723 * completed the reset in i915_gem_reset_finish(). If a request
2724 * is completed by one engine, it may then queue a request
2725 * to a second via its engine->irq_tasklet *just* as we are
2726 * calling engine->init_hw() and also writing the ELSP.
2727 * Turning off the engine->irq_tasklet until the reset is over
2728 * prevents the race.
2729 */
2644 tasklet_kill(&engine->irq_tasklet); 2730 tasklet_kill(&engine->irq_tasklet);
2731 tasklet_disable(&engine->irq_tasklet);
2645 2732
2646 if (engine_stalled(engine)) { 2733 if (engine_stalled(engine)) {
2647 request = i915_gem_find_active_request(engine); 2734 request = i915_gem_find_active_request(engine);
@@ -2756,7 +2843,7 @@ static void i915_gem_reset_engine(struct intel_engine_cs *engine)
2756 engine->reset_hw(engine, request); 2843 engine->reset_hw(engine, request);
2757} 2844}
2758 2845
2759void i915_gem_reset_finish(struct drm_i915_private *dev_priv) 2846void i915_gem_reset(struct drm_i915_private *dev_priv)
2760{ 2847{
2761 struct intel_engine_cs *engine; 2848 struct intel_engine_cs *engine;
2762 enum intel_engine_id id; 2849 enum intel_engine_id id;
@@ -2778,6 +2865,17 @@ void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
2778 } 2865 }
2779} 2866}
2780 2867
2868void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
2869{
2870 struct intel_engine_cs *engine;
2871 enum intel_engine_id id;
2872
2873 lockdep_assert_held(&dev_priv->drm.struct_mutex);
2874
2875 for_each_engine(engine, dev_priv, id)
2876 tasklet_enable(&engine->irq_tasklet);
2877}
2878
2781static void nop_submit_request(struct drm_i915_gem_request *request) 2879static void nop_submit_request(struct drm_i915_gem_request *request)
2782{ 2880{
2783 dma_fence_set_error(&request->fence, -EIO); 2881 dma_fence_set_error(&request->fence, -EIO);
@@ -3029,6 +3127,16 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
3029 args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start)); 3127 args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start));
3030 if (args->timeout_ns < 0) 3128 if (args->timeout_ns < 0)
3031 args->timeout_ns = 0; 3129 args->timeout_ns = 0;
3130
3131 /*
3132 * Apparently ktime isn't accurate enough and occasionally has a
3133 * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
3134 * things up to make the test happy. We allow up to 1 jiffy.
3135 *
3136 * This is a regression from the timespec->ktime conversion.
3137 */
3138 if (ret == -ETIME && !nsecs_to_jiffies(args->timeout_ns))
3139 args->timeout_ns = 0;
3032 } 3140 }
3033 3141
3034 i915_gem_object_put(obj); 3142 i915_gem_object_put(obj);
@@ -3974,8 +4082,11 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
3974static const struct drm_i915_gem_object_ops i915_gem_object_ops = { 4082static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
3975 .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE | 4083 .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
3976 I915_GEM_OBJECT_IS_SHRINKABLE, 4084 I915_GEM_OBJECT_IS_SHRINKABLE,
4085
3977 .get_pages = i915_gem_object_get_pages_gtt, 4086 .get_pages = i915_gem_object_get_pages_gtt,
3978 .put_pages = i915_gem_object_put_pages_gtt, 4087 .put_pages = i915_gem_object_put_pages_gtt,
4088
4089 .pwrite = i915_gem_object_pwrite_gtt,
3979}; 4090};
3980 4091
3981struct drm_i915_gem_object * 4092struct drm_i915_gem_object *
@@ -4583,8 +4694,6 @@ i915_gem_load_init(struct drm_i915_private *dev_priv)
4583 init_waitqueue_head(&dev_priv->gpu_error.wait_queue); 4694 init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
4584 init_waitqueue_head(&dev_priv->gpu_error.reset_queue); 4695 init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
4585 4696
4586 dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
4587
4588 init_waitqueue_head(&dev_priv->pending_flip_queue); 4697 init_waitqueue_head(&dev_priv->pending_flip_queue);
4589 4698
4590 dev_priv->mm.interruptible = true; 4699 dev_priv->mm.interruptible = true;
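
The pwrite ioctl hunk above adds an optional fast path: if the object's ops table supplies a pwrite hook, it is tried first, and a return of -ENODEV means "not applicable, fall through to the generic path", while any other result (success or a real error) short-circuits. A compact sketch of that dispatch shape, using hypothetical types:

#include <errno.h>

struct object;
struct write_args { unsigned long long offset, size; const void *data; };

struct object_ops {
        /* optional hook; returns -ENODEV to decline the request */
        int (*pwrite)(struct object *obj, const struct write_args *args);
};

struct object { const struct object_ops *ops; };

static int generic_pwrite(struct object *obj, const struct write_args *args)
{
        /* slower but always-correct fallback path */
        return 0;
}

static int do_pwrite(struct object *obj, const struct write_args *args)
{
        int ret = -ENODEV;

        if (obj->ops->pwrite)
                ret = obj->ops->pwrite(obj, args);
        if (ret != -ENODEV)
                return ret;     /* handled: success or a real error */

        return generic_pwrite(obj, args);
}
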
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 17f90c618208..e2d83b6d376b 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -311,7 +311,6 @@ __create_hw_context(struct drm_i915_private *dev_priv,
311 ctx->ring_size = 4 * PAGE_SIZE; 311 ctx->ring_size = 4 * PAGE_SIZE;
312 ctx->desc_template = GEN8_CTX_ADDRESSING_MODE(dev_priv) << 312 ctx->desc_template = GEN8_CTX_ADDRESSING_MODE(dev_priv) <<
313 GEN8_CTX_ADDRESSING_MODE_SHIFT; 313 GEN8_CTX_ADDRESSING_MODE_SHIFT;
314 ATOMIC_INIT_NOTIFIER_HEAD(&ctx->status_notifier);
315 314
316 /* GuC requires the ring to be placed above GUC_WOPCM_TOP. If GuC is not 315 /* GuC requires the ring to be placed above GUC_WOPCM_TOP. If GuC is not
317 * present or not in use we still need a small bias as ring wraparound 316 * present or not in use we still need a small bias as ring wraparound
diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h
index 0ac750b90f3d..e9c008fe14b1 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/i915_gem_context.h
@@ -160,9 +160,6 @@ struct i915_gem_context {
160 /** desc_template: invariant fields for the HW context descriptor */ 160 /** desc_template: invariant fields for the HW context descriptor */
161 u32 desc_template; 161 u32 desc_template;
162 162
163 /** status_notifier: list of callbacks for context-switch changes */
164 struct atomic_notifier_head status_notifier;
165
166 /** guilty_count: How many times this context has caused a GPU hang. */ 163 /** guilty_count: How many times this context has caused a GPU hang. */
167 unsigned int guilty_count; 164 unsigned int guilty_count;
168 /** 165 /**
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index c181b1bb3d2c..3be2503aa042 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -293,12 +293,12 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
293 * those as well to make room for our guard pages. 293 * those as well to make room for our guard pages.
294 */ 294 */
295 if (check_color) { 295 if (check_color) {
296 if (vma->node.start + vma->node.size == node->start) { 296 if (node->start + node->size == target->start) {
297 if (vma->node.color == node->color) 297 if (node->color == target->color)
298 continue; 298 continue;
299 } 299 }
300 if (vma->node.start == node->start + node->size) { 300 if (node->start == target->start + target->size) {
301 if (vma->node.color == node->color) 301 if (node->color == target->color)
302 continue; 302 continue;
303 } 303 }
304 } 304 }
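
The eviction fix corrects which node is compared when deciding whether a same-colored neighbour may keep abutting the target range without a guard page: the checks must relate the scanned node to the target, not to itself. The intended predicate, sketched with illustrative types:

#include <stdbool.h>

struct node { unsigned long start, size, color; };

/* true if 'n' abuts 'target' on either side with matching color,
 * i.e. no guard page is required between them and 'n' may stay */
static bool neighbour_can_stay(const struct node *n,
                               const struct node *target)
{
        if (n->color != target->color)
                return false;
        return n->start + n->size == target->start ||
               n->start == target->start + target->size;
}
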
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index d02cfaefe1c8..30e0675fd7da 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1408,10 +1408,7 @@ execbuf_submit(struct i915_execbuffer_params *params,
1408 struct drm_i915_gem_execbuffer2 *args, 1408 struct drm_i915_gem_execbuffer2 *args,
1409 struct list_head *vmas) 1409 struct list_head *vmas)
1410{ 1410{
1411 struct drm_i915_private *dev_priv = params->request->i915;
1412 u64 exec_start, exec_len; 1411 u64 exec_start, exec_len;
1413 int instp_mode;
1414 u32 instp_mask;
1415 int ret; 1412 int ret;
1416 1413
1417 ret = i915_gem_execbuffer_move_to_gpu(params->request, vmas); 1414 ret = i915_gem_execbuffer_move_to_gpu(params->request, vmas);
@@ -1422,56 +1419,11 @@ execbuf_submit(struct i915_execbuffer_params *params,
1422 if (ret) 1419 if (ret)
1423 return ret; 1420 return ret;
1424 1421
1425 instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK; 1422 if (args->flags & I915_EXEC_CONSTANTS_MASK) {
1426 instp_mask = I915_EXEC_CONSTANTS_MASK; 1423 DRM_DEBUG("I915_EXEC_CONSTANTS_* unsupported\n");
1427 switch (instp_mode) {
1428 case I915_EXEC_CONSTANTS_REL_GENERAL:
1429 case I915_EXEC_CONSTANTS_ABSOLUTE:
1430 case I915_EXEC_CONSTANTS_REL_SURFACE:
1431 if (instp_mode != 0 && params->engine->id != RCS) {
1432 DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
1433 return -EINVAL;
1434 }
1435
1436 if (instp_mode != dev_priv->relative_constants_mode) {
1437 if (INTEL_INFO(dev_priv)->gen < 4) {
1438 DRM_DEBUG("no rel constants on pre-gen4\n");
1439 return -EINVAL;
1440 }
1441
1442 if (INTEL_INFO(dev_priv)->gen > 5 &&
1443 instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
1444 DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
1445 return -EINVAL;
1446 }
1447
1448 /* The HW changed the meaning on this bit on gen6 */
1449 if (INTEL_INFO(dev_priv)->gen >= 6)
1450 instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
1451 }
1452 break;
1453 default:
1454 DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
1455 return -EINVAL; 1424 return -EINVAL;
1456 } 1425 }
1457 1426
1458 if (params->engine->id == RCS &&
1459 instp_mode != dev_priv->relative_constants_mode) {
1460 struct intel_ring *ring = params->request->ring;
1461
1462 ret = intel_ring_begin(params->request, 4);
1463 if (ret)
1464 return ret;
1465
1466 intel_ring_emit(ring, MI_NOOP);
1467 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
1468 intel_ring_emit_reg(ring, INSTPM);
1469 intel_ring_emit(ring, instp_mask << 16 | instp_mode);
1470 intel_ring_advance(ring);
1471
1472 dev_priv->relative_constants_mode = instp_mode;
1473 }
1474
1475 if (args->flags & I915_EXEC_GEN7_SOL_RESET) { 1427 if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
1476 ret = i915_reset_gen7_sol_offsets(params->request); 1428 ret = i915_reset_gen7_sol_offsets(params->request);
1477 if (ret) 1429 if (ret)
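
With the relative-constants state removed, execbuf simply rejects any request setting bits under I915_EXEC_CONSTANTS_MASK. This is the usual reject-reserved-bits validation; the mask value below is illustrative rather than the real ABI constant:

#include <errno.h>

#define EXEC_CONSTANTS_MASK (0x3u << 6) /* illustrative, not the uapi value */

static int validate_exec_flags(unsigned long long flags)
{
        if (flags & EXEC_CONSTANTS_MASK)
                return -EINVAL; /* feature removed; reject loudly */
        return 0;
}
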
diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h
index bf90b07163d1..76b80a0be797 100644
--- a/drivers/gpu/drm/i915/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/i915_gem_object.h
@@ -54,6 +54,9 @@ struct drm_i915_gem_object_ops {
54 struct sg_table *(*get_pages)(struct drm_i915_gem_object *); 54 struct sg_table *(*get_pages)(struct drm_i915_gem_object *);
55 void (*put_pages)(struct drm_i915_gem_object *, struct sg_table *); 55 void (*put_pages)(struct drm_i915_gem_object *, struct sg_table *);
56 56
57 int (*pwrite)(struct drm_i915_gem_object *,
58 const struct drm_i915_gem_pwrite *);
59
57 int (*dmabuf_export)(struct drm_i915_gem_object *); 60 int (*dmabuf_export)(struct drm_i915_gem_object *);
58 void (*release)(struct drm_i915_gem_object *); 61 void (*release)(struct drm_i915_gem_object *);
59}; 62};
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 401006b4c6a3..d5d2b4c6ed38 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -263,7 +263,7 @@ unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
263 I915_SHRINK_BOUND | 263 I915_SHRINK_BOUND |
264 I915_SHRINK_UNBOUND | 264 I915_SHRINK_UNBOUND |
265 I915_SHRINK_ACTIVE); 265 I915_SHRINK_ACTIVE);
266 rcu_barrier(); /* wait until our RCU delayed slab frees are completed */ 266 synchronize_rcu(); /* wait for our earlier RCU delayed slab frees */
267 267
268 return freed; 268 return freed;
269} 269}
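
The shrinker hunk swaps rcu_barrier() for synchronize_rcu(), and the distinction matters: synchronize_rcu() waits only for a grace period (every pre-existing RCU reader has finished), whereas rcu_barrier() additionally waits for all pending call_rcu() callbacks to execute. When the requirement is merely that earlier readers can no longer reach the freed slabs, the cheaper grace-period wait is enough. Sketched:

#include <linux/rcupdate.h>

/* After this returns, any RCU read-side critical section that began
 * before the call has completed; queued callbacks may still be pending. */
static void wait_for_readers(void)
{
        synchronize_rcu();
}

/* After this returns, every call_rcu() callback posted before the call
 * has also executed; strictly stronger, and slower. */
static void wait_for_callbacks(void)
{
        rcu_barrier();
}
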
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index e6ffef2f707a..b6c886ac901b 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1046,68 +1046,51 @@ static void vlv_c0_read(struct drm_i915_private *dev_priv,
1046 ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT); 1046 ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
1047} 1047}
1048 1048
1049static bool vlv_c0_above(struct drm_i915_private *dev_priv,
1050 const struct intel_rps_ei *old,
1051 const struct intel_rps_ei *now,
1052 int threshold)
1053{
1054 u64 time, c0;
1055 unsigned int mul = 100;
1056
1057 if (old->cz_clock == 0)
1058 return false;
1059
1060 if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
1061 mul <<= 8;
1062
1063 time = now->cz_clock - old->cz_clock;
1064 time *= threshold * dev_priv->czclk_freq;
1065
1066 /* Workload can be split between render + media, e.g. SwapBuffers
1067 * being blitted in X after being rendered in mesa. To account for
1068 * this we need to combine both engines into our activity counter.
1069 */
1070 c0 = now->render_c0 - old->render_c0;
1071 c0 += now->media_c0 - old->media_c0;
1072 c0 *= mul * VLV_CZ_CLOCK_TO_MILLI_SEC;
1073
1074 return c0 >= time;
1075}
1076
1077void gen6_rps_reset_ei(struct drm_i915_private *dev_priv) 1049void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
1078{ 1050{
1079 vlv_c0_read(dev_priv, &dev_priv->rps.down_ei); 1051 memset(&dev_priv->rps.ei, 0, sizeof(dev_priv->rps.ei));
1080 dev_priv->rps.up_ei = dev_priv->rps.down_ei;
1081} 1052}
1082 1053
1083static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir) 1054static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
1084{ 1055{
1056 const struct intel_rps_ei *prev = &dev_priv->rps.ei;
1085 struct intel_rps_ei now; 1057 struct intel_rps_ei now;
1086 u32 events = 0; 1058 u32 events = 0;
1087 1059
1088 if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0) 1060 if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
1089 return 0; 1061 return 0;
1090 1062
1091 vlv_c0_read(dev_priv, &now); 1063 vlv_c0_read(dev_priv, &now);
1092 if (now.cz_clock == 0) 1064 if (now.cz_clock == 0)
1093 return 0; 1065 return 0;
1094 1066
1095 if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) { 1067 if (prev->cz_clock) {
1096 if (!vlv_c0_above(dev_priv, 1068 u64 time, c0;
1097 &dev_priv->rps.down_ei, &now, 1069 unsigned int mul;
1098 dev_priv->rps.down_threshold)) 1070
1099 events |= GEN6_PM_RP_DOWN_THRESHOLD; 1071 mul = VLV_CZ_CLOCK_TO_MILLI_SEC * 100; /* scale to threshold% */
1100 dev_priv->rps.down_ei = now; 1072 if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
1101 } 1073 mul <<= 8;
1102 1074
1103 if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) { 1075 time = now.cz_clock - prev->cz_clock;
1104 if (vlv_c0_above(dev_priv, 1076 time *= dev_priv->czclk_freq;
1105 &dev_priv->rps.up_ei, &now, 1077
1106 dev_priv->rps.up_threshold)) 1078 /* Workload can be split between render + media,
1107 events |= GEN6_PM_RP_UP_THRESHOLD; 1079 * e.g. SwapBuffers being blitted in X after being rendered in
1108 dev_priv->rps.up_ei = now; 1080 * mesa. To account for this we need to combine both engines
1081 * into our activity counter.
1082 */
1083 c0 = now.render_c0 - prev->render_c0;
1084 c0 += now.media_c0 - prev->media_c0;
1085 c0 *= mul;
1086
1087 if (c0 > time * dev_priv->rps.up_threshold)
1088 events = GEN6_PM_RP_UP_THRESHOLD;
1089 else if (c0 < time * dev_priv->rps.down_threshold)
1090 events = GEN6_PM_RP_DOWN_THRESHOLD;
1109 } 1091 }
1110 1092
1093 dev_priv->rps.ei = now;
1111 return events; 1094 return events;
1112} 1095}
1113 1096
@@ -4228,7 +4211,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
4228 /* Let's track the enabled rps events */ 4211 /* Let's track the enabled rps events */
4229 if (IS_VALLEYVIEW(dev_priv)) 4212 if (IS_VALLEYVIEW(dev_priv))
4230 /* WaGsvRC0ResidencyMethod:vlv */ 4213 /* WaGsvRC0ResidencyMethod:vlv */
4231 dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED; 4214 dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
4232 else 4215 else
4233 dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS; 4216 dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
4234 4217
@@ -4266,6 +4249,16 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
4266 if (!IS_GEN2(dev_priv)) 4249 if (!IS_GEN2(dev_priv))
4267 dev->vblank_disable_immediate = true; 4250 dev->vblank_disable_immediate = true;
4268 4251
4252 /* Most platforms treat the display irq block as an always-on
4253 * power domain. vlv/chv can disable it at runtime and need
4254 * special care to avoid writing any of the display block registers
4255 * outside of the power domain. We defer setting up the display irqs
4256 * in this case to the runtime pm.
4257 */
4258 dev_priv->display_irqs_enabled = true;
4259 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
4260 dev_priv->display_irqs_enabled = false;
4261
4269 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp; 4262 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
4270 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; 4263 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
4271 4264
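
The reworked vlv_wa_c0_ei() keeps one EI snapshot and derives both up and down events from a single comparison: the combined render+media C0 delta, scaled by mul (which folds in the x100 percentage factor), is weighed against elapsed time multiplied by each threshold. A pseudo-C rendering of the decision rule, with illustrative names:

struct ei_snapshot { unsigned long long clock, render_c0, media_c0; };

/* returns +1 to request an up-clock, -1 for down, 0 for neither */
static int classify_residency(const struct ei_snapshot *prev,
                              const struct ei_snapshot *now,
                              unsigned int clk_freq, unsigned int mul,
                              unsigned int up_pct, unsigned int down_pct)
{
        unsigned long long time = (now->clock - prev->clock) * clk_freq;
        unsigned long long c0   = (now->render_c0 - prev->render_c0) +
                                  (now->media_c0  - prev->media_c0);

        c0 *= mul;              /* mul carries the percentage scaling */

        if (c0 > time * up_pct)
                return 1;
        if (c0 < time * down_pct)
                return -1;
        return 0;
}
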
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 155906e84812..df20e9bc1c0f 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -512,10 +512,36 @@ err_unpin:
512 return ret; 512 return ret;
513} 513}
514 514
515static void
516i915_vma_remove(struct i915_vma *vma)
517{
518 struct drm_i915_gem_object *obj = vma->obj;
519
520 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
521 GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
522
523 drm_mm_remove_node(&vma->node);
524 list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
525
526 /* Since the unbound list is global, only move to that list if
527 * no more VMAs exist.
528 */
529 if (--obj->bind_count == 0)
530 list_move_tail(&obj->global_link,
531 &to_i915(obj->base.dev)->mm.unbound_list);
532
533 /* And finally now the object is completely decoupled from this vma,
534 * we can drop its hold on the backing storage and allow it to be
535 * reaped by the shrinker.
536 */
537 i915_gem_object_unpin_pages(obj);
538 GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
539}
540
515int __i915_vma_do_pin(struct i915_vma *vma, 541int __i915_vma_do_pin(struct i915_vma *vma,
516 u64 size, u64 alignment, u64 flags) 542 u64 size, u64 alignment, u64 flags)
517{ 543{
518 unsigned int bound = vma->flags; 544 const unsigned int bound = vma->flags;
519 int ret; 545 int ret;
520 546
521 lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); 547 lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
@@ -524,18 +550,18 @@ int __i915_vma_do_pin(struct i915_vma *vma,
524 550
525 if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) { 551 if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
526 ret = -EBUSY; 552 ret = -EBUSY;
527 goto err; 553 goto err_unpin;
528 } 554 }
529 555
530 if ((bound & I915_VMA_BIND_MASK) == 0) { 556 if ((bound & I915_VMA_BIND_MASK) == 0) {
531 ret = i915_vma_insert(vma, size, alignment, flags); 557 ret = i915_vma_insert(vma, size, alignment, flags);
532 if (ret) 558 if (ret)
533 goto err; 559 goto err_unpin;
534 } 560 }
535 561
536 ret = i915_vma_bind(vma, vma->obj->cache_level, flags); 562 ret = i915_vma_bind(vma, vma->obj->cache_level, flags);
537 if (ret) 563 if (ret)
538 goto err; 564 goto err_remove;
539 565
540 if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND) 566 if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
541 __i915_vma_set_map_and_fenceable(vma); 567 __i915_vma_set_map_and_fenceable(vma);
@@ -544,7 +570,12 @@ int __i915_vma_do_pin(struct i915_vma *vma,
544 GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags)); 570 GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
545 return 0; 571 return 0;
546 572
547err: 573err_remove:
574 if ((bound & I915_VMA_BIND_MASK) == 0) {
575 GEM_BUG_ON(vma->pages);
576 i915_vma_remove(vma);
577 }
578err_unpin:
548 __i915_vma_unpin(vma); 579 __i915_vma_unpin(vma);
549 return ret; 580 return ret;
550} 581}
@@ -657,9 +688,6 @@ int i915_vma_unbind(struct i915_vma *vma)
657 } 688 }
658 vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND); 689 vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
659 690
660 drm_mm_remove_node(&vma->node);
661 list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
662
663 if (vma->pages != obj->mm.pages) { 691 if (vma->pages != obj->mm.pages) {
664 GEM_BUG_ON(!vma->pages); 692 GEM_BUG_ON(!vma->pages);
665 sg_free_table(vma->pages); 693 sg_free_table(vma->pages);
@@ -667,18 +695,7 @@ int i915_vma_unbind(struct i915_vma *vma)
667 } 695 }
668 vma->pages = NULL; 696 vma->pages = NULL;
669 697
670 /* Since the unbound list is global, only move to that list if 698 i915_vma_remove(vma);
671 * no more VMAs exist. */
672 if (--obj->bind_count == 0)
673 list_move_tail(&obj->global_link,
674 &to_i915(obj->base.dev)->mm.unbound_list);
675
676 /* And finally now the object is completely decoupled from this vma,
677 * we can drop its hold on the backing storage and allow it to be
678 * reaped by the shrinker.
679 */
680 i915_gem_object_unpin_pages(obj);
681 GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
682 699
683destroy: 700destroy:
684 if (unlikely(i915_vma_is_closed(vma))) 701 if (unlikely(i915_vma_is_closed(vma)))
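
The __i915_vma_do_pin() error handling above is the standard kernel multi-label unwind: each failure jumps to the label that undoes exactly what has succeeded so far, and err_remove tears down the node only when this call inserted it. A generic sketch of the idiom:

#include <errno.h>
#include <stdbool.h>

static bool insert_node(void)  { return true; }
static bool bind_node(void)    { return true; }
static void remove_node(void)  { /* undo insert_node() */ }
static void unpin(void)        { /* undo the pin count taken earlier */ }

static int do_pin(bool already_inserted)
{
        if (!already_inserted && !insert_node())
                goto err_unpin;         /* nothing of ours to remove yet */

        if (!bind_node())
                goto err_remove;

        return 0;

err_remove:
        if (!already_inserted)
                remove_node();          /* only undo what this call created */
err_unpin:
        unpin();
        return -EINVAL;
}
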
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index 0085bc745f6a..de219b71fb76 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -35,7 +35,6 @@
35 */ 35 */
36 36
37#define I915_CSR_GLK "i915/glk_dmc_ver1_01.bin" 37#define I915_CSR_GLK "i915/glk_dmc_ver1_01.bin"
38MODULE_FIRMWARE(I915_CSR_GLK);
39#define GLK_CSR_VERSION_REQUIRED CSR_VERSION(1, 1) 38#define GLK_CSR_VERSION_REQUIRED CSR_VERSION(1, 1)
40 39
41#define I915_CSR_KBL "i915/kbl_dmc_ver1_01.bin" 40#define I915_CSR_KBL "i915/kbl_dmc_ver1_01.bin"
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 01341670738f..ed1f4f272b4f 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -3669,10 +3669,6 @@ static void intel_update_pipe_config(struct intel_crtc *crtc,
3669 /* drm_atomic_helper_update_legacy_modeset_state might not be called. */ 3669 /* drm_atomic_helper_update_legacy_modeset_state might not be called. */
3670 crtc->base.mode = crtc->base.state->mode; 3670 crtc->base.mode = crtc->base.state->mode;
3671 3671
3672 DRM_DEBUG_KMS("Updating pipe size %ix%i -> %ix%i\n",
3673 old_crtc_state->pipe_src_w, old_crtc_state->pipe_src_h,
3674 pipe_config->pipe_src_w, pipe_config->pipe_src_h);
3675
3676 /* 3672 /*
3677 * Update pipe size and adjust fitter if needed: the reason for this is 3673 * Update pipe size and adjust fitter if needed: the reason for this is
3678 * that in compute_mode_changes we check the native mode (not the pfit 3674 * that in compute_mode_changes we check the native mode (not the pfit
@@ -4796,23 +4792,17 @@ static void skylake_pfit_enable(struct intel_crtc *crtc)
4796 struct intel_crtc_scaler_state *scaler_state = 4792 struct intel_crtc_scaler_state *scaler_state =
4797 &crtc->config->scaler_state; 4793 &crtc->config->scaler_state;
4798 4794
4799 DRM_DEBUG_KMS("for crtc_state = %p\n", crtc->config);
4800
4801 if (crtc->config->pch_pfit.enabled) { 4795 if (crtc->config->pch_pfit.enabled) {
4802 int id; 4796 int id;
4803 4797
4804 if (WARN_ON(crtc->config->scaler_state.scaler_id < 0)) { 4798 if (WARN_ON(crtc->config->scaler_state.scaler_id < 0))
4805 DRM_ERROR("Requesting pfit without getting a scaler first\n");
4806 return; 4799 return;
4807 }
4808 4800
4809 id = scaler_state->scaler_id; 4801 id = scaler_state->scaler_id;
4810 I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN | 4802 I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
4811 PS_FILTER_MEDIUM | scaler_state->scalers[id].mode); 4803 PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
4812 I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos); 4804 I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos);
4813 I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size); 4805 I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size);
4814
4815 DRM_DEBUG_KMS("for crtc_state = %p scaler_id = %d\n", crtc->config, id);
4816 } 4806 }
4817} 4807}
4818 4808
@@ -14379,6 +14369,24 @@ static void skl_update_crtcs(struct drm_atomic_state *state,
14379 } while (progress); 14369 } while (progress);
14380} 14370}
14381 14371
14372static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
14373{
14374 struct intel_atomic_state *state, *next;
14375 struct llist_node *freed;
14376
14377 freed = llist_del_all(&dev_priv->atomic_helper.free_list);
14378 llist_for_each_entry_safe(state, next, freed, freed)
14379 drm_atomic_state_put(&state->base);
14380}
14381
14382static void intel_atomic_helper_free_state_worker(struct work_struct *work)
14383{
14384 struct drm_i915_private *dev_priv =
14385 container_of(work, typeof(*dev_priv), atomic_helper.free_work);
14386
14387 intel_atomic_helper_free_state(dev_priv);
14388}
14389
14382static void intel_atomic_commit_tail(struct drm_atomic_state *state) 14390static void intel_atomic_commit_tail(struct drm_atomic_state *state)
14383{ 14391{
14384 struct drm_device *dev = state->dev; 14392 struct drm_device *dev = state->dev;
@@ -14545,6 +14553,8 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
14545 * can happen also when the device is completely off. 14553 * can happen also when the device is completely off.
14546 */ 14554 */
14547 intel_uncore_arm_unclaimed_mmio_detection(dev_priv); 14555 intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
14556
14557 intel_atomic_helper_free_state(dev_priv);
14548} 14558}
14549 14559
14550static void intel_atomic_commit_work(struct work_struct *work) 14560static void intel_atomic_commit_work(struct work_struct *work)
@@ -14946,17 +14956,19 @@ static void intel_begin_crtc_commit(struct drm_crtc *crtc,
14946 to_intel_atomic_state(old_crtc_state->state); 14956 to_intel_atomic_state(old_crtc_state->state);
14947 bool modeset = needs_modeset(crtc->state); 14957 bool modeset = needs_modeset(crtc->state);
14948 14958
14959 if (!modeset &&
14960 (intel_cstate->base.color_mgmt_changed ||
14961 intel_cstate->update_pipe)) {
14962 intel_color_set_csc(crtc->state);
14963 intel_color_load_luts(crtc->state);
14964 }
14965
14949 /* Perform vblank evasion around commit operation */ 14966 /* Perform vblank evasion around commit operation */
14950 intel_pipe_update_start(intel_crtc); 14967 intel_pipe_update_start(intel_crtc);
14951 14968
14952 if (modeset) 14969 if (modeset)
14953 goto out; 14970 goto out;
14954 14971
14955 if (crtc->state->color_mgmt_changed || to_intel_crtc_state(crtc->state)->update_pipe) {
14956 intel_color_set_csc(crtc->state);
14957 intel_color_load_luts(crtc->state);
14958 }
14959
14960 if (intel_cstate->update_pipe) 14972 if (intel_cstate->update_pipe)
14961 intel_update_pipe_config(intel_crtc, old_intel_cstate); 14973 intel_update_pipe_config(intel_crtc, old_intel_cstate);
14962 else if (INTEL_GEN(dev_priv) >= 9) 14974 else if (INTEL_GEN(dev_priv) >= 9)
@@ -16599,18 +16611,6 @@ fail:
16599 drm_modeset_acquire_fini(&ctx); 16611 drm_modeset_acquire_fini(&ctx);
16600} 16612}
16601 16613
16602static void intel_atomic_helper_free_state(struct work_struct *work)
16603{
16604 struct drm_i915_private *dev_priv =
16605 container_of(work, typeof(*dev_priv), atomic_helper.free_work);
16606 struct intel_atomic_state *state, *next;
16607 struct llist_node *freed;
16608
16609 freed = llist_del_all(&dev_priv->atomic_helper.free_list);
16610 llist_for_each_entry_safe(state, next, freed, freed)
16611 drm_atomic_state_put(&state->base);
16612}
16613
16614int intel_modeset_init(struct drm_device *dev) 16614int intel_modeset_init(struct drm_device *dev)
16615{ 16615{
16616 struct drm_i915_private *dev_priv = to_i915(dev); 16616 struct drm_i915_private *dev_priv = to_i915(dev);
@@ -16631,7 +16631,7 @@ int intel_modeset_init(struct drm_device *dev)
16631 dev->mode_config.funcs = &intel_mode_funcs; 16631 dev->mode_config.funcs = &intel_mode_funcs;
16632 16632
16633 INIT_WORK(&dev_priv->atomic_helper.free_work, 16633 INIT_WORK(&dev_priv->atomic_helper.free_work,
16634 intel_atomic_helper_free_state); 16634 intel_atomic_helper_free_state_worker);
16635 16635
16636 intel_init_quirks(dev); 16636 intel_init_quirks(dev);
16637 16637
@@ -16696,12 +16696,11 @@ int intel_modeset_init(struct drm_device *dev)
16696 } 16696 }
16697 } 16697 }
16698 16698
16699 intel_update_czclk(dev_priv);
16700 intel_update_cdclk(dev_priv);
16701 dev_priv->atomic_cdclk_freq = dev_priv->cdclk_freq;
16702
16703 intel_shared_dpll_init(dev); 16699 intel_shared_dpll_init(dev);
16704 16700
16701 intel_update_czclk(dev_priv);
16702 intel_modeset_init_hw(dev);
16703
16705 if (dev_priv->max_cdclk_freq == 0) 16704 if (dev_priv->max_cdclk_freq == 0)
16706 intel_update_max_cdclk(dev_priv); 16705 intel_update_max_cdclk(dev_priv);
16707 16706
@@ -17258,8 +17257,6 @@ void intel_modeset_gem_init(struct drm_device *dev)
17258 17257
17259 intel_init_gt_powersave(dev_priv); 17258 intel_init_gt_powersave(dev_priv);
17260 17259
17261 intel_modeset_init_hw(dev);
17262
17263 intel_setup_overlay(dev_priv); 17260 intel_setup_overlay(dev_priv);
17264} 17261}
17265 17262
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 371acf109e34..ab1be5c80ea5 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -105,6 +105,8 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
105 /* Nothing to do here, execute in order of dependencies */ 105 /* Nothing to do here, execute in order of dependencies */
106 engine->schedule = NULL; 106 engine->schedule = NULL;
107 107
108 ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);
109
108 dev_priv->engine[id] = engine; 110 dev_priv->engine[id] = engine;
109 return 0; 111 return 0;
110} 112}
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 1b8ba2e77539..2d449fb5d1d2 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -357,14 +357,13 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
357 bool *enabled, int width, int height) 357 bool *enabled, int width, int height)
358{ 358{
359 struct drm_i915_private *dev_priv = to_i915(fb_helper->dev); 359 struct drm_i915_private *dev_priv = to_i915(fb_helper->dev);
360 unsigned long conn_configured, mask; 360 unsigned long conn_configured, conn_seq, mask;
361 unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG); 361 unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG);
362 int i, j; 362 int i, j;
363 bool *save_enabled; 363 bool *save_enabled;
364 bool fallback = true; 364 bool fallback = true;
365 int num_connectors_enabled = 0; 365 int num_connectors_enabled = 0;
366 int num_connectors_detected = 0; 366 int num_connectors_detected = 0;
367 int pass = 0;
368 367
369 save_enabled = kcalloc(count, sizeof(bool), GFP_KERNEL); 368 save_enabled = kcalloc(count, sizeof(bool), GFP_KERNEL);
370 if (!save_enabled) 369 if (!save_enabled)
@@ -374,6 +373,7 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
374 mask = BIT(count) - 1; 373 mask = BIT(count) - 1;
375 conn_configured = 0; 374 conn_configured = 0;
376retry: 375retry:
376 conn_seq = conn_configured;
377 for (i = 0; i < count; i++) { 377 for (i = 0; i < count; i++) {
378 struct drm_fb_helper_connector *fb_conn; 378 struct drm_fb_helper_connector *fb_conn;
379 struct drm_connector *connector; 379 struct drm_connector *connector;
@@ -387,7 +387,7 @@ retry:
387 if (conn_configured & BIT(i)) 387 if (conn_configured & BIT(i))
388 continue; 388 continue;
389 389
390 if (pass == 0 && !connector->has_tile) 390 if (conn_seq == 0 && !connector->has_tile)
391 continue; 391 continue;
392 392
393 if (connector->status == connector_status_connected) 393 if (connector->status == connector_status_connected)
@@ -498,10 +498,8 @@ retry:
498 conn_configured |= BIT(i); 498 conn_configured |= BIT(i);
499 } 499 }
500 500
501 if ((conn_configured & mask) != mask) { 501 if ((conn_configured & mask) != mask && conn_configured != conn_seq)
502 pass++;
503 goto retry; 502 goto retry;
504 }
505 503
506 /* 504 /*
507 * If the BIOS didn't enable everything it could, fall back to have the 505 * If the BIOS didn't enable everything it could, fall back to have the
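
The fbdev retry loop drops the unbounded pass counter in favour of a convergence test: retry only while a pass configured something new (conn_configured != conn_seq), which guarantees termination as soon as a full pass makes no progress. The loop shape, sketched generically:

static unsigned long try_configure(unsigned long configured,
                                   unsigned long mask)
{
        /* attempt each unconfigured item; set its bit on success */
        return configured;
}

static void configure_all(unsigned long mask)
{
        unsigned long configured = 0, seq;

        do {
                seq = configured;               /* snapshot before the pass */
                configured = try_configure(configured, mask);
        } while ((configured & mask) != mask && /* work remains...        */
                 configured != seq);            /* ...and we made progress */
}
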
diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c
index d23c0fcff751..8c04eca84351 100644
--- a/drivers/gpu/drm/i915/intel_gvt.c
+++ b/drivers/gpu/drm/i915/intel_gvt.c
@@ -77,6 +77,11 @@ int intel_gvt_init(struct drm_i915_private *dev_priv)
77 goto bail; 77 goto bail;
78 } 78 }
79 79
80 if (!i915.enable_execlists) {
81 DRM_INFO("GPU guest virtualisation [GVT-g] disabled due to disabled execlist submission [i915.enable_execlists module parameter]\n");
82 goto bail;
83 }
84
80 /* 85 /*
81 * We're not in host or fail to find a MPT module, disable GVT-g 86 * We're not in host or fail to find a MPT module, disable GVT-g
82 */ 87 */
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index ebae2bd83918..24b2fa5b6282 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -1298,16 +1298,34 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
1298 1298
1299static bool hdmi_12bpc_possible(struct intel_crtc_state *crtc_state) 1299static bool hdmi_12bpc_possible(struct intel_crtc_state *crtc_state)
1300{ 1300{
1301 struct drm_device *dev = crtc_state->base.crtc->dev; 1301 struct drm_i915_private *dev_priv =
1302 to_i915(crtc_state->base.crtc->dev);
1303 struct drm_atomic_state *state = crtc_state->base.state;
1304 struct drm_connector_state *connector_state;
1305 struct drm_connector *connector;
1306 int i;
1302 1307
1303 if (HAS_GMCH_DISPLAY(to_i915(dev))) 1308 if (HAS_GMCH_DISPLAY(dev_priv))
1304 return false; 1309 return false;
1305 1310
1306 /* 1311 /*
1307 * HDMI 12bpc affects the clocks, so it's only possible 1312 * HDMI 12bpc affects the clocks, so it's only possible
1308 * when not cloning with other encoder types. 1313 * when not cloning with other encoder types.
1309 */ 1314 */
1310 return crtc_state->output_types == 1 << INTEL_OUTPUT_HDMI; 1315 if (crtc_state->output_types != 1 << INTEL_OUTPUT_HDMI)
1316 return false;
1317
1318 for_each_connector_in_state(state, connector, connector_state, i) {
1319 const struct drm_display_info *info = &connector->display_info;
1320
1321 if (connector_state->crtc != crtc_state->base.crtc)
1322 continue;
1323
1324 if ((info->edid_hdmi_dc_modes & DRM_EDID_HDMI_DC_36) == 0)
1325 return false;
1326 }
1327
1328 return true;
1311} 1329}
1312 1330
1313bool intel_hdmi_compute_config(struct intel_encoder *encoder, 1331bool intel_hdmi_compute_config(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c
index b62e3f8ad415..54208bef7a83 100644
--- a/drivers/gpu/drm/i915/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/intel_hotplug.c
@@ -219,7 +219,7 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
219 } 219 }
220 } 220 }
221 } 221 }
222 if (dev_priv->display.hpd_irq_setup) 222 if (dev_priv->display_irqs_enabled && dev_priv->display.hpd_irq_setup)
223 dev_priv->display.hpd_irq_setup(dev_priv); 223 dev_priv->display.hpd_irq_setup(dev_priv);
224 spin_unlock_irq(&dev_priv->irq_lock); 224 spin_unlock_irq(&dev_priv->irq_lock);
225 225
@@ -425,7 +425,7 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
425 } 425 }
426 } 426 }
427 427
428 if (storm_detected) 428 if (storm_detected && dev_priv->display_irqs_enabled)
429 dev_priv->display.hpd_irq_setup(dev_priv); 429 dev_priv->display.hpd_irq_setup(dev_priv);
430 spin_unlock(&dev_priv->irq_lock); 430 spin_unlock(&dev_priv->irq_lock);
431 431
@@ -471,10 +471,12 @@ void intel_hpd_init(struct drm_i915_private *dev_priv)
471 * Interrupt setup is already guaranteed to be single-threaded, this is 471 * Interrupt setup is already guaranteed to be single-threaded, this is
472 * just to make the assert_spin_locked checks happy. 472 * just to make the assert_spin_locked checks happy.
473 */ 473 */
474 spin_lock_irq(&dev_priv->irq_lock); 474 if (dev_priv->display_irqs_enabled && dev_priv->display.hpd_irq_setup) {
475 if (dev_priv->display.hpd_irq_setup) 475 spin_lock_irq(&dev_priv->irq_lock);
476 dev_priv->display.hpd_irq_setup(dev_priv); 476 if (dev_priv->display_irqs_enabled)
477 spin_unlock_irq(&dev_priv->irq_lock); 477 dev_priv->display.hpd_irq_setup(dev_priv);
478 spin_unlock_irq(&dev_priv->irq_lock);
479 }
478} 480}
479 481
480static void i915_hpd_poll_init_work(struct work_struct *work) 482static void i915_hpd_poll_init_work(struct work_struct *work)
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index ebf8023d21e6..471af3b480ad 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -345,7 +345,8 @@ execlists_context_status_change(struct drm_i915_gem_request *rq,
345 if (!IS_ENABLED(CONFIG_DRM_I915_GVT)) 345 if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
346 return; 346 return;
347 347
348 atomic_notifier_call_chain(&rq->ctx->status_notifier, status, rq); 348 atomic_notifier_call_chain(&rq->engine->context_status_notifier,
349 status, rq);
349} 350}
350 351
351static void 352static void
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 249623d45be0..6a29784d2b41 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -4891,6 +4891,12 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
4891 break; 4891 break;
4892 } 4892 }
4893 4893
4894 /* When byt can survive without system hang with dynamic
4895 * sw freq adjustments, this restriction can be lifted.
4896 */
4897 if (IS_VALLEYVIEW(dev_priv))
4898 goto skip_hw_write;
4899
4894 I915_WRITE(GEN6_RP_UP_EI, 4900 I915_WRITE(GEN6_RP_UP_EI,
4895 GT_INTERVAL_FROM_US(dev_priv, ei_up)); 4901 GT_INTERVAL_FROM_US(dev_priv, ei_up));
4896 I915_WRITE(GEN6_RP_UP_THRESHOLD, 4902 I915_WRITE(GEN6_RP_UP_THRESHOLD,
@@ -4911,6 +4917,7 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
4911 GEN6_RP_UP_BUSY_AVG | 4917 GEN6_RP_UP_BUSY_AVG |
4912 GEN6_RP_DOWN_IDLE_AVG); 4918 GEN6_RP_DOWN_IDLE_AVG);
4913 4919
4920skip_hw_write:
4914 dev_priv->rps.power = new_power; 4921 dev_priv->rps.power = new_power;
4915 dev_priv->rps.up_threshold = threshold_up; 4922 dev_priv->rps.up_threshold = threshold_up;
4916 dev_priv->rps.down_threshold = threshold_down; 4923 dev_priv->rps.down_threshold = threshold_down;
@@ -4921,8 +4928,9 @@ static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
4921{ 4928{
4922 u32 mask = 0; 4929 u32 mask = 0;
4923 4930
 4931 /* We use UP_EI_EXPIRED interrupts for both up/down in manual mode */
4924 if (val > dev_priv->rps.min_freq_softlimit) 4932 if (val > dev_priv->rps.min_freq_softlimit)
4925 mask |= GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT; 4933 mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
4926 if (val < dev_priv->rps.max_freq_softlimit) 4934 if (val < dev_priv->rps.max_freq_softlimit)
4927 mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD; 4935 mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;
4928 4936
@@ -5032,7 +5040,7 @@ void gen6_rps_busy(struct drm_i915_private *dev_priv)
5032{ 5040{
5033 mutex_lock(&dev_priv->rps.hw_lock); 5041 mutex_lock(&dev_priv->rps.hw_lock);
5034 if (dev_priv->rps.enabled) { 5042 if (dev_priv->rps.enabled) {
5035 if (dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) 5043 if (dev_priv->pm_rps_events & GEN6_PM_RP_UP_EI_EXPIRED)
5036 gen6_rps_reset_ei(dev_priv); 5044 gen6_rps_reset_ei(dev_priv);
5037 I915_WRITE(GEN6_PMINTRMSK, 5045 I915_WRITE(GEN6_PMINTRMSK,
5038 gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq)); 5046 gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
@@ -7916,10 +7924,10 @@ static bool skl_pcode_try_request(struct drm_i915_private *dev_priv, u32 mbox,
7916 * @timeout_base_ms: timeout for polling with preemption enabled 7924 * @timeout_base_ms: timeout for polling with preemption enabled
7917 * 7925 *
7918 * Keep resending the @request to @mbox until PCODE acknowledges it, PCODE 7926 * Keep resending the @request to @mbox until PCODE acknowledges it, PCODE
7919 * reports an error or an overall timeout of @timeout_base_ms+10 ms expires. 7927 * reports an error or an overall timeout of @timeout_base_ms+50 ms expires.
7920 * The request is acknowledged once the PCODE reply dword equals @reply after 7928 * The request is acknowledged once the PCODE reply dword equals @reply after
7921 * applying @reply_mask. Polling is first attempted with preemption enabled 7929 * applying @reply_mask. Polling is first attempted with preemption enabled
7922 * for @timeout_base_ms and if this times out for another 10 ms with 7930 * for @timeout_base_ms and if this times out for another 50 ms with
7923 * preemption disabled. 7931 * preemption disabled.
7924 * 7932 *
7925 * Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some 7933 * Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some
@@ -7955,14 +7963,15 @@ int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
7955 * worst case) _and_ PCODE was busy for some reason even after a 7963 * worst case) _and_ PCODE was busy for some reason even after a
7956 * (queued) request and @timeout_base_ms delay. As a workaround retry 7964 * (queued) request and @timeout_base_ms delay. As a workaround retry
7957 * the poll with preemption disabled to maximize the number of 7965 * the poll with preemption disabled to maximize the number of
7958 * requests. Increase the timeout from @timeout_base_ms to 10ms to 7966 * requests. Increase the timeout from @timeout_base_ms to 50ms to
7959 * account for interrupts that could reduce the number of these 7967 * account for interrupts that could reduce the number of these
7960 * requests. 7968 * requests, and for any quirks of the PCODE firmware that delays
7969 * the request completion.
7961 */ 7970 */
7962 DRM_DEBUG_KMS("PCODE timeout, retrying with preemption disabled\n"); 7971 DRM_DEBUG_KMS("PCODE timeout, retrying with preemption disabled\n");
7963 WARN_ON_ONCE(timeout_base_ms > 3); 7972 WARN_ON_ONCE(timeout_base_ms > 3);
7964 preempt_disable(); 7973 preempt_disable();
7965 ret = wait_for_atomic(COND, 10); 7974 ret = wait_for_atomic(COND, 50);
7966 preempt_enable(); 7975 preempt_enable();
7967 7976
7968out: 7977out:
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 91bc4abf5d3e..6c5f9958197d 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -2024,6 +2024,8 @@ static int intel_ring_context_pin(struct intel_engine_cs *engine,
2024 ret = context_pin(ctx, flags); 2024 ret = context_pin(ctx, flags);
2025 if (ret) 2025 if (ret)
2026 goto error; 2026 goto error;
2027
2028 ce->state->obj->mm.dirty = true;
2027 } 2029 }
2028 2030
2029 /* The kernel context is only used as a placeholder for flushing the 2031 /* The kernel context is only used as a placeholder for flushing the
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 79c2b8d72322..13dccb18cd43 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -403,6 +403,9 @@ struct intel_engine_cs {
403 */ 403 */
404 struct i915_gem_context *legacy_active_context; 404 struct i915_gem_context *legacy_active_context;
405 405
 406 /* context_status_notifier: list of callbacks for context-switch changes */
407 struct atomic_notifier_head context_status_notifier;
408
406 struct intel_engine_hangcheck hangcheck; 409 struct intel_engine_hangcheck hangcheck;
407 410
408 bool needs_cmd_parser; 411 bool needs_cmd_parser;
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 9ef54688872a..9481ca9a3ae7 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -254,9 +254,6 @@ skl_update_plane(struct drm_plane *drm_plane,
254 int scaler_id = plane_state->scaler_id; 254 int scaler_id = plane_state->scaler_id;
255 const struct intel_scaler *scaler; 255 const struct intel_scaler *scaler;
256 256
257 DRM_DEBUG_KMS("plane = %d PS_PLANE_SEL(plane) = 0x%x\n",
258 plane_id, PS_PLANE_SEL(plane_id));
259
260 scaler = &crtc_state->scaler_state.scalers[scaler_id]; 257 scaler = &crtc_state->scaler_state.scalers[scaler_id];
261 258
262 I915_WRITE(SKL_PS_CTRL(pipe, scaler_id), 259 I915_WRITE(SKL_PS_CTRL(pipe, scaler_id),
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index abe08885a5ba..b7ff592b14f5 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -119,6 +119,8 @@ fw_domains_get(struct drm_i915_private *dev_priv, enum forcewake_domains fw_doma
119 119
120 for_each_fw_domain_masked(d, fw_domains, dev_priv) 120 for_each_fw_domain_masked(d, fw_domains, dev_priv)
121 fw_domain_wait_ack(d); 121 fw_domain_wait_ack(d);
122
123 dev_priv->uncore.fw_domains_active |= fw_domains;
122} 124}
123 125
124static void 126static void
@@ -130,6 +132,8 @@ fw_domains_put(struct drm_i915_private *dev_priv, enum forcewake_domains fw_doma
130 fw_domain_put(d); 132 fw_domain_put(d);
131 fw_domain_posting_read(d); 133 fw_domain_posting_read(d);
132 } 134 }
135
136 dev_priv->uncore.fw_domains_active &= ~fw_domains;
133} 137}
134 138
135static void 139static void
@@ -240,10 +244,8 @@ intel_uncore_fw_release_timer(struct hrtimer *timer)
240 if (WARN_ON(domain->wake_count == 0)) 244 if (WARN_ON(domain->wake_count == 0))
241 domain->wake_count++; 245 domain->wake_count++;
242 246
243 if (--domain->wake_count == 0) { 247 if (--domain->wake_count == 0)
244 dev_priv->uncore.funcs.force_wake_put(dev_priv, domain->mask); 248 dev_priv->uncore.funcs.force_wake_put(dev_priv, domain->mask);
245 dev_priv->uncore.fw_domains_active &= ~domain->mask;
246 }
247 249
248 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 250 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
249 251
@@ -454,10 +456,8 @@ static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
454 fw_domains &= ~domain->mask; 456 fw_domains &= ~domain->mask;
455 } 457 }
456 458
457 if (fw_domains) { 459 if (fw_domains)
458 dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains); 460 dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
459 dev_priv->uncore.fw_domains_active |= fw_domains;
460 }
461} 461}
462 462
463/** 463/**
@@ -968,7 +968,6 @@ static noinline void ___force_wake_auto(struct drm_i915_private *dev_priv,
968 fw_domain_arm_timer(domain); 968 fw_domain_arm_timer(domain);
969 969
970 dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains); 970 dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
971 dev_priv->uncore.fw_domains_active |= fw_domains;
972} 971}
973 972
974static inline void __force_wake_auto(struct drm_i915_private *dev_priv, 973static inline void __force_wake_auto(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index 4414cf73735d..36602ac7e248 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -1,4 +1,4 @@
1/* Copyright (c) 2016 The Linux Foundation. All rights reserved. 1/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
2 * 2 *
3 * This program is free software; you can redistribute it and/or modify 3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and 4 * it under the terms of the GNU General Public License version 2 and
@@ -534,7 +534,7 @@ static void a5xx_destroy(struct msm_gpu *gpu)
534 } 534 }
535 535
536 if (a5xx_gpu->gpmu_bo) { 536 if (a5xx_gpu->gpmu_bo) {
537 if (a5xx_gpu->gpmu_bo) 537 if (a5xx_gpu->gpmu_iova)
538 msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->id); 538 msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->id);
539 drm_gem_object_unreference_unlocked(a5xx_gpu->gpmu_bo); 539 drm_gem_object_unreference_unlocked(a5xx_gpu->gpmu_bo);
540 } 540 }
@@ -860,7 +860,9 @@ static const struct adreno_gpu_funcs funcs = {
860 .idle = a5xx_idle, 860 .idle = a5xx_idle,
861 .irq = a5xx_irq, 861 .irq = a5xx_irq,
862 .destroy = a5xx_destroy, 862 .destroy = a5xx_destroy,
863#ifdef CONFIG_DEBUG_FS
863 .show = a5xx_show, 864 .show = a5xx_show,
865#endif
864 }, 866 },
865 .get_timestamp = a5xx_get_timestamp, 867 .get_timestamp = a5xx_get_timestamp,
866}; 868};
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index c9bd1e6225f4..5ae65426b4e5 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -418,18 +418,27 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
418 return 0; 418 return 0;
419} 419}
420 420
421void adreno_gpu_cleanup(struct adreno_gpu *gpu) 421void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)
422{ 422{
423 if (gpu->memptrs_bo) { 423 struct msm_gpu *gpu = &adreno_gpu->base;
424 if (gpu->memptrs) 424
425 msm_gem_put_vaddr(gpu->memptrs_bo); 425 if (adreno_gpu->memptrs_bo) {
426 if (adreno_gpu->memptrs)
427 msm_gem_put_vaddr(adreno_gpu->memptrs_bo);
428
429 if (adreno_gpu->memptrs_iova)
430 msm_gem_put_iova(adreno_gpu->memptrs_bo, gpu->id);
431
432 drm_gem_object_unreference_unlocked(adreno_gpu->memptrs_bo);
433 }
434 release_firmware(adreno_gpu->pm4);
435 release_firmware(adreno_gpu->pfp);
426 436
427 if (gpu->memptrs_iova) 437 msm_gpu_cleanup(gpu);
428 msm_gem_put_iova(gpu->memptrs_bo, gpu->base.id);
429 438
430 drm_gem_object_unreference_unlocked(gpu->memptrs_bo); 439 if (gpu->aspace) {
440 gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu,
441 iommu_ports, ARRAY_SIZE(iommu_ports));
442 msm_gem_address_space_destroy(gpu->aspace);
431 } 443 }
432 release_firmware(gpu->pm4);
433 release_firmware(gpu->pfp);
434 msm_gpu_cleanup(&gpu->base);
435} 444}
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
index 921270ea6059..a879ffa534b4 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -171,7 +171,7 @@ dsi_mgr_phy_enable(int id,
171 } 171 }
172 } 172 }
173 } else { 173 } else {
174 msm_dsi_host_reset_phy(mdsi->host); 174 msm_dsi_host_reset_phy(msm_dsi->host);
175 ret = enable_phy(msm_dsi, src_pll_id, &shared_timings[id]); 175 ret = enable_phy(msm_dsi, src_pll_id, &shared_timings[id]);
176 if (ret) 176 if (ret)
177 return ret; 177 return ret;
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_audio.c b/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
index a54d3bb5baad..8177e8511afd 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
@@ -18,13 +18,6 @@
18#include <linux/hdmi.h> 18#include <linux/hdmi.h>
19#include "hdmi.h" 19#include "hdmi.h"
20 20
21
22/* Supported HDMI Audio channels */
23#define MSM_HDMI_AUDIO_CHANNEL_2 0
24#define MSM_HDMI_AUDIO_CHANNEL_4 1
25#define MSM_HDMI_AUDIO_CHANNEL_6 2
26#define MSM_HDMI_AUDIO_CHANNEL_8 3
27
28/* maps MSM_HDMI_AUDIO_CHANNEL_n consts used by audio driver to # of channels: */ 21/* maps MSM_HDMI_AUDIO_CHANNEL_n consts used by audio driver to # of channels: */
29static int nchannels[] = { 2, 4, 6, 8 }; 22static int nchannels[] = { 2, 4, 6, 8 };
30 23
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h
index 611da7a660c9..238901987e00 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h
@@ -18,7 +18,8 @@
18#ifndef __MDP5_PIPE_H__ 18#ifndef __MDP5_PIPE_H__
19#define __MDP5_PIPE_H__ 19#define __MDP5_PIPE_H__
20 20
21#define SSPP_MAX (SSPP_RGB3 + 1) /* TODO: Add SSPP_MAX in mdp5.xml.h */ 21/* TODO: Add SSPP_MAX in mdp5.xml.h */
22#define SSPP_MAX (SSPP_CURSOR1 + 1)
22 23
23/* represents a hw pipe, which is dynamically assigned to a plane */ 24/* represents a hw pipe, which is dynamically assigned to a plane */
24struct mdp5_hw_pipe { 25struct mdp5_hw_pipe {
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 59811f29607d..68e509b3b9e4 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -812,6 +812,12 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
812 812
813 size = PAGE_ALIGN(size); 813 size = PAGE_ALIGN(size);
814 814
815 /* Disallow zero-sized objects as they make the underlying
816 * infrastructure grumpy
817 */
818 if (size == 0)
819 return ERR_PTR(-EINVAL);
820
815 ret = msm_gem_new_impl(dev, size, flags, NULL, &obj); 821 ret = msm_gem_new_impl(dev, size, flags, NULL, &obj);
816 if (ret) 822 if (ret)
817 goto fail; 823 goto fail;
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 99e05aacbee1..af5b6ba4095b 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -706,9 +706,6 @@ void msm_gpu_cleanup(struct msm_gpu *gpu)
706 msm_ringbuffer_destroy(gpu->rb); 706 msm_ringbuffer_destroy(gpu->rb);
707 } 707 }
708 708
709 if (gpu->aspace)
710 msm_gem_address_space_destroy(gpu->aspace);
711
712 if (gpu->fctx) 709 if (gpu->fctx)
713 msm_fence_context_free(gpu->fctx); 710 msm_fence_context_free(gpu->fctx);
714} 711}
diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
index af267c35d813..ee5883f59be5 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
@@ -147,9 +147,6 @@ static int omap_gem_dmabuf_mmap(struct dma_buf *buffer,
147 struct drm_gem_object *obj = buffer->priv; 147 struct drm_gem_object *obj = buffer->priv;
148 int ret = 0; 148 int ret = 0;
149 149
150 if (WARN_ON(!obj->filp))
151 return -EINVAL;
152
153 ret = drm_gem_mmap_obj(obj, omap_gem_mmap_size(obj), vma); 150 ret = drm_gem_mmap_obj(obj, omap_gem_mmap_size(obj), vma);
154 if (ret < 0) 151 if (ret < 0)
155 return ret; 152 return ret;
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 684f1703aa5c..aaa3e80fecb4 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -213,8 +213,8 @@ static void radeon_evict_flags(struct ttm_buffer_object *bo,
213 rbo->placement.num_busy_placement = 0; 213 rbo->placement.num_busy_placement = 0;
214 for (i = 0; i < rbo->placement.num_placement; i++) { 214 for (i = 0; i < rbo->placement.num_placement; i++) {
215 if (rbo->placements[i].flags & TTM_PL_FLAG_VRAM) { 215 if (rbo->placements[i].flags & TTM_PL_FLAG_VRAM) {
216 if (rbo->placements[0].fpfn < fpfn) 216 if (rbo->placements[i].fpfn < fpfn)
217 rbo->placements[0].fpfn = fpfn; 217 rbo->placements[i].fpfn = fpfn;
218 } else { 218 } else {
219 rbo->placement.busy_placement = 219 rbo->placement.busy_placement =
220 &rbo->placements[i]; 220 &rbo->placements[i];
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index d12b8978142f..c7af9fdd20c7 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -2984,6 +2984,16 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
2984 (rdev->pdev->device == 0x6667)) { 2984 (rdev->pdev->device == 0x6667)) {
2985 max_sclk = 75000; 2985 max_sclk = 75000;
2986 } 2986 }
2987 } else if (rdev->family == CHIP_OLAND) {
2988 if ((rdev->pdev->revision == 0xC7) ||
2989 (rdev->pdev->revision == 0x80) ||
2990 (rdev->pdev->revision == 0x81) ||
2991 (rdev->pdev->revision == 0x83) ||
2992 (rdev->pdev->revision == 0x87) ||
2993 (rdev->pdev->device == 0x6604) ||
2994 (rdev->pdev->device == 0x6605)) {
2995 max_sclk = 75000;
2996 }
2987 } 2997 }
2988 2998
2989 if (rps->vce_active) { 2999 if (rps->vce_active) {
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
index f80bf9385e41..d745e8b50fb8 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
@@ -464,6 +464,7 @@ static void tilcdc_crtc_enable(struct drm_crtc *crtc)
464{ 464{
465 struct drm_device *dev = crtc->dev; 465 struct drm_device *dev = crtc->dev;
466 struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc); 466 struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
467 unsigned long flags;
467 468
468 WARN_ON(!drm_modeset_is_locked(&crtc->mutex)); 469 WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
469 mutex_lock(&tilcdc_crtc->enable_lock); 470 mutex_lock(&tilcdc_crtc->enable_lock);
@@ -484,7 +485,17 @@ static void tilcdc_crtc_enable(struct drm_crtc *crtc)
484 tilcdc_write_mask(dev, LCDC_RASTER_CTRL_REG, 485 tilcdc_write_mask(dev, LCDC_RASTER_CTRL_REG,
485 LCDC_PALETTE_LOAD_MODE(DATA_ONLY), 486 LCDC_PALETTE_LOAD_MODE(DATA_ONLY),
486 LCDC_PALETTE_LOAD_MODE_MASK); 487 LCDC_PALETTE_LOAD_MODE_MASK);
488
489 /* There is no real chance of a race here as the timestamp
490 * is taken before the raster DMA is started. The spin-lock is
491 * taken to provide a memory barrier after taking the timestamp
492 * and to avoid a context switch between taking the timestamp and
493 * enabling the raster.
494 */
495 spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);
496 tilcdc_crtc->last_vblank = ktime_get();
487 tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE); 497 tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);
498 spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);
488 499
489 drm_crtc_vblank_on(crtc); 500 drm_crtc_vblank_on(crtc);
490 501
@@ -539,7 +550,6 @@ static void tilcdc_crtc_off(struct drm_crtc *crtc, bool shutdown)
539 } 550 }
540 551
541 drm_flip_work_commit(&tilcdc_crtc->unref_work, priv->wq); 552 drm_flip_work_commit(&tilcdc_crtc->unref_work, priv->wq);
542 tilcdc_crtc->last_vblank = 0;
543 553
544 tilcdc_crtc->enabled = false; 554 tilcdc_crtc->enabled = false;
545 mutex_unlock(&tilcdc_crtc->enable_lock); 555 mutex_unlock(&tilcdc_crtc->enable_lock);
@@ -602,7 +612,6 @@ int tilcdc_crtc_update_fb(struct drm_crtc *crtc,
602{ 612{
603 struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc); 613 struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
604 struct drm_device *dev = crtc->dev; 614 struct drm_device *dev = crtc->dev;
605 unsigned long flags;
606 615
607 WARN_ON(!drm_modeset_is_locked(&crtc->mutex)); 616 WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
608 617
@@ -614,28 +623,30 @@ int tilcdc_crtc_update_fb(struct drm_crtc *crtc,
614 drm_framebuffer_reference(fb); 623 drm_framebuffer_reference(fb);
615 624
616 crtc->primary->fb = fb; 625 crtc->primary->fb = fb;
626 tilcdc_crtc->event = event;
617 627
618 spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags); 628 mutex_lock(&tilcdc_crtc->enable_lock);
619 629
620 if (crtc->hwmode.vrefresh && ktime_to_ns(tilcdc_crtc->last_vblank)) { 630 if (tilcdc_crtc->enabled) {
631 unsigned long flags;
621 ktime_t next_vblank; 632 ktime_t next_vblank;
622 s64 tdiff; 633 s64 tdiff;
623 634
624 next_vblank = ktime_add_us(tilcdc_crtc->last_vblank, 635 spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);
625 1000000 / crtc->hwmode.vrefresh);
626 636
637 next_vblank = ktime_add_us(tilcdc_crtc->last_vblank,
638 1000000 / crtc->hwmode.vrefresh);
627 tdiff = ktime_to_us(ktime_sub(next_vblank, ktime_get())); 639 tdiff = ktime_to_us(ktime_sub(next_vblank, ktime_get()));
628 640
629 if (tdiff < TILCDC_VBLANK_SAFETY_THRESHOLD_US) 641 if (tdiff < TILCDC_VBLANK_SAFETY_THRESHOLD_US)
630 tilcdc_crtc->next_fb = fb; 642 tilcdc_crtc->next_fb = fb;
631 } 643 else
632 644 set_scanout(crtc, fb);
633 if (tilcdc_crtc->next_fb != fb)
634 set_scanout(crtc, fb);
635 645
636 tilcdc_crtc->event = event; 646 spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);
647 }
637 648
638 spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags); 649 mutex_unlock(&tilcdc_crtc->enable_lock);
639 650
640 return 0; 651 return 0;
641} 652}
@@ -1036,5 +1047,5 @@ int tilcdc_crtc_create(struct drm_device *dev)
1036 1047
1037fail: 1048fail:
1038 tilcdc_crtc_destroy(crtc); 1049 tilcdc_crtc_destroy(crtc);
1039 return -ENOMEM; 1050 return ret;
1040} 1051}
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
index fdb451e3ec01..26a7ad0f4789 100644
--- a/drivers/gpu/drm/ttm/ttm_object.c
+++ b/drivers/gpu/drm/ttm/ttm_object.c
@@ -179,7 +179,7 @@ int ttm_base_object_init(struct ttm_object_file *tfile,
179 if (unlikely(ret != 0)) 179 if (unlikely(ret != 0))
180 goto out_err0; 180 goto out_err0;
181 181
182 ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL); 182 ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);
183 if (unlikely(ret != 0)) 183 if (unlikely(ret != 0))
184 goto out_err1; 184 goto out_err1;
185 185
@@ -318,7 +318,8 @@ EXPORT_SYMBOL(ttm_ref_object_exists);
318 318
319int ttm_ref_object_add(struct ttm_object_file *tfile, 319int ttm_ref_object_add(struct ttm_object_file *tfile,
320 struct ttm_base_object *base, 320 struct ttm_base_object *base,
321 enum ttm_ref_type ref_type, bool *existed) 321 enum ttm_ref_type ref_type, bool *existed,
322 bool require_existed)
322{ 323{
323 struct drm_open_hash *ht = &tfile->ref_hash[ref_type]; 324 struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
324 struct ttm_ref_object *ref; 325 struct ttm_ref_object *ref;
@@ -345,6 +346,9 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
345 } 346 }
346 347
347 rcu_read_unlock(); 348 rcu_read_unlock();
349 if (require_existed)
350 return -EPERM;
351
348 ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref), 352 ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref),
349 false, false); 353 false, false);
350 if (unlikely(ret != 0)) 354 if (unlikely(ret != 0))
@@ -449,10 +453,10 @@ void ttm_object_file_release(struct ttm_object_file **p_tfile)
449 ttm_ref_object_release(&ref->kref); 453 ttm_ref_object_release(&ref->kref);
450 } 454 }
451 455
456 spin_unlock(&tfile->lock);
452 for (i = 0; i < TTM_REF_NUM; ++i) 457 for (i = 0; i < TTM_REF_NUM; ++i)
453 drm_ht_remove(&tfile->ref_hash[i]); 458 drm_ht_remove(&tfile->ref_hash[i]);
454 459
455 spin_unlock(&tfile->lock);
456 ttm_object_file_unref(&tfile); 460 ttm_object_file_unref(&tfile);
457} 461}
458EXPORT_SYMBOL(ttm_object_file_release); 462EXPORT_SYMBOL(ttm_object_file_release);
@@ -529,9 +533,7 @@ void ttm_object_device_release(struct ttm_object_device **p_tdev)
529 533
530 *p_tdev = NULL; 534 *p_tdev = NULL;
531 535
532 spin_lock(&tdev->object_lock);
533 drm_ht_remove(&tdev->object_hash); 536 drm_ht_remove(&tdev->object_hash);
534 spin_unlock(&tdev->object_lock);
535 537
536 kfree(tdev); 538 kfree(tdev);
537} 539}
@@ -635,7 +637,7 @@ int ttm_prime_fd_to_handle(struct ttm_object_file *tfile,
635 prime = (struct ttm_prime_object *) dma_buf->priv; 637 prime = (struct ttm_prime_object *) dma_buf->priv;
636 base = &prime->base; 638 base = &prime->base;
637 *handle = base->hash.key; 639 *handle = base->hash.key;
638 ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL); 640 ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);
639 641
640 dma_buf_put(dma_buf); 642 dma_buf_put(dma_buf);
641 643
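For callers, the new require_existed flag turns ttm_ref_object_add() into a lookup-only operation. A hedged caller-side sketch, with tfile and base assumed to have been resolved as in the hunks above:

	int ret;

	/* With require_existed == true the add succeeds only if a
	 * TTM_REF_USAGE reference already exists for this file; an
	 * unopened handle can no longer be used to fabricate one. */
	ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, true);
	if (ret == -EPERM) {
		/* The caller never opened this object: reject the handle. */
		ttm_base_object_unref(&base);
		return ret;
	}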
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 0c06844af445..9fcf05ca492b 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -846,6 +846,17 @@ static void vc4_crtc_destroy_state(struct drm_crtc *crtc,
846 drm_atomic_helper_crtc_destroy_state(crtc, state); 846 drm_atomic_helper_crtc_destroy_state(crtc, state);
847} 847}
848 848
849static void
850vc4_crtc_reset(struct drm_crtc *crtc)
851{
852 if (crtc->state)
853 __drm_atomic_helper_crtc_destroy_state(crtc->state);
854
855 crtc->state = kzalloc(sizeof(struct vc4_crtc_state), GFP_KERNEL);
856 if (crtc->state)
857 crtc->state->crtc = crtc;
858}
859
849static const struct drm_crtc_funcs vc4_crtc_funcs = { 860static const struct drm_crtc_funcs vc4_crtc_funcs = {
850 .set_config = drm_atomic_helper_set_config, 861 .set_config = drm_atomic_helper_set_config,
851 .destroy = vc4_crtc_destroy, 862 .destroy = vc4_crtc_destroy,
@@ -853,7 +864,7 @@ static const struct drm_crtc_funcs vc4_crtc_funcs = {
853 .set_property = NULL, 864 .set_property = NULL,
854 .cursor_set = NULL, /* handled by drm_mode_cursor_universal */ 865 .cursor_set = NULL, /* handled by drm_mode_cursor_universal */
855 .cursor_move = NULL, /* handled by drm_mode_cursor_universal */ 866 .cursor_move = NULL, /* handled by drm_mode_cursor_universal */
856 .reset = drm_atomic_helper_crtc_reset, 867 .reset = vc4_crtc_reset,
857 .atomic_duplicate_state = vc4_crtc_duplicate_state, 868 .atomic_duplicate_state = vc4_crtc_duplicate_state,
858 .atomic_destroy_state = vc4_crtc_destroy_state, 869 .atomic_destroy_state = vc4_crtc_destroy_state,
859 .gamma_set = vc4_crtc_gamma_set, 870 .gamma_set = vc4_crtc_gamma_set,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 6541dd8b82dc..6b2708b4eafe 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -538,7 +538,7 @@ int vmw_fence_create(struct vmw_fence_manager *fman,
538 struct vmw_fence_obj **p_fence) 538 struct vmw_fence_obj **p_fence)
539{ 539{
540 struct vmw_fence_obj *fence; 540 struct vmw_fence_obj *fence;
541 int ret; 541 int ret;
542 542
543 fence = kzalloc(sizeof(*fence), GFP_KERNEL); 543 fence = kzalloc(sizeof(*fence), GFP_KERNEL);
544 if (unlikely(fence == NULL)) 544 if (unlikely(fence == NULL))
@@ -701,6 +701,41 @@ void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
701} 701}
702 702
703 703
704/**
705 * vmw_fence_obj_lookup - Look up a user-space fence object
706 *
707 * @tfile: A struct ttm_object_file identifying the caller.
708 * @handle: A handle identifying the fence object.
709 * @return: A struct vmw_user_fence base ttm object on success or
710 * an error pointer on failure.
711 *
712 * The fence object is looked up and type-checked. The caller needs
713 * to have opened the fence object first, but since that happens on
714 * creation and fence objects aren't shareable, that's not an
715 * issue currently.
716 */
717static struct ttm_base_object *
718vmw_fence_obj_lookup(struct ttm_object_file *tfile, u32 handle)
719{
720 struct ttm_base_object *base = ttm_base_object_lookup(tfile, handle);
721
722 if (!base) {
723 pr_err("Invalid fence object handle 0x%08lx.\n",
724 (unsigned long)handle);
725 return ERR_PTR(-EINVAL);
726 }
727
728 if (base->refcount_release != vmw_user_fence_base_release) {
729 pr_err("Invalid fence object handle 0x%08lx.\n",
730 (unsigned long)handle);
731 ttm_base_object_unref(&base);
732 return ERR_PTR(-EINVAL);
733 }
734
735 return base;
736}
737
738
704int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data, 739int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
705 struct drm_file *file_priv) 740 struct drm_file *file_priv)
706{ 741{
@@ -726,13 +761,9 @@ int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
726 arg->kernel_cookie = jiffies + wait_timeout; 761 arg->kernel_cookie = jiffies + wait_timeout;
727 } 762 }
728 763
729 base = ttm_base_object_lookup(tfile, arg->handle); 764 base = vmw_fence_obj_lookup(tfile, arg->handle);
730 if (unlikely(base == NULL)) { 765 if (IS_ERR(base))
731 printk(KERN_ERR "Wait invalid fence object handle " 766 return PTR_ERR(base);
732 "0x%08lx.\n",
733 (unsigned long)arg->handle);
734 return -EINVAL;
735 }
736 767
737 fence = &(container_of(base, struct vmw_user_fence, base)->fence); 768 fence = &(container_of(base, struct vmw_user_fence, base)->fence);
738 769
@@ -771,13 +802,9 @@ int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
771 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; 802 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
772 struct vmw_private *dev_priv = vmw_priv(dev); 803 struct vmw_private *dev_priv = vmw_priv(dev);
773 804
774 base = ttm_base_object_lookup(tfile, arg->handle); 805 base = vmw_fence_obj_lookup(tfile, arg->handle);
775 if (unlikely(base == NULL)) { 806 if (IS_ERR(base))
776 printk(KERN_ERR "Fence signaled invalid fence object handle " 807 return PTR_ERR(base);
777 "0x%08lx.\n",
778 (unsigned long)arg->handle);
779 return -EINVAL;
780 }
781 808
782 fence = &(container_of(base, struct vmw_user_fence, base)->fence); 809 fence = &(container_of(base, struct vmw_user_fence, base)->fence);
783 fman = fman_from_fence(fence); 810 fman = fman_from_fence(fence);
@@ -1024,6 +1051,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
1024 (struct drm_vmw_fence_event_arg *) data; 1051 (struct drm_vmw_fence_event_arg *) data;
1025 struct vmw_fence_obj *fence = NULL; 1052 struct vmw_fence_obj *fence = NULL;
1026 struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); 1053 struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
1054 struct ttm_object_file *tfile = vmw_fp->tfile;
1027 struct drm_vmw_fence_rep __user *user_fence_rep = 1055 struct drm_vmw_fence_rep __user *user_fence_rep =
1028 (struct drm_vmw_fence_rep __user *)(unsigned long) 1056 (struct drm_vmw_fence_rep __user *)(unsigned long)
1029 arg->fence_rep; 1057 arg->fence_rep;
@@ -1037,24 +1065,18 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
1037 */ 1065 */
1038 if (arg->handle) { 1066 if (arg->handle) {
1039 struct ttm_base_object *base = 1067 struct ttm_base_object *base =
1040 ttm_base_object_lookup_for_ref(dev_priv->tdev, 1068 vmw_fence_obj_lookup(tfile, arg->handle);
1041 arg->handle); 1069
1042 1070 if (IS_ERR(base))
1043 if (unlikely(base == NULL)) { 1071 return PTR_ERR(base);
1044 DRM_ERROR("Fence event invalid fence object handle " 1072
1045 "0x%08lx.\n",
1046 (unsigned long)arg->handle);
1047 return -EINVAL;
1048 }
1049 fence = &(container_of(base, struct vmw_user_fence, 1073 fence = &(container_of(base, struct vmw_user_fence,
1050 base)->fence); 1074 base)->fence);
1051 (void) vmw_fence_obj_reference(fence); 1075 (void) vmw_fence_obj_reference(fence);
1052 1076
1053 if (user_fence_rep != NULL) { 1077 if (user_fence_rep != NULL) {
1054 bool existed;
1055
1056 ret = ttm_ref_object_add(vmw_fp->tfile, base, 1078 ret = ttm_ref_object_add(vmw_fp->tfile, base,
1057 TTM_REF_USAGE, &existed); 1079 TTM_REF_USAGE, NULL, false);
1058 if (unlikely(ret != 0)) { 1080 if (unlikely(ret != 0)) {
1059 DRM_ERROR("Failed to reference a fence " 1081 DRM_ERROR("Failed to reference a fence "
1060 "object.\n"); 1082 "object.\n");
@@ -1097,8 +1119,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
1097 return 0; 1119 return 0;
1098out_no_create: 1120out_no_create:
1099 if (user_fence_rep != NULL) 1121 if (user_fence_rep != NULL)
1100 ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile, 1122 ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
1101 handle, TTM_REF_USAGE);
1102out_no_ref_obj: 1123out_no_ref_obj:
1103 vmw_fence_obj_unreference(&fence); 1124 vmw_fence_obj_unreference(&fence);
1104 return ret; 1125 return ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index b8c6a03c8c54..5ec24fd801cd 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -114,8 +114,6 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
114 param->value = dev_priv->has_dx; 114 param->value = dev_priv->has_dx;
115 break; 115 break;
116 default: 116 default:
117 DRM_ERROR("Illegal vmwgfx get param request: %d\n",
118 param->param);
119 return -EINVAL; 117 return -EINVAL;
120 } 118 }
121 119
@@ -186,7 +184,7 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
186 bool gb_objects = !!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS); 184 bool gb_objects = !!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS);
187 struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); 185 struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
188 186
189 if (unlikely(arg->pad64 != 0)) { 187 if (unlikely(arg->pad64 != 0 || arg->max_size == 0)) {
190 DRM_ERROR("Illegal GET_3D_CAP argument.\n"); 188 DRM_ERROR("Illegal GET_3D_CAP argument.\n");
191 return -EINVAL; 189 return -EINVAL;
192 } 190 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 65b3f0369636..bf23153d4f55 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -589,7 +589,7 @@ static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
589 return ret; 589 return ret;
590 590
591 ret = ttm_ref_object_add(tfile, &user_bo->prime.base, 591 ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
592 TTM_REF_SYNCCPU_WRITE, &existed); 592 TTM_REF_SYNCCPU_WRITE, &existed, false);
593 if (ret != 0 || existed) 593 if (ret != 0 || existed)
594 ttm_bo_synccpu_write_release(&user_bo->dma.base); 594 ttm_bo_synccpu_write_release(&user_bo->dma.base);
595 595
@@ -773,7 +773,7 @@ int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
773 773
774 *handle = user_bo->prime.base.hash.key; 774 *handle = user_bo->prime.base.hash.key;
775 return ttm_ref_object_add(tfile, &user_bo->prime.base, 775 return ttm_ref_object_add(tfile, &user_bo->prime.base,
776 TTM_REF_USAGE, NULL); 776 TTM_REF_USAGE, NULL, false);
777} 777}
778 778
779/* 779/*
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index b445ce9b9757..05fa092c942b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -713,11 +713,14 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
713 128; 713 128;
714 714
715 num_sizes = 0; 715 num_sizes = 0;
716 for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) 716 for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
717 if (req->mip_levels[i] > DRM_VMW_MAX_MIP_LEVELS)
718 return -EINVAL;
717 num_sizes += req->mip_levels[i]; 719 num_sizes += req->mip_levels[i];
720 }
718 721
719 if (num_sizes > DRM_VMW_MAX_SURFACE_FACES * 722 if (num_sizes > DRM_VMW_MAX_SURFACE_FACES * DRM_VMW_MAX_MIP_LEVELS ||
720 DRM_VMW_MAX_MIP_LEVELS) 723 num_sizes == 0)
721 return -EINVAL; 724 return -EINVAL;
722 725
723 size = vmw_user_surface_size + 128 + 726 size = vmw_user_surface_size + 128 +
@@ -891,17 +894,16 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
891 uint32_t handle; 894 uint32_t handle;
892 struct ttm_base_object *base; 895 struct ttm_base_object *base;
893 int ret; 896 int ret;
897 bool require_exist = false;
894 898
895 if (handle_type == DRM_VMW_HANDLE_PRIME) { 899 if (handle_type == DRM_VMW_HANDLE_PRIME) {
896 ret = ttm_prime_fd_to_handle(tfile, u_handle, &handle); 900 ret = ttm_prime_fd_to_handle(tfile, u_handle, &handle);
897 if (unlikely(ret != 0)) 901 if (unlikely(ret != 0))
898 return ret; 902 return ret;
899 } else { 903 } else {
900 if (unlikely(drm_is_render_client(file_priv))) { 904 if (unlikely(drm_is_render_client(file_priv)))
901 DRM_ERROR("Render client refused legacy " 905 require_exist = true;
902 "surface reference.\n"); 906
903 return -EACCES;
904 }
905 if (ACCESS_ONCE(vmw_fpriv(file_priv)->locked_master)) { 907 if (ACCESS_ONCE(vmw_fpriv(file_priv)->locked_master)) {
906 DRM_ERROR("Locked master refused legacy " 908 DRM_ERROR("Locked master refused legacy "
907 "surface reference.\n"); 909 "surface reference.\n");
@@ -929,17 +931,14 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
929 931
930 /* 932 /*
931 * Make sure the surface creator has the same 933 * Make sure the surface creator has the same
932 * authenticating master. 934 * authenticating master, or is already registered with us.
933 */ 935 */
934 if (drm_is_primary_client(file_priv) && 936 if (drm_is_primary_client(file_priv) &&
935 user_srf->master != file_priv->master) { 937 user_srf->master != file_priv->master)
936 DRM_ERROR("Trying to reference surface outside of" 938 require_exist = true;
937 " master domain.\n");
938 ret = -EACCES;
939 goto out_bad_resource;
940 }
941 939
942 ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL); 940 ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL,
941 require_exist);
943 if (unlikely(ret != 0)) { 942 if (unlikely(ret != 0)) {
944 DRM_ERROR("Could not add a reference to a surface.\n"); 943 DRM_ERROR("Could not add a reference to a surface.\n");
945 goto out_bad_resource; 944 goto out_bad_resource;
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 1aeb80e52424..8c54cb8f5d6d 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -175,11 +175,11 @@ config HID_CHERRY
175 Support for Cherry Cymotion keyboard. 175 Support for Cherry Cymotion keyboard.
176 176
177config HID_CHICONY 177config HID_CHICONY
178 tristate "Chicony Tactical pad" 178 tristate "Chicony devices"
179 depends on HID 179 depends on HID
180 default !EXPERT 180 default !EXPERT
181 ---help--- 181 ---help---
182 Support for Chicony Tactical pad. 182 Support for Chicony Tactical pad and special keys on Chicony keyboards.
183 183
184config HID_CORSAIR 184config HID_CORSAIR
185 tristate "Corsair devices" 185 tristate "Corsair devices"
@@ -190,6 +190,7 @@ config HID_CORSAIR
190 190
191 Supported devices: 191 Supported devices:
192 - Vengeance K90 192 - Vengeance K90
193 - Scimitar PRO RGB
193 194
194config HID_PRODIKEYS 195config HID_PRODIKEYS
195 tristate "Prodikeys PC-MIDI Keyboard support" 196 tristate "Prodikeys PC-MIDI Keyboard support"
diff --git a/drivers/hid/hid-chicony.c b/drivers/hid/hid-chicony.c
index bc3cec199fee..f04ed9aabc3f 100644
--- a/drivers/hid/hid-chicony.c
+++ b/drivers/hid/hid-chicony.c
@@ -86,6 +86,7 @@ static const struct hid_device_id ch_devices[] = {
86 { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS2) }, 86 { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS2) },
87 { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_AK1D) }, 87 { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_AK1D) },
88 { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_ACER_SWITCH12) }, 88 { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_ACER_SWITCH12) },
89 { HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_JESS_ZEN_AIO_KBD) },
89 { } 90 { }
90}; 91};
91MODULE_DEVICE_TABLE(hid, ch_devices); 92MODULE_DEVICE_TABLE(hid, ch_devices);
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index e9e87d337446..63ec1993eaaa 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1870,6 +1870,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
1870 { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_AK1D) }, 1870 { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_AK1D) },
1871 { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_ACER_SWITCH12) }, 1871 { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_ACER_SWITCH12) },
1872 { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K90) }, 1872 { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K90) },
1873 { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB) },
1873 { HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_PRODIKEYS_PCMIDI) }, 1874 { HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_PRODIKEYS_PCMIDI) },
1874 { HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_CP2112) }, 1875 { HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_CP2112) },
1875 { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1) }, 1876 { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1) },
@@ -1910,6 +1911,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
1910 { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081) }, 1911 { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081) },
1911 { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A0C2) }, 1912 { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A0C2) },
1912 { HID_USB_DEVICE(USB_VENDOR_ID_HUION, USB_DEVICE_ID_HUION_TABLET) }, 1913 { HID_USB_DEVICE(USB_VENDOR_ID_HUION, USB_DEVICE_ID_HUION_TABLET) },
1914 { HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_JESS_ZEN_AIO_KBD) },
1913 { HID_USB_DEVICE(USB_VENDOR_ID_JESS2, USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD) }, 1915 { HID_USB_DEVICE(USB_VENDOR_ID_JESS2, USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD) },
1914 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ION, USB_DEVICE_ID_ICADE) }, 1916 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ION, USB_DEVICE_ID_ICADE) },
1915 { HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) }, 1917 { HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) },
@@ -2110,6 +2112,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
2110 { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET) }, 2112 { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET) },
2111 { HID_USB_DEVICE(USB_VENDOR_ID_X_TENSIONS, USB_DEVICE_ID_SPEEDLINK_VAD_CEZANNE) }, 2113 { HID_USB_DEVICE(USB_VENDOR_ID_X_TENSIONS, USB_DEVICE_ID_SPEEDLINK_VAD_CEZANNE) },
2112 { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE) }, 2114 { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE) },
2115 { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_THT_2P_ARCADE) },
2113 { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0005) }, 2116 { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0005) },
2114 { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0030) }, 2117 { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0030) },
2115 { HID_USB_DEVICE(USB_VENDOR_ID_ZYDACRON, USB_DEVICE_ID_ZYDACRON_REMOTE_CONTROL) }, 2118 { HID_USB_DEVICE(USB_VENDOR_ID_ZYDACRON, USB_DEVICE_ID_ZYDACRON_REMOTE_CONTROL) },
diff --git a/drivers/hid/hid-corsair.c b/drivers/hid/hid-corsair.c
index c0303f61c26a..9ba5d98a1180 100644
--- a/drivers/hid/hid-corsair.c
+++ b/drivers/hid/hid-corsair.c
@@ -3,8 +3,10 @@
3 * 3 *
4 * Supported devices: 4 * Supported devices:
5 * - Vengeance K90 Keyboard 5 * - Vengeance K90 Keyboard
6 * - Scimitar PRO RGB Gaming Mouse
6 * 7 *
7 * Copyright (c) 2015 Clement Vuchener 8 * Copyright (c) 2015 Clement Vuchener
9 * Copyright (c) 2017 Oscar Campos
8 */ 10 */
9 11
10/* 12/*
@@ -670,10 +672,51 @@ static int corsair_input_mapping(struct hid_device *dev,
670 return 0; 672 return 0;
671} 673}
672 674
675/*
676 * The report descriptor of the Corsair Scimitar RGB Pro gaming mouse is
677 * not parseable as it defines two consecutive Logical Minimums for
678 * the Usage Page (Consumer) in rdesc bytes 75 and 77, byte 77 being 0x16
679 * where it should obviously be 0x26 for a 16-bit Logical Maximum. This
680 * prevents proper parsing of the report descriptor due to the Logical
681 * Minimum being larger than the Logical Maximum.
682 *
683 * This driver fixes the report descriptor for:
684 * - USB ID 1b1c:1b3e, sold as Scimitar RGB Pro Gaming mouse
685 */
686
687static __u8 *corsair_mouse_report_fixup(struct hid_device *hdev, __u8 *rdesc,
688 unsigned int *rsize)
689{
690 struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
691
692 if (intf->cur_altsetting->desc.bInterfaceNumber == 1) {
693 /*
694 * The Corsair Scimitar RGB Pro report descriptor is broken: it
695 * defines two different Logical Minimums for the Consumer
696 * Application. Byte 77 should be 0x26, defining a 16-bit
697 * integer for the Logical Maximum, but it is 0x16
698 * instead (Logical Minimum).
699 */
700 switch (hdev->product) {
701 case USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB:
702 if (*rsize >= 172 && rdesc[75] == 0x15 && rdesc[77] == 0x16
703 && rdesc[78] == 0xff && rdesc[79] == 0x0f) {
704 hid_info(hdev, "Fixing up report descriptor\n");
705 rdesc[77] = 0x26;
706 }
707 break;
708 }
709
710 }
711 return rdesc;
712}
713
673static const struct hid_device_id corsair_devices[] = { 714static const struct hid_device_id corsair_devices[] = {
674 { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K90), 715 { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K90),
675 .driver_data = CORSAIR_USE_K90_MACRO | 716 .driver_data = CORSAIR_USE_K90_MACRO |
676 CORSAIR_USE_K90_BACKLIGHT }, 717 CORSAIR_USE_K90_BACKLIGHT },
718 { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR,
719 USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB) },
677 {} 720 {}
678}; 721};
679 722
@@ -686,10 +729,14 @@ static struct hid_driver corsair_driver = {
686 .event = corsair_event, 729 .event = corsair_event,
687 .remove = corsair_remove, 730 .remove = corsair_remove,
688 .input_mapping = corsair_input_mapping, 731 .input_mapping = corsair_input_mapping,
732 .report_fixup = corsair_mouse_report_fixup,
689}; 733};
690 734
691module_hid_driver(corsair_driver); 735module_hid_driver(corsair_driver);
692 736
693MODULE_LICENSE("GPL"); 737MODULE_LICENSE("GPL");
738/* Original K90 driver author */
694MODULE_AUTHOR("Clement Vuchener"); 739MODULE_AUTHOR("Clement Vuchener");
740/* Scimitar PRO RGB driver author */
741MODULE_AUTHOR("Oscar Campos");
695MODULE_DESCRIPTION("HID driver for Corsair devices"); 742MODULE_DESCRIPTION("HID driver for Corsair devices");
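To make the one-byte fixup above concrete, here is a hedged sketch of the descriptor window it touches. Only bytes 75 and 77-79 are actually verified by the driver; the value of byte 76 is an assumption (a typical Logical Minimum of 0) used purely for illustration.

	/* Hypothetical rdesc[75..79] window, before and after the fixup.
	 * Byte 76 is assumed to be 0x00 (Logical Minimum = 0). */
	__u8 before[] = { 0x15, 0x00, 0x16, 0xff, 0x0f }; /* Min, bogus "Min" 0x0fff */
	__u8 after[]  = { 0x15, 0x00, 0x26, 0xff, 0x0f }; /* Min, Max 0x0fff */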
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 86c95d30ac80..4e2648c86c8c 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -278,6 +278,9 @@
278#define USB_DEVICE_ID_CORSAIR_K70RGB 0x1b13 278#define USB_DEVICE_ID_CORSAIR_K70RGB 0x1b13
279#define USB_DEVICE_ID_CORSAIR_STRAFE 0x1b15 279#define USB_DEVICE_ID_CORSAIR_STRAFE 0x1b15
280#define USB_DEVICE_ID_CORSAIR_K65RGB 0x1b17 280#define USB_DEVICE_ID_CORSAIR_K65RGB 0x1b17
281#define USB_DEVICE_ID_CORSAIR_K70RGB_RAPIDFIRE 0x1b38
282#define USB_DEVICE_ID_CORSAIR_K65RGB_RAPIDFIRE 0x1b39
283#define USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB 0x1b3e
281 284
282#define USB_VENDOR_ID_CREATIVELABS 0x041e 285#define USB_VENDOR_ID_CREATIVELABS 0x041e
283#define USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51 0x322c 286#define USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51 0x322c
@@ -557,6 +560,7 @@
557 560
558#define USB_VENDOR_ID_JESS 0x0c45 561#define USB_VENDOR_ID_JESS 0x0c45
559#define USB_DEVICE_ID_JESS_YUREX 0x1010 562#define USB_DEVICE_ID_JESS_YUREX 0x1010
563#define USB_DEVICE_ID_JESS_ZEN_AIO_KBD 0x5112
560 564
561#define USB_VENDOR_ID_JESS2 0x0f30 565#define USB_VENDOR_ID_JESS2 0x0f30
562#define USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD 0x0111 566#define USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD 0x0111
@@ -1078,6 +1082,7 @@
1078 1082
1079#define USB_VENDOR_ID_XIN_MO 0x16c0 1083#define USB_VENDOR_ID_XIN_MO 0x16c0
1080#define USB_DEVICE_ID_XIN_MO_DUAL_ARCADE 0x05e1 1084#define USB_DEVICE_ID_XIN_MO_DUAL_ARCADE 0x05e1
1085#define USB_DEVICE_ID_THT_2P_ARCADE 0x75e1
1081 1086
1082#define USB_VENDOR_ID_XIROKU 0x1477 1087#define USB_VENDOR_ID_XIROKU 0x1477
1083#define USB_DEVICE_ID_XIROKU_SPX 0x1006 1088#define USB_DEVICE_ID_XIROKU_SPX 0x1006
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index f405b07d0381..740996f9bdd4 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -2632,6 +2632,8 @@ err_stop:
2632 sony_leds_remove(sc); 2632 sony_leds_remove(sc);
2633 if (sc->quirks & SONY_BATTERY_SUPPORT) 2633 if (sc->quirks & SONY_BATTERY_SUPPORT)
2634 sony_battery_remove(sc); 2634 sony_battery_remove(sc);
2635 if (sc->touchpad)
2636 sony_unregister_touchpad(sc);
2635 sony_cancel_work_sync(sc); 2637 sony_cancel_work_sync(sc);
2636 kfree(sc->output_report_dmabuf); 2638 kfree(sc->output_report_dmabuf);
2637 sony_remove_dev_list(sc); 2639 sony_remove_dev_list(sc);
diff --git a/drivers/hid/hid-xinmo.c b/drivers/hid/hid-xinmo.c
index 7df5227a7e61..9ad7731d2e10 100644
--- a/drivers/hid/hid-xinmo.c
+++ b/drivers/hid/hid-xinmo.c
@@ -46,6 +46,7 @@ static int xinmo_event(struct hid_device *hdev, struct hid_field *field,
46 46
47static const struct hid_device_id xinmo_devices[] = { 47static const struct hid_device_id xinmo_devices[] = {
48 { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE) }, 48 { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE) },
49 { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_THT_2P_ARCADE) },
49 { } 50 { }
50}; 51};
51 52
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index d6847a664446..a69a3c88ab29 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -80,6 +80,9 @@ static const struct hid_blacklist {
80 { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K70RGB, HID_QUIRK_NO_INIT_REPORTS }, 80 { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K70RGB, HID_QUIRK_NO_INIT_REPORTS },
81 { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB, HID_QUIRK_NO_INIT_REPORTS }, 81 { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB, HID_QUIRK_NO_INIT_REPORTS },
82 { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_STRAFE, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL }, 82 { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_STRAFE, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
83 { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K70RGB_RAPIDFIRE, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
84 { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB_RAPIDFIRE, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
85 { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
83 { USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51, HID_QUIRK_NOGET }, 86 { USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51, HID_QUIRK_NOGET },
84 { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET }, 87 { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
85 { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT }, 88 { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT },
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index be8f7e2a026f..e2666ef84dc1 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -2165,6 +2165,14 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
2165 2165
2166 wacom_update_name(wacom, wireless ? " (WL)" : ""); 2166 wacom_update_name(wacom, wireless ? " (WL)" : "");
2167 2167
2168 /* pen-only Bamboo supports neither touch nor pad */
2169 if ((features->type == BAMBOO_PEN) &&
2170 ((features->device_type & WACOM_DEVICETYPE_TOUCH) ||
2171 (features->device_type & WACOM_DEVICETYPE_PAD))) {
2172 error = -ENODEV;
2173 goto fail;
2174 }
2175
2168 error = wacom_add_shared_data(hdev); 2176 error = wacom_add_shared_data(hdev);
2169 if (error) 2177 if (error)
2170 goto fail; 2178 goto fail;
@@ -2208,14 +2216,8 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
2208 /* touch only Bamboo doesn't support pen */ 2216 /* touch only Bamboo doesn't support pen */
2209 if ((features->type == BAMBOO_TOUCH) && 2217 if ((features->type == BAMBOO_TOUCH) &&
2210 (features->device_type & WACOM_DEVICETYPE_PEN)) { 2218 (features->device_type & WACOM_DEVICETYPE_PEN)) {
2211 error = -ENODEV; 2219 cancel_delayed_work_sync(&wacom->init_work);
2212 goto fail_quirks; 2220 _wacom_query_tablet_data(wacom);
2213 }
2214
2215 /* pen only Bamboo neither support touch nor pad */
2216 if ((features->type == BAMBOO_PEN) &&
2217 ((features->device_type & WACOM_DEVICETYPE_TOUCH) ||
2218 (features->device_type & WACOM_DEVICETYPE_PAD))) {
2219 error = -ENODEV; 2221 error = -ENODEV;
2220 goto fail_quirks; 2222 goto fail_quirks;
2221 } 2223 }
@@ -2579,7 +2581,9 @@ static void wacom_remove(struct hid_device *hdev)
2579 2581
2580 /* make sure we don't trigger the LEDs */ 2582 /* make sure we don't trigger the LEDs */
2581 wacom_led_groups_release(wacom); 2583 wacom_led_groups_release(wacom);
2582 wacom_release_resources(wacom); 2584
2585 if (wacom->wacom_wac.features.type != REMOTE)
2586 wacom_release_resources(wacom);
2583 2587
2584 hid_set_drvdata(hdev, NULL); 2588 hid_set_drvdata(hdev, NULL);
2585} 2589}
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 4aa3de9f1163..94250c293be2 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -1959,8 +1959,10 @@ static void wacom_wac_pen_usage_mapping(struct hid_device *hdev,
1959 input_set_capability(input, EV_KEY, BTN_TOOL_BRUSH); 1959 input_set_capability(input, EV_KEY, BTN_TOOL_BRUSH);
1960 input_set_capability(input, EV_KEY, BTN_TOOL_PENCIL); 1960 input_set_capability(input, EV_KEY, BTN_TOOL_PENCIL);
1961 input_set_capability(input, EV_KEY, BTN_TOOL_AIRBRUSH); 1961 input_set_capability(input, EV_KEY, BTN_TOOL_AIRBRUSH);
1962 input_set_capability(input, EV_KEY, BTN_TOOL_MOUSE); 1962 if (!(features->device_type & WACOM_DEVICETYPE_DIRECT)) {
1963 input_set_capability(input, EV_KEY, BTN_TOOL_LENS); 1963 input_set_capability(input, EV_KEY, BTN_TOOL_MOUSE);
1964 input_set_capability(input, EV_KEY, BTN_TOOL_LENS);
1965 }
1964 break; 1966 break;
1965 case WACOM_HID_WD_FINGERWHEEL: 1967 case WACOM_HID_WD_FINGERWHEEL:
1966 wacom_map_usage(input, usage, field, EV_ABS, ABS_WHEEL, 0); 1968 wacom_map_usage(input, usage, field, EV_ABS, ABS_WHEEL, 0);
@@ -4197,10 +4199,10 @@ static const struct wacom_features wacom_features_0x343 =
4197 WACOM_DTU_OFFSET, WACOM_DTU_OFFSET }; 4199 WACOM_DTU_OFFSET, WACOM_DTU_OFFSET };
4198static const struct wacom_features wacom_features_0x360 = 4200static const struct wacom_features wacom_features_0x360 =
4199 { "Wacom Intuos Pro M", 44800, 29600, 8191, 63, 4201 { "Wacom Intuos Pro M", 44800, 29600, 8191, 63,
4200 INTUOSP2_BT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 9, .touch_max = 10 }; 4202 INTUOSP2_BT, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9, .touch_max = 10 };
4201static const struct wacom_features wacom_features_0x361 = 4203static const struct wacom_features wacom_features_0x361 =
4202 { "Wacom Intuos Pro L", 62200, 43200, 8191, 63, 4204 { "Wacom Intuos Pro L", 62200, 43200, 8191, 63,
4203 INTUOSP2_BT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 9, .touch_max = 10 }; 4205 INTUOSP2_BT, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9, .touch_max = 10 };
4204 4206
4205static const struct wacom_features wacom_features_HID_ANY_ID = 4207static const struct wacom_features wacom_features_HID_ANY_ID =
4206 { "Wacom HID", .type = HID_GENERIC, .oVid = HID_ANY_ID, .oPid = HID_ANY_ID }; 4208 { "Wacom HID", .type = HID_GENERIC, .oVid = HID_ANY_ID, .oPid = HID_ANY_ID };
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index bd0d1988feb2..321b8833fa6f 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -502,12 +502,15 @@ int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle)
502 502
503 wait_for_completion(&info->waitevent); 503 wait_for_completion(&info->waitevent);
504 504
505 if (channel->rescind) {
506 ret = -ENODEV;
507 goto post_msg_err;
508 }
509
510post_msg_err: 505post_msg_err:
506 /*
507 * If the channel has been rescinded,
508 * we will be awakened by the rescind
509 * handler; set the error code to zero so we don't leak memory.
510 */
511 if (channel->rescind)
512 ret = 0;
513
511 spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags); 514 spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
512 list_del(&info->msglistentry); 515 list_del(&info->msglistentry);
513 spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags); 516 spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
@@ -530,15 +533,13 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
530 int ret; 533 int ret;
531 534
532 /* 535 /*
533 * vmbus_on_event(), running in the tasklet, can race 536 * vmbus_on_event(), running in the per-channel tasklet, can race
534 * with vmbus_close_internal() in the case of SMP guest, e.g., when 537 * with vmbus_close_internal() in the case of SMP guest, e.g., when
535 * the former is accessing channel->inbound.ring_buffer, the latter 538 * the former is accessing channel->inbound.ring_buffer, the latter
536 * could be freeing the ring_buffer pages. 539 * could be freeing the ring_buffer pages, so here we must stop it
537 * 540 * first.
538 * To resolve the race, we can serialize them by disabling the
539 * tasklet when the latter is running here.
540 */ 541 */
541 hv_event_tasklet_disable(channel); 542 tasklet_disable(&channel->callback_event);
542 543
543 /* 544 /*
544 * In case a device driver's probe() fails (e.g., 545 * In case a device driver's probe() fails (e.g.,
@@ -605,8 +606,6 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
605 get_order(channel->ringbuffer_pagecount * PAGE_SIZE)); 606 get_order(channel->ringbuffer_pagecount * PAGE_SIZE));
606 607
607out: 608out:
608 hv_event_tasklet_enable(channel);
609
610 return ret; 609 return ret;
611} 610}
612 611
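The race the comment above describes follows a generic pattern: stop the per-channel tasklet before freeing the ring buffer it may be reading. A hedged sketch of that pattern, where teardown_ring_buffer() is a hypothetical placeholder (the actual driver defers the re-enable until teardown is complete elsewhere):

	tasklet_disable(&channel->callback_event);
	teardown_ring_buffer(channel);	/* safe: the tasklet cannot run here */
	tasklet_enable(&channel->callback_event);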
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index f33465d78a02..fbcb06352308 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -350,7 +350,8 @@ static struct vmbus_channel *alloc_channel(void)
350static void free_channel(struct vmbus_channel *channel) 350static void free_channel(struct vmbus_channel *channel)
351{ 351{
352 tasklet_kill(&channel->callback_event); 352 tasklet_kill(&channel->callback_event);
353 kfree(channel); 353
354 kfree_rcu(channel, rcu);
354} 355}
355 356
356static void percpu_channel_enq(void *arg) 357static void percpu_channel_enq(void *arg)
@@ -359,14 +360,14 @@ static void percpu_channel_enq(void *arg)
359 struct hv_per_cpu_context *hv_cpu 360 struct hv_per_cpu_context *hv_cpu
360 = this_cpu_ptr(hv_context.cpu_context); 361 = this_cpu_ptr(hv_context.cpu_context);
361 362
362 list_add_tail(&channel->percpu_list, &hv_cpu->chan_list); 363 list_add_tail_rcu(&channel->percpu_list, &hv_cpu->chan_list);
363} 364}
364 365
365static void percpu_channel_deq(void *arg) 366static void percpu_channel_deq(void *arg)
366{ 367{
367 struct vmbus_channel *channel = arg; 368 struct vmbus_channel *channel = arg;
368 369
369 list_del(&channel->percpu_list); 370 list_del_rcu(&channel->percpu_list);
370} 371}
371 372
372 373
@@ -381,19 +382,6 @@ static void vmbus_release_relid(u32 relid)
381 true); 382 true);
382} 383}
383 384
384void hv_event_tasklet_disable(struct vmbus_channel *channel)
385{
386 tasklet_disable(&channel->callback_event);
387}
388
389void hv_event_tasklet_enable(struct vmbus_channel *channel)
390{
391 tasklet_enable(&channel->callback_event);
392
393 /* In case there is any pending event */
394 tasklet_schedule(&channel->callback_event);
395}
396
397void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid) 385void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid)
398{ 386{
399 unsigned long flags; 387 unsigned long flags;
@@ -402,7 +390,6 @@ void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid)
402 BUG_ON(!channel->rescind); 390 BUG_ON(!channel->rescind);
403 BUG_ON(!mutex_is_locked(&vmbus_connection.channel_mutex)); 391 BUG_ON(!mutex_is_locked(&vmbus_connection.channel_mutex));
404 392
405 hv_event_tasklet_disable(channel);
406 if (channel->target_cpu != get_cpu()) { 393 if (channel->target_cpu != get_cpu()) {
407 put_cpu(); 394 put_cpu();
408 smp_call_function_single(channel->target_cpu, 395 smp_call_function_single(channel->target_cpu,
@@ -411,7 +398,6 @@ void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid)
411 percpu_channel_deq(channel); 398 percpu_channel_deq(channel);
412 put_cpu(); 399 put_cpu();
413 } 400 }
414 hv_event_tasklet_enable(channel);
415 401
416 if (channel->primary_channel == NULL) { 402 if (channel->primary_channel == NULL) {
417 list_del(&channel->listentry); 403 list_del(&channel->listentry);
@@ -505,7 +491,6 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
505 491
506 init_vp_index(newchannel, dev_type); 492 init_vp_index(newchannel, dev_type);
507 493
508 hv_event_tasklet_disable(newchannel);
509 if (newchannel->target_cpu != get_cpu()) { 494 if (newchannel->target_cpu != get_cpu()) {
510 put_cpu(); 495 put_cpu();
511 smp_call_function_single(newchannel->target_cpu, 496 smp_call_function_single(newchannel->target_cpu,
@@ -515,7 +500,6 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
515 percpu_channel_enq(newchannel); 500 percpu_channel_enq(newchannel);
516 put_cpu(); 501 put_cpu();
517 } 502 }
518 hv_event_tasklet_enable(newchannel);
519 503
520 /* 504 /*
521 * This state is used to indicate a successful open 505 * This state is used to indicate a successful open
@@ -565,7 +549,6 @@ err_deq_chan:
565 list_del(&newchannel->listentry); 549 list_del(&newchannel->listentry);
566 mutex_unlock(&vmbus_connection.channel_mutex); 550 mutex_unlock(&vmbus_connection.channel_mutex);
567 551
568 hv_event_tasklet_disable(newchannel);
569 if (newchannel->target_cpu != get_cpu()) { 552 if (newchannel->target_cpu != get_cpu()) {
570 put_cpu(); 553 put_cpu();
571 smp_call_function_single(newchannel->target_cpu, 554 smp_call_function_single(newchannel->target_cpu,
@@ -574,7 +557,6 @@ err_deq_chan:
574 percpu_channel_deq(newchannel); 557 percpu_channel_deq(newchannel);
575 put_cpu(); 558 put_cpu();
576 } 559 }
577 hv_event_tasklet_enable(newchannel);
578 560
579 vmbus_release_relid(newchannel->offermsg.child_relid); 561 vmbus_release_relid(newchannel->offermsg.child_relid);
580 562
@@ -814,6 +796,7 @@ static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
814 /* Allocate the channel object and save this offer. */ 796 /* Allocate the channel object and save this offer. */
815 newchannel = alloc_channel(); 797 newchannel = alloc_channel();
816 if (!newchannel) { 798 if (!newchannel) {
799 vmbus_release_relid(offer->child_relid);
817 pr_err("Unable to allocate channel object\n"); 800 pr_err("Unable to allocate channel object\n");
818 return; 801 return;
819 } 802 }
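
The channel_mgmt.c conversion above only works because the writer side pairs the _rcu list primitives with a deferred free: a reader traversing the per-cpu list under rcu_read_lock() may still hold a pointer to an entry after list_del_rcu() returns. A minimal sketch of the writer side, assuming a hypothetical demo_chan structure with an embedded rcu_head (not the actual vmbus_channel layout):

#include <linux/types.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/slab.h>

struct demo_chan {
	u32 relid;
	struct list_head node;
	struct rcu_head rcu;	/* for the deferred free */
};

static LIST_HEAD(demo_list);

/* Publish: readers may see the entry as soon as this returns. */
static void demo_chan_add(struct demo_chan *ch)
{
	list_add_tail_rcu(&ch->node, &demo_list);
}

/* Unpublish, then free only after all current readers are done. */
static void demo_chan_remove(struct demo_chan *ch)
{
	list_del_rcu(&ch->node);
	kfree_rcu(ch, rcu);
}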
diff --git a/drivers/hv/hv_fcopy.c b/drivers/hv/hv_fcopy.c
index 9aee6014339d..a5596a642ed0 100644
--- a/drivers/hv/hv_fcopy.c
+++ b/drivers/hv/hv_fcopy.c
@@ -71,7 +71,6 @@ static DECLARE_WORK(fcopy_send_work, fcopy_send_data);
71static const char fcopy_devname[] = "vmbus/hv_fcopy"; 71static const char fcopy_devname[] = "vmbus/hv_fcopy";
72static u8 *recv_buffer; 72static u8 *recv_buffer;
73static struct hvutil_transport *hvt; 73static struct hvutil_transport *hvt;
74static struct completion release_event;
75/* 74/*
76 * This state maintains the version number registered by the daemon. 75 * This state maintains the version number registered by the daemon.
77 */ 76 */
@@ -331,7 +330,6 @@ static void fcopy_on_reset(void)
331 330
332 if (cancel_delayed_work_sync(&fcopy_timeout_work)) 331 if (cancel_delayed_work_sync(&fcopy_timeout_work))
333 fcopy_respond_to_host(HV_E_FAIL); 332 fcopy_respond_to_host(HV_E_FAIL);
334 complete(&release_event);
335} 333}
336 334
337int hv_fcopy_init(struct hv_util_service *srv) 335int hv_fcopy_init(struct hv_util_service *srv)
@@ -339,7 +337,6 @@ int hv_fcopy_init(struct hv_util_service *srv)
339 recv_buffer = srv->recv_buffer; 337 recv_buffer = srv->recv_buffer;
340 fcopy_transaction.recv_channel = srv->channel; 338 fcopy_transaction.recv_channel = srv->channel;
341 339
342 init_completion(&release_event);
343 /* 340 /*
344 * When this driver loads, the user level daemon that 341 * When this driver loads, the user level daemon that
345 * processes the host requests may not yet be running. 342 * processes the host requests may not yet be running.
@@ -361,5 +358,4 @@ void hv_fcopy_deinit(void)
361 fcopy_transaction.state = HVUTIL_DEVICE_DYING; 358 fcopy_transaction.state = HVUTIL_DEVICE_DYING;
362 cancel_delayed_work_sync(&fcopy_timeout_work); 359 cancel_delayed_work_sync(&fcopy_timeout_work);
363 hvutil_transport_destroy(hvt); 360 hvutil_transport_destroy(hvt);
364 wait_for_completion(&release_event);
365} 361}
diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
index de263712e247..a1adfe2cfb34 100644
--- a/drivers/hv/hv_kvp.c
+++ b/drivers/hv/hv_kvp.c
@@ -101,7 +101,6 @@ static DECLARE_WORK(kvp_sendkey_work, kvp_send_key);
101static const char kvp_devname[] = "vmbus/hv_kvp"; 101static const char kvp_devname[] = "vmbus/hv_kvp";
102static u8 *recv_buffer; 102static u8 *recv_buffer;
103static struct hvutil_transport *hvt; 103static struct hvutil_transport *hvt;
104static struct completion release_event;
105/* 104/*
106 * Register the kernel component with the user-level daemon. 105 * Register the kernel component with the user-level daemon.
107 * As part of this registration, pass the LIC version number. 106 * As part of this registration, pass the LIC version number.
@@ -714,7 +713,6 @@ static void kvp_on_reset(void)
714 if (cancel_delayed_work_sync(&kvp_timeout_work)) 713 if (cancel_delayed_work_sync(&kvp_timeout_work))
715 kvp_respond_to_host(NULL, HV_E_FAIL); 714 kvp_respond_to_host(NULL, HV_E_FAIL);
716 kvp_transaction.state = HVUTIL_DEVICE_INIT; 715 kvp_transaction.state = HVUTIL_DEVICE_INIT;
717 complete(&release_event);
718} 716}
719 717
720int 718int
@@ -723,7 +721,6 @@ hv_kvp_init(struct hv_util_service *srv)
723 recv_buffer = srv->recv_buffer; 721 recv_buffer = srv->recv_buffer;
724 kvp_transaction.recv_channel = srv->channel; 722 kvp_transaction.recv_channel = srv->channel;
725 723
726 init_completion(&release_event);
727 /* 724 /*
728 * When this driver loads, the user level daemon that 725 * When this driver loads, the user level daemon that
729 * processes the host requests may not yet be running. 726 * processes the host requests may not yet be running.
@@ -747,5 +744,4 @@ void hv_kvp_deinit(void)
747 cancel_delayed_work_sync(&kvp_timeout_work); 744 cancel_delayed_work_sync(&kvp_timeout_work);
748 cancel_work_sync(&kvp_sendkey_work); 745 cancel_work_sync(&kvp_sendkey_work);
749 hvutil_transport_destroy(hvt); 746 hvutil_transport_destroy(hvt);
750 wait_for_completion(&release_event);
751} 747}
diff --git a/drivers/hv/hv_snapshot.c b/drivers/hv/hv_snapshot.c
index bcc03f0748d6..e659d1b94a57 100644
--- a/drivers/hv/hv_snapshot.c
+++ b/drivers/hv/hv_snapshot.c
@@ -79,7 +79,6 @@ static int dm_reg_value;
79static const char vss_devname[] = "vmbus/hv_vss"; 79static const char vss_devname[] = "vmbus/hv_vss";
80static __u8 *recv_buffer; 80static __u8 *recv_buffer;
81static struct hvutil_transport *hvt; 81static struct hvutil_transport *hvt;
82static struct completion release_event;
83 82
84static void vss_timeout_func(struct work_struct *dummy); 83static void vss_timeout_func(struct work_struct *dummy);
85static void vss_handle_request(struct work_struct *dummy); 84static void vss_handle_request(struct work_struct *dummy);
@@ -361,13 +360,11 @@ static void vss_on_reset(void)
361 if (cancel_delayed_work_sync(&vss_timeout_work)) 360 if (cancel_delayed_work_sync(&vss_timeout_work))
362 vss_respond_to_host(HV_E_FAIL); 361 vss_respond_to_host(HV_E_FAIL);
363 vss_transaction.state = HVUTIL_DEVICE_INIT; 362 vss_transaction.state = HVUTIL_DEVICE_INIT;
364 complete(&release_event);
365} 363}
366 364
367int 365int
368hv_vss_init(struct hv_util_service *srv) 366hv_vss_init(struct hv_util_service *srv)
369{ 367{
370 init_completion(&release_event);
371 if (vmbus_proto_version < VERSION_WIN8_1) { 368 if (vmbus_proto_version < VERSION_WIN8_1) {
372 pr_warn("Integration service 'Backup (volume snapshot)'" 369 pr_warn("Integration service 'Backup (volume snapshot)'"
373 " not supported on this host version.\n"); 370 " not supported on this host version.\n");
@@ -400,5 +397,4 @@ void hv_vss_deinit(void)
400 cancel_delayed_work_sync(&vss_timeout_work); 397 cancel_delayed_work_sync(&vss_timeout_work);
401 cancel_work_sync(&vss_handle_request_work); 398 cancel_work_sync(&vss_handle_request_work);
402 hvutil_transport_destroy(hvt); 399 hvutil_transport_destroy(hvt);
403 wait_for_completion(&release_event);
404} 400}
diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
index 3042eaa13062..186b10083c55 100644
--- a/drivers/hv/hv_util.c
+++ b/drivers/hv/hv_util.c
@@ -590,6 +590,8 @@ static int hv_timesync_init(struct hv_util_service *srv)
590 if (!hyperv_cs) 590 if (!hyperv_cs)
591 return -ENODEV; 591 return -ENODEV;
592 592
593 spin_lock_init(&host_ts.lock);
594
593 INIT_WORK(&wrk.work, hv_set_host_time); 595 INIT_WORK(&wrk.work, hv_set_host_time);
594 596
595 /* 597 /*
diff --git a/drivers/hv/hv_utils_transport.c b/drivers/hv/hv_utils_transport.c
index c235a9515267..4402a71e23f7 100644
--- a/drivers/hv/hv_utils_transport.c
+++ b/drivers/hv/hv_utils_transport.c
@@ -182,10 +182,11 @@ static int hvt_op_release(struct inode *inode, struct file *file)
182 * connects back. 182 * connects back.
183 */ 183 */
184 hvt_reset(hvt); 184 hvt_reset(hvt);
185 mutex_unlock(&hvt->lock);
186 185
187 if (mode_old == HVUTIL_TRANSPORT_DESTROY) 186 if (mode_old == HVUTIL_TRANSPORT_DESTROY)
188 hvt_transport_free(hvt); 187 complete(&hvt->release);
188
189 mutex_unlock(&hvt->lock);
189 190
190 return 0; 191 return 0;
191} 192}
@@ -304,6 +305,7 @@ struct hvutil_transport *hvutil_transport_init(const char *name,
304 305
305 init_waitqueue_head(&hvt->outmsg_q); 306 init_waitqueue_head(&hvt->outmsg_q);
306 mutex_init(&hvt->lock); 307 mutex_init(&hvt->lock);
308 init_completion(&hvt->release);
307 309
308 spin_lock(&hvt_list_lock); 310 spin_lock(&hvt_list_lock);
309 list_add(&hvt->list, &hvt_list); 311 list_add(&hvt->list, &hvt_list);
@@ -351,6 +353,8 @@ void hvutil_transport_destroy(struct hvutil_transport *hvt)
351 if (hvt->cn_id.idx > 0 && hvt->cn_id.val > 0) 353 if (hvt->cn_id.idx > 0 && hvt->cn_id.val > 0)
352 cn_del_callback(&hvt->cn_id); 354 cn_del_callback(&hvt->cn_id);
353 355
354 if (mode_old != HVUTIL_TRANSPORT_CHARDEV) 356 if (mode_old == HVUTIL_TRANSPORT_CHARDEV)
355 hvt_transport_free(hvt); 357 wait_for_completion(&hvt->release);
358
359 hvt_transport_free(hvt);
356} 360}
diff --git a/drivers/hv/hv_utils_transport.h b/drivers/hv/hv_utils_transport.h
index d98f5225c3e6..79afb626e166 100644
--- a/drivers/hv/hv_utils_transport.h
+++ b/drivers/hv/hv_utils_transport.h
@@ -41,6 +41,7 @@ struct hvutil_transport {
41 int outmsg_len; /* its length */ 41 int outmsg_len; /* its length */
42 wait_queue_head_t outmsg_q; /* poll/read wait queue */ 42 wait_queue_head_t outmsg_q; /* poll/read wait queue */
43 struct mutex lock; /* protects struct members */ 43 struct mutex lock; /* protects struct members */
44 struct completion release; /* synchronize with fd release */
44}; 45};
45 46
46struct hvutil_transport *hvutil_transport_init(const char *name, 47struct hvutil_transport *hvutil_transport_init(const char *name,
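
The hv_fcopy/hv_kvp/hv_snapshot hunks above delete three private release_event completions; the transport now owns a single completion, and hvutil_transport_destroy() waits until the last file-descriptor release has run before freeing the object. A condensed sketch of that handshake, assuming a simplified demo_transport (the real code serializes these steps under hvt->lock and the mode field):

#include <linux/types.h>
#include <linux/completion.h>
#include <linux/slab.h>

struct demo_transport {
	bool dying;
	struct completion release;	/* fd release -> destroyer */
};

static void demo_fd_release(struct demo_transport *t)
{
	if (t->dying)
		complete(&t->release);	/* wake the destroyer */
}

static void demo_destroy(struct demo_transport *t, bool fd_open)
{
	t->dying = true;
	if (fd_open)
		wait_for_completion(&t->release);
	kfree(t);	/* safe: no release callback can still be running */
}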
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index da6b59ba5940..8370b9dc6037 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -939,8 +939,10 @@ static void vmbus_chan_sched(struct hv_per_cpu_context *hv_cpu)
939 if (relid == 0) 939 if (relid == 0)
940 continue; 940 continue;
941 941
942 rcu_read_lock();
943
942 /* Find channel based on relid */ 944 /* Find channel based on relid */
943 list_for_each_entry(channel, &hv_cpu->chan_list, percpu_list) { 945 list_for_each_entry_rcu(channel, &hv_cpu->chan_list, percpu_list) {
944 if (channel->offermsg.child_relid != relid) 946 if (channel->offermsg.child_relid != relid)
945 continue; 947 continue;
946 948
@@ -956,6 +958,8 @@ static void vmbus_chan_sched(struct hv_per_cpu_context *hv_cpu)
956 tasklet_schedule(&channel->callback_event); 958 tasklet_schedule(&channel->callback_event);
957 } 959 }
958 } 960 }
961
962 rcu_read_unlock();
959 } 963 }
960} 964}
961 965
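
On the reader side, vmbus_chan_sched() now brackets the relid lookup with rcu_read_lock()/rcu_read_unlock(), which is what makes the concurrent list_del_rcu() in percpu_channel_deq() safe. The same pattern, sketched against the hypothetical demo_chan list shown earlier:

#include <linux/rcupdate.h>
#include <linux/rculist.h>

/* Look up a channel by relid without taking any lock. */
static struct demo_chan *demo_chan_find(u32 relid)
{
	struct demo_chan *ch, *found = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(ch, &demo_list, node) {
		if (ch->relid == relid) {
			found = ch;	/* only valid inside the read side */
			break;
		}
	}
	rcu_read_unlock();

	return found;	/* caller needs its own reference/lifetime scheme */
}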
diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
index cccef87963e0..975c43d446f8 100644
--- a/drivers/hwmon/asus_atk0110.c
+++ b/drivers/hwmon/asus_atk0110.c
@@ -646,6 +646,9 @@ static int atk_read_value(struct atk_sensor_data *sensor, u64 *value)
646 else 646 else
647 err = atk_read_value_new(sensor, value); 647 err = atk_read_value_new(sensor, value);
648 648
649 if (err)
650 return err;
651
649 sensor->is_valid = true; 652 sensor->is_valid = true;
650 sensor->last_updated = jiffies; 653 sensor->last_updated = jiffies;
651 sensor->cached_value = *value; 654 sensor->cached_value = *value;
diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
index efb01c247e2d..4dfc7238313e 100644
--- a/drivers/hwmon/it87.c
+++ b/drivers/hwmon/it87.c
@@ -3198,7 +3198,7 @@ static int __init sm_it87_init(void)
3198{ 3198{
3199 int sioaddr[2] = { REG_2E, REG_4E }; 3199 int sioaddr[2] = { REG_2E, REG_4E };
3200 struct it87_sio_data sio_data; 3200 struct it87_sio_data sio_data;
3201 unsigned short isa_address; 3201 unsigned short isa_address[2];
3202 bool found = false; 3202 bool found = false;
3203 int i, err; 3203 int i, err;
3204 3204
@@ -3208,15 +3208,29 @@ static int __init sm_it87_init(void)
3208 3208
3209 for (i = 0; i < ARRAY_SIZE(sioaddr); i++) { 3209 for (i = 0; i < ARRAY_SIZE(sioaddr); i++) {
3210 memset(&sio_data, 0, sizeof(struct it87_sio_data)); 3210 memset(&sio_data, 0, sizeof(struct it87_sio_data));
3211 isa_address = 0; 3211 isa_address[i] = 0;
3212 err = it87_find(sioaddr[i], &isa_address, &sio_data); 3212 err = it87_find(sioaddr[i], &isa_address[i], &sio_data);
3213 if (err || isa_address == 0) 3213 if (err || isa_address[i] == 0)
3214 continue; 3214 continue;
3215 /*
3216 * Don't register second chip if its ISA address matches
3217 * the first chip's ISA address.
3218 */
3219 if (i && isa_address[i] == isa_address[0])
3220 break;
3215 3221
3216 err = it87_device_add(i, isa_address, &sio_data); 3222 err = it87_device_add(i, isa_address[i], &sio_data);
3217 if (err) 3223 if (err)
3218 goto exit_dev_unregister; 3224 goto exit_dev_unregister;
3225
3219 found = true; 3226 found = true;
3227
3228 /*
3229 * IT8705F may respond on both SIO addresses.
3230 * Stop probing after finding one.
3231 */
3232 if (sio_data.type == it87)
3233 break;
3220 } 3234 }
3221 3235
3222 if (!found) { 3236 if (!found) {
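
Keeping both probed ISA addresses in an array is what makes the new duplicate check possible: a chip such as the IT8705F can answer on both Super-I/O ports, and registering it twice would collide on the same ISA region. A stripped-down sketch of the dedupe logic, with hypothetical demo_find()/demo_add() standing in for it87_find()/it87_device_add():

#include <linux/errno.h>
#include <linux/types.h>

int demo_find(int sioaddr, unsigned short *isa);	/* hypothetical */
int demo_add(int idx, unsigned short isa);		/* hypothetical */

/* Probe two Super-I/O ports but register each ISA address only once. */
static int demo_probe_all(void)
{
	static const int sio[2] = { 0x2e, 0x4e };
	unsigned short isa[2] = { 0, 0 };
	bool found = false;
	int i;

	for (i = 0; i < 2; i++) {
		if (demo_find(sio[i], &isa[i]))
			continue;
		/* Same chip answering on both ports: stop here. */
		if (i && isa[i] == isa[0])
			break;
		if (demo_add(i, isa[i]))
			return -ENODEV;
		found = true;
	}
	return found ? 0 : -ENODEV;
}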
diff --git a/drivers/hwmon/max31790.c b/drivers/hwmon/max31790.c
index c1b9275978f9..281491cca510 100644
--- a/drivers/hwmon/max31790.c
+++ b/drivers/hwmon/max31790.c
@@ -311,7 +311,7 @@ static int max31790_write_pwm(struct device *dev, u32 attr, int channel,
311 data->pwm[channel] = val << 8; 311 data->pwm[channel] = val << 8;
312 err = i2c_smbus_write_word_swapped(client, 312 err = i2c_smbus_write_word_swapped(client,
313 MAX31790_REG_PWMOUT(channel), 313 MAX31790_REG_PWMOUT(channel),
314 val); 314 data->pwm[channel]);
315 break; 315 break;
316 case hwmon_pwm_enable: 316 case hwmon_pwm_enable:
317 fan_config = data->fan_config[channel]; 317 fan_config = data->fan_config[channel];
diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c
index cdd9b3b26195..7563eceeaaea 100644
--- a/drivers/hwtracing/intel_th/core.c
+++ b/drivers/hwtracing/intel_th/core.c
@@ -221,8 +221,10 @@ static int intel_th_output_activate(struct intel_th_device *thdev)
221 else 221 else
222 intel_th_trace_enable(thdev); 222 intel_th_trace_enable(thdev);
223 223
224 if (ret) 224 if (ret) {
225 pm_runtime_put(&thdev->dev); 225 pm_runtime_put(&thdev->dev);
226 module_put(thdrv->driver.owner);
227 }
226 228
227 return ret; 229 return ret;
228} 230}
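
The fix restores symmetry on the failure path: activation takes both a runtime-PM reference and a module reference, so a failed activate must drop both. The general pattern as a sketch (demo_hw_enable() is a hypothetical stand-in, not the intel_th internals):

#include <linux/module.h>
#include <linux/pm_runtime.h>

int demo_hw_enable(struct device *dev);	/* hypothetical */

static int demo_activate(struct device *dev, struct module *owner)
{
	int ret;

	if (!try_module_get(owner))
		return -ENODEV;
	pm_runtime_get_sync(dev);

	ret = demo_hw_enable(dev);
	if (ret) {
		/* Unwind in reverse order of acquisition. */
		pm_runtime_put(dev);
		module_put(owner);
	}
	return ret;
}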
diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
index 0bba3842336e..590cf90dd21a 100644
--- a/drivers/hwtracing/intel_th/pci.c
+++ b/drivers/hwtracing/intel_th/pci.c
@@ -85,6 +85,16 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
85 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa2a6), 85 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa2a6),
86 .driver_data = (kernel_ulong_t)0, 86 .driver_data = (kernel_ulong_t)0,
87 }, 87 },
88 {
89 /* Denverton */
90 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x19e1),
91 .driver_data = (kernel_ulong_t)0,
92 },
93 {
94 /* Gemini Lake */
95 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x318e),
96 .driver_data = (kernel_ulong_t)0,
97 },
88 { 0 }, 98 { 0 },
89}; 99};
90 100
diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c
index dfc1c0e37c40..ad31d21da316 100644
--- a/drivers/i2c/muxes/i2c-mux-pca954x.c
+++ b/drivers/i2c/muxes/i2c-mux-pca954x.c
@@ -35,7 +35,6 @@
35 * warranty of any kind, whether express or implied. 35 * warranty of any kind, whether express or implied.
36 */ 36 */
37 37
38#include <linux/acpi.h>
39#include <linux/device.h> 38#include <linux/device.h>
40#include <linux/gpio/consumer.h> 39#include <linux/gpio/consumer.h>
41#include <linux/i2c.h> 40#include <linux/i2c.h>
@@ -117,6 +116,10 @@ static const struct chip_desc chips[] = {
117 .has_irq = 1, 116 .has_irq = 1,
118 .muxtype = pca954x_isswi, 117 .muxtype = pca954x_isswi,
119 }, 118 },
119 [pca_9546] = {
120 .nchans = 4,
121 .muxtype = pca954x_isswi,
122 },
120 [pca_9547] = { 123 [pca_9547] = {
121 .nchans = 8, 124 .nchans = 8,
122 .enable = 0x8, 125 .enable = 0x8,
@@ -134,28 +137,13 @@ static const struct i2c_device_id pca954x_id[] = {
134 { "pca9543", pca_9543 }, 137 { "pca9543", pca_9543 },
135 { "pca9544", pca_9544 }, 138 { "pca9544", pca_9544 },
136 { "pca9545", pca_9545 }, 139 { "pca9545", pca_9545 },
137 { "pca9546", pca_9545 }, 140 { "pca9546", pca_9546 },
138 { "pca9547", pca_9547 }, 141 { "pca9547", pca_9547 },
139 { "pca9548", pca_9548 }, 142 { "pca9548", pca_9548 },
140 { } 143 { }
141}; 144};
142MODULE_DEVICE_TABLE(i2c, pca954x_id); 145MODULE_DEVICE_TABLE(i2c, pca954x_id);
143 146
144#ifdef CONFIG_ACPI
145static const struct acpi_device_id pca954x_acpi_ids[] = {
146 { .id = "PCA9540", .driver_data = pca_9540 },
147 { .id = "PCA9542", .driver_data = pca_9542 },
148 { .id = "PCA9543", .driver_data = pca_9543 },
149 { .id = "PCA9544", .driver_data = pca_9544 },
150 { .id = "PCA9545", .driver_data = pca_9545 },
151 { .id = "PCA9546", .driver_data = pca_9545 },
152 { .id = "PCA9547", .driver_data = pca_9547 },
153 { .id = "PCA9548", .driver_data = pca_9548 },
154 { }
155};
156MODULE_DEVICE_TABLE(acpi, pca954x_acpi_ids);
157#endif
158
159#ifdef CONFIG_OF 147#ifdef CONFIG_OF
160static const struct of_device_id pca954x_of_match[] = { 148static const struct of_device_id pca954x_of_match[] = {
161 { .compatible = "nxp,pca9540", .data = &chips[pca_9540] }, 149 { .compatible = "nxp,pca9540", .data = &chips[pca_9540] },
@@ -393,17 +381,8 @@ static int pca954x_probe(struct i2c_client *client,
393 match = of_match_device(of_match_ptr(pca954x_of_match), &client->dev); 381 match = of_match_device(of_match_ptr(pca954x_of_match), &client->dev);
394 if (match) 382 if (match)
395 data->chip = of_device_get_match_data(&client->dev); 383 data->chip = of_device_get_match_data(&client->dev);
396 else if (id) 384 else
397 data->chip = &chips[id->driver_data]; 385 data->chip = &chips[id->driver_data];
398 else {
399 const struct acpi_device_id *acpi_id;
400
401 acpi_id = acpi_match_device(ACPI_PTR(pca954x_acpi_ids),
402 &client->dev);
403 if (!acpi_id)
404 return -ENODEV;
405 data->chip = &chips[acpi_id->driver_data];
406 }
407 386
408 data->last_chan = 0; /* force the first selection */ 387 data->last_chan = 0; /* force the first selection */
409 388
@@ -492,7 +471,6 @@ static struct i2c_driver pca954x_driver = {
492 .name = "pca954x", 471 .name = "pca954x",
493 .pm = &pca954x_pm, 472 .pm = &pca954x_pm,
494 .of_match_table = of_match_ptr(pca954x_of_match), 473 .of_match_table = of_match_ptr(pca954x_of_match),
495 .acpi_match_table = ACPI_PTR(pca954x_acpi_ids),
496 }, 474 },
497 .probe = pca954x_probe, 475 .probe = pca954x_probe,
498 .remove = pca954x_remove, 476 .remove = pca954x_remove,
diff --git a/drivers/iio/accel/hid-sensor-accel-3d.c b/drivers/iio/accel/hid-sensor-accel-3d.c
index ca5759c0c318..43a6cb078193 100644
--- a/drivers/iio/accel/hid-sensor-accel-3d.c
+++ b/drivers/iio/accel/hid-sensor-accel-3d.c
@@ -370,10 +370,12 @@ static int hid_accel_3d_probe(struct platform_device *pdev)
370 name = "accel_3d"; 370 name = "accel_3d";
371 channel_spec = accel_3d_channels; 371 channel_spec = accel_3d_channels;
372 channel_size = sizeof(accel_3d_channels); 372 channel_size = sizeof(accel_3d_channels);
373 indio_dev->num_channels = ARRAY_SIZE(accel_3d_channels);
373 } else { 374 } else {
374 name = "gravity"; 375 name = "gravity";
375 channel_spec = gravity_channels; 376 channel_spec = gravity_channels;
376 channel_size = sizeof(gravity_channels); 377 channel_size = sizeof(gravity_channels);
378 indio_dev->num_channels = ARRAY_SIZE(gravity_channels);
377 } 379 }
378 ret = hid_sensor_parse_common_attributes(hsdev, hsdev->usage, 380 ret = hid_sensor_parse_common_attributes(hsdev, hsdev->usage,
379 &accel_state->common_attributes); 381 &accel_state->common_attributes);
@@ -395,7 +397,6 @@ static int hid_accel_3d_probe(struct platform_device *pdev)
395 goto error_free_dev_mem; 397 goto error_free_dev_mem;
396 } 398 }
397 399
398 indio_dev->num_channels = ARRAY_SIZE(accel_3d_channels);
399 indio_dev->dev.parent = &pdev->dev; 400 indio_dev->dev.parent = &pdev->dev;
400 indio_dev->info = &accel_3d_info; 401 indio_dev->info = &accel_3d_info;
401 indio_dev->name = name; 402 indio_dev->name = name;
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
index ad9dec30bb30..4282ceca3d8f 100644
--- a/drivers/iio/adc/ti_am335x_adc.c
+++ b/drivers/iio/adc/ti_am335x_adc.c
@@ -169,7 +169,9 @@ static irqreturn_t tiadc_irq_h(int irq, void *private)
169{ 169{
170 struct iio_dev *indio_dev = private; 170 struct iio_dev *indio_dev = private;
171 struct tiadc_device *adc_dev = iio_priv(indio_dev); 171 struct tiadc_device *adc_dev = iio_priv(indio_dev);
172 unsigned int status, config; 172 unsigned int status, config, adc_fsm;
173 unsigned short count = 0;
174
173 status = tiadc_readl(adc_dev, REG_IRQSTATUS); 175 status = tiadc_readl(adc_dev, REG_IRQSTATUS);
174 176
175 /* 177 /*
@@ -183,6 +185,15 @@ static irqreturn_t tiadc_irq_h(int irq, void *private)
183 tiadc_writel(adc_dev, REG_CTRL, config); 185 tiadc_writel(adc_dev, REG_CTRL, config);
184 tiadc_writel(adc_dev, REG_IRQSTATUS, IRQENB_FIFO1OVRRUN 186 tiadc_writel(adc_dev, REG_IRQSTATUS, IRQENB_FIFO1OVRRUN
185 | IRQENB_FIFO1UNDRFLW | IRQENB_FIFO1THRES); 187 | IRQENB_FIFO1UNDRFLW | IRQENB_FIFO1THRES);
188
 189	/* Wait for the idle state: the ADC must finish
 190	 * the current conversion before the module can
 191	 * be disabled.
 192	 */
193 do {
194 adc_fsm = tiadc_readl(adc_dev, REG_ADCFSM);
195 } while (adc_fsm != 0x10 && count++ < 100);
196
186 tiadc_writel(adc_dev, REG_CTRL, (config | CNTRLREG_TSCSSENB)); 197 tiadc_writel(adc_dev, REG_CTRL, (config | CNTRLREG_TSCSSENB));
187 return IRQ_HANDLED; 198 return IRQ_HANDLED;
188 } else if (status & IRQENB_FIFO1THRES) { 199 } else if (status & IRQENB_FIFO1THRES) {
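
The added loop is a bounded busy-wait: it polls the ADC finite-state-machine register until the hardware reports idle (0x10 here) or a retry budget runs out, so a wedged ADC cannot hang the interrupt handler. As a generic sketch, with demo_read_fsm() standing in for the tiadc_readl() of REG_ADCFSM and the idle value and retry count carried over as assumptions:

#include <linux/types.h>

unsigned int demo_read_fsm(void);	/* hypothetical register read */

/* Poll until the HW reports idle, but never spin forever in IRQ context. */
static bool demo_wait_idle(void)
{
	unsigned short tries = 0;
	unsigned int fsm;

	do {
		fsm = demo_read_fsm();
	} while (fsm != 0x10 && tries++ < 100);

	return fsm == 0x10;
}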
diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
index d6c372bb433b..c17596f7ed2c 100644
--- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
+++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
@@ -61,7 +61,7 @@ static int cros_ec_sensors_read(struct iio_dev *indio_dev,
61 ret = st->core.read_ec_sensors_data(indio_dev, 1 << idx, &data); 61 ret = st->core.read_ec_sensors_data(indio_dev, 1 << idx, &data);
62 if (ret < 0) 62 if (ret < 0)
63 break; 63 break;
64 64 ret = IIO_VAL_INT;
65 *val = data; 65 *val = data;
66 break; 66 break;
67 case IIO_CHAN_INFO_CALIBBIAS: 67 case IIO_CHAN_INFO_CALIBBIAS:
@@ -76,7 +76,7 @@ static int cros_ec_sensors_read(struct iio_dev *indio_dev,
76 for (i = CROS_EC_SENSOR_X; i < CROS_EC_SENSOR_MAX_AXIS; i++) 76 for (i = CROS_EC_SENSOR_X; i < CROS_EC_SENSOR_MAX_AXIS; i++)
77 st->core.calib[i] = 77 st->core.calib[i] =
78 st->core.resp->sensor_offset.offset[i]; 78 st->core.resp->sensor_offset.offset[i];
79 79 ret = IIO_VAL_INT;
80 *val = st->core.calib[idx]; 80 *val = st->core.calib[idx];
81 break; 81 break;
82 case IIO_CHAN_INFO_SCALE: 82 case IIO_CHAN_INFO_SCALE:
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
index 7afdac42ed42..01e02b9926d4 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
@@ -379,6 +379,8 @@ int hid_sensor_parse_common_attributes(struct hid_sensor_hub_device *hsdev,
379{ 379{
380 380
381 struct hid_sensor_hub_attribute_info timestamp; 381 struct hid_sensor_hub_attribute_info timestamp;
382 s32 value;
383 int ret;
382 384
383 hid_sensor_get_reporting_interval(hsdev, usage_id, st); 385 hid_sensor_get_reporting_interval(hsdev, usage_id, st);
384 386
@@ -417,6 +419,14 @@ int hid_sensor_parse_common_attributes(struct hid_sensor_hub_device *hsdev,
417 st->sensitivity.index, st->sensitivity.report_id, 419 st->sensitivity.index, st->sensitivity.report_id,
418 timestamp.index, timestamp.report_id); 420 timestamp.index, timestamp.report_id);
419 421
422 ret = sensor_hub_get_feature(hsdev,
423 st->power_state.report_id,
424 st->power_state.index, sizeof(value), &value);
425 if (ret < 0)
426 return ret;
427 if (value < 0)
428 return -EINVAL;
429
420 return 0; 430 return 0;
421} 431}
422EXPORT_SYMBOL(hid_sensor_parse_common_attributes); 432EXPORT_SYMBOL(hid_sensor_parse_common_attributes);
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
index a3cce3a38300..ecf592d69043 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
@@ -51,8 +51,6 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state)
51 st->report_state.report_id, 51 st->report_state.report_id,
52 st->report_state.index, 52 st->report_state.index,
53 HID_USAGE_SENSOR_PROP_REPORTING_STATE_ALL_EVENTS_ENUM); 53 HID_USAGE_SENSOR_PROP_REPORTING_STATE_ALL_EVENTS_ENUM);
54
55 poll_value = hid_sensor_read_poll_value(st);
56 } else { 54 } else {
57 int val; 55 int val;
58 56
@@ -89,7 +87,9 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state)
89 sensor_hub_get_feature(st->hsdev, st->power_state.report_id, 87 sensor_hub_get_feature(st->hsdev, st->power_state.report_id,
90 st->power_state.index, 88 st->power_state.index,
91 sizeof(state_val), &state_val); 89 sizeof(state_val), &state_val);
92 if (state && poll_value) 90 if (state)
91 poll_value = hid_sensor_read_poll_value(st);
92 if (poll_value > 0)
93 msleep_interruptible(poll_value * 2); 93 msleep_interruptible(poll_value * 2);
94 94
95 return 0; 95 return 0;
diff --git a/drivers/iio/gyro/bmg160_core.c b/drivers/iio/gyro/bmg160_core.c
index f7fcfa886f72..821919dd245b 100644
--- a/drivers/iio/gyro/bmg160_core.c
+++ b/drivers/iio/gyro/bmg160_core.c
@@ -27,6 +27,7 @@
27#include <linux/iio/trigger_consumer.h> 27#include <linux/iio/trigger_consumer.h>
28#include <linux/iio/triggered_buffer.h> 28#include <linux/iio/triggered_buffer.h>
29#include <linux/regmap.h> 29#include <linux/regmap.h>
30#include <linux/delay.h>
30#include "bmg160.h" 31#include "bmg160.h"
31 32
32#define BMG160_IRQ_NAME "bmg160_event" 33#define BMG160_IRQ_NAME "bmg160_event"
@@ -52,6 +53,9 @@
52#define BMG160_DEF_BW 100 53#define BMG160_DEF_BW 100
53#define BMG160_REG_PMU_BW_RES BIT(7) 54#define BMG160_REG_PMU_BW_RES BIT(7)
54 55
56#define BMG160_GYRO_REG_RESET 0x14
57#define BMG160_GYRO_RESET_VAL 0xb6
58
55#define BMG160_REG_INT_MAP_0 0x17 59#define BMG160_REG_INT_MAP_0 0x17
56#define BMG160_INT_MAP_0_BIT_ANY BIT(1) 60#define BMG160_INT_MAP_0_BIT_ANY BIT(1)
57 61
@@ -236,6 +240,14 @@ static int bmg160_chip_init(struct bmg160_data *data)
236 int ret; 240 int ret;
237 unsigned int val; 241 unsigned int val;
238 242
243 /*
244 * Reset chip to get it in a known good state. A delay of 30ms after
245 * reset is required according to the datasheet.
246 */
247 regmap_write(data->regmap, BMG160_GYRO_REG_RESET,
248 BMG160_GYRO_RESET_VAL);
249 usleep_range(30000, 30700);
250
239 ret = regmap_read(data->regmap, BMG160_REG_CHIP_ID, &val); 251 ret = regmap_read(data->regmap, BMG160_REG_CHIP_ID, &val);
240 if (ret < 0) { 252 if (ret < 0) {
241 dev_err(dev, "Error reading reg_chip_id\n"); 253 dev_err(dev, "Error reading reg_chip_id\n");
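
Resetting the gyro before the chip-ID read guarantees a known register state regardless of what a bootloader or an earlier driver instance left behind; the 30 ms settle time comes from the datasheet, and usleep_range() gives the scheduler some slack. Sketched against a hypothetical regmap'd device (register and value mirror the defines above):

#include <linux/delay.h>
#include <linux/regmap.h>

#define DEMO_REG_RESET	0x14
#define DEMO_RESET_VAL	0xb6

static void demo_chip_reset(struct regmap *map)
{
	/* Soft reset; the error is deliberately ignored, since a
	 * later chip-ID read would catch a dead device anyway. */
	regmap_write(map, DEMO_REG_RESET, DEMO_RESET_VAL);

	/* Datasheet: allow 30 ms for the reset to complete. */
	usleep_range(30000, 30700);
}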
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
index 78532ce07449..81b572d7699a 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
@@ -193,8 +193,8 @@ int st_lsm6dsx_update_watermark(struct st_lsm6dsx_sensor *sensor, u16 watermark)
193 if (err < 0) 193 if (err < 0)
194 goto out; 194 goto out;
195 195
196 fifo_watermark = ((data & ~ST_LSM6DSX_FIFO_TH_MASK) << 8) | 196 fifo_watermark = ((data << 8) & ~ST_LSM6DSX_FIFO_TH_MASK) |
197 (fifo_watermark & ST_LSM6DSX_FIFO_TH_MASK); 197 (fifo_watermark & ST_LSM6DSX_FIFO_TH_MASK);
198 198
199 wdata = cpu_to_le16(fifo_watermark); 199 wdata = cpu_to_le16(fifo_watermark);
200 err = hw->tf->write(hw->dev, ST_LSM6DSX_REG_FIFO_THL_ADDR, 200 err = hw->tf->write(hw->dev, ST_LSM6DSX_REG_FIFO_THL_ADDR,
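
The one-line st_lsm6dsx change is an operator-precedence fix: the high byte read back from the device must be shifted into place before the threshold mask is applied, otherwise the mask wipes the control bits it was meant to preserve. A self-contained userspace illustration, assuming an 11-bit watermark field like the driver's:

#include <stdint.h>
#include <stdio.h>

#define FIFO_TH_MASK	0x07ff	/* assumed 11-bit watermark field */

int main(void)
{
	uint8_t high = 0x5a;	/* byte read from the THH register */
	uint16_t wm = 0x123;	/* new watermark value */

	/* Buggy: masking the 8-bit value first always yields 0. */
	uint16_t bad  = ((high & ~FIFO_TH_MASK) << 8) | (wm & FIFO_TH_MASK);
	/* Fixed: shift into place, then mask off the watermark bits. */
	uint16_t good = (((uint16_t)high << 8) & ~FIFO_TH_MASK) |
			(wm & FIFO_TH_MASK);

	printf("bad=0x%04x good=0x%04x\n", bad, good);	/* 0x0123 vs 0x5923 */
	return 0;
}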
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index d18ded45bedd..3ff91e02fee3 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -610,10 +610,9 @@ static ssize_t __iio_format_value(char *buf, size_t len, unsigned int type,
610 tmp0 = (int)div_s64_rem(tmp, 1000000000, &tmp1); 610 tmp0 = (int)div_s64_rem(tmp, 1000000000, &tmp1);
611 return snprintf(buf, len, "%d.%09u", tmp0, abs(tmp1)); 611 return snprintf(buf, len, "%d.%09u", tmp0, abs(tmp1));
612 case IIO_VAL_FRACTIONAL_LOG2: 612 case IIO_VAL_FRACTIONAL_LOG2:
613 tmp = (s64)vals[0] * 1000000000LL >> vals[1]; 613 tmp = shift_right((s64)vals[0] * 1000000000LL, vals[1]);
614 tmp1 = do_div(tmp, 1000000000LL); 614 tmp0 = (int)div_s64_rem(tmp, 1000000000LL, &tmp1);
615 tmp0 = tmp; 615 return snprintf(buf, len, "%d.%09u", tmp0, abs(tmp1));
616 return snprintf(buf, len, "%d.%09u", tmp0, tmp1);
617 case IIO_VAL_INT_MULTIPLE: 616 case IIO_VAL_INT_MULTIPLE:
618 { 617 {
619 int i; 618 int i;
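
The rewritten IIO_VAL_FRACTIONAL_LOG2 case fixes negative values twice over: a sign-safe right shift replaces the plain `>>` (implementation-defined for negative signed operands), and div_s64_rem() replaces do_div(), which only handles unsigned dividends. A worked userspace equivalent; the local shift helper is an assumption mirroring what the kernel helper does:

#include <inttypes.h>
#include <stdio.h>

/* Assumed equivalent of the kernel's sign-safe right shift. */
static int64_t shift_right64(int64_t v, unsigned int s)
{
	return v < 0 ? -((-v) >> s) : v >> s;
}

int main(void)
{
	int val = -3, frac_log2 = 1;	/* represents -3 / 2^1 */
	int64_t tmp = shift_right64((int64_t)val * 1000000000LL, frac_log2);
	int64_t ipart = tmp / 1000000000LL;
	int64_t rem = tmp % 1000000000LL; /* signed, like div_s64_rem() */

	printf("%" PRId64 ".%09" PRId64 "\n", ipart, rem < 0 ? -rem : rem);
	/* prints -1.500000000 */
	return 0;
}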
diff --git a/drivers/iio/magnetometer/ak8974.c b/drivers/iio/magnetometer/ak8974.c
index 6dd8cbd7ce95..e13370dc9b1c 100644
--- a/drivers/iio/magnetometer/ak8974.c
+++ b/drivers/iio/magnetometer/ak8974.c
@@ -763,7 +763,7 @@ power_off:
763 return ret; 763 return ret;
764} 764}
765 765
766static int __exit ak8974_remove(struct i2c_client *i2c) 766static int ak8974_remove(struct i2c_client *i2c)
767{ 767{
768 struct iio_dev *indio_dev = i2c_get_clientdata(i2c); 768 struct iio_dev *indio_dev = i2c_get_clientdata(i2c);
769 struct ak8974 *ak8974 = iio_priv(indio_dev); 769 struct ak8974 *ak8974 = iio_priv(indio_dev);
@@ -845,7 +845,7 @@ static struct i2c_driver ak8974_driver = {
845 .of_match_table = of_match_ptr(ak8974_of_match), 845 .of_match_table = of_match_ptr(ak8974_of_match),
846 }, 846 },
847 .probe = ak8974_probe, 847 .probe = ak8974_probe,
848 .remove = __exit_p(ak8974_remove), 848 .remove = ak8974_remove,
849 .id_table = ak8974_id, 849 .id_table = ak8974_id,
850}; 850};
851module_i2c_driver(ak8974_driver); 851module_i2c_driver(ak8974_driver);
diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c
index 5f2680855552..fd0edca0e656 100644
--- a/drivers/iio/pressure/st_pressure_core.c
+++ b/drivers/iio/pressure/st_pressure_core.c
@@ -457,6 +457,7 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
457 .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, 457 .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
458 }, 458 },
459 .multi_read_bit = true, 459 .multi_read_bit = true,
460 .bootime = 2,
460 }, 461 },
461}; 462};
462 463
diff --git a/drivers/infiniband/core/cq.c b/drivers/infiniband/core/cq.c
index e95510117a6d..f2ae75fa3128 100644
--- a/drivers/infiniband/core/cq.c
+++ b/drivers/infiniband/core/cq.c
@@ -29,7 +29,13 @@ static int __ib_process_cq(struct ib_cq *cq, int budget)
29{ 29{
30 int i, n, completed = 0; 30 int i, n, completed = 0;
31 31
32 while ((n = ib_poll_cq(cq, IB_POLL_BATCH, cq->wc)) > 0) { 32 /*
 33	 * budget may be (-1) when the caller does not
 34	 * want to bound this call, so an unsigned
 35	 * minimum is needed here.
36 */
37 while ((n = ib_poll_cq(cq, min_t(u32, IB_POLL_BATCH,
38 budget - completed), cq->wc)) > 0) {
33 for (i = 0; i < n; i++) { 39 for (i = 0; i < n; i++) {
34 struct ib_wc *wc = &cq->wc[i]; 40 struct ib_wc *wc = &cq->wc[i];
35 41
@@ -196,7 +202,7 @@ void ib_free_cq(struct ib_cq *cq)
196 irq_poll_disable(&cq->iop); 202 irq_poll_disable(&cq->iop);
197 break; 203 break;
198 case IB_POLL_WORKQUEUE: 204 case IB_POLL_WORKQUEUE:
199 flush_work(&cq->work); 205 cancel_work_sync(&cq->work);
200 break; 206 break;
201 default: 207 default:
202 WARN_ON_ONCE(1); 208 WARN_ON_ONCE(1);
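
The min_t(u32, ...) in the polling loop is doing real work: a budget of -1 means "no bound", and casting it to u32 turns it into UINT_MAX, so the unsigned minimum degenerates to the batch size. A two-line demonstration of the idiom, with a plain C stand-in for the kernel's min_t macro:

#include <stdint.h>
#include <stdio.h>

#define MIN_U32(a, b) \
	((uint32_t)(a) < (uint32_t)(b) ? (uint32_t)(a) : (uint32_t)(b))

int main(void)
{
	int batch = 16, budget = -1, completed = 0;

	/* budget - completed == -1 -> 0xffffffff as u32 -> batch wins */
	printf("%u\n", MIN_U32(batch, budget - completed));	/* 16 */

	budget = 10; completed = 7;
	printf("%u\n", MIN_U32(batch, budget - completed));	/* 3 */
	return 0;
}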
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 593d2ce6ec7c..7c9e34d679d3 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -336,12 +336,26 @@ int ib_register_device(struct ib_device *device,
336 struct device *parent = device->dev.parent; 336 struct device *parent = device->dev.parent;
337 337
338 WARN_ON_ONCE(!parent); 338 WARN_ON_ONCE(!parent);
339 if (!device->dev.dma_ops) 339 WARN_ON_ONCE(device->dma_device);
340 device->dev.dma_ops = parent->dma_ops; 340 if (device->dev.dma_ops) {
341 if (!device->dev.dma_mask) 341 /*
342 device->dev.dma_mask = parent->dma_mask; 342 * The caller provided custom DMA operations. Copy the
343 if (!device->dev.coherent_dma_mask) 343 * DMA-related fields that are used by e.g. dma_alloc_coherent()
344 device->dev.coherent_dma_mask = parent->coherent_dma_mask; 344 * into device->dev.
345 */
346 device->dma_device = &device->dev;
347 if (!device->dev.dma_mask)
348 device->dev.dma_mask = parent->dma_mask;
349 if (!device->dev.coherent_dma_mask)
350 device->dev.coherent_dma_mask =
351 parent->coherent_dma_mask;
352 } else {
353 /*
354 * The caller did not provide custom DMA operations. Use the
355 * DMA mapping operations of the parent device.
356 */
357 device->dma_device = parent;
358 }
345 359
346 mutex_lock(&device_mutex); 360 mutex_lock(&device_mutex);
347 361
@@ -1015,8 +1029,7 @@ static int __init ib_core_init(void)
1015 return -ENOMEM; 1029 return -ENOMEM;
1016 1030
1017 ib_comp_wq = alloc_workqueue("ib-comp-wq", 1031 ib_comp_wq = alloc_workqueue("ib-comp-wq",
1018 WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM, 1032 WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
1019 WQ_UNBOUND_MAX_ACTIVE);
1020 if (!ib_comp_wq) { 1033 if (!ib_comp_wq) {
1021 ret = -ENOMEM; 1034 ret = -ENOMEM;
1022 goto err; 1035 goto err;
diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c
index 0f5d43d1f5fc..70c3e9e79508 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_utils.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c
@@ -160,6 +160,9 @@ int i40iw_inetaddr_event(struct notifier_block *notifier,
160 return NOTIFY_DONE; 160 return NOTIFY_DONE;
161 161
162 iwdev = &hdl->device; 162 iwdev = &hdl->device;
163 if (iwdev->init_state < INET_NOTIFIER)
164 return NOTIFY_DONE;
165
163 netdev = iwdev->ldev->netdev; 166 netdev = iwdev->ldev->netdev;
164 upper_dev = netdev_master_upper_dev_get(netdev); 167 upper_dev = netdev_master_upper_dev_get(netdev);
165 if (netdev != event_netdev) 168 if (netdev != event_netdev)
@@ -214,6 +217,9 @@ int i40iw_inet6addr_event(struct notifier_block *notifier,
214 return NOTIFY_DONE; 217 return NOTIFY_DONE;
215 218
216 iwdev = &hdl->device; 219 iwdev = &hdl->device;
220 if (iwdev->init_state < INET_NOTIFIER)
221 return NOTIFY_DONE;
222
217 netdev = iwdev->ldev->netdev; 223 netdev = iwdev->ldev->netdev;
218 if (netdev != event_netdev) 224 if (netdev != event_netdev)
219 return NOTIFY_DONE; 225 return NOTIFY_DONE;
@@ -260,6 +266,8 @@ int i40iw_net_event(struct notifier_block *notifier, unsigned long event, void *
260 if (!iwhdl) 266 if (!iwhdl)
261 return NOTIFY_DONE; 267 return NOTIFY_DONE;
262 iwdev = &iwhdl->device; 268 iwdev = &iwhdl->device;
269 if (iwdev->init_state < INET_NOTIFIER)
270 return NOTIFY_DONE;
263 p = (__be32 *)neigh->primary_key; 271 p = (__be32 *)neigh->primary_key;
264 i40iw_copy_ip_ntohl(local_ipaddr, p); 272 i40iw_copy_ip_ntohl(local_ipaddr, p);
265 if (neigh->nud_state & NUD_VALID) { 273 if (neigh->nud_state & NUD_VALID) {
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index bc9fb144e57b..c52edeafd616 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -372,7 +372,7 @@ static int _ocrdma_pd_mgr_put_bitmap(struct ocrdma_dev *dev, u16 pd_id,
372 return 0; 372 return 0;
373} 373}
374 374
375static u8 ocrdma_put_pd_num(struct ocrdma_dev *dev, u16 pd_id, 375static int ocrdma_put_pd_num(struct ocrdma_dev *dev, u16 pd_id,
376 bool dpp_pool) 376 bool dpp_pool)
377{ 377{
378 int status; 378 int status;
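
Changing the ocrdma return type from u8 to int matters because the function propagates a status that can be a negative errno: squeezed into a u8, -ENOMEM (-12) becomes 244, so a `status < 0` check at the call site can never fire. A minimal userspace reproduction:

#include <stdint.h>
#include <stdio.h>

static uint8_t bad_status(void)  { return -12; }	/* -ENOMEM, truncated */
static int     good_status(void) { return -12; }

int main(void)
{
	/* Always false: the u8 promotes to a non-negative int (244). */
	if (bad_status() < 0)
		printf("bad: caught error\n");
	else
		printf("bad: error lost (value %u)\n", bad_status());

	if (good_status() < 0)
		printf("good: caught error %d\n", good_status());
	return 0;
}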
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 12c4208fd701..af9f596bb68b 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -7068,7 +7068,7 @@ static void qib_7322_txchk_change(struct qib_devdata *dd, u32 start,
7068 unsigned long flags; 7068 unsigned long flags;
7069 7069
7070 while (wait) { 7070 while (wait) {
7071 unsigned long shadow; 7071 unsigned long shadow = 0;
7072 int cstart, previ = -1; 7072 int cstart, previ = -1;
7073 7073
7074 /* 7074 /*
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
index 3cd96c1b9502..9fbe22d3467b 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
@@ -69,6 +69,9 @@
69 */ 69 */
70#define PCI_DEVICE_ID_VMWARE_PVRDMA 0x0820 70#define PCI_DEVICE_ID_VMWARE_PVRDMA 0x0820
71 71
72#define PVRDMA_NUM_RING_PAGES 4
73#define PVRDMA_QP_NUM_HEADER_PAGES 1
74
72struct pvrdma_dev; 75struct pvrdma_dev;
73 76
74struct pvrdma_page_dir { 77struct pvrdma_page_dir {
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h
index e69d6f3cae32..09078ccfaec7 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h
@@ -132,7 +132,7 @@ enum pvrdma_pci_resource {
132 132
133enum pvrdma_device_ctl { 133enum pvrdma_device_ctl {
134 PVRDMA_DEVICE_CTL_ACTIVATE, /* Activate device. */ 134 PVRDMA_DEVICE_CTL_ACTIVATE, /* Activate device. */
135 PVRDMA_DEVICE_CTL_QUIESCE, /* Quiesce device. */ 135 PVRDMA_DEVICE_CTL_UNQUIESCE, /* Unquiesce device. */
136 PVRDMA_DEVICE_CTL_RESET, /* Reset device. */ 136 PVRDMA_DEVICE_CTL_RESET, /* Reset device. */
137}; 137};
138 138
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
index 100bea5c42ff..34ebc7615411 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
@@ -56,7 +56,7 @@
56#include "pvrdma.h" 56#include "pvrdma.h"
57 57
58#define DRV_NAME "vmw_pvrdma" 58#define DRV_NAME "vmw_pvrdma"
59#define DRV_VERSION "1.0.0.0-k" 59#define DRV_VERSION "1.0.1.0-k"
60 60
61static DEFINE_MUTEX(pvrdma_device_list_lock); 61static DEFINE_MUTEX(pvrdma_device_list_lock);
62static LIST_HEAD(pvrdma_device_list); 62static LIST_HEAD(pvrdma_device_list);
@@ -660,7 +660,16 @@ static void pvrdma_netdevice_event_handle(struct pvrdma_dev *dev,
660 pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ERR); 660 pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ERR);
661 break; 661 break;
662 case NETDEV_UP: 662 case NETDEV_UP:
663 pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ACTIVE); 663 pvrdma_write_reg(dev, PVRDMA_REG_CTL,
664 PVRDMA_DEVICE_CTL_UNQUIESCE);
665
666 mb();
667
668 if (pvrdma_read_reg(dev, PVRDMA_REG_ERR))
669 dev_err(&dev->pdev->dev,
670 "failed to activate device during link up\n");
671 else
672 pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ACTIVE);
664 break; 673 break;
665 default: 674 default:
666 dev_dbg(&dev->pdev->dev, "ignore netdevice event %ld on %s\n", 675 dev_dbg(&dev->pdev->dev, "ignore netdevice event %ld on %s\n",
@@ -858,7 +867,7 @@ static int pvrdma_pci_probe(struct pci_dev *pdev,
858 dev->dsr->resp_slot_dma = (u64)slot_dma; 867 dev->dsr->resp_slot_dma = (u64)slot_dma;
859 868
860 /* Async event ring */ 869 /* Async event ring */
861 dev->dsr->async_ring_pages.num_pages = 4; 870 dev->dsr->async_ring_pages.num_pages = PVRDMA_NUM_RING_PAGES;
862 ret = pvrdma_page_dir_init(dev, &dev->async_pdir, 871 ret = pvrdma_page_dir_init(dev, &dev->async_pdir,
863 dev->dsr->async_ring_pages.num_pages, true); 872 dev->dsr->async_ring_pages.num_pages, true);
864 if (ret) 873 if (ret)
@@ -867,7 +876,7 @@ static int pvrdma_pci_probe(struct pci_dev *pdev,
867 dev->dsr->async_ring_pages.pdir_dma = dev->async_pdir.dir_dma; 876 dev->dsr->async_ring_pages.pdir_dma = dev->async_pdir.dir_dma;
868 877
869 /* CQ notification ring */ 878 /* CQ notification ring */
870 dev->dsr->cq_ring_pages.num_pages = 4; 879 dev->dsr->cq_ring_pages.num_pages = PVRDMA_NUM_RING_PAGES;
871 ret = pvrdma_page_dir_init(dev, &dev->cq_pdir, 880 ret = pvrdma_page_dir_init(dev, &dev->cq_pdir,
872 dev->dsr->cq_ring_pages.num_pages, true); 881 dev->dsr->cq_ring_pages.num_pages, true);
873 if (ret) 882 if (ret)
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
index dbbfd35e7da7..30062aad3af1 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
@@ -170,8 +170,9 @@ static int pvrdma_set_sq_size(struct pvrdma_dev *dev, struct ib_qp_cap *req_cap,
170 sizeof(struct pvrdma_sge) * 170 sizeof(struct pvrdma_sge) *
171 qp->sq.max_sg); 171 qp->sq.max_sg);
172 /* Note: one extra page for the header. */ 172 /* Note: one extra page for the header. */
173 qp->npages_send = 1 + (qp->sq.wqe_cnt * qp->sq.wqe_size + 173 qp->npages_send = PVRDMA_QP_NUM_HEADER_PAGES +
174 PAGE_SIZE - 1) / PAGE_SIZE; 174 (qp->sq.wqe_cnt * qp->sq.wqe_size + PAGE_SIZE - 1) /
175 PAGE_SIZE;
175 176
176 return 0; 177 return 0;
177} 178}
@@ -288,7 +289,7 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
288 qp->npages = qp->npages_send + qp->npages_recv; 289 qp->npages = qp->npages_send + qp->npages_recv;
289 290
290 /* Skip header page. */ 291 /* Skip header page. */
291 qp->sq.offset = PAGE_SIZE; 292 qp->sq.offset = PVRDMA_QP_NUM_HEADER_PAGES * PAGE_SIZE;
292 293
293 /* Recv queue pages are after send pages. */ 294 /* Recv queue pages are after send pages. */
294 qp->rq.offset = qp->npages_send * PAGE_SIZE; 295 qp->rq.offset = qp->npages_send * PAGE_SIZE;
@@ -341,7 +342,7 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
341 cmd->qp_type = ib_qp_type_to_pvrdma(init_attr->qp_type); 342 cmd->qp_type = ib_qp_type_to_pvrdma(init_attr->qp_type);
342 cmd->access_flags = IB_ACCESS_LOCAL_WRITE; 343 cmd->access_flags = IB_ACCESS_LOCAL_WRITE;
343 cmd->total_chunks = qp->npages; 344 cmd->total_chunks = qp->npages;
344 cmd->send_chunks = qp->npages_send - 1; 345 cmd->send_chunks = qp->npages_send - PVRDMA_QP_NUM_HEADER_PAGES;
345 cmd->pdir_dma = qp->pdir.dir_dma; 346 cmd->pdir_dma = qp->pdir.dir_dma;
346 347
347 dev_dbg(&dev->pdev->dev, "create queuepair with %d, %d, %d, %d\n", 348 dev_dbg(&dev->pdev->dev, "create queuepair with %d, %d, %d, %d\n",
@@ -554,13 +555,13 @@ out:
554 return ret; 555 return ret;
555} 556}
556 557
557static inline void *get_sq_wqe(struct pvrdma_qp *qp, int n) 558static inline void *get_sq_wqe(struct pvrdma_qp *qp, unsigned int n)
558{ 559{
559 return pvrdma_page_dir_get_ptr(&qp->pdir, 560 return pvrdma_page_dir_get_ptr(&qp->pdir,
560 qp->sq.offset + n * qp->sq.wqe_size); 561 qp->sq.offset + n * qp->sq.wqe_size);
561} 562}
562 563
563static inline void *get_rq_wqe(struct pvrdma_qp *qp, int n) 564static inline void *get_rq_wqe(struct pvrdma_qp *qp, unsigned int n)
564{ 565{
565 return pvrdma_page_dir_get_ptr(&qp->pdir, 566 return pvrdma_page_dir_get_ptr(&qp->pdir,
566 qp->rq.offset + n * qp->rq.wqe_size); 567 qp->rq.offset + n * qp->rq.wqe_size);
@@ -598,9 +599,7 @@ int pvrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
598 unsigned long flags; 599 unsigned long flags;
599 struct pvrdma_sq_wqe_hdr *wqe_hdr; 600 struct pvrdma_sq_wqe_hdr *wqe_hdr;
600 struct pvrdma_sge *sge; 601 struct pvrdma_sge *sge;
601 int i, index; 602 int i, ret;
602 int nreq;
603 int ret;
604 603
605 /* 604 /*
606 * In states lower than RTS, we can fail immediately. In other states, 605 * In states lower than RTS, we can fail immediately. In other states,
@@ -613,9 +612,8 @@ int pvrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
613 612
614 spin_lock_irqsave(&qp->sq.lock, flags); 613 spin_lock_irqsave(&qp->sq.lock, flags);
615 614
616 index = pvrdma_idx(&qp->sq.ring->prod_tail, qp->sq.wqe_cnt); 615 while (wr) {
617 for (nreq = 0; wr; nreq++, wr = wr->next) { 616 unsigned int tail = 0;
618 unsigned int tail;
619 617
620 if (unlikely(!pvrdma_idx_ring_has_space( 618 if (unlikely(!pvrdma_idx_ring_has_space(
621 qp->sq.ring, qp->sq.wqe_cnt, &tail))) { 619 qp->sq.ring, qp->sq.wqe_cnt, &tail))) {
@@ -680,7 +678,7 @@ int pvrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
680 } 678 }
681 } 679 }
682 680
683 wqe_hdr = (struct pvrdma_sq_wqe_hdr *)get_sq_wqe(qp, index); 681 wqe_hdr = (struct pvrdma_sq_wqe_hdr *)get_sq_wqe(qp, tail);
684 memset(wqe_hdr, 0, sizeof(*wqe_hdr)); 682 memset(wqe_hdr, 0, sizeof(*wqe_hdr));
685 wqe_hdr->wr_id = wr->wr_id; 683 wqe_hdr->wr_id = wr->wr_id;
686 wqe_hdr->num_sge = wr->num_sge; 684 wqe_hdr->num_sge = wr->num_sge;
@@ -771,12 +769,11 @@ int pvrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
771 /* Make sure wqe is written before index update */ 769 /* Make sure wqe is written before index update */
772 smp_wmb(); 770 smp_wmb();
773 771
774 index++;
775 if (unlikely(index >= qp->sq.wqe_cnt))
776 index = 0;
777 /* Update shared sq ring */ 772 /* Update shared sq ring */
778 pvrdma_idx_ring_inc(&qp->sq.ring->prod_tail, 773 pvrdma_idx_ring_inc(&qp->sq.ring->prod_tail,
779 qp->sq.wqe_cnt); 774 qp->sq.wqe_cnt);
775
776 wr = wr->next;
780 } 777 }
781 778
782 ret = 0; 779 ret = 0;
@@ -806,7 +803,6 @@ int pvrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
806 struct pvrdma_qp *qp = to_vqp(ibqp); 803 struct pvrdma_qp *qp = to_vqp(ibqp);
807 struct pvrdma_rq_wqe_hdr *wqe_hdr; 804 struct pvrdma_rq_wqe_hdr *wqe_hdr;
808 struct pvrdma_sge *sge; 805 struct pvrdma_sge *sge;
809 int index, nreq;
810 int ret = 0; 806 int ret = 0;
811 int i; 807 int i;
812 808
@@ -821,9 +817,8 @@ int pvrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
821 817
822 spin_lock_irqsave(&qp->rq.lock, flags); 818 spin_lock_irqsave(&qp->rq.lock, flags);
823 819
824 index = pvrdma_idx(&qp->rq.ring->prod_tail, qp->rq.wqe_cnt); 820 while (wr) {
825 for (nreq = 0; wr; nreq++, wr = wr->next) { 821 unsigned int tail = 0;
826 unsigned int tail;
827 822
828 if (unlikely(wr->num_sge > qp->rq.max_sg || 823 if (unlikely(wr->num_sge > qp->rq.max_sg ||
829 wr->num_sge < 0)) { 824 wr->num_sge < 0)) {
@@ -843,7 +838,7 @@ int pvrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
843 goto out; 838 goto out;
844 } 839 }
845 840
846 wqe_hdr = (struct pvrdma_rq_wqe_hdr *)get_rq_wqe(qp, index); 841 wqe_hdr = (struct pvrdma_rq_wqe_hdr *)get_rq_wqe(qp, tail);
847 wqe_hdr->wr_id = wr->wr_id; 842 wqe_hdr->wr_id = wr->wr_id;
848 wqe_hdr->num_sge = wr->num_sge; 843 wqe_hdr->num_sge = wr->num_sge;
849 wqe_hdr->total_len = 0; 844 wqe_hdr->total_len = 0;
@@ -859,12 +854,11 @@ int pvrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
859 /* Make sure wqe is written before index update */ 854 /* Make sure wqe is written before index update */
860 smp_wmb(); 855 smp_wmb();
861 856
862 index++;
863 if (unlikely(index >= qp->rq.wqe_cnt))
864 index = 0;
865 /* Update shared rq ring */ 857 /* Update shared rq ring */
866 pvrdma_idx_ring_inc(&qp->rq.ring->prod_tail, 858 pvrdma_idx_ring_inc(&qp->rq.ring->prod_tail,
867 qp->rq.wqe_cnt); 859 qp->rq.wqe_cnt);
860
861 wr = wr->next;
868 } 862 }
869 863
870 spin_unlock_irqrestore(&qp->rq.lock, flags); 864 spin_unlock_irqrestore(&qp->rq.lock, flags);
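
Dropping the locally maintained index in favour of the tail reported by pvrdma_idx_ring_has_space() removes a second, divergence-prone copy of the producer position: there is exactly one, and it lives in the shared ring state, so the slot to write is whatever the space check says it is. Schematically, with hypothetical demo ring helpers rather than the pvrdma structures:

#include <linux/errno.h>
#include <asm/barrier.h>

struct demo_ring;			/* opaque; hypothetical */
struct demo_wr { struct demo_wr *next; };

bool demo_ring_has_space(struct demo_ring *ring, unsigned int *tail);
void demo_write_wqe(struct demo_ring *ring, unsigned int slot,
		    struct demo_wr *wr);
void demo_ring_produce(struct demo_ring *ring);

static int demo_post_all(struct demo_ring *ring, struct demo_wr *wr)
{
	while (wr) {
		unsigned int tail = 0;

		if (!demo_ring_has_space(ring, &tail))
			return -ENOMEM;

		demo_write_wqe(ring, tail, wr);	/* slot comes from the ring */
		smp_wmb();			/* publish wqe before index */
		demo_ring_produce(ring);	/* advance shared producer tail */

		wr = wr->next;
	}
	return 0;
}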
diff --git a/drivers/infiniband/sw/rdmavt/mmap.c b/drivers/infiniband/sw/rdmavt/mmap.c
index e202b8142759..6b712eecbd37 100644
--- a/drivers/infiniband/sw/rdmavt/mmap.c
+++ b/drivers/infiniband/sw/rdmavt/mmap.c
@@ -170,9 +170,9 @@ struct rvt_mmap_info *rvt_create_mmap_info(struct rvt_dev_info *rdi,
170 170
171 spin_lock_irq(&rdi->mmap_offset_lock); 171 spin_lock_irq(&rdi->mmap_offset_lock);
172 if (rdi->mmap_offset == 0) 172 if (rdi->mmap_offset == 0)
173 rdi->mmap_offset = PAGE_SIZE; 173 rdi->mmap_offset = ALIGN(PAGE_SIZE, SHMLBA);
174 ip->offset = rdi->mmap_offset; 174 ip->offset = rdi->mmap_offset;
175 rdi->mmap_offset += size; 175 rdi->mmap_offset += ALIGN(size, SHMLBA);
176 spin_unlock_irq(&rdi->mmap_offset_lock); 176 spin_unlock_irq(&rdi->mmap_offset_lock);
177 177
178 INIT_LIST_HEAD(&ip->pending_mmaps); 178 INIT_LIST_HEAD(&ip->pending_mmaps);
diff --git a/drivers/infiniband/sw/rxe/Kconfig b/drivers/infiniband/sw/rxe/Kconfig
index 7d1ac27ed251..6332dedc11e8 100644
--- a/drivers/infiniband/sw/rxe/Kconfig
+++ b/drivers/infiniband/sw/rxe/Kconfig
@@ -22,4 +22,4 @@ config RDMA_RXE
22 To configure and work with soft-RoCE driver please use the 22 To configure and work with soft-RoCE driver please use the
23 following wiki page under "configure Soft-RoCE (RXE)" section: 23 following wiki page under "configure Soft-RoCE (RXE)" section:
24 24
25 https://github.com/SoftRoCE/rxe-dev/wiki/rxe-dev:-Home 25 https://github.com/linux-rdma/rdma-core/blob/master/Documentation/rxe.md
diff --git a/drivers/infiniband/sw/rxe/rxe_mmap.c b/drivers/infiniband/sw/rxe/rxe_mmap.c
index c572a4c09359..bd812e00988e 100644
--- a/drivers/infiniband/sw/rxe/rxe_mmap.c
+++ b/drivers/infiniband/sw/rxe/rxe_mmap.c
@@ -156,10 +156,10 @@ struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *rxe,
156 spin_lock_bh(&rxe->mmap_offset_lock); 156 spin_lock_bh(&rxe->mmap_offset_lock);
157 157
158 if (rxe->mmap_offset == 0) 158 if (rxe->mmap_offset == 0)
159 rxe->mmap_offset = PAGE_SIZE; 159 rxe->mmap_offset = ALIGN(PAGE_SIZE, SHMLBA);
160 160
161 ip->info.offset = rxe->mmap_offset; 161 ip->info.offset = rxe->mmap_offset;
162 rxe->mmap_offset += size; 162 rxe->mmap_offset += ALIGN(size, SHMLBA);
163 163
164 spin_unlock_bh(&rxe->mmap_offset_lock); 164 spin_unlock_bh(&rxe->mmap_offset_lock);
165 165
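
Both mmap allocators now round their offsets to SHMLBA, the architecture's shared-memory alignment constant. On machines with virtually indexed caches (some ARM and MIPS parts), user mappings of the same physical page must be SHMLBA-aligned to avoid cache aliasing, so the cookie handed to userspace has to respect that granularity. The pattern in isolation (locking left to the caller, as in both drivers):

#include <linux/kernel.h>	/* ALIGN() */
#include <linux/mm.h>		/* PAGE_SIZE */
#include <asm/shmparam.h>	/* SHMLBA */

static unsigned long demo_mmap_offset;	/* protected by caller's lock */

/* Hand out SHMLBA-aligned, non-overlapping mmap cookies. */
static unsigned long demo_alloc_offset(unsigned long size)
{
	unsigned long off;

	if (demo_mmap_offset == 0)
		demo_mmap_offset = ALIGN(PAGE_SIZE, SHMLBA);

	off = demo_mmap_offset;
	demo_mmap_offset += ALIGN(size, SHMLBA);
	return off;
}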
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index dbfde0dc6ff7..9f95f50b2909 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -729,11 +729,11 @@ next_wqe:
729 ret = rxe_xmit_packet(to_rdev(qp->ibqp.device), qp, &pkt, skb); 729 ret = rxe_xmit_packet(to_rdev(qp->ibqp.device), qp, &pkt, skb);
730 if (ret) { 730 if (ret) {
731 qp->need_req_skb = 1; 731 qp->need_req_skb = 1;
732 kfree_skb(skb);
733 732
734 rollback_state(wqe, qp, &rollback_wqe, rollback_psn); 733 rollback_state(wqe, qp, &rollback_wqe, rollback_psn);
735 734
736 if (ret == -EAGAIN) { 735 if (ret == -EAGAIN) {
736 kfree_skb(skb);
737 rxe_run_task(&qp->req.task, 1); 737 rxe_run_task(&qp->req.task, 1);
738 goto exit; 738 goto exit;
739 } 739 }
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index d404a8aba7af..c9dd385ce62e 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -813,18 +813,17 @@ static enum resp_states execute(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
813 WARN_ON_ONCE(1); 813 WARN_ON_ONCE(1);
814 } 814 }
815 815
816 /* We successfully processed this new request. */
817 qp->resp.msn++;
818
819 /* next expected psn, read handles this separately */ 816 /* next expected psn, read handles this separately */
820 qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK; 817 qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK;
821 818
822 qp->resp.opcode = pkt->opcode; 819 qp->resp.opcode = pkt->opcode;
823 qp->resp.status = IB_WC_SUCCESS; 820 qp->resp.status = IB_WC_SUCCESS;
824 821
825 if (pkt->mask & RXE_COMP_MASK) 822 if (pkt->mask & RXE_COMP_MASK) {
823 /* We successfully processed this new request. */
824 qp->resp.msn++;
826 return RESPST_COMPLETE; 825 return RESPST_COMPLETE;
827 else if (qp_type(qp) == IB_QPT_RC) 826 } else if (qp_type(qp) == IB_QPT_RC)
828 return RESPST_ACKNOWLEDGE; 827 return RESPST_ACKNOWLEDGE;
829 else 828 else
830 return RESPST_CLEANUP; 829 return RESPST_CLEANUP;
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 9d0b22ad58c1..c1ae4aeae2f9 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -430,6 +430,7 @@ struct iser_fr_desc {
430 struct list_head list; 430 struct list_head list;
431 struct iser_reg_resources rsc; 431 struct iser_reg_resources rsc;
432 struct iser_pi_context *pi_ctx; 432 struct iser_pi_context *pi_ctx;
433 struct list_head all_list;
433}; 434};
434 435
435/** 436/**
@@ -443,6 +444,7 @@ struct iser_fr_pool {
443 struct list_head list; 444 struct list_head list;
444 spinlock_t lock; 445 spinlock_t lock;
445 int size; 446 int size;
447 struct list_head all_list;
446}; 448};
447 449
448/** 450/**
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 30b622f2ab73..c538a38c91ce 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -362,6 +362,7 @@ int iser_alloc_fastreg_pool(struct ib_conn *ib_conn,
362 int i, ret; 362 int i, ret;
363 363
364 INIT_LIST_HEAD(&fr_pool->list); 364 INIT_LIST_HEAD(&fr_pool->list);
365 INIT_LIST_HEAD(&fr_pool->all_list);
365 spin_lock_init(&fr_pool->lock); 366 spin_lock_init(&fr_pool->lock);
366 fr_pool->size = 0; 367 fr_pool->size = 0;
367 for (i = 0; i < cmds_max; i++) { 368 for (i = 0; i < cmds_max; i++) {
@@ -373,6 +374,7 @@ int iser_alloc_fastreg_pool(struct ib_conn *ib_conn,
373 } 374 }
374 375
375 list_add_tail(&desc->list, &fr_pool->list); 376 list_add_tail(&desc->list, &fr_pool->list);
377 list_add_tail(&desc->all_list, &fr_pool->all_list);
376 fr_pool->size++; 378 fr_pool->size++;
377 } 379 }
378 380
@@ -392,13 +394,13 @@ void iser_free_fastreg_pool(struct ib_conn *ib_conn)
392 struct iser_fr_desc *desc, *tmp; 394 struct iser_fr_desc *desc, *tmp;
393 int i = 0; 395 int i = 0;
394 396
395 if (list_empty(&fr_pool->list)) 397 if (list_empty(&fr_pool->all_list))
396 return; 398 return;
397 399
398 iser_info("freeing conn %p fr pool\n", ib_conn); 400 iser_info("freeing conn %p fr pool\n", ib_conn);
399 401
400 list_for_each_entry_safe(desc, tmp, &fr_pool->list, list) { 402 list_for_each_entry_safe(desc, tmp, &fr_pool->all_list, all_list) {
401 list_del(&desc->list); 403 list_del(&desc->all_list);
402 iser_free_reg_res(&desc->rsc); 404 iser_free_reg_res(&desc->rsc);
403 if (desc->pi_ctx) 405 if (desc->pi_ctx)
404 iser_free_pi_ctx(desc->pi_ctx); 406 iser_free_pi_ctx(desc->pi_ctx);
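
The second list fixes a leak: descriptors come off the free list while a registration is in flight, so tearing the pool down by walking only `list` misses any descriptor currently in use. Tracking every allocation on a separate all_list lets teardown free the busy ones too. The shape of the fix, as a sketch:

#include <linux/list.h>
#include <linux/slab.h>

struct demo_desc {
	struct list_head node;		/* on the free list while idle */
	struct list_head all_node;	/* always on the pool's all_list */
};

struct demo_pool {
	struct list_head free_list;
	struct list_head all_list;
};

static void demo_pool_destroy(struct demo_pool *pool)
{
	struct demo_desc *d, *tmp;

	/* Walk all_list, not free_list: busy descriptors are freed too. */
	list_for_each_entry_safe(d, tmp, &pool->all_list, all_node) {
		list_del(&d->all_node);
		kfree(d);
	}
}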
diff --git a/drivers/input/joystick/iforce/iforce-usb.c b/drivers/input/joystick/iforce/iforce-usb.c
index d96aa27dfcdc..db64adfbe1af 100644
--- a/drivers/input/joystick/iforce/iforce-usb.c
+++ b/drivers/input/joystick/iforce/iforce-usb.c
@@ -141,6 +141,9 @@ static int iforce_usb_probe(struct usb_interface *intf,
141 141
142 interface = intf->cur_altsetting; 142 interface = intf->cur_altsetting;
143 143
144 if (interface->desc.bNumEndpoints < 2)
145 return -ENODEV;
146
144 epirq = &interface->endpoint[0].desc; 147 epirq = &interface->endpoint[0].desc;
145 epout = &interface->endpoint[1].desc; 148 epout = &interface->endpoint[1].desc;
146 149
diff --git a/drivers/input/misc/cm109.c b/drivers/input/misc/cm109.c
index 9cc6d057c302..23c191a2a071 100644
--- a/drivers/input/misc/cm109.c
+++ b/drivers/input/misc/cm109.c
@@ -700,6 +700,10 @@ static int cm109_usb_probe(struct usb_interface *intf,
700 int error = -ENOMEM; 700 int error = -ENOMEM;
701 701
702 interface = intf->cur_altsetting; 702 interface = intf->cur_altsetting;
703
704 if (interface->desc.bNumEndpoints < 1)
705 return -ENODEV;
706
703 endpoint = &interface->endpoint[0].desc; 707 endpoint = &interface->endpoint[0].desc;
704 708
705 if (!usb_endpoint_is_int_in(endpoint)) 709 if (!usb_endpoint_is_int_in(endpoint))
diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
index 9c0ea36913b4..f4e8fbec6a94 100644
--- a/drivers/input/misc/ims-pcu.c
+++ b/drivers/input/misc/ims-pcu.c
@@ -1667,6 +1667,10 @@ static int ims_pcu_parse_cdc_data(struct usb_interface *intf, struct ims_pcu *pc
1667 return -EINVAL; 1667 return -EINVAL;
1668 1668
1669 alt = pcu->ctrl_intf->cur_altsetting; 1669 alt = pcu->ctrl_intf->cur_altsetting;
1670
1671 if (alt->desc.bNumEndpoints < 1)
1672 return -ENODEV;
1673
1670 pcu->ep_ctrl = &alt->endpoint[0].desc; 1674 pcu->ep_ctrl = &alt->endpoint[0].desc;
1671 pcu->max_ctrl_size = usb_endpoint_maxp(pcu->ep_ctrl); 1675 pcu->max_ctrl_size = usb_endpoint_maxp(pcu->ep_ctrl);
1672 1676
diff --git a/drivers/input/misc/yealink.c b/drivers/input/misc/yealink.c
index 79c964c075f1..6e7ff9561d92 100644
--- a/drivers/input/misc/yealink.c
+++ b/drivers/input/misc/yealink.c
@@ -875,6 +875,10 @@ static int usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
875 int ret, pipe, i; 875 int ret, pipe, i;
876 876
877 interface = intf->cur_altsetting; 877 interface = intf->cur_altsetting;
878
879 if (interface->desc.bNumEndpoints < 1)
880 return -ENODEV;
881
878 endpoint = &interface->endpoint[0].desc; 882 endpoint = &interface->endpoint[0].desc;
879 if (!usb_endpoint_is_int_in(endpoint)) 883 if (!usb_endpoint_is_int_in(endpoint))
880 return -ENODEV; 884 return -ENODEV;
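
All four input drivers above gain the same hardening: a malicious or malformed USB device can present an interface descriptor with fewer endpoints than the driver expects, and indexing interface->endpoint[] past bNumEndpoints is an out-of-bounds read. Checking the count before the first dereference is the standard fix; a sketch of a probe prologue that needs two endpoints:

#include <linux/usb.h>

static int demo_probe(struct usb_interface *intf,
		      const struct usb_device_id *id)
{
	struct usb_host_interface *alt = intf->cur_altsetting;
	struct usb_endpoint_descriptor *ep_in, *ep_out;

	/* Never trust device-supplied descriptor counts. */
	if (alt->desc.bNumEndpoints < 2)
		return -ENODEV;

	ep_in  = &alt->endpoint[0].desc;
	ep_out = &alt->endpoint[1].desc;

	if (!usb_endpoint_is_int_in(ep_in) ||
	    !usb_endpoint_is_int_out(ep_out))
		return -ENODEV;

	return 0;	/* rest of the probe elided */
}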
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index 72b28ebfe360..f210e19ddba6 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -1282,10 +1282,8 @@ static int alps_decode_ss4_v2(struct alps_fields *f,
1282 /* handle buttons */ 1282 /* handle buttons */
1283 if (pkt_id == SS4_PACKET_ID_STICK) { 1283 if (pkt_id == SS4_PACKET_ID_STICK) {
1284 f->ts_left = !!(SS4_BTN_V2(p) & 0x01); 1284 f->ts_left = !!(SS4_BTN_V2(p) & 0x01);
1285 if (!(priv->flags & ALPS_BUTTONPAD)) { 1285 f->ts_right = !!(SS4_BTN_V2(p) & 0x02);
1286 f->ts_right = !!(SS4_BTN_V2(p) & 0x02); 1286 f->ts_middle = !!(SS4_BTN_V2(p) & 0x04);
1287 f->ts_middle = !!(SS4_BTN_V2(p) & 0x04);
1288 }
1289 } else { 1287 } else {
1290 f->left = !!(SS4_BTN_V2(p) & 0x01); 1288 f->left = !!(SS4_BTN_V2(p) & 0x01);
1291 if (!(priv->flags & ALPS_BUTTONPAD)) { 1289 if (!(priv->flags & ALPS_BUTTONPAD)) {
@@ -2462,14 +2460,34 @@ static int alps_update_device_area_ss4_v2(unsigned char otp[][4],
2462 int num_y_electrode; 2460 int num_y_electrode;
2463 int x_pitch, y_pitch, x_phys, y_phys; 2461 int x_pitch, y_pitch, x_phys, y_phys;
2464 2462
2465 num_x_electrode = SS4_NUMSENSOR_XOFFSET + (otp[1][0] & 0x0F); 2463 if (IS_SS4PLUS_DEV(priv->dev_id)) {
2466 num_y_electrode = SS4_NUMSENSOR_YOFFSET + ((otp[1][0] >> 4) & 0x0F); 2464 num_x_electrode =
2465 SS4PLUS_NUMSENSOR_XOFFSET + (otp[0][2] & 0x0F);
2466 num_y_electrode =
2467 SS4PLUS_NUMSENSOR_YOFFSET + ((otp[0][2] >> 4) & 0x0F);
2468
2469 priv->x_max =
2470 (num_x_electrode - 1) * SS4PLUS_COUNT_PER_ELECTRODE;
2471 priv->y_max =
2472 (num_y_electrode - 1) * SS4PLUS_COUNT_PER_ELECTRODE;
2467 2473
2468 priv->x_max = (num_x_electrode - 1) * SS4_COUNT_PER_ELECTRODE; 2474 x_pitch = (otp[0][1] & 0x0F) + SS4PLUS_MIN_PITCH_MM;
2469 priv->y_max = (num_y_electrode - 1) * SS4_COUNT_PER_ELECTRODE; 2475 y_pitch = ((otp[0][1] >> 4) & 0x0F) + SS4PLUS_MIN_PITCH_MM;
2470 2476
2471 x_pitch = ((otp[1][2] >> 2) & 0x07) + SS4_MIN_PITCH_MM; 2477 } else {
2472 y_pitch = ((otp[1][2] >> 5) & 0x07) + SS4_MIN_PITCH_MM; 2478 num_x_electrode =
2479 SS4_NUMSENSOR_XOFFSET + (otp[1][0] & 0x0F);
2480 num_y_electrode =
2481 SS4_NUMSENSOR_YOFFSET + ((otp[1][0] >> 4) & 0x0F);
2482
2483 priv->x_max =
2484 (num_x_electrode - 1) * SS4_COUNT_PER_ELECTRODE;
2485 priv->y_max =
2486 (num_y_electrode - 1) * SS4_COUNT_PER_ELECTRODE;
2487
2488 x_pitch = ((otp[1][2] >> 2) & 0x07) + SS4_MIN_PITCH_MM;
2489 y_pitch = ((otp[1][2] >> 5) & 0x07) + SS4_MIN_PITCH_MM;
2490 }
2473 2491
2474 x_phys = x_pitch * (num_x_electrode - 1); /* In 0.1 mm units */ 2492 x_phys = x_pitch * (num_x_electrode - 1); /* In 0.1 mm units */
2475 y_phys = y_pitch * (num_y_electrode - 1); /* In 0.1 mm units */ 2493 y_phys = y_pitch * (num_y_electrode - 1); /* In 0.1 mm units */
@@ -2485,7 +2503,10 @@ static int alps_update_btn_info_ss4_v2(unsigned char otp[][4],
2485{ 2503{
2486 unsigned char is_btnless; 2504 unsigned char is_btnless;
2487 2505
2488 is_btnless = (otp[1][1] >> 3) & 0x01; 2506 if (IS_SS4PLUS_DEV(priv->dev_id))
2507 is_btnless = (otp[1][0] >> 1) & 0x01;
2508 else
2509 is_btnless = (otp[1][1] >> 3) & 0x01;
2489 2510
2490 if (is_btnless) 2511 if (is_btnless)
2491 priv->flags |= ALPS_BUTTONPAD; 2512 priv->flags |= ALPS_BUTTONPAD;
@@ -2493,6 +2514,21 @@ static int alps_update_btn_info_ss4_v2(unsigned char otp[][4],
2493 return 0; 2514 return 0;
2494} 2515}
2495 2516
2517static int alps_update_dual_info_ss4_v2(unsigned char otp[][4],
2518 struct alps_data *priv)
2519{
2520 bool is_dual = false;
2521
2522 if (IS_SS4PLUS_DEV(priv->dev_id))
2523 is_dual = (otp[0][0] >> 4) & 0x01;
2524
2525 if (is_dual)
2526 priv->flags |= ALPS_DUALPOINT |
2527 ALPS_DUALPOINT_WITH_PRESSURE;
2528
2529 return 0;
2530}
2531
2496static int alps_set_defaults_ss4_v2(struct psmouse *psmouse, 2532static int alps_set_defaults_ss4_v2(struct psmouse *psmouse,
2497 struct alps_data *priv) 2533 struct alps_data *priv)
2498{ 2534{
@@ -2508,6 +2544,8 @@ static int alps_set_defaults_ss4_v2(struct psmouse *psmouse,
2508 2544
2509 alps_update_btn_info_ss4_v2(otp, priv); 2545 alps_update_btn_info_ss4_v2(otp, priv);
2510 2546
2547 alps_update_dual_info_ss4_v2(otp, priv);
2548
2511 return 0; 2549 return 0;
2512} 2550}
2513 2551
@@ -2753,10 +2791,6 @@ static int alps_set_protocol(struct psmouse *psmouse,
2753 if (alps_set_defaults_ss4_v2(psmouse, priv)) 2791 if (alps_set_defaults_ss4_v2(psmouse, priv))
2754 return -EIO; 2792 return -EIO;
2755 2793
2756 if (priv->fw_ver[1] == 0x1)
2757 priv->flags |= ALPS_DUALPOINT |
2758 ALPS_DUALPOINT_WITH_PRESSURE;
2759
2760 break; 2794 break;
2761 } 2795 }
2762 2796
@@ -2827,10 +2861,7 @@ static int alps_identify(struct psmouse *psmouse, struct alps_data *priv)
2827 ec[2] >= 0x90 && ec[2] <= 0x9d) { 2861 ec[2] >= 0x90 && ec[2] <= 0x9d) {
2828 protocol = &alps_v3_protocol_data; 2862 protocol = &alps_v3_protocol_data;
2829 } else if (e7[0] == 0x73 && e7[1] == 0x03 && 2863 } else if (e7[0] == 0x73 && e7[1] == 0x03 &&
2830 e7[2] == 0x14 && ec[1] == 0x02) { 2864 (e7[2] == 0x14 || e7[2] == 0x28)) {
2831 protocol = &alps_v8_protocol_data;
2832 } else if (e7[0] == 0x73 && e7[1] == 0x03 &&
2833 e7[2] == 0x28 && ec[1] == 0x01) {
2834 protocol = &alps_v8_protocol_data; 2865 protocol = &alps_v8_protocol_data;
2835 } else { 2866 } else {
2836 psmouse_dbg(psmouse, 2867 psmouse_dbg(psmouse,
@@ -2840,7 +2871,8 @@ static int alps_identify(struct psmouse *psmouse, struct alps_data *priv)
2840 } 2871 }
2841 2872
2842 if (priv) { 2873 if (priv) {
2843 /* Save the Firmware version */ 2874 /* Save Device ID and Firmware version */
2875 memcpy(priv->dev_id, e7, 3);
2844 memcpy(priv->fw_ver, ec, 3); 2876 memcpy(priv->fw_ver, ec, 3);
2845 error = alps_set_protocol(psmouse, priv, protocol); 2877 error = alps_set_protocol(psmouse, priv, protocol);
2846 if (error) 2878 if (error)
diff --git a/drivers/input/mouse/alps.h b/drivers/input/mouse/alps.h
index 6d279aa27cb9..4334f2805d93 100644
--- a/drivers/input/mouse/alps.h
+++ b/drivers/input/mouse/alps.h
@@ -54,6 +54,16 @@ enum SS4_PACKET_ID {
54 54
55#define SS4_MASK_NORMAL_BUTTONS 0x07 55#define SS4_MASK_NORMAL_BUTTONS 0x07
56 56
57#define SS4PLUS_COUNT_PER_ELECTRODE 128
58#define SS4PLUS_NUMSENSOR_XOFFSET 16
59#define SS4PLUS_NUMSENSOR_YOFFSET 5
60#define SS4PLUS_MIN_PITCH_MM 37
61
62#define IS_SS4PLUS_DEV(_b) (((_b[0]) == 0x73) && \
63 ((_b[1]) == 0x03) && \
64 ((_b[2]) == 0x28) \
65 )
66
57#define SS4_IS_IDLE_V2(_b) (((_b[0]) == 0x18) && \ 67#define SS4_IS_IDLE_V2(_b) (((_b[0]) == 0x18) && \
58 ((_b[1]) == 0x10) && \ 68 ((_b[1]) == 0x10) && \
59 ((_b[2]) == 0x00) && \ 69 ((_b[2]) == 0x00) && \
@@ -283,6 +293,7 @@ struct alps_data {
283 int addr_command; 293 int addr_command;
284 u16 proto_version; 294 u16 proto_version;
285 u8 byte0, mask0; 295 u8 byte0, mask0;
296 u8 dev_id[3];
286 u8 fw_ver[3]; 297 u8 fw_ver[3];
287 int flags; 298 int flags;
288 int x_max; 299 int x_max;
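
The ALPS hunks above save the raw E7 report bytes as a device ID and key the OTP layout off it: on SS4 "plus" devices the electrode counts and pitch move to otp[0] with their own offsets and 128 counts per electrode, and the buttonless/dualpoint bits move as well. A standalone model of the detection check, mirroring the IS_SS4PLUS_DEV() macro above (the helper name is hypothetical):

    #include <stdbool.h>
    #include <stdint.h>

    /* SS4 "plus" signature: E7 response 0x73 0x03 0x28. */
    static bool is_ss4plus_dev(const uint8_t dev_id[3])
    {
            return dev_id[0] == 0x73 && dev_id[1] == 0x03 &&
                   dev_id[2] == 0x28;
    }
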
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index 352050e9031d..d5ab9ddef3e3 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -218,17 +218,19 @@ static int elan_query_product(struct elan_tp_data *data)
218 218
219static int elan_check_ASUS_special_fw(struct elan_tp_data *data) 219static int elan_check_ASUS_special_fw(struct elan_tp_data *data)
220{ 220{
221 if (data->ic_type != 0x0E) 221 if (data->ic_type == 0x0E) {
222 return false; 222 switch (data->product_id) {
223 223 case 0x05 ... 0x07:
224 switch (data->product_id) { 224 case 0x09:
225 case 0x05 ... 0x07: 225 case 0x13:
226 case 0x09: 226 return true;
227 case 0x13: 227 }
228 } else if (data->ic_type == 0x08 && data->product_id == 0x26) {
229 /* ASUS EeeBook X205TA */
228 return true; 230 return true;
229 default:
230 return false;
231 } 231 }
232
233 return false;
232} 234}
233 235
234static int __elan_initialize(struct elan_tp_data *data) 236static int __elan_initialize(struct elan_tp_data *data)
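
The rewritten elan_check_ASUS_special_fw() keeps the case-range syntax (case 0x05 ... 0x07:), which is a GNU C extension rather than ISO C, while adding an ic_type 0x08 branch for the EeeBook X205TA. A compilable sketch of the same dispatch shape (values copied from the hunk, function name hypothetical):

    #include <stdbool.h>
    #include <stdint.h>

    static bool is_special_fw(uint8_t ic_type, uint8_t product_id)
    {
            if (ic_type == 0x0E) {
                    switch (product_id) {
                    case 0x05 ... 0x07:     /* GNU case-range extension */
                    case 0x09:
                    case 0x13:
                            return true;
                    }
            } else if (ic_type == 0x08 && product_id == 0x26) {
                    return true;            /* ASUS EeeBook X205TA */
            }

            return false;
    }
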
diff --git a/drivers/input/rmi4/rmi_f30.c b/drivers/input/rmi4/rmi_f30.c
index 198678613382..34dfee555b20 100644
--- a/drivers/input/rmi4/rmi_f30.c
+++ b/drivers/input/rmi4/rmi_f30.c
@@ -170,6 +170,10 @@ static int rmi_f30_config(struct rmi_function *fn)
170 rmi_get_platform_data(fn->rmi_dev); 170 rmi_get_platform_data(fn->rmi_dev);
171 int error; 171 int error;
172 172
173 /* can happen if f30_data.disable is set */
174 if (!f30)
175 return 0;
176
173 if (pdata->f30_data.trackstick_buttons) { 177 if (pdata->f30_data.trackstick_buttons) {
174 /* Try [re-]establish link to F03. */ 178 /* Try [re-]establish link to F03. */
175 f30->f03 = rmi_find_function(fn->rmi_dev, 0x03); 179 f30->f03 = rmi_find_function(fn->rmi_dev, 0x03);
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 05afd16ea9c9..312bd6ca9198 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -120,6 +120,13 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
120 }, 120 },
121 }, 121 },
122 { 122 {
123 /* Dell Embedded Box PC 3000 */
124 .matches = {
125 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
126 DMI_MATCH(DMI_PRODUCT_NAME, "Embedded Box PC 3000"),
127 },
128 },
129 {
123 /* OQO Model 01 */ 130 /* OQO Model 01 */
124 .matches = { 131 .matches = {
125 DMI_MATCH(DMI_SYS_VENDOR, "OQO"), 132 DMI_MATCH(DMI_SYS_VENDOR, "OQO"),
@@ -513,6 +520,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
513 DMI_MATCH(DMI_PRODUCT_NAME, "IC4I"), 520 DMI_MATCH(DMI_PRODUCT_NAME, "IC4I"),
514 }, 521 },
515 }, 522 },
523 {
524 /* TUXEDO BU1406 */
525 .matches = {
526 DMI_MATCH(DMI_SYS_VENDOR, "Notebook"),
527 DMI_MATCH(DMI_PRODUCT_NAME, "N24_25BU"),
528 },
529 },
516 { } 530 { }
517}; 531};
518 532
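
Both i8042 additions follow the standard DMI quirk-table shape: every DMI_MATCH() within one entry must match for that entry to fire, and the table ends with an empty terminator. A hedged sketch of declaring and consulting such a table (vendor/product strings are placeholders):

    #include <linux/dmi.h>

    static const struct dmi_system_id example_quirks[] __initconst = {
            {
                    /* All matches in one entry must hit together. */
                    .matches = {
                            DMI_MATCH(DMI_SYS_VENDOR, "Some Vendor"),
                            DMI_MATCH(DMI_PRODUCT_NAME, "Some Model"),
                    },
            },
            { }     /* terminator */
    };

    static int __init example_init(void)
    {
            if (dmi_check_system(example_quirks))
                    pr_info("quirk applies to this machine\n");
            return 0;
    }
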
diff --git a/drivers/input/tablet/hanwang.c b/drivers/input/tablet/hanwang.c
index cd852059b99e..df4bea96d7ed 100644
--- a/drivers/input/tablet/hanwang.c
+++ b/drivers/input/tablet/hanwang.c
@@ -340,6 +340,9 @@ static int hanwang_probe(struct usb_interface *intf, const struct usb_device_id
340 int error; 340 int error;
341 int i; 341 int i;
342 342
343 if (intf->cur_altsetting->desc.bNumEndpoints < 1)
344 return -ENODEV;
345
343 hanwang = kzalloc(sizeof(struct hanwang), GFP_KERNEL); 346 hanwang = kzalloc(sizeof(struct hanwang), GFP_KERNEL);
344 input_dev = input_allocate_device(); 347 input_dev = input_allocate_device();
345 if (!hanwang || !input_dev) { 348 if (!hanwang || !input_dev) {
diff --git a/drivers/input/tablet/kbtab.c b/drivers/input/tablet/kbtab.c
index e850d7e8afbc..4d9d64908b59 100644
--- a/drivers/input/tablet/kbtab.c
+++ b/drivers/input/tablet/kbtab.c
@@ -122,6 +122,9 @@ static int kbtab_probe(struct usb_interface *intf, const struct usb_device_id *i
122 struct input_dev *input_dev; 122 struct input_dev *input_dev;
123 int error = -ENOMEM; 123 int error = -ENOMEM;
124 124
125 if (intf->cur_altsetting->desc.bNumEndpoints < 1)
126 return -ENODEV;
127
125 kbtab = kzalloc(sizeof(struct kbtab), GFP_KERNEL); 128 kbtab = kzalloc(sizeof(struct kbtab), GFP_KERNEL);
126 input_dev = input_allocate_device(); 129 input_dev = input_allocate_device();
127 if (!kbtab || !input_dev) 130 if (!kbtab || !input_dev)
diff --git a/drivers/input/touchscreen/sur40.c b/drivers/input/touchscreen/sur40.c
index aefb6e11f88a..4c0eecae065c 100644
--- a/drivers/input/touchscreen/sur40.c
+++ b/drivers/input/touchscreen/sur40.c
@@ -527,6 +527,9 @@ static int sur40_probe(struct usb_interface *interface,
527 if (iface_desc->desc.bInterfaceClass != 0xFF) 527 if (iface_desc->desc.bInterfaceClass != 0xFF)
528 return -ENODEV; 528 return -ENODEV;
529 529
530 if (iface_desc->desc.bNumEndpoints < 5)
531 return -ENODEV;
532
530 /* Use endpoint #4 (0x86). */ 533 /* Use endpoint #4 (0x86). */
531 endpoint = &iface_desc->endpoint[4].desc; 534 endpoint = &iface_desc->endpoint[4].desc;
532 if (endpoint->bEndpointAddress != TOUCH_ENDPOINT) 535 if (endpoint->bEndpointAddress != TOUCH_ENDPOINT)
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 98940d1392cb..b17536d6e69b 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -3202,7 +3202,7 @@ static void amd_iommu_get_resv_regions(struct device *dev,
3202 3202
3203 region = iommu_alloc_resv_region(MSI_RANGE_START, 3203 region = iommu_alloc_resv_region(MSI_RANGE_START,
3204 MSI_RANGE_END - MSI_RANGE_START + 1, 3204 MSI_RANGE_END - MSI_RANGE_START + 1,
3205 0, IOMMU_RESV_RESERVED); 3205 0, IOMMU_RESV_MSI);
3206 if (!region) 3206 if (!region)
3207 return; 3207 return;
3208 list_add_tail(&region->list, head); 3208 list_add_tail(&region->list, head);
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index 5806a6acc94e..591bb96047c9 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -1888,7 +1888,7 @@ static void arm_smmu_get_resv_regions(struct device *dev,
1888 int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO; 1888 int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
1889 1889
1890 region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH, 1890 region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
1891 prot, IOMMU_RESV_MSI); 1891 prot, IOMMU_RESV_SW_MSI);
1892 if (!region) 1892 if (!region)
1893 return; 1893 return;
1894 1894
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index abf6496843a6..b493c99e17f7 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -1608,7 +1608,7 @@ static void arm_smmu_get_resv_regions(struct device *dev,
1608 int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO; 1608 int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
1609 1609
1610 region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH, 1610 region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
1611 prot, IOMMU_RESV_MSI); 1611 prot, IOMMU_RESV_SW_MSI);
1612 if (!region) 1612 if (!region)
1613 return; 1613 return;
1614 1614
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index a7e0821c9967..c01bfcdb2383 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -512,7 +512,13 @@ static void sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
512 spin_lock_irqsave(&data->lock, flags); 512 spin_lock_irqsave(&data->lock, flags);
513 if (data->active && data->version >= MAKE_MMU_VER(3, 3)) { 513 if (data->active && data->version >= MAKE_MMU_VER(3, 3)) {
514 clk_enable(data->clk_master); 514 clk_enable(data->clk_master);
515 __sysmmu_tlb_invalidate_entry(data, iova, 1); 515 if (sysmmu_block(data)) {
516 if (data->version >= MAKE_MMU_VER(5, 0))
517 __sysmmu_tlb_invalidate(data);
518 else
519 __sysmmu_tlb_invalidate_entry(data, iova, 1);
520 sysmmu_unblock(data);
521 }
516 clk_disable(data->clk_master); 522 clk_disable(data->clk_master);
517 } 523 }
518 spin_unlock_irqrestore(&data->lock, flags); 524 spin_unlock_irqrestore(&data->lock, flags);
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 238ad3447712..d412a313a372 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -916,7 +916,7 @@ static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devf
916 * which we used for the IOMMU lookup. Strictly speaking 916 * which we used for the IOMMU lookup. Strictly speaking
917 * we could do this for all PCI devices; we only need to 917 * we could do this for all PCI devices; we only need to
918 * get the BDF# from the scope table for ACPI matches. */ 918 * get the BDF# from the scope table for ACPI matches. */
919 if (pdev->is_virtfn) 919 if (pdev && pdev->is_virtfn)
920 goto got_pdev; 920 goto got_pdev;
921 921
922 *bus = drhd->devices[i].bus; 922 *bus = drhd->devices[i].bus;
@@ -5249,7 +5249,7 @@ static void intel_iommu_get_resv_regions(struct device *device,
5249 5249
5250 reg = iommu_alloc_resv_region(IOAPIC_RANGE_START, 5250 reg = iommu_alloc_resv_region(IOAPIC_RANGE_START,
5251 IOAPIC_RANGE_END - IOAPIC_RANGE_START + 1, 5251 IOAPIC_RANGE_END - IOAPIC_RANGE_START + 1,
5252 0, IOMMU_RESV_RESERVED); 5252 0, IOMMU_RESV_MSI);
5253 if (!reg) 5253 if (!reg)
5254 return; 5254 return;
5255 list_add_tail(&reg->list, head); 5255 list_add_tail(&reg->list, head);
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
index 1c049e2e12bf..8d6ca28c3e1f 100644
--- a/drivers/iommu/io-pgtable-arm-v7s.c
+++ b/drivers/iommu/io-pgtable-arm-v7s.c
@@ -422,8 +422,12 @@ static int __arm_v7s_map(struct arm_v7s_io_pgtable *data, unsigned long iova,
422 pte |= ARM_V7S_ATTR_NS_TABLE; 422 pte |= ARM_V7S_ATTR_NS_TABLE;
423 423
424 __arm_v7s_set_pte(ptep, pte, 1, cfg); 424 __arm_v7s_set_pte(ptep, pte, 1, cfg);
425 } else { 425 } else if (ARM_V7S_PTE_IS_TABLE(pte, lvl)) {
426 cptep = iopte_deref(pte, lvl); 426 cptep = iopte_deref(pte, lvl);
427 } else {
428 /* We require an unmap first */
429 WARN_ON(!selftest_running);
430 return -EEXIST;
427 } 431 }
428 432
429 /* Rinse, repeat */ 433 /* Rinse, repeat */
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index feacc54bec68..f9bc6ebb8140 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -335,8 +335,12 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
335 if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS) 335 if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
336 pte |= ARM_LPAE_PTE_NSTABLE; 336 pte |= ARM_LPAE_PTE_NSTABLE;
337 __arm_lpae_set_pte(ptep, pte, cfg); 337 __arm_lpae_set_pte(ptep, pte, cfg);
338 } else { 338 } else if (!iopte_leaf(pte, lvl)) {
339 cptep = iopte_deref(pte, data); 339 cptep = iopte_deref(pte, data);
340 } else {
341 /* We require an unmap first */
342 WARN_ON(!selftest_running);
343 return -EEXIST;
340 } 344 }
341 345
342 /* Rinse, repeat */ 346 /* Rinse, repeat */
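
Both page-table walkers above now distinguish three PTE states while descending: empty (install a new table), table (walk into it), and leaf (a mapping already exists). Previously a leaf was dereferenced as if it were a table pointer; mapping over it now fails with -EEXIST, and the kernel only stays quiet about it while the selftests run. A standalone model of the decision (enum names assumed):

    #include <errno.h>
    #include <stdbool.h>

    enum pte_state { PTE_EMPTY, PTE_TABLE, PTE_LEAF };

    static int descend(enum pte_state pte, bool selftest_running)
    {
            switch (pte) {
            case PTE_EMPTY:
                    return 0;       /* allocate and install a next-level table */
            case PTE_TABLE:
                    return 0;       /* existing table: continue the walk */
            case PTE_LEAF:
            default:
                    /* the kernel WARNs here unless selftests are running */
                    (void)selftest_running;
                    return -EEXIST; /* caller must unmap before remapping */
            }
    }
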
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 8ea14f41a979..3b67144dead2 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -72,6 +72,7 @@ static const char * const iommu_group_resv_type_string[] = {
72 [IOMMU_RESV_DIRECT] = "direct", 72 [IOMMU_RESV_DIRECT] = "direct",
73 [IOMMU_RESV_RESERVED] = "reserved", 73 [IOMMU_RESV_RESERVED] = "reserved",
74 [IOMMU_RESV_MSI] = "msi", 74 [IOMMU_RESV_MSI] = "msi",
75 [IOMMU_RESV_SW_MSI] = "msi",
75}; 76};
76 77
77#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store) \ 78#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store) \
@@ -1743,8 +1744,8 @@ void iommu_put_resv_regions(struct device *dev, struct list_head *list)
1743} 1744}
1744 1745
1745struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start, 1746struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
1746 size_t length, 1747 size_t length, int prot,
1747 int prot, int type) 1748 enum iommu_resv_type type)
1748{ 1749{
1749 struct iommu_resv_region *region; 1750 struct iommu_resv_region *region;
1750 1751
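
The iommu.c hunks formalize the region type as enum iommu_resv_type and introduce IOMMU_RESV_SW_MSI for software-managed MSI windows (the ARM SMMU drivers above) as distinct from hardware-fixed MSI ranges (the AMD and Intel hunks, now IOMMU_RESV_MSI); both deliberately render as "msi" in the sysfs reserved-regions listing. The allocation pattern, as used by the SMMU hunks:

    struct iommu_resv_region *region;
    int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;

    /* Software-managed MSI window: userspace (e.g. VFIO) may remap it. */
    region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
                                     prot, IOMMU_RESV_SW_MSI);
    if (!region)
            return;
    list_add_tail(&region->list, head);
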
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 125528f39e92..8162121bb1bc 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -262,6 +262,7 @@ config IRQ_MXS
262 262
263config MVEBU_ODMI 263config MVEBU_ODMI
264 bool 264 bool
265 select GENERIC_MSI_IRQ_DOMAIN
265 266
266config MVEBU_PIC 267config MVEBU_PIC
267 bool 268 bool
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index 11d12bccc4e7..cd20df12d63d 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -991,8 +991,12 @@ static void __init gic_map_single_int(struct device_node *node,
991 991
992static void __init gic_map_interrupts(struct device_node *node) 992static void __init gic_map_interrupts(struct device_node *node)
993{ 993{
994 gic_map_single_int(node, GIC_LOCAL_INT_WD);
995 gic_map_single_int(node, GIC_LOCAL_INT_COMPARE);
994 gic_map_single_int(node, GIC_LOCAL_INT_TIMER); 996 gic_map_single_int(node, GIC_LOCAL_INT_TIMER);
995 gic_map_single_int(node, GIC_LOCAL_INT_PERFCTR); 997 gic_map_single_int(node, GIC_LOCAL_INT_PERFCTR);
998 gic_map_single_int(node, GIC_LOCAL_INT_SWINT0);
999 gic_map_single_int(node, GIC_LOCAL_INT_SWINT1);
996 gic_map_single_int(node, GIC_LOCAL_INT_FDC); 1000 gic_map_single_int(node, GIC_LOCAL_INT_FDC);
997} 1001}
998 1002
diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c
index 1dfd1085a04f..9ca691d6c13b 100644
--- a/drivers/isdn/capi/kcapi.c
+++ b/drivers/isdn/capi/kcapi.c
@@ -1032,6 +1032,7 @@ static int old_capi_manufacturer(unsigned int cmd, void __user *data)
1032 sizeof(avmb1_carddef)))) 1032 sizeof(avmb1_carddef))))
1033 return -EFAULT; 1033 return -EFAULT;
1034 cdef.cardtype = AVM_CARDTYPE_B1; 1034 cdef.cardtype = AVM_CARDTYPE_B1;
1035 cdef.cardnr = 0;
1035 } else { 1036 } else {
1036 if ((retval = copy_from_user(&cdef, data, 1037 if ((retval = copy_from_user(&cdef, data,
1037 sizeof(avmb1_extcarddef)))) 1038 sizeof(avmb1_extcarddef))))
diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
index 11e13c56126f..2da3ff650e1d 100644
--- a/drivers/isdn/gigaset/bas-gigaset.c
+++ b/drivers/isdn/gigaset/bas-gigaset.c
@@ -2317,6 +2317,9 @@ static int gigaset_probe(struct usb_interface *interface,
2317 return -ENODEV; 2317 return -ENODEV;
2318 } 2318 }
2319 2319
2320 if (hostif->desc.bNumEndpoints < 1)
2321 return -ENODEV;
2322
2320 dev_info(&udev->dev, 2323 dev_info(&udev->dev,
2321 "%s: Device matched (Vendor: 0x%x, Product: 0x%x)\n", 2324 "%s: Device matched (Vendor: 0x%x, Product: 0x%x)\n",
2322 __func__, le16_to_cpu(udev->descriptor.idVendor), 2325 __func__, le16_to_cpu(udev->descriptor.idVendor),
diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
index 3f041b187033..f757cef293f8 100644
--- a/drivers/macintosh/macio_asic.c
+++ b/drivers/macintosh/macio_asic.c
@@ -392,6 +392,7 @@ static struct macio_dev * macio_add_one_device(struct macio_chip *chip,
392 * To get all the fields, copy all archdata 392 * To get all the fields, copy all archdata
393 */ 393 */
394 dev->ofdev.dev.archdata = chip->lbus.pdev->dev.archdata; 394 dev->ofdev.dev.archdata = chip->lbus.pdev->dev.archdata;
395 dev->ofdev.dev.dma_ops = chip->lbus.pdev->dev.dma_ops;
395#endif /* CONFIG_PCI */ 396#endif /* CONFIG_PCI */
396 397
397#ifdef DEBUG 398#ifdef DEBUG
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index e4c2c1a1e993..6735c8d6a445 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -932,7 +932,7 @@ static int blocks_are_clean_separate_dirty(struct dm_cache_metadata *cmd,
932 *result = true; 932 *result = true;
933 933
934 r = dm_bitset_cursor_begin(&cmd->dirty_info, cmd->dirty_root, 934 r = dm_bitset_cursor_begin(&cmd->dirty_info, cmd->dirty_root,
935 from_cblock(begin), &cmd->dirty_cursor); 935 from_cblock(cmd->cache_blocks), &cmd->dirty_cursor);
936 if (r) { 936 if (r) {
937 DMERR("%s: dm_bitset_cursor_begin for dirty failed", __func__); 937 DMERR("%s: dm_bitset_cursor_begin for dirty failed", __func__);
938 return r; 938 return r;
@@ -959,14 +959,16 @@ static int blocks_are_clean_separate_dirty(struct dm_cache_metadata *cmd,
959 return 0; 959 return 0;
960 } 960 }
961 961
962 begin = to_cblock(from_cblock(begin) + 1);
963 if (begin == end)
964 break;
965
962 r = dm_bitset_cursor_next(&cmd->dirty_cursor); 966 r = dm_bitset_cursor_next(&cmd->dirty_cursor);
963 if (r) { 967 if (r) {
964 DMERR("%s: dm_bitset_cursor_next for dirty failed", __func__); 968 DMERR("%s: dm_bitset_cursor_next for dirty failed", __func__);
965 dm_bitset_cursor_end(&cmd->dirty_cursor); 969 dm_bitset_cursor_end(&cmd->dirty_cursor);
966 return r; 970 return r;
967 } 971 }
968
969 begin = to_cblock(from_cblock(begin) + 1);
970 } 972 }
971 973
972 dm_bitset_cursor_end(&cmd->dirty_cursor); 974 dm_bitset_cursor_end(&cmd->dirty_cursor);
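
The dm-cache-metadata hunk fixes two iterator bugs: dm_bitset_cursor_begin() must be sized to the full dirty bitset (cmd->cache_blocks) rather than the search start, and the loop has to check for the end of the range before calling dm_bitset_cursor_next(), otherwise the last iteration steps the cursor past the final bit. A compilable model of the corrected loop shape (an integer stands in for the bitset cursor):

    /* Visit [begin, end) without ever advancing past the last element. */
    static int walk(unsigned int begin, unsigned int end)
    {
            while (begin < end) {
                    /* ... inspect position 'begin' ... */

                    begin++;
                    if (begin == end)
                            break;  /* done: do not advance the cursor again */

                    /* the driver calls dm_bitset_cursor_next() here and
                     * propagates any error */
            }
            return 0;
    }
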
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index f8564d63982f..1e217ba84d09 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -3726,7 +3726,7 @@ static int raid_preresume(struct dm_target *ti)
3726 return r; 3726 return r;
3727 3727
3728 /* Resize bitmap to adjust to changed region size (aka MD bitmap chunksize) */ 3728 /* Resize bitmap to adjust to changed region size (aka MD bitmap chunksize) */
3729 if (test_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags) && 3729 if (test_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags) && mddev->bitmap &&
3730 mddev->bitmap_info.chunksize != to_bytes(rs->requested_bitmap_chunk_sectors)) { 3730 mddev->bitmap_info.chunksize != to_bytes(rs->requested_bitmap_chunk_sectors)) {
3731 r = bitmap_resize(mddev->bitmap, mddev->dev_sectors, 3731 r = bitmap_resize(mddev->bitmap, mddev->dev_sectors,
3732 to_bytes(rs->requested_bitmap_chunk_sectors), 0); 3732 to_bytes(rs->requested_bitmap_chunk_sectors), 0);
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 28955b94d2b2..0b081d170087 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -755,6 +755,7 @@ static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
755 /* Undo dm_start_request() before requeuing */ 755 /* Undo dm_start_request() before requeuing */
756 rq_end_stats(md, rq); 756 rq_end_stats(md, rq);
757 rq_completed(md, rq_data_dir(rq), false); 757 rq_completed(md, rq_data_dir(rq), false);
758 blk_mq_delay_run_hw_queue(hctx, 100/*ms*/);
758 return BLK_MQ_RQ_QUEUE_BUSY; 759 return BLK_MQ_RQ_QUEUE_BUSY;
759 } 760 }
760 761
diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
index 0f0eb8a3d922..78f36012eaca 100644
--- a/drivers/md/dm-verity-fec.c
+++ b/drivers/md/dm-verity-fec.c
@@ -146,8 +146,6 @@ static int fec_decode_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio,
146 block = fec_buffer_rs_block(v, fio, n, i); 146 block = fec_buffer_rs_block(v, fio, n, i);
147 res = fec_decode_rs8(v, fio, block, &par[offset], neras); 147 res = fec_decode_rs8(v, fio, block, &par[offset], neras);
148 if (res < 0) { 148 if (res < 0) {
149 dm_bufio_release(buf);
150
151 r = res; 149 r = res;
152 goto error; 150 goto error;
153 } 151 }
@@ -172,6 +170,8 @@ static int fec_decode_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio,
172done: 170done:
173 r = corrected; 171 r = corrected;
174error: 172error:
173 dm_bufio_release(buf);
174
175 if (r < 0 && neras) 175 if (r < 0 && neras)
176 DMERR_LIMIT("%s: FEC %llu: failed to correct: %d", 176 DMERR_LIMIT("%s: FEC %llu: failed to correct: %d",
177 v->data_dev->name, (unsigned long long)rsb, r); 177 v->data_dev->name, (unsigned long long)rsb, r);
@@ -269,7 +269,7 @@ static int fec_read_bufs(struct dm_verity *v, struct dm_verity_io *io,
269 &is_zero) == 0) { 269 &is_zero) == 0) {
270 /* skip known zero blocks entirely */ 270 /* skip known zero blocks entirely */
271 if (is_zero) 271 if (is_zero)
272 continue; 272 goto done;
273 273
274 /* 274 /*
275 * skip if we have already found the theoretical 275 * skip if we have already found the theoretical
@@ -439,6 +439,13 @@ int verity_fec_decode(struct dm_verity *v, struct dm_verity_io *io,
439 if (!verity_fec_is_enabled(v)) 439 if (!verity_fec_is_enabled(v))
440 return -EOPNOTSUPP; 440 return -EOPNOTSUPP;
441 441
442 if (fio->level >= DM_VERITY_FEC_MAX_RECURSION) {
443 DMWARN_LIMIT("%s: FEC: recursion too deep", v->data_dev->name);
444 return -EIO;
445 }
446
447 fio->level++;
448
442 if (type == DM_VERITY_BLOCK_TYPE_METADATA) 449 if (type == DM_VERITY_BLOCK_TYPE_METADATA)
443 block += v->data_blocks; 450 block += v->data_blocks;
444 451
@@ -470,7 +477,7 @@ int verity_fec_decode(struct dm_verity *v, struct dm_verity_io *io,
470 if (r < 0) { 477 if (r < 0) {
471 r = fec_decode_rsb(v, io, fio, rsb, offset, true); 478 r = fec_decode_rsb(v, io, fio, rsb, offset, true);
472 if (r < 0) 479 if (r < 0)
473 return r; 480 goto done;
474 } 481 }
475 482
476 if (dest) 483 if (dest)
@@ -480,6 +487,8 @@ int verity_fec_decode(struct dm_verity *v, struct dm_verity_io *io,
480 r = verity_for_bv_block(v, io, iter, fec_bv_copy); 487 r = verity_for_bv_block(v, io, iter, fec_bv_copy);
481 } 488 }
482 489
490done:
491 fio->level--;
483 return r; 492 return r;
484} 493}
485 494
@@ -520,6 +529,7 @@ void verity_fec_init_io(struct dm_verity_io *io)
520 memset(fio->bufs, 0, sizeof(fio->bufs)); 529 memset(fio->bufs, 0, sizeof(fio->bufs));
521 fio->nbufs = 0; 530 fio->nbufs = 0;
522 fio->output = NULL; 531 fio->output = NULL;
532 fio->level = 0;
523} 533}
524 534
525/* 535/*
diff --git a/drivers/md/dm-verity-fec.h b/drivers/md/dm-verity-fec.h
index 7fa0298b995e..bb31ce87a933 100644
--- a/drivers/md/dm-verity-fec.h
+++ b/drivers/md/dm-verity-fec.h
@@ -27,6 +27,9 @@
27#define DM_VERITY_FEC_BUF_MAX \ 27#define DM_VERITY_FEC_BUF_MAX \
28 (1 << (PAGE_SHIFT - DM_VERITY_FEC_BUF_RS_BITS)) 28 (1 << (PAGE_SHIFT - DM_VERITY_FEC_BUF_RS_BITS))
29 29
30/* maximum recursion level for verity_fec_decode */
31#define DM_VERITY_FEC_MAX_RECURSION 4
32
30#define DM_VERITY_OPT_FEC_DEV "use_fec_from_device" 33#define DM_VERITY_OPT_FEC_DEV "use_fec_from_device"
31#define DM_VERITY_OPT_FEC_BLOCKS "fec_blocks" 34#define DM_VERITY_OPT_FEC_BLOCKS "fec_blocks"
32#define DM_VERITY_OPT_FEC_START "fec_start" 35#define DM_VERITY_OPT_FEC_START "fec_start"
@@ -58,6 +61,7 @@ struct dm_verity_fec_io {
58 unsigned nbufs; /* number of buffers allocated */ 61 unsigned nbufs; /* number of buffers allocated */
59 u8 *output; /* buffer for corrected output */ 62 u8 *output; /* buffer for corrected output */
60 size_t output_pos; 63 size_t output_pos;
64 unsigned level; /* recursion level */
61}; 65};
62 66
63#ifdef CONFIG_DM_VERITY_FEC 67#ifdef CONFIG_DM_VERITY_FEC
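
The dm-verity FEC changes bound recursion: correcting one block can require reading and hash-verifying other blocks, and each verification failure may re-enter verity_fec_decode(), so a per-io level counter caps the depth at DM_VERITY_FEC_MAX_RECURSION and returns -EIO beyond it. A minimal standalone model of the guard:

    #include <errno.h>

    #define MAX_RECURSION 4

    struct io_ctx {
            unsigned int level;     /* current depth, initialized to 0 */
    };

    static int decode(struct io_ctx *io)
    {
            int r = 0;

            if (io->level >= MAX_RECURSION)
                    return -EIO;    /* refuse to recurse any deeper */
            io->level++;

            /* ... work that may call decode(io) again ... */

            io->level--;
            return r;
    }
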
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index f4ffd1eb8f44..dfb75979e455 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -989,26 +989,29 @@ static void flush_current_bio_list(struct blk_plug_cb *cb, bool from_schedule)
989 struct dm_offload *o = container_of(cb, struct dm_offload, cb); 989 struct dm_offload *o = container_of(cb, struct dm_offload, cb);
990 struct bio_list list; 990 struct bio_list list;
991 struct bio *bio; 991 struct bio *bio;
992 int i;
992 993
993 INIT_LIST_HEAD(&o->cb.list); 994 INIT_LIST_HEAD(&o->cb.list);
994 995
995 if (unlikely(!current->bio_list)) 996 if (unlikely(!current->bio_list))
996 return; 997 return;
997 998
998 list = *current->bio_list; 999 for (i = 0; i < 2; i++) {
999 bio_list_init(current->bio_list); 1000 list = current->bio_list[i];
1000 1001 bio_list_init(&current->bio_list[i]);
1001 while ((bio = bio_list_pop(&list))) { 1002
1002 struct bio_set *bs = bio->bi_pool; 1003 while ((bio = bio_list_pop(&list))) {
1003 if (unlikely(!bs) || bs == fs_bio_set) { 1004 struct bio_set *bs = bio->bi_pool;
1004 bio_list_add(current->bio_list, bio); 1005 if (unlikely(!bs) || bs == fs_bio_set) {
1005 continue; 1006 bio_list_add(&current->bio_list[i], bio);
1007 continue;
1008 }
1009
1010 spin_lock(&bs->rescue_lock);
1011 bio_list_add(&bs->rescue_list, bio);
1012 queue_work(bs->rescue_workqueue, &bs->rescue_work);
1013 spin_unlock(&bs->rescue_lock);
1006 } 1014 }
1007
1008 spin_lock(&bs->rescue_lock);
1009 bio_list_add(&bs->rescue_list, bio);
1010 queue_work(bs->rescue_workqueue, &bs->rescue_work);
1011 spin_unlock(&bs->rescue_lock);
1012 } 1015 }
1013} 1016}
1014 1017
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index 2b13117fb918..321ecac23027 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -777,7 +777,6 @@ static int gather_all_resync_info(struct mddev *mddev, int total_slots)
777 bm_lockres->flags |= DLM_LKF_NOQUEUE; 777 bm_lockres->flags |= DLM_LKF_NOQUEUE;
778 ret = dlm_lock_sync(bm_lockres, DLM_LOCK_PW); 778 ret = dlm_lock_sync(bm_lockres, DLM_LOCK_PW);
779 if (ret == -EAGAIN) { 779 if (ret == -EAGAIN) {
780 memset(bm_lockres->lksb.sb_lvbptr, '\0', LVB_SIZE);
781 s = read_resync_info(mddev, bm_lockres); 780 s = read_resync_info(mddev, bm_lockres);
782 if (s) { 781 if (s) {
783 pr_info("%s:%d Resync[%llu..%llu] in progress on %d\n", 782 pr_info("%s:%d Resync[%llu..%llu] in progress on %d\n",
@@ -974,6 +973,7 @@ static int leave(struct mddev *mddev)
974 lockres_free(cinfo->bitmap_lockres); 973 lockres_free(cinfo->bitmap_lockres);
975 unlock_all_bitmaps(mddev); 974 unlock_all_bitmaps(mddev);
976 dlm_release_lockspace(cinfo->lockspace, 2); 975 dlm_release_lockspace(cinfo->lockspace, 2);
976 kfree(cinfo);
977 return 0; 977 return 0;
978} 978}
979 979
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 548d1b8014f8..f6ae1d67bcd0 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -440,14 +440,6 @@ void md_flush_request(struct mddev *mddev, struct bio *bio)
440} 440}
441EXPORT_SYMBOL(md_flush_request); 441EXPORT_SYMBOL(md_flush_request);
442 442
443void md_unplug(struct blk_plug_cb *cb, bool from_schedule)
444{
445 struct mddev *mddev = cb->data;
446 md_wakeup_thread(mddev->thread);
447 kfree(cb);
448}
449EXPORT_SYMBOL(md_unplug);
450
451static inline struct mddev *mddev_get(struct mddev *mddev) 443static inline struct mddev *mddev_get(struct mddev *mddev)
452{ 444{
453 atomic_inc(&mddev->active); 445 atomic_inc(&mddev->active);
@@ -1887,7 +1879,7 @@ super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
1887 } 1879 }
1888 sb = page_address(rdev->sb_page); 1880 sb = page_address(rdev->sb_page);
1889 sb->data_size = cpu_to_le64(num_sectors); 1881 sb->data_size = cpu_to_le64(num_sectors);
1890 sb->super_offset = rdev->sb_start; 1882 sb->super_offset = cpu_to_le64(rdev->sb_start);
1891 sb->sb_csum = calc_sb_1_csum(sb); 1883 sb->sb_csum = calc_sb_1_csum(sb);
1892 do { 1884 do {
1893 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size, 1885 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
@@ -2295,7 +2287,7 @@ static bool does_sb_need_changing(struct mddev *mddev)
2295 /* Check if any mddev parameters have changed */ 2287 /* Check if any mddev parameters have changed */
2296 if ((mddev->dev_sectors != le64_to_cpu(sb->size)) || 2288 if ((mddev->dev_sectors != le64_to_cpu(sb->size)) ||
2297 (mddev->reshape_position != le64_to_cpu(sb->reshape_position)) || 2289 (mddev->reshape_position != le64_to_cpu(sb->reshape_position)) ||
2298 (mddev->layout != le64_to_cpu(sb->layout)) || 2290 (mddev->layout != le32_to_cpu(sb->layout)) ||
2299 (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) || 2291 (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) ||
2300 (mddev->chunk_sectors != le32_to_cpu(sb->chunksize))) 2292 (mddev->chunk_sectors != le32_to_cpu(sb->chunksize)))
2301 return true; 2293 return true;
@@ -6458,11 +6450,10 @@ static int set_array_info(struct mddev *mddev, mdu_array_info_t *info)
6458 mddev->layout = info->layout; 6450 mddev->layout = info->layout;
6459 mddev->chunk_sectors = info->chunk_size >> 9; 6451 mddev->chunk_sectors = info->chunk_size >> 9;
6460 6452
6461 mddev->max_disks = MD_SB_DISKS;
6462
6463 if (mddev->persistent) { 6453 if (mddev->persistent) {
6464 mddev->flags = 0; 6454 mddev->max_disks = MD_SB_DISKS;
6465 mddev->sb_flags = 0; 6455 mddev->flags = 0;
6456 mddev->sb_flags = 0;
6466 } 6457 }
6467 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 6458 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
6468 6459
@@ -6533,8 +6524,12 @@ static int update_size(struct mddev *mddev, sector_t num_sectors)
6533 return -ENOSPC; 6524 return -ENOSPC;
6534 } 6525 }
6535 rv = mddev->pers->resize(mddev, num_sectors); 6526 rv = mddev->pers->resize(mddev, num_sectors);
6536 if (!rv) 6527 if (!rv) {
6537 revalidate_disk(mddev->gendisk); 6528 if (mddev->queue) {
6529 set_capacity(mddev->gendisk, mddev->array_sectors);
6530 revalidate_disk(mddev->gendisk);
6531 }
6532 }
6538 return rv; 6533 return rv;
6539} 6534}
6540 6535
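
Two of the md.c hunks are endianness fixes: on-disk v1 superblock fields are little-endian, so stores must go through cpu_to_le64()/cpu_to_le32() and loads through the matching le*_to_cpu(), with the width chosen by the field (sb->layout is a 32-bit field, hence le32_to_cpu rather than le64_to_cpu). The canonical pattern, with sparse-checkable types:

    #include <linux/types.h>
    #include <asm/byteorder.h>

    __le64 on_disk;                         /* little-endian, as stored */
    u64 native;                             /* native CPU byte order */

    on_disk = cpu_to_le64(native);          /* store: convert on the way out */
    native  = le64_to_cpu(on_disk);         /* load: convert on the way in */
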
diff --git a/drivers/md/md.h b/drivers/md/md.h
index b8859cbf84b6..dde8ecb760c8 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -676,16 +676,10 @@ extern void mddev_resume(struct mddev *mddev);
676extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs, 676extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
677 struct mddev *mddev); 677 struct mddev *mddev);
678 678
679extern void md_unplug(struct blk_plug_cb *cb, bool from_schedule);
680extern void md_reload_sb(struct mddev *mddev, int raid_disk); 679extern void md_reload_sb(struct mddev *mddev, int raid_disk);
681extern void md_update_sb(struct mddev *mddev, int force); 680extern void md_update_sb(struct mddev *mddev, int force);
682extern void md_kick_rdev_from_array(struct md_rdev * rdev); 681extern void md_kick_rdev_from_array(struct md_rdev * rdev);
683struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr); 682struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr);
684static inline int mddev_check_plugged(struct mddev *mddev)
685{
686 return !!blk_check_plugged(md_unplug, mddev,
687 sizeof(struct blk_plug_cb));
688}
689 683
690static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev) 684static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
691{ 685{
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index fbc2d7851b49..a34f58772022 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1027,7 +1027,7 @@ static int get_unqueued_pending(struct r1conf *conf)
1027static void freeze_array(struct r1conf *conf, int extra) 1027static void freeze_array(struct r1conf *conf, int extra)
1028{ 1028{
1029 /* Stop sync I/O and normal I/O and wait for everything to 1029 /* Stop sync I/O and normal I/O and wait for everything to
1030 * go quite. 1030 * go quiet.
1031 * This is called in two situations: 1031 * This is called in two situations:
1032 * 1) management command handlers (reshape, remove disk, quiesce). 1032 * 1) management command handlers (reshape, remove disk, quiesce).
1033 * 2) one normal I/O request failed. 1033 * 2) one normal I/O request failed.
@@ -1587,9 +1587,30 @@ static void raid1_make_request(struct mddev *mddev, struct bio *bio)
1587 split = bio; 1587 split = bio;
1588 } 1588 }
1589 1589
1590 if (bio_data_dir(split) == READ) 1590 if (bio_data_dir(split) == READ) {
1591 raid1_read_request(mddev, split); 1591 raid1_read_request(mddev, split);
1592 else 1592
1593 /*
1594			 * If a bio is split, the first part of the bio will
1595			 * pass the barrier but the bio is queued in
1596			 * current->bio_list (see generic_make_request). If
1597			 * there is a raise_barrier() called here, the second
1598			 * part of the bio can't pass the barrier. But since
1599			 * the first part isn't dispatched to the underlying
1600			 * disks yet, the barrier is never released, hence
1601			 * raise_barrier will always wait. We have a deadlock.
1602			 * Note, this only happens in the read path. For the
1603			 * write path, the first part of the bio is dispatched
1604			 * in a schedule() call (because of the blk plug) or
1605			 * offloaded to raid1d.
1606 * Quitting from the function immediately can change
1607 * the bio order queued in bio_list and avoid the deadlock.
1608 */
1609 if (split != bio) {
1610 generic_make_request(bio);
1611 break;
1612 }
1613 } else
1593 raid1_write_request(mddev, split); 1614 raid1_write_request(mddev, split);
1594 } while (split != bio); 1615 } while (split != bio);
1595} 1616}
@@ -3246,8 +3267,6 @@ static int raid1_resize(struct mddev *mddev, sector_t sectors)
3246 return ret; 3267 return ret;
3247 } 3268 }
3248 md_set_array_sectors(mddev, newsize); 3269 md_set_array_sectors(mddev, newsize);
3249 set_capacity(mddev->gendisk, mddev->array_sectors);
3250 revalidate_disk(mddev->gendisk);
3251 if (sectors > mddev->dev_sectors && 3270 if (sectors > mddev->dev_sectors &&
3252 mddev->recovery_cp > mddev->dev_sectors) { 3271 mddev->recovery_cp > mddev->dev_sectors) {
3253 mddev->recovery_cp = mddev->dev_sectors; 3272 mddev->recovery_cp = mddev->dev_sectors;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 063c43d83b72..e89a8d78a9ed 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -974,7 +974,8 @@ static void wait_barrier(struct r10conf *conf)
974 !conf->barrier || 974 !conf->barrier ||
975 (atomic_read(&conf->nr_pending) && 975 (atomic_read(&conf->nr_pending) &&
976 current->bio_list && 976 current->bio_list &&
977 !bio_list_empty(current->bio_list)), 977 (!bio_list_empty(&current->bio_list[0]) ||
978 !bio_list_empty(&current->bio_list[1]))),
978 conf->resync_lock); 979 conf->resync_lock);
979 conf->nr_waiting--; 980 conf->nr_waiting--;
980 if (!conf->nr_waiting) 981 if (!conf->nr_waiting)
@@ -1477,11 +1478,24 @@ retry_write:
1477 mbio->bi_bdev = (void*)rdev; 1478 mbio->bi_bdev = (void*)rdev;
1478 1479
1479 atomic_inc(&r10_bio->remaining); 1480 atomic_inc(&r10_bio->remaining);
1481
1482 cb = blk_check_plugged(raid10_unplug, mddev,
1483 sizeof(*plug));
1484 if (cb)
1485 plug = container_of(cb, struct raid10_plug_cb,
1486 cb);
1487 else
1488 plug = NULL;
1480 spin_lock_irqsave(&conf->device_lock, flags); 1489 spin_lock_irqsave(&conf->device_lock, flags);
1481 bio_list_add(&conf->pending_bio_list, mbio); 1490 if (plug) {
1482 conf->pending_count++; 1491 bio_list_add(&plug->pending, mbio);
1492 plug->pending_cnt++;
1493 } else {
1494 bio_list_add(&conf->pending_bio_list, mbio);
1495 conf->pending_count++;
1496 }
1483 spin_unlock_irqrestore(&conf->device_lock, flags); 1497 spin_unlock_irqrestore(&conf->device_lock, flags);
1484 if (!mddev_check_plugged(mddev)) 1498 if (!plug)
1485 md_wakeup_thread(mddev->thread); 1499 md_wakeup_thread(mddev->thread);
1486 } 1500 }
1487 } 1501 }
@@ -1571,7 +1585,25 @@ static void raid10_make_request(struct mddev *mddev, struct bio *bio)
1571 split = bio; 1585 split = bio;
1572 } 1586 }
1573 1587
1588 /*
1589	 * If a bio is split, the first part of the bio will pass
1590	 * the barrier but the bio is queued in current->bio_list (see
1591	 * generic_make_request). If there is a raise_barrier() called
1592	 * here, the second part of the bio can't pass the barrier. But
1593	 * since the first part isn't dispatched to the underlying disks
1594	 * yet, the barrier is never released, hence raise_barrier will
1595	 * always wait. We have a deadlock.
1596	 * Note, this only happens in the read path. For the write path,
1597	 * the first part of the bio is dispatched in a schedule() call
1598	 * (because of the blk plug) or offloaded to raid10d.
1599 * Quitting from the function immediately can change the bio
1600 * order queued in bio_list and avoid the deadlock.
1601 */
1574 __make_request(mddev, split); 1602 __make_request(mddev, split);
1603 if (split != bio && bio_data_dir(bio) == READ) {
1604 generic_make_request(bio);
1605 break;
1606 }
1575 } while (split != bio); 1607 } while (split != bio);
1576 1608
1577 /* In case raid10d snuck in to freeze_array */ 1609 /* In case raid10d snuck in to freeze_array */
@@ -3943,10 +3975,6 @@ static int raid10_resize(struct mddev *mddev, sector_t sectors)
3943 return ret; 3975 return ret;
3944 } 3976 }
3945 md_set_array_sectors(mddev, size); 3977 md_set_array_sectors(mddev, size);
3946 if (mddev->queue) {
3947 set_capacity(mddev->gendisk, mddev->array_sectors);
3948 revalidate_disk(mddev->gendisk);
3949 }
3950 if (sectors > mddev->dev_sectors && 3978 if (sectors > mddev->dev_sectors &&
3951 mddev->recovery_cp > oldsize) { 3979 mddev->recovery_cp > oldsize) {
3952 mddev->recovery_cp = oldsize; 3980 mddev->recovery_cp = oldsize;
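
The raid10 write path now batches mbios on the per-task block plug: blk_check_plugged() returns the callback attached to the current plug (allocating one of the requested size on first use, or NULL when no plug is active), and container_of() recovers the driver wrapper so bios accumulate in plug->pending until the unplug callback flushes them. Only without a plug does the code fall back to conf->pending_bio_list plus an immediate thread wakeup. The pattern, using the wrapper already defined in raid10.c:

    struct raid10_plug_cb {
            struct blk_plug_cb cb;
            struct bio_list pending;
            int pending_cnt;
    };

    cb = blk_check_plugged(raid10_unplug, mddev, sizeof(*plug));
    plug = cb ? container_of(cb, struct raid10_plug_cb, cb) : NULL;

    if (plug) {
            bio_list_add(&plug->pending, mbio);     /* flushed at unplug time */
            plug->pending_cnt++;
    } else {
            bio_list_add(&conf->pending_bio_list, mbio);
            conf->pending_count++;
            md_wakeup_thread(mddev->thread);        /* no plug: kick raid10d now */
    }
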
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 4fb09b3fcb41..ed5cd705b985 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -1401,7 +1401,8 @@ static int set_syndrome_sources(struct page **srcs,
1401 (test_bit(R5_Wantdrain, &dev->flags) || 1401 (test_bit(R5_Wantdrain, &dev->flags) ||
1402 test_bit(R5_InJournal, &dev->flags))) || 1402 test_bit(R5_InJournal, &dev->flags))) ||
1403 (srctype == SYNDROME_SRC_WRITTEN && 1403 (srctype == SYNDROME_SRC_WRITTEN &&
1404 dev->written)) { 1404 (dev->written ||
1405 test_bit(R5_InJournal, &dev->flags)))) {
1405 if (test_bit(R5_InJournal, &dev->flags)) 1406 if (test_bit(R5_InJournal, &dev->flags))
1406 srcs[slot] = sh->dev[i].orig_page; 1407 srcs[slot] = sh->dev[i].orig_page;
1407 else 1408 else
@@ -7605,8 +7606,6 @@ static int raid5_resize(struct mddev *mddev, sector_t sectors)
7605 return ret; 7606 return ret;
7606 } 7607 }
7607 md_set_array_sectors(mddev, newsize); 7608 md_set_array_sectors(mddev, newsize);
7608 set_capacity(mddev->gendisk, mddev->array_sectors);
7609 revalidate_disk(mddev->gendisk);
7610 if (sectors > mddev->dev_sectors && 7609 if (sectors > mddev->dev_sectors &&
7611 mddev->recovery_cp > mddev->dev_sectors) { 7610 mddev->recovery_cp > mddev->dev_sectors) {
7612 mddev->recovery_cp = mddev->dev_sectors; 7611 mddev->recovery_cp = mddev->dev_sectors;
diff --git a/drivers/media/platform/coda/imx-vdoa.c b/drivers/media/platform/coda/imx-vdoa.c
index 67fd8ffa60a4..669a4c82f1ff 100644
--- a/drivers/media/platform/coda/imx-vdoa.c
+++ b/drivers/media/platform/coda/imx-vdoa.c
@@ -321,7 +321,7 @@ static const struct of_device_id vdoa_dt_ids[] = {
321}; 321};
322MODULE_DEVICE_TABLE(of, vdoa_dt_ids); 322MODULE_DEVICE_TABLE(of, vdoa_dt_ids);
323 323
324static const struct platform_driver vdoa_driver = { 324static struct platform_driver vdoa_driver = {
325 .probe = vdoa_probe, 325 .probe = vdoa_probe,
326 .remove = vdoa_remove, 326 .remove = vdoa_remove,
327 .driver = { 327 .driver = {
diff --git a/drivers/media/platform/exynos-gsc/gsc-core.c b/drivers/media/platform/exynos-gsc/gsc-core.c
index cbb03768f5d7..0f0c389f8897 100644
--- a/drivers/media/platform/exynos-gsc/gsc-core.c
+++ b/drivers/media/platform/exynos-gsc/gsc-core.c
@@ -861,9 +861,7 @@ int gsc_prepare_addr(struct gsc_ctx *ctx, struct vb2_buffer *vb,
861 861
862 if ((frame->fmt->pixelformat == V4L2_PIX_FMT_VYUY) || 862 if ((frame->fmt->pixelformat == V4L2_PIX_FMT_VYUY) ||
863 (frame->fmt->pixelformat == V4L2_PIX_FMT_YVYU) || 863 (frame->fmt->pixelformat == V4L2_PIX_FMT_YVYU) ||
864 (frame->fmt->pixelformat == V4L2_PIX_FMT_NV61) ||
865 (frame->fmt->pixelformat == V4L2_PIX_FMT_YVU420) || 864 (frame->fmt->pixelformat == V4L2_PIX_FMT_YVU420) ||
866 (frame->fmt->pixelformat == V4L2_PIX_FMT_NV21) ||
867 (frame->fmt->pixelformat == V4L2_PIX_FMT_YVU420M)) 865 (frame->fmt->pixelformat == V4L2_PIX_FMT_YVU420M))
868 swap(addr->cb, addr->cr); 866 swap(addr->cb, addr->cr);
869 867
diff --git a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
index 823608112d89..7918b928f058 100644
--- a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
+++ b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
@@ -632,8 +632,8 @@ static int bdisp_open(struct file *file)
632 632
633error_ctrls: 633error_ctrls:
634 bdisp_ctrls_delete(ctx); 634 bdisp_ctrls_delete(ctx);
635error_fh:
636 v4l2_fh_del(&ctx->fh); 635 v4l2_fh_del(&ctx->fh);
636error_fh:
637 v4l2_fh_exit(&ctx->fh); 637 v4l2_fh_exit(&ctx->fh);
638 bdisp_hw_free_nodes(ctx); 638 bdisp_hw_free_nodes(ctx);
639mem_ctx: 639mem_ctx:
diff --git a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
index ab9866024ec7..04033efe7ad5 100644
--- a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
+++ b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
@@ -36,16 +36,18 @@ static int usb_cypress_writemem(struct usb_device *udev,u16 addr,u8 *data, u8 le
36int usb_cypress_load_firmware(struct usb_device *udev, const struct firmware *fw, int type) 36int usb_cypress_load_firmware(struct usb_device *udev, const struct firmware *fw, int type)
37{ 37{
38 struct hexline *hx; 38 struct hexline *hx;
39 u8 reset; 39 u8 *buf;
40 int ret,pos=0; 40 int ret, pos = 0;
41 u16 cpu_cs_register = cypress[type].cpu_cs_register;
41 42
42 hx = kmalloc(sizeof(*hx), GFP_KERNEL); 43 buf = kmalloc(sizeof(*hx), GFP_KERNEL);
43 if (!hx) 44 if (!buf)
44 return -ENOMEM; 45 return -ENOMEM;
46 hx = (struct hexline *)buf;
45 47
46 /* stop the CPU */ 48 /* stop the CPU */
47 reset = 1; 49 buf[0] = 1;
48 if ((ret = usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1)) != 1) 50 if (usb_cypress_writemem(udev, cpu_cs_register, buf, 1) != 1)
49 err("could not stop the USB controller CPU."); 51 err("could not stop the USB controller CPU.");
50 52
51 while ((ret = dvb_usb_get_hexline(fw, hx, &pos)) > 0) { 53 while ((ret = dvb_usb_get_hexline(fw, hx, &pos)) > 0) {
@@ -61,21 +63,21 @@ int usb_cypress_load_firmware(struct usb_device *udev, const struct firmware *fw
61 } 63 }
62 if (ret < 0) { 64 if (ret < 0) {
63 err("firmware download failed at %d with %d",pos,ret); 65 err("firmware download failed at %d with %d",pos,ret);
64 kfree(hx); 66 kfree(buf);
65 return ret; 67 return ret;
66 } 68 }
67 69
68 if (ret == 0) { 70 if (ret == 0) {
69 /* restart the CPU */ 71 /* restart the CPU */
70 reset = 0; 72 buf[0] = 0;
71 if (ret || usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1) != 1) { 73 if (usb_cypress_writemem(udev, cpu_cs_register, buf, 1) != 1) {
72 err("could not restart the USB controller CPU."); 74 err("could not restart the USB controller CPU.");
73 ret = -EINVAL; 75 ret = -EINVAL;
74 } 76 }
75 } else 77 } else
76 ret = -EIO; 78 ret = -EIO;
77 79
78 kfree(hx); 80 kfree(buf);
79 81
80 return ret; 82 return ret;
81} 83}
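
The dvb-usb-firmware rework is a DMA-safety fix: buffers handed to USB transfer routines must live on the heap, because with CONFIG_VMAP_STACK the stack is vmalloc-backed and not DMA-able, and the old code passed the address of a stack u8. The patch reuses one kmalloc'd buffer, sized for a struct hexline, for both the firmware records and the one-byte CPU start/stop writes; the shape of the fix:

    u8 *buf;

    buf = kmalloc(sizeof(struct hexline), GFP_KERNEL);  /* heap: DMA-safe */
    if (!buf)
            return -ENOMEM;

    buf[0] = 1;     /* stop the CPU */
    /* ... usb_cypress_writemem(udev, cpu_cs_register, buf, 1) ... */

    buf[0] = 0;     /* restart the CPU, then: */
    kfree(buf);
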
diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
index 5457c361ad58..bf0fe0137dfe 100644
--- a/drivers/memory/omap-gpmc.c
+++ b/drivers/memory/omap-gpmc.c
@@ -1947,9 +1947,7 @@ static int gpmc_probe_onenand_child(struct platform_device *pdev,
1947 if (!of_property_read_u32(child, "dma-channel", &val)) 1947 if (!of_property_read_u32(child, "dma-channel", &val))
1948 gpmc_onenand_data->dma_channel = val; 1948 gpmc_onenand_data->dma_channel = val;
1949 1949
1950 gpmc_onenand_init(gpmc_onenand_data); 1950 return gpmc_onenand_init(gpmc_onenand_data);
1951
1952 return 0;
1953} 1951}
1954#else 1952#else
1955static int gpmc_probe_onenand_child(struct platform_device *pdev, 1953static int gpmc_probe_onenand_child(struct platform_device *pdev,
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
index 91f645992c94..b27ea98b781f 100644
--- a/drivers/misc/cxl/pci.c
+++ b/drivers/misc/cxl/pci.c
@@ -1792,15 +1792,14 @@ static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev,
1792 1792
1793 /* If we're permanently dead, give up. */ 1793 /* If we're permanently dead, give up. */
1794 if (state == pci_channel_io_perm_failure) { 1794 if (state == pci_channel_io_perm_failure) {
1795 /* Tell the AFU drivers; but we don't care what they
1796 * say, we're going away.
1797 */
1798 for (i = 0; i < adapter->slices; i++) { 1795 for (i = 0; i < adapter->slices; i++) {
1799 afu = adapter->afu[i]; 1796 afu = adapter->afu[i];
1800 /* Only participate in EEH if we are on a virtual PHB */ 1797 /*
1801 if (afu->phb == NULL) 1798 * Tell the AFU drivers; but we don't care what they
1802 return PCI_ERS_RESULT_NONE; 1799 * say, we're going away.
1803 cxl_vphb_error_detected(afu, state); 1800 */
1801 if (afu->phb != NULL)
1802 cxl_vphb_error_detected(afu, state);
1804 } 1803 }
1805 return PCI_ERS_RESULT_DISCONNECT; 1804 return PCI_ERS_RESULT_DISCONNECT;
1806 } 1805 }
diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c
index 3600c9993a98..29f2daed37e0 100644
--- a/drivers/misc/mei/bus-fixup.c
+++ b/drivers/misc/mei/bus-fixup.c
@@ -112,11 +112,9 @@ struct mkhi_msg {
112 112
113static int mei_osver(struct mei_cl_device *cldev) 113static int mei_osver(struct mei_cl_device *cldev)
114{ 114{
115 int ret;
116 const size_t size = sizeof(struct mkhi_msg_hdr) + 115 const size_t size = sizeof(struct mkhi_msg_hdr) +
117 sizeof(struct mkhi_fwcaps) + 116 sizeof(struct mkhi_fwcaps) +
118 sizeof(struct mei_os_ver); 117 sizeof(struct mei_os_ver);
119 size_t length = 8;
120 char buf[size]; 118 char buf[size];
121 struct mkhi_msg *req; 119 struct mkhi_msg *req;
122 struct mkhi_fwcaps *fwcaps; 120 struct mkhi_fwcaps *fwcaps;
@@ -137,15 +135,7 @@ static int mei_osver(struct mei_cl_device *cldev)
137 os_ver = (struct mei_os_ver *)fwcaps->data; 135 os_ver = (struct mei_os_ver *)fwcaps->data;
138 os_ver->os_type = OSTYPE_LINUX; 136 os_ver->os_type = OSTYPE_LINUX;
139 137
140 ret = __mei_cl_send(cldev->cl, buf, size, mode); 138 return __mei_cl_send(cldev->cl, buf, size, mode);
141 if (ret < 0)
142 return ret;
143
144 ret = __mei_cl_recv(cldev->cl, buf, length, 0);
145 if (ret < 0)
146 return ret;
147
148 return 0;
149} 139}
150 140
151static void mei_mkhi_fix(struct mei_cl_device *cldev) 141static void mei_mkhi_fix(struct mei_cl_device *cldev)
@@ -160,7 +150,7 @@ static void mei_mkhi_fix(struct mei_cl_device *cldev)
160 return; 150 return;
161 151
162 ret = mei_osver(cldev); 152 ret = mei_osver(cldev);
163 if (ret) 153 if (ret < 0)
164 dev_err(&cldev->dev, "OS version command failed %d\n", ret); 154 dev_err(&cldev->dev, "OS version command failed %d\n", ret);
165 155
166 mei_cldev_disable(cldev); 156 mei_cldev_disable(cldev);
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index cfb1cdf176fa..13c55b8f9261 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -124,8 +124,6 @@ int mei_reset(struct mei_device *dev)
124 124
125 mei_clear_interrupts(dev); 125 mei_clear_interrupts(dev);
126 126
127 mei_synchronize_irq(dev);
128
129 /* we're already in reset, cancel the init timer 127 /* we're already in reset, cancel the init timer
130 * if the reset was called due the hbm protocol error 128 * if the reset was called due the hbm protocol error
131 * we need to call it before hw start 129 * we need to call it before hw start
@@ -304,6 +302,9 @@ static void mei_reset_work(struct work_struct *work)
304 container_of(work, struct mei_device, reset_work); 302 container_of(work, struct mei_device, reset_work);
305 int ret; 303 int ret;
306 304
305 mei_clear_interrupts(dev);
306 mei_synchronize_irq(dev);
307
307 mutex_lock(&dev->device_lock); 308 mutex_lock(&dev->device_lock);
308 309
309 ret = mei_reset(dev); 310 ret = mei_reset(dev);
@@ -328,6 +329,9 @@ void mei_stop(struct mei_device *dev)
328 329
329 mei_cancel_work(dev); 330 mei_cancel_work(dev);
330 331
332 mei_clear_interrupts(dev);
333 mei_synchronize_irq(dev);
334
331 mutex_lock(&dev->device_lock); 335 mutex_lock(&dev->device_lock);
332 336
333 dev->dev_state = MEI_DEV_POWER_DOWN; 337 dev->dev_state = MEI_DEV_POWER_DOWN;
diff --git a/drivers/misc/vmw_vmci/vmci_guest.c b/drivers/misc/vmw_vmci/vmci_guest.c
index 9d659542a335..dad5abee656e 100644
--- a/drivers/misc/vmw_vmci/vmci_guest.c
+++ b/drivers/misc/vmw_vmci/vmci_guest.c
@@ -566,10 +566,10 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
566 */ 566 */
567 error = pci_alloc_irq_vectors(pdev, VMCI_MAX_INTRS, VMCI_MAX_INTRS, 567 error = pci_alloc_irq_vectors(pdev, VMCI_MAX_INTRS, VMCI_MAX_INTRS,
568 PCI_IRQ_MSIX); 568 PCI_IRQ_MSIX);
569 if (error) { 569 if (error < 0) {
570 error = pci_alloc_irq_vectors(pdev, 1, 1, 570 error = pci_alloc_irq_vectors(pdev, 1, 1,
571 PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY); 571 PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY);
572 if (error) 572 if (error < 0)
573 goto err_remove_bitmap; 573 goto err_remove_bitmap;
574 } else { 574 } else {
575 vmci_dev->exclusive_vectors = true; 575 vmci_dev->exclusive_vectors = true;
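
The vmw_vmci fix corrects a common pci_alloc_irq_vectors() misuse: on success it returns the number of vectors allocated (a positive count), not zero, so "if (error)" treated every success as a failure and always fell into the single-vector path. Only negative returns are errors. Minimal correct usage:

    int nvec;

    nvec = pci_alloc_irq_vectors(pdev, 1, 4,
                                 PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY);
    if (nvec < 0)
            return nvec;    /* negative errno on failure */

    /* nvec >= 1: the number of vectors actually granted */
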
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 1621fa08e206..ff3da960c473 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -1560,11 +1560,8 @@ static bool mmc_blk_rw_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
1560 struct mmc_blk_request *brq, struct request *req, 1560 struct mmc_blk_request *brq, struct request *req,
1561 bool old_req_pending) 1561 bool old_req_pending)
1562{ 1562{
1563 struct mmc_queue_req *mq_rq;
1564 bool req_pending; 1563 bool req_pending;
1565 1564
1566 mq_rq = container_of(brq, struct mmc_queue_req, brq);
1567
1568 /* 1565 /*
1569 * If this is an SD card and we're writing, we can first 1566 * If this is an SD card and we're writing, we can first
1570 * mark the known good sectors as ok. 1567 * mark the known good sectors as ok.
@@ -1701,7 +1698,8 @@ static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req)
1701 case MMC_BLK_CMD_ERR: 1698 case MMC_BLK_CMD_ERR:
1702 req_pending = mmc_blk_rw_cmd_err(md, card, brq, old_req, req_pending); 1699 req_pending = mmc_blk_rw_cmd_err(md, card, brq, old_req, req_pending);
1703 if (mmc_blk_reset(md, card->host, type)) { 1700 if (mmc_blk_reset(md, card->host, type)) {
1704 mmc_blk_rw_cmd_abort(card, old_req); 1701 if (req_pending)
1702 mmc_blk_rw_cmd_abort(card, old_req);
1705 mmc_blk_rw_try_restart(mq, new_req); 1703 mmc_blk_rw_try_restart(mq, new_req);
1706 return; 1704 return;
1707 } 1705 }
@@ -1817,6 +1815,7 @@ void mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
1817 mmc_blk_issue_flush(mq, req); 1815 mmc_blk_issue_flush(mq, req);
1818 } else { 1816 } else {
1819 mmc_blk_issue_rw_rq(mq, req); 1817 mmc_blk_issue_rw_rq(mq, req);
1818 card->host->context_info.is_waiting_last_req = false;
1820 } 1819 }
1821 1820
1822out: 1821out:
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 7fd722868875..b502601df228 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -1730,7 +1730,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
1730 err = mmc_select_hs400(card); 1730 err = mmc_select_hs400(card);
1731 if (err) 1731 if (err)
1732 goto free_card; 1732 goto free_card;
1733 } else { 1733 } else if (!mmc_card_hs400es(card)) {
1734 /* Select the desired bus width optionally */ 1734 /* Select the desired bus width optionally */
1735 err = mmc_select_bus_width(card); 1735 err = mmc_select_bus_width(card);
1736 if (err > 0 && mmc_card_hs(card)) { 1736 if (err > 0 && mmc_card_hs(card)) {
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index 8e32580c12b5..b235d8da0602 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -580,7 +580,7 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
580 } 580 }
581 } 581 }
582 sdr_set_field(host->base + MSDC_CFG, MSDC_CFG_CKMOD | MSDC_CFG_CKDIV, 582 sdr_set_field(host->base + MSDC_CFG, MSDC_CFG_CKMOD | MSDC_CFG_CKDIV,
583 (mode << 8) | (div % 0xff)); 583 (mode << 8) | div);
584 sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_CKPDN); 584 sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_CKPDN);
585 while (!(readl(host->base + MSDC_CFG) & MSDC_CFG_CKSTB)) 585 while (!(readl(host->base + MSDC_CFG) & MSDC_CFG_CKSTB))
586 cpu_relax(); 586 cpu_relax();
@@ -1559,7 +1559,7 @@ static int msdc_drv_probe(struct platform_device *pdev)
1559 host->src_clk_freq = clk_get_rate(host->src_clk); 1559 host->src_clk_freq = clk_get_rate(host->src_clk);
1560 /* Set host parameters to mmc */ 1560 /* Set host parameters to mmc */
1561 mmc->ops = &mt_msdc_ops; 1561 mmc->ops = &mt_msdc_ops;
1562 mmc->f_min = host->src_clk_freq / (4 * 255); 1562 mmc->f_min = DIV_ROUND_UP(host->src_clk_freq, 4 * 255);
1563 1563
1564 mmc->caps |= MMC_CAP_ERASE | MMC_CAP_CMD23; 1564 mmc->caps |= MMC_CAP_ERASE | MMC_CAP_CMD23;
1565 /* MMC core transfer sizes tunable parameters */ 1565 /* MMC core transfer sizes tunable parameters */
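
Two distinct fixes in mtk-sd above: the clock-divider write used a
modulo ("div % 0xff") where the plain value was meant, so a divider of
255 was programmed as 0 and produced a far-too-fast clock; and f_min is
now rounded up so the advertised minimum never falls below what the
largest divider can actually reach. A worked example of the rounding,
assuming a 400 MHz source clock:

#include <linux/kernel.h>	/* DIV_ROUND_UP() */

unsigned long src = 400000000UL;	/* assumed source clock */
/* plain division: 392156 Hz, just below the real hardware minimum */
unsigned long f_min_floor = src / (4 * 255);
/* DIV_ROUND_UP: 392157 Hz, safely at or above it */
unsigned long f_min_ceil = DIV_ROUND_UP(src, 4 * 255);
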
diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
index 410a55b1c25f..1cfd7f900339 100644
--- a/drivers/mmc/host/sdhci-of-arasan.c
+++ b/drivers/mmc/host/sdhci-of-arasan.c
@@ -28,13 +28,9 @@
28#include "sdhci-pltfm.h" 28#include "sdhci-pltfm.h"
29#include <linux/of.h> 29#include <linux/of.h>
30 30
31#define SDHCI_ARASAN_CLK_CTRL_OFFSET 0x2c
32#define SDHCI_ARASAN_VENDOR_REGISTER 0x78 31#define SDHCI_ARASAN_VENDOR_REGISTER 0x78
33 32
34#define VENDOR_ENHANCED_STROBE BIT(0) 33#define VENDOR_ENHANCED_STROBE BIT(0)
35#define CLK_CTRL_TIMEOUT_SHIFT 16
36#define CLK_CTRL_TIMEOUT_MASK (0xf << CLK_CTRL_TIMEOUT_SHIFT)
37#define CLK_CTRL_TIMEOUT_MIN_EXP 13
38 34
39#define PHY_CLK_TOO_SLOW_HZ 400000 35#define PHY_CLK_TOO_SLOW_HZ 400000
40 36
@@ -163,15 +159,15 @@ static int sdhci_arasan_syscon_write(struct sdhci_host *host,
163 159
164static unsigned int sdhci_arasan_get_timeout_clock(struct sdhci_host *host) 160static unsigned int sdhci_arasan_get_timeout_clock(struct sdhci_host *host)
165{ 161{
166 u32 div;
167 unsigned long freq; 162 unsigned long freq;
168 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 163 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
169 164
170 div = readl(host->ioaddr + SDHCI_ARASAN_CLK_CTRL_OFFSET); 165 /* SDHCI timeout clock is in kHz */
171 div = (div & CLK_CTRL_TIMEOUT_MASK) >> CLK_CTRL_TIMEOUT_SHIFT; 166 freq = DIV_ROUND_UP(clk_get_rate(pltfm_host->clk), 1000);
172 167
173 freq = clk_get_rate(pltfm_host->clk); 168 /* or in MHz */
174 freq /= 1 << (CLK_CTRL_TIMEOUT_MIN_EXP + div); 169 if (host->caps & SDHCI_TIMEOUT_CLK_UNIT)
170 freq = DIV_ROUND_UP(freq, 1000);
175 171
176 return freq; 172 return freq;
177} 173}
diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
index 2f9ad213377a..d5430ed02a67 100644
--- a/drivers/mmc/host/sdhci-of-at91.c
+++ b/drivers/mmc/host/sdhci-of-at91.c
@@ -29,6 +29,8 @@
29 29
30#include "sdhci-pltfm.h" 30#include "sdhci-pltfm.h"
31 31
32#define SDMMC_MC1R 0x204
33#define SDMMC_MC1R_DDR BIT(3)
32#define SDMMC_CACR 0x230 34#define SDMMC_CACR 0x230
33#define SDMMC_CACR_CAPWREN BIT(0) 35#define SDMMC_CACR_CAPWREN BIT(0)
34#define SDMMC_CACR_KEY (0x46 << 8) 36#define SDMMC_CACR_KEY (0x46 << 8)
@@ -85,11 +87,37 @@ static void sdhci_at91_set_clock(struct sdhci_host *host, unsigned int clock)
85 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); 87 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
86} 88}
87 89
90/*
91 * In this specific implementation of the SDHCI controller, the power register
92 * needs to have a valid voltage set even when the power supply is managed by
93 * an external regulator.
94 */
95static void sdhci_at91_set_power(struct sdhci_host *host, unsigned char mode,
96 unsigned short vdd)
97{
98 if (!IS_ERR(host->mmc->supply.vmmc)) {
99 struct mmc_host *mmc = host->mmc;
100
101 spin_unlock_irq(&host->lock);
102 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
103 spin_lock_irq(&host->lock);
104 }
105 sdhci_set_power_noreg(host, mode, vdd);
106}
107
108void sdhci_at91_set_uhs_signaling(struct sdhci_host *host, unsigned int timing)
109{
110 if (timing == MMC_TIMING_MMC_DDR52)
111 sdhci_writeb(host, SDMMC_MC1R_DDR, SDMMC_MC1R);
112 sdhci_set_uhs_signaling(host, timing);
113}
114
88static const struct sdhci_ops sdhci_at91_sama5d2_ops = { 115static const struct sdhci_ops sdhci_at91_sama5d2_ops = {
89 .set_clock = sdhci_at91_set_clock, 116 .set_clock = sdhci_at91_set_clock,
90 .set_bus_width = sdhci_set_bus_width, 117 .set_bus_width = sdhci_set_bus_width,
91 .reset = sdhci_reset, 118 .reset = sdhci_reset,
92 .set_uhs_signaling = sdhci_set_uhs_signaling, 119 .set_uhs_signaling = sdhci_at91_set_uhs_signaling,
120 .set_power = sdhci_at91_set_power,
93}; 121};
94 122
95static const struct sdhci_pltfm_data soc_data_sama5d2 = { 123static const struct sdhci_pltfm_data soc_data_sama5d2 = {
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index 982b3e349426..86560d590786 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -451,6 +451,8 @@ static void sdhci_intel_set_power(struct sdhci_host *host, unsigned char mode,
451 if (mode == MMC_POWER_OFF) 451 if (mode == MMC_POWER_OFF)
452 return; 452 return;
453 453
454 spin_unlock_irq(&host->lock);
455
454 /* 456 /*
455 * Bus power might not enable after D3 -> D0 transition due to the 457 * Bus power might not enable after D3 -> D0 transition due to the
456 * present state not yet having propagated. Retry for up to 2ms. 458 * present state not yet having propagated. Retry for up to 2ms.
@@ -463,6 +465,8 @@ static void sdhci_intel_set_power(struct sdhci_host *host, unsigned char mode,
463 reg |= SDHCI_POWER_ON; 465 reg |= SDHCI_POWER_ON;
464 sdhci_writeb(host, reg, SDHCI_POWER_CONTROL); 466 sdhci_writeb(host, reg, SDHCI_POWER_CONTROL);
465 } 467 }
468
469 spin_lock_irq(&host->lock);
466} 470}
467 471
468static const struct sdhci_ops sdhci_intel_byt_ops = { 472static const struct sdhci_ops sdhci_intel_byt_ops = {
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 6fdd7a70f229..63bc33a54d0d 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -1362,7 +1362,9 @@ void sdhci_enable_clk(struct sdhci_host *host, u16 clk)
1362 return; 1362 return;
1363 } 1363 }
1364 timeout--; 1364 timeout--;
1365 mdelay(1); 1365 spin_unlock_irq(&host->lock);
1366 usleep_range(900, 1100);
1367 spin_lock_irq(&host->lock);
1366 } 1368 }
1367 1369
1368 clk |= SDHCI_CLOCK_CARD_EN; 1370 clk |= SDHCI_CLOCK_CARD_EN;
@@ -1828,6 +1830,9 @@ static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
1828 struct sdhci_host *host = mmc_priv(mmc); 1830 struct sdhci_host *host = mmc_priv(mmc);
1829 unsigned long flags; 1831 unsigned long flags;
1830 1832
1833 if (enable)
1834 pm_runtime_get_noresume(host->mmc->parent);
1835
1831 spin_lock_irqsave(&host->lock, flags); 1836 spin_lock_irqsave(&host->lock, flags);
1832 if (enable) 1837 if (enable)
1833 host->flags |= SDHCI_SDIO_IRQ_ENABLED; 1838 host->flags |= SDHCI_SDIO_IRQ_ENABLED;
@@ -1836,6 +1841,9 @@ static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
1836 1841
1837 sdhci_enable_sdio_irq_nolock(host, enable); 1842 sdhci_enable_sdio_irq_nolock(host, enable);
1838 spin_unlock_irqrestore(&host->lock, flags); 1843 spin_unlock_irqrestore(&host->lock, flags);
1844
1845 if (!enable)
1846 pm_runtime_put_noidle(host->mmc->parent);
1839} 1847}
1840 1848
1841static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc, 1849static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
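
Two techniques in the sdhci core change: the clock-stabilization wait
now sleeps via usleep_range() with the lock dropped instead of
busy-waiting a millisecond in mdelay(), and SDIO-IRQ enablement pins
the controller with a runtime-PM usage reference so it cannot be
runtime-suspended while a card interrupt may arrive. The
noresume/noidle variants only adjust the usage count, without forcing
an immediate resume or idle re-evaluation. A sketch of the pairing:

#include <linux/pm_runtime.h>

/* Sketch: hold a runtime-PM reference across the window in which an
 * out-of-band event (an SDIO card interrupt) must reach the device. */
static void example_sdio_irq(struct device *dev, bool enable)
{
	if (enable)
		pm_runtime_get_noresume(dev);	/* bump usage count only */

	/* ... program the interrupt-enable bits under the host lock ... */

	if (!enable)
		pm_runtime_put_noidle(dev);	/* drop usage count only */
}
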
diff --git a/drivers/mmc/host/ushc.c b/drivers/mmc/host/ushc.c
index d2c386f09d69..1d843357422e 100644
--- a/drivers/mmc/host/ushc.c
+++ b/drivers/mmc/host/ushc.c
@@ -426,6 +426,9 @@ static int ushc_probe(struct usb_interface *intf, const struct usb_device_id *id
426 struct ushc_data *ushc; 426 struct ushc_data *ushc;
427 int ret; 427 int ret;
428 428
429 if (intf->cur_altsetting->desc.bNumEndpoints < 1)
430 return -ENODEV;
431
429 mmc = mmc_alloc_host(sizeof(struct ushc_data), &intf->dev); 432 mmc = mmc_alloc_host(sizeof(struct ushc_data), &intf->dev);
430 if (mmc == NULL) 433 if (mmc == NULL)
431 return -ENOMEM; 434 return -ENOMEM;
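
The ushc check is descriptor hardening: a crafted or broken USB device
can report an interface with zero endpoints, and indexing
cur_altsetting->endpoint[] without checking bNumEndpoints reads past
the array. Rejecting such devices with -ENODEV before any endpoint
access is the usual pattern; a sketch:

#include <linux/usb.h>

/* Sketch: validate descriptor-supplied counts before indexing. */
static int example_check_endpoints(struct usb_interface *intf)
{
	struct usb_host_interface *alt = intf->cur_altsetting;

	if (alt->desc.bNumEndpoints < 1)
		return -ENODEV;		/* malformed or hostile device */

	/* safe: endpoint[0] is known to exist */
	return usb_endpoint_is_bulk_out(&alt->endpoint[0].desc) ?
		0 : -ENODEV;
}
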
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
index 8a280e7d66bd..127adbeefb10 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -984,29 +984,29 @@
984#define XP_ECC_CNT1_DESC_DED_WIDTH 8 984#define XP_ECC_CNT1_DESC_DED_WIDTH 8
985#define XP_ECC_CNT1_DESC_SEC_INDEX 0 985#define XP_ECC_CNT1_DESC_SEC_INDEX 0
986#define XP_ECC_CNT1_DESC_SEC_WIDTH 8 986#define XP_ECC_CNT1_DESC_SEC_WIDTH 8
987#define XP_ECC_IER_DESC_DED_INDEX 0 987#define XP_ECC_IER_DESC_DED_INDEX 5
988#define XP_ECC_IER_DESC_DED_WIDTH 1 988#define XP_ECC_IER_DESC_DED_WIDTH 1
989#define XP_ECC_IER_DESC_SEC_INDEX 1 989#define XP_ECC_IER_DESC_SEC_INDEX 4
990#define XP_ECC_IER_DESC_SEC_WIDTH 1 990#define XP_ECC_IER_DESC_SEC_WIDTH 1
991#define XP_ECC_IER_RX_DED_INDEX 2 991#define XP_ECC_IER_RX_DED_INDEX 3
992#define XP_ECC_IER_RX_DED_WIDTH 1 992#define XP_ECC_IER_RX_DED_WIDTH 1
993#define XP_ECC_IER_RX_SEC_INDEX 3 993#define XP_ECC_IER_RX_SEC_INDEX 2
994#define XP_ECC_IER_RX_SEC_WIDTH 1 994#define XP_ECC_IER_RX_SEC_WIDTH 1
995#define XP_ECC_IER_TX_DED_INDEX 4 995#define XP_ECC_IER_TX_DED_INDEX 1
996#define XP_ECC_IER_TX_DED_WIDTH 1 996#define XP_ECC_IER_TX_DED_WIDTH 1
997#define XP_ECC_IER_TX_SEC_INDEX 5 997#define XP_ECC_IER_TX_SEC_INDEX 0
998#define XP_ECC_IER_TX_SEC_WIDTH 1 998#define XP_ECC_IER_TX_SEC_WIDTH 1
999#define XP_ECC_ISR_DESC_DED_INDEX 0 999#define XP_ECC_ISR_DESC_DED_INDEX 5
1000#define XP_ECC_ISR_DESC_DED_WIDTH 1 1000#define XP_ECC_ISR_DESC_DED_WIDTH 1
1001#define XP_ECC_ISR_DESC_SEC_INDEX 1 1001#define XP_ECC_ISR_DESC_SEC_INDEX 4
1002#define XP_ECC_ISR_DESC_SEC_WIDTH 1 1002#define XP_ECC_ISR_DESC_SEC_WIDTH 1
1003#define XP_ECC_ISR_RX_DED_INDEX 2 1003#define XP_ECC_ISR_RX_DED_INDEX 3
1004#define XP_ECC_ISR_RX_DED_WIDTH 1 1004#define XP_ECC_ISR_RX_DED_WIDTH 1
1005#define XP_ECC_ISR_RX_SEC_INDEX 3 1005#define XP_ECC_ISR_RX_SEC_INDEX 2
1006#define XP_ECC_ISR_RX_SEC_WIDTH 1 1006#define XP_ECC_ISR_RX_SEC_WIDTH 1
1007#define XP_ECC_ISR_TX_DED_INDEX 4 1007#define XP_ECC_ISR_TX_DED_INDEX 1
1008#define XP_ECC_ISR_TX_DED_WIDTH 1 1008#define XP_ECC_ISR_TX_DED_WIDTH 1
1009#define XP_ECC_ISR_TX_SEC_INDEX 5 1009#define XP_ECC_ISR_TX_SEC_INDEX 0
1010#define XP_ECC_ISR_TX_SEC_WIDTH 1 1010#define XP_ECC_ISR_TX_SEC_WIDTH 1
1011#define XP_I2C_MUTEX_BUSY_INDEX 31 1011#define XP_I2C_MUTEX_BUSY_INDEX 31
1012#define XP_I2C_MUTEX_BUSY_WIDTH 1 1012#define XP_I2C_MUTEX_BUSY_WIDTH 1
@@ -1148,8 +1148,8 @@
1148#define RX_PACKET_ATTRIBUTES_CSUM_DONE_WIDTH 1 1148#define RX_PACKET_ATTRIBUTES_CSUM_DONE_WIDTH 1
1149#define RX_PACKET_ATTRIBUTES_VLAN_CTAG_INDEX 1 1149#define RX_PACKET_ATTRIBUTES_VLAN_CTAG_INDEX 1
1150#define RX_PACKET_ATTRIBUTES_VLAN_CTAG_WIDTH 1 1150#define RX_PACKET_ATTRIBUTES_VLAN_CTAG_WIDTH 1
1151#define RX_PACKET_ATTRIBUTES_INCOMPLETE_INDEX 2 1151#define RX_PACKET_ATTRIBUTES_LAST_INDEX 2
1152#define RX_PACKET_ATTRIBUTES_INCOMPLETE_WIDTH 1 1152#define RX_PACKET_ATTRIBUTES_LAST_WIDTH 1
1153#define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_INDEX 3 1153#define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_INDEX 3
1154#define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_WIDTH 1 1154#define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_WIDTH 1
1155#define RX_PACKET_ATTRIBUTES_CONTEXT_INDEX 4 1155#define RX_PACKET_ATTRIBUTES_CONTEXT_INDEX 4
@@ -1158,6 +1158,8 @@
1158#define RX_PACKET_ATTRIBUTES_RX_TSTAMP_WIDTH 1 1158#define RX_PACKET_ATTRIBUTES_RX_TSTAMP_WIDTH 1
1159#define RX_PACKET_ATTRIBUTES_RSS_HASH_INDEX 6 1159#define RX_PACKET_ATTRIBUTES_RSS_HASH_INDEX 6
1160#define RX_PACKET_ATTRIBUTES_RSS_HASH_WIDTH 1 1160#define RX_PACKET_ATTRIBUTES_RSS_HASH_WIDTH 1
1161#define RX_PACKET_ATTRIBUTES_FIRST_INDEX 7
1162#define RX_PACKET_ATTRIBUTES_FIRST_WIDTH 1
1161 1163
1162#define RX_NORMAL_DESC0_OVT_INDEX 0 1164#define RX_NORMAL_DESC0_OVT_INDEX 0
1163#define RX_NORMAL_DESC0_OVT_WIDTH 16 1165#define RX_NORMAL_DESC0_OVT_WIDTH 16
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index 937f37a5dcb2..24a687ce4388 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -1896,10 +1896,15 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
1896 1896
1897 /* Get the header length */ 1897 /* Get the header length */
1898 if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, FD)) { 1898 if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, FD)) {
1899 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
1900 FIRST, 1);
1899 rdata->rx.hdr_len = XGMAC_GET_BITS_LE(rdesc->desc2, 1901 rdata->rx.hdr_len = XGMAC_GET_BITS_LE(rdesc->desc2,
1900 RX_NORMAL_DESC2, HL); 1902 RX_NORMAL_DESC2, HL);
1901 if (rdata->rx.hdr_len) 1903 if (rdata->rx.hdr_len)
1902 pdata->ext_stats.rx_split_header_packets++; 1904 pdata->ext_stats.rx_split_header_packets++;
1905 } else {
1906 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
1907 FIRST, 0);
1903 } 1908 }
1904 1909
1905 /* Get the RSS hash */ 1910 /* Get the RSS hash */
@@ -1922,19 +1927,16 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
1922 } 1927 }
1923 } 1928 }
1924 1929
1925 /* Get the packet length */ 1930 /* Not all the data has been transferred for this packet */
1926 rdata->rx.len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL); 1931 if (!XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD))
1927
1928 if (!XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD)) {
1929 /* Not all the data has been transferred for this packet */
1930 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
1931 INCOMPLETE, 1);
1932 return 0; 1932 return 0;
1933 }
1934 1933
1935 /* This is the last of the data for this packet */ 1934 /* This is the last of the data for this packet */
1936 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, 1935 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
1937 INCOMPLETE, 0); 1936 LAST, 1);
1937
1938 /* Get the packet length */
1939 rdata->rx.len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL);
1938 1940
1939 /* Set checksum done indicator as appropriate */ 1941 /* Set checksum done indicator as appropriate */
1940 if (netdev->features & NETIF_F_RXCSUM) 1942 if (netdev->features & NETIF_F_RXCSUM)
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 248f60d171a5..a713abd9d03e 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -1971,13 +1971,12 @@ static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
1971{ 1971{
1972 struct sk_buff *skb; 1972 struct sk_buff *skb;
1973 u8 *packet; 1973 u8 *packet;
1974 unsigned int copy_len;
1975 1974
1976 skb = napi_alloc_skb(napi, rdata->rx.hdr.dma_len); 1975 skb = napi_alloc_skb(napi, rdata->rx.hdr.dma_len);
1977 if (!skb) 1976 if (!skb)
1978 return NULL; 1977 return NULL;
1979 1978
1980 /* Start with the header buffer which may contain just the header 1979 /* Pull in the header buffer which may contain just the header
1981 * or the header plus data 1980 * or the header plus data
1982 */ 1981 */
1983 dma_sync_single_range_for_cpu(pdata->dev, rdata->rx.hdr.dma_base, 1982 dma_sync_single_range_for_cpu(pdata->dev, rdata->rx.hdr.dma_base,
@@ -1986,30 +1985,49 @@ static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
1986 1985
1987 packet = page_address(rdata->rx.hdr.pa.pages) + 1986 packet = page_address(rdata->rx.hdr.pa.pages) +
1988 rdata->rx.hdr.pa.pages_offset; 1987 rdata->rx.hdr.pa.pages_offset;
1989 copy_len = (rdata->rx.hdr_len) ? rdata->rx.hdr_len : len; 1988 skb_copy_to_linear_data(skb, packet, len);
1990 copy_len = min(rdata->rx.hdr.dma_len, copy_len); 1989 skb_put(skb, len);
1991 skb_copy_to_linear_data(skb, packet, copy_len);
1992 skb_put(skb, copy_len);
1993
1994 len -= copy_len;
1995 if (len) {
1996 /* Add the remaining data as a frag */
1997 dma_sync_single_range_for_cpu(pdata->dev,
1998 rdata->rx.buf.dma_base,
1999 rdata->rx.buf.dma_off,
2000 rdata->rx.buf.dma_len,
2001 DMA_FROM_DEVICE);
2002
2003 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
2004 rdata->rx.buf.pa.pages,
2005 rdata->rx.buf.pa.pages_offset,
2006 len, rdata->rx.buf.dma_len);
2007 rdata->rx.buf.pa.pages = NULL;
2008 }
2009 1990
2010 return skb; 1991 return skb;
2011} 1992}
2012 1993
1994static unsigned int xgbe_rx_buf1_len(struct xgbe_ring_data *rdata,
1995 struct xgbe_packet_data *packet)
1996{
1997 /* Always zero if not the first descriptor */
1998 if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, FIRST))
1999 return 0;
2000
2001 /* First descriptor with split header, return header length */
2002 if (rdata->rx.hdr_len)
2003 return rdata->rx.hdr_len;
2004
2005 /* First descriptor but not the last descriptor and no split header,
2006 * so the full buffer was used
2007 */
2008 if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST))
2009 return rdata->rx.hdr.dma_len;
2010
2011 /* First descriptor and last descriptor and no split header, so
2012 * calculate how much of the buffer was used
2013 */
2014 return min_t(unsigned int, rdata->rx.hdr.dma_len, rdata->rx.len);
2015}
2016
2017static unsigned int xgbe_rx_buf2_len(struct xgbe_ring_data *rdata,
2018 struct xgbe_packet_data *packet,
2019 unsigned int len)
2020{
2021 /* Always the full buffer if not the last descriptor */
2022 if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST))
2023 return rdata->rx.buf.dma_len;
2024
2025 /* Last descriptor so calculate how much of the buffer was used
2026 * for the last bit of data
2027 */
2028 return rdata->rx.len - len;
2029}
2030
2013static int xgbe_tx_poll(struct xgbe_channel *channel) 2031static int xgbe_tx_poll(struct xgbe_channel *channel)
2014{ 2032{
2015 struct xgbe_prv_data *pdata = channel->pdata; 2033 struct xgbe_prv_data *pdata = channel->pdata;
@@ -2092,8 +2110,8 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
2092 struct napi_struct *napi; 2110 struct napi_struct *napi;
2093 struct sk_buff *skb; 2111 struct sk_buff *skb;
2094 struct skb_shared_hwtstamps *hwtstamps; 2112 struct skb_shared_hwtstamps *hwtstamps;
2095 unsigned int incomplete, error, context_next, context; 2113 unsigned int last, error, context_next, context;
2096 unsigned int len, rdesc_len, max_len; 2114 unsigned int len, buf1_len, buf2_len, max_len;
2097 unsigned int received = 0; 2115 unsigned int received = 0;
2098 int packet_count = 0; 2116 int packet_count = 0;
2099 2117
@@ -2103,7 +2121,7 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
2103 if (!ring) 2121 if (!ring)
2104 return 0; 2122 return 0;
2105 2123
2106 incomplete = 0; 2124 last = 0;
2107 context_next = 0; 2125 context_next = 0;
2108 2126
2109 napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi; 2127 napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;
@@ -2137,9 +2155,8 @@ read_again:
2137 received++; 2155 received++;
2138 ring->cur++; 2156 ring->cur++;
2139 2157
2140 incomplete = XGMAC_GET_BITS(packet->attributes, 2158 last = XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
2141 RX_PACKET_ATTRIBUTES, 2159 LAST);
2142 INCOMPLETE);
2143 context_next = XGMAC_GET_BITS(packet->attributes, 2160 context_next = XGMAC_GET_BITS(packet->attributes,
2144 RX_PACKET_ATTRIBUTES, 2161 RX_PACKET_ATTRIBUTES,
2145 CONTEXT_NEXT); 2162 CONTEXT_NEXT);
@@ -2148,7 +2165,7 @@ read_again:
2148 CONTEXT); 2165 CONTEXT);
2149 2166
2150 /* Earlier error, just drain the remaining data */ 2167 /* Earlier error, just drain the remaining data */
2151 if ((incomplete || context_next) && error) 2168 if ((!last || context_next) && error)
2152 goto read_again; 2169 goto read_again;
2153 2170
2154 if (error || packet->errors) { 2171 if (error || packet->errors) {
@@ -2160,16 +2177,22 @@ read_again:
2160 } 2177 }
2161 2178
2162 if (!context) { 2179 if (!context) {
2163 /* Length is cumulative, get this descriptor's length */ 2180 /* Get the data length in the descriptor buffers */
2164 rdesc_len = rdata->rx.len - len; 2181 buf1_len = xgbe_rx_buf1_len(rdata, packet);
2165 len += rdesc_len; 2182 len += buf1_len;
2183 buf2_len = xgbe_rx_buf2_len(rdata, packet, len);
2184 len += buf2_len;
2166 2185
2167 if (rdesc_len && !skb) { 2186 if (!skb) {
2168 skb = xgbe_create_skb(pdata, napi, rdata, 2187 skb = xgbe_create_skb(pdata, napi, rdata,
2169 rdesc_len); 2188 buf1_len);
2170 if (!skb) 2189 if (!skb) {
2171 error = 1; 2190 error = 1;
2172 } else if (rdesc_len) { 2191 goto skip_data;
2192 }
2193 }
2194
2195 if (buf2_len) {
2173 dma_sync_single_range_for_cpu(pdata->dev, 2196 dma_sync_single_range_for_cpu(pdata->dev,
2174 rdata->rx.buf.dma_base, 2197 rdata->rx.buf.dma_base,
2175 rdata->rx.buf.dma_off, 2198 rdata->rx.buf.dma_off,
@@ -2179,13 +2202,14 @@ read_again:
2179 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, 2202 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
2180 rdata->rx.buf.pa.pages, 2203 rdata->rx.buf.pa.pages,
2181 rdata->rx.buf.pa.pages_offset, 2204 rdata->rx.buf.pa.pages_offset,
2182 rdesc_len, 2205 buf2_len,
2183 rdata->rx.buf.dma_len); 2206 rdata->rx.buf.dma_len);
2184 rdata->rx.buf.pa.pages = NULL; 2207 rdata->rx.buf.pa.pages = NULL;
2185 } 2208 }
2186 } 2209 }
2187 2210
2188 if (incomplete || context_next) 2211skip_data:
2212 if (!last || context_next)
2189 goto read_again; 2213 goto read_again;
2190 2214
2191 if (!skb) 2215 if (!skb)
@@ -2243,7 +2267,7 @@ next_packet:
2243 } 2267 }
2244 2268
2245 /* Check if we need to save state before leaving */ 2269 /* Check if we need to save state before leaving */
2246 if (received && (incomplete || context_next)) { 2270 if (received && (!last || context_next)) {
2247 rdata = XGBE_GET_DESC_DATA(ring, ring->cur); 2271 rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
2248 rdata->state_saved = 1; 2272 rdata->state_saved = 1;
2249 rdata->state.skb = skb; 2273 rdata->state.skb = skb;
@@ -2272,10 +2296,7 @@ static int xgbe_one_poll(struct napi_struct *napi, int budget)
2272 processed = xgbe_rx_poll(channel, budget); 2296 processed = xgbe_rx_poll(channel, budget);
2273 2297
2274 /* If we processed everything, we are done */ 2298 /* If we processed everything, we are done */
2275 if (processed < budget) { 2299 if ((processed < budget) && napi_complete_done(napi, processed)) {
2276 /* Turn off polling */
2277 napi_complete_done(napi, processed);
2278
2279 /* Enable Tx and Rx interrupts */ 2300 /* Enable Tx and Rx interrupts */
2280 if (pdata->channel_irq_mode) 2301 if (pdata->channel_irq_mode)
2281 xgbe_enable_rx_tx_int(pdata, channel); 2302 xgbe_enable_rx_tx_int(pdata, channel);
@@ -2317,10 +2338,7 @@ static int xgbe_all_poll(struct napi_struct *napi, int budget)
2317 } while ((processed < budget) && (processed != last_processed)); 2338 } while ((processed < budget) && (processed != last_processed));
2318 2339
2319 /* If we processed everything, we are done */ 2340 /* If we processed everything, we are done */
2320 if (processed < budget) { 2341 if ((processed < budget) && napi_complete_done(napi, processed)) {
2321 /* Turn off polling */
2322 napi_complete_done(napi, processed);
2323
2324 /* Enable Tx and Rx interrupts */ 2342 /* Enable Tx and Rx interrupts */
2325 xgbe_enable_rx_tx_ints(pdata); 2343 xgbe_enable_rx_tx_ints(pdata);
2326 } 2344 }
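
The xgbe RX rework replaces the cumulative-length INCOMPLETE scheme
with explicit FIRST/LAST descriptor attributes and per-buffer length
helpers (xgbe_rx_buf1_len/xgbe_rx_buf2_len). Separately, both poll
functions now honor the return value of napi_complete_done(), which
reports false when the NAPI instance did not actually leave the poll
list (busy polling, for instance); device interrupts may only be
re-enabled when it returns true. A sketch of that pattern, with the
two helpers assumed:

#include <linux/netdevice.h>

/* Sketch: only unmask device interrupts when NAPI really completed. */
static int example_poll(struct napi_struct *napi, int budget)
{
	int work = example_rx_clean(napi, budget);	/* assumed helper */

	if (work < budget && napi_complete_done(napi, work))
		example_enable_irqs(napi);		/* assumed helper */

	return work;
}
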
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
index dad63623be6a..5d6c40d86775 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
@@ -98,11 +98,7 @@ static int aq_ndev_change_mtu(struct net_device *ndev, int new_mtu)
98 98
99 if (err < 0) 99 if (err < 0)
100 goto err_exit; 100 goto err_exit;
101 101 ndev->mtu = new_mtu;
102 if (netif_running(ndev)) {
103 aq_ndev_close(ndev);
104 aq_ndev_open(ndev);
105 }
106 102
107err_exit: 103err_exit:
108 return err; 104 return err;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index ee78444bfb88..cdb02991f249 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -487,6 +487,9 @@ static unsigned int aq_nic_map_skb(struct aq_nic_s *self,
487 dx_buff->mss = skb_shinfo(skb)->gso_size; 487 dx_buff->mss = skb_shinfo(skb)->gso_size;
488 dx_buff->is_txc = 1U; 488 dx_buff->is_txc = 1U;
489 489
490 dx_buff->is_ipv6 =
491 (ip_hdr(skb)->version == 6) ? 1U : 0U;
492
490 dx = aq_ring_next_dx(ring, dx); 493 dx = aq_ring_next_dx(ring, dx);
491 dx_buff = &ring->buff_ring[dx]; 494 dx_buff = &ring->buff_ring[dx];
492 ++ret; 495 ++ret;
@@ -510,10 +513,22 @@ static unsigned int aq_nic_map_skb(struct aq_nic_s *self,
510 if (skb->ip_summed == CHECKSUM_PARTIAL) { 513 if (skb->ip_summed == CHECKSUM_PARTIAL) {
511 dx_buff->is_ip_cso = (htons(ETH_P_IP) == skb->protocol) ? 514 dx_buff->is_ip_cso = (htons(ETH_P_IP) == skb->protocol) ?
512 1U : 0U; 515 1U : 0U;
513 dx_buff->is_tcp_cso = 516
514 (ip_hdr(skb)->protocol == IPPROTO_TCP) ? 1U : 0U; 517 if (ip_hdr(skb)->version == 4) {
515 dx_buff->is_udp_cso = 518 dx_buff->is_tcp_cso =
516 (ip_hdr(skb)->protocol == IPPROTO_UDP) ? 1U : 0U; 519 (ip_hdr(skb)->protocol == IPPROTO_TCP) ?
520 1U : 0U;
521 dx_buff->is_udp_cso =
522 (ip_hdr(skb)->protocol == IPPROTO_UDP) ?
523 1U : 0U;
524 } else if (ip_hdr(skb)->version == 6) {
525 dx_buff->is_tcp_cso =
526 (ipv6_hdr(skb)->nexthdr == NEXTHDR_TCP) ?
527 1U : 0U;
528 dx_buff->is_udp_cso =
529 (ipv6_hdr(skb)->nexthdr == NEXTHDR_UDP) ?
530 1U : 0U;
531 }
517 } 532 }
518 533
519 for (; nr_frags--; ++frag_count) { 534 for (; nr_frags--; ++frag_count) {
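
The aq_nic change selects checksum-offload flags per IP version: for
IPv4 the L4 protocol comes from ip_hdr()->protocol, for IPv6 from
ipv6_hdr()->nexthdr (NEXTHDR_TCP and NEXTHDR_UDP are numerically
IPPROTO_TCP and IPPROTO_UDP). A common equivalent dispatches on
skb->protocol instead of re-reading the version nibble; a hedged
sketch, ignoring IPv6 extension headers as the driver itself does:

#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_ether.h>
#include <linux/skbuff.h>

/* Sketch: derive the L4 protocol for checksum-offload decisions. */
static u8 example_l4_proto(const struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP))
		return ip_hdr(skb)->protocol;
	if (skb->protocol == htons(ETH_P_IPV6))
		return ipv6_hdr(skb)->nexthdr;
	return 0;
}
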
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
index 581de71a958a..4c6c882c6a1c 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -213,9 +213,9 @@ void aq_pci_func_free_irqs(struct aq_pci_func_s *self)
213 if (!((1U << i) & self->msix_entry_mask)) 213 if (!((1U << i) & self->msix_entry_mask))
214 continue; 214 continue;
215 215
216 free_irq(pci_irq_vector(pdev, i), self->aq_vec[i]);
217 if (pdev->msix_enabled) 216 if (pdev->msix_enabled)
218 irq_set_affinity_hint(pci_irq_vector(pdev, i), NULL); 217 irq_set_affinity_hint(pci_irq_vector(pdev, i), NULL);
218 free_irq(pci_irq_vector(pdev, i), self->aq_vec[i]);
219 self->msix_entry_mask &= ~(1U << i); 219 self->msix_entry_mask &= ~(1U << i);
220 } 220 }
221} 221}
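
The aq_pci_func reorder matters because an IRQ's affinity hint must be
cleared while the vector is still requested; free_irq() checks for a
stale hint (and, to the best of our recollection, warns if one is left
installed). Sketch of the tear-down order:

#include <linux/interrupt.h>
#include <linux/pci.h>

/* Sketch: drop the affinity hint first, then free the vector. */
static void example_free_vector(struct pci_dev *pdev, int i, void *ctx)
{
	int irq = pci_irq_vector(pdev, i);

	if (pdev->msix_enabled)
		irq_set_affinity_hint(irq, NULL);
	free_irq(irq, ctx);
}
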
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index 0358e6072d45..3a8a4aa13687 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -101,6 +101,7 @@ int aq_ring_init(struct aq_ring_s *self)
101 self->hw_head = 0; 101 self->hw_head = 0;
102 self->sw_head = 0; 102 self->sw_head = 0;
103 self->sw_tail = 0; 103 self->sw_tail = 0;
104 spin_lock_init(&self->header.lock);
104 return 0; 105 return 0;
105} 106}
106 107
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
index 257254645068..eecd6d1c4d73 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
@@ -58,7 +58,8 @@ struct __packed aq_ring_buff_s {
58 u8 len_l2; 58 u8 len_l2;
59 u8 len_l3; 59 u8 len_l3;
60 u8 len_l4; 60 u8 len_l4;
61 u8 rsvd2; 61 u8 is_ipv6:1;
62 u8 rsvd2:7;
62 u32 len_pkt; 63 u32 len_pkt;
63 }; 64 };
64 }; 65 };
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
index a2b746a2dd50..4ee15ff06a44 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
@@ -433,6 +433,9 @@ static int hw_atl_a0_hw_ring_tx_xmit(struct aq_hw_s *self,
433 buff->len_l3 + 433 buff->len_l3 +
434 buff->len_l2); 434 buff->len_l2);
435 is_gso = true; 435 is_gso = true;
436
437 if (buff->is_ipv6)
438 txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_IPV6;
436 } else { 439 } else {
437 buff_pa_len = buff->len; 440 buff_pa_len = buff->len;
438 441
@@ -458,6 +461,7 @@ static int hw_atl_a0_hw_ring_tx_xmit(struct aq_hw_s *self,
458 if (unlikely(buff->is_eop)) { 461 if (unlikely(buff->is_eop)) {
459 txd->ctl |= HW_ATL_A0_TXD_CTL_EOP; 462 txd->ctl |= HW_ATL_A0_TXD_CTL_EOP;
460 txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_WB; 463 txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_WB;
464 is_gso = false;
461 } 465 }
462 } 466 }
463 467
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h
index 1093ea18823a..0592a0330cf0 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h
@@ -137,6 +137,7 @@ static struct aq_hw_caps_s hw_atl_a0_hw_caps_ = {
137 .tx_rings = HW_ATL_A0_TX_RINGS, 137 .tx_rings = HW_ATL_A0_TX_RINGS,
138 .rx_rings = HW_ATL_A0_RX_RINGS, 138 .rx_rings = HW_ATL_A0_RX_RINGS,
139 .hw_features = NETIF_F_HW_CSUM | 139 .hw_features = NETIF_F_HW_CSUM |
140 NETIF_F_RXCSUM |
140 NETIF_F_RXHASH | 141 NETIF_F_RXHASH |
141 NETIF_F_SG | 142 NETIF_F_SG |
142 NETIF_F_TSO, 143 NETIF_F_TSO,
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index cab2931dab9a..42150708191d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -471,6 +471,9 @@ static int hw_atl_b0_hw_ring_tx_xmit(struct aq_hw_s *self,
471 buff->len_l3 + 471 buff->len_l3 +
472 buff->len_l2); 472 buff->len_l2);
473 is_gso = true; 473 is_gso = true;
474
475 if (buff->is_ipv6)
476 txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_IPV6;
474 } else { 477 } else {
475 buff_pa_len = buff->len; 478 buff_pa_len = buff->len;
476 479
@@ -496,6 +499,7 @@ static int hw_atl_b0_hw_ring_tx_xmit(struct aq_hw_s *self,
496 if (unlikely(buff->is_eop)) { 499 if (unlikely(buff->is_eop)) {
497 txd->ctl |= HW_ATL_B0_TXD_CTL_EOP; 500 txd->ctl |= HW_ATL_B0_TXD_CTL_EOP;
498 txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_WB; 501 txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_WB;
502 is_gso = false;
499 } 503 }
500 } 504 }
501 505
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
index 8bdee3ddd5a0..f3957e930340 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
@@ -188,6 +188,7 @@ static struct aq_hw_caps_s hw_atl_b0_hw_caps_ = {
188 .tx_rings = HW_ATL_B0_TX_RINGS, 188 .tx_rings = HW_ATL_B0_TX_RINGS,
189 .rx_rings = HW_ATL_B0_RX_RINGS, 189 .rx_rings = HW_ATL_B0_RX_RINGS,
190 .hw_features = NETIF_F_HW_CSUM | 190 .hw_features = NETIF_F_HW_CSUM |
191 NETIF_F_RXCSUM |
191 NETIF_F_RXHASH | 192 NETIF_F_RXHASH |
192 NETIF_F_SG | 193 NETIF_F_SG |
193 NETIF_F_TSO | 194 NETIF_F_TSO |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 0a23034bbe3f..352beff796ae 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -2277,7 +2277,7 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
2277 GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCP) | \ 2277 GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCP) | \
2278 GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RSVD_GRC)) 2278 GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RSVD_GRC))
2279 2279
2280#define HW_INTERRUT_ASSERT_SET_0 \ 2280#define HW_INTERRUPT_ASSERT_SET_0 \
2281 (AEU_INPUTS_ATTN_BITS_TSDM_HW_INTERRUPT | \ 2281 (AEU_INPUTS_ATTN_BITS_TSDM_HW_INTERRUPT | \
2282 AEU_INPUTS_ATTN_BITS_TCM_HW_INTERRUPT | \ 2282 AEU_INPUTS_ATTN_BITS_TCM_HW_INTERRUPT | \
2283 AEU_INPUTS_ATTN_BITS_TSEMI_HW_INTERRUPT | \ 2283 AEU_INPUTS_ATTN_BITS_TSEMI_HW_INTERRUPT | \
@@ -2290,7 +2290,7 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
2290 AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR |\ 2290 AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR |\
2291 AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR |\ 2291 AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR |\
2292 AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR) 2292 AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR)
2293#define HW_INTERRUT_ASSERT_SET_1 \ 2293#define HW_INTERRUPT_ASSERT_SET_1 \
2294 (AEU_INPUTS_ATTN_BITS_QM_HW_INTERRUPT | \ 2294 (AEU_INPUTS_ATTN_BITS_QM_HW_INTERRUPT | \
2295 AEU_INPUTS_ATTN_BITS_TIMERS_HW_INTERRUPT | \ 2295 AEU_INPUTS_ATTN_BITS_TIMERS_HW_INTERRUPT | \
2296 AEU_INPUTS_ATTN_BITS_XSDM_HW_INTERRUPT | \ 2296 AEU_INPUTS_ATTN_BITS_XSDM_HW_INTERRUPT | \
@@ -2318,7 +2318,7 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
2318 AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR | \ 2318 AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR | \
2319 AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR |\ 2319 AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR |\
2320 AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR) 2320 AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR)
2321#define HW_INTERRUT_ASSERT_SET_2 \ 2321#define HW_INTERRUPT_ASSERT_SET_2 \
2322 (AEU_INPUTS_ATTN_BITS_CSEMI_HW_INTERRUPT | \ 2322 (AEU_INPUTS_ATTN_BITS_CSEMI_HW_INTERRUPT | \
2323 AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT | \ 2323 AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT | \
2324 AEU_INPUTS_ATTN_BITS_DMAE_HW_INTERRUPT | \ 2324 AEU_INPUTS_ATTN_BITS_DMAE_HW_INTERRUPT | \
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index d8d06fdfc42b..a851f95c307a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -4166,14 +4166,14 @@ static void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
4166 bnx2x_release_phy_lock(bp); 4166 bnx2x_release_phy_lock(bp);
4167 } 4167 }
4168 4168
4169 if (attn & HW_INTERRUT_ASSERT_SET_0) { 4169 if (attn & HW_INTERRUPT_ASSERT_SET_0) {
4170 4170
4171 val = REG_RD(bp, reg_offset); 4171 val = REG_RD(bp, reg_offset);
4172 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0); 4172 val &= ~(attn & HW_INTERRUPT_ASSERT_SET_0);
4173 REG_WR(bp, reg_offset, val); 4173 REG_WR(bp, reg_offset, val);
4174 4174
4175 BNX2X_ERR("FATAL HW block attention set0 0x%x\n", 4175 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
4176 (u32)(attn & HW_INTERRUT_ASSERT_SET_0)); 4176 (u32)(attn & HW_INTERRUPT_ASSERT_SET_0));
4177 bnx2x_panic(); 4177 bnx2x_panic();
4178 } 4178 }
4179} 4179}
@@ -4191,7 +4191,7 @@ static void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
4191 BNX2X_ERR("FATAL error from DORQ\n"); 4191 BNX2X_ERR("FATAL error from DORQ\n");
4192 } 4192 }
4193 4193
4194 if (attn & HW_INTERRUT_ASSERT_SET_1) { 4194 if (attn & HW_INTERRUPT_ASSERT_SET_1) {
4195 4195
4196 int port = BP_PORT(bp); 4196 int port = BP_PORT(bp);
4197 int reg_offset; 4197 int reg_offset;
@@ -4200,11 +4200,11 @@ static void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
4200 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1); 4200 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
4201 4201
4202 val = REG_RD(bp, reg_offset); 4202 val = REG_RD(bp, reg_offset);
4203 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1); 4203 val &= ~(attn & HW_INTERRUPT_ASSERT_SET_1);
4204 REG_WR(bp, reg_offset, val); 4204 REG_WR(bp, reg_offset, val);
4205 4205
4206 BNX2X_ERR("FATAL HW block attention set1 0x%x\n", 4206 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
4207 (u32)(attn & HW_INTERRUT_ASSERT_SET_1)); 4207 (u32)(attn & HW_INTERRUPT_ASSERT_SET_1));
4208 bnx2x_panic(); 4208 bnx2x_panic();
4209 } 4209 }
4210} 4210}
@@ -4235,7 +4235,7 @@ static void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
4235 } 4235 }
4236 } 4236 }
4237 4237
4238 if (attn & HW_INTERRUT_ASSERT_SET_2) { 4238 if (attn & HW_INTERRUPT_ASSERT_SET_2) {
4239 4239
4240 int port = BP_PORT(bp); 4240 int port = BP_PORT(bp);
4241 int reg_offset; 4241 int reg_offset;
@@ -4244,11 +4244,11 @@ static void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
4244 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2); 4244 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
4245 4245
4246 val = REG_RD(bp, reg_offset); 4246 val = REG_RD(bp, reg_offset);
4247 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2); 4247 val &= ~(attn & HW_INTERRUPT_ASSERT_SET_2);
4248 REG_WR(bp, reg_offset, val); 4248 REG_WR(bp, reg_offset, val);
4249 4249
4250 BNX2X_ERR("FATAL HW block attention set2 0x%x\n", 4250 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
4251 (u32)(attn & HW_INTERRUT_ASSERT_SET_2)); 4251 (u32)(attn & HW_INTERRUPT_ASSERT_SET_2));
4252 bnx2x_panic(); 4252 bnx2x_panic();
4253 } 4253 }
4254} 4254}
@@ -13292,17 +13292,15 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
13292 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 13292 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
13293 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA; 13293 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;
13294 13294
13295 /* VF with OLD Hypervisor or old PF do not support filtering */
13296 if (IS_PF(bp)) { 13295 if (IS_PF(bp)) {
13297 if (chip_is_e1x) 13296 if (chip_is_e1x)
13298 bp->accept_any_vlan = true; 13297 bp->accept_any_vlan = true;
13299 else 13298 else
13300 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; 13299 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
13301#ifdef CONFIG_BNX2X_SRIOV
13302 } else if (bp->acquire_resp.pfdev_info.pf_cap & PFVF_CAP_VLAN_FILTER) {
13303 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
13304#endif
13305 } 13300 }
13301 /* For VF we'll know whether to enable VLAN filtering after
13302 * getting a response to CHANNEL_TLV_ACQUIRE from PF.
13303 */
13306 13304
13307 dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX; 13305 dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX;
13308 dev->features |= NETIF_F_HIGHDMA; 13306 dev->features |= NETIF_F_HIGHDMA;
@@ -13738,7 +13736,7 @@ static int bnx2x_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
13738 if (!netif_running(bp->dev)) { 13736 if (!netif_running(bp->dev)) {
13739 DP(BNX2X_MSG_PTP, 13737 DP(BNX2X_MSG_PTP,
13740 "PTP adjfreq called while the interface is down\n"); 13738 "PTP adjfreq called while the interface is down\n");
13741 return -EFAULT; 13739 return -ENETDOWN;
13742 } 13740 }
13743 13741
13744 if (ppb < 0) { 13742 if (ppb < 0) {
@@ -13797,6 +13795,12 @@ static int bnx2x_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
13797{ 13795{
13798 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info); 13796 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
13799 13797
13798 if (!netif_running(bp->dev)) {
13799 DP(BNX2X_MSG_PTP,
13800 "PTP adjtime called while the interface is down\n");
13801 return -ENETDOWN;
13802 }
13803
13800 DP(BNX2X_MSG_PTP, "PTP adjtime called, delta = %llx\n", delta); 13804 DP(BNX2X_MSG_PTP, "PTP adjtime called, delta = %llx\n", delta);
13801 13805
13802 timecounter_adjtime(&bp->timecounter, delta); 13806 timecounter_adjtime(&bp->timecounter, delta);
@@ -13809,6 +13813,12 @@ static int bnx2x_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
13809 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info); 13813 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
13810 u64 ns; 13814 u64 ns;
13811 13815
13816 if (!netif_running(bp->dev)) {
13817 DP(BNX2X_MSG_PTP,
13818 "PTP gettime called while the interface is down\n");
13819 return -ENETDOWN;
13820 }
13821
13812 ns = timecounter_read(&bp->timecounter); 13822 ns = timecounter_read(&bp->timecounter);
13813 13823
13814 DP(BNX2X_MSG_PTP, "PTP gettime called, ns = %llu\n", ns); 13824 DP(BNX2X_MSG_PTP, "PTP gettime called, ns = %llu\n", ns);
@@ -13824,6 +13834,12 @@ static int bnx2x_ptp_settime(struct ptp_clock_info *ptp,
13824 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info); 13834 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
13825 u64 ns; 13835 u64 ns;
13826 13836
13837 if (!netif_running(bp->dev)) {
13838 DP(BNX2X_MSG_PTP,
13839 "PTP settime called while the interface is down\n");
13840 return -ENETDOWN;
13841 }
13842
13827 ns = timespec64_to_ns(ts); 13843 ns = timespec64_to_ns(ts);
13828 13844
13829 DP(BNX2X_MSG_PTP, "PTP settime called, ns = %llu\n", ns); 13845 DP(BNX2X_MSG_PTP, "PTP settime called, ns = %llu\n", ns);
@@ -13991,6 +14007,14 @@ static int bnx2x_init_one(struct pci_dev *pdev,
13991 rc = bnx2x_vfpf_acquire(bp, tx_count, rx_count); 14007 rc = bnx2x_vfpf_acquire(bp, tx_count, rx_count);
13992 if (rc) 14008 if (rc)
13993 goto init_one_freemem; 14009 goto init_one_freemem;
14010
14011#ifdef CONFIG_BNX2X_SRIOV
14012 /* VF with OLD Hypervisor or old PF do not support filtering */
14013 if (bp->acquire_resp.pfdev_info.pf_cap & PFVF_CAP_VLAN_FILTER) {
14014 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
14015 dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
14016 }
14017#endif
13994 } 14018 }
13995 14019
13996 /* Enable SRIOV if capability found in configuration space */ 14020 /* Enable SRIOV if capability found in configuration space */
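
On the bnx2x PTP fixes: -EFAULT denotes a bad user-space address and
was misleading for "interface is down", hence -ENETDOWN; and
adjtime/gettime/settime gain the same netif_running() guard adjfreq
already had, since the timecounter is only meaningful while the device
is up. The guard reduces to:

#include <linux/netdevice.h>

/* Sketch: refuse clock operations while the interface, and with it
 * the free-running cycle counter, is down. */
static int example_ptp_guard(struct net_device *ndev)
{
	if (!netif_running(ndev))
		return -ENETDOWN;
	/* ... operate on the timecounter ... */
	return 0;
}
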
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 6fad22adbbb9..bdfd53b46bc5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -434,7 +434,9 @@ static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp,
434 434
435 /* Add/Remove the filter */ 435 /* Add/Remove the filter */
436 rc = bnx2x_config_vlan_mac(bp, &ramrod); 436 rc = bnx2x_config_vlan_mac(bp, &ramrod);
437 if (rc && rc != -EEXIST) { 437 if (rc == -EEXIST)
438 return 0;
439 if (rc) {
438 BNX2X_ERR("Failed to %s %s\n", 440 BNX2X_ERR("Failed to %s %s\n",
439 filter->add ? "add" : "delete", 441 filter->add ? "add" : "delete",
440 (filter->type == BNX2X_VF_FILTER_VLAN_MAC) ? 442 (filter->type == BNX2X_VF_FILTER_VLAN_MAC) ?
@@ -444,6 +446,8 @@ static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp,
444 return rc; 446 return rc;
445 } 447 }
446 448
449 filter->applied = true;
450
447 return 0; 451 return 0;
448} 452}
449 453
@@ -469,8 +473,10 @@ int bnx2x_vf_mac_vlan_config_list(struct bnx2x *bp, struct bnx2x_virtf *vf,
469 /* Rollback if needed */ 473 /* Rollback if needed */
470 if (i != filters->count) { 474 if (i != filters->count) {
471 BNX2X_ERR("Managed only %d/%d filters - rolling back\n", 475 BNX2X_ERR("Managed only %d/%d filters - rolling back\n",
472 i, filters->count + 1); 476 i, filters->count);
473 while (--i >= 0) { 477 while (--i >= 0) {
478 if (!filters->filters[i].applied)
479 continue;
474 filters->filters[i].add = !filters->filters[i].add; 480 filters->filters[i].add = !filters->filters[i].add;
475 bnx2x_vf_mac_vlan_config(bp, vf, qid, 481 bnx2x_vf_mac_vlan_config(bp, vf, qid,
476 &filters->filters[i], 482 &filters->filters[i],
@@ -1899,7 +1905,8 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
1899 continue; 1905 continue;
1900 } 1906 }
1901 1907
1902 DP(BNX2X_MSG_IOV, "add addresses for vf %d\n", vf->abs_vfid); 1908 DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
1909 "add addresses for vf %d\n", vf->abs_vfid);
1903 for_each_vfq(vf, j) { 1910 for_each_vfq(vf, j) {
1904 struct bnx2x_vf_queue *rxq = vfq_get(vf, j); 1911 struct bnx2x_vf_queue *rxq = vfq_get(vf, j);
1905 1912
@@ -1920,11 +1927,12 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
1920 cpu_to_le32(U64_HI(q_stats_addr)); 1927 cpu_to_le32(U64_HI(q_stats_addr));
1921 cur_query_entry->address.lo = 1928 cur_query_entry->address.lo =
1922 cpu_to_le32(U64_LO(q_stats_addr)); 1929 cpu_to_le32(U64_LO(q_stats_addr));
1923 DP(BNX2X_MSG_IOV, 1930 DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
1924 "added address %x %x for vf %d queue %d client %d\n", 1931 "added address %x %x for vf %d queue %d client %d\n",
1925 cur_query_entry->address.hi, 1932 cur_query_entry->address.hi,
1926 cur_query_entry->address.lo, cur_query_entry->funcID, 1933 cur_query_entry->address.lo,
1927 j, cur_query_entry->index); 1934 cur_query_entry->funcID,
1935 j, cur_query_entry->index);
1928 cur_query_entry++; 1936 cur_query_entry++;
1929 cur_data_offset += sizeof(struct per_queue_stats); 1937 cur_data_offset += sizeof(struct per_queue_stats);
1930 stats_count++; 1938 stats_count++;
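
The sriov filter change makes partial failure recoverable: each filter
records whether it was actually applied (-EEXIST is treated as success
with nothing to undo), and the rollback path inverts only applied
entries. A generic mark-then-rollback sketch with an assumed
example_apply_one() helper:

#include <linux/errno.h>
#include <linux/types.h>

struct example_filter {
	bool add;
	bool applied;
};

static int example_apply_one(struct example_filter *f);	/* assumed */

/* Sketch: apply a list transactionally, undoing only entries that
 * really took effect. */
static int example_apply_all(struct example_filter *f, int count)
{
	int i, rc = 0;

	for (i = 0; i < count; i++) {
		rc = example_apply_one(&f[i]);
		if (rc == -EEXIST) {
			rc = 0;		/* already present: success, */
			continue;	/* but nothing to roll back */
		}
		if (rc)
			break;
		f[i].applied = true;
	}
	if (rc) {
		while (--i >= 0) {
			if (!f[i].applied)
				continue;
			f[i].add = !f[i].add;	/* invert the operation */
			example_apply_one(&f[i]);
		}
	}
	return rc;
}
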
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index 7a6d406f4c11..888d0b6632e8 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -114,6 +114,7 @@ struct bnx2x_vf_mac_vlan_filter {
114 (BNX2X_VF_FILTER_MAC | BNX2X_VF_FILTER_VLAN) /*shortcut*/ 114 (BNX2X_VF_FILTER_MAC | BNX2X_VF_FILTER_VLAN) /*shortcut*/
115 115
116 bool add; 116 bool add;
117 bool applied;
117 u8 *mac; 118 u8 *mac;
118 u16 vid; 119 u16 vid;
119}; 120};
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index bfae300cf25f..76a4668c50fe 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -868,7 +868,7 @@ int bnx2x_vfpf_set_mcast(struct net_device *dev)
868 struct bnx2x *bp = netdev_priv(dev); 868 struct bnx2x *bp = netdev_priv(dev);
869 struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters; 869 struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
870 struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp; 870 struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
871 int rc, i = 0; 871 int rc = 0, i = 0;
872 struct netdev_hw_addr *ha; 872 struct netdev_hw_addr *ha;
873 873
874 if (bp->state != BNX2X_STATE_OPEN) { 874 if (bp->state != BNX2X_STATE_OPEN) {
@@ -883,6 +883,15 @@ int bnx2x_vfpf_set_mcast(struct net_device *dev)
883 /* Get Rx mode requested */ 883 /* Get Rx mode requested */
884 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags); 884 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
885 885
886 /* We support PFVF_MAX_MULTICAST_PER_VF mcast addresses tops */
887 if (netdev_mc_count(dev) > PFVF_MAX_MULTICAST_PER_VF) {
888 DP(NETIF_MSG_IFUP,
889 "VF supports not more than %d multicast MAC addresses\n",
890 PFVF_MAX_MULTICAST_PER_VF);
891 rc = -EINVAL;
892 goto out;
893 }
894
886 netdev_for_each_mc_addr(ha, dev) { 895 netdev_for_each_mc_addr(ha, dev) {
887 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n", 896 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
888 bnx2x_mc_addr(ha)); 897 bnx2x_mc_addr(ha));
@@ -890,16 +899,6 @@ int bnx2x_vfpf_set_mcast(struct net_device *dev)
890 i++; 899 i++;
891 } 900 }
892 901
893 /* We support four PFVF_MAX_MULTICAST_PER_VF mcast
894 * addresses tops
895 */
896 if (i >= PFVF_MAX_MULTICAST_PER_VF) {
897 DP(NETIF_MSG_IFUP,
898 "VF supports not more than %d multicast MAC addresses\n",
899 PFVF_MAX_MULTICAST_PER_VF);
900 return -EINVAL;
901 }
902
903 req->n_multicast = i; 902 req->n_multicast = i;
904 req->flags |= VFPF_SET_Q_FILTERS_MULTICAST_CHANGED; 903 req->flags |= VFPF_SET_Q_FILTERS_MULTICAST_CHANGED;
905 req->vf_qid = 0; 904 req->vf_qid = 0;
@@ -924,7 +923,7 @@ int bnx2x_vfpf_set_mcast(struct net_device *dev)
924out: 923out:
925 bnx2x_vfpf_finalize(bp, &req->first_tlv); 924 bnx2x_vfpf_finalize(bp, &req->first_tlv);
926 925
927 return 0; 926 return rc;
928} 927}
929 928
930/* request pf to add a vlan for the vf */ 929/* request pf to add a vlan for the vf */
@@ -1778,6 +1777,23 @@ static int bnx2x_vf_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
1778 goto op_err; 1777 goto op_err;
1779 } 1778 }
1780 1779
1780 /* build vlan list */
1781 fl = NULL;
1782
1783 rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
1784 VFPF_VLAN_FILTER);
1785 if (rc)
1786 goto op_err;
1787
1788 if (fl) {
1789 /* set vlan list */
1790 rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl,
1791 msg->vf_qid,
1792 false);
1793 if (rc)
1794 goto op_err;
1795 }
1796
1781 } 1797 }
1782 1798
1783 if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) { 1799 if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) {
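
In bnx2x_vfpf_set_mcast() the length check moves ahead of the copy
loop, so a too-long list is rejected before any address is written
into the fixed-size request, and the old "i >= MAX" comparison, which
wrongly rejected a list of exactly MAX entries, becomes "> MAX". The
function also propagates rc instead of returning 0 unconditionally. A
sketch of the bounds check:

#include <linux/netdevice.h>
#include <linux/etherdevice.h>

/* Sketch: validate the list length against the fixed-size destination
 * before copying anything into it. */
static int example_copy_mcast(struct net_device *dev,
			      u8 dst[][ETH_ALEN], int max)
{
	struct netdev_hw_addr *ha;
	int i = 0;

	if (netdev_mc_count(dev) > max)
		return -EINVAL;

	netdev_for_each_mc_addr(ha, dev)
		ether_addr_copy(dst[i++], ha->addr);
	return i;
}
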
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 235733e91c79..1f1e54ba0ecb 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1983,20 +1983,25 @@ static void bnxt_free_rx_skbs(struct bnxt *bp)
1983 1983
1984 for (j = 0; j < max_idx; j++) { 1984 for (j = 0; j < max_idx; j++) {
1985 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j]; 1985 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j];
1986 dma_addr_t mapping = rx_buf->mapping;
1986 void *data = rx_buf->data; 1987 void *data = rx_buf->data;
1987 1988
1988 if (!data) 1989 if (!data)
1989 continue; 1990 continue;
1990 1991
1991 dma_unmap_single(&pdev->dev, rx_buf->mapping,
1992 bp->rx_buf_use_size, bp->rx_dir);
1993
1994 rx_buf->data = NULL; 1992 rx_buf->data = NULL;
1995 1993
1996 if (BNXT_RX_PAGE_MODE(bp)) 1994 if (BNXT_RX_PAGE_MODE(bp)) {
1995 mapping -= bp->rx_dma_offset;
1996 dma_unmap_page(&pdev->dev, mapping,
1997 PAGE_SIZE, bp->rx_dir);
1997 __free_page(data); 1998 __free_page(data);
1998 else 1999 } else {
2000 dma_unmap_single(&pdev->dev, mapping,
2001 bp->rx_buf_use_size,
2002 bp->rx_dir);
1999 kfree(data); 2003 kfree(data);
2004 }
2000 } 2005 }
2001 2006
2002 for (j = 0; j < max_agg_idx; j++) { 2007 for (j = 0; j < max_agg_idx; j++) {
@@ -2455,6 +2460,18 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
2455 return 0; 2460 return 0;
2456} 2461}
2457 2462
2463static void bnxt_init_cp_rings(struct bnxt *bp)
2464{
2465 int i;
2466
2467 for (i = 0; i < bp->cp_nr_rings; i++) {
2468 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
2469 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
2470
2471 ring->fw_ring_id = INVALID_HW_RING_ID;
2472 }
2473}
2474
2458static int bnxt_init_rx_rings(struct bnxt *bp) 2475static int bnxt_init_rx_rings(struct bnxt *bp)
2459{ 2476{
2460 int i, rc = 0; 2477 int i, rc = 0;
@@ -4465,6 +4482,10 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
4465 vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK; 4482 vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
4466 } 4483 }
4467#endif 4484#endif
4485 if (BNXT_PF(bp) && (le16_to_cpu(resp->flags) &
4486 FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED))
4487 bp->flags |= BNXT_FLAG_FW_LLDP_AGENT;
4488
4468 switch (resp->port_partition_type) { 4489 switch (resp->port_partition_type) {
4469 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0: 4490 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
4470 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5: 4491 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
@@ -4728,7 +4749,7 @@ static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
4728 rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags); 4749 rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
4729 if (rc) { 4750 if (rc) {
4730 netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n", 4751 netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
4731 rc, i); 4752 i, rc);
4732 return rc; 4753 return rc;
4733 } 4754 }
4734 } 4755 }
@@ -5002,6 +5023,7 @@ static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
5002 5023
5003static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init) 5024static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
5004{ 5025{
5026 bnxt_init_cp_rings(bp);
5005 bnxt_init_rx_rings(bp); 5027 bnxt_init_rx_rings(bp);
5006 bnxt_init_tx_rings(bp); 5028 bnxt_init_tx_rings(bp);
5007 bnxt_init_ring_grps(bp, irq_re_init); 5029 bnxt_init_ring_grps(bp, irq_re_init);
@@ -5507,8 +5529,9 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
5507 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) & 5529 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
5508 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK; 5530 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
5509 } 5531 }
5510 link_info->support_auto_speeds = 5532 if (resp->supported_speeds_auto_mode)
5511 le16_to_cpu(resp->supported_speeds_auto_mode); 5533 link_info->support_auto_speeds =
5534 le16_to_cpu(resp->supported_speeds_auto_mode);
5512 5535
5513hwrm_phy_qcaps_exit: 5536hwrm_phy_qcaps_exit:
5514 mutex_unlock(&bp->hwrm_cmd_lock); 5537 mutex_unlock(&bp->hwrm_cmd_lock);
@@ -6495,8 +6518,14 @@ static void bnxt_reset_task(struct bnxt *bp, bool silent)
6495 if (!silent) 6518 if (!silent)
6496 bnxt_dbg_dump_states(bp); 6519 bnxt_dbg_dump_states(bp);
6497 if (netif_running(bp->dev)) { 6520 if (netif_running(bp->dev)) {
6521 int rc;
6522
6523 if (!silent)
6524 bnxt_ulp_stop(bp);
6498 bnxt_close_nic(bp, false, false); 6525 bnxt_close_nic(bp, false, false);
6499 bnxt_open_nic(bp, false, false); 6526 rc = bnxt_open_nic(bp, false, false);
6527 if (!silent && !rc)
6528 bnxt_ulp_start(bp);
6500 } 6529 }
6501} 6530}
6502 6531
@@ -7444,6 +7473,10 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
7444 if (rc) 7473 if (rc)
7445 goto init_err_pci_clean; 7474 goto init_err_pci_clean;
7446 7475
7476 rc = bnxt_hwrm_func_reset(bp);
7477 if (rc)
7478 goto init_err_pci_clean;
7479
7447 bnxt_hwrm_fw_set_time(bp); 7480 bnxt_hwrm_fw_set_time(bp);
7448 7481
7449 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG | 7482 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
@@ -7554,10 +7587,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
7554 if (rc) 7587 if (rc)
7555 goto init_err_pci_clean; 7588 goto init_err_pci_clean;
7556 7589
7557 rc = bnxt_hwrm_func_reset(bp);
7558 if (rc)
7559 goto init_err_pci_clean;
7560
7561 rc = bnxt_init_int_mode(bp); 7590 rc = bnxt_init_int_mode(bp);
7562 if (rc) 7591 if (rc)
7563 goto init_err_pci_clean; 7592 goto init_err_pci_clean;
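
Several independent bnxt fixes above; the RX teardown one restores DMA
API symmetry: page-mode buffers were mapped with dma_map_page() at
rx_dma_offset and must be unmapped as a full page at the page-aligned
address, while kmalloc'd buffers mapped with dma_map_single() pair
with dma_unmap_single(). (The netdev_err argument swap matches the
format string "vnic %d: %x", which names the vnic before the rc.) A
sketch of the symmetric teardown:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/slab.h>

/* Sketch: unmap exactly as mapped.  In page mode "data" is the
 * struct page pointer; otherwise it is the kmalloc'd buffer. */
static void example_free_rx_buf(struct device *dev, dma_addr_t mapping,
				void *data, bool page_mode,
				unsigned int offset, unsigned int len)
{
	if (page_mode) {
		dma_unmap_page(dev, mapping - offset, PAGE_SIZE,
			       DMA_FROM_DEVICE);
		__free_page(data);
	} else {
		dma_unmap_single(dev, mapping, len, DMA_FROM_DEVICE);
		kfree(data);
	}
}
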
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index faf26a2f726b..c7a5b84a5cb2 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -993,6 +993,7 @@ struct bnxt {
993 BNXT_FLAG_ROCEV2_CAP) 993 BNXT_FLAG_ROCEV2_CAP)
994 #define BNXT_FLAG_NO_AGG_RINGS 0x20000 994 #define BNXT_FLAG_NO_AGG_RINGS 0x20000
995 #define BNXT_FLAG_RX_PAGE_MODE 0x40000 995 #define BNXT_FLAG_RX_PAGE_MODE 0x40000
996 #define BNXT_FLAG_FW_LLDP_AGENT 0x80000
996 #define BNXT_FLAG_CHIP_NITRO_A0 0x1000000 997 #define BNXT_FLAG_CHIP_NITRO_A0 0x1000000
997 998
998 #define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \ 999 #define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
index fdf2d8caf7bf..03532061d211 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
@@ -474,7 +474,7 @@ void bnxt_dcb_init(struct bnxt *bp)
474 return; 474 return;
475 475
476 bp->dcbx_cap = DCB_CAP_DCBX_VER_IEEE; 476 bp->dcbx_cap = DCB_CAP_DCBX_VER_IEEE;
477 if (BNXT_PF(bp)) 477 if (BNXT_PF(bp) && !(bp->flags & BNXT_FLAG_FW_LLDP_AGENT))
478 bp->dcbx_cap |= DCB_CAP_DCBX_HOST; 478 bp->dcbx_cap |= DCB_CAP_DCBX_HOST;
479 else 479 else
480 bp->dcbx_cap |= DCB_CAP_DCBX_LLD_MANAGED; 480 bp->dcbx_cap |= DCB_CAP_DCBX_LLD_MANAGED;
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index f92896835d2a..365895ed3c3e 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Broadcom GENET (Gigabit Ethernet) controller driver 2 * Broadcom GENET (Gigabit Ethernet) controller driver
3 * 3 *
4 * Copyright (c) 2014 Broadcom Corporation 4 * Copyright (c) 2014-2017 Broadcom
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as 7 * it under the terms of the GNU General Public License version 2 as
@@ -450,6 +450,22 @@ static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv,
450 genet_dma_ring_regs[r]); 450 genet_dma_ring_regs[r]);
451} 451}
452 452
453static int bcmgenet_begin(struct net_device *dev)
454{
455 struct bcmgenet_priv *priv = netdev_priv(dev);
456
457 /* Turn on the clock */
458 return clk_prepare_enable(priv->clk);
459}
460
461static void bcmgenet_complete(struct net_device *dev)
462{
463 struct bcmgenet_priv *priv = netdev_priv(dev);
464
465 /* Turn off the clock */
466 clk_disable_unprepare(priv->clk);
467}
468
453static int bcmgenet_get_link_ksettings(struct net_device *dev, 469static int bcmgenet_get_link_ksettings(struct net_device *dev,
454 struct ethtool_link_ksettings *cmd) 470 struct ethtool_link_ksettings *cmd)
455{ 471{
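bcmgenet_begin()/bcmgenet_complete() wire the GENET clock into the ethtool .begin/.complete hooks, so the core enables the clock before any ethtool operation and drops it afterwards. A userspace sketch of that contract, assuming begin() may fail and complete() runs on success and failure alike; clk_enable()/clk_disable() here are hypothetical stand-ins for the kernel clock API:

/* Sketch only: models the .begin/.complete bracketing. */
extern int clk_enable(void);    /* hypothetical; 0 on success */
extern void clk_disable(void);

struct ethtool_like_ops {
	int  (*begin)(void);
	void (*complete)(void);
	int  (*do_op)(void);
};

static int run_op(const struct ethtool_like_ops *ops)
{
	int rc = ops->begin ? ops->begin() : 0;

	if (rc)
		return rc;          /* block unclocked: don't touch registers */
	rc = ops->do_op();
	if (ops->complete)
		ops->complete();    /* always undoes begin() */
	return rc;
}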
@@ -778,8 +794,9 @@ static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
778 STAT_GENET_RUNT("rx_runt_bytes", mib.rx_runt_bytes), 794 STAT_GENET_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
779 /* Misc UniMAC counters */ 795 /* Misc UniMAC counters */
780 STAT_GENET_MISC("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, 796 STAT_GENET_MISC("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt,
781 UMAC_RBUF_OVFL_CNT), 797 UMAC_RBUF_OVFL_CNT_V1),
782 STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT), 798 STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt,
799 UMAC_RBUF_ERR_CNT_V1),
783 STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT), 800 STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT),
784 STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed), 801 STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
785 STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed), 802 STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed),
@@ -821,6 +838,45 @@ static void bcmgenet_get_strings(struct net_device *dev, u32 stringset,
821 } 838 }
822} 839}
823 840
841static u32 bcmgenet_update_stat_misc(struct bcmgenet_priv *priv, u16 offset)
842{
843 u16 new_offset;
844 u32 val;
845
846 switch (offset) {
847 case UMAC_RBUF_OVFL_CNT_V1:
848 if (GENET_IS_V2(priv))
849 new_offset = RBUF_OVFL_CNT_V2;
850 else
851 new_offset = RBUF_OVFL_CNT_V3PLUS;
852
853 val = bcmgenet_rbuf_readl(priv, new_offset);
854 /* clear if overflowed */
855 if (val == ~0)
856 bcmgenet_rbuf_writel(priv, 0, new_offset);
857 break;
858 case UMAC_RBUF_ERR_CNT_V1:
859 if (GENET_IS_V2(priv))
860 new_offset = RBUF_ERR_CNT_V2;
861 else
862 new_offset = RBUF_ERR_CNT_V3PLUS;
863
864 val = bcmgenet_rbuf_readl(priv, new_offset);
865 /* clear if overflowed */
866 if (val == ~0)
867 bcmgenet_rbuf_writel(priv, 0, new_offset);
868 break;
869 default:
870 val = bcmgenet_umac_readl(priv, offset);
871 /* clear if overflowed */
872 if (val == ~0)
873 bcmgenet_umac_writel(priv, 0, offset);
874 break;
875 }
876
877 return val;
878}
879
824static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv) 880static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
825{ 881{
826 int i, j = 0; 882 int i, j = 0;
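bcmgenet_update_stat_misc() resolves the per-version RBUF counter offsets and keeps the existing read-and-clear convention: a counter that reads back as all-ones has saturated and is written back to zero. A small sketch of that pattern with hypothetical MMIO accessors:

/* Sketch only: reg_read()/reg_write() stand in for the MMIO helpers. */
#include <stdint.h>

extern uint32_t reg_read(unsigned int off);
extern void reg_write(uint32_t val, unsigned int off);

static uint32_t read_stat(unsigned int off)
{
	uint32_t val = reg_read(off);

	if (val == ~0u)          /* counter overflowed */
		reg_write(0, off);   /* clear so it can count again */
	return val;
}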
@@ -836,19 +892,28 @@ static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
836 case BCMGENET_STAT_NETDEV: 892 case BCMGENET_STAT_NETDEV:
837 case BCMGENET_STAT_SOFT: 893 case BCMGENET_STAT_SOFT:
838 continue; 894 continue;
839 case BCMGENET_STAT_MIB_RX:
840 case BCMGENET_STAT_MIB_TX:
841 case BCMGENET_STAT_RUNT: 895 case BCMGENET_STAT_RUNT:
842 if (s->type != BCMGENET_STAT_MIB_RX) 896 offset += BCMGENET_STAT_OFFSET;
843 offset = BCMGENET_STAT_OFFSET; 897 /* fall through */
898 case BCMGENET_STAT_MIB_TX:
899 offset += BCMGENET_STAT_OFFSET;
900 /* fall through */
901 case BCMGENET_STAT_MIB_RX:
844 val = bcmgenet_umac_readl(priv, 902 val = bcmgenet_umac_readl(priv,
845 UMAC_MIB_START + j + offset); 903 UMAC_MIB_START + j + offset);
904 offset = 0; /* Reset Offset */
846 break; 905 break;
847 case BCMGENET_STAT_MISC: 906 case BCMGENET_STAT_MISC:
848 val = bcmgenet_umac_readl(priv, s->reg_offset); 907 if (GENET_IS_V1(priv)) {
849 /* clear if overflowed */ 908 val = bcmgenet_umac_readl(priv, s->reg_offset);
850 if (val == ~0) 909 /* clear if overflowed */
851 bcmgenet_umac_writel(priv, 0, s->reg_offset); 910 if (val == ~0)
911 bcmgenet_umac_writel(priv, 0,
912 s->reg_offset);
913 } else {
914 val = bcmgenet_update_stat_misc(priv,
915 s->reg_offset);
916 }
852 break; 917 break;
853 } 918 }
854 919
@@ -973,6 +1038,8 @@ static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
973 1038
974/* standard ethtool support functions. */ 1039/* standard ethtool support functions. */
975static const struct ethtool_ops bcmgenet_ethtool_ops = { 1040static const struct ethtool_ops bcmgenet_ethtool_ops = {
1041 .begin = bcmgenet_begin,
1042 .complete = bcmgenet_complete,
976 .get_strings = bcmgenet_get_strings, 1043 .get_strings = bcmgenet_get_strings,
977 .get_sset_count = bcmgenet_get_sset_count, 1044 .get_sset_count = bcmgenet_get_sset_count,
978 .get_ethtool_stats = bcmgenet_get_ethtool_stats, 1045 .get_ethtool_stats = bcmgenet_get_ethtool_stats,
@@ -1167,7 +1234,6 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
1167 struct bcmgenet_priv *priv = netdev_priv(dev); 1234 struct bcmgenet_priv *priv = netdev_priv(dev);
1168 struct device *kdev = &priv->pdev->dev; 1235 struct device *kdev = &priv->pdev->dev;
1169 struct enet_cb *tx_cb_ptr; 1236 struct enet_cb *tx_cb_ptr;
1170 struct netdev_queue *txq;
1171 unsigned int pkts_compl = 0; 1237 unsigned int pkts_compl = 0;
1172 unsigned int bytes_compl = 0; 1238 unsigned int bytes_compl = 0;
1173 unsigned int c_index; 1239 unsigned int c_index;
@@ -1219,13 +1285,8 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
1219 dev->stats.tx_packets += pkts_compl; 1285 dev->stats.tx_packets += pkts_compl;
1220 dev->stats.tx_bytes += bytes_compl; 1286 dev->stats.tx_bytes += bytes_compl;
1221 1287
1222 txq = netdev_get_tx_queue(dev, ring->queue); 1288 netdev_tx_completed_queue(netdev_get_tx_queue(dev, ring->queue),
1223 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl); 1289 pkts_compl, bytes_compl);
1224
1225 if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
1226 if (netif_tx_queue_stopped(txq))
1227 netif_tx_wake_queue(txq);
1228 }
1229 1290
1230 return pkts_compl; 1291 return pkts_compl;
1231} 1292}
@@ -1248,8 +1309,16 @@ static int bcmgenet_tx_poll(struct napi_struct *napi, int budget)
1248 struct bcmgenet_tx_ring *ring = 1309 struct bcmgenet_tx_ring *ring =
1249 container_of(napi, struct bcmgenet_tx_ring, napi); 1310 container_of(napi, struct bcmgenet_tx_ring, napi);
1250 unsigned int work_done = 0; 1311 unsigned int work_done = 0;
1312 struct netdev_queue *txq;
1313 unsigned long flags;
1251 1314
1252 work_done = bcmgenet_tx_reclaim(ring->priv->dev, ring); 1315 spin_lock_irqsave(&ring->lock, flags);
1316 work_done = __bcmgenet_tx_reclaim(ring->priv->dev, ring);
1317 if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
1318 txq = netdev_get_tx_queue(ring->priv->dev, ring->queue);
1319 netif_tx_wake_queue(txq);
1320 }
1321 spin_unlock_irqrestore(&ring->lock, flags);
1253 1322
1254 if (work_done == 0) { 1323 if (work_done == 0) {
1255 napi_complete(napi); 1324 napi_complete(napi);
@@ -2457,24 +2526,28 @@ static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
2457/* Interrupt bottom half */ 2526/* Interrupt bottom half */
2458static void bcmgenet_irq_task(struct work_struct *work) 2527static void bcmgenet_irq_task(struct work_struct *work)
2459{ 2528{
2529 unsigned long flags;
2530 unsigned int status;
2460 struct bcmgenet_priv *priv = container_of( 2531 struct bcmgenet_priv *priv = container_of(
2461 work, struct bcmgenet_priv, bcmgenet_irq_work); 2532 work, struct bcmgenet_priv, bcmgenet_irq_work);
2462 2533
2463 netif_dbg(priv, intr, priv->dev, "%s\n", __func__); 2534 netif_dbg(priv, intr, priv->dev, "%s\n", __func__);
2464 2535
2465 if (priv->irq0_stat & UMAC_IRQ_MPD_R) { 2536 spin_lock_irqsave(&priv->lock, flags);
2466 priv->irq0_stat &= ~UMAC_IRQ_MPD_R; 2537 status = priv->irq0_stat;
2538 priv->irq0_stat = 0;
2539 spin_unlock_irqrestore(&priv->lock, flags);
2540
2541 if (status & UMAC_IRQ_MPD_R) {
2467 netif_dbg(priv, wol, priv->dev, 2542 netif_dbg(priv, wol, priv->dev,
2468 "magic packet detected, waking up\n"); 2543 "magic packet detected, waking up\n");
2469 bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC); 2544 bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);
2470 } 2545 }
2471 2546
2472 /* Link UP/DOWN event */ 2547 /* Link UP/DOWN event */
2473 if (priv->irq0_stat & UMAC_IRQ_LINK_EVENT) { 2548 if (status & UMAC_IRQ_LINK_EVENT)
2474 phy_mac_interrupt(priv->phydev, 2549 phy_mac_interrupt(priv->phydev,
2475 !!(priv->irq0_stat & UMAC_IRQ_LINK_UP)); 2550 !!(status & UMAC_IRQ_LINK_UP));
2476 priv->irq0_stat &= ~UMAC_IRQ_LINK_EVENT;
2477 }
2478} 2551}
2479 2552
2480/* bcmgenet_isr1: handle Rx and Tx priority queues */ 2553/* bcmgenet_isr1: handle Rx and Tx priority queues */
@@ -2483,22 +2556,21 @@ static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
2483 struct bcmgenet_priv *priv = dev_id; 2556 struct bcmgenet_priv *priv = dev_id;
2484 struct bcmgenet_rx_ring *rx_ring; 2557 struct bcmgenet_rx_ring *rx_ring;
2485 struct bcmgenet_tx_ring *tx_ring; 2558 struct bcmgenet_tx_ring *tx_ring;
2486 unsigned int index; 2559 unsigned int index, status;
2487 2560
2488 /* Save irq status for bottom-half processing. */ 2561 /* Read irq status */
2489 priv->irq1_stat = 2562 status = bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
2490 bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
2491 ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS); 2563 ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
2492 2564
2493 /* clear interrupts */ 2565 /* clear interrupts */
2494 bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR); 2566 bcmgenet_intrl2_1_writel(priv, status, INTRL2_CPU_CLEAR);
2495 2567
2496 netif_dbg(priv, intr, priv->dev, 2568 netif_dbg(priv, intr, priv->dev,
2497 "%s: IRQ=0x%x\n", __func__, priv->irq1_stat); 2569 "%s: IRQ=0x%x\n", __func__, status);
2498 2570
2499 /* Check Rx priority queue interrupts */ 2571 /* Check Rx priority queue interrupts */
2500 for (index = 0; index < priv->hw_params->rx_queues; index++) { 2572 for (index = 0; index < priv->hw_params->rx_queues; index++) {
2501 if (!(priv->irq1_stat & BIT(UMAC_IRQ1_RX_INTR_SHIFT + index))) 2573 if (!(status & BIT(UMAC_IRQ1_RX_INTR_SHIFT + index)))
2502 continue; 2574 continue;
2503 2575
2504 rx_ring = &priv->rx_rings[index]; 2576 rx_ring = &priv->rx_rings[index];
@@ -2511,7 +2583,7 @@ static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
2511 2583
2512 /* Check Tx priority queue interrupts */ 2584 /* Check Tx priority queue interrupts */
2513 for (index = 0; index < priv->hw_params->tx_queues; index++) { 2585 for (index = 0; index < priv->hw_params->tx_queues; index++) {
2514 if (!(priv->irq1_stat & BIT(index))) 2586 if (!(status & BIT(index)))
2515 continue; 2587 continue;
2516 2588
2517 tx_ring = &priv->tx_rings[index]; 2589 tx_ring = &priv->tx_rings[index];
@@ -2531,19 +2603,20 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
2531 struct bcmgenet_priv *priv = dev_id; 2603 struct bcmgenet_priv *priv = dev_id;
2532 struct bcmgenet_rx_ring *rx_ring; 2604 struct bcmgenet_rx_ring *rx_ring;
2533 struct bcmgenet_tx_ring *tx_ring; 2605 struct bcmgenet_tx_ring *tx_ring;
2606 unsigned int status;
2607 unsigned long flags;
2534 2608
2535 /* Save irq status for bottom-half processing. */ 2609 /* Read irq status */
2536 priv->irq0_stat = 2610 status = bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) &
2537 bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) &
2538 ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS); 2611 ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
2539 2612
2540 /* clear interrupts */ 2613 /* clear interrupts */
2541 bcmgenet_intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR); 2614 bcmgenet_intrl2_0_writel(priv, status, INTRL2_CPU_CLEAR);
2542 2615
2543 netif_dbg(priv, intr, priv->dev, 2616 netif_dbg(priv, intr, priv->dev,
2544 "IRQ=0x%x\n", priv->irq0_stat); 2617 "IRQ=0x%x\n", status);
2545 2618
2546 if (priv->irq0_stat & UMAC_IRQ_RXDMA_DONE) { 2619 if (status & UMAC_IRQ_RXDMA_DONE) {
2547 rx_ring = &priv->rx_rings[DESC_INDEX]; 2620 rx_ring = &priv->rx_rings[DESC_INDEX];
2548 2621
2549 if (likely(napi_schedule_prep(&rx_ring->napi))) { 2622 if (likely(napi_schedule_prep(&rx_ring->napi))) {
@@ -2552,7 +2625,7 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
2552 } 2625 }
2553 } 2626 }
2554 2627
2555 if (priv->irq0_stat & UMAC_IRQ_TXDMA_DONE) { 2628 if (status & UMAC_IRQ_TXDMA_DONE) {
2556 tx_ring = &priv->tx_rings[DESC_INDEX]; 2629 tx_ring = &priv->tx_rings[DESC_INDEX];
2557 2630
2558 if (likely(napi_schedule_prep(&tx_ring->napi))) { 2631 if (likely(napi_schedule_prep(&tx_ring->napi))) {
@@ -2561,22 +2634,23 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
2561 } 2634 }
2562 } 2635 }
2563 2636
2564 if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R |
2565 UMAC_IRQ_PHY_DET_F |
2566 UMAC_IRQ_LINK_EVENT |
2567 UMAC_IRQ_HFB_SM |
2568 UMAC_IRQ_HFB_MM |
2569 UMAC_IRQ_MPD_R)) {
2570 /* all other interested interrupts handled in bottom half */
2571 schedule_work(&priv->bcmgenet_irq_work);
2572 }
2573
2574 if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) && 2637 if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
2575 priv->irq0_stat & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) { 2638 status & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
2576 priv->irq0_stat &= ~(UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);
2577 wake_up(&priv->wq); 2639 wake_up(&priv->wq);
2578 } 2640 }
2579 2641
2642 /* all other interested interrupts handled in bottom half */
2643 status &= (UMAC_IRQ_LINK_EVENT |
2644 UMAC_IRQ_MPD_R);
2645 if (status) {
2646 /* Save irq status for bottom-half processing. */
2647 spin_lock_irqsave(&priv->lock, flags);
2648 priv->irq0_stat |= status;
2649 spin_unlock_irqrestore(&priv->lock, flags);
2650
2651 schedule_work(&priv->bcmgenet_irq_work);
2652 }
2653
2580 return IRQ_HANDLED; 2654 return IRQ_HANDLED;
2581} 2655}
2582 2656
@@ -2801,6 +2875,8 @@ err_irq0:
2801err_fini_dma: 2875err_fini_dma:
2802 bcmgenet_fini_dma(priv); 2876 bcmgenet_fini_dma(priv);
2803err_clk_disable: 2877err_clk_disable:
2878 if (priv->internal_phy)
2879 bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
2804 clk_disable_unprepare(priv->clk); 2880 clk_disable_unprepare(priv->clk);
2805 return ret; 2881 return ret;
2806} 2882}
@@ -3177,6 +3253,12 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
3177 */ 3253 */
3178 gphy_rev = reg & 0xffff; 3254 gphy_rev = reg & 0xffff;
3179 3255
3256 /* This is reserved so should require special treatment */
3257 if (gphy_rev == 0 || gphy_rev == 0x01ff) {
3258 pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev);
3259 return;
3260 }
3261
3180 /* This is the good old scheme, just GPHY major, no minor nor patch */ 3262 /* This is the good old scheme, just GPHY major, no minor nor patch */
3181 if ((gphy_rev & 0xf0) != 0) 3263 if ((gphy_rev & 0xf0) != 0)
3182 priv->gphy_rev = gphy_rev << 8; 3264 priv->gphy_rev = gphy_rev << 8;
@@ -3185,12 +3267,6 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
3185 else if ((gphy_rev & 0xff00) != 0) 3267 else if ((gphy_rev & 0xff00) != 0)
3186 priv->gphy_rev = gphy_rev; 3268 priv->gphy_rev = gphy_rev;
3187 3269
3188 /* This is reserved so should require special treatment */
3189 else if (gphy_rev == 0 || gphy_rev == 0x01ff) {
3190 pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev);
3191 return;
3192 }
3193
3194#ifdef CONFIG_PHYS_ADDR_T_64BIT 3270#ifdef CONFIG_PHYS_ADDR_T_64BIT
3195 if (!(params->flags & GENET_HAS_40BITS)) 3271 if (!(params->flags & GENET_HAS_40BITS))
3196 pr_warn("GENET does not support 40-bits PA\n"); 3272 pr_warn("GENET does not support 40-bits PA\n");
@@ -3233,6 +3309,7 @@ static int bcmgenet_probe(struct platform_device *pdev)
3233 const void *macaddr; 3309 const void *macaddr;
3234 struct resource *r; 3310 struct resource *r;
3235 int err = -EIO; 3311 int err = -EIO;
3312 const char *phy_mode_str;
3236 3313
3237 /* Up to GENET_MAX_MQ_CNT + 1 TX queues and RX queues */ 3314 /* Up to GENET_MAX_MQ_CNT + 1 TX queues and RX queues */
3238 dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1, 3315 dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1,
@@ -3276,6 +3353,8 @@ static int bcmgenet_probe(struct platform_device *pdev)
3276 goto err; 3353 goto err;
3277 } 3354 }
3278 3355
3356 spin_lock_init(&priv->lock);
3357
3279 SET_NETDEV_DEV(dev, &pdev->dev); 3358 SET_NETDEV_DEV(dev, &pdev->dev);
3280 dev_set_drvdata(&pdev->dev, dev); 3359 dev_set_drvdata(&pdev->dev, dev);
3281 ether_addr_copy(dev->dev_addr, macaddr); 3360 ether_addr_copy(dev->dev_addr, macaddr);
@@ -3338,6 +3417,13 @@ static int bcmgenet_probe(struct platform_device *pdev)
3338 priv->clk_eee = NULL; 3417 priv->clk_eee = NULL;
3339 } 3418 }
3340 3419
3420 /* If this is an internal GPHY, power it on now, before UniMAC is
3421 * brought out of reset as absolutely no UniMAC activity is allowed
3422 */
3423 if (dn && !of_property_read_string(dn, "phy-mode", &phy_mode_str) &&
3424 !strcasecmp(phy_mode_str, "internal"))
3425 bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
3426
3341 err = reset_umac(priv); 3427 err = reset_umac(priv);
3342 if (err) 3428 if (err)
3343 goto err_clk_disable; 3429 goto err_clk_disable;
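This probe hunk powers the internal GPHY before reset_umac(), keyed off the DT "phy-mode" property, since no UniMAC activity is allowed until the PHY is up; it replaces the bcmgenet_internal_phy_setup() helper removed from bcmmii.c below. A minimal sketch of the decision, with dt_phy_mode() as a hypothetical stand-in for the of_property_read_string() lookup:

/* Sketch only: dt_phy_mode() is hypothetical, NULL if absent. */
#include <strings.h>

extern const char *dt_phy_mode(void);

static int need_early_phy_power(void)
{
	const char *mode = dt_phy_mode();

	/* "internal" means the GPHY must be up before the UniMAC reset */
	return mode && !strcasecmp(mode, "internal");
}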
@@ -3395,7 +3481,8 @@ static int bcmgenet_suspend(struct device *d)
3395 3481
3396 bcmgenet_netif_stop(dev); 3482 bcmgenet_netif_stop(dev);
3397 3483
3398 phy_suspend(priv->phydev); 3484 if (!device_may_wakeup(d))
3485 phy_suspend(priv->phydev);
3399 3486
3400 netif_device_detach(dev); 3487 netif_device_detach(dev);
3401 3488
@@ -3492,7 +3579,8 @@ static int bcmgenet_resume(struct device *d)
3492 3579
3493 netif_device_attach(dev); 3580 netif_device_attach(dev);
3494 3581
3495 phy_resume(priv->phydev); 3582 if (!device_may_wakeup(d))
3583 phy_resume(priv->phydev);
3496 3584
3497 if (priv->eee.eee_enabled) 3585 if (priv->eee.eee_enabled)
3498 bcmgenet_eee_enable_set(dev, true); 3586 bcmgenet_eee_enable_set(dev, true);
@@ -3502,6 +3590,8 @@ static int bcmgenet_resume(struct device *d)
3502 return 0; 3590 return 0;
3503 3591
3504out_clk_disable: 3592out_clk_disable:
3593 if (priv->internal_phy)
3594 bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
3505 clk_disable_unprepare(priv->clk); 3595 clk_disable_unprepare(priv->clk);
3506 return ret; 3596 return ret;
3507} 3597}
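Taken together, the bcmgenet hunks convert irq0_stat from unsynchronized top-half/bottom-half shared state into a lock-protected accumulator: the ISR ORs new bits in under priv->lock, and the work item snapshots and clears the word under the same lock, so events are neither lost nor handled twice. A userspace model of the handoff, with a pthread mutex standing in for the spinlock:

/* Sketch only: pthread mutex models the kernel spinlock. */
#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int irq0_stat;

void isr_top_half(unsigned int status)
{
	pthread_mutex_lock(&lock);
	irq0_stat |= status;           /* accumulate, never overwrite */
	pthread_mutex_unlock(&lock);
}

unsigned int work_bottom_half(void)
{
	unsigned int status;

	pthread_mutex_lock(&lock);
	status = irq0_stat;            /* snapshot ... */
	irq0_stat = 0;                 /* ... and clear atomically */
	pthread_mutex_unlock(&lock);
	return status;                 /* handle events from the snapshot */
}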
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index 1e2dc34d331a..db7f289d65ae 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2014 Broadcom Corporation 2 * Copyright (c) 2014-2017 Broadcom
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as 5 * it under the terms of the GNU General Public License version 2 as
@@ -214,7 +214,9 @@ struct bcmgenet_mib_counters {
214#define MDIO_REG_SHIFT 16 214#define MDIO_REG_SHIFT 16
215#define MDIO_REG_MASK 0x1F 215#define MDIO_REG_MASK 0x1F
216 216
217#define UMAC_RBUF_OVFL_CNT 0x61C 217#define UMAC_RBUF_OVFL_CNT_V1 0x61C
218#define RBUF_OVFL_CNT_V2 0x80
219#define RBUF_OVFL_CNT_V3PLUS 0x94
218 220
219#define UMAC_MPD_CTRL 0x620 221#define UMAC_MPD_CTRL 0x620
220#define MPD_EN (1 << 0) 222#define MPD_EN (1 << 0)
@@ -224,7 +226,9 @@ struct bcmgenet_mib_counters {
224 226
225#define UMAC_MPD_PW_MS 0x624 227#define UMAC_MPD_PW_MS 0x624
226#define UMAC_MPD_PW_LS 0x628 228#define UMAC_MPD_PW_LS 0x628
227#define UMAC_RBUF_ERR_CNT 0x634 229#define UMAC_RBUF_ERR_CNT_V1 0x634
230#define RBUF_ERR_CNT_V2 0x84
231#define RBUF_ERR_CNT_V3PLUS 0x98
228#define UMAC_MDF_ERR_CNT 0x638 232#define UMAC_MDF_ERR_CNT 0x638
229#define UMAC_MDF_CTRL 0x650 233#define UMAC_MDF_CTRL 0x650
230#define UMAC_MDF_ADDR 0x654 234#define UMAC_MDF_ADDR 0x654
@@ -619,11 +623,13 @@ struct bcmgenet_priv {
619 struct work_struct bcmgenet_irq_work; 623 struct work_struct bcmgenet_irq_work;
620 int irq0; 624 int irq0;
621 int irq1; 625 int irq1;
622 unsigned int irq0_stat;
623 unsigned int irq1_stat;
624 int wol_irq; 626 int wol_irq;
625 bool wol_irq_disabled; 627 bool wol_irq_disabled;
626 628
629 /* shared status */
630 spinlock_t lock;
631 unsigned int irq0_stat;
632
627 /* HW descriptors/checksum variables */ 633 /* HW descriptors/checksum variables */
628 bool desc_64b_en; 634 bool desc_64b_en;
629 bool desc_rxchk_en; 635 bool desc_rxchk_en;
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index e87607621e62..2f9281936f0e 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -220,20 +220,6 @@ void bcmgenet_phy_power_set(struct net_device *dev, bool enable)
220 udelay(60); 220 udelay(60);
221} 221}
222 222
223static void bcmgenet_internal_phy_setup(struct net_device *dev)
224{
225 struct bcmgenet_priv *priv = netdev_priv(dev);
226 u32 reg;
227
228 /* Power up PHY */
229 bcmgenet_phy_power_set(dev, true);
230 /* enable APD */
231 reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
232 reg |= EXT_PWR_DN_EN_LD;
233 bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
234 bcmgenet_mii_reset(dev);
235}
236
237static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv) 223static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv)
238{ 224{
239 u32 reg; 225 u32 reg;
@@ -281,7 +267,6 @@ int bcmgenet_mii_config(struct net_device *dev)
281 267
282 if (priv->internal_phy) { 268 if (priv->internal_phy) {
283 phy_name = "internal PHY"; 269 phy_name = "internal PHY";
284 bcmgenet_internal_phy_setup(dev);
285 } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) { 270 } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
286 phy_name = "MoCA"; 271 phy_name = "MoCA";
287 bcmgenet_moca_phy_setup(priv); 272 bcmgenet_moca_phy_setup(priv);
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index 9e59663a6ead..0f6811860ad5 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -1930,13 +1930,13 @@ static void
1930bfa_ioc_send_enable(struct bfa_ioc *ioc) 1930bfa_ioc_send_enable(struct bfa_ioc *ioc)
1931{ 1931{
1932 struct bfi_ioc_ctrl_req enable_req; 1932 struct bfi_ioc_ctrl_req enable_req;
1933 struct timeval tv;
1934 1933
1935 bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ, 1934 bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
1936 bfa_ioc_portid(ioc)); 1935 bfa_ioc_portid(ioc));
1937 enable_req.clscode = htons(ioc->clscode); 1936 enable_req.clscode = htons(ioc->clscode);
1938 do_gettimeofday(&tv); 1937 enable_req.rsvd = htons(0);
1939 enable_req.tv_sec = ntohl(tv.tv_sec); 1938 /* overflow in 2106 */
1939 enable_req.tv_sec = ntohl(ktime_get_real_seconds());
1940 bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req)); 1940 bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req));
1941} 1941}
1942 1942
@@ -1947,6 +1947,10 @@ bfa_ioc_send_disable(struct bfa_ioc *ioc)
1947 1947
1948 bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ, 1948 bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
1949 bfa_ioc_portid(ioc)); 1949 bfa_ioc_portid(ioc));
1950 disable_req.clscode = htons(ioc->clscode);
1951 disable_req.rsvd = htons(0);
1952 /* overflow in 2106 */
1953 disable_req.tv_sec = ntohl(ktime_get_real_seconds());
1950 bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req)); 1954 bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req));
1951} 1955}
1952 1956
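The bfa_ioc change replaces do_gettimeofday() with ktime_get_real_seconds() and truncates the 64-bit count into the firmware's 32-bit field, which is what the "overflow in 2106" comment refers to (2^32 seconds after the 1970 epoch). A userspace approximation; htonl() does the byte swap here, which is numerically the same operation as the ntohl() used in the patch:

/* Sketch only: models the 32-bit timestamp the firmware receives. */
#include <stdint.h>
#include <time.h>
#include <arpa/inet.h>

static uint32_t fw_tv_sec(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_REALTIME, &ts);  /* ~ ktime_get_real_seconds() */
	return htonl((uint32_t)ts.tv_sec);   /* 32-bit field wraps in 2106 */
}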
diff --git a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
index 05c1c1dd7751..cebfe3bd086e 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
@@ -325,7 +325,7 @@ bnad_debugfs_write_regrd(struct file *file, const char __user *buf,
325 return PTR_ERR(kern_buf); 325 return PTR_ERR(kern_buf);
326 326
327 rc = sscanf(kern_buf, "%x:%x", &addr, &len); 327 rc = sscanf(kern_buf, "%x:%x", &addr, &len);
328 if (rc < 2) { 328 if (rc < 2 || len > UINT_MAX >> 2) {
329 netdev_warn(bnad->netdev, "failed to read user buffer\n"); 329 netdev_warn(bnad->netdev, "failed to read user buffer\n");
330 kfree(kern_buf); 330 kfree(kern_buf);
331 return -EINVAL; 331 return -EINVAL;
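The added len > UINT_MAX >> 2 test guards the later multiplication of len by sizeof(u32): without it a user-supplied length could overflow the allocation size. A self-contained model of the parse-and-validate step:

/* Sketch only: models the debugfs input validation. */
#include <stdio.h>
#include <limits.h>

static int parse_regrd(const char *buf, unsigned int *addr, unsigned int *len)
{
	/* reject short parses and lengths that would overflow len * 4 */
	if (sscanf(buf, "%x:%x", addr, len) < 2 || *len > UINT_MAX >> 2)
		return -1;
	return 0;
}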
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index be9c0e3f5ade..92f46b1375c3 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -152,7 +152,7 @@ struct octnic_gather {
152 */ 152 */
153 struct octeon_sg_entry *sg; 153 struct octeon_sg_entry *sg;
154 154
155 u64 sg_dma_ptr; 155 dma_addr_t sg_dma_ptr;
156}; 156};
157 157
158struct handshake { 158struct handshake {
@@ -734,6 +734,9 @@ static void delete_glists(struct lio *lio)
734 struct octnic_gather *g; 734 struct octnic_gather *g;
735 int i; 735 int i;
736 736
737 kfree(lio->glist_lock);
738 lio->glist_lock = NULL;
739
737 if (!lio->glist) 740 if (!lio->glist)
738 return; 741 return;
739 742
@@ -741,23 +744,26 @@ static void delete_glists(struct lio *lio)
741 do { 744 do {
742 g = (struct octnic_gather *) 745 g = (struct octnic_gather *)
743 list_delete_head(&lio->glist[i]); 746 list_delete_head(&lio->glist[i]);
744 if (g) { 747 if (g)
745 if (g->sg) {
746 dma_unmap_single(&lio->oct_dev->
747 pci_dev->dev,
748 g->sg_dma_ptr,
749 g->sg_size,
750 DMA_TO_DEVICE);
751 kfree((void *)((unsigned long)g->sg -
752 g->adjust));
753 }
754 kfree(g); 748 kfree(g);
755 }
756 } while (g); 749 } while (g);
750
751 if (lio->glists_virt_base && lio->glists_virt_base[i]) {
752 lio_dma_free(lio->oct_dev,
753 lio->glist_entry_size * lio->tx_qsize,
754 lio->glists_virt_base[i],
755 lio->glists_dma_base[i]);
756 }
757 } 757 }
758 758
759 kfree((void *)lio->glist); 759 kfree(lio->glists_virt_base);
760 kfree((void *)lio->glist_lock); 760 lio->glists_virt_base = NULL;
761
762 kfree(lio->glists_dma_base);
763 lio->glists_dma_base = NULL;
764
765 kfree(lio->glist);
766 lio->glist = NULL;
761} 767}
762 768
763/** 769/**
@@ -772,13 +778,30 @@ static int setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
772 lio->glist_lock = kcalloc(num_iqs, sizeof(*lio->glist_lock), 778 lio->glist_lock = kcalloc(num_iqs, sizeof(*lio->glist_lock),
773 GFP_KERNEL); 779 GFP_KERNEL);
774 if (!lio->glist_lock) 780 if (!lio->glist_lock)
775 return 1; 781 return -ENOMEM;
776 782
777 lio->glist = kcalloc(num_iqs, sizeof(*lio->glist), 783 lio->glist = kcalloc(num_iqs, sizeof(*lio->glist),
778 GFP_KERNEL); 784 GFP_KERNEL);
779 if (!lio->glist) { 785 if (!lio->glist) {
780 kfree((void *)lio->glist_lock); 786 kfree(lio->glist_lock);
781 return 1; 787 lio->glist_lock = NULL;
788 return -ENOMEM;
789 }
790
791 lio->glist_entry_size =
792 ROUNDUP8((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE);
793
794 /* allocate memory to store virtual and dma base address of
795 * per glist consistent memory
796 */
797 lio->glists_virt_base = kcalloc(num_iqs, sizeof(*lio->glists_virt_base),
798 GFP_KERNEL);
799 lio->glists_dma_base = kcalloc(num_iqs, sizeof(*lio->glists_dma_base),
800 GFP_KERNEL);
801
802 if (!lio->glists_virt_base || !lio->glists_dma_base) {
803 delete_glists(lio);
804 return -ENOMEM;
782 } 805 }
783 806
784 for (i = 0; i < num_iqs; i++) { 807 for (i = 0; i < num_iqs; i++) {
@@ -788,6 +811,16 @@ static int setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
788 811
789 INIT_LIST_HEAD(&lio->glist[i]); 812 INIT_LIST_HEAD(&lio->glist[i]);
790 813
814 lio->glists_virt_base[i] =
815 lio_dma_alloc(oct,
816 lio->glist_entry_size * lio->tx_qsize,
817 &lio->glists_dma_base[i]);
818
819 if (!lio->glists_virt_base[i]) {
820 delete_glists(lio);
821 return -ENOMEM;
822 }
823
791 for (j = 0; j < lio->tx_qsize; j++) { 824 for (j = 0; j < lio->tx_qsize; j++) {
792 g = kzalloc_node(sizeof(*g), GFP_KERNEL, 825 g = kzalloc_node(sizeof(*g), GFP_KERNEL,
793 numa_node); 826 numa_node);
@@ -796,43 +829,18 @@ static int setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
796 if (!g) 829 if (!g)
797 break; 830 break;
798 831
799 g->sg_size = ((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * 832 g->sg = lio->glists_virt_base[i] +
800 OCT_SG_ENTRY_SIZE); 833 (j * lio->glist_entry_size);
801 834
802 g->sg = kmalloc_node(g->sg_size + 8, 835 g->sg_dma_ptr = lio->glists_dma_base[i] +
803 GFP_KERNEL, numa_node); 836 (j * lio->glist_entry_size);
804 if (!g->sg)
805 g->sg = kmalloc(g->sg_size + 8, GFP_KERNEL);
806 if (!g->sg) {
807 kfree(g);
808 break;
809 }
810
811 /* The gather component should be aligned on 64-bit
812 * boundary
813 */
814 if (((unsigned long)g->sg) & 7) {
815 g->adjust = 8 - (((unsigned long)g->sg) & 7);
816 g->sg = (struct octeon_sg_entry *)
817 ((unsigned long)g->sg + g->adjust);
818 }
819 g->sg_dma_ptr = dma_map_single(&oct->pci_dev->dev,
820 g->sg, g->sg_size,
821 DMA_TO_DEVICE);
822 if (dma_mapping_error(&oct->pci_dev->dev,
823 g->sg_dma_ptr)) {
824 kfree((void *)((unsigned long)g->sg -
825 g->adjust));
826 kfree(g);
827 break;
828 }
829 837
830 list_add_tail(&g->list, &lio->glist[i]); 838 list_add_tail(&g->list, &lio->glist[i]);
831 } 839 }
832 840
833 if (j != lio->tx_qsize) { 841 if (j != lio->tx_qsize) {
834 delete_glists(lio); 842 delete_glists(lio);
835 return 1; 843 return -ENOMEM;
836 } 844 }
837 } 845 }
838 846
@@ -1885,9 +1893,6 @@ static void free_netsgbuf(void *buf)
1885 i++; 1893 i++;
1886 } 1894 }
1887 1895
1888 dma_sync_single_for_cpu(&lio->oct_dev->pci_dev->dev,
1889 g->sg_dma_ptr, g->sg_size, DMA_TO_DEVICE);
1890
1891 iq = skb_iq(lio, skb); 1896 iq = skb_iq(lio, skb);
1892 spin_lock(&lio->glist_lock[iq]); 1897 spin_lock(&lio->glist_lock[iq]);
1893 list_add_tail(&g->list, &lio->glist[iq]); 1898 list_add_tail(&g->list, &lio->glist[iq]);
@@ -1933,9 +1938,6 @@ static void free_netsgbuf_with_resp(void *buf)
1933 i++; 1938 i++;
1934 } 1939 }
1935 1940
1936 dma_sync_single_for_cpu(&lio->oct_dev->pci_dev->dev,
1937 g->sg_dma_ptr, g->sg_size, DMA_TO_DEVICE);
1938
1939 iq = skb_iq(lio, skb); 1941 iq = skb_iq(lio, skb);
1940 1942
1941 spin_lock(&lio->glist_lock[iq]); 1943 spin_lock(&lio->glist_lock[iq]);
@@ -3273,8 +3275,6 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
3273 i++; 3275 i++;
3274 } 3276 }
3275 3277
3276 dma_sync_single_for_device(&oct->pci_dev->dev, g->sg_dma_ptr,
3277 g->sg_size, DMA_TO_DEVICE);
3278 dptr = g->sg_dma_ptr; 3278 dptr = g->sg_dma_ptr;
3279 3279
3280 if (OCTEON_CN23XX_PF(oct)) 3280 if (OCTEON_CN23XX_PF(oct))
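The lio_main.c rework replaces per-entry kmalloc() plus dma_map_single() with one coherent DMA region per input queue, carved into equal glist_entry_size slots; the manual 8-byte alignment adjustment and the per-packet map/sync/unmap calls all disappear. A sketch of the resulting layout arithmetic, with virt_base/dma_base standing in for what lio_dma_alloc() returns:

/* Sketch only: uintptr_t models a device (DMA) address. */
#include <stddef.h>
#include <stdint.h>

struct glist {
	void      *virt_base;   /* CPU address of the coherent region */
	uintptr_t  dma_base;    /* device address of the same region */
	size_t     entry_size;  /* fixed, 8-byte-rounded entry size */
};

static void entry_at(const struct glist *gl, int j,
		     void **sg, uintptr_t *sg_dma)
{
	/* same offset on both sides, so CPU and device views of
	 * entry j always refer to the same memory */
	*sg = (char *)gl->virt_base + (size_t)j * gl->entry_size;
	*sg_dma = gl->dma_base + (uintptr_t)j * gl->entry_size;
}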
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
index 9d5e03502c76..7b83be4ce1fe 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
@@ -108,6 +108,8 @@ struct octnic_gather {
108 * received from the IP layer. 108 * received from the IP layer.
109 */ 109 */
110 struct octeon_sg_entry *sg; 110 struct octeon_sg_entry *sg;
111
112 dma_addr_t sg_dma_ptr;
111}; 113};
112 114
113struct octeon_device_priv { 115struct octeon_device_priv {
@@ -490,6 +492,9 @@ static void delete_glists(struct lio *lio)
490 struct octnic_gather *g; 492 struct octnic_gather *g;
491 int i; 493 int i;
492 494
495 kfree(lio->glist_lock);
496 lio->glist_lock = NULL;
497
493 if (!lio->glist) 498 if (!lio->glist)
494 return; 499 return;
495 500
@@ -497,17 +502,26 @@ static void delete_glists(struct lio *lio)
497 do { 502 do {
498 g = (struct octnic_gather *) 503 g = (struct octnic_gather *)
499 list_delete_head(&lio->glist[i]); 504 list_delete_head(&lio->glist[i]);
500 if (g) { 505 if (g)
501 if (g->sg)
502 kfree((void *)((unsigned long)g->sg -
503 g->adjust));
504 kfree(g); 506 kfree(g);
505 }
506 } while (g); 507 } while (g);
508
509 if (lio->glists_virt_base && lio->glists_virt_base[i]) {
510 lio_dma_free(lio->oct_dev,
511 lio->glist_entry_size * lio->tx_qsize,
512 lio->glists_virt_base[i],
513 lio->glists_dma_base[i]);
514 }
507 } 515 }
508 516
517 kfree(lio->glists_virt_base);
518 lio->glists_virt_base = NULL;
519
520 kfree(lio->glists_dma_base);
521 lio->glists_dma_base = NULL;
522
509 kfree(lio->glist); 523 kfree(lio->glist);
510 kfree(lio->glist_lock); 524 lio->glist = NULL;
511} 525}
512 526
513/** 527/**
@@ -522,13 +536,30 @@ static int setup_glists(struct lio *lio, int num_iqs)
522 lio->glist_lock = 536 lio->glist_lock =
523 kzalloc(sizeof(*lio->glist_lock) * num_iqs, GFP_KERNEL); 537 kzalloc(sizeof(*lio->glist_lock) * num_iqs, GFP_KERNEL);
524 if (!lio->glist_lock) 538 if (!lio->glist_lock)
525 return 1; 539 return -ENOMEM;
526 540
527 lio->glist = 541 lio->glist =
528 kzalloc(sizeof(*lio->glist) * num_iqs, GFP_KERNEL); 542 kzalloc(sizeof(*lio->glist) * num_iqs, GFP_KERNEL);
529 if (!lio->glist) { 543 if (!lio->glist) {
530 kfree(lio->glist_lock); 544 kfree(lio->glist_lock);
531 return 1; 545 lio->glist_lock = NULL;
546 return -ENOMEM;
547 }
548
549 lio->glist_entry_size =
550 ROUNDUP8((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE);
551
552 /* allocate memory to store virtual and dma base address of
553 * per glist consistent memory
554 */
555 lio->glists_virt_base = kcalloc(num_iqs, sizeof(*lio->glists_virt_base),
556 GFP_KERNEL);
557 lio->glists_dma_base = kcalloc(num_iqs, sizeof(*lio->glists_dma_base),
558 GFP_KERNEL);
559
560 if (!lio->glists_virt_base || !lio->glists_dma_base) {
561 delete_glists(lio);
562 return -ENOMEM;
532 } 563 }
533 564
534 for (i = 0; i < num_iqs; i++) { 565 for (i = 0; i < num_iqs; i++) {
@@ -536,34 +567,33 @@ static int setup_glists(struct lio *lio, int num_iqs)
536 567
537 INIT_LIST_HEAD(&lio->glist[i]); 568 INIT_LIST_HEAD(&lio->glist[i]);
538 569
570 lio->glists_virt_base[i] =
571 lio_dma_alloc(lio->oct_dev,
572 lio->glist_entry_size * lio->tx_qsize,
573 &lio->glists_dma_base[i]);
574
575 if (!lio->glists_virt_base[i]) {
576 delete_glists(lio);
577 return -ENOMEM;
578 }
579
539 for (j = 0; j < lio->tx_qsize; j++) { 580 for (j = 0; j < lio->tx_qsize; j++) {
540 g = kzalloc(sizeof(*g), GFP_KERNEL); 581 g = kzalloc(sizeof(*g), GFP_KERNEL);
541 if (!g) 582 if (!g)
542 break; 583 break;
543 584
544 g->sg_size = ((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * 585 g->sg = lio->glists_virt_base[i] +
545 OCT_SG_ENTRY_SIZE); 586 (j * lio->glist_entry_size);
546 587
547 g->sg = kmalloc(g->sg_size + 8, GFP_KERNEL); 588 g->sg_dma_ptr = lio->glists_dma_base[i] +
548 if (!g->sg) { 589 (j * lio->glist_entry_size);
549 kfree(g);
550 break;
551 }
552 590
553 /* The gather component should be aligned on 64-bit
554 * boundary
555 */
556 if (((unsigned long)g->sg) & 7) {
557 g->adjust = 8 - (((unsigned long)g->sg) & 7);
558 g->sg = (struct octeon_sg_entry *)
559 ((unsigned long)g->sg + g->adjust);
560 }
561 list_add_tail(&g->list, &lio->glist[i]); 591 list_add_tail(&g->list, &lio->glist[i]);
562 } 592 }
563 593
564 if (j != lio->tx_qsize) { 594 if (j != lio->tx_qsize) {
565 delete_glists(lio); 595 delete_glists(lio);
566 return 1; 596 return -ENOMEM;
567 } 597 }
568 } 598 }
569 599
@@ -1324,10 +1354,6 @@ static void free_netsgbuf(void *buf)
1324 i++; 1354 i++;
1325 } 1355 }
1326 1356
1327 dma_unmap_single(&lio->oct_dev->pci_dev->dev,
1328 finfo->dptr, g->sg_size,
1329 DMA_TO_DEVICE);
1330
1331 iq = skb_iq(lio, skb); 1357 iq = skb_iq(lio, skb);
1332 1358
1333 spin_lock(&lio->glist_lock[iq]); 1359 spin_lock(&lio->glist_lock[iq]);
@@ -1374,10 +1400,6 @@ static void free_netsgbuf_with_resp(void *buf)
1374 i++; 1400 i++;
1375 } 1401 }
1376 1402
1377 dma_unmap_single(&lio->oct_dev->pci_dev->dev,
1378 finfo->dptr, g->sg_size,
1379 DMA_TO_DEVICE);
1380
1381 iq = skb_iq(lio, skb); 1403 iq = skb_iq(lio, skb);
1382 1404
1383 spin_lock(&lio->glist_lock[iq]); 1405 spin_lock(&lio->glist_lock[iq]);
@@ -2382,23 +2404,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
2382 i++; 2404 i++;
2383 } 2405 }
2384 2406
2385 dptr = dma_map_single(&oct->pci_dev->dev, 2407 dptr = g->sg_dma_ptr;
2386 g->sg, g->sg_size,
2387 DMA_TO_DEVICE);
2388 if (dma_mapping_error(&oct->pci_dev->dev, dptr)) {
2389 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 4\n",
2390 __func__);
2391 dma_unmap_single(&oct->pci_dev->dev, g->sg[0].ptr[0],
2392 skb->len - skb->data_len,
2393 DMA_TO_DEVICE);
2394 for (j = 1; j <= frags; j++) {
2395 frag = &skb_shinfo(skb)->frags[j - 1];
2396 dma_unmap_page(&oct->pci_dev->dev,
2397 g->sg[j >> 2].ptr[j & 3],
2398 frag->size, DMA_TO_DEVICE);
2399 }
2400 return NETDEV_TX_BUSY;
2401 }
2402 2408
2403 ndata.cmd.cmd3.dptr = dptr; 2409 ndata.cmd.cmd3.dptr = dptr;
2404 finfo->dptr = dptr; 2410 finfo->dptr = dptr;
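Alongside the same coherent-allocation rework for the VF, the setup paths now fail with -ENOMEM rather than the bare 1, so callers can propagate a real errno. A minimal illustration of the convention:

/* Sketch only: models the errno-style return cleanup. */
#include <errno.h>
#include <stdlib.h>

static int setup_table(void **tbl, size_t n, size_t sz)
{
	*tbl = calloc(n, sz);
	if (!*tbl)
		return -ENOMEM;   /* was "return 1" in the old code */
	return 0;
}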
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_config.h b/drivers/net/ethernet/cavium/liquidio/octeon_config.h
index b3dc2e9651a8..d29ebc531151 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_config.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_config.h
@@ -71,17 +71,17 @@
71#define CN23XX_MAX_RINGS_PER_VF 8 71#define CN23XX_MAX_RINGS_PER_VF 8
72 72
73#define CN23XX_MAX_INPUT_QUEUES CN23XX_MAX_RINGS_PER_PF 73#define CN23XX_MAX_INPUT_QUEUES CN23XX_MAX_RINGS_PER_PF
74#define CN23XX_MAX_IQ_DESCRIPTORS 2048 74#define CN23XX_MAX_IQ_DESCRIPTORS 512
75#define CN23XX_DB_MIN 1 75#define CN23XX_DB_MIN 1
76#define CN23XX_DB_MAX 8 76#define CN23XX_DB_MAX 8
77#define CN23XX_DB_TIMEOUT 1 77#define CN23XX_DB_TIMEOUT 1
78 78
79#define CN23XX_MAX_OUTPUT_QUEUES CN23XX_MAX_RINGS_PER_PF 79#define CN23XX_MAX_OUTPUT_QUEUES CN23XX_MAX_RINGS_PER_PF
80#define CN23XX_MAX_OQ_DESCRIPTORS 2048 80#define CN23XX_MAX_OQ_DESCRIPTORS 512
81#define CN23XX_OQ_BUF_SIZE 1536 81#define CN23XX_OQ_BUF_SIZE 1536
82#define CN23XX_OQ_PKTSPER_INTR 128 82#define CN23XX_OQ_PKTSPER_INTR 128
83/*#define CAVIUM_ONLY_CN23XX_RX_PERF*/ 83/*#define CAVIUM_ONLY_CN23XX_RX_PERF*/
84#define CN23XX_OQ_REFIL_THRESHOLD 128 84#define CN23XX_OQ_REFIL_THRESHOLD 16
85 85
86#define CN23XX_OQ_INTR_PKT 64 86#define CN23XX_OQ_INTR_PKT 64
87#define CN23XX_OQ_INTR_TIME 100 87#define CN23XX_OQ_INTR_TIME 100
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
index 0be87d119a97..79f809479af6 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
@@ -155,11 +155,6 @@ octeon_droq_destroy_ring_buffers(struct octeon_device *oct,
155 recv_buffer_destroy(droq->recv_buf_list[i].buffer, 155 recv_buffer_destroy(droq->recv_buf_list[i].buffer,
156 pg_info); 156 pg_info);
157 157
158 if (droq->desc_ring && droq->desc_ring[i].info_ptr)
159 lio_unmap_ring_info(oct->pci_dev,
160 (u64)droq->
161 desc_ring[i].info_ptr,
162 OCT_DROQ_INFO_SIZE);
163 droq->recv_buf_list[i].buffer = NULL; 158 droq->recv_buf_list[i].buffer = NULL;
164 } 159 }
165 160
@@ -211,10 +206,7 @@ int octeon_delete_droq(struct octeon_device *oct, u32 q_no)
211 vfree(droq->recv_buf_list); 206 vfree(droq->recv_buf_list);
212 207
213 if (droq->info_base_addr) 208 if (droq->info_base_addr)
214 cnnic_free_aligned_dma(oct->pci_dev, droq->info_list, 209 lio_free_info_buffer(oct, droq);
215 droq->info_alloc_size,
216 droq->info_base_addr,
217 droq->info_list_dma);
218 210
219 if (droq->desc_ring) 211 if (droq->desc_ring)
220 lio_dma_free(oct, (droq->max_count * OCT_DROQ_DESC_SIZE), 212 lio_dma_free(oct, (droq->max_count * OCT_DROQ_DESC_SIZE),
@@ -294,12 +286,7 @@ int octeon_init_droq(struct octeon_device *oct,
294 dev_dbg(&oct->pci_dev->dev, "droq[%d]: num_desc: %d\n", q_no, 286 dev_dbg(&oct->pci_dev->dev, "droq[%d]: num_desc: %d\n", q_no,
295 droq->max_count); 287 droq->max_count);
296 288
297 droq->info_list = 289 droq->info_list = lio_alloc_info_buffer(oct, droq);
298 cnnic_numa_alloc_aligned_dma((droq->max_count *
299 OCT_DROQ_INFO_SIZE),
300 &droq->info_alloc_size,
301 &droq->info_base_addr,
302 numa_node);
303 if (!droq->info_list) { 290 if (!droq->info_list) {
304 dev_err(&oct->pci_dev->dev, "Cannot allocate memory for info list.\n"); 291 dev_err(&oct->pci_dev->dev, "Cannot allocate memory for info list.\n");
305 lio_dma_free(oct, (droq->max_count * OCT_DROQ_DESC_SIZE), 292 lio_dma_free(oct, (droq->max_count * OCT_DROQ_DESC_SIZE),
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.h b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
index e62074090681..6982c0af5ecc 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
@@ -325,10 +325,10 @@ struct octeon_droq {
325 size_t desc_ring_dma; 325 size_t desc_ring_dma;
326 326
327 /** Info ptr list are allocated at this virtual address. */ 327 /** Info ptr list are allocated at this virtual address. */
328 size_t info_base_addr; 328 void *info_base_addr;
329 329
330 /** DMA mapped address of the info list */ 330 /** DMA mapped address of the info list */
331 size_t info_list_dma; 331 dma_addr_t info_list_dma;
332 332
333 /** Allocated size of info list. */ 333 /** Allocated size of info list. */
334 u32 info_alloc_size; 334 u32 info_alloc_size;
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_main.h b/drivers/net/ethernet/cavium/liquidio/octeon_main.h
index aa36e9ae7676..bed9ef17bc26 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_main.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_main.h
@@ -140,48 +140,6 @@ err_release_region:
140 return 1; 140 return 1;
141} 141}
142 142
143static inline void *
144cnnic_numa_alloc_aligned_dma(u32 size,
145 u32 *alloc_size,
146 size_t *orig_ptr,
147 int numa_node)
148{
149 int retries = 0;
150 void *ptr = NULL;
151
152#define OCTEON_MAX_ALLOC_RETRIES 1
153 do {
154 struct page *page = NULL;
155
156 page = alloc_pages_node(numa_node,
157 GFP_KERNEL,
158 get_order(size));
159 if (!page)
160 page = alloc_pages(GFP_KERNEL,
161 get_order(size));
162 ptr = (void *)page_address(page);
163 if ((unsigned long)ptr & 0x07) {
164 __free_pages(page, get_order(size));
165 ptr = NULL;
166 /* Increment the size required if the first
167 * attempt failed.
168 */
169 if (!retries)
170 size += 7;
171 }
172 retries++;
173 } while ((retries <= OCTEON_MAX_ALLOC_RETRIES) && !ptr);
174
175 *alloc_size = size;
176 *orig_ptr = (unsigned long)ptr;
177 if ((unsigned long)ptr & 0x07)
178 ptr = (void *)(((unsigned long)ptr + 7) & ~(7UL));
179 return ptr;
180}
181
182#define cnnic_free_aligned_dma(pci_dev, ptr, size, orig_ptr, dma_addr) \
183 free_pages(orig_ptr, get_order(size))
184
185static inline int 143static inline int
186sleep_cond(wait_queue_head_t *wait_queue, int *condition) 144sleep_cond(wait_queue_head_t *wait_queue, int *condition)
187{ 145{
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_network.h b/drivers/net/ethernet/cavium/liquidio/octeon_network.h
index 6bb89419006e..eef2a1e8a7e3 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_network.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_network.h
@@ -62,6 +62,9 @@ struct lio {
62 62
63 /** Array of gather component linked lists */ 63 /** Array of gather component linked lists */
64 struct list_head *glist; 64 struct list_head *glist;
65 void **glists_virt_base;
66 dma_addr_t *glists_dma_base;
67 u32 glist_entry_size;
65 68
66 /** Pointer to the NIC properties for the Octeon device this network 69 /** Pointer to the NIC properties for the Octeon device this network
67 * interface is associated with. 70 * interface is associated with.
@@ -344,6 +347,29 @@ static inline void tx_buffer_free(void *buffer)
344#define lio_dma_free(oct, size, virt_addr, dma_addr) \ 347#define lio_dma_free(oct, size, virt_addr, dma_addr) \
345 dma_free_coherent(&(oct)->pci_dev->dev, size, virt_addr, dma_addr) 348 dma_free_coherent(&(oct)->pci_dev->dev, size, virt_addr, dma_addr)
346 349
350static inline void *
351lio_alloc_info_buffer(struct octeon_device *oct,
352 struct octeon_droq *droq)
353{
354 void *virt_ptr;
355
356 virt_ptr = lio_dma_alloc(oct, (droq->max_count * OCT_DROQ_INFO_SIZE),
357 &droq->info_list_dma);
358 if (virt_ptr) {
359 droq->info_alloc_size = droq->max_count * OCT_DROQ_INFO_SIZE;
360 droq->info_base_addr = virt_ptr;
361 }
362
363 return virt_ptr;
364}
365
366static inline void lio_free_info_buffer(struct octeon_device *oct,
367 struct octeon_droq *droq)
368{
369 lio_dma_free(oct, droq->info_alloc_size, droq->info_base_addr,
370 droq->info_list_dma);
371}
372
347static inline 373static inline
348void *get_rbd(struct sk_buff *skb) 374void *get_rbd(struct sk_buff *skb)
349{ 375{
@@ -359,22 +385,7 @@ void *get_rbd(struct sk_buff *skb)
359static inline u64 385static inline u64
360lio_map_ring_info(struct octeon_droq *droq, u32 i) 386lio_map_ring_info(struct octeon_droq *droq, u32 i)
361{ 387{
362 dma_addr_t dma_addr; 388 return droq->info_list_dma + (i * sizeof(struct octeon_droq_info));
363 struct octeon_device *oct = droq->oct_dev;
364
365 dma_addr = dma_map_single(&oct->pci_dev->dev, &droq->info_list[i],
366 OCT_DROQ_INFO_SIZE, DMA_FROM_DEVICE);
367
368 WARN_ON(dma_mapping_error(&oct->pci_dev->dev, dma_addr));
369
370 return (u64)dma_addr;
371}
372
373static inline void
374lio_unmap_ring_info(struct pci_dev *pci_dev,
375 u64 info_ptr, u32 size)
376{
377 dma_unmap_single(&pci_dev->dev, info_ptr, size, DMA_FROM_DEVICE);
378} 389}
379 390
380static inline u64 391static inline u64
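Because the info list now lives in a single coherent allocation, lio_map_ring_info() reduces to offset arithmetic against info_list_dma; there is no per-entry dma_map_single() and therefore no mapping-error path left to check. A sketch of the reduced helper:

/* Sketch only: uintptr_t models the coherent region's device address. */
#include <stddef.h>
#include <stdint.h>

struct droq_like {
	uintptr_t info_list_dma;
};

static uint64_t map_ring_info(const struct droq_like *d, uint32_t i,
			      size_t info_size)
{
	/* the whole region was mapped once at allocation time, so
	 * entry i is just an offset from the base */
	return (uint64_t)d->info_list_dma + (uint64_t)i * info_size;
}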
diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
index e739c7153562..2269ff562d95 100644
--- a/drivers/net/ethernet/cavium/thunder/nic.h
+++ b/drivers/net/ethernet/cavium/thunder/nic.h
@@ -269,6 +269,7 @@ struct nicvf {
269#define MAX_QUEUES_PER_QSET 8 269#define MAX_QUEUES_PER_QSET 8
270 struct queue_set *qs; 270 struct queue_set *qs;
271 struct nicvf_cq_poll *napi[8]; 271 struct nicvf_cq_poll *napi[8];
272 void *iommu_domain;
272 u8 vf_id; 273 u8 vf_id;
273 u8 sqs_id; 274 u8 sqs_id;
274 bool sqs_mode; 275 bool sqs_mode;
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 6feaa24bcfd4..24017588f531 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -16,6 +16,7 @@
16#include <linux/log2.h> 16#include <linux/log2.h>
17#include <linux/prefetch.h> 17#include <linux/prefetch.h>
18#include <linux/irq.h> 18#include <linux/irq.h>
19#include <linux/iommu.h>
19 20
20#include "nic_reg.h" 21#include "nic_reg.h"
21#include "nic.h" 22#include "nic.h"
@@ -525,7 +526,12 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev,
525 /* Get actual TSO descriptors and free them */ 526 /* Get actual TSO descriptors and free them */
526 tso_sqe = 527 tso_sqe =
527 (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, hdr->rsvd2); 528 (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, hdr->rsvd2);
529 nicvf_unmap_sndq_buffers(nic, sq, hdr->rsvd2,
530 tso_sqe->subdesc_cnt);
528 nicvf_put_sq_desc(sq, tso_sqe->subdesc_cnt + 1); 531 nicvf_put_sq_desc(sq, tso_sqe->subdesc_cnt + 1);
532 } else {
533 nicvf_unmap_sndq_buffers(nic, sq, cqe_tx->sqe_ptr,
534 hdr->subdesc_cnt);
529 } 535 }
530 nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1); 536 nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
531 prefetch(skb); 537 prefetch(skb);
@@ -576,6 +582,7 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev,
576{ 582{
577 struct sk_buff *skb; 583 struct sk_buff *skb;
578 struct nicvf *nic = netdev_priv(netdev); 584 struct nicvf *nic = netdev_priv(netdev);
585 struct nicvf *snic = nic;
579 int err = 0; 586 int err = 0;
580 int rq_idx; 587 int rq_idx;
581 588
@@ -592,7 +599,7 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev,
592 if (err && !cqe_rx->rb_cnt) 599 if (err && !cqe_rx->rb_cnt)
593 return; 600 return;
594 601
595 skb = nicvf_get_rcv_skb(nic, cqe_rx); 602 skb = nicvf_get_rcv_skb(snic, cqe_rx);
596 if (!skb) { 603 if (!skb) {
597 netdev_dbg(nic->netdev, "Packet not received\n"); 604 netdev_dbg(nic->netdev, "Packet not received\n");
598 return; 605 return;
@@ -1643,6 +1650,9 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1643 if (!pass1_silicon(nic->pdev)) 1650 if (!pass1_silicon(nic->pdev))
1644 nic->hw_tso = true; 1651 nic->hw_tso = true;
1645 1652
1653 /* Get iommu domain for iova to physical addr conversion */
1654 nic->iommu_domain = iommu_get_domain_for_dev(dev);
1655
1646 pci_read_config_word(nic->pdev, PCI_SUBSYSTEM_ID, &sdevid); 1656 pci_read_config_word(nic->pdev, PCI_SUBSYSTEM_ID, &sdevid);
1647 if (sdevid == 0xA134) 1657 if (sdevid == 0xA134)
1648 nic->t88 = true; 1658 nic->t88 = true;
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index ac0390be3b12..f13289f0d238 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -10,6 +10,7 @@
10#include <linux/netdevice.h> 10#include <linux/netdevice.h>
11#include <linux/ip.h> 11#include <linux/ip.h>
12#include <linux/etherdevice.h> 12#include <linux/etherdevice.h>
13#include <linux/iommu.h>
13#include <net/ip.h> 14#include <net/ip.h>
14#include <net/tso.h> 15#include <net/tso.h>
15 16
@@ -18,6 +19,16 @@
18#include "q_struct.h" 19#include "q_struct.h"
19#include "nicvf_queues.h" 20#include "nicvf_queues.h"
20 21
22#define NICVF_PAGE_ORDER ((PAGE_SIZE <= 4096) ? PAGE_ALLOC_COSTLY_ORDER : 0)
23
24static inline u64 nicvf_iova_to_phys(struct nicvf *nic, dma_addr_t dma_addr)
25{
26 /* Translation is installed only when IOMMU is present */
27 if (nic->iommu_domain)
28 return iommu_iova_to_phys(nic->iommu_domain, dma_addr);
29 return dma_addr;
30}
31
21static void nicvf_get_page(struct nicvf *nic) 32static void nicvf_get_page(struct nicvf *nic)
22{ 33{
23 if (!nic->rb_pageref || !nic->rb_page) 34 if (!nic->rb_pageref || !nic->rb_page)
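nicvf_iova_to_phys() is the pivot of the thunder changes: once buffers are handed to the device as DMA addresses, recovering the struct page for put_page() requires translating the IOVA back through the IOMMU when one is present. A userspace model of the fallback, with translate() as a hypothetical stand-in for iommu_iova_to_phys():

/* Sketch only: translate() is hypothetical. */
#include <stdint.h>

struct nic_like { void *iommu_domain; };

extern uint64_t translate(void *domain, uint64_t iova);

static uint64_t iova_to_phys(const struct nic_like *nic, uint64_t dma_addr)
{
	if (nic->iommu_domain)
		return translate(nic->iommu_domain, dma_addr);
	return dma_addr;    /* no IOMMU: DMA addresses are physical */
}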
@@ -87,7 +98,7 @@ static void nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem)
87static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp, 98static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp,
88 u32 buf_len, u64 **rbuf) 99 u32 buf_len, u64 **rbuf)
89{ 100{
90 int order = (PAGE_SIZE <= 4096) ? PAGE_ALLOC_COSTLY_ORDER : 0; 101 int order = NICVF_PAGE_ORDER;
91 102
92 /* Check if request can be accomodated in previous allocated page */ 103 /* Check if request can be accomodated in previous allocated page */
93 if (nic->rb_page && 104 if (nic->rb_page &&
@@ -97,22 +108,27 @@ static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp,
97 } 108 }
98 109
99 nicvf_get_page(nic); 110 nicvf_get_page(nic);
100 nic->rb_page = NULL;
101 111
102 /* Allocate a new page */ 112 /* Allocate a new page */
113 nic->rb_page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
114 order);
103 if (!nic->rb_page) { 115 if (!nic->rb_page) {
104 nic->rb_page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN, 116 this_cpu_inc(nic->pnicvf->drv_stats->rcv_buffer_alloc_failures);
105 order); 117 return -ENOMEM;
106 if (!nic->rb_page) {
107 this_cpu_inc(nic->pnicvf->drv_stats->
108 rcv_buffer_alloc_failures);
109 return -ENOMEM;
110 }
111 nic->rb_page_offset = 0;
112 } 118 }
113 119 nic->rb_page_offset = 0;
114ret: 120ret:
115 *rbuf = (u64 *)((u64)page_address(nic->rb_page) + nic->rb_page_offset); 121 /* HW will ensure data coherency, CPU sync not required */
122 *rbuf = (u64 *)((u64)dma_map_page_attrs(&nic->pdev->dev, nic->rb_page,
123 nic->rb_page_offset, buf_len,
124 DMA_FROM_DEVICE,
125 DMA_ATTR_SKIP_CPU_SYNC));
126 if (dma_mapping_error(&nic->pdev->dev, (dma_addr_t)*rbuf)) {
127 if (!nic->rb_page_offset)
128 __free_pages(nic->rb_page, order);
129 nic->rb_page = NULL;
130 return -ENOMEM;
131 }
116 nic->rb_page_offset += buf_len; 132 nic->rb_page_offset += buf_len;
117 133
118 return 0; 134 return 0;
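The receive-buffer path above now publishes a streaming DMA address (mapped with DMA_ATTR_SKIP_CPU_SYNC, since the hardware keeps the data coherent) instead of a virt_to_phys() value, and a mapping failure releases the page only when this was its first fragment. A compact sketch of that flow under hypothetical map_page()/free_page_frag() helpers:

/* Sketch only: map_page() returns 0 on mapping failure. */
#include <stddef.h>
#include <stdint.h>

struct rx { void *page; size_t off; };

extern uintptr_t map_page(void *page, size_t off, size_t len);
extern void free_page_frag(void *page);

static int alloc_rcv_buffer(struct rx *rx, size_t len, uintptr_t *rbuf)
{
	uintptr_t dma = map_page(rx->page, rx->off, len);

	if (!dma) {
		if (!rx->off)                /* first fragment: page unused */
			free_page_frag(rx->page);
		rx->page = NULL;
		return -1;
	}
	*rbuf = dma;                     /* descriptor stores a DMA address */
	rx->off += len;
	return 0;
}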
@@ -158,16 +174,21 @@ static int nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr,
158 rbdr->dma_size = buf_size; 174 rbdr->dma_size = buf_size;
159 rbdr->enable = true; 175 rbdr->enable = true;
160 rbdr->thresh = RBDR_THRESH; 176 rbdr->thresh = RBDR_THRESH;
177 rbdr->head = 0;
178 rbdr->tail = 0;
161 179
162 nic->rb_page = NULL; 180 nic->rb_page = NULL;
163 for (idx = 0; idx < ring_len; idx++) { 181 for (idx = 0; idx < ring_len; idx++) {
164 err = nicvf_alloc_rcv_buffer(nic, GFP_KERNEL, RCV_FRAG_LEN, 182 err = nicvf_alloc_rcv_buffer(nic, GFP_KERNEL, RCV_FRAG_LEN,
165 &rbuf); 183 &rbuf);
166 if (err) 184 if (err) {
185 /* To free already allocated and mapped ones */
186 rbdr->tail = idx - 1;
167 return err; 187 return err;
188 }
168 189
169 desc = GET_RBDR_DESC(rbdr, idx); 190 desc = GET_RBDR_DESC(rbdr, idx);
170 desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN; 191 desc->buf_addr = (u64)rbuf >> NICVF_RCV_BUF_ALIGN;
171 } 192 }
172 193
173 nicvf_get_page(nic); 194 nicvf_get_page(nic);
@@ -179,7 +200,7 @@ static int nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr,
179static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr) 200static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
180{ 201{
181 int head, tail; 202 int head, tail;
182 u64 buf_addr; 203 u64 buf_addr, phys_addr;
183 struct rbdr_entry_t *desc; 204 struct rbdr_entry_t *desc;
184 205
185 if (!rbdr) 206 if (!rbdr)
@@ -192,18 +213,26 @@ static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
192 head = rbdr->head; 213 head = rbdr->head;
193 tail = rbdr->tail; 214 tail = rbdr->tail;
194 215
195 /* Free SKBs */ 216 /* Release page references */
196 while (head != tail) { 217 while (head != tail) {
197 desc = GET_RBDR_DESC(rbdr, head); 218 desc = GET_RBDR_DESC(rbdr, head);
198 buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN; 219 buf_addr = ((u64)desc->buf_addr) << NICVF_RCV_BUF_ALIGN;
199 put_page(virt_to_page(phys_to_virt(buf_addr))); 220 phys_addr = nicvf_iova_to_phys(nic, buf_addr);
221 dma_unmap_page_attrs(&nic->pdev->dev, buf_addr, RCV_FRAG_LEN,
222 DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
223 if (phys_addr)
224 put_page(virt_to_page(phys_to_virt(phys_addr)));
200 head++; 225 head++;
201 head &= (rbdr->dmem.q_len - 1); 226 head &= (rbdr->dmem.q_len - 1);
202 } 227 }
203 /* Free SKB of tail desc */ 228 /* Release buffer of tail desc */
204 desc = GET_RBDR_DESC(rbdr, tail); 229 desc = GET_RBDR_DESC(rbdr, tail);
205 buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN; 230 buf_addr = ((u64)desc->buf_addr) << NICVF_RCV_BUF_ALIGN;
206 put_page(virt_to_page(phys_to_virt(buf_addr))); 231 phys_addr = nicvf_iova_to_phys(nic, buf_addr);
232 dma_unmap_page_attrs(&nic->pdev->dev, buf_addr, RCV_FRAG_LEN,
233 DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
234 if (phys_addr)
235 put_page(virt_to_page(phys_to_virt(phys_addr)));
207 236
208 /* Free RBDR ring */ 237 /* Free RBDR ring */
209 nicvf_free_q_desc_mem(nic, &rbdr->dmem); 238 nicvf_free_q_desc_mem(nic, &rbdr->dmem);
@@ -250,7 +279,7 @@ refill:
250 break; 279 break;
251 280
252 desc = GET_RBDR_DESC(rbdr, tail); 281 desc = GET_RBDR_DESC(rbdr, tail);
253 desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN; 282 desc->buf_addr = (u64)rbuf >> NICVF_RCV_BUF_ALIGN;
254 refill_rb_cnt--; 283 refill_rb_cnt--;
255 new_rb++; 284 new_rb++;
256 } 285 }
@@ -361,9 +390,29 @@ static int nicvf_init_snd_queue(struct nicvf *nic,
361 return 0; 390 return 0;
362} 391}
363 392
393void nicvf_unmap_sndq_buffers(struct nicvf *nic, struct snd_queue *sq,
394 int hdr_sqe, u8 subdesc_cnt)
395{
396 u8 idx;
397 struct sq_gather_subdesc *gather;
398
399 /* Unmap DMA mapped skb data buffers */
400 for (idx = 0; idx < subdesc_cnt; idx++) {
401 hdr_sqe++;
402 hdr_sqe &= (sq->dmem.q_len - 1);
403 gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, hdr_sqe);
404 /* HW will ensure data coherency, CPU sync not required */
405 dma_unmap_page_attrs(&nic->pdev->dev, gather->addr,
406 gather->size, DMA_TO_DEVICE,
407 DMA_ATTR_SKIP_CPU_SYNC);
408 }
409}
410
364static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq) 411static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
365{ 412{
366 struct sk_buff *skb; 413 struct sk_buff *skb;
414 struct sq_hdr_subdesc *hdr;
415 struct sq_hdr_subdesc *tso_sqe;
367 416
368 if (!sq) 417 if (!sq)
369 return; 418 return;
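nicvf_unmap_sndq_buffers() walks the gather subdescriptors that follow a header descriptor and unmaps each one, wrapping the index with the power-of-two ring mask. A minimal model of the walk; unmap() stands in for dma_unmap_page_attrs():

/* Sketch only: unmap() is hypothetical; qlen must be a power of two. */
#include <stdint.h>

struct gather { uintptr_t addr; uint32_t size; };

extern void unmap(uintptr_t addr, uint32_t size);

static void unmap_sndq(const struct gather *ring, uint32_t qlen,
		       uint32_t hdr_idx, unsigned int cnt)
{
	while (cnt--) {
		hdr_idx = (hdr_idx + 1) & (qlen - 1);   /* wrap in the ring */
		unmap(ring[hdr_idx].addr, ring[hdr_idx].size);
	}
}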
@@ -379,8 +428,22 @@ static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
379 smp_rmb(); 428 smp_rmb();
380 while (sq->head != sq->tail) { 429 while (sq->head != sq->tail) {
381 skb = (struct sk_buff *)sq->skbuff[sq->head]; 430 skb = (struct sk_buff *)sq->skbuff[sq->head];
382 if (skb) 431 if (!skb)
383 dev_kfree_skb_any(skb); 432 goto next;
433 hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head);
434 /* Check for dummy descriptor used for HW TSO offload on 88xx */
435 if (hdr->dont_send) {
436 /* Get actual TSO descriptors and unmap them */
437 tso_sqe =
438 (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, hdr->rsvd2);
439 nicvf_unmap_sndq_buffers(nic, sq, hdr->rsvd2,
440 tso_sqe->subdesc_cnt);
441 } else {
442 nicvf_unmap_sndq_buffers(nic, sq, sq->head,
443 hdr->subdesc_cnt);
444 }
445 dev_kfree_skb_any(skb);
446next:
384 sq->head++; 447 sq->head++;
385 sq->head &= (sq->dmem.q_len - 1); 448 sq->head &= (sq->dmem.q_len - 1);
386 } 449 }
@@ -559,9 +622,11 @@ static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
559 nicvf_send_msg_to_pf(nic, &mbx); 622 nicvf_send_msg_to_pf(nic, &mbx);
560 623
561 if (!nic->sqs_mode && (qidx == 0)) { 624 if (!nic->sqs_mode && (qidx == 0)) {
562 /* Enable checking L3/L4 length and TCP/UDP checksums */ 625 /* Enable checking L3/L4 length and TCP/UDP checksums
626 * Also allow IPv6 pkts with zero UDP checksum.
627 */
563 nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, 628 nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0,
564 (BIT(24) | BIT(23) | BIT(21))); 629 (BIT(24) | BIT(23) | BIT(21) | BIT(20)));
565 nicvf_config_vlan_stripping(nic, nic->netdev->features); 630 nicvf_config_vlan_stripping(nic, nic->netdev->features);
566 } 631 }
567 632
@@ -882,6 +947,14 @@ static inline int nicvf_get_sq_desc(struct snd_queue *sq, int desc_cnt)
882 return qentry; 947 return qentry;
883} 948}
884 949
950/* Rollback to previous tail pointer when descriptors not used */
951static inline void nicvf_rollback_sq_desc(struct snd_queue *sq,
952 int qentry, int desc_cnt)
953{
954 sq->tail = qentry;
955 atomic_add(desc_cnt, &sq->free_cnt);
956}
957
885/* Free descriptor back to SQ for future use */ 958/* Free descriptor back to SQ for future use */
886void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt) 959void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt)
887{ 960{
@@ -1207,8 +1280,9 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct snd_queue *sq,
1207 struct sk_buff *skb, u8 sq_num) 1280 struct sk_buff *skb, u8 sq_num)
1208{ 1281{
1209 int i, size; 1282 int i, size;
1210 int subdesc_cnt, tso_sqe = 0; 1283 int subdesc_cnt, hdr_sqe = 0;
1211 int qentry; 1284 int qentry;
1285 u64 dma_addr;
1212 1286
1213 subdesc_cnt = nicvf_sq_subdesc_required(nic, skb); 1287 subdesc_cnt = nicvf_sq_subdesc_required(nic, skb);
1214 if (subdesc_cnt > atomic_read(&sq->free_cnt)) 1288 if (subdesc_cnt > atomic_read(&sq->free_cnt))
@@ -1223,12 +1297,21 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct snd_queue *sq,
1223 /* Add SQ header subdesc */ 1297 /* Add SQ header subdesc */
1224 nicvf_sq_add_hdr_subdesc(nic, sq, qentry, subdesc_cnt - 1, 1298 nicvf_sq_add_hdr_subdesc(nic, sq, qentry, subdesc_cnt - 1,
1225 skb, skb->len); 1299 skb, skb->len);
1226 tso_sqe = qentry; 1300 hdr_sqe = qentry;
1227 1301
1228 /* Add SQ gather subdescs */ 1302 /* Add SQ gather subdescs */
1229 qentry = nicvf_get_nxt_sqentry(sq, qentry); 1303 qentry = nicvf_get_nxt_sqentry(sq, qentry);
1230 size = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len; 1304 size = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
1231 nicvf_sq_add_gather_subdesc(sq, qentry, size, virt_to_phys(skb->data)); 1305 /* HW will ensure data coherency, CPU sync not required */
1306 dma_addr = dma_map_page_attrs(&nic->pdev->dev, virt_to_page(skb->data),
1307 offset_in_page(skb->data), size,
1308 DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
1309 if (dma_mapping_error(&nic->pdev->dev, dma_addr)) {
1310 nicvf_rollback_sq_desc(sq, qentry, subdesc_cnt);
1311 return 0;
1312 }
1313
1314 nicvf_sq_add_gather_subdesc(sq, qentry, size, dma_addr);
1232 1315
1233 /* Check for scattered buffer */ 1316 /* Check for scattered buffer */
1234 if (!skb_is_nonlinear(skb)) 1317 if (!skb_is_nonlinear(skb))
@@ -1241,15 +1324,26 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct snd_queue *sq,
1241 1324
1242 qentry = nicvf_get_nxt_sqentry(sq, qentry); 1325 qentry = nicvf_get_nxt_sqentry(sq, qentry);
1243 size = skb_frag_size(frag); 1326 size = skb_frag_size(frag);
1244 nicvf_sq_add_gather_subdesc(sq, qentry, size, 1327 dma_addr = dma_map_page_attrs(&nic->pdev->dev,
1245 virt_to_phys( 1328 skb_frag_page(frag),
1246 skb_frag_address(frag))); 1329 frag->page_offset, size,
1330 DMA_TO_DEVICE,
1331 DMA_ATTR_SKIP_CPU_SYNC);
1332 if (dma_mapping_error(&nic->pdev->dev, dma_addr)) {
1333 /* Free entire chain of mapped buffers
1334 * here 'i' = frags mapped + above mapped skb->data
1335 */
1336 nicvf_unmap_sndq_buffers(nic, sq, hdr_sqe, i);
1337 nicvf_rollback_sq_desc(sq, qentry, subdesc_cnt);
1338 return 0;
1339 }
1340 nicvf_sq_add_gather_subdesc(sq, qentry, size, dma_addr);
1247 } 1341 }
1248 1342
1249doorbell: 1343doorbell:
1250 if (nic->t88 && skb_shinfo(skb)->gso_size) { 1344 if (nic->t88 && skb_shinfo(skb)->gso_size) {
1251 qentry = nicvf_get_nxt_sqentry(sq, qentry); 1345 qentry = nicvf_get_nxt_sqentry(sq, qentry);
1252 nicvf_sq_add_cqe_subdesc(sq, qentry, tso_sqe, skb); 1346 nicvf_sq_add_cqe_subdesc(sq, qentry, hdr_sqe, skb);
1253 } 1347 }
1254 1348
1255 nicvf_sq_doorbell(nic, skb, sq_num, subdesc_cnt); 1349 nicvf_sq_doorbell(nic, skb, sq_num, subdesc_cnt);
@@ -1282,6 +1376,7 @@ struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
1282 int offset; 1376 int offset;
1283 u16 *rb_lens = NULL; 1377 u16 *rb_lens = NULL;
1284 u64 *rb_ptrs = NULL; 1378 u64 *rb_ptrs = NULL;
1379 u64 phys_addr;
1285 1380
1286 rb_lens = (void *)cqe_rx + (3 * sizeof(u64)); 1381 rb_lens = (void *)cqe_rx + (3 * sizeof(u64));
1287 /* Except 88xx pass1 on all other chips CQE_RX2_S is added to 1382 /* Except 88xx pass1 on all other chips CQE_RX2_S is added to
@@ -1296,15 +1391,23 @@ struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
1296 else 1391 else
1297 rb_ptrs = (void *)cqe_rx + (7 * sizeof(u64)); 1392 rb_ptrs = (void *)cqe_rx + (7 * sizeof(u64));
1298 1393
1299 netdev_dbg(nic->netdev, "%s rb_cnt %d rb0_ptr %llx rb0_sz %d\n",
1300 __func__, cqe_rx->rb_cnt, cqe_rx->rb0_ptr, cqe_rx->rb0_sz);
1301
1302 for (frag = 0; frag < cqe_rx->rb_cnt; frag++) { 1394 for (frag = 0; frag < cqe_rx->rb_cnt; frag++) {
1303 payload_len = rb_lens[frag_num(frag)]; 1395 payload_len = rb_lens[frag_num(frag)];
1396 phys_addr = nicvf_iova_to_phys(nic, *rb_ptrs);
1397 if (!phys_addr) {
1398 if (skb)
1399 dev_kfree_skb_any(skb);
1400 return NULL;
1401 }
1402
1304 if (!frag) { 1403 if (!frag) {
1305 /* First fragment */ 1404 /* First fragment */
1405 dma_unmap_page_attrs(&nic->pdev->dev,
1406 *rb_ptrs - cqe_rx->align_pad,
1407 RCV_FRAG_LEN, DMA_FROM_DEVICE,
1408 DMA_ATTR_SKIP_CPU_SYNC);
1306 skb = nicvf_rb_ptr_to_skb(nic, 1409 skb = nicvf_rb_ptr_to_skb(nic,
1307 *rb_ptrs - cqe_rx->align_pad, 1410 phys_addr - cqe_rx->align_pad,
1308 payload_len); 1411 payload_len);
1309 if (!skb) 1412 if (!skb)
1310 return NULL; 1413 return NULL;
@@ -1312,8 +1415,11 @@ struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
1312 skb_put(skb, payload_len); 1415 skb_put(skb, payload_len);
1313 } else { 1416 } else {
1314 /* Add fragments */ 1417 /* Add fragments */
1315 page = virt_to_page(phys_to_virt(*rb_ptrs)); 1418 dma_unmap_page_attrs(&nic->pdev->dev, *rb_ptrs,
1316 offset = phys_to_virt(*rb_ptrs) - page_address(page); 1419 RCV_FRAG_LEN, DMA_FROM_DEVICE,
1420 DMA_ATTR_SKIP_CPU_SYNC);
1421 page = virt_to_page(phys_to_virt(phys_addr));
1422 offset = phys_to_virt(phys_addr) - page_address(page);
1317 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, 1423 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
1318 offset, payload_len, RCV_FRAG_LEN); 1424 offset, payload_len, RCV_FRAG_LEN);
1319 } 1425 }
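
The nicvf_queues.c hunks above convert the VF driver from handing raw virt_to_phys() addresses to the hardware over to the DMA API, so buffer addresses are valid IOVAs when the ThunderX SMMU is enabled: buffers are mapped with dma_map_page_attrs(), descriptors store the returned dma_addr_t, nicvf_iova_to_phys() translates back for page bookkeeping, and a failed TX mapping unwinds the partially mapped chain before nicvf_rollback_sq_desc() returns the descriptors. A minimal sketch of that map-check-unwind shape (the helper name and layout are ours for illustration, not the driver's code):

/*
 * Illustrative sketch only: the map-check-unwind pattern the TX path
 * adopts above. The DMA calls are the standard <linux/dma-mapping.h>
 * API; map_skb_frags() and the addrs[] array are hypothetical.
 */
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/skbuff.h>

static int map_skb_frags(struct device *dev, struct sk_buff *skb,
			 dma_addr_t *addrs)
{
	int i, nr = skb_shinfo(skb)->nr_frags;

	for (i = 0; i < nr; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		/* Device is cache-coherent, so skip CPU cache maintenance */
		addrs[i] = dma_map_page_attrs(dev, skb_frag_page(frag),
					      frag->page_offset,
					      skb_frag_size(frag),
					      DMA_TO_DEVICE,
					      DMA_ATTR_SKIP_CPU_SYNC);
		if (dma_mapping_error(dev, addrs[i]))
			goto unwind;
	}
	return 0;

unwind:
	/* Unmap the partial chain; the caller then rolls back its
	 * descriptor tail (cf. nicvf_rollback_sq_desc() above).
	 */
	while (--i >= 0)
		dma_unmap_page_attrs(dev, addrs[i],
				     skb_frag_size(&skb_shinfo(skb)->frags[i]),
				     DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
	return -ENOMEM;
}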
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
index 5cb84da99a2d..10cb4b84625b 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
@@ -87,7 +87,7 @@
87#define RCV_BUF_COUNT (1ULL << (RBDR_SIZE + 13)) 87#define RCV_BUF_COUNT (1ULL << (RBDR_SIZE + 13))
88#define MAX_RCV_BUF_COUNT (1ULL << (RBDR_SIZE6 + 13)) 88#define MAX_RCV_BUF_COUNT (1ULL << (RBDR_SIZE6 + 13))
89#define RBDR_THRESH (RCV_BUF_COUNT / 2) 89#define RBDR_THRESH (RCV_BUF_COUNT / 2)
90#define DMA_BUFFER_LEN 2048 /* In multiples of 128bytes */ 90#define DMA_BUFFER_LEN 1536 /* In multiples of 128bytes */
91#define RCV_FRAG_LEN (SKB_DATA_ALIGN(DMA_BUFFER_LEN + NET_SKB_PAD) + \ 91#define RCV_FRAG_LEN (SKB_DATA_ALIGN(DMA_BUFFER_LEN + NET_SKB_PAD) + \
92 SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) 92 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
93 93
@@ -301,6 +301,8 @@ struct queue_set {
301 301
302#define CQ_ERR_MASK (CQ_WR_FULL | CQ_WR_DISABLE | CQ_WR_FAULT) 302#define CQ_ERR_MASK (CQ_WR_FULL | CQ_WR_DISABLE | CQ_WR_FAULT)
303 303
304void nicvf_unmap_sndq_buffers(struct nicvf *nic, struct snd_queue *sq,
305 int hdr_sqe, u8 subdesc_cnt);
304void nicvf_config_vlan_stripping(struct nicvf *nic, 306void nicvf_config_vlan_stripping(struct nicvf *nic,
305 netdev_features_t features); 307 netdev_features_t features);
306int nicvf_set_qset_resources(struct nicvf *nic); 308int nicvf_set_qset_resources(struct nicvf *nic);
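
Alongside the new prototype, DMA_BUFFER_LEN drops from 2048 to 1536. A plausible reading, assuming 64-byte cachelines and a roughly 320-byte skb_shared_info (typical, but configuration dependent), is that this lets two RCV_FRAG_LEN fragments be carved from each 4 KiB page instead of one. A quick userspace check of the arithmetic:

#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))
#define NET_SKB_PAD	64
#define SHINFO		320	/* approx SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) */

static unsigned long rcv_frag_len(unsigned long buf_len)
{
	return ALIGN_UP(buf_len + NET_SKB_PAD, 64) + ALIGN_UP(SHINFO, 64);
}

int main(void)
{
	/* old: frag 2432 -> 1 per 4 KiB page; new: frag 1920 -> 2 per page */
	printf("2048 -> %lu (%lu/page)\n",
	       rcv_frag_len(2048), 4096 / rcv_frag_len(2048));
	printf("1536 -> %lu (%lu/page)\n",
	       rcv_frag_len(1536), 4096 / rcv_frag_len(1536));
	return 0;
}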
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 4c8e8cf730bb..64a1095e4d14 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -123,14 +123,44 @@ static int bgx_poll_reg(struct bgx *bgx, u8 lmac, u64 reg, u64 mask, bool zero)
123 return 1; 123 return 1;
124} 124}
125 125
126static int max_bgx_per_node;
127static void set_max_bgx_per_node(struct pci_dev *pdev)
128{
129 u16 sdevid;
130
131 if (max_bgx_per_node)
132 return;
133
134 pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &sdevid);
135 switch (sdevid) {
136 case PCI_SUBSYS_DEVID_81XX_BGX:
137 max_bgx_per_node = MAX_BGX_PER_CN81XX;
138 break;
139 case PCI_SUBSYS_DEVID_83XX_BGX:
140 max_bgx_per_node = MAX_BGX_PER_CN83XX;
141 break;
142 case PCI_SUBSYS_DEVID_88XX_BGX:
143 default:
144 max_bgx_per_node = MAX_BGX_PER_CN88XX;
145 break;
146 }
147}
148
149static struct bgx *get_bgx(int node, int bgx_idx)
150{
151 int idx = (node * max_bgx_per_node) + bgx_idx;
152
153 return bgx_vnic[idx];
154}
155
126/* Return number of BGX present in HW */ 156/* Return number of BGX present in HW */
127unsigned bgx_get_map(int node) 157unsigned bgx_get_map(int node)
128{ 158{
129 int i; 159 int i;
130 unsigned map = 0; 160 unsigned map = 0;
131 161
132 for (i = 0; i < MAX_BGX_PER_NODE; i++) { 162 for (i = 0; i < max_bgx_per_node; i++) {
133 if (bgx_vnic[(node * MAX_BGX_PER_NODE) + i]) 163 if (bgx_vnic[(node * max_bgx_per_node) + i])
134 map |= (1 << i); 164 map |= (1 << i);
135 } 165 }
136 166
@@ -143,7 +173,7 @@ int bgx_get_lmac_count(int node, int bgx_idx)
143{ 173{
144 struct bgx *bgx; 174 struct bgx *bgx;
145 175
146 bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx]; 176 bgx = get_bgx(node, bgx_idx);
147 if (bgx) 177 if (bgx)
148 return bgx->lmac_count; 178 return bgx->lmac_count;
149 179
@@ -158,7 +188,7 @@ void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status)
158 struct bgx *bgx; 188 struct bgx *bgx;
159 struct lmac *lmac; 189 struct lmac *lmac;
160 190
161 bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx]; 191 bgx = get_bgx(node, bgx_idx);
162 if (!bgx) 192 if (!bgx)
163 return; 193 return;
164 194
@@ -172,7 +202,7 @@ EXPORT_SYMBOL(bgx_get_lmac_link_state);
172 202
173const u8 *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid) 203const u8 *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid)
174{ 204{
175 struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx]; 205 struct bgx *bgx = get_bgx(node, bgx_idx);
176 206
177 if (bgx) 207 if (bgx)
178 return bgx->lmac[lmacid].mac; 208 return bgx->lmac[lmacid].mac;
@@ -183,7 +213,7 @@ EXPORT_SYMBOL(bgx_get_lmac_mac);
183 213
184void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac) 214void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac)
185{ 215{
186 struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx]; 216 struct bgx *bgx = get_bgx(node, bgx_idx);
187 217
188 if (!bgx) 218 if (!bgx)
189 return; 219 return;
@@ -194,7 +224,7 @@ EXPORT_SYMBOL(bgx_set_lmac_mac);
194 224
195void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable) 225void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable)
196{ 226{
197 struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx]; 227 struct bgx *bgx = get_bgx(node, bgx_idx);
198 struct lmac *lmac; 228 struct lmac *lmac;
199 u64 cfg; 229 u64 cfg;
200 230
@@ -217,7 +247,7 @@ EXPORT_SYMBOL(bgx_lmac_rx_tx_enable);
217void bgx_lmac_get_pfc(int node, int bgx_idx, int lmacid, void *pause) 247void bgx_lmac_get_pfc(int node, int bgx_idx, int lmacid, void *pause)
218{ 248{
219 struct pfc *pfc = (struct pfc *)pause; 249 struct pfc *pfc = (struct pfc *)pause;
220 struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx]; 250 struct bgx *bgx = get_bgx(node, bgx_idx);
221 struct lmac *lmac; 251 struct lmac *lmac;
222 u64 cfg; 252 u64 cfg;
223 253
@@ -237,7 +267,7 @@ EXPORT_SYMBOL(bgx_lmac_get_pfc);
237void bgx_lmac_set_pfc(int node, int bgx_idx, int lmacid, void *pause) 267void bgx_lmac_set_pfc(int node, int bgx_idx, int lmacid, void *pause)
238{ 268{
239 struct pfc *pfc = (struct pfc *)pause; 269 struct pfc *pfc = (struct pfc *)pause;
240 struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx]; 270 struct bgx *bgx = get_bgx(node, bgx_idx);
241 struct lmac *lmac; 271 struct lmac *lmac;
242 u64 cfg; 272 u64 cfg;
243 273
@@ -369,7 +399,7 @@ u64 bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx)
369{ 399{
370 struct bgx *bgx; 400 struct bgx *bgx;
371 401
372 bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx]; 402 bgx = get_bgx(node, bgx_idx);
373 if (!bgx) 403 if (!bgx)
374 return 0; 404 return 0;
375 405
@@ -383,7 +413,7 @@ u64 bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx)
383{ 413{
384 struct bgx *bgx; 414 struct bgx *bgx;
385 415
386 bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx]; 416 bgx = get_bgx(node, bgx_idx);
387 if (!bgx) 417 if (!bgx)
388 return 0; 418 return 0;
389 419
@@ -411,7 +441,7 @@ void bgx_lmac_internal_loopback(int node, int bgx_idx,
411 struct lmac *lmac; 441 struct lmac *lmac;
412 u64 cfg; 442 u64 cfg;
413 443
414 bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx]; 444 bgx = get_bgx(node, bgx_idx);
415 if (!bgx) 445 if (!bgx)
416 return; 446 return;
417 447
@@ -1011,12 +1041,6 @@ static void bgx_print_qlm_mode(struct bgx *bgx, u8 lmacid)
1011 dev_info(dev, "%s: 40G_KR4\n", (char *)str); 1041 dev_info(dev, "%s: 40G_KR4\n", (char *)str);
1012 break; 1042 break;
1013 case BGX_MODE_QSGMII: 1043 case BGX_MODE_QSGMII:
1014 if ((lmacid == 0) &&
1015 (bgx_get_lane2sds_cfg(bgx, lmac) != lmacid))
1016 return;
1017 if ((lmacid == 2) &&
1018 (bgx_get_lane2sds_cfg(bgx, lmac) == lmacid))
1019 return;
1020 dev_info(dev, "%s: QSGMII\n", (char *)str); 1044 dev_info(dev, "%s: QSGMII\n", (char *)str);
1021 break; 1045 break;
1022 case BGX_MODE_RGMII: 1046 case BGX_MODE_RGMII:
@@ -1334,11 +1358,13 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1334 goto err_release_regions; 1358 goto err_release_regions;
1335 } 1359 }
1336 1360
1361 set_max_bgx_per_node(pdev);
1362
1337 pci_read_config_word(pdev, PCI_DEVICE_ID, &sdevid); 1363 pci_read_config_word(pdev, PCI_DEVICE_ID, &sdevid);
1338 if (sdevid != PCI_DEVICE_ID_THUNDER_RGX) { 1364 if (sdevid != PCI_DEVICE_ID_THUNDER_RGX) {
1339 bgx->bgx_id = (pci_resource_start(pdev, 1365 bgx->bgx_id = (pci_resource_start(pdev,
1340 PCI_CFG_REG_BAR_NUM) >> 24) & BGX_ID_MASK; 1366 PCI_CFG_REG_BAR_NUM) >> 24) & BGX_ID_MASK;
1341 bgx->bgx_id += nic_get_node_id(pdev) * MAX_BGX_PER_NODE; 1367 bgx->bgx_id += nic_get_node_id(pdev) * max_bgx_per_node;
1342 bgx->max_lmac = MAX_LMAC_PER_BGX; 1368 bgx->max_lmac = MAX_LMAC_PER_BGX;
1343 bgx_vnic[bgx->bgx_id] = bgx; 1369 bgx_vnic[bgx->bgx_id] = bgx;
1344 } else { 1370 } else {
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
index a60f189429bb..c5080f2cead5 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
@@ -22,7 +22,6 @@
22#define MAX_BGX_PER_CN88XX 2 22#define MAX_BGX_PER_CN88XX 2
23#define MAX_BGX_PER_CN81XX 3 /* 2 BGXs + 1 RGX */ 23#define MAX_BGX_PER_CN81XX 3 /* 2 BGXs + 1 RGX */
24#define MAX_BGX_PER_CN83XX 4 24#define MAX_BGX_PER_CN83XX 4
25#define MAX_BGX_PER_NODE 4
26#define MAX_LMAC_PER_BGX 4 25#define MAX_LMAC_PER_BGX 4
27#define MAX_BGX_CHANS_PER_LMAC 16 26#define MAX_BGX_CHANS_PER_LMAC 16
28#define MAX_DMAC_PER_LMAC 8 27#define MAX_DMAC_PER_LMAC 8
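
The thunder_bgx changes swap the compile-time MAX_BGX_PER_NODE stride (note the two PFC accessors that were indexing with MAX_BGX_PER_CN88XX outright) for a per-SoC max_bgx_per_node chosen from the PCI subsystem ID, and route every lookup through a single get_bgx() helper so the index math lives in one place. A small userspace model of the pattern (device IDs and counts are illustrative placeholders, not the real PCI_SUBSYS_DEVID_* values):

#include <stdio.h>

enum { MAX_NODES = 2, MAX_BGX = 4 };

static int max_bgx_per_node;	/* chosen once at probe time */
static void *bgx_vnic[MAX_NODES * MAX_BGX];

static void set_max_bgx_per_node(unsigned short subsys_devid)
{
	switch (subsys_devid) {
	case 0x8100: max_bgx_per_node = 3; break;	/* "81xx": 2 BGX + RGX */
	case 0x8300: max_bgx_per_node = 4; break;	/* "83xx" */
	default:     max_bgx_per_node = 2; break;	/* "88xx" */
	}
}

static void *get_bgx(int node, int bgx_idx)
{
	/* the one place the per-node stride is applied */
	return bgx_vnic[node * max_bgx_per_node + bgx_idx];
}

int main(void)
{
	set_max_bgx_per_node(0x8100);
	bgx_vnic[1 * max_bgx_per_node + 0] = "node1/bgx0";
	printf("%s\n", (char *)get_bgx(1, 0));
	return 0;
}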
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 30e855004c57..02dd5246dfae 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -4939,8 +4939,9 @@ static int
4939__be_cmd_set_logical_link_config(struct be_adapter *adapter, 4939__be_cmd_set_logical_link_config(struct be_adapter *adapter,
4940 int link_state, int version, u8 domain) 4940 int link_state, int version, u8 domain)
4941{ 4941{
4942 struct be_mcc_wrb *wrb;
4943 struct be_cmd_req_set_ll_link *req; 4942 struct be_cmd_req_set_ll_link *req;
4943 struct be_mcc_wrb *wrb;
4944 u32 link_config = 0;
4944 int status; 4945 int status;
4945 4946
4946 mutex_lock(&adapter->mcc_lock); 4947 mutex_lock(&adapter->mcc_lock);
@@ -4962,10 +4963,12 @@ __be_cmd_set_logical_link_config(struct be_adapter *adapter,
4962 4963
4963 if (link_state == IFLA_VF_LINK_STATE_ENABLE || 4964 if (link_state == IFLA_VF_LINK_STATE_ENABLE ||
4964 link_state == IFLA_VF_LINK_STATE_AUTO) 4965 link_state == IFLA_VF_LINK_STATE_AUTO)
4965 req->link_config |= PLINK_ENABLE; 4966 link_config |= PLINK_ENABLE;
4966 4967
4967 if (link_state == IFLA_VF_LINK_STATE_AUTO) 4968 if (link_state == IFLA_VF_LINK_STATE_AUTO)
4968 req->link_config |= PLINK_TRACK; 4969 link_config |= PLINK_TRACK;
4970
4971 req->link_config = cpu_to_le32(link_config);
4969 4972
4970 status = be_mcc_notify_wait(adapter); 4973 status = be_mcc_notify_wait(adapter);
4971err: 4974err:
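
The be_cmds.c hunk is an endianness fix: req->link_config is a little-endian wire field, so OR-ing CPU-order flag bits straight into it produces the wrong value on big-endian hosts. The corrected shape in miniature (flag bit positions are illustrative):

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

#define PLINK_ENABLE	(1u << 0)	/* illustrative bit positions */
#define PLINK_TRACK	(1u << 1)

struct req {
	uint32_t link_config;	/* little-endian on the wire */
};

int main(void)
{
	struct req req = { 0 };
	uint32_t link_config = 0;	/* accumulate in CPU order... */

	link_config |= PLINK_ENABLE;
	link_config |= PLINK_TRACK;
	req.link_config = htole32(link_config);	/* ...convert exactly once */

	printf("wire value: 0x%08x\n", (unsigned)req.link_config);
	return 0;
}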
diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c
index 992ebe973d25..f819843e2bae 100644
--- a/drivers/net/ethernet/ezchip/nps_enet.c
+++ b/drivers/net/ethernet/ezchip/nps_enet.c
@@ -189,11 +189,9 @@ static int nps_enet_poll(struct napi_struct *napi, int budget)
189 189
190 nps_enet_tx_handler(ndev); 190 nps_enet_tx_handler(ndev);
191 work_done = nps_enet_rx_handler(ndev); 191 work_done = nps_enet_rx_handler(ndev);
192 if (work_done < budget) { 192 if ((work_done < budget) && napi_complete_done(napi, work_done)) {
193 u32 buf_int_enable_value = 0; 193 u32 buf_int_enable_value = 0;
194 194
195 napi_complete_done(napi, work_done);
196
197 /* set tx_done and rx_rdy bits */ 195 /* set tx_done and rx_rdy bits */
198 buf_int_enable_value |= NPS_ENET_ENABLE << RX_RDY_SHIFT; 196 buf_int_enable_value |= NPS_ENET_ENABLE << RX_RDY_SHIFT;
199 buf_int_enable_value |= NPS_ENET_ENABLE << TX_DONE_SHIFT; 197 buf_int_enable_value |= NPS_ENET_ENABLE << TX_DONE_SHIFT;
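
The nps_enet fix re-arms device interrupts only when napi_complete_done() confirms the instance really left polling mode; the call returns false when polling must continue (for example under busy polling), in which case unmasking interrupts would race with the still-running poller. The corrected poll shape, with the driver-specific helpers reduced to hypothetical stubs:

#include <linux/netdevice.h>

static int my_rx_handler(struct napi_struct *napi);	/* stubs */
static void my_enable_irqs(struct napi_struct *napi);

static int my_poll(struct napi_struct *napi, int budget)
{
	int work_done = my_rx_handler(napi);

	/* Re-arm interrupts only if NAPI truly completed */
	if (work_done < budget && napi_complete_done(napi, work_done))
		my_enable_irqs(napi);

	return work_done;
}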
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 928b0df2b8e0..ade6b3e4ed13 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -28,8 +28,10 @@
28#include <linux/io.h> 28#include <linux/io.h>
29#include <linux/module.h> 29#include <linux/module.h>
30#include <linux/netdevice.h> 30#include <linux/netdevice.h>
31#include <linux/of.h>
31#include <linux/phy.h> 32#include <linux/phy.h>
32#include <linux/platform_device.h> 33#include <linux/platform_device.h>
34#include <linux/property.h>
33#include <net/ip.h> 35#include <net/ip.h>
34#include <net/ncsi.h> 36#include <net/ncsi.h>
35 37
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
index 3239d27143b9..bdd8cdd732fb 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
@@ -82,9 +82,12 @@ void hns_mac_get_link_status(struct hns_mac_cb *mac_cb, u32 *link_status)
82 else 82 else
83 *link_status = 0; 83 *link_status = 0;
84 84
85 ret = mac_cb->dsaf_dev->misc_op->get_sfp_prsnt(mac_cb, &sfp_prsnt); 85 if (mac_cb->media_type == HNAE_MEDIA_TYPE_FIBER) {
86 if (!ret) 86 ret = mac_cb->dsaf_dev->misc_op->get_sfp_prsnt(mac_cb,
87 *link_status = *link_status && sfp_prsnt; 87 &sfp_prsnt);
88 if (!ret)
89 *link_status = *link_status && sfp_prsnt;
90 }
88 91
89 mac_cb->link = *link_status; 92 mac_cb->link = *link_status;
90} 93}
@@ -855,7 +858,7 @@ static int hns_mac_get_info(struct hns_mac_cb *mac_cb)
855 of_node_put(np); 858 of_node_put(np);
856 859
857 np = of_parse_phandle(to_of_node(mac_cb->fw_port), 860 np = of_parse_phandle(to_of_node(mac_cb->fw_port),
858 "serdes-syscon", 0); 861 "serdes-syscon", 0);
859 syscon = syscon_node_to_regmap(np); 862 syscon = syscon_node_to_regmap(np);
860 of_node_put(np); 863 of_node_put(np);
861 if (IS_ERR_OR_NULL(syscon)) { 864 if (IS_ERR_OR_NULL(syscon)) {
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index 90dbda792614..403ea9db6dbd 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -1519,6 +1519,7 @@ static void hns_dsaf_set_mac_key(
1519 mac_key->high.bits.mac_3 = addr[3]; 1519 mac_key->high.bits.mac_3 = addr[3];
1520 mac_key->low.bits.mac_4 = addr[4]; 1520 mac_key->low.bits.mac_4 = addr[4];
1521 mac_key->low.bits.mac_5 = addr[5]; 1521 mac_key->low.bits.mac_5 = addr[5];
1522 mac_key->low.bits.port_vlan = 0;
1522 dsaf_set_field(mac_key->low.bits.port_vlan, DSAF_TBL_TCAM_KEY_VLAN_M, 1523 dsaf_set_field(mac_key->low.bits.port_vlan, DSAF_TBL_TCAM_KEY_VLAN_M,
1523 DSAF_TBL_TCAM_KEY_VLAN_S, vlan_id); 1524 DSAF_TBL_TCAM_KEY_VLAN_S, vlan_id);
1524 dsaf_set_field(mac_key->low.bits.port_vlan, DSAF_TBL_TCAM_KEY_PORT_M, 1525 dsaf_set_field(mac_key->low.bits.port_vlan, DSAF_TBL_TCAM_KEY_PORT_M,
@@ -2924,10 +2925,11 @@ void hns_dsaf_set_promisc_tcam(struct dsaf_device *dsaf_dev,
2924 /* find the tcam entry index for promisc */ 2925 /* find the tcam entry index for promisc */
2925 entry_index = dsaf_promisc_tcam_entry(port); 2926 entry_index = dsaf_promisc_tcam_entry(port);
2926 2927
2928 memset(&tbl_tcam_data, 0, sizeof(tbl_tcam_data));
2929 memset(&tbl_tcam_mask, 0, sizeof(tbl_tcam_mask));
2930
2927 /* config key mask */ 2931 /* config key mask */
2928 if (enable) { 2932 if (enable) {
2929 memset(&tbl_tcam_data, 0, sizeof(tbl_tcam_data));
2930 memset(&tbl_tcam_mask, 0, sizeof(tbl_tcam_mask));
2931 dsaf_set_field(tbl_tcam_data.low.bits.port_vlan, 2933 dsaf_set_field(tbl_tcam_data.low.bits.port_vlan,
2932 DSAF_TBL_TCAM_KEY_PORT_M, 2934 DSAF_TBL_TCAM_KEY_PORT_M,
2933 DSAF_TBL_TCAM_KEY_PORT_S, port); 2935 DSAF_TBL_TCAM_KEY_PORT_S, port);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
index a2c22d084ce9..e13aa064a8e9 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
@@ -461,6 +461,32 @@ int hns_mac_get_sfp_prsnt(struct hns_mac_cb *mac_cb, int *sfp_prsnt)
461 return 0; 461 return 0;
462} 462}
463 463
464int hns_mac_get_sfp_prsnt_acpi(struct hns_mac_cb *mac_cb, int *sfp_prsnt)
465{
466 union acpi_object *obj;
467 union acpi_object obj_args, argv4;
468
469 obj_args.integer.type = ACPI_TYPE_INTEGER;
470 obj_args.integer.value = mac_cb->mac_id;
471
472 argv4.type = ACPI_TYPE_PACKAGE,
473 argv4.package.count = 1,
474 argv4.package.elements = &obj_args,
475
476 obj = acpi_evaluate_dsm(ACPI_HANDLE(mac_cb->dev),
477 hns_dsaf_acpi_dsm_uuid, 0,
478 HNS_OP_GET_SFP_STAT_FUNC, &argv4);
479
480 if (!obj || obj->type != ACPI_TYPE_INTEGER)
481 return -ENODEV;
482
483 *sfp_prsnt = obj->integer.value;
484
485 ACPI_FREE(obj);
486
487 return 0;
488}
489
464/** 490/**
465 * hns_mac_config_sds_loopback - set loop back for serdes 491 * hns_mac_config_sds_loopback - set loop back for serdes
466 * @mac_cb: mac control block 492 * @mac_cb: mac control block
@@ -592,7 +618,7 @@ struct dsaf_misc_op *hns_misc_op_get(struct dsaf_device *dsaf_dev)
592 misc_op->hns_dsaf_roce_srst = hns_dsaf_roce_srst_acpi; 618 misc_op->hns_dsaf_roce_srst = hns_dsaf_roce_srst_acpi;
593 619
594 misc_op->get_phy_if = hns_mac_get_phy_if_acpi; 620 misc_op->get_phy_if = hns_mac_get_phy_if_acpi;
595 misc_op->get_sfp_prsnt = hns_mac_get_sfp_prsnt; 621 misc_op->get_sfp_prsnt = hns_mac_get_sfp_prsnt_acpi;
596 622
597 misc_op->cfg_serdes_loopback = hns_mac_config_sds_loopback_acpi; 623 misc_op->cfg_serdes_loopback = hns_mac_config_sds_loopback_acpi;
598 } else { 624 } else {
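
hns_mac_get_sfp_prsnt_acpi() above is the standard ACPI _DSM call pattern: pack the port id as the single integer argument, evaluate the device's _DSM with the platform-defined UUID and function number, and read an integer back. A generic sketch of the same pattern (written against the 4.11-era acpi_evaluate_dsm() that takes a raw u8 UUID; this version also frees the returned object on a type mismatch):

#include <linux/acpi.h>

static int dsm_read_int(struct device *dev, const u8 *uuid, u64 func,
			u64 arg, int *out)
{
	union acpi_object args, argv4, *obj;

	args.integer.type = ACPI_TYPE_INTEGER;
	args.integer.value = arg;		/* e.g. the MAC/port id */
	argv4.type = ACPI_TYPE_PACKAGE;
	argv4.package.count = 1;
	argv4.package.elements = &args;

	obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), uuid, 0, func, &argv4);
	if (!obj)
		return -ENODEV;
	if (obj->type != ACPI_TYPE_INTEGER) {
		ACPI_FREE(obj);			/* free on mismatch too */
		return -ENODEV;
	}

	*out = obj->integer.value;
	ACPI_FREE(obj);
	return 0;
}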
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 275c2e2349ad..c44036d5761a 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -2589,8 +2589,6 @@ static int emac_dt_mdio_probe(struct emac_instance *dev)
2589static int emac_dt_phy_connect(struct emac_instance *dev, 2589static int emac_dt_phy_connect(struct emac_instance *dev,
2590 struct device_node *phy_handle) 2590 struct device_node *phy_handle)
2591{ 2591{
2592 int res;
2593
2594 dev->phy.def = devm_kzalloc(&dev->ofdev->dev, sizeof(*dev->phy.def), 2592 dev->phy.def = devm_kzalloc(&dev->ofdev->dev, sizeof(*dev->phy.def),
2595 GFP_KERNEL); 2593 GFP_KERNEL);
2596 if (!dev->phy.def) 2594 if (!dev->phy.def)
@@ -2617,7 +2615,7 @@ static int emac_dt_phy_probe(struct emac_instance *dev)
2617{ 2615{
2618 struct device_node *np = dev->ofdev->dev.of_node; 2616 struct device_node *np = dev->ofdev->dev.of_node;
2619 struct device_node *phy_handle; 2617 struct device_node *phy_handle;
2620 int res = 0; 2618 int res = 1;
2621 2619
2622 phy_handle = of_parse_phandle(np, "phy-handle", 0); 2620 phy_handle = of_parse_phandle(np, "phy-handle", 0);
2623 2621
@@ -2714,13 +2712,24 @@ static int emac_init_phy(struct emac_instance *dev)
2714 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) { 2712 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) {
2715 int res = emac_dt_phy_probe(dev); 2713 int res = emac_dt_phy_probe(dev);
2716 2714
2717 mutex_unlock(&emac_phy_map_lock); 2715 switch (res) {
2718 if (!res) 2716 case 1:
2717 /* No phy-handle property configured.
2718 * Continue with the existing phy probe
2719 * and setup code.
2720 */
2721 break;
2722
2723 case 0:
2724 mutex_unlock(&emac_phy_map_lock);
2719 goto init_phy; 2725 goto init_phy;
2720 2726
2721 dev_err(&dev->ofdev->dev, "failed to attach dt phy (%d).\n", 2727 default:
2722 res); 2728 mutex_unlock(&emac_phy_map_lock);
2723 return res; 2729 dev_err(&dev->ofdev->dev, "failed to attach dt phy (%d).\n",
2730 res);
2731 return res;
2732 }
2724 } 2733 }
2725 2734
2726 if (dev->phy_address != 0xffffffff) 2735 if (dev->phy_address != 0xffffffff)
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 9198e6bd5160..b23d6545f835 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -404,7 +404,7 @@ static int ibmvnic_open(struct net_device *netdev)
404 send_map_query(adapter); 404 send_map_query(adapter);
405 for (i = 0; i < rxadd_subcrqs; i++) { 405 for (i = 0; i < rxadd_subcrqs; i++) {
406 init_rx_pool(adapter, &adapter->rx_pool[i], 406 init_rx_pool(adapter, &adapter->rx_pool[i],
407 IBMVNIC_BUFFS_PER_POOL, i, 407 adapter->req_rx_add_entries_per_subcrq, i,
408 be64_to_cpu(size_array[i]), 1); 408 be64_to_cpu(size_array[i]), 1);
409 if (alloc_rx_pool(adapter, &adapter->rx_pool[i])) { 409 if (alloc_rx_pool(adapter, &adapter->rx_pool[i])) {
410 dev_err(dev, "Couldn't alloc rx pool\n"); 410 dev_err(dev, "Couldn't alloc rx pool\n");
@@ -419,23 +419,23 @@ static int ibmvnic_open(struct net_device *netdev)
419 for (i = 0; i < tx_subcrqs; i++) { 419 for (i = 0; i < tx_subcrqs; i++) {
420 tx_pool = &adapter->tx_pool[i]; 420 tx_pool = &adapter->tx_pool[i];
421 tx_pool->tx_buff = 421 tx_pool->tx_buff =
422 kcalloc(adapter->max_tx_entries_per_subcrq, 422 kcalloc(adapter->req_tx_entries_per_subcrq,
423 sizeof(struct ibmvnic_tx_buff), GFP_KERNEL); 423 sizeof(struct ibmvnic_tx_buff), GFP_KERNEL);
424 if (!tx_pool->tx_buff) 424 if (!tx_pool->tx_buff)
425 goto tx_pool_alloc_failed; 425 goto tx_pool_alloc_failed;
426 426
427 if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff, 427 if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
428 adapter->max_tx_entries_per_subcrq * 428 adapter->req_tx_entries_per_subcrq *
429 adapter->req_mtu)) 429 adapter->req_mtu))
430 goto tx_ltb_alloc_failed; 430 goto tx_ltb_alloc_failed;
431 431
432 tx_pool->free_map = 432 tx_pool->free_map =
433 kcalloc(adapter->max_tx_entries_per_subcrq, 433 kcalloc(adapter->req_tx_entries_per_subcrq,
434 sizeof(int), GFP_KERNEL); 434 sizeof(int), GFP_KERNEL);
435 if (!tx_pool->free_map) 435 if (!tx_pool->free_map)
436 goto tx_fm_alloc_failed; 436 goto tx_fm_alloc_failed;
437 437
438 for (j = 0; j < adapter->max_tx_entries_per_subcrq; j++) 438 for (j = 0; j < adapter->req_tx_entries_per_subcrq; j++)
439 tx_pool->free_map[j] = j; 439 tx_pool->free_map[j] = j;
440 440
441 tx_pool->consumer_index = 0; 441 tx_pool->consumer_index = 0;
@@ -705,6 +705,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
705 u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req; 705 u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
706 struct device *dev = &adapter->vdev->dev; 706 struct device *dev = &adapter->vdev->dev;
707 struct ibmvnic_tx_buff *tx_buff = NULL; 707 struct ibmvnic_tx_buff *tx_buff = NULL;
708 struct ibmvnic_sub_crq_queue *tx_scrq;
708 struct ibmvnic_tx_pool *tx_pool; 709 struct ibmvnic_tx_pool *tx_pool;
709 unsigned int tx_send_failed = 0; 710 unsigned int tx_send_failed = 0;
710 unsigned int tx_map_failed = 0; 711 unsigned int tx_map_failed = 0;
@@ -724,6 +725,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
724 int ret = 0; 725 int ret = 0;
725 726
726 tx_pool = &adapter->tx_pool[queue_num]; 727 tx_pool = &adapter->tx_pool[queue_num];
728 tx_scrq = adapter->tx_scrq[queue_num];
727 txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb)); 729 txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
728 handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) + 730 handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
729 be32_to_cpu(adapter->login_rsp_buf-> 731 be32_to_cpu(adapter->login_rsp_buf->
@@ -744,7 +746,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
744 746
745 tx_pool->consumer_index = 747 tx_pool->consumer_index =
746 (tx_pool->consumer_index + 1) % 748 (tx_pool->consumer_index + 1) %
747 adapter->max_tx_entries_per_subcrq; 749 adapter->req_tx_entries_per_subcrq;
748 750
749 tx_buff = &tx_pool->tx_buff[index]; 751 tx_buff = &tx_pool->tx_buff[index];
750 tx_buff->skb = skb; 752 tx_buff->skb = skb;
@@ -817,7 +819,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
817 819
818 if (tx_pool->consumer_index == 0) 820 if (tx_pool->consumer_index == 0)
819 tx_pool->consumer_index = 821 tx_pool->consumer_index =
820 adapter->max_tx_entries_per_subcrq - 1; 822 adapter->req_tx_entries_per_subcrq - 1;
821 else 823 else
822 tx_pool->consumer_index--; 824 tx_pool->consumer_index--;
823 825
@@ -826,6 +828,14 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
826 ret = NETDEV_TX_BUSY; 828 ret = NETDEV_TX_BUSY;
827 goto out; 829 goto out;
828 } 830 }
831
832 atomic_inc(&tx_scrq->used);
833
834 if (atomic_read(&tx_scrq->used) >= adapter->req_tx_entries_per_subcrq) {
835 netdev_info(netdev, "Stopping queue %d\n", queue_num);
836 netif_stop_subqueue(netdev, queue_num);
837 }
838
829 tx_packets++; 839 tx_packets++;
830 tx_bytes += skb->len; 840 tx_bytes += skb->len;
831 txq->trans_start = jiffies; 841 txq->trans_start = jiffies;
@@ -1213,6 +1223,7 @@ static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
1213 scrq->adapter = adapter; 1223 scrq->adapter = adapter;
1214 scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs); 1224 scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
1215 scrq->cur = 0; 1225 scrq->cur = 0;
1226 atomic_set(&scrq->used, 0);
1216 scrq->rx_skb_top = NULL; 1227 scrq->rx_skb_top = NULL;
1217 spin_lock_init(&scrq->lock); 1228 spin_lock_init(&scrq->lock);
1218 1229
@@ -1246,6 +1257,7 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter)
1246 release_sub_crq_queue(adapter, 1257 release_sub_crq_queue(adapter,
1247 adapter->tx_scrq[i]); 1258 adapter->tx_scrq[i]);
1248 } 1259 }
1260 kfree(adapter->tx_scrq);
1249 adapter->tx_scrq = NULL; 1261 adapter->tx_scrq = NULL;
1250 } 1262 }
1251 1263
@@ -1258,6 +1270,7 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter)
1258 release_sub_crq_queue(adapter, 1270 release_sub_crq_queue(adapter,
1259 adapter->rx_scrq[i]); 1271 adapter->rx_scrq[i]);
1260 } 1272 }
1273 kfree(adapter->rx_scrq);
1261 adapter->rx_scrq = NULL; 1274 adapter->rx_scrq = NULL;
1262 } 1275 }
1263} 1276}
@@ -1355,14 +1368,28 @@ restart_loop:
1355 DMA_TO_DEVICE); 1368 DMA_TO_DEVICE);
1356 } 1369 }
1357 1370
1358 if (txbuff->last_frag) 1371 if (txbuff->last_frag) {
1372 atomic_dec(&scrq->used);
1373
1374 if (atomic_read(&scrq->used) <=
1375 (adapter->req_tx_entries_per_subcrq / 2) &&
1376 netif_subqueue_stopped(adapter->netdev,
1377 txbuff->skb)) {
1378 netif_wake_subqueue(adapter->netdev,
1379 scrq->pool_index);
1380 netdev_dbg(adapter->netdev,
1381 "Started queue %d\n",
1382 scrq->pool_index);
1383 }
1384
1359 dev_kfree_skb_any(txbuff->skb); 1385 dev_kfree_skb_any(txbuff->skb);
1386 }
1360 1387
1361 adapter->tx_pool[pool].free_map[adapter->tx_pool[pool]. 1388 adapter->tx_pool[pool].free_map[adapter->tx_pool[pool].
1362 producer_index] = index; 1389 producer_index] = index;
1363 adapter->tx_pool[pool].producer_index = 1390 adapter->tx_pool[pool].producer_index =
1364 (adapter->tx_pool[pool].producer_index + 1) % 1391 (adapter->tx_pool[pool].producer_index + 1) %
1365 adapter->max_tx_entries_per_subcrq; 1392 adapter->req_tx_entries_per_subcrq;
1366 } 1393 }
1367 /* remove tx_comp scrq*/ 1394 /* remove tx_comp scrq*/
1368 next->tx_comp.first = 0; 1395 next->tx_comp.first = 0;
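
The ibmvnic hunks add TX flow control the driver previously lacked: an atomic used count per sub-CRQ is incremented on submit and decremented on completion, the subqueue is stopped when the ring fills, and it is woken once occupancy falls to half so the queue does not thrash on every completion. Condensed to its essentials (structure names are simplified stand-ins; the netif_* calls are the real API):

#include <linux/atomic.h>
#include <linux/netdevice.h>

struct tx_ring {
	atomic_t used;
	int size;	/* req_tx_entries_per_subcrq in the driver */
	int index;
};

static void tx_submit(struct net_device *dev, struct tx_ring *r)
{
	if (atomic_inc_return(&r->used) >= r->size)
		netif_stop_subqueue(dev, r->index);	/* ring is full */
}

static void tx_complete(struct net_device *dev, struct tx_ring *r)
{
	/* wake at half occupancy, and only if we actually stopped it */
	if (atomic_dec_return(&r->used) <= r->size / 2 &&
	    __netif_subqueue_stopped(dev, r->index))
		netif_wake_subqueue(dev, r->index);
}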
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 422824f1f42a..1993b42666f7 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -863,6 +863,7 @@ struct ibmvnic_sub_crq_queue {
863 spinlock_t lock; 863 spinlock_t lock;
864 struct sk_buff *rx_skb_top; 864 struct sk_buff *rx_skb_top;
865 struct ibmvnic_adapter *adapter; 865 struct ibmvnic_adapter *adapter;
866 atomic_t used;
866}; 867};
867 868
868struct ibmvnic_long_term_buff { 869struct ibmvnic_long_term_buff {
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 2175cced402f..e9af89ad039c 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -6274,8 +6274,8 @@ static int e1000e_pm_freeze(struct device *dev)
6274 /* Quiesce the device without resetting the hardware */ 6274 /* Quiesce the device without resetting the hardware */
6275 e1000e_down(adapter, false); 6275 e1000e_down(adapter, false);
6276 e1000_free_irq(adapter); 6276 e1000_free_irq(adapter);
6277 e1000e_reset_interrupt_capability(adapter);
6278 } 6277 }
6278 e1000e_reset_interrupt_capability(adapter);
6279 6279
6280 /* Allow time for pending master requests to run */ 6280 /* Allow time for pending master requests to run */
6281 e1000e_disable_pcie_master(&adapter->hw); 6281 e1000e_disable_pcie_master(&adapter->hw);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index e8a8351c8ea9..82a95cc2c8ee 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -4438,8 +4438,12 @@ static void i40e_napi_enable_all(struct i40e_vsi *vsi)
4438 if (!vsi->netdev) 4438 if (!vsi->netdev)
4439 return; 4439 return;
4440 4440
4441 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) 4441 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
4442 napi_enable(&vsi->q_vectors[q_idx]->napi); 4442 struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
4443
4444 if (q_vector->rx.ring || q_vector->tx.ring)
4445 napi_enable(&q_vector->napi);
4446 }
4443} 4447}
4444 4448
4445/** 4449/**
@@ -4453,8 +4457,12 @@ static void i40e_napi_disable_all(struct i40e_vsi *vsi)
4453 if (!vsi->netdev) 4457 if (!vsi->netdev)
4454 return; 4458 return;
4455 4459
4456 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) 4460 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
4457 napi_disable(&vsi->q_vectors[q_idx]->napi); 4461 struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
4462
4463 if (q_vector->rx.ring || q_vector->tx.ring)
4464 napi_disable(&q_vector->napi);
4465 }
4458} 4466}
4459 4467
4460/** 4468/**
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index e8c105164931..0e0fa7030565 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -2305,6 +2305,17 @@ static int sync_toggles(struct mlx4_dev *dev)
2305 rd_toggle = swab32(readl(&priv->mfunc.comm->slave_read)); 2305 rd_toggle = swab32(readl(&priv->mfunc.comm->slave_read));
2306 if (wr_toggle == 0xffffffff || rd_toggle == 0xffffffff) { 2306 if (wr_toggle == 0xffffffff || rd_toggle == 0xffffffff) {
2307 /* PCI might be offline */ 2307 /* PCI might be offline */
2308
2309 /* If device removal has been requested,
2310 * do not continue retrying.
2311 */
2312 if (dev->persist->interface_state &
2313 MLX4_INTERFACE_STATE_NOWAIT) {
2314 mlx4_warn(dev,
2315 "communication channel is offline\n");
2316 return -EIO;
2317 }
2318
2308 msleep(100); 2319 msleep(100);
2309 wr_toggle = swab32(readl(&priv->mfunc.comm-> 2320 wr_toggle = swab32(readl(&priv->mfunc.comm->
2310 slave_write)); 2321 slave_write));
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 21377c315083..703205475524 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -1940,6 +1940,14 @@ static int mlx4_comm_check_offline(struct mlx4_dev *dev)
1940 (u32)(1 << COMM_CHAN_OFFLINE_OFFSET)); 1940 (u32)(1 << COMM_CHAN_OFFLINE_OFFSET));
1941 if (!offline_bit) 1941 if (!offline_bit)
1942 return 0; 1942 return 0;
1943
1944 /* If device removal has been requested,
1945 * do not continue retrying.
1946 */
1947 if (dev->persist->interface_state &
1948 MLX4_INTERFACE_STATE_NOWAIT)
1949 break;
1950
1943 /* There are cases as part of AER/Reset flow that PF needs 1951 /* There are cases as part of AER/Reset flow that PF needs
1944 * around 100 msec to load. We therefore sleep for 100 msec 1952 * around 100 msec to load. We therefore sleep for 100 msec
1945 * to allow other tasks to make use of that CPU during this 1953 * to allow other tasks to make use of that CPU during this
@@ -3955,6 +3963,9 @@ static void mlx4_remove_one(struct pci_dev *pdev)
3955 struct devlink *devlink = priv_to_devlink(priv); 3963 struct devlink *devlink = priv_to_devlink(priv);
3956 int active_vfs = 0; 3964 int active_vfs = 0;
3957 3965
3966 if (mlx4_is_slave(dev))
3967 persist->interface_state |= MLX4_INTERFACE_STATE_NOWAIT;
3968
3958 mutex_lock(&persist->interface_state_mutex); 3969 mutex_lock(&persist->interface_state_mutex);
3959 persist->interface_state |= MLX4_INTERFACE_STATE_DELETION; 3970 persist->interface_state |= MLX4_INTERFACE_STATE_DELETION;
3960 mutex_unlock(&persist->interface_state_mutex); 3971 mutex_unlock(&persist->interface_state_mutex);
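
The two mlx4 hunks cooperate: mlx4_remove_one() sets MLX4_INTERFACE_STATE_NOWAIT for slave functions before anything blocks, and the communication-channel retry loops check it so an offline channel fails fast with -EIO instead of burning the full retry budget during device removal. The loop shape, modeled in userspace with simplified names:

#include <errno.h>
#include <stdbool.h>

#define STATE_NOWAIT	0x1	/* set by the removal path */

struct chan {
	unsigned int state;
};

static int wait_channel_online(struct chan *c, int max_retries,
			       bool (*online)(struct chan *),
			       void (*sleep_100ms)(void))
{
	int i;

	for (i = 0; i < max_retries; i++) {
		if (online(c))
			return 0;
		if (c->state & STATE_NOWAIT)
			return -EIO;	/* removal requested: stop retrying */
		sleep_100ms();
	}
	return -ETIMEDOUT;
}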
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index ddb4ca4ff930..117170014e88 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -14,6 +14,7 @@ config MLX5_CORE
14config MLX5_CORE_EN 14config MLX5_CORE_EN
15 bool "Mellanox Technologies ConnectX-4 Ethernet support" 15 bool "Mellanox Technologies ConnectX-4 Ethernet support"
16 depends on NETDEVICES && ETHERNET && PCI && MLX5_CORE 16 depends on NETDEVICES && ETHERNET && PCI && MLX5_CORE
17 depends on IPV6=y || IPV6=n || MLX5_CORE=m
17 imply PTP_1588_CLOCK 18 imply PTP_1588_CLOCK
18 default n 19 default n
19 ---help--- 20 ---help---
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index caa837e5e2b9..a380353a78c2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -361,6 +361,8 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
361 case MLX5_CMD_OP_QUERY_VPORT_COUNTER: 361 case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
362 case MLX5_CMD_OP_ALLOC_Q_COUNTER: 362 case MLX5_CMD_OP_ALLOC_Q_COUNTER:
363 case MLX5_CMD_OP_QUERY_Q_COUNTER: 363 case MLX5_CMD_OP_QUERY_Q_COUNTER:
364 case MLX5_CMD_OP_SET_RATE_LIMIT:
365 case MLX5_CMD_OP_QUERY_RATE_LIMIT:
364 case MLX5_CMD_OP_ALLOC_PD: 366 case MLX5_CMD_OP_ALLOC_PD:
365 case MLX5_CMD_OP_ALLOC_UAR: 367 case MLX5_CMD_OP_ALLOC_UAR:
366 case MLX5_CMD_OP_CONFIG_INT_MODERATION: 368 case MLX5_CMD_OP_CONFIG_INT_MODERATION:
@@ -497,6 +499,8 @@ const char *mlx5_command_str(int command)
497 MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER); 499 MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER);
498 MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER); 500 MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER);
499 MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER); 501 MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER);
502 MLX5_COMMAND_STR_CASE(SET_RATE_LIMIT);
503 MLX5_COMMAND_STR_CASE(QUERY_RATE_LIMIT);
500 MLX5_COMMAND_STR_CASE(ALLOC_PD); 504 MLX5_COMMAND_STR_CASE(ALLOC_PD);
501 MLX5_COMMAND_STR_CASE(DEALLOC_PD); 505 MLX5_COMMAND_STR_CASE(DEALLOC_PD);
502 MLX5_COMMAND_STR_CASE(ALLOC_UAR); 506 MLX5_COMMAND_STR_CASE(ALLOC_UAR);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index f6a6ded204f6..dc52053128bc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -928,10 +928,6 @@ void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv);
928int mlx5e_attach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev); 928int mlx5e_attach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev);
929void mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev); 929void mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev);
930u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout); 930u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout);
931void mlx5e_add_vxlan_port(struct net_device *netdev,
932 struct udp_tunnel_info *ti);
933void mlx5e_del_vxlan_port(struct net_device *netdev,
934 struct udp_tunnel_info *ti);
935 931
936int mlx5e_get_offload_stats(int attr_id, const struct net_device *dev, 932int mlx5e_get_offload_stats(int attr_id, const struct net_device *dev,
937 void *sp); 933 void *sp);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index 0523ed47f597..8fa23f6a1f67 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -302,6 +302,9 @@ static u8 mlx5e_dcbnl_setdcbx(struct net_device *dev, u8 mode)
302 struct mlx5e_priv *priv = netdev_priv(dev); 302 struct mlx5e_priv *priv = netdev_priv(dev);
303 struct mlx5e_dcbx *dcbx = &priv->dcbx; 303 struct mlx5e_dcbx *dcbx = &priv->dcbx;
304 304
305 if (mode & DCB_CAP_DCBX_LLD_MANAGED)
306 return 1;
307
305 if ((!mode) && MLX5_CAP_GEN(priv->mdev, dcbx)) { 308 if ((!mode) && MLX5_CAP_GEN(priv->mdev, dcbx)) {
306 if (dcbx->mode == MLX5E_DCBX_PARAM_VER_OPER_AUTO) 309 if (dcbx->mode == MLX5E_DCBX_PARAM_VER_OPER_AUTO)
307 return 0; 310 return 0;
@@ -315,13 +318,10 @@ static u8 mlx5e_dcbnl_setdcbx(struct net_device *dev, u8 mode)
315 return 1; 318 return 1;
316 } 319 }
317 320
318 if (mlx5e_dcbnl_switch_to_host_mode(netdev_priv(dev))) 321 if (!(mode & DCB_CAP_DCBX_HOST))
319 return 1; 322 return 1;
320 323
321 if ((mode & DCB_CAP_DCBX_LLD_MANAGED) || 324 if (mlx5e_dcbnl_switch_to_host_mode(netdev_priv(dev)))
322 !(mode & DCB_CAP_DCBX_VER_CEE) ||
323 !(mode & DCB_CAP_DCBX_VER_IEEE) ||
324 !(mode & DCB_CAP_DCBX_HOST))
325 return 1; 325 return 1;
326 326
327 return 0; 327 return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 8ef64c4db2c2..66c133757a5e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -3100,8 +3100,8 @@ static int mlx5e_get_vf_stats(struct net_device *dev,
3100 vf_stats); 3100 vf_stats);
3101} 3101}
3102 3102
3103void mlx5e_add_vxlan_port(struct net_device *netdev, 3103static void mlx5e_add_vxlan_port(struct net_device *netdev,
3104 struct udp_tunnel_info *ti) 3104 struct udp_tunnel_info *ti)
3105{ 3105{
3106 struct mlx5e_priv *priv = netdev_priv(netdev); 3106 struct mlx5e_priv *priv = netdev_priv(netdev);
3107 3107
@@ -3114,8 +3114,8 @@ void mlx5e_add_vxlan_port(struct net_device *netdev,
3114 mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 1); 3114 mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 1);
3115} 3115}
3116 3116
3117void mlx5e_del_vxlan_port(struct net_device *netdev, 3117static void mlx5e_del_vxlan_port(struct net_device *netdev,
3118 struct udp_tunnel_info *ti) 3118 struct udp_tunnel_info *ti)
3119{ 3119{
3120 struct mlx5e_priv *priv = netdev_priv(netdev); 3120 struct mlx5e_priv *priv = netdev_priv(netdev);
3121 3121
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 2c864574a9d5..f621373bd7a5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -393,8 +393,6 @@ static const struct net_device_ops mlx5e_netdev_ops_rep = {
393 .ndo_get_phys_port_name = mlx5e_rep_get_phys_port_name, 393 .ndo_get_phys_port_name = mlx5e_rep_get_phys_port_name,
394 .ndo_setup_tc = mlx5e_rep_ndo_setup_tc, 394 .ndo_setup_tc = mlx5e_rep_ndo_setup_tc,
395 .ndo_get_stats64 = mlx5e_rep_get_stats, 395 .ndo_get_stats64 = mlx5e_rep_get_stats,
396 .ndo_udp_tunnel_add = mlx5e_add_vxlan_port,
397 .ndo_udp_tunnel_del = mlx5e_del_vxlan_port,
398 .ndo_has_offload_stats = mlx5e_has_offload_stats, 396 .ndo_has_offload_stats = mlx5e_has_offload_stats,
399 .ndo_get_offload_stats = mlx5e_get_offload_stats, 397 .ndo_get_offload_stats = mlx5e_get_offload_stats,
400}; 398};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 3d371688fbbb..bafcb349a50c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -601,6 +601,10 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
601 if (lro_num_seg > 1) { 601 if (lro_num_seg > 1) {
602 mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt); 602 mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
603 skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg); 603 skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg);
604 /* Subtract one since we already counted this as one
605 * "regular" packet in mlx5e_complete_rx_cqe()
606 */
607 rq->stats.packets += lro_num_seg - 1;
604 rq->stats.lro_packets++; 608 rq->stats.lro_packets++;
605 rq->stats.lro_bytes += cqe_bcnt; 609 rq->stats.lro_bytes += cqe_bcnt;
606 } 610 }
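
The en_rx.c change makes the RX packet counter agree with the wire: mlx5e_complete_rx_cqe() counts each CQE once, so an LRO aggregate of N segments must contribute another N - 1. A toy model of the accounting:

#include <stdio.h>

int main(void)
{
	unsigned long packets = 0;
	int lro_num_seg = 16;	/* one CQE aggregating 16 wire frames */

	packets++;		/* mlx5e_complete_rx_cqe(): one per CQE */
	if (lro_num_seg > 1)
		packets += lro_num_seg - 1;	/* the fix above */

	printf("frames counted: %lu\n", packets);	/* 16 */
	return 0;
}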
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
index 31e3cb7ee5fe..5621dcfda4f1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
@@ -204,9 +204,6 @@ mlx5e_test_loopback_validate(struct sk_buff *skb,
204 struct iphdr *iph; 204 struct iphdr *iph;
205 205
206 /* We are only going to peek, no need to clone the SKB */ 206 /* We are only going to peek, no need to clone the SKB */
207 if (skb->protocol != htons(ETH_P_IP))
208 goto out;
209
210 if (MLX5E_TEST_PKT_SIZE - ETH_HLEN > skb_headlen(skb)) 207 if (MLX5E_TEST_PKT_SIZE - ETH_HLEN > skb_headlen(skb))
211 goto out; 208 goto out;
212 209
@@ -249,7 +246,7 @@ static int mlx5e_test_loopback_setup(struct mlx5e_priv *priv,
249 lbtp->loopback_ok = false; 246 lbtp->loopback_ok = false;
250 init_completion(&lbtp->comp); 247 init_completion(&lbtp->comp);
251 248
252 lbtp->pt.type = htons(ETH_P_ALL); 249 lbtp->pt.type = htons(ETH_P_IP);
253 lbtp->pt.func = mlx5e_test_loopback_validate; 250 lbtp->pt.func = mlx5e_test_loopback_validate;
254 lbtp->pt.dev = priv->netdev; 251 lbtp->pt.dev = priv->netdev;
255 lbtp->pt.af_packet_priv = lbtp; 252 lbtp->pt.af_packet_priv = lbtp;
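
The selftest fix moves the filtering into the packet_type itself: registering for ETH_P_IP means the stack delivers only IPv4 frames to the validate hook, instead of ETH_P_ALL plus a per-packet skb->protocol check. The registration pattern, with the handler body elided:

#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int lb_validate(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev)
{
	/* ...peek at the headers, signal completion on a match... */
	kfree_skb(skb);
	return 0;
}

static struct packet_type lb_pt = {
	.type = htons(ETH_P_IP),	/* IPv4 only; was ETH_P_ALL */
	.func = lb_validate,
};

/* dev_add_pack(&lb_pt) at setup; dev_remove_pack(&lb_pt) at teardown */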
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 44406a5ec15d..fade7233dac5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -48,9 +48,14 @@
48#include "eswitch.h" 48#include "eswitch.h"
49#include "vxlan.h" 49#include "vxlan.h"
50 50
51enum {
52 MLX5E_TC_FLOW_ESWITCH = BIT(0),
53};
54
51struct mlx5e_tc_flow { 55struct mlx5e_tc_flow {
52 struct rhash_head node; 56 struct rhash_head node;
53 u64 cookie; 57 u64 cookie;
58 u8 flags;
54 struct mlx5_flow_handle *rule; 59 struct mlx5_flow_handle *rule;
55 struct list_head encap; /* flows sharing the same encap */ 60 struct list_head encap; /* flows sharing the same encap */
56 struct mlx5_esw_flow_attr *attr; 61 struct mlx5_esw_flow_attr *attr;
@@ -128,6 +133,23 @@ err_create_ft:
128 return rule; 133 return rule;
129} 134}
130 135
136static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
137 struct mlx5e_tc_flow *flow)
138{
139 struct mlx5_fc *counter = NULL;
140
141 if (!IS_ERR(flow->rule)) {
142 counter = mlx5_flow_rule_counter(flow->rule);
143 mlx5_del_flow_rules(flow->rule);
144 mlx5_fc_destroy(priv->mdev, counter);
145 }
146
147 if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) {
148 mlx5_destroy_flow_table(priv->fs.tc.t);
149 priv->fs.tc.t = NULL;
150 }
151}
152
131static struct mlx5_flow_handle * 153static struct mlx5_flow_handle *
132mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, 154mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
133 struct mlx5_flow_spec *spec, 155 struct mlx5_flow_spec *spec,
@@ -144,7 +166,24 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
144} 166}
145 167
146static void mlx5e_detach_encap(struct mlx5e_priv *priv, 168static void mlx5e_detach_encap(struct mlx5e_priv *priv,
147 struct mlx5e_tc_flow *flow) { 169 struct mlx5e_tc_flow *flow);
170
171static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
172 struct mlx5e_tc_flow *flow)
173{
174 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
175
176 mlx5_eswitch_del_offloaded_rule(esw, flow->rule, flow->attr);
177
178 mlx5_eswitch_del_vlan_action(esw, flow->attr);
179
180 if (flow->attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
181 mlx5e_detach_encap(priv, flow);
182}
183
184static void mlx5e_detach_encap(struct mlx5e_priv *priv,
185 struct mlx5e_tc_flow *flow)
186{
148 struct list_head *next = flow->encap.next; 187 struct list_head *next = flow->encap.next;
149 188
150 list_del(&flow->encap); 189 list_del(&flow->encap);
@@ -168,25 +207,10 @@ static void mlx5e_detach_encap(struct mlx5e_priv *priv,
168static void mlx5e_tc_del_flow(struct mlx5e_priv *priv, 207static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
169 struct mlx5e_tc_flow *flow) 208 struct mlx5e_tc_flow *flow)
170{ 209{
171 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; 210 if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
172 struct mlx5_fc *counter = NULL; 211 mlx5e_tc_del_fdb_flow(priv, flow);
173 212 else
174 if (!IS_ERR(flow->rule)) { 213 mlx5e_tc_del_nic_flow(priv, flow);
175 counter = mlx5_flow_rule_counter(flow->rule);
176 mlx5_del_flow_rules(flow->rule);
177 mlx5_fc_destroy(priv->mdev, counter);
178 }
179
180 if (esw && esw->mode == SRIOV_OFFLOADS) {
181 mlx5_eswitch_del_vlan_action(esw, flow->attr);
182 if (flow->attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
183 mlx5e_detach_encap(priv, flow);
184 }
185
186 if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) {
187 mlx5_destroy_flow_table(priv->fs.tc.t);
188 priv->fs.tc.t = NULL;
189 }
190} 214}
191 215
192static void parse_vxlan_attr(struct mlx5_flow_spec *spec, 216static void parse_vxlan_attr(struct mlx5_flow_spec *spec,
@@ -243,12 +267,15 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
243 skb_flow_dissector_target(f->dissector, 267 skb_flow_dissector_target(f->dissector,
244 FLOW_DISSECTOR_KEY_ENC_PORTS, 268 FLOW_DISSECTOR_KEY_ENC_PORTS,
245 f->mask); 269 f->mask);
270 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
271 struct net_device *up_dev = mlx5_eswitch_get_uplink_netdev(esw);
272 struct mlx5e_priv *up_priv = netdev_priv(up_dev);
246 273
247 /* Full udp dst port must be given */ 274 /* Full udp dst port must be given */
248 if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst))) 275 if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst)))
249 goto vxlan_match_offload_err; 276 goto vxlan_match_offload_err;
250 277
-		if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->dst)) &&
+		if (mlx5e_vxlan_lookup_port(up_priv, be16_to_cpu(key->dst)) &&
 		    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap))
 			parse_vxlan_attr(spec, f);
 		else {
@@ -598,6 +625,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
 }
 
 static int parse_cls_flower(struct mlx5e_priv *priv,
+			    struct mlx5e_tc_flow *flow,
 			    struct mlx5_flow_spec *spec,
 			    struct tc_cls_flower_offload *f)
 {
@@ -609,7 +637,7 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
 
 	err = __parse_cls_flower(priv, spec, f, &min_inline);
 
-	if (!err && esw->mode == SRIOV_OFFLOADS &&
+	if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH) &&
 	    rep->vport != FDB_UPLINK_VPORT) {
 		if (min_inline > esw->offloads.inline_mode) {
 			netdev_warn(priv->netdev,
@@ -970,6 +998,8 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
 			      struct mlx5_esw_flow_attr *attr)
 {
 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+	struct net_device *up_dev = mlx5_eswitch_get_uplink_netdev(esw);
+	struct mlx5e_priv *up_priv = netdev_priv(up_dev);
 	unsigned short family = ip_tunnel_info_af(tun_info);
 	struct ip_tunnel_key *key = &tun_info->key;
 	struct mlx5_encap_entry *e;
@@ -990,7 +1020,7 @@ vxlan_encap_offload_err:
 		return -EOPNOTSUPP;
 	}
 
-	if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->tp_dst)) &&
+	if (mlx5e_vxlan_lookup_port(up_priv, be16_to_cpu(key->tp_dst)) &&
 	    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
 		tunnel_type = MLX5_HEADER_TYPE_VXLAN;
 	} else {
@@ -1106,14 +1136,16 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 		}
 
 		if (is_tcf_vlan(a)) {
-			if (tcf_vlan_action(a) == VLAN_F_POP) {
+			if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
 				attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
-			} else if (tcf_vlan_action(a) == VLAN_F_PUSH) {
+			} else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) {
 				if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q))
 					return -EOPNOTSUPP;
 
 				attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
 				attr->vlan = tcf_vlan_push_vid(a);
+			} else { /* action is TCA_VLAN_ACT_MODIFY */
+				return -EOPNOTSUPP;
 			}
 			continue;
 		}
@@ -1132,23 +1164,19 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
 			   struct tc_cls_flower_offload *f)
 {
 	struct mlx5e_tc_table *tc = &priv->fs.tc;
-	int err = 0;
-	bool fdb_flow = false;
+	int err, attr_size = 0;
 	u32 flow_tag, action;
 	struct mlx5e_tc_flow *flow;
 	struct mlx5_flow_spec *spec;
 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+	u8 flow_flags = 0;
 
-	if (esw && esw->mode == SRIOV_OFFLOADS)
-		fdb_flow = true;
-
-	if (fdb_flow)
-		flow = kzalloc(sizeof(*flow) +
-			       sizeof(struct mlx5_esw_flow_attr),
-			       GFP_KERNEL);
-	else
-		flow = kzalloc(sizeof(*flow), GFP_KERNEL);
+	if (esw && esw->mode == SRIOV_OFFLOADS) {
+		flow_flags = MLX5E_TC_FLOW_ESWITCH;
+		attr_size = sizeof(struct mlx5_esw_flow_attr);
+	}
 
+	flow = kzalloc(sizeof(*flow) + attr_size, GFP_KERNEL);
 	spec = mlx5_vzalloc(sizeof(*spec));
 	if (!spec || !flow) {
 		err = -ENOMEM;
@@ -1156,12 +1184,13 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
 	}
 
 	flow->cookie = f->cookie;
+	flow->flags = flow_flags;
 
-	err = parse_cls_flower(priv, spec, f);
+	err = parse_cls_flower(priv, flow, spec, f);
 	if (err < 0)
 		goto err_free;
 
-	if (fdb_flow) {
+	if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
 		flow->attr = (struct mlx5_esw_flow_attr *)(flow + 1);
 		err = parse_tc_fdb_actions(priv, f->exts, flow);
 		if (err < 0)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index f193128bac4b..57f5e2d7ebd1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -274,15 +274,18 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
 			sq->stats.tso_bytes += skb->len - ihs;
 		}
 
+		sq->stats.packets += skb_shinfo(skb)->gso_segs;
 		num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
 	} else {
 		bf = sq->bf_budget &&
 		     !skb->xmit_more &&
 		     !skb_shinfo(skb)->nr_frags;
 		ihs = mlx5e_get_inline_hdr_size(sq, skb, bf);
+		sq->stats.packets++;
 		num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
 	}
 
+	sq->stats.bytes += num_bytes;
 	wi->num_bytes = num_bytes;
 
 	ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
@@ -381,8 +384,6 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
 	if (bf)
 		sq->bf_budget--;
 
-	sq->stats.packets++;
-	sq->stats.bytes += num_bytes;
 	return NETDEV_TX_OK;
 
 dma_unmap_wqe_err:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 5b78883d5654..ad329b1680b4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -209,6 +209,7 @@ struct mlx5_esw_offload {
 	struct mlx5_eswitch_rep *vport_reps;
 	DECLARE_HASHTABLE(encap_tbl, 8);
 	u8 inline_mode;
+	u64 num_flows;
 };
 
 struct mlx5_eswitch {
@@ -271,6 +272,11 @@ struct mlx5_flow_handle *
 mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
 				struct mlx5_flow_spec *spec,
 				struct mlx5_esw_flow_attr *attr);
+void
+mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
+				struct mlx5_flow_handle *rule,
+				struct mlx5_esw_flow_attr *attr);
+
 struct mlx5_flow_handle *
 mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn);
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 4f5b0d47d5f3..307ec6c5fd3b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -93,10 +93,27 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
 				   spec, &flow_act, dest, i);
 	if (IS_ERR(rule))
 		mlx5_fc_destroy(esw->dev, counter);
+	else
+		esw->offloads.num_flows++;
 
 	return rule;
 }
 
+void
+mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
+				struct mlx5_flow_handle *rule,
+				struct mlx5_esw_flow_attr *attr)
+{
+	struct mlx5_fc *counter = NULL;
+
+	if (!IS_ERR(rule)) {
+		counter = mlx5_flow_rule_counter(rule);
+		mlx5_del_flow_rules(rule);
+		mlx5_fc_destroy(esw->dev, counter);
+		esw->offloads.num_flows--;
+	}
+}
+
 static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
 {
 	struct mlx5_eswitch_rep *rep;
@@ -908,6 +925,11 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
 	    MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
 		return -EOPNOTSUPP;
 
+	if (esw->offloads.num_flows > 0) {
+		esw_warn(dev, "Can't set inline mode when flows are configured\n");
+		return -EOPNOTSUPP;
+	}
+
 	err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
 	if (err)
 		goto out;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 2478516a61e2..ded27bb9a3b6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1136,7 +1136,7 @@ static struct mlx5_flow_group *create_autogroup(struct mlx5_flow_table *ft,
 					       u32 *match_criteria)
 {
 	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
-	struct list_head *prev = ft->node.children.prev;
+	struct list_head *prev = &ft->node.children;
 	unsigned int candidate_index = 0;
 	struct mlx5_flow_group *fg;
 	void *match_criteria_addr;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
index 55957246c0e8..b5d5519542e8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
@@ -294,7 +294,7 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
 					 struct netdev_notifier_changeupper_info *info)
 {
 	struct net_device *upper = info->upper_dev, *ndev_tmp;
-	struct netdev_lag_upper_info *lag_upper_info;
+	struct netdev_lag_upper_info *lag_upper_info = NULL;
 	bool is_bonded;
 	int bond_status = 0;
 	int num_slaves = 0;
@@ -303,7 +303,8 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
 	if (!netif_is_lag_master(upper))
 		return 0;
 
-	lag_upper_info = info->upper_info;
+	if (info->linking)
+		lag_upper_info = info->upper_info;
 
 	/* The event may still be of interest if the slave does not belong to
 	 * us, but is enslaved to a master which has one or more of our netdevs
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index c4242a4e8130..60154a175bd3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -87,7 +87,7 @@ static struct mlx5_profile profile[] = {
 	[2] = {
 		.mask		= MLX5_PROF_MASK_QP_SIZE |
 				  MLX5_PROF_MASK_MR_CACHE,
-		.log_max_qp	= 17,
+		.log_max_qp	= 18,
 		.mr_cache[0]	= {
 			.size	= 500,
 			.limit	= 250
@@ -1352,6 +1352,7 @@ static int init_one(struct pci_dev *pdev,
 	if (err)
 		goto clean_load;
 
+	pci_save_state(pdev);
 	return 0;
 
 clean_load:
@@ -1407,9 +1408,8 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
 
 	mlx5_enter_error_state(dev);
 	mlx5_unload_one(dev, priv, false);
-	/* In case of kernel call save the pci state and drain the health wq */
+	/* In case of kernel call drain the health wq */
 	if (state) {
-		pci_save_state(pdev);
 		mlx5_drain_health_wq(dev);
 		mlx5_pci_disable_device(dev);
 	}
@@ -1461,6 +1461,7 @@ static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
 
 	pci_set_master(pdev);
 	pci_restore_state(pdev);
+	pci_save_state(pdev);
 
 	if (wait_vital(pdev)) {
 		dev_err(&pdev->dev, "%s: wait_vital timed out\n", __func__);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 0899e2d310e2..d9616daf8a70 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -769,7 +769,7 @@ static inline void mlxsw_reg_spvid_pack(char *payload, u8 local_port, u16 pvid)
 #define MLXSW_REG_SPVM_ID 0x200F
 #define MLXSW_REG_SPVM_BASE_LEN 0x04 /* base length, without records */
 #define MLXSW_REG_SPVM_REC_LEN 0x04 /* record length */
-#define MLXSW_REG_SPVM_REC_MAX_COUNT 256
+#define MLXSW_REG_SPVM_REC_MAX_COUNT 255
 #define MLXSW_REG_SPVM_LEN (MLXSW_REG_SPVM_BASE_LEN +	\
 		    MLXSW_REG_SPVM_REC_LEN * MLXSW_REG_SPVM_REC_MAX_COUNT)
 
@@ -1702,7 +1702,7 @@ static inline void mlxsw_reg_sfmr_pack(char *payload,
 #define MLXSW_REG_SPVMLR_ID 0x2020
 #define MLXSW_REG_SPVMLR_BASE_LEN 0x04 /* base length, without records */
 #define MLXSW_REG_SPVMLR_REC_LEN 0x04 /* record length */
-#define MLXSW_REG_SPVMLR_REC_MAX_COUNT 256
+#define MLXSW_REG_SPVMLR_REC_MAX_COUNT 255
 #define MLXSW_REG_SPVMLR_LEN (MLXSW_REG_SPVMLR_BASE_LEN + \
 			      MLXSW_REG_SPVMLR_REC_LEN * \
 			      MLXSW_REG_SPVMLR_REC_MAX_COUNT)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
index 22ab42925377..ae6cccc666e4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
@@ -303,11 +303,11 @@ void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
 	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, mlxsw_sp_port->dev,
 					   ingress,
 					   MLXSW_SP_ACL_PROFILE_FLOWER);
-	if (WARN_ON(IS_ERR(ruleset)))
+	if (IS_ERR(ruleset))
 		return;
 
 	rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
-	if (!WARN_ON(!rule)) {
+	if (rule) {
 		mlxsw_sp_acl_rule_del(mlxsw_sp, rule);
 		mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
 	}
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index 06c9f4100cb9..6ad44be08b33 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -25,6 +25,7 @@
 #include <linux/of_irq.h>
 #include <linux/crc32.h>
 #include <linux/crc32c.h>
+#include <linux/circ_buf.h>
 
 #include "moxart_ether.h"
 
@@ -278,6 +279,13 @@ rx_next:
 	return rx;
 }
 
+static int moxart_tx_queue_space(struct net_device *ndev)
+{
+	struct moxart_mac_priv_t *priv = netdev_priv(ndev);
+
+	return CIRC_SPACE(priv->tx_head, priv->tx_tail, TX_DESC_NUM);
+}
+
 static void moxart_tx_finished(struct net_device *ndev)
 {
 	struct moxart_mac_priv_t *priv = netdev_priv(ndev);
@@ -297,6 +305,9 @@ static void moxart_tx_finished(struct net_device *ndev)
 		tx_tail = TX_NEXT(tx_tail);
 	}
 	priv->tx_tail = tx_tail;
+	if (netif_queue_stopped(ndev) &&
+	    moxart_tx_queue_space(ndev) >= TX_WAKE_THRESHOLD)
+		netif_wake_queue(ndev);
 }
 
 static irqreturn_t moxart_mac_interrupt(int irq, void *dev_id)
@@ -324,13 +335,18 @@ static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	struct moxart_mac_priv_t *priv = netdev_priv(ndev);
 	void *desc;
 	unsigned int len;
-	unsigned int tx_head = priv->tx_head;
+	unsigned int tx_head;
 	u32 txdes1;
 	int ret = NETDEV_TX_BUSY;
 
+	spin_lock_irq(&priv->txlock);
+
+	tx_head = priv->tx_head;
 	desc = priv->tx_desc_base + (TX_REG_DESC_SIZE * tx_head);
 
-	spin_lock_irq(&priv->txlock);
+	if (moxart_tx_queue_space(ndev) == 1)
+		netif_stop_queue(ndev);
+
 	if (moxart_desc_read(desc + TX_REG_OFFSET_DESC0) & TX_DESC0_DMA_OWN) {
 		net_dbg_ratelimited("no TX space for packet\n");
 		priv->stats.tx_dropped++;
diff --git a/drivers/net/ethernet/moxa/moxart_ether.h b/drivers/net/ethernet/moxa/moxart_ether.h
index 93a9563ac7c6..afc32ec998c0 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.h
+++ b/drivers/net/ethernet/moxa/moxart_ether.h
@@ -59,6 +59,7 @@
 #define TX_NEXT(N)		(((N) + 1) & (TX_DESC_NUM_MASK))
 #define TX_BUF_SIZE		1600
 #define TX_BUF_SIZE_MAX		(TX_DESC1_BUF_SIZE_MASK+1)
+#define TX_WAKE_THRESHOLD	16
 
 #define RX_DESC_NUM		64
 #define RX_DESC_NUM_MASK	(RX_DESC_NUM-1)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 9179a99563af..a41377e26c07 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -3275,9 +3275,10 @@ void nfp_net_netdev_clean(struct net_device *netdev)
 {
 	struct nfp_net *nn = netdev_priv(netdev);
 
+	unregister_netdev(nn->netdev);
+
 	if (nn->xdp_prog)
 		bpf_prog_put(nn->xdp_prog);
 	if (nn->bpf_offload_xdp)
 		nfp_net_xdp_offload(nn, NULL);
-	unregister_netdev(nn->netdev);
 }
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index d42d03df751a..7e3a6fed3da6 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -422,8 +422,9 @@ static void qed_cxt_set_proto_cid_count(struct qed_hwfn *p_hwfn,
 		u32 page_sz = p_mgr->clients[ILT_CLI_CDUC].p_size.val;
 		u32 cxt_size = CONN_CXT_SIZE(p_hwfn);
 		u32 elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
+		u32 align = elems_per_page * DQ_RANGE_ALIGN;
 
-		p_conn->cid_count = roundup(p_conn->cid_count, elems_per_page);
+		p_conn->cid_count = roundup(p_conn->cid_count, align);
 	}
 }
 
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index e2a081ceaf52..e518f914eab1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -2389,9 +2389,8 @@ qed_chain_alloc_sanity_check(struct qed_dev *cdev,
 	 * size/capacity fields are of a u32 type.
 	 */
 	if ((cnt_type == QED_CHAIN_CNT_TYPE_U16 &&
-	     chain_size > 0x10000) ||
-	    (cnt_type == QED_CHAIN_CNT_TYPE_U32 &&
-	     chain_size > 0x100000000ULL)) {
+	     chain_size > ((u32)U16_MAX + 1)) ||
+	    (cnt_type == QED_CHAIN_CNT_TYPE_U32 && chain_size > U32_MAX)) {
 		DP_NOTICE(cdev,
 			  "The actual chain size (0x%llx) is larger than the maximal possible value\n",
 			  chain_size);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
index 3a44d6b395fa..098766f7fe88 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
@@ -190,6 +190,9 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
 	p_init->num_sq_pages_in_ring = p_params->num_sq_pages_in_ring;
 	p_init->num_r2tq_pages_in_ring = p_params->num_r2tq_pages_in_ring;
 	p_init->num_uhq_pages_in_ring = p_params->num_uhq_pages_in_ring;
+	p_init->ooo_enable = p_params->ooo_enable;
+	p_init->ll2_rx_queue_id = p_hwfn->hw_info.resc_start[QED_LL2_QUEUE] +
+				  p_params->ll2_ooo_queue_id;
 	p_init->func_params.log_page_size = p_params->log_page_size;
 	val = p_params->num_tasks;
 	p_init->func_params.num_tasks = cpu_to_le16(val);
@@ -786,6 +789,23 @@ static void qed_iscsi_release_connection(struct qed_hwfn *p_hwfn,
 	spin_unlock_bh(&p_hwfn->p_iscsi_info->lock);
 }
 
+void qed_iscsi_free_connection(struct qed_hwfn *p_hwfn,
+			       struct qed_iscsi_conn *p_conn)
+{
+	qed_chain_free(p_hwfn->cdev, &p_conn->xhq);
+	qed_chain_free(p_hwfn->cdev, &p_conn->uhq);
+	qed_chain_free(p_hwfn->cdev, &p_conn->r2tq);
+	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+			  sizeof(struct tcp_upload_params),
+			  p_conn->tcp_upload_params_virt_addr,
+			  p_conn->tcp_upload_params_phys_addr);
+	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+			  sizeof(struct scsi_terminate_extra_params),
+			  p_conn->queue_cnts_virt_addr,
+			  p_conn->queue_cnts_phys_addr);
+	kfree(p_conn);
+}
+
 struct qed_iscsi_info *qed_iscsi_alloc(struct qed_hwfn *p_hwfn)
 {
 	struct qed_iscsi_info *p_iscsi_info;
@@ -807,6 +827,17 @@ void qed_iscsi_setup(struct qed_hwfn *p_hwfn,
 void qed_iscsi_free(struct qed_hwfn *p_hwfn,
 		    struct qed_iscsi_info *p_iscsi_info)
 {
+	struct qed_iscsi_conn *p_conn = NULL;
+
+	while (!list_empty(&p_hwfn->p_iscsi_info->free_list)) {
+		p_conn = list_first_entry(&p_hwfn->p_iscsi_info->free_list,
+					  struct qed_iscsi_conn, list_entry);
+		if (p_conn) {
+			list_del(&p_conn->list_entry);
+			qed_iscsi_free_connection(p_hwfn, p_conn);
+		}
+	}
+
 	kfree(p_iscsi_info);
 }
 
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index 9a0b9af10a57..0d3cef409c96 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -211,6 +211,8 @@ static void qed_ll2b_complete_rx_packet(struct qed_hwfn *p_hwfn,
 	/* If need to reuse or there's no replacement buffer, repost this */
 	if (rc)
 		goto out_post;
+	dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
+			 cdev->ll2->rx_size, DMA_FROM_DEVICE);
 
 	skb = build_skb(buffer->data, 0);
 	if (!skb) {
@@ -474,7 +476,7 @@ qed_ll2_rxq_completion_gsi(struct qed_hwfn *p_hwfn,
 static int qed_ll2_rxq_completion_reg(struct qed_hwfn *p_hwfn,
 				      struct qed_ll2_info *p_ll2_conn,
 				      union core_rx_cqe_union *p_cqe,
-				      unsigned long lock_flags,
+				      unsigned long *p_lock_flags,
 				      bool b_last_cqe)
 {
 	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
@@ -495,10 +497,10 @@ static int qed_ll2_rxq_completion_reg(struct qed_hwfn *p_hwfn,
 			  "Mismatch between active_descq and the LL2 Rx chain\n");
 	list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
 
-	spin_unlock_irqrestore(&p_rx->lock, lock_flags);
+	spin_unlock_irqrestore(&p_rx->lock, *p_lock_flags);
 	qed_ll2b_complete_rx_packet(p_hwfn, p_ll2_conn->my_id,
 				    p_pkt, &p_cqe->rx_cqe_fp, b_last_cqe);
-	spin_lock_irqsave(&p_rx->lock, lock_flags);
+	spin_lock_irqsave(&p_rx->lock, *p_lock_flags);
 
 	return 0;
 }
@@ -538,7 +540,8 @@ static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
 			break;
 		case CORE_RX_CQE_TYPE_REGULAR:
 			rc = qed_ll2_rxq_completion_reg(p_hwfn, p_ll2_conn,
-							cqe, flags, b_last_cqe);
+							cqe, &flags,
+							b_last_cqe);
 			break;
 		default:
 			rc = -EIO;
@@ -968,7 +971,7 @@ static int qed_ll2_start_ooo(struct qed_dev *cdev,
 {
 	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
 	u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;
-	struct qed_ll2_conn ll2_info;
+	struct qed_ll2_conn ll2_info = { 0 };
 	int rc;
 
 	ll2_info.conn_type = QED_LL2_TYPE_ISCSI_OOO;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ooo.c b/drivers/net/ethernet/qlogic/qed/qed_ooo.c
index 7d731c6cb892..378afce58b3f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ooo.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ooo.c
@@ -159,6 +159,8 @@ struct qed_ooo_info *qed_ooo_alloc(struct qed_hwfn *p_hwfn)
 	if (!p_ooo_info->ooo_history.p_cqes)
 		goto no_history_mem;
 
+	p_ooo_info->ooo_history.num_of_cqes = QED_MAX_NUM_OOO_HISTORY_ENTRIES;
+
 	return p_ooo_info;
 
 no_history_mem:
diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c
index 7cd76b6b5cb9..2ae852454780 100644
--- a/drivers/net/ethernet/rocker/rocker_ofdpa.c
+++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c
@@ -2216,18 +2216,15 @@ static int ofdpa_port_stp_update(struct ofdpa_port *ofdpa_port,
 {
 	bool want[OFDPA_CTRL_MAX] = { 0, };
 	bool prev_ctrls[OFDPA_CTRL_MAX];
-	u8 uninitialized_var(prev_state);
+	u8 prev_state;
 	int err;
 	int i;
 
-	if (switchdev_trans_ph_prepare(trans)) {
-		memcpy(prev_ctrls, ofdpa_port->ctrls, sizeof(prev_ctrls));
-		prev_state = ofdpa_port->stp_state;
-	}
-
-	if (ofdpa_port->stp_state == state)
+	prev_state = ofdpa_port->stp_state;
+	if (prev_state == state)
 		return 0;
 
+	memcpy(prev_ctrls, ofdpa_port->ctrls, sizeof(prev_ctrls));
 	ofdpa_port->stp_state = state;
 
 	switch (state) {
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 334bcc6df6b2..50d28261b6b9 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -2404,7 +2404,7 @@ static void efx_udp_tunnel_del(struct net_device *dev, struct udp_tunnel_info *t
 	tnl.type = (u16)efx_tunnel_type;
 	tnl.port = ti->port;
 
-	if (efx->type->udp_tnl_add_port)
+	if (efx->type->udp_tnl_del_port)
 		(void)efx->type->udp_tnl_del_port(efx, tnl);
 }
 
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 65077c77082a..91e9bd7159ab 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -1535,32 +1535,33 @@ static int smc_close(struct net_device *dev)
  * Ethtool support
  */
 static int
-smc_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
+smc_ethtool_get_link_ksettings(struct net_device *dev,
+			       struct ethtool_link_ksettings *cmd)
 {
 	struct smc_local *lp = netdev_priv(dev);
 	int ret;
 
-	cmd->maxtxpkt = 1;
-	cmd->maxrxpkt = 1;
-
 	if (lp->phy_type != 0) {
 		spin_lock_irq(&lp->lock);
-		ret = mii_ethtool_gset(&lp->mii, cmd);
+		ret = mii_ethtool_get_link_ksettings(&lp->mii, cmd);
 		spin_unlock_irq(&lp->lock);
 	} else {
-		cmd->supported = SUPPORTED_10baseT_Half |
+		u32 supported = SUPPORTED_10baseT_Half |
 				 SUPPORTED_10baseT_Full |
 				 SUPPORTED_TP | SUPPORTED_AUI;
 
 		if (lp->ctl_rspeed == 10)
-			ethtool_cmd_speed_set(cmd, SPEED_10);
+			cmd->base.speed = SPEED_10;
 		else if (lp->ctl_rspeed == 100)
-			ethtool_cmd_speed_set(cmd, SPEED_100);
+			cmd->base.speed = SPEED_100;
+
+		cmd->base.autoneg = AUTONEG_DISABLE;
+		cmd->base.port = 0;
+		cmd->base.duplex = lp->tcr_cur_mode & TCR_SWFDUP ?
+			DUPLEX_FULL : DUPLEX_HALF;
 
-		cmd->autoneg = AUTONEG_DISABLE;
-		cmd->transceiver = XCVR_INTERNAL;
-		cmd->port = 0;
-		cmd->duplex = lp->tcr_cur_mode & TCR_SWFDUP ? DUPLEX_FULL : DUPLEX_HALF;
+		ethtool_convert_legacy_u32_to_link_mode(
+			cmd->link_modes.supported, supported);
 
 		ret = 0;
 	}
@@ -1569,24 +1570,26 @@ smc_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
 }
 
 static int
-smc_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
+smc_ethtool_set_link_ksettings(struct net_device *dev,
+			       const struct ethtool_link_ksettings *cmd)
 {
 	struct smc_local *lp = netdev_priv(dev);
 	int ret;
 
 	if (lp->phy_type != 0) {
 		spin_lock_irq(&lp->lock);
-		ret = mii_ethtool_sset(&lp->mii, cmd);
+		ret = mii_ethtool_set_link_ksettings(&lp->mii, cmd);
 		spin_unlock_irq(&lp->lock);
 	} else {
-		if (cmd->autoneg != AUTONEG_DISABLE ||
-		    cmd->speed != SPEED_10 ||
-		    (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL) ||
-		    (cmd->port != PORT_TP && cmd->port != PORT_AUI))
+		if (cmd->base.autoneg != AUTONEG_DISABLE ||
+		    cmd->base.speed != SPEED_10 ||
+		    (cmd->base.duplex != DUPLEX_HALF &&
+		     cmd->base.duplex != DUPLEX_FULL) ||
+		    (cmd->base.port != PORT_TP && cmd->base.port != PORT_AUI))
 			return -EINVAL;
 
-//		lp->port = cmd->port;
-		lp->ctl_rfduplx = cmd->duplex == DUPLEX_FULL;
+//		lp->port = cmd->base.port;
+		lp->ctl_rfduplx = cmd->base.duplex == DUPLEX_FULL;
 
 //		if (netif_running(dev))
 //			smc_set_port(dev);
@@ -1744,8 +1747,6 @@ static int smc_ethtool_seteeprom(struct net_device *dev,
 
 
 static const struct ethtool_ops smc_ethtool_ops = {
-	.get_settings	= smc_ethtool_getsettings,
-	.set_settings	= smc_ethtool_setsettings,
 	.get_drvinfo	= smc_ethtool_getdrvinfo,
 
 	.get_msglevel	= smc_ethtool_getmsglevel,
@@ -1755,6 +1756,8 @@ static const struct ethtool_ops smc_ethtool_ops = {
 	.get_eeprom_len = smc_ethtool_geteeprom_len,
 	.get_eeprom	= smc_ethtool_geteeprom,
 	.set_eeprom	= smc_ethtool_seteeprom,
+	.get_link_ksettings	= smc_ethtool_get_link_ksettings,
+	.set_link_ksettings	= smc_ethtool_set_link_ksettings,
 };
 
 static const struct net_device_ops smc_netdev_ops = {
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index 296c8efd0038..9e631952b86f 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -74,15 +74,21 @@ config TI_CPSW
 	  will be called cpsw.
 
 config TI_CPTS
-	tristate "TI Common Platform Time Sync (CPTS) Support"
+	bool "TI Common Platform Time Sync (CPTS) Support"
 	depends on TI_CPSW || TI_KEYSTONE_NETCP
-	imply PTP_1588_CLOCK
+	depends on PTP_1588_CLOCK
 	---help---
 	  This driver supports the Common Platform Time Sync unit of
 	  the CPSW Ethernet Switch and Keystone 2 1g/10g Switch Subsystem.
 	  The unit can time stamp PTP UDP/IPv4 and Layer 2 packets, and the
 	  driver offers a PTP Hardware Clock.
 
+config TI_CPTS_MOD
+	tristate
+	depends on TI_CPTS
+	default y if TI_CPSW=y || TI_KEYSTONE_NETCP=y
+	default m
+
 config TI_KEYSTONE_NETCP
 	tristate "TI Keystone NETCP Core Support"
 	select TI_CPSW_ALE
diff --git a/drivers/net/ethernet/ti/Makefile b/drivers/net/ethernet/ti/Makefile
index 1e7c10bf8713..10e6b0ce51ba 100644
--- a/drivers/net/ethernet/ti/Makefile
+++ b/drivers/net/ethernet/ti/Makefile
@@ -12,7 +12,7 @@ obj-$(CONFIG_TI_DAVINCI_MDIO) += davinci_mdio.o
 obj-$(CONFIG_TI_DAVINCI_CPDMA) += davinci_cpdma.o
 obj-$(CONFIG_TI_CPSW_PHY_SEL) += cpsw-phy-sel.o
 obj-$(CONFIG_TI_CPSW_ALE) += cpsw_ale.o
-obj-$(CONFIG_TI_CPTS) += cpts.o
+obj-$(CONFIG_TI_CPTS_MOD) += cpts.o
 obj-$(CONFIG_TI_CPSW) += ti_cpsw.o
 ti_cpsw-y := cpsw.o
 
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 9f3d9c67e3fe..fa674a8bda0c 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1267,6 +1267,7 @@ static void soft_reset_slave(struct cpsw_slave *slave)
 static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
 {
 	u32 slave_port;
+	struct phy_device *phy;
 	struct cpsw_common *cpsw = priv->cpsw;
 
 	soft_reset_slave(slave);
@@ -1300,27 +1301,28 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
 			   1 << slave_port, 0, 0, ALE_MCAST_FWD_2);
 
 	if (slave->data->phy_node) {
-		slave->phy = of_phy_connect(priv->ndev, slave->data->phy_node,
+		phy = of_phy_connect(priv->ndev, slave->data->phy_node,
 				 &cpsw_adjust_link, 0, slave->data->phy_if);
-		if (!slave->phy) {
+		if (!phy) {
 			dev_err(priv->dev, "phy \"%s\" not found on slave %d\n",
 				slave->data->phy_node->full_name,
 				slave->slave_num);
 			return;
 		}
 	} else {
-		slave->phy = phy_connect(priv->ndev, slave->data->phy_id,
+		phy = phy_connect(priv->ndev, slave->data->phy_id,
 				 &cpsw_adjust_link, slave->data->phy_if);
-		if (IS_ERR(slave->phy)) {
+		if (IS_ERR(phy)) {
 			dev_err(priv->dev,
 				"phy \"%s\" not found on slave %d, err %ld\n",
 				slave->data->phy_id, slave->slave_num,
-				PTR_ERR(slave->phy));
-			slave->phy = NULL;
+				PTR_ERR(phy));
 			return;
 		}
 	}
 
+	slave->phy = phy;
+
 	phy_attached_info(slave->phy);
 
 	phy_start(slave->phy);
@@ -1817,6 +1819,8 @@ static void cpsw_ndo_tx_timeout(struct net_device *ndev)
 	}
 
 	cpsw_intr_enable(cpsw);
+	netif_trans_update(ndev);
+	netif_tx_wake_all_queues(ndev);
 }
 
 static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c
index b75d9cdcfb0c..ae48c809bac9 100644
--- a/drivers/net/fjes/fjes_main.c
+++ b/drivers/net/fjes/fjes_main.c
@@ -45,6 +45,8 @@ MODULE_DESCRIPTION("FUJITSU Extended Socket Network Device Driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_VERSION);
 
+#define ACPI_MOTHERBOARD_RESOURCE_HID "PNP0C02"
+
 static int fjes_request_irq(struct fjes_adapter *);
 static void fjes_free_irq(struct fjes_adapter *);
 
@@ -78,7 +80,7 @@ static void fjes_rx_irq(struct fjes_adapter *, int);
 static int fjes_poll(struct napi_struct *, int);
 
 static const struct acpi_device_id fjes_acpi_ids[] = {
-	{"PNP0C02", 0},
+	{ACPI_MOTHERBOARD_RESOURCE_HID, 0},
 	{"", 0},
 };
 MODULE_DEVICE_TABLE(acpi, fjes_acpi_ids);
@@ -115,18 +117,17 @@ static struct resource fjes_resource[] = {
 	},
 };
 
-static int fjes_acpi_add(struct acpi_device *device)
+static bool is_extended_socket_device(struct acpi_device *device)
 {
 	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
 	char str_buf[sizeof(FJES_ACPI_SYMBOL) + 1];
-	struct platform_device *plat_dev;
 	union acpi_object *str;
 	acpi_status status;
 	int result;
 
 	status = acpi_evaluate_object(device->handle, "_STR", NULL, &buffer);
 	if (ACPI_FAILURE(status))
-		return -ENODEV;
+		return false;
 
 	str = buffer.pointer;
 	result = utf16s_to_utf8s((wchar_t *)str->string.pointer,
@@ -136,10 +137,42 @@ static int fjes_acpi_add(struct acpi_device *device)
 
 	if (strncmp(FJES_ACPI_SYMBOL, str_buf, strlen(FJES_ACPI_SYMBOL)) != 0) {
 		kfree(buffer.pointer);
-		return -ENODEV;
+		return false;
 	}
 	kfree(buffer.pointer);
 
+	return true;
+}
+
+static int acpi_check_extended_socket_status(struct acpi_device *device)
+{
+	unsigned long long sta;
+	acpi_status status;
+
+	status = acpi_evaluate_integer(device->handle, "_STA", NULL, &sta);
+	if (ACPI_FAILURE(status))
+		return -ENODEV;
+
+	if (!((sta & ACPI_STA_DEVICE_PRESENT) &&
+	      (sta & ACPI_STA_DEVICE_ENABLED) &&
+	      (sta & ACPI_STA_DEVICE_UI) &&
+	      (sta & ACPI_STA_DEVICE_FUNCTIONING)))
+		return -ENODEV;
+
+	return 0;
+}
+
+static int fjes_acpi_add(struct acpi_device *device)
+{
+	struct platform_device *plat_dev;
+	acpi_status status;
+
+	if (!is_extended_socket_device(device))
+		return -ENODEV;
+
+	if (acpi_check_extended_socket_status(device))
+		return -ENODEV;
+
 	status = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
 				     fjes_get_acpi_resource, fjes_resource);
 	if (ACPI_FAILURE(status))
@@ -1316,7 +1349,7 @@ static void fjes_netdev_setup(struct net_device *netdev)
 	netdev->min_mtu = fjes_support_mtu[0];
 	netdev->max_mtu = fjes_support_mtu[3];
 	netdev->flags |= IFF_BROADCAST;
-	netdev->features |= NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_FILTER;
+	netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
 }
 
 static void fjes_irq_watch_task(struct work_struct *work)
@@ -1473,11 +1506,44 @@ static void fjes_watch_unshare_task(struct work_struct *work)
 	}
 }
 
+static acpi_status
+acpi_find_extended_socket_device(acpi_handle obj_handle, u32 level,
+				 void *context, void **return_value)
+{
+	struct acpi_device *device;
+	bool *found = context;
+	int result;
+
+	result = acpi_bus_get_device(obj_handle, &device);
+	if (result)
+		return AE_OK;
+
+	if (strcmp(acpi_device_hid(device), ACPI_MOTHERBOARD_RESOURCE_HID))
+		return AE_OK;
+
+	if (!is_extended_socket_device(device))
+		return AE_OK;
+
+	if (acpi_check_extended_socket_status(device))
+		return AE_OK;
+
+	*found = true;
+	return AE_CTRL_TERMINATE;
+}
+
 /* fjes_init_module - Driver Registration Routine */
 static int __init fjes_init_module(void)
 {
+	bool found = false;
 	int result;
 
+	acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX,
+			    acpi_find_extended_socket_device, NULL, &found,
+			    NULL);
+
+	if (!found)
+		return -ENODEV;
+
 	pr_info("%s - version %s - %s\n",
 		fjes_driver_string, fjes_driver_version, fjes_copyright);
 
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index d3e73ac158ae..f9f3dba7a588 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -700,6 +700,8 @@ struct net_device_context {
 
 	u32 tx_checksum_mask;
 
+	u32 tx_send_table[VRSS_SEND_TAB_SIZE];
+
 	/* Ethtool settings */
 	u8 duplex;
 	u32 speed;
@@ -757,7 +759,6 @@ struct netvsc_device {
 
 	struct nvsp_message revoke_packet;
 
-	u32 send_table[VRSS_SEND_TAB_SIZE];
 	u32 max_chn;
 	u32 num_chn;
 	spinlock_t sc_lock; /* Protects num_sc_offered variable */
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index d35ebd993b38..8dd0b8770328 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -1136,15 +1136,11 @@ static void netvsc_receive(struct net_device *ndev,
 static void netvsc_send_table(struct hv_device *hdev,
 			      struct nvsp_message *nvmsg)
 {
-	struct netvsc_device *nvscdev;
 	struct net_device *ndev = hv_get_drvdata(hdev);
+	struct net_device_context *net_device_ctx = netdev_priv(ndev);
 	int i;
 	u32 count, *tab;
 
-	nvscdev = get_outbound_net_device(hdev);
-	if (!nvscdev)
-		return;
-
 	count = nvmsg->msg.v5_msg.send_table.count;
 	if (count != VRSS_SEND_TAB_SIZE) {
 		netdev_err(ndev, "Received wrong send-table size:%u\n", count);
@@ -1155,7 +1151,7 @@ static void netvsc_send_table(struct hv_device *hdev,
 		      nvmsg->msg.v5_msg.send_table.offset);
 
 	for (i = 0; i < count; i++)
-		nvscdev->send_table[i] = tab[i];
+		net_device_ctx->tx_send_table[i] = tab[i];
 }
 
 static void netvsc_send_vf(struct net_device_context *net_device_ctx,
@@ -1235,8 +1231,11 @@ void netvsc_channel_cb(void *context)
 		return;
 
 	net_device = net_device_to_netvsc_device(ndev);
-	if (unlikely(net_device->destroy) &&
-	    netvsc_channel_idle(net_device, q_idx))
+	if (unlikely(!net_device))
+		return;
+
+	if (unlikely(net_device->destroy &&
+		     netvsc_channel_idle(net_device, q_idx)))
 		return;
 
 	/* commit_rd_index() -> hv_signal_on_read() needs this. */
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index bc05c895d958..5ede87f30463 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -206,17 +206,15 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
 			void *accel_priv, select_queue_fallback_t fallback)
 {
 	struct net_device_context *net_device_ctx = netdev_priv(ndev);
-	struct netvsc_device *nvsc_dev = net_device_ctx->nvdev;
+	unsigned int num_tx_queues = ndev->real_num_tx_queues;
 	struct sock *sk = skb->sk;
 	int q_idx = sk_tx_queue_get(sk);
 
-	if (q_idx < 0 || skb->ooo_okay ||
-	    q_idx >= ndev->real_num_tx_queues) {
+	if (q_idx < 0 || skb->ooo_okay || q_idx >= num_tx_queues) {
 		u16 hash = __skb_tx_hash(ndev, skb, VRSS_SEND_TAB_SIZE);
 		int new_idx;
 
-		new_idx = nvsc_dev->send_table[hash]
-			% nvsc_dev->num_chn;
+		new_idx = net_device_ctx->tx_send_table[hash] % num_tx_queues;
 
 		if (q_idx != new_idx && sk &&
 		    sk_fullsock(sk) && rcu_access_pointer(sk->sk_dst_cache))
@@ -225,9 +223,6 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
 		q_idx = new_idx;
 	}
 
-	if (unlikely(!nvsc_dev->chan_table[q_idx].channel))
-		q_idx = 0;
-
 	return q_idx;
 }
 
diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c
index ffedad2a360a..15b920086251 100644
--- a/drivers/net/irda/vlsi_ir.c
+++ b/drivers/net/irda/vlsi_ir.c
@@ -418,8 +418,9 @@ static struct vlsi_ring *vlsi_alloc_ring(struct pci_dev *pdev, struct ring_descr
 		memset(rd, 0, sizeof(*rd));
 		rd->hw = hwmap + i;
 		rd->buf = kmalloc(len, GFP_KERNEL|GFP_DMA);
-		if (rd->buf == NULL ||
-		    !(busaddr = pci_map_single(pdev, rd->buf, len, dir))) {
+		if (rd->buf)
+			busaddr = pci_map_single(pdev, rd->buf, len, dir);
+		if (rd->buf == NULL || pci_dma_mapping_error(pdev, busaddr)) {
 			if (rd->buf) {
 				net_err_ratelimited("%s: failed to create PCI-MAP for %p\n",
 						    __func__, rd->buf);
@@ -430,8 +431,7 @@ static struct vlsi_ring *vlsi_alloc_ring(struct pci_dev *pdev, struct ring_descr
 				rd = r->rd + j;
 				busaddr = rd_get_addr(rd);
 				rd_set_addr_status(rd, 0, 0);
-				if (busaddr)
-					pci_unmap_single(pdev, busaddr, len, dir);
+				pci_unmap_single(pdev, busaddr, len, dir);
 				kfree(rd->buf);
 				rd->buf = NULL;
 			}
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index f9d0fa315a47..272b051a0199 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -1883,17 +1883,6 @@ static int m88e1510_probe(struct phy_device *phydev)
 	return m88e1510_hwmon_probe(phydev);
 }
 
-static void marvell_remove(struct phy_device *phydev)
-{
-#ifdef CONFIG_HWMON
-
-	struct marvell_priv *priv = phydev->priv;
-
-	if (priv && priv->hwmon_dev)
-		hwmon_device_unregister(priv->hwmon_dev);
-#endif
-}
-
 static struct phy_driver marvell_drivers[] = {
 	{
 		.phy_id = MARVELL_PHY_ID_88E1101,
@@ -1974,7 +1963,6 @@ static struct phy_driver marvell_drivers[] = {
 		.features = PHY_GBIT_FEATURES,
 		.flags = PHY_HAS_INTERRUPT,
 		.probe = &m88e1121_probe,
-		.remove = &marvell_remove,
 		.config_init = &m88e1121_config_init,
 		.config_aneg = &m88e1121_config_aneg,
 		.read_status = &marvell_read_status,
@@ -2087,7 +2075,6 @@ static struct phy_driver marvell_drivers[] = {
 		.features = PHY_GBIT_FEATURES | SUPPORTED_FIBRE,
 		.flags = PHY_HAS_INTERRUPT,
 		.probe = &m88e1510_probe,
-		.remove = &marvell_remove,
 		.config_init = &m88e1510_config_init,
 		.config_aneg = &m88e1510_config_aneg,
 		.read_status = &marvell_read_status,
@@ -2109,7 +2096,6 @@ static struct phy_driver marvell_drivers[] = {
 		.features = PHY_GBIT_FEATURES,
 		.flags = PHY_HAS_INTERRUPT,
 		.probe = m88e1510_probe,
-		.remove = &marvell_remove,
 		.config_init = &marvell_config_init,
 		.config_aneg = &m88e1510_config_aneg,
 		.read_status = &marvell_read_status,
@@ -2127,7 +2113,6 @@ static struct phy_driver marvell_drivers[] = {
 		.phy_id_mask = MARVELL_PHY_ID_MASK,
 		.name = "Marvell 88E1545",
 		.probe = m88e1510_probe,
-		.remove = &marvell_remove,
 		.features = PHY_GBIT_FEATURES,
 		.flags = PHY_HAS_INTERRUPT,
 		.config_init = &marvell_config_init,
diff --git a/drivers/net/phy/mdio-boardinfo.c b/drivers/net/phy/mdio-boardinfo.c
index 6b988f77da08..61941e29daae 100644
--- a/drivers/net/phy/mdio-boardinfo.c
+++ b/drivers/net/phy/mdio-boardinfo.c
@@ -84,3 +84,4 @@ int mdiobus_register_board_info(const struct mdio_board_info *info,
 
 	return 0;
 }
+EXPORT_SYMBOL(mdiobus_register_board_info);
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 1be69d8bc909..a2bfc82e95d7 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -681,7 +681,7 @@ void phy_stop_machine(struct phy_device *phydev)
 	cancel_delayed_work_sync(&phydev->state_queue);
 
 	mutex_lock(&phydev->lock);
-	if (phydev->state > PHY_UP)
+	if (phydev->state > PHY_UP && phydev->state != PHY_HALTED)
 		phydev->state = PHY_UP;
 	mutex_unlock(&phydev->lock);
 }
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index daec6555f3b1..5198ccfa347f 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -1864,7 +1864,7 @@ static struct phy_driver genphy_driver[] = {
 	.phy_id		= 0xffffffff,
 	.phy_id_mask	= 0xffffffff,
 	.name		= "Generic PHY",
-	.soft_reset	= genphy_soft_reset,
+	.soft_reset	= genphy_no_soft_reset,
 	.config_init	= genphy_config_init,
 	.features	= PHY_GBIT_FEATURES | SUPPORTED_MII |
 			  SUPPORTED_AUI | SUPPORTED_FIBRE |
diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c
index 93ffedfa2994..1e2d4f1179da 100644
--- a/drivers/net/phy/spi_ks8995.c
+++ b/drivers/net/phy/spi_ks8995.c
@@ -491,13 +491,14 @@ static int ks8995_probe(struct spi_device *spi)
 	if (err)
 		return err;
 
-	ks->regs_attr.size = ks->chip->regs_size;
 	memcpy(&ks->regs_attr, &ks8995_registers_attr, sizeof(ks->regs_attr));
+	ks->regs_attr.size = ks->chip->regs_size;
 
 	err = ks8995_reset(ks);
 	if (err)
 		return err;
 
+	sysfs_attr_init(&ks->regs_attr.attr);
 	err = sysfs_create_bin_file(&spi->dev.kobj, &ks->regs_attr);
 	if (err) {
 		dev_err(&spi->dev, "unable to create sysfs file, err=%d\n",
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 4a24b5d15f5a..1b52520715ae 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -2072,6 +2072,7 @@ static int team_dev_type_check_change(struct net_device *dev,
 static void team_setup(struct net_device *dev)
 {
 	ether_setup(dev);
+	dev->max_mtu = ETH_MAX_MTU;
 
 	dev->netdev_ops = &team_netdev_ops;
 	dev->ethtool_ops = &team_ethtool_ops;
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index dc1b1dd9157c..cc88cd7856f5 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -822,7 +822,18 @@ static void tun_net_uninit(struct net_device *dev)
822/* Net device open. */ 822/* Net device open. */
823static int tun_net_open(struct net_device *dev) 823static int tun_net_open(struct net_device *dev)
824{ 824{
825 struct tun_struct *tun = netdev_priv(dev);
826 int i;
827
825 netif_tx_start_all_queues(dev); 828 netif_tx_start_all_queues(dev);
829
830 for (i = 0; i < tun->numqueues; i++) {
831 struct tun_file *tfile;
832
833 tfile = rtnl_dereference(tun->tfiles[i]);
834 tfile->socket.sk->sk_write_space(tfile->socket.sk);
835 }
836
826 return 0; 837 return 0;
827} 838}
828 839
@@ -1103,9 +1114,10 @@ static unsigned int tun_chr_poll(struct file *file, poll_table *wait)
 	if (!skb_array_empty(&tfile->tx_array))
 		mask |= POLLIN | POLLRDNORM;
 
-	if (sock_writeable(sk) ||
-	    (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
-	     sock_writeable(sk)))
+	if (tun->dev->flags & IFF_UP &&
+	    (sock_writeable(sk) ||
+	     (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
+	      sock_writeable(sk))))
 		mask |= POLLOUT | POLLWRNORM;
 
 	if (tun->dev->reg_state != NETREG_REGISTERED)
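
Note: gating POLLOUT on IFF_UP keeps poll() from reporting a writable tun device whose interface is down; the inner expression is the usual "check, arm SOCKWQ_ASYNC_NOSPACE, re-check" handshake that avoids losing a wakeup between the first writability test and the flag being armed. Condensed as a sketch (the helper name is hypothetical):

	/* Equivalent shape of the writability test used above. */
	static bool tun_sock_writeable(struct tun_struct *tun, struct sock *sk)
	{
		if (!(tun->dev->flags & IFF_UP))
			return false;
		if (sock_writeable(sk))
			return true;
		if (test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags))
			return false;	/* already armed, still no space */
		return sock_writeable(sk);	/* re-check after arming */
	}
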
@@ -1919,6 +1931,8 @@ static int set_offload(struct tun_struct *tun, unsigned long arg)
 		return -EINVAL;
 
 	tun->set_features = features;
+	tun->dev->wanted_features &= ~TUN_USER_FEATURES;
+	tun->dev->wanted_features |= features;
 	netdev_update_features(tun->dev);
 
 	return 0;
@@ -2570,7 +2584,6 @@ static int __init tun_init(void)
 	int ret = 0;
 
 	pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
-	pr_info("%s\n", DRV_COPYRIGHT);
 
 	ret = rtnl_link_register(&tun_link_ops);
 	if (ret) {
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index f5552aaaa77a..f3ae88fdf332 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -532,6 +532,7 @@ static const struct driver_info wwan_info = {
 #define LENOVO_VENDOR_ID	0x17ef
 #define NVIDIA_VENDOR_ID	0x0955
 #define HP_VENDOR_ID		0x03f0
+#define MICROSOFT_VENDOR_ID	0x045e
 
 static const struct usb_device_id	products[] = {
 /* BLACKLIST !!
@@ -761,6 +762,20 @@ static const struct usb_device_id products[] = {
 	.driver_info = 0,
 },
 
+/* Microsoft Surface 2 dock (based on Realtek RTL8152) */
+{
+	USB_DEVICE_AND_INTERFACE_INFO(MICROSOFT_VENDOR_ID, 0x07ab, USB_CLASS_COMM,
+			USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+	.driver_info = 0,
+},
+
+/* Microsoft Surface 3 dock (based on Realtek RTL8153) */
+{
+	USB_DEVICE_AND_INTERFACE_INFO(MICROSOFT_VENDOR_ID, 0x07c6, USB_CLASS_COMM,
+			USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+	.driver_info = 0,
+},
+
 /* WHITELIST!!!
  *
  * CDC Ether uses two interfaces, not necessarily consecutive.
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 805674550683..156f7f85e486 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -580,6 +580,10 @@ static const struct usb_device_id products[] = {
 		USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 0x01, 0x69),
 		.driver_info = (unsigned long)&qmi_wwan_info,
 	},
+	{	/* Motorola Mapphone devices with MDM6600 */
+		USB_VENDOR_AND_INTERFACE_INFO(0x22b8, USB_CLASS_VENDOR_SPEC, 0xfb, 0xff),
+		.driver_info = (unsigned long)&qmi_wwan_info,
+	},
 
 	/* 2. Combined interface devices matching on class+protocol */
 	{	/* Huawei E367 and possibly others in "Windows mode" */
@@ -925,6 +929,8 @@ static const struct usb_device_id products[] = {
 	{QMI_FIXED_INTF(0x413c, 0x81a9, 8)},	/* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
 	{QMI_FIXED_INTF(0x413c, 0x81b1, 8)},	/* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */
 	{QMI_FIXED_INTF(0x413c, 0x81b3, 8)},	/* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
+	{QMI_FIXED_INTF(0x413c, 0x81b6, 8)},	/* Dell Wireless 5811e */
+	{QMI_FIXED_INTF(0x413c, 0x81b6, 10)},	/* Dell Wireless 5811e */
 	{QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)},	/* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
 	{QMI_FIXED_INTF(0x22de, 0x9061, 3)},	/* WeTelecom WPD-600N */
 	{QMI_FIXED_INTF(0x1e0e, 0x9001, 5)},	/* SIMCom 7230E */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 986243c932cc..07f788c49d57 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -32,7 +32,7 @@
 #define NETNEXT_VERSION		"08"
 
 /* Information for net */
-#define NET_VERSION		"8"
+#define NET_VERSION		"9"
 
 #define DRIVER_VERSION		"v1." NETNEXT_VERSION "." NET_VERSION
 #define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
@@ -501,6 +501,8 @@ enum rtl_register_content {
 #define RTL8153_RMS		RTL8153_MAX_PACKET
 #define RTL8152_TX_TIMEOUT	(5 * HZ)
 #define RTL8152_NAPI_WEIGHT	64
+#define rx_reserved_size(x)	((x) + VLAN_ETH_HLEN + CRC_SIZE + \
+				 sizeof(struct rx_desc) + RX_ALIGN)
 
 /* rtl8152 flags */
 enum rtl8152_flags {
@@ -515,6 +517,7 @@ enum rtl8152_flags {
 
 /* Define these values to match your device */
 #define VENDOR_ID_REALTEK		0x0bda
+#define VENDOR_ID_MICROSOFT		0x045e
 #define VENDOR_ID_SAMSUNG		0x04e8
 #define VENDOR_ID_LENOVO		0x17ef
 #define VENDOR_ID_NVIDIA		0x0955
@@ -1292,6 +1295,7 @@ static void intr_callback(struct urb *urb)
 		}
 	} else {
 		if (netif_carrier_ok(tp->netdev)) {
+			netif_stop_queue(tp->netdev);
 			set_bit(RTL8152_LINK_CHG, &tp->flags);
 			schedule_delayed_work(&tp->schedule, 0);
 		}
@@ -1362,6 +1366,7 @@ static int alloc_all_mem(struct r8152 *tp)
 	spin_lock_init(&tp->rx_lock);
 	spin_lock_init(&tp->tx_lock);
 	INIT_LIST_HEAD(&tp->tx_free);
+	INIT_LIST_HEAD(&tp->rx_done);
 	skb_queue_head_init(&tp->tx_queue);
 	skb_queue_head_init(&tp->rx_queue);
 
@@ -2252,8 +2257,7 @@ static void r8153_set_rx_early_timeout(struct r8152 *tp)
 
 static void r8153_set_rx_early_size(struct r8152 *tp)
 {
-	u32 mtu = tp->netdev->mtu;
-	u32 ocp_data = (agg_buf_sz - mtu - VLAN_ETH_HLEN - VLAN_HLEN) / 8;
+	u32 ocp_data = (agg_buf_sz - rx_reserved_size(tp->netdev->mtu)) / 4;
 
 	ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EARLY_SIZE, ocp_data);
 }
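
Note: the new formula charges the full per-packet overhead — VLAN/Ethernet header, CRC, the in-buffer rx descriptor and alignment padding — against the aggregation buffer, and programs USB_RX_EARLY_SIZE in 4-byte units rather than the old 8-byte math, so a large MTU can no longer overrun the buffer. A standalone check with assumed constants (the 16 KiB agg_buf_sz, 24-byte rx_desc and RX_ALIGN = 8 are my reading of the driver, not values shown in this diff):

	#include <stdio.h>

	#define VLAN_ETH_HLEN	18
	#define CRC_SIZE	4
	#define RX_DESC_SIZE	24	/* assumed sizeof(struct rx_desc) */
	#define RX_ALIGN	8	/* assumed alignment padding */
	#define AGG_BUF_SZ	16384	/* assumed aggregation buffer size */

	static unsigned int rx_reserved_size(unsigned int mtu)
	{
		return mtu + VLAN_ETH_HLEN + CRC_SIZE + RX_DESC_SIZE + RX_ALIGN;
	}

	int main(void)
	{
		/* value written to USB_RX_EARLY_SIZE, in 4-byte units */
		printf("early rx size @ mtu 1500: %u\n",
		       (AGG_BUF_SZ - rx_reserved_size(1500)) / 4);
		printf("early rx size @ mtu 9000: %u\n",
		       (AGG_BUF_SZ - rx_reserved_size(9000)) / 4);
		return 0;
	}
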
@@ -2898,7 +2902,8 @@ static void r8153_first_init(struct r8152 *tp)
 
 	rtl_rx_vlan_en(tp, tp->netdev->features & NETIF_F_HW_VLAN_CTAG_RX);
 
-	ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, RTL8153_RMS);
+	ocp_data = tp->netdev->mtu + VLAN_ETH_HLEN + CRC_SIZE;
+	ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, ocp_data);
 	ocp_write_byte(tp, MCU_TYPE_PLA, PLA_MTPS, MTPS_JUMBO);
 
 	ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TCR0);
@@ -2950,7 +2955,8 @@ static void r8153_enter_oob(struct r8152 *tp)
 		usleep_range(1000, 2000);
 	}
 
-	ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, RTL8153_RMS);
+	ocp_data = tp->netdev->mtu + VLAN_ETH_HLEN + CRC_SIZE;
+	ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, ocp_data);
 
 	ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG);
 	ocp_data &= ~TEREDO_WAKE_MASK;
@@ -3165,6 +3171,9 @@ static void set_carrier(struct r8152 *tp)
 			napi_enable(&tp->napi);
 			netif_wake_queue(netdev);
 			netif_info(tp, link, netdev, "carrier on\n");
+		} else if (netif_queue_stopped(netdev) &&
+			   skb_queue_len(&tp->tx_queue) < tp->tx_qlen) {
+			netif_wake_queue(netdev);
 		}
 	} else {
 		if (netif_carrier_ok(netdev)) {
@@ -3698,8 +3707,18 @@ static int rtl8152_resume(struct usb_interface *intf)
 		tp->rtl_ops.autosuspend_en(tp, false);
 		napi_disable(&tp->napi);
 		set_bit(WORK_ENABLE, &tp->flags);
-		if (netif_carrier_ok(tp->netdev))
-			rtl_start_rx(tp);
+
+		if (netif_carrier_ok(tp->netdev)) {
+			if (rtl8152_get_speed(tp) & LINK_STATUS) {
+				rtl_start_rx(tp);
+			} else {
+				netif_carrier_off(tp->netdev);
+				tp->rtl_ops.disable(tp);
+				netif_info(tp, link, tp->netdev,
+					   "linking down\n");
+			}
+		}
+
 		napi_enable(&tp->napi);
 		clear_bit(SELECTIVE_SUSPEND, &tp->flags);
 		smp_mb__after_atomic();
@@ -4200,8 +4219,14 @@ static int rtl8152_change_mtu(struct net_device *dev, int new_mtu)
 
 	dev->mtu = new_mtu;
 
-	if (netif_running(dev) && netif_carrier_ok(dev))
-		r8153_set_rx_early_size(tp);
+	if (netif_running(dev)) {
+		u32 rms = new_mtu + VLAN_ETH_HLEN + CRC_SIZE;
+
+		ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, rms);
+
+		if (netif_carrier_ok(dev))
+			r8153_set_rx_early_size(tp);
+	}
 
 	mutex_unlock(&tp->control);
 
@@ -4497,6 +4522,8 @@ static void rtl8152_disconnect(struct usb_interface *intf)
 static struct usb_device_id rtl8152_table[] = {
 	{REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8152)},
 	{REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8153)},
+	{REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07ab)},
+	{REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07c6)},
 	{REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)},
 	{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x304f)},
 	{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3062)},
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 22379da63400..d6988db1930d 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -340,6 +340,7 @@ static netdev_tx_t is_ip_tx_frame(struct sk_buff *skb, struct net_device *dev)
 
 static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
 {
+	int len = skb->len;
 	netdev_tx_t ret = is_ip_tx_frame(skb, dev);
 
 	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
@@ -347,7 +348,7 @@ static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
 
 		u64_stats_update_begin(&dstats->syncp);
 		dstats->tx_pkts++;
-		dstats->tx_bytes += skb->len;
+		dstats->tx_bytes += len;
 		u64_stats_update_end(&dstats->syncp);
 	} else {
 		this_cpu_inc(dev->dstats->tx_drps);
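
Note: is_ip_tx_frame() may transmit and free the skb, so reading skb->len afterwards for the byte counter was a use-after-free; the length is now captured first. The general shape, with hypothetical helper names:

	/* Sketch: copy out anything you need from an skb *before* calling
	 * a function that may consume it.
	 */
	static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		int len = skb->len;			/* safe copy up front */
		netdev_tx_t ret = do_transmit(skb, dev);	/* may free skb */

		if (ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)
			count_tx_bytes(dev, len);	/* never touch skb here */
		return ret;
	}
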
@@ -461,8 +462,10 @@ static void vrf_rt6_release(struct net_device *dev, struct net_vrf *vrf)
 	}
 
 	if (rt6_local) {
-		if (rt6_local->rt6i_idev)
+		if (rt6_local->rt6i_idev) {
 			in6_dev_put(rt6_local->rt6i_idev);
+			rt6_local->rt6i_idev = NULL;
+		}
 
 		dst = &rt6_local->dst;
 		dev_put(dst->dev);
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index e375560cc74e..bdb6ae16d4a8 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -2976,6 +2976,44 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
 	return 0;
 }
 
+static int __vxlan_dev_create(struct net *net, struct net_device *dev,
+			      struct vxlan_config *conf)
+{
+	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
+	struct vxlan_dev *vxlan = netdev_priv(dev);
+	int err;
+
+	err = vxlan_dev_configure(net, dev, conf, false);
+	if (err)
+		return err;
+
+	dev->ethtool_ops = &vxlan_ethtool_ops;
+
+	/* create an fdb entry for a valid default destination */
+	if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) {
+		err = vxlan_fdb_create(vxlan, all_zeros_mac,
+				       &vxlan->default_dst.remote_ip,
+				       NUD_REACHABLE | NUD_PERMANENT,
+				       NLM_F_EXCL | NLM_F_CREATE,
+				       vxlan->cfg.dst_port,
+				       vxlan->default_dst.remote_vni,
+				       vxlan->default_dst.remote_vni,
+				       vxlan->default_dst.remote_ifindex,
+				       NTF_SELF);
+		if (err)
+			return err;
+	}
+
+	err = register_netdevice(dev);
+	if (err) {
+		vxlan_fdb_delete_default(vxlan, vxlan->default_dst.remote_vni);
+		return err;
+	}
+
+	list_add(&vxlan->next, &vn->vxlan_list);
+	return 0;
+}
+
 static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[],
 			 struct net_device *dev, struct vxlan_config *conf,
 			 bool changelink)
@@ -3172,8 +3210,6 @@ static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[],
 static int vxlan_newlink(struct net *src_net, struct net_device *dev,
 			 struct nlattr *tb[], struct nlattr *data[])
 {
-	struct vxlan_net *vn = net_generic(src_net, vxlan_net_id);
-	struct vxlan_dev *vxlan = netdev_priv(dev);
 	struct vxlan_config conf;
 	int err;
 
@@ -3181,36 +3217,7 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev,
 	if (err)
 		return err;
 
-	err = vxlan_dev_configure(src_net, dev, &conf, false);
-	if (err)
-		return err;
-
-	dev->ethtool_ops = &vxlan_ethtool_ops;
-
-	/* create an fdb entry for a valid default destination */
-	if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) {
-		err = vxlan_fdb_create(vxlan, all_zeros_mac,
-				       &vxlan->default_dst.remote_ip,
-				       NUD_REACHABLE | NUD_PERMANENT,
-				       NLM_F_EXCL | NLM_F_CREATE,
-				       vxlan->cfg.dst_port,
-				       vxlan->default_dst.remote_vni,
-				       vxlan->default_dst.remote_vni,
-				       vxlan->default_dst.remote_ifindex,
-				       NTF_SELF);
-		if (err)
-			return err;
-	}
-
-	err = register_netdevice(dev);
-	if (err) {
-		vxlan_fdb_delete_default(vxlan, vxlan->default_dst.remote_vni);
-		return err;
-	}
-
-	list_add(&vxlan->next, &vn->vxlan_list);
-
-	return 0;
+	return __vxlan_dev_create(src_net, dev, &conf);
 }
 
 static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
@@ -3440,7 +3447,7 @@ struct net_device *vxlan_dev_create(struct net *net, const char *name,
 	if (IS_ERR(dev))
 		return dev;
 
-	err = vxlan_dev_configure(net, dev, conf, false);
+	err = __vxlan_dev_create(net, dev, conf);
 	if (err < 0) {
 		free_netdev(dev);
 		return ERR_PTR(err);
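
Note: the refactor exists because vxlan_dev_create(), the in-kernel entry point (used by e.g. openvswitch), previously stopped after vxlan_dev_configure() and never created the default fdb entry or registered the netdevice the way the netlink path did; funnelling both callers through __vxlan_dev_create() keeps setup and error unwinding identical. The "shared tail" pattern, sketched with hypothetical names:

	/* Both public entry points delegate to one static helper so the
	 * setup steps and the unwind order cannot drift apart.
	 */
	static int __create(struct net *net, struct net_device *dev,
			    struct config *conf)
	{
		int err = configure(net, dev, conf);

		if (err)
			return err;
		err = register_netdevice(dev);
		if (err)
			undo_configure(dev);	/* single shared unwind */
		return err;
	}
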
diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c
index a5045b5279d7..6742ae605660 100644
--- a/drivers/net/wan/fsl_ucc_hdlc.c
+++ b/drivers/net/wan/fsl_ucc_hdlc.c
@@ -381,8 +381,8 @@ static netdev_tx_t ucc_hdlc_tx(struct sk_buff *skb, struct net_device *dev)
 	/* set bd status and length */
 	bd_status = (bd_status & T_W_S) | T_R_S | T_I_S | T_L_S | T_TC_S;
 
-	iowrite16be(bd_status, &bd->status);
 	iowrite16be(skb->len, &bd->length);
+	iowrite16be(bd_status, &bd->status);
 
 	/* Move to next BD in the ring */
 	if (!(bd_status & T_W_S))
@@ -457,7 +457,7 @@ static int hdlc_rx_done(struct ucc_hdlc_private *priv, int rx_work_limit)
 	struct sk_buff *skb;
 	hdlc_device *hdlc = dev_to_hdlc(dev);
 	struct qe_bd *bd;
-	u32 bd_status;
+	u16 bd_status;
 	u16 length, howmany = 0;
 	u8 *bdbuffer;
 	int i;
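
Note: two independent fixes here: the buffer descriptor's length must be written before the status word hands ownership to the QE DMA engine, and bd_status is narrowed to u16 to match the 16-bit descriptor field it is read from. The ownership-ordering idiom, as a hedged sketch:

	/* Sketch: populate every field the DMA engine reads, then flip the
	 * ready/ownership bit last (iowrite16be() keeps these MMIO writes
	 * ordered), so the hardware never sees a half-written descriptor.
	 */
	static void bd_hand_to_hw(struct qe_bd __iomem *bd, u16 status, u16 len)
	{
		iowrite16be(len, &bd->length);		/* payload size first */
		iowrite16be(status | T_R_S, &bd->status);	/* ownership last */
	}
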
diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c
index e7f5910a6519..f8eb66ef2944 100644
--- a/drivers/net/wimax/i2400m/usb.c
+++ b/drivers/net/wimax/i2400m/usb.c
@@ -467,6 +467,9 @@ int i2400mu_probe(struct usb_interface *iface,
 	struct i2400mu *i2400mu;
 	struct usb_device *usb_dev = interface_to_usbdev(iface);
 
+	if (iface->cur_altsetting->desc.bNumEndpoints < 4)
+		return -ENODEV;
+
 	if (usb_dev->speed != USB_SPEED_HIGH)
 		dev_err(dev, "device not connected as high speed\n");
 
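
Note: the probe now rejects interfaces with fewer than four endpoints before the driver ever indexes the endpoint array; descriptor-supplied counts are attacker-controlled on USB, so this closes an out-of-bounds read against malicious or malformed devices. The general pattern:

	/* Sketch: validate descriptor counts before trusting them. */
	static int example_probe(struct usb_interface *iface)
	{
		struct usb_host_interface *alt = iface->cur_altsetting;

		if (alt->desc.bNumEndpoints < 4)
			return -ENODEV;	/* not the layout this driver expects */

		/* alt->endpoint[0..3] may now be dereferenced safely */
		return 0;
	}
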
diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c
index 33fb26833cd0..d9f37ee4bfdd 100644
--- a/drivers/net/wireless/ath/ath10k/hw.c
+++ b/drivers/net/wireless/ath/ath10k/hw.c
@@ -51,7 +51,7 @@ const struct ath10k_hw_regs qca6174_regs = {
 	.rtc_soc_base_address		= 0x00000800,
 	.rtc_wmac_base_address		= 0x00001000,
 	.soc_core_base_address		= 0x0003a000,
-	.wlan_mac_base_address		= 0x00020000,
+	.wlan_mac_base_address		= 0x00010000,
 	.ce_wrapper_base_address	= 0x00034000,
 	.ce0_base_address		= 0x00034400,
 	.ce1_base_address		= 0x00034800,
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
index de19c7c92bc6..85d949e03f79 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
@@ -2238,14 +2238,16 @@ int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev)
 	struct brcmf_cfg80211_info *cfg = wiphy_priv(wiphy);
 	struct brcmf_p2p_info *p2p = &cfg->p2p;
 	struct brcmf_cfg80211_vif *vif;
+	enum nl80211_iftype iftype;
 	bool wait_for_disable = false;
 	int err;
 
 	brcmf_dbg(TRACE, "delete P2P vif\n");
 	vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev);
 
+	iftype = vif->wdev.iftype;
 	brcmf_cfg80211_arm_vif_event(cfg, vif);
-	switch (vif->wdev.iftype) {
+	switch (iftype) {
 	case NL80211_IFTYPE_P2P_CLIENT:
 		if (test_bit(BRCMF_VIF_STATUS_DISCONNECTING, &vif->sme_state))
 			wait_for_disable = true;
@@ -2275,7 +2277,7 @@ int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev)
 						    BRCMF_P2P_DISABLE_TIMEOUT);
 
 	err = 0;
-	if (vif->wdev.iftype != NL80211_IFTYPE_P2P_DEVICE) {
+	if (iftype != NL80211_IFTYPE_P2P_DEVICE) {
 		brcmf_vif_clear_mgmt_ies(vif);
 		err = brcmf_p2p_release_p2p_if(vif);
 	}
@@ -2291,7 +2293,7 @@ int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev)
 	brcmf_remove_interface(vif->ifp, true);
 
 	brcmf_cfg80211_arm_vif_event(cfg, NULL);
-	if (vif->wdev.iftype != NL80211_IFTYPE_P2P_DEVICE)
+	if (iftype != NL80211_IFTYPE_P2P_DEVICE)
 		p2p->bss_idx[P2PAPI_BSSCFG_CONNECTION].vif = NULL;
 
 	return err;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index a260cd503200..077bfd8f4c0c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -1056,6 +1056,8 @@ static ssize_t iwl_dbgfs_fw_dbg_collect_write(struct iwl_mvm *mvm,
 
 	if (ret)
 		return ret;
+	if (count == 0)
+		return 0;
 
 	iwl_mvm_fw_dbg_collect(mvm, FW_DBG_TRIGGER_USER, buf,
 			       (count - 1), NULL);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index 99132ea16ede..c5734e1a02d2 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -216,7 +216,8 @@ u32 iwl_mvm_mac_get_queues_mask(struct ieee80211_vif *vif)
 			qmask |= BIT(vif->hw_queue[ac]);
 	}
 
-	if (vif->type == NL80211_IFTYPE_AP)
+	if (vif->type == NL80211_IFTYPE_AP ||
+	    vif->type == NL80211_IFTYPE_ADHOC)
 		qmask |= BIT(vif->cab_queue);
 
 	return qmask;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index d37b1695c64e..486dcceed17a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -2319,7 +2319,7 @@ iwl_mvm_mac_release_buffered_frames(struct ieee80211_hw *hw,
 {
 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
 
-	/* Called when we need to transmit (a) frame(s) from agg queue */
+	/* Called when we need to transmit (a) frame(s) from agg or dqa queue */
 
 	iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames,
 					  tids, more_data, true);
@@ -2338,7 +2338,8 @@ static void __iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
 	for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
 		struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
 
-		if (tid_data->state != IWL_AGG_ON &&
+		if (!iwl_mvm_is_dqa_supported(mvm) &&
+		    tid_data->state != IWL_AGG_ON &&
 		    tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA)
 			continue;
 
@@ -2400,7 +2401,7 @@ void iwl_mvm_sta_pm_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
 		return;
 
 	rcu_read_lock();
-	sta = mvm->fw_id_to_mac_id[notif->sta_id];
+	sta = rcu_dereference(mvm->fw_id_to_mac_id[notif->sta_id]);
 	if (WARN_ON(IS_ERR_OR_NULL(sta))) {
 		rcu_read_unlock();
 		return;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index bd1dcc863d8f..9d28db7f56aa 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -1806,7 +1806,8 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1806 iwl_mvm_get_wd_timeout(mvm, vif, false, false); 1806 iwl_mvm_get_wd_timeout(mvm, vif, false, false);
1807 int queue; 1807 int queue;
1808 1808
1809 if (vif->type == NL80211_IFTYPE_AP) 1809 if (vif->type == NL80211_IFTYPE_AP ||
1810 vif->type == NL80211_IFTYPE_ADHOC)
1810 queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE; 1811 queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
1811 else if (vif->type == NL80211_IFTYPE_P2P_DEVICE) 1812 else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
1812 queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE; 1813 queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE;
@@ -1837,7 +1838,8 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 	 * enabled-cab_queue to the mask)
 	 */
 	if (iwl_mvm_is_dqa_supported(mvm) &&
-	    vif->type == NL80211_IFTYPE_AP) {
+	    (vif->type == NL80211_IFTYPE_AP ||
+	     vif->type == NL80211_IFTYPE_ADHOC)) {
 		struct iwl_trans_txq_scd_cfg cfg = {
 			.fifo = IWL_MVM_TX_FIFO_MCAST,
 			.sta_id = mvmvif->bcast_sta.sta_id,
@@ -1862,7 +1864,8 @@ static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
 
 	lockdep_assert_held(&mvm->mutex);
 
-	if (vif->type == NL80211_IFTYPE_AP)
+	if (vif->type == NL80211_IFTYPE_AP ||
+	    vif->type == NL80211_IFTYPE_ADHOC)
 		iwl_mvm_disable_txq(mvm, vif->cab_queue, vif->cab_queue,
 				    IWL_MAX_TID_COUNT, 0);
 
@@ -3135,7 +3138,7 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
 				       struct ieee80211_sta *sta,
 				       enum ieee80211_frame_release_type reason,
 				       u16 cnt, u16 tids, bool more_data,
-				       bool agg)
+				       bool single_sta_queue)
 {
 	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
 	struct iwl_mvm_add_sta_cmd cmd = {
@@ -3155,14 +3158,14 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
 	for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
 		cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);
 
-	/* If we're releasing frames from aggregation queues then check if the
-	 * all queues combined that we're releasing frames from have
+	/* If we're releasing frames from aggregation or dqa queues then check
+	 * if all the queues that we're releasing frames from, combined, have:
 	 *  - more frames than the service period, in which case more_data
 	 *    needs to be set
 	 *  - fewer than 'cnt' frames, in which case we need to adjust the
 	 *    firmware command (but do that unconditionally)
 	 */
-	if (agg) {
+	if (single_sta_queue) {
 		int remaining = cnt;
 		int sleep_tx_count;
 
@@ -3172,7 +3175,8 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
 			u16 n_queued;
 
 			tid_data = &mvmsta->tid_data[tid];
-			if (WARN(tid_data->state != IWL_AGG_ON &&
+			if (WARN(!iwl_mvm_is_dqa_supported(mvm) &&
+				 tid_data->state != IWL_AGG_ON &&
 				 tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA,
 				 "TID %d state is %d\n",
 				 tid, tid_data->state)) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index 4be34f902278..1927ce607798 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -547,7 +547,7 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
 				       struct ieee80211_sta *sta,
 				       enum ieee80211_frame_release_type reason,
 				       u16 cnt, u16 tids, bool more_data,
-				       bool agg);
+				       bool single_sta_queue);
 int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
 		      bool drain);
 void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index dd2b4a300819..1ba0a6f55503 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -7,7 +7,7 @@
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
@@ -34,6 +34,7 @@
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@@ -505,6 +506,7 @@ static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
 
 	switch (info->control.vif->type) {
 	case NL80211_IFTYPE_AP:
+	case NL80211_IFTYPE_ADHOC:
 		/*
 		 * Handle legacy hostapd as well, where station may be added
 		 * only after assoc. Take care of the case where we send a
@@ -516,7 +518,8 @@ static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
 		if (info->hw_queue == info->control.vif->cab_queue)
 			return info->hw_queue;
 
-		WARN_ONCE(1, "fc=0x%02x", le16_to_cpu(fc));
+		WARN_ONCE(info->control.vif->type != NL80211_IFTYPE_ADHOC,
+			  "fc=0x%02x", le16_to_cpu(fc));
 		return IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
 	case NL80211_IFTYPE_P2P_DEVICE:
 		if (ieee80211_is_mgmt(fc))
@@ -583,7 +586,8 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
 			iwl_mvm_vif_from_mac80211(info.control.vif);
 
 		if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE ||
-		    info.control.vif->type == NL80211_IFTYPE_AP) {
+		    info.control.vif->type == NL80211_IFTYPE_AP ||
+		    info.control.vif->type == NL80211_IFTYPE_ADHOC) {
 			sta_id = mvmvif->bcast_sta.sta_id;
 			queue = iwl_mvm_get_ctrl_vif_queue(mvm, &info,
 							   hdr->frame_control);
@@ -628,8 +632,10 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
 	 * values.
 	 * Note that we don't need to make sure it isn't agg'd, since we're
 	 * TXing non-sta
+	 * For DQA mode - we shouldn't increase it though
 	 */
-	atomic_inc(&mvm->pending_frames[sta_id]);
+	if (!iwl_mvm_is_dqa_supported(mvm))
+		atomic_inc(&mvm->pending_frames[sta_id]);
 
 	return 0;
 }
@@ -1005,11 +1011,8 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
 
 	spin_unlock(&mvmsta->lock);
 
-	/* Increase pending frames count if this isn't AMPDU */
-	if ((iwl_mvm_is_dqa_supported(mvm) &&
-	     mvmsta->tid_data[tx_cmd->tid_tspec].state != IWL_AGG_ON &&
-	     mvmsta->tid_data[tx_cmd->tid_tspec].state != IWL_AGG_STARTING) ||
-	    (!iwl_mvm_is_dqa_supported(mvm) && !is_ampdu))
+	/* Increase pending frames count if this isn't AMPDU or DQA queue */
+	if (!iwl_mvm_is_dqa_supported(mvm) && !is_ampdu)
 		atomic_inc(&mvm->pending_frames[mvmsta->sta_id]);
 
 	return 0;
@@ -1079,12 +1082,13 @@ static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm,
 	lockdep_assert_held(&mvmsta->lock);
 
 	if ((tid_data->state == IWL_AGG_ON ||
-	     tid_data->state == IWL_EMPTYING_HW_QUEUE_DELBA) &&
+	     tid_data->state == IWL_EMPTYING_HW_QUEUE_DELBA ||
+	     iwl_mvm_is_dqa_supported(mvm)) &&
 	    iwl_mvm_tid_queued(tid_data) == 0) {
 		/*
-		 * Now that this aggregation queue is empty tell mac80211 so it
-		 * knows we no longer have frames buffered for the station on
-		 * this TID (for the TIM bitmap calculation.)
+		 * Now that this aggregation or DQA queue is empty tell
+		 * mac80211 so it knows we no longer have frames buffered for
+		 * the station on this TID (for the TIM bitmap calculation.)
 		 */
 		ieee80211_sta_set_buffered(sta, tid, false);
 	}
@@ -1257,7 +1261,6 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
 	u8 skb_freed = 0;
 	u16 next_reclaimed, seq_ctl;
 	bool is_ndp = false;
-	bool txq_agg = false; /* Is this TXQ aggregated */
 
 	__skb_queue_head_init(&skbs);
 
@@ -1283,6 +1286,10 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
 		info->flags |= IEEE80211_TX_STAT_ACK;
 		break;
 	case TX_STATUS_FAIL_DEST_PS:
+		/* In DQA, the FW should have stopped the queue and not
+		 * return this status
+		 */
+		WARN_ON(iwl_mvm_is_dqa_supported(mvm));
 		info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
 		break;
 	default:
@@ -1387,15 +1394,6 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
 		bool send_eosp_ndp = false;
 
 		spin_lock_bh(&mvmsta->lock);
-		if (iwl_mvm_is_dqa_supported(mvm)) {
-			enum iwl_mvm_agg_state state;
-
-			state = mvmsta->tid_data[tid].state;
-			txq_agg = (state == IWL_AGG_ON ||
-				   state == IWL_EMPTYING_HW_QUEUE_DELBA);
-		} else {
-			txq_agg = txq_id >= mvm->first_agg_queue;
-		}
 
 		if (!is_ndp) {
 			tid_data->next_reclaimed = next_reclaimed;
@@ -1452,11 +1450,11 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
 	 * If the txq is not an AMPDU queue, there is no chance we freed
 	 * several skbs. Check that out...
 	 */
-	if (txq_agg)
+	if (iwl_mvm_is_dqa_supported(mvm) || txq_id >= mvm->first_agg_queue)
 		goto out;
 
 	/* We can't free more than one frame at once on a shared queue */
-	WARN_ON(!iwl_mvm_is_dqa_supported(mvm) && (skb_freed > 1));
+	WARN_ON(skb_freed > 1);
 
 	/* If we have still frames for this STA nothing to do here */
 	if (!atomic_sub_and_test(skb_freed, &mvm->pending_frames[sta_id]))
diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c
index 5ebca1d0cfc7..b62e03d11c2e 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.c
+++ b/drivers/net/wireless/marvell/mwifiex/main.c
@@ -57,8 +57,8 @@ MODULE_PARM_DESC(mfg_mode, "manufacturing mode enable:1, disable:0");
 * In case of any errors during inittialization, this function also ensures
 * proper cleanup before exiting.
 */
-static int mwifiex_register(void *card, struct mwifiex_if_ops *if_ops,
-			    void **padapter)
+static int mwifiex_register(void *card, struct device *dev,
+			    struct mwifiex_if_ops *if_ops, void **padapter)
 {
 	struct mwifiex_adapter *adapter;
 	int i;
@@ -68,6 +68,7 @@ static int mwifiex_register(void *card, struct mwifiex_if_ops *if_ops,
 		return -ENOMEM;
 
 	*padapter = adapter;
+	adapter->dev = dev;
 	adapter->card = card;
 
 	/* Save interface specific operations in adapter */
@@ -1568,12 +1569,11 @@ mwifiex_add_card(void *card, struct completion *fw_done,
 {
 	struct mwifiex_adapter *adapter;
 
-	if (mwifiex_register(card, if_ops, (void **)&adapter)) {
+	if (mwifiex_register(card, dev, if_ops, (void **)&adapter)) {
 		pr_err("%s: software init failed\n", __func__);
 		goto err_init_sw;
 	}
 
-	adapter->dev = dev;
 	mwifiex_probe_of(adapter);
 
 	adapter->iface_type = iface_type;
@@ -1718,6 +1718,9 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter)
 	wiphy_unregister(adapter->wiphy);
 	wiphy_free(adapter->wiphy);
 
+	if (adapter->irq_wakeup >= 0)
+		device_init_wakeup(adapter->dev, false);
+
 	/* Unregister device */
 	mwifiex_dbg(adapter, INFO,
 		    "info: unregister device\n");
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
index a0d918094889..b8c990d10d6e 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
+++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
@@ -2739,6 +2739,21 @@ static void mwifiex_pcie_device_dump(struct mwifiex_adapter *adapter)
 	schedule_work(&card->work);
 }
 
+static void mwifiex_pcie_free_buffers(struct mwifiex_adapter *adapter)
+{
+	struct pcie_service_card *card = adapter->card;
+	const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
+
+	if (reg->sleep_cookie)
+		mwifiex_pcie_delete_sleep_cookie_buf(adapter);
+
+	mwifiex_pcie_delete_cmdrsp_buf(adapter);
+	mwifiex_pcie_delete_evtbd_ring(adapter);
+	mwifiex_pcie_delete_rxbd_ring(adapter);
+	mwifiex_pcie_delete_txbd_ring(adapter);
+	card->cmdrsp_buf = NULL;
+}
+
 /*
  * This function initializes the PCI-E host memory space, WCB rings, etc.
  *
@@ -2850,13 +2865,6 @@ err_enable_dev:
 
 /*
  * This function cleans up the allocated card buffers.
- *
- * The following are freed by this function -
- *      - TXBD ring buffers
- *      - RXBD ring buffers
- *      - Event BD ring buffers
- *      - Command response ring buffer
- *      - Sleep cookie buffer
  */
 static void mwifiex_cleanup_pcie(struct mwifiex_adapter *adapter)
 {
@@ -2875,6 +2883,8 @@ static void mwifiex_cleanup_pcie(struct mwifiex_adapter *adapter)
 			    "Failed to write driver not-ready signature\n");
 	}
 
+	mwifiex_pcie_free_buffers(adapter);
+
 	if (pdev) {
 		pci_iounmap(pdev, card->pci_mmap);
 		pci_iounmap(pdev, card->pci_mmap1);
@@ -3126,10 +3136,7 @@ err_cre_txbd:
 	pci_iounmap(pdev, card->pci_mmap1);
 }
 
-/* This function cleans up the PCI-E host memory space.
- * Some code is extracted from mwifiex_unregister_dev()
- *
- */
+/* This function cleans up the PCI-E host memory space. */
 static void mwifiex_pcie_down_dev(struct mwifiex_adapter *adapter)
 {
 	struct pcie_service_card *card = adapter->card;
@@ -3140,14 +3147,7 @@ static void mwifiex_pcie_down_dev(struct mwifiex_adapter *adapter)
 
 	adapter->seq_num = 0;
 
-	if (reg->sleep_cookie)
-		mwifiex_pcie_delete_sleep_cookie_buf(adapter);
-
-	mwifiex_pcie_delete_cmdrsp_buf(adapter);
-	mwifiex_pcie_delete_evtbd_ring(adapter);
-	mwifiex_pcie_delete_rxbd_ring(adapter);
-	mwifiex_pcie_delete_txbd_ring(adapter);
-	card->cmdrsp_buf = NULL;
+	mwifiex_pcie_free_buffers(adapter);
 }
 
 static struct mwifiex_if_ops pcie_ops = {
diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c
index caea350f05aa..bdc379178e87 100644
--- a/drivers/net/wireless/realtek/rtlwifi/base.c
+++ b/drivers/net/wireless/realtek/rtlwifi/base.c
@@ -1742,12 +1742,14 @@ void rtl_c2hcmd_enqueue(struct ieee80211_hw *hw, u8 tag, u8 len, u8 *val)
 	unsigned long flags;
 	struct rtl_c2hcmd *c2hcmd;
 
-	c2hcmd = kmalloc(sizeof(*c2hcmd), GFP_KERNEL);
+	c2hcmd = kmalloc(sizeof(*c2hcmd),
+			 in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
 
 	if (!c2hcmd)
 		goto label_err;
 
-	c2hcmd->val = kmalloc(len, GFP_KERNEL);
+	c2hcmd->val = kmalloc(len,
+			      in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
 
 	if (!c2hcmd->val)
 		goto label_err2;
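
Note: rtl_c2hcmd_enqueue() can be reached from interrupt context, where a GFP_KERNEL allocation may sleep and is therefore invalid; the in_interrupt() test selects GFP_ATOMIC there. Sketched below — note that GFP_ATOMIC cannot sleep but can fail under memory pressure, so the existing error labels still matter, and passing a gfp_t in from the caller is the usual cleaner alternative to sniffing in_interrupt():

	/* Sketch of the context-sensitive allocation used above. */
	static void *ctx_kmalloc(size_t len)
	{
		return kmalloc(len, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
	}
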
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 829b26cd4549..8397f6c92451 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -165,13 +165,17 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct xenvif *vif = netdev_priv(dev);
 	struct xenvif_queue *queue = NULL;
-	unsigned int num_queues = vif->num_queues;
+	unsigned int num_queues;
 	u16 index;
 	struct xenvif_rx_cb *cb;
 
 	BUG_ON(skb->dev != dev);
 
-	/* Drop the packet if queues are not set up */
+	/* Drop the packet if queues are not set up.
+	 * This handler should be called inside an RCU read section
+	 * so we don't need to enter it here explicitly.
+	 */
+	num_queues = READ_ONCE(vif->num_queues);
 	if (num_queues < 1)
 		goto drop;
 
@@ -222,18 +226,18 @@ static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
 {
 	struct xenvif *vif = netdev_priv(dev);
 	struct xenvif_queue *queue = NULL;
+	unsigned int num_queues;
 	u64 rx_bytes = 0;
 	u64 rx_packets = 0;
 	u64 tx_bytes = 0;
 	u64 tx_packets = 0;
 	unsigned int index;
 
-	spin_lock(&vif->lock);
-	if (vif->queues == NULL)
-		goto out;
+	rcu_read_lock();
+	num_queues = READ_ONCE(vif->num_queues);
 
 	/* Aggregate tx and rx stats from each queue */
-	for (index = 0; index < vif->num_queues; ++index) {
+	for (index = 0; index < num_queues; ++index) {
 		queue = &vif->queues[index];
 		rx_bytes += queue->stats.rx_bytes;
 		rx_packets += queue->stats.rx_packets;
@@ -241,8 +245,7 @@ static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
 		tx_packets += queue->stats.tx_packets;
 	}
 
-out:
-	spin_unlock(&vif->lock);
+	rcu_read_unlock();
 
 	vif->dev->stats.rx_bytes = rx_bytes;
 	vif->dev->stats.rx_packets = rx_packets;
@@ -378,10 +381,13 @@ static void xenvif_get_ethtool_stats(struct net_device *dev,
 			       struct ethtool_stats *stats, u64 * data)
 {
 	struct xenvif *vif = netdev_priv(dev);
-	unsigned int num_queues = vif->num_queues;
+	unsigned int num_queues;
 	int i;
 	unsigned int queue_index;
 
+	rcu_read_lock();
+	num_queues = READ_ONCE(vif->num_queues);
+
 	for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) {
 		unsigned long accum = 0;
 		for (queue_index = 0; queue_index < num_queues; ++queue_index) {
@@ -390,6 +396,8 @@ static void xenvif_get_ethtool_stats(struct net_device *dev,
 		}
 		data[i] = accum;
 	}
+
+	rcu_read_unlock();
 }
 
 static void xenvif_get_strings(struct net_device *dev, u32 stringset, u8 * data)
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index f9bcf4a665bc..602d408fa25e 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -214,7 +214,7 @@ static void xenvif_fatal_tx_err(struct xenvif *vif)
 	netdev_err(vif->dev, "fatal error; disabling device\n");
 	vif->disabled = true;
 	/* Disable the vif from queue 0's kthread */
-	if (vif->queues)
+	if (vif->num_queues)
 		xenvif_kick_thread(&vif->queues[0]);
 }
 
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index d2d7cd9145b1..a56d3eab35dd 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -495,26 +495,26 @@ static void backend_disconnect(struct backend_info *be)
 	struct xenvif *vif = be->vif;
 
 	if (vif) {
+		unsigned int num_queues = vif->num_queues;
 		unsigned int queue_index;
-		struct xenvif_queue *queues;
 
 		xen_unregister_watchers(vif);
 #ifdef CONFIG_DEBUG_FS
 		xenvif_debugfs_delif(vif);
 #endif /* CONFIG_DEBUG_FS */
 		xenvif_disconnect_data(vif);
-		for (queue_index = 0;
-		     queue_index < vif->num_queues;
-		     ++queue_index)
-			xenvif_deinit_queue(&vif->queues[queue_index]);
 
-		spin_lock(&vif->lock);
-		queues = vif->queues;
+		/* At this point some of the handlers may still be active
+		 * so we need to have additional synchronization here.
+		 */
 		vif->num_queues = 0;
-		vif->queues = NULL;
-		spin_unlock(&vif->lock);
+		synchronize_net();
 
-		vfree(queues);
+		for (queue_index = 0; queue_index < num_queues; ++queue_index)
+			xenvif_deinit_queue(&vif->queues[queue_index]);
+
+		vfree(vif->queues);
+		vif->queues = NULL;
 
 		xenvif_disconnect_ctrl(vif);
 	}
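
Note: taken together, the three xen-netback changes replace the vif->lock / NULL-queues scheme with an RCU-style protocol: readers sample num_queues once via READ_ONCE() (the xmit handler already runs inside an RCU read section; the stats paths take rcu_read_lock() themselves), and teardown publishes num_queues = 0 and then calls synchronize_net() so every reader that might still use the old count drains before the queue array is freed. Condensed sketch, with hypothetical helper names:

	/* Reader: entries 0..n-1 stay valid for the whole read section. */
	rcu_read_lock();
	n = READ_ONCE(vif->num_queues);
	for (i = 0; i < n; i++)
		use_queue(&vif->queues[i]);
	rcu_read_unlock();

	/* Writer: unpublish, wait out readers, then tear down. */
	saved = vif->num_queues;
	vif->num_queues = 0;
	synchronize_net();		/* every later reader sees 0 */
	for (i = 0; i < saved; i++)
		deinit_queue(&vif->queues[i]);
	vfree(vif->queues);
	vif->queues = NULL;
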
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 9b3b57fef446..9583a5f58a1d 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -270,7 +270,7 @@ static inline int nvme_setup_discard(struct nvme_ns *ns, struct request *req,
 	memset(cmnd, 0, sizeof(*cmnd));
 	cmnd->dsm.opcode = nvme_cmd_dsm;
 	cmnd->dsm.nsid = cpu_to_le32(ns->ns_id);
-	cmnd->dsm.nr = segments - 1;
+	cmnd->dsm.nr = cpu_to_le32(segments - 1);
 	cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
 
 	req->special_vec.bv_page = virt_to_page(range);
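
Note: dsm.nr is a little-endian wire field, so assigning a host-endian value happens to work on x86 but corrupts the command on big-endian machines; keeping such fields typed __le32 lets sparse catch the missing conversion at build time. Sketch:

	/* Sketch: bitwise-endian types turn a missing cpu_to_le32() into a
	 * sparse warning instead of a silent big-endian bug.
	 */
	struct dsm_example {
		__le32 nsid;
		__le32 nr;	/* number of ranges, 0's based */
	};

	static void fill_dsm(struct dsm_example *c, u32 nsid, u32 segments)
	{
		c->nsid = cpu_to_le32(nsid);
		c->nr = cpu_to_le32(segments - 1);
	}
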
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 779f516e7a4e..47a479f26e5d 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -343,8 +343,6 @@ static int __nvme_rdma_init_request(struct nvme_rdma_ctrl *ctrl,
 	struct ib_device *ibdev = dev->dev;
 	int ret;
 
-	BUG_ON(queue_idx >= ctrl->queue_count);
-
 	ret = nvme_rdma_alloc_qe(ibdev, &req->sqe, sizeof(struct nvme_command),
 			DMA_TO_DEVICE);
 	if (ret)
@@ -652,8 +650,22 @@ out_free_queues:
 
 static int nvme_rdma_init_io_queues(struct nvme_rdma_ctrl *ctrl)
 {
+	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
+	unsigned int nr_io_queues;
 	int i, ret;
 
+	nr_io_queues = min(opts->nr_io_queues, num_online_cpus());
+	ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
+	if (ret)
+		return ret;
+
+	ctrl->queue_count = nr_io_queues + 1;
+	if (ctrl->queue_count < 2)
+		return 0;
+
+	dev_info(ctrl->ctrl.device,
+		"creating %d I/O queues.\n", nr_io_queues);
+
 	for (i = 1; i < ctrl->queue_count; i++) {
 		ret = nvme_rdma_init_queue(ctrl, i,
 				ctrl->ctrl.opts->queue_size);
@@ -1791,20 +1803,8 @@ static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = {
 
 static int nvme_rdma_create_io_queues(struct nvme_rdma_ctrl *ctrl)
 {
-	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
 	int ret;
 
-	ret = nvme_set_queue_count(&ctrl->ctrl, &opts->nr_io_queues);
-	if (ret)
-		return ret;
-
-	ctrl->queue_count = opts->nr_io_queues + 1;
-	if (ctrl->queue_count < 2)
-		return 0;
-
-	dev_info(ctrl->ctrl.device,
-		"creating %d I/O queues.\n", opts->nr_io_queues);
-
 	ret = nvme_rdma_init_io_queues(ctrl);
 	if (ret)
 		return ret;
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index a7bcff45f437..76450b0c55f1 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -100,7 +100,7 @@ static u16 nvmet_get_smart_log(struct nvmet_req *req,
 	u16 status;
 
 	WARN_ON(req == NULL || slog == NULL);
-	if (req->cmd->get_log_page.nsid == 0xFFFFFFFF)
+	if (req->cmd->get_log_page.nsid == cpu_to_le32(0xFFFFFFFF))
 		status = nvmet_get_smart_log_all(req, slog);
 	else
 		status = nvmet_get_smart_log_nsid(req, slog);
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 11b0a0a5f661..798653b329b2 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -425,6 +425,13 @@ void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
 	ctrl->sqs[qid] = sq;
 }
 
+static void nvmet_confirm_sq(struct percpu_ref *ref)
+{
+	struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);
+
+	complete(&sq->confirm_done);
+}
+
 void nvmet_sq_destroy(struct nvmet_sq *sq)
 {
 	/*
@@ -433,7 +440,8 @@ void nvmet_sq_destroy(struct nvmet_sq *sq)
 	 */
 	if (sq->ctrl && sq->ctrl->sqs && sq->ctrl->sqs[0] == sq)
 		nvmet_async_events_free(sq->ctrl);
-	percpu_ref_kill(&sq->ref);
+	percpu_ref_kill_and_confirm(&sq->ref, nvmet_confirm_sq);
+	wait_for_completion(&sq->confirm_done);
 	wait_for_completion(&sq->free_done);
 	percpu_ref_exit(&sq->ref);
 
@@ -461,6 +469,7 @@ int nvmet_sq_init(struct nvmet_sq *sq)
461 return ret; 469 return ret;
462 } 470 }
463 init_completion(&sq->free_done); 471 init_completion(&sq->free_done);
472 init_completion(&sq->confirm_done);
464 473
465 return 0; 474 return 0;
466} 475}
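
The second completion closes a teardown race: percpu_ref_kill() only starts the switch from percpu to atomic mode, and until that switch (an RCU grace period) completes, percpu_ref_tryget_live() can still hand out new references. percpu_ref_kill_and_confirm() fires a callback once the switch is done, so waiting on confirm_done before free_done guarantees no late reference appears. The pattern in isolation, with my_queue as a stand-in type:

#include <linux/kernel.h>
#include <linux/completion.h>
#include <linux/percpu-refcount.h>

struct my_queue {				/* hypothetical container */
	struct percpu_ref	ref;
	struct completion	confirm_done;	/* percpu->atomic switch done */
	struct completion	free_done;	/* last reference dropped */
};

static void my_queue_confirm(struct percpu_ref *ref)
{
	struct my_queue *q = container_of(ref, struct my_queue, ref);

	complete(&q->confirm_done);
}

static void my_queue_destroy(struct my_queue *q)
{
	/* Kill the ref, then wait until the mode switch has finished:
	 * only after that is tryget_live() guaranteed to fail. */
	percpu_ref_kill_and_confirm(&q->ref, my_queue_confirm);
	wait_for_completion(&q->confirm_done);

	/* Release callback (which completes free_done in this scheme)
	 * runs on the final put; wait for it, then tear down. */
	wait_for_completion(&q->free_done);
	percpu_ref_exit(&q->ref);
}
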
diff --git a/drivers/nvme/target/io-cmd.c b/drivers/nvme/target/io-cmd.c
index 4195115c7e54..6b0baa9caab9 100644
--- a/drivers/nvme/target/io-cmd.c
+++ b/drivers/nvme/target/io-cmd.c
@@ -180,7 +180,7 @@ static void nvmet_execute_write_zeroes(struct nvmet_req *req)
180 180
181 sector = le64_to_cpu(write_zeroes->slba) << 181 sector = le64_to_cpu(write_zeroes->slba) <<
182 (req->ns->blksize_shift - 9); 182 (req->ns->blksize_shift - 9);
183 nr_sector = (((sector_t)le32_to_cpu(write_zeroes->length)) << 183 nr_sector = (((sector_t)le16_to_cpu(write_zeroes->length)) <<
184 (req->ns->blksize_shift - 9)) + 1; 184 (req->ns->blksize_shift - 9)) + 1;
185 185
186 if (__blkdev_issue_zeroout(req->ns->bdev, sector, nr_sector, 186 if (__blkdev_issue_zeroout(req->ns->bdev, sector, nr_sector,
@@ -230,7 +230,7 @@ int nvmet_parse_io_cmd(struct nvmet_req *req)
230 return 0; 230 return 0;
231 case nvme_cmd_dsm: 231 case nvme_cmd_dsm:
232 req->execute = nvmet_execute_dsm; 232 req->execute = nvmet_execute_dsm;
233 req->data_len = le32_to_cpu(cmd->dsm.nr + 1) * 233 req->data_len = (le32_to_cpu(cmd->dsm.nr) + 1) *
234 sizeof(struct nvme_dsm_range); 234 sizeof(struct nvme_dsm_range);
235 return 0; 235 return 0;
236 case nvme_cmd_write_zeroes: 236 case nvme_cmd_write_zeroes:
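
Both hunks above are byte-order fixes. write_zeroes->length is a 16-bit wire field, so le16_to_cpu() is the correct accessor; and in the DSM case the +1 had slipped inside the conversion: le32_to_cpu(cmd->dsm.nr + 1) byte-swaps the sum of a little-endian value and 1, which is garbage on big-endian hosts (and a sparse type violation). Arithmetic belongs after the conversion; the corrected shape as a tiny helper over the public <linux/nvme.h> types:

#include <linux/types.h>
#include <linux/nvme.h>
#include <asm/byteorder.h>

/* A DSM command carries (nr + 1) ranges: convert from wire order
 * first, then do the arithmetic in CPU order. */
static u32 my_dsm_data_len(struct nvme_dsm_cmd *dsm)
{
	return (le32_to_cpu(dsm->nr) + 1) * sizeof(struct nvme_dsm_range);
}
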
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index d1f06e7768ff..22f7bc6bac7f 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -223,8 +223,6 @@ static void nvme_loop_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
223static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl, 223static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
224 struct nvme_loop_iod *iod, unsigned int queue_idx) 224 struct nvme_loop_iod *iod, unsigned int queue_idx)
225{ 225{
226 BUG_ON(queue_idx >= ctrl->queue_count);
227
228 iod->req.cmd = &iod->cmd; 226 iod->req.cmd = &iod->cmd;
229 iod->req.rsp = &iod->rsp; 227 iod->req.rsp = &iod->rsp;
230 iod->queue = &ctrl->queues[queue_idx]; 228 iod->queue = &ctrl->queues[queue_idx];
@@ -288,9 +286,9 @@ static struct blk_mq_ops nvme_loop_admin_mq_ops = {
288 286
289static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl) 287static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
290{ 288{
289 nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
291 blk_cleanup_queue(ctrl->ctrl.admin_q); 290 blk_cleanup_queue(ctrl->ctrl.admin_q);
292 blk_mq_free_tag_set(&ctrl->admin_tag_set); 291 blk_mq_free_tag_set(&ctrl->admin_tag_set);
293 nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
294} 292}
295 293
296static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl) 294static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
@@ -314,6 +312,43 @@ free_ctrl:
314 kfree(ctrl); 312 kfree(ctrl);
315} 313}
316 314
315static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
316{
317 int i;
318
319 for (i = 1; i < ctrl->queue_count; i++)
320 nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
321}
322
323static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
324{
325 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
326 unsigned int nr_io_queues;
327 int ret, i;
328
329 nr_io_queues = min(opts->nr_io_queues, num_online_cpus());
330 ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
331 if (ret || !nr_io_queues)
332 return ret;
333
334 dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n", nr_io_queues);
335
336 for (i = 1; i <= nr_io_queues; i++) {
337 ctrl->queues[i].ctrl = ctrl;
338 ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
339 if (ret)
340 goto out_destroy_queues;
341
342 ctrl->queue_count++;
343 }
344
345 return 0;
346
347out_destroy_queues:
348 nvme_loop_destroy_io_queues(ctrl);
349 return ret;
350}
351
317static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl) 352static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
318{ 353{
319 int error; 354 int error;
@@ -385,17 +420,13 @@ out_free_sq:
385 420
386static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl) 421static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
387{ 422{
388 int i;
389
390 nvme_stop_keep_alive(&ctrl->ctrl); 423 nvme_stop_keep_alive(&ctrl->ctrl);
391 424
392 if (ctrl->queue_count > 1) { 425 if (ctrl->queue_count > 1) {
393 nvme_stop_queues(&ctrl->ctrl); 426 nvme_stop_queues(&ctrl->ctrl);
394 blk_mq_tagset_busy_iter(&ctrl->tag_set, 427 blk_mq_tagset_busy_iter(&ctrl->tag_set,
395 nvme_cancel_request, &ctrl->ctrl); 428 nvme_cancel_request, &ctrl->ctrl);
396 429 nvme_loop_destroy_io_queues(ctrl);
397 for (i = 1; i < ctrl->queue_count; i++)
398 nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
399 } 430 }
400 431
401 if (ctrl->ctrl.state == NVME_CTRL_LIVE) 432 if (ctrl->ctrl.state == NVME_CTRL_LIVE)
@@ -467,19 +498,14 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work)
467 if (ret) 498 if (ret)
468 goto out_disable; 499 goto out_disable;
469 500
470 for (i = 1; i <= ctrl->ctrl.opts->nr_io_queues; i++) { 501 ret = nvme_loop_init_io_queues(ctrl);
471 ctrl->queues[i].ctrl = ctrl; 502 if (ret)
472 ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq); 503 goto out_destroy_admin;
473 if (ret)
474 goto out_free_queues;
475
476 ctrl->queue_count++;
477 }
478 504
479 for (i = 1; i <= ctrl->ctrl.opts->nr_io_queues; i++) { 505 for (i = 1; i < ctrl->queue_count; i++) {
480 ret = nvmf_connect_io_queue(&ctrl->ctrl, i); 506 ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
481 if (ret) 507 if (ret)
482 goto out_free_queues; 508 goto out_destroy_io;
483 } 509 }
484 510
485 changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE); 511 changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
@@ -492,9 +518,9 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work)
492 518
493 return; 519 return;
494 520
495out_free_queues: 521out_destroy_io:
496 for (i = 1; i < ctrl->queue_count; i++) 522 nvme_loop_destroy_io_queues(ctrl);
497 nvmet_sq_destroy(&ctrl->queues[i].nvme_sq); 523out_destroy_admin:
498 nvme_loop_destroy_admin_queue(ctrl); 524 nvme_loop_destroy_admin_queue(ctrl);
499out_disable: 525out_disable:
500 dev_warn(ctrl->ctrl.device, "Removing after reset failure\n"); 526 dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
@@ -533,25 +559,12 @@ static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = {
533 559
534static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl) 560static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
535{ 561{
536 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
537 int ret, i; 562 int ret, i;
538 563
539 ret = nvme_set_queue_count(&ctrl->ctrl, &opts->nr_io_queues); 564 ret = nvme_loop_init_io_queues(ctrl);
540 if (ret || !opts->nr_io_queues) 565 if (ret)
541 return ret; 566 return ret;
542 567
543 dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n",
544 opts->nr_io_queues);
545
546 for (i = 1; i <= opts->nr_io_queues; i++) {
547 ctrl->queues[i].ctrl = ctrl;
548 ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
549 if (ret)
550 goto out_destroy_queues;
551
552 ctrl->queue_count++;
553 }
554
555 memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set)); 568 memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
556 ctrl->tag_set.ops = &nvme_loop_mq_ops; 569 ctrl->tag_set.ops = &nvme_loop_mq_ops;
557 ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size; 570 ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
@@ -575,7 +588,7 @@ static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
575 goto out_free_tagset; 588 goto out_free_tagset;
576 } 589 }
577 590
578 for (i = 1; i <= opts->nr_io_queues; i++) { 591 for (i = 1; i < ctrl->queue_count; i++) {
579 ret = nvmf_connect_io_queue(&ctrl->ctrl, i); 592 ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
580 if (ret) 593 if (ret)
581 goto out_cleanup_connect_q; 594 goto out_cleanup_connect_q;
@@ -588,8 +601,7 @@ out_cleanup_connect_q:
588out_free_tagset: 601out_free_tagset:
589 blk_mq_free_tag_set(&ctrl->tag_set); 602 blk_mq_free_tag_set(&ctrl->tag_set);
590out_destroy_queues: 603out_destroy_queues:
591 for (i = 1; i < ctrl->queue_count; i++) 604 nvme_loop_destroy_io_queues(ctrl);
592 nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
593 return ret; 605 return ret;
594} 606}
595 607
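
Across this file, queue setup and teardown collapse into one nvme_loop_init_io_queues()/nvme_loop_destroy_io_queues() pair, so the create path, the reset path, and every error unwind now share the same code instead of three hand-rolled loops. The generic shape of that refactor, with all types and helpers hypothetical stubs:

struct my_ctrl {
	unsigned int queue_count;	/* 1 + number of live I/O queues */
};

static int my_init_one_queue(struct my_ctrl *ctrl, int qid)
{
	return 0;			/* stub: real per-queue setup here */
}

static void my_destroy_one_queue(struct my_ctrl *ctrl, int qid)
{
					/* stub: real per-queue teardown */
}

static void my_destroy_io_queues(struct my_ctrl *ctrl)
{
	int i;

	for (i = 1; i < ctrl->queue_count; i++)
		my_destroy_one_queue(ctrl, i);
}

static int my_init_io_queues(struct my_ctrl *ctrl, unsigned int nr)
{
	int ret, i;

	for (i = 1; i <= nr; i++) {	/* queue 0 is the admin queue */
		ret = my_init_one_queue(ctrl, i);
		if (ret)
			goto out_destroy;
		ctrl->queue_count++;	/* count only what succeeded */
	}

	return 0;

out_destroy:
	my_destroy_io_queues(ctrl);	/* unwinds queues 1..queue_count-1 */
	return ret;
}
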
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 1370eee0a3c0..f7ff15f17ca9 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -73,6 +73,7 @@ struct nvmet_sq {
73 u16 qid; 73 u16 qid;
74 u16 size; 74 u16 size;
75 struct completion free_done; 75 struct completion free_done;
76 struct completion confirm_done;
76}; 77};
77 78
78/** 79/**
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 9aa1da3778b3..ecc4fe862561 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -703,11 +703,6 @@ static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue,
703{ 703{
704 u16 status; 704 u16 status;
705 705
706 cmd->queue = queue;
707 cmd->n_rdma = 0;
708 cmd->req.port = queue->port;
709
710
711 ib_dma_sync_single_for_cpu(queue->dev->device, 706 ib_dma_sync_single_for_cpu(queue->dev->device,
712 cmd->cmd->sge[0].addr, cmd->cmd->sge[0].length, 707 cmd->cmd->sge[0].addr, cmd->cmd->sge[0].length,
713 DMA_FROM_DEVICE); 708 DMA_FROM_DEVICE);
@@ -760,9 +755,12 @@ static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
760 755
761 cmd->queue = queue; 756 cmd->queue = queue;
762 rsp = nvmet_rdma_get_rsp(queue); 757 rsp = nvmet_rdma_get_rsp(queue);
758 rsp->queue = queue;
763 rsp->cmd = cmd; 759 rsp->cmd = cmd;
764 rsp->flags = 0; 760 rsp->flags = 0;
765 rsp->req.cmd = cmd->nvme_cmd; 761 rsp->req.cmd = cmd->nvme_cmd;
762 rsp->req.port = queue->port;
763 rsp->n_rdma = 0;
766 764
767 if (unlikely(queue->state != NVMET_RDMA_Q_LIVE)) { 765 if (unlikely(queue->state != NVMET_RDMA_Q_LIVE)) {
768 unsigned long flags; 766 unsigned long flags;
diff --git a/drivers/parport/share.c b/drivers/parport/share.c
index bc090daa850a..5dc53d420ca8 100644
--- a/drivers/parport/share.c
+++ b/drivers/parport/share.c
@@ -939,8 +939,10 @@ parport_register_dev_model(struct parport *port, const char *name,
939 * pardevice fields. -arca 939 * pardevice fields. -arca
940 */ 940 */
941 port->ops->init_state(par_dev, par_dev->state); 941 port->ops->init_state(par_dev, par_dev->state);
942 port->proc_device = par_dev; 942 if (!test_and_set_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags)) {
943 parport_device_proc_register(par_dev); 943 port->proc_device = par_dev;
944 parport_device_proc_register(par_dev);
945 }
944 946
945 return par_dev; 947 return par_dev;
946 948
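
The guard above leans on test_and_set_bit(), which atomically sets the bit and returns its previous value: only the first device registered on a port sees 0 and becomes the /proc device; later callers skip registration entirely. The idiom in isolation (flag name and surrounding types are placeholders):

#include <linux/bitops.h>
#include <linux/printk.h>

#define MY_PROC_REGISTERED	1	/* some bit in a flags word */

static void my_register_proc_once(unsigned long *flags)
{
	/* Old value was 0: this caller won the race and registers. */
	if (!test_and_set_bit(MY_PROC_REGISTERED, flags))
		pr_info("registering proc entry\n");
	/* Old value was 1: somebody else already did; do nothing. */
}
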
diff --git a/drivers/pci/dwc/Kconfig b/drivers/pci/dwc/Kconfig
index dfb8a69afc28..d2d2ba5b8a68 100644
--- a/drivers/pci/dwc/Kconfig
+++ b/drivers/pci/dwc/Kconfig
@@ -89,6 +89,7 @@ config PCI_HISI
89 depends on PCI_MSI_IRQ_DOMAIN 89 depends on PCI_MSI_IRQ_DOMAIN
90 select PCIEPORTBUS 90 select PCIEPORTBUS
91 select PCIE_DW_HOST 91 select PCIE_DW_HOST
92 select PCI_HOST_COMMON
92 help 93 help
93 Say Y here if you want PCIe controller support on HiSilicon 94 Say Y here if you want PCIe controller support on HiSilicon
94 Hip05 and Hip06 SoCs 95 Hip05 and Hip06 SoCs
diff --git a/drivers/pci/dwc/pcie-artpec6.c b/drivers/pci/dwc/pcie-artpec6.c
index fcd3ef845883..6d23683c0892 100644
--- a/drivers/pci/dwc/pcie-artpec6.c
+++ b/drivers/pci/dwc/pcie-artpec6.c
@@ -234,6 +234,9 @@ static int artpec6_add_pcie_port(struct artpec6_pcie *artpec6_pcie,
234 return 0; 234 return 0;
235} 235}
236 236
237static const struct dw_pcie_ops dw_pcie_ops = {
238};
239
237static int artpec6_pcie_probe(struct platform_device *pdev) 240static int artpec6_pcie_probe(struct platform_device *pdev)
238{ 241{
239 struct device *dev = &pdev->dev; 242 struct device *dev = &pdev->dev;
@@ -252,6 +255,7 @@ static int artpec6_pcie_probe(struct platform_device *pdev)
252 return -ENOMEM; 255 return -ENOMEM;
253 256
254 pci->dev = dev; 257 pci->dev = dev;
258 pci->ops = &dw_pcie_ops;
255 259
256 artpec6_pcie->pci = pci; 260 artpec6_pcie->pci = pci;
257 261
diff --git a/drivers/pci/dwc/pcie-designware-plat.c b/drivers/pci/dwc/pcie-designware-plat.c
index b6c832ba39dd..f20d494922ab 100644
--- a/drivers/pci/dwc/pcie-designware-plat.c
+++ b/drivers/pci/dwc/pcie-designware-plat.c
@@ -86,6 +86,9 @@ static int dw_plat_add_pcie_port(struct pcie_port *pp,
86 return 0; 86 return 0;
87} 87}
88 88
89static const struct dw_pcie_ops dw_pcie_ops = {
90};
91
89static int dw_plat_pcie_probe(struct platform_device *pdev) 92static int dw_plat_pcie_probe(struct platform_device *pdev)
90{ 93{
91 struct device *dev = &pdev->dev; 94 struct device *dev = &pdev->dev;
@@ -103,6 +106,7 @@ static int dw_plat_pcie_probe(struct platform_device *pdev)
103 return -ENOMEM; 106 return -ENOMEM;
104 107
105 pci->dev = dev; 108 pci->dev = dev;
109 pci->ops = &dw_pcie_ops;
106 110
107 dw_plat_pcie->pci = pci; 111 dw_plat_pcie->pci = pci;
108 112
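
Both DWC drivers above gain an empty dw_pcie_ops for the same apparent reason: the DesignWare core dereferences pci->ops before testing individual hooks, so a driver with no custom accessors must still install a non-NULL table. A generic sketch of that defensive pattern (all names hypothetical):

#include <linux/types.h>

struct my_dev;

struct my_ops {
	u32 (*read)(struct my_dev *dev);	/* every hook is optional */
};

struct my_dev {
	const struct my_ops *ops;		/* must never be NULL */
};

/* Intentionally empty: the table must exist even if no hook does. */
static const struct my_ops my_empty_ops;

static void my_probe(struct my_dev *dev)
{
	dev->ops = &my_empty_ops;		/* instead of leaving it NULL */
}

static u32 my_read(struct my_dev *dev)
{
	/* ops is dereferenced unconditionally; only hooks are tested. */
	if (dev->ops->read)
		return dev->ops->read(dev);

	return 0;				/* generic default path */
}
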
diff --git a/drivers/pci/host/pci-thunder-pem.c b/drivers/pci/host/pci-thunder-pem.c
index 52b5bdccf5f0..6e031b522529 100644
--- a/drivers/pci/host/pci-thunder-pem.c
+++ b/drivers/pci/host/pci-thunder-pem.c
@@ -14,6 +14,7 @@
14 * Copyright (C) 2015 - 2016 Cavium, Inc. 14 * Copyright (C) 2015 - 2016 Cavium, Inc.
15 */ 15 */
16 16
17#include <linux/bitfield.h>
17#include <linux/kernel.h> 18#include <linux/kernel.h>
18#include <linux/init.h> 19#include <linux/init.h>
19#include <linux/of_address.h> 20#include <linux/of_address.h>
@@ -334,6 +335,49 @@ static int thunder_pem_init(struct device *dev, struct pci_config_window *cfg,
334 335
335#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS) 336#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)
336 337
338#define PEM_RES_BASE 0x87e0c0000000UL
339#define PEM_NODE_MASK GENMASK(45, 44)
340#define PEM_INDX_MASK GENMASK(26, 24)
341#define PEM_MIN_DOM_IN_NODE 4
342#define PEM_MAX_DOM_IN_NODE 10
343
344static void thunder_pem_reserve_range(struct device *dev, int seg,
345 struct resource *r)
346{
347 resource_size_t start = r->start, end = r->end;
348 struct resource *res;
349 const char *regionid;
350
351 regionid = kasprintf(GFP_KERNEL, "PEM RC:%d", seg);
352 if (!regionid)
353 return;
354
355 res = request_mem_region(start, end - start + 1, regionid);
356 if (res)
357 res->flags &= ~IORESOURCE_BUSY;
358 else
359 kfree(regionid);
360
361 dev_info(dev, "%pR %s reserved\n", r,
362 res ? "has been" : "could not be");
363}
364
365static void thunder_pem_legacy_fw(struct acpi_pci_root *root,
366 struct resource *res_pem)
367{
368 int node = acpi_get_node(root->device->handle);
369 int index;
370
371 if (node == NUMA_NO_NODE)
372 node = 0;
373
374 index = root->segment - PEM_MIN_DOM_IN_NODE;
375 index -= node * PEM_MAX_DOM_IN_NODE;
376 res_pem->start = PEM_RES_BASE | FIELD_PREP(PEM_NODE_MASK, node) |
377 FIELD_PREP(PEM_INDX_MASK, index);
378 res_pem->flags = IORESOURCE_MEM;
379}
380
337static int thunder_pem_acpi_init(struct pci_config_window *cfg) 381static int thunder_pem_acpi_init(struct pci_config_window *cfg)
338{ 382{
339 struct device *dev = cfg->parent; 383 struct device *dev = cfg->parent;
@@ -346,10 +390,24 @@ static int thunder_pem_acpi_init(struct pci_config_window *cfg)
346 if (!res_pem) 390 if (!res_pem)
347 return -ENOMEM; 391 return -ENOMEM;
348 392
349 ret = acpi_get_rc_resources(dev, "THRX0002", root->segment, res_pem); 393 ret = acpi_get_rc_resources(dev, "CAVA02B", root->segment, res_pem);
394
395 /*
396 * If we fail to gather resources it means that we run with old
397 * FW where we need to calculate PEM-specific resources manually.
398 */
350 if (ret) { 399 if (ret) {
351 dev_err(dev, "can't get rc base address\n"); 400 thunder_pem_legacy_fw(root, res_pem);
352 return ret; 401 /*
402 * Reserve 64K size PEM specific resources. The full 16M range
403 * size is required for thunder_pem_init() call.
404 */
405 res_pem->end = res_pem->start + SZ_64K - 1;
406 thunder_pem_reserve_range(dev, root->segment, res_pem);
407 res_pem->end = res_pem->start + SZ_16M - 1;
408
409 /* Reserve PCI configuration space as well. */
410 thunder_pem_reserve_range(dev, root->segment, &cfg->res);
353 } 411 }
354 412
355 return thunder_pem_init(dev, cfg, res_pem); 413 return thunder_pem_init(dev, cfg, res_pem);
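
thunder_pem_legacy_fw() reconstructs the PEM register base that newer firmware would have reported: the NUMA node lands in bits 45:44 and the PEM index in bits 26:24 of a fixed base. GENMASK()/FIELD_PREP() from <linux/bitfield.h> express that packing without hand-written shifts. A self-contained example using the same masks (the DEMO_* names are local to the sketch; GENMASK_ULL keeps it correct on 32-bit builds too):

#include <linux/bitfield.h>
#include <linux/bitops.h>

#define DEMO_RES_BASE	0x87e0c0000000ULL
#define DEMO_NODE_MASK	GENMASK_ULL(45, 44)
#define DEMO_INDX_MASK	GENMASK_ULL(26, 24)

static u64 demo_pem_base(u64 node, u64 index)
{
	/* FIELD_PREP() shifts each value into its mask's position. */
	return DEMO_RES_BASE | FIELD_PREP(DEMO_NODE_MASK, node) |
	       FIELD_PREP(DEMO_INDX_MASK, index);
}

/* demo_pem_base(1, 2) == 0x87f0c2000000ULL */
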
diff --git a/drivers/pci/host/pcie-iproc-bcma.c b/drivers/pci/host/pcie-iproc-bcma.c
index bd4c9ec25edc..384c27e664fe 100644
--- a/drivers/pci/host/pcie-iproc-bcma.c
+++ b/drivers/pci/host/pcie-iproc-bcma.c
@@ -44,8 +44,7 @@ static int iproc_pcie_bcma_probe(struct bcma_device *bdev)
44{ 44{
45 struct device *dev = &bdev->dev; 45 struct device *dev = &bdev->dev;
46 struct iproc_pcie *pcie; 46 struct iproc_pcie *pcie;
47 LIST_HEAD(res); 47 LIST_HEAD(resources);
48 struct resource res_mem;
49 int ret; 48 int ret;
50 49
51 pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); 50 pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
@@ -63,22 +62,23 @@ static int iproc_pcie_bcma_probe(struct bcma_device *bdev)
63 62
64 pcie->base_addr = bdev->addr; 63 pcie->base_addr = bdev->addr;
65 64
66 res_mem.start = bdev->addr_s[0]; 65 pcie->mem.start = bdev->addr_s[0];
67 res_mem.end = bdev->addr_s[0] + SZ_128M - 1; 66 pcie->mem.end = bdev->addr_s[0] + SZ_128M - 1;
68 res_mem.name = "PCIe MEM space"; 67 pcie->mem.name = "PCIe MEM space";
69 res_mem.flags = IORESOURCE_MEM; 68 pcie->mem.flags = IORESOURCE_MEM;
70 pci_add_resource(&res, &res_mem); 69 pci_add_resource(&resources, &pcie->mem);
71 70
72 pcie->map_irq = iproc_pcie_bcma_map_irq; 71 pcie->map_irq = iproc_pcie_bcma_map_irq;
73 72
74 ret = iproc_pcie_setup(pcie, &res); 73 ret = iproc_pcie_setup(pcie, &resources);
75 if (ret) 74 if (ret) {
76 dev_err(dev, "PCIe controller setup failed\n"); 75 dev_err(dev, "PCIe controller setup failed\n");
77 76 pci_free_resource_list(&resources);
78 pci_free_resource_list(&res); 77 return ret;
78 }
79 79
80 bcma_set_drvdata(bdev, pcie); 80 bcma_set_drvdata(bdev, pcie);
81 return ret; 81 return 0;
82} 82}
83 83
84static void iproc_pcie_bcma_remove(struct bcma_device *bdev) 84static void iproc_pcie_bcma_remove(struct bcma_device *bdev)
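
Two things change in this probe: the MEM window moves from the probe stack into struct iproc_pcie, and the resource list is freed only on failure. pci_add_resource() links the list entry to the resource rather than copying it, so a stack-allocated struct resource would dangle the moment probe returned. A sketch of the safe shape (the window address is made up):

#include <linux/ioport.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/sizes.h>

struct my_pcie {
	struct resource mem;	/* must outlive probe: lives in drvdata */
};

static void my_add_mem_window(struct my_pcie *pcie,
			      struct list_head *resources)
{
	/* pci_add_resource() stores a pointer to the resource, so the
	 * resource itself must not be a probe-local variable. */
	pcie->mem.start = 0x20000000;		/* hypothetical window */
	pcie->mem.end = 0x20000000 + SZ_128M - 1;
	pcie->mem.name = "PCIe MEM space";
	pcie->mem.flags = IORESOURCE_MEM;
	pci_add_resource(resources, &pcie->mem);
}
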
diff --git a/drivers/pci/host/pcie-iproc-platform.c b/drivers/pci/host/pcie-iproc-platform.c
index f4909bb0b2ad..8c6a327ca6cd 100644
--- a/drivers/pci/host/pcie-iproc-platform.c
+++ b/drivers/pci/host/pcie-iproc-platform.c
@@ -51,7 +51,7 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
51 struct device_node *np = dev->of_node; 51 struct device_node *np = dev->of_node;
52 struct resource reg; 52 struct resource reg;
53 resource_size_t iobase = 0; 53 resource_size_t iobase = 0;
54 LIST_HEAD(res); 54 LIST_HEAD(resources);
55 int ret; 55 int ret;
56 56
57 pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); 57 pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
@@ -96,10 +96,10 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
96 pcie->phy = NULL; 96 pcie->phy = NULL;
97 } 97 }
98 98
99 ret = of_pci_get_host_bridge_resources(np, 0, 0xff, &res, &iobase); 99 ret = of_pci_get_host_bridge_resources(np, 0, 0xff, &resources,
100 &iobase);
100 if (ret) { 101 if (ret) {
101 dev_err(dev, 102 dev_err(dev, "unable to get PCI host bridge resources\n");
102 "unable to get PCI host bridge resources\n");
103 return ret; 103 return ret;
104 } 104 }
105 105
@@ -112,14 +112,15 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
112 pcie->map_irq = of_irq_parse_and_map_pci; 112 pcie->map_irq = of_irq_parse_and_map_pci;
113 } 113 }
114 114
115 ret = iproc_pcie_setup(pcie, &res); 115 ret = iproc_pcie_setup(pcie, &resources);
116 if (ret) 116 if (ret) {
117 dev_err(dev, "PCIe controller setup failed\n"); 117 dev_err(dev, "PCIe controller setup failed\n");
118 118 pci_free_resource_list(&resources);
119 pci_free_resource_list(&res); 119 return ret;
120 }
120 121
121 platform_set_drvdata(pdev, pcie); 122 platform_set_drvdata(pdev, pcie);
122 return ret; 123 return 0;
123} 124}
124 125
125static int iproc_pcie_pltfm_remove(struct platform_device *pdev) 126static int iproc_pcie_pltfm_remove(struct platform_device *pdev)
diff --git a/drivers/pci/host/pcie-iproc.h b/drivers/pci/host/pcie-iproc.h
index 04fed8e907f1..0bbe2ea44f3e 100644
--- a/drivers/pci/host/pcie-iproc.h
+++ b/drivers/pci/host/pcie-iproc.h
@@ -90,6 +90,7 @@ struct iproc_pcie {
90#ifdef CONFIG_ARM 90#ifdef CONFIG_ARM
91 struct pci_sys_data sysdata; 91 struct pci_sys_data sysdata;
92#endif 92#endif
93 struct resource mem;
93 struct pci_bus *root_bus; 94 struct pci_bus *root_bus;
94 struct phy *phy; 95 struct phy *phy;
95 int (*map_irq)(const struct pci_dev *, u8, u8); 96 int (*map_irq)(const struct pci_dev *, u8, u8);
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index dc5277ad1b5a..005cadb7a3f8 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -449,6 +449,7 @@ config PHY_QCOM_UFS
449config PHY_QCOM_USB_HS 449config PHY_QCOM_USB_HS
450 tristate "Qualcomm USB HS PHY module" 450 tristate "Qualcomm USB HS PHY module"
451 depends on USB_ULPI_BUS 451 depends on USB_ULPI_BUS
452 depends on EXTCON || !EXTCON # if EXTCON=m, this cannot be built-in
452 select GENERIC_PHY 453 select GENERIC_PHY
453 help 454 help
454 Support for the USB high-speed ULPI compliant phy on Qualcomm 455 Support for the USB high-speed ULPI compliant phy on Qualcomm
@@ -510,12 +511,4 @@ config PHY_MESON8B_USB2
510 and GXBB SoCs. 511 and GXBB SoCs.
511 If unsure, say N. 512 If unsure, say N.
512 513
513config PHY_NSP_USB3
514 tristate "Broadcom NorthStar plus USB3 PHY driver"
515 depends on OF && (ARCH_BCM_NSP || COMPILE_TEST)
516 select GENERIC_PHY
517 default ARCH_BCM_NSP
518 help
519 Enable this to support the Broadcom Northstar plus USB3 PHY.
520 If unsure, say N.
521endmenu 514endmenu
diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile
index e7b0feb1e125..dd8f3b5d2918 100644
--- a/drivers/phy/Makefile
+++ b/drivers/phy/Makefile
@@ -62,4 +62,3 @@ obj-$(CONFIG_PHY_CYGNUS_PCIE) += phy-bcm-cygnus-pcie.o
62obj-$(CONFIG_ARCH_TEGRA) += tegra/ 62obj-$(CONFIG_ARCH_TEGRA) += tegra/
63obj-$(CONFIG_PHY_NS2_PCIE) += phy-bcm-ns2-pcie.o 63obj-$(CONFIG_PHY_NS2_PCIE) += phy-bcm-ns2-pcie.o
64obj-$(CONFIG_PHY_MESON8B_USB2) += phy-meson8b-usb2.o 64obj-$(CONFIG_PHY_MESON8B_USB2) += phy-meson8b-usb2.o
65obj-$(CONFIG_PHY_NSP_USB3) += phy-bcm-nsp-usb3.o
diff --git a/drivers/phy/phy-bcm-nsp-usb3.c b/drivers/phy/phy-bcm-nsp-usb3.c
deleted file mode 100644
index 49024eaa5545..000000000000
--- a/drivers/phy/phy-bcm-nsp-usb3.c
+++ /dev/null
@@ -1,177 +0,0 @@
1/*
2 * Copyright (C) 2016 Broadcom
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License as
6 * published by the Free Software Foundation version 2.
7 *
8 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
9 * kind, whether express or implied; without even the implied warranty
10 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include <linux/delay.h>
15#include <linux/io.h>
16#include <linux/kernel.h>
17#include <linux/mfd/syscon.h>
18#include <linux/mdio.h>
19#include <linux/module.h>
20#include <linux/of.h>
21#include <linux/of_address.h>
22#include <linux/phy/phy.h>
23#include <linux/regmap.h>
24
25#define NSP_USB3_RST_CTRL_OFFSET 0x3f8
26
27/* mdio reg access */
28#define NSP_USB3_PHY_BASE_ADDR_REG 0x1f
29
30#define NSP_USB3_PHY_PLL30_BLOCK 0x8000
31#define NSP_USB3_PLL_CONTROL 0x01
32#define NSP_USB3_PLLA_CONTROL0 0x0a
33#define NSP_USB3_PLLA_CONTROL1 0x0b
34
35#define NSP_USB3_PHY_TX_PMD_BLOCK 0x8040
36#define NSP_USB3_TX_PMD_CONTROL1 0x01
37
38#define NSP_USB3_PHY_PIPE_BLOCK 0x8060
39#define NSP_USB3_LFPS_CMP 0x02
40#define NSP_USB3_LFPS_DEGLITCH 0x03
41
42struct nsp_usb3_phy {
43 struct regmap *usb3_ctrl;
44 struct phy *phy;
45 struct mdio_device *mdiodev;
46};
47
48static int nsp_usb3_phy_init(struct phy *phy)
49{
50 struct nsp_usb3_phy *iphy = phy_get_drvdata(phy);
51 struct mii_bus *bus = iphy->mdiodev->bus;
52 int addr = iphy->mdiodev->addr;
53 u32 data;
54 int rc;
55
56 rc = regmap_read(iphy->usb3_ctrl, 0, &data);
57 if (rc)
58 return rc;
59 data |= 1;
60 rc = regmap_write(iphy->usb3_ctrl, 0, data);
61 if (rc)
62 return rc;
63
64 rc = regmap_write(iphy->usb3_ctrl, NSP_USB3_RST_CTRL_OFFSET, 1);
65 if (rc)
66 return rc;
67
68 rc = mdiobus_write(bus, addr, NSP_USB3_PHY_BASE_ADDR_REG,
69 NSP_USB3_PHY_PLL30_BLOCK);
70 if (rc)
71 return rc;
72
73 rc = mdiobus_write(bus, addr, NSP_USB3_PLL_CONTROL, 0x1000);
74 if (rc)
75 return rc;
76
77 rc = mdiobus_write(bus, addr, NSP_USB3_PLLA_CONTROL0, 0x6400);
78 if (rc)
79 return rc;
80
81 rc = mdiobus_write(bus, addr, NSP_USB3_PLLA_CONTROL1, 0xc000);
82 if (rc)
83 return rc;
84
85 rc = mdiobus_write(bus, addr, NSP_USB3_PLLA_CONTROL1, 0x8000);
86 if (rc)
87 return rc;
88
89 rc = regmap_write(iphy->usb3_ctrl, NSP_USB3_RST_CTRL_OFFSET, 0);
90 if (rc)
91 return rc;
92
93 rc = mdiobus_write(bus, addr, NSP_USB3_PLL_CONTROL, 0x9000);
94 if (rc)
95 return rc;
96
97 rc = mdiobus_write(bus, addr, NSP_USB3_PHY_BASE_ADDR_REG,
98 NSP_USB3_PHY_PIPE_BLOCK);
99 if (rc)
100 return rc;
101
102 rc = mdiobus_write(bus, addr, NSP_USB3_LFPS_CMP, 0xf30d);
103 if (rc)
104 return rc;
105
106 rc = mdiobus_write(bus, addr, NSP_USB3_LFPS_DEGLITCH, 0x6302);
107 if (rc)
108 return rc;
109
110 rc = mdiobus_write(bus, addr, NSP_USB3_PHY_BASE_ADDR_REG,
111 NSP_USB3_PHY_TX_PMD_BLOCK);
112 if (rc)
113 return rc;
114
115 rc = mdiobus_write(bus, addr, NSP_USB3_TX_PMD_CONTROL1, 0x1003);
116
117 return rc;
118}
119
120static struct phy_ops nsp_usb3_phy_ops = {
121 .init = nsp_usb3_phy_init,
122 .owner = THIS_MODULE,
123};
124
125static int nsp_usb3_phy_probe(struct mdio_device *mdiodev)
126{
127 struct device *dev = &mdiodev->dev;
128 struct phy_provider *provider;
129 struct nsp_usb3_phy *iphy;
130
131 iphy = devm_kzalloc(dev, sizeof(*iphy), GFP_KERNEL);
132 if (!iphy)
133 return -ENOMEM;
134 iphy->mdiodev = mdiodev;
135
136 iphy->usb3_ctrl = syscon_regmap_lookup_by_phandle(dev->of_node,
137 "usb3-ctrl-syscon");
138 if (IS_ERR(iphy->usb3_ctrl))
139 return PTR_ERR(iphy->usb3_ctrl);
140
141 iphy->phy = devm_phy_create(dev, dev->of_node, &nsp_usb3_phy_ops);
142 if (IS_ERR(iphy->phy)) {
143 dev_err(dev, "failed to create PHY\n");
144 return PTR_ERR(iphy->phy);
145 }
146
147 phy_set_drvdata(iphy->phy, iphy);
148
149 provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
150 if (IS_ERR(provider)) {
151 dev_err(dev, "could not register PHY provider\n");
152 return PTR_ERR(provider);
153 }
154
155 return 0;
156}
157
158static const struct of_device_id nsp_usb3_phy_of_match[] = {
159 {.compatible = "brcm,nsp-usb3-phy",},
160 { /* sentinel */ }
161};
162
163static struct mdio_driver nsp_usb3_phy_driver = {
164 .mdiodrv = {
165 .driver = {
166 .name = "nsp-usb3-phy",
167 .of_match_table = nsp_usb3_phy_of_match,
168 },
169 },
170 .probe = nsp_usb3_phy_probe,
171};
172
173mdio_module_driver(nsp_usb3_phy_driver);
174
175MODULE_DESCRIPTION("Broadcom NSP USB3 PHY driver");
176MODULE_LICENSE("GPL v2");
177MODULE_AUTHOR("Yendapally Reddy Dhananjaya Reddy <yendapally.reddy@broadcom.com");
diff --git a/drivers/phy/phy-exynos-pcie.c b/drivers/phy/phy-exynos-pcie.c
index 4f60b83641d5..60baf25d98e2 100644
--- a/drivers/phy/phy-exynos-pcie.c
+++ b/drivers/phy/phy-exynos-pcie.c
@@ -254,8 +254,8 @@ static int exynos_pcie_phy_probe(struct platform_device *pdev)
254 254
255 res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 255 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
256 exynos_phy->blk_base = devm_ioremap_resource(dev, res); 256 exynos_phy->blk_base = devm_ioremap_resource(dev, res);
257 if (IS_ERR(exynos_phy->phy_base)) 257 if (IS_ERR(exynos_phy->blk_base))
258 return PTR_ERR(exynos_phy->phy_base); 258 return PTR_ERR(exynos_phy->blk_base);
259 259
260 exynos_phy->drv_data = drv_data; 260 exynos_phy->drv_data = drv_data;
261 261
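
A classic copy-paste slip: the blk_base mapping was error-checked through phy_base, so a failed second ioremap went unnoticed while a successful one could be misreported. devm_ioremap_resource() returns an ERR_PTR, and the check must target the value that was just assigned. The idiom, sketched as a standalone helper:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int my_map_two(struct platform_device *pdev,
		      void __iomem **phy_base, void __iomem **blk_base)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	*phy_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(*phy_base))
		return PTR_ERR(*phy_base);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	*blk_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(*blk_base))		/* test what was just assigned */
		return PTR_ERR(*blk_base);

	return 0;
}
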
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index d69046537b75..32822b0d9cd0 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -2010,29 +2010,57 @@ out_err:
2010 return ERR_PTR(ret); 2010 return ERR_PTR(ret);
2011} 2011}
2012 2012
2013static int pinctrl_create_and_start(struct pinctrl_dev *pctldev) 2013static int pinctrl_claim_hogs(struct pinctrl_dev *pctldev)
2014{ 2014{
2015 pctldev->p = create_pinctrl(pctldev->dev, pctldev); 2015 pctldev->p = create_pinctrl(pctldev->dev, pctldev);
2016 if (!IS_ERR(pctldev->p)) { 2016 if (PTR_ERR(pctldev->p) == -ENODEV) {
2017 kref_get(&pctldev->p->users); 2017 dev_dbg(pctldev->dev, "no hogs found\n");
2018 pctldev->hog_default =
2019 pinctrl_lookup_state(pctldev->p, PINCTRL_STATE_DEFAULT);
2020 if (IS_ERR(pctldev->hog_default)) {
2021 dev_dbg(pctldev->dev,
2022 "failed to lookup the default state\n");
2023 } else {
2024 if (pinctrl_select_state(pctldev->p,
2025 pctldev->hog_default))
2026 dev_err(pctldev->dev,
2027 "failed to select default state\n");
2028 }
2029 2018
2030 pctldev->hog_sleep = 2019 return 0;
2031 pinctrl_lookup_state(pctldev->p, 2020 }
2032 PINCTRL_STATE_SLEEP); 2021
2033 if (IS_ERR(pctldev->hog_sleep)) 2022 if (IS_ERR(pctldev->p)) {
2034 dev_dbg(pctldev->dev, 2023 dev_err(pctldev->dev, "error claiming hogs: %li\n",
2035 "failed to lookup the sleep state\n"); 2024 PTR_ERR(pctldev->p));
2025
2026 return PTR_ERR(pctldev->p);
2027 }
2028
2029 kref_get(&pctldev->p->users);
2030 pctldev->hog_default =
2031 pinctrl_lookup_state(pctldev->p, PINCTRL_STATE_DEFAULT);
2032 if (IS_ERR(pctldev->hog_default)) {
2033 dev_dbg(pctldev->dev,
2034 "failed to lookup the default state\n");
2035 } else {
2036 if (pinctrl_select_state(pctldev->p,
2037 pctldev->hog_default))
2038 dev_err(pctldev->dev,
2039 "failed to select default state\n");
2040 }
2041
2042 pctldev->hog_sleep =
2043 pinctrl_lookup_state(pctldev->p,
2044 PINCTRL_STATE_SLEEP);
2045 if (IS_ERR(pctldev->hog_sleep))
2046 dev_dbg(pctldev->dev,
2047 "failed to lookup the sleep state\n");
2048
2049 return 0;
2050}
2051
2052int pinctrl_enable(struct pinctrl_dev *pctldev)
2053{
2054 int error;
2055
2056 error = pinctrl_claim_hogs(pctldev);
2057 if (error) {
2058 dev_err(pctldev->dev, "could not claim hogs: %i\n",
2059 error);
2060 mutex_destroy(&pctldev->mutex);
2061 kfree(pctldev);
2062
2063 return error;
2036 } 2064 }
2037 2065
2038 mutex_lock(&pinctrldev_list_mutex); 2066 mutex_lock(&pinctrldev_list_mutex);
@@ -2043,6 +2071,7 @@ static int pinctrl_create_and_start(struct pinctrl_dev *pctldev)
2043 2071
2044 return 0; 2072 return 0;
2045} 2073}
2074EXPORT_SYMBOL_GPL(pinctrl_enable);
2046 2075
2047/** 2076/**
2048 * pinctrl_register() - register a pin controller device 2077 * pinctrl_register() - register a pin controller device
@@ -2065,25 +2094,30 @@ struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc,
2065 if (IS_ERR(pctldev)) 2094 if (IS_ERR(pctldev))
2066 return pctldev; 2095 return pctldev;
2067 2096
2068 error = pinctrl_create_and_start(pctldev); 2097 error = pinctrl_enable(pctldev);
2069 if (error) { 2098 if (error)
2070 mutex_destroy(&pctldev->mutex);
2071 kfree(pctldev);
2072
2073 return ERR_PTR(error); 2099 return ERR_PTR(error);
2074 }
2075 2100
2076 return pctldev; 2101 return pctldev;
2077 2102
2078} 2103}
2079EXPORT_SYMBOL_GPL(pinctrl_register); 2104EXPORT_SYMBOL_GPL(pinctrl_register);
2080 2105
2106/**
2107 * pinctrl_register_and_init() - register and init pin controller device
2108 * @pctldesc: descriptor for this pin controller
2109 * @dev: parent device for this pin controller
2110 * @driver_data: private pin controller data for this pin controller
2111 * @pctldev: pin controller device
2112 *
2113 * Note that pinctrl_enable() still needs to be manually called after
2114 * this once the driver is ready.
2115 */
2081int pinctrl_register_and_init(struct pinctrl_desc *pctldesc, 2116int pinctrl_register_and_init(struct pinctrl_desc *pctldesc,
2082 struct device *dev, void *driver_data, 2117 struct device *dev, void *driver_data,
2083 struct pinctrl_dev **pctldev) 2118 struct pinctrl_dev **pctldev)
2084{ 2119{
2085 struct pinctrl_dev *p; 2120 struct pinctrl_dev *p;
2086 int error;
2087 2121
2088 p = pinctrl_init_controller(pctldesc, dev, driver_data); 2122 p = pinctrl_init_controller(pctldesc, dev, driver_data);
2089 if (IS_ERR(p)) 2123 if (IS_ERR(p))
@@ -2097,15 +2131,6 @@ int pinctrl_register_and_init(struct pinctrl_desc *pctldesc,
2097 */ 2131 */
2098 *pctldev = p; 2132 *pctldev = p;
2099 2133
2100 error = pinctrl_create_and_start(p);
2101 if (error) {
2102 mutex_destroy(&p->mutex);
2103 kfree(p);
2104 *pctldev = NULL;
2105
2106 return error;
2107 }
2108
2109 return 0; 2134 return 0;
2110} 2135}
2111EXPORT_SYMBOL_GPL(pinctrl_register_and_init); 2136EXPORT_SYMBOL_GPL(pinctrl_register_and_init);
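
This is the core of the series: claiming hog pins moves out of registration and into the new exported pinctrl_enable(), so a driver that uses pinctrl_register_and_init() can finish its own setup before the framework starts programming hogs through it. The drivers touched below (imx, pinctrl-single, sh-pfc, ti-iodelay) all end their probe the same way; roughly, with my_pinctrl as a stand-in for driver state:

#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/pinctrl/pinctrl.h>

struct my_pinctrl {
	struct pinctrl_desc desc;	/* filled in before registering */
	struct pinctrl_dev *pctl;
};

static int my_probe(struct platform_device *pdev)
{
	struct my_pinctrl *drv;
	int ret;

	drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
	if (!drv)
		return -ENOMEM;

	/* Phase 1: register, but do not claim hogs yet. */
	ret = devm_pinctrl_register_and_init(&pdev->dev, &drv->desc,
					     drv, &drv->pctl);
	if (ret)
		return ret;

	/* ... driver-private setup that hog pins may depend on ... */

	/* Phase 2: claim hogs and expose the controller. */
	return pinctrl_enable(drv->pctl);
}
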
diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c
index a7ace9e1ad81..74bd90dfd7b1 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx.c
@@ -790,7 +790,7 @@ int imx_pinctrl_probe(struct platform_device *pdev,
790 790
791 dev_info(&pdev->dev, "initialized IMX pinctrl driver\n"); 791 dev_info(&pdev->dev, "initialized IMX pinctrl driver\n");
792 792
793 return 0; 793 return pinctrl_enable(ipctl->pctl);
794 794
795free: 795free:
796 imx_free_resources(ipctl); 796 imx_free_resources(ipctl);
diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
index 7671424d46cb..31a3a98d067c 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
@@ -667,11 +667,11 @@ static const char * const uart_ao_b_groups[] = {
667}; 667};
668 668
669static const char * const i2c_ao_groups[] = { 669static const char * const i2c_ao_groups[] = {
670 "i2c_sdk_ao", "i2c_sda_ao", 670 "i2c_sck_ao", "i2c_sda_ao",
671}; 671};
672 672
673static const char * const i2c_slave_ao_groups[] = { 673static const char * const i2c_slave_ao_groups[] = {
674 "i2c_slave_sdk_ao", "i2c_slave_sda_ao", 674 "i2c_slave_sck_ao", "i2c_slave_sda_ao",
675}; 675};
676 676
677static const char * const remote_input_ao_groups[] = { 677static const char * const remote_input_ao_groups[] = {
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index 8b2d45e85bae..9c267dcda094 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -1781,7 +1781,7 @@ static int pcs_probe(struct platform_device *pdev)
1781 dev_info(pcs->dev, "%i pins at pa %p size %u\n", 1781 dev_info(pcs->dev, "%i pins at pa %p size %u\n",
1782 pcs->desc.npins, pcs->base, pcs->size); 1782 pcs->desc.npins, pcs->base, pcs->size);
1783 1783
1784 return 0; 1784 return pinctrl_enable(pcs->pctl);
1785 1785
1786free: 1786free:
1787 pcs_free_resources(pcs); 1787 pcs_free_resources(pcs);
diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c
index 676efcc032d2..3ae8066bc127 100644
--- a/drivers/pinctrl/pinctrl-st.c
+++ b/drivers/pinctrl/pinctrl-st.c
@@ -1285,6 +1285,22 @@ static void st_gpio_irq_unmask(struct irq_data *d)
1285 writel(BIT(d->hwirq), bank->base + REG_PIO_SET_PMASK); 1285 writel(BIT(d->hwirq), bank->base + REG_PIO_SET_PMASK);
1286} 1286}
1287 1287
1288static int st_gpio_irq_request_resources(struct irq_data *d)
1289{
1290 struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
1291
1292 st_gpio_direction_input(gc, d->hwirq);
1293
1294 return gpiochip_lock_as_irq(gc, d->hwirq);
1295}
1296
1297static void st_gpio_irq_release_resources(struct irq_data *d)
1298{
1299 struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
1300
1301 gpiochip_unlock_as_irq(gc, d->hwirq);
1302}
1303
1288static int st_gpio_irq_set_type(struct irq_data *d, unsigned type) 1304static int st_gpio_irq_set_type(struct irq_data *d, unsigned type)
1289{ 1305{
1290 struct gpio_chip *gc = irq_data_get_irq_chip_data(d); 1306 struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
@@ -1438,12 +1454,14 @@ static struct gpio_chip st_gpio_template = {
1438}; 1454};
1439 1455
1440static struct irq_chip st_gpio_irqchip = { 1456static struct irq_chip st_gpio_irqchip = {
1441 .name = "GPIO", 1457 .name = "GPIO",
1442 .irq_disable = st_gpio_irq_mask, 1458 .irq_request_resources = st_gpio_irq_request_resources,
1443 .irq_mask = st_gpio_irq_mask, 1459 .irq_release_resources = st_gpio_irq_release_resources,
1444 .irq_unmask = st_gpio_irq_unmask, 1460 .irq_disable = st_gpio_irq_mask,
1445 .irq_set_type = st_gpio_irq_set_type, 1461 .irq_mask = st_gpio_irq_mask,
1446 .flags = IRQCHIP_SKIP_SET_WAKE, 1462 .irq_unmask = st_gpio_irq_unmask,
1463 .irq_set_type = st_gpio_irq_set_type,
1464 .flags = IRQCHIP_SKIP_SET_WAKE,
1447}; 1465};
1448 1466
1449static int st_gpiolib_register_bank(struct st_pinctrl *info, 1467static int st_gpiolib_register_bank(struct st_pinctrl *info,
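
The new hooks pair resource acquisition with release: before a line is used as an interrupt it is switched to input and locked so gpiolib refuses to hand it out as an output; release undoes the lock. The generic pairing looks like this (the direction callback is the chip's own):

#include <linux/irq.h>
#include <linux/gpio/driver.h>

static int my_gpio_irq_request_resources(struct irq_data *d)
{
	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);

	/* An IRQ line must be an input; use the chip's own callback. */
	gc->direction_input(gc, d->hwirq);

	/* Refuse future output requests on this line while it is an IRQ. */
	return gpiochip_lock_as_irq(gc, d->hwirq);
}

static void my_gpio_irq_release_resources(struct irq_data *d)
{
	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);

	gpiochip_unlock_as_irq(gc, d->hwirq);
}
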
diff --git a/drivers/pinctrl/qcom/pinctrl-ipq4019.c b/drivers/pinctrl/qcom/pinctrl-ipq4019.c
index b68ae424cee2..743d1f458205 100644
--- a/drivers/pinctrl/qcom/pinctrl-ipq4019.c
+++ b/drivers/pinctrl/qcom/pinctrl-ipq4019.c
@@ -405,6 +405,36 @@ static const struct msm_pingroup ipq4019_groups[] = {
405 PINGROUP(67, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), 405 PINGROUP(67, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
406 PINGROUP(68, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), 406 PINGROUP(68, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
407 PINGROUP(69, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), 407 PINGROUP(69, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
408 PINGROUP(70, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
409 PINGROUP(71, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
410 PINGROUP(72, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
411 PINGROUP(73, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
412 PINGROUP(74, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
413 PINGROUP(75, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
414 PINGROUP(76, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
415 PINGROUP(77, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
416 PINGROUP(78, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
417 PINGROUP(79, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
418 PINGROUP(80, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
419 PINGROUP(81, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
420 PINGROUP(82, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
421 PINGROUP(83, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
422 PINGROUP(84, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
423 PINGROUP(85, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
424 PINGROUP(86, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
425 PINGROUP(87, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
426 PINGROUP(88, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
427 PINGROUP(89, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
428 PINGROUP(90, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
429 PINGROUP(91, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
430 PINGROUP(92, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
431 PINGROUP(93, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
432 PINGROUP(94, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
433 PINGROUP(95, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
434 PINGROUP(96, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
435 PINGROUP(97, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
436 PINGROUP(98, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
437 PINGROUP(99, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
408}; 438};
409 439
410static const struct msm_pinctrl_soc_data ipq4019_pinctrl = { 440static const struct msm_pinctrl_soc_data ipq4019_pinctrl = {
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index c978be5eb9eb..273badd92561 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -609,10 +609,6 @@ static void msm_gpio_irq_unmask(struct irq_data *d)
609 609
610 raw_spin_lock_irqsave(&pctrl->lock, flags); 610 raw_spin_lock_irqsave(&pctrl->lock, flags);
611 611
612 val = readl(pctrl->regs + g->intr_status_reg);
613 val &= ~BIT(g->intr_status_bit);
614 writel(val, pctrl->regs + g->intr_status_reg);
615
616 val = readl(pctrl->regs + g->intr_cfg_reg); 612 val = readl(pctrl->regs + g->intr_cfg_reg);
617 val |= BIT(g->intr_enable_bit); 613 val |= BIT(g->intr_enable_bit);
618 writel(val, pctrl->regs + g->intr_cfg_reg); 614 writel(val, pctrl->regs + g->intr_cfg_reg);
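
The deleted write was acking the interrupt inside unmask: clearing the status register there throws away any edge that latched while the line was masked, so such an interrupt would never be delivered. Unmask should only re-enable delivery; roughly (register layout invented, locking elided):

#include <linux/io.h>
#include <linux/bitops.h>

static void my_irq_unmask(void __iomem *cfg_reg, unsigned int enable_bit)
{
	u32 val = readl(cfg_reg);

	writel(val | BIT(enable_bit), cfg_reg);
	/* Deliberately no write to the status register: acking belongs
	 * in the ack path, or a pending edge would be lost right here. */
}
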
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c
index f9ddba7decc1..d7aa22cff480 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.c
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.c
@@ -988,9 +988,16 @@ samsung_pinctrl_get_soc_data(struct samsung_pinctrl_drv_data *d,
988 988
989 for (i = 0; i < ctrl->nr_ext_resources + 1; i++) { 989 for (i = 0; i < ctrl->nr_ext_resources + 1; i++) {
990 res = platform_get_resource(pdev, IORESOURCE_MEM, i); 990 res = platform_get_resource(pdev, IORESOURCE_MEM, i);
991 virt_base[i] = devm_ioremap_resource(&pdev->dev, res); 991 if (!res) {
992 if (IS_ERR(virt_base[i])) 992 dev_err(&pdev->dev, "failed to get mem%d resource\n", i);
993 return ERR_CAST(virt_base[i]); 993 return ERR_PTR(-EINVAL);
994 }
995 virt_base[i] = devm_ioremap(&pdev->dev, res->start,
996 resource_size(res));
997 if (!virt_base[i]) {
998 dev_err(&pdev->dev, "failed to ioremap %pR\n", res);
999 return ERR_PTR(-EIO);
1000 }
994 } 1001 }
995 1002
996 bank = d->pin_banks; 1003 bank = d->pin_banks;
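
The swap from devm_ioremap_resource() to a raw devm_ioremap() is deliberate: the former also reserves the range with request_mem_region(), which fails when two devices map overlapping register windows, while the latter only maps. The error convention changes with it, from ERR_PTR to NULL, which is why the hunk rewrites both checks:

#include <linux/device.h>
#include <linux/io.h>
#include <linux/ioport.h>

static void __iomem *my_map_window(struct device *dev, struct resource *res)
{
	void __iomem *base;

	if (!res)
		return NULL;

	/* No request_mem_region(): overlapping windows stay mappable. */
	base = devm_ioremap(dev, res->start, resource_size(res));
	if (!base)	/* devm_ioremap() returns NULL, not an ERR_PTR */
		dev_err(dev, "failed to ioremap %pR\n", res);

	return base;
}
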
diff --git a/drivers/pinctrl/sh-pfc/pinctrl.c b/drivers/pinctrl/sh-pfc/pinctrl.c
index 08150a321be6..a70157f0acf4 100644
--- a/drivers/pinctrl/sh-pfc/pinctrl.c
+++ b/drivers/pinctrl/sh-pfc/pinctrl.c
@@ -816,6 +816,13 @@ int sh_pfc_register_pinctrl(struct sh_pfc *pfc)
816 pmx->pctl_desc.pins = pmx->pins; 816 pmx->pctl_desc.pins = pmx->pins;
817 pmx->pctl_desc.npins = pfc->info->nr_pins; 817 pmx->pctl_desc.npins = pfc->info->nr_pins;
818 818
819 return devm_pinctrl_register_and_init(pfc->dev, &pmx->pctl_desc, pmx, 819 ret = devm_pinctrl_register_and_init(pfc->dev, &pmx->pctl_desc, pmx,
820 &pmx->pctl); 820 &pmx->pctl);
821 if (ret) {
822 dev_err(pfc->dev, "could not register: %i\n", ret);
823
824 return ret;
825 }
826
827 return pinctrl_enable(pmx->pctl);
821} 828}
diff --git a/drivers/pinctrl/ti/Kconfig b/drivers/pinctrl/ti/Kconfig
index 815a88673d38..542077069391 100644
--- a/drivers/pinctrl/ti/Kconfig
+++ b/drivers/pinctrl/ti/Kconfig
@@ -1,6 +1,6 @@
1config PINCTRL_TI_IODELAY 1config PINCTRL_TI_IODELAY
2 tristate "TI IODelay Module pinconf driver" 2 tristate "TI IODelay Module pinconf driver"
3 depends on OF 3 depends on OF && (SOC_DRA7XX || COMPILE_TEST)
4 select GENERIC_PINCTRL_GROUPS 4 select GENERIC_PINCTRL_GROUPS
5 select GENERIC_PINMUX_FUNCTIONS 5 select GENERIC_PINMUX_FUNCTIONS
6 select GENERIC_PINCONF 6 select GENERIC_PINCONF
diff --git a/drivers/pinctrl/ti/pinctrl-ti-iodelay.c b/drivers/pinctrl/ti/pinctrl-ti-iodelay.c
index 717e3404900c..362c50918c13 100644
--- a/drivers/pinctrl/ti/pinctrl-ti-iodelay.c
+++ b/drivers/pinctrl/ti/pinctrl-ti-iodelay.c
@@ -893,6 +893,8 @@ static int ti_iodelay_probe(struct platform_device *pdev)
893 893
894 platform_set_drvdata(pdev, iod); 894 platform_set_drvdata(pdev, iod);
895 895
896 return pinctrl_enable(iod->pctl);
897
896exit_out: 898exit_out:
897 of_node_put(np); 899 of_node_put(np);
898 return ret; 900 return ret;
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
index 5be4783e40d4..dea98ffb6f60 100644
--- a/drivers/platform/x86/asus-nb-wmi.c
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -103,15 +103,6 @@ static struct quirk_entry quirk_asus_x200ca = {
103 .wapf = 2, 103 .wapf = 2,
104}; 104};
105 105
106static struct quirk_entry quirk_no_rfkill = {
107 .no_rfkill = true,
108};
109
110static struct quirk_entry quirk_no_rfkill_wapf4 = {
111 .wapf = 4,
112 .no_rfkill = true,
113};
114
115static struct quirk_entry quirk_asus_ux303ub = { 106static struct quirk_entry quirk_asus_ux303ub = {
116 .wmi_backlight_native = true, 107 .wmi_backlight_native = true,
117}; 108};
@@ -194,7 +185,7 @@ static const struct dmi_system_id asus_quirks[] = {
194 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), 185 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
195 DMI_MATCH(DMI_PRODUCT_NAME, "X456UA"), 186 DMI_MATCH(DMI_PRODUCT_NAME, "X456UA"),
196 }, 187 },
197 .driver_data = &quirk_no_rfkill_wapf4, 188 .driver_data = &quirk_asus_wapf4,
198 }, 189 },
199 { 190 {
200 .callback = dmi_matched, 191 .callback = dmi_matched,
@@ -203,7 +194,7 @@ static const struct dmi_system_id asus_quirks[] = {
203 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), 194 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
204 DMI_MATCH(DMI_PRODUCT_NAME, "X456UF"), 195 DMI_MATCH(DMI_PRODUCT_NAME, "X456UF"),
205 }, 196 },
206 .driver_data = &quirk_no_rfkill_wapf4, 197 .driver_data = &quirk_asus_wapf4,
207 }, 198 },
208 { 199 {
209 .callback = dmi_matched, 200 .callback = dmi_matched,
@@ -369,42 +360,6 @@ static const struct dmi_system_id asus_quirks[] = {
369 }, 360 },
370 { 361 {
371 .callback = dmi_matched, 362 .callback = dmi_matched,
372 .ident = "ASUSTeK COMPUTER INC. X555UB",
373 .matches = {
374 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
375 DMI_MATCH(DMI_PRODUCT_NAME, "X555UB"),
376 },
377 .driver_data = &quirk_no_rfkill,
378 },
379 {
380 .callback = dmi_matched,
381 .ident = "ASUSTeK COMPUTER INC. N552VW",
382 .matches = {
383 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
384 DMI_MATCH(DMI_PRODUCT_NAME, "N552VW"),
385 },
386 .driver_data = &quirk_no_rfkill,
387 },
388 {
389 .callback = dmi_matched,
390 .ident = "ASUSTeK COMPUTER INC. U303LB",
391 .matches = {
392 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
393 DMI_MATCH(DMI_PRODUCT_NAME, "U303LB"),
394 },
395 .driver_data = &quirk_no_rfkill,
396 },
397 {
398 .callback = dmi_matched,
399 .ident = "ASUSTeK COMPUTER INC. Z550MA",
400 .matches = {
401 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
402 DMI_MATCH(DMI_PRODUCT_NAME, "Z550MA"),
403 },
404 .driver_data = &quirk_no_rfkill,
405 },
406 {
407 .callback = dmi_matched,
408 .ident = "ASUSTeK COMPUTER INC. UX303UB", 363 .ident = "ASUSTeK COMPUTER INC. UX303UB",
409 .matches = { 364 .matches = {
410 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), 365 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index 43cb680adbb4..8fe5890bf539 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -159,6 +159,8 @@ MODULE_LICENSE("GPL");
159#define USB_INTEL_XUSB2PR 0xD0 159#define USB_INTEL_XUSB2PR 0xD0
160#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31 160#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31
161 161
162static const char * const ashs_ids[] = { "ATK4001", "ATK4002", NULL };
163
162struct bios_args { 164struct bios_args {
163 u32 arg0; 165 u32 arg0;
164 u32 arg1; 166 u32 arg1;
@@ -2051,6 +2053,16 @@ static int asus_wmi_fan_init(struct asus_wmi *asus)
2051 return 0; 2053 return 0;
2052} 2054}
2053 2055
2056static bool ashs_present(void)
2057{
2058 int i = 0;
2059 while (ashs_ids[i]) {
2060 if (acpi_dev_found(ashs_ids[i++]))
2061 return true;
2062 }
2063 return false;
2064}
2065
2054/* 2066/*
2055 * WMI Driver 2067 * WMI Driver
2056 */ 2068 */
@@ -2095,7 +2107,11 @@ static int asus_wmi_add(struct platform_device *pdev)
2095 if (err) 2107 if (err)
2096 goto fail_leds; 2108 goto fail_leds;
2097 2109
2098 if (!asus->driver->quirks->no_rfkill) { 2110 asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_WLAN, &result);
2111 if (result & (ASUS_WMI_DSTS_PRESENCE_BIT | ASUS_WMI_DSTS_USER_BIT))
2112 asus->driver->wlan_ctrl_by_user = 1;
2113
2114 if (!(asus->driver->wlan_ctrl_by_user && ashs_present())) {
2099 err = asus_wmi_rfkill_init(asus); 2115 err = asus_wmi_rfkill_init(asus);
2100 if (err) 2116 if (err)
2101 goto fail_rfkill; 2117 goto fail_rfkill;
@@ -2134,10 +2150,6 @@ static int asus_wmi_add(struct platform_device *pdev)
2134 if (err) 2150 if (err)
2135 goto fail_debugfs; 2151 goto fail_debugfs;
2136 2152
2137 asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_WLAN, &result);
2138 if (result & (ASUS_WMI_DSTS_PRESENCE_BIT | ASUS_WMI_DSTS_USER_BIT))
2139 asus->driver->wlan_ctrl_by_user = 1;
2140
2141 return 0; 2153 return 0;
2142 2154
2143fail_debugfs: 2155fail_debugfs:
diff --git a/drivers/platform/x86/asus-wmi.h b/drivers/platform/x86/asus-wmi.h
index fdff626c3b51..c9589d9342bb 100644
--- a/drivers/platform/x86/asus-wmi.h
+++ b/drivers/platform/x86/asus-wmi.h
@@ -39,7 +39,6 @@ struct key_entry;
39struct asus_wmi; 39struct asus_wmi;
40 40
41struct quirk_entry { 41struct quirk_entry {
42 bool no_rfkill;
43 bool hotplug_wireless; 42 bool hotplug_wireless;
44 bool scalar_panel_brightness; 43 bool scalar_panel_brightness;
45 bool store_backlight_power; 44 bool store_backlight_power;
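
Net effect of the asus changes above: the ever-growing no_rfkill DMI quirk list is replaced by a runtime test; when wireless is user-controlled and the firmware exposes an ASHS ACPI device, rfkill registration is skipped. acpi_dev_found() makes the probe a one-liner per HID; the detection loop in isolation:

#include <linux/acpi.h>

static const char * const my_ashs_ids[] = { "ATK4001", "ATK4002", NULL };

static bool my_ashs_present(void)
{
	int i;

	/* Walk the NULL-terminated HID list; true if a device with any
	 * of these ACPI hardware IDs exists on this machine. */
	for (i = 0; my_ashs_ids[i]; i++)
		if (acpi_dev_found(my_ashs_ids[i]))
			return true;

	return false;
}
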
diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
index 2b218b1d13e5..e12cc3504d48 100644
--- a/drivers/platform/x86/fujitsu-laptop.c
+++ b/drivers/platform/x86/fujitsu-laptop.c
@@ -78,18 +78,18 @@
78 78
79#define FUJITSU_LCD_N_LEVELS 8 79#define FUJITSU_LCD_N_LEVELS 8
80 80
81#define ACPI_FUJITSU_CLASS "fujitsu" 81#define ACPI_FUJITSU_CLASS "fujitsu"
82#define ACPI_FUJITSU_HID "FUJ02B1" 82#define ACPI_FUJITSU_BL_HID "FUJ02B1"
83#define ACPI_FUJITSU_DRIVER_NAME "Fujitsu laptop FUJ02B1 ACPI brightness driver" 83#define ACPI_FUJITSU_BL_DRIVER_NAME "Fujitsu laptop FUJ02B1 ACPI brightness driver"
84#define ACPI_FUJITSU_DEVICE_NAME "Fujitsu FUJ02B1" 84#define ACPI_FUJITSU_BL_DEVICE_NAME "Fujitsu FUJ02B1"
85#define ACPI_FUJITSU_HOTKEY_HID "FUJ02E3" 85#define ACPI_FUJITSU_LAPTOP_HID "FUJ02E3"
86#define ACPI_FUJITSU_HOTKEY_DRIVER_NAME "Fujitsu laptop FUJ02E3 ACPI hotkeys driver" 86#define ACPI_FUJITSU_LAPTOP_DRIVER_NAME "Fujitsu laptop FUJ02E3 ACPI hotkeys driver"
87#define ACPI_FUJITSU_HOTKEY_DEVICE_NAME "Fujitsu FUJ02E3" 87#define ACPI_FUJITSU_LAPTOP_DEVICE_NAME "Fujitsu FUJ02E3"
88 88
89#define ACPI_FUJITSU_NOTIFY_CODE1 0x80 89#define ACPI_FUJITSU_NOTIFY_CODE1 0x80
90 90
91/* FUNC interface - command values */ 91/* FUNC interface - command values */
92#define FUNC_RFKILL 0x1000 92#define FUNC_FLAGS 0x1000
93#define FUNC_LEDS 0x1001 93#define FUNC_LEDS 0x1001
94#define FUNC_BUTTONS 0x1002 94#define FUNC_BUTTONS 0x1002
95#define FUNC_BACKLIGHT 0x1004 95#define FUNC_BACKLIGHT 0x1004
@@ -97,6 +97,11 @@
97/* FUNC interface - responses */ 97/* FUNC interface - responses */
98#define UNSUPPORTED_CMD 0x80000000 98#define UNSUPPORTED_CMD 0x80000000
99 99
100/* FUNC interface - status flags */
101#define FLAG_RFKILL 0x020
102#define FLAG_LID 0x100
103#define FLAG_DOCK 0x200
104
100#if IS_ENABLED(CONFIG_LEDS_CLASS) 105#if IS_ENABLED(CONFIG_LEDS_CLASS)
101/* FUNC interface - LED control */ 106/* FUNC interface - LED control */
102#define FUNC_LED_OFF 0x1 107#define FUNC_LED_OFF 0x1
@@ -136,7 +141,7 @@
136#endif 141#endif
137 142
138/* Device controlling the backlight and associated keys */ 143/* Device controlling the backlight and associated keys */
139struct fujitsu_t { 144struct fujitsu_bl {
140 acpi_handle acpi_handle; 145 acpi_handle acpi_handle;
141 struct acpi_device *dev; 146 struct acpi_device *dev;
142 struct input_dev *input; 147 struct input_dev *input;
@@ -150,12 +155,12 @@ struct fujitsu_t {
150 unsigned int brightness_level; 155 unsigned int brightness_level;
151}; 156};
152 157
153static struct fujitsu_t *fujitsu; 158static struct fujitsu_bl *fujitsu_bl;
154static int use_alt_lcd_levels = -1; 159static int use_alt_lcd_levels = -1;
155static int disable_brightness_adjust = -1; 160static int disable_brightness_adjust = -1;
156 161
157/* Device used to access other hotkeys on the laptop */ 162/* Device used to access hotkeys and other features on the laptop */
158struct fujitsu_hotkey_t { 163struct fujitsu_laptop {
159 acpi_handle acpi_handle; 164 acpi_handle acpi_handle;
160 struct acpi_device *dev; 165 struct acpi_device *dev;
161 struct input_dev *input; 166 struct input_dev *input;
@@ -163,17 +168,15 @@ struct fujitsu_hotkey_t {
163 struct platform_device *pf_device; 168 struct platform_device *pf_device;
164 struct kfifo fifo; 169 struct kfifo fifo;
165 spinlock_t fifo_lock; 170 spinlock_t fifo_lock;
166 int rfkill_supported; 171 int flags_supported;
167 int rfkill_state; 172 int flags_state;
168 int logolamp_registered; 173 int logolamp_registered;
169 int kblamps_registered; 174 int kblamps_registered;
170 int radio_led_registered; 175 int radio_led_registered;
171 int eco_led_registered; 176 int eco_led_registered;
172}; 177};
173 178
174static struct fujitsu_hotkey_t *fujitsu_hotkey; 179static struct fujitsu_laptop *fujitsu_laptop;
175
176static void acpi_fujitsu_hotkey_notify(struct acpi_device *device, u32 event);
177 180
178#if IS_ENABLED(CONFIG_LEDS_CLASS) 181#if IS_ENABLED(CONFIG_LEDS_CLASS)
179static enum led_brightness logolamp_get(struct led_classdev *cdev); 182static enum led_brightness logolamp_get(struct led_classdev *cdev);
@@ -222,8 +225,6 @@ static struct led_classdev eco_led = {
222static u32 dbg_level = 0x03; 225static u32 dbg_level = 0x03;
223#endif 226#endif
224 227
225static void acpi_fujitsu_notify(struct acpi_device *device, u32 event);
226
227/* Fujitsu ACPI interface function */ 228/* Fujitsu ACPI interface function */
228 229
229static int call_fext_func(int cmd, int arg0, int arg1, int arg2) 230static int call_fext_func(int cmd, int arg0, int arg1, int arg2)
@@ -239,7 +240,7 @@ static int call_fext_func(int cmd, int arg0, int arg1, int arg2)
239 unsigned long long value; 240 unsigned long long value;
240 acpi_handle handle = NULL; 241 acpi_handle handle = NULL;
241 242
242 status = acpi_get_handle(fujitsu_hotkey->acpi_handle, "FUNC", &handle); 243 status = acpi_get_handle(fujitsu_laptop->acpi_handle, "FUNC", &handle);
243 if (ACPI_FAILURE(status)) { 244 if (ACPI_FAILURE(status)) {
244 vdbg_printk(FUJLAPTOP_DBG_ERROR, 245 vdbg_printk(FUJLAPTOP_DBG_ERROR,
245 "FUNC interface is not present\n"); 246 "FUNC interface is not present\n");
@@ -300,9 +301,9 @@ static int radio_led_set(struct led_classdev *cdev,
300 enum led_brightness brightness) 301 enum led_brightness brightness)
301{ 302{
302 if (brightness >= LED_FULL) 303 if (brightness >= LED_FULL)
303 return call_fext_func(FUNC_RFKILL, 0x5, RADIO_LED_ON, RADIO_LED_ON); 304 return call_fext_func(FUNC_FLAGS, 0x5, RADIO_LED_ON, RADIO_LED_ON);
304 else 305 else
305 return call_fext_func(FUNC_RFKILL, 0x5, RADIO_LED_ON, 0x0); 306 return call_fext_func(FUNC_FLAGS, 0x5, RADIO_LED_ON, 0x0);
306} 307}
307 308
308static int eco_led_set(struct led_classdev *cdev, 309static int eco_led_set(struct led_classdev *cdev,
@@ -346,7 +347,7 @@ static enum led_brightness radio_led_get(struct led_classdev *cdev)
346{ 347{
347 enum led_brightness brightness = LED_OFF; 348 enum led_brightness brightness = LED_OFF;
348 349
349 if (call_fext_func(FUNC_RFKILL, 0x4, 0x0, 0x0) & RADIO_LED_ON) 350 if (call_fext_func(FUNC_FLAGS, 0x4, 0x0, 0x0) & RADIO_LED_ON)
350 brightness = LED_FULL; 351 brightness = LED_FULL;
351 352
352 return brightness; 353 return brightness;
@@ -373,10 +374,10 @@ static int set_lcd_level(int level)
373 vdbg_printk(FUJLAPTOP_DBG_TRACE, "set lcd level via SBLL [%d]\n", 374 vdbg_printk(FUJLAPTOP_DBG_TRACE, "set lcd level via SBLL [%d]\n",
374 level); 375 level);
375 376
376 if (level < 0 || level >= fujitsu->max_brightness) 377 if (level < 0 || level >= fujitsu_bl->max_brightness)
377 return -EINVAL; 378 return -EINVAL;
378 379
379 status = acpi_get_handle(fujitsu->acpi_handle, "SBLL", &handle); 380 status = acpi_get_handle(fujitsu_bl->acpi_handle, "SBLL", &handle);
380 if (ACPI_FAILURE(status)) { 381 if (ACPI_FAILURE(status)) {
381 vdbg_printk(FUJLAPTOP_DBG_ERROR, "SBLL not present\n"); 382 vdbg_printk(FUJLAPTOP_DBG_ERROR, "SBLL not present\n");
382 return -ENODEV; 383 return -ENODEV;
@@ -398,10 +399,10 @@ static int set_lcd_level_alt(int level)
398 vdbg_printk(FUJLAPTOP_DBG_TRACE, "set lcd level via SBL2 [%d]\n", 399 vdbg_printk(FUJLAPTOP_DBG_TRACE, "set lcd level via SBL2 [%d]\n",
399 level); 400 level);
400 401
401 if (level < 0 || level >= fujitsu->max_brightness) 402 if (level < 0 || level >= fujitsu_bl->max_brightness)
402 return -EINVAL; 403 return -EINVAL;
403 404
404 status = acpi_get_handle(fujitsu->acpi_handle, "SBL2", &handle); 405 status = acpi_get_handle(fujitsu_bl->acpi_handle, "SBL2", &handle);
405 if (ACPI_FAILURE(status)) { 406 if (ACPI_FAILURE(status)) {
406 vdbg_printk(FUJLAPTOP_DBG_ERROR, "SBL2 not present\n"); 407 vdbg_printk(FUJLAPTOP_DBG_ERROR, "SBL2 not present\n");
407 return -ENODEV; 408 return -ENODEV;
@@ -421,19 +422,19 @@ static int get_lcd_level(void)
421 422
422 vdbg_printk(FUJLAPTOP_DBG_TRACE, "get lcd level via GBLL\n"); 423 vdbg_printk(FUJLAPTOP_DBG_TRACE, "get lcd level via GBLL\n");
423 424
424 status = 425 status = acpi_evaluate_integer(fujitsu_bl->acpi_handle, "GBLL", NULL,
425 acpi_evaluate_integer(fujitsu->acpi_handle, "GBLL", NULL, &state); 426 &state);
426 if (ACPI_FAILURE(status)) 427 if (ACPI_FAILURE(status))
427 return 0; 428 return 0;
428 429
429 fujitsu->brightness_level = state & 0x0fffffff; 430 fujitsu_bl->brightness_level = state & 0x0fffffff;
430 431
431 if (state & 0x80000000) 432 if (state & 0x80000000)
432 fujitsu->brightness_changed = 1; 433 fujitsu_bl->brightness_changed = 1;
433 else 434 else
434 fujitsu->brightness_changed = 0; 435 fujitsu_bl->brightness_changed = 0;
435 436
436 return fujitsu->brightness_level; 437 return fujitsu_bl->brightness_level;
437} 438}
438 439
439static int get_max_brightness(void) 440static int get_max_brightness(void)
@@ -443,14 +444,14 @@ static int get_max_brightness(void)
443 444
444 vdbg_printk(FUJLAPTOP_DBG_TRACE, "get max lcd level via RBLL\n"); 445 vdbg_printk(FUJLAPTOP_DBG_TRACE, "get max lcd level via RBLL\n");
445 446
446 status = 447 status = acpi_evaluate_integer(fujitsu_bl->acpi_handle, "RBLL", NULL,
447 acpi_evaluate_integer(fujitsu->acpi_handle, "RBLL", NULL, &state); 448 &state);
448 if (ACPI_FAILURE(status)) 449 if (ACPI_FAILURE(status))
449 return -1; 450 return -1;
450 451
451 fujitsu->max_brightness = state; 452 fujitsu_bl->max_brightness = state;
452 453
453 return fujitsu->max_brightness; 454 return fujitsu_bl->max_brightness;
454} 455}
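
Both the GBLL and RBLL reads funnel through acpi_evaluate_integer(), which runs a control method and yields one integer. A self-contained sketch of the call shape, assuming only the documented ACPICA helper; the method name is the GBLL example from above:

#include <linux/acpi.h>
#include <linux/errno.h>

static int sketch_read_acpi_int(acpi_handle handle, unsigned long long *val)
{
	acpi_status status;

	/* NULL argument list: the method takes no parameters. */
	status = acpi_evaluate_integer(handle, "GBLL", NULL, val);
	if (ACPI_FAILURE(status))
		return -ENODEV;
	return 0;
}
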
455 456
456/* Backlight device stuff */ 457/* Backlight device stuff */
@@ -483,7 +484,7 @@ static int bl_update_status(struct backlight_device *b)
483 return ret; 484 return ret;
484} 485}
485 486
486static const struct backlight_ops fujitsubl_ops = { 487static const struct backlight_ops fujitsu_bl_ops = {
487 .get_brightness = bl_get_brightness, 488 .get_brightness = bl_get_brightness,
488 .update_status = bl_update_status, 489 .update_status = bl_update_status,
489}; 490};
@@ -511,7 +512,7 @@ show_brightness_changed(struct device *dev,
511 512
512 int ret; 513 int ret;
513 514
514 ret = fujitsu->brightness_changed; 515 ret = fujitsu_bl->brightness_changed;
515 if (ret < 0) 516 if (ret < 0)
516 return ret; 517 return ret;
517 518
@@ -539,7 +540,7 @@ static ssize_t store_lcd_level(struct device *dev,
539 int level, ret; 540 int level, ret;
540 541
541 if (sscanf(buf, "%i", &level) != 1 542 if (sscanf(buf, "%i", &level) != 1
542 || (level < 0 || level >= fujitsu->max_brightness)) 543 || (level < 0 || level >= fujitsu_bl->max_brightness))
543 return -EINVAL; 544 return -EINVAL;
544 545
545 if (use_alt_lcd_levels) 546 if (use_alt_lcd_levels)
@@ -567,9 +568,9 @@ static ssize_t
567show_lid_state(struct device *dev, 568show_lid_state(struct device *dev,
568 struct device_attribute *attr, char *buf) 569 struct device_attribute *attr, char *buf)
569{ 570{
570 if (!(fujitsu_hotkey->rfkill_supported & 0x100)) 571 if (!(fujitsu_laptop->flags_supported & FLAG_LID))
571 return sprintf(buf, "unknown\n"); 572 return sprintf(buf, "unknown\n");
572 if (fujitsu_hotkey->rfkill_state & 0x100) 573 if (fujitsu_laptop->flags_state & FLAG_LID)
573 return sprintf(buf, "open\n"); 574 return sprintf(buf, "open\n");
574 else 575 else
575 return sprintf(buf, "closed\n"); 576 return sprintf(buf, "closed\n");
@@ -579,9 +580,9 @@ static ssize_t
579show_dock_state(struct device *dev, 580show_dock_state(struct device *dev,
580 struct device_attribute *attr, char *buf) 581 struct device_attribute *attr, char *buf)
581{ 582{
582 if (!(fujitsu_hotkey->rfkill_supported & 0x200)) 583 if (!(fujitsu_laptop->flags_supported & FLAG_DOCK))
583 return sprintf(buf, "unknown\n"); 584 return sprintf(buf, "unknown\n");
584 if (fujitsu_hotkey->rfkill_state & 0x200) 585 if (fujitsu_laptop->flags_state & FLAG_DOCK)
585 return sprintf(buf, "docked\n"); 586 return sprintf(buf, "docked\n");
586 else 587 else
587 return sprintf(buf, "undocked\n"); 588 return sprintf(buf, "undocked\n");
@@ -591,9 +592,9 @@ static ssize_t
591show_radios_state(struct device *dev, 592show_radios_state(struct device *dev,
592 struct device_attribute *attr, char *buf) 593 struct device_attribute *attr, char *buf)
593{ 594{
594 if (!(fujitsu_hotkey->rfkill_supported & 0x20)) 595 if (!(fujitsu_laptop->flags_supported & FLAG_RFKILL))
595 return sprintf(buf, "unknown\n"); 596 return sprintf(buf, "unknown\n");
596 if (fujitsu_hotkey->rfkill_state & 0x20) 597 if (fujitsu_laptop->flags_state & FLAG_RFKILL)
597 return sprintf(buf, "on\n"); 598 return sprintf(buf, "on\n");
598 else 599 else
599 return sprintf(buf, "killed\n"); 600 return sprintf(buf, "killed\n");
@@ -607,7 +608,7 @@ static DEVICE_ATTR(lid, 0444, show_lid_state, ignore_store);
607static DEVICE_ATTR(dock, 0444, show_dock_state, ignore_store); 608static DEVICE_ATTR(dock, 0444, show_dock_state, ignore_store);
608static DEVICE_ATTR(radios, 0444, show_radios_state, ignore_store); 609static DEVICE_ATTR(radios, 0444, show_radios_state, ignore_store);
609 610
610static struct attribute *fujitsupf_attributes[] = { 611static struct attribute *fujitsu_pf_attributes[] = {
611 &dev_attr_brightness_changed.attr, 612 &dev_attr_brightness_changed.attr,
612 &dev_attr_max_brightness.attr, 613 &dev_attr_max_brightness.attr,
613 &dev_attr_lcd_level.attr, 614 &dev_attr_lcd_level.attr,
@@ -617,11 +618,11 @@ static struct attribute *fujitsupf_attributes[] = {
617 NULL 618 NULL
618}; 619};
619 620
620static struct attribute_group fujitsupf_attribute_group = { 621static struct attribute_group fujitsu_pf_attribute_group = {
621 .attrs = fujitsupf_attributes 622 .attrs = fujitsu_pf_attributes
622}; 623};
623 624
624static struct platform_driver fujitsupf_driver = { 625static struct platform_driver fujitsu_pf_driver = {
625 .driver = { 626 .driver = {
626 .name = "fujitsu-laptop", 627 .name = "fujitsu-laptop",
627 } 628 }
@@ -630,39 +631,30 @@ static struct platform_driver fujitsupf_driver = {
630static void __init dmi_check_cb_common(const struct dmi_system_id *id) 631static void __init dmi_check_cb_common(const struct dmi_system_id *id)
631{ 632{
632 pr_info("Identified laptop model '%s'\n", id->ident); 633 pr_info("Identified laptop model '%s'\n", id->ident);
633 if (use_alt_lcd_levels == -1) {
634 if (acpi_has_method(NULL,
635 "\\_SB.PCI0.LPCB.FJEX.SBL2"))
636 use_alt_lcd_levels = 1;
637 else
638 use_alt_lcd_levels = 0;
639 vdbg_printk(FUJLAPTOP_DBG_TRACE, "auto-detected usealt as "
640 "%i\n", use_alt_lcd_levels);
641 }
642} 634}
643 635
644static int __init dmi_check_cb_s6410(const struct dmi_system_id *id) 636static int __init dmi_check_cb_s6410(const struct dmi_system_id *id)
645{ 637{
646 dmi_check_cb_common(id); 638 dmi_check_cb_common(id);
647 fujitsu->keycode1 = KEY_SCREENLOCK; /* "Lock" */ 639 fujitsu_bl->keycode1 = KEY_SCREENLOCK; /* "Lock" */
648 fujitsu->keycode2 = KEY_HELP; /* "Mobility Center" */ 640 fujitsu_bl->keycode2 = KEY_HELP; /* "Mobility Center" */
649 return 1; 641 return 1;
650} 642}
651 643
652static int __init dmi_check_cb_s6420(const struct dmi_system_id *id) 644static int __init dmi_check_cb_s6420(const struct dmi_system_id *id)
653{ 645{
654 dmi_check_cb_common(id); 646 dmi_check_cb_common(id);
655 fujitsu->keycode1 = KEY_SCREENLOCK; /* "Lock" */ 647 fujitsu_bl->keycode1 = KEY_SCREENLOCK; /* "Lock" */
656 fujitsu->keycode2 = KEY_HELP; /* "Mobility Center" */ 648 fujitsu_bl->keycode2 = KEY_HELP; /* "Mobility Center" */
657 return 1; 649 return 1;
658} 650}
659 651
660static int __init dmi_check_cb_p8010(const struct dmi_system_id *id) 652static int __init dmi_check_cb_p8010(const struct dmi_system_id *id)
661{ 653{
662 dmi_check_cb_common(id); 654 dmi_check_cb_common(id);
663 fujitsu->keycode1 = KEY_HELP; /* "Support" */ 655 fujitsu_bl->keycode1 = KEY_HELP; /* "Support" */
664 fujitsu->keycode3 = KEY_SWITCHVIDEOMODE; /* "Presentation" */ 656 fujitsu_bl->keycode3 = KEY_SWITCHVIDEOMODE; /* "Presentation" */
665 fujitsu->keycode4 = KEY_WWW; /* "Internet" */ 657 fujitsu_bl->keycode4 = KEY_WWW; /* "Internet" */
666 return 1; 658 return 1;
667} 659}
668 660
@@ -693,7 +685,7 @@ static const struct dmi_system_id fujitsu_dmi_table[] __initconst = {
693 685
694/* ACPI device for LCD brightness control */ 686/* ACPI device for LCD brightness control */
695 687
696static int acpi_fujitsu_add(struct acpi_device *device) 688static int acpi_fujitsu_bl_add(struct acpi_device *device)
697{ 689{
698 int state = 0; 690 int state = 0;
699 struct input_dev *input; 691 struct input_dev *input;
@@ -702,22 +694,22 @@ static int acpi_fujitsu_add(struct acpi_device *device)
702 if (!device) 694 if (!device)
703 return -EINVAL; 695 return -EINVAL;
704 696
705 fujitsu->acpi_handle = device->handle; 697 fujitsu_bl->acpi_handle = device->handle;
706 sprintf(acpi_device_name(device), "%s", ACPI_FUJITSU_DEVICE_NAME); 698 sprintf(acpi_device_name(device), "%s", ACPI_FUJITSU_BL_DEVICE_NAME);
707 sprintf(acpi_device_class(device), "%s", ACPI_FUJITSU_CLASS); 699 sprintf(acpi_device_class(device), "%s", ACPI_FUJITSU_CLASS);
708 device->driver_data = fujitsu; 700 device->driver_data = fujitsu_bl;
709 701
710 fujitsu->input = input = input_allocate_device(); 702 fujitsu_bl->input = input = input_allocate_device();
711 if (!input) { 703 if (!input) {
712 error = -ENOMEM; 704 error = -ENOMEM;
713 goto err_stop; 705 goto err_stop;
714 } 706 }
715 707
716 snprintf(fujitsu->phys, sizeof(fujitsu->phys), 708 snprintf(fujitsu_bl->phys, sizeof(fujitsu_bl->phys),
717 "%s/video/input0", acpi_device_hid(device)); 709 "%s/video/input0", acpi_device_hid(device));
718 710
719 input->name = acpi_device_name(device); 711 input->name = acpi_device_name(device);
720 input->phys = fujitsu->phys; 712 input->phys = fujitsu_bl->phys;
721 input->id.bustype = BUS_HOST; 713 input->id.bustype = BUS_HOST;
722 input->id.product = 0x06; 714 input->id.product = 0x06;
723 input->dev.parent = &device->dev; 715 input->dev.parent = &device->dev;
@@ -730,7 +722,7 @@ static int acpi_fujitsu_add(struct acpi_device *device)
730 if (error) 722 if (error)
731 goto err_free_input_dev; 723 goto err_free_input_dev;
732 724
733 error = acpi_bus_update_power(fujitsu->acpi_handle, &state); 725 error = acpi_bus_update_power(fujitsu_bl->acpi_handle, &state);
734 if (error) { 726 if (error) {
735 pr_err("Error reading power state\n"); 727 pr_err("Error reading power state\n");
736 goto err_unregister_input_dev; 728 goto err_unregister_input_dev;
@@ -740,7 +732,7 @@ static int acpi_fujitsu_add(struct acpi_device *device)
740 acpi_device_name(device), acpi_device_bid(device), 732 acpi_device_name(device), acpi_device_bid(device),
741 !device->power.state ? "on" : "off"); 733 !device->power.state ? "on" : "off");
742 734
743 fujitsu->dev = device; 735 fujitsu_bl->dev = device;
744 736
745 if (acpi_has_method(device->handle, METHOD_NAME__INI)) { 737 if (acpi_has_method(device->handle, METHOD_NAME__INI)) {
746 vdbg_printk(FUJLAPTOP_DBG_INFO, "Invoking _INI\n"); 738 vdbg_printk(FUJLAPTOP_DBG_INFO, "Invoking _INI\n");
@@ -750,6 +742,15 @@ static int acpi_fujitsu_add(struct acpi_device *device)
750 pr_err("_INI Method failed\n"); 742 pr_err("_INI Method failed\n");
751 } 743 }
752 744
745 if (use_alt_lcd_levels == -1) {
746 if (acpi_has_method(NULL, "\\_SB.PCI0.LPCB.FJEX.SBL2"))
747 use_alt_lcd_levels = 1;
748 else
749 use_alt_lcd_levels = 0;
750 vdbg_printk(FUJLAPTOP_DBG_TRACE, "auto-detected usealt as %i\n",
751 use_alt_lcd_levels);
752 }
753
753 /* do config (detect defaults) */ 754 /* do config (detect defaults) */
754 use_alt_lcd_levels = use_alt_lcd_levels == 1 ? 1 : 0; 755 use_alt_lcd_levels = use_alt_lcd_levels == 1 ? 1 : 0;
755 disable_brightness_adjust = disable_brightness_adjust == 1 ? 1 : 0; 756 disable_brightness_adjust = disable_brightness_adjust == 1 ? 1 : 0;
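
Moving the SBL2 auto-detection out of the DMI callback into acpi_fujitsu_bl_add() works because acpi_has_method() only looks a name up in the ACPI namespace and never evaluates it. A sketch of the probe-by-absolute-path idiom, using the path quoted in the hunk:

#include <linux/acpi.h>

static bool sketch_have_sbl2(void)
{
	/* A NULL handle anchors the lookup at the namespace root, so
	 * an absolute path can be probed without any device handle. */
	return acpi_has_method(NULL, "\\_SB.PCI0.LPCB.FJEX.SBL2");
}
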
@@ -758,7 +759,7 @@ static int acpi_fujitsu_add(struct acpi_device *device)
758 use_alt_lcd_levels, disable_brightness_adjust); 759 use_alt_lcd_levels, disable_brightness_adjust);
759 760
760 if (get_max_brightness() <= 0) 761 if (get_max_brightness() <= 0)
761 fujitsu->max_brightness = FUJITSU_LCD_N_LEVELS; 762 fujitsu_bl->max_brightness = FUJITSU_LCD_N_LEVELS;
762 get_lcd_level(); 763 get_lcd_level();
763 764
764 return 0; 765 return 0;
@@ -772,38 +773,38 @@ err_stop:
772 return error; 773 return error;
773} 774}
774 775
775static int acpi_fujitsu_remove(struct acpi_device *device) 776static int acpi_fujitsu_bl_remove(struct acpi_device *device)
776{ 777{
777 struct fujitsu_t *fujitsu = acpi_driver_data(device); 778 struct fujitsu_bl *fujitsu_bl = acpi_driver_data(device);
778 struct input_dev *input = fujitsu->input; 779 struct input_dev *input = fujitsu_bl->input;
779 780
780 input_unregister_device(input); 781 input_unregister_device(input);
781 782
782 fujitsu->acpi_handle = NULL; 783 fujitsu_bl->acpi_handle = NULL;
783 784
784 return 0; 785 return 0;
785} 786}
786 787
787/* Brightness notify */ 788/* Brightness notify */
788 789
789static void acpi_fujitsu_notify(struct acpi_device *device, u32 event) 790static void acpi_fujitsu_bl_notify(struct acpi_device *device, u32 event)
790{ 791{
791 struct input_dev *input; 792 struct input_dev *input;
792 int keycode; 793 int keycode;
793 int oldb, newb; 794 int oldb, newb;
794 795
795 input = fujitsu->input; 796 input = fujitsu_bl->input;
796 797
797 switch (event) { 798 switch (event) {
798 case ACPI_FUJITSU_NOTIFY_CODE1: 799 case ACPI_FUJITSU_NOTIFY_CODE1:
799 keycode = 0; 800 keycode = 0;
800 oldb = fujitsu->brightness_level; 801 oldb = fujitsu_bl->brightness_level;
801 get_lcd_level(); 802 get_lcd_level();
802 newb = fujitsu->brightness_level; 803 newb = fujitsu_bl->brightness_level;
803 804
804 vdbg_printk(FUJLAPTOP_DBG_TRACE, 805 vdbg_printk(FUJLAPTOP_DBG_TRACE,
805 "brightness button event [%i -> %i (%i)]\n", 806 "brightness button event [%i -> %i (%i)]\n",
806 oldb, newb, fujitsu->brightness_changed); 807 oldb, newb, fujitsu_bl->brightness_changed);
807 808
808 if (oldb < newb) { 809 if (oldb < newb) {
809 if (disable_brightness_adjust != 1) { 810 if (disable_brightness_adjust != 1) {
@@ -840,7 +841,7 @@ static void acpi_fujitsu_notify(struct acpi_device *device, u32 event)
840 841
841/* ACPI device for hotkey handling */ 842/* ACPI device for hotkey handling */
842 843
843static int acpi_fujitsu_hotkey_add(struct acpi_device *device) 844static int acpi_fujitsu_laptop_add(struct acpi_device *device)
844{ 845{
845 int result = 0; 846 int result = 0;
846 int state = 0; 847 int state = 0;
@@ -851,42 +852,42 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
851 if (!device) 852 if (!device)
852 return -EINVAL; 853 return -EINVAL;
853 854
854 fujitsu_hotkey->acpi_handle = device->handle; 855 fujitsu_laptop->acpi_handle = device->handle;
855 sprintf(acpi_device_name(device), "%s", 856 sprintf(acpi_device_name(device), "%s",
856 ACPI_FUJITSU_HOTKEY_DEVICE_NAME); 857 ACPI_FUJITSU_LAPTOP_DEVICE_NAME);
857 sprintf(acpi_device_class(device), "%s", ACPI_FUJITSU_CLASS); 858 sprintf(acpi_device_class(device), "%s", ACPI_FUJITSU_CLASS);
858 device->driver_data = fujitsu_hotkey; 859 device->driver_data = fujitsu_laptop;
859 860
860 /* kfifo */ 861 /* kfifo */
861 spin_lock_init(&fujitsu_hotkey->fifo_lock); 862 spin_lock_init(&fujitsu_laptop->fifo_lock);
862 error = kfifo_alloc(&fujitsu_hotkey->fifo, RINGBUFFERSIZE * sizeof(int), 863 error = kfifo_alloc(&fujitsu_laptop->fifo, RINGBUFFERSIZE * sizeof(int),
863 GFP_KERNEL); 864 GFP_KERNEL);
864 if (error) { 865 if (error) {
865 pr_err("kfifo_alloc failed\n"); 866 pr_err("kfifo_alloc failed\n");
866 goto err_stop; 867 goto err_stop;
867 } 868 }
868 869
869 fujitsu_hotkey->input = input = input_allocate_device(); 870 fujitsu_laptop->input = input = input_allocate_device();
870 if (!input) { 871 if (!input) {
871 error = -ENOMEM; 872 error = -ENOMEM;
872 goto err_free_fifo; 873 goto err_free_fifo;
873 } 874 }
874 875
875 snprintf(fujitsu_hotkey->phys, sizeof(fujitsu_hotkey->phys), 876 snprintf(fujitsu_laptop->phys, sizeof(fujitsu_laptop->phys),
876 "%s/video/input0", acpi_device_hid(device)); 877 "%s/video/input0", acpi_device_hid(device));
877 878
878 input->name = acpi_device_name(device); 879 input->name = acpi_device_name(device);
879 input->phys = fujitsu_hotkey->phys; 880 input->phys = fujitsu_laptop->phys;
880 input->id.bustype = BUS_HOST; 881 input->id.bustype = BUS_HOST;
881 input->id.product = 0x06; 882 input->id.product = 0x06;
882 input->dev.parent = &device->dev; 883 input->dev.parent = &device->dev;
883 884
884 set_bit(EV_KEY, input->evbit); 885 set_bit(EV_KEY, input->evbit);
885 set_bit(fujitsu->keycode1, input->keybit); 886 set_bit(fujitsu_bl->keycode1, input->keybit);
886 set_bit(fujitsu->keycode2, input->keybit); 887 set_bit(fujitsu_bl->keycode2, input->keybit);
887 set_bit(fujitsu->keycode3, input->keybit); 888 set_bit(fujitsu_bl->keycode3, input->keybit);
888 set_bit(fujitsu->keycode4, input->keybit); 889 set_bit(fujitsu_bl->keycode4, input->keybit);
889 set_bit(fujitsu->keycode5, input->keybit); 890 set_bit(fujitsu_bl->keycode5, input->keybit);
890 set_bit(KEY_TOUCHPAD_TOGGLE, input->keybit); 891 set_bit(KEY_TOUCHPAD_TOGGLE, input->keybit);
891 set_bit(KEY_UNKNOWN, input->keybit); 892 set_bit(KEY_UNKNOWN, input->keybit);
892 893
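
The kfifo_alloc() call above sizes the hotkey ring buffer in bytes and pairs with the kfifo_in_locked()/kfifo_out_locked() calls in the press and release helpers further down. A self-contained sketch of that producer/consumer pattern; all sketch_* names are hypothetical and the element count mirrors RINGBUFFERSIZE:

#include <linux/gfp.h>
#include <linux/kfifo.h>
#include <linux/printk.h>
#include <linux/spinlock.h>

#define SKETCH_RINGSIZE 40		/* assumed element count */

static struct kfifo sketch_fifo;
static DEFINE_SPINLOCK(sketch_lock);

static int sketch_setup(void)
{
	return kfifo_alloc(&sketch_fifo, SKETCH_RINGSIZE * sizeof(int),
			   GFP_KERNEL);
}

static void sketch_push(int keycode)		/* producer side */
{
	if (kfifo_in_locked(&sketch_fifo, (unsigned char *)&keycode,
			    sizeof(keycode), &sketch_lock) != sizeof(keycode))
		pr_warn("sketch: fifo full, dropped 0x%x\n", keycode);
}

static bool sketch_pop(int *keycode)		/* consumer side */
{
	return kfifo_out_locked(&sketch_fifo, (unsigned char *)keycode,
				sizeof(*keycode), &sketch_lock)
		== sizeof(*keycode);
}
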
@@ -894,7 +895,7 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
894 if (error) 895 if (error)
895 goto err_free_input_dev; 896 goto err_free_input_dev;
896 897
897 error = acpi_bus_update_power(fujitsu_hotkey->acpi_handle, &state); 898 error = acpi_bus_update_power(fujitsu_laptop->acpi_handle, &state);
898 if (error) { 899 if (error) {
899 pr_err("Error reading power state\n"); 900 pr_err("Error reading power state\n");
900 goto err_unregister_input_dev; 901 goto err_unregister_input_dev;
@@ -904,7 +905,7 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
904 acpi_device_name(device), acpi_device_bid(device), 905 acpi_device_name(device), acpi_device_bid(device),
905 !device->power.state ? "on" : "off"); 906 !device->power.state ? "on" : "off");
906 907
907 fujitsu_hotkey->dev = device; 908 fujitsu_laptop->dev = device;
908 909
909 if (acpi_has_method(device->handle, METHOD_NAME__INI)) { 910 if (acpi_has_method(device->handle, METHOD_NAME__INI)) {
910 vdbg_printk(FUJLAPTOP_DBG_INFO, "Invoking _INI\n"); 911 vdbg_printk(FUJLAPTOP_DBG_INFO, "Invoking _INI\n");
@@ -920,27 +921,27 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
920 ; /* No action, result is discarded */ 921 ; /* No action, result is discarded */
921 vdbg_printk(FUJLAPTOP_DBG_INFO, "Discarded %i ringbuffer entries\n", i); 922 vdbg_printk(FUJLAPTOP_DBG_INFO, "Discarded %i ringbuffer entries\n", i);
922 923
923 fujitsu_hotkey->rfkill_supported = 924 fujitsu_laptop->flags_supported =
924 call_fext_func(FUNC_RFKILL, 0x0, 0x0, 0x0); 925 call_fext_func(FUNC_FLAGS, 0x0, 0x0, 0x0);
925 926
926 /* Make sure our bitmask of supported functions is cleared if the 927 /* Make sure our bitmask of supported functions is cleared if the
927 RFKILL function block is not implemented, like on the S7020. */ 928 RFKILL function block is not implemented, like on the S7020. */
928 if (fujitsu_hotkey->rfkill_supported == UNSUPPORTED_CMD) 929 if (fujitsu_laptop->flags_supported == UNSUPPORTED_CMD)
929 fujitsu_hotkey->rfkill_supported = 0; 930 fujitsu_laptop->flags_supported = 0;
930 931
931 if (fujitsu_hotkey->rfkill_supported) 932 if (fujitsu_laptop->flags_supported)
932 fujitsu_hotkey->rfkill_state = 933 fujitsu_laptop->flags_state =
933 call_fext_func(FUNC_RFKILL, 0x4, 0x0, 0x0); 934 call_fext_func(FUNC_FLAGS, 0x4, 0x0, 0x0);
934 935
935 /* Suspect this is a keymap of the application panel, print it */ 936 /* Suspect this is a keymap of the application panel, print it */
936 pr_info("BTNI: [0x%x]\n", call_fext_func(FUNC_BUTTONS, 0x0, 0x0, 0x0)); 937 pr_info("BTNI: [0x%x]\n", call_fext_func(FUNC_BUTTONS, 0x0, 0x0, 0x0));
937 938
938#if IS_ENABLED(CONFIG_LEDS_CLASS) 939#if IS_ENABLED(CONFIG_LEDS_CLASS)
939 if (call_fext_func(FUNC_LEDS, 0x0, 0x0, 0x0) & LOGOLAMP_POWERON) { 940 if (call_fext_func(FUNC_LEDS, 0x0, 0x0, 0x0) & LOGOLAMP_POWERON) {
940 result = led_classdev_register(&fujitsu->pf_device->dev, 941 result = led_classdev_register(&fujitsu_bl->pf_device->dev,
941 &logolamp_led); 942 &logolamp_led);
942 if (result == 0) { 943 if (result == 0) {
943 fujitsu_hotkey->logolamp_registered = 1; 944 fujitsu_laptop->logolamp_registered = 1;
944 } else { 945 } else {
945 pr_err("Could not register LED handler for logo lamp, error %i\n", 946 pr_err("Could not register LED handler for logo lamp, error %i\n",
946 result); 947 result);
@@ -949,10 +950,10 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
949 950
950 if ((call_fext_func(FUNC_LEDS, 0x0, 0x0, 0x0) & KEYBOARD_LAMPS) && 951 if ((call_fext_func(FUNC_LEDS, 0x0, 0x0, 0x0) & KEYBOARD_LAMPS) &&
951 (call_fext_func(FUNC_BUTTONS, 0x0, 0x0, 0x0) == 0x0)) { 952 (call_fext_func(FUNC_BUTTONS, 0x0, 0x0, 0x0) == 0x0)) {
952 result = led_classdev_register(&fujitsu->pf_device->dev, 953 result = led_classdev_register(&fujitsu_bl->pf_device->dev,
953 &kblamps_led); 954 &kblamps_led);
954 if (result == 0) { 955 if (result == 0) {
955 fujitsu_hotkey->kblamps_registered = 1; 956 fujitsu_laptop->kblamps_registered = 1;
956 } else { 957 } else {
957 pr_err("Could not register LED handler for keyboard lamps, error %i\n", 958 pr_err("Could not register LED handler for keyboard lamps, error %i\n",
958 result); 959 result);
@@ -966,10 +967,10 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
966 * that an RF LED is present. 967 * that an RF LED is present.
967 */ 968 */
968 if (call_fext_func(FUNC_BUTTONS, 0x0, 0x0, 0x0) & BIT(24)) { 969 if (call_fext_func(FUNC_BUTTONS, 0x0, 0x0, 0x0) & BIT(24)) {
969 result = led_classdev_register(&fujitsu->pf_device->dev, 970 result = led_classdev_register(&fujitsu_bl->pf_device->dev,
970 &radio_led); 971 &radio_led);
971 if (result == 0) { 972 if (result == 0) {
972 fujitsu_hotkey->radio_led_registered = 1; 973 fujitsu_laptop->radio_led_registered = 1;
973 } else { 974 } else {
974 pr_err("Could not register LED handler for radio LED, error %i\n", 975 pr_err("Could not register LED handler for radio LED, error %i\n",
975 result); 976 result);
@@ -983,10 +984,10 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
983 */ 984 */
984 if ((call_fext_func(FUNC_LEDS, 0x0, 0x0, 0x0) & BIT(14)) && 985 if ((call_fext_func(FUNC_LEDS, 0x0, 0x0, 0x0) & BIT(14)) &&
985 (call_fext_func(FUNC_LEDS, 0x2, ECO_LED, 0x0) != UNSUPPORTED_CMD)) { 986 (call_fext_func(FUNC_LEDS, 0x2, ECO_LED, 0x0) != UNSUPPORTED_CMD)) {
986 result = led_classdev_register(&fujitsu->pf_device->dev, 987 result = led_classdev_register(&fujitsu_bl->pf_device->dev,
987 &eco_led); 988 &eco_led);
988 if (result == 0) { 989 if (result == 0) {
989 fujitsu_hotkey->eco_led_registered = 1; 990 fujitsu_laptop->eco_led_registered = 1;
990 } else { 991 } else {
991 pr_err("Could not register LED handler for eco LED, error %i\n", 992 pr_err("Could not register LED handler for eco LED, error %i\n",
992 result); 993 result);
@@ -1002,47 +1003,47 @@ err_unregister_input_dev:
1002err_free_input_dev: 1003err_free_input_dev:
1003 input_free_device(input); 1004 input_free_device(input);
1004err_free_fifo: 1005err_free_fifo:
1005 kfifo_free(&fujitsu_hotkey->fifo); 1006 kfifo_free(&fujitsu_laptop->fifo);
1006err_stop: 1007err_stop:
1007 return error; 1008 return error;
1008} 1009}
1009 1010
1010static int acpi_fujitsu_hotkey_remove(struct acpi_device *device) 1011static int acpi_fujitsu_laptop_remove(struct acpi_device *device)
1011{ 1012{
1012 struct fujitsu_hotkey_t *fujitsu_hotkey = acpi_driver_data(device); 1013 struct fujitsu_laptop *fujitsu_laptop = acpi_driver_data(device);
1013 struct input_dev *input = fujitsu_hotkey->input; 1014 struct input_dev *input = fujitsu_laptop->input;
1014 1015
1015#if IS_ENABLED(CONFIG_LEDS_CLASS) 1016#if IS_ENABLED(CONFIG_LEDS_CLASS)
1016 if (fujitsu_hotkey->logolamp_registered) 1017 if (fujitsu_laptop->logolamp_registered)
1017 led_classdev_unregister(&logolamp_led); 1018 led_classdev_unregister(&logolamp_led);
1018 1019
1019 if (fujitsu_hotkey->kblamps_registered) 1020 if (fujitsu_laptop->kblamps_registered)
1020 led_classdev_unregister(&kblamps_led); 1021 led_classdev_unregister(&kblamps_led);
1021 1022
1022 if (fujitsu_hotkey->radio_led_registered) 1023 if (fujitsu_laptop->radio_led_registered)
1023 led_classdev_unregister(&radio_led); 1024 led_classdev_unregister(&radio_led);
1024 1025
1025 if (fujitsu_hotkey->eco_led_registered) 1026 if (fujitsu_laptop->eco_led_registered)
1026 led_classdev_unregister(&eco_led); 1027 led_classdev_unregister(&eco_led);
1027#endif 1028#endif
1028 1029
1029 input_unregister_device(input); 1030 input_unregister_device(input);
1030 1031
1031 kfifo_free(&fujitsu_hotkey->fifo); 1032 kfifo_free(&fujitsu_laptop->fifo);
1032 1033
1033 fujitsu_hotkey->acpi_handle = NULL; 1034 fujitsu_laptop->acpi_handle = NULL;
1034 1035
1035 return 0; 1036 return 0;
1036} 1037}
1037 1038
1038static void acpi_fujitsu_hotkey_press(int keycode) 1039static void acpi_fujitsu_laptop_press(int keycode)
1039{ 1040{
1040 struct input_dev *input = fujitsu_hotkey->input; 1041 struct input_dev *input = fujitsu_laptop->input;
1041 int status; 1042 int status;
1042 1043
1043 status = kfifo_in_locked(&fujitsu_hotkey->fifo, 1044 status = kfifo_in_locked(&fujitsu_laptop->fifo,
1044 (unsigned char *)&keycode, sizeof(keycode), 1045 (unsigned char *)&keycode, sizeof(keycode),
1045 &fujitsu_hotkey->fifo_lock); 1046 &fujitsu_laptop->fifo_lock);
1046 if (status != sizeof(keycode)) { 1047 if (status != sizeof(keycode)) {
1047 vdbg_printk(FUJLAPTOP_DBG_WARN, 1048 vdbg_printk(FUJLAPTOP_DBG_WARN,
1048 "Could not push keycode [0x%x]\n", keycode); 1049 "Could not push keycode [0x%x]\n", keycode);
@@ -1054,16 +1055,16 @@ static void acpi_fujitsu_hotkey_press(int keycode)
1054 "Push keycode into ringbuffer [%d]\n", keycode); 1055 "Push keycode into ringbuffer [%d]\n", keycode);
1055} 1056}
1056 1057
1057static void acpi_fujitsu_hotkey_release(void) 1058static void acpi_fujitsu_laptop_release(void)
1058{ 1059{
1059 struct input_dev *input = fujitsu_hotkey->input; 1060 struct input_dev *input = fujitsu_laptop->input;
1060 int keycode, status; 1061 int keycode, status;
1061 1062
1062 while (true) { 1063 while (true) {
1063 status = kfifo_out_locked(&fujitsu_hotkey->fifo, 1064 status = kfifo_out_locked(&fujitsu_laptop->fifo,
1064 (unsigned char *)&keycode, 1065 (unsigned char *)&keycode,
1065 sizeof(keycode), 1066 sizeof(keycode),
1066 &fujitsu_hotkey->fifo_lock); 1067 &fujitsu_laptop->fifo_lock);
1067 if (status != sizeof(keycode)) 1068 if (status != sizeof(keycode))
1068 return; 1069 return;
1069 input_report_key(input, keycode, 0); 1070 input_report_key(input, keycode, 0);
@@ -1073,14 +1074,14 @@ static void acpi_fujitsu_hotkey_release(void)
1073 } 1074 }
1074} 1075}
1075 1076
1076static void acpi_fujitsu_hotkey_notify(struct acpi_device *device, u32 event) 1077static void acpi_fujitsu_laptop_notify(struct acpi_device *device, u32 event)
1077{ 1078{
1078 struct input_dev *input; 1079 struct input_dev *input;
1079 int keycode; 1080 int keycode;
1080 unsigned int irb = 1; 1081 unsigned int irb = 1;
1081 int i; 1082 int i;
1082 1083
1083 input = fujitsu_hotkey->input; 1084 input = fujitsu_laptop->input;
1084 1085
1085 if (event != ACPI_FUJITSU_NOTIFY_CODE1) { 1086 if (event != ACPI_FUJITSU_NOTIFY_CODE1) {
1086 keycode = KEY_UNKNOWN; 1087 keycode = KEY_UNKNOWN;
@@ -1093,9 +1094,9 @@ static void acpi_fujitsu_hotkey_notify(struct acpi_device *device, u32 event)
1093 return; 1094 return;
1094 } 1095 }
1095 1096
1096 if (fujitsu_hotkey->rfkill_supported) 1097 if (fujitsu_laptop->flags_supported)
1097 fujitsu_hotkey->rfkill_state = 1098 fujitsu_laptop->flags_state =
1098 call_fext_func(FUNC_RFKILL, 0x4, 0x0, 0x0); 1099 call_fext_func(FUNC_FLAGS, 0x4, 0x0, 0x0);
1099 1100
1100 i = 0; 1101 i = 0;
1101 while ((irb = 1102 while ((irb =
@@ -1103,19 +1104,19 @@ static void acpi_fujitsu_hotkey_notify(struct acpi_device *device, u32 event)
1103 && (i++) < MAX_HOTKEY_RINGBUFFER_SIZE) { 1104 && (i++) < MAX_HOTKEY_RINGBUFFER_SIZE) {
1104 switch (irb & 0x4ff) { 1105 switch (irb & 0x4ff) {
1105 case KEY1_CODE: 1106 case KEY1_CODE:
1106 keycode = fujitsu->keycode1; 1107 keycode = fujitsu_bl->keycode1;
1107 break; 1108 break;
1108 case KEY2_CODE: 1109 case KEY2_CODE:
1109 keycode = fujitsu->keycode2; 1110 keycode = fujitsu_bl->keycode2;
1110 break; 1111 break;
1111 case KEY3_CODE: 1112 case KEY3_CODE:
1112 keycode = fujitsu->keycode3; 1113 keycode = fujitsu_bl->keycode3;
1113 break; 1114 break;
1114 case KEY4_CODE: 1115 case KEY4_CODE:
1115 keycode = fujitsu->keycode4; 1116 keycode = fujitsu_bl->keycode4;
1116 break; 1117 break;
1117 case KEY5_CODE: 1118 case KEY5_CODE:
1118 keycode = fujitsu->keycode5; 1119 keycode = fujitsu_bl->keycode5;
1119 break; 1120 break;
1120 case 0: 1121 case 0:
1121 keycode = 0; 1122 keycode = 0;
@@ -1128,17 +1129,17 @@ static void acpi_fujitsu_hotkey_notify(struct acpi_device *device, u32 event)
1128 } 1129 }
1129 1130
1130 if (keycode > 0) 1131 if (keycode > 0)
1131 acpi_fujitsu_hotkey_press(keycode); 1132 acpi_fujitsu_laptop_press(keycode);
1132 else if (keycode == 0) 1133 else if (keycode == 0)
1133 acpi_fujitsu_hotkey_release(); 1134 acpi_fujitsu_laptop_release();
1134 } 1135 }
1135 1136
1136 /* On some models (first seen on the Skylake-based Lifebook 1137 /* On some models (first seen on the Skylake-based Lifebook
1137 * E736/E746/E756), the touchpad toggle hotkey (Fn+F4) is 1138 * E736/E746/E756), the touchpad toggle hotkey (Fn+F4) is
1138 * handled in software; its state is queried using FUNC_RFKILL 1139 * handled in software; its state is queried using FUNC_FLAGS
1139 */ 1140 */
1140 if ((fujitsu_hotkey->rfkill_supported & BIT(26)) && 1141 if ((fujitsu_laptop->flags_supported & BIT(26)) &&
1141 (call_fext_func(FUNC_RFKILL, 0x1, 0x0, 0x0) & BIT(26))) { 1142 (call_fext_func(FUNC_FLAGS, 0x1, 0x0, 0x0) & BIT(26))) {
1142 keycode = KEY_TOUCHPAD_TOGGLE; 1143 keycode = KEY_TOUCHPAD_TOGGLE;
1143 input_report_key(input, keycode, 1); 1144 input_report_key(input, keycode, 1);
1144 input_sync(input); 1145 input_sync(input);
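
The touchpad-toggle branch above synthesizes a full keystroke in one notify pass instead of queueing it through the fifo. The press/release pairing it relies on, as a sketch over the standard input API; nothing here is driver-specific:

#include <linux/input.h>

/* Emit a complete press/release pair for a software-handled key. */
static void sketch_report_key(struct input_dev *input, unsigned int code)
{
	input_report_key(input, code, 1);
	input_sync(input);
	input_report_key(input, code, 0);
	input_sync(input);
}
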
@@ -1150,83 +1151,81 @@ static void acpi_fujitsu_hotkey_notify(struct acpi_device *device, u32 event)
1150 1151
1151/* Initialization */ 1152/* Initialization */
1152 1153
1153static const struct acpi_device_id fujitsu_device_ids[] = { 1154static const struct acpi_device_id fujitsu_bl_device_ids[] = {
1154 {ACPI_FUJITSU_HID, 0}, 1155 {ACPI_FUJITSU_BL_HID, 0},
1155 {"", 0}, 1156 {"", 0},
1156}; 1157};
1157 1158
1158static struct acpi_driver acpi_fujitsu_driver = { 1159static struct acpi_driver acpi_fujitsu_bl_driver = {
1159 .name = ACPI_FUJITSU_DRIVER_NAME, 1160 .name = ACPI_FUJITSU_BL_DRIVER_NAME,
1160 .class = ACPI_FUJITSU_CLASS, 1161 .class = ACPI_FUJITSU_CLASS,
1161 .ids = fujitsu_device_ids, 1162 .ids = fujitsu_bl_device_ids,
1162 .ops = { 1163 .ops = {
1163 .add = acpi_fujitsu_add, 1164 .add = acpi_fujitsu_bl_add,
1164 .remove = acpi_fujitsu_remove, 1165 .remove = acpi_fujitsu_bl_remove,
1165 .notify = acpi_fujitsu_notify, 1166 .notify = acpi_fujitsu_bl_notify,
1166 }, 1167 },
1167}; 1168};
1168 1169
1169static const struct acpi_device_id fujitsu_hotkey_device_ids[] = { 1170static const struct acpi_device_id fujitsu_laptop_device_ids[] = {
1170 {ACPI_FUJITSU_HOTKEY_HID, 0}, 1171 {ACPI_FUJITSU_LAPTOP_HID, 0},
1171 {"", 0}, 1172 {"", 0},
1172}; 1173};
1173 1174
1174static struct acpi_driver acpi_fujitsu_hotkey_driver = { 1175static struct acpi_driver acpi_fujitsu_laptop_driver = {
1175 .name = ACPI_FUJITSU_HOTKEY_DRIVER_NAME, 1176 .name = ACPI_FUJITSU_LAPTOP_DRIVER_NAME,
1176 .class = ACPI_FUJITSU_CLASS, 1177 .class = ACPI_FUJITSU_CLASS,
1177 .ids = fujitsu_hotkey_device_ids, 1178 .ids = fujitsu_laptop_device_ids,
1178 .ops = { 1179 .ops = {
1179 .add = acpi_fujitsu_hotkey_add, 1180 .add = acpi_fujitsu_laptop_add,
1180 .remove = acpi_fujitsu_hotkey_remove, 1181 .remove = acpi_fujitsu_laptop_remove,
1181 .notify = acpi_fujitsu_hotkey_notify, 1182 .notify = acpi_fujitsu_laptop_notify,
1182 }, 1183 },
1183}; 1184};
1184 1185
1185static const struct acpi_device_id fujitsu_ids[] __used = { 1186static const struct acpi_device_id fujitsu_ids[] __used = {
1186 {ACPI_FUJITSU_HID, 0}, 1187 {ACPI_FUJITSU_BL_HID, 0},
1187 {ACPI_FUJITSU_HOTKEY_HID, 0}, 1188 {ACPI_FUJITSU_LAPTOP_HID, 0},
1188 {"", 0} 1189 {"", 0}
1189}; 1190};
1190MODULE_DEVICE_TABLE(acpi, fujitsu_ids); 1191MODULE_DEVICE_TABLE(acpi, fujitsu_ids);
1191 1192
1192static int __init fujitsu_init(void) 1193static int __init fujitsu_init(void)
1193{ 1194{
1194 int ret, result, max_brightness; 1195 int ret, max_brightness;
1195 1196
1196 if (acpi_disabled) 1197 if (acpi_disabled)
1197 return -ENODEV; 1198 return -ENODEV;
1198 1199
1199 fujitsu = kzalloc(sizeof(struct fujitsu_t), GFP_KERNEL); 1200 fujitsu_bl = kzalloc(sizeof(struct fujitsu_bl), GFP_KERNEL);
1200 if (!fujitsu) 1201 if (!fujitsu_bl)
1201 return -ENOMEM; 1202 return -ENOMEM;
1202 fujitsu->keycode1 = KEY_PROG1; 1203 fujitsu_bl->keycode1 = KEY_PROG1;
1203 fujitsu->keycode2 = KEY_PROG2; 1204 fujitsu_bl->keycode2 = KEY_PROG2;
1204 fujitsu->keycode3 = KEY_PROG3; 1205 fujitsu_bl->keycode3 = KEY_PROG3;
1205 fujitsu->keycode4 = KEY_PROG4; 1206 fujitsu_bl->keycode4 = KEY_PROG4;
1206 fujitsu->keycode5 = KEY_RFKILL; 1207 fujitsu_bl->keycode5 = KEY_RFKILL;
1207 dmi_check_system(fujitsu_dmi_table); 1208 dmi_check_system(fujitsu_dmi_table);
1208 1209
1209 result = acpi_bus_register_driver(&acpi_fujitsu_driver); 1210 ret = acpi_bus_register_driver(&acpi_fujitsu_bl_driver);
1210 if (result < 0) { 1211 if (ret)
1211 ret = -ENODEV;
1212 goto fail_acpi; 1212 goto fail_acpi;
1213 }
1214 1213
1215 /* Register platform stuff */ 1214 /* Register platform stuff */
1216 1215
1217 fujitsu->pf_device = platform_device_alloc("fujitsu-laptop", -1); 1216 fujitsu_bl->pf_device = platform_device_alloc("fujitsu-laptop", -1);
1218 if (!fujitsu->pf_device) { 1217 if (!fujitsu_bl->pf_device) {
1219 ret = -ENOMEM; 1218 ret = -ENOMEM;
1220 goto fail_platform_driver; 1219 goto fail_platform_driver;
1221 } 1220 }
1222 1221
1223 ret = platform_device_add(fujitsu->pf_device); 1222 ret = platform_device_add(fujitsu_bl->pf_device);
1224 if (ret) 1223 if (ret)
1225 goto fail_platform_device1; 1224 goto fail_platform_device1;
1226 1225
1227 ret = 1226 ret =
1228 sysfs_create_group(&fujitsu->pf_device->dev.kobj, 1227 sysfs_create_group(&fujitsu_bl->pf_device->dev.kobj,
1229 &fujitsupf_attribute_group); 1228 &fujitsu_pf_attribute_group);
1230 if (ret) 1229 if (ret)
1231 goto fail_platform_device2; 1230 goto fail_platform_device2;
1232 1231
@@ -1236,90 +1235,88 @@ static int __init fujitsu_init(void)
1236 struct backlight_properties props; 1235 struct backlight_properties props;
1237 1236
1238 memset(&props, 0, sizeof(struct backlight_properties)); 1237 memset(&props, 0, sizeof(struct backlight_properties));
1239 max_brightness = fujitsu->max_brightness; 1238 max_brightness = fujitsu_bl->max_brightness;
1240 props.type = BACKLIGHT_PLATFORM; 1239 props.type = BACKLIGHT_PLATFORM;
1241 props.max_brightness = max_brightness - 1; 1240 props.max_brightness = max_brightness - 1;
1242 fujitsu->bl_device = backlight_device_register("fujitsu-laptop", 1241 fujitsu_bl->bl_device = backlight_device_register("fujitsu-laptop",
1243 NULL, NULL, 1242 NULL, NULL,
1244 &fujitsubl_ops, 1243 &fujitsu_bl_ops,
1245 &props); 1244 &props);
1246 if (IS_ERR(fujitsu->bl_device)) { 1245 if (IS_ERR(fujitsu_bl->bl_device)) {
1247 ret = PTR_ERR(fujitsu->bl_device); 1246 ret = PTR_ERR(fujitsu_bl->bl_device);
1248 fujitsu->bl_device = NULL; 1247 fujitsu_bl->bl_device = NULL;
1249 goto fail_sysfs_group; 1248 goto fail_sysfs_group;
1250 } 1249 }
1251 fujitsu->bl_device->props.brightness = fujitsu->brightness_level; 1250 fujitsu_bl->bl_device->props.brightness = fujitsu_bl->brightness_level;
1252 } 1251 }
1253 1252
1254 ret = platform_driver_register(&fujitsupf_driver); 1253 ret = platform_driver_register(&fujitsu_pf_driver);
1255 if (ret) 1254 if (ret)
1256 goto fail_backlight; 1255 goto fail_backlight;
1257 1256
1258 /* Register hotkey driver */ 1257 /* Register laptop driver */
1259 1258
1260 fujitsu_hotkey = kzalloc(sizeof(struct fujitsu_hotkey_t), GFP_KERNEL); 1259 fujitsu_laptop = kzalloc(sizeof(struct fujitsu_laptop), GFP_KERNEL);
1261 if (!fujitsu_hotkey) { 1260 if (!fujitsu_laptop) {
1262 ret = -ENOMEM; 1261 ret = -ENOMEM;
1263 goto fail_hotkey; 1262 goto fail_laptop;
1264 } 1263 }
1265 1264
1266 result = acpi_bus_register_driver(&acpi_fujitsu_hotkey_driver); 1265 ret = acpi_bus_register_driver(&acpi_fujitsu_laptop_driver);
1267 if (result < 0) { 1266 if (ret)
1268 ret = -ENODEV; 1267 goto fail_laptop1;
1269 goto fail_hotkey1;
1270 }
1271 1268
1272 /* Sync backlight power status (needs FUJ02E3 device, hence deferred) */ 1269 /* Sync backlight power status (needs FUJ02E3 device, hence deferred) */
1273 if (acpi_video_get_backlight_type() == acpi_backlight_vendor) { 1270 if (acpi_video_get_backlight_type() == acpi_backlight_vendor) {
1274 if (call_fext_func(FUNC_BACKLIGHT, 0x2, 0x4, 0x0) == 3) 1271 if (call_fext_func(FUNC_BACKLIGHT, 0x2, 0x4, 0x0) == 3)
1275 fujitsu->bl_device->props.power = FB_BLANK_POWERDOWN; 1272 fujitsu_bl->bl_device->props.power = FB_BLANK_POWERDOWN;
1276 else 1273 else
1277 fujitsu->bl_device->props.power = FB_BLANK_UNBLANK; 1274 fujitsu_bl->bl_device->props.power = FB_BLANK_UNBLANK;
1278 } 1275 }
1279 1276
1280 pr_info("driver " FUJITSU_DRIVER_VERSION " successfully loaded\n"); 1277 pr_info("driver " FUJITSU_DRIVER_VERSION " successfully loaded\n");
1281 1278
1282 return 0; 1279 return 0;
1283 1280
1284fail_hotkey1: 1281fail_laptop1:
1285 kfree(fujitsu_hotkey); 1282 kfree(fujitsu_laptop);
1286fail_hotkey: 1283fail_laptop:
1287 platform_driver_unregister(&fujitsupf_driver); 1284 platform_driver_unregister(&fujitsu_pf_driver);
1288fail_backlight: 1285fail_backlight:
1289 backlight_device_unregister(fujitsu->bl_device); 1286 backlight_device_unregister(fujitsu_bl->bl_device);
1290fail_sysfs_group: 1287fail_sysfs_group:
1291 sysfs_remove_group(&fujitsu->pf_device->dev.kobj, 1288 sysfs_remove_group(&fujitsu_bl->pf_device->dev.kobj,
1292 &fujitsupf_attribute_group); 1289 &fujitsu_pf_attribute_group);
1293fail_platform_device2: 1290fail_platform_device2:
1294 platform_device_del(fujitsu->pf_device); 1291 platform_device_del(fujitsu_bl->pf_device);
1295fail_platform_device1: 1292fail_platform_device1:
1296 platform_device_put(fujitsu->pf_device); 1293 platform_device_put(fujitsu_bl->pf_device);
1297fail_platform_driver: 1294fail_platform_driver:
1298 acpi_bus_unregister_driver(&acpi_fujitsu_driver); 1295 acpi_bus_unregister_driver(&acpi_fujitsu_bl_driver);
1299fail_acpi: 1296fail_acpi:
1300 kfree(fujitsu); 1297 kfree(fujitsu_bl);
1301 1298
1302 return ret; 1299 return ret;
1303} 1300}
1304 1301
1305static void __exit fujitsu_cleanup(void) 1302static void __exit fujitsu_cleanup(void)
1306{ 1303{
1307 acpi_bus_unregister_driver(&acpi_fujitsu_hotkey_driver); 1304 acpi_bus_unregister_driver(&acpi_fujitsu_laptop_driver);
1308 1305
1309 kfree(fujitsu_hotkey); 1306 kfree(fujitsu_laptop);
1310 1307
1311 platform_driver_unregister(&fujitsupf_driver); 1308 platform_driver_unregister(&fujitsu_pf_driver);
1312 1309
1313 backlight_device_unregister(fujitsu->bl_device); 1310 backlight_device_unregister(fujitsu_bl->bl_device);
1314 1311
1315 sysfs_remove_group(&fujitsu->pf_device->dev.kobj, 1312 sysfs_remove_group(&fujitsu_bl->pf_device->dev.kobj,
1316 &fujitsupf_attribute_group); 1313 &fujitsu_pf_attribute_group);
1317 1314
1318 platform_device_unregister(fujitsu->pf_device); 1315 platform_device_unregister(fujitsu_bl->pf_device);
1319 1316
1320 acpi_bus_unregister_driver(&acpi_fujitsu_driver); 1317 acpi_bus_unregister_driver(&acpi_fujitsu_bl_driver);
1321 1318
1322 kfree(fujitsu); 1319 kfree(fujitsu_bl);
1323 1320
1324 pr_info("driver unloaded\n"); 1321 pr_info("driver unloaded\n");
1325} 1322}
@@ -1341,7 +1338,3 @@ MODULE_AUTHOR("Jonathan Woithe, Peter Gruber, Tony Vroon");
1341MODULE_DESCRIPTION("Fujitsu laptop extras support"); 1338MODULE_DESCRIPTION("Fujitsu laptop extras support");
1342MODULE_VERSION(FUJITSU_DRIVER_VERSION); 1339MODULE_VERSION(FUJITSU_DRIVER_VERSION);
1343MODULE_LICENSE("GPL"); 1340MODULE_LICENSE("GPL");
1344
1345MODULE_ALIAS("dmi:*:svnFUJITSUSIEMENS:*:pvr:rvnFUJITSU:rnFJNB1D3:*:cvrS6410:*");
1346MODULE_ALIAS("dmi:*:svnFUJITSUSIEMENS:*:pvr:rvnFUJITSU:rnFJNB1E6:*:cvrS6420:*");
1347MODULE_ALIAS("dmi:*:svnFUJITSU:*:pvr:rvnFUJITSU:rnFJNB19C:*:cvrS7020:*");
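
Dropping the dmi:* aliases is safe because the combined fujitsu_ids table earlier in this diff is exported with MODULE_DEVICE_TABLE(), so the module autoloads from the ACPI modaliases of either device. The shape of that mechanism, as a sketch; FUJ02E3 is named in a comment above, while FUJ02B1 is assumed here to be the backlight HID:

#include <linux/acpi.h>
#include <linux/module.h>

static const struct acpi_device_id sketch_ids[] = {
	{ "FUJ02B1", 0 },	/* backlight device (assumed HID) */
	{ "FUJ02E3", 0 },	/* laptop/hotkey device, cf. comment above */
	{ "", 0 }
};
/* Generates acpi:FUJ02B1 / acpi:FUJ02E3 aliases for udev autoloading. */
MODULE_DEVICE_TABLE(acpi, sketch_ids);
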
diff --git a/drivers/ptp/ptp_kvm.c b/drivers/ptp/ptp_kvm.c
index 09b4df74291e..bb865695d7a6 100644
--- a/drivers/ptp/ptp_kvm.c
+++ b/drivers/ptp/ptp_kvm.c
@@ -193,10 +193,7 @@ static int __init ptp_kvm_init(void)
193 193
194 kvm_ptp_clock.ptp_clock = ptp_clock_register(&kvm_ptp_clock.caps, NULL); 194 kvm_ptp_clock.ptp_clock = ptp_clock_register(&kvm_ptp_clock.caps, NULL);
195 195
196 if (IS_ERR(kvm_ptp_clock.ptp_clock)) 196 return PTR_ERR_OR_ZERO(kvm_ptp_clock.ptp_clock);
197 return PTR_ERR(kvm_ptp_clock.ptp_clock);
198
199 return 0;
200} 197}
201 198
202module_init(ptp_kvm_init); 199module_init(ptp_kvm_init);
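
PTR_ERR_OR_ZERO() from <linux/err.h> returns PTR_ERR(ptr) when the pointer encodes an error and 0 otherwise, so the registration tail collapses to a single return. The open-coded equivalent it replaces:

#include <linux/err.h>

static inline int sketch_ptr_err_or_zero(const void *ptr)
{
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);
	return 0;
}
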
diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c
index 9d19b9a62011..315a4be8dc1e 100644
--- a/drivers/rapidio/devices/tsi721.c
+++ b/drivers/rapidio/devices/tsi721.c
@@ -37,8 +37,8 @@
37#include "tsi721.h" 37#include "tsi721.h"
38 38
39#ifdef DEBUG 39#ifdef DEBUG
40u32 dbg_level; 40u32 tsi_dbg_level;
41module_param(dbg_level, uint, S_IWUSR | S_IRUGO); 41module_param_named(dbg_level, tsi_dbg_level, uint, S_IWUSR | S_IRUGO);
42MODULE_PARM_DESC(dbg_level, "Debugging output level (default 0 = none)"); 42MODULE_PARM_DESC(dbg_level, "Debugging output level (default 0 = none)");
43#endif 43#endif
44 44
diff --git a/drivers/rapidio/devices/tsi721.h b/drivers/rapidio/devices/tsi721.h
index 5941437cbdd1..957eadc58150 100644
--- a/drivers/rapidio/devices/tsi721.h
+++ b/drivers/rapidio/devices/tsi721.h
@@ -40,11 +40,11 @@ enum {
40}; 40};
41 41
42#ifdef DEBUG 42#ifdef DEBUG
43extern u32 dbg_level; 43extern u32 tsi_dbg_level;
44 44
45#define tsi_debug(level, dev, fmt, arg...) \ 45#define tsi_debug(level, dev, fmt, arg...) \
46 do { \ 46 do { \
47 if (DBG_##level & dbg_level) \ 47 if (DBG_##level & tsi_dbg_level) \
48 dev_dbg(dev, "%s: " fmt "\n", __func__, ##arg); \ 48 dev_dbg(dev, "%s: " fmt "\n", __func__, ##arg); \
49 } while (0) 49 } while (0)
50#else 50#else
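
The non-static dbg_level exported from tsi721.h is a link-time hazard: any other global of the same name in a built-in kernel collides with it. module_param_named() lets the C identifier be renamed while the user-visible parameter keeps its old spelling, so the command line and /sys/module/tsi721/parameters/dbg_level are unaffected. A sketch of the idiom:

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stat.h>

static u32 sketch_dbg_level;	/* internal name, free to change */
/* Still surfaces as "dbg_level" to module loaders and in sysfs. */
module_param_named(dbg_level, sketch_dbg_level, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(dbg_level, "Debugging output level (default 0 = none)");
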
diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig
index 65f86bc24c07..1dc43fc5f65f 100644
--- a/drivers/remoteproc/Kconfig
+++ b/drivers/remoteproc/Kconfig
@@ -76,7 +76,7 @@ config QCOM_ADSP_PIL
76 depends on OF && ARCH_QCOM 76 depends on OF && ARCH_QCOM
77 depends on REMOTEPROC 77 depends on REMOTEPROC
78 depends on QCOM_SMEM 78 depends on QCOM_SMEM
79 depends on QCOM_SMD || (COMPILE_TEST && QCOM_SMD=n) 79 depends on RPMSG_QCOM_SMD || QCOM_SMD || (COMPILE_TEST && QCOM_SMD=n && RPMSG_QCOM_SMD=n)
80 select MFD_SYSCON 80 select MFD_SYSCON
81 select QCOM_MDT_LOADER 81 select QCOM_MDT_LOADER
82 select QCOM_RPROC_COMMON 82 select QCOM_RPROC_COMMON
@@ -93,7 +93,7 @@ config QCOM_Q6V5_PIL
93 depends on OF && ARCH_QCOM 93 depends on OF && ARCH_QCOM
94 depends on QCOM_SMEM 94 depends on QCOM_SMEM
95 depends on REMOTEPROC 95 depends on REMOTEPROC
96 depends on QCOM_SMD || (COMPILE_TEST && QCOM_SMD=n) 96 depends on RPMSG_QCOM_SMD || QCOM_SMD || (COMPILE_TEST && QCOM_SMD=n && RPMSG_QCOM_SMD=n)
97 select MFD_SYSCON 97 select MFD_SYSCON
98 select QCOM_RPROC_COMMON 98 select QCOM_RPROC_COMMON
99 select QCOM_SCM 99 select QCOM_SCM
@@ -104,7 +104,7 @@ config QCOM_Q6V5_PIL
104config QCOM_WCNSS_PIL 104config QCOM_WCNSS_PIL
105 tristate "Qualcomm WCNSS Peripheral Image Loader" 105 tristate "Qualcomm WCNSS Peripheral Image Loader"
106 depends on OF && ARCH_QCOM 106 depends on OF && ARCH_QCOM
107 depends on QCOM_SMD || (COMPILE_TEST && QCOM_SMD=n) 107 depends on RPMSG_QCOM_SMD || QCOM_SMD || (COMPILE_TEST && QCOM_SMD=n && RPMSG_QCOM_SMD=n)
108 depends on QCOM_SMEM 108 depends on QCOM_SMEM
109 depends on REMOTEPROC 109 depends on REMOTEPROC
110 select QCOM_MDT_LOADER 110 select QCOM_MDT_LOADER
diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c
index 40f1136f5568..058db724b5a2 100644
--- a/drivers/s390/crypto/pkey_api.c
+++ b/drivers/s390/crypto/pkey_api.c
@@ -572,6 +572,12 @@ int pkey_sec2protkey(u16 cardnr, u16 domain,
572 rc = -EIO; 572 rc = -EIO;
573 goto out; 573 goto out;
574 } 574 }
575 if (prepcblk->ccp_rscode != 0) {
576 DEBUG_WARN(
577 "pkey_sec2protkey unwrap secure key warning, card response %d/%d\n",
578 (int) prepcblk->ccp_rtcode,
579 (int) prepcblk->ccp_rscode);
580 }
575 581
576 /* process response cprb param block */ 582 /* process response cprb param block */
577 prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX); 583 prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX);
@@ -761,9 +767,10 @@ out:
761} 767}
762 768
763/* 769/*
764 * Fetch just the mkvp value via query_crypto_facility from adapter. 770 * Fetch the current and old mkvp values via
771 * query_crypto_facility from adapter.
765 */ 772 */
766static int fetch_mkvp(u16 cardnr, u16 domain, u64 *mkvp) 773static int fetch_mkvp(u16 cardnr, u16 domain, u64 mkvp[2])
767{ 774{
768 int rc, found = 0; 775 int rc, found = 0;
769 size_t rlen, vlen; 776 size_t rlen, vlen;
@@ -779,9 +786,10 @@ static int fetch_mkvp(u16 cardnr, u16 domain, u64 *mkvp)
779 rc = query_crypto_facility(cardnr, domain, "STATICSA", 786 rc = query_crypto_facility(cardnr, domain, "STATICSA",
780 rarray, &rlen, varray, &vlen); 787 rarray, &rlen, varray, &vlen);
781 if (rc == 0 && rlen > 8*8 && vlen > 184+8) { 788 if (rc == 0 && rlen > 8*8 && vlen > 184+8) {
782 if (rarray[64] == '2') { 789 if (rarray[8*8] == '2') {
783 /* current master key state is valid */ 790 /* current master key state is valid */
784 *mkvp = *((u64 *)(varray + 184)); 791 mkvp[0] = *((u64 *)(varray + 184));
792 mkvp[1] = *((u64 *)(varray + 172));
785 found = 1; 793 found = 1;
786 } 794 }
787 } 795 }
@@ -796,14 +804,14 @@ struct mkvp_info {
796 struct list_head list; 804 struct list_head list;
797 u16 cardnr; 805 u16 cardnr;
798 u16 domain; 806 u16 domain;
799 u64 mkvp; 807 u64 mkvp[2];
800}; 808};
801 809
802/* a list with mkvp_info entries */ 810/* a list with mkvp_info entries */
803static LIST_HEAD(mkvp_list); 811static LIST_HEAD(mkvp_list);
804static DEFINE_SPINLOCK(mkvp_list_lock); 812static DEFINE_SPINLOCK(mkvp_list_lock);
805 813
806static int mkvp_cache_fetch(u16 cardnr, u16 domain, u64 *mkvp) 814static int mkvp_cache_fetch(u16 cardnr, u16 domain, u64 mkvp[2])
807{ 815{
808 int rc = -ENOENT; 816 int rc = -ENOENT;
809 struct mkvp_info *ptr; 817 struct mkvp_info *ptr;
@@ -812,7 +820,7 @@ static int mkvp_cache_fetch(u16 cardnr, u16 domain, u64 *mkvp)
812 list_for_each_entry(ptr, &mkvp_list, list) { 820 list_for_each_entry(ptr, &mkvp_list, list) {
813 if (ptr->cardnr == cardnr && 821 if (ptr->cardnr == cardnr &&
814 ptr->domain == domain) { 822 ptr->domain == domain) {
815 *mkvp = ptr->mkvp; 823 memcpy(mkvp, ptr->mkvp, 2 * sizeof(u64));
816 rc = 0; 824 rc = 0;
817 break; 825 break;
818 } 826 }
@@ -822,7 +830,7 @@ static int mkvp_cache_fetch(u16 cardnr, u16 domain, u64 *mkvp)
822 return rc; 830 return rc;
823} 831}
824 832
825static void mkvp_cache_update(u16 cardnr, u16 domain, u64 mkvp) 833static void mkvp_cache_update(u16 cardnr, u16 domain, u64 mkvp[2])
826{ 834{
827 int found = 0; 835 int found = 0;
828 struct mkvp_info *ptr; 836 struct mkvp_info *ptr;
@@ -831,7 +839,7 @@ static void mkvp_cache_update(u16 cardnr, u16 domain, u64 mkvp)
831 list_for_each_entry(ptr, &mkvp_list, list) { 839 list_for_each_entry(ptr, &mkvp_list, list) {
832 if (ptr->cardnr == cardnr && 840 if (ptr->cardnr == cardnr &&
833 ptr->domain == domain) { 841 ptr->domain == domain) {
834 ptr->mkvp = mkvp; 842 memcpy(ptr->mkvp, mkvp, 2 * sizeof(u64));
835 found = 1; 843 found = 1;
836 break; 844 break;
837 } 845 }
@@ -844,7 +852,7 @@ static void mkvp_cache_update(u16 cardnr, u16 domain, u64 mkvp)
844 } 852 }
845 ptr->cardnr = cardnr; 853 ptr->cardnr = cardnr;
846 ptr->domain = domain; 854 ptr->domain = domain;
847 ptr->mkvp = mkvp; 855 memcpy(ptr->mkvp, mkvp, 2 * sizeof(u64));
848 list_add(&ptr->list, &mkvp_list); 856 list_add(&ptr->list, &mkvp_list);
849 } 857 }
850 spin_unlock_bh(&mkvp_list_lock); 858 spin_unlock_bh(&mkvp_list_lock);
@@ -888,8 +896,8 @@ int pkey_findcard(const struct pkey_seckey *seckey,
888 struct secaeskeytoken *t = (struct secaeskeytoken *) seckey; 896 struct secaeskeytoken *t = (struct secaeskeytoken *) seckey;
889 struct zcrypt_device_matrix *device_matrix; 897 struct zcrypt_device_matrix *device_matrix;
890 u16 card, dom; 898 u16 card, dom;
891 u64 mkvp; 899 u64 mkvp[2];
892 int i, rc; 900 int i, rc, oi = -1;
893 901
894 /* mkvp must not be zero */ 902 /* mkvp must not be zero */
895 if (t->mkvp == 0) 903 if (t->mkvp == 0)
@@ -910,14 +918,14 @@ int pkey_findcard(const struct pkey_seckey *seckey,
910 device_matrix->device[i].functions & 0x04) { 918 device_matrix->device[i].functions & 0x04) {
911 /* an enabled CCA Coprocessor card */ 919 /* an enabled CCA Coprocessor card */
912 /* try cached mkvp */ 920 /* try cached mkvp */
913 if (mkvp_cache_fetch(card, dom, &mkvp) == 0 && 921 if (mkvp_cache_fetch(card, dom, mkvp) == 0 &&
914 t->mkvp == mkvp) { 922 t->mkvp == mkvp[0]) {
915 if (!verify) 923 if (!verify)
916 break; 924 break;
917 /* verify: fetch mkvp from adapter */ 925 /* verify: fetch mkvp from adapter */
918 if (fetch_mkvp(card, dom, &mkvp) == 0) { 926 if (fetch_mkvp(card, dom, mkvp) == 0) {
919 mkvp_cache_update(card, dom, mkvp); 927 mkvp_cache_update(card, dom, mkvp);
920 if (t->mkvp == mkvp) 928 if (t->mkvp == mkvp[0])
921 break; 929 break;
922 } 930 }
923 } 931 }
@@ -936,14 +944,21 @@ int pkey_findcard(const struct pkey_seckey *seckey,
936 card = AP_QID_CARD(device_matrix->device[i].qid); 944 card = AP_QID_CARD(device_matrix->device[i].qid);
937 dom = AP_QID_QUEUE(device_matrix->device[i].qid); 945 dom = AP_QID_QUEUE(device_matrix->device[i].qid);
938 /* fresh fetch mkvp from adapter */ 946 /* fresh fetch mkvp from adapter */
939 if (fetch_mkvp(card, dom, &mkvp) == 0) { 947 if (fetch_mkvp(card, dom, mkvp) == 0) {
940 mkvp_cache_update(card, dom, mkvp); 948 mkvp_cache_update(card, dom, mkvp);
941 if (t->mkvp == mkvp) 949 if (t->mkvp == mkvp[0])
942 break; 950 break;
951 if (t->mkvp == mkvp[1] && oi < 0)
952 oi = i;
943 } 953 }
944 } 954 }
955 if (i >= MAX_ZDEV_ENTRIES && oi >= 0) {
956 /* old mkvp matched, use this card then */
957 card = AP_QID_CARD(device_matrix->device[oi].qid);
958 dom = AP_QID_QUEUE(device_matrix->device[oi].qid);
959 }
945 } 960 }
946 if (i < MAX_ZDEV_ENTRIES) { 961 if (i < MAX_ZDEV_ENTRIES || oi >= 0) {
947 if (pcardnr) 962 if (pcardnr)
948 *pcardnr = card; 963 *pcardnr = card;
949 if (pdomain) 964 if (pdomain)
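
The mkvp widening threads through the whole file: slot 0 now holds the current master key verification pattern (varray + 184) and slot 1 the old one (varray + 172), and pkey_findcard() remembers the first old-key match in oi as a fallback when no current-key match exists. That two-pass preference, condensed into a sketch with hypothetical types:

#include <linux/types.h>

struct sketch_dev { u16 card, dom; };

/* Prefer a device whose current mkvp matches @wanted; otherwise fall
 * back to the first whose old mkvp matches. @fetch returns 0 on
 * success, like fetch_mkvp() above. Returns an index or -1. */
static int sketch_pick(const struct sketch_dev *devs, int n, u64 wanted,
		       int (*fetch)(u16 card, u16 dom, u64 mkvp[2]))
{
	u64 mkvp[2];
	int i, oi = -1;

	for (i = 0; i < n; i++) {
		if (fetch(devs[i].card, devs[i].dom, mkvp) != 0)
			continue;
		if (wanted == mkvp[0])
			return i;	/* current master key match */
		if (wanted == mkvp[1] && oi < 0)
			oi = i;		/* remember old-key fallback */
	}
	return oi;
}
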
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index e7addea8741b..d9561e39c3b2 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -961,7 +961,8 @@ int qeth_bridgeport_query_ports(struct qeth_card *card,
961int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role); 961int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role);
962int qeth_bridgeport_an_set(struct qeth_card *card, int enable); 962int qeth_bridgeport_an_set(struct qeth_card *card, int enable);
963int qeth_get_priority_queue(struct qeth_card *, struct sk_buff *, int, int); 963int qeth_get_priority_queue(struct qeth_card *, struct sk_buff *, int, int);
964int qeth_get_elements_no(struct qeth_card *, struct sk_buff *, int); 964int qeth_get_elements_no(struct qeth_card *card, struct sk_buff *skb,
965 int extra_elems, int data_offset);
965int qeth_get_elements_for_frags(struct sk_buff *); 966int qeth_get_elements_for_frags(struct sk_buff *);
966int qeth_do_send_packet_fast(struct qeth_card *, struct qeth_qdio_out_q *, 967int qeth_do_send_packet_fast(struct qeth_card *, struct qeth_qdio_out_q *,
967 struct sk_buff *, struct qeth_hdr *, int, int, int); 968 struct sk_buff *, struct qeth_hdr *, int, int, int);
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 315d8a2db7c0..9a5f99ccb122 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -3837,6 +3837,7 @@ EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags);
3837 * @card: qeth card structure, to check max. elems. 3837 * @card: qeth card structure, to check max. elems.
3838 * @skb: SKB address 3838 * @skb: SKB address
3839 * @extra_elems: extra elems needed, to check against max. 3839 * @extra_elems: extra elems needed, to check against max.
3840 * @data_offset: range starts at skb->data + data_offset
3840 * 3841 *
3841 * Returns the number of pages, and thus QDIO buffer elements, needed to cover 3842 * Returns the number of pages, and thus QDIO buffer elements, needed to cover
3842 * skb data, including linear part and fragments. Checks if the result plus 3843 * skb data, including linear part and fragments. Checks if the result plus
@@ -3844,10 +3845,10 @@ EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags);
3844 * Note: extra_elems is not included in the returned result. 3845 * Note: extra_elems is not included in the returned result.
3845 */ 3846 */
3846int qeth_get_elements_no(struct qeth_card *card, 3847int qeth_get_elements_no(struct qeth_card *card,
3847 struct sk_buff *skb, int extra_elems) 3848 struct sk_buff *skb, int extra_elems, int data_offset)
3848{ 3849{
3849 int elements = qeth_get_elements_for_range( 3850 int elements = qeth_get_elements_for_range(
3850 (addr_t)skb->data, 3851 (addr_t)skb->data + data_offset,
3851 (addr_t)skb->data + skb_headlen(skb)) + 3852 (addr_t)skb->data + skb_headlen(skb)) +
3852 qeth_get_elements_for_frags(skb); 3853 qeth_get_elements_for_frags(skb);
3853 3854
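
qeth_get_elements_no() counts the PAGE_SIZE-sized QDIO buffer elements an skb needs; the new data_offset parameter advances the start of the linear range so that headroom handled out of band (the separately allocated qeth_hdr) is not billed. Assuming qeth_get_elements_for_range() is the usual page-frame span, the arithmetic amounts to this sketch:

#include <linux/pfn.h>

typedef unsigned long sketch_addr_t;	/* stands in for s390 addr_t */

/* Pages, and thus QDIO elements, spanned by [start, end); assumed to
 * match what qeth_get_elements_for_range() computes. */
static int sketch_elements_for_range(sketch_addr_t start, sketch_addr_t end)
{
	return PFN_UP(end) - PFN_DOWN(start);
}

With the patch, the linear part is counted from skb->data + data_offset up to skb->data + skb_headlen(skb), leaving the fragment accounting unchanged.
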
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index bea483307618..af4e6a639fec 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -849,7 +849,7 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
849 * chaining we can not send long frag lists 849 * chaining we can not send long frag lists
850 */ 850 */
851 if ((card->info.type != QETH_CARD_TYPE_IQD) && 851 if ((card->info.type != QETH_CARD_TYPE_IQD) &&
852 !qeth_get_elements_no(card, new_skb, 0)) { 852 !qeth_get_elements_no(card, new_skb, 0, 0)) {
853 int lin_rc = skb_linearize(new_skb); 853 int lin_rc = skb_linearize(new_skb);
854 854
855 if (card->options.performance_stats) { 855 if (card->options.performance_stats) {
@@ -894,7 +894,8 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
894 } 894 }
895 } 895 }
896 896
897 elements = qeth_get_elements_no(card, new_skb, elements_needed); 897 elements = qeth_get_elements_no(card, new_skb, elements_needed,
898 (data_offset > 0) ? data_offset : 0);
898 if (!elements) { 899 if (!elements) {
899 if (data_offset >= 0) 900 if (data_offset >= 0)
900 kmem_cache_free(qeth_core_header_cache, hdr); 901 kmem_cache_free(qeth_core_header_cache, hdr);
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 06d0addcc058..653f0fb76573 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -2609,17 +2609,13 @@ static void qeth_l3_fill_af_iucv_hdr(struct qeth_card *card,
 	char daddr[16];
 	struct af_iucv_trans_hdr *iucv_hdr;
 
-	skb_pull(skb, 14);
-	card->dev->header_ops->create(skb, card->dev, 0,
-				      card->dev->dev_addr, card->dev->dev_addr,
-				      card->dev->addr_len);
-	skb_pull(skb, 14);
-	iucv_hdr = (struct af_iucv_trans_hdr *)skb->data;
 	memset(hdr, 0, sizeof(struct qeth_hdr));
 	hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3;
 	hdr->hdr.l3.ext_flags = 0;
-	hdr->hdr.l3.length = skb->len;
+	hdr->hdr.l3.length = skb->len - ETH_HLEN;
 	hdr->hdr.l3.flags = QETH_HDR_IPV6 | QETH_CAST_UNICAST;
+
+	iucv_hdr = (struct af_iucv_trans_hdr *) (skb->data + ETH_HLEN);
 	memset(daddr, 0, sizeof(daddr));
 	daddr[0] = 0xfe;
 	daddr[1] = 0x80;
@@ -2823,10 +2819,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	if ((card->info.type == QETH_CARD_TYPE_IQD) &&
 	    !skb_is_nonlinear(skb)) {
 		new_skb = skb;
-		if (new_skb->protocol == ETH_P_AF_IUCV)
-			data_offset = 0;
-		else
-			data_offset = ETH_HLEN;
+		data_offset = ETH_HLEN;
 		hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC);
 		if (!hdr)
 			goto tx_drop;
@@ -2867,7 +2860,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	 */
 	if ((card->info.type != QETH_CARD_TYPE_IQD) &&
 	    ((use_tso && !qeth_l3_get_elements_no_tso(card, new_skb, 1)) ||
-	     (!use_tso && !qeth_get_elements_no(card, new_skb, 0)))) {
+	     (!use_tso && !qeth_get_elements_no(card, new_skb, 0, 0)))) {
 		int lin_rc = skb_linearize(new_skb);
 
 		if (card->options.performance_stats) {
@@ -2909,7 +2902,8 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	elements = use_tso ?
 		qeth_l3_get_elements_no_tso(card, new_skb, hdr_elements) :
-		qeth_get_elements_no(card, new_skb, hdr_elements);
+		qeth_get_elements_no(card, new_skb, hdr_elements,
+				     (data_offset > 0) ? data_offset : 0);
 	if (!elements) {
 		if (data_offset >= 0)
 			kmem_cache_free(qeth_core_header_cache, hdr);
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 230043c1c90f..3c52867dfe28 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1241,16 +1241,15 @@ config SCSI_LPFC
 	tristate "Emulex LightPulse Fibre Channel Support"
 	depends on PCI && SCSI
 	depends on SCSI_FC_ATTRS
-	depends on NVME_FC && NVME_TARGET_FC
 	select CRC_T10DIF
-	help
+	---help---
 	  This lpfc driver supports the Emulex LightPulse
 	  Family of Fibre Channel PCI host adapters.
 
 config SCSI_LPFC_DEBUG_FS
 	bool "Emulex LightPulse Fibre Channel debugfs Support"
 	depends on SCSI_LPFC && DEBUG_FS
-	help
+	---help---
 	  This makes debugging information from the lpfc driver
 	  available via the debugfs filesystem.
 
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index a3ad04293487..c8172f16cf33 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -2056,7 +2056,6 @@ static int fillup_pools(struct aac_dev *dev, struct hw_fib **hw_fib_pool,
 {
 	struct hw_fib **hw_fib_p;
 	struct fib **fib_p;
-	int rcode = 1;
 
 	hw_fib_p = hw_fib_pool;
 	fib_p = fib_pool;
@@ -2074,11 +2073,11 @@
 		}
 	}
 
+	/*
+	 * Get the actual number of allocated fibs
+	 */
 	num = hw_fib_p - hw_fib_pool;
-	if (!num)
-		rcode = 0;
-
-	return rcode;
+	return num;
 }
 
 static void wakeup_fibctx_threads(struct aac_dev *dev,
@@ -2186,7 +2185,6 @@ static void aac_process_events(struct aac_dev *dev)
 	struct fib *fib;
 	unsigned long flags;
 	spinlock_t *t_lock;
-	unsigned int rcode;
 
 	t_lock = dev->queues->queue[HostNormCmdQueue].lock;
 	spin_lock_irqsave(t_lock, flags);
@@ -2269,8 +2267,8 @@ static void aac_process_events(struct aac_dev *dev)
 		 * Fill up fib pointer pools with actual fibs
 		 * and hw_fibs
 		 */
-		rcode = fillup_pools(dev, hw_fib_pool, fib_pool, num);
-		if (!rcode)
+		num = fillup_pools(dev, hw_fib_pool, fib_pool, num);
+		if (!num)
 			goto free_mem;
 
 		/*
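
fillup_pools() now reports how many fibs it actually allocated instead of a boolean rcode, and the caller reuses that count directly. A hedged, standalone illustration of the pointer-difference idiom the function relies on; fill() and the slot pool are invented for the example, not aacraid code:

#include <stddef.h>
#include <stdlib.h>

/* Hypothetical allocator: fills one slot, may fail and stop the loop. */
static int fill(void **slot)
{
        *slot = malloc(32);
        return *slot != NULL;
}

static size_t fill_slots(void **pool, size_t max)
{
        void **p = pool;

        while (p < pool + max && fill(p))
                p++;

        /* Pointer difference is the number of slots filled; zero maps to
         * the caller's "goto free_mem" failure path. */
        return p - pool;
}

int main(void)
{
        void *pool[8];

        return fill_slots(pool, 8) ? 0 : 1;
}
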
diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
index 2e5338dec621..7b0410e0f569 100644
--- a/drivers/scsi/aacraid/src.c
+++ b/drivers/scsi/aacraid/src.c
@@ -468,7 +468,7 @@ err_out:
 	return -1;
 
 err_blink:
-	return (status > 16) & 0xFF;
+	return (status >> 16) & 0xFF;
 }
 
 static inline u32 aac_get_vector(struct aac_dev *dev)
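
The src.c hunk is a one-character fix with a large effect: `status > 16` is a comparison that collapses to 0 or 1 before the mask, while the intended `status >> 16` shifts the blink-code byte (bits 23:16) into range first. A tiny self-contained check of the difference; the status value is illustrative only:

#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint32_t status = 0x00AB0000;

        assert(((status > 16) & 0xFF) == 1);     /* the bug: boolean comparison */
        assert(((status >> 16) & 0xFF) == 0xAB); /* the fix: bit shift, then mask */
        return 0;
}
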
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 48e200102221..c01b47e5b55a 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -113,7 +113,7 @@ struct alua_queue_data {
 #define ALUA_POLICY_SWITCH_ALL		1
 
 static void alua_rtpg_work(struct work_struct *work);
-static void alua_rtpg_queue(struct alua_port_group *pg,
+static bool alua_rtpg_queue(struct alua_port_group *pg,
 			    struct scsi_device *sdev,
 			    struct alua_queue_data *qdata, bool force);
 static void alua_check(struct scsi_device *sdev, bool force);
@@ -862,7 +862,13 @@ static void alua_rtpg_work(struct work_struct *work)
 	kref_put(&pg->kref, release_port_group);
 }
 
-static void alua_rtpg_queue(struct alua_port_group *pg,
+/**
+ * alua_rtpg_queue() - cause RTPG to be submitted asynchronously
+ *
+ * Returns true if and only if alua_rtpg_work() will be called asynchronously.
+ * That function is responsible for calling @qdata->fn().
+ */
+static bool alua_rtpg_queue(struct alua_port_group *pg,
 			    struct scsi_device *sdev,
 			    struct alua_queue_data *qdata, bool force)
 {
@@ -870,8 +876,8 @@ static void alua_rtpg_queue(struct alua_port_group *pg,
 	unsigned long flags;
 	struct workqueue_struct *alua_wq = kaluad_wq;
 
-	if (!pg)
-		return;
+	if (WARN_ON_ONCE(!pg) || scsi_device_get(sdev))
+		return false;
 
 	spin_lock_irqsave(&pg->lock, flags);
 	if (qdata) {
@@ -884,14 +890,12 @@ static void alua_rtpg_queue(struct alua_port_group *pg,
 		pg->flags |= ALUA_PG_RUN_RTPG;
 		kref_get(&pg->kref);
 		pg->rtpg_sdev = sdev;
-		scsi_device_get(sdev);
 		start_queue = 1;
 	} else if (!(pg->flags & ALUA_PG_RUN_RTPG) && force) {
 		pg->flags |= ALUA_PG_RUN_RTPG;
 		/* Do not queue if the worker is already running */
 		if (!(pg->flags & ALUA_PG_RUNNING)) {
 			kref_get(&pg->kref);
-			sdev = NULL;
 			start_queue = 1;
 		}
 	}
@@ -900,13 +904,17 @@ static void alua_rtpg_queue(struct alua_port_group *pg,
 		alua_wq = kaluad_sync_wq;
 	spin_unlock_irqrestore(&pg->lock, flags);
 
-	if (start_queue &&
-	    !queue_delayed_work(alua_wq, &pg->rtpg_work,
-				msecs_to_jiffies(ALUA_RTPG_DELAY_MSECS))) {
-		if (sdev)
-			scsi_device_put(sdev);
-		kref_put(&pg->kref, release_port_group);
+	if (start_queue) {
+		if (queue_delayed_work(alua_wq, &pg->rtpg_work,
+				msecs_to_jiffies(ALUA_RTPG_DELAY_MSECS)))
+			sdev = NULL;
+		else
+			kref_put(&pg->kref, release_port_group);
 	}
+	if (sdev)
+		scsi_device_put(sdev);
+
+	return true;
 }
 
 /*
@@ -1007,11 +1015,13 @@ static int alua_activate(struct scsi_device *sdev,
 		mutex_unlock(&h->init_mutex);
 		goto out;
 	}
-	fn = NULL;
 	rcu_read_unlock();
 	mutex_unlock(&h->init_mutex);
 
-	alua_rtpg_queue(pg, sdev, qdata, true);
+	if (alua_rtpg_queue(pg, sdev, qdata, true))
+		fn = NULL;
+	else
+		err = SCSI_DH_DEV_OFFLINED;
 	kref_put(&pg->kref, release_port_group);
 out:
 	if (fn)
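
The alua hunks revolve around one ownership rule: take the scsi_device reference before anything can fail, hand it to the worker if queueing succeeds, and give it back otherwise, while the new bool return tells alua_activate() whether the completion callback will ever run. A condensed sketch of that pattern with invented types; obj_get()/obj_put()/queue_work_stub() stand in for the SCSI and workqueue APIs and simplify the real locking:

#include <stdbool.h>

struct obj { int refs; };

static bool obj_get(struct obj *o)
{
        if (o->refs <= 0)
                return false;   /* object already going away */
        o->refs++;
        return true;
}

static void obj_put(struct obj *o) { o->refs--; }

/* Pretend queueing; returns false if the work was already pending. */
static bool queue_work_stub(struct obj *o) { (void)o; return true; }

static bool submit(struct obj *o)
{
        if (!obj_get(o))        /* grab the reference before anything else */
                return false;

        if (queue_work_stub(o))
                return true;    /* the worker now owns the reference */

        obj_put(o);             /* not queued after all: hand the ref back */
        return false;
}

int main(void)
{
        struct obj dev = { .refs = 1 };

        return submit(&dev) ? 0 : 1;
}
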
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 524a0c755ed7..9d659aaace15 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -2956,7 +2956,7 @@ static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr,
 	/* fill_cmd can't fail here, no data buffer to map. */
 	(void) fill_cmd(c, reset_type, h, NULL, 0, 0,
 			scsi3addr, TYPE_MSG);
-	rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, DEFAULT_TIMEOUT);
+	rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
 	if (rc) {
 		dev_warn(&h->pdev->dev, "Failed to send reset command\n");
 		goto out;
@@ -3714,7 +3714,7 @@ exit_failed:
  *  # (integer code indicating one of several NOT READY states
  *     describing why a volume is to be kept offline)
  */
-static int hpsa_volume_offline(struct ctlr_info *h,
+static unsigned char hpsa_volume_offline(struct ctlr_info *h,
 					unsigned char scsi3addr[])
 {
 	struct CommandList *c;
@@ -3735,7 +3735,7 @@ static int hpsa_volume_offline(struct ctlr_info *h,
 					DEFAULT_TIMEOUT);
 	if (rc) {
 		cmd_free(h, c);
-		return 0;
+		return HPSA_VPD_LV_STATUS_UNSUPPORTED;
 	}
 	sense = c->err_info->SenseInfo;
 	if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
@@ -3746,19 +3746,13 @@ static int hpsa_volume_offline(struct ctlr_info *h,
 	cmd_status = c->err_info->CommandStatus;
 	scsi_status = c->err_info->ScsiStatus;
 	cmd_free(h, c);
-	/* Is the volume 'not ready'? */
-	if (cmd_status != CMD_TARGET_STATUS ||
-		scsi_status != SAM_STAT_CHECK_CONDITION ||
-		sense_key != NOT_READY ||
-		asc != ASC_LUN_NOT_READY) {
-		return 0;
-	}
 
 	/* Determine the reason for not ready state */
 	ldstat = hpsa_get_volume_status(h, scsi3addr);
 
 	/* Keep volume offline in certain cases: */
 	switch (ldstat) {
+	case HPSA_LV_FAILED:
 	case HPSA_LV_UNDERGOING_ERASE:
 	case HPSA_LV_NOT_AVAILABLE:
 	case HPSA_LV_UNDERGOING_RPI:
@@ -3780,7 +3774,7 @@ static int hpsa_volume_offline(struct ctlr_info *h,
 	default:
 		break;
 	}
-	return 0;
+	return HPSA_LV_OK;
 }
 
 /*
@@ -3853,10 +3847,10 @@ static int hpsa_update_device_info(struct ctlr_info *h,
 	/* Do an inquiry to the device to see what it is. */
 	if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
 		(unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
-		/* Inquiry failed (msg printed already) */
 		dev_err(&h->pdev->dev,
-			"hpsa_update_device_info: inquiry failed\n");
-		rc = -EIO;
+			"%s: inquiry failed, device will be skipped.\n",
+			__func__);
+		rc = HPSA_INQUIRY_FAILED;
 		goto bail_out;
 	}
 
@@ -3885,15 +3879,20 @@ static int hpsa_update_device_info(struct ctlr_info *h,
 	if ((this_device->devtype == TYPE_DISK ||
 	     this_device->devtype == TYPE_ZBC) &&
 		is_logical_dev_addr_mode(scsi3addr)) {
-		int volume_offline;
+		unsigned char volume_offline;
 
 		hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
 		if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
 			hpsa_get_ioaccel_status(h, scsi3addr, this_device);
 		volume_offline = hpsa_volume_offline(h, scsi3addr);
-		if (volume_offline < 0 || volume_offline > 0xff)
-			volume_offline = HPSA_VPD_LV_STATUS_UNSUPPORTED;
-		this_device->volume_offline = volume_offline & 0xff;
+		this_device->volume_offline = volume_offline;
+		if (volume_offline == HPSA_LV_FAILED) {
+			rc = HPSA_LV_FAILED;
+			dev_err(&h->pdev->dev,
+				"%s: LV failed, device will be skipped.\n",
+				__func__);
+			goto bail_out;
+		}
 	} else {
 		this_device->raid_level = RAID_UNKNOWN;
 		this_device->offload_config = 0;
@@ -4379,8 +4378,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h)
 			goto out;
 		}
 		if (rc) {
-			dev_warn(&h->pdev->dev,
-				"Inquiry failed, skipping device.\n");
+			h->drv_req_rescan = 1;
 			continue;
 		}
 
@@ -5558,7 +5556,7 @@ static void hpsa_scan_complete(struct ctlr_info *h)
 
 	spin_lock_irqsave(&h->scan_lock, flags);
 	h->scan_finished = 1;
-	wake_up_all(&h->scan_wait_queue);
+	wake_up(&h->scan_wait_queue);
 	spin_unlock_irqrestore(&h->scan_lock, flags);
 }
 
@@ -5576,11 +5574,23 @@ static void hpsa_scan_start(struct Scsi_Host *sh)
 	if (unlikely(lockup_detected(h)))
 		return hpsa_scan_complete(h);
 
+	/*
+	 * If a scan is already waiting to run, no need to add another
+	 */
+	spin_lock_irqsave(&h->scan_lock, flags);
+	if (h->scan_waiting) {
+		spin_unlock_irqrestore(&h->scan_lock, flags);
+		return;
+	}
+
+	spin_unlock_irqrestore(&h->scan_lock, flags);
+
 	/* wait until any scan already in progress is finished. */
 	while (1) {
 		spin_lock_irqsave(&h->scan_lock, flags);
 		if (h->scan_finished)
 			break;
+		h->scan_waiting = 1;
 		spin_unlock_irqrestore(&h->scan_lock, flags);
 		wait_event(h->scan_wait_queue, h->scan_finished);
 		/* Note: We don't need to worry about a race between this
@@ -5590,6 +5600,7 @@ static void hpsa_scan_start(struct Scsi_Host *sh)
 		 */
 	}
 	h->scan_finished = 0; /* mark scan as in progress */
+	h->scan_waiting = 0;
 	spin_unlock_irqrestore(&h->scan_lock, flags);
 
 	if (unlikely(lockup_detected(h)))
@@ -8792,6 +8803,7 @@ reinit_after_soft_reset:
 	init_waitqueue_head(&h->event_sync_wait_queue);
 	mutex_init(&h->reset_mutex);
 	h->scan_finished = 1; /* no scan currently in progress */
+	h->scan_waiting = 0;
 
 	pci_set_drvdata(pdev, h);
 	h->ndevices = 0;
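
The new scan_waiting flag gives hpsa_scan_start() at-most-one-waiter semantics: if a rescan is already parked behind the running scan, a further request can return immediately, because the parked scan will observe any newer device state anyway. A hedged sketch of that decision with the locking elided and invented names:

#include <stdbool.h>

struct scan_state {
        bool running;   /* a scan is in progress */
        bool waiting;   /* one rescan is already queued behind it */
};

static bool should_wait_for_scan(struct scan_state *s)
{
        if (s->waiting)         /* someone is already queued: nothing to do */
                return false;

        if (s->running)         /* mark ourselves as the single queued waiter */
                s->waiting = true;

        return true;            /* caller proceeds (possibly after waiting) */
}

int main(void)
{
        struct scan_state s = { .running = true, .waiting = false };

        should_wait_for_scan(&s);                 /* first caller parks */
        return should_wait_for_scan(&s) ? 1 : 0;  /* second returns at once */
}
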
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index bf6cdc106654..6f04f2ad4125 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -201,6 +201,7 @@ struct ctlr_info {
 	dma_addr_t		errinfo_pool_dhandle;
 	unsigned long  		*cmd_pool_bits;
 	int			scan_finished;
+	u8			scan_waiting : 1;
 	spinlock_t		scan_lock;
 	wait_queue_head_t	scan_wait_queue;
 
diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
index a584cdf07058..5961705eef76 100644
--- a/drivers/scsi/hpsa_cmd.h
+++ b/drivers/scsi/hpsa_cmd.h
@@ -156,6 +156,7 @@
 #define CFGTBL_BusType_Fibre2G  0x00000200l
 
 /* VPD Inquiry types */
+#define HPSA_INQUIRY_FAILED		0x02
 #define HPSA_VPD_SUPPORTED_PAGES        0x00
 #define HPSA_VPD_LV_DEVICE_ID           0x83
 #define HPSA_VPD_LV_DEVICE_GEOMETRY     0xC1
@@ -166,6 +167,7 @@
 /* Logical volume states */
 #define HPSA_VPD_LV_STATUS_UNSUPPORTED			0xff
 #define HPSA_LV_OK                                      0x0
+#define HPSA_LV_FAILED					0x01
 #define HPSA_LV_NOT_AVAILABLE				0x0b
 #define HPSA_LV_UNDERGOING_ERASE			0x0F
 #define HPSA_LV_UNDERGOING_RPI				0x12
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 07c08ce68d70..894b1e3ebd56 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -561,8 +561,12 @@ static void iscsi_complete_task(struct iscsi_task *task, int state)
 	WARN_ON_ONCE(task->state == ISCSI_TASK_FREE);
 	task->state = state;
 
-	if (!list_empty(&task->running))
+	spin_lock_bh(&conn->taskqueuelock);
+	if (!list_empty(&task->running)) {
+		pr_debug_once("%s while task on list", __func__);
 		list_del_init(&task->running);
+	}
+	spin_unlock_bh(&conn->taskqueuelock);
 
 	if (conn->task == task)
 		conn->task = NULL;
@@ -784,7 +788,9 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
 		if (session->tt->xmit_task(task))
 			goto free_task;
 	} else {
+		spin_lock_bh(&conn->taskqueuelock);
 		list_add_tail(&task->running, &conn->mgmtqueue);
+		spin_unlock_bh(&conn->taskqueuelock);
 		iscsi_conn_queue_work(conn);
 	}
 
@@ -1475,8 +1481,10 @@ void iscsi_requeue_task(struct iscsi_task *task)
 	 * this may be on the requeue list already if the xmit_task callout
 	 * is handling the r2ts while we are adding new ones
 	 */
+	spin_lock_bh(&conn->taskqueuelock);
 	if (list_empty(&task->running))
 		list_add_tail(&task->running, &conn->requeue);
+	spin_unlock_bh(&conn->taskqueuelock);
 	iscsi_conn_queue_work(conn);
 }
 EXPORT_SYMBOL_GPL(iscsi_requeue_task);
@@ -1513,22 +1521,26 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
 	 * only have one nop-out as a ping from us and targets should not
 	 * overflow us with nop-ins
 	 */
+	spin_lock_bh(&conn->taskqueuelock);
 check_mgmt:
 	while (!list_empty(&conn->mgmtqueue)) {
 		conn->task = list_entry(conn->mgmtqueue.next,
 					 struct iscsi_task, running);
 		list_del_init(&conn->task->running);
+		spin_unlock_bh(&conn->taskqueuelock);
 		if (iscsi_prep_mgmt_task(conn, conn->task)) {
 			/* regular RX path uses back_lock */
 			spin_lock_bh(&conn->session->back_lock);
 			__iscsi_put_task(conn->task);
 			spin_unlock_bh(&conn->session->back_lock);
 			conn->task = NULL;
+			spin_lock_bh(&conn->taskqueuelock);
 			continue;
 		}
 		rc = iscsi_xmit_task(conn);
 		if (rc)
 			goto done;
+		spin_lock_bh(&conn->taskqueuelock);
 	}
 
 	/* process pending command queue */
@@ -1536,19 +1548,24 @@ check_mgmt:
 		conn->task = list_entry(conn->cmdqueue.next, struct iscsi_task,
 					running);
 		list_del_init(&conn->task->running);
+		spin_unlock_bh(&conn->taskqueuelock);
 		if (conn->session->state == ISCSI_STATE_LOGGING_OUT) {
 			fail_scsi_task(conn->task, DID_IMM_RETRY);
+			spin_lock_bh(&conn->taskqueuelock);
 			continue;
 		}
 		rc = iscsi_prep_scsi_cmd_pdu(conn->task);
 		if (rc) {
 			if (rc == -ENOMEM || rc == -EACCES) {
+				spin_lock_bh(&conn->taskqueuelock);
 				list_add_tail(&conn->task->running,
 					      &conn->cmdqueue);
 				conn->task = NULL;
+				spin_unlock_bh(&conn->taskqueuelock);
 				goto done;
 			} else
 				fail_scsi_task(conn->task, DID_ABORT);
+			spin_lock_bh(&conn->taskqueuelock);
 			continue;
 		}
 		rc = iscsi_xmit_task(conn);
@@ -1559,6 +1576,7 @@ check_mgmt:
 		 * we need to check the mgmt queue for nops that need to
 		 * be sent to aviod starvation
 		 */
+		spin_lock_bh(&conn->taskqueuelock);
 		if (!list_empty(&conn->mgmtqueue))
 			goto check_mgmt;
 	}
@@ -1578,12 +1596,15 @@ check_mgmt:
 		conn->task = task;
 		list_del_init(&conn->task->running);
 		conn->task->state = ISCSI_TASK_RUNNING;
+		spin_unlock_bh(&conn->taskqueuelock);
 		rc = iscsi_xmit_task(conn);
 		if (rc)
 			goto done;
+		spin_lock_bh(&conn->taskqueuelock);
 		if (!list_empty(&conn->mgmtqueue))
 			goto check_mgmt;
 	}
+	spin_unlock_bh(&conn->taskqueuelock);
 	spin_unlock_bh(&conn->session->frwd_lock);
 	return -ENODATA;
 
@@ -1739,7 +1760,9 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
 			goto prepd_reject;
 		}
 	} else {
+		spin_lock_bh(&conn->taskqueuelock);
 		list_add_tail(&task->running, &conn->cmdqueue);
+		spin_unlock_bh(&conn->taskqueuelock);
 		iscsi_conn_queue_work(conn);
 	}
 
@@ -2897,6 +2920,7 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
 	INIT_LIST_HEAD(&conn->mgmtqueue);
 	INIT_LIST_HEAD(&conn->cmdqueue);
 	INIT_LIST_HEAD(&conn->requeue);
+	spin_lock_init(&conn->taskqueuelock);
 	INIT_WORK(&conn->xmitwork, iscsi_xmitworker);
 
 	/* allocate login_task used for the login/text sequences */
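
Every libiscsi hunk above enforces the same invariant: a test of task->running followed by a list operation must be atomic under the new taskqueuelock, otherwise a concurrent list_add_tail() can slip between the emptiness check and the unlink and corrupt the list. A standalone sketch of that invariant, with pthreads and a hand-rolled node standing in for the kernel spinlock and list helpers:

#include <pthread.h>

struct node { struct node *prev, *next; };

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;

/* A self-linked node is "not on any list" (the list_empty convention). */
static int node_on_list(struct node *n) { return n->next != n; }

static void node_del_init(struct node *n)
{
        n->prev->next = n->next;
        n->next->prev = n->prev;
        n->next = n->prev = n;
}

static void complete_task(struct node *running)
{
        pthread_mutex_lock(&queue_lock);
        if (node_on_list(running))      /* test and unlink atomically */
                node_del_init(running);
        pthread_mutex_unlock(&queue_lock);
}

int main(void)
{
        struct node task = { &task, &task };    /* starts off-list */

        complete_task(&task);                   /* safe under the lock */
        return 0;
}
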
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 763f012fdeca..87f5e694dbed 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -221,7 +221,7 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
 		task->num_scatter = qc->n_elem;
 	} else {
 		for_each_sg(qc->sg, sg, qc->n_elem, si)
-			xfer += sg->length;
+			xfer += sg_dma_len(sg);
 
 		task->total_xfer_len = xfer;
 		task->num_scatter = si;
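
The sas_ata one-liner matters because an IOMMU may coalesce scatterlist entries during DMA mapping: the length the CPU side filled in (sg->length) and the mapped length (sg_dma_len(sg)) can legitimately differ, and the total transfer length must come from the mapped view. A schematic of the summing loop over an invented sg_stub, not the kernel's scatterlist:

struct sg_stub {
        unsigned int length;     /* CPU-side length */
        unsigned int dma_length; /* length after DMA mapping */
};

static unsigned long total_xfer_len(struct sg_stub *sg, int n_elem)
{
        unsigned long xfer = 0;
        int i;

        for (i = 0; i < n_elem; i++)
                xfer += sg[i].dma_length; /* mapped length, not sg->length */

        return xfer;
}

int main(void)
{
        /* two 4 KiB CPU entries merged by a hypothetical IOMMU into one
         * 8 KiB mapped entry; summing sg->length would double-count */
        struct sg_stub sg[2] = { { 4096, 8192 }, { 4096, 0 } };

        return total_xfer_len(sg, 2) == 8192 ? 0 : 1;
}
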
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 0bba2e30b4f0..257bbdd0f0b8 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -99,12 +99,13 @@ struct lpfc_sli2_slim;
 #define FC_MAX_ADPTMSG		64
 
 #define MAX_HBAEVT	32
+#define MAX_HBAS_NO_RESET 16
 
 /* Number of MSI-X vectors the driver uses */
 #define LPFC_MSIX_VECTORS	2
 
 /* lpfc wait event data ready flag */
-#define LPFC_DATA_READY		(1<<0)
+#define LPFC_DATA_READY		0	/* bit 0 */
 
 /* queue dump line buffer size */
 #define LPFC_LBUF_SZ		128
@@ -692,6 +693,7 @@ struct lpfc_hba {
 					 * capability
 					 */
 #define HBA_NVME_IOQ_FLUSH      0x80000 /* NVME IO queues flushed. */
+#define NVME_XRI_ABORT_EVENT	0x100000
 
 	uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
 	struct lpfc_dmabuf slim2p;
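
The LPFC_DATA_READY change is easy to misread: the value did not become wrong, its meaning changed from a mask to a bit number, which is what the set_bit()/test_bit() family expects as its first argument. A minimal kernel-style sketch of the usage this presumably aligns with; the flag word and helper names are illustrative:

#include <linux/bitops.h>

static unsigned long flags_word;

static void mark_data_ready(void)
{
	set_bit(0, &flags_word);	/* bit number 0, not the mask (1<<0) */
}

static int data_ready(void)
{
	return test_bit(0, &flags_word);
}

Passing a mask like (1<<0) where a bit number is expected happens to work for bit 0 but silently targets the wrong bit for every other flag, which is why the definition style matters.
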
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 5c783ef7f260..22819afbaef5 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -3010,6 +3010,12 @@ MODULE_PARM_DESC(lpfc_poll, "FCP ring polling mode control:"
 static DEVICE_ATTR(lpfc_poll, S_IRUGO | S_IWUSR,
 		   lpfc_poll_show, lpfc_poll_store);
 
+int lpfc_no_hba_reset_cnt;
+unsigned long lpfc_no_hba_reset[MAX_HBAS_NO_RESET] = {
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+module_param_array(lpfc_no_hba_reset, ulong, &lpfc_no_hba_reset_cnt, 0444);
+MODULE_PARM_DESC(lpfc_no_hba_reset, "WWPN of HBAs that should not be reset");
+
 LPFC_ATTR(sli_mode, 0, 0, 3,
 	"SLI mode selector:"
 	" 0 - auto (SLI-3 if supported),"
@@ -3309,9 +3315,9 @@ LPFC_ATTR_R(nvmet_mrq_post, LPFC_DEF_MRQ_POST,
  * lpfc_enable_fc4_type: Defines what FC4 types are supported.
  * Supported Values:  1 - register just FCP
  *                    3 - register both FCP and NVME
- * Supported values are [1,3]. Default value is 3
+ * Supported values are [1,3]. Default value is 1
  */
-LPFC_ATTR_R(enable_fc4_type, LPFC_ENABLE_BOTH,
+LPFC_ATTR_R(enable_fc4_type, LPFC_ENABLE_FCP,
 	    LPFC_ENABLE_FCP, LPFC_ENABLE_BOTH,
 	    "Define fc4 type to register with fabric.");
 
@@ -4451,7 +4457,8 @@ lpfc_fcp_imax_store(struct device *dev, struct device_attribute *attr,
 		return -EINVAL;
 
 	phba->cfg_fcp_imax = (uint32_t)val;
-	for (i = 0; i < phba->io_channel_irqs; i++)
+
+	for (i = 0; i < phba->io_channel_irqs; i += LPFC_MAX_EQ_DELAY_EQID_CNT)
 		lpfc_modify_hba_eq_delay(phba, i);
 
 	return strlen(buf);
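
The lpfc_no_hba_reset machinery added above is the standard module-parameter-array pattern: the kernel parses a comma-separated option into the array and records how many entries were supplied in the count variable. A minimal, self-contained module showing the same pattern with invented names, loadable as e.g. `insmod demo.ko demo_wwpns=0x5001438001234567,0x5001438009abcdef`:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

static int demo_wwpn_cnt;
static unsigned long demo_wwpns[16];
module_param_array(demo_wwpns, ulong, &demo_wwpn_cnt, 0444);
MODULE_PARM_DESC(demo_wwpns, "WWPNs to treat specially (demo)");

static int __init demo_init(void)
{
	int i;

	/* demo_wwpn_cnt was filled in by the parameter parser */
	for (i = 0; i < demo_wwpn_cnt; i++)
		pr_info("demo: wwpn[%d] = %lx\n", i, demo_wwpns[i]);
	return 0;
}

static void __exit demo_exit(void) { }

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
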
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 843dd73004da..54e6ac42fbcd 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -384,7 +384,7 @@ void lpfc_free_sysfs_attr(struct lpfc_vport *);
 extern struct device_attribute *lpfc_hba_attrs[];
 extern struct device_attribute *lpfc_vport_attrs[];
 extern struct scsi_host_template lpfc_template;
-extern struct scsi_host_template lpfc_template_s3;
+extern struct scsi_host_template lpfc_template_no_hr;
 extern struct scsi_host_template lpfc_template_nvme;
 extern struct scsi_host_template lpfc_vport_template;
 extern struct fc_function_template lpfc_transport_functions;
@@ -554,3 +554,5 @@ void lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba,
 			   struct lpfc_wcqe_complete *abts_cmpl);
 extern int lpfc_enable_nvmet_cnt;
 extern unsigned long long lpfc_enable_nvmet[];
+extern int lpfc_no_hba_reset_cnt;
+extern unsigned long lpfc_no_hba_reset[];
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index c22bb3f887e1..d3e9af983015 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -939,8 +939,8 @@ lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 				 "FC4 x%08x, Data: x%08x x%08x\n",
 				 ndlp, did, ndlp->nlp_fc4_type,
 				 FC_TYPE_FCP, FC_TYPE_NVME);
+			ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
 		}
-		ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
 		lpfc_issue_els_prli(vport, ndlp, 0);
 	} else
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 9f4798e9d938..913eed822cb8 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -3653,17 +3653,6 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
 			idiag.ptr_private = phba->sli4_hba.nvmels_cq;
 			goto pass_check;
 		}
-		/* NVME LS complete queue */
-		if (phba->sli4_hba.nvmels_cq &&
-		    phba->sli4_hba.nvmels_cq->queue_id == queid) {
-			/* Sanity check */
-			rc = lpfc_idiag_que_param_check(
-					phba->sli4_hba.nvmels_cq, index, count);
-			if (rc)
-				goto error_out;
-			idiag.ptr_private = phba->sli4_hba.nvmels_cq;
-			goto pass_check;
-		}
 		/* FCP complete queue */
 		if (phba->sli4_hba.fcp_cq) {
 			for (qidx = 0; qidx < phba->cfg_fcp_io_channel;
@@ -3738,17 +3727,6 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
 			idiag.ptr_private = phba->sli4_hba.nvmels_wq;
 			goto pass_check;
 		}
-		/* NVME LS work queue */
-		if (phba->sli4_hba.nvmels_wq &&
-		    phba->sli4_hba.nvmels_wq->queue_id == queid) {
-			/* Sanity check */
-			rc = lpfc_idiag_que_param_check(
-					phba->sli4_hba.nvmels_wq, index, count);
-			if (rc)
-				goto error_out;
-			idiag.ptr_private = phba->sli4_hba.nvmels_wq;
-			goto pass_check;
-		}
 		/* FCP work queue */
 		if (phba->sli4_hba.fcp_wq) {
 			for (qidx = 0; qidx < phba->cfg_fcp_io_channel;
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h
index c05f56c3023f..7b7d314af0e0 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.h
+++ b/drivers/scsi/lpfc/lpfc_debugfs.h
@@ -44,14 +44,6 @@
 /* hbqinfo output buffer size */
 #define LPFC_HBQINFO_SIZE 8192
 
-enum {
-	DUMP_FCP,
-	DUMP_NVME,
-	DUMP_MBX,
-	DUMP_ELS,
-	DUMP_NVMELS,
-};
-
 /* nvmestat output buffer size */
 #define LPFC_NVMESTAT_SIZE 8192
 #define LPFC_NVMEKTIME_SIZE 8192
@@ -283,8 +275,22 @@ struct lpfc_idiag {
 	struct lpfc_idiag_offset offset;
 	void *ptr_private;
 };
+
+#else
+
+#define lpfc_nvmeio_data(phba, fmt, arg...) \
+	no_printk(fmt, ##arg)
+
 #endif
 
+enum {
+	DUMP_FCP,
+	DUMP_NVME,
+	DUMP_MBX,
+	DUMP_ELS,
+	DUMP_NVMELS,
+};
+
 /* Mask for discovery_trace */
 #define LPFC_DISC_TRC_ELS_CMD		0x1	/* Trace ELS commands */
 #define LPFC_DISC_TRC_ELS_RSP		0x2	/* Trace ELS response */
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 2d26440e6f2f..a5ca37e45fb6 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -5177,15 +5177,15 @@ lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc *desc, struct lpfc_hba *phba)
 
 static uint32_t
 lpfc_rdp_res_diag_port_names(struct fc_rdp_port_name_desc *desc,
-		struct lpfc_hba *phba)
+		struct lpfc_vport *vport)
 {
 
 	desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG);
 
-	memcpy(desc->port_names.wwnn, phba->wwnn,
+	memcpy(desc->port_names.wwnn, &vport->fc_nodename,
 			sizeof(desc->port_names.wwnn));
 
-	memcpy(desc->port_names.wwpn, phba->wwpn,
+	memcpy(desc->port_names.wwpn, &vport->fc_portname,
 			sizeof(desc->port_names.wwpn));
 
 	desc->length = cpu_to_be32(sizeof(desc->port_names));
@@ -5279,7 +5279,7 @@ lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context,
 	len += lpfc_rdp_res_link_error((struct fc_rdp_link_error_status_desc *)
 				       (len + pcmd), &rdp_context->link_stat);
 	len += lpfc_rdp_res_diag_port_names((struct fc_rdp_port_name_desc *)
-					     (len + pcmd), phba);
+					     (len + pcmd), vport);
 	len += lpfc_rdp_res_attach_port_names((struct fc_rdp_port_name_desc *)
 					(len + pcmd), vport, ndlp);
 	len += lpfc_rdp_res_fec_desc((struct fc_fec_rdp_desc *)(len + pcmd),
@@ -7968,7 +7968,8 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 			did, vport->port_state, ndlp->nlp_flag);
 
 		phba->fc_stat.elsRcvPRLI++;
-		if (vport->port_state < LPFC_DISC_AUTH) {
+		if ((vport->port_state < LPFC_DISC_AUTH) &&
+		    (vport->fc_flag & FC_FABRIC)) {
 			rjt_err = LSRJT_UNABLE_TPC;
 			rjt_exp = LSEXP_NOTHING_MORE;
 			break;
@@ -8371,11 +8372,17 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 			spin_lock_irq(shost->host_lock);
 			vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
 			spin_unlock_irq(shost->host_lock);
-			if (vport->port_type == LPFC_PHYSICAL_PORT
-				&& !(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG))
-				lpfc_issue_init_vfi(vport);
-			else
+			if (mb->mbxStatus == MBX_NOT_FINISHED)
+				break;
+			if ((vport->port_type == LPFC_PHYSICAL_PORT) &&
+			    !(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG)) {
+				if (phba->sli_rev == LPFC_SLI_REV4)
+					lpfc_issue_init_vfi(vport);
+				else
+					lpfc_initial_flogi(vport);
+			} else {
 				lpfc_initial_fdisc(vport);
+			}
 			break;
 		}
 	} else {
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 194a14d5f8a9..180b072beef6 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -313,8 +313,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
 				 ndlp->nlp_state, ndlp->nlp_rpi);
 	}
 
-	if (!(vport->load_flag & FC_UNLOADING) &&
-	    !(ndlp->nlp_flag & NLP_DELAY_TMO) &&
+	if (!(ndlp->nlp_flag & NLP_DELAY_TMO) &&
 	    !(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
 	    (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
 	    (ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) &&
@@ -641,6 +640,8 @@ lpfc_work_done(struct lpfc_hba *phba)
 			lpfc_handle_rrq_active(phba);
 		if (phba->hba_flag & FCP_XRI_ABORT_EVENT)
 			lpfc_sli4_fcp_xri_abort_event_proc(phba);
+		if (phba->hba_flag & NVME_XRI_ABORT_EVENT)
+			lpfc_sli4_nvme_xri_abort_event_proc(phba);
 		if (phba->hba_flag & ELS_XRI_ABORT_EVENT)
 			lpfc_sli4_els_xri_abort_event_proc(phba);
 		if (phba->hba_flag & ASYNC_EVENT)
@@ -2173,7 +2174,7 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
 	uint32_t boot_flag, addr_mode;
 	uint16_t fcf_index, next_fcf_index;
 	struct lpfc_fcf_rec *fcf_rec = NULL;
-	uint16_t vlan_id;
+	uint16_t vlan_id = LPFC_FCOE_NULL_VID;
 	bool select_new_fcf;
 	int rc;
 
@@ -4020,9 +4021,11 @@ lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 		rdata = rport->dd_data;
 		/* break the link before dropping the ref */
 		ndlp->rport = NULL;
-		if (rdata && rdata->pnode == ndlp)
-			lpfc_nlp_put(ndlp);
-		rdata->pnode = NULL;
+		if (rdata) {
+			if (rdata->pnode == ndlp)
+				lpfc_nlp_put(ndlp);
+			rdata->pnode = NULL;
+		}
 		/* drop reference for earlier registeration */
 		put_device(&rport->dev);
 	}
@@ -4344,9 +4347,8 @@ lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 {
 	INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
 	INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
-	init_timer(&ndlp->nlp_delayfunc);
-	ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
-	ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
+	setup_timer(&ndlp->nlp_delayfunc, lpfc_els_retry_delay,
+			(unsigned long)ndlp);
 	ndlp->nlp_DID = did;
 	ndlp->vport = vport;
 	ndlp->phba = vport->phba;
@@ -4606,9 +4608,9 @@ lpfc_sli4_dequeue_nport_iocbs(struct lpfc_hba *phba,
 		pring = qp->pring;
 		if (!pring)
 			continue;
-		spin_lock_irq(&pring->ring_lock);
+		spin_lock(&pring->ring_lock);
 		__lpfc_dequeue_nport_iocbs(phba, ndlp, pring, dequeue_list);
-		spin_unlock_irq(&pring->ring_lock);
+		spin_unlock(&pring->ring_lock);
 	}
 	spin_unlock_irq(&phba->hbalock);
 }
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index cfdb068a3bfc..15277705cb6b 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -1001,7 +1001,7 @@ struct eq_delay_info {
 	uint32_t phase;
 	uint32_t delay_multi;
 };
-#define	LPFC_MAX_EQ_DELAY	8
+#define	LPFC_MAX_EQ_DELAY_EQID_CNT	8
 
 struct sgl_page_pairs {
 	uint32_t sgl_pg0_addr_lo;
@@ -1070,7 +1070,7 @@ struct lpfc_mbx_modify_eq_delay {
 	union {
 		struct {
 			uint32_t num_eq;
			struct eq_delay_info eq[LPFC_MAX_EQ_DELAY_EQID_CNT];
 		} request;
 		struct {
 			uint32_t word0;
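
The rename from LPFC_MAX_EQ_DELAY to LPFC_MAX_EQ_DELAY_EQID_CNT encodes why the loops elsewhere in this series step by eight: one MODIFY_EQ_DELAY mailbox command carries up to eight EQ IDs, so the driver programs the EQs a batch at a time rather than issuing one mailbox per EQ. A small standalone model of the batching arithmetic only:

#include <stdio.h>

#define EQ_DELAY_BATCH 8   /* mirrors LPFC_MAX_EQ_DELAY_EQID_CNT */

/* One call models one mailbox command covering up to eight EQs. */
static void program_eq_batch(int first, int nr_eqs)
{
        int last = first + EQ_DELAY_BATCH;

        if (last > nr_eqs)
                last = nr_eqs;
        printf("one mailbox covers EQs %d..%d\n", first, last - 1);
}

int main(void)
{
        int nr_eqs = 20, qidx;

        for (qidx = 0; qidx < nr_eqs; qidx += EQ_DELAY_BATCH)
                program_eq_batch(qidx, nr_eqs);
        return 0;
}

With 20 EQs this issues three mailbox commands (0..7, 8..15, 16..19) where the old `i++` loop would have issued twenty.
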
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 0ee429d773f3..6cc561b04211 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -3555,6 +3555,44 @@ out_free_mem:
3555 return rc; 3555 return rc;
3556} 3556}
3557 3557
3558static uint64_t
3559lpfc_get_wwpn(struct lpfc_hba *phba)
3560{
3561 uint64_t wwn;
3562 int rc;
3563 LPFC_MBOXQ_t *mboxq;
3564 MAILBOX_t *mb;
3565
3566
3567 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
3568 GFP_KERNEL);
3569 if (!mboxq)
3570 return (uint64_t)-1;
3571
3572 /* First get WWN of HBA instance */
3573 lpfc_read_nv(phba, mboxq);
3574 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
3575 if (rc != MBX_SUCCESS) {
3576 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3577 "6019 Mailbox failed , mbxCmd x%x "
3578 "READ_NV, mbxStatus x%x\n",
3579 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
3580 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
3581 mempool_free(mboxq, phba->mbox_mem_pool);
3582 return (uint64_t) -1;
3583 }
3584 mb = &mboxq->u.mb;
3585 memcpy(&wwn, (char *)mb->un.varRDnvp.portname, sizeof(uint64_t));
3586 /* wwn is WWPN of HBA instance */
3587 mempool_free(mboxq, phba->mbox_mem_pool);
3588 if (phba->sli_rev == LPFC_SLI_REV4)
3589 return be64_to_cpu(wwn);
3590 else
3591 return (((wwn & 0xffffffff00000000) >> 32) |
3592 ((wwn & 0x00000000ffffffff) << 32));
3593
3594}
3595
3558/** 3596/**
3559 * lpfc_sli4_nvme_sgl_update - update xri-sgl sizing and mapping 3597 * lpfc_sli4_nvme_sgl_update - update xri-sgl sizing and mapping
3560 * @phba: pointer to lpfc hba data structure. 3598 * @phba: pointer to lpfc hba data structure.
@@ -3676,17 +3714,32 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
3676 struct lpfc_vport *vport; 3714 struct lpfc_vport *vport;
3677 struct Scsi_Host *shost = NULL; 3715 struct Scsi_Host *shost = NULL;
3678 int error = 0; 3716 int error = 0;
3717 int i;
3718 uint64_t wwn;
3719 bool use_no_reset_hba = false;
3720
3721 wwn = lpfc_get_wwpn(phba);
3722
3723 for (i = 0; i < lpfc_no_hba_reset_cnt; i++) {
3724 if (wwn == lpfc_no_hba_reset[i]) {
3725 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3726 "6020 Setting use_no_reset port=%llx\n",
3727 wwn);
3728 use_no_reset_hba = true;
3729 break;
3730 }
3731 }
3679 3732
3680 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { 3733 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
3681 if (dev != &phba->pcidev->dev) { 3734 if (dev != &phba->pcidev->dev) {
3682 shost = scsi_host_alloc(&lpfc_vport_template, 3735 shost = scsi_host_alloc(&lpfc_vport_template,
3683 sizeof(struct lpfc_vport)); 3736 sizeof(struct lpfc_vport));
3684 } else { 3737 } else {
3685 if (phba->sli_rev == LPFC_SLI_REV4) 3738 if (!use_no_reset_hba)
3686 shost = scsi_host_alloc(&lpfc_template, 3739 shost = scsi_host_alloc(&lpfc_template,
3687 sizeof(struct lpfc_vport)); 3740 sizeof(struct lpfc_vport));
3688 else 3741 else
3689 shost = scsi_host_alloc(&lpfc_template_s3, 3742 shost = scsi_host_alloc(&lpfc_template_no_hr,
3690 sizeof(struct lpfc_vport)); 3743 sizeof(struct lpfc_vport));
3691 } 3744 }
3692 } else if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 3745 } else if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
@@ -3734,17 +3787,14 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
3734 INIT_LIST_HEAD(&vport->rcv_buffer_list); 3787 INIT_LIST_HEAD(&vport->rcv_buffer_list);
3735 spin_lock_init(&vport->work_port_lock); 3788 spin_lock_init(&vport->work_port_lock);
3736 3789
3737 init_timer(&vport->fc_disctmo); 3790 setup_timer(&vport->fc_disctmo, lpfc_disc_timeout,
3738 vport->fc_disctmo.function = lpfc_disc_timeout; 3791 (unsigned long)vport);
3739 vport->fc_disctmo.data = (unsigned long)vport;
3740 3792
3741 init_timer(&vport->els_tmofunc); 3793 setup_timer(&vport->els_tmofunc, lpfc_els_timeout,
3742 vport->els_tmofunc.function = lpfc_els_timeout; 3794 (unsigned long)vport);
3743 vport->els_tmofunc.data = (unsigned long)vport;
3744 3795
3745 init_timer(&vport->delayed_disc_tmo); 3796 setup_timer(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo,
3746 vport->delayed_disc_tmo.function = lpfc_delayed_disc_tmo; 3797 (unsigned long)vport);
3747 vport->delayed_disc_tmo.data = (unsigned long)vport;
3748 3798
3749 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev); 3799 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
3750 if (error) 3800 if (error)
@@ -5406,21 +5456,15 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
5406 INIT_LIST_HEAD(&phba->luns); 5456 INIT_LIST_HEAD(&phba->luns);
5407 5457
5408 /* MBOX heartbeat timer */ 5458 /* MBOX heartbeat timer */
5409 init_timer(&psli->mbox_tmo); 5459 setup_timer(&psli->mbox_tmo, lpfc_mbox_timeout, (unsigned long)phba);
5410 psli->mbox_tmo.function = lpfc_mbox_timeout;
5411 psli->mbox_tmo.data = (unsigned long) phba;
5412 /* Fabric block timer */ 5460 /* Fabric block timer */
5413 init_timer(&phba->fabric_block_timer); 5461 setup_timer(&phba->fabric_block_timer, lpfc_fabric_block_timeout,
5414 phba->fabric_block_timer.function = lpfc_fabric_block_timeout; 5462 (unsigned long)phba);
5415 phba->fabric_block_timer.data = (unsigned long) phba;
5416 /* EA polling mode timer */ 5463 /* EA polling mode timer */
5417 init_timer(&phba->eratt_poll); 5464 setup_timer(&phba->eratt_poll, lpfc_poll_eratt,
5418 phba->eratt_poll.function = lpfc_poll_eratt; 5465 (unsigned long)phba);
5419 phba->eratt_poll.data = (unsigned long) phba;
5420 /* Heartbeat timer */ 5466 /* Heartbeat timer */
5421 init_timer(&phba->hb_tmofunc); 5467 setup_timer(&phba->hb_tmofunc, lpfc_hb_timeout, (unsigned long)phba);
5422 phba->hb_tmofunc.function = lpfc_hb_timeout;
5423 phba->hb_tmofunc.data = (unsigned long)phba;
5424 5468
5425 return 0; 5469 return 0;
5426} 5470}
@@ -5446,9 +5490,8 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
5446 */ 5490 */
5447 5491
5448 /* FCP polling mode timer */ 5492 /* FCP polling mode timer */
5449 init_timer(&phba->fcp_poll_timer); 5493 setup_timer(&phba->fcp_poll_timer, lpfc_poll_timeout,
5450 phba->fcp_poll_timer.function = lpfc_poll_timeout; 5494 (unsigned long)phba);
5451 phba->fcp_poll_timer.data = (unsigned long) phba;
5452 5495
5453 /* Host attention work mask setup */ 5496 /* Host attention work mask setup */
5454 phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT); 5497 phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
@@ -5482,7 +5525,8 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
5482 5525
5483 /* Initialize the host templates the configured values. */ 5526 /* Initialize the host templates the configured values. */
5484 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt; 5527 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
5485 lpfc_template_s3.sg_tablesize = phba->cfg_sg_seg_cnt; 5528 lpfc_template_no_hr.sg_tablesize = phba->cfg_sg_seg_cnt;
5529 lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
5486 5530
5487 /* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */ 5531 /* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */
5488 if (phba->cfg_enable_bg) { 5532 if (phba->cfg_enable_bg) {
@@ -5617,14 +5661,11 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
5617 * Initialize timers used by driver 5661 * Initialize timers used by driver
5618 */ 5662 */
5619 5663
5620 init_timer(&phba->rrq_tmr); 5664 setup_timer(&phba->rrq_tmr, lpfc_rrq_timeout, (unsigned long)phba);
5621 phba->rrq_tmr.function = lpfc_rrq_timeout;
5622 phba->rrq_tmr.data = (unsigned long)phba;
5623 5665
5624 /* FCF rediscover timer */ 5666 /* FCF rediscover timer */
5625 init_timer(&phba->fcf.redisc_wait); 5667 setup_timer(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo,
5626 phba->fcf.redisc_wait.function = lpfc_sli4_fcf_redisc_wait_tmo; 5668 (unsigned long)phba);
5627 phba->fcf.redisc_wait.data = (unsigned long)phba;
5628 5669
5629 /* 5670 /*
5630 * Control structure for handling external multi-buffer mailbox 5671 * Control structure for handling external multi-buffer mailbox
@@ -5706,6 +5747,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
5706 /* Initialize the host templates with the updated values. */ 5747 /* Initialize the host templates with the updated values. */
5707 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt; 5748 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
5708 lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt; 5749 lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
5750 lpfc_template_no_hr.sg_tablesize = phba->cfg_sg_seg_cnt;
5709 5751
5710 if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ) 5752 if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ)
5711 phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ; 5753 phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
@@ -5736,6 +5778,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
5736 /* Initialize the Abort nvme buffer list used by driver */ 5778 /* Initialize the Abort nvme buffer list used by driver */
5737 spin_lock_init(&phba->sli4_hba.abts_nvme_buf_list_lock); 5779 spin_lock_init(&phba->sli4_hba.abts_nvme_buf_list_lock);
5738 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvme_buf_list); 5780 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvme_buf_list);
5781 /* Fast-path XRI aborted CQ Event work queue list */
5782 INIT_LIST_HEAD(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue);
5739 } 5783 }
5740 5784
5741 /* This abort list used by worker thread */ 5785 /* This abort list used by worker thread */
@@ -5847,10 +5891,17 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
5847 /* Check to see if it matches any module parameter */ 5891 /* Check to see if it matches any module parameter */
5848 for (i = 0; i < lpfc_enable_nvmet_cnt; i++) { 5892 for (i = 0; i < lpfc_enable_nvmet_cnt; i++) {
5849 if (wwn == lpfc_enable_nvmet[i]) { 5893 if (wwn == lpfc_enable_nvmet[i]) {
5894#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
5850 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5895 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5851 "6017 NVME Target %016llx\n", 5896 "6017 NVME Target %016llx\n",
5852 wwn); 5897 wwn);
5853 phba->nvmet_support = 1; /* a match */ 5898 phba->nvmet_support = 1; /* a match */
5899#else
5900 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5901 "6021 Can't enable NVME Target."
5902 " NVME_TARGET_FC infrastructure"
5903 " is not in kernel\n");
5904#endif
5854 } 5905 }
5855 } 5906 }
5856 } 5907 }
@@ -8712,12 +8763,9 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
8712 } 8763 }
8713 } 8764 }
8714 8765
8715 /* 8766 for (qidx = 0; qidx < io_channel; qidx += LPFC_MAX_EQ_DELAY_EQID_CNT)
8716 * Configure EQ delay multipier for interrupt coalescing using
8717 * MODIFY_EQ_DELAY for all EQs created, LPFC_MAX_EQ_DELAY at a time.
8718 */
8719 for (qidx = 0; qidx < io_channel; qidx += LPFC_MAX_EQ_DELAY)
8720 lpfc_modify_hba_eq_delay(phba, qidx); 8767 lpfc_modify_hba_eq_delay(phba, qidx);
8768
8721 return 0; 8769 return 0;
8722 8770
8723out_destroy: 8771out_destroy:
@@ -8973,6 +9021,11 @@ lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
 	/* Pending ELS XRI abort events */
 	list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
 			 &cqelist);
+	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+		/* Pending NVME XRI abort events */
+		list_splice_init(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue,
+				 &cqelist);
+	}
 	/* Pending asynnc events */
 	list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
 			 &cqelist);
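
The added block splices any pending NVME XRI abort events onto the local `cqelist`, the standard way to drain a shared list: move every entry to a private list head while holding the lock, then walk and free them with the lock dropped. A kernel-style sketch of the idiom (the surrounding lpfc function provides the locking; names here are illustrative):

	#include <linux/list.h>
	#include <linux/spinlock.h>

	static void drain_pending(struct list_head *shared, spinlock_t *lock)
	{
		LIST_HEAD(local);	/* private, on-stack list head */

		spin_lock_irq(lock);
		list_splice_init(shared, &local);	/* 'shared' is left empty */
		spin_unlock_irq(lock);

		/* 'local' can now be walked and freed without the lock held */
	}
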
@@ -10400,12 +10453,7 @@ lpfc_pci_remove_one_s3(struct pci_dev *pdev)
 	fc_remove_host(shost);
 	scsi_remove_host(shost);
 
-	/* Perform ndlp cleanup on the physical port.  The nvme and nvmet
-	 * localports are destroyed after to cleanup all transport memory.
-	 */
 	lpfc_cleanup(vport);
-	lpfc_nvmet_destroy_targetport(phba);
-	lpfc_nvme_destroy_localport(vport);
 
 	/*
 	 * Bring down the SLI Layer. This step disable all interrupts,
@@ -12018,6 +12066,7 @@ static struct pci_driver lpfc_driver = {
 	.id_table	= lpfc_id_table,
 	.probe		= lpfc_pci_probe_one,
 	.remove		= lpfc_pci_remove_one,
+	.shutdown	= lpfc_pci_remove_one,
 	.suspend	= lpfc_pci_suspend_one,
 	.resume		= lpfc_pci_resume_one,
 	.err_handler    = &lpfc_err_handler,
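
Pointing `.shutdown` at the existing remove routine gives the HBA an orderly quiesce (interrupts off, DMA stopped) on reboot and kexec without a dedicated handler; `.remove` and `.shutdown` share the `void (*)(struct pci_dev *)` signature, so the reuse is type-safe. A minimal sketch with illustrative names:

	#include <linux/pci.h>

	static void exdrv_remove(struct pci_dev *pdev)
	{
		/* common teardown: stop the device, free IRQs and resources */
	}

	static struct pci_driver exdrv_driver = {
		.name     = "exdrv",
		.remove   = exdrv_remove,
		.shutdown = exdrv_remove,	/* reuse remove for system shutdown */
	};
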
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index c61d8d692ede..5986c7957199 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -646,7 +646,6 @@ lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba)
 	}
 
 	dma_buf->iocbq = lpfc_sli_get_iocbq(phba);
-	dma_buf->iocbq->iocb_flag = LPFC_IO_NVMET;
 	if (!dma_buf->iocbq) {
 		kfree(dma_buf->context);
 		pci_pool_free(phba->lpfc_drb_pool, dma_buf->dbuf.virt,
@@ -658,6 +657,7 @@ lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba)
 				"2621 Ran out of nvmet iocb/WQEs\n");
 		return NULL;
 	}
+	dma_buf->iocbq->iocb_flag = LPFC_IO_NVMET;
 	nvmewqe = dma_buf->iocbq;
 	wqe = (union lpfc_wqe128 *)&nvmewqe->wqe;
 	/* Initialize WQE */
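
The two lpfc_mem.c hunks fix a classic use-before-check: `iocb_flag` was assigned through `dma_buf->iocbq` one line before the pointer was tested for NULL, so an allocation failure would oops instead of unwinding. The corrected ordering, in schematic form (names illustrative):

	obj = alloc_object();		/* may return NULL */
	if (!obj) {
		/* unwind anything allocated earlier, then bail out */
		return NULL;
	}
	obj->flags = OBJ_FLAG_X;	/* safe: dereferenced only after the check */
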
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 609a908ea9db..0024de1c6c1f 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -316,7 +316,7 @@ lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
 	bf_set(wqe_dfctl, &wqe->gen_req.wge_ctl, 0);
 	bf_set(wqe_si, &wqe->gen_req.wge_ctl, 1);
 	bf_set(wqe_la, &wqe->gen_req.wge_ctl, 1);
-	bf_set(wqe_rctl, &wqe->gen_req.wge_ctl, FC_RCTL_DD_UNSOL_CTL);
+	bf_set(wqe_rctl, &wqe->gen_req.wge_ctl, FC_RCTL_ELS4_REQ);
 	bf_set(wqe_type, &wqe->gen_req.wge_ctl, FC_TYPE_NVME);
 
 	/* Word 6 */
@@ -620,15 +620,15 @@ lpfc_nvme_adj_fcp_sgls(struct lpfc_vport *vport,
 	 * Embed the payload in the last half of the WQE
 	 * WQE words 16-30 get the NVME CMD IU payload
 	 *
-	 * WQE Word 16 is already setup with flags
-	 * WQE words 17-19 get payload Words 2-4
+	 * WQE words 16-19 get payload Words 1-4
 	 * WQE words 20-21 get payload Words 6-7
 	 * WQE words 22-29 get payload Words 16-23
 	 */
-	wptr = &wqe->words[17];	/* WQE ptr */
+	wptr = &wqe->words[16];	/* WQE ptr */
 	dptr = (uint32_t *)nCmd->cmdaddr;	/* payload ptr */
-	dptr += 2;		/* Skip Words 0-1 in payload */
+	dptr++;			/* Skip Word 0 in payload */
 
+	*wptr++ = *dptr++;	/* Word 1 */
 	*wptr++ = *dptr++;	/* Word 2 */
 	*wptr++ = *dptr++;	/* Word 3 */
 	*wptr++ = *dptr++;	/* Word 4 */
@@ -978,9 +978,6 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
 			bf_set(wqe_cmd_type, &wqe->generic.wqe_com,
 			       NVME_WRITE_CMD);
 
-			/* Word 16 */
-			wqe->words[16] = LPFC_NVME_EMBED_WRITE;
-
 			phba->fc4NvmeOutputRequests++;
 		} else {
 			/* Word 7 */
@@ -1002,9 +999,6 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
 			bf_set(wqe_cmd_type, &wqe->generic.wqe_com,
 			       NVME_READ_CMD);
 
-			/* Word 16 */
-			wqe->words[16] = LPFC_NVME_EMBED_READ;
-
 			phba->fc4NvmeInputRequests++;
 		}
 	} else {
@@ -1026,9 +1020,6 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
 		/* Word 11 */
 		bf_set(wqe_cmd_type, &wqe->generic.wqe_com, NVME_READ_CMD);
 
-		/* Word 16 */
-		wqe->words[16] = LPFC_NVME_EMBED_CMD;
-
 		phba->fc4NvmeControlRequests++;
 	}
 	/*
@@ -1286,6 +1277,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
 	pnvme_fcreq->private = (void *)lpfc_ncmd;
 	lpfc_ncmd->nvmeCmd = pnvme_fcreq;
 	lpfc_ncmd->nrport = rport;
+	lpfc_ncmd->ndlp = ndlp;
 	lpfc_ncmd->start_time = jiffies;
 
 	lpfc_nvme_prep_io_cmd(vport, lpfc_ncmd, ndlp);
@@ -1319,7 +1311,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
 				 "sid: x%x did: x%x oxid: x%x\n",
 				 ret, vport->fc_myDID, ndlp->nlp_DID,
 				 lpfc_ncmd->cur_iocbq.sli4_xritag);
-		ret = -EINVAL;
+		ret = -EBUSY;
 		goto out_free_nvme_buf;
 	}
 
@@ -1821,10 +1813,10 @@ lpfc_post_nvme_sgl_list(struct lpfc_hba *phba,
 				pdma_phys_sgl1, cur_xritag);
 		if (status) {
 			/* failure, put on abort nvme list */
-			lpfc_ncmd->exch_busy = 1;
+			lpfc_ncmd->flags |= LPFC_SBUF_XBUSY;
 		} else {
 			/* success, put on NVME buffer list */
-			lpfc_ncmd->exch_busy = 0;
+			lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
 			lpfc_ncmd->status = IOSTAT_SUCCESS;
 			num_posted++;
 		}
@@ -1854,10 +1846,10 @@ lpfc_post_nvme_sgl_list(struct lpfc_hba *phba,
 					 struct lpfc_nvme_buf, list);
 		if (status) {
 			/* failure, put on abort nvme list */
-			lpfc_ncmd->exch_busy = 1;
+			lpfc_ncmd->flags |= LPFC_SBUF_XBUSY;
 		} else {
 			/* success, put on NVME buffer list */
-			lpfc_ncmd->exch_busy = 0;
+			lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
 			lpfc_ncmd->status = IOSTAT_SUCCESS;
 			num_posted++;
 		}
@@ -2099,7 +2091,7 @@ lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_nvme_buf *lpfc_ncmd)
 	unsigned long iflag = 0;
 
 	lpfc_ncmd->nonsg_phys = 0;
-	if (lpfc_ncmd->exch_busy) {
+	if (lpfc_ncmd->flags & LPFC_SBUF_XBUSY) {
 		spin_lock_irqsave(&phba->sli4_hba.abts_nvme_buf_list_lock,
 				  iflag);
 		lpfc_ncmd->nvmeCmd = NULL;
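
These three hunks retire the dedicated `exch_busy` field in favor of an `LPFC_SBUF_XBUSY` bit in the buffer's `flags` word, which packs several booleans into one field. The bitmask idiom, schematically (the `BUF_XBUSY` name is illustrative):

	#define BUF_XBUSY 0x1	/* lpfc's actual bit is LPFC_SBUF_XBUSY */

	buf->flags |= BUF_XBUSY;	/* set: exchange still live on the wire */
	buf->flags &= ~BUF_XBUSY;	/* clear: safe to reuse the buffer */
	if (buf->flags & BUF_XBUSY)	/* test */
		put_on_abort_list(buf);
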
@@ -2135,11 +2127,12 @@ lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_nvme_buf *lpfc_ncmd)
 int
 lpfc_nvme_create_localport(struct lpfc_vport *vport)
 {
+	int ret = 0;
 	struct lpfc_hba *phba = vport->phba;
 	struct nvme_fc_port_info nfcp_info;
 	struct nvme_fc_local_port *localport;
 	struct lpfc_nvme_lport *lport;
-	int len, ret = 0;
+	int len;
 
 	/* Initialize this localport instance.  The vport wwn usage ensures
 	 * that NPIV is accounted for.
@@ -2156,8 +2149,12 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
 	/* localport is allocated from the stack, but the registration
 	 * call allocates heap memory as well as the private area.
 	 */
+#if (IS_ENABLED(CONFIG_NVME_FC))
 	ret = nvme_fc_register_localport(&nfcp_info, &lpfc_nvme_template,
 					 &vport->phba->pcidev->dev, &localport);
+#else
+	ret = -ENOMEM;
+#endif
 	if (!ret) {
 		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME | LOG_NVME_DISC,
 				 "6005 Successfully registered local "
@@ -2173,10 +2170,10 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
 		lport->vport = vport;
 		INIT_LIST_HEAD(&lport->rport_list);
 		vport->nvmei_support = 1;
+		len = lpfc_new_nvme_buf(vport, phba->sli4_hba.nvme_xri_max);
+		vport->phba->total_nvme_bufs += len;
 	}
 
-	len = lpfc_new_nvme_buf(vport, phba->sli4_hba.nvme_xri_max);
-	vport->phba->total_nvme_bufs += len;
 	return ret;
 }
 
@@ -2193,6 +2190,7 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
 void
 lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
 {
+#if (IS_ENABLED(CONFIG_NVME_FC))
 	struct nvme_fc_local_port *localport;
 	struct lpfc_nvme_lport *lport;
 	struct lpfc_nvme_rport *rport = NULL, *rport_next = NULL;
@@ -2208,7 +2206,6 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
 			 "6011 Destroying NVME localport %p\n",
 			 localport);
-
 	list_for_each_entry_safe(rport, rport_next, &lport->rport_list, list) {
 		/* The last node ref has to get released now before the rport
 		 * private memory area is released by the transport.
@@ -2222,6 +2219,7 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
 				 "6008 rport fail destroy %x\n", ret);
 		wait_for_completion_timeout(&rport->rport_unreg_done, 5);
 	}
+
 	/* lport's rport list is clear.  Unregister
 	 * lport and release resources.
 	 */
@@ -2245,6 +2243,7 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
 				 "Failed, status x%x\n",
 				 ret);
 	}
+#endif
 }
 
 void
@@ -2275,6 +2274,7 @@ lpfc_nvme_update_localport(struct lpfc_vport *vport)
 int
 lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 {
+#if (IS_ENABLED(CONFIG_NVME_FC))
 	int ret = 0;
 	struct nvme_fc_local_port *localport;
 	struct lpfc_nvme_lport *lport;
@@ -2348,7 +2348,6 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 	rpinfo.port_role |= FC_PORT_ROLE_NVME_INITIATOR;
 	rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
 	rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
-
 	ret = nvme_fc_register_remoteport(localport, &rpinfo,
 					  &remote_port);
 	if (!ret) {
@@ -2384,6 +2383,9 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 			 ndlp->nlp_type, ndlp->nlp_DID, ndlp);
 	}
 	return ret;
+#else
+	return 0;
+#endif
 }
 
 /* lpfc_nvme_unregister_port - unbind the DID and port_role from this rport.
@@ -2401,6 +2403,7 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 void
 lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 {
+#if (IS_ENABLED(CONFIG_NVME_FC))
 	int ret;
 	struct nvme_fc_local_port *localport;
 	struct lpfc_nvme_lport *lport;
@@ -2458,7 +2461,61 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 	return;
 
  input_err:
+#endif
 	lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
 			 "6168: State error: lport %p, rport%p FCID x%06x\n",
 			 vport->localport, ndlp->rport, ndlp->nlp_DID);
 }
+
+/**
+ * lpfc_sli4_nvme_xri_aborted - Fast-path process of NVME xri abort
+ * @phba: pointer to lpfc hba data structure.
+ * @axri: pointer to the nvme xri abort wcqe structure.
+ *
+ * This routine is invoked by the worker thread to process a SLI4 fast-path
+ * NVME aborted xri.
+ **/
+void
+lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
+			   struct sli4_wcqe_xri_aborted *axri)
+{
+	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
+	uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
+	struct lpfc_nvme_buf *lpfc_ncmd, *next_lpfc_ncmd;
+	struct lpfc_nodelist *ndlp;
+	unsigned long iflag = 0;
+	int rrq_empty = 0;
+
+	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
+		return;
+	spin_lock_irqsave(&phba->hbalock, iflag);
+	spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+	list_for_each_entry_safe(lpfc_ncmd, next_lpfc_ncmd,
+				 &phba->sli4_hba.lpfc_abts_nvme_buf_list,
+				 list) {
+		if (lpfc_ncmd->cur_iocbq.sli4_xritag == xri) {
+			list_del(&lpfc_ncmd->list);
+			lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
+			lpfc_ncmd->status = IOSTAT_SUCCESS;
+			spin_unlock(
+				&phba->sli4_hba.abts_nvme_buf_list_lock);
+
+			rrq_empty = list_empty(&phba->active_rrq_list);
+			spin_unlock_irqrestore(&phba->hbalock, iflag);
+			ndlp = lpfc_ncmd->ndlp;
+			if (ndlp) {
+				lpfc_set_rrq_active(
+					phba, ndlp,
+					lpfc_ncmd->cur_iocbq.sli4_lxritag,
+					rxid, 1);
+				lpfc_sli4_abts_err_handler(phba, ndlp, axri);
+			}
+			lpfc_release_nvme_buf(phba, lpfc_ncmd);
+			if (rrq_empty)
+				lpfc_worker_wake_up(phba);
+			return;
+		}
+	}
+	spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+	spin_unlock_irqrestore(&phba->hbalock, iflag);
+}
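
Note the locking shape of the new handler: `hbalock` is taken first with interrupts saved, the abort-list lock is nested inside it, and on a match the two are released in strict reverse order before the heavier release path runs. Keeping one fixed acquisition order between this fast path and the posting paths is what avoids AB-BA deadlock. Schematically:

	spin_lock_irqsave(&outer_lock, flags);	/* always acquired first */
	spin_lock(&inner_lock);			/* always nested second */

	/* ... find and detach the matching entry ... */

	spin_unlock(&inner_lock);		/* release inner first */
	spin_unlock_irqrestore(&outer_lock, flags);

	/* callbacks and frees happen only after both locks drop */
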
diff --git a/drivers/scsi/lpfc/lpfc_nvme.h b/drivers/scsi/lpfc/lpfc_nvme.h
index b2fae5e813f8..1347deb8dd6c 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.h
+++ b/drivers/scsi/lpfc/lpfc_nvme.h
@@ -57,6 +57,7 @@ struct lpfc_nvme_buf {
 	struct list_head list;
 	struct nvmefc_fcp_req *nvmeCmd;
 	struct lpfc_nvme_rport *nrport;
+	struct lpfc_nodelist *ndlp;
 
 	uint32_t timeout;
 
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index c421e1738ee9..acba1b67e505 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -520,7 +520,7 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
 	struct lpfc_hba *phba = ctxp->phba;
 	struct lpfc_iocbq *nvmewqeq;
 	unsigned long iflags;
-	int rc, id;
+	int rc;
 
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
 	if (phba->ktime_on) {
@@ -530,7 +530,7 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
 		ctxp->ts_nvme_data = ktime_get_ns();
 	}
 	if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
-		id = smp_processor_id();
+		int id = smp_processor_id();
 		ctxp->cpu = id;
 		if (id < LPFC_CHECK_CPU_CNT)
 			phba->cpucheck_xmt_io[id]++;
@@ -571,6 +571,7 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
 				"6102 Bad state IO x%x aborted\n",
 				ctxp->oxid);
+		rc = -ENXIO;
 		goto aerr;
 	}
 
@@ -580,6 +581,7 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
 				"6152 FCP Drop IO x%x: Prep\n",
 				ctxp->oxid);
+		rc = -ENXIO;
 		goto aerr;
 	}
 
@@ -618,8 +620,9 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
 	ctxp->wqeq->hba_wqidx = 0;
 	nvmewqeq->context2 = NULL;
 	nvmewqeq->context3 = NULL;
+	rc = -EBUSY;
 aerr:
-	return -ENXIO;
+	return rc;
 }
 
 static void
@@ -668,9 +671,13 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
 	lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP |
 					   NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED;
 
+#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
 	error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate,
 					     &phba->pcidev->dev,
 					     &phba->targetport);
+#else
+	error = -ENOMEM;
+#endif
 	if (error) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
 				"6025 Cannot register NVME targetport "
@@ -731,9 +738,25 @@ lpfc_nvmet_update_targetport(struct lpfc_hba *phba)
 	return 0;
 }
 
+/**
+ * lpfc_sli4_nvmet_xri_aborted - Fast-path process of nvmet xri abort
+ * @phba: pointer to lpfc hba data structure.
+ * @axri: pointer to the nvmet xri abort wcqe structure.
+ *
+ * This routine is invoked by the worker thread to process a SLI4 fast-path
+ * NVMET aborted xri.
+ **/
+void
+lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
+			    struct sli4_wcqe_xri_aborted *axri)
+{
+	/* TODO: work in progress */
+}
+
 void
 lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
 {
+#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
 	struct lpfc_nvmet_tgtport *tgtp;
 
 	if (phba->nvmet_support == 0)
@@ -745,6 +768,7 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
 		wait_for_completion_timeout(&tgtp->tport_unreg_done, 5);
 	}
 	phba->targetport = NULL;
+#endif
 }
 
 /**
@@ -764,6 +788,7 @@ static void
 lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 			   struct hbq_dmabuf *nvmebuf)
 {
+#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
 	struct lpfc_nvmet_tgtport *tgtp;
 	struct fc_frame_header *fc_hdr;
 	struct lpfc_nvmet_rcv_ctx *ctxp;
@@ -844,6 +869,7 @@ dropit:
 
 	atomic_inc(&tgtp->xmt_ls_abort);
 	lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, sid, oxid);
+#endif
 }
 
 /**
@@ -865,6 +891,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
 			    struct rqb_dmabuf *nvmebuf,
 			    uint64_t isr_timestamp)
 {
+#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
 	struct lpfc_nvmet_rcv_ctx *ctxp;
 	struct lpfc_nvmet_tgtport *tgtp;
 	struct fc_frame_header *fc_hdr;
@@ -955,7 +982,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
 
 	atomic_inc(&tgtp->rcv_fcp_cmd_drop);
 	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
-			"6159 FCP Drop IO x%x: nvmet_fc_rcv_fcp_req x%x\n",
+			"6159 FCP Drop IO x%x: err x%x\n",
 			ctxp->oxid, rc);
 dropit:
 	lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n",
@@ -970,6 +997,7 @@ dropit:
 		/* We assume a rcv'ed cmd ALWAYs fits into 1 buffer */
 		lpfc_nvmet_rq_post(phba, NULL, &nvmebuf->hbuf);
 	}
+#endif
 }
 
 /**
@@ -1114,7 +1142,7 @@ lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
 	bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0);
 	bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, 1);
 	bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 0);
-	bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, FC_RCTL_DD_SOL_CTL);
+	bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, FC_RCTL_ELS4_REP);
 	bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_NVME);
 
 	/* Word 6 */
@@ -1445,7 +1473,6 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
 
 	case NVMET_FCOP_RSP:
 		/* Words 0 - 2 */
-		sgel = &rsp->sg[0];
 		physaddr = rsp->rspdma;
 		wqe->fcp_trsp.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
 		wqe->fcp_trsp.bde.tus.f.bdeSize = rsp->rsplen;
@@ -1681,8 +1708,8 @@ lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
 	struct lpfc_nodelist *ndlp;
 
 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
-			"6067 %s: Entrypoint: sid %x xri %x\n", __func__,
-			sid, xri);
+			"6067 Abort: sid %x xri x%x/x%x\n",
+			sid, xri, ctxp->wqeq->sli4_xritag);
 
 	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
 
@@ -1693,7 +1720,7 @@ lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
 		atomic_inc(&tgtp->xmt_abort_rsp_error);
 		lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
 				"6134 Drop ABTS - wrong NDLP state x%x.\n",
-				ndlp->nlp_state);
+				(ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
 
 		/* No failure to an ABTS request. */
 		return 0;
@@ -1791,7 +1818,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
 		atomic_inc(&tgtp->xmt_abort_rsp_error);
 		lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
 				"6160 Drop ABTS - wrong NDLP state x%x.\n",
-				ndlp->nlp_state);
+				(ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
 
 		/* No failure to an ABTS request. */
 		return 0;
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 9d6384af9fce..54fd0c81ceaf 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -5953,12 +5953,13 @@ struct scsi_host_template lpfc_template_nvme = {
 	.track_queue_depth	= 0,
 };
 
-struct scsi_host_template lpfc_template_s3 = {
+struct scsi_host_template lpfc_template_no_hr = {
 	.module			= THIS_MODULE,
 	.name			= LPFC_DRIVER_NAME,
 	.proc_name		= LPFC_DRIVER_NAME,
 	.info			= lpfc_info,
 	.queuecommand		= lpfc_queuecommand,
+	.eh_timed_out		= fc_eh_timed_out,
 	.eh_abort_handler	= lpfc_abort_handler,
 	.eh_device_reset_handler = lpfc_device_reset_handler,
 	.eh_target_reset_handler = lpfc_target_reset_handler,
@@ -6015,7 +6016,6 @@ struct scsi_host_template lpfc_vport_template = {
 	.eh_abort_handler	= lpfc_abort_handler,
 	.eh_device_reset_handler = lpfc_device_reset_handler,
 	.eh_target_reset_handler = lpfc_target_reset_handler,
-	.eh_bus_reset_handler	= lpfc_bus_reset_handler,
 	.slave_alloc		= lpfc_slave_alloc,
 	.slave_configure	= lpfc_slave_configure,
 	.slave_destroy		= lpfc_slave_destroy,
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index e43e5e23c24b..1c9fa45df7eb 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -1,3 +1,4 @@
+
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
@@ -952,7 +953,7 @@ __lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
 	start_sglq = sglq;
 	while (!found) {
 		if (!sglq)
-			return NULL;
+			break;
 		if (ndlp && ndlp->active_rrqs_xri_bitmap &&
 		    test_bit(sglq->sli4_lxritag,
 		    ndlp->active_rrqs_xri_bitmap)) {
@@ -12213,6 +12214,41 @@ void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba)
 }
 
 /**
+ * lpfc_sli4_nvme_xri_abort_event_proc - Process nvme xri abort event
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked by the worker thread to process all the pending
+ * SLI4 NVME abort XRI events.
+ **/
+void lpfc_sli4_nvme_xri_abort_event_proc(struct lpfc_hba *phba)
+{
+	struct lpfc_cq_event *cq_event;
+
+	/* First, declare the nvme xri abort event has been handled */
+	spin_lock_irq(&phba->hbalock);
+	phba->hba_flag &= ~NVME_XRI_ABORT_EVENT;
+	spin_unlock_irq(&phba->hbalock);
+	/* Now, handle all the nvme xri abort events */
+	while (!list_empty(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue)) {
+		/* Get the first event from the head of the event queue */
+		spin_lock_irq(&phba->hbalock);
+		list_remove_head(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue,
+				 cq_event, struct lpfc_cq_event, list);
+		spin_unlock_irq(&phba->hbalock);
+		/* Notify aborted XRI for NVME work queue */
+		if (phba->nvmet_support) {
+			lpfc_sli4_nvmet_xri_aborted(phba,
+						    &cq_event->cqe.wcqe_axri);
+		} else {
+			lpfc_sli4_nvme_xri_aborted(phba,
+						   &cq_event->cqe.wcqe_axri);
+		}
+		/* Free the event processed back to the free pool */
+		lpfc_sli4_cq_event_release(phba, cq_event);
+	}
+}
+
+/**
  * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
  * @phba: pointer to lpfc hba data structure.
  *
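
The new event-processing routine follows the worker-thread drain pattern of its FCP and ELS siblings: acknowledge the event flag under the lock, then repeatedly pop the head entry, dropping the lock before doing any real work on it. In outline (an illustrative `struct ev` stands in for `struct lpfc_cq_event`):

	spin_lock_irq(&lock);
	pending_flags &= ~EVENT_PENDING;	/* ack before draining */
	spin_unlock_irq(&lock);

	while (!list_empty(&queue)) {
		spin_lock_irq(&lock);
		ev = list_first_entry(&queue, struct ev, list);
		list_del(&ev->list);
		spin_unlock_irq(&lock);

		process(ev);			/* never under the lock */
		free_ev(ev);
	}
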
@@ -12709,10 +12745,22 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
 		spin_unlock_irqrestore(&phba->hbalock, iflags);
 		workposted = true;
 		break;
+	case LPFC_NVME:
+		spin_lock_irqsave(&phba->hbalock, iflags);
+		list_add_tail(&cq_event->list,
+			      &phba->sli4_hba.sp_nvme_xri_aborted_work_queue);
+		/* Set the nvme xri abort event flag */
+		phba->hba_flag |= NVME_XRI_ABORT_EVENT;
+		spin_unlock_irqrestore(&phba->hbalock, iflags);
+		workposted = true;
+		break;
 	default:
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-				"0603 Invalid work queue CQE subtype (x%x)\n",
-				cq->subtype);
+				"0603 Invalid CQ subtype %d: "
+				"%08x %08x %08x %08x\n",
+				cq->subtype, wcqe->word0, wcqe->parameter,
+				wcqe->word2, wcqe->word3);
+		lpfc_sli4_cq_event_release(phba, cq_event);
 		workposted = false;
 		break;
 	}
@@ -13827,6 +13875,8 @@ lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
  * @startq: The starting FCP EQ to modify
  *
  * This function sends an MODIFY_EQ_DELAY mailbox command to the HBA.
+ * The command allows up to LPFC_MAX_EQ_DELAY_EQID_CNT EQ ID's to be
+ * updated in one mailbox command.
  *
  * The @phba struct is used to send mailbox command to HBA. The @startq
  * is used to get the starting FCP EQ to change.
@@ -13879,7 +13929,7 @@ lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq)
 		eq_delay->u.request.eq[cnt].phase = 0;
 		eq_delay->u.request.eq[cnt].delay_multi = dmult;
 		cnt++;
-		if (cnt >= LPFC_MAX_EQ_DELAY)
+		if (cnt >= LPFC_MAX_EQ_DELAY_EQID_CNT)
 			break;
 	}
 	eq_delay->u.request.num_eq = cnt;
@@ -15185,17 +15235,17 @@ lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
 		drq = drqp[idx];
 		cq = cqp[idx];
 
-		if (hrq->entry_count != drq->entry_count) {
-			status = -EINVAL;
-			goto out;
-		}
-
 		/* sanity check on queue memory */
 		if (!hrq || !drq || !cq) {
 			status = -ENODEV;
 			goto out;
 		}
 
+		if (hrq->entry_count != drq->entry_count) {
+			status = -EINVAL;
+			goto out;
+		}
+
 		if (idx == 0) {
 			bf_set(lpfc_mbx_rq_create_num_pages,
 			       &rq_create->u.request,
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 91153c9f6d18..710458cf11d6 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -642,6 +642,7 @@ struct lpfc_sli4_hba {
 	struct list_head sp_asynce_work_queue;
 	struct list_head sp_fcp_xri_aborted_work_queue;
 	struct list_head sp_els_xri_aborted_work_queue;
+	struct list_head sp_nvme_xri_aborted_work_queue;
 	struct list_head sp_unsol_work_queue;
 	struct lpfc_sli4_link link_state;
 	struct lpfc_sli4_lnk_info lnk_info;
@@ -794,9 +795,14 @@ void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *);
 int lpfc_sli4_resume_rpi(struct lpfc_nodelist *,
 			void (*)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *);
 void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *);
+void lpfc_sli4_nvme_xri_abort_event_proc(struct lpfc_hba *phba);
 void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *);
 void lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *,
 			       struct sli4_wcqe_xri_aborted *);
+void lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
+				struct sli4_wcqe_xri_aborted *axri);
+void lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
+				 struct sli4_wcqe_xri_aborted *axri);
 void lpfc_sli4_els_xri_aborted(struct lpfc_hba *,
 			       struct sli4_wcqe_xri_aborted *);
 void lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *);
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 86c6c9b26b82..d4e95e28f4e3 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
  * included with this package.                                     *
  *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "11.2.0.7"
+#define LPFC_DRIVER_VERSION "11.2.0.10"
 #define LPFC_DRIVER_NAME		"lpfc"
 
 /* Used for SLI 2/3 */
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index e7e5974e1a2c..2b209bbb4c91 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -35,8 +35,8 @@
 /*
  * MegaRAID SAS Driver meta data
  */
-#define MEGASAS_VERSION				"07.701.16.00-rc1"
-#define MEGASAS_RELDATE				"February 2, 2017"
+#define MEGASAS_VERSION				"07.701.17.00-rc1"
+#define MEGASAS_RELDATE				"March 2, 2017"
 
 /*
  * Device IDs
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 7ac9a9ee9bd4..0016f12cc563 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -1963,6 +1963,9 @@ scan_target:
 	if (!mr_device_priv_data)
 		return -ENOMEM;
 	sdev->hostdata = mr_device_priv_data;
+
+	atomic_set(&mr_device_priv_data->r1_ldio_hint,
+		   instance->r1_ldio_hint_default);
 	return 0;
 }
 
@@ -5034,10 +5037,12 @@ megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe)
 					 &instance->irq_context[j]);
 			/* Retry irq register for IO_APIC*/
 			instance->msix_vectors = 0;
-			if (is_probe)
+			if (is_probe) {
+				pci_free_irq_vectors(instance->pdev);
 				return megasas_setup_irqs_ioapic(instance);
-			else
+			} else {
 				return -1;
+			}
 		}
 	}
 	return 0;
@@ -5277,9 +5282,11 @@ static int megasas_init_fw(struct megasas_instance *instance)
 			MPI2_REPLY_POST_HOST_INDEX_OFFSET);
 	}
 
-	i = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_LEGACY);
-	if (i < 0)
-		goto fail_setup_irqs;
+	if (!instance->msix_vectors) {
+		i = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_LEGACY);
+		if (i < 0)
+			goto fail_setup_irqs;
+	}
 
 	dev_info(&instance->pdev->dev,
 		"firmware supports msix\t: (%d)", fw_msix_count);
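
Both megaraid_sas IRQ fixes tighten the MSI-X-to-legacy fallback: vectors allocated for a failed MSI-X attempt are now freed before retrying in IO-APIC mode, and the legacy `pci_alloc_irq_vectors()` call is skipped when MSI-X vectors are already in hand. A condensed sketch of the fallback shape (an illustrative wrapper, not the driver's actual flow):

	#include <linux/pci.h>

	static int exdrv_setup_irqs(struct pci_dev *pdev, unsigned int want)
	{
		int nvec;

		nvec = pci_alloc_irq_vectors(pdev, 1, want, PCI_IRQ_MSIX);
		if (nvec > 0)
			return nvec;		/* got 1..want MSI-X vectors */

		pci_free_irq_vectors(pdev);	/* drop any partial IRQ state */
		return pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_LEGACY);
	}
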
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 29650ba669da..f990ab4d45e1 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -2159,7 +2159,7 @@ megasas_set_raidflag_cpu_affinity(union RAID_CONTEXT_UNION *praid_context,
 			cpu_sel = MR_RAID_CTX_CPUSEL_1;
 
 		if (is_stream_detected(rctx_g35) &&
-		    (raid->level == 5) &&
+		    ((raid->level == 5) || (raid->level == 6)) &&
 		    (raid->writeMode == MR_RL_WRITE_THROUGH_MODE) &&
 		    (cpu_sel == MR_RAID_CTX_CPUSEL_FCFS))
 			cpu_sel = MR_RAID_CTX_CPUSEL_0;
@@ -2338,7 +2338,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
 			fp_possible = false;
 			atomic_dec(&instance->fw_outstanding);
 		} else if ((scsi_buff_len > MR_LARGE_IO_MIN_SIZE) ||
-			atomic_dec_if_positive(&mrdev_priv->r1_ldio_hint)) {
+			(atomic_dec_if_positive(&mrdev_priv->r1_ldio_hint) > 0)) {
 			fp_possible = false;
 			atomic_dec(&instance->fw_outstanding);
 			if (scsi_buff_len > MR_LARGE_IO_MIN_SIZE)
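
The `> 0` comparison matters because `atomic_dec_if_positive()` returns the decremented value and refuses to go below zero: with the counter already at 0 it returns -1 and changes nothing. Used as a bare truth value, the old test was therefore true even in the "counter already empty" case; comparing against `> 0` distinguishes "a credit remained after the decrement" from "nothing left to consume". Schematically:

	#include <linux/atomic.h>

	/* returns old-1; returns -1 (no change) when the counter was 0 */
	if (atomic_dec_if_positive(&credits) > 0) {
		/* consumed a credit and at least one remains */
	} else {
		/* counter was 0 (-1) or this consumed the last credit (0) */
	}
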
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index 7fe7e6ed595b..8981806fb13f 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -1442,9 +1442,6 @@ void mpt3sas_transport_update_links(struct MPT3SAS_ADAPTER *ioc,
 	u64 sas_address, u16 handle, u8 phy_number, u8 link_rate);
 extern struct sas_function_template mpt3sas_transport_functions;
 extern struct scsi_transport_template *mpt3sas_transport_template;
-extern int scsi_internal_device_block(struct scsi_device *sdev);
-extern int scsi_internal_device_unblock(struct scsi_device *sdev,
-					enum scsi_device_state new_state);
 /* trigger data externs */
 void mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER *ioc,
 	struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 46e866c36c8a..919ba2bb15f1 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -2859,7 +2859,7 @@ _scsih_internal_device_block(struct scsi_device *sdev,
 	    sas_device_priv_data->sas_target->handle);
 	sas_device_priv_data->block = 1;
 
-	r = scsi_internal_device_block(sdev);
+	r = scsi_internal_device_block(sdev, false);
 	if (r == -EINVAL)
 		sdev_printk(KERN_WARNING, sdev,
 		    "device_block failed with return(%d) for handle(0x%04x)\n",
@@ -2895,7 +2895,7 @@ _scsih_internal_device_unblock(struct scsi_device *sdev,
 		    "performing a block followed by an unblock\n",
 		    r, sas_device_priv_data->sas_target->handle);
 		sas_device_priv_data->block = 1;
-		r = scsi_internal_device_block(sdev);
+		r = scsi_internal_device_block(sdev, false);
 		if (r)
 			sdev_printk(KERN_WARNING, sdev, "retried device_block "
 			    "failed with return(%d) for handle(0x%04x)\n",
@@ -4677,7 +4677,6 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
 	struct MPT3SAS_DEVICE *sas_device_priv_data;
 	u32 response_code = 0;
 	unsigned long flags;
-	unsigned int sector_sz;
 
 	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
 
@@ -4742,20 +4741,6 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
 	}
 
 	xfer_cnt = le32_to_cpu(mpi_reply->TransferCount);
-
-	/* In case of bogus fw or device, we could end up having
-	 * unaligned partial completion. We can force alignment here,
-	 * then scsi-ml does not need to handle this misbehavior.
-	 */
-	sector_sz = scmd->device->sector_size;
-	if (unlikely(!blk_rq_is_passthrough(scmd->request) && sector_sz &&
-		     xfer_cnt % sector_sz)) {
-		sdev_printk(KERN_INFO, scmd->device,
-		    "unaligned partial completion avoided (xfer_cnt=%u, sector_sz=%u)\n",
-			    xfer_cnt, sector_sz);
-		xfer_cnt = round_down(xfer_cnt, sector_sz);
-	}
-
 	scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt);
 	if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
 		log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
diff --git a/drivers/scsi/qedf/qedf_dbg.h b/drivers/scsi/qedf/qedf_dbg.h
index 23bd70628a2f..7d173f48a81e 100644
--- a/drivers/scsi/qedf/qedf_dbg.h
+++ b/drivers/scsi/qedf/qedf_dbg.h
@@ -81,14 +81,17 @@ struct qedf_dbg_ctx {
 #define QEDF_INFO(pdev, level, fmt, ...)	\
 		qedf_dbg_info(pdev, __func__, __LINE__, level, fmt, \
 			      ## __VA_ARGS__)
-
-extern void qedf_dbg_err(struct qedf_dbg_ctx *qedf, const char *func, u32 line,
+__printf(4, 5)
+void qedf_dbg_err(struct qedf_dbg_ctx *qedf, const char *func, u32 line,
 			  const char *fmt, ...);
-extern void qedf_dbg_warn(struct qedf_dbg_ctx *qedf, const char *func, u32 line,
+__printf(4, 5)
+void qedf_dbg_warn(struct qedf_dbg_ctx *qedf, const char *func, u32 line,
 			   const char *, ...);
-extern void qedf_dbg_notice(struct qedf_dbg_ctx *qedf, const char *func,
+__printf(4, 5)
+void qedf_dbg_notice(struct qedf_dbg_ctx *qedf, const char *func,
 			    u32 line, const char *, ...);
-extern void qedf_dbg_info(struct qedf_dbg_ctx *qedf, const char *func, u32 line,
+__printf(5, 6)
+void qedf_dbg_info(struct qedf_dbg_ctx *qedf, const char *func, u32 line,
 			  u32 info, const char *fmt, ...);
 
 /* GRC Dump related defines */
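
The qedf_dbg.h hunk drops the redundant `extern` on the declarations and adds `__printf(fmt, first_arg)` annotations, which expand to GCC's `format(printf, ...)` attribute so the compiler cross-checks each format string against its variadic arguments; the two indexes are the 1-based positions of the format parameter and of the first argument it consumes (5 and 6 for `qedf_dbg_info()` because of the extra `info` parameter). A self-contained illustration, using the same expansion the kernel's compiler headers provide:

	#define __printf(a, b) __attribute__((format(printf, a, b)))

	__printf(2, 3)
	void log_msg(int level, const char *fmt, ...);

	/* the compiler now warns on mismatches, e.g.: */
	/* log_msg(1, "name=%s\n", 42);   -- %s given an int */
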
diff --git a/drivers/scsi/qedf/qedf_fip.c b/drivers/scsi/qedf/qedf_fip.c
index 868d423380d1..ed58b9104f58 100644
--- a/drivers/scsi/qedf/qedf_fip.c
+++ b/drivers/scsi/qedf/qedf_fip.c
@@ -203,7 +203,7 @@ void qedf_fip_recv(struct qedf_ctx *qedf, struct sk_buff *skb)
 		case FIP_DT_MAC:
 			mp = (struct fip_mac_desc *)desc;
 			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
-			    "fd_mac=%pM.\n", __func__, mp->fd_mac);
+			    "fd_mac=%pM\n", mp->fd_mac);
 			ether_addr_copy(cvl_mac, mp->fd_mac);
 			break;
 		case FIP_DT_NAME:
diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c
index ee0dcf9d3aba..46debe5034af 100644
--- a/drivers/scsi/qedf/qedf_io.c
+++ b/drivers/scsi/qedf/qedf_io.c
@@ -1342,7 +1342,7 @@ void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
 	} else {
 		refcount = kref_read(&io_req->refcount);
 		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
-		    "%d:0:%d:%d xid=0x%0x op=0x%02x "
+		    "%d:0:%d:%lld xid=0x%0x op=0x%02x "
 		    "lba=%02x%02x%02x%02x cdb_status=%d "
 		    "fcp_resid=0x%x refcount=%d.\n",
 		    qedf->lport->host->host_no, sc_cmd->device->id,
@@ -1426,7 +1426,7 @@ void qedf_scsi_done(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
 
 	sc_cmd->result = result << 16;
 	refcount = kref_read(&io_req->refcount);
-	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "%d:0:%d:%d: Completing "
+	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "%d:0:%d:%lld: Completing "
 	    "sc_cmd=%p result=0x%08x op=0x%02x lba=0x%02x%02x%02x%02x, "
 	    "allowed=%d retries=%d refcount=%d.\n",
 	    qedf->lport->host->host_no, sc_cmd->device->id,
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index d9d7a86b5f8b..8e2a160490e6 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -2456,8 +2456,8 @@ static int qedf_alloc_bdq(struct qedf_ctx *qedf)
 	}
 
 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
-	    "BDQ PBL addr=0x%p dma=0x%llx.\n", qedf->bdq_pbl,
-	    qedf->bdq_pbl_dma);
+	    "BDQ PBL addr=0x%p dma=%pad\n",
+	    qedf->bdq_pbl, &qedf->bdq_pbl_dma);
 
 	/*
 	 * Populate BDQ PBL with physical and virtual address of individual
diff --git a/drivers/scsi/qedi/qedi_debugfs.c b/drivers/scsi/qedi/qedi_debugfs.c
index 955936274241..59417199bf36 100644
--- a/drivers/scsi/qedi/qedi_debugfs.c
+++ b/drivers/scsi/qedi/qedi_debugfs.c
@@ -14,7 +14,7 @@
 #include <linux/debugfs.h>
 #include <linux/module.h>
 
-int do_not_recover;
+int qedi_do_not_recover;
 static struct dentry *qedi_dbg_root;
 
 void
@@ -74,22 +74,22 @@ qedi_dbg_exit(void)
 static ssize_t
 qedi_dbg_do_not_recover_enable(struct qedi_dbg_ctx *qedi_dbg)
 {
-	if (!do_not_recover)
-		do_not_recover = 1;
+	if (!qedi_do_not_recover)
+		qedi_do_not_recover = 1;
 
 	QEDI_INFO(qedi_dbg, QEDI_LOG_DEBUGFS, "do_not_recover=%d\n",
-		  do_not_recover);
+		  qedi_do_not_recover);
 	return 0;
 }
 
 static ssize_t
 qedi_dbg_do_not_recover_disable(struct qedi_dbg_ctx *qedi_dbg)
 {
-	if (do_not_recover)
-		do_not_recover = 0;
+	if (qedi_do_not_recover)
+		qedi_do_not_recover = 0;
 
 	QEDI_INFO(qedi_dbg, QEDI_LOG_DEBUGFS, "do_not_recover=%d\n",
-		  do_not_recover);
+		  qedi_do_not_recover);
 	return 0;
 }
 
@@ -141,7 +141,7 @@ qedi_dbg_do_not_recover_cmd_read(struct file *filp, char __user *buffer,
 	if (*ppos)
 		return 0;
 
-	cnt = sprintf(buffer, "do_not_recover=%d\n", do_not_recover);
+	cnt = sprintf(buffer, "do_not_recover=%d\n", qedi_do_not_recover);
 	cnt = min_t(int, count, cnt - *ppos);
 	*ppos += cnt;
 	return cnt;
diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c
index c9f0ef4e11b3..2bce3efc66a4 100644
--- a/drivers/scsi/qedi/qedi_fw.c
+++ b/drivers/scsi/qedi/qedi_fw.c
@@ -1461,9 +1461,9 @@ static void qedi_tmf_work(struct work_struct *work)
 		  get_itt(tmf_hdr->rtt), get_itt(ctask->itt), cmd->task_id,
 		  qedi_conn->iscsi_conn_id);
 
-	if (do_not_recover) {
+	if (qedi_do_not_recover) {
 		QEDI_ERR(&qedi->dbg_ctx, "DONT SEND CLEANUP/ABORT %d\n",
-			 do_not_recover);
+			 qedi_do_not_recover);
 		goto abort_ret;
 	}
 
diff --git a/drivers/scsi/qedi/qedi_gbl.h b/drivers/scsi/qedi/qedi_gbl.h
index 8e488de88ece..63d793f46064 100644
--- a/drivers/scsi/qedi/qedi_gbl.h
+++ b/drivers/scsi/qedi/qedi_gbl.h
@@ -12,8 +12,14 @@
 
 #include "qedi_iscsi.h"
 
+#ifdef CONFIG_DEBUG_FS
+extern int qedi_do_not_recover;
+#else
+#define qedi_do_not_recover (0)
+#endif
+
 extern uint qedi_io_tracing;
-extern int do_not_recover;
+
 extern struct scsi_host_template qedi_host_template;
 extern struct iscsi_transport qedi_iscsi_transport;
 extern const struct qed_iscsi_ops *qedi_ops;
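
The header change moves the knob into the driver's namespace (the old bare global `do_not_recover` could clash with other symbols) and, when debugfs is compiled out, replaces the extern with a constant macro. Because `qedi_do_not_recover` then expands to `(0)`, every `if (qedi_do_not_recover)` in the driver becomes `if (0)` and the compiler discards the branch, leaving no runtime cost and no undefined symbol. The pattern in isolation, with an illustrative name:

	#ifdef CONFIG_DEBUG_FS
	extern int example_knob;	/* real variable, toggled via debugfs */
	#else
	#define example_knob (0)	/* constant: dead branches fold away */
	#endif

	if (example_knob)		/* compiles out entirely when off */
		return 1;
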
diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
index b9f79d36142d..4cc474364c50 100644
--- a/drivers/scsi/qedi/qedi_iscsi.c
+++ b/drivers/scsi/qedi/qedi_iscsi.c
@@ -833,7 +833,7 @@ qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
 		return ERR_PTR(ret);
 	}
 
-	if (do_not_recover) {
+	if (qedi_do_not_recover) {
 		ret = -ENOMEM;
 		return ERR_PTR(ret);
 	}
@@ -957,7 +957,7 @@ static int qedi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
 	struct qedi_endpoint *qedi_ep;
 	int ret = 0;
 
-	if (do_not_recover)
+	if (qedi_do_not_recover)
 		return 1;
 
 	qedi_ep = ep->dd_data;
@@ -1025,7 +1025,7 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
 	}
 
 	if (test_bit(QEDI_IN_RECOVERY, &qedi->flags)) {
-		if (do_not_recover) {
+		if (qedi_do_not_recover) {
 			QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
 				  "Do not recover cid=0x%x\n",
 				  qedi_ep->iscsi_cid);
@@ -1039,7 +1039,7 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
 		}
 	}
 
-	if (do_not_recover)
+	if (qedi_do_not_recover)
 		goto ep_exit_recover;
 
 	switch (qedi_ep->state) {
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index 5eda21d903e9..92775a8b74b1 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -1805,7 +1805,7 @@ static int __qedi_probe(struct pci_dev *pdev, int mode)
 	 */
 	qedi_ops->common->update_pf_params(qedi->cdev, &qedi->pf_params);
 
-	qedi_setup_int(qedi);
+	rc = qedi_setup_int(qedi);
 	if (rc)
 		goto stop_iscsi_func;
 
@@ -2007,6 +2007,7 @@ static void qedi_remove(struct pci_dev *pdev)
 
 static struct pci_device_id qedi_pci_tbl[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x165E) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x8084) },
 	{ 0 },
 };
 MODULE_DEVICE_TABLE(pci, qedi_pci_tbl);
diff --git a/drivers/scsi/qla2xxx/Kconfig b/drivers/scsi/qla2xxx/Kconfig
index 67c0d5aa3212..de952935b5d2 100644
--- a/drivers/scsi/qla2xxx/Kconfig
+++ b/drivers/scsi/qla2xxx/Kconfig
@@ -3,6 +3,7 @@ config SCSI_QLA_FC
 	depends on PCI && SCSI
 	depends on SCSI_FC_ATTRS
 	select FW_LOADER
+	select BTREE
 	---help---
 	This qla2xxx driver supports all QLogic Fibre Channel
 	PCI and PCIe host adapters.
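select BTREE pulls in lib/btree, which the qla2xxx target code further down uses to map 24-bit FC port IDs to host structures. A minimal sketch of the 32-bit-key flavor of that API, under assumed key and value choices:

/*
 * Sketch of the lib/btree 32-bit-key API (the same calls the target
 * code below uses for its host_map); key and value here are assumptions.
 */
#include <linux/btree.h>
#include <linux/errno.h>
#include <linux/slab.h>

static struct btree_head32 host_map;

static int host_map_demo(void *host, u32 key)
{
	int rc = btree_init32(&host_map);

	if (rc)
		return rc;

	rc = btree_insert32(&host_map, key, host, GFP_KERNEL); /* add  */
	if (!rc && btree_lookup32(&host_map, key) != host)     /* find */
		rc = -EINVAL;
	btree_remove32(&host_map, key);                        /* drop */
	btree_destroy32(&host_map);
	return rc;
}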
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index f610103994af..435ff7fd6384 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -2154,8 +2154,6 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
 		    "Timer for the VP[%d] has stopped\n", vha->vp_idx);
 	}
 
-	BUG_ON(atomic_read(&vha->vref_count));
-
 	qla2x00_free_fcports(vha);
 
 	mutex_lock(&ha->vport_lock);
@@ -2166,7 +2164,7 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
 	dma_free_coherent(&ha->pdev->dev, vha->gnl.size, vha->gnl.l,
 	    vha->gnl.ldma);
 
-	if (vha->qpair->vp_idx == vha->vp_idx) {
+	if (vha->qpair && vha->qpair->vp_idx == vha->vp_idx) {
 		if (qla2xxx_delete_qpair(vha, vha->qpair) != QLA_SUCCESS)
 			ql_log(ql_log_warn, vha, 0x7087,
 			    "Queue Pair delete failed.\n");
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 21d9fb7fc887..51b4179469d1 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -2707,13 +2707,9 @@ ql_dump_buffer(uint32_t level, scsi_qla_host_t *vha, int32_t id,
 	    "%-+5d  0  1  2  3  4  5  6  7  8  9  A  B  C  D  E  F\n", size);
 	ql_dbg(level, vha, id,
 	    "----- -----------------------------------------------\n");
-	for (cnt = 0; cnt < size; cnt++, buf++) {
-		if (cnt % 16 == 0)
-			ql_dbg(level, vha, id, "%04x:", cnt & ~0xFU);
-		printk(" %02x", *buf);
-		if (cnt % 16 == 15)
-			printk("\n");
+	for (cnt = 0; cnt < size; cnt += 16) {
+		ql_dbg(level, vha, id, "%04x: ", cnt);
+		print_hex_dump(KERN_CONT, "", DUMP_PREFIX_NONE, 16, 1,
+		    buf + cnt, min(16U, size - cnt), false);
 	}
-	if (cnt % 16 != 0)
-		printk("\n");
 }
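The ql_dump_buffer() rewrite drops the hand-rolled byte loop in favor of the kernel's print_hex_dump() helper, emitting one 16-byte row per call. A self-contained sketch of the same idiom:

/* Self-contained sketch of the print_hex_dump() idiom used above. */
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/types.h>

static void dump_sketch(const u8 *buf, u32 size)
{
	u32 cnt;

	for (cnt = 0; cnt < size; cnt += 16) {
		pr_info("%04x: ", cnt);
		/* 16 bytes per row, 1-byte groups, no ASCII column */
		print_hex_dump(KERN_CONT, "", DUMP_PREFIX_NONE, 16, 1,
			       buf + cnt, min(16U, size - cnt), false);
	}
}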
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index e1fc4e66966a..c6bffe929fe7 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -348,6 +348,7 @@ ql_log_pci(uint32_t, struct pci_dev *pdev, int32_t, const char *fmt, ...);
 #define ql_dbg_tgt	0x00004000 /* Target mode */
 #define ql_dbg_tgt_mgt	0x00002000 /* Target mode management */
 #define ql_dbg_tgt_tmr	0x00001000 /* Target mode task management */
+#define ql_dbg_tgt_dif	0x00000800 /* Target mode dif */
 
 extern int qla27xx_dump_mpi_ram(struct qla_hw_data *, uint32_t, uint32_t *,
 	uint32_t, void **);
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 625d438e3cce..ae119018dfaa 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -25,6 +25,7 @@
 #include <linux/firmware.h>
 #include <linux/aer.h>
 #include <linux/mutex.h>
+#include <linux/btree.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_host.h>
@@ -395,11 +396,15 @@ struct srb_iocb {
 			struct completion comp;
 		} abt;
 		struct ct_arg ctarg;
+#define MAX_IOCB_MB_REG 28
+#define SIZEOF_IOCB_MB_REG (MAX_IOCB_MB_REG * sizeof(uint16_t))
 		struct {
-			__le16 in_mb[28];	/* fr fw */
-			__le16 out_mb[28];	/* to fw */
+			__le16 in_mb[MAX_IOCB_MB_REG];	/* from FW */
+			__le16 out_mb[MAX_IOCB_MB_REG];	/* to FW */
 			void *out, *in;
 			dma_addr_t out_dma, in_dma;
+			struct completion comp;
+			int rc;
 		} mbx;
 		struct {
 			struct imm_ntfy_from_isp *ntfy;
@@ -437,7 +442,7 @@ typedef struct srb {
 	uint32_t handle;
 	uint16_t flags;
 	uint16_t type;
-	char *name;
+	const char *name;
 	int iocbs;
 	struct qla_qpair *qpair;
 	u32 gen1;	/* scratch */
@@ -2300,6 +2305,8 @@ typedef struct fc_port {
 	struct ct_sns_desc ct_desc;
 	enum discovery_state disc_state;
 	enum login_state fw_login_state;
+	unsigned long plogi_nack_done_deadline;
+
 	u32 login_gen, last_login_gen;
 	u32 rscn_gen, last_rscn_gen;
 	u32 chip_reset;
@@ -3106,6 +3113,16 @@ struct qla_chip_state_84xx {
 	uint32_t gold_fw_version;
 };
 
+struct qla_dif_statistics {
+	uint64_t dif_input_bytes;
+	uint64_t dif_output_bytes;
+	uint64_t dif_input_requests;
+	uint64_t dif_output_requests;
+	uint32_t dif_guard_err;
+	uint32_t dif_ref_tag_err;
+	uint32_t dif_app_tag_err;
+};
+
 struct qla_statistics {
 	uint32_t total_isp_aborts;
 	uint64_t input_bytes;
@@ -3118,6 +3135,8 @@ struct qla_statistics {
 	uint32_t stat_max_pend_cmds;
 	uint32_t stat_max_qfull_cmds_alloc;
 	uint32_t stat_max_qfull_cmds_dropped;
+
+	struct qla_dif_statistics qla_dif_stats;
 };
 
 struct bidi_statistics {
@@ -3125,6 +3144,16 @@ struct bidi_statistics {
 	unsigned long long transfer_bytes;
 };
 
+struct qla_tc_param {
+	struct scsi_qla_host *vha;
+	uint32_t blk_sz;
+	uint32_t bufflen;
+	struct scatterlist *sg;
+	struct scatterlist *prot_sg;
+	struct crc_context *ctx;
+	uint8_t *ctx_dsd_alloced;
+};
+
 /* Multi queue support */
 #define MBC_INITIALIZE_MULTIQ 0x1f
 #define QLA_QUE_PAGE 0X1000
@@ -3272,6 +3301,8 @@ struct qlt_hw_data {
 	uint8_t tgt_node_name[WWN_SIZE];
 
 	struct dentry *dfs_tgt_sess;
+	struct dentry *dfs_tgt_port_database;
+
 	struct list_head q_full_list;
 	uint32_t num_pend_cmds;
 	uint32_t num_qfull_cmds_alloc;
@@ -3281,6 +3312,7 @@ struct qlt_hw_data {
 	spinlock_t sess_lock;
 	int rspq_vector_cpuid;
 	spinlock_t atio_lock ____cacheline_aligned;
+	struct btree_head32 host_map;
 };
 
 #define MAX_QFULL_CMDS_ALLOC	8192
@@ -3290,6 +3322,10 @@ struct qlt_hw_data {
 
 #define LEAK_EXCHG_THRESH_HOLD_PERCENT 75	/* 75 percent */
 
+#define QLA_EARLY_LINKUP(_ha) \
+	((_ha->flags.n2n_ae || _ha->flags.lip_ae) && \
+	 _ha->flags.fw_started && !_ha->flags.fw_init_done)
+
 /*
  * Qlogic host adapter specific data structure.
  */
@@ -3339,7 +3375,11 @@ struct qla_hw_data {
 		uint32_t	fawwpn_enabled:1;
 		uint32_t	exlogins_enabled:1;
 		uint32_t	exchoffld_enabled:1;
-		/* 35 bits */
+
+		uint32_t	lip_ae:1;
+		uint32_t	n2n_ae:1;
+		uint32_t	fw_started:1;
+		uint32_t	fw_init_done:1;
 	} flags;
 
 	/* This spinlock is used to protect "io transactions", you must
@@ -3432,7 +3472,6 @@ struct qla_hw_data {
 #define P2P_LOOP	3
 	uint8_t		interrupts_on;
 	uint32_t	isp_abort_cnt;
-
 #define PCI_DEVICE_ID_QLOGIC_ISP2532	0x2532
 #define PCI_DEVICE_ID_QLOGIC_ISP8432	0x8432
 #define PCI_DEVICE_ID_QLOGIC_ISP8001	0x8001
@@ -3913,6 +3952,7 @@ typedef struct scsi_qla_host {
 	struct list_head vp_fcports;	/* list of fcports */
 	struct list_head work_list;
 	spinlock_t work_lock;
+	struct work_struct iocb_work;
 
 	/* Commonly used flags and state information. */
 	struct Scsi_Host *host;
@@ -4076,6 +4116,7 @@ typedef struct scsi_qla_host {
 	/* Count of active session/fcport */
 	int fcport_count;
 	wait_queue_head_t fcport_waitQ;
+	wait_queue_head_t vref_waitq;
 } scsi_qla_host_t;
 
 struct qla27xx_image_status {
@@ -4131,14 +4172,17 @@ struct qla2_sgx {
 	mb(); \
 	if (__vha->flags.delete_progress) { \
 		atomic_dec(&__vha->vref_count); \
+		wake_up(&__vha->vref_waitq); \
 		__bail = 1; \
 	} else { \
 		__bail = 0; \
 	} \
 } while (0)
 
-#define QLA_VHA_MARK_NOT_BUSY(__vha) \
+#define QLA_VHA_MARK_NOT_BUSY(__vha) do { \
 	atomic_dec(&__vha->vref_count); \
+	wake_up(&__vha->vref_waitq); \
+} while (0) \
 
 #define QLA_QPAIR_MARK_BUSY(__qpair, __bail) do { \
 	atomic_inc(&__qpair->ref_count); \
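QLA_VHA_MARK_NOT_BUSY now expands to two statements, so the hunk also wraps it in do { } while (0); without the wrapper, an unbraced if in a caller would guard only the first statement. An illustrative sketch with made-up field names:

/* Why the do { } while (0) wrapper above matters; fields are made up. */
#define MARK_NOT_BUSY_BROKEN(p)	\
	atomic_dec(&(p)->ref);	\
	wake_up(&(p)->waitq)	/* second statement escapes the if below */

#define MARK_NOT_BUSY(p) do {	\
	atomic_dec(&(p)->ref);	\
	wake_up(&(p)->waitq);	\
} while (0)

/*
 *	if (done)
 *		MARK_NOT_BUSY_BROKEN(vha);	// wake_up() runs unconditionally
 *	if (done)
 *		MARK_NOT_BUSY(vha);		// behaves as one statement
 */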
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index b48cce696bac..989e17b0758c 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -19,11 +19,11 @@ qla2x00_dfs_tgt_sess_show(struct seq_file *s, void *unused)
 	struct qla_hw_data *ha = vha->hw;
 	unsigned long flags;
 	struct fc_port *sess = NULL;
-	struct qla_tgt *tgt= vha->vha_tgt.qla_tgt;
+	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
 
-	seq_printf(s, "%s\n",vha->host_str);
+	seq_printf(s, "%s\n", vha->host_str);
 	if (tgt) {
-		seq_printf(s, "Port ID   Port Name                Handle\n");
+		seq_puts(s, "Port ID   Port Name                Handle\n");
 
 		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
 		list_for_each_entry(sess, &vha->vp_fcports, list)
@@ -44,7 +44,6 @@ qla2x00_dfs_tgt_sess_open(struct inode *inode, struct file *file)
 	return single_open(file, qla2x00_dfs_tgt_sess_show, vha);
 }
 
-
 static const struct file_operations dfs_tgt_sess_ops = {
 	.open		= qla2x00_dfs_tgt_sess_open,
 	.read		= seq_read,
@@ -53,6 +52,78 @@ static const struct file_operations dfs_tgt_sess_ops = {
 };
 
 static int
+qla2x00_dfs_tgt_port_database_show(struct seq_file *s, void *unused)
+{
+	scsi_qla_host_t *vha = s->private;
+	struct qla_hw_data *ha = vha->hw;
+	struct gid_list_info *gid_list;
+	dma_addr_t gid_list_dma;
+	fc_port_t fc_port;
+	char *id_iter;
+	int rc, i;
+	uint16_t entries, loop_id;
+	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+
+	seq_printf(s, "%s\n", vha->host_str);
+	if (tgt) {
+		gid_list = dma_alloc_coherent(&ha->pdev->dev,
+		    qla2x00_gid_list_size(ha),
+		    &gid_list_dma, GFP_KERNEL);
+		if (!gid_list) {
+			ql_dbg(ql_dbg_user, vha, 0x705c,
+			    "DMA allocation failed for %u\n",
+			    qla2x00_gid_list_size(ha));
+			return 0;
+		}
+
+		rc = qla24xx_gidlist_wait(vha, gid_list, gid_list_dma,
+		    &entries);
+		if (rc != QLA_SUCCESS)
+			goto out_free_id_list;
+
+		id_iter = (char *)gid_list;
+
+		seq_puts(s, "Port Name	Port ID	Loop ID\n");
+
+		for (i = 0; i < entries; i++) {
+			struct gid_list_info *gid =
+			    (struct gid_list_info *)id_iter;
+			loop_id = le16_to_cpu(gid->loop_id);
+			memset(&fc_port, 0, sizeof(fc_port_t));
+
+			fc_port.loop_id = loop_id;
+
+			rc = qla24xx_gpdb_wait(vha, &fc_port, 0);
+			seq_printf(s, "%8phC  %02x%02x%02x  %d\n",
+			    fc_port.port_name, fc_port.d_id.b.domain,
+			    fc_port.d_id.b.area, fc_port.d_id.b.al_pa,
+			    fc_port.loop_id);
+			id_iter += ha->gid_list_info_size;
+		}
+out_free_id_list:
+		dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
+		    gid_list, gid_list_dma);
+	}
+
+	return 0;
+}
+
+static int
+qla2x00_dfs_tgt_port_database_open(struct inode *inode, struct file *file)
+{
+	scsi_qla_host_t *vha = inode->i_private;
+
+	return single_open(file, qla2x00_dfs_tgt_port_database_show, vha);
+}
+
+static const struct file_operations dfs_tgt_port_database_ops = {
+	.open = qla2x00_dfs_tgt_port_database_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static int
 qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused)
 {
 	struct scsi_qla_host *vha = s->private;
@@ -114,6 +185,21 @@ qla_dfs_tgt_counters_show(struct seq_file *s, void *unused)
 	seq_printf(s, "num Q full sent = %lld\n",
 		vha->tgt_counters.num_q_full_sent);
 
+	/* DIF stats */
+	seq_printf(s, "DIF Inp Bytes = %lld\n",
+		vha->qla_stats.qla_dif_stats.dif_input_bytes);
+	seq_printf(s, "DIF Outp Bytes = %lld\n",
+		vha->qla_stats.qla_dif_stats.dif_output_bytes);
+	seq_printf(s, "DIF Inp Req = %lld\n",
+		vha->qla_stats.qla_dif_stats.dif_input_requests);
+	seq_printf(s, "DIF Outp Req = %lld\n",
+		vha->qla_stats.qla_dif_stats.dif_output_requests);
+	seq_printf(s, "DIF Guard err = %d\n",
+		vha->qla_stats.qla_dif_stats.dif_guard_err);
+	seq_printf(s, "DIF Ref tag err = %d\n",
+		vha->qla_stats.qla_dif_stats.dif_ref_tag_err);
+	seq_printf(s, "DIF App tag err = %d\n",
+		vha->qla_stats.qla_dif_stats.dif_app_tag_err);
 	return 0;
 }
 
@@ -281,6 +367,14 @@ create_nodes:
 		goto out;
 	}
 
+	ha->tgt.dfs_tgt_port_database = debugfs_create_file("tgt_port_database",
+	    S_IRUSR, ha->dfs_dir, vha, &dfs_tgt_port_database_ops);
+	if (!ha->tgt.dfs_tgt_port_database) {
+		ql_log(ql_log_warn, vha, 0xffff,
+		    "Unable to create debugFS tgt_port_database node.\n");
+		goto out;
+	}
+
 	ha->dfs_fce = debugfs_create_file("fce", S_IRUSR, ha->dfs_dir, vha,
 	    &dfs_fce_ops);
 	if (!ha->dfs_fce) {
@@ -311,6 +405,11 @@ qla2x00_dfs_remove(scsi_qla_host_t *vha)
 		ha->tgt.dfs_tgt_sess = NULL;
 	}
 
+	if (ha->tgt.dfs_tgt_port_database) {
+		debugfs_remove(ha->tgt.dfs_tgt_port_database);
+		ha->tgt.dfs_tgt_port_database = NULL;
+	}
+
 	if (ha->dfs_fw_resource_cnt) {
 		debugfs_remove(ha->dfs_fw_resource_cnt);
 		ha->dfs_fw_resource_cnt = NULL;
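The new tgt_port_database node follows the standard debugfs-plus-seq_file recipe: a show routine, a single_open() wrapper, and a file_operations table handed to debugfs_create_file(). A minimal sketch with a hypothetical node name:

/* Minimal debugfs + seq_file sketch; the "example" node is hypothetical. */
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static int example_show(struct seq_file *s, void *unused)
{
	seq_puts(s, "hello from debugfs\n");	/* one-shot text output */
	return 0;
}

static int example_open(struct inode *inode, struct file *file)
{
	/* single_open() stashes the private pointer for the show routine */
	return single_open(file, example_show, inode->i_private);
}

static const struct file_operations example_fops = {
	.open		= example_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* at init: debugfs_create_file("example", 0400, parent, priv, &example_fops); */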
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index b3d6441d1d90..5b2451745e9f 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -193,6 +193,7 @@ extern int qla24xx_post_upd_fcport_work(struct scsi_qla_host *, fc_port_t *);
 void qla2x00_handle_login_done_event(struct scsi_qla_host *, fc_port_t *,
 	uint16_t *);
 int qla24xx_post_gnl_work(struct scsi_qla_host *, fc_port_t *);
+int qla24xx_async_abort_cmd(srb_t *);
 
 /*
  * Global Functions in qla_mid.c source file.
@@ -256,11 +257,11 @@ extern unsigned long qla2x00_get_async_timeout(struct scsi_qla_host *);
 extern void *qla2x00_alloc_iocbs(scsi_qla_host_t *, srb_t *);
 extern int qla2x00_issue_marker(scsi_qla_host_t *, int);
 extern int qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *, srb_t *,
-	uint32_t *, uint16_t, struct qla_tgt_cmd *);
+	uint32_t *, uint16_t, struct qla_tc_param *);
 extern int qla24xx_walk_and_build_sglist(struct qla_hw_data *, srb_t *,
-	uint32_t *, uint16_t, struct qla_tgt_cmd *);
+	uint32_t *, uint16_t, struct qla_tc_param *);
 extern int qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *, srb_t *,
-	uint32_t *, uint16_t, struct qla_tgt_cmd *);
+	uint32_t *, uint16_t, struct qla_tc_param *);
 extern int qla24xx_get_one_block_sg(uint32_t, struct qla2_sgx *, uint32_t *);
 extern int qla24xx_configure_prot_mode(srb_t *, uint16_t *);
 extern int qla24xx_build_scsi_crc_2_iocbs(srb_t *,
@@ -368,7 +369,7 @@ qla2x00_get_link_status(scsi_qla_host_t *, uint16_t, struct link_statistics *,
 
 extern int
 qla24xx_get_isp_stats(scsi_qla_host_t *, struct link_statistics *,
-    dma_addr_t, uint);
+    dma_addr_t, uint16_t);
 
 extern int qla24xx_abort_command(srb_t *);
 extern int qla24xx_async_abort_command(srb_t *);
@@ -472,6 +473,13 @@ qla2x00_dump_mctp_data(scsi_qla_host_t *, dma_addr_t, uint32_t, uint32_t);
 extern int
 qla26xx_dport_diagnostics(scsi_qla_host_t *, void *, uint, uint);
 
+int qla24xx_send_mb_cmd(struct scsi_qla_host *, mbx_cmd_t *);
+int qla24xx_gpdb_wait(struct scsi_qla_host *, fc_port_t *, u8);
+int qla24xx_gidlist_wait(struct scsi_qla_host *, void *, dma_addr_t,
+    uint16_t *);
+int __qla24xx_parse_gpdb(struct scsi_qla_host *, fc_port_t *,
+    struct port_database_24xx *);
+
 /*
  * Global Function Prototypes in qla_isr.c source file.
  */
@@ -846,5 +854,7 @@ extern struct fc_port *qlt_find_sess_invalidate_other(scsi_qla_host_t *,
 	uint64_t wwn, port_id_t port_id, uint16_t loop_id, struct fc_port **);
 void qla24xx_delete_sess_fn(struct work_struct *);
 void qlt_unknown_atio_work_fn(struct work_struct *);
+void qlt_update_host_map(struct scsi_qla_host *, port_id_t);
+void qlt_remove_target_resources(struct qla_hw_data *);
 
 #endif /* _QLA_GBL_H */
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 32fb9007f137..f9d2fe7b1ade 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -629,7 +629,6 @@ void qla24xx_async_gpdb_sp_done(void *s, int res)
 	struct srb *sp = s;
 	struct scsi_qla_host *vha = sp->vha;
 	struct qla_hw_data *ha = vha->hw;
-	uint64_t zero = 0;
 	struct port_database_24xx *pd;
 	fc_port_t *fcport = sp->fcport;
 	u16 *mb = sp->u.iocb_cmd.u.mbx.in_mb;
@@ -649,48 +648,7 @@ void qla24xx_async_gpdb_sp_done(void *s, int res)
 
 	pd = (struct port_database_24xx *)sp->u.iocb_cmd.u.mbx.in;
 
-	/* Check for logged in state. */
-	if (pd->current_login_state != PDS_PRLI_COMPLETE &&
-	    pd->last_login_state != PDS_PRLI_COMPLETE) {
-		ql_dbg(ql_dbg_mbx, vha, 0xffff,
-		    "Unable to verify login-state (%x/%x) for "
-		    "loop_id %x.\n", pd->current_login_state,
-		    pd->last_login_state, fcport->loop_id);
-		rval = QLA_FUNCTION_FAILED;
-		goto gpd_error_out;
-	}
-
-	if (fcport->loop_id == FC_NO_LOOP_ID ||
-	    (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
-	     memcmp(fcport->port_name, pd->port_name, 8))) {
-		/* We lost the device mid way. */
-		rval = QLA_NOT_LOGGED_IN;
-		goto gpd_error_out;
-	}
-
-	/* Names are little-endian. */
-	memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
-
-	/* Get port_id of device. */
-	fcport->d_id.b.domain = pd->port_id[0];
-	fcport->d_id.b.area = pd->port_id[1];
-	fcport->d_id.b.al_pa = pd->port_id[2];
-	fcport->d_id.b.rsvd_1 = 0;
-
-	/* If not target must be initiator or unknown type. */
-	if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
-		fcport->port_type = FCT_INITIATOR;
-	else
-		fcport->port_type = FCT_TARGET;
-
-	/* Passback COS information. */
-	fcport->supported_classes = (pd->flags & PDF_CLASS_2) ?
-	    FC_COS_CLASS2 : FC_COS_CLASS3;
-
-	if (pd->prli_svc_param_word_3[0] & BIT_7) {
-		fcport->flags |= FCF_CONF_COMP_SUPPORTED;
-		fcport->conf_compl_supported = 1;
-	}
+	rval = __qla24xx_parse_gpdb(vha, fcport, pd);
 
 gpd_error_out:
 	memset(&ea, 0, sizeof(ea));
@@ -876,10 +834,14 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
 	fcport->login_retry--;
 
 	if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
-	    (fcport->fw_login_state == DSC_LS_PLOGI_COMP) ||
 	    (fcport->fw_login_state == DSC_LS_PRLI_PEND))
 		return 0;
 
+	if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) {
+		if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline))
+			return 0;
+	}
+
 	/* for pure Target Mode. Login will not be initiated */
 	if (vha->host->active_mode == MODE_TARGET)
 		return 0;
@@ -1041,10 +1003,14 @@ void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
 	    fcport->flags);
 
 	if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
-	    (fcport->fw_login_state == DSC_LS_PLOGI_COMP) ||
 	    (fcport->fw_login_state == DSC_LS_PRLI_PEND))
 		return;
 
+	if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) {
+		if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline))
+			return;
+	}
+
 	if (fcport->flags & FCF_ASYNC_SENT) {
 		fcport->login_retry++;
 		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
@@ -1258,7 +1224,7 @@ qla24xx_abort_sp_done(void *ptr, int res)
 	complete(&abt->u.abt.comp);
 }
 
-static int
+int
 qla24xx_async_abort_cmd(srb_t *cmd_sp)
 {
 	scsi_qla_host_t *vha = cmd_sp->vha;
@@ -3212,6 +3178,7 @@ next_check:
 	} else {
 		ql_dbg(ql_dbg_init, vha, 0x00d3,
 		    "Init Firmware -- success.\n");
+		ha->flags.fw_started = 1;
 	}
 
 	return (rval);
@@ -3374,8 +3341,8 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
 	uint8_t		domain;
 	char		connect_type[22];
 	struct qla_hw_data *ha = vha->hw;
-	unsigned long flags;
 	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
+	port_id_t id;
 
 	/* Get host addresses. */
 	rval = qla2x00_get_adapter_id(vha,
@@ -3453,13 +3420,11 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
 
 	/* Save Host port and loop ID. */
 	/* byte order - Big Endian */
-	vha->d_id.b.domain = domain;
-	vha->d_id.b.area = area;
-	vha->d_id.b.al_pa = al_pa;
-
-	spin_lock_irqsave(&ha->vport_slock, flags);
-	qlt_update_vp_map(vha, SET_AL_PA);
-	spin_unlock_irqrestore(&ha->vport_slock, flags);
+	id.b.domain = domain;
+	id.b.area = area;
+	id.b.al_pa = al_pa;
+	id.b.rsvd_1 = 0;
+	qlt_update_host_map(vha, id);
 
 	if (!vha->flags.init_done)
 		ql_log(ql_log_info, vha, 0x2010,
@@ -4036,6 +4001,7 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
 		atomic_set(&vha->loop_state, LOOP_READY);
 		ql_dbg(ql_dbg_disc, vha, 0x2069,
 		    "LOOP READY.\n");
+		ha->flags.fw_init_done = 1;
 
 		/*
 		 * Process any ATIO queue entries that came in
@@ -5148,6 +5114,7 @@ qla2x00_update_fcports(scsi_qla_host_t *base_vha)
 			}
 		}
 		atomic_dec(&vha->vref_count);
+		wake_up(&vha->vref_waitq);
 	}
 	spin_unlock_irqrestore(&ha->vport_slock, flags);
 }
@@ -5526,6 +5493,11 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
 	if (!(IS_P3P_TYPE(ha)))
 		ha->isp_ops->reset_chip(vha);
 
+	ha->flags.n2n_ae = 0;
+	ha->flags.lip_ae = 0;
+	ha->current_topology = 0;
+	ha->flags.fw_started = 0;
+	ha->flags.fw_init_done = 0;
 	ha->chip_reset++;
 
 	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
@@ -6802,6 +6774,8 @@ qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
 		return;
 	if (!ha->fw_major_version)
 		return;
+	if (!ha->flags.fw_started)
+		return;
 
 	ret = qla2x00_stop_firmware(vha);
 	for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT &&
@@ -6815,6 +6789,9 @@ qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
 		    "Attempting retry of stop-firmware command.\n");
 		ret = qla2x00_stop_firmware(vha);
 	}
+
+	ha->flags.fw_started = 0;
+	ha->flags.fw_init_done = 0;
 }
 
 int
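The DSC_LS_PLOGI_COMP handling above defers relogin until plogi_nack_done_deadline has passed, using the usual jiffies idiom: store jiffies + timeout when arming, compare with time_before_eq() when checking. A sketch follows; the 20-second hold-off is only an assumed value:

/* The jiffies-deadline idiom used above; the 20s hold-off is an assumption. */
#include <linux/jiffies.h>
#include <linux/types.h>

static unsigned long nack_deadline;

static void arm_nack_deadline(void)
{
	nack_deadline = jiffies + 20 * HZ;
}

static bool nack_holdoff_active(void)
{
	/* wrap-safe comparison; never compare raw jiffies values */
	return time_before_eq(jiffies, nack_deadline);
}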
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 535079280288..ea027f6a7fd4 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -889,7 +889,7 @@ qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
 
 int
 qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
-	uint32_t *dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
+	uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
 {
 	void *next_dsd;
 	uint8_t avail_dsds = 0;
@@ -898,7 +898,6 @@ qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
 	struct scatterlist *sg_prot;
 	uint32_t *cur_dsd = dsd;
 	uint16_t used_dsds = tot_dsds;
-
 	uint32_t prot_int; /* protection interval */
 	uint32_t partial;
 	struct qla2_sgx sgx;
@@ -966,7 +965,7 @@ alloc_and_fill:
 		} else {
 			list_add_tail(&dsd_ptr->list,
 			    &(tc->ctx->dsd_list));
-			tc->ctx_dsd_alloced = 1;
+			*tc->ctx_dsd_alloced = 1;
 		}
 
 
@@ -1005,7 +1004,7 @@ alloc_and_fill:
 
 int
 qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
-	uint16_t tot_dsds, struct qla_tgt_cmd *tc)
+	uint16_t tot_dsds, struct qla_tc_param *tc)
 {
 	void *next_dsd;
 	uint8_t avail_dsds = 0;
@@ -1066,7 +1065,7 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
 		} else {
 			list_add_tail(&dsd_ptr->list,
 			    &(tc->ctx->dsd_list));
-			tc->ctx_dsd_alloced = 1;
+			*tc->ctx_dsd_alloced = 1;
 		}
 
 		/* add new list to cmd iocb or last list */
@@ -1092,7 +1091,7 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
 
 int
 qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
-	uint32_t *dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
+	uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
 {
 	void *next_dsd;
 	uint8_t avail_dsds = 0;
@@ -1158,7 +1157,7 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
 		} else {
 			list_add_tail(&dsd_ptr->list,
 			    &(tc->ctx->dsd_list));
-			tc->ctx_dsd_alloced = 1;
+			*tc->ctx_dsd_alloced = 1;
 		}
 
 		/* add new list to cmd iocb or last list */
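Switching the sglist builders from struct qla_tgt_cmd to struct qla_tc_param also turns ctx_dsd_alloced into a pointer, so *tc->ctx_dsd_alloced = 1 marks the caller's command rather than a field inside the short-lived parameter block. A small sketch of that out-parameter shape, with hypothetical names:

/* Out-parameter shape behind *tc->ctx_dsd_alloced = 1; names are made up. */
struct cmd_sketch {
	unsigned char dsd_alloced;
};

struct tc_param_sketch {
	unsigned char *dsd_alloced;	/* points into the owning command */
};

static void builder(struct tc_param_sketch *tc)
{
	*tc->dsd_alloced = 1;	/* updates the command, not a local copy */
}

static void caller(struct cmd_sketch *cmd)
{
	struct tc_param_sketch tc = { .dsd_alloced = &cmd->dsd_alloced };

	builder(&tc);		/* cmd->dsd_alloced is now 1 */
}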
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 3c66ea29de27..3203367a4f42 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -708,6 +708,8 @@ skip_rio:
 		    "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx);
 
 		ha->isp_ops->fw_dump(vha, 1);
+		ha->flags.fw_init_done = 0;
+		ha->flags.fw_started = 0;
 
 		if (IS_FWI2_CAPABLE(ha)) {
 			if (mb[1] == 0 && mb[2] == 0) {
@@ -761,6 +763,9 @@ skip_rio:
 		break;
 
 	case MBA_LIP_OCCURRED:		/* Loop Initialization Procedure */
+		ha->flags.lip_ae = 1;
+		ha->flags.n2n_ae = 0;
+
 		ql_dbg(ql_dbg_async, vha, 0x5009,
 		    "LIP occurred (%x).\n", mb[1]);
 
@@ -797,6 +802,10 @@ skip_rio:
 		break;
 
 	case MBA_LOOP_DOWN:		/* Loop Down Event */
+		ha->flags.n2n_ae = 0;
+		ha->flags.lip_ae = 0;
+		ha->current_topology = 0;
+
 		mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha))
 			? RD_REG_WORD(&reg24->mailbox4) : 0;
 		mbx = (IS_P3P_TYPE(ha)) ? RD_REG_WORD(&reg82->mailbox_out[4])
@@ -866,6 +875,9 @@ skip_rio:
 
 	/* case MBA_DCBX_COMPLETE: */
 	case MBA_POINT_TO_POINT:	/* Point-to-Point */
+		ha->flags.lip_ae = 0;
+		ha->flags.n2n_ae = 1;
+
 		if (IS_QLA2100(ha))
 			break;
 
@@ -1620,9 +1632,9 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
 		QLA_LOGIO_LOGIN_RETRIED : 0;
 	if (logio->entry_status) {
 		ql_log(ql_log_warn, fcport->vha, 0x5034,
-		    "Async-%s error entry - hdl=%x"
+		    "Async-%s error entry - %8phC hdl=%x"
 		    "portid=%02x%02x%02x entry-status=%x.\n",
-		    type, sp->handle, fcport->d_id.b.domain,
+		    type, fcport->port_name, sp->handle, fcport->d_id.b.domain,
 		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
 		    logio->entry_status);
 		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d,
@@ -1633,8 +1645,9 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
 
 	if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
 		ql_dbg(ql_dbg_async, fcport->vha, 0x5036,
-		    "Async-%s complete - hdl=%x portid=%02x%02x%02x "
-		    "iop0=%x.\n", type, sp->handle, fcport->d_id.b.domain,
+		    "Async-%s complete - %8phC hdl=%x portid=%02x%02x%02x "
+		    "iop0=%x.\n", type, fcport->port_name, sp->handle,
+		    fcport->d_id.b.domain,
 		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
 		    le32_to_cpu(logio->io_parameter[0]));
 
@@ -1674,6 +1687,17 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
 	case LSC_SCODE_NPORT_USED:
 		data[0] = MBS_LOOP_ID_USED;
 		break;
+	case LSC_SCODE_CMD_FAILED:
+		if (iop[1] == 0x0606) {
+			/*
+			 * PLOGI/PRLI Completed. We must have Recv PLOGI/PRLI,
+			 * Target side acked.
+			 */
+			data[0] = MBS_COMMAND_COMPLETE;
+			goto logio_done;
+		}
+		data[0] = MBS_COMMAND_ERROR;
+		break;
 	case LSC_SCODE_NOXCB:
 		vha->hw->exch_starvation++;
 		if (vha->hw->exch_starvation > 5) {
@@ -1695,8 +1719,9 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
 	}
 
 	ql_dbg(ql_dbg_async, fcport->vha, 0x5037,
-	    "Async-%s failed - hdl=%x portid=%02x%02x%02x comp=%x "
-	    "iop0=%x iop1=%x.\n", type, sp->handle, fcport->d_id.b.domain,
+	    "Async-%s failed - %8phC hdl=%x portid=%02x%02x%02x comp=%x "
+	    "iop0=%x iop1=%x.\n", type, fcport->port_name,
+	    sp->handle, fcport->d_id.b.domain,
 	    fcport->d_id.b.area, fcport->d_id.b.al_pa,
 	    le16_to_cpu(logio->comp_status),
 	    le32_to_cpu(logio->io_parameter[0]),
@@ -2679,7 +2704,7 @@ qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
 		return;
 
 	abt = &sp->u.iocb_cmd;
-	abt->u.abt.comp_status = le32_to_cpu(pkt->nport_handle);
+	abt->u.abt.comp_status = le16_to_cpu(pkt->nport_handle);
 	sp->done(sp, 0);
 }
 
@@ -2693,7 +2718,7 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
 	struct sts_entry_24xx *pkt;
 	struct qla_hw_data *ha = vha->hw;
 
-	if (!vha->flags.online)
+	if (!ha->flags.fw_started)
 		return;
 
 	while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
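The abort-IOCB fix reads the 16-bit nport_handle with le16_to_cpu() instead of le32_to_cpu(); on big-endian hosts the wide accessor would fold the neighbouring bytes into the result. Sketch with an illustrative wire layout:

/* Width-matched endian accessors; the wire layout here is illustrative. */
#include <linux/types.h>
#include <asm/byteorder.h>

struct wire_entry {
	__le16 nport_handle;	/* 16-bit field as written by firmware */
	__le16 flags;
};

static u16 read_handle(const struct wire_entry *e)
{
	/*
	 * A 32-bit load over this spot would fold "flags" into the
	 * result on big-endian CPUs; le16_to_cpu() matches the field.
	 */
	return le16_to_cpu(e->nport_handle);
}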
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 35079f417417..a113ab3592a7 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -10,6 +10,28 @@
 #include <linux/delay.h>
 #include <linux/gfp.h>
 
+static struct mb_cmd_name {
+	uint16_t cmd;
+	const char *str;
+} mb_str[] = {
+	{MBC_GET_PORT_DATABASE,		"GPDB"},
+	{MBC_GET_ID_LIST,		"GIDList"},
+	{MBC_GET_LINK_PRIV_STATS,	"Stats"},
+};
+
+static const char *mb_to_str(uint16_t cmd)
+{
+	int i;
+	struct mb_cmd_name *e;
+
+	for (i = 0; i < ARRAY_SIZE(mb_str); i++) {
+		e = mb_str + i;
+		if (cmd == e->cmd)
+			return e->str;
+	}
+	return "unknown";
+}
+
 static struct rom_cmd {
 	uint16_t cmd;
 } rom_cmds[] = {
@@ -2818,7 +2840,7 @@ qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
 
 int
 qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
-    dma_addr_t stats_dma, uint options)
+    dma_addr_t stats_dma, uint16_t options)
 {
 	int rval;
 	mbx_cmd_t mc;
@@ -2828,19 +2850,17 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088,
 	    "Entered %s.\n", __func__);
 
-	mcp->mb[0] = MBC_GET_LINK_PRIV_STATS;
-	mcp->mb[2] = MSW(stats_dma);
-	mcp->mb[3] = LSW(stats_dma);
-	mcp->mb[6] = MSW(MSD(stats_dma));
-	mcp->mb[7] = LSW(MSD(stats_dma));
-	mcp->mb[8] = sizeof(struct link_statistics) / 4;
-	mcp->mb[9] = vha->vp_idx;
-	mcp->mb[10] = options;
-	mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
-	mcp->in_mb = MBX_2|MBX_1|MBX_0;
-	mcp->tov = MBX_TOV_SECONDS;
-	mcp->flags = IOCTL_CMD;
-	rval = qla2x00_mailbox_command(vha, mcp);
+	memset(&mc, 0, sizeof(mc));
+	mc.mb[0] = MBC_GET_LINK_PRIV_STATS;
+	mc.mb[2] = MSW(stats_dma);
+	mc.mb[3] = LSW(stats_dma);
+	mc.mb[6] = MSW(MSD(stats_dma));
+	mc.mb[7] = LSW(MSD(stats_dma));
+	mc.mb[8] = sizeof(struct link_statistics) / 4;
+	mc.mb[9] = cpu_to_le16(vha->vp_idx);
+	mc.mb[10] = cpu_to_le16(options);
+
+	rval = qla24xx_send_mb_cmd(vha, &mc);
 
 	if (rval == QLA_SUCCESS) {
 		if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
@@ -3603,6 +3623,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
 	scsi_qla_host_t *vp = NULL;
 	unsigned long flags;
 	int found;
+	port_id_t id;
 
 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6,
 	    "Entered %s.\n", __func__);
@@ -3610,28 +3631,27 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
 	if (rptid_entry->entry_status != 0)
 		return;
 
+	id.b.domain = rptid_entry->port_id[2];
+	id.b.area = rptid_entry->port_id[1];
+	id.b.al_pa = rptid_entry->port_id[0];
+	id.b.rsvd_1 = 0;
+
 	if (rptid_entry->format == 0) {
 		/* loop */
-		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b7,
+		ql_dbg(ql_dbg_async, vha, 0x10b7,
 		    "Format 0 : Number of VPs setup %d, number of "
 		    "VPs acquired %d.\n", rptid_entry->vp_setup,
 		    rptid_entry->vp_acquired);
-		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b8,
+		ql_dbg(ql_dbg_async, vha, 0x10b8,
 		    "Primary port id %02x%02x%02x.\n",
 		    rptid_entry->port_id[2], rptid_entry->port_id[1],
 		    rptid_entry->port_id[0]);
 
-		vha->d_id.b.domain = rptid_entry->port_id[2];
-		vha->d_id.b.area = rptid_entry->port_id[1];
-		vha->d_id.b.al_pa = rptid_entry->port_id[0];
-
-		spin_lock_irqsave(&ha->vport_slock, flags);
-		qlt_update_vp_map(vha, SET_AL_PA);
-		spin_unlock_irqrestore(&ha->vport_slock, flags);
+		qlt_update_host_map(vha, id);
 
 	} else if (rptid_entry->format == 1) {
 		/* fabric */
-		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b9,
+		ql_dbg(ql_dbg_async, vha, 0x10b9,
 		    "Format 1: VP[%d] enabled - status %d - with "
 		    "port id %02x%02x%02x.\n", rptid_entry->vp_idx,
 		    rptid_entry->vp_status,
@@ -3653,12 +3673,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
 			    WWN_SIZE);
 		}
 
-		vha->d_id.b.domain = rptid_entry->port_id[2];
-		vha->d_id.b.area = rptid_entry->port_id[1];
-		vha->d_id.b.al_pa = rptid_entry->port_id[0];
-		spin_lock_irqsave(&ha->vport_slock, flags);
-		qlt_update_vp_map(vha, SET_AL_PA);
-		spin_unlock_irqrestore(&ha->vport_slock, flags);
+		qlt_update_host_map(vha, id);
 	}
 
 	fc_host_port_name(vha->host) =
@@ -3694,12 +3709,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
 		if (!found)
 			return;
 
-		vp->d_id.b.domain = rptid_entry->port_id[2];
-		vp->d_id.b.area = rptid_entry->port_id[1];
-		vp->d_id.b.al_pa = rptid_entry->port_id[0];
-		spin_lock_irqsave(&ha->vport_slock, flags);
-		qlt_update_vp_map(vp, SET_AL_PA);
-		spin_unlock_irqrestore(&ha->vport_slock, flags);
+		qlt_update_host_map(vp, id);
 
 		/*
 		 * Cannot configure here as we are still sitting on the
@@ -5827,3 +5837,225 @@ qla26xx_dport_diagnostics(scsi_qla_host_t *vha,
 
 	return rval;
 }
+
+static void qla2x00_async_mb_sp_done(void *s, int res)
+{
+	struct srb *sp = s;
+
+	sp->u.iocb_cmd.u.mbx.rc = res;
+
+	complete(&sp->u.iocb_cmd.u.mbx.comp);
+	/* don't free sp here. Let the caller do the free */
+}
+
+/*
+ * This mailbox uses the iocb interface to send MB command.
+ * This allows non-critical (non chip setup) commands to go
+ * out in parallel.
+ */
+int qla24xx_send_mb_cmd(struct scsi_qla_host *vha, mbx_cmd_t *mcp)
+{
+	int rval = QLA_FUNCTION_FAILED;
+	srb_t *sp;
+	struct srb_iocb *c;
+
+	if (!vha->hw->flags.fw_started)
+		goto done;
+
+	sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
+	if (!sp)
+		goto done;
+
+	sp->type = SRB_MB_IOCB;
+	sp->name = mb_to_str(mcp->mb[0]);
+
+	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+	memcpy(sp->u.iocb_cmd.u.mbx.out_mb, mcp->mb, SIZEOF_IOCB_MB_REG);
+
+	c = &sp->u.iocb_cmd;
+	c->timeout = qla2x00_async_iocb_timeout;
+	init_completion(&c->u.mbx.comp);
+
+	sp->done = qla2x00_async_mb_sp_done;
+
+	rval = qla2x00_start_sp(sp);
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0xffff,
+		    "%s: %s Failed submission. %x.\n",
+		    __func__, sp->name, rval);
+		goto done_free_sp;
+	}
+
+	ql_dbg(ql_dbg_mbx, vha, 0xffff, "MB:%s hndl %x submitted\n",
+	    sp->name, sp->handle);
+
+	wait_for_completion(&c->u.mbx.comp);
+	memcpy(mcp->mb, sp->u.iocb_cmd.u.mbx.in_mb, SIZEOF_IOCB_MB_REG);
+
+	rval = c->u.mbx.rc;
+	switch (rval) {
+	case QLA_FUNCTION_TIMEOUT:
+		ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s: %s Timeout. %x.\n",
+		    __func__, sp->name, rval);
+		break;
+	case QLA_SUCCESS:
+		ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s: %s done.\n",
+		    __func__, sp->name);
+		sp->free(sp);
+		break;
+	default:
+		ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s: %s Failed. %x.\n",
+		    __func__, sp->name, rval);
+		sp->free(sp);
+		break;
+	}
+
+	return rval;
+
+done_free_sp:
+	sp->free(sp);
+done:
+	return rval;
+}
+
+/*
+ * qla24xx_gpdb_wait
+ * NOTE: Do not call this routine from DPC thread
+ */
+int qla24xx_gpdb_wait(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
+{
+	int rval = QLA_FUNCTION_FAILED;
+	dma_addr_t pd_dma;
+	struct port_database_24xx *pd;
+	struct qla_hw_data *ha = vha->hw;
+	mbx_cmd_t mc;
+
+	if (!vha->hw->flags.fw_started)
+		goto done;
+
+	pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
+	if (pd == NULL) {
+		ql_log(ql_log_warn, vha, 0xffff,
+		    "Failed to allocate port database structure.\n");
+		goto done_free_sp;
+	}
+	memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE));
+
+	memset(&mc, 0, sizeof(mc));
+	mc.mb[0] = MBC_GET_PORT_DATABASE;
+	mc.mb[1] = cpu_to_le16(fcport->loop_id);
+	mc.mb[2] = MSW(pd_dma);
+	mc.mb[3] = LSW(pd_dma);
+	mc.mb[6] = MSW(MSD(pd_dma));
+	mc.mb[7] = LSW(MSD(pd_dma));
+	mc.mb[9] = cpu_to_le16(vha->vp_idx);
+	mc.mb[10] = cpu_to_le16((uint16_t)opt);
+
+	rval = qla24xx_send_mb_cmd(vha, &mc);
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0xffff,
+		    "%s: %8phC fail\n", __func__, fcport->port_name);
+		goto done_free_sp;
+	}
+
+	rval = __qla24xx_parse_gpdb(vha, fcport, pd);
+
+	ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s: %8phC done\n",
+	    __func__, fcport->port_name);
+
+done_free_sp:
+	if (pd)
+		dma_pool_free(ha->s_dma_pool, pd, pd_dma);
+done:
+	return rval;
+}
+
+int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport,
+    struct port_database_24xx *pd)
+{
+	int rval = QLA_SUCCESS;
+	uint64_t zero = 0;
+
+	/* Check for logged in state. */
+	if (pd->current_login_state != PDS_PRLI_COMPLETE &&
+	    pd->last_login_state != PDS_PRLI_COMPLETE) {
+		ql_dbg(ql_dbg_mbx, vha, 0xffff,
+		    "Unable to verify login-state (%x/%x) for "
+		    "loop_id %x.\n", pd->current_login_state,
+		    pd->last_login_state, fcport->loop_id);
+		rval = QLA_FUNCTION_FAILED;
+		goto gpd_error_out;
+	}
+
+	if (fcport->loop_id == FC_NO_LOOP_ID ||
+	    (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
+	     memcmp(fcport->port_name, pd->port_name, 8))) {
+		/* We lost the device mid way. */
+		rval = QLA_NOT_LOGGED_IN;
+		goto gpd_error_out;
+	}
+
+	/* Names are little-endian. */
+	memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
+	memcpy(fcport->port_name, pd->port_name, WWN_SIZE);
+
+	/* Get port_id of device. */
+	fcport->d_id.b.domain = pd->port_id[0];
+	fcport->d_id.b.area = pd->port_id[1];
+	fcport->d_id.b.al_pa = pd->port_id[2];
+	fcport->d_id.b.rsvd_1 = 0;
+
+	/* If not target must be initiator or unknown type. */
+	if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
+		fcport->port_type = FCT_INITIATOR;
+	else
+		fcport->port_type = FCT_TARGET;
+
+	/* Passback COS information. */
+	fcport->supported_classes = (pd->flags & PDF_CLASS_2) ?
+	    FC_COS_CLASS2 : FC_COS_CLASS3;
+
+	if (pd->prli_svc_param_word_3[0] & BIT_7) {
+		fcport->flags |= FCF_CONF_COMP_SUPPORTED;
+		fcport->conf_compl_supported = 1;
+	}
+
+gpd_error_out:
+	return rval;
+}
+
+/*
+ * qla24xx_gidlist_wait
+ * NOTE: don't call this routine from DPC thread.
+ */
+int qla24xx_gidlist_wait(struct scsi_qla_host *vha,
+	void *id_list, dma_addr_t id_list_dma, uint16_t *entries)
+{
+	int rval = QLA_FUNCTION_FAILED;
+	mbx_cmd_t mc;
+
+	if (!vha->hw->flags.fw_started)
+		goto done;
+
+	memset(&mc, 0, sizeof(mc));
+	mc.mb[0] = MBC_GET_ID_LIST;
+	mc.mb[2] = MSW(id_list_dma);
+	mc.mb[3] = LSW(id_list_dma);
+	mc.mb[6] = MSW(MSD(id_list_dma));
+	mc.mb[7] = LSW(MSD(id_list_dma));
+	mc.mb[8] = 0;
+	mc.mb[9] = cpu_to_le16(vha->vp_idx);
+
+	rval = qla24xx_send_mb_cmd(vha, &mc);
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0xffff,
+		    "%s: fail\n", __func__);
+	} else {
+		*entries = mc.mb[1];
+		ql_dbg(ql_dbg_mbx, vha, 0xffff,
+		    "%s: done\n", __func__);
+	}
done:
	return rval;
}
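qla24xx_send_mb_cmd() is the heart of the new qla_mbx.c block: it submits a mailbox command through the IOCB path and turns the asynchronous completion into a synchronous call with struct completion. The generic shape of that pattern, with a hypothetical request type:

/* Generic submit-then-wait shape; the request type here is hypothetical. */
#include <linux/completion.h>

struct req_sketch {
	struct completion comp;
	int rc;
};

static void req_done(struct req_sketch *req, int res)
{
	req->rc = res;
	complete(&req->comp);	/* wakes the submitting thread */
}

static int submit_and_wait(struct req_sketch *req,
			   int (*submit)(struct req_sketch *))
{
	int rc;

	init_completion(&req->comp);
	rc = submit(req);	/* completion arrives asynchronously */
	if (rc)
		return rc;	/* never submitted; nothing to wait for */

	wait_for_completion(&req->comp);
	return req->rc;
}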
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index c6d6f0d912ff..09a490c98763 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -74,13 +74,14 @@ qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
 	 * ensures no active vp_list traversal while the vport is removed
 	 * from the queue)
 	 */
-	spin_lock_irqsave(&ha->vport_slock, flags);
-	while (atomic_read(&vha->vref_count)) {
-		spin_unlock_irqrestore(&ha->vport_slock, flags);
-
-		msleep(500);
+	wait_event_timeout(vha->vref_waitq, atomic_read(&vha->vref_count),
+	    10*HZ);
 
-		spin_lock_irqsave(&ha->vport_slock, flags);
+	spin_lock_irqsave(&ha->vport_slock, flags);
+	if (atomic_read(&vha->vref_count)) {
+		ql_dbg(ql_dbg_vport, vha, 0xfffa,
+		    "vha->vref_count=%u timeout\n", vha->vref_count.counter);
+		vha->vref_count = (atomic_t)ATOMIC_INIT(0);
 	}
 	list_del(&vha->list);
 	qlt_update_vp_map(vha, RESET_VP_IDX);
@@ -269,6 +270,7 @@ qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
 
 			spin_lock_irqsave(&ha->vport_slock, flags);
 			atomic_dec(&vha->vref_count);
+			wake_up(&vha->vref_waitq);
 		}
 		i++;
 	}
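The vport-delete hunk replaces a 500 ms msleep() polling loop with a wait on the new vref_waitq, and every vref_count decrement is paired with a wake_up(). A drain-wait sketch of the pattern, with illustrative fields (the sketch waits for the count to reach zero, which is the usual intent of this idiom):

/* Poll-free teardown with a waitqueue; fields are illustrative. */
#include <linux/atomic.h>
#include <linux/jiffies.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(ref_waitq);
static atomic_t ref_count = ATOMIC_INIT(0);

static void put_ref(void)
{
	atomic_dec(&ref_count);
	wake_up(&ref_waitq);	/* every drop nudges the waiter */
}

static void teardown(void)
{
	/* sleep until the count drains, but give up after 10 seconds */
	wait_event_timeout(ref_waitq, !atomic_read(&ref_count), 10 * HZ);
}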
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 1fed235a1b4a..3e7011757c82 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -1651,7 +1651,8 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
 			/* Don't abort commands in adapter during EEH
 			 * recovery as it's not accessible/responding.
 			 */
-			if (GET_CMD_SP(sp) && !ha->flags.eeh_busy) {
+			if (GET_CMD_SP(sp) && !ha->flags.eeh_busy &&
+			    (sp->type == SRB_SCSI_CMD)) {
 				/* Get a reference to the sp and drop the lock.
 				 * The reference ensures this sp->done() call
 				 * - and not the call in qla2xxx_eh_abort() -
@@ -2560,6 +2561,20 @@ qla2xxx_scan_finished(struct Scsi_Host *shost, unsigned long time)
 	return atomic_read(&vha->loop_state) == LOOP_READY;
 }
 
+static void qla2x00_iocb_work_fn(struct work_struct *work)
+{
+	struct scsi_qla_host *vha = container_of(work,
+		struct scsi_qla_host, iocb_work);
+	int cnt = 0;
+
+	while (!list_empty(&vha->work_list)) {
+		qla2x00_do_work(vha);
+		cnt++;
+		if (cnt > 10)
+			break;
+	}
+}
+
 /*
  * PCI driver interface
  */
@@ -3078,6 +3093,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	 */
 	qla2xxx_wake_dpc(base_vha);
 
+	INIT_WORK(&base_vha->iocb_work, qla2x00_iocb_work_fn);
 	INIT_WORK(&ha->board_disable, qla2x00_disable_board_on_pci_error);
 
 	if (IS_QLA8031(ha) || IS_MCTP_CAPABLE(ha)) {
@@ -3469,6 +3485,7 @@ qla2x00_remove_one(struct pci_dev *pdev)
 	qla2x00_free_sysfs_attr(base_vha, true);
 
 	fc_remove_host(base_vha->host);
+	qlt_remove_target_resources(ha);
 
 	scsi_remove_host(base_vha->host);
 
@@ -4268,6 +4285,7 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
 	spin_lock_init(&vha->work_lock);
 	spin_lock_init(&vha->cmd_list_lock);
 	init_waitqueue_head(&vha->fcport_waitQ);
+	init_waitqueue_head(&vha->vref_waitq);
 
 	vha->gnl.size = sizeof(struct get_name_list_extended) *
 	    (ha->max_loop_id + 1);
@@ -4319,7 +4337,11 @@ qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
 	spin_lock_irqsave(&vha->work_lock, flags);
 	list_add_tail(&e->list, &vha->work_list);
 	spin_unlock_irqrestore(&vha->work_lock, flags);
-	qla2xxx_wake_dpc(vha);
+
+	if (QLA_EARLY_LINKUP(vha->hw))
+		schedule_work(&vha->iocb_work);
+	else
+		qla2xxx_wake_dpc(vha);
 
 	return QLA_SUCCESS;
 }
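For early link-up, queued events are handed to a dedicated work_struct and run by the system workqueue instead of waking the DPC thread. The basic wiring, with a hypothetical host type:

/* Basic work_struct wiring; the host type here is hypothetical. */
#include <linux/workqueue.h>

struct host_sketch {
	struct work_struct iocb_work;
};

static void iocb_work_fn(struct work_struct *work)
{
	struct host_sketch *h =
		container_of(work, struct host_sketch, iocb_work);

	/* drain a bounded batch of queued events in process context */
	(void)h;
}

/*
 * probe:	INIT_WORK(&h->iocb_work, iocb_work_fn);
 * producer:	schedule_work(&h->iocb_work);	// runs on the system workqueue
 */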
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 45f5077684f0..0e03ca2ab3e5 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -130,6 +130,9 @@ static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
 static struct fc_port *qlt_create_sess(struct scsi_qla_host *vha,
     fc_port_t *fcport, bool local);
 void qlt_unreg_sess(struct fc_port *sess);
+static void qlt_24xx_handle_abts(struct scsi_qla_host *,
+    struct abts_recv_from_24xx *);
+
 /*
  * Global Variables
  */
@@ -140,6 +143,20 @@ static struct workqueue_struct *qla_tgt_wq;
 static DEFINE_MUTEX(qla_tgt_mutex);
 static LIST_HEAD(qla_tgt_glist);
 
+static const char *prot_op_str(u32 prot_op)
+{
+    switch (prot_op) {
+    case TARGET_PROT_NORMAL:        return "NORMAL";
+    case TARGET_PROT_DIN_INSERT:    return "DIN_INSERT";
+    case TARGET_PROT_DOUT_INSERT:   return "DOUT_INSERT";
+    case TARGET_PROT_DIN_STRIP:     return "DIN_STRIP";
+    case TARGET_PROT_DOUT_STRIP:    return "DOUT_STRIP";
+    case TARGET_PROT_DIN_PASS:      return "DIN_PASS";
+    case TARGET_PROT_DOUT_PASS:     return "DOUT_PASS";
+    default:                        return "UNKNOWN";
+    }
+}
+
 /* This API intentionally takes dest as a parameter, rather than returning
  * int value to avoid caller forgetting to issue wmb() after the store */
 void qlt_do_generation_tick(struct scsi_qla_host *vha, int *dest)
@@ -170,21 +187,23 @@ static inline
 struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha,
     uint8_t *d_id)
 {
-    struct qla_hw_data *ha = vha->hw;
-    uint8_t vp_idx;
-
-    if ((vha->d_id.b.area != d_id[1]) || (vha->d_id.b.domain != d_id[0]))
-        return NULL;
-
-    if (vha->d_id.b.al_pa == d_id[2])
+    struct scsi_qla_host *host;
+    uint32_t key = 0;
+
+    if ((vha->d_id.b.area == d_id[1]) && (vha->d_id.b.domain == d_id[0]) &&
+        (vha->d_id.b.al_pa == d_id[2]))
         return vha;
 
-    BUG_ON(ha->tgt.tgt_vp_map == NULL);
-    vp_idx = ha->tgt.tgt_vp_map[d_id[2]].idx;
-    if (likely(test_bit(vp_idx, ha->vp_idx_map)))
-        return ha->tgt.tgt_vp_map[vp_idx].vha;
+    key = (uint32_t)d_id[0] << 16;
+    key |= (uint32_t)d_id[1] << 8;
+    key |= (uint32_t)d_id[2];
 
-    return NULL;
+    host = btree_lookup32(&vha->hw->tgt.host_map, key);
+    if (!host)
+        ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff,
+            "Unable to find host %06x\n", key);
+
+    return host;
 }
 
 static inline
@@ -389,6 +408,8 @@ static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
             (struct abts_recv_from_24xx *)atio;
         struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
             entry->vp_index);
+        unsigned long flags;
+
         if (unlikely(!host)) {
             ql_dbg(ql_dbg_tgt, vha, 0xffff,
                 "qla_target(%d): Response pkt (ABTS_RECV_24XX) "
@@ -396,9 +417,12 @@ static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
                 vha->vp_idx, entry->vp_index);
             break;
         }
-        qlt_response_pkt(host, (response_t *)atio);
+        if (!ha_locked)
+            spin_lock_irqsave(&host->hw->hardware_lock, flags);
+        qlt_24xx_handle_abts(host, (struct abts_recv_from_24xx *)atio);
+        if (!ha_locked)
+            spin_unlock_irqrestore(&host->hw->hardware_lock, flags);
         break;
-
     }
 
     /* case PUREX_IOCB_TYPE: ql2xmvasynctoatio */
@@ -554,6 +578,7 @@ void qla2x00_async_nack_sp_done(void *s, int res)
         sp->fcport->login_gen++;
         sp->fcport->fw_login_state = DSC_LS_PLOGI_COMP;
         sp->fcport->logout_on_delete = 1;
+        sp->fcport->plogi_nack_done_deadline = jiffies + HZ;
         break;
 
     case SRB_NACK_PRLI:
@@ -613,6 +638,7 @@ int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport,
         break;
     case SRB_NACK_PRLI:
         fcport->fw_login_state = DSC_LS_PRLI_PEND;
+        fcport->deleted = 0;
         c = "PRLI";
         break;
     case SRB_NACK_LOGO:
@@ -1215,7 +1241,7 @@ static int qla24xx_get_loop_id(struct scsi_qla_host *vha, const uint8_t *s_id,
     }
 
     /* Get list of logged in devices */
-    rc = qla2x00_get_id_list(vha, gid_list, gid_list_dma, &entries);
+    rc = qla24xx_gidlist_wait(vha, gid_list, gid_list_dma, &entries);
     if (rc != QLA_SUCCESS) {
         ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045,
             "qla_target(%d): get_id_list() failed: %x\n",
@@ -1551,6 +1577,9 @@ static void qlt_send_notify_ack(struct scsi_qla_host *vha,
     request_t *pkt;
     struct nack_to_isp *nack;
 
+    if (!ha->flags.fw_started)
+        return;
+
     ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha);
 
     /* Send marker if required */
@@ -2013,6 +2042,70 @@ void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
 }
 EXPORT_SYMBOL(qlt_free_mcmd);
 
+/*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then
+ * reacquire
+ */
+void qlt_send_resp_ctio(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
+    uint8_t scsi_status, uint8_t sense_key, uint8_t asc, uint8_t ascq)
+{
+    struct atio_from_isp *atio = &cmd->atio;
+    struct ctio7_to_24xx *ctio;
+    uint16_t temp;
+
+    ql_dbg(ql_dbg_tgt_dif, vha, 0x3066,
+        "Sending response CTIO7 (vha=%p, atio=%p, scsi_status=%02x, "
+        "sense_key=%02x, asc=%02x, ascq=%02x",
+        vha, atio, scsi_status, sense_key, asc, ascq);
+
+    ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(vha, NULL);
+    if (!ctio) {
+        ql_dbg(ql_dbg_async, vha, 0x3067,
+            "qla2x00t(%ld): %s failed: unable to allocate request packet",
+            vha->host_no, __func__);
+        goto out;
+    }
+
+    ctio->entry_type = CTIO_TYPE7;
+    ctio->entry_count = 1;
+    ctio->handle = QLA_TGT_SKIP_HANDLE;
+    ctio->nport_handle = cmd->sess->loop_id;
+    ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
+    ctio->vp_index = vha->vp_idx;
+    ctio->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
+    ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
+    ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
+    ctio->exchange_addr = atio->u.isp24.exchange_addr;
+    ctio->u.status1.flags = (atio->u.isp24.attr << 9) |
+        cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS);
+    temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
+    ctio->u.status1.ox_id = cpu_to_le16(temp);
+    ctio->u.status1.scsi_status =
+        cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID | scsi_status);
+    ctio->u.status1.response_len = cpu_to_le16(18);
+    ctio->u.status1.residual = cpu_to_le32(get_datalen_for_atio(atio));
+
+    if (ctio->u.status1.residual != 0)
+        ctio->u.status1.scsi_status |=
+            cpu_to_le16(SS_RESIDUAL_UNDER);
+
+    /* Response code and sense key */
+    put_unaligned_le32(((0x70 << 24) | (sense_key << 8)),
+        (&ctio->u.status1.sense_data)[0]);
+    /* Additional sense length */
+    put_unaligned_le32(0x0a, (&ctio->u.status1.sense_data)[1]);
+    /* ASC and ASCQ */
+    put_unaligned_le32(((asc << 24) | (ascq << 16)),
+        (&ctio->u.status1.sense_data)[3]);
+
+    /* Memory Barrier */
+    wmb();
+
+    qla2x00_start_iocbs(vha, vha->req);
+out:
+    return;
+}
+
 /* callback from target fabric module code */
 void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
 {
@@ -2261,7 +2354,7 @@ static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm,
          */
         return -EAGAIN;
     } else
-        ha->tgt.cmds[h-1] = prm->cmd;
+        ha->tgt.cmds[h - 1] = prm->cmd;
 
     pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK;
     pkt->nport_handle = prm->cmd->loop_id;
@@ -2391,6 +2484,50 @@ static inline int qlt_has_data(struct qla_tgt_cmd *cmd)
     return cmd->bufflen > 0;
 }
 
+static void qlt_print_dif_err(struct qla_tgt_prm *prm)
+{
+    struct qla_tgt_cmd *cmd;
+    struct scsi_qla_host *vha;
+
+    /* asc 0x10=dif error */
+    if (prm->sense_buffer && (prm->sense_buffer[12] == 0x10)) {
+        cmd = prm->cmd;
+        vha = cmd->vha;
+        /* ASCQ */
+        switch (prm->sense_buffer[13]) {
+        case 1:
+            ql_dbg(ql_dbg_tgt_dif, vha, 0xffff,
+                "BE detected Guard TAG ERR: lba[0x%llx|%lld] len[0x%x] "
+                "se_cmd=%p tag[%x]",
+                cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
+                cmd->atio.u.isp24.exchange_addr);
+            break;
+        case 2:
+            ql_dbg(ql_dbg_tgt_dif, vha, 0xffff,
+                "BE detected APP TAG ERR: lba[0x%llx|%lld] len[0x%x] "
+                "se_cmd=%p tag[%x]",
+                cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
+                cmd->atio.u.isp24.exchange_addr);
+            break;
+        case 3:
+            ql_dbg(ql_dbg_tgt_dif, vha, 0xffff,
+                "BE detected REF TAG ERR: lba[0x%llx|%lld] len[0x%x] "
+                "se_cmd=%p tag[%x]",
+                cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
+                cmd->atio.u.isp24.exchange_addr);
+            break;
+        default:
+            ql_dbg(ql_dbg_tgt_dif, vha, 0xffff,
+                "BE detected Dif ERR: lba[%llx|%lld] len[%x] "
+                "se_cmd=%p tag[%x]",
+                cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
+                cmd->atio.u.isp24.exchange_addr);
+            break;
+        }
+        ql_dump_buffer(ql_dbg_tgt_dif, vha, 0xffff, cmd->cdb, 16);
+    }
+}
+
 /*
  * Called without ha->hardware_lock held
  */
@@ -2512,18 +2649,9 @@ skip_explict_conf:
         for (i = 0; i < prm->sense_buffer_len/4; i++)
             ((uint32_t *)ctio->u.status1.sense_data)[i] =
                 cpu_to_be32(((uint32_t *)prm->sense_buffer)[i]);
-#if 0
-        if (unlikely((prm->sense_buffer_len % 4) != 0)) {
-            static int q;
-            if (q < 10) {
-                ql_dbg(ql_dbg_tgt, vha, 0xe04f,
-                    "qla_target(%d): %d bytes of sense "
-                    "lost", prm->tgt->ha->vp_idx,
-                    prm->sense_buffer_len % 4);
-                q++;
-            }
-        }
-#endif
+
+        qlt_print_dif_err(prm);
+
     } else {
         ctio->u.status1.flags &=
             ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
@@ -2537,19 +2665,9 @@ skip_explict_conf:
     /* Sense with len > 24, is it possible ??? */
 }
 
-
-
-/* diff */
 static inline int
 qlt_hba_err_chk_enabled(struct se_cmd *se_cmd)
 {
-    /*
-     * Uncomment when corresponding SCSI changes are done.
-     *
-    if (!sp->cmd->prot_chk)
-        return 0;
-     *
-     */
     switch (se_cmd->prot_op) {
     case TARGET_PROT_DOUT_INSERT:
     case TARGET_PROT_DIN_STRIP:
@@ -2570,16 +2688,38 @@ qlt_hba_err_chk_enabled(struct se_cmd *se_cmd)
     return 0;
 }
 
+static inline int
+qla_tgt_ref_mask_check(struct se_cmd *se_cmd)
+{
+    switch (se_cmd->prot_op) {
+    case TARGET_PROT_DIN_INSERT:
+    case TARGET_PROT_DOUT_INSERT:
+    case TARGET_PROT_DIN_STRIP:
+    case TARGET_PROT_DOUT_STRIP:
+    case TARGET_PROT_DIN_PASS:
+    case TARGET_PROT_DOUT_PASS:
+        return 1;
+    default:
+        return 0;
+    }
+    return 0;
+}
+
 /*
- * qla24xx_set_t10dif_tags_from_cmd - Extract Ref and App tags from SCSI command
- *
+ * qla_tgt_set_dif_tags - Extract Ref and App tags from SCSI command
  */
-static inline void
-qlt_set_t10dif_tags(struct se_cmd *se_cmd, struct crc_context *ctx)
+static void
+qla_tgt_set_dif_tags(struct qla_tgt_cmd *cmd, struct crc_context *ctx,
+    uint16_t *pfw_prot_opts)
 {
+    struct se_cmd *se_cmd = &cmd->se_cmd;
     uint32_t lba = 0xffffffff & se_cmd->t_task_lba;
+    scsi_qla_host_t *vha = cmd->tgt->vha;
+    struct qla_hw_data *ha = vha->hw;
+    uint32_t t32 = 0;
 
-    /* wait til Mode Sense/Select cmd, modepage Ah, subpage 2
+    /*
+     * wait till Mode Sense/Select cmd, modepage Ah, subpage 2
      * have been implemented by TCM, before AppTag is avail.
      * Look for modesense_handlers[]
      */
@@ -2587,65 +2727,73 @@ qlt_set_t10dif_tags(struct se_cmd *se_cmd, struct crc_context *ctx)
     ctx->app_tag_mask[0] = 0x0;
     ctx->app_tag_mask[1] = 0x0;
 
+    if (IS_PI_UNINIT_CAPABLE(ha)) {
+        if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) ||
+            (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT))
+            *pfw_prot_opts |= PO_DIS_VALD_APP_ESC;
+        else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT)
+            *pfw_prot_opts |= PO_DIS_VALD_APP_REF_ESC;
+    }
+
+    t32 = ha->tgt.tgt_ops->get_dif_tags(cmd, pfw_prot_opts);
+
     switch (se_cmd->prot_type) {
     case TARGET_DIF_TYPE0_PROT:
         /*
-         * No check for ql2xenablehba_err_chk, as it would be an
-         * I/O error if hba tag generation is not done.
+         * No check for ql2xenablehba_err_chk, as it
+         * would be an I/O error if hba tag generation
+         * is not done.
          */
         ctx->ref_tag = cpu_to_le32(lba);
-
-        if (!qlt_hba_err_chk_enabled(se_cmd))
-            break;
-
         /* enable ALL bytes of the ref tag */
         ctx->ref_tag_mask[0] = 0xff;
         ctx->ref_tag_mask[1] = 0xff;
         ctx->ref_tag_mask[2] = 0xff;
         ctx->ref_tag_mask[3] = 0xff;
         break;
-    /*
-     * For TYpe 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
-     * 16 bit app tag.
-     */
     case TARGET_DIF_TYPE1_PROT:
-        ctx->ref_tag = cpu_to_le32(lba);
-
-        if (!qlt_hba_err_chk_enabled(se_cmd))
-            break;
-
-        /* enable ALL bytes of the ref tag */
-        ctx->ref_tag_mask[0] = 0xff;
-        ctx->ref_tag_mask[1] = 0xff;
-        ctx->ref_tag_mask[2] = 0xff;
-        ctx->ref_tag_mask[3] = 0xff;
-        break;
-    /*
-     * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
-     * match LBA in CDB + N
-     */
+        /*
+         * For TYPE 1 protection: 16 bit GUARD tag, 32 bit
+         * REF tag, and 16 bit app tag.
+         */
+        ctx->ref_tag = cpu_to_le32(lba);
+        if (!qla_tgt_ref_mask_check(se_cmd) ||
+            !(ha->tgt.tgt_ops->chk_dif_tags(t32))) {
+            *pfw_prot_opts |= PO_DIS_REF_TAG_VALD;
+            break;
+        }
+        /* enable ALL bytes of the ref tag */
+        ctx->ref_tag_mask[0] = 0xff;
+        ctx->ref_tag_mask[1] = 0xff;
+        ctx->ref_tag_mask[2] = 0xff;
+        ctx->ref_tag_mask[3] = 0xff;
+        break;
     case TARGET_DIF_TYPE2_PROT:
-        ctx->ref_tag = cpu_to_le32(lba);
-
-        if (!qlt_hba_err_chk_enabled(se_cmd))
-            break;
-
-        /* enable ALL bytes of the ref tag */
-        ctx->ref_tag_mask[0] = 0xff;
-        ctx->ref_tag_mask[1] = 0xff;
-        ctx->ref_tag_mask[2] = 0xff;
-        ctx->ref_tag_mask[3] = 0xff;
-        break;
-
-    /* For Type 3 protection: 16 bit GUARD only */
+        /*
+         * For TYPE 2 protection: 16 bit GUARD + 32 bit REF
+         * tag has to match LBA in CDB + N
+         */
+        ctx->ref_tag = cpu_to_le32(lba);
+        if (!qla_tgt_ref_mask_check(se_cmd) ||
+            !(ha->tgt.tgt_ops->chk_dif_tags(t32))) {
+            *pfw_prot_opts |= PO_DIS_REF_TAG_VALD;
+            break;
+        }
+        /* enable ALL bytes of the ref tag */
+        ctx->ref_tag_mask[0] = 0xff;
+        ctx->ref_tag_mask[1] = 0xff;
+        ctx->ref_tag_mask[2] = 0xff;
+        ctx->ref_tag_mask[3] = 0xff;
+        break;
     case TARGET_DIF_TYPE3_PROT:
-        ctx->ref_tag_mask[0] = ctx->ref_tag_mask[1] =
-            ctx->ref_tag_mask[2] = ctx->ref_tag_mask[3] = 0x00;
-        break;
+        /* For TYPE 3 protection: 16 bit GUARD only */
+        *pfw_prot_opts |= PO_DIS_REF_TAG_VALD;
+        ctx->ref_tag_mask[0] = ctx->ref_tag_mask[1] =
+            ctx->ref_tag_mask[2] = ctx->ref_tag_mask[3] = 0x00;
+        break;
     }
 }
 
-
 static inline int
 qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
 {
@@ -2664,6 +2812,7 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
     struct se_cmd *se_cmd = &cmd->se_cmd;
     uint32_t h;
     struct atio_from_isp *atio = &prm->cmd->atio;
+    struct qla_tc_param tc;
     uint16_t t16;
 
     ha = vha->hw;
@@ -2689,16 +2838,15 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
     case TARGET_PROT_DIN_INSERT:
     case TARGET_PROT_DOUT_STRIP:
         transfer_length = data_bytes;
-        data_bytes += dif_bytes;
+        if (cmd->prot_sg_cnt)
+            data_bytes += dif_bytes;
         break;
-
     case TARGET_PROT_DIN_STRIP:
     case TARGET_PROT_DOUT_INSERT:
     case TARGET_PROT_DIN_PASS:
     case TARGET_PROT_DOUT_PASS:
         transfer_length = data_bytes + dif_bytes;
         break;
-
     default:
         BUG();
         break;
@@ -2734,7 +2882,6 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
         break;
     }
 
-
     /* ---- PKT ---- */
     /* Update entry type to indicate Command Type CRC_2 IOCB */
     pkt->entry_type = CTIO_CRC2;
@@ -2752,9 +2899,8 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
     } else
         ha->tgt.cmds[h-1] = prm->cmd;
 
-
     pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK;
-    pkt->nport_handle = prm->cmd->loop_id;
+    pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id);
     pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
     pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
     pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
@@ -2775,12 +2921,10 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
     else if (cmd->dma_data_direction == DMA_FROM_DEVICE)
         pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_OUT);
 
-
     pkt->dseg_count = prm->tot_dsds;
     /* Fibre channel byte count */
     pkt->transfer_length = cpu_to_le32(transfer_length);
 
-
     /* ----- CRC context -------- */
 
     /* Allocate CRC context from global pool */
@@ -2800,13 +2944,12 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
     /* Set handle */
     crc_ctx_pkt->handle = pkt->handle;
 
-    qlt_set_t10dif_tags(se_cmd, crc_ctx_pkt);
+    qla_tgt_set_dif_tags(cmd, crc_ctx_pkt, &fw_prot_opts);
 
     pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
     pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
     pkt->crc_context_len = CRC_CONTEXT_LEN_FW;
 
-
     if (!bundling) {
         cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
     } else {
@@ -2827,16 +2970,24 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
     crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
     crc_ctx_pkt->guard_seed = cpu_to_le16(0);
 
+    memset((uint8_t *)&tc, 0 , sizeof(tc));
+    tc.vha = vha;
+    tc.blk_sz = cmd->blk_sz;
+    tc.bufflen = cmd->bufflen;
+    tc.sg = cmd->sg;
+    tc.prot_sg = cmd->prot_sg;
+    tc.ctx = crc_ctx_pkt;
+    tc.ctx_dsd_alloced = &cmd->ctx_dsd_alloced;
 
     /* Walks data segments */
     pkt->flags |= cpu_to_le16(CTIO7_FLAGS_DSD_PTR);
 
     if (!bundling && prm->prot_seg_cnt) {
         if (qla24xx_walk_and_build_sglist_no_difb(ha, NULL, cur_dsd,
-            prm->tot_dsds, cmd))
+            prm->tot_dsds, &tc))
             goto crc_queuing_error;
     } else if (qla24xx_walk_and_build_sglist(ha, NULL, cur_dsd,
-        (prm->tot_dsds - prm->prot_seg_cnt), cmd))
+        (prm->tot_dsds - prm->prot_seg_cnt), &tc))
         goto crc_queuing_error;
 
     if (bundling && prm->prot_seg_cnt) {
@@ -2845,18 +2996,18 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
 
         cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
         if (qla24xx_walk_and_build_prot_sglist(ha, NULL, cur_dsd,
-            prm->prot_seg_cnt, cmd))
+            prm->prot_seg_cnt, &tc))
             goto crc_queuing_error;
     }
     return QLA_SUCCESS;
 
 crc_queuing_error:
     /* Cleanup will be performed by the caller */
+    vha->hw->tgt.cmds[h - 1] = NULL;
 
     return QLA_FUNCTION_FAILED;
 }
 
-
 /*
  * Callback to setup response of xmit_type of QLA_TGT_XMIT_DATA and *
  * QLA_TGT_XMIT_STATUS for >= 24xx silicon
@@ -2906,7 +3057,7 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
     else
         vha->tgt_counters.core_qla_que_buf++;
 
-    if (!vha->flags.online || cmd->reset_count != ha->chip_reset) {
+    if (!ha->flags.fw_started || cmd->reset_count != ha->chip_reset) {
         /*
         * Either the port is not online or this request was from
         * previous life, just abort the processing.
@@ -3047,7 +3198,7 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
 
     spin_lock_irqsave(&ha->hardware_lock, flags);
 
-    if (!vha->flags.online || (cmd->reset_count != ha->chip_reset) ||
+    if (!ha->flags.fw_started || (cmd->reset_count != ha->chip_reset) ||
         (cmd->sess && cmd->sess->deleted)) {
         /*
         * Either the port is not online or this request was from
@@ -3104,139 +3255,113 @@ EXPORT_SYMBOL(qlt_rdy_to_xfer);
 
 
 /*
- * Checks the guard or meta-data for the type of error
- * detected by the HBA.
+ * it is assumed either hardware_lock or qpair lock is held.
  */
-static inline int
+static void
 qlt_handle_dif_error(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd,
     struct ctio_crc_from_fw *sts)
 {
     uint8_t *ap = &sts->actual_dif[0];
     uint8_t *ep = &sts->expected_dif[0];
-    uint32_t e_ref_tag, a_ref_tag;
-    uint16_t e_app_tag, a_app_tag;
-    uint16_t e_guard, a_guard;
     uint64_t lba = cmd->se_cmd.t_task_lba;
+    uint8_t scsi_status, sense_key, asc, ascq;
+    unsigned long flags;
 
-    a_guard = be16_to_cpu(*(uint16_t *)(ap + 0));
-    a_app_tag = be16_to_cpu(*(uint16_t *)(ap + 2));
-    a_ref_tag = be32_to_cpu(*(uint32_t *)(ap + 4));
-
-    e_guard = be16_to_cpu(*(uint16_t *)(ep + 0));
-    e_app_tag = be16_to_cpu(*(uint16_t *)(ep + 2));
-    e_ref_tag = be32_to_cpu(*(uint32_t *)(ep + 4));
-
-    ql_dbg(ql_dbg_tgt, vha, 0xe075,
-        "iocb(s) %p Returned STATUS.\n", sts);
-
-    ql_dbg(ql_dbg_tgt, vha, 0xf075,
-        "dif check TGT cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x]\n",
-        cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
-        a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, a_guard, e_guard);
-
-    /*
-     * Ignore sector if:
-     * For type 3: ref & app tag is all 'f's
-     * For type 0,1,2: app tag is all 'f's
-     */
-    if ((a_app_tag == 0xffff) &&
-        ((cmd->se_cmd.prot_type != TARGET_DIF_TYPE3_PROT) ||
-        (a_ref_tag == 0xffffffff))) {
-        uint32_t blocks_done;
-
-        /* 2TB boundary case covered automatically with this */
-        blocks_done = e_ref_tag - (uint32_t)lba + 1;
-        cmd->se_cmd.bad_sector = e_ref_tag;
-        cmd->se_cmd.pi_err = 0;
-        ql_dbg(ql_dbg_tgt, vha, 0xf074,
-            "need to return scsi good\n");
-
-        /* Update protection tag */
-        if (cmd->prot_sg_cnt) {
-            uint32_t i, k = 0, num_ent;
-            struct scatterlist *sg, *sgl;
-
-
-            sgl = cmd->prot_sg;
-
-            /* Patch the corresponding protection tags */
-            for_each_sg(sgl, sg, cmd->prot_sg_cnt, i) {
-                num_ent = sg_dma_len(sg) / 8;
-                if (k + num_ent < blocks_done) {
-                    k += num_ent;
-                    continue;
-                }
-                k = blocks_done;
-                break;
-            }
+    cmd->trc_flags |= TRC_DIF_ERR;
 
-            if (k != blocks_done) {
-                ql_log(ql_log_warn, vha, 0xf076,
-                    "unexpected tag values tag:lba=%u:%llu)\n",
-                    e_ref_tag, (unsigned long long)lba);
-                goto out;
-            }
+    cmd->a_guard = be16_to_cpu(*(uint16_t *)(ap + 0));
+    cmd->a_app_tag = be16_to_cpu(*(uint16_t *)(ap + 2));
+    cmd->a_ref_tag = be32_to_cpu(*(uint32_t *)(ap + 4));
 
-#if 0
-            struct sd_dif_tuple *spt;
-            /* TODO:
-             * This section came from initiator. Is it valid here?
-             * should ulp be override with actual val???
-             */
-            spt = page_address(sg_page(sg)) + sg->offset;
-            spt += j;
+    cmd->e_guard = be16_to_cpu(*(uint16_t *)(ep + 0));
+    cmd->e_app_tag = be16_to_cpu(*(uint16_t *)(ep + 2));
+    cmd->e_ref_tag = be32_to_cpu(*(uint32_t *)(ep + 4));
 
-            spt->app_tag = 0xffff;
-            if (cmd->se_cmd.prot_type == SCSI_PROT_DIF_TYPE3)
-                spt->ref_tag = 0xffffffff;
-#endif
-        }
+    ql_dbg(ql_dbg_tgt_dif, vha, 0xf075,
+        "%s: aborted %d state %d\n", __func__, cmd->aborted, cmd->state);
 
-        return 0;
-    }
+    scsi_status = sense_key = asc = ascq = 0;
 
-    /* check guard */
-    if (e_guard != a_guard) {
-        cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
-        cmd->se_cmd.bad_sector = cmd->se_cmd.t_task_lba;
-
-        ql_log(ql_log_warn, vha, 0xe076,
-            "Guard ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n",
-            cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
-            a_ref_tag, e_ref_tag, a_app_tag, e_app_tag,
-            a_guard, e_guard, cmd);
-        goto out;
+    /* check appl tag */
+    if (cmd->e_app_tag != cmd->a_app_tag) {
+        ql_dbg(ql_dbg_tgt_dif, vha, 0xffff,
+            "App Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] "
+            "Ref[%x|%x], App[%x|%x], "
+            "Guard [%x|%x] cmd=%p ox_id[%04x]",
+            cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
+            cmd->a_ref_tag, cmd->e_ref_tag,
+            cmd->a_app_tag, cmd->e_app_tag,
+            cmd->a_guard, cmd->e_guard,
+            cmd, cmd->atio.u.isp24.fcp_hdr.ox_id);
+
+        cmd->dif_err_code = DIF_ERR_APP;
+        scsi_status = SAM_STAT_CHECK_CONDITION;
+        sense_key = ABORTED_COMMAND;
+        asc = 0x10;
+        ascq = 0x2;
     }
 
     /* check ref tag */
-    if (e_ref_tag != a_ref_tag) {
-        cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
-        cmd->se_cmd.bad_sector = e_ref_tag;
-
-        ql_log(ql_log_warn, vha, 0xe077,
-            "Ref Tag ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n",
-            cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
-            a_ref_tag, e_ref_tag, a_app_tag, e_app_tag,
-            a_guard, e_guard, cmd);
+    if (cmd->e_ref_tag != cmd->a_ref_tag) {
+        ql_dbg(ql_dbg_tgt_dif, vha, 0xffff,
+            "Ref Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] "
+            "Ref[%x|%x], App[%x|%x], "
+            "Guard[%x|%x] cmd=%p ox_id[%04x] ",
+            cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
+            cmd->a_ref_tag, cmd->e_ref_tag,
+            cmd->a_app_tag, cmd->e_app_tag,
+            cmd->a_guard, cmd->e_guard,
+            cmd, cmd->atio.u.isp24.fcp_hdr.ox_id);
+
+        cmd->dif_err_code = DIF_ERR_REF;
+        scsi_status = SAM_STAT_CHECK_CONDITION;
+        sense_key = ABORTED_COMMAND;
+        asc = 0x10;
+        ascq = 0x3;
         goto out;
     }
 
-    /* check appl tag */
-    if (e_app_tag != a_app_tag) {
-        cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED;
-        cmd->se_cmd.bad_sector = cmd->se_cmd.t_task_lba;
-
-        ql_log(ql_log_warn, vha, 0xe078,
-            "App Tag ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n",
-            cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
-            a_ref_tag, e_ref_tag, a_app_tag, e_app_tag,
-            a_guard, e_guard, cmd);
-        goto out;
+    /* check guard */
+    if (cmd->e_guard != cmd->a_guard) {
+        ql_dbg(ql_dbg_tgt_dif, vha, 0xffff,
+            "Guard ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] "
+            "Ref[%x|%x], App[%x|%x], "
+            "Guard [%x|%x] cmd=%p ox_id[%04x]",
+            cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
+            cmd->a_ref_tag, cmd->e_ref_tag,
+            cmd->a_app_tag, cmd->e_app_tag,
+            cmd->a_guard, cmd->e_guard,
+            cmd, cmd->atio.u.isp24.fcp_hdr.ox_id);
+        cmd->dif_err_code = DIF_ERR_GRD;
+        scsi_status = SAM_STAT_CHECK_CONDITION;
+        sense_key = ABORTED_COMMAND;
+        asc = 0x10;
+        ascq = 0x1;
     }
 out:
-    return 1;
-}
+    switch (cmd->state) {
+    case QLA_TGT_STATE_NEED_DATA:
+        /* handle_data will load DIF error code */
+        cmd->state = QLA_TGT_STATE_DATA_IN;
+        vha->hw->tgt.tgt_ops->handle_data(cmd);
+        break;
+    default:
+        spin_lock_irqsave(&cmd->cmd_lock, flags);
+        if (cmd->aborted) {
+            spin_unlock_irqrestore(&cmd->cmd_lock, flags);
+            vha->hw->tgt.tgt_ops->free_cmd(cmd);
+            break;
+        }
+        spin_unlock_irqrestore(&cmd->cmd_lock, flags);
 
+        qlt_send_resp_ctio(vha, cmd, scsi_status, sense_key, asc, ascq);
+        /* assume scsi status gets out on the wire.
+         * Will not wait for completion.
+         */
+        vha->hw->tgt.tgt_ops->free_cmd(cmd);
+        break;
+    }
+}
 
 /* If hardware_lock held on entry, might drop it, then reacquire */
 /* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
@@ -3251,7 +3376,7 @@ static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha,
     ql_dbg(ql_dbg_tgt_tmr, vha, 0xe01c,
         "Sending TERM ELS CTIO (ha=%p)\n", ha);
 
-    pkt = (request_t *)qla2x00_alloc_iocbs_ready(vha, NULL);
+    pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
     if (pkt == NULL) {
         ql_dbg(ql_dbg_tgt, vha, 0xe080,
             "qla_target(%d): %s failed: unable to allocate "
@@ -3543,6 +3668,16 @@ static int qlt_term_ctio_exchange(struct scsi_qla_host *vha, void *ctio,
 {
     int term = 0;
 
+    if (cmd->se_cmd.prot_op)
+        ql_dbg(ql_dbg_tgt_dif, vha, 0xffff,
+            "Term DIF cmd: lba[0x%llx|%lld] len[0x%x] "
+            "se_cmd=%p tag[%x] op %#x/%s",
+            cmd->lba, cmd->lba,
+            cmd->num_blks, &cmd->se_cmd,
+            cmd->atio.u.isp24.exchange_addr,
+            cmd->se_cmd.prot_op,
+            prot_op_str(cmd->se_cmd.prot_op));
+
     if (ctio != NULL) {
         struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio;
         term = !(c->flags &
@@ -3760,32 +3895,15 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
             struct ctio_crc_from_fw *crc =
                 (struct ctio_crc_from_fw *)ctio;
             ql_dbg(ql_dbg_tgt_mgt, vha, 0xf073,
-                "qla_target(%d): CTIO with DIF_ERROR status %x received (state %x, se_cmd %p) actual_dif[0x%llx] expect_dif[0x%llx]\n",
+                "qla_target(%d): CTIO with DIF_ERROR status %x "
+                "received (state %x, ulp_cmd %p) actual_dif[0x%llx] "
+                "expect_dif[0x%llx]\n",
                 vha->vp_idx, status, cmd->state, se_cmd,
                 *((u64 *)&crc->actual_dif[0]),
                 *((u64 *)&crc->expected_dif[0]));
 
-            if (qlt_handle_dif_error(vha, cmd, ctio)) {
-                if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
-                    /* scsi Write/xfer rdy complete */
-                    goto skip_term;
-                } else {
-                    /* scsi read/xmit respond complete
-                     * call handle dif to send scsi status
-                     * rather than terminate exchange.
-                     */
-                    cmd->state = QLA_TGT_STATE_PROCESSED;
-                    ha->tgt.tgt_ops->handle_dif_err(cmd);
-                    return;
-                }
-            } else {
-                /* Need to generate a SCSI good completion.
-                 * because FW did not send scsi status.
-                 */
-                status = 0;
-                goto skip_term;
-            }
-            break;
+            qlt_handle_dif_error(vha, cmd, ctio);
+            return;
         }
         default:
             ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
@@ -3808,7 +3926,6 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
             return;
         }
     }
-skip_term:
 
     if (cmd->state == QLA_TGT_STATE_PROCESSED) {
         cmd->trc_flags |= TRC_CTIO_DONE;
@@ -4584,7 +4701,8 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
     }
 
     if (sess != NULL) {
-        if (sess->fw_login_state == DSC_LS_PLOGI_PEND) {
+        if (sess->fw_login_state != DSC_LS_PLOGI_PEND &&
+            sess->fw_login_state != DSC_LS_PLOGI_COMP) {
             /*
              * Impatient initiator sent PRLI before last
              * PLOGI could finish. Will force him to re-try,
@@ -4623,15 +4741,23 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
 
         /* Make session global (not used in fabric mode) */
         if (ha->current_topology != ISP_CFG_F) {
-            set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
-            set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
-            qla2xxx_wake_dpc(vha);
+            if (sess) {
+                ql_dbg(ql_dbg_disc, vha, 0xffff,
+                    "%s %d %8phC post nack\n",
+                    __func__, __LINE__, sess->port_name);
+                qla24xx_post_nack_work(vha, sess, iocb,
+                    SRB_NACK_PRLI);
+                res = 0;
+            } else {
+                set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+                set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+                qla2xxx_wake_dpc(vha);
+            }
         } else {
             if (sess) {
                 ql_dbg(ql_dbg_disc, vha, 0xffff,
                     "%s %d %8phC post nack\n",
                     __func__, __LINE__, sess->port_name);
-
                 qla24xx_post_nack_work(vha, sess, iocb,
                     SRB_NACK_PRLI);
                 res = 0;
@@ -4639,7 +4765,6 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
         }
         break;
 
-
     case ELS_TPRLO:
         if (le16_to_cpu(iocb->u.isp24.flags) &
             NOTIFY24XX_FLAGS_GLOBAL_TPRLO) {
@@ -5079,16 +5204,22 @@ qlt_send_busy(struct scsi_qla_host *vha,
 
 static int
 qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha,
-    struct atio_from_isp *atio)
+    struct atio_from_isp *atio, bool ha_locked)
 {
     struct qla_hw_data *ha = vha->hw;
     uint16_t status;
+    unsigned long flags;
 
     if (ha->tgt.num_pend_cmds < Q_FULL_THRESH_HOLD(ha))
         return 0;
 
+    if (!ha_locked)
+        spin_lock_irqsave(&ha->hardware_lock, flags);
     status = temp_sam_status;
     qlt_send_busy(vha, atio, status);
+    if (!ha_locked)
+        spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
     return 1;
 }
 
@@ -5103,7 +5234,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
     unsigned long flags;
 
     if (unlikely(tgt == NULL)) {
-        ql_dbg(ql_dbg_io, vha, 0x3064,
+        ql_dbg(ql_dbg_tgt, vha, 0x3064,
             "ATIO pkt, but no tgt (ha %p)", ha);
         return;
     }
@@ -5133,7 +5264,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
 
 
     if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) {
-        rc = qlt_chk_qfull_thresh_hold(vha, atio);
+        rc = qlt_chk_qfull_thresh_hold(vha, atio, ha_locked);
         if (rc != 0) {
             tgt->atio_irq_cmd_count--;
             return;
@@ -5256,7 +5387,7 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
             break;
         }
 
-        rc = qlt_chk_qfull_thresh_hold(vha, atio);
+        rc = qlt_chk_qfull_thresh_hold(vha, atio, true);
         if (rc != 0) {
             tgt->irq_cmd_count--;
             return;
@@ -5531,7 +5662,7 @@ static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
 
     fcport->loop_id = loop_id;
 
-    rc = qla2x00_get_port_database(vha, fcport, 0);
+    rc = qla24xx_gpdb_wait(vha, fcport, 0);
     if (rc != QLA_SUCCESS) {
         ql_dbg(ql_dbg_tgt_mgt, vha, 0xf070,
             "qla_target(%d): Failed to retrieve fcport "
@@ -5713,30 +5844,23 @@ static void qlt_abort_work(struct qla_tgt *tgt,
         }
     }
 
-    spin_lock_irqsave(&ha->hardware_lock, flags);
-
-    if (tgt->tgt_stop)
-        goto out_term;
-
     rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess);
+    ha->tgt.tgt_ops->put_sess(sess);
+    spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
+
     if (rc != 0)
         goto out_term;
-    spin_unlock_irqrestore(&ha->hardware_lock, flags);
-    if (sess)
-        ha->tgt.tgt_ops->put_sess(sess);
-    spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
     return;
 
 out_term2:
-    spin_lock_irqsave(&ha->hardware_lock, flags);
+    if (sess)
+        ha->tgt.tgt_ops->put_sess(sess);
+    spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
 
 out_term:
+    spin_lock_irqsave(&ha->hardware_lock, flags);
     qlt_24xx_send_abts_resp(vha, &prm->abts, FCP_TMF_REJECTED, false);
     spin_unlock_irqrestore(&ha->hardware_lock, flags);
-
-    if (sess)
-        ha->tgt.tgt_ops->put_sess(sess);
-    spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
 }
 
 static void qlt_tmr_work(struct qla_tgt *tgt,
@@ -5756,7 +5880,7 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
     spin_lock_irqsave(&ha->tgt.sess_lock, flags);
 
     if (tgt->tgt_stop)
-        goto out_term;
+        goto out_term2;
 
     s_id = prm->tm_iocb2.u.isp24.fcp_hdr.s_id;
     sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
@@ -5768,11 +5892,11 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
 
         spin_lock_irqsave(&ha->tgt.sess_lock, flags);
         if (!sess)
-            goto out_term;
+            goto out_term2;
     } else {
         if (sess->deleted) {
             sess = NULL;
-            goto out_term;
+            goto out_term2;
         }
 
         if (!kref_get_unless_zero(&sess->sess_kref)) {
@@ -5780,7 +5904,7 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
                 "%s: kref_get fail %8phC\n",
                 __func__, sess->port_name);
             sess = NULL;
-            goto out_term;
+            goto out_term2;
         }
     }
 
@@ -5790,17 +5914,19 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
     unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
 
     rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
-    if (rc != 0)
-        goto out_term;
-
     ha->tgt.tgt_ops->put_sess(sess);
     spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+
+    if (rc != 0)
+        goto out_term;
     return;
 
+out_term2:
+    if (sess)
+        ha->tgt.tgt_ops->put_sess(sess);
+    spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
 out_term:
     qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 1, 0);
-    ha->tgt.tgt_ops->put_sess(sess);
-    spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
 }
 
 static void qlt_sess_work_fn(struct work_struct *work)
@@ -5893,13 +6019,13 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
     tgt->datasegs_per_cmd = QLA_TGT_DATASEGS_PER_CMD_24XX;
     tgt->datasegs_per_cont = QLA_TGT_DATASEGS_PER_CONT_24XX;
 
-    if (base_vha->fc_vport)
-        return 0;
-
     mutex_lock(&qla_tgt_mutex);
     list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist);
     mutex_unlock(&qla_tgt_mutex);
 
+    if (ha->tgt.tgt_ops && ha->tgt.tgt_ops->add_target)
+        ha->tgt.tgt_ops->add_target(base_vha);
+
     return 0;
 }
 
@@ -5928,6 +6054,17 @@ int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)
     return 0;
 }
 
+void qlt_remove_target_resources(struct qla_hw_data *ha)
+{
+    struct scsi_qla_host *node;
+    u32 key = 0;
+
+    btree_for_each_safe32(&ha->tgt.host_map, key, node)
+        btree_remove32(&ha->tgt.host_map, key);
+
+    btree_destroy32(&ha->tgt.host_map);
+}
+
 static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn,
     unsigned char *b)
 {
@@ -6234,7 +6371,7 @@ qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked)
     struct atio_from_isp *pkt;
     int cnt, i;
 
-    if (!vha->flags.online)
+    if (!ha->flags.fw_started)
         return;
 
     while ((ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) ||
@@ -6581,6 +6718,8 @@ qlt_modify_vp_config(struct scsi_qla_host *vha,
 void
 qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
 {
+    int rc;
+
     if (!QLA_TGT_MODE_ENABLED())
         return;
 
@@ -6600,6 +6739,13 @@ qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
         qlt_unknown_atio_work_fn);
 
     qlt_clear_mode(base_vha);
+
+    rc = btree_init32(&ha->tgt.host_map);
+    if (rc)
+        ql_log(ql_log_info, base_vha, 0xffff,
+            "Unable to initialize ha->host_map btree\n");
+
+    qlt_update_vp_map(base_vha, SET_VP_IDX);
 }
 
 irqreturn_t
@@ -6642,6 +6788,8 @@ qlt_handle_abts_recv_work(struct work_struct *work)
     spin_lock_irqsave(&ha->hardware_lock, flags);
     qlt_response_pkt_all_vps(vha, (response_t *)&op->atio);
     spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+    kfree(op);
 }
 
 void
@@ -6706,25 +6854,69 @@ qlt_mem_free(struct qla_hw_data *ha)
 void
 qlt_update_vp_map(struct scsi_qla_host *vha, int cmd)
 {
+    void *slot;
+    u32 key;
+    int rc;
+
     if (!QLA_TGT_MODE_ENABLED())
         return;
 
+    key = vha->d_id.b24;
+
     switch (cmd) {
     case SET_VP_IDX:
         vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha;
         break;
     case SET_AL_PA:
-        vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = vha->vp_idx;
+        slot = btree_lookup32(&vha->hw->tgt.host_map, key);
+        if (!slot) {
+            ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff,
+                "Save vha in host_map %p %06x\n", vha, key);
+            rc = btree_insert32(&vha->hw->tgt.host_map,
+                key, vha, GFP_ATOMIC);
+            if (rc)
+                ql_log(ql_log_info, vha, 0xffff,
+                    "Unable to insert s_id into host_map: %06x\n",
+                    key);
+            return;
+        }
+        ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff,
+            "replace existing vha in host_map %p %06x\n", vha, key);
+        btree_update32(&vha->hw->tgt.host_map, key, vha);
         break;
     case RESET_VP_IDX:
         vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL;
         break;
     case RESET_AL_PA:
-        vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = 0;
+        ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff,
+            "clear vha in host_map %p %06x\n", vha, key);
+        slot = btree_lookup32(&vha->hw->tgt.host_map, key);
+        if (slot)
+            btree_remove32(&vha->hw->tgt.host_map, key);
+        vha->d_id.b24 = 0;
        break;
     }
 }
 
+void qlt_update_host_map(struct scsi_qla_host *vha, port_id_t id)
+{
+    unsigned long flags;
+    struct qla_hw_data *ha = vha->hw;
+
+    if (!vha->d_id.b24) {
+        spin_lock_irqsave(&ha->vport_slock, flags);
+        vha->d_id = id;
+        qlt_update_vp_map(vha, SET_AL_PA);
+        spin_unlock_irqrestore(&ha->vport_slock, flags);
+    } else if (vha->d_id.b24 != id.b24) {
+        spin_lock_irqsave(&ha->vport_slock, flags);
+        qlt_update_vp_map(vha, RESET_AL_PA);
+        vha->d_id = id;
+        qlt_update_vp_map(vha, SET_AL_PA);
+        spin_unlock_irqrestore(&ha->vport_slock, flags);
+    }
+}
+
 static int __init qlt_parse_ini_mode(void)
 {
     if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_EXCLUSIVE) == 0)
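
[Annotation, not part of the commit] qlt_find_host_by_d_id() and
qlt_update_vp_map() above now key a btree (the kernel's lib/btree API:
btree_lookup32()/btree_insert32()/btree_update32()/btree_remove32()) on the
full 24-bit FC port ID instead of indexing tgt_vp_map by AL_PA alone, which
stops being unique once fabric-assigned domain and area bytes come into
play. A minimal sketch of just the key packing, matching the shifts in the
hunks above; did_to_key() is an illustrative name:

    #include <stdint.h>
    #include <stdio.h>

    /* Pack a 24-bit FC port id (domain.area.al_pa) into a 32-bit key,
     * as the driver does before calling btree_lookup32(). */
    static uint32_t did_to_key(const uint8_t d_id[3])
    {
        return ((uint32_t)d_id[0] << 16) |      /* domain */
               ((uint32_t)d_id[1] << 8)  |      /* area   */
                (uint32_t)d_id[2];              /* al_pa  */
    }

    int main(void)
    {
        const uint8_t d_id[3] = { 0x01, 0x02, 0xef };   /* 01.02.ef */

        printf("key = %06x\n", did_to_key(d_id));       /* key = 0102ef */
        return 0;
    }

Keying on all three bytes also lets qlt_remove_target_resources() tear the
whole map down with one btree_for_each_safe32() walk, as shown earlier.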
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index a7f90dcaae37..d64420251194 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -378,6 +378,14 @@ static inline void adjust_corrupted_atio(struct atio_from_isp *atio)
     atio->u.isp24.fcp_cmnd.add_cdb_len = 0;
 }
 
+static inline int get_datalen_for_atio(struct atio_from_isp *atio)
+{
+    int len = atio->u.isp24.fcp_cmnd.add_cdb_len;
+
+    return (be32_to_cpu(get_unaligned((uint32_t *)
+        &atio->u.isp24.fcp_cmnd.add_cdb[len * 4])));
+}
+
 #define CTIO_TYPE7 0x12 /* Continue target I/O entry (for 24xx) */
 
 /*
@@ -667,7 +675,6 @@ struct qla_tgt_func_tmpl {
     int (*handle_cmd)(struct scsi_qla_host *, struct qla_tgt_cmd *,
             unsigned char *, uint32_t, int, int, int);
     void (*handle_data)(struct qla_tgt_cmd *);
-    void (*handle_dif_err)(struct qla_tgt_cmd *);
     int (*handle_tmr)(struct qla_tgt_mgmt_cmd *, uint32_t, uint16_t,
             uint32_t);
     void (*free_cmd)(struct qla_tgt_cmd *);
@@ -684,6 +691,9 @@ struct qla_tgt_func_tmpl {
     void (*clear_nacl_from_fcport_map)(struct fc_port *);
     void (*put_sess)(struct fc_port *);
     void (*shutdown_sess)(struct fc_port *);
+    int (*get_dif_tags)(struct qla_tgt_cmd *cmd, uint16_t *pfw_prot_opts);
+    int (*chk_dif_tags)(uint32_t tag);
+    void (*add_target)(struct scsi_qla_host *);
 };
 
 int qla2x00_wait_for_hba_online(struct scsi_qla_host *);
@@ -720,8 +730,8 @@ int qla2x00_wait_for_hba_online(struct scsi_qla_host *);
 #define QLA_TGT_ABORT_ALL           0xFFFE
 #define QLA_TGT_NEXUS_LOSS_SESS     0xFFFD
 #define QLA_TGT_NEXUS_LOSS          0xFFFC
 #define QLA_TGT_ABTS                0xFFFB
 #define QLA_TGT_2G_ABORT_TASK       0xFFFA
 
 /* Notify Acknowledge flags */
 #define NOTIFY_ACK_RES_COUNT        BIT_8
@@ -845,6 +855,7 @@ enum trace_flags {
     TRC_CMD_FREE = BIT_17,
     TRC_DATA_IN = BIT_18,
     TRC_ABORT = BIT_19,
+    TRC_DIF_ERR = BIT_20,
 };
 
 struct qla_tgt_cmd {
@@ -862,7 +873,6 @@ struct qla_tgt_cmd {
     unsigned int sg_mapped:1;
     unsigned int free_sg:1;
     unsigned int write_data_transferred:1;
-    unsigned int ctx_dsd_alloced:1;
     unsigned int q_full:1;
     unsigned int term_exchg:1;
     unsigned int cmd_sent_to_fw:1;
@@ -885,11 +895,25 @@ struct qla_tgt_cmd {
     struct list_head cmd_list;
 
     struct atio_from_isp atio;
-    /* t10dif */
+
+    uint8_t ctx_dsd_alloced;
+
+    /* T10-DIF */
+#define DIF_ERR_NONE 0
+#define DIF_ERR_GRD 1
+#define DIF_ERR_REF 2
+#define DIF_ERR_APP 3
+    int8_t dif_err_code;
     struct scatterlist *prot_sg;
     uint32_t prot_sg_cnt;
-    uint32_t blk_sz;
+    uint32_t blk_sz, num_blks;
+    uint8_t scsi_status, sense_key, asc, ascq;
+
     struct crc_context *ctx;
+    uint8_t *cdb;
+    uint64_t lba;
+    uint16_t a_guard, e_guard, a_app_tag, e_app_tag;
+    uint32_t a_ref_tag, e_ref_tag;
 
     uint64_t jiffies_at_alloc;
     uint64_t jiffies_at_free;
@@ -1053,4 +1077,7 @@ extern int qlt_free_qfull_cmds(struct scsi_qla_host *);
 extern void qlt_logo_completion_handler(fc_port_t *, int);
 extern void qlt_do_generation_tick(struct scsi_qla_host *, int *);
 
+void qlt_send_resp_ctio(scsi_qla_host_t *, struct qla_tgt_cmd *, uint8_t,
+    uint8_t, uint8_t, uint8_t);
+
 #endif /* __QLA_TARGET_H */
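
[Annotation, not part of the commit] qlt_send_resp_ctio(), declared above,
answers a DIF failure with an 18-byte fixed-format sense block: response
code 0x70, sense key ABORTED COMMAND (0x0b), ASC 0x10 with ASCQ 1/2/3 for
guard/application/reference tag errors, and response_len 18 = 8-byte header
plus 0x0a additional bytes. A plain-C sketch of that layout follows; the
function name is illustrative, and the driver itself packs the same bytes
into the CTIO's 32-bit sense words rather than a flat buffer:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static void build_fixed_sense(uint8_t buf[18], uint8_t key,
                                  uint8_t asc, uint8_t ascq)
    {
        memset(buf, 0, 18);
        buf[0] = 0x70;      /* current error, fixed format */
        buf[2] = key;       /* ABORTED COMMAND = 0x0b */
        buf[7] = 0x0a;      /* additional sense length -> 18 bytes total */
        buf[12] = asc;      /* 0x10 = protection (DIF) error */
        buf[13] = ascq;     /* 1 = guard, 2 = app tag, 3 = ref tag */
    }

    int main(void)
    {
        uint8_t sense[18];
        int i;

        build_fixed_sense(sense, 0x0b, 0x10, 0x01);     /* guard tag error */
        for (i = 0; i < 18; i++)
            printf("%02x ", sense[i]);
        printf("\n");
        return 0;
    }

The ASCQ values here are the same ones qlt_handle_dif_error() selects from
cmd->dif_err_code before calling qlt_send_resp_ctio().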
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 3cb1964b7786..45bc84e8e3bf 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,9 +7,9 @@
 /*
  * Driver version
  */
-#define QLA2XXX_VERSION     "8.07.00.38-k"
+#define QLA2XXX_VERSION     "9.00.00.00-k"
 
-#define QLA_DRIVER_MAJOR_VER    8
-#define QLA_DRIVER_MINOR_VER    7
+#define QLA_DRIVER_MAJOR_VER    9
+#define QLA_DRIVER_MINOR_VER    0
 #define QLA_DRIVER_PATCH_VER    0
 #define QLA_DRIVER_BETA_VER     0
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 8e8ab0fa9672..7443e4efa3ae 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -531,6 +531,24 @@ static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
 		return;
 	}
 
+	switch (cmd->dif_err_code) {
+	case DIF_ERR_GRD:
+		cmd->se_cmd.pi_err =
+		    TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
+		break;
+	case DIF_ERR_REF:
+		cmd->se_cmd.pi_err =
+		    TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
+		break;
+	case DIF_ERR_APP:
+		cmd->se_cmd.pi_err =
+		    TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED;
+		break;
+	case DIF_ERR_NONE:
+	default:
+		break;
+	}
+
 	if (cmd->se_cmd.pi_err)
 		transport_generic_request_failure(&cmd->se_cmd,
 			cmd->se_cmd.pi_err);
@@ -555,25 +573,23 @@ static void tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd)
 	queue_work_on(smp_processor_id(), tcm_qla2xxx_free_wq, &cmd->work);
 }
 
-static void tcm_qla2xxx_handle_dif_work(struct work_struct *work)
+static int tcm_qla2xxx_chk_dif_tags(uint32_t tag)
 {
-	struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
-
-	/* take an extra kref to prevent cmd free too early.
-	 * need to wait for SCSI status/check condition to
-	 * finish responding generate by transport_generic_request_failure.
-	 */
-	kref_get(&cmd->se_cmd.cmd_kref);
-	transport_generic_request_failure(&cmd->se_cmd, cmd->se_cmd.pi_err);
+	return 0;
 }
 
-/*
- * Called from qla_target.c:qlt_do_ctio_completion()
- */
-static void tcm_qla2xxx_handle_dif_err(struct qla_tgt_cmd *cmd)
+static int tcm_qla2xxx_dif_tags(struct qla_tgt_cmd *cmd,
+    uint16_t *pfw_prot_opts)
 {
-	INIT_WORK(&cmd->work, tcm_qla2xxx_handle_dif_work);
-	queue_work(tcm_qla2xxx_free_wq, &cmd->work);
+	struct se_cmd *se_cmd = &cmd->se_cmd;
+
+	if (!(se_cmd->prot_checks & TARGET_DIF_CHECK_GUARD))
+		*pfw_prot_opts |= PO_DISABLE_GUARD_CHECK;
+
+	if (!(se_cmd->prot_checks & TARGET_DIF_CHECK_APPTAG))
+		*pfw_prot_opts |= PO_DIS_APP_TAG_VALD;
+
+	return 0;
 }
 
 /*
@@ -1610,7 +1626,6 @@ static void tcm_qla2xxx_update_sess(struct fc_port *sess, port_id_t s_id,
 static struct qla_tgt_func_tmpl tcm_qla2xxx_template = {
 	.handle_cmd		= tcm_qla2xxx_handle_cmd,
 	.handle_data		= tcm_qla2xxx_handle_data,
-	.handle_dif_err		= tcm_qla2xxx_handle_dif_err,
 	.handle_tmr		= tcm_qla2xxx_handle_tmr,
 	.free_cmd		= tcm_qla2xxx_free_cmd,
 	.free_mcmd		= tcm_qla2xxx_free_mcmd,
@@ -1622,6 +1637,8 @@ static struct qla_tgt_func_tmpl tcm_qla2xxx_template = {
 	.clear_nacl_from_fcport_map = tcm_qla2xxx_clear_nacl_from_fcport_map,
 	.put_sess		= tcm_qla2xxx_put_sess,
 	.shutdown_sess		= tcm_qla2xxx_shutdown_sess,
+	.get_dif_tags		= tcm_qla2xxx_dif_tags,
+	.chk_dif_tags		= tcm_qla2xxx_chk_dif_tags,
 };
 
 static int tcm_qla2xxx_init_lport(struct tcm_qla2xxx_lport *lport)
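
The two new template hooks split T10-DIF handling between fabric and HBA driver: get_dif_tags() lets the fabric translate the backend's requested protection checks (se_cmd->prot_checks) into firmware protection-option bits before the command starts, while the completion side records the failing check class in cmd->dif_err_code for tcm_qla2xxx_handle_data_work() to map onto the generic TCM protection sense codes. A minimal sketch of how the HBA side might consume the hook; the call-site shape below is illustrative, not the driver's exact code:

	/* Sketch (hypothetical call site): consuming get_dif_tags() before
	 * building the CRC context. The tgt_ops indirection follows the
	 * qla2xxx template pattern from the hunks above.
	 */
	uint16_t fw_prot_opts = 0;

	if (ha->tgt.tgt_ops->get_dif_tags)
		ha->tgt.tgt_ops->get_dif_tags(cmd, &fw_prot_opts);
	/* fw_prot_opts now carries PO_DISABLE_GUARD_CHECK /
	 * PO_DIS_APP_TAG_VALD bits for checks the backstore opted out of.
	 */
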
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index ba2286652ff6..e5a2d590a104 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -496,7 +496,7 @@ static void scsi_run_queue(struct request_queue *q)
 		scsi_starved_list_run(sdev->host);
 
 	if (q->mq_ops)
-		blk_mq_start_stopped_hw_queues(q, false);
+		blk_mq_run_hw_queues(q, false);
 	else
 		blk_run_queue(q);
 }
@@ -667,7 +667,7 @@ static bool scsi_end_request(struct request *req, int error,
 		    !list_empty(&sdev->host->starved_list))
 			kblockd_schedule_work(&sdev->requeue_work);
 		else
-			blk_mq_start_stopped_hw_queues(q, true);
+			blk_mq_run_hw_queues(q, true);
 	} else {
 		unsigned long flags;
 
@@ -1974,7 +1974,7 @@ out:
 	case BLK_MQ_RQ_QUEUE_BUSY:
 		if (atomic_read(&sdev->device_busy) == 0 &&
 		    !scsi_device_blocked(sdev))
-			blk_mq_delay_queue(hctx, SCSI_QUEUE_DELAY);
+			blk_mq_delay_run_hw_queue(hctx, SCSI_QUEUE_DELAY);
 		break;
 	case BLK_MQ_RQ_QUEUE_ERROR:
 		/*
@@ -2932,6 +2932,8 @@ EXPORT_SYMBOL(scsi_target_resume);
 /**
  * scsi_internal_device_block - internal function to put a device temporarily into the SDEV_BLOCK state
  * @sdev: device to block
+ * @wait: Whether or not to wait until ongoing .queuecommand() /
+ *	.queue_rq() calls have finished.
  *
  * Block request made by scsi lld's to temporarily stop all
  * scsi commands on the specified device. May sleep.
@@ -2949,7 +2951,7 @@ EXPORT_SYMBOL(scsi_target_resume);
  * remove the rport mutex lock and unlock calls from srp_queuecommand().
  */
 int
-scsi_internal_device_block(struct scsi_device *sdev)
+scsi_internal_device_block(struct scsi_device *sdev, bool wait)
 {
 	struct request_queue *q = sdev->request_queue;
 	unsigned long flags;
@@ -2969,12 +2971,16 @@ scsi_internal_device_block(struct scsi_device *sdev)
 	 * request queue.
 	 */
 	if (q->mq_ops) {
-		blk_mq_quiesce_queue(q);
+		if (wait)
+			blk_mq_quiesce_queue(q);
+		else
+			blk_mq_stop_hw_queues(q);
 	} else {
 		spin_lock_irqsave(q->queue_lock, flags);
 		blk_stop_queue(q);
 		spin_unlock_irqrestore(q->queue_lock, flags);
-		scsi_wait_for_queuecommand(sdev);
+		if (wait)
+			scsi_wait_for_queuecommand(sdev);
 	}
 
 	return 0;
@@ -3036,7 +3042,7 @@ EXPORT_SYMBOL_GPL(scsi_internal_device_unblock);
 static void
 device_block(struct scsi_device *sdev, void *data)
 {
-	scsi_internal_device_block(sdev);
+	scsi_internal_device_block(sdev, true);
 }
 
 static int
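
The new wait parameter makes the blocking primitive usable from contexts that cannot drain: with wait set, the queue is quiesced (blk_mq_quiesce_queue(), or scsi_wait_for_queuecommand() on the legacy path) so no .queuecommand()/.queue_rq() call can still be running on return; without it, the hardware queues are merely stopped. A sketch of the caller-side choice, where can_sleep is a hypothetical condition:

	/* Sketch: choosing drain-vs-stop semantics with the new flag. */
	if (can_sleep)
		scsi_internal_device_block(sdev, true);		/* quiesce and drain */
	else
		scsi_internal_device_block(sdev, false);	/* stop queues only */
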
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index 99bfc985e190..f11bd102d6d5 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -188,8 +188,5 @@ static inline void scsi_dh_remove_device(struct scsi_device *sdev) { }
  */
 
 #define SCSI_DEVICE_BLOCK_MAX_TIMEOUT	600	/* units in seconds */
-extern int scsi_internal_device_block(struct scsi_device *sdev);
-extern int scsi_internal_device_unblock(struct scsi_device *sdev,
-					enum scsi_device_state new_state);
 
 #endif /* _SCSI_PRIV_H */
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index d277e8620e3e..fcfeddc79331 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1783,6 +1783,8 @@ static int sd_done(struct scsi_cmnd *SCpnt)
 {
 	int result = SCpnt->result;
 	unsigned int good_bytes = result ? 0 : scsi_bufflen(SCpnt);
+	unsigned int sector_size = SCpnt->device->sector_size;
+	unsigned int resid;
 	struct scsi_sense_hdr sshdr;
 	struct scsi_disk *sdkp = scsi_disk(SCpnt->request->rq_disk);
 	struct request *req = SCpnt->request;
@@ -1813,6 +1815,21 @@
 			scsi_set_resid(SCpnt, blk_rq_bytes(req));
 		}
 		break;
+	default:
+		/*
+		 * In case of bogus fw or device, we could end up having
+		 * an unaligned partial completion. Check this here and force
+		 * alignment.
+		 */
+		resid = scsi_get_resid(SCpnt);
+		if (resid & (sector_size - 1)) {
+			sd_printk(KERN_INFO, sdkp,
+				"Unaligned partial completion (resid=%u, sector_sz=%u)\n",
+				resid, sector_size);
+			resid = min(scsi_bufflen(SCpnt),
+				    round_up(resid, sector_size));
+			scsi_set_resid(SCpnt, resid);
+		}
 	}
 
 	if (result) {
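
The new default case guards against devices that complete a transfer partially at a non-sector boundary: a residual that is not a multiple of the sector size is rounded up to the next sector and clamped to the transfer length. For example, resid = 1000 with 512-byte sectors is unaligned (1000 & 511 = 488) and becomes round_up(1000, 512) = 1024. The same arithmetic, self-contained, with the kernel's round_up() expanded:

	/* Sketch: the alignment fixup from the hunk above; sector_size must
	 * be a power of two for the mask arithmetic to hold.
	 */
	unsigned int resid = 1000, sector_size = 512, bufflen = 4096;

	if (resid & (sector_size - 1)) {	/* 1000 & 511 = 488 -> unaligned */
		unsigned int aligned = (resid + sector_size - 1) & ~(sector_size - 1);
		resid = aligned < bufflen ? aligned : bufflen;	/* min(4096, 1024) = 1024 */
	}
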
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 29b86505f796..225abaad4d1c 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -996,6 +996,8 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
 		result = get_user(val, ip);
 		if (result)
 			return result;
+		if (val > SG_MAX_CDB_SIZE)
+			return -ENOMEM;
 		sfp->next_cmd_len = (val > 0) ? val : 0;
 		return 0;
 	case SG_GET_VERSION_NUM:
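
sfp->next_cmd_len is later used as the CDB length for the next request, so an unchecked user-supplied value could index past sg's fixed SG_MAX_CDB_SIZE command buffer; the new test rejects oversized values up front (returning -ENOMEM, as chosen in the hunk). The pattern in isolation, with cmnd[] standing in for the per-request CDB buffer:

	/* Sketch: clamp a user-controlled length against the fixed buffer it
	 * will later index.
	 */
	unsigned char cmnd[SG_MAX_CDB_SIZE];

	if (val > sizeof(cmnd))
		return -ENOMEM;
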
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 638e5f427c90..016639d7fef1 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -400,8 +400,6 @@ MODULE_PARM_DESC(storvsc_vcpus_per_sub_channel, "Ratio of VCPUs to subchannels")
  */
 static int storvsc_timeout = 180;
 
-static int msft_blist_flags = BLIST_TRY_VPD_PAGES;
-
 #if IS_ENABLED(CONFIG_SCSI_FC_ATTRS)
 static struct scsi_transport_template *fc_transport_template;
 #endif
@@ -1383,6 +1381,22 @@ static int storvsc_do_io(struct hv_device *device,
 	return ret;
 }
 
+static int storvsc_device_alloc(struct scsi_device *sdevice)
+{
+	/*
+	 * Set blist flag to permit the reading of the VPD pages even when
+	 * the target may claim SPC-2 compliance. MSFT targets currently
+	 * claim SPC-2 compliance while they implement post SPC-2 features.
+	 * With this flag we can correctly handle WRITE_SAME_16 issues.
+	 *
+	 * Hypervisor reports SCSI_UNKNOWN type for DVD ROM device but
+	 * still supports REPORT LUN.
+	 */
+	sdevice->sdev_bflags = BLIST_REPORTLUN2 | BLIST_TRY_VPD_PAGES;
+
+	return 0;
+}
+
 static int storvsc_device_configure(struct scsi_device *sdevice)
 {
 
@@ -1396,14 +1410,6 @@ static int storvsc_device_configure(struct scsi_device *sdevice)
 	sdevice->no_write_same = 1;
 
 	/*
-	 * Add blist flags to permit the reading of the VPD pages even when
-	 * the target may claim SPC-2 compliance. MSFT targets currently
-	 * claim SPC-2 compliance while they implement post SPC-2 features.
-	 * With this patch we can correctly handle WRITE_SAME_16 issues.
-	 */
-	sdevice->sdev_bflags |= msft_blist_flags;
-
-	/*
 	 * If the host is WIN8 or WIN8 R2, claim conformance to SPC-3
 	 * if the device is a MSFT virtual device. If the host is
 	 * WIN10 or newer, allow write_same.
@@ -1661,6 +1667,7 @@ static struct scsi_host_template scsi_driver = {
 	.eh_host_reset_handler = storvsc_host_reset_handler,
 	.proc_name =		"storvsc_host",
 	.eh_timed_out =		storvsc_eh_timed_out,
+	.slave_alloc =		storvsc_device_alloc,
 	.slave_configure =	storvsc_device_configure,
 	.cmd_per_lun =		255,
 	.this_id =		-1,
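
Moving the blist assignment from .slave_configure to a new .slave_alloc hook matters because the midlayer consults sdev_bflags while scanning the device (INQUIRY/VPD/REPORT LUNS), and in the standard scan flow that happens after slave_alloc but before slave_configure; assigned in slave_configure, the flags arrived too late to influence the scan. Note also that `=` replaces the old `|=`, which is safe on a freshly allocated sdev. A minimal sketch of the hook shape (names illustrative):

	/* Sketch: per-device blist flags belong in .slave_alloc so they are
	 * visible during scanning; wiring as in the host-template hunk above.
	 */
	static int my_slave_alloc(struct scsi_device *sdev)
	{
		sdev->sdev_bflags = BLIST_REPORTLUN2 | BLIST_TRY_VPD_PAGES;
		return 0;
	}
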
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index 318e4a1f76c9..54deeb754db5 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -146,7 +146,7 @@ enum attr_idn {
 /* Descriptor idn for Query requests */
 enum desc_idn {
 	QUERY_DESC_IDN_DEVICE		= 0x0,
-	QUERY_DESC_IDN_CONFIGURAION	= 0x1,
+	QUERY_DESC_IDN_CONFIGURATION	= 0x1,
 	QUERY_DESC_IDN_UNIT		= 0x2,
 	QUERY_DESC_IDN_RFU_0		= 0x3,
 	QUERY_DESC_IDN_INTERCONNECT	= 0x4,
@@ -162,19 +162,13 @@ enum desc_header_offset {
 	QUERY_DESC_DESC_TYPE_OFFSET	= 0x01,
 };
 
-enum ufs_desc_max_size {
-	QUERY_DESC_DEVICE_MAX_SIZE		= 0x40,
-	QUERY_DESC_CONFIGURAION_MAX_SIZE	= 0x90,
-	QUERY_DESC_UNIT_MAX_SIZE		= 0x23,
-	QUERY_DESC_INTERCONNECT_MAX_SIZE	= 0x06,
-	/*
-	 * Max. 126 UNICODE characters (2 bytes per character) plus 2 bytes
-	 * of descriptor header.
-	 */
-	QUERY_DESC_STRING_MAX_SIZE		= 0xFE,
-	QUERY_DESC_GEOMETRY_MAX_SIZE		= 0x44,
-	QUERY_DESC_POWER_MAX_SIZE		= 0x62,
-	QUERY_DESC_RFU_MAX_SIZE			= 0x00,
+enum ufs_desc_def_size {
+	QUERY_DESC_DEVICE_DEF_SIZE		= 0x40,
+	QUERY_DESC_CONFIGURATION_DEF_SIZE	= 0x90,
+	QUERY_DESC_UNIT_DEF_SIZE		= 0x23,
+	QUERY_DESC_INTERCONNECT_DEF_SIZE	= 0x06,
+	QUERY_DESC_GEOMETRY_DEF_SIZE		= 0x44,
+	QUERY_DESC_POWER_DEF_SIZE		= 0x62,
 };
 
 /* Unit descriptor parameters offsets in bytes*/
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
index a72a4ba78125..8e5e6c04c035 100644
--- a/drivers/scsi/ufs/ufshcd-pltfrm.c
+++ b/drivers/scsi/ufs/ufshcd-pltfrm.c
@@ -309,8 +309,8 @@ int ufshcd_pltfrm_init(struct platform_device *pdev,
 
 	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	mmio_base = devm_ioremap_resource(dev, mem_res);
-	if (IS_ERR(*(void **)&mmio_base)) {
-		err = PTR_ERR(*(void **)&mmio_base);
+	if (IS_ERR(mmio_base)) {
+		err = PTR_ERR(mmio_base);
 		goto out;
 	}
 
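
devm_ioremap_resource() already returns an ERR_PTR-encoded `void __iomem *`, and IS_ERR()/PTR_ERR() accept a pointer directly, so the `*(void **)&` casts were pure noise; the fix restores the standard idiom:

	/* Sketch: the canonical devm_ioremap_resource() error check. */
	void __iomem *base = devm_ioremap_resource(dev, res);

	if (IS_ERR(base))
		return PTR_ERR(base);
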
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index dc6efbd1be8e..096e95b911bd 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -100,19 +100,6 @@
 #define ufshcd_hex_dump(prefix_str, buf, len) \
 print_hex_dump(KERN_ERR, prefix_str, DUMP_PREFIX_OFFSET, 16, 4, buf, len, false)
 
-static u32 ufs_query_desc_max_size[] = {
-	QUERY_DESC_DEVICE_MAX_SIZE,
-	QUERY_DESC_CONFIGURAION_MAX_SIZE,
-	QUERY_DESC_UNIT_MAX_SIZE,
-	QUERY_DESC_RFU_MAX_SIZE,
-	QUERY_DESC_INTERCONNECT_MAX_SIZE,
-	QUERY_DESC_STRING_MAX_SIZE,
-	QUERY_DESC_RFU_MAX_SIZE,
-	QUERY_DESC_GEOMETRY_MAX_SIZE,
-	QUERY_DESC_POWER_MAX_SIZE,
-	QUERY_DESC_RFU_MAX_SIZE,
-};
-
 enum {
 	UFSHCD_MAX_CHANNEL	= 0,
 	UFSHCD_MAX_ID		= 1,
@@ -2857,7 +2844,7 @@ static int __ufshcd_query_descriptor(struct ufs_hba *hba,
 		goto out;
 	}
 
-	if (*buf_len <= QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
+	if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
 		dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
 			__func__, *buf_len);
 		err = -EINVAL;
@@ -2938,6 +2925,92 @@ static int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
 }
 
 /**
+ * ufshcd_read_desc_length - read the specified descriptor length from header
+ * @hba: Pointer to adapter instance
+ * @desc_id: descriptor idn value
+ * @desc_index: descriptor index
+ * @desc_length: pointer to variable to read the length of descriptor
+ *
+ * Return 0 in case of success, non-zero otherwise
+ */
+static int ufshcd_read_desc_length(struct ufs_hba *hba,
+	enum desc_idn desc_id,
+	int desc_index,
+	int *desc_length)
+{
+	int ret;
+	u8 header[QUERY_DESC_HDR_SIZE];
+	int header_len = QUERY_DESC_HDR_SIZE;
+
+	if (desc_id >= QUERY_DESC_IDN_MAX)
+		return -EINVAL;
+
+	ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
+					desc_id, desc_index, 0, header,
+					&header_len);
+
+	if (ret) {
+		dev_err(hba->dev, "%s: Failed to get descriptor header id %d",
+			__func__, desc_id);
+		return ret;
+	} else if (desc_id != header[QUERY_DESC_DESC_TYPE_OFFSET]) {
+		dev_warn(hba->dev, "%s: descriptor header id %d and desc_id %d mismatch",
+			__func__, header[QUERY_DESC_DESC_TYPE_OFFSET],
+			desc_id);
+		ret = -EINVAL;
+	}
+
+	*desc_length = header[QUERY_DESC_LENGTH_OFFSET];
+	return ret;
+
+}
+
+/**
+ * ufshcd_map_desc_id_to_length - map descriptor IDN to its length
+ * @hba: Pointer to adapter instance
+ * @desc_id: descriptor idn value
+ * @desc_len: mapped desc length (out)
+ *
+ * Return 0 in case of success, non-zero otherwise
+ */
+int ufshcd_map_desc_id_to_length(struct ufs_hba *hba,
+	enum desc_idn desc_id, int *desc_len)
+{
+	switch (desc_id) {
+	case QUERY_DESC_IDN_DEVICE:
+		*desc_len = hba->desc_size.dev_desc;
+		break;
+	case QUERY_DESC_IDN_POWER:
+		*desc_len = hba->desc_size.pwr_desc;
+		break;
+	case QUERY_DESC_IDN_GEOMETRY:
+		*desc_len = hba->desc_size.geom_desc;
+		break;
+	case QUERY_DESC_IDN_CONFIGURATION:
+		*desc_len = hba->desc_size.conf_desc;
+		break;
+	case QUERY_DESC_IDN_UNIT:
+		*desc_len = hba->desc_size.unit_desc;
+		break;
+	case QUERY_DESC_IDN_INTERCONNECT:
+		*desc_len = hba->desc_size.interc_desc;
+		break;
+	case QUERY_DESC_IDN_STRING:
+		*desc_len = QUERY_DESC_MAX_SIZE;
+		break;
+	case QUERY_DESC_IDN_RFU_0:
+	case QUERY_DESC_IDN_RFU_1:
+		*desc_len = 0;
+		break;
+	default:
+		*desc_len = 0;
+		return -EINVAL;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(ufshcd_map_desc_id_to_length);
+
+/**
  * ufshcd_read_desc_param - read the specified descriptor parameter
  * @hba: Pointer to adapter instance
  * @desc_id: descriptor idn value
@@ -2951,42 +3024,49 @@ static int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
 static int ufshcd_read_desc_param(struct ufs_hba *hba,
 				  enum desc_idn desc_id,
 				  int desc_index,
-				  u32 param_offset,
+				  u8 param_offset,
 				  u8 *param_read_buf,
-				  u32 param_size)
+				  u8 param_size)
 {
 	int ret;
 	u8 *desc_buf;
-	u32 buff_len;
+	int buff_len;
 	bool is_kmalloc = true;
 
-	/* safety checks */
-	if (desc_id >= QUERY_DESC_IDN_MAX)
+	/* Safety check */
+	if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
 		return -EINVAL;
 
-	buff_len = ufs_query_desc_max_size[desc_id];
-	if ((param_offset + param_size) > buff_len)
-		return -EINVAL;
+	/* Get the max length of descriptor from structure filled up at probe
+	 * time.
+	 */
+	ret = ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len);
 
-	if (!param_offset && (param_size == buff_len)) {
-		/* memory space already available to hold full descriptor */
-		desc_buf = param_read_buf;
-		is_kmalloc = false;
-	} else {
-		/* allocate memory to hold full descriptor */
+	/* Sanity checks */
+	if (ret || !buff_len) {
+		dev_err(hba->dev, "%s: Failed to get full descriptor length",
+			__func__);
+		return ret;
+	}
+
+	/* Check whether we need temp memory */
+	if (param_offset != 0 || param_size < buff_len) {
 		desc_buf = kmalloc(buff_len, GFP_KERNEL);
 		if (!desc_buf)
 			return -ENOMEM;
+	} else {
+		desc_buf = param_read_buf;
+		is_kmalloc = false;
 	}
 
+	/* Request for full descriptor */
 	ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
-					desc_id, desc_index, 0, desc_buf,
-					&buff_len);
+					desc_id, desc_index, 0,
+					desc_buf, &buff_len);
 
 	if (ret) {
 		dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d",
 			__func__, desc_id, desc_index, param_offset, ret);
-
 		goto out;
 	}
 
@@ -2998,25 +3078,9 @@ static int ufshcd_read_desc_param(struct ufs_hba *hba,
 		goto out;
 	}
 
-	/*
-	 * While reading variable size descriptors (like string descriptor),
-	 * some UFS devices may report the "LENGTH" (field in "Transaction
-	 * Specific fields" of Query Response UPIU) same as what was requested
-	 * in Query Request UPIU instead of reporting the actual size of the
-	 * variable size descriptor.
-	 * Although it's safe to ignore the "LENGTH" field for variable size
-	 * descriptors as we can always derive the length of the descriptor from
-	 * the descriptor header fields. Hence this change impose the length
-	 * match check only for fixed size descriptors (for which we always
-	 * request the correct size as part of Query Request UPIU).
-	 */
-	if ((desc_id != QUERY_DESC_IDN_STRING) &&
-	    (buff_len != desc_buf[QUERY_DESC_LENGTH_OFFSET])) {
-		dev_err(hba->dev, "%s: desc_buf length mismatch: buff_len %d, buff_len(desc_header) %d",
-			__func__, buff_len, desc_buf[QUERY_DESC_LENGTH_OFFSET]);
-		ret = -EINVAL;
-		goto out;
-	}
+	/* Check whether we will not copy more data than available */
+	if (is_kmalloc && param_size > buff_len)
+		param_size = buff_len;
 
 	if (is_kmalloc)
 		memcpy(param_read_buf, &desc_buf[param_offset], param_size);
@@ -4598,8 +4662,6 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
 	}
 	if (ufshcd_is_clkscaling_supported(hba))
 		hba->clk_scaling.active_reqs--;
-	if (ufshcd_is_clkscaling_supported(hba))
-		hba->clk_scaling.active_reqs--;
 	}
 
 	/* clear corresponding bits of completed commands */
@@ -5919,8 +5981,8 @@ static int ufshcd_set_icc_levels_attr(struct ufs_hba *hba, u32 icc_level)
 static void ufshcd_init_icc_levels(struct ufs_hba *hba)
 {
 	int ret;
-	int buff_len = QUERY_DESC_POWER_MAX_SIZE;
-	u8 desc_buf[QUERY_DESC_POWER_MAX_SIZE];
+	int buff_len = hba->desc_size.pwr_desc;
+	u8 desc_buf[hba->desc_size.pwr_desc];
 
 	ret = ufshcd_read_power_desc(hba, desc_buf, buff_len);
 	if (ret) {
@@ -6017,11 +6079,10 @@ static int ufs_get_device_desc(struct ufs_hba *hba,
 {
 	int err;
 	u8 model_index;
-	u8 str_desc_buf[QUERY_DESC_STRING_MAX_SIZE + 1] = {0};
-	u8 desc_buf[QUERY_DESC_DEVICE_MAX_SIZE];
+	u8 str_desc_buf[QUERY_DESC_MAX_SIZE + 1] = {0};
+	u8 desc_buf[hba->desc_size.dev_desc];
 
-	err = ufshcd_read_device_desc(hba, desc_buf,
-			QUERY_DESC_DEVICE_MAX_SIZE);
+	err = ufshcd_read_device_desc(hba, desc_buf, hba->desc_size.dev_desc);
 	if (err) {
 		dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
 			__func__, err);
@@ -6038,14 +6099,14 @@ static int ufs_get_device_desc(struct ufs_hba *hba,
 	model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
 
 	err = ufshcd_read_string_desc(hba, model_index, str_desc_buf,
-				QUERY_DESC_STRING_MAX_SIZE, ASCII_STD);
+				QUERY_DESC_MAX_SIZE, ASCII_STD);
 	if (err) {
 		dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
 			__func__, err);
 		goto out;
 	}
 
-	str_desc_buf[QUERY_DESC_STRING_MAX_SIZE] = '\0';
+	str_desc_buf[QUERY_DESC_MAX_SIZE] = '\0';
 	strlcpy(dev_desc->model, (str_desc_buf + QUERY_DESC_HDR_SIZE),
 		min_t(u8, str_desc_buf[QUERY_DESC_LENGTH_OFFSET],
 		      MAX_MODEL_LEN));
@@ -6251,6 +6312,51 @@ static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
 	hba->req_abort_count = 0;
 }
 
+static void ufshcd_init_desc_sizes(struct ufs_hba *hba)
+{
+	int err;
+
+	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_DEVICE, 0,
+		&hba->desc_size.dev_desc);
+	if (err)
+		hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
+
+	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_POWER, 0,
+		&hba->desc_size.pwr_desc);
+	if (err)
+		hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
+
+	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_INTERCONNECT, 0,
+		&hba->desc_size.interc_desc);
+	if (err)
+		hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
+
+	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_CONFIGURATION, 0,
+		&hba->desc_size.conf_desc);
+	if (err)
+		hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
+
+	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_UNIT, 0,
+		&hba->desc_size.unit_desc);
+	if (err)
+		hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
+
+	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_GEOMETRY, 0,
+		&hba->desc_size.geom_desc);
+	if (err)
+		hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
+}
+
+static void ufshcd_def_desc_sizes(struct ufs_hba *hba)
+{
+	hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
+	hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
+	hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
+	hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
+	hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
+	hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
+}
+
 /**
  * ufshcd_probe_hba - probe hba to detect device and initialize
  * @hba: per-adapter instance
@@ -6285,6 +6391,9 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
 	if (ret)
 		goto out;
 
+	/* Init check for device descriptor sizes */
+	ufshcd_init_desc_sizes(hba);
+
 	ret = ufs_get_device_desc(hba, &card);
 	if (ret) {
 		dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
@@ -6320,6 +6429,7 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
 
 	/* set the state as operational after switching to desired gear */
 	hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
+
 	/*
 	 * If we are in error handling context or in power management callbacks
 	 * context, no need to scan the host
@@ -7530,7 +7640,7 @@ static inline ssize_t ufshcd_pm_lvl_store(struct device *dev,
 	if (kstrtoul(buf, 0, &value))
 		return -EINVAL;
 
-	if ((value < UFS_PM_LVL_0) || (value >= UFS_PM_LVL_MAX))
+	if (value >= UFS_PM_LVL_MAX)
 		return -EINVAL;
 
 	spin_lock_irqsave(hba->host->host_lock, flags);
@@ -7774,6 +7884,9 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
 	hba->mmio_base = mmio_base;
 	hba->irq = irq;
 
+	/* Set descriptor lengths to specification defaults */
+	ufshcd_def_desc_sizes(hba);
+
 	err = ufshcd_hba_init(hba);
 	if (err)
 		goto out_error;
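
This rework replaces the compile-time ufs_query_desc_max_size[] table with lengths probed from the device itself: every UFS descriptor begins with a header whose byte 0 is the full descriptor length and byte 1 its type, so reading just QUERY_DESC_HDR_SIZE bytes is enough to learn the real size, which is then cached per-IDN in hba->desc_size with the spec defaults as fallback. A sketch of a consumer inside the driver sizing a device-descriptor read from the probed length (assumes the helpers from the hunks above; error handling trimmed):

	/* Sketch: size a descriptor read from the probed per-device lengths
	 * instead of a fixed maximum.
	 */
	int len, err;
	u8 *buf;

	err = ufshcd_map_desc_id_to_length(hba, QUERY_DESC_IDN_DEVICE, &len);
	if (err || !len)
		return err ? err : -EINVAL;

	buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_DEVICE, 0, 0, buf, len);
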
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 7630600217a2..cdc8bd05f7df 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -220,6 +220,15 @@ struct ufs_dev_cmd {
 	struct ufs_query query;
 };
 
+struct ufs_desc_size {
+	int dev_desc;
+	int pwr_desc;
+	int geom_desc;
+	int interc_desc;
+	int unit_desc;
+	int conf_desc;
+};
+
 /**
  * struct ufs_clk_info - UFS clock related info
  * @list: list headed by hba->clk_list_head
@@ -483,6 +492,7 @@ struct ufs_stats {
  * @clk_list_head: UFS host controller clocks list node head
  * @pwr_info: holds current power mode
  * @max_pwr_info: keeps the device max valid pwm
+ * @desc_size: descriptor sizes reported by device
  * @urgent_bkops_lvl: keeps track of urgent bkops level for device
  * @is_urgent_bkops_lvl_checked: keeps track if the urgent bkops level for
  *  device is known or not.
@@ -666,6 +676,7 @@ struct ufs_hba {
 	bool is_urgent_bkops_lvl_checked;
 
 	struct rw_semaphore clk_scaling_lock;
+	struct ufs_desc_size desc_size;
 };
 
 /* Returns true if clocks can be gated. Otherwise false */
@@ -832,6 +843,10 @@ int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
 	enum flag_idn idn, bool *flag_res);
 int ufshcd_hold(struct ufs_hba *hba, bool async);
 void ufshcd_release(struct ufs_hba *hba);
+
+int ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id,
+	int *desc_length);
+
 u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba);
 
 /* Wrapper functions for safely calling variant operations */
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
index ef474a748744..c374e3b5c678 100644
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -1487,7 +1487,7 @@ static int pvscsi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		irq_flag &= ~PCI_IRQ_MSI;
 
 	error = pci_alloc_irq_vectors(adapter->dev, 1, 1, irq_flag);
-	if (error)
+	if (error < 0)
 		goto out_reset_adapter;
 
 	adapter->use_req_threshold = pvscsi_setup_req_threshold(adapter, true);
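
pci_alloc_irq_vectors() returns the number of vectors it allocated (at least 1) on success and a negative errno on failure, so with min_vecs = max_vecs = 1 the old `if (error)` took the error path on every successful allocation; only the sign is meaningful:

	/* Sketch: canonical pci_alloc_irq_vectors() usage - a positive return
	 * is the vector count, a negative return is an errno.
	 */
	int nvec = pci_alloc_irq_vectors(pdev, 1, 4,
					 PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY);
	if (nvec < 0)
		return nvec;
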
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index 7cbad0d45b9c..6ba270e0494d 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -409,6 +409,7 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
 			ret = PTR_ERR(vmfile);
 			goto out;
 		}
+		vmfile->f_mode |= FMODE_LSEEK;
 		asma->file = vmfile;
 	}
 	get_file(asma->file);
diff --git a/drivers/staging/lustre/lnet/lnet/lib-socket.c b/drivers/staging/lustre/lnet/lnet/lib-socket.c
index b7b87ecefcdf..9fca8d225ee0 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-socket.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-socket.c
@@ -532,7 +532,7 @@ lnet_sock_accept(struct socket **newsockp, struct socket *sock)
 
 	newsock->ops = sock->ops;
 
-	rc = sock->ops->accept(sock, newsock, O_NONBLOCK);
+	rc = sock->ops->accept(sock, newsock, O_NONBLOCK, false);
 	if (rc == -EAGAIN) {
 		/* Nothing ready, so wait for activity */
 		init_waitqueue_entry(&wait, current);
@@ -540,7 +540,7 @@ lnet_sock_accept(struct socket **newsockp, struct socket *sock)
 		set_current_state(TASK_INTERRUPTIBLE);
 		schedule();
 		remove_wait_queue(sk_sleep(sock->sk), &wait);
-		rc = sock->ops->accept(sock, newsock, O_NONBLOCK);
+		rc = sock->ops->accept(sock, newsock, O_NONBLOCK, false);
 	}
 
 	if (rc)
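
The extra `false` tracks an interface change in the networking core: struct proto_ops .accept() gained a fourth `bool kern` argument (paralleling sock_create_kern()), apparently so the core can distinguish kernel-internal sockets from user-created ones, and every in-kernel caller had to be updated to thread a value through. The updated signature for reference:

	/* The proto_ops accept signature after the core change this hunk
	 * adapts to.
	 */
	int (*accept)(struct socket *sock, struct socket *newsock, int flags,
		      bool kern);
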
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index f5e330099bfc..fd7c16a7ca6e 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -43,7 +43,7 @@
 #include "target_core_ua.h"
 
 static sense_reason_t core_alua_check_transition(int state, int valid,
-					  int *primary);
+					  int *primary, int explicit);
 static int core_alua_set_tg_pt_secondary_state(
 	struct se_lun *lun, int explicit, int offline);
 
@@ -335,8 +335,8 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)
 		 * the state is a primary or secondary target port asymmetric
 		 * access state.
 		 */
-		rc = core_alua_check_transition(alua_access_state,
-				valid_states, &primary);
+		rc = core_alua_check_transition(alua_access_state, valid_states,
+						&primary, 1);
 		if (rc) {
 			/*
 			 * If the SET TARGET PORT GROUPS attempts to establish
@@ -691,7 +691,7 @@ target_alua_state_check(struct se_cmd *cmd)
 
 	if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)
 		return 0;
-	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
+	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA)
 		return 0;
 
 	/*
@@ -762,7 +762,7 @@ target_alua_state_check(struct se_cmd *cmd)
  * Check implicit and explicit ALUA state change request.
  */
 static sense_reason_t
-core_alua_check_transition(int state, int valid, int *primary)
+core_alua_check_transition(int state, int valid, int *primary, int explicit)
 {
 	/*
 	 * OPTIMIZED, NON-OPTIMIZED, STANDBY and UNAVAILABLE are
@@ -804,11 +804,14 @@
 		*primary = 0;
 		break;
 	case ALUA_ACCESS_STATE_TRANSITION:
-		/*
-		 * Transitioning is set internally, and
-		 * cannot be selected manually.
-		 */
-		goto not_supported;
+		if (!(valid & ALUA_T_SUP) || explicit)
+			/*
+			 * Transitioning is set internally and by tcmu daemon,
+			 * and cannot be selected through a STPG.
+			 */
+			goto not_supported;
+		*primary = 0;
+		break;
 	default:
 		pr_err("Unknown ALUA access state: 0x%02x\n", state);
 		return TCM_INVALID_PARAMETER_LIST;
@@ -1013,7 +1016,7 @@ static void core_alua_queue_state_change_ua(struct t10_alua_tg_pt_gp *tg_pt_gp)
 static void core_alua_do_transition_tg_pt_work(struct work_struct *work)
 {
 	struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(work,
-		struct t10_alua_tg_pt_gp, tg_pt_gp_transition_work.work);
+		struct t10_alua_tg_pt_gp, tg_pt_gp_transition_work);
 	struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
 	bool explicit = (tg_pt_gp->tg_pt_gp_alua_access_status ==
 			 ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG);
@@ -1070,32 +1073,19 @@ static int core_alua_do_transition_tg_pt(
 	if (atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) == new_state)
 		return 0;
 
-	if (new_state == ALUA_ACCESS_STATE_TRANSITION)
+	if (explicit && new_state == ALUA_ACCESS_STATE_TRANSITION)
 		return -EAGAIN;
 
 	/*
 	 * Flush any pending transitions
 	 */
-	if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs &&
-	    atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) ==
-	    ALUA_ACCESS_STATE_TRANSITION) {
-		/* Just in case */
-		tg_pt_gp->tg_pt_gp_alua_pending_state = new_state;
-		tg_pt_gp->tg_pt_gp_transition_complete = &wait;
-		flush_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work);
-		wait_for_completion(&wait);
-		tg_pt_gp->tg_pt_gp_transition_complete = NULL;
-		return 0;
-	}
+	if (!explicit)
+		flush_work(&tg_pt_gp->tg_pt_gp_transition_work);
 
 	/*
 	 * Save the old primary ALUA access state, and set the current state
 	 * to ALUA_ACCESS_STATE_TRANSITION.
 	 */
-	tg_pt_gp->tg_pt_gp_alua_previous_state =
-		atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
-	tg_pt_gp->tg_pt_gp_alua_pending_state = new_state;
-
 	atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
 		   ALUA_ACCESS_STATE_TRANSITION);
 	tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ?
@@ -1104,6 +1094,13 @@
 
 	core_alua_queue_state_change_ua(tg_pt_gp);
 
+	if (new_state == ALUA_ACCESS_STATE_TRANSITION)
+		return 0;
+
+	tg_pt_gp->tg_pt_gp_alua_previous_state =
+		atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
+	tg_pt_gp->tg_pt_gp_alua_pending_state = new_state;
+
 	/*
 	 * Check for the optional ALUA primary state transition delay
 	 */
@@ -1117,17 +1114,9 @@
 	atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
 	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 
-	if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs) {
-		unsigned long transition_tmo;
-
-		transition_tmo = tg_pt_gp->tg_pt_gp_implicit_trans_secs * HZ;
-		queue_delayed_work(tg_pt_gp->tg_pt_gp_dev->tmr_wq,
-				   &tg_pt_gp->tg_pt_gp_transition_work,
-				   transition_tmo);
-	} else {
+	schedule_work(&tg_pt_gp->tg_pt_gp_transition_work);
+	if (explicit) {
 		tg_pt_gp->tg_pt_gp_transition_complete = &wait;
-		queue_delayed_work(tg_pt_gp->tg_pt_gp_dev->tmr_wq,
-				   &tg_pt_gp->tg_pt_gp_transition_work, 0);
 		wait_for_completion(&wait);
 		tg_pt_gp->tg_pt_gp_transition_complete = NULL;
 	}
@@ -1149,8 +1138,12 @@ int core_alua_do_port_transition(
 	struct t10_alua_tg_pt_gp *tg_pt_gp;
 	int primary, valid_states, rc = 0;
 
+	if (l_dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA)
+		return -ENODEV;
+
 	valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states;
-	if (core_alua_check_transition(new_state, valid_states, &primary) != 0)
+	if (core_alua_check_transition(new_state, valid_states, &primary,
+				       explicit) != 0)
 		return -EINVAL;
 
 	local_lu_gp_mem = l_dev->dev_alua_lu_gp_mem;
@@ -1695,8 +1688,8 @@ struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev,
 	mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex);
 	spin_lock_init(&tg_pt_gp->tg_pt_gp_lock);
 	atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0);
-	INIT_DELAYED_WORK(&tg_pt_gp->tg_pt_gp_transition_work,
-			  core_alua_do_transition_tg_pt_work);
+	INIT_WORK(&tg_pt_gp->tg_pt_gp_transition_work,
+		  core_alua_do_transition_tg_pt_work);
 	tg_pt_gp->tg_pt_gp_dev = dev;
 	atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
 		   ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED);
@@ -1804,7 +1797,7 @@ void core_alua_free_tg_pt_gp(
 	dev->t10_alua.alua_tg_pt_gps_counter--;
 	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 
-	flush_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work);
+	flush_work(&tg_pt_gp->tg_pt_gp_transition_work);
 
 	/*
 	 * Allow a struct t10_alua_tg_pt_gp_member * referenced by
@@ -1973,7 +1966,7 @@ ssize_t core_alua_store_tg_pt_gp_info(
 	unsigned char buf[TG_PT_GROUP_NAME_BUF];
 	int move = 0;
 
-	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH ||
+	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA ||
 	    (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
 		return -ENODEV;
 
@@ -2230,7 +2223,7 @@ ssize_t core_alua_store_offline_bit(
 	unsigned long tmp;
 	int ret;
 
-	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH ||
+	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA ||
 	    (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
 		return -ENODEV;
 
@@ -2316,7 +2309,8 @@ ssize_t core_alua_store_secondary_write_metadata(
 
 int core_setup_alua(struct se_device *dev)
 {
-	if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) &&
+	if (!(dev->transport->transport_flags &
+	      TRANSPORT_FLAG_PASSTHROUGH_ALUA) &&
 	    !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) {
 		struct t10_alua_lu_gp_member *lu_gp_mem;
 
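
The mechanical part of this hunk set is a delayed-work to plain-work conversion: the container_of() no longer dereferences .work, the TRANSITION state is now set synchronously before the work is scheduled, and explicit (STPG) requesters wait on a completion instead of relying on a work-queue delay. The three call sites that must change together, in isolation (obj and transition_fn are placeholders for the tg_pt_gp fields and handler above):

	/* Sketch: the delayed_work -> work_struct conversion pattern. */
	INIT_WORK(&obj->work, transition_fn);	/* was INIT_DELAYED_WORK */
	schedule_work(&obj->work);		/* was queue_delayed_work(wq, ..., tmo) */
	flush_work(&obj->work);			/* was flush_delayed_work */
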
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 54b36c9835be..38b5025e4c7a 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -421,6 +421,10 @@ static int target_fabric_tf_ops_check(const struct target_core_fabric_ops *tfo)
 		pr_err("Missing tfo->aborted_task()\n");
 		return -EINVAL;
 	}
+	if (!tfo->check_stop_free) {
+		pr_err("Missing tfo->check_stop_free()\n");
+		return -EINVAL;
+	}
 	/*
 	 * We at least require tfo->fabric_make_wwn(), tfo->fabric_drop_wwn()
 	 * tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index a8f8e53f2f57..94cda7991e80 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -154,7 +154,7 @@ static void pscsi_tape_read_blocksize(struct se_device *dev,
 
 	buf = kzalloc(12, GFP_KERNEL);
 	if (!buf)
-		return;
+		goto out_free;
 
 	memset(cdb, 0, MAX_COMMAND_SIZE);
 	cdb[0] = MODE_SENSE;
@@ -169,9 +169,10 @@
 	 * If MODE_SENSE still returns zero, set the default value to 1024.
 	 */
 	sdev->sector_size = (buf[9] << 16) | (buf[10] << 8) | (buf[11]);
+out_free:
 	if (!sdev->sector_size)
 		sdev->sector_size = 1024;
-out_free:
+
 	kfree(buf);
 }
 
@@ -314,9 +315,10 @@ static int pscsi_add_device_to_list(struct se_device *dev,
 			sd->lun, sd->queue_depth);
 	}
 
-	dev->dev_attrib.hw_block_size = sd->sector_size;
+	dev->dev_attrib.hw_block_size =
+		min_not_zero((int)sd->sector_size, 512);
 	dev->dev_attrib.hw_max_sectors =
-		min_t(int, sd->host->max_sectors, queue_max_hw_sectors(q));
+		min_not_zero(sd->host->max_sectors, queue_max_hw_sectors(q));
 	dev->dev_attrib.hw_queue_depth = sd->queue_depth;
 
 	/*
@@ -339,8 +341,10 @@
 	/*
 	 * For TYPE_TAPE, attempt to determine blocksize with MODE_SENSE.
 	 */
-	if (sd->type == TYPE_TAPE)
+	if (sd->type == TYPE_TAPE) {
 		pscsi_tape_read_blocksize(dev, sd);
+		dev->dev_attrib.hw_block_size = sd->sector_size;
+	}
 	return 0;
 }
 
@@ -406,7 +410,7 @@ static int pscsi_create_type_disk(struct se_device *dev, struct scsi_device *sd)
 /*
  * Called with struct Scsi_Host->host_lock called.
  */
-static int pscsi_create_type_rom(struct se_device *dev, struct scsi_device *sd)
+static int pscsi_create_type_nondisk(struct se_device *dev, struct scsi_device *sd)
 	__releases(sh->host_lock)
 {
 	struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
@@ -433,28 +437,6 @@ static int pscsi_create_type_rom(struct se_device *dev, struct scsi_device *sd)
 	return 0;
 }
 
-/*
- * Called with struct Scsi_Host->host_lock called.
- */
-static int pscsi_create_type_other(struct se_device *dev,
-				   struct scsi_device *sd)
-	__releases(sh->host_lock)
-{
-	struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
-	struct Scsi_Host *sh = sd->host;
-	int ret;
-
-	spin_unlock_irq(sh->host_lock);
-	ret = pscsi_add_device_to_list(dev, sd);
-	if (ret)
-		return ret;
-
-	pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%llu\n",
-		 phv->phv_host_id, scsi_device_type(sd->type), sh->host_no,
-		 sd->channel, sd->id, sd->lun);
-	return 0;
-}
-
 static int pscsi_configure_device(struct se_device *dev)
 {
 	struct se_hba *hba = dev->se_hba;
@@ -542,11 +524,8 @@ static int pscsi_configure_device(struct se_device *dev)
 	case TYPE_DISK:
 		ret = pscsi_create_type_disk(dev, sd);
 		break;
-	case TYPE_ROM:
-		ret = pscsi_create_type_rom(dev, sd);
-		break;
 	default:
-		ret = pscsi_create_type_other(dev, sd);
+		ret = pscsi_create_type_nondisk(dev, sd);
 		break;
 	}
 
@@ -611,8 +590,7 @@ static void pscsi_free_device(struct se_device *dev)
 		else if (pdv->pdv_lld_host)
 			scsi_host_put(pdv->pdv_lld_host);
 
-		if ((sd->type == TYPE_DISK) || (sd->type == TYPE_ROM))
-			scsi_device_put(sd);
+		scsi_device_put(sd);
 
 		pdv->pdv_sd = NULL;
 	}
@@ -1064,7 +1042,6 @@ static sector_t pscsi_get_blocks(struct se_device *dev)
 	if (pdv->pdv_bd && pdv->pdv_bd->bd_part)
 		return pdv->pdv_bd->bd_part->nr_sects;
 
-	dump_stack();
 	return 0;
 }
 
@@ -1103,7 +1080,8 @@ static void pscsi_req_done(struct request *req, int uptodate)
 static const struct target_backend_ops pscsi_ops = {
 	.name			= "pscsi",
 	.owner			= THIS_MODULE,
-	.transport_flags	= TRANSPORT_FLAG_PASSTHROUGH,
+	.transport_flags	= TRANSPORT_FLAG_PASSTHROUGH |
+				  TRANSPORT_FLAG_PASSTHROUGH_ALUA,
 	.attach_hba		= pscsi_attach_hba,
 	.detach_hba		= pscsi_detach_hba,
 	.pmode_enable_hba	= pscsi_pmode_enable_hba,
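
min_not_zero() (include/linux/kernel.h) returns the smaller of two values while ignoring a zero operand, so a passthrough device that reports sector_size == 0 now yields a 512-byte hw_block_size instead of propagating the zero, and a zero max_sectors on either the host or the queue can no longer zero out hw_max_sectors. Its semantics in two lines:

	/* Sketch: min_not_zero() picks the smaller nonzero value. */
	unsigned int v1 = min_not_zero(0U, 2048U);	/* -> 2048 */
	unsigned int v2 = min_not_zero(512U, 2048U);	/* -> 512  */
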
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index 68d8aef7ab78..c194063f169b 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -1105,9 +1105,15 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
 			return ret;
 		break;
 	case VERIFY:
+	case VERIFY_16:
 		size = 0;
-		sectors = transport_get_sectors_10(cdb);
-		cmd->t_task_lba = transport_lba_32(cdb);
+		if (cdb[0] == VERIFY) {
+			sectors = transport_get_sectors_10(cdb);
+			cmd->t_task_lba = transport_lba_32(cdb);
+		} else {
+			sectors = transport_get_sectors_16(cdb);
+			cmd->t_task_lba = transport_lba_64(cdb);
+		}
 		cmd->execute_cmd = sbc_emulate_noop;
 		goto check_lba;
 	case REZERO_UNIT:
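
VERIFY uses the 10-byte CDB layout (32-bit LBA at bytes 2-5, 16-bit transfer length at bytes 7-8) while VERIFY_16 uses the 16-byte layout (64-bit LBA at bytes 2-9, 32-bit length at bytes 10-13), which is exactly what the transport_get_sectors_*/transport_lba_* helper pairs extract. The layouts spelled out with the generic unaligned accessors:

	/* Sketch: the SBC CDB fields behind the helpers used above. */
	u64 lba16 = get_unaligned_be64(&cdb[2]);	/* VERIFY(16): bytes 2-9   */
	u32 len16 = get_unaligned_be32(&cdb[10]);	/* VERIFY(16): bytes 10-13 */
	u32 lba10 = get_unaligned_be32(&cdb[2]);	/* VERIFY(10): bytes 2-5   */
	u16 len10 = get_unaligned_be16(&cdb[7]);	/* VERIFY(10): bytes 7-8   */
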
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index c0dbfa016575..6fb191914f45 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -602,7 +602,8 @@ int core_tpg_add_lun(
 	if (ret)
 		goto out_kill_ref;
 
-	if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) &&
+	if (!(dev->transport->transport_flags &
+	      TRANSPORT_FLAG_PASSTHROUGH_ALUA) &&
 	    !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
 		target_attach_tg_pt_gp(lun, dev->t10_alua.default_tg_pt_gp);
 
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 434d9d693989..b1a3cdb29468 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -636,8 +636,7 @@ static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
 	 * Fabric modules are expected to return '1' here if the se_cmd being
 	 * passed is released at this point, or zero if not being released.
 	 */
-	return cmd->se_tfo->check_stop_free ? cmd->se_tfo->check_stop_free(cmd)
-						: 0;
+	return cmd->se_tfo->check_stop_free(cmd);
 }
 
 static void transport_lun_remove_cmd(struct se_cmd *cmd)
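
Dropping the NULL guard here is safe only in combination with the target_core_configfs.c hunk above: target_fabric_tf_ops_check() now refuses to register a fabric module without check_stop_free, so the pointer is guaranteed non-NULL on this hot path. A fabric with nothing to do at this point would still have to supply a stub, for example (hypothetical, following the return convention in the comment above):

	/* Sketch: minimal mandatory check_stop_free; returning 0 means the
	 * se_cmd was not released here.
	 */
	static int my_check_stop_free(struct se_cmd *cmd)
	{
		return 0;
	}
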
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index c3adefe95e50..c6874c38a10b 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -28,6 +28,7 @@
 #include <linux/stringify.h>
 #include <linux/bitops.h>
 #include <linux/highmem.h>
+#include <linux/configfs.h>
 #include <net/genetlink.h>
 #include <scsi/scsi_common.h>
 #include <scsi/scsi_proto.h>
@@ -112,6 +113,7 @@ struct tcmu_dev {
 	spinlock_t commands_lock;
 
 	struct timer_list timeout;
+	unsigned int cmd_time_out;
 
 	char dev_config[TCMU_CONFIG_LEN];
 };
@@ -172,7 +174,9 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
 
 	tcmu_cmd->se_cmd = se_cmd;
 	tcmu_cmd->tcmu_dev = udev;
-	tcmu_cmd->deadline = jiffies + msecs_to_jiffies(TCMU_TIME_OUT);
+	if (udev->cmd_time_out)
+		tcmu_cmd->deadline = jiffies +
+			msecs_to_jiffies(udev->cmd_time_out);
 
 	idr_preload(GFP_KERNEL);
 	spin_lock_irq(&udev->commands_lock);
@@ -451,7 +455,11 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
 
 		pr_debug("sleeping for ring space\n");
 		spin_unlock_irq(&udev->cmdr_lock);
-		ret = schedule_timeout(msecs_to_jiffies(TCMU_TIME_OUT));
+		if (udev->cmd_time_out)
+			ret = schedule_timeout(
+					msecs_to_jiffies(udev->cmd_time_out));
+		else
+			ret = schedule_timeout(msecs_to_jiffies(TCMU_TIME_OUT));
 		finish_wait(&udev->wait_cmdr, &__wait);
 		if (!ret) {
 			pr_warn("tcmu: command timed out\n");
@@ -526,8 +534,9 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
 	/* TODO: only if FLUSH and FUA? */
 	uio_event_notify(&udev->uio_info);
 
-	mod_timer(&udev->timeout,
-		round_jiffies_up(jiffies + msecs_to_jiffies(TCMU_TIME_OUT)));
+	if (udev->cmd_time_out)
+		mod_timer(&udev->timeout, round_jiffies_up(jiffies +
+			msecs_to_jiffies(udev->cmd_time_out)));
 
 	return TCM_NO_SENSE;
 }
@@ -742,6 +751,7 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
 	}
 
 	udev->hba = hba;
+	udev->cmd_time_out = TCMU_TIME_OUT;
 
 	init_waitqueue_head(&udev->wait_cmdr);
 	spin_lock_init(&udev->cmdr_lock);
@@ -960,7 +970,8 @@ static int tcmu_configure_device(struct se_device *dev)
 	if (dev->dev_attrib.hw_block_size == 0)
 		dev->dev_attrib.hw_block_size = 512;
 	/* Other attributes can be configured in userspace */
-	dev->dev_attrib.hw_max_sectors = 128;
+	if (!dev->dev_attrib.hw_max_sectors)
+		dev->dev_attrib.hw_max_sectors = 128;
 	dev->dev_attrib.hw_queue_depth = 128;
 
 	ret = tcmu_netlink_event(TCMU_CMD_ADDED_DEVICE, udev->uio_info.name,
@@ -997,6 +1008,11 @@ static void tcmu_dev_call_rcu(struct rcu_head *p)
 	kfree(udev);
 }
 
+static bool tcmu_dev_configured(struct tcmu_dev *udev)
+{
+	return udev->uio_info.uio_dev ? true : false;
+}
+
 static void tcmu_free_device(struct se_device *dev)
 {
 	struct tcmu_dev *udev = TCMU_DEV(dev);
@@ -1018,8 +1034,7 @@ static void tcmu_free_device(struct se_device *dev)
 	spin_unlock_irq(&udev->commands_lock);
1019 WARN_ON(!all_expired); 1035 WARN_ON(!all_expired);
1020 1036
1021 /* Device was configured */ 1037 if (tcmu_dev_configured(udev)) {
1022 if (udev->uio_info.uio_dev) {
1023 tcmu_netlink_event(TCMU_CMD_REMOVED_DEVICE, udev->uio_info.name, 1038 tcmu_netlink_event(TCMU_CMD_REMOVED_DEVICE, udev->uio_info.name,
1024 udev->uio_info.uio_dev->minor); 1039 udev->uio_info.uio_dev->minor);
1025 1040
@@ -1031,16 +1046,42 @@ static void tcmu_free_device(struct se_device *dev)
1031} 1046}
1032 1047
1033enum { 1048enum {
1034 Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_err, 1049 Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_hw_max_sectors,
1050 Opt_err,
1035}; 1051};
1036 1052
1037static match_table_t tokens = { 1053static match_table_t tokens = {
1038 {Opt_dev_config, "dev_config=%s"}, 1054 {Opt_dev_config, "dev_config=%s"},
1039 {Opt_dev_size, "dev_size=%u"}, 1055 {Opt_dev_size, "dev_size=%u"},
1040 {Opt_hw_block_size, "hw_block_size=%u"}, 1056 {Opt_hw_block_size, "hw_block_size=%u"},
1057 {Opt_hw_max_sectors, "hw_max_sectors=%u"},
1041 {Opt_err, NULL} 1058 {Opt_err, NULL}
1042}; 1059};
1043 1060
1061static int tcmu_set_dev_attrib(substring_t *arg, u32 *dev_attrib)
1062{
1063 unsigned long tmp_ul;
1064 char *arg_p;
1065 int ret;
1066
1067 arg_p = match_strdup(arg);
1068 if (!arg_p)
1069 return -ENOMEM;
1070
1071 ret = kstrtoul(arg_p, 0, &tmp_ul);
1072 kfree(arg_p);
1073 if (ret < 0) {
1074 pr_err("kstrtoul() failed for dev attrib\n");
1075 return ret;
1076 }
1077 if (!tmp_ul) {
1078 pr_err("dev attrib must be nonzero\n");
1079 return -EINVAL;
1080 }
1081 *dev_attrib = tmp_ul;
1082 return 0;
1083}
1084
1044static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev, 1085static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
1045 const char *page, ssize_t count) 1086 const char *page, ssize_t count)
1046{ 1087{
@@ -1048,7 +1089,6 @@ static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
1048 char *orig, *ptr, *opts, *arg_p; 1089 char *orig, *ptr, *opts, *arg_p;
1049 substring_t args[MAX_OPT_ARGS]; 1090 substring_t args[MAX_OPT_ARGS];
1050 int ret = 0, token; 1091 int ret = 0, token;
1051 unsigned long tmp_ul;
1052 1092
1053 opts = kstrdup(page, GFP_KERNEL); 1093 opts = kstrdup(page, GFP_KERNEL);
1054 if (!opts) 1094 if (!opts)
@@ -1082,26 +1122,19 @@ static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
1082 pr_err("kstrtoul() failed for dev_size=\n"); 1122 pr_err("kstrtoul() failed for dev_size=\n");
1083 break; 1123 break;
1084 case Opt_hw_block_size: 1124 case Opt_hw_block_size:
1085 arg_p = match_strdup(&args[0]); 1125 ret = tcmu_set_dev_attrib(&args[0],
1086 if (!arg_p) { 1126 &(dev->dev_attrib.hw_block_size));
1087 ret = -ENOMEM; 1127 break;
1088 break; 1128 case Opt_hw_max_sectors:
1089 } 1129 ret = tcmu_set_dev_attrib(&args[0],
1090 ret = kstrtoul(arg_p, 0, &tmp_ul); 1130 &(dev->dev_attrib.hw_max_sectors));
1091 kfree(arg_p);
1092 if (ret < 0) {
1093 pr_err("kstrtoul() failed for hw_block_size=\n");
1094 break;
1095 }
1096 if (!tmp_ul) {
1097 pr_err("hw_block_size must be nonzero\n");
1098 break;
1099 }
1100 dev->dev_attrib.hw_block_size = tmp_ul;
1101 break; 1131 break;
1102 default: 1132 default:
1103 break; 1133 break;
1104 } 1134 }
1135
1136 if (ret)
1137 break;
1105 } 1138 }
1106 1139
1107 kfree(orig); 1140 kfree(orig);
@@ -1134,7 +1167,48 @@ tcmu_parse_cdb(struct se_cmd *cmd)
1134 return passthrough_parse_cdb(cmd, tcmu_queue_cmd); 1167 return passthrough_parse_cdb(cmd, tcmu_queue_cmd);
1135} 1168}
1136 1169
1137static const struct target_backend_ops tcmu_ops = { 1170static ssize_t tcmu_cmd_time_out_show(struct config_item *item, char *page)
1171{
1172 struct se_dev_attrib *da = container_of(to_config_group(item),
1173 struct se_dev_attrib, da_group);
1174 struct tcmu_dev *udev = container_of(da->da_dev,
1175 struct tcmu_dev, se_dev);
1176
1177 return snprintf(page, PAGE_SIZE, "%lu\n", udev->cmd_time_out / MSEC_PER_SEC);
1178}
1179
1180static ssize_t tcmu_cmd_time_out_store(struct config_item *item, const char *page,
1181 size_t count)
1182{
1183 struct se_dev_attrib *da = container_of(to_config_group(item),
1184 struct se_dev_attrib, da_group);
1185 struct tcmu_dev *udev = container_of(da->da_dev,
1186 struct tcmu_dev, se_dev);
1187 u32 val;
1188 int ret;
1189
1190 if (da->da_dev->export_count) {
1191 pr_err("Unable to set tcmu cmd_time_out while exports exist\n");
1192 return -EINVAL;
1193 }
1194
1195 ret = kstrtou32(page, 0, &val);
1196 if (ret < 0)
1197 return ret;
1198
1199 if (!val) {
1200 pr_err("Illegal value for cmd_time_out\n");
1201 return -EINVAL;
1202 }
1203
1204 udev->cmd_time_out = val * MSEC_PER_SEC;
1205 return count;
1206}
1207CONFIGFS_ATTR(tcmu_, cmd_time_out);
1208
1209static struct configfs_attribute **tcmu_attrs;
1210
1211static struct target_backend_ops tcmu_ops = {
1138 .name = "user", 1212 .name = "user",
1139 .owner = THIS_MODULE, 1213 .owner = THIS_MODULE,
1140 .transport_flags = TRANSPORT_FLAG_PASSTHROUGH, 1214 .transport_flags = TRANSPORT_FLAG_PASSTHROUGH,
@@ -1148,12 +1222,12 @@ static const struct target_backend_ops tcmu_ops = {
1148 .show_configfs_dev_params = tcmu_show_configfs_dev_params, 1222 .show_configfs_dev_params = tcmu_show_configfs_dev_params,
1149 .get_device_type = sbc_get_device_type, 1223 .get_device_type = sbc_get_device_type,
1150 .get_blocks = tcmu_get_blocks, 1224 .get_blocks = tcmu_get_blocks,
1151 .tb_dev_attrib_attrs = passthrough_attrib_attrs, 1225 .tb_dev_attrib_attrs = NULL,
1152}; 1226};
1153 1227
1154static int __init tcmu_module_init(void) 1228static int __init tcmu_module_init(void)
1155{ 1229{
1156 int ret; 1230 int ret, i, len = 0;
1157 1231
1158 BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0); 1232 BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0);
1159 1233
@@ -1175,12 +1249,31 @@ static int __init tcmu_module_init(void)
1175 goto out_unreg_device; 1249 goto out_unreg_device;
1176 } 1250 }
1177 1251
1252 for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) {
1253 len += sizeof(struct configfs_attribute *);
1254 }
1255 len += sizeof(struct configfs_attribute *) * 2;
1256
1257 tcmu_attrs = kzalloc(len, GFP_KERNEL);
1258 if (!tcmu_attrs) {
1259 ret = -ENOMEM;
1260 goto out_unreg_genl;
1261 }
1262
1263 for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) {
1264 tcmu_attrs[i] = passthrough_attrib_attrs[i];
1265 }
1266 tcmu_attrs[i] = &tcmu_attr_cmd_time_out;
1267 tcmu_ops.tb_dev_attrib_attrs = tcmu_attrs;
1268
1178 ret = transport_backend_register(&tcmu_ops); 1269 ret = transport_backend_register(&tcmu_ops);
1179 if (ret) 1270 if (ret)
1180 goto out_unreg_genl; 1271 goto out_attrs;
1181 1272
1182 return 0; 1273 return 0;
1183 1274
1275out_attrs:
1276 kfree(tcmu_attrs);
1184out_unreg_genl: 1277out_unreg_genl:
1185 genl_unregister_family(&tcmu_genl_family); 1278 genl_unregister_family(&tcmu_genl_family);
1186out_unreg_device: 1279out_unreg_device:
@@ -1194,6 +1287,7 @@ out_free_cache:
1194static void __exit tcmu_module_exit(void) 1287static void __exit tcmu_module_exit(void)
1195{ 1288{
1196 target_backend_unregister(&tcmu_ops); 1289 target_backend_unregister(&tcmu_ops);
1290 kfree(tcmu_attrs);
1197 genl_unregister_family(&tcmu_genl_family); 1291 genl_unregister_family(&tcmu_genl_family);
1198 root_device_unregister(tcmu_root_device); 1292 root_device_unregister(tcmu_root_device);
1199 kmem_cache_destroy(tcmu_cmd_cache); 1293 kmem_cache_destroy(tcmu_cmd_cache);
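A recurring pattern in the tcmu hunks above is the factored-out option validation: tcmu_set_dev_attrib() replaces two near-identical match_strdup()/kstrtoul()/nonzero-check sequences, and the parsing loop now aborts on the first failed token. A rough userspace sketch of the same parse-and-validate shape, with strtoul standing in for kstrtoul and the option names merely mirroring the tokens table:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* parse a decimal/hex value, rejecting garbage and zero */
static int set_dev_attrib(const char *arg, unsigned int *dev_attrib)
{
	char *end;
	unsigned long tmp;

	errno = 0;
	tmp = strtoul(arg, &end, 0);
	if (errno || end == arg || *end) {
		fprintf(stderr, "bad value for dev attrib: %s\n", arg);
		return -EINVAL;
	}
	if (!tmp) {
		fprintf(stderr, "dev attrib must be nonzero\n");
		return -EINVAL;
	}
	*dev_attrib = tmp;
	return 0;
}

int main(void)
{
	unsigned int hw_block_size = 512, hw_max_sectors = 128;
	char opts[] = "hw_block_size=4096,hw_max_sectors=256";
	char *p;

	for (p = strtok(opts, ","); p; p = strtok(NULL, ",")) {
		char *eq = strchr(p, '=');
		if (!eq)
			continue;
		*eq = '\0';
		if (!strcmp(p, "hw_block_size"))
			set_dev_attrib(eq + 1, &hw_block_size);
		else if (!strcmp(p, "hw_max_sectors"))
			set_dev_attrib(eq + 1, &hw_max_sectors);
	}
	printf("hw_block_size=%u hw_max_sectors=%u\n",
	       hw_block_size, hw_max_sectors);
	return 0;
}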
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index 776b34396144..0a16cf4bed39 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -291,18 +291,6 @@ config ARMADA_THERMAL
291 Enable this option if you want to have support for thermal management 291 Enable this option if you want to have support for thermal management
292 controller present in Armada 370 and Armada XP SoC. 292 controller present in Armada 370 and Armada XP SoC.
293 293
294config DB8500_CPUFREQ_COOLING
295 tristate "DB8500 cpufreq cooling"
296 depends on ARCH_U8500 || COMPILE_TEST
297 depends on HAS_IOMEM
298 depends on CPU_THERMAL
299 default y
300 help
301 Adds DB8500 cpufreq cooling devices, and these cooling devices can be
302 bound to thermal zone trip points. When a trip point reached, the
303 bound cpufreq cooling device turns active to set CPU frequency low to
304 cool down the CPU.
305
306config INTEL_POWERCLAMP 294config INTEL_POWERCLAMP
307 tristate "Intel PowerClamp idle injection driver" 295 tristate "Intel PowerClamp idle injection driver"
308 depends on THERMAL 296 depends on THERMAL
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index 7adae2029355..c2372f10dae5 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -41,7 +41,6 @@ obj-$(CONFIG_TANGO_THERMAL) += tango_thermal.o
41obj-$(CONFIG_IMX_THERMAL) += imx_thermal.o 41obj-$(CONFIG_IMX_THERMAL) += imx_thermal.o
42obj-$(CONFIG_MAX77620_THERMAL) += max77620_thermal.o 42obj-$(CONFIG_MAX77620_THERMAL) += max77620_thermal.o
43obj-$(CONFIG_QORIQ_THERMAL) += qoriq_thermal.o 43obj-$(CONFIG_QORIQ_THERMAL) += qoriq_thermal.o
44obj-$(CONFIG_DB8500_CPUFREQ_COOLING) += db8500_cpufreq_cooling.o
45obj-$(CONFIG_INTEL_POWERCLAMP) += intel_powerclamp.o 44obj-$(CONFIG_INTEL_POWERCLAMP) += intel_powerclamp.o
46obj-$(CONFIG_X86_PKG_TEMP_THERMAL) += x86_pkg_temp_thermal.o 45obj-$(CONFIG_X86_PKG_TEMP_THERMAL) += x86_pkg_temp_thermal.o
47obj-$(CONFIG_INTEL_SOC_DTS_IOSF_CORE) += intel_soc_dts_iosf.o 46obj-$(CONFIG_INTEL_SOC_DTS_IOSF_CORE) += intel_soc_dts_iosf.o
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index 91048eeca28b..69d0f430b2d1 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -107,8 +107,6 @@ struct cpufreq_cooling_device {
107}; 107};
108static DEFINE_IDA(cpufreq_ida); 108static DEFINE_IDA(cpufreq_ida);
109 109
110static unsigned int cpufreq_dev_count;
111
112static DEFINE_MUTEX(cooling_list_lock); 110static DEFINE_MUTEX(cooling_list_lock);
113static LIST_HEAD(cpufreq_dev_list); 111static LIST_HEAD(cpufreq_dev_list);
114 112
@@ -395,13 +393,20 @@ static int get_static_power(struct cpufreq_cooling_device *cpufreq_device,
395 393
396 opp = dev_pm_opp_find_freq_exact(cpufreq_device->cpu_dev, freq_hz, 394 opp = dev_pm_opp_find_freq_exact(cpufreq_device->cpu_dev, freq_hz,
397 true); 395 true);
396 if (IS_ERR(opp)) {
397 dev_warn_ratelimited(cpufreq_device->cpu_dev,
398 "Failed to find OPP for frequency %lu: %ld\n",
399 freq_hz, PTR_ERR(opp));
400 return -EINVAL;
401 }
402
398 voltage = dev_pm_opp_get_voltage(opp); 403 voltage = dev_pm_opp_get_voltage(opp);
399 dev_pm_opp_put(opp); 404 dev_pm_opp_put(opp);
400 405
401 if (voltage == 0) { 406 if (voltage == 0) {
402 dev_warn_ratelimited(cpufreq_device->cpu_dev, 407 dev_err_ratelimited(cpufreq_device->cpu_dev,
403 "Failed to get voltage for frequency %lu: %ld\n", 408 "Failed to get voltage for frequency %lu\n",
404 freq_hz, IS_ERR(opp) ? PTR_ERR(opp) : 0); 409 freq_hz);
405 return -EINVAL; 410 return -EINVAL;
406 } 411 }
407 412
@@ -693,9 +698,9 @@ static int cpufreq_power2state(struct thermal_cooling_device *cdev,
693 698
694 *state = cpufreq_cooling_get_level(cpu, target_freq); 699 *state = cpufreq_cooling_get_level(cpu, target_freq);
695 if (*state == THERMAL_CSTATE_INVALID) { 700 if (*state == THERMAL_CSTATE_INVALID) {
696 dev_warn_ratelimited(&cdev->device, 701 dev_err_ratelimited(&cdev->device,
697 "Failed to convert %dKHz for cpu %d into a cdev state\n", 702 "Failed to convert %dKHz for cpu %d into a cdev state\n",
698 target_freq, cpu); 703 target_freq, cpu);
699 return -EINVAL; 704 return -EINVAL;
700 } 705 }
701 706
@@ -771,6 +776,7 @@ __cpufreq_cooling_register(struct device_node *np,
771 unsigned int freq, i, num_cpus; 776 unsigned int freq, i, num_cpus;
772 int ret; 777 int ret;
773 struct thermal_cooling_device_ops *cooling_ops; 778 struct thermal_cooling_device_ops *cooling_ops;
779 bool first;
774 780
775 if (!alloc_cpumask_var(&temp_mask, GFP_KERNEL)) 781 if (!alloc_cpumask_var(&temp_mask, GFP_KERNEL))
776 return ERR_PTR(-ENOMEM); 782 return ERR_PTR(-ENOMEM);
@@ -874,13 +880,14 @@ __cpufreq_cooling_register(struct device_node *np,
874 cpufreq_dev->cool_dev = cool_dev; 880 cpufreq_dev->cool_dev = cool_dev;
875 881
876 mutex_lock(&cooling_list_lock); 882 mutex_lock(&cooling_list_lock);
883 /* Register the notifier for first cpufreq cooling device */
884 first = list_empty(&cpufreq_dev_list);
877 list_add(&cpufreq_dev->node, &cpufreq_dev_list); 885 list_add(&cpufreq_dev->node, &cpufreq_dev_list);
886 mutex_unlock(&cooling_list_lock);
878 887
879 /* Register the notifier for first cpufreq cooling device */ 888 if (first)
880 if (!cpufreq_dev_count++)
881 cpufreq_register_notifier(&thermal_cpufreq_notifier_block, 889 cpufreq_register_notifier(&thermal_cpufreq_notifier_block,
882 CPUFREQ_POLICY_NOTIFIER); 890 CPUFREQ_POLICY_NOTIFIER);
883 mutex_unlock(&cooling_list_lock);
884 891
885 goto put_policy; 892 goto put_policy;
886 893
@@ -1021,6 +1028,7 @@ EXPORT_SYMBOL(of_cpufreq_power_cooling_register);
1021void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev) 1028void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
1022{ 1029{
1023 struct cpufreq_cooling_device *cpufreq_dev; 1030 struct cpufreq_cooling_device *cpufreq_dev;
1031 bool last;
1024 1032
1025 if (!cdev) 1033 if (!cdev)
1026 return; 1034 return;
@@ -1028,14 +1036,15 @@ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
1028 cpufreq_dev = cdev->devdata; 1036 cpufreq_dev = cdev->devdata;
1029 1037
1030 mutex_lock(&cooling_list_lock); 1038 mutex_lock(&cooling_list_lock);
1039 list_del(&cpufreq_dev->node);
1031 /* Unregister the notifier for the last cpufreq cooling device */ 1040 /* Unregister the notifier for the last cpufreq cooling device */
1032 if (!--cpufreq_dev_count) 1041 last = list_empty(&cpufreq_dev_list);
1042 mutex_unlock(&cooling_list_lock);
1043
1044 if (last)
1033 cpufreq_unregister_notifier(&thermal_cpufreq_notifier_block, 1045 cpufreq_unregister_notifier(&thermal_cpufreq_notifier_block,
1034 CPUFREQ_POLICY_NOTIFIER); 1046 CPUFREQ_POLICY_NOTIFIER);
1035 1047
1036 list_del(&cpufreq_dev->node);
1037 mutex_unlock(&cooling_list_lock);
1038
1039 thermal_cooling_device_unregister(cpufreq_dev->cool_dev); 1048 thermal_cooling_device_unregister(cpufreq_dev->cool_dev);
1040 ida_simple_remove(&cpufreq_ida, cpufreq_dev->id); 1049 ida_simple_remove(&cpufreq_ida, cpufreq_dev->id);
1041 kfree(cpufreq_dev->dyn_power_table); 1050 kfree(cpufreq_dev->dyn_power_table);
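The cpu_cooling rework above retires the cpufreq_dev_count counter: whether this registration is the first (or this removal the last) is now derived from list_empty() while cooling_list_lock is held, and the cpufreq notifier call itself moves outside the lock. A small pthread sketch of that decide-under-lock, act-outside-lock pattern, with stand-in names:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct node { struct node *next; };

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *dev_list;

static void register_notifier(void)   { puts("notifier registered"); }
static void unregister_notifier(void) { puts("notifier unregistered"); }

static void cooling_add(struct node *n)
{
	bool first;

	pthread_mutex_lock(&list_lock);
	first = (dev_list == NULL);      /* decision made under the lock */
	n->next = dev_list;
	dev_list = n;
	pthread_mutex_unlock(&list_lock);

	if (first)                       /* call made outside the lock */
		register_notifier();
}

static void cooling_del(struct node *n)
{
	bool last;
	struct node **pp;

	pthread_mutex_lock(&list_lock);
	for (pp = &dev_list; *pp; pp = &(*pp)->next)
		if (*pp == n) { *pp = n->next; break; }
	last = (dev_list == NULL);
	pthread_mutex_unlock(&list_lock);

	if (last)
		unregister_notifier();
}

int main(void)
{
	struct node a, b;
	cooling_add(&a);  /* registers */
	cooling_add(&b);  /* no-op */
	cooling_del(&a);  /* no-op */
	cooling_del(&b);  /* unregisters */
	return 0;
}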
diff --git a/drivers/thermal/db8500_cpufreq_cooling.c b/drivers/thermal/db8500_cpufreq_cooling.c
deleted file mode 100644
index e58bd0b658b5..000000000000
--- a/drivers/thermal/db8500_cpufreq_cooling.c
+++ /dev/null
@@ -1,105 +0,0 @@
1/*
2 * db8500_cpufreq_cooling.c - DB8500 cpufreq works as cooling device.
3 *
4 * Copyright (C) 2012 ST-Ericsson
5 * Copyright (C) 2012 Linaro Ltd.
6 *
7 * Author: Hongbo Zhang <hongbo.zhang@linaro.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 */
19
20#include <linux/cpu_cooling.h>
21#include <linux/err.h>
22#include <linux/module.h>
23#include <linux/of.h>
24#include <linux/platform_device.h>
25#include <linux/slab.h>
26
27static int db8500_cpufreq_cooling_probe(struct platform_device *pdev)
28{
29 struct thermal_cooling_device *cdev;
30
31 cdev = cpufreq_cooling_register(cpu_present_mask);
32 if (IS_ERR(cdev)) {
33 int ret = PTR_ERR(cdev);
34
35 if (ret != -EPROBE_DEFER)
36 dev_err(&pdev->dev,
37 "Failed to register cooling device %d\n",
38 ret);
39
40 return ret;
41 }
42
43 platform_set_drvdata(pdev, cdev);
44
45 dev_info(&pdev->dev, "Cooling device registered: %s\n", cdev->type);
46
47 return 0;
48}
49
50static int db8500_cpufreq_cooling_remove(struct platform_device *pdev)
51{
52 struct thermal_cooling_device *cdev = platform_get_drvdata(pdev);
53
54 cpufreq_cooling_unregister(cdev);
55
56 return 0;
57}
58
59static int db8500_cpufreq_cooling_suspend(struct platform_device *pdev,
60 pm_message_t state)
61{
62 return -ENOSYS;
63}
64
65static int db8500_cpufreq_cooling_resume(struct platform_device *pdev)
66{
67 return -ENOSYS;
68}
69
70#ifdef CONFIG_OF
71static const struct of_device_id db8500_cpufreq_cooling_match[] = {
72 { .compatible = "stericsson,db8500-cpufreq-cooling" },
73 {},
74};
75MODULE_DEVICE_TABLE(of, db8500_cpufreq_cooling_match);
76#endif
77
78static struct platform_driver db8500_cpufreq_cooling_driver = {
79 .driver = {
80 .name = "db8500-cpufreq-cooling",
81 .of_match_table = of_match_ptr(db8500_cpufreq_cooling_match),
82 },
83 .probe = db8500_cpufreq_cooling_probe,
84 .suspend = db8500_cpufreq_cooling_suspend,
85 .resume = db8500_cpufreq_cooling_resume,
86 .remove = db8500_cpufreq_cooling_remove,
87};
88
89static int __init db8500_cpufreq_cooling_init(void)
90{
91 return platform_driver_register(&db8500_cpufreq_cooling_driver);
92}
93
94static void __exit db8500_cpufreq_cooling_exit(void)
95{
96 platform_driver_unregister(&db8500_cpufreq_cooling_driver);
97}
98
99/* Should be later than db8500_cpufreq_register */
100late_initcall(db8500_cpufreq_cooling_init);
101module_exit(db8500_cpufreq_cooling_exit);
102
103MODULE_AUTHOR("Hongbo Zhang <hongbo.zhang@stericsson.com>");
104MODULE_DESCRIPTION("DB8500 cpufreq cooling driver");
105MODULE_LICENSE("GPL");
diff --git a/drivers/thermal/devfreq_cooling.c b/drivers/thermal/devfreq_cooling.c
index 7743a78d4723..4bf4ad58cffd 100644
--- a/drivers/thermal/devfreq_cooling.c
+++ b/drivers/thermal/devfreq_cooling.c
@@ -186,16 +186,22 @@ get_static_power(struct devfreq_cooling_device *dfc, unsigned long freq)
186 return 0; 186 return 0;
187 187
188 opp = dev_pm_opp_find_freq_exact(dev, freq, true); 188 opp = dev_pm_opp_find_freq_exact(dev, freq, true);
189 if (IS_ERR(opp) && (PTR_ERR(opp) == -ERANGE)) 189 if (PTR_ERR(opp) == -ERANGE)
190 opp = dev_pm_opp_find_freq_exact(dev, freq, false); 190 opp = dev_pm_opp_find_freq_exact(dev, freq, false);
191 191
192 if (IS_ERR(opp)) {
193 dev_err_ratelimited(dev, "Failed to find OPP for frequency %lu: %ld\n",
194 freq, PTR_ERR(opp));
195 return 0;
196 }
197
192 voltage = dev_pm_opp_get_voltage(opp) / 1000; /* mV */ 198 voltage = dev_pm_opp_get_voltage(opp) / 1000; /* mV */
193 dev_pm_opp_put(opp); 199 dev_pm_opp_put(opp);
194 200
195 if (voltage == 0) { 201 if (voltage == 0) {
196 dev_warn_ratelimited(dev, 202 dev_err_ratelimited(dev,
197 "Failed to get voltage for frequency %lu: %ld\n", 203 "Failed to get voltage for frequency %lu\n",
198 freq, IS_ERR(opp) ? PTR_ERR(opp) : 0); 204 freq);
199 return 0; 205 return 0;
200 } 206 }
201 207
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index 6ee55a2d47bb..e65808c482f1 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -257,7 +257,7 @@ static void dw8250_set_termios(struct uart_port *p, struct ktermios *termios,
257{ 257{
258 unsigned int baud = tty_termios_baud_rate(termios); 258 unsigned int baud = tty_termios_baud_rate(termios);
259 struct dw8250_data *d = p->private_data; 259 struct dw8250_data *d = p->private_data;
260 unsigned int rate; 260 long rate;
261 int ret; 261 int ret;
262 262
263 if (IS_ERR(d->clk) || !old) 263 if (IS_ERR(d->clk) || !old)
@@ -265,7 +265,12 @@ static void dw8250_set_termios(struct uart_port *p, struct ktermios *termios,
265 265
266 clk_disable_unprepare(d->clk); 266 clk_disable_unprepare(d->clk);
267 rate = clk_round_rate(d->clk, baud * 16); 267 rate = clk_round_rate(d->clk, baud * 16);
268 ret = clk_set_rate(d->clk, rate); 268 if (rate < 0)
269 ret = rate;
270 else if (rate == 0)
271 ret = -ENOENT;
272 else
273 ret = clk_set_rate(d->clk, rate);
269 clk_prepare_enable(d->clk); 274 clk_prepare_enable(d->clk);
270 275
271 if (!ret) 276 if (!ret)
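In the 8250_dw hunk above, rate becomes long precisely so the sign of clk_round_rate()'s return value is preserved: negative is an error code to propagate, zero means the clock cannot supply a usable rate (-ENOENT), anything else is handed to clk_set_rate(). A sketch of that decision ladder; the *_stub functions are stand-ins for the example, not the clk API:

#include <errno.h>
#include <stdio.h>

static long clk_round_rate_stub(long target)
{
	return target >= 0 ? target : -EINVAL;
}

static int clk_set_rate_stub(long rate)
{
	printf("rate set to %ld\n", rate);
	return 0;
}

static int set_baud_clock(unsigned int baud)
{
	long rate = clk_round_rate_stub((long)baud * 16);
	int ret;

	if (rate < 0)
		ret = (int)rate;     /* propagate the error code */
	else if (rate == 0)
		ret = -ENOENT;       /* clock cannot provide a rate */
	else
		ret = clk_set_rate_stub(rate);
	return ret;
}

int main(void)
{
	return set_baud_clock(115200) ? 1 : 0;
}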
diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
index a65fb8197aec..0e3f529d50e9 100644
--- a/drivers/tty/serial/8250/Kconfig
+++ b/drivers/tty/serial/8250/Kconfig
@@ -128,9 +128,13 @@ config SERIAL_8250_PCI
128 by the parport_serial driver, enabled with CONFIG_PARPORT_SERIAL. 128 by the parport_serial driver, enabled with CONFIG_PARPORT_SERIAL.
129 129
130config SERIAL_8250_EXAR 130config SERIAL_8250_EXAR
131 tristate "8250/16550 PCI device support" 131 tristate "8250/16550 Exar/Commtech PCI/PCIe device support"
132 depends on SERIAL_8250_PCI 132 depends on SERIAL_8250_PCI
133 default SERIAL_8250 133 default SERIAL_8250
134 help
135 This builds support for XR17C1xx, XR17V3xx and some Commtech
136 422x PCIe serial cards that are not covered by the more generic
137 SERIAL_8250_PCI option.
134 138
135config SERIAL_8250_HP300 139config SERIAL_8250_HP300
136 tristate 140 tristate
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 8789ea423ccf..b0a377725d63 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -2373,7 +2373,7 @@ static int __init pl011_console_match(struct console *co, char *name, int idx,
2373 if (strcmp(name, "qdf2400_e44") == 0) { 2373 if (strcmp(name, "qdf2400_e44") == 0) {
2374 pr_info_once("UART: Working around QDF2400 SoC erratum 44"); 2374 pr_info_once("UART: Working around QDF2400 SoC erratum 44");
2375 qdf2400_e44_present = true; 2375 qdf2400_e44_present = true;
2376 } else if (strcmp(name, "pl011") != 0 || strcmp(name, "ttyAMA") != 0) { 2376 } else if (strcmp(name, "pl011") != 0) {
2377 return -ENODEV; 2377 return -ENODEV;
2378 } 2378 }
2379 2379
@@ -2452,18 +2452,37 @@ static void pl011_early_write(struct console *con, const char *s, unsigned n)
2452 uart_console_write(&dev->port, s, n, pl011_putc); 2452 uart_console_write(&dev->port, s, n, pl011_putc);
2453} 2453}
2454 2454
2455/*
2456 * On non-ACPI systems, earlycon is enabled by specifying
2457 * "earlycon=pl011,<address>" on the kernel command line.
2458 *
2459 * On ACPI ARM64 systems, an "early" console is enabled via the SPCR table,
2460 * by specifying only "earlycon" on the command line. Because it requires
2461 * SPCR, the console starts after ACPI is parsed, which is later than a
2462 * traditional early console.
2463 *
2464 * To get the traditional early console that starts before ACPI is parsed,
2465 * specify the full "earlycon=pl011,<address>" option.
2466 */
2455static int __init pl011_early_console_setup(struct earlycon_device *device, 2467static int __init pl011_early_console_setup(struct earlycon_device *device,
2456 const char *opt) 2468 const char *opt)
2457{ 2469{
2458 if (!device->port.membase) 2470 if (!device->port.membase)
2459 return -ENODEV; 2471 return -ENODEV;
2460 2472
2461 device->con->write = qdf2400_e44_present ? 2473 /* On QDF2400 SOCs affected by Erratum 44, the "qdf2400_e44" must
2462 qdf2400_e44_early_write : pl011_early_write; 2474 * also be specified, e.g. "earlycon=pl011,<address>,qdf2400_e44".
2475 */
2476 if (!strcmp(device->options, "qdf2400_e44"))
2477 device->con->write = qdf2400_e44_early_write;
2478 else
2479 device->con->write = pl011_early_write;
2480
2463 return 0; 2481 return 0;
2464} 2482}
2465OF_EARLYCON_DECLARE(pl011, "arm,pl011", pl011_early_console_setup); 2483OF_EARLYCON_DECLARE(pl011, "arm,pl011", pl011_early_console_setup);
2466OF_EARLYCON_DECLARE(pl011, "arm,sbsa-uart", pl011_early_console_setup); 2484OF_EARLYCON_DECLARE(pl011, "arm,sbsa-uart", pl011_early_console_setup);
2485EARLYCON_DECLARE(qdf2400_e44, pl011_early_console_setup);
2467 2486
2468#else 2487#else
2469#define AMBA_CONSOLE NULL 2488#define AMBA_CONSOLE NULL
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index dcebb28ffbc4..1f50a83ef958 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -1951,6 +1951,11 @@ static void atmel_flush_buffer(struct uart_port *port)
1951 atmel_uart_writel(port, ATMEL_PDC_TCR, 0); 1951 atmel_uart_writel(port, ATMEL_PDC_TCR, 0);
1952 atmel_port->pdc_tx.ofs = 0; 1952 atmel_port->pdc_tx.ofs = 0;
1953 } 1953 }
1954 /*
1955 * in uart_flush_buffer(), the xmit circular buffer has just
1956 * been cleared, so we have to reset tx_len accordingly.
1957 */
1958 atmel_port->tx_len = 0;
1954} 1959}
1955 1960
1956/* 1961/*
@@ -2483,6 +2488,9 @@ static void atmel_console_write(struct console *co, const char *s, u_int count)
2483 pdc_tx = atmel_uart_readl(port, ATMEL_PDC_PTSR) & ATMEL_PDC_TXTEN; 2488 pdc_tx = atmel_uart_readl(port, ATMEL_PDC_PTSR) & ATMEL_PDC_TXTEN;
2484 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS); 2489 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS);
2485 2490
2491 /* Make sure that tx path is actually able to send characters */
2492 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN);
2493
2486 uart_console_write(port, s, count, atmel_console_putchar); 2494 uart_console_write(port, s, count, atmel_console_putchar);
2487 2495
2488 /* 2496 /*
diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
index 6989b227d134..be94246b6fcc 100644
--- a/drivers/tty/serial/mxs-auart.c
+++ b/drivers/tty/serial/mxs-auart.c
@@ -1088,7 +1088,7 @@ static void mxs_auart_settermios(struct uart_port *u,
1088 AUART_LINECTRL_BAUD_DIV_MAX); 1088 AUART_LINECTRL_BAUD_DIV_MAX);
1089 baud_max = u->uartclk * 32 / AUART_LINECTRL_BAUD_DIV_MIN; 1089 baud_max = u->uartclk * 32 / AUART_LINECTRL_BAUD_DIV_MIN;
1090 baud = uart_get_baud_rate(u, termios, old, baud_min, baud_max); 1090 baud = uart_get_baud_rate(u, termios, old, baud_min, baud_max);
1091 div = u->uartclk * 32 / baud; 1091 div = DIV_ROUND_CLOSEST(u->uartclk * 32, baud);
1092 } 1092 }
1093 1093
1094 ctrl |= AUART_LINECTRL_BAUD_DIVFRAC(div & 0x3F); 1094 ctrl |= AUART_LINECTRL_BAUD_DIVFRAC(div & 0x3F);
diff --git a/drivers/tty/serial/st-asc.c b/drivers/tty/serial/st-asc.c
index bcf1d33e6ffe..c334bcc59c64 100644
--- a/drivers/tty/serial/st-asc.c
+++ b/drivers/tty/serial/st-asc.c
@@ -575,12 +575,13 @@ static void asc_set_termios(struct uart_port *port, struct ktermios *termios,
575 pinctrl_select_state(ascport->pinctrl, 575 pinctrl_select_state(ascport->pinctrl,
576 ascport->states[NO_HW_FLOWCTRL]); 576 ascport->states[NO_HW_FLOWCTRL]);
577 577
578 gpiod = devm_get_gpiod_from_child(port->dev, "rts", 578 gpiod = devm_fwnode_get_gpiod_from_child(port->dev,
579 &np->fwnode); 579 "rts",
580 if (!IS_ERR(gpiod)) { 580 &np->fwnode,
581 gpiod_direction_output(gpiod, 0); 581 GPIOD_OUT_LOW,
582 np->name);
583 if (!IS_ERR(gpiod))
582 ascport->rts = gpiod; 584 ascport->rts = gpiod;
583 }
584 } 585 }
585 } 586 }
586 587
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index 68947f6de5ad..b0500a0a87b8 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -271,10 +271,13 @@ const struct file_operations tty_ldiscs_proc_fops = {
271 271
272struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *tty) 272struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *tty)
273{ 273{
274 struct tty_ldisc *ld;
275
274 ldsem_down_read(&tty->ldisc_sem, MAX_SCHEDULE_TIMEOUT); 276 ldsem_down_read(&tty->ldisc_sem, MAX_SCHEDULE_TIMEOUT);
275 if (!tty->ldisc) 277 ld = tty->ldisc;
278 if (!ld)
276 ldsem_up_read(&tty->ldisc_sem); 279 ldsem_up_read(&tty->ldisc_sem);
277 return tty->ldisc; 280 return ld;
278} 281}
279EXPORT_SYMBOL_GPL(tty_ldisc_ref_wait); 282EXPORT_SYMBOL_GPL(tty_ldisc_ref_wait);
280 283
@@ -489,41 +492,6 @@ static void tty_ldisc_close(struct tty_struct *tty, struct tty_ldisc *ld)
489} 492}
490 493
491/** 494/**
492 * tty_ldisc_restore - helper for tty ldisc change
493 * @tty: tty to recover
494 * @old: previous ldisc
495 *
496 * Restore the previous line discipline or N_TTY when a line discipline
497 * change fails due to an open error
498 */
499
500static void tty_ldisc_restore(struct tty_struct *tty, struct tty_ldisc *old)
501{
502 struct tty_ldisc *new_ldisc;
503 int r;
504
505 /* There is an outstanding reference here so this is safe */
506 old = tty_ldisc_get(tty, old->ops->num);
507 WARN_ON(IS_ERR(old));
508 tty->ldisc = old;
509 tty_set_termios_ldisc(tty, old->ops->num);
510 if (tty_ldisc_open(tty, old) < 0) {
511 tty_ldisc_put(old);
512 /* This driver is always present */
513 new_ldisc = tty_ldisc_get(tty, N_TTY);
514 if (IS_ERR(new_ldisc))
515 panic("n_tty: get");
516 tty->ldisc = new_ldisc;
517 tty_set_termios_ldisc(tty, N_TTY);
518 r = tty_ldisc_open(tty, new_ldisc);
519 if (r < 0)
520 panic("Couldn't open N_TTY ldisc for "
521 "%s --- error %d.",
522 tty_name(tty), r);
523 }
524}
525
526/**
527 * tty_set_ldisc - set line discipline 495 * tty_set_ldisc - set line discipline
528 * @tty: the terminal to set 496 * @tty: the terminal to set
529 * @ldisc: the line discipline 497 * @ldisc: the line discipline
@@ -536,12 +504,7 @@ static void tty_ldisc_restore(struct tty_struct *tty, struct tty_ldisc *old)
536 504
537int tty_set_ldisc(struct tty_struct *tty, int disc) 505int tty_set_ldisc(struct tty_struct *tty, int disc)
538{ 506{
539 int retval; 507 int retval, old_disc;
540 struct tty_ldisc *old_ldisc, *new_ldisc;
541
542 new_ldisc = tty_ldisc_get(tty, disc);
543 if (IS_ERR(new_ldisc))
544 return PTR_ERR(new_ldisc);
545 508
546 tty_lock(tty); 509 tty_lock(tty);
547 retval = tty_ldisc_lock(tty, 5 * HZ); 510 retval = tty_ldisc_lock(tty, 5 * HZ);
@@ -554,7 +517,8 @@ int tty_set_ldisc(struct tty_struct *tty, int disc)
554 } 517 }
555 518
556 /* Check the no-op case */ 519 /* Check the no-op case */
557 if (tty->ldisc->ops->num == disc) 520 old_disc = tty->ldisc->ops->num;
521 if (old_disc == disc)
558 goto out; 522 goto out;
559 523
560 if (test_bit(TTY_HUPPED, &tty->flags)) { 524 if (test_bit(TTY_HUPPED, &tty->flags)) {
@@ -563,34 +527,25 @@ int tty_set_ldisc(struct tty_struct *tty, int disc)
563 goto out; 527 goto out;
564 } 528 }
565 529
566 old_ldisc = tty->ldisc; 530 retval = tty_ldisc_reinit(tty, disc);
567
568 /* Shutdown the old discipline. */
569 tty_ldisc_close(tty, old_ldisc);
570
571 /* Now set up the new line discipline. */
572 tty->ldisc = new_ldisc;
573 tty_set_termios_ldisc(tty, disc);
574
575 retval = tty_ldisc_open(tty, new_ldisc);
576 if (retval < 0) { 531 if (retval < 0) {
577 /* Back to the old one or N_TTY if we can't */ 532 /* Back to the old one or N_TTY if we can't */
578 tty_ldisc_put(new_ldisc); 533 if (tty_ldisc_reinit(tty, old_disc) < 0) {
579 tty_ldisc_restore(tty, old_ldisc); 534 pr_err("tty: TIOCSETD failed, reinitializing N_TTY\n");
535 if (tty_ldisc_reinit(tty, N_TTY) < 0) {
536 /* At this point we have tty->ldisc == NULL. */
537 pr_err("tty: reinitializing N_TTY failed\n");
538 }
539 }
580 } 540 }
581 541
582 if (tty->ldisc->ops->num != old_ldisc->ops->num && tty->ops->set_ldisc) { 542 if (tty->ldisc && tty->ldisc->ops->num != old_disc &&
543 tty->ops->set_ldisc) {
583 down_read(&tty->termios_rwsem); 544 down_read(&tty->termios_rwsem);
584 tty->ops->set_ldisc(tty); 545 tty->ops->set_ldisc(tty);
585 up_read(&tty->termios_rwsem); 546 up_read(&tty->termios_rwsem);
586 } 547 }
587 548
588 /* At this point we hold a reference to the new ldisc and a
589 reference to the old ldisc, or we hold two references to
590 the old ldisc (if it was restored as part of error cleanup
591 above). In either case, releasing a single reference from
592 the old ldisc is correct. */
593 new_ldisc = old_ldisc;
594out: 549out:
595 tty_ldisc_unlock(tty); 550 tty_ldisc_unlock(tty);
596 551
@@ -598,7 +553,6 @@ out:
598 already running */ 553 already running */
599 tty_buffer_restart_work(tty->port); 554 tty_buffer_restart_work(tty->port);
600err: 555err:
601 tty_ldisc_put(new_ldisc); /* drop the extra reference */
602 tty_unlock(tty); 556 tty_unlock(tty);
603 return retval; 557 return retval;
604} 558}
@@ -659,10 +613,8 @@ int tty_ldisc_reinit(struct tty_struct *tty, int disc)
659 int retval; 613 int retval;
660 614
661 ld = tty_ldisc_get(tty, disc); 615 ld = tty_ldisc_get(tty, disc);
662 if (IS_ERR(ld)) { 616 if (IS_ERR(ld))
663 BUG_ON(disc == N_TTY);
664 return PTR_ERR(ld); 617 return PTR_ERR(ld);
665 }
666 618
667 if (tty->ldisc) { 619 if (tty->ldisc) {
668 tty_ldisc_close(tty, tty->ldisc); 620 tty_ldisc_close(tty, tty->ldisc);
@@ -674,10 +626,8 @@ int tty_ldisc_reinit(struct tty_struct *tty, int disc)
674 tty_set_termios_ldisc(tty, disc); 626 tty_set_termios_ldisc(tty, disc);
675 retval = tty_ldisc_open(tty, tty->ldisc); 627 retval = tty_ldisc_open(tty, tty->ldisc);
676 if (retval) { 628 if (retval) {
677 if (!WARN_ON(disc == N_TTY)) { 629 tty_ldisc_put(tty->ldisc);
678 tty_ldisc_put(tty->ldisc); 630 tty->ldisc = NULL;
679 tty->ldisc = NULL;
680 }
681 } 631 }
682 return retval; 632 return retval;
683} 633}
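After the tty_ldisc rework above, tty_set_ldisc() is built entirely on tty_ldisc_reinit(): try the requested discipline, fall back to the previous one on failure, then to N_TTY, and only if even that fails accept tty->ldisc == NULL. A toy sketch of the fallback chain, where reinit() is a stand-in that fails for one chosen value:

#include <stdio.h>

#define N_DEFAULT 0
#define BAD       7

static int current_disc = N_DEFAULT;

static int reinit(int disc)
{
	if (disc == BAD)
		return -1;          /* simulate ldisc open failure */
	current_disc = disc;
	return 0;
}

static int set_disc(int new_disc)
{
	int old_disc = current_disc;
	int ret = reinit(new_disc);

	if (ret < 0) {
		/* back to the old one, or the default if we can't */
		if (reinit(old_disc) < 0 && reinit(N_DEFAULT) < 0)
			fprintf(stderr, "no discipline left\n");
	}
	return ret;
}

int main(void)
{
	set_disc(2);
	set_disc(BAD);              /* fails, falls back to 2 */
	printf("current=%d\n", current_disc);
	return 0;
}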
diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
index c5f0fc906136..8af8d9542663 100644
--- a/drivers/tty/vt/keyboard.c
+++ b/drivers/tty/vt/keyboard.c
@@ -28,7 +28,6 @@
28#include <linux/module.h> 28#include <linux/module.h>
29#include <linux/sched/signal.h> 29#include <linux/sched/signal.h>
30#include <linux/sched/debug.h> 30#include <linux/sched/debug.h>
31#include <linux/sched/debug.h>
32#include <linux/tty.h> 31#include <linux/tty.h>
33#include <linux/tty_flip.h> 32#include <linux/tty_flip.h>
34#include <linux/mm.h> 33#include <linux/mm.h>
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
index f03692ec5520..8fb309a0ff6b 100644
--- a/drivers/usb/class/usbtmc.c
+++ b/drivers/usb/class/usbtmc.c
@@ -1381,7 +1381,7 @@ static int usbtmc_probe(struct usb_interface *intf,
1381 1381
1382 dev_dbg(&intf->dev, "%s called\n", __func__); 1382 dev_dbg(&intf->dev, "%s called\n", __func__);
1383 1383
1384 data = kmalloc(sizeof(*data), GFP_KERNEL); 1384 data = kzalloc(sizeof(*data), GFP_KERNEL);
1385 if (!data) 1385 if (!data)
1386 return -ENOMEM; 1386 return -ENOMEM;
1387 1387
@@ -1444,6 +1444,13 @@ static int usbtmc_probe(struct usb_interface *intf,
1444 break; 1444 break;
1445 } 1445 }
1446 } 1446 }
1447
1448 if (!data->bulk_out || !data->bulk_in) {
1449 dev_err(&intf->dev, "bulk endpoints not found\n");
1450 retcode = -ENODEV;
1451 goto err_put;
1452 }
1453
1447 /* Find int endpoint */ 1454 /* Find int endpoint */
1448 for (n = 0; n < iface_desc->desc.bNumEndpoints; n++) { 1455 for (n = 0; n < iface_desc->desc.bNumEndpoints; n++) {
1449 endpoint = &iface_desc->endpoint[n].desc; 1456 endpoint = &iface_desc->endpoint[n].desc;
@@ -1469,8 +1476,10 @@ static int usbtmc_probe(struct usb_interface *intf,
1469 if (data->iin_ep_present) { 1476 if (data->iin_ep_present) {
1470 /* allocate int urb */ 1477 /* allocate int urb */
1471 data->iin_urb = usb_alloc_urb(0, GFP_KERNEL); 1478 data->iin_urb = usb_alloc_urb(0, GFP_KERNEL);
1472 if (!data->iin_urb) 1479 if (!data->iin_urb) {
1480 retcode = -ENOMEM;
1473 goto error_register; 1481 goto error_register;
1482 }
1474 1483
1475 /* Protect interrupt in endpoint data until iin_urb is freed */ 1484 /* Protect interrupt in endpoint data until iin_urb is freed */
1476 kref_get(&data->kref); 1485 kref_get(&data->kref);
@@ -1478,8 +1487,10 @@ static int usbtmc_probe(struct usb_interface *intf,
1478 /* allocate buffer for interrupt in */ 1487 /* allocate buffer for interrupt in */
1479 data->iin_buffer = kmalloc(data->iin_wMaxPacketSize, 1488 data->iin_buffer = kmalloc(data->iin_wMaxPacketSize,
1480 GFP_KERNEL); 1489 GFP_KERNEL);
1481 if (!data->iin_buffer) 1490 if (!data->iin_buffer) {
1491 retcode = -ENOMEM;
1482 goto error_register; 1492 goto error_register;
1493 }
1483 1494
1484 /* fill interrupt urb */ 1495 /* fill interrupt urb */
1485 usb_fill_int_urb(data->iin_urb, data->usb_dev, 1496 usb_fill_int_urb(data->iin_urb, data->usb_dev,
@@ -1512,6 +1523,7 @@ error_register:
1512 sysfs_remove_group(&intf->dev.kobj, &capability_attr_grp); 1523 sysfs_remove_group(&intf->dev.kobj, &capability_attr_grp);
1513 sysfs_remove_group(&intf->dev.kobj, &data_attr_grp); 1524 sysfs_remove_group(&intf->dev.kobj, &data_attr_grp);
1514 usbtmc_free_int(data); 1525 usbtmc_free_int(data);
1526err_put:
1515 kref_put(&data->kref, usbtmc_delete); 1527 kref_put(&data->kref, usbtmc_delete);
1516 return retcode; 1528 return retcode;
1517} 1529}
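The usbtmc hunks above fix two classic probe() error-path bugs: bailing out without validating that both bulk endpoints exist, and jumping to the unwind label without first assigning retcode, which would return stale or zero status on allocation failure. A skeletal sketch of the set-the-code-then-goto style, with made-up resource names:

#include <stdio.h>
#include <stdlib.h>

struct dev { void *urb; void *buffer; };

static int probe(int fail_urb, int fail_buf)
{
	struct dev *d = calloc(1, sizeof(*d));
	int retcode;

	if (!d)
		return -12;                 /* -ENOMEM */

	d->urb = fail_urb ? NULL : malloc(8);
	if (!d->urb) {
		retcode = -12;              /* set the code before jumping */
		goto error_register;
	}

	d->buffer = fail_buf ? NULL : malloc(8);
	if (!d->buffer) {
		retcode = -12;
		goto error_register;
	}

	printf("probe ok\n");
	free(d->buffer);
	free(d->urb);
	free(d);
	return 0;

error_register:
	free(d->urb);                       /* unwind in reverse order */
	free(d);
	return retcode;
}

int main(void)
{
	printf("%d\n", probe(0, 1));
	return 0;
}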
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 25dbd8c7aec7..4be52c602e9b 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -280,6 +280,16 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
280 280
281 /* 281 /*
282 * Adjust bInterval for quirked devices. 282 * Adjust bInterval for quirked devices.
283 */
284 /*
285 * This quirk fixes bIntervals reported in ms.
286 */
287 if (to_usb_device(ddev)->quirks &
288 USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL) {
289 n = clamp(fls(d->bInterval) + 3, i, j);
290 i = j = n;
291 }
292 /*
283 * This quirk fixes bIntervals reported in 293 * This quirk fixes bIntervals reported in
284 * linear microframes. 294 * linear microframes.
285 */ 295 */
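The new quirk above handles devices that report an interrupt bInterval in milliseconds instead of the log2 microframe encoding: the encoded period is 2^(bInterval-1) microframes and 1 ms is 8 = 2^3 microframes, so fls(ms) + 3 picks the nearest valid exponent. A quick check of that arithmetic, where fls_() is a plain C re-implementation of the kernel's fls():

#include <stdio.h>

static int fls_(unsigned int x)        /* find last set bit, 1-based */
{
	int n = 0;
	while (x) { n++; x >>= 1; }
	return n;
}

int main(void)
{
	unsigned int ms;

	for (ms = 1; ms <= 16; ms <<= 1)
		printf("%2u ms -> bInterval exponent %d\n", ms, fls_(ms) + 3);
	return 0;
}

For 1 ms this yields exponent 4, i.e. a period of 2^(4-1) = 8 microframes = 1 ms, as expected.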
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 612fab6e54fb..79bdca5cb9c7 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -520,8 +520,10 @@ static int rh_call_control (struct usb_hcd *hcd, struct urb *urb)
520 */ 520 */
521 tbuf_size = max_t(u16, sizeof(struct usb_hub_descriptor), wLength); 521 tbuf_size = max_t(u16, sizeof(struct usb_hub_descriptor), wLength);
522 tbuf = kzalloc(tbuf_size, GFP_KERNEL); 522 tbuf = kzalloc(tbuf_size, GFP_KERNEL);
523 if (!tbuf) 523 if (!tbuf) {
524 return -ENOMEM; 524 status = -ENOMEM;
525 goto err_alloc;
526 }
525 527
526 bufp = tbuf; 528 bufp = tbuf;
527 529
@@ -734,6 +736,7 @@ error:
734 } 736 }
735 737
736 kfree(tbuf); 738 kfree(tbuf);
739 err_alloc:
737 740
738 /* any errors get returned through the urb completion */ 741 /* any errors get returned through the urb completion */
739 spin_lock_irq(&hcd_root_hub_lock); 742 spin_lock_irq(&hcd_root_hub_lock);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index f0dd08198d74..5286bf67869a 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -4275,7 +4275,7 @@ static void hub_set_initial_usb2_lpm_policy(struct usb_device *udev)
4275 struct usb_hub *hub = usb_hub_to_struct_hub(udev->parent); 4275 struct usb_hub *hub = usb_hub_to_struct_hub(udev->parent);
4276 int connect_type = USB_PORT_CONNECT_TYPE_UNKNOWN; 4276 int connect_type = USB_PORT_CONNECT_TYPE_UNKNOWN;
4277 4277
4278 if (!udev->usb2_hw_lpm_capable) 4278 if (!udev->usb2_hw_lpm_capable || !udev->bos)
4279 return; 4279 return;
4280 4280
4281 if (hub) 4281 if (hub)
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 24f9f98968a5..96b21b0dac1e 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -170,6 +170,14 @@ static const struct usb_device_id usb_quirk_list[] = {
170 /* M-Systems Flash Disk Pioneers */ 170 /* M-Systems Flash Disk Pioneers */
171 { USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME }, 171 { USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME },
172 172
173 /* Baum Vario Ultra */
174 { USB_DEVICE(0x0904, 0x6101), .driver_info =
175 USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL },
176 { USB_DEVICE(0x0904, 0x6102), .driver_info =
177 USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL },
178 { USB_DEVICE(0x0904, 0x6103), .driver_info =
179 USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL },
180
173 /* Keytouch QWERTY Panel keyboard */ 181 /* Keytouch QWERTY Panel keyboard */
174 { USB_DEVICE(0x0926, 0x3333), .driver_info = 182 { USB_DEVICE(0x0926, 0x3333), .driver_info =
175 USB_QUIRK_CONFIG_INTF_STRINGS }, 183 USB_QUIRK_CONFIG_INTF_STRINGS },
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 0d75158e43fe..79e7a3480d51 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -171,6 +171,7 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
171 int status) 171 int status)
172{ 172{
173 struct dwc3 *dwc = dep->dwc; 173 struct dwc3 *dwc = dep->dwc;
174 unsigned int unmap_after_complete = false;
174 175
175 req->started = false; 176 req->started = false;
176 list_del(&req->list); 177 list_del(&req->list);
@@ -180,11 +181,19 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
180 if (req->request.status == -EINPROGRESS) 181 if (req->request.status == -EINPROGRESS)
181 req->request.status = status; 182 req->request.status = status;
182 183
183 if (dwc->ep0_bounced && dep->number <= 1) 184 /*
185 * NOTICE we don't want to unmap before calling ->complete() if we're
186 * dealing with a bounced ep0 request. If we unmap it here, we would end
187 * up overwriting the contents of req->buf and this could confuse the
188 * gadget driver.
189 */
190 if (dwc->ep0_bounced && dep->number <= 1) {
184 dwc->ep0_bounced = false; 191 dwc->ep0_bounced = false;
185 192 unmap_after_complete = true;
186 usb_gadget_unmap_request_by_dev(dwc->sysdev, 193 } else {
187 &req->request, req->direction); 194 usb_gadget_unmap_request_by_dev(dwc->sysdev,
195 &req->request, req->direction);
196 }
188 197
189 trace_dwc3_gadget_giveback(req); 198 trace_dwc3_gadget_giveback(req);
190 199
@@ -192,6 +201,10 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
192 usb_gadget_giveback_request(&dep->endpoint, &req->request); 201 usb_gadget_giveback_request(&dep->endpoint, &req->request);
193 spin_lock(&dwc->lock); 202 spin_lock(&dwc->lock);
194 203
204 if (unmap_after_complete)
205 usb_gadget_unmap_request_by_dev(dwc->sysdev,
206 &req->request, req->direction);
207
195 if (dep->number > 1) 208 if (dep->number > 1)
196 pm_runtime_put(dwc->dev); 209 pm_runtime_put(dwc->dev);
197} 210}
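The dwc3 giveback change above is an ordering fix: for a bounced ep0 request the completion handler may still read req->buf, so the DMA unmap is recorded in unmap_after_complete and performed only after ->complete() returns. A sketch of that deferred-cleanup shape, with stubs standing in for the DMA API:

#include <stdbool.h>
#include <stdio.h>

struct request { char buf[16]; };

static void dma_unmap(struct request *req)
{
	printf("unmap %p\n", (void *)req);
}

static void complete_cb(struct request *req)
{
	printf("complete, buf[0]=%d\n", req->buf[0]);
}

static void giveback(struct request *req, bool bounced)
{
	bool unmap_after_complete = false;

	if (bounced)
		unmap_after_complete = true;   /* callback reads req->buf */
	else
		dma_unmap(req);

	complete_cb(req);

	if (unmap_after_complete)
		dma_unmap(req);                /* safe now */
}

int main(void)
{
	struct request r = { .buf = { 42 } };
	giveback(&r, true);
	return 0;
}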
diff --git a/drivers/usb/gadget/function/f_acm.c b/drivers/usb/gadget/function/f_acm.c
index a30766ca4226..5e3828d9dac7 100644
--- a/drivers/usb/gadget/function/f_acm.c
+++ b/drivers/usb/gadget/function/f_acm.c
@@ -535,13 +535,15 @@ static int acm_notify_serial_state(struct f_acm *acm)
535{ 535{
536 struct usb_composite_dev *cdev = acm->port.func.config->cdev; 536 struct usb_composite_dev *cdev = acm->port.func.config->cdev;
537 int status; 537 int status;
538 __le16 serial_state;
538 539
539 spin_lock(&acm->lock); 540 spin_lock(&acm->lock);
540 if (acm->notify_req) { 541 if (acm->notify_req) {
541 dev_dbg(&cdev->gadget->dev, "acm ttyGS%d serial state %04x\n", 542 dev_dbg(&cdev->gadget->dev, "acm ttyGS%d serial state %04x\n",
542 acm->port_num, acm->serial_state); 543 acm->port_num, acm->serial_state);
544 serial_state = cpu_to_le16(acm->serial_state);
543 status = acm_cdc_notify(acm, USB_CDC_NOTIFY_SERIAL_STATE, 545 status = acm_cdc_notify(acm, USB_CDC_NOTIFY_SERIAL_STATE,
544 0, &acm->serial_state, sizeof(acm->serial_state)); 546 0, &serial_state, sizeof(acm->serial_state));
545 } else { 547 } else {
546 acm->pending = true; 548 acm->pending = true;
547 status = 0; 549 status = 0;
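The f_acm fix above stops passing the native-endian acm->serial_state field directly to acm_cdc_notify() and sends a cpu_to_le16() copy instead, so big-endian hosts emit the bytes in the order the wire format expects. A portable sketch of the byte-order conversion:

#include <stdint.h>
#include <stdio.h>

static void put_le16(uint8_t *buf, uint16_t v)
{
	buf[0] = v & 0xff;          /* least significant byte first */
	buf[1] = v >> 8;
}

int main(void)
{
	uint16_t serial_state = 0x0304;  /* native order */
	uint8_t wire[2];

	put_le16(wire, serial_state);
	printf("wire bytes: %02x %02x\n", wire[0], wire[1]);
	return 0;
}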
diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
index 89b48bcc377a..5eea44823ca0 100644
--- a/drivers/usb/gadget/function/f_hid.c
+++ b/drivers/usb/gadget/function/f_hid.c
@@ -367,7 +367,7 @@ try_again:
367 count = min_t(unsigned, count, hidg->report_length); 367 count = min_t(unsigned, count, hidg->report_length);
368 368
369 spin_unlock_irqrestore(&hidg->write_spinlock, flags); 369 spin_unlock_irqrestore(&hidg->write_spinlock, flags);
370 status = copy_from_user(hidg->req->buf, buffer, count); 370 status = copy_from_user(req->buf, buffer, count);
371 371
372 if (status != 0) { 372 if (status != 0) {
373 ERROR(hidg->func.config->cdev, 373 ERROR(hidg->func.config->cdev,
@@ -378,9 +378,9 @@ try_again:
378 378
379 spin_lock_irqsave(&hidg->write_spinlock, flags); 379 spin_lock_irqsave(&hidg->write_spinlock, flags);
380 380
381 /* we our function has been disabled by host */ 381 /* when our function has been disabled by host */
382 if (!hidg->req) { 382 if (!hidg->req) {
383 free_ep_req(hidg->in_ep, hidg->req); 383 free_ep_req(hidg->in_ep, req);
384 /* 384 /*
385 * TODO 385 * TODO
386 * Should we fail with error here? 386 * Should we fail with error here?
@@ -394,7 +394,7 @@ try_again:
394 req->complete = f_hidg_req_complete; 394 req->complete = f_hidg_req_complete;
395 req->context = hidg; 395 req->context = hidg;
396 396
397 status = usb_ep_queue(hidg->in_ep, hidg->req, GFP_ATOMIC); 397 status = usb_ep_queue(hidg->in_ep, req, GFP_ATOMIC);
398 if (status < 0) { 398 if (status < 0) {
399 ERROR(hidg->func.config->cdev, 399 ERROR(hidg->func.config->cdev,
400 "usb_ep_queue error on int endpoint %zd\n", status); 400 "usb_ep_queue error on int endpoint %zd\n", status);
diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c
index 29b41b5dee04..f8a1881609a2 100644
--- a/drivers/usb/gadget/function/f_uvc.c
+++ b/drivers/usb/gadget/function/f_uvc.c
@@ -594,6 +594,14 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f)
594 opts->streaming_maxpacket = clamp(opts->streaming_maxpacket, 1U, 3072U); 594 opts->streaming_maxpacket = clamp(opts->streaming_maxpacket, 1U, 3072U);
595 opts->streaming_maxburst = min(opts->streaming_maxburst, 15U); 595 opts->streaming_maxburst = min(opts->streaming_maxburst, 15U);
596 596
597 /* For SS, wMaxPacketSize has to be 1024 if bMaxBurst is not 0 */
598 if (opts->streaming_maxburst &&
599 (opts->streaming_maxpacket % 1024) != 0) {
600 opts->streaming_maxpacket = roundup(opts->streaming_maxpacket, 1024);
601 INFO(cdev, "overriding streaming_maxpacket to %d\n",
602 opts->streaming_maxpacket);
603 }
604
597 /* Fill in the FS/HS/SS Video Streaming specific descriptors from the 605 /* Fill in the FS/HS/SS Video Streaming specific descriptors from the
598 * module parameters. 606 * module parameters.
599 * 607 *
@@ -625,7 +633,7 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f)
625 uvc_ss_streaming_comp.bMaxBurst = opts->streaming_maxburst; 633 uvc_ss_streaming_comp.bMaxBurst = opts->streaming_maxburst;
626 uvc_ss_streaming_comp.wBytesPerInterval = 634 uvc_ss_streaming_comp.wBytesPerInterval =
627 cpu_to_le16(max_packet_size * max_packet_mult * 635 cpu_to_le16(max_packet_size * max_packet_mult *
628 opts->streaming_maxburst); 636 (opts->streaming_maxburst + 1));
629 637
630 /* Allocate endpoints. */ 638 /* Allocate endpoints. */
631 ep = usb_ep_autoconfig(cdev->gadget, &uvc_control_ep); 639 ep = usb_ep_autoconfig(cdev->gadget, &uvc_control_ep);
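Two related f_uvc fixes above: a SuperSpeed endpoint must use a 1024-multiple wMaxPacketSize whenever bursting is enabled, and bMaxBurst encodes "bursts minus one", so the per-interval byte budget must multiply by (maxburst + 1); the old code yielded a zero wBytesPerInterval whenever bursting was disabled. A quick check of the second point:

#include <stdio.h>

int main(void)
{
	unsigned int max_packet_size = 1024, max_packet_mult = 1;
	unsigned int maxburst;

	for (maxburst = 0; maxburst <= 2; maxburst++)
		printf("maxburst=%u -> wBytesPerInterval=%u\n", maxburst,
		       max_packet_size * max_packet_mult * (maxburst + 1));
	return 0;
}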
diff --git a/drivers/usb/gadget/udc/pch_udc.c b/drivers/usb/gadget/udc/pch_udc.c
index a97da645c1b9..8a365aad66fe 100644
--- a/drivers/usb/gadget/udc/pch_udc.c
+++ b/drivers/usb/gadget/udc/pch_udc.c
@@ -1523,7 +1523,6 @@ static void pch_udc_free_dma_chain(struct pch_udc_dev *dev,
1523 td = phys_to_virt(addr); 1523 td = phys_to_virt(addr);
1524 addr2 = (dma_addr_t)td->next; 1524 addr2 = (dma_addr_t)td->next;
1525 pci_pool_free(dev->data_requests, td, addr); 1525 pci_pool_free(dev->data_requests, td, addr);
1526 td->next = 0x00;
1527 addr = addr2; 1526 addr = addr2;
1528 } 1527 }
1529 req->chain_len = 1; 1528 req->chain_len = 1;
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index bd02a6cd8e2c..6ed468fa7d5e 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -344,6 +344,7 @@ MODULE_DEVICE_TABLE(acpi, usb_xhci_acpi_match);
344static struct platform_driver usb_xhci_driver = { 344static struct platform_driver usb_xhci_driver = {
345 .probe = xhci_plat_probe, 345 .probe = xhci_plat_probe,
346 .remove = xhci_plat_remove, 346 .remove = xhci_plat_remove,
347 .shutdown = usb_hcd_platform_shutdown,
347 .driver = { 348 .driver = {
348 .name = "xhci-hcd", 349 .name = "xhci-hcd",
349 .pm = DEV_PM_OPS, 350 .pm = DEV_PM_OPS,
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index d9936c771fa0..a3309aa02993 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1989,6 +1989,9 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
1989 case TRB_NORMAL: 1989 case TRB_NORMAL:
1990 td->urb->actual_length = requested - remaining; 1990 td->urb->actual_length = requested - remaining;
1991 goto finish_td; 1991 goto finish_td;
1992 case TRB_STATUS:
1993 td->urb->actual_length = requested;
1994 goto finish_td;
1992 default: 1995 default:
1993 xhci_warn(xhci, "WARN: unexpected TRB Type %d\n", 1996 xhci_warn(xhci, "WARN: unexpected TRB Type %d\n",
1994 trb_type); 1997 trb_type);
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 50aee8b7718b..953fd8f62df0 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -1477,6 +1477,7 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1477 struct xhci_ring *ep_ring; 1477 struct xhci_ring *ep_ring;
1478 struct xhci_virt_ep *ep; 1478 struct xhci_virt_ep *ep;
1479 struct xhci_command *command; 1479 struct xhci_command *command;
1480 struct xhci_virt_device *vdev;
1480 1481
1481 xhci = hcd_to_xhci(hcd); 1482 xhci = hcd_to_xhci(hcd);
1482 spin_lock_irqsave(&xhci->lock, flags); 1483 spin_lock_irqsave(&xhci->lock, flags);
@@ -1485,15 +1486,27 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1485 1486
1486 /* Make sure the URB hasn't completed or been unlinked already */ 1487 /* Make sure the URB hasn't completed or been unlinked already */
1487 ret = usb_hcd_check_unlink_urb(hcd, urb, status); 1488 ret = usb_hcd_check_unlink_urb(hcd, urb, status);
1488 if (ret || !urb->hcpriv) 1489 if (ret)
1489 goto done; 1490 goto done;
1491
1492 /* give back URB now if we can't queue it for cancel */
1493 vdev = xhci->devs[urb->dev->slot_id];
1494 urb_priv = urb->hcpriv;
1495 if (!vdev || !urb_priv)
1496 goto err_giveback;
1497
1498 ep_index = xhci_get_endpoint_index(&urb->ep->desc);
1499 ep = &vdev->eps[ep_index];
1500 ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
1501 if (!ep || !ep_ring)
1502 goto err_giveback;
1503
1490 temp = readl(&xhci->op_regs->status); 1504 temp = readl(&xhci->op_regs->status);
1491 if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_HALTED)) { 1505 if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_HALTED)) {
1492 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, 1506 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
1493 "HW died, freeing TD."); 1507 "HW died, freeing TD.");
1494 urb_priv = urb->hcpriv;
1495 for (i = urb_priv->num_tds_done; 1508 for (i = urb_priv->num_tds_done;
1496 i < urb_priv->num_tds && xhci->devs[urb->dev->slot_id]; 1509 i < urb_priv->num_tds;
1497 i++) { 1510 i++) {
1498 td = &urb_priv->td[i]; 1511 td = &urb_priv->td[i];
1499 if (!list_empty(&td->td_list)) 1512 if (!list_empty(&td->td_list))
@@ -1501,23 +1514,9 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1501 if (!list_empty(&td->cancelled_td_list)) 1514 if (!list_empty(&td->cancelled_td_list))
1502 list_del_init(&td->cancelled_td_list); 1515 list_del_init(&td->cancelled_td_list);
1503 } 1516 }
1504 1517 goto err_giveback;
1505 usb_hcd_unlink_urb_from_ep(hcd, urb);
1506 spin_unlock_irqrestore(&xhci->lock, flags);
1507 usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN);
1508 xhci_urb_free_priv(urb_priv);
1509 return ret;
1510 } 1518 }
1511 1519
1512 ep_index = xhci_get_endpoint_index(&urb->ep->desc);
1513 ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
1514 ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
1515 if (!ep_ring) {
1516 ret = -EINVAL;
1517 goto done;
1518 }
1519
1520 urb_priv = urb->hcpriv;
1521 i = urb_priv->num_tds_done; 1520 i = urb_priv->num_tds_done;
1522 if (i < urb_priv->num_tds) 1521 if (i < urb_priv->num_tds)
1523 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, 1522 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
@@ -1554,6 +1553,14 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1554done: 1553done:
1555 spin_unlock_irqrestore(&xhci->lock, flags); 1554 spin_unlock_irqrestore(&xhci->lock, flags);
1556 return ret; 1555 return ret;
1556
1557err_giveback:
1558 if (urb_priv)
1559 xhci_urb_free_priv(urb_priv);
1560 usb_hcd_unlink_urb_from_ep(hcd, urb);
1561 spin_unlock_irqrestore(&xhci->lock, flags);
1562 usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN);
1563 return ret;
1557} 1564}
1558 1565
1559/* Drop an endpoint from a new bandwidth configuration for this device. 1566/* Drop an endpoint from a new bandwidth configuration for this device.
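The xhci_urb_dequeue rework above funnels every "cannot queue this URB for cancellation" case (no device, no private data, no ring, dead controller) into a single err_giveback label that frees the private data, unlinks the URB, and gives it back with -ESHUTDOWN. A skeletal sketch of that single-exit style, with illustrative names:

#include <stdio.h>
#include <stdlib.h>

struct urb { void *priv; };

static int dequeue(struct urb *urb, int have_device, int have_ring)
{
	if (!urb->priv || !have_device)
		goto err_giveback;
	if (!have_ring)
		goto err_giveback;

	printf("queued for cancel\n");
	return 0;

err_giveback:
	free(urb->priv);               /* single cleanup path */
	urb->priv = NULL;
	printf("given back immediately\n");
	return 0;
}

int main(void)
{
	struct urb u = { .priv = malloc(8) };

	dequeue(&u, 1, 0);             /* no ring: immediate giveback */
	return 0;
}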
diff --git a/drivers/usb/misc/idmouse.c b/drivers/usb/misc/idmouse.c
index 8b9fd7534f69..502bfe30a077 100644
--- a/drivers/usb/misc/idmouse.c
+++ b/drivers/usb/misc/idmouse.c
@@ -347,6 +347,9 @@ static int idmouse_probe(struct usb_interface *interface,
347 if (iface_desc->desc.bInterfaceClass != 0x0A) 347 if (iface_desc->desc.bInterfaceClass != 0x0A)
348 return -ENODEV; 348 return -ENODEV;
349 349
350 if (iface_desc->desc.bNumEndpoints < 1)
351 return -ENODEV;
352
350 /* allocate memory for our device state and initialize it */ 353 /* allocate memory for our device state and initialize it */
351 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 354 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
352 if (dev == NULL) 355 if (dev == NULL)
diff --git a/drivers/usb/misc/lvstest.c b/drivers/usb/misc/lvstest.c
index 77176511658f..d3d124753266 100644
--- a/drivers/usb/misc/lvstest.c
+++ b/drivers/usb/misc/lvstest.c
@@ -366,6 +366,10 @@ static int lvs_rh_probe(struct usb_interface *intf,
366 366
367 hdev = interface_to_usbdev(intf); 367 hdev = interface_to_usbdev(intf);
368 desc = intf->cur_altsetting; 368 desc = intf->cur_altsetting;
369
370 if (desc->desc.bNumEndpoints < 1)
371 return -ENODEV;
372
369 endpoint = &desc->endpoint[0].desc; 373 endpoint = &desc->endpoint[0].desc;
370 374
371 /* valid only for SS root hub */ 375 /* valid only for SS root hub */
diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c
index e45a3a680db8..07014cad6dbe 100644
--- a/drivers/usb/misc/uss720.c
+++ b/drivers/usb/misc/uss720.c
@@ -709,6 +709,11 @@ static int uss720_probe(struct usb_interface *intf,
709 709
710 interface = intf->cur_altsetting; 710 interface = intf->cur_altsetting;
711 711
712 if (interface->desc.bNumEndpoints < 3) {
713 usb_put_dev(usbdev);
714 return -ENODEV;
715 }
716
712 /* 717 /*
713 * Allocate parport interface 718 * Allocate parport interface
714 */ 719 */
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index d8bae6ca8904..0c3664ab705e 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -2490,8 +2490,8 @@ static int musb_remove(struct platform_device *pdev)
2490 musb_host_cleanup(musb); 2490 musb_host_cleanup(musb);
2491 musb_gadget_cleanup(musb); 2491 musb_gadget_cleanup(musb);
2492 2492
2493 spin_lock_irqsave(&musb->lock, flags);
2494 musb_platform_disable(musb); 2493 musb_platform_disable(musb);
2494 spin_lock_irqsave(&musb->lock, flags);
2495 musb_disable_interrupts(musb); 2495 musb_disable_interrupts(musb);
2496 musb_writeb(musb->mregs, MUSB_DEVCTL, 0); 2496 musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
2497 spin_unlock_irqrestore(&musb->lock, flags); 2497 spin_unlock_irqrestore(&musb->lock, flags);
diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c
index 00e272bfee39..355655f8a3fb 100644
--- a/drivers/usb/musb/musb_cppi41.c
+++ b/drivers/usb/musb/musb_cppi41.c
@@ -238,8 +238,27 @@ static void cppi41_dma_callback(void *private_data,
238 transferred < cppi41_channel->packet_sz) 238 transferred < cppi41_channel->packet_sz)
239 cppi41_channel->prog_len = 0; 239 cppi41_channel->prog_len = 0;
240 240
241 if (cppi41_channel->is_tx) 241 if (cppi41_channel->is_tx) {
242 empty = musb_is_tx_fifo_empty(hw_ep); 242 u8 type;
243
244 if (is_host_active(musb))
245 type = hw_ep->out_qh->type;
246 else
247 type = hw_ep->ep_in.type;
248
249 if (type == USB_ENDPOINT_XFER_ISOC)
250 /*
251 * Don't use the early-TX-interrupt workaround below
 252 * for Isoch transfers. Since Isoch transfers are
 253 * periodic, by the time the next transfer is
 254 * scheduled, the current one should already be done.
255 *
256 * This avoids audio playback underrun issue.
257 */
258 empty = true;
259 else
260 empty = musb_is_tx_fifo_empty(hw_ep);
261 }
243 262
244 if (!cppi41_channel->is_tx || empty) { 263 if (!cppi41_channel->is_tx || empty) {
245 cppi41_trans_done(cppi41_channel); 264 cppi41_trans_done(cppi41_channel);
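
A standalone sketch of the policy the comment above encodes: treat the
TX FIFO as ready for isochronous endpoints and only poll the FIFO state
for other transfer types. tx_fifo_can_complete() is a hypothetical
helper, not musb code:

    static bool tx_fifo_can_complete(u8 xfer_type, bool fifo_empty)
    {
            /* Isoch transfers are periodic: the current one is done by
             * the time the next is scheduled, so skip the early-TX
             * workaround and avoid audio playback underruns. */
            if (xfer_type == USB_ENDPOINT_XFER_ISOC)
                    return true;

            return fifo_empty;
    }
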
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
index 7c047c4a2565..9c7ee26ef388 100644
--- a/drivers/usb/musb/musb_dsps.c
+++ b/drivers/usb/musb/musb_dsps.c
@@ -933,7 +933,7 @@ static int dsps_probe(struct platform_device *pdev)
933 if (usb_get_dr_mode(&pdev->dev) == USB_DR_MODE_PERIPHERAL) { 933 if (usb_get_dr_mode(&pdev->dev) == USB_DR_MODE_PERIPHERAL) {
934 ret = dsps_setup_optional_vbus_irq(pdev, glue); 934 ret = dsps_setup_optional_vbus_irq(pdev, glue);
935 if (ret) 935 if (ret)
936 return ret; 936 goto err_iounmap;
937 } 937 }
938 938
939 platform_set_drvdata(pdev, glue); 939 platform_set_drvdata(pdev, glue);
@@ -946,6 +946,8 @@ static int dsps_probe(struct platform_device *pdev)
946 946
947err: 947err:
948 pm_runtime_disable(&pdev->dev); 948 pm_runtime_disable(&pdev->dev);
949err_iounmap:
950 iounmap(glue->usbss_base);
949 return ret; 951 return ret;
950} 952}
951 953
@@ -956,6 +958,7 @@ static int dsps_remove(struct platform_device *pdev)
956 platform_device_unregister(glue->musb); 958 platform_device_unregister(glue->musb);
957 959
958 pm_runtime_disable(&pdev->dev); 960 pm_runtime_disable(&pdev->dev);
961 iounmap(glue->usbss_base);
959 962
960 return 0; 963 return 0;
961} 964}
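
The dsps change is an instance of the usual goto-ladder rule: unwind
resources in reverse order of acquisition, so every failure after
ioremap() must pass through iounmap(). A reduced sketch with
hypothetical names (setup_vbus_irq() stands in for the failing step):

    static int example_probe(struct platform_device *pdev)
    {
            struct resource *res = &pdev->resource[0];  /* illustrative */
            void __iomem *base;
            int ret;

            base = ioremap(res->start, resource_size(res)); /* first ... */
            if (!base)
                    return -ENOMEM;

            ret = setup_vbus_irq(pdev);     /* hypothetical later step */
            if (ret)
                    goto err_iounmap;       /* ... so unwound last */

            return 0;

    err_iounmap:
            iounmap(base);
            return ret;
    }
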
diff --git a/drivers/usb/phy/phy-isp1301.c b/drivers/usb/phy/phy-isp1301.c
index b3b33cf7ddf6..f333024660b4 100644
--- a/drivers/usb/phy/phy-isp1301.c
+++ b/drivers/usb/phy/phy-isp1301.c
@@ -136,7 +136,7 @@ static int isp1301_remove(struct i2c_client *client)
136static struct i2c_driver isp1301_driver = { 136static struct i2c_driver isp1301_driver = {
137 .driver = { 137 .driver = {
138 .name = DRV_NAME, 138 .name = DRV_NAME,
139 .of_match_table = of_match_ptr(isp1301_of_match), 139 .of_match_table = isp1301_of_match,
140 }, 140 },
141 .probe = isp1301_probe, 141 .probe = isp1301_probe,
142 .remove = isp1301_remove, 142 .remove = isp1301_remove,
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 42cc72e54c05..af67a0de6b5d 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -233,6 +233,14 @@ static void option_instat_callback(struct urb *urb);
233#define BANDRICH_PRODUCT_1012 0x1012 233#define BANDRICH_PRODUCT_1012 0x1012
234 234
235#define QUALCOMM_VENDOR_ID 0x05C6 235#define QUALCOMM_VENDOR_ID 0x05C6
236/* These Quectel products use Qualcomm's vendor ID */
237#define QUECTEL_PRODUCT_UC20 0x9003
238#define QUECTEL_PRODUCT_UC15 0x9090
239
240#define QUECTEL_VENDOR_ID 0x2c7c
241/* These Quectel products use Quectel's vendor ID */
242#define QUECTEL_PRODUCT_EC21 0x0121
243#define QUECTEL_PRODUCT_EC25 0x0125
236 244
237#define CMOTECH_VENDOR_ID 0x16d8 245#define CMOTECH_VENDOR_ID 0x16d8
238#define CMOTECH_PRODUCT_6001 0x6001 246#define CMOTECH_PRODUCT_6001 0x6001
@@ -1161,7 +1169,14 @@ static const struct usb_device_id option_ids[] = {
1161 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */ 1169 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
1162 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */ 1170 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
1163 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */ 1171 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
1164 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9003), /* Quectel UC20 */ 1172 /* Quectel products using Qualcomm vendor ID */
1173 { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC15)},
1174 { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC20),
1175 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
1176 /* Quectel products using Quectel vendor ID */
1177 { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21),
1178 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
1179 { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25),
1165 .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, 1180 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
1166 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) }, 1181 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
1167 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) }, 1182 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 696458db7e3c..38b3f0d8cd58 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -169,6 +169,8 @@ static const struct usb_device_id id_table[] = {
169 {DEVICE_SWI(0x413c, 0x81a9)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */ 169 {DEVICE_SWI(0x413c, 0x81a9)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
170 {DEVICE_SWI(0x413c, 0x81b1)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */ 170 {DEVICE_SWI(0x413c, 0x81b1)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */
171 {DEVICE_SWI(0x413c, 0x81b3)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */ 171 {DEVICE_SWI(0x413c, 0x81b3)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
172 {DEVICE_SWI(0x413c, 0x81b5)}, /* Dell Wireless 5811e QDL */
173 {DEVICE_SWI(0x413c, 0x81b6)}, /* Dell Wireless 5811e QDL */
172 174
173 /* Huawei devices */ 175 /* Huawei devices */
174 {DEVICE_HWI(0x03f0, 0x581d)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei me906e) */ 176 {DEVICE_HWI(0x03f0, 0x581d)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei me906e) */
diff --git a/drivers/usb/wusbcore/wa-hc.c b/drivers/usb/wusbcore/wa-hc.c
index 252c7bd9218a..d01496fd27fe 100644
--- a/drivers/usb/wusbcore/wa-hc.c
+++ b/drivers/usb/wusbcore/wa-hc.c
@@ -39,6 +39,9 @@ int wa_create(struct wahc *wa, struct usb_interface *iface,
39 int result; 39 int result;
40 struct device *dev = &iface->dev; 40 struct device *dev = &iface->dev;
41 41
42 if (iface->cur_altsetting->desc.bNumEndpoints < 3)
43 return -ENODEV;
44
42 result = wa_rpipes_create(wa); 45 result = wa_rpipes_create(wa);
43 if (result < 0) 46 if (result < 0)
44 goto error_rpipes_create; 47 goto error_rpipes_create;
diff --git a/drivers/uwb/hwa-rc.c b/drivers/uwb/hwa-rc.c
index 0aa6c3c29d17..35a1e777b449 100644
--- a/drivers/uwb/hwa-rc.c
+++ b/drivers/uwb/hwa-rc.c
@@ -823,6 +823,9 @@ static int hwarc_probe(struct usb_interface *iface,
823 struct hwarc *hwarc; 823 struct hwarc *hwarc;
824 struct device *dev = &iface->dev; 824 struct device *dev = &iface->dev;
825 825
826 if (iface->cur_altsetting->desc.bNumEndpoints < 1)
827 return -ENODEV;
828
826 result = -ENOMEM; 829 result = -ENOMEM;
827 uwb_rc = uwb_rc_alloc(); 830 uwb_rc = uwb_rc_alloc();
828 if (uwb_rc == NULL) { 831 if (uwb_rc == NULL) {
diff --git a/drivers/uwb/i1480/dfu/usb.c b/drivers/uwb/i1480/dfu/usb.c
index 2bfc846ac071..6345e85822a4 100644
--- a/drivers/uwb/i1480/dfu/usb.c
+++ b/drivers/uwb/i1480/dfu/usb.c
@@ -362,6 +362,9 @@ int i1480_usb_probe(struct usb_interface *iface, const struct usb_device_id *id)
362 result); 362 result);
363 } 363 }
364 364
365 if (iface->cur_altsetting->desc.bNumEndpoints < 1)
366 return -ENODEV;
367
365 result = -ENOMEM; 368 result = -ENOMEM;
366 i1480_usb = kzalloc(sizeof(*i1480_usb), GFP_KERNEL); 369 i1480_usb = kzalloc(sizeof(*i1480_usb), GFP_KERNEL);
367 if (i1480_usb == NULL) { 370 if (i1480_usb == NULL) {
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index 609f4f982c74..561084ab387f 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -403,6 +403,7 @@ static void vfio_group_release(struct kref *kref)
403 struct iommu_group *iommu_group = group->iommu_group; 403 struct iommu_group *iommu_group = group->iommu_group;
404 404
405 WARN_ON(!list_empty(&group->device_list)); 405 WARN_ON(!list_empty(&group->device_list));
406 WARN_ON(group->notifier.head);
406 407
407 list_for_each_entry_safe(unbound, tmp, 408 list_for_each_entry_safe(unbound, tmp,
408 &group->unbound_list, unbound_next) { 409 &group->unbound_list, unbound_next) {
@@ -1573,6 +1574,10 @@ static int vfio_group_fops_open(struct inode *inode, struct file *filep)
1573 return -EBUSY; 1574 return -EBUSY;
1574 } 1575 }
1575 1576
 1577 /* Warn if the previous user didn't clean up; re-init to drop their notifiers */
1578 if (WARN_ON(group->notifier.head))
1579 BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);
1580
1576 filep->private_data = group; 1581 filep->private_data = group;
1577 1582
1578 return 0; 1583 return 0;
@@ -1584,9 +1589,6 @@ static int vfio_group_fops_release(struct inode *inode, struct file *filep)
1584 1589
1585 filep->private_data = NULL; 1590 filep->private_data = NULL;
1586 1591
1587 /* Any user didn't unregister? */
1588 WARN_ON(group->notifier.head);
1589
1590 vfio_group_try_dissolve_container(group); 1592 vfio_group_try_dissolve_container(group);
1591 1593
1592 atomic_dec(&group->opened); 1594 atomic_dec(&group->opened);
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index c26fa1f3ed86..32d2633092a3 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -1182,8 +1182,7 @@ static struct vfio_group *find_iommu_group(struct vfio_domain *domain,
1182 return NULL; 1182 return NULL;
1183} 1183}
1184 1184
1185static bool vfio_iommu_has_resv_msi(struct iommu_group *group, 1185static bool vfio_iommu_has_sw_msi(struct iommu_group *group, phys_addr_t *base)
1186 phys_addr_t *base)
1187{ 1186{
1188 struct list_head group_resv_regions; 1187 struct list_head group_resv_regions;
1189 struct iommu_resv_region *region, *next; 1188 struct iommu_resv_region *region, *next;
@@ -1192,7 +1191,7 @@ static bool vfio_iommu_has_resv_msi(struct iommu_group *group,
1192 INIT_LIST_HEAD(&group_resv_regions); 1191 INIT_LIST_HEAD(&group_resv_regions);
1193 iommu_get_group_resv_regions(group, &group_resv_regions); 1192 iommu_get_group_resv_regions(group, &group_resv_regions);
1194 list_for_each_entry(region, &group_resv_regions, list) { 1193 list_for_each_entry(region, &group_resv_regions, list) {
1195 if (region->type & IOMMU_RESV_MSI) { 1194 if (region->type == IOMMU_RESV_SW_MSI) {
1196 *base = region->start; 1195 *base = region->start;
1197 ret = true; 1196 ret = true;
1198 goto out; 1197 goto out;
@@ -1283,7 +1282,7 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
1283 if (ret) 1282 if (ret)
1284 goto out_domain; 1283 goto out_domain;
1285 1284
1286 resv_msi = vfio_iommu_has_resv_msi(iommu_group, &resv_msi_base); 1285 resv_msi = vfio_iommu_has_sw_msi(iommu_group, &resv_msi_base);
1287 1286
1288 INIT_LIST_HEAD(&domain->group_list); 1287 INIT_LIST_HEAD(&domain->group_list);
1289 list_add(&group->next, &domain->group_list); 1288 list_add(&group->next, &domain->group_list);
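
A note on the comparison change above: the reserved-region type holds an
enumerated kind rather than a bitmask (presumably why the old
`region->type & IOMMU_RESV_MSI` test was unsafe), so the rework both
narrows the match to the software-MSI type and switches to an exact
compare. As an isolated predicate:

    static bool region_is_sw_msi(const struct iommu_resv_region *region)
    {
            /* 'type' holds one enumerated value, not flags: test with
             * equality, never with a bitwise AND. */
            return region->type == IOMMU_RESV_SW_MSI;
    }
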
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index ce5e63d2c66a..44eed8eb0725 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -223,6 +223,46 @@ vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt)
223 return len; 223 return len;
224} 224}
225 225
226static int
227vhost_transport_cancel_pkt(struct vsock_sock *vsk)
228{
229 struct vhost_vsock *vsock;
230 struct virtio_vsock_pkt *pkt, *n;
231 int cnt = 0;
232 LIST_HEAD(freeme);
233
234 /* Find the vhost_vsock according to guest context id */
235 vsock = vhost_vsock_get(vsk->remote_addr.svm_cid);
236 if (!vsock)
237 return -ENODEV;
238
239 spin_lock_bh(&vsock->send_pkt_list_lock);
240 list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) {
241 if (pkt->vsk != vsk)
242 continue;
243 list_move(&pkt->list, &freeme);
244 }
245 spin_unlock_bh(&vsock->send_pkt_list_lock);
246
247 list_for_each_entry_safe(pkt, n, &freeme, list) {
248 if (pkt->reply)
249 cnt++;
250 list_del(&pkt->list);
251 virtio_transport_free_pkt(pkt);
252 }
253
254 if (cnt) {
255 struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
256 int new_cnt;
257
258 new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
259 if (new_cnt + cnt >= tx_vq->num && new_cnt < tx_vq->num)
260 vhost_poll_queue(&tx_vq->poll);
261 }
262
263 return 0;
264}
265
226static struct virtio_vsock_pkt * 266static struct virtio_vsock_pkt *
227vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq, 267vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq,
228 unsigned int out, unsigned int in) 268 unsigned int out, unsigned int in)
@@ -675,6 +715,7 @@ static struct virtio_transport vhost_transport = {
675 .release = virtio_transport_release, 715 .release = virtio_transport_release,
676 .connect = virtio_transport_connect, 716 .connect = virtio_transport_connect,
677 .shutdown = virtio_transport_shutdown, 717 .shutdown = virtio_transport_shutdown,
718 .cancel_pkt = vhost_transport_cancel_pkt,
678 719
679 .dgram_enqueue = virtio_transport_dgram_enqueue, 720 .dgram_enqueue = virtio_transport_dgram_enqueue,
680 .dgram_dequeue = virtio_transport_dgram_dequeue, 721 .dgram_dequeue = virtio_transport_dgram_dequeue,
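
The queued_replies arithmetic in vhost_transport_cancel_pkt() is easiest
to see with numbers: vhost_vsock stops servicing the TX virtqueue once
queued replies reach the vq size, and cancelling packets can drop the
count back under that limit — only then does the queue need a kick. A
hypothetical helper plus a worked case:

    static bool crossed_below_limit(int new_cnt, int cnt, int vq_num)
    {
            /* the old count (new_cnt + cnt) had reached the limit ... */
            /* ... and the post-cancel count is now below it */
            return new_cnt + cnt >= vq_num && new_cnt < vq_num;
    }

    /*
     * e.g. vq_num = 256 with 260 replies queued (TX stalled); cancelling
     * cnt = 10 leaves new_cnt = 250, so crossed_below_limit(250, 10, 256)
     * is true and vhost_poll_queue() restarts the queue. With only 100
     * queued, 90 + 10 >= 256 is false and no spurious kick is issued.
     */
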
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 4e1191508228..34adf9b9c053 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -242,11 +242,11 @@ static inline void update_stat(struct virtio_balloon *vb, int idx,
242 242
243#define pages_to_bytes(x) ((u64)(x) << PAGE_SHIFT) 243#define pages_to_bytes(x) ((u64)(x) << PAGE_SHIFT)
244 244
245static void update_balloon_stats(struct virtio_balloon *vb) 245static unsigned int update_balloon_stats(struct virtio_balloon *vb)
246{ 246{
247 unsigned long events[NR_VM_EVENT_ITEMS]; 247 unsigned long events[NR_VM_EVENT_ITEMS];
248 struct sysinfo i; 248 struct sysinfo i;
249 int idx = 0; 249 unsigned int idx = 0;
250 long available; 250 long available;
251 251
252 all_vm_events(events); 252 all_vm_events(events);
@@ -254,18 +254,22 @@ static void update_balloon_stats(struct virtio_balloon *vb)
254 254
255 available = si_mem_available(); 255 available = si_mem_available();
256 256
257#ifdef CONFIG_VM_EVENT_COUNTERS
257 update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_IN, 258 update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_IN,
258 pages_to_bytes(events[PSWPIN])); 259 pages_to_bytes(events[PSWPIN]));
259 update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_OUT, 260 update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_OUT,
260 pages_to_bytes(events[PSWPOUT])); 261 pages_to_bytes(events[PSWPOUT]));
261 update_stat(vb, idx++, VIRTIO_BALLOON_S_MAJFLT, events[PGMAJFAULT]); 262 update_stat(vb, idx++, VIRTIO_BALLOON_S_MAJFLT, events[PGMAJFAULT]);
262 update_stat(vb, idx++, VIRTIO_BALLOON_S_MINFLT, events[PGFAULT]); 263 update_stat(vb, idx++, VIRTIO_BALLOON_S_MINFLT, events[PGFAULT]);
264#endif
263 update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMFREE, 265 update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMFREE,
264 pages_to_bytes(i.freeram)); 266 pages_to_bytes(i.freeram));
265 update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMTOT, 267 update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMTOT,
266 pages_to_bytes(i.totalram)); 268 pages_to_bytes(i.totalram));
267 update_stat(vb, idx++, VIRTIO_BALLOON_S_AVAIL, 269 update_stat(vb, idx++, VIRTIO_BALLOON_S_AVAIL,
268 pages_to_bytes(available)); 270 pages_to_bytes(available));
271
272 return idx;
269} 273}
270 274
271/* 275/*
@@ -291,14 +295,14 @@ static void stats_handle_request(struct virtio_balloon *vb)
291{ 295{
292 struct virtqueue *vq; 296 struct virtqueue *vq;
293 struct scatterlist sg; 297 struct scatterlist sg;
294 unsigned int len; 298 unsigned int len, num_stats;
295 299
296 update_balloon_stats(vb); 300 num_stats = update_balloon_stats(vb);
297 301
298 vq = vb->stats_vq; 302 vq = vb->stats_vq;
299 if (!virtqueue_get_buf(vq, &len)) 303 if (!virtqueue_get_buf(vq, &len))
300 return; 304 return;
301 sg_init_one(&sg, vb->stats, sizeof(vb->stats)); 305 sg_init_one(&sg, vb->stats, sizeof(vb->stats[0]) * num_stats);
302 virtqueue_add_outbuf(vq, &sg, 1, vb, GFP_KERNEL); 306 virtqueue_add_outbuf(vq, &sg, 1, vb, GFP_KERNEL);
303 virtqueue_kick(vq); 307 virtqueue_kick(vq);
304} 308}
@@ -423,13 +427,16 @@ static int init_vqs(struct virtio_balloon *vb)
423 vb->deflate_vq = vqs[1]; 427 vb->deflate_vq = vqs[1];
424 if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) { 428 if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) {
425 struct scatterlist sg; 429 struct scatterlist sg;
430 unsigned int num_stats;
426 vb->stats_vq = vqs[2]; 431 vb->stats_vq = vqs[2];
427 432
428 /* 433 /*
429 * Prime this virtqueue with one buffer so the hypervisor can 434 * Prime this virtqueue with one buffer so the hypervisor can
430 * use it to signal us later (it can't be broken yet!). 435 * use it to signal us later (it can't be broken yet!).
431 */ 436 */
432 sg_init_one(&sg, vb->stats, sizeof vb->stats); 437 num_stats = update_balloon_stats(vb);
438
439 sg_init_one(&sg, vb->stats, sizeof(vb->stats[0]) * num_stats);
433 if (virtqueue_add_outbuf(vb->stats_vq, &sg, 1, vb, GFP_KERNEL) 440 if (virtqueue_add_outbuf(vb->stats_vq, &sg, 1, vb, GFP_KERNEL)
434 < 0) 441 < 0)
435 BUG(); 442 BUG();
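
Why update_balloon_stats() now returns a count: with
CONFIG_VM_EVENT_COUNTERS disabled the four VM-event entries are compiled
out, so only part of vb->stats is valid and the scatterlist must cover
sizeof(vb->stats[0]) * num_stats, not the whole array. A reduced sketch
of the rule (the entry layout is illustrative, not the real
virtio_balloon_stat):

    struct stat_entry { u16 tag; u64 val; };
    static struct stat_entry stats[10];

    static unsigned int fill_stats(void)
    {
            unsigned int idx = 0;

            stats[idx].tag = 1; stats[idx].val = 4096; idx++;
    #ifdef CONFIG_VM_EVENT_COUNTERS
            /* these entries exist only in some configurations */
            stats[idx].tag = 2; stats[idx].val = 7; idx++;
    #endif
            return idx;     /* number of entries actually filled */
    }

    /* publish fill_stats() * sizeof(stats[0]) bytes, never sizeof(stats) */
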
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index df548a6fb844..590534910dc6 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -147,7 +147,7 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
147{ 147{
148 struct virtio_pci_device *vp_dev = to_vp_device(vdev); 148 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
149 const char *name = dev_name(&vp_dev->vdev.dev); 149 const char *name = dev_name(&vp_dev->vdev.dev);
150 int i, err = -ENOMEM, allocated_vectors, nvectors; 150 int i, j, err = -ENOMEM, allocated_vectors, nvectors;
151 unsigned flags = PCI_IRQ_MSIX; 151 unsigned flags = PCI_IRQ_MSIX;
152 bool shared = false; 152 bool shared = false;
153 u16 msix_vec; 153 u16 msix_vec;
@@ -212,7 +212,7 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
212 if (!vp_dev->msix_vector_map) 212 if (!vp_dev->msix_vector_map)
213 goto out_disable_config_irq; 213 goto out_disable_config_irq;
214 214
215 allocated_vectors = 1; /* vector 0 is the config interrupt */ 215 allocated_vectors = j = 1; /* vector 0 is the config interrupt */
216 for (i = 0; i < nvqs; ++i) { 216 for (i = 0; i < nvqs; ++i) {
217 if (!names[i]) { 217 if (!names[i]) {
218 vqs[i] = NULL; 218 vqs[i] = NULL;
@@ -236,18 +236,19 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
236 continue; 236 continue;
237 } 237 }
238 238
239 snprintf(vp_dev->msix_names[i + 1], 239 snprintf(vp_dev->msix_names[j],
240 sizeof(*vp_dev->msix_names), "%s-%s", 240 sizeof(*vp_dev->msix_names), "%s-%s",
241 dev_name(&vp_dev->vdev.dev), names[i]); 241 dev_name(&vp_dev->vdev.dev), names[i]);
242 err = request_irq(pci_irq_vector(vp_dev->pci_dev, msix_vec), 242 err = request_irq(pci_irq_vector(vp_dev->pci_dev, msix_vec),
243 vring_interrupt, IRQF_SHARED, 243 vring_interrupt, IRQF_SHARED,
244 vp_dev->msix_names[i + 1], vqs[i]); 244 vp_dev->msix_names[j], vqs[i]);
245 if (err) { 245 if (err) {
246 /* don't free this irq on error */ 246 /* don't free this irq on error */
247 vp_dev->msix_vector_map[i] = VIRTIO_MSI_NO_VECTOR; 247 vp_dev->msix_vector_map[i] = VIRTIO_MSI_NO_VECTOR;
248 goto out_remove_vqs; 248 goto out_remove_vqs;
249 } 249 }
250 vp_dev->msix_vector_map[i] = msix_vec; 250 vp_dev->msix_vector_map[i] = msix_vec;
251 j++;
251 252
252 /* 253 /*
253 * Use a different vector for each queue if they are available, 254 * Use a different vector for each queue if they are available,
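
The new j counter exists because i indexes the requested queues while
msix_names[] slots are consumed only by queues that actually receive a
vector — queues with a NULL name are skipped outright. A packed-array
sketch (signature and names hypothetical):

    static void name_vectors(char names[][64], const char *const *vq_names,
                             int nvqs)
    {
            int i, j = 1;           /* slot 0 is the config interrupt */

            for (i = 0; i < nvqs; i++) {
                    if (!vq_names[i])   /* skipped: no name slot used */
                            continue;
                    snprintf(names[j], 64, "dev-%s", vq_names[i]);
                    j++;            /* advance only when a slot is used */
            }
    }
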
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index c77a0751a311..f3bf8f4e2d6c 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -36,6 +36,7 @@
36#include <linux/spinlock.h> 36#include <linux/spinlock.h>
37#include <linux/slab.h> 37#include <linux/slab.h>
38#include <linux/highmem.h> 38#include <linux/highmem.h>
39#include <linux/refcount.h>
39 40
40#include <xen/xen.h> 41#include <xen/xen.h>
41#include <xen/grant_table.h> 42#include <xen/grant_table.h>
@@ -86,7 +87,7 @@ struct grant_map {
86 int index; 87 int index;
87 int count; 88 int count;
88 int flags; 89 int flags;
89 atomic_t users; 90 refcount_t users;
90 struct unmap_notify notify; 91 struct unmap_notify notify;
91 struct ioctl_gntdev_grant_ref *grants; 92 struct ioctl_gntdev_grant_ref *grants;
92 struct gnttab_map_grant_ref *map_ops; 93 struct gnttab_map_grant_ref *map_ops;
@@ -166,7 +167,7 @@ static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count)
166 167
167 add->index = 0; 168 add->index = 0;
168 add->count = count; 169 add->count = count;
169 atomic_set(&add->users, 1); 170 refcount_set(&add->users, 1);
170 171
171 return add; 172 return add;
172 173
@@ -212,7 +213,7 @@ static void gntdev_put_map(struct gntdev_priv *priv, struct grant_map *map)
212 if (!map) 213 if (!map)
213 return; 214 return;
214 215
215 if (!atomic_dec_and_test(&map->users)) 216 if (!refcount_dec_and_test(&map->users))
216 return; 217 return;
217 218
218 atomic_sub(map->count, &pages_mapped); 219 atomic_sub(map->count, &pages_mapped);
@@ -400,7 +401,7 @@ static void gntdev_vma_open(struct vm_area_struct *vma)
400 struct grant_map *map = vma->vm_private_data; 401 struct grant_map *map = vma->vm_private_data;
401 402
402 pr_debug("gntdev_vma_open %p\n", vma); 403 pr_debug("gntdev_vma_open %p\n", vma);
403 atomic_inc(&map->users); 404 refcount_inc(&map->users);
404} 405}
405 406
406static void gntdev_vma_close(struct vm_area_struct *vma) 407static void gntdev_vma_close(struct vm_area_struct *vma)
@@ -1004,7 +1005,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
1004 goto unlock_out; 1005 goto unlock_out;
1005 } 1006 }
1006 1007
1007 atomic_inc(&map->users); 1008 refcount_inc(&map->users);
1008 1009
1009 vma->vm_ops = &gntdev_vmops; 1010 vma->vm_ops = &gntdev_vmops;
1010 1011
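
The gntdev conversion is mechanical, but the payoff is that refcount_t
saturates and WARNs on overflow/underflow where atomic_t silently wraps
into a potential use-after-free. The operation mapping used above:

    refcount_set(&map->users, 1);               /* was atomic_set()          */
    refcount_inc(&map->users);                  /* was atomic_inc()          */
    if (!refcount_dec_and_test(&map->users))    /* was atomic_dec_and_test() */
            return;
    /* last reference dropped: safe to tear the mapping down */
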
diff --git a/drivers/xen/xen-acpi-processor.c b/drivers/xen/xen-acpi-processor.c
index 4ce10bcca18b..23e391d3ec01 100644
--- a/drivers/xen/xen-acpi-processor.c
+++ b/drivers/xen/xen-acpi-processor.c
@@ -27,10 +27,10 @@
27#include <linux/init.h> 27#include <linux/init.h>
28#include <linux/module.h> 28#include <linux/module.h>
29#include <linux/types.h> 29#include <linux/types.h>
30#include <linux/syscore_ops.h>
30#include <linux/acpi.h> 31#include <linux/acpi.h>
31#include <acpi/processor.h> 32#include <acpi/processor.h>
32#include <xen/xen.h> 33#include <xen/xen.h>
33#include <xen/xen-ops.h>
34#include <xen/interface/platform.h> 34#include <xen/interface/platform.h>
35#include <asm/xen/hypercall.h> 35#include <asm/xen/hypercall.h>
36 36
@@ -408,7 +408,7 @@ static int check_acpi_ids(struct acpi_processor *pr_backup)
408 acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT, 408 acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
409 ACPI_UINT32_MAX, 409 ACPI_UINT32_MAX,
410 read_acpi_id, NULL, NULL, NULL); 410 read_acpi_id, NULL, NULL, NULL);
411 acpi_get_devices("ACPI0007", read_acpi_id, NULL, NULL); 411 acpi_get_devices(ACPI_PROCESSOR_DEVICE_HID, read_acpi_id, NULL, NULL);
412 412
413upload: 413upload:
414 if (!bitmap_equal(acpi_id_present, acpi_ids_done, nr_acpi_bits)) { 414 if (!bitmap_equal(acpi_id_present, acpi_ids_done, nr_acpi_bits)) {
@@ -466,15 +466,33 @@ static int xen_upload_processor_pm_data(void)
466 return rc; 466 return rc;
467} 467}
468 468
469static int xen_acpi_processor_resume(struct notifier_block *nb, 469static void xen_acpi_processor_resume_worker(struct work_struct *dummy)
470 unsigned long action, void *data)
471{ 470{
471 int rc;
472
472 bitmap_zero(acpi_ids_done, nr_acpi_bits); 473 bitmap_zero(acpi_ids_done, nr_acpi_bits);
473 return xen_upload_processor_pm_data(); 474
475 rc = xen_upload_processor_pm_data();
476 if (rc != 0)
477 pr_info("ACPI data upload failed, error = %d\n", rc);
478}
479
480static void xen_acpi_processor_resume(void)
481{
482 static DECLARE_WORK(wq, xen_acpi_processor_resume_worker);
483
484 /*
485 * xen_upload_processor_pm_data() calls non-atomic code.
486 * However, the context for xen_acpi_processor_resume is syscore
487 * with only the boot CPU online and in an atomic context.
488 *
 489 * So defer the upload to a safer point.
490 */
491 schedule_work(&wq);
474} 492}
475 493
476struct notifier_block xen_acpi_processor_resume_nb = { 494static struct syscore_ops xap_syscore_ops = {
477 .notifier_call = xen_acpi_processor_resume, 495 .resume = xen_acpi_processor_resume,
478}; 496};
479 497
480static int __init xen_acpi_processor_init(void) 498static int __init xen_acpi_processor_init(void)
@@ -527,7 +545,7 @@ static int __init xen_acpi_processor_init(void)
527 if (rc) 545 if (rc)
528 goto err_unregister; 546 goto err_unregister;
529 547
530 xen_resume_notifier_register(&xen_acpi_processor_resume_nb); 548 register_syscore_ops(&xap_syscore_ops);
531 549
532 return 0; 550 return 0;
533err_unregister: 551err_unregister:
@@ -544,7 +562,7 @@ static void __exit xen_acpi_processor_exit(void)
544{ 562{
545 int i; 563 int i;
546 564
547 xen_resume_notifier_unregister(&xen_acpi_processor_resume_nb); 565 unregister_syscore_ops(&xap_syscore_ops);
548 kfree(acpi_ids_done); 566 kfree(acpi_ids_done);
549 kfree(acpi_id_present); 567 kfree(acpi_id_present);
550 kfree(acpi_id_cst_present); 568 kfree(acpi_id_cst_present);
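
The generic shape of the resume fix, with hypothetical names: a syscore
->resume() callback runs with only the boot CPU online and interrupts
off, so anything that might sleep gets deferred to a workqueue and runs
once the system is fully back up:

    static void resume_worker(struct work_struct *unused)
    {
            do_sleeping_resume_work();      /* hypothetical: may allocate,
                                               take mutexes, do I/O */
    }

    static void my_syscore_resume(void)
    {
            static DECLARE_WORK(resume_work, resume_worker);

            schedule_work(&resume_work);    /* safe from atomic context */
    }

    static struct syscore_ops my_syscore_ops = {
            .resume = my_syscore_resume,    /* register_syscore_ops() at init */
    };
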
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index 1f4733b80c87..f3b089b7c0b6 100644
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -442,8 +442,10 @@ static int xenbus_write_transaction(unsigned msg_type,
442 return xenbus_command_reply(u, XS_ERROR, "ENOENT"); 442 return xenbus_command_reply(u, XS_ERROR, "ENOENT");
443 443
444 rc = xenbus_dev_request_and_reply(&u->u.msg, u); 444 rc = xenbus_dev_request_and_reply(&u->u.msg, u);
445 if (rc) 445 if (rc && trans) {
446 list_del(&trans->list);
446 kfree(trans); 447 kfree(trans);
448 }
447 449
448out: 450out:
449 return rc; 451 return rc;
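
The xenbus fix is the classic unlink-before-free rule: trans was already
linked on the per-user transaction list, so freeing it on failure left a
dangling node for later list walks to dereference. In general form:

    /* An object on a list must leave the list before it is freed,
     * or the next iteration touches freed memory. */
    list_del(&obj->list);
    kfree(obj);
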
diff --git a/fs/afs/callback.c b/fs/afs/callback.c
index b29447e03ede..25d404d22cae 100644
--- a/fs/afs/callback.c
+++ b/fs/afs/callback.c
@@ -362,7 +362,7 @@ static void afs_callback_updater(struct work_struct *work)
362{ 362{
363 struct afs_server *server; 363 struct afs_server *server;
364 struct afs_vnode *vnode, *xvnode; 364 struct afs_vnode *vnode, *xvnode;
365 time_t now; 365 time64_t now;
366 long timeout; 366 long timeout;
367 int ret; 367 int ret;
368 368
@@ -370,7 +370,7 @@ static void afs_callback_updater(struct work_struct *work)
370 370
371 _enter(""); 371 _enter("");
372 372
373 now = get_seconds(); 373 now = ktime_get_real_seconds();
374 374
375 /* find the first vnode to update */ 375 /* find the first vnode to update */
376 spin_lock(&server->cb_lock); 376 spin_lock(&server->cb_lock);
@@ -424,7 +424,8 @@ static void afs_callback_updater(struct work_struct *work)
424 424
425 /* and then reschedule */ 425 /* and then reschedule */
426 _debug("reschedule"); 426 _debug("reschedule");
427 vnode->update_at = get_seconds() + afs_vnode_update_timeout; 427 vnode->update_at = ktime_get_real_seconds() +
428 afs_vnode_update_timeout;
428 429
429 spin_lock(&server->cb_lock); 430 spin_lock(&server->cb_lock);
430 431
diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c
index 2edbdcbf6432..3062cceb5c2a 100644
--- a/fs/afs/cmservice.c
+++ b/fs/afs/cmservice.c
@@ -187,7 +187,6 @@ static int afs_deliver_cb_callback(struct afs_call *call)
187 struct afs_callback *cb; 187 struct afs_callback *cb;
188 struct afs_server *server; 188 struct afs_server *server;
189 __be32 *bp; 189 __be32 *bp;
190 u32 tmp;
191 int ret, loop; 190 int ret, loop;
192 191
193 _enter("{%u}", call->unmarshall); 192 _enter("{%u}", call->unmarshall);
@@ -249,9 +248,9 @@ static int afs_deliver_cb_callback(struct afs_call *call)
249 if (ret < 0) 248 if (ret < 0)
250 return ret; 249 return ret;
251 250
252 tmp = ntohl(call->tmp); 251 call->count2 = ntohl(call->tmp);
253 _debug("CB count: %u", tmp); 252 _debug("CB count: %u", call->count2);
254 if (tmp != call->count && tmp != 0) 253 if (call->count2 != call->count && call->count2 != 0)
255 return -EBADMSG; 254 return -EBADMSG;
256 call->offset = 0; 255 call->offset = 0;
257 call->unmarshall++; 256 call->unmarshall++;
@@ -259,14 +258,14 @@ static int afs_deliver_cb_callback(struct afs_call *call)
259 case 4: 258 case 4:
260 _debug("extract CB array"); 259 _debug("extract CB array");
261 ret = afs_extract_data(call, call->buffer, 260 ret = afs_extract_data(call, call->buffer,
262 call->count * 3 * 4, false); 261 call->count2 * 3 * 4, false);
263 if (ret < 0) 262 if (ret < 0)
264 return ret; 263 return ret;
265 264
266 _debug("unmarshall CB array"); 265 _debug("unmarshall CB array");
267 cb = call->request; 266 cb = call->request;
268 bp = call->buffer; 267 bp = call->buffer;
269 for (loop = call->count; loop > 0; loop--, cb++) { 268 for (loop = call->count2; loop > 0; loop--, cb++) {
270 cb->version = ntohl(*bp++); 269 cb->version = ntohl(*bp++);
271 cb->expiry = ntohl(*bp++); 270 cb->expiry = ntohl(*bp++);
272 cb->type = ntohl(*bp++); 271 cb->type = ntohl(*bp++);
diff --git a/fs/afs/file.c b/fs/afs/file.c
index ba7b71fba34b..0d5b8508869b 100644
--- a/fs/afs/file.c
+++ b/fs/afs/file.c
@@ -30,6 +30,7 @@ static int afs_readpages(struct file *filp, struct address_space *mapping,
30 30
31const struct file_operations afs_file_operations = { 31const struct file_operations afs_file_operations = {
32 .open = afs_open, 32 .open = afs_open,
33 .flush = afs_flush,
33 .release = afs_release, 34 .release = afs_release,
34 .llseek = generic_file_llseek, 35 .llseek = generic_file_llseek,
35 .read_iter = generic_file_read_iter, 36 .read_iter = generic_file_read_iter,
@@ -184,10 +185,13 @@ int afs_page_filler(void *data, struct page *page)
184 if (!req) 185 if (!req)
185 goto enomem; 186 goto enomem;
186 187
188 /* We request a full page. If the page is a partial one at the
189 * end of the file, the server will return a short read and the
190 * unmarshalling code will clear the unfilled space.
191 */
187 atomic_set(&req->usage, 1); 192 atomic_set(&req->usage, 1);
188 req->pos = (loff_t)page->index << PAGE_SHIFT; 193 req->pos = (loff_t)page->index << PAGE_SHIFT;
189 req->len = min_t(size_t, i_size_read(inode) - req->pos, 194 req->len = PAGE_SIZE;
190 PAGE_SIZE);
191 req->nr_pages = 1; 195 req->nr_pages = 1;
192 req->pages[0] = page; 196 req->pages[0] = page;
193 get_page(page); 197 get_page(page);
@@ -208,7 +212,13 @@ int afs_page_filler(void *data, struct page *page)
208 fscache_uncache_page(vnode->cache, page); 212 fscache_uncache_page(vnode->cache, page);
209#endif 213#endif
210 BUG_ON(PageFsCache(page)); 214 BUG_ON(PageFsCache(page));
211 goto error; 215
216 if (ret == -EINTR ||
217 ret == -ENOMEM ||
218 ret == -ERESTARTSYS ||
219 ret == -EAGAIN)
220 goto error;
221 goto io_error;
212 } 222 }
213 223
214 SetPageUptodate(page); 224 SetPageUptodate(page);
@@ -227,10 +237,12 @@ int afs_page_filler(void *data, struct page *page)
227 _leave(" = 0"); 237 _leave(" = 0");
228 return 0; 238 return 0;
229 239
240io_error:
241 SetPageError(page);
242 goto error;
230enomem: 243enomem:
231 ret = -ENOMEM; 244 ret = -ENOMEM;
232error: 245error:
233 SetPageError(page);
234 unlock_page(page); 246 unlock_page(page);
235 _leave(" = %d", ret); 247 _leave(" = %d", ret);
236 return ret; 248 return ret;
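
The error split added to afs_page_filler() above: only hard failures
mark the page with PG_error, while transient conditions leave it clean
so a later read can retry. Expressed as a predicate (a hypothetical
helper, not in the patch):

    static bool afs_error_is_transient(int err)
    {
            /* interrupted, out of memory, or told to retry: leave the
             * page !PG_error so the read can simply be re-attempted */
            return err == -EINTR || err == -ENOMEM ||
                   err == -ERESTARTSYS || err == -EAGAIN;
    }
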
diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c
index ac8e766978dc..19f76ae36982 100644
--- a/fs/afs/fsclient.c
+++ b/fs/afs/fsclient.c
@@ -17,6 +17,12 @@
17#include "afs_fs.h" 17#include "afs_fs.h"
18 18
19/* 19/*
20 * We need somewhere to discard into in case the server helpfully returns more
21 * than we asked for in FS.FetchData{,64}.
22 */
23static u8 afs_discard_buffer[64];
24
25/*
20 * decode an AFSFid block 26 * decode an AFSFid block
21 */ 27 */
22static void xdr_decode_AFSFid(const __be32 **_bp, struct afs_fid *fid) 28static void xdr_decode_AFSFid(const __be32 **_bp, struct afs_fid *fid)
@@ -105,7 +111,7 @@ static void xdr_decode_AFSFetchStatus(const __be32 **_bp,
105 vnode->vfs_inode.i_mode = mode; 111 vnode->vfs_inode.i_mode = mode;
106 } 112 }
107 113
108 vnode->vfs_inode.i_ctime.tv_sec = status->mtime_server; 114 vnode->vfs_inode.i_ctime.tv_sec = status->mtime_client;
109 vnode->vfs_inode.i_mtime = vnode->vfs_inode.i_ctime; 115 vnode->vfs_inode.i_mtime = vnode->vfs_inode.i_ctime;
110 vnode->vfs_inode.i_atime = vnode->vfs_inode.i_ctime; 116 vnode->vfs_inode.i_atime = vnode->vfs_inode.i_ctime;
111 vnode->vfs_inode.i_version = data_version; 117 vnode->vfs_inode.i_version = data_version;
@@ -139,7 +145,7 @@ static void xdr_decode_AFSCallBack(const __be32 **_bp, struct afs_vnode *vnode)
139 vnode->cb_version = ntohl(*bp++); 145 vnode->cb_version = ntohl(*bp++);
140 vnode->cb_expiry = ntohl(*bp++); 146 vnode->cb_expiry = ntohl(*bp++);
141 vnode->cb_type = ntohl(*bp++); 147 vnode->cb_type = ntohl(*bp++);
142 vnode->cb_expires = vnode->cb_expiry + get_seconds(); 148 vnode->cb_expires = vnode->cb_expiry + ktime_get_real_seconds();
143 *_bp = bp; 149 *_bp = bp;
144} 150}
145 151
@@ -315,7 +321,7 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
315 void *buffer; 321 void *buffer;
316 int ret; 322 int ret;
317 323
318 _enter("{%u,%zu/%u;%u/%llu}", 324 _enter("{%u,%zu/%u;%llu/%llu}",
319 call->unmarshall, call->offset, call->count, 325 call->unmarshall, call->offset, call->count,
320 req->remain, req->actual_len); 326 req->remain, req->actual_len);
321 327
@@ -353,12 +359,6 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
353 359
354 req->actual_len |= ntohl(call->tmp); 360 req->actual_len |= ntohl(call->tmp);
355 _debug("DATA length: %llu", req->actual_len); 361 _debug("DATA length: %llu", req->actual_len);
356 /* Check that the server didn't want to send us extra. We
357 * might want to just discard instead, but that requires
358 * cooperation from AF_RXRPC.
359 */
360 if (req->actual_len > req->len)
361 return -EBADMSG;
362 362
363 req->remain = req->actual_len; 363 req->remain = req->actual_len;
364 call->offset = req->pos & (PAGE_SIZE - 1); 364 call->offset = req->pos & (PAGE_SIZE - 1);
@@ -368,6 +368,7 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
368 call->unmarshall++; 368 call->unmarshall++;
369 369
370 begin_page: 370 begin_page:
371 ASSERTCMP(req->index, <, req->nr_pages);
371 if (req->remain > PAGE_SIZE - call->offset) 372 if (req->remain > PAGE_SIZE - call->offset)
372 size = PAGE_SIZE - call->offset; 373 size = PAGE_SIZE - call->offset;
373 else 374 else
@@ -378,7 +379,7 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
378 379
379 /* extract the returned data */ 380 /* extract the returned data */
380 case 3: 381 case 3:
381 _debug("extract data %u/%llu %zu/%u", 382 _debug("extract data %llu/%llu %zu/%u",
382 req->remain, req->actual_len, call->offset, call->count); 383 req->remain, req->actual_len, call->offset, call->count);
383 384
384 buffer = kmap(req->pages[req->index]); 385 buffer = kmap(req->pages[req->index]);
@@ -389,19 +390,40 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
389 if (call->offset == PAGE_SIZE) { 390 if (call->offset == PAGE_SIZE) {
390 if (req->page_done) 391 if (req->page_done)
391 req->page_done(call, req); 392 req->page_done(call, req);
393 req->index++;
392 if (req->remain > 0) { 394 if (req->remain > 0) {
393 req->index++;
394 call->offset = 0; 395 call->offset = 0;
396 if (req->index >= req->nr_pages) {
397 call->unmarshall = 4;
398 goto begin_discard;
399 }
395 goto begin_page; 400 goto begin_page;
396 } 401 }
397 } 402 }
403 goto no_more_data;
404
405 /* Discard any excess data the server gave us */
406 begin_discard:
407 case 4:
408 size = min_t(loff_t, sizeof(afs_discard_buffer), req->remain);
409 call->count = size;
410 _debug("extract discard %llu/%llu %zu/%u",
411 req->remain, req->actual_len, call->offset, call->count);
412
413 call->offset = 0;
414 ret = afs_extract_data(call, afs_discard_buffer, call->count, true);
415 req->remain -= call->offset;
416 if (ret < 0)
417 return ret;
418 if (req->remain > 0)
419 goto begin_discard;
398 420
399 no_more_data: 421 no_more_data:
400 call->offset = 0; 422 call->offset = 0;
401 call->unmarshall++; 423 call->unmarshall = 5;
402 424
403 /* extract the metadata */ 425 /* extract the metadata */
404 case 4: 426 case 5:
405 ret = afs_extract_data(call, call->buffer, 427 ret = afs_extract_data(call, call->buffer,
406 (21 + 3 + 6) * 4, false); 428 (21 + 3 + 6) * 4, false);
407 if (ret < 0) 429 if (ret < 0)
@@ -416,16 +438,17 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
416 call->offset = 0; 438 call->offset = 0;
417 call->unmarshall++; 439 call->unmarshall++;
418 440
419 case 5: 441 case 6:
420 break; 442 break;
421 } 443 }
422 444
423 if (call->count < PAGE_SIZE) { 445 for (; req->index < req->nr_pages; req->index++) {
424 buffer = kmap(req->pages[req->index]); 446 if (call->count < PAGE_SIZE)
425 memset(buffer + call->count, 0, PAGE_SIZE - call->count); 447 zero_user_segment(req->pages[req->index],
426 kunmap(req->pages[req->index]); 448 call->count, PAGE_SIZE);
427 if (req->page_done) 449 if (req->page_done)
428 req->page_done(call, req); 450 req->page_done(call, req);
451 call->count = 0;
429 } 452 }
430 453
431 _leave(" = 0 [done]"); 454 _leave(" = 0 [done]");
@@ -711,8 +734,8 @@ int afs_fs_create(struct afs_server *server,
711 memset(bp, 0, padsz); 734 memset(bp, 0, padsz);
712 bp = (void *) bp + padsz; 735 bp = (void *) bp + padsz;
713 } 736 }
714 *bp++ = htonl(AFS_SET_MODE); 737 *bp++ = htonl(AFS_SET_MODE | AFS_SET_MTIME);
715 *bp++ = 0; /* mtime */ 738 *bp++ = htonl(vnode->vfs_inode.i_mtime.tv_sec); /* mtime */
716 *bp++ = 0; /* owner */ 739 *bp++ = 0; /* owner */
717 *bp++ = 0; /* group */ 740 *bp++ = 0; /* group */
718 *bp++ = htonl(mode & S_IALLUGO); /* unix mode */ 741 *bp++ = htonl(mode & S_IALLUGO); /* unix mode */
@@ -980,8 +1003,8 @@ int afs_fs_symlink(struct afs_server *server,
980 memset(bp, 0, c_padsz); 1003 memset(bp, 0, c_padsz);
981 bp = (void *) bp + c_padsz; 1004 bp = (void *) bp + c_padsz;
982 } 1005 }
983 *bp++ = htonl(AFS_SET_MODE); 1006 *bp++ = htonl(AFS_SET_MODE | AFS_SET_MTIME);
984 *bp++ = 0; /* mtime */ 1007 *bp++ = htonl(vnode->vfs_inode.i_mtime.tv_sec); /* mtime */
985 *bp++ = 0; /* owner */ 1008 *bp++ = 0; /* owner */
986 *bp++ = 0; /* group */ 1009 *bp++ = 0; /* group */
987 *bp++ = htonl(S_IRWXUGO); /* unix mode */ 1010 *bp++ = htonl(S_IRWXUGO); /* unix mode */
@@ -1180,8 +1203,8 @@ static int afs_fs_store_data64(struct afs_server *server,
1180 *bp++ = htonl(vnode->fid.vnode); 1203 *bp++ = htonl(vnode->fid.vnode);
1181 *bp++ = htonl(vnode->fid.unique); 1204 *bp++ = htonl(vnode->fid.unique);
1182 1205
1183 *bp++ = 0; /* mask */ 1206 *bp++ = htonl(AFS_SET_MTIME); /* mask */
1184 *bp++ = 0; /* mtime */ 1207 *bp++ = htonl(vnode->vfs_inode.i_mtime.tv_sec); /* mtime */
1185 *bp++ = 0; /* owner */ 1208 *bp++ = 0; /* owner */
1186 *bp++ = 0; /* group */ 1209 *bp++ = 0; /* group */
1187 *bp++ = 0; /* unix mode */ 1210 *bp++ = 0; /* unix mode */
@@ -1213,7 +1236,7 @@ int afs_fs_store_data(struct afs_server *server, struct afs_writeback *wb,
1213 _enter(",%x,{%x:%u},,", 1236 _enter(",%x,{%x:%u},,",
1214 key_serial(wb->key), vnode->fid.vid, vnode->fid.vnode); 1237 key_serial(wb->key), vnode->fid.vid, vnode->fid.vnode);
1215 1238
1216 size = to - offset; 1239 size = (loff_t)to - (loff_t)offset;
1217 if (first != last) 1240 if (first != last)
1218 size += (loff_t)(last - first) << PAGE_SHIFT; 1241 size += (loff_t)(last - first) << PAGE_SHIFT;
1219 pos = (loff_t)first << PAGE_SHIFT; 1242 pos = (loff_t)first << PAGE_SHIFT;
@@ -1257,8 +1280,8 @@ int afs_fs_store_data(struct afs_server *server, struct afs_writeback *wb,
1257 *bp++ = htonl(vnode->fid.vnode); 1280 *bp++ = htonl(vnode->fid.vnode);
1258 *bp++ = htonl(vnode->fid.unique); 1281 *bp++ = htonl(vnode->fid.unique);
1259 1282
1260 *bp++ = 0; /* mask */ 1283 *bp++ = htonl(AFS_SET_MTIME); /* mask */
1261 *bp++ = 0; /* mtime */ 1284 *bp++ = htonl(vnode->vfs_inode.i_mtime.tv_sec); /* mtime */
1262 *bp++ = 0; /* owner */ 1285 *bp++ = 0; /* owner */
1263 *bp++ = 0; /* group */ 1286 *bp++ = 0; /* group */
1264 *bp++ = 0; /* unix mode */ 1287 *bp++ = 0; /* unix mode */
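
Condensed form of the new discard phase in afs_deliver_fs_fetch_data():
once all req->nr_pages are filled, surplus bytes from the server are
drained through the small static afs_discard_buffer instead of failing
the fetch with -EBADMSG. A sketch — the real state machine tracks
progress via call->offset and may return -EAGAIN to wait for more data:

    while (req->remain > 0) {
            size = min_t(loff_t, sizeof(afs_discard_buffer), req->remain);
            ret = afs_extract_data(call, afs_discard_buffer, size, true);
            if (ret < 0)
                    return ret;
            req->remain -= size;
    }
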
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
index 1e4897a048d2..aae55dd15108 100644
--- a/fs/afs/inode.c
+++ b/fs/afs/inode.c
@@ -54,8 +54,21 @@ static int afs_inode_map_status(struct afs_vnode *vnode, struct key *key)
54 inode->i_fop = &afs_dir_file_operations; 54 inode->i_fop = &afs_dir_file_operations;
55 break; 55 break;
56 case AFS_FTYPE_SYMLINK: 56 case AFS_FTYPE_SYMLINK:
57 inode->i_mode = S_IFLNK | vnode->status.mode; 57 /* Symlinks with a mode of 0644 are actually mountpoints. */
58 inode->i_op = &page_symlink_inode_operations; 58 if ((vnode->status.mode & 0777) == 0644) {
59 inode->i_flags |= S_AUTOMOUNT;
60
61 spin_lock(&vnode->lock);
62 set_bit(AFS_VNODE_MOUNTPOINT, &vnode->flags);
63 spin_unlock(&vnode->lock);
64
65 inode->i_mode = S_IFDIR | 0555;
66 inode->i_op = &afs_mntpt_inode_operations;
67 inode->i_fop = &afs_mntpt_file_operations;
68 } else {
69 inode->i_mode = S_IFLNK | vnode->status.mode;
70 inode->i_op = &page_symlink_inode_operations;
71 }
59 inode_nohighmem(inode); 72 inode_nohighmem(inode);
60 break; 73 break;
61 default: 74 default:
@@ -70,27 +83,15 @@ static int afs_inode_map_status(struct afs_vnode *vnode, struct key *key)
70 83
71 set_nlink(inode, vnode->status.nlink); 84 set_nlink(inode, vnode->status.nlink);
72 inode->i_uid = vnode->status.owner; 85 inode->i_uid = vnode->status.owner;
73 inode->i_gid = GLOBAL_ROOT_GID; 86 inode->i_gid = vnode->status.group;
74 inode->i_size = vnode->status.size; 87 inode->i_size = vnode->status.size;
75 inode->i_ctime.tv_sec = vnode->status.mtime_server; 88 inode->i_ctime.tv_sec = vnode->status.mtime_client;
76 inode->i_ctime.tv_nsec = 0; 89 inode->i_ctime.tv_nsec = 0;
77 inode->i_atime = inode->i_mtime = inode->i_ctime; 90 inode->i_atime = inode->i_mtime = inode->i_ctime;
78 inode->i_blocks = 0; 91 inode->i_blocks = 0;
79 inode->i_generation = vnode->fid.unique; 92 inode->i_generation = vnode->fid.unique;
80 inode->i_version = vnode->status.data_version; 93 inode->i_version = vnode->status.data_version;
81 inode->i_mapping->a_ops = &afs_fs_aops; 94 inode->i_mapping->a_ops = &afs_fs_aops;
82
83 /* check to see whether a symbolic link is really a mountpoint */
84 if (vnode->status.type == AFS_FTYPE_SYMLINK) {
85 afs_mntpt_check_symlink(vnode, key);
86
87 if (test_bit(AFS_VNODE_MOUNTPOINT, &vnode->flags)) {
88 inode->i_mode = S_IFDIR | vnode->status.mode;
89 inode->i_op = &afs_mntpt_inode_operations;
90 inode->i_fop = &afs_mntpt_file_operations;
91 }
92 }
93
94 return 0; 95 return 0;
95} 96}
96 97
@@ -245,12 +246,13 @@ struct inode *afs_iget(struct super_block *sb, struct key *key,
245 vnode->cb_version = 0; 246 vnode->cb_version = 0;
246 vnode->cb_expiry = 0; 247 vnode->cb_expiry = 0;
247 vnode->cb_type = 0; 248 vnode->cb_type = 0;
248 vnode->cb_expires = get_seconds(); 249 vnode->cb_expires = ktime_get_real_seconds();
249 } else { 250 } else {
250 vnode->cb_version = cb->version; 251 vnode->cb_version = cb->version;
251 vnode->cb_expiry = cb->expiry; 252 vnode->cb_expiry = cb->expiry;
252 vnode->cb_type = cb->type; 253 vnode->cb_type = cb->type;
253 vnode->cb_expires = vnode->cb_expiry + get_seconds(); 254 vnode->cb_expires = vnode->cb_expiry +
255 ktime_get_real_seconds();
254 } 256 }
255 } 257 }
256 258
@@ -323,7 +325,7 @@ int afs_validate(struct afs_vnode *vnode, struct key *key)
323 !test_bit(AFS_VNODE_CB_BROKEN, &vnode->flags) && 325 !test_bit(AFS_VNODE_CB_BROKEN, &vnode->flags) &&
324 !test_bit(AFS_VNODE_MODIFIED, &vnode->flags) && 326 !test_bit(AFS_VNODE_MODIFIED, &vnode->flags) &&
325 !test_bit(AFS_VNODE_ZAP_DATA, &vnode->flags)) { 327 !test_bit(AFS_VNODE_ZAP_DATA, &vnode->flags)) {
326 if (vnode->cb_expires < get_seconds() + 10) { 328 if (vnode->cb_expires < ktime_get_real_seconds() + 10) {
327 _debug("callback expired"); 329 _debug("callback expired");
328 set_bit(AFS_VNODE_CB_BROKEN, &vnode->flags); 330 set_bit(AFS_VNODE_CB_BROKEN, &vnode->flags);
329 } else { 331 } else {
@@ -444,7 +446,7 @@ void afs_evict_inode(struct inode *inode)
444 446
445 mutex_lock(&vnode->permits_lock); 447 mutex_lock(&vnode->permits_lock);
446 permits = vnode->permits; 448 permits = vnode->permits;
447 rcu_assign_pointer(vnode->permits, NULL); 449 RCU_INIT_POINTER(vnode->permits, NULL);
448 mutex_unlock(&vnode->permits_lock); 450 mutex_unlock(&vnode->permits_lock);
449 if (permits) 451 if (permits)
450 call_rcu(&permits->rcu, afs_zap_permits); 452 call_rcu(&permits->rcu, afs_zap_permits);
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index 5dfa56903a2d..a6901360fb81 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -11,6 +11,7 @@
11 11
12#include <linux/compiler.h> 12#include <linux/compiler.h>
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/ktime.h>
14#include <linux/fs.h> 15#include <linux/fs.h>
15#include <linux/pagemap.h> 16#include <linux/pagemap.h>
16#include <linux/rxrpc.h> 17#include <linux/rxrpc.h>
@@ -90,7 +91,10 @@ struct afs_call {
90 unsigned request_size; /* size of request data */ 91 unsigned request_size; /* size of request data */
91 unsigned reply_max; /* maximum size of reply */ 92 unsigned reply_max; /* maximum size of reply */
92 unsigned first_offset; /* offset into mapping[first] */ 93 unsigned first_offset; /* offset into mapping[first] */
93 unsigned last_to; /* amount of mapping[last] */ 94 union {
95 unsigned last_to; /* amount of mapping[last] */
96 unsigned count2; /* count used in unmarshalling */
97 };
94 unsigned char unmarshall; /* unmarshalling phase */ 98 unsigned char unmarshall; /* unmarshalling phase */
95 bool incoming; /* T if incoming call */ 99 bool incoming; /* T if incoming call */
96 bool send_pages; /* T if data from mapping should be sent */ 100 bool send_pages; /* T if data from mapping should be sent */
@@ -127,12 +131,11 @@ struct afs_call_type {
127 */ 131 */
128struct afs_read { 132struct afs_read {
129 loff_t pos; /* Where to start reading */ 133 loff_t pos; /* Where to start reading */
130 loff_t len; /* How much to read */ 134 loff_t len; /* How much we're asking for */
131 loff_t actual_len; /* How much we're actually getting */ 135 loff_t actual_len; /* How much we're actually getting */
136 loff_t remain; /* Amount remaining */
132 atomic_t usage; 137 atomic_t usage;
133 unsigned int remain; /* Amount remaining */
134 unsigned int index; /* Which page we're reading into */ 138 unsigned int index; /* Which page we're reading into */
135 unsigned int pg_offset; /* Offset in page we're at */
136 unsigned int nr_pages; 139 unsigned int nr_pages;
137 void (*page_done)(struct afs_call *, struct afs_read *); 140 void (*page_done)(struct afs_call *, struct afs_read *);
138 struct page *pages[]; 141 struct page *pages[];
@@ -247,7 +250,7 @@ struct afs_cache_vhash {
247 */ 250 */
248struct afs_vlocation { 251struct afs_vlocation {
249 atomic_t usage; 252 atomic_t usage;
250 time_t time_of_death; /* time at which put reduced usage to 0 */ 253 time64_t time_of_death; /* time at which put reduced usage to 0 */
251 struct list_head link; /* link in cell volume location list */ 254 struct list_head link; /* link in cell volume location list */
252 struct list_head grave; /* link in master graveyard list */ 255 struct list_head grave; /* link in master graveyard list */
253 struct list_head update; /* link in master update list */ 256 struct list_head update; /* link in master update list */
@@ -258,7 +261,7 @@ struct afs_vlocation {
258 struct afs_cache_vlocation vldb; /* volume information DB record */ 261 struct afs_cache_vlocation vldb; /* volume information DB record */
259 struct afs_volume *vols[3]; /* volume access record pointer (index by type) */ 262 struct afs_volume *vols[3]; /* volume access record pointer (index by type) */
260 wait_queue_head_t waitq; /* status change waitqueue */ 263 wait_queue_head_t waitq; /* status change waitqueue */
261 time_t update_at; /* time at which record should be updated */ 264 time64_t update_at; /* time at which record should be updated */
262 spinlock_t lock; /* access lock */ 265 spinlock_t lock; /* access lock */
263 afs_vlocation_state_t state; /* volume location state */ 266 afs_vlocation_state_t state; /* volume location state */
264 unsigned short upd_rej_cnt; /* ENOMEDIUM count during update */ 267 unsigned short upd_rej_cnt; /* ENOMEDIUM count during update */
@@ -271,7 +274,7 @@ struct afs_vlocation {
271 */ 274 */
272struct afs_server { 275struct afs_server {
273 atomic_t usage; 276 atomic_t usage;
274 time_t time_of_death; /* time at which put reduced usage to 0 */ 277 time64_t time_of_death; /* time at which put reduced usage to 0 */
275 struct in_addr addr; /* server address */ 278 struct in_addr addr; /* server address */
276 struct afs_cell *cell; /* cell in which server resides */ 279 struct afs_cell *cell; /* cell in which server resides */
277 struct list_head link; /* link in cell's server list */ 280 struct list_head link; /* link in cell's server list */
@@ -374,8 +377,8 @@ struct afs_vnode {
374 struct rb_node server_rb; /* link in server->fs_vnodes */ 377 struct rb_node server_rb; /* link in server->fs_vnodes */
375 struct rb_node cb_promise; /* link in server->cb_promises */ 378 struct rb_node cb_promise; /* link in server->cb_promises */
376 struct work_struct cb_broken_work; /* work to be done on callback break */ 379 struct work_struct cb_broken_work; /* work to be done on callback break */
377 time_t cb_expires; /* time at which callback expires */ 380 time64_t cb_expires; /* time at which callback expires */
378 time_t cb_expires_at; /* time used to order cb_promise */ 381 time64_t cb_expires_at; /* time used to order cb_promise */
379 unsigned cb_version; /* callback version */ 382 unsigned cb_version; /* callback version */
380 unsigned cb_expiry; /* callback expiry time */ 383 unsigned cb_expiry; /* callback expiry time */
381 afs_callback_type_t cb_type; /* type of callback */ 384 afs_callback_type_t cb_type; /* type of callback */
@@ -557,7 +560,6 @@ extern const struct inode_operations afs_autocell_inode_operations;
557extern const struct file_operations afs_mntpt_file_operations; 560extern const struct file_operations afs_mntpt_file_operations;
558 561
559extern struct vfsmount *afs_d_automount(struct path *); 562extern struct vfsmount *afs_d_automount(struct path *);
560extern int afs_mntpt_check_symlink(struct afs_vnode *, struct key *);
561extern void afs_mntpt_kill_timer(void); 563extern void afs_mntpt_kill_timer(void);
562 564
563/* 565/*
@@ -718,6 +720,7 @@ extern int afs_writepages(struct address_space *, struct writeback_control *);
718extern void afs_pages_written_back(struct afs_vnode *, struct afs_call *); 720extern void afs_pages_written_back(struct afs_vnode *, struct afs_call *);
719extern ssize_t afs_file_write(struct kiocb *, struct iov_iter *); 721extern ssize_t afs_file_write(struct kiocb *, struct iov_iter *);
720extern int afs_writeback_all(struct afs_vnode *); 722extern int afs_writeback_all(struct afs_vnode *);
723extern int afs_flush(struct file *, fl_owner_t);
721extern int afs_fsync(struct file *, loff_t, loff_t, int); 724extern int afs_fsync(struct file *, loff_t, loff_t, int);
722 725
723 726
diff --git a/fs/afs/misc.c b/fs/afs/misc.c
index 91ea1aa0d8b3..100b207efc9e 100644
--- a/fs/afs/misc.c
+++ b/fs/afs/misc.c
@@ -84,6 +84,8 @@ int afs_abort_to_error(u32 abort_code)
84 case RXKADDATALEN: return -EKEYREJECTED; 84 case RXKADDATALEN: return -EKEYREJECTED;
85 case RXKADILLEGALLEVEL: return -EKEYREJECTED; 85 case RXKADILLEGALLEVEL: return -EKEYREJECTED;
86 86
87 case RXGEN_OPCODE: return -ENOTSUPP;
88
87 default: return -EREMOTEIO; 89 default: return -EREMOTEIO;
88 } 90 }
89} 91}
diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c
index d4fb0afc0097..bd3b65cde282 100644
--- a/fs/afs/mntpt.c
+++ b/fs/afs/mntpt.c
@@ -47,59 +47,6 @@ static DECLARE_DELAYED_WORK(afs_mntpt_expiry_timer, afs_mntpt_expiry_timed_out);
47static unsigned long afs_mntpt_expiry_timeout = 10 * 60; 47static unsigned long afs_mntpt_expiry_timeout = 10 * 60;
48 48
49/* 49/*
50 * check a symbolic link to see whether it actually encodes a mountpoint
51 * - sets the AFS_VNODE_MOUNTPOINT flag on the vnode appropriately
52 */
53int afs_mntpt_check_symlink(struct afs_vnode *vnode, struct key *key)
54{
55 struct page *page;
56 size_t size;
57 char *buf;
58 int ret;
59
60 _enter("{%x:%u,%u}",
61 vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique);
62
63 /* read the contents of the symlink into the pagecache */
64 page = read_cache_page(AFS_VNODE_TO_I(vnode)->i_mapping, 0,
65 afs_page_filler, key);
66 if (IS_ERR(page)) {
67 ret = PTR_ERR(page);
68 goto out;
69 }
70
71 ret = -EIO;
72 if (PageError(page))
73 goto out_free;
74
75 buf = kmap(page);
76
77 /* examine the symlink's contents */
78 size = vnode->status.size;
79 _debug("symlink to %*.*s", (int) size, (int) size, buf);
80
81 if (size > 2 &&
82 (buf[0] == '%' || buf[0] == '#') &&
83 buf[size - 1] == '.'
84 ) {
85 _debug("symlink is a mountpoint");
86 spin_lock(&vnode->lock);
87 set_bit(AFS_VNODE_MOUNTPOINT, &vnode->flags);
88 vnode->vfs_inode.i_flags |= S_AUTOMOUNT;
89 spin_unlock(&vnode->lock);
90 }
91
92 ret = 0;
93
94 kunmap(page);
95out_free:
96 put_page(page);
97out:
98 _leave(" = %d", ret);
99 return ret;
100}
101
102/*
103 * no valid lookup procedure on this sort of dir 50 * no valid lookup procedure on this sort of dir
104 */ 51 */
105static struct dentry *afs_mntpt_lookup(struct inode *dir, 52static struct dentry *afs_mntpt_lookup(struct inode *dir,
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index 419ef05dcb5e..8f76b13d5549 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -259,67 +259,74 @@ void afs_flat_call_destructor(struct afs_call *call)
 	call->buffer = NULL;
 }
 
+#define AFS_BVEC_MAX 8
+
+/*
+ * Load the given bvec with the next few pages.
+ */
+static void afs_load_bvec(struct afs_call *call, struct msghdr *msg,
+			  struct bio_vec *bv, pgoff_t first, pgoff_t last,
+			  unsigned offset)
+{
+	struct page *pages[AFS_BVEC_MAX];
+	unsigned int nr, n, i, to, bytes = 0;
+
+	nr = min_t(pgoff_t, last - first + 1, AFS_BVEC_MAX);
+	n = find_get_pages_contig(call->mapping, first, nr, pages);
+	ASSERTCMP(n, ==, nr);
+
+	msg->msg_flags |= MSG_MORE;
+	for (i = 0; i < nr; i++) {
+		to = PAGE_SIZE;
+		if (first + i >= last) {
+			to = call->last_to;
+			msg->msg_flags &= ~MSG_MORE;
+		}
+		bv[i].bv_page = pages[i];
+		bv[i].bv_len = to - offset;
+		bv[i].bv_offset = offset;
+		bytes += to - offset;
+		offset = 0;
+	}
+
+	iov_iter_bvec(&msg->msg_iter, WRITE | ITER_BVEC, bv, nr, bytes);
+}
+
 /*
  * attach the data from a bunch of pages on an inode to a call
  */
 static int afs_send_pages(struct afs_call *call, struct msghdr *msg)
 {
-	struct page *pages[8];
-	unsigned count, n, loop, offset, to;
+	struct bio_vec bv[AFS_BVEC_MAX];
+	unsigned int bytes, nr, loop, offset;
 	pgoff_t first = call->first, last = call->last;
 	int ret;
 
-	_enter("");
-
 	offset = call->first_offset;
 	call->first_offset = 0;
 
 	do {
-		_debug("attach %lx-%lx", first, last);
-
-		count = last - first + 1;
-		if (count > ARRAY_SIZE(pages))
-			count = ARRAY_SIZE(pages);
-		n = find_get_pages_contig(call->mapping, first, count, pages);
-		ASSERTCMP(n, ==, count);
-
-		loop = 0;
-		do {
-			struct bio_vec bvec = {.bv_page = pages[loop],
-					       .bv_offset = offset};
-			msg->msg_flags = 0;
-			to = PAGE_SIZE;
-			if (first + loop >= last)
-				to = call->last_to;
-			else
-				msg->msg_flags = MSG_MORE;
-			bvec.bv_len = to - offset;
-			offset = 0;
-
-			_debug("- range %u-%u%s",
-			       offset, to, msg->msg_flags ? " [more]" : "");
-			iov_iter_bvec(&msg->msg_iter, WRITE | ITER_BVEC,
-				      &bvec, 1, to - offset);
-
-			/* have to change the state *before* sending the last
-			 * packet as RxRPC might give us the reply before it
-			 * returns from sending the request */
-			if (first + loop >= last)
-				call->state = AFS_CALL_AWAIT_REPLY;
-			ret = rxrpc_kernel_send_data(afs_socket, call->rxcall,
-						     msg, to - offset);
-			if (ret < 0)
-				break;
-		} while (++loop < count);
-		first += count;
-
-		for (loop = 0; loop < count; loop++)
-			put_page(pages[loop]);
+		afs_load_bvec(call, msg, bv, first, last, offset);
+		offset = 0;
+		bytes = msg->msg_iter.count;
+		nr = msg->msg_iter.nr_segs;
+
+		/* Have to change the state *before* sending the last
+		 * packet as RxRPC might give us the reply before it
+		 * returns from sending the request.
+		 */
+		if (first + nr - 1 >= last)
+			call->state = AFS_CALL_AWAIT_REPLY;
+		ret = rxrpc_kernel_send_data(afs_socket, call->rxcall,
+					     msg, bytes);
+		for (loop = 0; loop < nr; loop++)
+			put_page(bv[loop].bv_page);
 		if (ret < 0)
 			break;
+
+		first += nr;
 	} while (first <= last);
 
-	_leave(" = %d", ret);
 	return ret;
 }
 
@@ -333,6 +340,8 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp,
 	struct rxrpc_call *rxcall;
 	struct msghdr msg;
 	struct kvec iov[1];
+	size_t offset;
+	u32 abort_code;
 	int ret;
 
 	_enter("%x,{%d},", addr->s_addr, ntohs(call->port));
@@ -381,9 +390,11 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp,
 	msg.msg_controllen = 0;
 	msg.msg_flags = (call->send_pages ? MSG_MORE : 0);
 
-	/* have to change the state *before* sending the last packet as RxRPC
-	 * might give us the reply before it returns from sending the
-	 * request */
+	/* We have to change the state *before* sending the last packet as
+	 * rxrpc might give us the reply before it returns from sending the
+	 * request.  Further, if the send fails, we may already have been given
+	 * a notification and may have collected it.
+	 */
 	if (!call->send_pages)
 		call->state = AFS_CALL_AWAIT_REPLY;
 	ret = rxrpc_kernel_send_data(afs_socket, rxcall,
@@ -405,7 +416,17 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp,
 	return afs_wait_for_call_to_complete(call);
 
 error_do_abort:
-	rxrpc_kernel_abort_call(afs_socket, rxcall, RX_USER_ABORT, -ret, "KSD");
+	call->state = AFS_CALL_COMPLETE;
+	if (ret != -ECONNABORTED) {
+		rxrpc_kernel_abort_call(afs_socket, rxcall, RX_USER_ABORT,
+					-ret, "KSD");
+	} else {
+		abort_code = 0;
+		offset = 0;
+		rxrpc_kernel_recv_data(afs_socket, rxcall, NULL, 0, &offset,
+				       false, &abort_code);
+		ret = call->type->abort_to_error(abort_code);
+	}
 error_kill_call:
 	afs_put_call(call);
 	_leave(" = %d", ret);
@@ -452,16 +473,18 @@ static void afs_deliver_to_call(struct afs_call *call)
 		case -EINPROGRESS:
 		case -EAGAIN:
 			goto out;
+		case -ECONNABORTED:
+			goto call_complete;
 		case -ENOTCONN:
 			abort_code = RX_CALL_DEAD;
 			rxrpc_kernel_abort_call(afs_socket, call->rxcall,
 						abort_code, -ret, "KNC");
-			goto do_abort;
+			goto save_error;
 		case -ENOTSUPP:
-			abort_code = RX_INVALID_OPERATION;
+			abort_code = RXGEN_OPCODE;
 			rxrpc_kernel_abort_call(afs_socket, call->rxcall,
 						abort_code, -ret, "KIV");
-			goto do_abort;
+			goto save_error;
 		case -ENODATA:
 		case -EBADMSG:
 		case -EMSGSIZE:
@@ -471,7 +494,7 @@ static void afs_deliver_to_call(struct afs_call *call)
 			abort_code = RXGEN_SS_UNMARSHAL;
 			rxrpc_kernel_abort_call(afs_socket, call->rxcall,
 						abort_code, EBADMSG, "KUM");
-			goto do_abort;
+			goto save_error;
 		}
 	}
 
@@ -482,8 +505,9 @@ out:
 	_leave("");
 	return;
 
-do_abort:
+save_error:
 	call->error = ret;
+call_complete:
 	call->state = AFS_CALL_COMPLETE;
 	goto done;
 }
@@ -493,7 +517,6 @@ do_abort:
  */
 static int afs_wait_for_call_to_complete(struct afs_call *call)
 {
-	const char *abort_why;
 	int ret;
 
 	DECLARE_WAITQUEUE(myself, current);
@@ -512,13 +535,8 @@ static int afs_wait_for_call_to_complete(struct afs_call *call)
 			continue;
 		}
 
-		abort_why = "KWC";
-		ret = call->error;
-		if (call->state == AFS_CALL_COMPLETE)
-			break;
-		abort_why = "KWI";
-		ret = -EINTR;
-		if (signal_pending(current))
+		if (call->state == AFS_CALL_COMPLETE ||
+		    signal_pending(current))
 			break;
 		schedule();
 	}
@@ -526,13 +544,14 @@ static int afs_wait_for_call_to_complete(struct afs_call *call)
 	remove_wait_queue(&call->waitq, &myself);
 	__set_current_state(TASK_RUNNING);
 
-	/* kill the call */
+	/* Kill off the call if it's still live. */
 	if (call->state < AFS_CALL_COMPLETE) {
-		_debug("call incomplete");
+		_debug("call interrupted");
 		rxrpc_kernel_abort_call(afs_socket, call->rxcall,
-					RX_CALL_DEAD, -ret, abort_why);
+					RX_USER_ABORT, -EINTR, "KWI");
 	}
 
+	ret = call->error;
 	_debug("call complete");
 	afs_put_call(call);
 	_leave(" = %d", ret);
diff --git a/fs/afs/security.c b/fs/afs/security.c
index 8d010422dc89..ecb86a670180 100644
--- a/fs/afs/security.c
+++ b/fs/afs/security.c
@@ -114,7 +114,7 @@ void afs_clear_permits(struct afs_vnode *vnode)
 
 	mutex_lock(&vnode->permits_lock);
 	permits = vnode->permits;
-	rcu_assign_pointer(vnode->permits, NULL);
+	RCU_INIT_POINTER(vnode->permits, NULL);
 	mutex_unlock(&vnode->permits_lock);
 
 	if (permits)
@@ -340,17 +340,22 @@ int afs_permission(struct inode *inode, int mask)
 	} else {
 		if (!(access & AFS_ACE_LOOKUP))
 			goto permission_denied;
+		if ((mask & MAY_EXEC) && !(inode->i_mode & S_IXUSR))
+			goto permission_denied;
 		if (mask & (MAY_EXEC | MAY_READ)) {
 			if (!(access & AFS_ACE_READ))
 				goto permission_denied;
+			if (!(inode->i_mode & S_IRUSR))
+				goto permission_denied;
 		} else if (mask & MAY_WRITE) {
 			if (!(access & AFS_ACE_WRITE))
 				goto permission_denied;
+			if (!(inode->i_mode & S_IWUSR))
+				goto permission_denied;
 		}
 	}
 
 	key_put(key);
-	ret = generic_permission(inode, mask);
 	_leave(" = %d", ret);
 	return ret;
 
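The new checks AND the AFS ACL grants with the owner bits of the inode mode, so a file the server would permit can still be denied by its local mode. The combined test, modelled as a standalone predicate (the flag values here are illustrative, not the kernel's):

#include <stdbool.h>

#define ACE_LOOKUP 0x1
#define ACE_READ   0x2
#define ACE_WRITE  0x4

#define MAY_EXEC  0x1
#define MAY_WRITE 0x2
#define MAY_READ  0x4

static bool afs_may(unsigned int mask, unsigned int access, unsigned int mode)
{
	if (!(access & ACE_LOOKUP))
		return false;
	if ((mask & MAY_EXEC) && !(mode & 0100))	/* S_IXUSR */
		return false;
	if (mask & (MAY_EXEC | MAY_READ)) {
		if (!(access & ACE_READ) || !(mode & 0400))	/* S_IRUSR */
			return false;
	} else if (mask & MAY_WRITE) {
		if (!(access & ACE_WRITE) || !(mode & 0200))	/* S_IWUSR */
			return false;
	}
	return true;	/* both the ACL and the mode bits allow the access */
}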
diff --git a/fs/afs/server.c b/fs/afs/server.c
index d4066ab7dd55..c001b1f2455f 100644
--- a/fs/afs/server.c
+++ b/fs/afs/server.c
@@ -242,7 +242,7 @@ void afs_put_server(struct afs_server *server)
 	spin_lock(&afs_server_graveyard_lock);
 	if (atomic_read(&server->usage) == 0) {
 		list_move_tail(&server->grave, &afs_server_graveyard);
-		server->time_of_death = get_seconds();
+		server->time_of_death = ktime_get_real_seconds();
 		queue_delayed_work(afs_wq, &afs_server_reaper,
 				   afs_server_timeout * HZ);
 	}
@@ -277,9 +277,9 @@ static void afs_reap_server(struct work_struct *work)
 	LIST_HEAD(corpses);
 	struct afs_server *server;
 	unsigned long delay, expiry;
-	time_t now;
+	time64_t now;
 
-	now = get_seconds();
+	now = ktime_get_real_seconds();
 	spin_lock(&afs_server_graveyard_lock);
 
 	while (!list_empty(&afs_server_graveyard)) {
diff --git a/fs/afs/vlocation.c b/fs/afs/vlocation.c
index d7d8dd8c0b31..37b7c3b342a6 100644
--- a/fs/afs/vlocation.c
+++ b/fs/afs/vlocation.c
@@ -340,7 +340,8 @@ static void afs_vlocation_queue_for_updates(struct afs_vlocation *vl)
 	struct afs_vlocation *xvl;
 
 	/* wait at least 10 minutes before updating... */
-	vl->update_at = get_seconds() + afs_vlocation_update_timeout;
+	vl->update_at = ktime_get_real_seconds() +
+			afs_vlocation_update_timeout;
 
 	spin_lock(&afs_vlocation_updates_lock);
 
@@ -506,7 +507,7 @@ void afs_put_vlocation(struct afs_vlocation *vl)
 	if (atomic_read(&vl->usage) == 0) {
 		_debug("buried");
 		list_move_tail(&vl->grave, &afs_vlocation_graveyard);
-		vl->time_of_death = get_seconds();
+		vl->time_of_death = ktime_get_real_seconds();
 		queue_delayed_work(afs_wq, &afs_vlocation_reap,
 				   afs_vlocation_timeout * HZ);
 
@@ -543,11 +544,11 @@ static void afs_vlocation_reaper(struct work_struct *work)
 	LIST_HEAD(corpses);
 	struct afs_vlocation *vl;
 	unsigned long delay, expiry;
-	time_t now;
+	time64_t now;
 
 	_enter("");
 
-	now = get_seconds();
+	now = ktime_get_real_seconds();
 	spin_lock(&afs_vlocation_graveyard_lock);
 
 	while (!list_empty(&afs_vlocation_graveyard)) {
@@ -622,13 +623,13 @@ static void afs_vlocation_updater(struct work_struct *work)
 {
 	struct afs_cache_vlocation vldb;
 	struct afs_vlocation *vl, *xvl;
-	time_t now;
+	time64_t now;
 	long timeout;
 	int ret;
 
 	_enter("");
 
-	now = get_seconds();
+	now = ktime_get_real_seconds();
 
 	/* find a record to update */
 	spin_lock(&afs_vlocation_updates_lock);
@@ -684,7 +685,8 @@ static void afs_vlocation_updater(struct work_struct *work)
 
 	/* and then reschedule */
 	_debug("reschedule");
-	vl->update_at = get_seconds() + afs_vlocation_update_timeout;
+	vl->update_at = ktime_get_real_seconds() +
+			afs_vlocation_update_timeout;
 
 	spin_lock(&afs_vlocation_updates_lock);
 
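These hunks are y2038 hygiene: get_seconds() returns a time_t, which is a signed 32-bit count on 32-bit systems and overflows in January 2038, while ktime_get_real_seconds() returns a time64_t. A standalone demonstration of what the narrower type does (the wraparound value is the whole point):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t next = (int64_t)INT32_MAX + 1;	/* one second past 2038-01-19T03:14:07Z */
	int32_t t32 = (int32_t)next;		/* a 32-bit time_t wraps negative, back to 1901 */

	printf("32-bit time_t: %d, time64_t: %lld\n", t32, (long long)next);
	return 0;
}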
diff --git a/fs/afs/write.c b/fs/afs/write.c
index c83c1a0e851f..2d2fccd5044b 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -84,10 +84,9 @@ void afs_put_writeback(struct afs_writeback *wb)
  * partly or wholly fill a page that's under preparation for writing
  */
 static int afs_fill_page(struct afs_vnode *vnode, struct key *key,
-			 loff_t pos, struct page *page)
+			 loff_t pos, unsigned int len, struct page *page)
 {
 	struct afs_read *req;
-	loff_t i_size;
 	int ret;
 
 	_enter(",,%llu", (unsigned long long)pos);
@@ -99,14 +98,10 @@ static int afs_fill_page(struct afs_vnode *vnode, struct key *key,
 
 	atomic_set(&req->usage, 1);
 	req->pos = pos;
+	req->len = len;
 	req->nr_pages = 1;
 	req->pages[0] = page;
-
-	i_size = i_size_read(&vnode->vfs_inode);
-	if (pos + PAGE_SIZE > i_size)
-		req->len = i_size - pos;
-	else
-		req->len = PAGE_SIZE;
+	get_page(page);
 
 	ret = afs_vnode_fetch_data(vnode, key, req);
 	afs_put_read(req);
@@ -159,12 +154,12 @@ int afs_write_begin(struct file *file, struct address_space *mapping,
 		kfree(candidate);
 		return -ENOMEM;
 	}
-	*pagep = page;
-	/* page won't leak in error case: it eventually gets cleaned off LRU */
 
 	if (!PageUptodate(page) && len != PAGE_SIZE) {
-		ret = afs_fill_page(vnode, key, index << PAGE_SHIFT, page);
+		ret = afs_fill_page(vnode, key, pos & PAGE_MASK, PAGE_SIZE, page);
 		if (ret < 0) {
+			unlock_page(page);
+			put_page(page);
 			kfree(candidate);
 			_leave(" = %d [prep]", ret);
 			return ret;
@@ -172,6 +167,9 @@ int afs_write_begin(struct file *file, struct address_space *mapping,
 		SetPageUptodate(page);
 	}
 
+	/* page won't leak in error case: it eventually gets cleaned off LRU */
+	*pagep = page;
+
 try_again:
 	spin_lock(&vnode->writeback_lock);
 
@@ -233,7 +231,7 @@ flush_conflicting_wb:
 	if (wb->state == AFS_WBACK_PENDING)
 		wb->state = AFS_WBACK_CONFLICTING;
 	spin_unlock(&vnode->writeback_lock);
-	if (PageDirty(page)) {
+	if (clear_page_dirty_for_io(page)) {
 		ret = afs_write_back_from_locked_page(wb, page);
 		if (ret < 0) {
 			afs_put_writeback(candidate);
@@ -257,7 +255,9 @@ int afs_write_end(struct file *file, struct address_space *mapping,
 		   struct page *page, void *fsdata)
 {
 	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
+	struct key *key = file->private_data;
 	loff_t i_size, maybe_i_size;
+	int ret;
 
 	_enter("{%x:%u},{%lx}",
 	       vnode->fid.vid, vnode->fid.vnode, page->index);
@@ -273,6 +273,20 @@ int afs_write_end(struct file *file, struct address_space *mapping,
 		spin_unlock(&vnode->writeback_lock);
 	}
 
+	if (!PageUptodate(page)) {
+		if (copied < len) {
+			/* Try and load any missing data from the server.  The
+			 * unmarshalling routine will take care of clearing any
+			 * bits that are beyond the EOF.
+			 */
+			ret = afs_fill_page(vnode, key, pos + copied,
+					    len - copied, page);
+			if (ret < 0)
+				return ret;
+		}
+		SetPageUptodate(page);
+	}
+
 	set_page_dirty(page);
 	if (PageDirty(page))
 		_debug("dirtied");
@@ -307,10 +321,14 @@ static void afs_kill_pages(struct afs_vnode *vnode, bool error,
 	ASSERTCMP(pv.nr, ==, count);
 
 	for (loop = 0; loop < count; loop++) {
-		ClearPageUptodate(pv.pages[loop]);
+		struct page *page = pv.pages[loop];
+		ClearPageUptodate(page);
 		if (error)
-			SetPageError(pv.pages[loop]);
-		end_page_writeback(pv.pages[loop]);
+			SetPageError(page);
+		if (PageWriteback(page))
+			end_page_writeback(page);
+		if (page->index >= first)
+			first = page->index + 1;
 	}
 
 	__pagevec_release(&pv);
@@ -335,8 +353,6 @@ static int afs_write_back_from_locked_page(struct afs_writeback *wb,
 	_enter(",%lx", primary_page->index);
 
 	count = 1;
-	if (!clear_page_dirty_for_io(primary_page))
-		BUG();
 	if (test_set_page_writeback(primary_page))
 		BUG();
 
@@ -502,17 +518,17 @@ static int afs_writepages_region(struct address_space *mapping,
 		 */
 		lock_page(page);
 
-		if (page->mapping != mapping) {
+		if (page->mapping != mapping || !PageDirty(page)) {
 			unlock_page(page);
 			put_page(page);
 			continue;
 		}
 
-		if (wbc->sync_mode != WB_SYNC_NONE)
-			wait_on_page_writeback(page);
-
-		if (PageWriteback(page) || !PageDirty(page)) {
+		if (PageWriteback(page)) {
 			unlock_page(page);
+			if (wbc->sync_mode != WB_SYNC_NONE)
+				wait_on_page_writeback(page);
+			put_page(page);
 			continue;
 		}
 
@@ -523,6 +539,8 @@ static int afs_writepages_region(struct address_space *mapping,
 		wb->state = AFS_WBACK_WRITING;
 		spin_unlock(&wb->vnode->writeback_lock);
 
+		if (!clear_page_dirty_for_io(page))
+			BUG();
 		ret = afs_write_back_from_locked_page(wb, page);
 		unlock_page(page);
 		put_page(page);
@@ -746,6 +764,20 @@ out:
 }
 
 /*
+ * Flush out all outstanding writes on a file opened for writing when it is
+ * closed.
+ */
+int afs_flush(struct file *file, fl_owner_t id)
+{
+	_enter("");
+
+	if ((file->f_mode & FMODE_WRITE) == 0)
+		return 0;
+
+	return vfs_fsync(file, 0);
+}
+
+/*
  * notification that a previously read-only page is about to become writable
  * - if it returns an error, the caller will deliver a bus error signal
  */
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 29b7fc28c607..c4115901d906 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -1259,7 +1259,7 @@ struct btrfs_root {
 	atomic_t will_be_snapshoted;
 
 	/* For qgroup metadata space reserve */
-	atomic_t qgroup_meta_rsv;
+	atomic64_t qgroup_meta_rsv;
 };
 static inline u32 btrfs_inode_sectorsize(const struct inode *inode)
 {
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 08b74daf35d0..eb1ee7b6f532 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1342,7 +1342,7 @@ static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
 	atomic_set(&root->orphan_inodes, 0);
 	atomic_set(&root->refs, 1);
 	atomic_set(&root->will_be_snapshoted, 0);
-	atomic_set(&root->qgroup_meta_rsv, 0);
+	atomic64_set(&root->qgroup_meta_rsv, 0);
 	root->log_transid = 0;
 	root->log_transid_committed = -1;
 	root->last_log_commit = 0;
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 28e81922a21c..27fdb250b446 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1714,7 +1714,8 @@ static int __process_pages_contig(struct address_space *mapping,
 		 * can we find nothing at @index.
 		 */
 		ASSERT(page_ops & PAGE_LOCK);
-		return ret;
+		err = -EAGAIN;
+		goto out;
 	}
 
 	for (i = 0; i < ret; i++) {
@@ -2583,26 +2584,36 @@ static void end_bio_extent_readpage(struct bio *bio)
 
 		if (tree->ops) {
 			ret = tree->ops->readpage_io_failed_hook(page, mirror);
-			if (!ret && !bio->bi_error)
-				uptodate = 1;
-		} else {
+			if (ret == -EAGAIN) {
+				/*
+				 * Data inode's readpage_io_failed_hook() always
+				 * returns -EAGAIN.
+				 *
+				 * The generic bio_readpage_error handles errors
+				 * the following way: If possible, new read
+				 * requests are created and submitted and will
+				 * end up in end_bio_extent_readpage as well (if
+				 * we're lucky, not in the !uptodate case). In
+				 * that case it returns 0 and we just go on with
+				 * the next page in our bio. If it can't handle
+				 * the error it will return -EIO and we remain
+				 * responsible for that page.
+				 */
+				ret = bio_readpage_error(bio, offset, page,
+							 start, end, mirror);
+				if (ret == 0) {
+					uptodate = !bio->bi_error;
+					offset += len;
+					continue;
+				}
+			}
+
 			/*
-			 * The generic bio_readpage_error handles errors the
-			 * following way: If possible, new read requests are
-			 * created and submitted and will end up in
-			 * end_bio_extent_readpage as well (if we're lucky, not
-			 * in the !uptodate case). In that case it returns 0 and
-			 * we just go on with the next page in our bio. If it
-			 * can't handle the error it will return -EIO and we
-			 * remain responsible for that page.
+			 * metadata's readpage_io_failed_hook() always returns
+			 * -EIO and fixes nothing. -EIO is also returned if
+			 * data inode error could not be fixed.
 			 */
-			ret = bio_readpage_error(bio, offset, page, start, end,
-						 mirror);
-			if (ret == 0) {
-				uptodate = !bio->bi_error;
-				offset += len;
-				continue;
-			}
+			ASSERT(ret == -EIO);
 		}
 readpage_ok:
 		if (likely(uptodate)) {
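The contract change here: a data inode's readpage_io_failed_hook() now returns -EAGAIN to mean "hand this page to the repair path", while metadata's hook keeps returning -EIO ("nothing to fix"). Reduced to its skeleton, with try_repair() as a hypothetical stand-in for bio_readpage_error():

#include <errno.h>
#include <stdbool.h>

extern int try_repair(void);	/* 0 when a retry read was submitted, -EIO if unfixable */

static bool handle_read_failure(int hook_ret)
{
	if (hook_ret == -EAGAIN && try_repair() == 0)
		return true;	/* repair path took over the page */
	return false;		/* -EIO either way: the page stays failed */
}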
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index c40060cc481f..a18510be76c1 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -6709,6 +6709,20 @@ static noinline int uncompress_inline(struct btrfs_path *path,
 	max_size = min_t(unsigned long, PAGE_SIZE, max_size);
 	ret = btrfs_decompress(compress_type, tmp, page,
 			       extent_offset, inline_size, max_size);
+
+	/*
+	 * decompression code contains a memset to fill in any space between the
+	 * end of the uncompressed data and the end of max_size in case the
+	 * decompressed data ends up shorter than ram_bytes.  That doesn't cover
+	 * the hole between the end of an inline extent and the beginning of the
+	 * next block, so we cover that region here.
+	 */
+
+	if (max_size + pg_offset < PAGE_SIZE) {
+		char *map = kmap(page);
+		memset(map + pg_offset + max_size, 0,
+		       PAGE_SIZE - max_size - pg_offset);
+		kunmap(page);
+	}
 	kfree(tmp);
 	return ret;
 }
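The added memset closes an information-leak-shaped gap: btrfs_decompress() only zeroes up to max_size, so the bytes between max_size + pg_offset and the end of the page would otherwise keep stale contents. The same fix in isolation (PG_SIZE stands in for PAGE_SIZE):

#include <string.h>

#define PG_SIZE 4096

static void zero_tail(char *page, size_t pg_offset, size_t max_size)
{
	/* Clear everything past the valid data to the end of the page. */
	if (max_size + pg_offset < PG_SIZE)
		memset(page + pg_offset + max_size, 0,
		       PG_SIZE - max_size - pg_offset);
}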
@@ -10509,9 +10523,9 @@ out_inode:
 }
 
 __attribute__((const))
-static int dummy_readpage_io_failed_hook(struct page *page, int failed_mirror)
+static int btrfs_readpage_io_failed_hook(struct page *page, int failed_mirror)
 {
-	return 0;
+	return -EAGAIN;
 }
 
 static const struct inode_operations btrfs_dir_inode_operations = {
@@ -10556,7 +10570,7 @@ static const struct extent_io_ops btrfs_extent_io_ops = {
 	.submit_bio_hook = btrfs_submit_bio_hook,
 	.readpage_end_io_hook = btrfs_readpage_end_io_hook,
 	.merge_bio_hook = btrfs_merge_bio_hook,
-	.readpage_io_failed_hook = dummy_readpage_io_failed_hook,
+	.readpage_io_failed_hook = btrfs_readpage_io_failed_hook,
 
 	/* optional callbacks */
 	.fill_delalloc = run_delalloc_range,
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index a5da750c1087..a59801dc2a34 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -2948,20 +2948,20 @@ int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
 	ret = qgroup_reserve(root, num_bytes, enforce);
 	if (ret < 0)
 		return ret;
-	atomic_add(num_bytes, &root->qgroup_meta_rsv);
+	atomic64_add(num_bytes, &root->qgroup_meta_rsv);
 	return ret;
 }
 
 void btrfs_qgroup_free_meta_all(struct btrfs_root *root)
 {
 	struct btrfs_fs_info *fs_info = root->fs_info;
-	int reserved;
+	u64 reserved;
 
 	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
 	    !is_fstree(root->objectid))
 		return;
 
-	reserved = atomic_xchg(&root->qgroup_meta_rsv, 0);
+	reserved = atomic64_xchg(&root->qgroup_meta_rsv, 0);
 	if (reserved == 0)
 		return;
 	btrfs_qgroup_free_refroot(fs_info, root->objectid, reserved);
@@ -2976,8 +2976,8 @@ void btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes)
 		return;
 
 	BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
-	WARN_ON(atomic_read(&root->qgroup_meta_rsv) < num_bytes);
-	atomic_sub(num_bytes, &root->qgroup_meta_rsv);
+	WARN_ON(atomic64_read(&root->qgroup_meta_rsv) < num_bytes);
+	atomic64_sub(num_bytes, &root->qgroup_meta_rsv);
 	btrfs_qgroup_free_refroot(fs_info, root->objectid, num_bytes);
 }
 
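The atomic_t to atomic64_t switch matters because qgroup_meta_rsv counts bytes: a 32-bit counter misbehaves once more than 2 GiB of metadata reservations accumulate. A standalone demonstration of the truncation (the reservation sizes are illustrative):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t rsv = 0;
	int i;

	for (i = 0; i < 70; i++)	/* 70 reservations of 32 MiB = 2240 MiB */
		rsv += 32u << 20;

	printf("64-bit counter: %llu\n", (unsigned long long)rsv);
	printf("same total through a 32-bit counter: %d\n", (int32_t)rsv);
	return 0;	/* the 32-bit view has gone negative */
}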
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 456c8901489b..a60d5bfb8a49 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -6305,8 +6305,13 @@ long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_)
 		goto out;
 	}
 
+	/*
+	 * Check that we don't overflow at later allocations, we request
+	 * clone_sources_count + 1 items, and compare to unsigned long inside
+	 * access_ok.
+	 */
 	if (arg->clone_sources_count >
-	    ULLONG_MAX / sizeof(*arg->clone_sources)) {
+	    ULONG_MAX / sizeof(struct clone_root) - 1) {
 		ret = -EINVAL;
 		goto out;
 	}
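The rewritten bound is a standard overflow-safe allocation guard: since the code later works with clone_sources_count + 1 items and access_ok() compares against an unsigned long, the count must be validated against ULONG_MAX / item_size - 1 before any multiplication happens. The same guard in a standalone sketch (struct clone_src is a stand-in for the kernel's struct clone_root):

#include <stdint.h>
#include <stdlib.h>

struct clone_src { uint64_t root; uint64_t ino; };

static void *alloc_sources(size_t count)
{
	/* (count + 1) * sizeof(...) must not wrap a size_t. */
	if (count > SIZE_MAX / sizeof(struct clone_src) - 1)
		return NULL;
	return malloc((count + 1) * sizeof(struct clone_src));
}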
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 15e1db8738ae..dd3f5fabfdf6 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -972,6 +972,86 @@ out:
 	return rc;
 }
 
+ssize_t cifs_file_copychunk_range(unsigned int xid,
+				struct file *src_file, loff_t off,
+				struct file *dst_file, loff_t destoff,
+				size_t len, unsigned int flags)
+{
+	struct inode *src_inode = file_inode(src_file);
+	struct inode *target_inode = file_inode(dst_file);
+	struct cifsFileInfo *smb_file_src;
+	struct cifsFileInfo *smb_file_target;
+	struct cifs_tcon *src_tcon;
+	struct cifs_tcon *target_tcon;
+	ssize_t rc;
+
+	cifs_dbg(FYI, "copychunk range\n");
+
+	if (src_inode == target_inode) {
+		rc = -EINVAL;
+		goto out;
+	}
+
+	if (!src_file->private_data || !dst_file->private_data) {
+		rc = -EBADF;
+		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
+		goto out;
+	}
+
+	rc = -EXDEV;
+	smb_file_target = dst_file->private_data;
+	smb_file_src = src_file->private_data;
+	src_tcon = tlink_tcon(smb_file_src->tlink);
+	target_tcon = tlink_tcon(smb_file_target->tlink);
+
+	if (src_tcon->ses != target_tcon->ses) {
+		cifs_dbg(VFS, "source and target of copy not on same server\n");
+		goto out;
+	}
+
+	/*
+	 * Note: cifs case is easier than btrfs since server responsible for
+	 * checks for proper open modes and file type and if it wants
+	 * server could even support copy of range where source = target
+	 */
+	lock_two_nondirectories(target_inode, src_inode);
+
+	cifs_dbg(FYI, "about to flush pages\n");
+	/* should we flush first and last page first */
+	truncate_inode_pages(&target_inode->i_data, 0);
+
+	if (target_tcon->ses->server->ops->copychunk_range)
+		rc = target_tcon->ses->server->ops->copychunk_range(xid,
+			smb_file_src, smb_file_target, off, len, destoff);
+	else
+		rc = -EOPNOTSUPP;
+
+	/* force revalidate of size and timestamps of target file now
+	 * that target is updated on the server
+	 */
+	CIFS_I(target_inode)->time = 0;
+	/* although unlocking in the reverse order from locking is not
+	 * strictly necessary here it is a little cleaner to be consistent
+	 */
+	unlock_two_nondirectories(src_inode, target_inode);
+
+out:
+	return rc;
+}
+
+static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off,
+				struct file *dst_file, loff_t destoff,
+				size_t len, unsigned int flags)
+{
+	unsigned int xid = get_xid();
+	ssize_t rc;
+
+	rc = cifs_file_copychunk_range(xid, src_file, off, dst_file, destoff,
+					len, flags);
+	free_xid(xid);
+	return rc;
+}
+
 const struct file_operations cifs_file_ops = {
 	.read_iter = cifs_loose_read_iter,
 	.write_iter = cifs_file_write_iter,
@@ -984,6 +1064,7 @@ const struct file_operations cifs_file_ops = {
 	.splice_read = generic_file_splice_read,
 	.llseek = cifs_llseek,
 	.unlocked_ioctl	= cifs_ioctl,
+	.copy_file_range = cifs_copy_file_range,
 	.clone_file_range = cifs_clone_file_range,
 	.setlease = cifs_setlease,
 	.fallocate = cifs_fallocate,
@@ -1001,6 +1082,7 @@ const struct file_operations cifs_file_strict_ops = {
 	.splice_read = generic_file_splice_read,
 	.llseek = cifs_llseek,
 	.unlocked_ioctl = cifs_ioctl,
+	.copy_file_range = cifs_copy_file_range,
 	.clone_file_range = cifs_clone_file_range,
 	.setlease = cifs_setlease,
 	.fallocate = cifs_fallocate,
@@ -1018,6 +1100,7 @@ const struct file_operations cifs_file_direct_ops = {
 	.mmap = cifs_file_mmap,
 	.splice_read = generic_file_splice_read,
 	.unlocked_ioctl = cifs_ioctl,
+	.copy_file_range = cifs_copy_file_range,
 	.clone_file_range = cifs_clone_file_range,
 	.llseek = cifs_llseek,
 	.setlease = cifs_setlease,
@@ -1035,6 +1118,7 @@ const struct file_operations cifs_file_nobrl_ops = {
 	.splice_read = generic_file_splice_read,
 	.llseek = cifs_llseek,
 	.unlocked_ioctl = cifs_ioctl,
+	.copy_file_range = cifs_copy_file_range,
 	.clone_file_range = cifs_clone_file_range,
 	.setlease = cifs_setlease,
 	.fallocate = cifs_fallocate,
@@ -1051,6 +1135,7 @@ const struct file_operations cifs_file_strict_nobrl_ops = {
 	.splice_read = generic_file_splice_read,
 	.llseek = cifs_llseek,
 	.unlocked_ioctl = cifs_ioctl,
+	.copy_file_range = cifs_copy_file_range,
 	.clone_file_range = cifs_clone_file_range,
 	.setlease = cifs_setlease,
 	.fallocate = cifs_fallocate,
@@ -1067,6 +1152,7 @@ const struct file_operations cifs_file_direct_nobrl_ops = {
 	.mmap = cifs_file_mmap,
 	.splice_read = generic_file_splice_read,
 	.unlocked_ioctl = cifs_ioctl,
+	.copy_file_range = cifs_copy_file_range,
 	.clone_file_range = cifs_clone_file_range,
 	.llseek = cifs_llseek,
 	.setlease = cifs_setlease,
@@ -1078,6 +1164,7 @@ const struct file_operations cifs_dir_ops = {
 	.release = cifs_closedir,
 	.read = generic_read_dir,
 	.unlocked_ioctl = cifs_ioctl,
+	.copy_file_range = cifs_copy_file_range,
 	.clone_file_range = cifs_clone_file_range,
 	.llseek = generic_file_llseek,
 };
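With .copy_file_range wired into every file_operations table above, a userspace copy_file_range(2) call on a CIFS mount is now routed into cifs_file_copychunk_range() and executed server-side. A minimal sketch of the caller side (the glibc wrapper exists from 2.27; paths are illustrative):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char *argv[])
{
	if (argc != 3)
		return 1;

	int in = open(argv[1], O_RDONLY);
	int out = open(argv[2], O_WRONLY | O_CREAT | O_TRUNC, 0644);
	if (in < 0 || out < 0)
		return 1;

	off_t len = lseek(in, 0, SEEK_END);	/* size of the source */
	lseek(in, 0, SEEK_SET);

	while (len > 0) {
		/* NULL offsets: the kernel advances both file positions. */
		ssize_t n = copy_file_range(in, NULL, out, NULL, len, 0);
		if (n <= 0) {
			perror("copy_file_range");
			return 1;
		}
		len -= n;
	}
	close(in);
	close(out);
	return 0;
}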
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index da717fee3026..30bf89b1fd9a 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -139,6 +139,11 @@ extern ssize_t cifs_listxattr(struct dentry *, char *, size_t);
 # define cifs_listxattr NULL
 #endif
 
+extern ssize_t cifs_file_copychunk_range(unsigned int xid,
+					struct file *src_file, loff_t off,
+					struct file *dst_file, loff_t destoff,
+					size_t len, unsigned int flags);
+
 extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
 #ifdef CONFIG_CIFS_NFSD_EXPORT
 extern const struct export_operations cifs_export_ops;
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index d42dd3288647..d07f13a63369 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -243,6 +243,7 @@ struct smb_version_operations {
 	/* verify the message */
 	int (*check_message)(char *, unsigned int, struct TCP_Server_Info *);
 	bool (*is_oplock_break)(char *, struct TCP_Server_Info *);
+	int (*handle_cancelled_mid)(char *, struct TCP_Server_Info *);
 	void (*downgrade_oplock)(struct TCP_Server_Info *,
 					struct cifsInodeInfo *, bool);
 	/* process transaction2 response */
@@ -407,9 +408,10 @@ struct smb_version_operations {
 	char * (*create_lease_buf)(u8 *, u8);
 	/* parse lease context buffer and return oplock/epoch info */
 	__u8 (*parse_lease_buf)(void *, unsigned int *);
-	int (*clone_range)(const unsigned int, struct cifsFileInfo *src_file,
-			struct cifsFileInfo *target_file, u64 src_off, u64 len,
-			u64 dest_off);
+	ssize_t (*copychunk_range)(const unsigned int,
+			struct cifsFileInfo *src_file,
+			struct cifsFileInfo *target_file,
+			u64 src_off, u64 len, u64 dest_off);
 	int (*duplicate_extents)(const unsigned int, struct cifsFileInfo *src,
 			struct cifsFileInfo *target_file, u64 src_off, u64 len,
 			u64 dest_off);
@@ -1343,6 +1345,7 @@ struct mid_q_entry {
 	void *callback_data;	/* general purpose pointer for callback */
 	void *resp_buf;		/* pointer to received SMB header */
 	int mid_state;	/* wish this were enum but can not pass to wait_event */
+	unsigned int mid_flags;
 	__le16 command;		/* smb command code */
 	bool large_buf:1;	/* if valid response, is pointer to large buf */
 	bool multiRsp:1;	/* multiple trans2 responses for one request */
@@ -1350,6 +1353,12 @@ struct mid_q_entry {
 	bool decrypted:1;	/* decrypted entry */
 };
 
+struct close_cancelled_open {
+	struct cifs_fid		fid;
+	struct cifs_tcon	*tcon;
+	struct work_struct	work;
+};
+
 /*	Make code in transport.c a little cleaner by moving
 	update of optional stats into function below */
 #ifdef CONFIG_CIFS_STATS2
@@ -1481,6 +1490,9 @@ static inline void free_dfs_info_array(struct dfs_info3_param *param,
 #define   MID_RESPONSE_MALFORMED 0x10
 #define   MID_SHUTDOWN		 0x20
 
+/* Flags */
+#define   MID_WAIT_CANCELLED	 1 /* Cancelled while waiting for response */
+
 /* Types of response buffer returned from SendReceive2 */
 #define   CIFS_NO_BUFFER	0    /* Response buffer not returned */
 #define   CIFS_SMALL_BUFFER	1
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 97e5d236d265..ec5e5e514fdd 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -79,7 +79,8 @@ extern void cifs_delete_mid(struct mid_q_entry *mid);
 extern void cifs_wake_up_task(struct mid_q_entry *mid);
 extern int cifs_handle_standard(struct TCP_Server_Info *server,
 				struct mid_q_entry *mid);
-extern int cifs_discard_remaining_data(struct TCP_Server_Info *server);
+extern int cifs_discard_remaining_data(struct TCP_Server_Info *server,
+				       char *buf);
 extern int cifs_call_async(struct TCP_Server_Info *server,
 			   struct smb_rqst *rqst,
 			   mid_receive_t *receive, mid_callback_t *callback,
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 066950671929..967b92631807 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -1400,9 +1400,9 @@ openRetry:
  * current bigbuf.
  */
 int
-cifs_discard_remaining_data(struct TCP_Server_Info *server)
+cifs_discard_remaining_data(struct TCP_Server_Info *server, char *buf)
 {
-	unsigned int rfclen = get_rfc1002_length(server->smallbuf);
+	unsigned int rfclen = get_rfc1002_length(buf);
 	int remaining = rfclen + 4 - server->total_read;
 
 	while (remaining > 0) {
@@ -1426,7 +1426,7 @@ cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
 	int length;
 	struct cifs_readdata *rdata = mid->callback_data;
 
-	length = cifs_discard_remaining_data(server);
+	length = cifs_discard_remaining_data(server, mid->resp_buf);
 	dequeue_mid(mid, rdata->result);
 	return length;
 }
@@ -1459,7 +1459,7 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
 
 	if (server->ops->is_status_pending &&
 	    server->ops->is_status_pending(buf, server, 0)) {
-		cifs_discard_remaining_data(server);
+		cifs_discard_remaining_data(server, buf);
 		return -1;
 	}
 
@@ -1519,6 +1519,9 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
 	cifs_dbg(FYI, "0: iov_base=%p iov_len=%u\n",
 		 rdata->iov[0].iov_base, server->total_read);
 
+	mid->resp_buf = server->smallbuf;
+	server->smallbuf = NULL;
+
 	/* how much data is in the response? */
 	data_len = server->ops->read_data_length(buf);
 	if (data_offset + data_len > buflen) {
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 9ae695ae3ed7..0c7596cef4b8 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -904,10 +904,19 @@ cifs_demultiplex_thread(void *p)
 
 		server->lstrp = jiffies;
 		if (mid_entry != NULL) {
+			if ((mid_entry->mid_flags & MID_WAIT_CANCELLED) &&
+			     mid_entry->mid_state == MID_RESPONSE_RECEIVED &&
+					server->ops->handle_cancelled_mid)
+				server->ops->handle_cancelled_mid(
+							mid_entry->resp_buf,
+							server);
+
 			if (!mid_entry->multiRsp || mid_entry->multiEnd)
 				mid_entry->callback(mid_entry);
-		} else if (!server->ops->is_oplock_break ||
-			   !server->ops->is_oplock_break(buf, server)) {
+		} else if (server->ops->is_oplock_break &&
+			   server->ops->is_oplock_break(buf, server)) {
+			cifs_dbg(FYI, "Received oplock break\n");
+		} else {
 			cifs_dbg(VFS, "No task to wake, unknown frame received! NumMids %d\n",
 				 atomic_read(&midCount));
 			cifs_dump_mem("Received Data is: ", buf,
diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c
index 001528781b6b..265c45fe4ea5 100644
--- a/fs/cifs/ioctl.c
+++ b/fs/cifs/ioctl.c
@@ -34,71 +34,14 @@
34#include "cifs_ioctl.h" 34#include "cifs_ioctl.h"
35#include <linux/btrfs.h> 35#include <linux/btrfs.h>
36 36
37static int cifs_file_clone_range(unsigned int xid, struct file *src_file, 37static long cifs_ioctl_copychunk(unsigned int xid, struct file *dst_file,
38 struct file *dst_file)
39{
40 struct inode *src_inode = file_inode(src_file);
41 struct inode *target_inode = file_inode(dst_file);
42 struct cifsFileInfo *smb_file_src;
43 struct cifsFileInfo *smb_file_target;
44 struct cifs_tcon *src_tcon;
45 struct cifs_tcon *target_tcon;
46 int rc;
47
48 cifs_dbg(FYI, "ioctl clone range\n");
49
50 if (!src_file->private_data || !dst_file->private_data) {
51 rc = -EBADF;
52 cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
53 goto out;
54 }
55
56 rc = -EXDEV;
57 smb_file_target = dst_file->private_data;
58 smb_file_src = src_file->private_data;
59 src_tcon = tlink_tcon(smb_file_src->tlink);
60 target_tcon = tlink_tcon(smb_file_target->tlink);
61
62 if (src_tcon->ses != target_tcon->ses) {
63 cifs_dbg(VFS, "source and target of copy not on same server\n");
64 goto out;
65 }
66
67 /*
68 * Note: cifs case is easier than btrfs since server responsible for
69 * checks for proper open modes and file type and if it wants
70 * server could even support copy of range where source = target
71 */
72 lock_two_nondirectories(target_inode, src_inode);
73
74 cifs_dbg(FYI, "about to flush pages\n");
75 /* should we flush first and last page first */
76 truncate_inode_pages(&target_inode->i_data, 0);
77
78 if (target_tcon->ses->server->ops->clone_range)
79 rc = target_tcon->ses->server->ops->clone_range(xid,
80 smb_file_src, smb_file_target, 0, src_inode->i_size, 0);
81 else
82 rc = -EOPNOTSUPP;
83
84 /* force revalidate of size and timestamps of target file now
85 that target is updated on the server */
86 CIFS_I(target_inode)->time = 0;
87 /* although unlocking in the reverse order from locking is not
88 strictly necessary here it is a little cleaner to be consistent */
89 unlock_two_nondirectories(src_inode, target_inode);
90out:
91 return rc;
92}
93
94static long cifs_ioctl_clone(unsigned int xid, struct file *dst_file,
95 unsigned long srcfd) 38 unsigned long srcfd)
96{ 39{
97 int rc; 40 int rc;
98 struct fd src_file; 41 struct fd src_file;
99 struct inode *src_inode; 42 struct inode *src_inode;
100 43
101 cifs_dbg(FYI, "ioctl clone range\n"); 44 cifs_dbg(FYI, "ioctl copychunk range\n");
102 /* the destination must be opened for writing */ 45 /* the destination must be opened for writing */
103 if (!(dst_file->f_mode & FMODE_WRITE)) { 46 if (!(dst_file->f_mode & FMODE_WRITE)) {
104 cifs_dbg(FYI, "file target not open for write\n"); 47 cifs_dbg(FYI, "file target not open for write\n");
@@ -129,7 +72,8 @@ static long cifs_ioctl_clone(unsigned int xid, struct file *dst_file,
129 if (S_ISDIR(src_inode->i_mode)) 72 if (S_ISDIR(src_inode->i_mode))
130 goto out_fput; 73 goto out_fput;
131 74
132 rc = cifs_file_clone_range(xid, src_file.file, dst_file); 75 rc = cifs_file_copychunk_range(xid, src_file.file, 0, dst_file, 0,
76 src_inode->i_size, 0);
133 77
134out_fput: 78out_fput:
135 fdput(src_file); 79 fdput(src_file);
@@ -251,7 +195,7 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
251 } 195 }
252 break; 196 break;
253 case CIFS_IOC_COPYCHUNK_FILE: 197 case CIFS_IOC_COPYCHUNK_FILE:
254 rc = cifs_ioctl_clone(xid, filep, arg); 198 rc = cifs_ioctl_copychunk(xid, filep, arg);
255 break; 199 break;
256 case CIFS_IOC_SET_INTEGRITY: 200 case CIFS_IOC_SET_INTEGRITY:
257 if (pSMBFile == NULL) 201 if (pSMBFile == NULL)
diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
index fd516ea8b8f8..1a04b3a5beb1 100644
--- a/fs/cifs/smb2misc.c
+++ b/fs/cifs/smb2misc.c
@@ -659,3 +659,49 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
 	cifs_dbg(FYI, "Can not process oplock break for non-existent connection\n");
 	return false;
 }
+
+void
+smb2_cancelled_close_fid(struct work_struct *work)
+{
+	struct close_cancelled_open *cancelled = container_of(work,
+					struct close_cancelled_open, work);
+
+	cifs_dbg(VFS, "Close unmatched open\n");
+
+	SMB2_close(0, cancelled->tcon, cancelled->fid.persistent_fid,
+		   cancelled->fid.volatile_fid);
+	cifs_put_tcon(cancelled->tcon);
+	kfree(cancelled);
+}
+
+int
+smb2_handle_cancelled_mid(char *buffer, struct TCP_Server_Info *server)
+{
+	struct smb2_sync_hdr *sync_hdr = get_sync_hdr(buffer);
+	struct smb2_create_rsp *rsp = (struct smb2_create_rsp *)buffer;
+	struct cifs_tcon *tcon;
+	struct close_cancelled_open *cancelled;
+
+	if (sync_hdr->Command != SMB2_CREATE ||
+	    sync_hdr->Status != STATUS_SUCCESS)
+		return 0;
+
+	cancelled = kzalloc(sizeof(*cancelled), GFP_KERNEL);
+	if (!cancelled)
+		return -ENOMEM;
+
+	tcon = smb2_find_smb_tcon(server, sync_hdr->SessionId,
+				  sync_hdr->TreeId);
+	if (!tcon) {
+		kfree(cancelled);
+		return -ENOENT;
+	}
+
+	cancelled->fid.persistent_fid = rsp->PersistentFileId;
+	cancelled->fid.volatile_fid = rsp->VolatileFileId;
+	cancelled->tcon = tcon;
+	INIT_WORK(&cancelled->work, smb2_cancelled_close_fid);
+	queue_work(cifsiod_wq, &cancelled->work);
+
+	return 0;
+}
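The fix defers the SMB2_close to a workqueue because the demultiplex thread that notices the orphaned open must not block sending a request itself. The general shape of that pattern, reduced to a skeleton with placeholder names (not the cifs code; release_handle() is hypothetical):

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/workqueue.h>

struct deferred_close {
	u64 id;				/* handle to release */
	struct work_struct work;
};

static void deferred_close_fn(struct work_struct *work)
{
	struct deferred_close *dc =
		container_of(work, struct deferred_close, work);

	/* release_handle(dc->id); -- the actual close would go here */
	kfree(dc);
}

static int defer_close(u64 id)
{
	struct deferred_close *dc = kzalloc(sizeof(*dc), GFP_KERNEL);

	if (!dc)
		return -ENOMEM;
	dc->id = id;
	INIT_WORK(&dc->work, deferred_close_fn);
	schedule_work(&dc->work);	/* or queue_work() on a private wq */
	return 0;
}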
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 0231108d9387..7b12a727947e 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -21,6 +21,7 @@
 #include <linux/vfs.h>
 #include <linux/falloc.h>
 #include <linux/scatterlist.h>
+#include <linux/uuid.h>
 #include <crypto/aead.h>
 #include "cifsglob.h"
 #include "smb2pdu.h"
@@ -592,8 +593,8 @@ req_res_key_exit:
 	return rc;
 }
 
-static int
-smb2_clone_range(const unsigned int xid,
+static ssize_t
+smb2_copychunk_range(const unsigned int xid,
 			struct cifsFileInfo *srcfile,
 			struct cifsFileInfo *trgtfile, u64 src_off,
 			u64 len, u64 dest_off)
@@ -605,13 +606,14 @@ smb2_clone_range(const unsigned int xid,
 	struct cifs_tcon *tcon;
 	int chunks_copied = 0;
 	bool chunk_sizes_updated = false;
+	ssize_t bytes_written, total_bytes_written = 0;
 
 	pcchunk = kmalloc(sizeof(struct copychunk_ioctl), GFP_KERNEL);
 
 	if (pcchunk == NULL)
 		return -ENOMEM;
 
-	cifs_dbg(FYI, "in smb2_clone_range - about to call request res key\n");
+	cifs_dbg(FYI, "in smb2_copychunk_range - about to call request res key\n");
 	/* Request a key from the server to identify the source of the copy */
 	rc = SMB2_request_res_key(xid, tlink_tcon(srcfile->tlink),
 				  srcfile->fid.persistent_fid,
@@ -669,14 +671,16 @@ smb2_clone_range(const unsigned int xid,
 			}
 			chunks_copied++;
 
-			src_off += le32_to_cpu(retbuf->TotalBytesWritten);
-			dest_off += le32_to_cpu(retbuf->TotalBytesWritten);
-			len -= le32_to_cpu(retbuf->TotalBytesWritten);
+			bytes_written = le32_to_cpu(retbuf->TotalBytesWritten);
+			src_off += bytes_written;
+			dest_off += bytes_written;
+			len -= bytes_written;
+			total_bytes_written += bytes_written;
 
-			cifs_dbg(FYI, "Chunks %d PartialChunk %d Total %d\n",
+			cifs_dbg(FYI, "Chunks %d PartialChunk %d Total %zu\n",
 				le32_to_cpu(retbuf->ChunksWritten),
 				le32_to_cpu(retbuf->ChunkBytesWritten),
-				le32_to_cpu(retbuf->TotalBytesWritten));
+				bytes_written);
 		} else if (rc == -EINVAL) {
 			if (ret_data_len != sizeof(struct copychunk_ioctl_rsp))
 				goto cchunk_out;
@@ -713,7 +717,10 @@ smb2_clone_range(const unsigned int xid,
 cchunk_out:
 	kfree(pcchunk);
 	kfree(retbuf);
-	return rc;
+	if (rc)
+		return rc;
+	else
+		return total_bytes_written;
 }
 
 static int
@@ -2188,7 +2195,7 @@ receive_encrypted_read(struct TCP_Server_Info *server, struct mid_q_entry **mid)
 	if (rc)
 		goto free_pages;
 
-	rc = cifs_discard_remaining_data(server);
+	rc = cifs_discard_remaining_data(server, buf);
 	if (rc)
 		goto free_pages;
 
@@ -2214,7 +2221,7 @@ free_pages:
 	kfree(pages);
 	return rc;
 discard_data:
-	cifs_discard_remaining_data(server);
+	cifs_discard_remaining_data(server, buf);
 	goto free_pages;
 }
 
@@ -2322,6 +2329,7 @@ struct smb_version_operations smb20_operations = {
 	.clear_stats = smb2_clear_stats,
 	.print_stats = smb2_print_stats,
 	.is_oplock_break = smb2_is_valid_oplock_break,
+	.handle_cancelled_mid = smb2_handle_cancelled_mid,
 	.downgrade_oplock = smb2_downgrade_oplock,
 	.need_neg = smb2_need_neg,
 	.negotiate = smb2_negotiate,
@@ -2377,7 +2385,7 @@ struct smb_version_operations smb20_operations = {
 	.set_oplock_level = smb2_set_oplock_level,
 	.create_lease_buf = smb2_create_lease_buf,
 	.parse_lease_buf = smb2_parse_lease_buf,
-	.clone_range = smb2_clone_range,
+	.copychunk_range = smb2_copychunk_range,
 	.wp_retry_size = smb2_wp_retry_size,
 	.dir_needs_close = smb2_dir_needs_close,
 	.get_dfs_refer = smb2_get_dfs_refer,
@@ -2404,6 +2412,7 @@ struct smb_version_operations smb21_operations = {
 	.clear_stats = smb2_clear_stats,
 	.print_stats = smb2_print_stats,
 	.is_oplock_break = smb2_is_valid_oplock_break,
+	.handle_cancelled_mid = smb2_handle_cancelled_mid,
 	.downgrade_oplock = smb2_downgrade_oplock,
 	.need_neg = smb2_need_neg,
 	.negotiate = smb2_negotiate,
@@ -2459,7 +2468,7 @@ struct smb_version_operations smb21_operations = {
 	.set_oplock_level = smb21_set_oplock_level,
 	.create_lease_buf = smb2_create_lease_buf,
 	.parse_lease_buf = smb2_parse_lease_buf,
-	.clone_range = smb2_clone_range,
+	.copychunk_range = smb2_copychunk_range,
 	.wp_retry_size = smb2_wp_retry_size,
 	.dir_needs_close = smb2_dir_needs_close,
 	.enum_snapshots = smb3_enum_snapshots,
@@ -2488,6 +2497,7 @@ struct smb_version_operations smb30_operations = {
 	.print_stats = smb2_print_stats,
 	.dump_share_caps = smb2_dump_share_caps,
 	.is_oplock_break = smb2_is_valid_oplock_break,
+	.handle_cancelled_mid = smb2_handle_cancelled_mid,
 	.downgrade_oplock = smb2_downgrade_oplock,
 	.need_neg = smb2_need_neg,
 	.negotiate = smb2_negotiate,
@@ -2545,7 +2555,7 @@ struct smb_version_operations smb30_operations = {
 	.set_oplock_level = smb3_set_oplock_level,
 	.create_lease_buf = smb3_create_lease_buf,
 	.parse_lease_buf = smb3_parse_lease_buf,
-	.clone_range = smb2_clone_range,
+	.copychunk_range = smb2_copychunk_range,
2549 .duplicate_extents = smb2_duplicate_extents, 2559 .duplicate_extents = smb2_duplicate_extents,
2550 .validate_negotiate = smb3_validate_negotiate, 2560 .validate_negotiate = smb3_validate_negotiate,
2551 .wp_retry_size = smb2_wp_retry_size, 2561 .wp_retry_size = smb2_wp_retry_size,
@@ -2582,6 +2592,7 @@ struct smb_version_operations smb311_operations = {
2582 .print_stats = smb2_print_stats, 2592 .print_stats = smb2_print_stats,
2583 .dump_share_caps = smb2_dump_share_caps, 2593 .dump_share_caps = smb2_dump_share_caps,
2584 .is_oplock_break = smb2_is_valid_oplock_break, 2594 .is_oplock_break = smb2_is_valid_oplock_break,
2595 .handle_cancelled_mid = smb2_handle_cancelled_mid,
2585 .downgrade_oplock = smb2_downgrade_oplock, 2596 .downgrade_oplock = smb2_downgrade_oplock,
2586 .need_neg = smb2_need_neg, 2597 .need_neg = smb2_need_neg,
2587 .negotiate = smb2_negotiate, 2598 .negotiate = smb2_negotiate,
@@ -2639,7 +2650,7 @@ struct smb_version_operations smb311_operations = {
2639 .set_oplock_level = smb3_set_oplock_level, 2650 .set_oplock_level = smb3_set_oplock_level,
2640 .create_lease_buf = smb3_create_lease_buf, 2651 .create_lease_buf = smb3_create_lease_buf,
2641 .parse_lease_buf = smb3_parse_lease_buf, 2652 .parse_lease_buf = smb3_parse_lease_buf,
2642 .clone_range = smb2_clone_range, 2653 .copychunk_range = smb2_copychunk_range,
2643 .duplicate_extents = smb2_duplicate_extents, 2654 .duplicate_extents = smb2_duplicate_extents,
2644/* .validate_negotiate = smb3_validate_negotiate, */ /* not used in 3.11 */ 2655/* .validate_negotiate = smb3_validate_negotiate, */ /* not used in 3.11 */
2645 .wp_retry_size = smb2_wp_retry_size, 2656 .wp_retry_size = smb2_wp_retry_size,
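The smb2_copychunk_range() conversion above adopts the copy_file_range() return convention: a negative errno on failure, otherwise the number of bytes actually copied, accumulated chunk by chunk. A minimal userspace sketch of that convention, assuming plain read()/write() in place of the SMB2 copychunk FSCTL (everything below is illustrative, not cifs code):

#include <errno.h>
#include <sys/types.h>
#include <unistd.h>

static ssize_t copy_in_chunks(int src_fd, int dst_fd, size_t len)
{
	char buf[64 * 1024];
	ssize_t total = 0;

	while (len > 0) {
		size_t want = len < sizeof(buf) ? len : sizeof(buf);
		ssize_t n = read(src_fd, buf, want);

		if (n < 0)
			return -errno;	/* an error wins over partial progress */
		if (n == 0)
			break;		/* short source: report what we did */
		for (ssize_t off = 0; off < n; ) {
			ssize_t w = write(dst_fd, buf + off, n - off);

			if (w < 0)
				return -errno;
			off += w;
		}
		total += n;		/* mirrors total_bytes_written above */
		len -= n;
	}
	return total;	/* caller sees bytes copied, never a bare 0-on-success */
}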
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 7446496850a3..66fa1b941cdf 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -1185,6 +1185,10 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
 		return -EINVAL;
 	}
 
+	/* SMB2 TREE_CONNECT request must be called with TreeId == 0 */
+	if (tcon)
+		tcon->tid = 0;
+
 	rc = small_smb2_init(SMB2_TREE_CONNECT, tcon, (void **) &req);
 	if (rc) {
 		kfree(unc_path);
diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
index 69e35873b1de..6853454fc871 100644
--- a/fs/cifs/smb2proto.h
+++ b/fs/cifs/smb2proto.h
@@ -48,6 +48,10 @@ extern struct mid_q_entry *smb2_setup_request(struct cifs_ses *ses,
 			      struct smb_rqst *rqst);
 extern struct mid_q_entry *smb2_setup_async_request(
 			struct TCP_Server_Info *server, struct smb_rqst *rqst);
+extern struct cifs_ses *smb2_find_smb_ses(struct TCP_Server_Info *server,
+					  __u64 ses_id);
+extern struct cifs_tcon *smb2_find_smb_tcon(struct TCP_Server_Info *server,
+					    __u64 ses_id, __u32 tid);
 extern int smb2_calc_signature(struct smb_rqst *rqst,
 				struct TCP_Server_Info *server);
 extern int smb3_calc_signature(struct smb_rqst *rqst,
@@ -164,6 +168,9 @@ extern int SMB2_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
 extern int SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
 			     const u64 persistent_fid, const u64 volatile_fid,
 			     const __u8 oplock_level);
+extern int smb2_handle_cancelled_mid(char *buffer,
+				     struct TCP_Server_Info *server);
+void smb2_cancelled_close_fid(struct work_struct *work);
 extern int SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
 			 u64 persistent_file_id, u64 volatile_file_id,
 			 struct kstatfs *FSData);
diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c
index 7c3bb1bd7eed..506b67fc93d9 100644
--- a/fs/cifs/smb2transport.c
+++ b/fs/cifs/smb2transport.c
@@ -115,23 +115,70 @@ smb3_crypto_shash_allocate(struct TCP_Server_Info *server)
 	return 0;
 }
 
-struct cifs_ses *
-smb2_find_smb_ses(struct TCP_Server_Info *server, __u64 ses_id)
+static struct cifs_ses *
+smb2_find_smb_ses_unlocked(struct TCP_Server_Info *server, __u64 ses_id)
 {
 	struct cifs_ses *ses;
 
-	spin_lock(&cifs_tcp_ses_lock);
 	list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
 		if (ses->Suid != ses_id)
 			continue;
-		spin_unlock(&cifs_tcp_ses_lock);
 		return ses;
 	}
+
+	return NULL;
+}
+
+struct cifs_ses *
+smb2_find_smb_ses(struct TCP_Server_Info *server, __u64 ses_id)
+{
+	struct cifs_ses *ses;
+
+	spin_lock(&cifs_tcp_ses_lock);
+	ses = smb2_find_smb_ses_unlocked(server, ses_id);
 	spin_unlock(&cifs_tcp_ses_lock);
 
+	return ses;
+}
+
+static struct cifs_tcon *
+smb2_find_smb_sess_tcon_unlocked(struct cifs_ses *ses, __u32 tid)
+{
+	struct cifs_tcon *tcon;
+
+	list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
+		if (tcon->tid != tid)
+			continue;
+		++tcon->tc_count;
+		return tcon;
+	}
+
 	return NULL;
 }
 
+/*
+ * Obtain tcon corresponding to the tid in the given
+ * cifs_ses
+ */
+
+struct cifs_tcon *
+smb2_find_smb_tcon(struct TCP_Server_Info *server, __u64 ses_id, __u32 tid)
+{
+	struct cifs_ses *ses;
+	struct cifs_tcon *tcon;
+
+	spin_lock(&cifs_tcp_ses_lock);
+	ses = smb2_find_smb_ses_unlocked(server, ses_id);
+	if (!ses) {
+		spin_unlock(&cifs_tcp_ses_lock);
+		return NULL;
+	}
+	tcon = smb2_find_smb_sess_tcon_unlocked(ses, tid);
+	spin_unlock(&cifs_tcp_ses_lock);
+
+	return tcon;
+}
+
 int
 smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
 {
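The smb2transport.c rework above is a common locking refactor: keep the list walks in *_unlocked() helpers that assume cifs_tcp_ses_lock is held, and let the exported entry points take the lock once. That way the two-step session-then-tcon lookup in smb2_find_smb_tcon() runs under a single critical section instead of dropping and retaking the lock between steps. A generic userspace sketch of the shape, with a pthread mutex standing in for the spinlock (names and types are illustrative, not the kernel's):

#include <pthread.h>
#include <stddef.h>

struct session { unsigned long long id; struct session *next; };

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct session *sessions;

/* Caller must hold table_lock. */
static struct session *find_session_unlocked(unsigned long long id)
{
	struct session *s;

	for (s = sessions; s; s = s->next)
		if (s->id == id)
			return s;
	return NULL;
}

/* Public wrapper: takes the lock once, composes unlocked helpers. */
struct session *find_session(unsigned long long id)
{
	struct session *s;

	pthread_mutex_lock(&table_lock);
	s = find_session_unlocked(id);
	pthread_mutex_unlock(&table_lock);
	return s;
}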
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 526f0533cb4e..f6e13a977fc8 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -752,9 +752,11 @@ cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
 
 	rc = wait_for_response(ses->server, midQ);
 	if (rc != 0) {
+		cifs_dbg(FYI, "Cancelling wait for mid %llu\n", midQ->mid);
 		send_cancel(ses->server, rqst, midQ);
 		spin_lock(&GlobalMid_Lock);
 		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
+			midQ->mid_flags |= MID_WAIT_CANCELLED;
 			midQ->callback = DeleteMidQEntry;
 			spin_unlock(&GlobalMid_Lock);
 			add_credits(ses->server, 1, optype);
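The transport.c hunk marks a mid whose waiter has given up, so the demultiplex thread can tell a late reply apart from a live one and clean up after it (which is what the new handle_cancelled_mid operation does for SMB2, e.g. closing a handle the server managed to open). A stripped-down sketch of the flagging idiom, with a per-request pthread mutex in place of GlobalMid_Lock and a hypothetical flag mirroring MID_WAIT_CANCELLED:

#include <pthread.h>
#include <stdbool.h>

struct request {
	pthread_mutex_t lock;
	unsigned int flags;
#define REQ_WAIT_CANCELLED 0x1	/* hypothetical, mirrors MID_WAIT_CANCELLED */
};

/* Waiter gives up: record that fact so the receive path knows. */
void cancel_wait(struct request *req)
{
	pthread_mutex_lock(&req->lock);
	req->flags |= REQ_WAIT_CANCELLED;
	pthread_mutex_unlock(&req->lock);
}

/* Receive path: a late reply for a cancelled wait needs cleanup,
 * not delivery to a waiter that is long gone. */
bool reply_was_abandoned(struct request *req)
{
	bool abandoned;

	pthread_mutex_lock(&req->lock);
	abandoned = req->flags & REQ_WAIT_CANCELLED;
	pthread_mutex_unlock(&req->lock);
	return abandoned;
}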
diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c
index 02a7a9286449..6d6eca394d4d 100644
--- a/fs/crypto/crypto.c
+++ b/fs/crypto/crypto.c
@@ -327,7 +327,6 @@ EXPORT_SYMBOL(fscrypt_decrypt_page);
 static int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags)
 {
 	struct dentry *dir;
-	struct fscrypt_info *ci;
 	int dir_has_key, cached_with_key;
 
 	if (flags & LOOKUP_RCU)
@@ -339,18 +338,11 @@ static int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags)
 		return 0;
 	}
 
-	ci = d_inode(dir)->i_crypt_info;
-	if (ci && ci->ci_keyring_key &&
-	    (ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) |
-					  (1 << KEY_FLAG_REVOKED) |
-					  (1 << KEY_FLAG_DEAD))))
-		ci = NULL;
-
 	/* this should eventually be an flag in d_flags */
 	spin_lock(&dentry->d_lock);
 	cached_with_key = dentry->d_flags & DCACHE_ENCRYPTED_WITH_KEY;
 	spin_unlock(&dentry->d_lock);
-	dir_has_key = (ci != NULL);
+	dir_has_key = (d_inode(dir)->i_crypt_info != NULL);
 	dput(dir);
 
 	/*
diff --git a/fs/crypto/fname.c b/fs/crypto/fname.c
index 13052b85c393..37b49894c762 100644
--- a/fs/crypto/fname.c
+++ b/fs/crypto/fname.c
@@ -350,7 +350,7 @@ int fscrypt_setup_filename(struct inode *dir, const struct qstr *iname,
 		fname->disk_name.len = iname->len;
 		return 0;
 	}
-	ret = fscrypt_get_crypt_info(dir);
+	ret = fscrypt_get_encryption_info(dir);
 	if (ret && ret != -EOPNOTSUPP)
 		return ret;
 
diff --git a/fs/crypto/fscrypt_private.h b/fs/crypto/fscrypt_private.h
index fdbb8af32eaf..e39696e64494 100644
--- a/fs/crypto/fscrypt_private.h
+++ b/fs/crypto/fscrypt_private.h
@@ -67,7 +67,6 @@ struct fscrypt_info {
 	u8 ci_filename_mode;
 	u8 ci_flags;
 	struct crypto_skcipher *ci_ctfm;
-	struct key *ci_keyring_key;
 	u8 ci_master_key[FS_KEY_DESCRIPTOR_SIZE];
 };
 
@@ -101,7 +100,4 @@ extern int fscrypt_do_page_crypto(const struct inode *inode,
 extern struct page *fscrypt_alloc_bounce_page(struct fscrypt_ctx *ctx,
 					      gfp_t gfp_flags);
 
-/* keyinfo.c */
-extern int fscrypt_get_crypt_info(struct inode *);
-
 #endif /* _FSCRYPT_PRIVATE_H */
diff --git a/fs/crypto/keyinfo.c b/fs/crypto/keyinfo.c
index d5d896fa5a71..8cdfddce2b34 100644
--- a/fs/crypto/keyinfo.c
+++ b/fs/crypto/keyinfo.c
@@ -95,6 +95,7 @@ static int validate_user_key(struct fscrypt_info *crypt_info,
 	kfree(description);
 	if (IS_ERR(keyring_key))
 		return PTR_ERR(keyring_key);
+	down_read(&keyring_key->sem);
 
 	if (keyring_key->type != &key_type_logon) {
 		printk_once(KERN_WARNING
@@ -102,11 +103,9 @@ static int validate_user_key(struct fscrypt_info *crypt_info,
 		res = -ENOKEY;
 		goto out;
 	}
-	down_read(&keyring_key->sem);
 	ukp = user_key_payload_locked(keyring_key);
 	if (ukp->datalen != sizeof(struct fscrypt_key)) {
 		res = -EINVAL;
-		up_read(&keyring_key->sem);
 		goto out;
 	}
 	master_key = (struct fscrypt_key *)ukp->data;
@@ -117,17 +116,11 @@ static int validate_user_key(struct fscrypt_info *crypt_info,
 			"%s: key size incorrect: %d\n",
 			__func__, master_key->size);
 		res = -ENOKEY;
-		up_read(&keyring_key->sem);
 		goto out;
 	}
 	res = derive_key_aes(ctx->nonce, master_key->raw, raw_key);
-	up_read(&keyring_key->sem);
-	if (res)
-		goto out;
-
-	crypt_info->ci_keyring_key = keyring_key;
-	return 0;
 out:
+	up_read(&keyring_key->sem);
 	key_put(keyring_key);
 	return res;
 }
@@ -169,12 +162,11 @@ static void put_crypt_info(struct fscrypt_info *ci)
 	if (!ci)
 		return;
 
-	key_put(ci->ci_keyring_key);
 	crypto_free_skcipher(ci->ci_ctfm);
 	kmem_cache_free(fscrypt_info_cachep, ci);
 }
 
-int fscrypt_get_crypt_info(struct inode *inode)
+int fscrypt_get_encryption_info(struct inode *inode)
 {
 	struct fscrypt_info *crypt_info;
 	struct fscrypt_context ctx;
@@ -184,21 +176,15 @@ int fscrypt_get_crypt_info(struct inode *inode)
 	u8 *raw_key = NULL;
 	int res;
 
+	if (inode->i_crypt_info)
+		return 0;
+
 	res = fscrypt_initialize(inode->i_sb->s_cop->flags);
 	if (res)
 		return res;
 
 	if (!inode->i_sb->s_cop->get_context)
 		return -EOPNOTSUPP;
-retry:
-	crypt_info = ACCESS_ONCE(inode->i_crypt_info);
-	if (crypt_info) {
-		if (!crypt_info->ci_keyring_key ||
-		    key_validate(crypt_info->ci_keyring_key) == 0)
-			return 0;
-		fscrypt_put_encryption_info(inode, crypt_info);
-		goto retry;
-	}
 
 	res = inode->i_sb->s_cop->get_context(inode, &ctx, sizeof(ctx));
 	if (res < 0) {
@@ -229,7 +215,6 @@ retry:
 	crypt_info->ci_data_mode = ctx.contents_encryption_mode;
 	crypt_info->ci_filename_mode = ctx.filenames_encryption_mode;
 	crypt_info->ci_ctfm = NULL;
-	crypt_info->ci_keyring_key = NULL;
 	memcpy(crypt_info->ci_master_key, ctx.master_key_descriptor,
 				sizeof(crypt_info->ci_master_key));
 
@@ -273,14 +258,8 @@ retry:
 	if (res)
 		goto out;
 
-	kzfree(raw_key);
-	raw_key = NULL;
-	if (cmpxchg(&inode->i_crypt_info, NULL, crypt_info) != NULL) {
-		put_crypt_info(crypt_info);
-		goto retry;
-	}
-	return 0;
-
+	if (cmpxchg(&inode->i_crypt_info, NULL, crypt_info) == NULL)
+		crypt_info = NULL;
 out:
 	if (res == -ENOKEY)
 		res = 0;
@@ -288,6 +267,7 @@ out:
 	kzfree(raw_key);
 	return res;
 }
+EXPORT_SYMBOL(fscrypt_get_encryption_info);
 
 void fscrypt_put_encryption_info(struct inode *inode, struct fscrypt_info *ci)
 {
@@ -305,17 +285,3 @@ void fscrypt_put_encryption_info(struct inode *inode, struct fscrypt_info *ci)
 	put_crypt_info(ci);
 }
 EXPORT_SYMBOL(fscrypt_put_encryption_info);
-
-int fscrypt_get_encryption_info(struct inode *inode)
-{
-	struct fscrypt_info *ci = inode->i_crypt_info;
-
-	if (!ci ||
-	    (ci->ci_keyring_key &&
-	     (ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) |
-					   (1 << KEY_FLAG_REVOKED) |
-					   (1 << KEY_FLAG_DEAD)))))
-		return fscrypt_get_crypt_info(inode);
-	return 0;
-}
-EXPORT_SYMBOL(fscrypt_get_encryption_info);
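The keyinfo.c rework stops caching the keyring key in fscrypt_info (the payload is copied while down_read(&keyring_key->sem) is held and the key released right away), so the old revalidate-and-retry loop collapses into a single publish-once step: build the object fully, install it with cmpxchg(), and free the loser's copy. A userspace sketch of that idiom with C11 atomics (types and values illustrative):

#include <stdatomic.h>
#include <stdlib.h>

struct crypt_info { int key_id; /* ... */ };

static _Atomic(struct crypt_info *) published;

int get_crypt_info(void)
{
	struct crypt_info *ci, *expected = NULL;

	if (atomic_load(&published))
		return 0;		/* fast path: already set up */

	ci = calloc(1, sizeof(*ci));
	if (!ci)
		return -1;
	ci->key_id = 42;		/* fully initialize before publishing */

	/* install exactly once; whoever loses frees their copy */
	if (!atomic_compare_exchange_strong(&published, &expected, ci))
		free(ci);
	return 0;
}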
diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c
index 14b76da71269..4908906d54d5 100644
--- a/fs/crypto/policy.c
+++ b/fs/crypto/policy.c
@@ -33,17 +33,10 @@ static int create_encryption_context_from_policy(struct inode *inode,
 				const struct fscrypt_policy *policy)
 {
 	struct fscrypt_context ctx;
-	int res;
 
 	if (!inode->i_sb->s_cop->set_context)
 		return -EOPNOTSUPP;
 
-	if (inode->i_sb->s_cop->prepare_context) {
-		res = inode->i_sb->s_cop->prepare_context(inode);
-		if (res)
-			return res;
-	}
-
 	ctx.format = FS_ENCRYPTION_CONTEXT_FORMAT_V1;
 	memcpy(ctx.master_key_descriptor, policy->master_key_descriptor,
 					FS_KEY_DESCRIPTOR_SIZE);
diff --git a/fs/dax.c b/fs/dax.c
index de622d4282a6..85abd741253d 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -373,6 +373,22 @@ restart:
 	}
 	spin_lock_irq(&mapping->tree_lock);
 
+	if (!entry) {
+		/*
+		 * We needed to drop the page_tree lock while calling
+		 * radix_tree_preload() and we didn't have an entry to
+		 * lock.  See if another thread inserted an entry at
+		 * our index during this time.
+		 */
+		entry = __radix_tree_lookup(&mapping->page_tree, index,
+				NULL, &slot);
+		if (entry) {
+			radix_tree_preload_end();
+			spin_unlock_irq(&mapping->tree_lock);
+			goto restart;
+		}
+	}
+
 	if (pmd_downgrade) {
 		radix_tree_delete(&mapping->page_tree, index);
 		mapping->nrexceptional--;
@@ -388,19 +404,12 @@ restart:
 		if (err) {
 			spin_unlock_irq(&mapping->tree_lock);
 			/*
-			 * Someone already created the entry?  This is a
-			 * normal failure when inserting PMDs in a range
-			 * that already contains PTEs.  In that case we want
-			 * to return -EEXIST immediately.
-			 */
-			if (err == -EEXIST && !(size_flag & RADIX_DAX_PMD))
-				goto restart;
-			/*
-			 * Our insertion of a DAX PMD entry failed, most
-			 * likely because it collided with a PTE sized entry
-			 * at a different index in the PMD range.  We haven't
-			 * inserted anything into the radix tree and have no
-			 * waiters to wake.
+			 * Our insertion of a DAX entry failed, most likely
+			 * because we were inserting a PMD entry and it
+			 * collided with a PTE sized entry at a different
+			 * index in the PMD range.  We haven't inserted
+			 * anything into the radix tree and have no waiters to
+			 * wake.
 			 */
 			return ERR_PTR(err);
 		}
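The first dax.c hunk applies the classic recheck-after-relock rule: any lookup result from before radix_tree_preload() dropped tree_lock is stale, so after relocking the code looks the index up again and restarts if another thread got there first. The same shape in plain userspace C, with a one-slot map and malloc() standing in for the preload (purely illustrative):

#include <pthread.h>
#include <stdlib.h>

struct map { pthread_mutex_t lock; void *slot; };

void *get_or_create(struct map *m)
{
	void *entry;

	for (;;) {
		pthread_mutex_lock(&m->lock);
		entry = m->slot;
		if (entry) {
			pthread_mutex_unlock(&m->lock);
			return entry;
		}
		pthread_mutex_unlock(&m->lock);

		/* allocate without the lock held (may sleep or fail) */
		entry = malloc(64);
		if (!entry)
			return NULL;

		pthread_mutex_lock(&m->lock);
		if (m->slot) {
			/* someone raced in while we allocated: start over */
			pthread_mutex_unlock(&m->lock);
			free(entry);
			continue;
		}
		m->slot = entry;	/* we won; publish under the lock */
		pthread_mutex_unlock(&m->lock);
		return entry;
	}
}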
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
index 7d398d300e97..9382db998ec9 100644
--- a/fs/dlm/lowcomms.c
+++ b/fs/dlm/lowcomms.c
@@ -743,7 +743,7 @@ static int tcp_accept_from_sock(struct connection *con)
 	newsock->type = con->sock->type;
 	newsock->ops = con->sock->ops;
 
-	result = con->sock->ops->accept(con->sock, newsock, O_NONBLOCK);
+	result = con->sock->ops->accept(con->sock, newsock, O_NONBLOCK, true);
 	if (result < 0)
 		goto accept_err;
 
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index f493af666591..fb69ee2388db 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -2466,6 +2466,7 @@ extern int ext4_setattr(struct dentry *, struct iattr *);
 extern int ext4_getattr(const struct path *, struct kstat *, u32, unsigned int);
 extern void ext4_evict_inode(struct inode *);
 extern void ext4_clear_inode(struct inode *);
+extern int ext4_file_getattr(const struct path *, struct kstat *, u32, unsigned int);
 extern int ext4_sync_inode(handle_t *, struct inode *);
 extern void ext4_dirty_inode(struct inode *, int);
 extern int ext4_change_inode_journal_flag(struct inode *, int);
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 8210c1f43556..cefa9835f275 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -744,7 +744,7 @@ const struct file_operations ext4_file_operations = {
 
 const struct inode_operations ext4_file_inode_operations = {
 	.setattr	= ext4_setattr,
-	.getattr	= ext4_getattr,
+	.getattr	= ext4_file_getattr,
 	.listxattr	= ext4_listxattr,
 	.get_acl	= ext4_get_acl,
 	.set_acl	= ext4_set_acl,
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index 30a9f210d1e3..375fb1c05d49 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -1169,10 +1169,9 @@ static int ext4_finish_convert_inline_dir(handle_t *handle,
 	set_buffer_uptodate(dir_block);
 	err = ext4_handle_dirty_dirent_node(handle, inode, dir_block);
 	if (err)
-		goto out;
+		return err;
 	set_buffer_verified(dir_block);
-out:
-	return err;
+	return ext4_mark_inode_dirty(handle, inode);
 }
 
 static int ext4_convert_inline_data_nolock(handle_t *handle,
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 7385e6a6b6cb..b9ffa9f4191f 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -5390,17 +5390,52 @@ err_out:
 int ext4_getattr(const struct path *path, struct kstat *stat,
 		 u32 request_mask, unsigned int query_flags)
 {
-	struct inode *inode;
-	unsigned long long delalloc_blocks;
+	struct inode *inode = d_inode(path->dentry);
+	struct ext4_inode *raw_inode;
+	struct ext4_inode_info *ei = EXT4_I(inode);
+	unsigned int flags;
+
+	if (EXT4_FITS_IN_INODE(raw_inode, ei, i_crtime)) {
+		stat->result_mask |= STATX_BTIME;
+		stat->btime.tv_sec = ei->i_crtime.tv_sec;
+		stat->btime.tv_nsec = ei->i_crtime.tv_nsec;
+	}
+
+	flags = ei->i_flags & EXT4_FL_USER_VISIBLE;
+	if (flags & EXT4_APPEND_FL)
+		stat->attributes |= STATX_ATTR_APPEND;
+	if (flags & EXT4_COMPR_FL)
+		stat->attributes |= STATX_ATTR_COMPRESSED;
+	if (flags & EXT4_ENCRYPT_FL)
+		stat->attributes |= STATX_ATTR_ENCRYPTED;
+	if (flags & EXT4_IMMUTABLE_FL)
+		stat->attributes |= STATX_ATTR_IMMUTABLE;
+	if (flags & EXT4_NODUMP_FL)
+		stat->attributes |= STATX_ATTR_NODUMP;
+
+	stat->attributes_mask |= (STATX_ATTR_APPEND |
+				  STATX_ATTR_COMPRESSED |
+				  STATX_ATTR_ENCRYPTED |
+				  STATX_ATTR_IMMUTABLE |
+				  STATX_ATTR_NODUMP);
 
-	inode = d_inode(path->dentry);
 	generic_fillattr(inode, stat);
+	return 0;
+}
+
+int ext4_file_getattr(const struct path *path, struct kstat *stat,
+		      u32 request_mask, unsigned int query_flags)
+{
+	struct inode *inode = d_inode(path->dentry);
+	u64 delalloc_blocks;
+
+	ext4_getattr(path, stat, request_mask, query_flags);
 
 	/*
 	 * If there is inline data in the inode, the inode will normally not
 	 * have data blocks allocated (it may have an external xattr block).
 	 * Report at least one sector for such files, so tools like tar, rsync,
-	 * others doen't incorrectly think the file is completely sparse.
+	 * others don't incorrectly think the file is completely sparse.
	 */
 	if (unlikely(ext4_has_inline_data(inode)))
 		stat->blocks += (stat->size + 511) >> 9;
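The ext4_getattr()/ext4_file_getattr() split exists so statx(2) can report the inode creation time and the EXT4_*_FL-derived attribute bits on every inode type, while only regular files pay the delalloc block accounting. What this looks like from userspace (statx() needs a recent kernel and glibc 2.28 or later; on older glibc go through syscall(SYS_statx, ...)):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>

int main(int argc, char **argv)
{
	struct statx stx;

	if (argc < 2)
		return 1;
	if (statx(AT_FDCWD, argv[1], 0, STATX_BTIME | STATX_BASIC_STATS, &stx)) {
		perror("statx");
		return 1;
	}
	if (stx.stx_mask & STATX_BTIME)	/* ext4 now fills this from i_crtime */
		printf("btime: %lld\n", (long long)stx.stx_btime.tv_sec);
	printf("immutable: %s\n",
	       (stx.stx_attributes & STATX_ATTR_IMMUTABLE) ? "yes" : "no");
	return 0;
}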
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
index 578f8c33fb44..c992ef2c2f94 100644
--- a/fs/ext4/move_extent.c
+++ b/fs/ext4/move_extent.c
@@ -511,7 +511,7 @@ mext_check_arguments(struct inode *orig_inode,
 	if ((orig_start & ~(PAGE_MASK >> orig_inode->i_blkbits)) !=
 	    (donor_start & ~(PAGE_MASK >> orig_inode->i_blkbits))) {
 		ext4_debug("ext4 move extent: orig and donor's start "
-			"offset are not alligned [ino:orig %lu, donor %lu]\n",
+			"offsets are not aligned [ino:orig %lu, donor %lu]\n",
 			orig_inode->i_ino, donor_inode->i_ino);
 		return -EINVAL;
 	}
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 6ad612c576fc..07e5e1405771 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -3912,6 +3912,7 @@ const struct inode_operations ext4_dir_inode_operations = {
 	.tmpfile	= ext4_tmpfile,
 	.rename		= ext4_rename2,
 	.setattr	= ext4_setattr,
+	.getattr	= ext4_getattr,
 	.listxattr	= ext4_listxattr,
 	.get_acl	= ext4_get_acl,
 	.set_acl	= ext4_set_acl,
@@ -3920,6 +3921,7 @@ const struct inode_operations ext4_dir_inode_operations = {
 
 const struct inode_operations ext4_special_inode_operations = {
 	.setattr	= ext4_setattr,
+	.getattr	= ext4_getattr,
 	.listxattr	= ext4_listxattr,
 	.get_acl	= ext4_get_acl,
 	.set_acl	= ext4_set_acl,
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 2e03a0a88d92..a9448db1cf7e 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1120,17 +1120,16 @@ static int ext4_get_context(struct inode *inode, void *ctx, size_t len)
 			EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, ctx, len);
 }
 
-static int ext4_prepare_context(struct inode *inode)
-{
-	return ext4_convert_inline_data(inode);
-}
-
 static int ext4_set_context(struct inode *inode, const void *ctx, size_t len,
 							void *fs_data)
 {
 	handle_t *handle = fs_data;
 	int res, res2, retries = 0;
 
+	res = ext4_convert_inline_data(inode);
+	if (res)
+		return res;
+
 	/*
 	 * If a journal handle was specified, then the encryption context is
 	 * being set on a new inode via inheritance and is part of a larger
@@ -1196,7 +1195,6 @@ static unsigned ext4_max_namelen(struct inode *inode)
 static const struct fscrypt_operations ext4_cryptops = {
 	.key_prefix		= "ext4:",
 	.get_context		= ext4_get_context,
-	.prepare_context	= ext4_prepare_context,
 	.set_context		= ext4_set_context,
 	.dummy_context		= ext4_dummy_context,
 	.is_encrypted		= ext4_encrypted_inode,
diff --git a/fs/ext4/symlink.c b/fs/ext4/symlink.c
index 73b184d161fc..5c8fc53cb0e5 100644
--- a/fs/ext4/symlink.c
+++ b/fs/ext4/symlink.c
@@ -85,17 +85,20 @@ errout:
 const struct inode_operations ext4_encrypted_symlink_inode_operations = {
 	.get_link	= ext4_encrypted_get_link,
 	.setattr	= ext4_setattr,
+	.getattr	= ext4_getattr,
 	.listxattr	= ext4_listxattr,
 };
 
 const struct inode_operations ext4_symlink_inode_operations = {
 	.get_link	= page_get_link,
 	.setattr	= ext4_setattr,
+	.getattr	= ext4_getattr,
 	.listxattr	= ext4_listxattr,
 };
 
 const struct inode_operations ext4_fast_symlink_inode_operations = {
 	.get_link	= simple_get_link,
 	.setattr	= ext4_setattr,
+	.getattr	= ext4_getattr,
 	.listxattr	= ext4_listxattr,
 };
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 67636acf7624..996e7900d4c8 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -131,31 +131,26 @@ static __le32 ext4_xattr_block_csum(struct inode *inode,
 }
 
 static int ext4_xattr_block_csum_verify(struct inode *inode,
-					sector_t block_nr,
-					struct ext4_xattr_header *hdr)
+					struct buffer_head *bh)
 {
-	if (ext4_has_metadata_csum(inode->i_sb) &&
-	    (hdr->h_checksum != ext4_xattr_block_csum(inode, block_nr, hdr)))
-		return 0;
-	return 1;
-}
-
-static void ext4_xattr_block_csum_set(struct inode *inode,
-				      sector_t block_nr,
-				      struct ext4_xattr_header *hdr)
-{
-	if (!ext4_has_metadata_csum(inode->i_sb))
-		return;
+	struct ext4_xattr_header *hdr = BHDR(bh);
+	int ret = 1;
 
-	hdr->h_checksum = ext4_xattr_block_csum(inode, block_nr, hdr);
+	if (ext4_has_metadata_csum(inode->i_sb)) {
+		lock_buffer(bh);
+		ret = (hdr->h_checksum == ext4_xattr_block_csum(inode,
+							bh->b_blocknr, hdr));
+		unlock_buffer(bh);
+	}
+	return ret;
 }
 
-static inline int ext4_handle_dirty_xattr_block(handle_t *handle,
-						struct inode *inode,
-						struct buffer_head *bh)
+static void ext4_xattr_block_csum_set(struct inode *inode,
+				      struct buffer_head *bh)
 {
-	ext4_xattr_block_csum_set(inode, bh->b_blocknr, BHDR(bh));
-	return ext4_handle_dirty_metadata(handle, inode, bh);
+	if (ext4_has_metadata_csum(inode->i_sb))
+		BHDR(bh)->h_checksum = ext4_xattr_block_csum(inode,
+						bh->b_blocknr, BHDR(bh));
 }
 
 static inline const struct xattr_handler *
@@ -233,7 +228,7 @@ ext4_xattr_check_block(struct inode *inode, struct buffer_head *bh)
 	if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) ||
 	    BHDR(bh)->h_blocks != cpu_to_le32(1))
 		return -EFSCORRUPTED;
-	if (!ext4_xattr_block_csum_verify(inode, bh->b_blocknr, BHDR(bh)))
+	if (!ext4_xattr_block_csum_verify(inode, bh))
 		return -EFSBADCRC;
 	error = ext4_xattr_check_names(BFIRST(bh), bh->b_data + bh->b_size,
 				       bh->b_data);
@@ -618,23 +613,22 @@ ext4_xattr_release_block(handle_t *handle, struct inode *inode,
 		}
 	}
 
+	ext4_xattr_block_csum_set(inode, bh);
 	/*
 	 * Beware of this ugliness: Releasing of xattr block references
 	 * from different inodes can race and so we have to protect
 	 * from a race where someone else frees the block (and releases
 	 * its journal_head) before we are done dirtying the buffer. In
 	 * nojournal mode this race is harmless and we actually cannot
-	 * call ext4_handle_dirty_xattr_block() with locked buffer as
+	 * call ext4_handle_dirty_metadata() with locked buffer as
 	 * that function can call sync_dirty_buffer() so for that case
 	 * we handle the dirtying after unlocking the buffer.
 	 */
 	if (ext4_handle_valid(handle))
-		error = ext4_handle_dirty_xattr_block(handle, inode,
-						      bh);
+		error = ext4_handle_dirty_metadata(handle, inode, bh);
 	unlock_buffer(bh);
 	if (!ext4_handle_valid(handle))
-		error = ext4_handle_dirty_xattr_block(handle, inode,
-						      bh);
+		error = ext4_handle_dirty_metadata(handle, inode, bh);
 	if (IS_SYNC(inode))
 		ext4_handle_sync(handle);
 	dquot_free_block(inode, EXT4_C2B(EXT4_SB(inode->i_sb), 1));
@@ -863,13 +857,14 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
 				ext4_xattr_cache_insert(ext4_mb_cache,
 							bs->bh);
 			}
+			ext4_xattr_block_csum_set(inode, bs->bh);
 			unlock_buffer(bs->bh);
 			if (error == -EFSCORRUPTED)
 				goto bad_block;
 			if (!error)
-				error = ext4_handle_dirty_xattr_block(handle,
+				error = ext4_handle_dirty_metadata(handle,
 								      inode,
 								      bs->bh);
 			if (error)
 				goto cleanup;
 			goto inserted;
@@ -967,10 +962,11 @@ inserted:
 				ce->e_reusable = 0;
 				ea_bdebug(new_bh, "reusing; refcount now=%d",
 					  ref);
+				ext4_xattr_block_csum_set(inode, new_bh);
 				unlock_buffer(new_bh);
-				error = ext4_handle_dirty_xattr_block(handle,
+				error = ext4_handle_dirty_metadata(handle,
 								      inode,
 								      new_bh);
 				if (error)
 					goto cleanup_dquot;
 			}
@@ -1020,11 +1016,12 @@ getblk_failed:
 				goto getblk_failed;
 			}
 			memcpy(new_bh->b_data, s->base, new_bh->b_size);
+			ext4_xattr_block_csum_set(inode, new_bh);
 			set_buffer_uptodate(new_bh);
 			unlock_buffer(new_bh);
 			ext4_xattr_cache_insert(ext4_mb_cache, new_bh);
-			error = ext4_handle_dirty_xattr_block(handle,
-							      inode, new_bh);
+			error = ext4_handle_dirty_metadata(handle, inode,
+							   new_bh);
 			if (error)
 				goto cleanup;
 		}
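The xattr.c change folds ext4_handle_dirty_xattr_block() away and instead computes the block checksum while the buffer is still locked, so the bytes being checksummed cannot change underneath the computation. The invariant in miniature as a userspace sketch, with a mutex standing in for lock_buffer() and a toy CRC-32 standing in for ext4's metadata checksum:

#include <pthread.h>
#include <stdint.h>

struct block {
	pthread_mutex_t lock;		/* stands in for the buffer lock */
	uint32_t csum;
	unsigned char data[4096];
};

static uint32_t crc32_sketch(const unsigned char *p, size_t n)
{
	uint32_t c = ~0u;

	while (n--) {
		c ^= *p++;
		for (int k = 0; k < 8; k++)
			c = (c >> 1) ^ (0xedb88320u & -(c & 1));
	}
	return ~c;
}

/* Caller already holds b->lock, mirroring the locked-buffer call sites. */
void block_csum_set(struct block *b)
{
	b->csum = crc32_sketch(b->data, sizeof(b->data));
}

int block_csum_verify(struct block *b)
{
	int ok;

	pthread_mutex_lock(&b->lock);	/* verify against a stable image */
	ok = (b->csum == crc32_sketch(b->data, sizeof(b->data)));
	pthread_mutex_unlock(&b->lock);
	return ok;
}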
diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
index a77df377e2e8..ee2d0a485fc3 100644
--- a/fs/f2fs/debug.c
+++ b/fs/f2fs/debug.c
@@ -196,6 +196,7 @@ static void update_mem_info(struct f2fs_sb_info *sbi)
 	si->base_mem += (NM_I(sbi)->nat_bits_blocks << F2FS_BLKSIZE_BITS);
 	si->base_mem += NM_I(sbi)->nat_blocks * NAT_ENTRY_BITMAP_SIZE;
 	si->base_mem += NM_I(sbi)->nat_blocks / 8;
+	si->base_mem += NM_I(sbi)->nat_blocks * sizeof(unsigned short);
 
 get_cache:
 	si->cache_mem = 0;
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index 4650c9b85de7..8d5c62b07b28 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -750,7 +750,7 @@ void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
 	dentry_blk = page_address(page);
 	bit_pos = dentry - dentry_blk->dentry;
 	for (i = 0; i < slots; i++)
-		clear_bit_le(bit_pos + i, &dentry_blk->dentry_bitmap);
+		__clear_bit_le(bit_pos + i, &dentry_blk->dentry_bitmap);
 
 	/* Let's check and deallocate this dentry page */
 	bit_pos = find_next_bit_le(&dentry_blk->dentry_bitmap,
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index e849f83d6114..0a6e115562f6 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -561,6 +561,8 @@ struct f2fs_nm_info {
 	struct mutex build_lock;	/* lock for build free nids */
 	unsigned char (*free_nid_bitmap)[NAT_ENTRY_BITMAP_SIZE];
 	unsigned char *nat_block_bitmap;
+	unsigned short *free_nid_count;	/* free nid count of NAT block */
+	spinlock_t free_nid_lock;	/* protect updating of nid count */
 
 	/* for checkpoint */
 	char *nat_bitmap;		/* NAT bitmap pointer */
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 94967171dee8..481aa8dc79f4 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -338,9 +338,6 @@ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
 	set_nat_flag(e, IS_CHECKPOINTED, false);
 	__set_nat_cache_dirty(nm_i, e);
 
-	if (enabled_nat_bits(sbi, NULL) && new_blkaddr == NEW_ADDR)
-		clear_bit_le(NAT_BLOCK_OFFSET(ni->nid), nm_i->empty_nat_bits);
-
 	/* update fsync_mark if its inode nat entry is still alive */
 	if (ni->nid != ni->ino)
 		e = __lookup_nat_cache(nm_i, ni->ino);
@@ -1823,7 +1820,8 @@ static void remove_free_nid(struct f2fs_sb_info *sbi, nid_t nid)
 	kmem_cache_free(free_nid_slab, i);
 }
 
-void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid, bool set)
+static void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid,
+			bool set, bool build, bool locked)
 {
 	struct f2fs_nm_info *nm_i = NM_I(sbi);
 	unsigned int nat_ofs = NAT_BLOCK_OFFSET(nid);
@@ -1833,9 +1831,18 @@ void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid, bool set)
 		return;
 
 	if (set)
-		set_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
+		__set_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
 	else
-		clear_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
+		__clear_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
+
+	if (!locked)
+		spin_lock(&nm_i->free_nid_lock);
+	if (set)
+		nm_i->free_nid_count[nat_ofs]++;
+	else if (!build)
+		nm_i->free_nid_count[nat_ofs]--;
+	if (!locked)
+		spin_unlock(&nm_i->free_nid_lock);
 }
 
 static void scan_nat_page(struct f2fs_sb_info *sbi,
@@ -1847,7 +1854,10 @@ static void scan_nat_page(struct f2fs_sb_info *sbi,
 	unsigned int nat_ofs = NAT_BLOCK_OFFSET(start_nid);
 	int i;
 
-	set_bit_le(nat_ofs, nm_i->nat_block_bitmap);
+	if (test_bit_le(nat_ofs, nm_i->nat_block_bitmap))
+		return;
+
+	__set_bit_le(nat_ofs, nm_i->nat_block_bitmap);
 
 	i = start_nid % NAT_ENTRY_PER_BLOCK;
 
@@ -1861,7 +1871,7 @@ static void scan_nat_page(struct f2fs_sb_info *sbi,
 		f2fs_bug_on(sbi, blk_addr == NEW_ADDR);
 		if (blk_addr == NULL_ADDR)
 			freed = add_free_nid(sbi, start_nid, true);
-		update_free_nid_bitmap(sbi, start_nid, freed);
+		update_free_nid_bitmap(sbi, start_nid, freed, true, false);
 	}
 }
 
@@ -1877,6 +1887,8 @@ static void scan_free_nid_bits(struct f2fs_sb_info *sbi)
 	for (i = 0; i < nm_i->nat_blocks; i++) {
 		if (!test_bit_le(i, nm_i->nat_block_bitmap))
 			continue;
+		if (!nm_i->free_nid_count[i])
+			continue;
 		for (idx = 0; idx < NAT_ENTRY_PER_BLOCK; idx++) {
 			nid_t nid;
 
@@ -1907,58 +1919,6 @@ out:
 	up_read(&nm_i->nat_tree_lock);
 }
 
-static int scan_nat_bits(struct f2fs_sb_info *sbi)
-{
-	struct f2fs_nm_info *nm_i = NM_I(sbi);
-	struct page *page;
-	unsigned int i = 0;
-	nid_t nid;
-
-	if (!enabled_nat_bits(sbi, NULL))
-		return -EAGAIN;
-
-	down_read(&nm_i->nat_tree_lock);
-check_empty:
-	i = find_next_bit_le(nm_i->empty_nat_bits, nm_i->nat_blocks, i);
-	if (i >= nm_i->nat_blocks) {
-		i = 0;
-		goto check_partial;
-	}
-
-	for (nid = i * NAT_ENTRY_PER_BLOCK; nid < (i + 1) * NAT_ENTRY_PER_BLOCK;
-									nid++) {
-		if (unlikely(nid >= nm_i->max_nid))
-			break;
-		add_free_nid(sbi, nid, true);
-	}
-
-	if (nm_i->nid_cnt[FREE_NID_LIST] >= MAX_FREE_NIDS)
-		goto out;
-	i++;
-	goto check_empty;
-
-check_partial:
-	i = find_next_zero_bit_le(nm_i->full_nat_bits, nm_i->nat_blocks, i);
-	if (i >= nm_i->nat_blocks) {
-		disable_nat_bits(sbi, true);
-		up_read(&nm_i->nat_tree_lock);
-		return -EINVAL;
-	}
-
-	nid = i * NAT_ENTRY_PER_BLOCK;
-	page = get_current_nat_page(sbi, nid);
-	scan_nat_page(sbi, page, nid);
-	f2fs_put_page(page, 1);
-
-	if (nm_i->nid_cnt[FREE_NID_LIST] < MAX_FREE_NIDS) {
-		i++;
-		goto check_partial;
-	}
-out:
-	up_read(&nm_i->nat_tree_lock);
-	return 0;
-}
-
 static void __build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
 {
 	struct f2fs_nm_info *nm_i = NM_I(sbi);
@@ -1980,21 +1940,6 @@ static void __build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
 
 		if (nm_i->nid_cnt[FREE_NID_LIST])
 			return;
-
-		/* try to find free nids with nat_bits */
-		if (!scan_nat_bits(sbi) && nm_i->nid_cnt[FREE_NID_LIST])
-			return;
-	}
-
-	/* find next valid candidate */
-	if (enabled_nat_bits(sbi, NULL)) {
-		int idx = find_next_zero_bit_le(nm_i->full_nat_bits,
-					nm_i->nat_blocks, 0);
-
-		if (idx >= nm_i->nat_blocks)
-			set_sbi_flag(sbi, SBI_NEED_FSCK);
-		else
-			nid = idx * NAT_ENTRY_PER_BLOCK;
 	}
 
 	/* readahead nat pages to be scanned */
@@ -2081,7 +2026,7 @@ retry:
 	__insert_nid_to_list(sbi, i, ALLOC_NID_LIST, false);
 	nm_i->available_nids--;
 
-	update_free_nid_bitmap(sbi, *nid, false);
+	update_free_nid_bitmap(sbi, *nid, false, false, false);
 
 	spin_unlock(&nm_i->nid_list_lock);
 	return true;
@@ -2137,7 +2082,7 @@ void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
 
 	nm_i->available_nids++;
 
-	update_free_nid_bitmap(sbi, nid, true);
+	update_free_nid_bitmap(sbi, nid, true, false, false);
 
 	spin_unlock(&nm_i->nid_list_lock);
 
@@ -2383,7 +2328,7 @@ add_out:
 	list_add_tail(&nes->set_list, head);
 }
 
-void __update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid,
+static void __update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid,
 						struct page *page)
 {
 	struct f2fs_nm_info *nm_i = NM_I(sbi);
@@ -2402,16 +2347,16 @@ void __update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid,
 			valid++;
 	}
 	if (valid == 0) {
-		set_bit_le(nat_index, nm_i->empty_nat_bits);
-		clear_bit_le(nat_index, nm_i->full_nat_bits);
+		__set_bit_le(nat_index, nm_i->empty_nat_bits);
+		__clear_bit_le(nat_index, nm_i->full_nat_bits);
 		return;
 	}
 
-	clear_bit_le(nat_index, nm_i->empty_nat_bits);
+	__clear_bit_le(nat_index, nm_i->empty_nat_bits);
 	if (valid == NAT_ENTRY_PER_BLOCK)
-		set_bit_le(nat_index, nm_i->full_nat_bits);
+		__set_bit_le(nat_index, nm_i->full_nat_bits);
 	else
-		clear_bit_le(nat_index, nm_i->full_nat_bits);
+		__clear_bit_le(nat_index, nm_i->full_nat_bits);
 }
 
 static void __flush_nat_entry_set(struct f2fs_sb_info *sbi,
@@ -2467,11 +2412,11 @@ static void __flush_nat_entry_set(struct f2fs_sb_info *sbi,
 			add_free_nid(sbi, nid, false);
 			spin_lock(&NM_I(sbi)->nid_list_lock);
 			NM_I(sbi)->available_nids++;
-			update_free_nid_bitmap(sbi, nid, true);
+			update_free_nid_bitmap(sbi, nid, true, false, false);
 			spin_unlock(&NM_I(sbi)->nid_list_lock);
 		} else {
 			spin_lock(&NM_I(sbi)->nid_list_lock);
-			update_free_nid_bitmap(sbi, nid, false);
+			update_free_nid_bitmap(sbi, nid, false, false, false);
 			spin_unlock(&NM_I(sbi)->nid_list_lock);
 		}
 	}
@@ -2577,6 +2522,40 @@ static int __get_nat_bitmaps(struct f2fs_sb_info *sbi)
 	return 0;
 }
 
+inline void load_free_nid_bitmap(struct f2fs_sb_info *sbi)
+{
+	struct f2fs_nm_info *nm_i = NM_I(sbi);
+	unsigned int i = 0;
+	nid_t nid, last_nid;
+
+	if (!enabled_nat_bits(sbi, NULL))
+		return;
+
+	for (i = 0; i < nm_i->nat_blocks; i++) {
+		i = find_next_bit_le(nm_i->empty_nat_bits, nm_i->nat_blocks, i);
+		if (i >= nm_i->nat_blocks)
+			break;
+
+		__set_bit_le(i, nm_i->nat_block_bitmap);
+
+		nid = i * NAT_ENTRY_PER_BLOCK;
+		last_nid = (i + 1) * NAT_ENTRY_PER_BLOCK;
+
+		spin_lock(&nm_i->free_nid_lock);
+		for (; nid < last_nid; nid++)
+			update_free_nid_bitmap(sbi, nid, true, true, true);
+		spin_unlock(&nm_i->free_nid_lock);
+	}
+
+	for (i = 0; i < nm_i->nat_blocks; i++) {
+		i = find_next_bit_le(nm_i->full_nat_bits, nm_i->nat_blocks, i);
+		if (i >= nm_i->nat_blocks)
+			break;
+
+		__set_bit_le(i, nm_i->nat_block_bitmap);
+	}
+}
+
 static int init_node_manager(struct f2fs_sb_info *sbi)
 {
 	struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
@@ -2638,7 +2617,7 @@ static int init_node_manager(struct f2fs_sb_info *sbi)
 	return 0;
 }
 
-int init_free_nid_cache(struct f2fs_sb_info *sbi)
+static int init_free_nid_cache(struct f2fs_sb_info *sbi)
 {
 	struct f2fs_nm_info *nm_i = NM_I(sbi);
 
@@ -2651,6 +2630,14 @@ int init_free_nid_cache(struct f2fs_sb_info *sbi)
 							GFP_KERNEL);
 	if (!nm_i->nat_block_bitmap)
 		return -ENOMEM;
+
+	nm_i->free_nid_count = f2fs_kvzalloc(nm_i->nat_blocks *
+					sizeof(unsigned short), GFP_KERNEL);
+	if (!nm_i->free_nid_count)
+		return -ENOMEM;
+
+	spin_lock_init(&nm_i->free_nid_lock);
+
 	return 0;
 }
 
@@ -2670,6 +2657,9 @@ int build_node_manager(struct f2fs_sb_info *sbi)
 	if (err)
 		return err;
 
+	/* load free nid status from nat_bits table */
+	load_free_nid_bitmap(sbi);
+
 	build_free_nids(sbi, true, true);
 	return 0;
 }
@@ -2730,6 +2720,7 @@ void destroy_node_manager(struct f2fs_sb_info *sbi)
 
 	kvfree(nm_i->nat_block_bitmap);
 	kvfree(nm_i->free_nid_bitmap);
+	kvfree(nm_i->free_nid_count);
 
 	kfree(nm_i->nat_bitmap);
 	kfree(nm_i->nat_bits);
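The node.c series pairs the per-nid free bitmap with a per-NAT-block counter so scan_free_nid_bits() can skip blocks with nothing free, and it switches to the non-atomic __set_bit_le()/__clear_bit_le() because every update now happens with free_nid_lock (or nat_tree_lock) already held. The bookkeeping in miniature, as a single-threaded userspace sketch with illustrative sizes:

#include <stdbool.h>

#define BLOCKS		16
#define SLOTS_PER_BLOCK	455	/* mirrors NAT_ENTRY_PER_BLOCK */

static bool free_slot[BLOCKS][SLOTS_PER_BLOCK];	/* the per-slot bitmap */
static unsigned short free_count[BLOCKS];	/* per-block free totals */

static void mark_slot(int blk, int slot, bool set)
{
	if (free_slot[blk][slot] == set)
		return;				/* keep the counter exact */
	free_slot[blk][slot] = set;
	free_count[blk] += set ? 1 : -1;
}

static int find_free(void)
{
	for (int blk = 0; blk < BLOCKS; blk++) {
		if (!free_count[blk])
			continue;	/* the whole point: skip full blocks */
		for (int slot = 0; slot < SLOTS_PER_BLOCK; slot++)
			if (free_slot[blk][slot])
				return blk * SLOTS_PER_BLOCK + slot;
	}
	return -1;
}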
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 4bd7a8b19332..29ef7088c558 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -1163,6 +1163,12 @@ static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
 		if (f2fs_discard_en(sbi) &&
 			!f2fs_test_and_set_bit(offset, se->discard_map))
 			sbi->discard_blks--;
+
+		/* don't overwrite by SSR to keep node chain */
+		if (se->type == CURSEG_WARM_NODE) {
+			if (!f2fs_test_and_set_bit(offset, se->ckpt_valid_map))
+				se->ckpt_valid_blocks++;
+		}
 	} else {
 		if (!f2fs_test_and_clear_bit(offset, se->cur_valid_map)) {
 #ifdef CONFIG_F2FS_CHECK_FS
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index ef600591d96f..63ee2940775c 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -173,19 +173,33 @@ static void wb_wakeup(struct bdi_writeback *wb)
173 spin_unlock_bh(&wb->work_lock); 173 spin_unlock_bh(&wb->work_lock);
174} 174}
175 175
176static void finish_writeback_work(struct bdi_writeback *wb,
177 struct wb_writeback_work *work)
178{
179 struct wb_completion *done = work->done;
180
181 if (work->auto_free)
182 kfree(work);
183 if (done && atomic_dec_and_test(&done->cnt))
184 wake_up_all(&wb->bdi->wb_waitq);
185}
186
176static void wb_queue_work(struct bdi_writeback *wb, 187static void wb_queue_work(struct bdi_writeback *wb,
177 struct wb_writeback_work *work) 188 struct wb_writeback_work *work)
178{ 189{
179 trace_writeback_queue(wb, work); 190 trace_writeback_queue(wb, work);
180 191
181 spin_lock_bh(&wb->work_lock);
182 if (!test_bit(WB_registered, &wb->state))
183 goto out_unlock;
184 if (work->done) 192 if (work->done)
185 atomic_inc(&work->done->cnt); 193 atomic_inc(&work->done->cnt);
186 list_add_tail(&work->list, &wb->work_list); 194
187 mod_delayed_work(bdi_wq, &wb->dwork, 0); 195 spin_lock_bh(&wb->work_lock);
188out_unlock: 196
197 if (test_bit(WB_registered, &wb->state)) {
198 list_add_tail(&work->list, &wb->work_list);
199 mod_delayed_work(bdi_wq, &wb->dwork, 0);
200 } else
201 finish_writeback_work(wb, work);
202
189 spin_unlock_bh(&wb->work_lock); 203 spin_unlock_bh(&wb->work_lock);
190} 204}
191 205
@@ -1873,16 +1887,9 @@ static long wb_do_writeback(struct bdi_writeback *wb)
1873 1887
1874 set_bit(WB_writeback_running, &wb->state); 1888 set_bit(WB_writeback_running, &wb->state);
1875 while ((work = get_next_work_item(wb)) != NULL) { 1889 while ((work = get_next_work_item(wb)) != NULL) {
1876 struct wb_completion *done = work->done;
1877
1878 trace_writeback_exec(wb, work); 1890 trace_writeback_exec(wb, work);
1879
1880 wrote += wb_writeback(wb, work); 1891 wrote += wb_writeback(wb, work);
1881 1892 finish_writeback_work(wb, work);
1882 if (work->auto_free)
1883 kfree(work);
1884 if (done && atomic_dec_and_test(&done->cnt))
1885 wake_up_all(&wb->bdi->wb_waitq);
1886 } 1893 }
1887 1894
1888 /* 1895 /*
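
The fs-writeback refactor above fixes a leak: the old wb_queue_work() silently dropped a work item when the bdi_writeback was no longer registered, so an auto_free item was never freed and any waiter on work->done hung forever. Factoring the completion path into finish_writeback_work() lets the normal execution loop and the unregistered case release the item identically. A minimal sketch of the complete-or-queue idiom; struct work_item, struct target, and the helper names here are illustrative assumptions, not the kernel API:

	static void finish_work(struct work_item *work)
	{
		struct done_token *done = work->done;

		if (work->auto_free)
			kfree(work);
		/* wake the submitter only when the last outstanding item finishes */
		if (done && atomic_dec_and_test(&done->cnt))
			wake_up_all(done->waitq);
	}

	static void queue_or_finish(struct target *t, struct work_item *work)
	{
		if (work->done)
			atomic_inc(&work->done->cnt);

		spin_lock_bh(&t->work_lock);
		if (t->registered) {
			list_add_tail(&work->list, &t->work_list);
			kick_worker(t);		/* e.g. mod_delayed_work() */
		} else {
			/* target is shutting down: complete instead of leaking */
			finish_work(work);
		}
		spin_unlock_bh(&t->work_lock);
	}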
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index c45084ac642d..511e1ed7e2de 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -207,7 +207,7 @@ struct lm_lockname {
207 struct gfs2_sbd *ln_sbd; 207 struct gfs2_sbd *ln_sbd;
208 u64 ln_number; 208 u64 ln_number;
209 unsigned int ln_type; 209 unsigned int ln_type;
210}; 210} __packed __aligned(sizeof(int));
211 211
212#define lm_name_equal(name1, name2) \ 212#define lm_name_equal(name1, name2) \
213 (((name1)->ln_number == (name2)->ln_number) && \ 213 (((name1)->ln_number == (name2)->ln_number) && \
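
Marking struct lm_lockname __packed __aligned(sizeof(int)) removes the compiler's tail padding, which matters because the struct is used as a raw-byte hash key for glock lookup: uninitialized pad bytes would make two otherwise equal names hash or compare differently. A standalone illustration of the hazard (not gfs2 code):

	#include <stdio.h>
	#include <stdint.h>

	struct name_padded {
		void    *sbd;
		uint64_t number;
		uint32_t type;	/* 4 bytes of tail padding follow on 64-bit */
	};

	struct name_packed {
		void    *sbd;
		uint64_t number;
		uint32_t type;	/* no padding: hashing sizeof() bytes is safe */
	} __attribute__((packed, aligned(sizeof(int))));

	int main(void)
	{
		printf("padded=%zu packed=%zu\n",
		       sizeof(struct name_padded), sizeof(struct name_packed));
		return 0;	/* prints padded=24 packed=20 on x86-64 */
	}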
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 8f96461236f6..7163fe014b57 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -695,14 +695,11 @@ static struct inode *hugetlbfs_get_root(struct super_block *sb,
695 695
696 inode = new_inode(sb); 696 inode = new_inode(sb);
697 if (inode) { 697 if (inode) {
698 struct hugetlbfs_inode_info *info;
699 inode->i_ino = get_next_ino(); 698 inode->i_ino = get_next_ino();
700 inode->i_mode = S_IFDIR | config->mode; 699 inode->i_mode = S_IFDIR | config->mode;
701 inode->i_uid = config->uid; 700 inode->i_uid = config->uid;
702 inode->i_gid = config->gid; 701 inode->i_gid = config->gid;
703 inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode); 702 inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
704 info = HUGETLBFS_I(inode);
705 mpol_shared_policy_init(&info->policy, NULL);
706 inode->i_op = &hugetlbfs_dir_inode_operations; 703 inode->i_op = &hugetlbfs_dir_inode_operations;
707 inode->i_fop = &simple_dir_operations; 704 inode->i_fop = &simple_dir_operations;
708 /* directory inodes start off with i_nlink == 2 (for "." entry) */ 705 /* directory inodes start off with i_nlink == 2 (for "." entry) */
@@ -733,7 +730,6 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb,
733 730
734 inode = new_inode(sb); 731 inode = new_inode(sb);
735 if (inode) { 732 if (inode) {
736 struct hugetlbfs_inode_info *info;
737 inode->i_ino = get_next_ino(); 733 inode->i_ino = get_next_ino();
738 inode_init_owner(inode, dir, mode); 734 inode_init_owner(inode, dir, mode);
739 lockdep_set_class(&inode->i_mapping->i_mmap_rwsem, 735 lockdep_set_class(&inode->i_mapping->i_mmap_rwsem,
@@ -741,15 +737,6 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb,
741 inode->i_mapping->a_ops = &hugetlbfs_aops; 737 inode->i_mapping->a_ops = &hugetlbfs_aops;
742 inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode); 738 inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
743 inode->i_mapping->private_data = resv_map; 739 inode->i_mapping->private_data = resv_map;
744 info = HUGETLBFS_I(inode);
745 /*
746 * The policy is initialized here even if we are creating a
747 * private inode because initialization simply creates an
748 * an empty rb tree and calls rwlock_init(), later when we
749 * call mpol_free_shared_policy() it will just return because
750 * the rb tree will still be empty.
751 */
752 mpol_shared_policy_init(&info->policy, NULL);
753 switch (mode & S_IFMT) { 740 switch (mode & S_IFMT) {
754 default: 741 default:
755 init_special_inode(inode, mode, dev); 742 init_special_inode(inode, mode, dev);
@@ -937,6 +924,18 @@ static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
937 hugetlbfs_inc_free_inodes(sbinfo); 924 hugetlbfs_inc_free_inodes(sbinfo);
938 return NULL; 925 return NULL;
939 } 926 }
927
928 /*
929 * Any time after allocation, hugetlbfs_destroy_inode can be called
930 * for the inode. mpol_free_shared_policy is unconditionally called
931 * as part of hugetlbfs_destroy_inode. So, initialize policy here
932 * in case of a quick call to destroy.
933 *
934 * Note that the policy is initialized even if we are creating a
935 * private inode. This simplifies hugetlbfs_destroy_inode.
936 */
937 mpol_shared_policy_init(&p->policy, NULL);
938
940 return &p->vfs_inode; 939 return &p->vfs_inode;
941} 940}
942 941
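
The hugetlbfs change moves mpol_shared_policy_init() out of the two get_inode paths and into hugetlbfs_alloc_inode(), because hugetlbfs_destroy_inode() calls mpol_free_shared_policy() unconditionally: an inode destroyed early, before the old init point was reached, would tear down an uninitialized policy. The general rule, sketched with illustrative names: whatever the destructor releases unconditionally must be set up in the allocator itself.

	struct obj {
		struct policy policy;	/* torn down unconditionally on destroy */
	};

	static struct obj *obj_alloc(void)
	{
		struct obj *p = kmem_cache_alloc(obj_cache, GFP_KERNEL);

		if (!p)
			return NULL;
		policy_init(&p->policy);	/* valid from the moment *p exists */
		return p;
	}

	static void obj_destroy(struct obj *p)
	{
		policy_free(&p->policy);	/* safe even if *p was never used */
		kmem_cache_free(obj_cache, p);
	}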
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index a1a359bfcc9c..5adc2fb62b0f 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -1125,10 +1125,8 @@ static journal_t *journal_init_common(struct block_device *bdev,
1125 1125
1126 /* Set up a default-sized revoke table for the new mount. */ 1126 /* Set up a default-sized revoke table for the new mount. */
1127 err = jbd2_journal_init_revoke(journal, JOURNAL_REVOKE_DEFAULT_HASH); 1127 err = jbd2_journal_init_revoke(journal, JOURNAL_REVOKE_DEFAULT_HASH);
1128 if (err) { 1128 if (err)
1129 kfree(journal); 1129 goto err_cleanup;
1130 return NULL;
1131 }
1132 1130
1133 spin_lock_init(&journal->j_history_lock); 1131 spin_lock_init(&journal->j_history_lock);
1134 1132
@@ -1145,23 +1143,25 @@ static journal_t *journal_init_common(struct block_device *bdev,
1145 journal->j_wbufsize = n; 1143 journal->j_wbufsize = n;
1146 journal->j_wbuf = kmalloc_array(n, sizeof(struct buffer_head *), 1144 journal->j_wbuf = kmalloc_array(n, sizeof(struct buffer_head *),
1147 GFP_KERNEL); 1145 GFP_KERNEL);
1148 if (!journal->j_wbuf) { 1146 if (!journal->j_wbuf)
1149 kfree(journal); 1147 goto err_cleanup;
1150 return NULL;
1151 }
1152 1148
1153 bh = getblk_unmovable(journal->j_dev, start, journal->j_blocksize); 1149 bh = getblk_unmovable(journal->j_dev, start, journal->j_blocksize);
1154 if (!bh) { 1150 if (!bh) {
1155 pr_err("%s: Cannot get buffer for journal superblock\n", 1151 pr_err("%s: Cannot get buffer for journal superblock\n",
1156 __func__); 1152 __func__);
1157 kfree(journal->j_wbuf); 1153 goto err_cleanup;
1158 kfree(journal);
1159 return NULL;
1160 } 1154 }
1161 journal->j_sb_buffer = bh; 1155 journal->j_sb_buffer = bh;
1162 journal->j_superblock = (journal_superblock_t *)bh->b_data; 1156 journal->j_superblock = (journal_superblock_t *)bh->b_data;
1163 1157
1164 return journal; 1158 return journal;
1159
1160err_cleanup:
1161 kfree(journal->j_wbuf);
1162 jbd2_journal_destroy_revoke(journal);
1163 kfree(journal);
1164 return NULL;
1165} 1165}
1166 1166
1167/* jbd2_journal_init_dev and jbd2_journal_init_inode: 1167/* jbd2_journal_init_dev and jbd2_journal_init_inode:
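
journal_init_common() previously duplicated its failure handling at each allocation site, and the deepest path leaked the revoke tables set up earlier. The rewrite funnels every failure through one err_cleanup label, which only works because each teardown step tolerates resources that were never created: kfree(NULL) is a no-op, and the revoke.c hunk below resets j_revoke_table[0] to NULL after a partial failure so the later jbd2_journal_destroy_revoke() cannot double-free. A condensed sketch of the idiom (names are illustrative):

	static struct ctx *ctx_create(void)
	{
		struct ctx *c = kzalloc(sizeof(*c), GFP_KERNEL);

		if (!c)
			return NULL;

		if (init_tables(c))	/* must leave c in a destroyable state */
			goto err_cleanup;

		c->buf = kmalloc_array(16, sizeof(*c->buf), GFP_KERNEL);
		if (!c->buf)
			goto err_cleanup;

		return c;

	err_cleanup:
		kfree(c->buf);		/* kfree(NULL) is a no-op */
		destroy_tables(c);	/* must tolerate partial init */
		kfree(c);
		return NULL;
	}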
diff --git a/fs/jbd2/revoke.c b/fs/jbd2/revoke.c
index cfc38b552118..f9aefcda5854 100644
--- a/fs/jbd2/revoke.c
+++ b/fs/jbd2/revoke.c
@@ -280,6 +280,7 @@ int jbd2_journal_init_revoke(journal_t *journal, int hash_size)
280 280
281fail1: 281fail1:
282 jbd2_journal_destroy_revoke_table(journal->j_revoke_table[0]); 282 jbd2_journal_destroy_revoke_table(journal->j_revoke_table[0]);
283 journal->j_revoke_table[0] = NULL;
283fail0: 284fail0:
284 return -ENOMEM; 285 return -ENOMEM;
285} 286}
diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
index 8e4dc7ab584c..ac2dfe0c5a9c 100644
--- a/fs/kernfs/file.c
+++ b/fs/kernfs/file.c
@@ -809,7 +809,8 @@ void kernfs_drain_open_files(struct kernfs_node *kn)
809 if (kn->flags & KERNFS_HAS_MMAP) 809 if (kn->flags & KERNFS_HAS_MMAP)
810 unmap_mapping_range(inode->i_mapping, 0, 0, 1); 810 unmap_mapping_range(inode->i_mapping, 0, 0, 1);
811 811
812 kernfs_release_file(kn, of); 812 if (kn->flags & KERNFS_HAS_RELEASE)
813 kernfs_release_file(kn, of);
813 } 814 }
814 815
815 mutex_unlock(&kernfs_open_file_mutex); 816 mutex_unlock(&kernfs_open_file_mutex);
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
index bb79972dc638..773774531aff 100644
--- a/fs/nfs/callback.c
+++ b/fs/nfs/callback.c
@@ -232,12 +232,12 @@ static struct svc_serv_ops nfs41_cb_sv_ops = {
232 .svo_module = THIS_MODULE, 232 .svo_module = THIS_MODULE,
233}; 233};
234 234
235struct svc_serv_ops *nfs4_cb_sv_ops[] = { 235static struct svc_serv_ops *nfs4_cb_sv_ops[] = {
236 [0] = &nfs40_cb_sv_ops, 236 [0] = &nfs40_cb_sv_ops,
237 [1] = &nfs41_cb_sv_ops, 237 [1] = &nfs41_cb_sv_ops,
238}; 238};
239#else 239#else
240struct svc_serv_ops *nfs4_cb_sv_ops[] = { 240static struct svc_serv_ops *nfs4_cb_sv_ops[] = {
241 [0] = &nfs40_cb_sv_ops, 241 [0] = &nfs40_cb_sv_ops,
242 [1] = NULL, 242 [1] = NULL,
243}; 243};
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 91a8d610ba0f..390ada8741bc 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -325,10 +325,33 @@ static struct nfs_client *nfs_match_client(const struct nfs_client_initdata *dat
325 return NULL; 325 return NULL;
326} 326}
327 327
328static bool nfs_client_init_is_complete(const struct nfs_client *clp) 328/*
329 * Return true if @clp is done initializing, false if still working on it.
330 *
331 * Use nfs_client_init_status to check if it was successful.
332 */
333bool nfs_client_init_is_complete(const struct nfs_client *clp)
329{ 334{
330 return clp->cl_cons_state <= NFS_CS_READY; 335 return clp->cl_cons_state <= NFS_CS_READY;
331} 336}
337EXPORT_SYMBOL_GPL(nfs_client_init_is_complete);
338
339/*
340 * Return 0 if @clp was successfully initialized, -errno otherwise.
341 *
342 * This must be called *after* nfs_client_init_is_complete() returns true,
343 * otherwise it will pop WARN_ON_ONCE and return -EINVAL
344 */
345int nfs_client_init_status(const struct nfs_client *clp)
346{
347 /* called without checking nfs_client_init_is_complete */
348 if (clp->cl_cons_state > NFS_CS_READY) {
349 WARN_ON_ONCE(1);
350 return -EINVAL;
351 }
352 return clp->cl_cons_state;
353}
354EXPORT_SYMBOL_GPL(nfs_client_init_status);
332 355
333int nfs_wait_client_init_complete(const struct nfs_client *clp) 356int nfs_wait_client_init_complete(const struct nfs_client *clp)
334{ 357{
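
nfs_client_init_is_complete() loses its static and gains a companion, nfs_client_init_status(), so that pNFS code (see the nfs4_pnfs_ds_connect() changes further down) can distinguish "initialization finished" from "initialization succeeded". The intended caller-side sequence, sketched loosely:

	static int wait_and_check_client(struct nfs_client *clp)
	{
		int err;

		err = nfs_wait_client_init_complete(clp);
		if (err < 0)		/* interrupted while waiting */
			return err;

		/* valid only once init is complete; returns 0 or a -errno */
		return nfs_client_init_status(clp);
	}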
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index fb499a3f21b5..f92ba8d6c556 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -2055,7 +2055,7 @@ int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
2055{ 2055{
2056 struct inode *old_inode = d_inode(old_dentry); 2056 struct inode *old_inode = d_inode(old_dentry);
2057 struct inode *new_inode = d_inode(new_dentry); 2057 struct inode *new_inode = d_inode(new_dentry);
2058 struct dentry *dentry = NULL, *rehash = NULL; 2058 struct dentry *dentry = NULL;
2059 struct rpc_task *task; 2059 struct rpc_task *task;
2060 int error = -EBUSY; 2060 int error = -EBUSY;
2061 2061
@@ -2078,10 +2078,8 @@ int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
2078 * To prevent any new references to the target during the 2078 * To prevent any new references to the target during the
2079 * rename, we unhash the dentry in advance. 2079 * rename, we unhash the dentry in advance.
2080 */ 2080 */
2081 if (!d_unhashed(new_dentry)) { 2081 if (!d_unhashed(new_dentry))
2082 d_drop(new_dentry); 2082 d_drop(new_dentry);
2083 rehash = new_dentry;
2084 }
2085 2083
2086 if (d_count(new_dentry) > 2) { 2084 if (d_count(new_dentry) > 2) {
2087 int err; 2085 int err;
@@ -2098,7 +2096,6 @@ int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
2098 goto out; 2096 goto out;
2099 2097
2100 new_dentry = dentry; 2098 new_dentry = dentry;
2101 rehash = NULL;
2102 new_inode = NULL; 2099 new_inode = NULL;
2103 } 2100 }
2104 } 2101 }
@@ -2119,8 +2116,6 @@ int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
2119 error = task->tk_status; 2116 error = task->tk_status;
2120 rpc_put_task(task); 2117 rpc_put_task(task);
2121out: 2118out:
2122 if (rehash)
2123 d_rehash(rehash);
2124 trace_nfs_rename_exit(old_dir, old_dentry, 2119 trace_nfs_rename_exit(old_dir, old_dentry,
2125 new_dir, new_dentry, error); 2120 new_dir, new_dentry, error);
2126 /* new dentry created? */ 2121 /* new dentry created? */
diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c
index 44347f4bdc15..acd30baca461 100644
--- a/fs/nfs/filelayout/filelayout.c
+++ b/fs/nfs/filelayout/filelayout.c
@@ -202,10 +202,10 @@ static int filelayout_async_handle_error(struct rpc_task *task,
202 task->tk_status); 202 task->tk_status);
203 nfs4_mark_deviceid_unavailable(devid); 203 nfs4_mark_deviceid_unavailable(devid);
204 pnfs_error_mark_layout_for_return(inode, lseg); 204 pnfs_error_mark_layout_for_return(inode, lseg);
205 pnfs_set_lo_fail(lseg);
206 rpc_wake_up(&tbl->slot_tbl_waitq); 205 rpc_wake_up(&tbl->slot_tbl_waitq);
207 /* fall through */ 206 /* fall through */
208 default: 207 default:
208 pnfs_set_lo_fail(lseg);
209reset: 209reset:
210 dprintk("%s Retry through MDS. Error %d\n", __func__, 210 dprintk("%s Retry through MDS. Error %d\n", __func__,
211 task->tk_status); 211 task->tk_status);
@@ -560,6 +560,50 @@ filelayout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
560 return PNFS_ATTEMPTED; 560 return PNFS_ATTEMPTED;
561} 561}
562 562
563static int
564filelayout_check_deviceid(struct pnfs_layout_hdr *lo,
565 struct nfs4_filelayout_segment *fl,
566 gfp_t gfp_flags)
567{
568 struct nfs4_deviceid_node *d;
569 struct nfs4_file_layout_dsaddr *dsaddr;
570 int status = -EINVAL;
571
572 /* find and reference the deviceid */
573 d = nfs4_find_get_deviceid(NFS_SERVER(lo->plh_inode), &fl->deviceid,
574 lo->plh_lc_cred, gfp_flags);
575 if (d == NULL)
576 goto out;
577
578 dsaddr = container_of(d, struct nfs4_file_layout_dsaddr, id_node);
579 /* Found deviceid is unavailable */
580 if (filelayout_test_devid_unavailable(&dsaddr->id_node))
581 goto out_put;
582
583 fl->dsaddr = dsaddr;
584
585 if (fl->first_stripe_index >= dsaddr->stripe_count) {
586 dprintk("%s Bad first_stripe_index %u\n",
587 __func__, fl->first_stripe_index);
588 goto out_put;
589 }
590
591 if ((fl->stripe_type == STRIPE_SPARSE &&
592 fl->num_fh > 1 && fl->num_fh != dsaddr->ds_num) ||
593 (fl->stripe_type == STRIPE_DENSE &&
594 fl->num_fh != dsaddr->stripe_count)) {
595 dprintk("%s num_fh %u not valid for given packing\n",
596 __func__, fl->num_fh);
597 goto out_put;
598 }
599 status = 0;
600out:
601 return status;
602out_put:
603 nfs4_fl_put_deviceid(dsaddr);
604 goto out;
605}
606
563/* 607/*
564 * filelayout_check_layout() 608 * filelayout_check_layout()
565 * 609 *
@@ -572,11 +616,8 @@ static int
572filelayout_check_layout(struct pnfs_layout_hdr *lo, 616filelayout_check_layout(struct pnfs_layout_hdr *lo,
573 struct nfs4_filelayout_segment *fl, 617 struct nfs4_filelayout_segment *fl,
574 struct nfs4_layoutget_res *lgr, 618 struct nfs4_layoutget_res *lgr,
575 struct nfs4_deviceid *id,
576 gfp_t gfp_flags) 619 gfp_t gfp_flags)
577{ 620{
578 struct nfs4_deviceid_node *d;
579 struct nfs4_file_layout_dsaddr *dsaddr;
580 int status = -EINVAL; 621 int status = -EINVAL;
581 622
582 dprintk("--> %s\n", __func__); 623 dprintk("--> %s\n", __func__);
@@ -601,41 +642,10 @@ filelayout_check_layout(struct pnfs_layout_hdr *lo,
601 goto out; 642 goto out;
602 } 643 }
603 644
604 /* find and reference the deviceid */
605 d = nfs4_find_get_deviceid(NFS_SERVER(lo->plh_inode), id,
606 lo->plh_lc_cred, gfp_flags);
607 if (d == NULL)
608 goto out;
609
610 dsaddr = container_of(d, struct nfs4_file_layout_dsaddr, id_node);
611 /* Found deviceid is unavailable */
612 if (filelayout_test_devid_unavailable(&dsaddr->id_node))
613 goto out_put;
614
615 fl->dsaddr = dsaddr;
616
617 if (fl->first_stripe_index >= dsaddr->stripe_count) {
618 dprintk("%s Bad first_stripe_index %u\n",
619 __func__, fl->first_stripe_index);
620 goto out_put;
621 }
622
623 if ((fl->stripe_type == STRIPE_SPARSE &&
624 fl->num_fh > 1 && fl->num_fh != dsaddr->ds_num) ||
625 (fl->stripe_type == STRIPE_DENSE &&
626 fl->num_fh != dsaddr->stripe_count)) {
627 dprintk("%s num_fh %u not valid for given packing\n",
628 __func__, fl->num_fh);
629 goto out_put;
630 }
631
632 status = 0; 645 status = 0;
633out: 646out:
634 dprintk("--> %s returns %d\n", __func__, status); 647 dprintk("--> %s returns %d\n", __func__, status);
635 return status; 648 return status;
636out_put:
637 nfs4_fl_put_deviceid(dsaddr);
638 goto out;
639} 649}
640 650
641static void _filelayout_free_lseg(struct nfs4_filelayout_segment *fl) 651static void _filelayout_free_lseg(struct nfs4_filelayout_segment *fl)
@@ -657,7 +667,6 @@ static int
657filelayout_decode_layout(struct pnfs_layout_hdr *flo, 667filelayout_decode_layout(struct pnfs_layout_hdr *flo,
658 struct nfs4_filelayout_segment *fl, 668 struct nfs4_filelayout_segment *fl,
659 struct nfs4_layoutget_res *lgr, 669 struct nfs4_layoutget_res *lgr,
660 struct nfs4_deviceid *id,
661 gfp_t gfp_flags) 670 gfp_t gfp_flags)
662{ 671{
663 struct xdr_stream stream; 672 struct xdr_stream stream;
@@ -682,9 +691,9 @@ filelayout_decode_layout(struct pnfs_layout_hdr *flo,
682 if (unlikely(!p)) 691 if (unlikely(!p))
683 goto out_err; 692 goto out_err;
684 693
685 memcpy(id, p, sizeof(*id)); 694 memcpy(&fl->deviceid, p, sizeof(fl->deviceid));
686 p += XDR_QUADLEN(NFS4_DEVICEID4_SIZE); 695 p += XDR_QUADLEN(NFS4_DEVICEID4_SIZE);
687 nfs4_print_deviceid(id); 696 nfs4_print_deviceid(&fl->deviceid);
688 697
689 nfl_util = be32_to_cpup(p++); 698 nfl_util = be32_to_cpup(p++);
690 if (nfl_util & NFL4_UFLG_COMMIT_THRU_MDS) 699 if (nfl_util & NFL4_UFLG_COMMIT_THRU_MDS)
@@ -831,15 +840,14 @@ filelayout_alloc_lseg(struct pnfs_layout_hdr *layoutid,
831{ 840{
832 struct nfs4_filelayout_segment *fl; 841 struct nfs4_filelayout_segment *fl;
833 int rc; 842 int rc;
834 struct nfs4_deviceid id;
835 843
836 dprintk("--> %s\n", __func__); 844 dprintk("--> %s\n", __func__);
837 fl = kzalloc(sizeof(*fl), gfp_flags); 845 fl = kzalloc(sizeof(*fl), gfp_flags);
838 if (!fl) 846 if (!fl)
839 return NULL; 847 return NULL;
840 848
841 rc = filelayout_decode_layout(layoutid, fl, lgr, &id, gfp_flags); 849 rc = filelayout_decode_layout(layoutid, fl, lgr, gfp_flags);
842 if (rc != 0 || filelayout_check_layout(layoutid, fl, lgr, &id, gfp_flags)) { 850 if (rc != 0 || filelayout_check_layout(layoutid, fl, lgr, gfp_flags)) {
843 _filelayout_free_lseg(fl); 851 _filelayout_free_lseg(fl);
844 return NULL; 852 return NULL;
845 } 853 }
@@ -888,18 +896,51 @@ filelayout_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
888 return min(stripe_unit - (unsigned int)stripe_offset, size); 896 return min(stripe_unit - (unsigned int)stripe_offset, size);
889} 897}
890 898
899static struct pnfs_layout_segment *
900fl_pnfs_update_layout(struct inode *ino,
901 struct nfs_open_context *ctx,
902 loff_t pos,
903 u64 count,
904 enum pnfs_iomode iomode,
905 bool strict_iomode,
906 gfp_t gfp_flags)
907{
908 struct pnfs_layout_segment *lseg = NULL;
909 struct pnfs_layout_hdr *lo;
910 struct nfs4_filelayout_segment *fl;
911 int status;
912
913 lseg = pnfs_update_layout(ino, ctx, pos, count, iomode, strict_iomode,
914 gfp_flags);
915 if (!lseg)
916 lseg = ERR_PTR(-ENOMEM);
917 if (IS_ERR(lseg))
918 goto out;
919
920 lo = NFS_I(ino)->layout;
921 fl = FILELAYOUT_LSEG(lseg);
922
923 status = filelayout_check_deviceid(lo, fl, gfp_flags);
924 if (status)
925 lseg = ERR_PTR(status);
926out:
927 if (IS_ERR(lseg))
928 pnfs_put_lseg(lseg);
929 return lseg;
930}
931
891static void 932static void
892filelayout_pg_init_read(struct nfs_pageio_descriptor *pgio, 933filelayout_pg_init_read(struct nfs_pageio_descriptor *pgio,
893 struct nfs_page *req) 934 struct nfs_page *req)
894{ 935{
895 if (!pgio->pg_lseg) { 936 if (!pgio->pg_lseg) {
896 pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode, 937 pgio->pg_lseg = fl_pnfs_update_layout(pgio->pg_inode,
897 req->wb_context, 938 req->wb_context,
898 0, 939 0,
899 NFS4_MAX_UINT64, 940 NFS4_MAX_UINT64,
900 IOMODE_READ, 941 IOMODE_READ,
901 false, 942 false,
902 GFP_KERNEL); 943 GFP_KERNEL);
903 if (IS_ERR(pgio->pg_lseg)) { 944 if (IS_ERR(pgio->pg_lseg)) {
904 pgio->pg_error = PTR_ERR(pgio->pg_lseg); 945 pgio->pg_error = PTR_ERR(pgio->pg_lseg);
905 pgio->pg_lseg = NULL; 946 pgio->pg_lseg = NULL;
@@ -919,13 +960,13 @@ filelayout_pg_init_write(struct nfs_pageio_descriptor *pgio,
919 int status; 960 int status;
920 961
921 if (!pgio->pg_lseg) { 962 if (!pgio->pg_lseg) {
922 pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode, 963 pgio->pg_lseg = fl_pnfs_update_layout(pgio->pg_inode,
923 req->wb_context, 964 req->wb_context,
924 0, 965 0,
925 NFS4_MAX_UINT64, 966 NFS4_MAX_UINT64,
926 IOMODE_RW, 967 IOMODE_RW,
927 false, 968 false,
928 GFP_NOFS); 969 GFP_NOFS);
929 if (IS_ERR(pgio->pg_lseg)) { 970 if (IS_ERR(pgio->pg_lseg)) {
930 pgio->pg_error = PTR_ERR(pgio->pg_lseg); 971 pgio->pg_error = PTR_ERR(pgio->pg_lseg);
931 pgio->pg_lseg = NULL; 972 pgio->pg_lseg = NULL;
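
Taken together, the filelayout hunks defer device-ID resolution: filelayout_decode_layout() now only stashes the raw deviceid in the segment, and the GETDEVICEINFO lookup happens on first read or write via fl_pnfs_update_layout() -> filelayout_check_deviceid(), so a layout whose device is temporarily unknown no longer fails at decode time. A rough sketch of the lazy-binding shape (illustrative names, not the pNFS API):

	static struct lseg *lseg_for_io(struct inode *ino, struct io_ctx *ctx)
	{
		struct lseg *lseg = get_cached_or_fetch_layout(ino, ctx);
		int err;

		if (IS_ERR_OR_NULL(lseg))
			return lseg ? lseg : ERR_PTR(-ENOMEM);

		if (!lseg->dsaddr) {			/* deviceid not bound yet */
			err = resolve_deviceid(lseg);	/* GETDEVICEINFO on demand */
			if (err) {
				put_lseg(lseg);
				return ERR_PTR(err);
			}
		}
		return lseg;
	}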
diff --git a/fs/nfs/filelayout/filelayout.h b/fs/nfs/filelayout/filelayout.h
index 2896cb833a11..79323b5dab0c 100644
--- a/fs/nfs/filelayout/filelayout.h
+++ b/fs/nfs/filelayout/filelayout.h
@@ -55,15 +55,16 @@ struct nfs4_file_layout_dsaddr {
55}; 55};
56 56
57struct nfs4_filelayout_segment { 57struct nfs4_filelayout_segment {
58 struct pnfs_layout_segment generic_hdr; 58 struct pnfs_layout_segment generic_hdr;
59 u32 stripe_type; 59 u32 stripe_type;
60 u32 commit_through_mds; 60 u32 commit_through_mds;
61 u32 stripe_unit; 61 u32 stripe_unit;
62 u32 first_stripe_index; 62 u32 first_stripe_index;
63 u64 pattern_offset; 63 u64 pattern_offset;
64 struct nfs4_file_layout_dsaddr *dsaddr; /* Point to GETDEVINFO data */ 64 struct nfs4_deviceid deviceid;
65 unsigned int num_fh; 65 struct nfs4_file_layout_dsaddr *dsaddr; /* Point to GETDEVINFO data */
66 struct nfs_fh **fh_array; 66 unsigned int num_fh;
67 struct nfs_fh **fh_array;
67}; 68};
68 69
69struct nfs4_filelayout { 70struct nfs4_filelayout {
diff --git a/fs/nfs/filelayout/filelayoutdev.c b/fs/nfs/filelayout/filelayoutdev.c
index f956ca20a8a3..d913e818858f 100644
--- a/fs/nfs/filelayout/filelayoutdev.c
+++ b/fs/nfs/filelayout/filelayoutdev.c
@@ -266,6 +266,7 @@ nfs4_fl_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx)
266 struct nfs4_deviceid_node *devid = FILELAYOUT_DEVID_NODE(lseg); 266 struct nfs4_deviceid_node *devid = FILELAYOUT_DEVID_NODE(lseg);
267 struct nfs4_pnfs_ds *ret = ds; 267 struct nfs4_pnfs_ds *ret = ds;
268 struct nfs_server *s = NFS_SERVER(lseg->pls_layout->plh_inode); 268 struct nfs_server *s = NFS_SERVER(lseg->pls_layout->plh_inode);
269 int status;
269 270
270 if (ds == NULL) { 271 if (ds == NULL) {
271 printk(KERN_ERR "NFS: %s: No data server for offset index %d\n", 272 printk(KERN_ERR "NFS: %s: No data server for offset index %d\n",
@@ -277,9 +278,14 @@ nfs4_fl_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx)
277 if (ds->ds_clp) 278 if (ds->ds_clp)
278 goto out_test_devid; 279 goto out_test_devid;
279 280
280 nfs4_pnfs_ds_connect(s, ds, devid, dataserver_timeo, 281 status = nfs4_pnfs_ds_connect(s, ds, devid, dataserver_timeo,
281 dataserver_retrans, 4, 282 dataserver_retrans, 4,
282 s->nfs_client->cl_minorversion); 283 s->nfs_client->cl_minorversion);
284 if (status) {
285 nfs4_mark_deviceid_unavailable(devid);
286 ret = NULL;
287 goto out;
288 }
283 289
284out_test_devid: 290out_test_devid:
285 if (ret->ds_clp == NULL || 291 if (ret->ds_clp == NULL ||
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.h b/fs/nfs/flexfilelayout/flexfilelayout.h
index f4f39b0ab09b..98b34c9b0564 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.h
+++ b/fs/nfs/flexfilelayout/flexfilelayout.h
@@ -175,7 +175,19 @@ ff_layout_no_read_on_rw(struct pnfs_layout_segment *lseg)
175static inline bool 175static inline bool
176ff_layout_test_devid_unavailable(struct nfs4_deviceid_node *node) 176ff_layout_test_devid_unavailable(struct nfs4_deviceid_node *node)
177{ 177{
178 return nfs4_test_deviceid_unavailable(node); 178 /*
179 * Flexfiles should never mark a DS unavailable, but if it does
180 * print a (ratelimited) warning as this can affect performance.
181 */
182 if (nfs4_test_deviceid_unavailable(node)) {
183 u32 *p = (u32 *)node->deviceid.data;
184
185 pr_warn_ratelimited("NFS: flexfiles layout referencing an "
186 "unavailable device [%x%x%x%x]\n",
187 p[0], p[1], p[2], p[3]);
188 return true;
189 }
190 return false;
179} 191}
180 192
181static inline int 193static inline int
diff --git a/fs/nfs/flexfilelayout/flexfilelayoutdev.c b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
index e5a6f248697b..457cfeb1d5c1 100644
--- a/fs/nfs/flexfilelayout/flexfilelayoutdev.c
+++ b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
@@ -208,6 +208,10 @@ static bool ff_layout_mirror_valid(struct pnfs_layout_segment *lseg,
208 } else 208 } else
209 goto outerr; 209 goto outerr;
210 } 210 }
211
212 if (IS_ERR(mirror->mirror_ds))
213 goto outerr;
214
211 if (mirror->mirror_ds->ds == NULL) { 215 if (mirror->mirror_ds->ds == NULL) {
212 struct nfs4_deviceid_node *devid; 216 struct nfs4_deviceid_node *devid;
213 devid = &mirror->mirror_ds->id_node; 217 devid = &mirror->mirror_ds->id_node;
@@ -384,6 +388,7 @@ nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx,
384 struct inode *ino = lseg->pls_layout->plh_inode; 388 struct inode *ino = lseg->pls_layout->plh_inode;
385 struct nfs_server *s = NFS_SERVER(ino); 389 struct nfs_server *s = NFS_SERVER(ino);
386 unsigned int max_payload; 390 unsigned int max_payload;
391 int status;
387 392
388 if (!ff_layout_mirror_valid(lseg, mirror, true)) { 393 if (!ff_layout_mirror_valid(lseg, mirror, true)) {
389 pr_err_ratelimited("NFS: %s: No data server for offset index %d\n", 394 pr_err_ratelimited("NFS: %s: No data server for offset index %d\n",
@@ -404,7 +409,7 @@ nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx,
404 /* FIXME: For now we assume the server sent only one version of NFS 409 /* FIXME: For now we assume the server sent only one version of NFS
405 * to use for the DS. 410 * to use for the DS.
406 */ 411 */
407 nfs4_pnfs_ds_connect(s, ds, devid, dataserver_timeo, 412 status = nfs4_pnfs_ds_connect(s, ds, devid, dataserver_timeo,
408 dataserver_retrans, 413 dataserver_retrans,
409 mirror->mirror_ds->ds_versions[0].version, 414 mirror->mirror_ds->ds_versions[0].version,
410 mirror->mirror_ds->ds_versions[0].minor_version); 415 mirror->mirror_ds->ds_versions[0].minor_version);
@@ -420,11 +425,11 @@ nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx,
420 mirror->mirror_ds->ds_versions[0].wsize = max_payload; 425 mirror->mirror_ds->ds_versions[0].wsize = max_payload;
421 goto out; 426 goto out;
422 } 427 }
428out_fail:
423 ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout), 429 ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout),
424 mirror, lseg->pls_range.offset, 430 mirror, lseg->pls_range.offset,
425 lseg->pls_range.length, NFS4ERR_NXIO, 431 lseg->pls_range.length, NFS4ERR_NXIO,
426 OP_ILLEGAL, GFP_NOIO); 432 OP_ILLEGAL, GFP_NOIO);
427out_fail:
428 if (fail_return || !ff_layout_has_available_ds(lseg)) 433 if (fail_return || !ff_layout_has_available_ds(lseg))
429 pnfs_error_mark_layout_for_return(ino, lseg); 434 pnfs_error_mark_layout_for_return(ino, lseg);
430 ds = NULL; 435 ds = NULL;
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 09ca5095c04e..7b38fedb7e03 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -186,6 +186,8 @@ extern struct nfs_server *nfs_clone_server(struct nfs_server *,
186 struct nfs_fh *, 186 struct nfs_fh *,
187 struct nfs_fattr *, 187 struct nfs_fattr *,
188 rpc_authflavor_t); 188 rpc_authflavor_t);
189extern bool nfs_client_init_is_complete(const struct nfs_client *clp);
190extern int nfs_client_init_status(const struct nfs_client *clp);
189extern int nfs_wait_client_init_complete(const struct nfs_client *clp); 191extern int nfs_wait_client_init_complete(const struct nfs_client *clp);
190extern void nfs_mark_client_ready(struct nfs_client *clp, int state); 192extern void nfs_mark_client_ready(struct nfs_client *clp, int state);
191extern struct nfs_client *nfs4_set_ds_client(struct nfs_server *mds_srv, 193extern struct nfs_client *nfs4_set_ds_client(struct nfs_server *mds_srv,
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index 5ae9d64ea08b..8346ccbf2d52 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -1023,9 +1023,9 @@ static void nfs4_session_set_rwsize(struct nfs_server *server)
1023 server_resp_sz = sess->fc_attrs.max_resp_sz - nfs41_maxread_overhead; 1023 server_resp_sz = sess->fc_attrs.max_resp_sz - nfs41_maxread_overhead;
1024 server_rqst_sz = sess->fc_attrs.max_rqst_sz - nfs41_maxwrite_overhead; 1024 server_rqst_sz = sess->fc_attrs.max_rqst_sz - nfs41_maxwrite_overhead;
1025 1025
1026 if (server->rsize > server_resp_sz) 1026 if (!server->rsize || server->rsize > server_resp_sz)
1027 server->rsize = server_resp_sz; 1027 server->rsize = server_resp_sz;
1028 if (server->wsize > server_rqst_sz) 1028 if (!server->wsize || server->wsize > server_rqst_sz)
1029 server->wsize = server_rqst_sz; 1029 server->wsize = server_rqst_sz;
1030#endif /* CONFIG_NFS_V4_1 */ 1030#endif /* CONFIG_NFS_V4_1 */
1031} 1031}
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 1b183686c6d4..201ca3f2c4ba 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -2258,8 +2258,6 @@ static int nfs4_opendata_access(struct rpc_cred *cred,
2258 if ((mask & ~cache.mask & (MAY_READ | MAY_EXEC)) == 0) 2258 if ((mask & ~cache.mask & (MAY_READ | MAY_EXEC)) == 0)
2259 return 0; 2259 return 0;
2260 2260
2261 /* even though OPEN succeeded, access is denied. Close the file */
2262 nfs4_close_state(state, fmode);
2263 return -EACCES; 2261 return -EACCES;
2264} 2262}
2265 2263
@@ -2444,17 +2442,14 @@ static void nfs41_check_delegation_stateid(struct nfs4_state *state)
2444 } 2442 }
2445 2443
2446 nfs4_stateid_copy(&stateid, &delegation->stateid); 2444 nfs4_stateid_copy(&stateid, &delegation->stateid);
2447 if (test_bit(NFS_DELEGATION_REVOKED, &delegation->flags)) { 2445 if (test_bit(NFS_DELEGATION_REVOKED, &delegation->flags) ||
2446 !test_and_clear_bit(NFS_DELEGATION_TEST_EXPIRED,
2447 &delegation->flags)) {
2448 rcu_read_unlock(); 2448 rcu_read_unlock();
2449 nfs_finish_clear_delegation_stateid(state, &stateid); 2449 nfs_finish_clear_delegation_stateid(state, &stateid);
2450 return; 2450 return;
2451 } 2451 }
2452 2452
2453 if (!test_and_clear_bit(NFS_DELEGATION_TEST_EXPIRED, &delegation->flags)) {
2454 rcu_read_unlock();
2455 return;
2456 }
2457
2458 cred = get_rpccred(delegation->cred); 2453 cred = get_rpccred(delegation->cred);
2459 rcu_read_unlock(); 2454 rcu_read_unlock();
2460 status = nfs41_test_and_free_expired_stateid(server, &stateid, cred); 2455 status = nfs41_test_and_free_expired_stateid(server, &stateid, cred);
@@ -7427,11 +7422,11 @@ static void nfs4_exchange_id_release(void *data)
7427 struct nfs41_exchange_id_data *cdata = 7422 struct nfs41_exchange_id_data *cdata =
7428 (struct nfs41_exchange_id_data *)data; 7423 (struct nfs41_exchange_id_data *)data;
7429 7424
7430 nfs_put_client(cdata->args.client);
7431 if (cdata->xprt) { 7425 if (cdata->xprt) {
7432 xprt_put(cdata->xprt); 7426 xprt_put(cdata->xprt);
7433 rpc_clnt_xprt_switch_put(cdata->args.client->cl_rpcclient); 7427 rpc_clnt_xprt_switch_put(cdata->args.client->cl_rpcclient);
7434 } 7428 }
7429 nfs_put_client(cdata->args.client);
7435 kfree(cdata->res.impl_id); 7430 kfree(cdata->res.impl_id);
7436 kfree(cdata->res.server_scope); 7431 kfree(cdata->res.server_scope);
7437 kfree(cdata->res.server_owner); 7432 kfree(cdata->res.server_owner);
@@ -7538,10 +7533,8 @@ static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred,
7538 task_setup_data.callback_data = calldata; 7533 task_setup_data.callback_data = calldata;
7539 7534
7540 task = rpc_run_task(&task_setup_data); 7535 task = rpc_run_task(&task_setup_data);
7541 if (IS_ERR(task)) { 7536 if (IS_ERR(task))
7542 status = PTR_ERR(task); 7537 return PTR_ERR(task);
7543 goto out_impl_id;
7544 }
7545 7538
7546 if (!xprt) { 7539 if (!xprt) {
7547 status = rpc_wait_for_completion_task(task); 7540 status = rpc_wait_for_completion_task(task);
@@ -7569,6 +7562,7 @@ out_server_owner:
7569 kfree(calldata->res.server_owner); 7562 kfree(calldata->res.server_owner);
7570out_calldata: 7563out_calldata:
7571 kfree(calldata); 7564 kfree(calldata);
7565 nfs_put_client(clp);
7572 goto out; 7566 goto out;
7573} 7567}
7574 7568
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index f0369e362753..80ce289eea05 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -3942,7 +3942,7 @@ static int decode_attr_group(struct xdr_stream *xdr, uint32_t *bitmap,
3942 if (len <= 0) 3942 if (len <= 0)
3943 goto out; 3943 goto out;
3944 dprintk("%s: name=%s\n", __func__, group_name->data); 3944 dprintk("%s: name=%s\n", __func__, group_name->data);
3945 return NFS_ATTR_FATTR_OWNER_NAME; 3945 return NFS_ATTR_FATTR_GROUP_NAME;
3946 } else { 3946 } else {
3947 len = xdr_stream_decode_opaque_inline(xdr, (void **)&p, 3947 len = xdr_stream_decode_opaque_inline(xdr, (void **)&p,
3948 XDR_MAX_NETOBJ); 3948 XDR_MAX_NETOBJ);
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
index 63f77b49a586..590e1e35781f 100644
--- a/fs/nfs/pnfs.h
+++ b/fs/nfs/pnfs.h
@@ -367,7 +367,7 @@ void nfs4_pnfs_ds_put(struct nfs4_pnfs_ds *ds);
367struct nfs4_pnfs_ds *nfs4_pnfs_ds_add(struct list_head *dsaddrs, 367struct nfs4_pnfs_ds *nfs4_pnfs_ds_add(struct list_head *dsaddrs,
368 gfp_t gfp_flags); 368 gfp_t gfp_flags);
369void nfs4_pnfs_v3_ds_connect_unload(void); 369void nfs4_pnfs_v3_ds_connect_unload(void);
370void nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds, 370int nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds,
371 struct nfs4_deviceid_node *devid, unsigned int timeo, 371 struct nfs4_deviceid_node *devid, unsigned int timeo,
372 unsigned int retrans, u32 version, u32 minor_version); 372 unsigned int retrans, u32 version, u32 minor_version);
373struct nfs4_pnfs_ds_addr *nfs4_decode_mp_ds_addr(struct net *net, 373struct nfs4_pnfs_ds_addr *nfs4_decode_mp_ds_addr(struct net *net,
diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c
index 9414b492439f..7250b95549ec 100644
--- a/fs/nfs/pnfs_nfs.c
+++ b/fs/nfs/pnfs_nfs.c
@@ -745,15 +745,17 @@ out:
745/* 745/*
746 * Create an rpc connection to the nfs4_pnfs_ds data server. 746 * Create an rpc connection to the nfs4_pnfs_ds data server.
747 * Currently only supports IPv4 and IPv6 addresses. 747 * Currently only supports IPv4 and IPv6 addresses.
748 * If connection fails, make devid unavailable. 748 * If connection fails, make devid unavailable and return a -errno.
749 */ 749 */
750void nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds, 750int nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds,
751 struct nfs4_deviceid_node *devid, unsigned int timeo, 751 struct nfs4_deviceid_node *devid, unsigned int timeo,
752 unsigned int retrans, u32 version, u32 minor_version) 752 unsigned int retrans, u32 version, u32 minor_version)
753{ 753{
754 if (test_and_set_bit(NFS4DS_CONNECTING, &ds->ds_state) == 0) { 754 int err;
755 int err = 0;
756 755
756again:
757 err = 0;
758 if (test_and_set_bit(NFS4DS_CONNECTING, &ds->ds_state) == 0) {
757 if (version == 3) { 759 if (version == 3) {
758 err = _nfs4_pnfs_v3_ds_connect(mds_srv, ds, timeo, 760 err = _nfs4_pnfs_v3_ds_connect(mds_srv, ds, timeo,
759 retrans); 761 retrans);
@@ -766,12 +768,29 @@ void nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds,
766 err = -EPROTONOSUPPORT; 768 err = -EPROTONOSUPPORT;
767 } 769 }
768 770
769 if (err)
770 nfs4_mark_deviceid_unavailable(devid);
771 nfs4_clear_ds_conn_bit(ds); 771 nfs4_clear_ds_conn_bit(ds);
772 } else { 772 } else {
773 nfs4_wait_ds_connect(ds); 773 nfs4_wait_ds_connect(ds);
774
775 /* what was waited on didn't connect AND didn't mark unavail */
776 if (!ds->ds_clp && !nfs4_test_deviceid_unavailable(devid))
777 goto again;
774 } 778 }
779
780 /*
781 * At this point the ds->ds_clp should be ready, but it might have
782 * hit an error.
783 */
784 if (!err) {
785 if (!ds->ds_clp || !nfs_client_init_is_complete(ds->ds_clp)) {
786 WARN_ON_ONCE(ds->ds_clp ||
787 !nfs4_test_deviceid_unavailable(devid));
788 return -EINVAL;
789 }
790 err = nfs_client_init_status(ds->ds_clp);
791 }
792
793 return err;
775} 794}
776EXPORT_SYMBOL_GPL(nfs4_pnfs_ds_connect); 795EXPORT_SYMBOL_GPL(nfs4_pnfs_ds_connect);
777 796
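
nfs4_pnfs_ds_connect() now reports success or failure instead of returning void, and waiters retry: a task that loses the race for the NFS4DS_CONNECTING bit sleeps, and if it wakes to find neither a connected ds_clp nor a device marked unavailable (the winner failed transiently), it loops back and attempts the connect itself. The control flow, reduced to a sketch with stand-in helper names:

	static int ds_connect(struct ds *ds, struct devid *devid)
	{
		int err;

	again:
		err = 0;
		if (test_and_set_bit(DS_CONNECTING, &ds->state) == 0) {
			err = do_connect(ds);		/* we own this attempt */
			clear_connecting_and_wake(ds);
		} else {
			wait_for_connecting_clear(ds);
			/* winner neither connected nor flagged the device: retry */
			if (!ds->clp && !devid_unavailable(devid))
				goto again;
		}
		return err;
	}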
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index e75b056f46f4..abb2c8a3be42 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -1784,7 +1784,8 @@ static void nfs_commit_release_pages(struct nfs_commit_data *data)
1784 (long long)req_offset(req)); 1784 (long long)req_offset(req));
1785 if (status < 0) { 1785 if (status < 0) {
1786 nfs_context_set_write_error(req->wb_context, status); 1786 nfs_context_set_write_error(req->wb_context, status);
1787 nfs_inode_remove_request(req); 1787 if (req->wb_page)
1788 nfs_inode_remove_request(req);
1788 dprintk_cont(", error = %d\n", status); 1789 dprintk_cont(", error = %d\n", status);
1789 goto next; 1790 goto next;
1790 } 1791 }
@@ -1793,7 +1794,8 @@ static void nfs_commit_release_pages(struct nfs_commit_data *data)
1793 * returned by the server against all stored verfs. */ 1794 * returned by the server against all stored verfs. */
1794 if (!nfs_write_verifier_cmp(&req->wb_verf, &data->verf.verifier)) { 1795 if (!nfs_write_verifier_cmp(&req->wb_verf, &data->verf.verifier)) {
1795 /* We have a match */ 1796 /* We have a match */
1796 nfs_inode_remove_request(req); 1797 if (req->wb_page)
1798 nfs_inode_remove_request(req);
1797 dprintk_cont(" OK\n"); 1799 dprintk_cont(" OK\n");
1798 goto next; 1800 goto next;
1799 } 1801 }
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 73e75ac90525..8bf8f667a8cf 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -538,13 +538,21 @@ out_free:
538 538
539static ssize_t 539static ssize_t
540nfsd_print_version_support(char *buf, int remaining, const char *sep, 540nfsd_print_version_support(char *buf, int remaining, const char *sep,
541 unsigned vers, unsigned minor) 541 unsigned vers, int minor)
542{ 542{
543 const char *format = (minor == 0) ? "%s%c%u" : "%s%c%u.%u"; 543 const char *format = minor < 0 ? "%s%c%u" : "%s%c%u.%u";
544 bool supported = !!nfsd_vers(vers, NFSD_TEST); 544 bool supported = !!nfsd_vers(vers, NFSD_TEST);
545 545
546 if (vers == 4 && !nfsd_minorversion(minor, NFSD_TEST)) 546 if (vers == 4 && minor >= 0 &&
547 !nfsd_minorversion(minor, NFSD_TEST))
547 supported = false; 548 supported = false;
549 if (minor == 0 && supported)
550 /*
551 * special case for backward compatibility.
552 * +4.0 is never reported, it is implied by
553 * +4, unless -4.0 is present.
554 */
555 return 0;
548 return snprintf(buf, remaining, format, sep, 556 return snprintf(buf, remaining, format, sep,
549 supported ? '+' : '-', vers, minor); 557 supported ? '+' : '-', vers, minor);
550} 558}
@@ -554,7 +562,6 @@ static ssize_t __write_versions(struct file *file, char *buf, size_t size)
554 char *mesg = buf; 562 char *mesg = buf;
555 char *vers, *minorp, sign; 563 char *vers, *minorp, sign;
556 int len, num, remaining; 564 int len, num, remaining;
557 unsigned minor;
558 ssize_t tlen = 0; 565 ssize_t tlen = 0;
559 char *sep; 566 char *sep;
560 struct nfsd_net *nn = net_generic(netns(file), nfsd_net_id); 567 struct nfsd_net *nn = net_generic(netns(file), nfsd_net_id);
@@ -575,6 +582,7 @@ static ssize_t __write_versions(struct file *file, char *buf, size_t size)
575 if (len <= 0) return -EINVAL; 582 if (len <= 0) return -EINVAL;
576 do { 583 do {
577 enum vers_op cmd; 584 enum vers_op cmd;
585 unsigned minor;
578 sign = *vers; 586 sign = *vers;
579 if (sign == '+' || sign == '-') 587 if (sign == '+' || sign == '-')
580 num = simple_strtol((vers+1), &minorp, 0); 588 num = simple_strtol((vers+1), &minorp, 0);
@@ -585,8 +593,8 @@ static ssize_t __write_versions(struct file *file, char *buf, size_t size)
585 return -EINVAL; 593 return -EINVAL;
586 if (kstrtouint(minorp+1, 0, &minor) < 0) 594 if (kstrtouint(minorp+1, 0, &minor) < 0)
587 return -EINVAL; 595 return -EINVAL;
588 } else 596 }
589 minor = 0; 597
590 cmd = sign == '-' ? NFSD_CLEAR : NFSD_SET; 598 cmd = sign == '-' ? NFSD_CLEAR : NFSD_SET;
591 switch(num) { 599 switch(num) {
592 case 2: 600 case 2:
@@ -594,8 +602,20 @@ static ssize_t __write_versions(struct file *file, char *buf, size_t size)
594 nfsd_vers(num, cmd); 602 nfsd_vers(num, cmd);
595 break; 603 break;
596 case 4: 604 case 4:
597 if (nfsd_minorversion(minor, cmd) >= 0) 605 if (*minorp == '.') {
598 break; 606 if (nfsd_minorversion(minor, cmd) < 0)
607 return -EINVAL;
608 } else if ((cmd == NFSD_SET) != nfsd_vers(num, NFSD_TEST)) {
609 /*
610 * Either we have +4 and no minors are enabled,
611 * or we have -4 and at least one minor is enabled.
612 * In either case, propagate 'cmd' to all minors.
613 */
614 minor = 0;
615 while (nfsd_minorversion(minor, cmd) >= 0)
616 minor++;
617 }
618 break;
599 default: 619 default:
600 return -EINVAL; 620 return -EINVAL;
601 } 621 }
@@ -612,9 +632,11 @@ static ssize_t __write_versions(struct file *file, char *buf, size_t size)
612 sep = ""; 632 sep = "";
613 remaining = SIMPLE_TRANSACTION_LIMIT; 633 remaining = SIMPLE_TRANSACTION_LIMIT;
614 for (num=2 ; num <= 4 ; num++) { 634 for (num=2 ; num <= 4 ; num++) {
635 int minor;
615 if (!nfsd_vers(num, NFSD_AVAIL)) 636 if (!nfsd_vers(num, NFSD_AVAIL))
616 continue; 637 continue;
617 minor = 0; 638
639 minor = -1;
618 do { 640 do {
619 len = nfsd_print_version_support(buf, remaining, 641 len = nfsd_print_version_support(buf, remaining,
620 sep, num, minor); 642 sep, num, minor);
@@ -624,7 +646,8 @@ static ssize_t __write_versions(struct file *file, char *buf, size_t size)
624 buf += len; 646 buf += len;
625 tlen += len; 647 tlen += len;
626 minor++; 648 minor++;
627 sep = " "; 649 if (len)
650 sep = " ";
628 } while (num == 4 && minor <= NFSD_SUPPORTED_MINOR_VERSION); 651 } while (num == 4 && minor <= NFSD_SUPPORTED_MINOR_VERSION);
629 } 652 }
630out: 653out:
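
With the parsing change, a bare "+4" or "-4" written to the nfsd versions control file now propagates to every supported 4.x minor (previously a bare token was treated as 4.0 only), while "+4.1"-style tokens keep addressing a single minor, and "+4.0" is no longer reported because it is implied by "+4" unless 4.0 has been explicitly disabled. A small userspace sketch of driving the interface; it assumes the nfsd filesystem is mounted at the usual /proc/fs/nfsd and that nfsd is not yet running:

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		const char *cmd = "-4";	/* now disables 4.0, 4.1, 4.2 in one write */
		int fd = open("/proc/fs/nfsd/versions", O_WRONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (write(fd, cmd, strlen(cmd)) < 0)
			perror("write");
		close(fd);
		return 0;
	}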
diff --git a/fs/nfsd/nfsproc.c b/fs/nfsd/nfsproc.c
index fa82b7707e85..03a7e9da4da0 100644
--- a/fs/nfsd/nfsproc.c
+++ b/fs/nfsd/nfsproc.c
@@ -786,6 +786,7 @@ nfserrno (int errno)
786 { nfserr_serverfault, -ESERVERFAULT }, 786 { nfserr_serverfault, -ESERVERFAULT },
787 { nfserr_serverfault, -ENFILE }, 787 { nfserr_serverfault, -ENFILE },
788 { nfserr_io, -EUCLEAN }, 788 { nfserr_io, -EUCLEAN },
789 { nfserr_perm, -ENOKEY },
789 }; 790 };
790 int i; 791 int i;
791 792
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index 786a4a2cb2d7..31e1f9593457 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -167,7 +167,8 @@ nfsd_adjust_nfsd_versions4(void)
167 167
168int nfsd_minorversion(u32 minorversion, enum vers_op change) 168int nfsd_minorversion(u32 minorversion, enum vers_op change)
169{ 169{
170 if (minorversion > NFSD_SUPPORTED_MINOR_VERSION) 170 if (minorversion > NFSD_SUPPORTED_MINOR_VERSION &&
171 change != NFSD_AVAIL)
171 return -1; 172 return -1;
172 switch(change) { 173 switch(change) {
173 case NFSD_SET: 174 case NFSD_SET:
@@ -415,23 +416,20 @@ static void nfsd_last_thread(struct svc_serv *serv, struct net *net)
415 416
416void nfsd_reset_versions(void) 417void nfsd_reset_versions(void)
417{ 418{
418 int found_one = 0;
419 int i; 419 int i;
420 420
421 for (i = NFSD_MINVERS; i < NFSD_NRVERS; i++) { 421 for (i = 0; i < NFSD_NRVERS; i++)
422 if (nfsd_program.pg_vers[i]) 422 if (nfsd_vers(i, NFSD_TEST))
423 found_one = 1; 423 return;
424 }
425 424
426 if (!found_one) { 425 for (i = 0; i < NFSD_NRVERS; i++)
427 for (i = NFSD_MINVERS; i < NFSD_NRVERS; i++) 426 if (i != 4)
428 nfsd_program.pg_vers[i] = nfsd_version[i]; 427 nfsd_vers(i, NFSD_SET);
429#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL) 428 else {
430 for (i = NFSD_ACL_MINVERS; i < NFSD_ACL_NRVERS; i++) 429 int minor = 0;
431 nfsd_acl_program.pg_vers[i] = 430 while (nfsd_minorversion(minor, NFSD_SET) >= 0)
432 nfsd_acl_version[i]; 431 minor++;
433#endif 432 }
434 }
435} 433}
436 434
437/* 435/*
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index 4348027384f5..d0ab7e56d0b4 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -1863,7 +1863,7 @@ static int o2net_accept_one(struct socket *sock, int *more)
1863 1863
1864 new_sock->type = sock->type; 1864 new_sock->type = sock->type;
1865 new_sock->ops = sock->ops; 1865 new_sock->ops = sock->ops;
1866 ret = sock->ops->accept(sock, new_sock, O_NONBLOCK); 1866 ret = sock->ops->accept(sock, new_sock, O_NONBLOCK, false);
1867 if (ret < 0) 1867 if (ret < 0)
1868 goto out; 1868 goto out;
1869 1869
diff --git a/fs/orangefs/super.c b/fs/orangefs/super.c
index 67c24351a67f..cd261c8de53a 100644
--- a/fs/orangefs/super.c
+++ b/fs/orangefs/super.c
@@ -263,8 +263,13 @@ int orangefs_remount(struct orangefs_sb_info_s *orangefs_sb)
263 if (!new_op) 263 if (!new_op)
264 return -ENOMEM; 264 return -ENOMEM;
265 new_op->upcall.req.features.features = 0; 265 new_op->upcall.req.features.features = 0;
266 ret = service_operation(new_op, "orangefs_features", 0); 266 ret = service_operation(new_op, "orangefs_features",
267 orangefs_features = new_op->downcall.resp.features.features; 267 ORANGEFS_OP_PRIORITY | ORANGEFS_OP_NO_MUTEX);
268 if (!ret)
269 orangefs_features =
270 new_op->downcall.resp.features.features;
271 else
272 orangefs_features = 0;
268 op_release(new_op); 273 op_release(new_op);
269 } else { 274 } else {
270 orangefs_features = 0; 275 orangefs_features = 0;
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index 8f91ec66baa3..d04ea4349909 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -1074,6 +1074,7 @@ static int sysctl_check_table(const char *path, struct ctl_table *table)
1074 1074
1075 if ((table->proc_handler == proc_dostring) || 1075 if ((table->proc_handler == proc_dostring) ||
1076 (table->proc_handler == proc_dointvec) || 1076 (table->proc_handler == proc_dointvec) ||
1077 (table->proc_handler == proc_douintvec) ||
1077 (table->proc_handler == proc_dointvec_minmax) || 1078 (table->proc_handler == proc_dointvec_minmax) ||
1078 (table->proc_handler == proc_dointvec_jiffies) || 1079 (table->proc_handler == proc_dointvec_jiffies) ||
1079 (table->proc_handler == proc_dointvec_userhz_jiffies) || 1080 (table->proc_handler == proc_dointvec_userhz_jiffies) ||
diff --git a/fs/stat.c b/fs/stat.c
index fa0be59340cc..c6c963b2546b 100644
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -130,9 +130,13 @@ EXPORT_SYMBOL(vfs_getattr);
130int vfs_statx_fd(unsigned int fd, struct kstat *stat, 130int vfs_statx_fd(unsigned int fd, struct kstat *stat,
131 u32 request_mask, unsigned int query_flags) 131 u32 request_mask, unsigned int query_flags)
132{ 132{
133 struct fd f = fdget_raw(fd); 133 struct fd f;
134 int error = -EBADF; 134 int error = -EBADF;
135 135
136 if (query_flags & ~KSTAT_QUERY_FLAGS)
137 return -EINVAL;
138
139 f = fdget_raw(fd);
136 if (f.file) { 140 if (f.file) {
137 error = vfs_getattr(&f.file->f_path, stat, 141 error = vfs_getattr(&f.file->f_path, stat,
138 request_mask, query_flags); 142 request_mask, query_flags);
@@ -155,9 +159,6 @@ EXPORT_SYMBOL(vfs_statx_fd);
155 * Additionally, the use of AT_SYMLINK_NOFOLLOW in flags will prevent a symlink 159 * Additionally, the use of AT_SYMLINK_NOFOLLOW in flags will prevent a symlink
156 * at the given name from being referenced. 160 * at the given name from being referenced.
157 * 161 *
158 * The caller must have preset stat->request_mask as for vfs_getattr(). The
159 * flags are also used to load up stat->query_flags.
160 *
161 * 0 will be returned on success, and a -ve error code if unsuccessful. 162 * 0 will be returned on success, and a -ve error code if unsuccessful.
162 */ 163 */
163int vfs_statx(int dfd, const char __user *filename, int flags, 164int vfs_statx(int dfd, const char __user *filename, int flags,
@@ -509,46 +510,38 @@ SYSCALL_DEFINE4(fstatat64, int, dfd, const char __user *, filename,
509} 510}
510#endif /* __ARCH_WANT_STAT64 || __ARCH_WANT_COMPAT_STAT64 */ 511#endif /* __ARCH_WANT_STAT64 || __ARCH_WANT_COMPAT_STAT64 */
511 512
512static inline int __put_timestamp(struct timespec *kts, 513static noinline_for_stack int
513 struct statx_timestamp __user *uts) 514cp_statx(const struct kstat *stat, struct statx __user *buffer)
514{
515 return (__put_user(kts->tv_sec, &uts->tv_sec ) ||
516 __put_user(kts->tv_nsec, &uts->tv_nsec ) ||
517 __put_user(0, &uts->__reserved ));
518}
519
520/*
521 * Set the statx results.
522 */
523static long statx_set_result(struct kstat *stat, struct statx __user *buffer)
524{ 515{
525 uid_t uid = from_kuid_munged(current_user_ns(), stat->uid); 516 struct statx tmp;
526 gid_t gid = from_kgid_munged(current_user_ns(), stat->gid); 517
527 518 memset(&tmp, 0, sizeof(tmp));
528 if (__put_user(stat->result_mask, &buffer->stx_mask ) || 519
529 __put_user(stat->mode, &buffer->stx_mode ) || 520 tmp.stx_mask = stat->result_mask;
530 __clear_user(&buffer->__spare0, sizeof(buffer->__spare0)) || 521 tmp.stx_blksize = stat->blksize;
531 __put_user(stat->nlink, &buffer->stx_nlink ) || 522 tmp.stx_attributes = stat->attributes;
532 __put_user(uid, &buffer->stx_uid ) || 523 tmp.stx_nlink = stat->nlink;
533 __put_user(gid, &buffer->stx_gid ) || 524 tmp.stx_uid = from_kuid_munged(current_user_ns(), stat->uid);
534 __put_user(stat->attributes, &buffer->stx_attributes ) || 525 tmp.stx_gid = from_kgid_munged(current_user_ns(), stat->gid);
535 __put_user(stat->blksize, &buffer->stx_blksize ) || 526 tmp.stx_mode = stat->mode;
536 __put_user(MAJOR(stat->rdev), &buffer->stx_rdev_major ) || 527 tmp.stx_ino = stat->ino;
537 __put_user(MINOR(stat->rdev), &buffer->stx_rdev_minor ) || 528 tmp.stx_size = stat->size;
538 __put_user(MAJOR(stat->dev), &buffer->stx_dev_major ) || 529 tmp.stx_blocks = stat->blocks;
539 __put_user(MINOR(stat->dev), &buffer->stx_dev_minor ) || 530 tmp.stx_attributes_mask = stat->attributes_mask;
540 __put_timestamp(&stat->atime, &buffer->stx_atime ) || 531 tmp.stx_atime.tv_sec = stat->atime.tv_sec;
541 __put_timestamp(&stat->btime, &buffer->stx_btime ) || 532 tmp.stx_atime.tv_nsec = stat->atime.tv_nsec;
542 __put_timestamp(&stat->ctime, &buffer->stx_ctime ) || 533 tmp.stx_btime.tv_sec = stat->btime.tv_sec;
543 __put_timestamp(&stat->mtime, &buffer->stx_mtime ) || 534 tmp.stx_btime.tv_nsec = stat->btime.tv_nsec;
544 __put_user(stat->ino, &buffer->stx_ino ) || 535 tmp.stx_ctime.tv_sec = stat->ctime.tv_sec;
545 __put_user(stat->size, &buffer->stx_size ) || 536 tmp.stx_ctime.tv_nsec = stat->ctime.tv_nsec;
546 __put_user(stat->blocks, &buffer->stx_blocks ) || 537 tmp.stx_mtime.tv_sec = stat->mtime.tv_sec;
547 __clear_user(&buffer->__spare1, sizeof(buffer->__spare1)) || 538 tmp.stx_mtime.tv_nsec = stat->mtime.tv_nsec;
548 __clear_user(&buffer->__spare2, sizeof(buffer->__spare2))) 539 tmp.stx_rdev_major = MAJOR(stat->rdev);
549 return -EFAULT; 540 tmp.stx_rdev_minor = MINOR(stat->rdev);
550 541 tmp.stx_dev_major = MAJOR(stat->dev);
551 return 0; 542 tmp.stx_dev_minor = MINOR(stat->dev);
543
544 return copy_to_user(buffer, &tmp, sizeof(tmp)) ? -EFAULT : 0;
552} 545}
553 546
554/** 547/**
@@ -570,10 +563,10 @@ SYSCALL_DEFINE5(statx,
570 struct kstat stat; 563 struct kstat stat;
571 int error; 564 int error;
572 565
566 if (mask & STATX__RESERVED)
567 return -EINVAL;
573 if ((flags & AT_STATX_SYNC_TYPE) == AT_STATX_SYNC_TYPE) 568 if ((flags & AT_STATX_SYNC_TYPE) == AT_STATX_SYNC_TYPE)
574 return -EINVAL; 569 return -EINVAL;
575 if (!access_ok(VERIFY_WRITE, buffer, sizeof(*buffer)))
576 return -EFAULT;
577 570
578 if (filename) 571 if (filename)
579 error = vfs_statx(dfd, filename, flags, &stat, mask); 572 error = vfs_statx(dfd, filename, flags, &stat, mask);
@@ -581,7 +574,8 @@ SYSCALL_DEFINE5(statx,
581 error = vfs_statx_fd(dfd, &stat, mask, flags); 574 error = vfs_statx_fd(dfd, &stat, mask, flags);
582 if (error) 575 if (error)
583 return error; 576 return error;
584 return statx_set_result(&stat, buffer); 577
578 return cp_statx(&stat, buffer);
585} 579}
586 580
587/* Caller is here responsible for sufficient locking (ie. inode->i_lock) */ 581/* Caller is here responsible for sufficient locking (ie. inode->i_lock) */
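
The statx hunks harden the new syscall's ABI: reserved mask bits and unknown query flags are rejected up front, and cp_statx() builds the whole record in a zeroed on-stack struct statx and copies it out with a single copy_to_user(), so struct padding and the __spare fields can never leak stack contents (the memset does the clearing, and the access_ok()/__put_user chain disappears). For reference, a userspace sketch of invoking the syscall directly; glibc had no wrapper at this point, so this assumes kernel headers that provide __NR_statx and linux/stat.h:

	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/stat.h>

	int main(void)
	{
		struct statx stx;

		if (syscall(__NR_statx, AT_FDCWD, "/etc/hostname", 0,
			    STATX_BASIC_STATS, &stx) < 0) {
			perror("statx");
			return 1;
		}
		printf("size=%llu blocks=%llu\n",
		       (unsigned long long)stx.stx_size,
		       (unsigned long long)stx.stx_blocks);
		return 0;
	}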
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
index b803213d1307..39c75a86c67f 100644
--- a/fs/sysfs/file.c
+++ b/fs/sysfs/file.c
@@ -108,7 +108,7 @@ static ssize_t sysfs_kf_read(struct kernfs_open_file *of, char *buf,
108{ 108{
109 const struct sysfs_ops *ops = sysfs_file_ops(of->kn); 109 const struct sysfs_ops *ops = sysfs_file_ops(of->kn);
110 struct kobject *kobj = of->kn->parent->priv; 110 struct kobject *kobj = of->kn->parent->priv;
111 size_t len; 111 ssize_t len;
112 112
113 /* 113 /*
114 * If buf != of->prealloc_buf, we don't know how 114 * If buf != of->prealloc_buf, we don't know how
@@ -117,13 +117,15 @@ static ssize_t sysfs_kf_read(struct kernfs_open_file *of, char *buf,
117 if (WARN_ON_ONCE(buf != of->prealloc_buf)) 117 if (WARN_ON_ONCE(buf != of->prealloc_buf))
118 return 0; 118 return 0;
119 len = ops->show(kobj, of->kn->priv, buf); 119 len = ops->show(kobj, of->kn->priv, buf);
120 if (len < 0)
121 return len;
120 if (pos) { 122 if (pos) {
121 if (len <= pos) 123 if (len <= pos)
122 return 0; 124 return 0;
123 len -= pos; 125 len -= pos;
124 memmove(buf, buf + pos, len); 126 memmove(buf, buf + pos, len);
125 } 127 }
126 return min(count, len); 128 return min_t(ssize_t, count, len);
127} 129}
128 130
129/* kernfs write callback for regular sysfs files */ 131/* kernfs write callback for regular sysfs files */
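
The sysfs fix is a classic signedness bug: ops->show() returns ssize_t, and a negative error stored in a size_t becomes a huge unsigned value, so the "len <= pos" short-circuit never fires and the subsequent memmove() runs with a bogus length. A standalone demonstration of the comparison going wrong:

	#include <stdio.h>
	#include <sys/types.h>

	int main(void)
	{
		size_t  ulen = (size_t)-5;	/* -EIO mistakenly kept unsigned */
		ssize_t slen = -5;		/* the corrected type */
		size_t  pos  = 100;

		/* buggy: huge unsigned value, check fails, bad memmove follows */
		printf("unsigned check bails out: %s\n",
		       ulen <= pos ? "yes" : "no");		/* no */
		/* fixed: the error is caught (the patch also returns it early) */
		printf("signed check bails out:   %s\n",
		       slen <= (ssize_t)pos ? "yes" : "no");	/* yes */
		return 0;
	}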
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 1d227b0fcf49..f7555fc25877 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -1756,7 +1756,7 @@ static void userfaultfd_show_fdinfo(struct seq_file *m, struct file *f)
1756 * protocols: aa:... bb:... 1756 * protocols: aa:... bb:...
1757 */ 1757 */
1758 seq_printf(m, "pending:\t%lu\ntotal:\t%lu\nAPI:\t%Lx:%x:%Lx\n", 1758 seq_printf(m, "pending:\t%lu\ntotal:\t%lu\nAPI:\t%Lx:%x:%Lx\n",
1759 pending, total, UFFD_API, UFFD_API_FEATURES, 1759 pending, total, UFFD_API, ctx->features,
1760 UFFD_API_IOCTLS|UFFD_API_RANGE_IOCTLS); 1760 UFFD_API_IOCTLS|UFFD_API_RANGE_IOCTLS);
1761} 1761}
1762#endif 1762#endif
diff --git a/fs/xfs/libxfs/xfs_dir2_priv.h b/fs/xfs/libxfs/xfs_dir2_priv.h
index d04547fcf274..39f8604f764e 100644
--- a/fs/xfs/libxfs/xfs_dir2_priv.h
+++ b/fs/xfs/libxfs/xfs_dir2_priv.h
@@ -125,6 +125,7 @@ extern int xfs_dir2_sf_create(struct xfs_da_args *args, xfs_ino_t pino);
125extern int xfs_dir2_sf_lookup(struct xfs_da_args *args); 125extern int xfs_dir2_sf_lookup(struct xfs_da_args *args);
126extern int xfs_dir2_sf_removename(struct xfs_da_args *args); 126extern int xfs_dir2_sf_removename(struct xfs_da_args *args);
127extern int xfs_dir2_sf_replace(struct xfs_da_args *args); 127extern int xfs_dir2_sf_replace(struct xfs_da_args *args);
128extern int xfs_dir2_sf_verify(struct xfs_inode *ip);
128 129
129/* xfs_dir2_readdir.c */ 130/* xfs_dir2_readdir.c */
130extern int xfs_readdir(struct xfs_inode *dp, struct dir_context *ctx, 131extern int xfs_readdir(struct xfs_inode *dp, struct dir_context *ctx,
diff --git a/fs/xfs/libxfs/xfs_dir2_sf.c b/fs/xfs/libxfs/xfs_dir2_sf.c
index c6809ff41197..e84af093b2ab 100644
--- a/fs/xfs/libxfs/xfs_dir2_sf.c
+++ b/fs/xfs/libxfs/xfs_dir2_sf.c
@@ -629,6 +629,112 @@ xfs_dir2_sf_check(
 }
 #endif	/* DEBUG */
 
+/* Verify the consistency of an inline directory. */
+int
+xfs_dir2_sf_verify(
+	struct xfs_inode *ip)
+{
+	struct xfs_mount *mp = ip->i_mount;
+	struct xfs_dir2_sf_hdr *sfp;
+	struct xfs_dir2_sf_entry *sfep;
+	struct xfs_dir2_sf_entry *next_sfep;
+	char *endp;
+	const struct xfs_dir_ops *dops;
+	struct xfs_ifork *ifp;
+	xfs_ino_t ino;
+	int i;
+	int i8count;
+	int offset;
+	int size;
+	int error;
+	__uint8_t filetype;
+
+	ASSERT(ip->i_d.di_format == XFS_DINODE_FMT_LOCAL);
+	/*
+	 * xfs_iread calls us before xfs_setup_inode sets up ip->d_ops,
+	 * so we can only trust the mountpoint to have the right pointer.
+	 */
+	dops = xfs_dir_get_ops(mp, NULL);
+
+	ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
+	sfp = (struct xfs_dir2_sf_hdr *)ifp->if_u1.if_data;
+	size = ifp->if_bytes;
+
+	/*
+	 * Give up if the directory is way too short.
+	 */
+	if (size <= offsetof(struct xfs_dir2_sf_hdr, parent) ||
+	    size < xfs_dir2_sf_hdr_size(sfp->i8count))
+		return -EFSCORRUPTED;
+
+	endp = (char *)sfp + size;
+
+	/* Check .. entry */
+	ino = dops->sf_get_parent_ino(sfp);
+	i8count = ino > XFS_DIR2_MAX_SHORT_INUM;
+	error = xfs_dir_ino_validate(mp, ino);
+	if (error)
+		return error;
+	offset = dops->data_first_offset;
+
+	/* Check all reported entries */
+	sfep = xfs_dir2_sf_firstentry(sfp);
+	for (i = 0; i < sfp->count; i++) {
+		/*
+		 * struct xfs_dir2_sf_entry has a variable length.
+		 * Check the fixed-offset parts of the structure are
+		 * within the data buffer.
+		 */
+		if (((char *)sfep + sizeof(*sfep)) >= endp)
+			return -EFSCORRUPTED;
+
+		/* Don't allow names with known bad length. */
+		if (sfep->namelen == 0)
+			return -EFSCORRUPTED;
+
+		/*
+		 * Check that the variable-length part of the structure is
+		 * within the data buffer.  The next entry starts after the
+		 * name component, so nextentry is an acceptable test.
+		 */
+		next_sfep = dops->sf_nextentry(sfp, sfep);
+		if (endp < (char *)next_sfep)
+			return -EFSCORRUPTED;
+
+		/* Check that the offsets always increase. */
+		if (xfs_dir2_sf_get_offset(sfep) < offset)
+			return -EFSCORRUPTED;
+
+		/* Check the inode number. */
+		ino = dops->sf_get_ino(sfp, sfep);
+		i8count += ino > XFS_DIR2_MAX_SHORT_INUM;
+		error = xfs_dir_ino_validate(mp, ino);
+		if (error)
+			return error;
+
+		/* Check the file type. */
+		filetype = dops->sf_get_ftype(sfep);
+		if (filetype >= XFS_DIR3_FT_MAX)
+			return -EFSCORRUPTED;
+
+		offset = xfs_dir2_sf_get_offset(sfep) +
+				dops->data_entsize(sfep->namelen);
+
+		sfep = next_sfep;
+	}
+	if (i8count != sfp->i8count)
+		return -EFSCORRUPTED;
+	if ((void *)sfep != (void *)endp)
+		return -EFSCORRUPTED;
+
+	/* Make sure this whole thing ought to be in local format. */
+	if (offset + (sfp->count + 2) * (uint)sizeof(xfs_dir2_leaf_entry_t) +
+	    (uint)sizeof(xfs_dir2_block_tail_t) > mp->m_dir_geo->blksize)
+		return -EFSCORRUPTED;
+
+	return 0;
+}
+
 /*
  * Create a new (shortform) directory.
  */
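The verifier added above treats the inline fork as an untrusted byte buffer: it checks the header size, walks every entry against an end-pointer sentinel, and finally confirms the walk consumed the buffer exactly. A minimal userspace sketch of the same bounds-walk pattern (hypothetical record layout, not the XFS structures):

    #include <stddef.h>
    #include <stdint.h>

    struct rec {
        uint8_t namelen;        /* length of the name that follows */
        char    name[];
    };

    /* Return 0 if buf holds exactly `count` well-formed records, -1 otherwise. */
    static int verify_recs(const char *buf, size_t size, unsigned int count)
    {
        const char *endp = buf + size;
        const char *p = buf;

        while (count--) {
            const struct rec *r = (const void *)p;

            if (p + sizeof(*r) > endp)      /* fixed part in bounds? */
                return -1;
            if (r->namelen == 0)            /* known-bad length */
                return -1;
            p += sizeof(*r) + r->namelen;   /* start of the next record */
            if (p > endp)                   /* variable part in bounds? */
                return -1;
        }
        return p == endp ? 0 : -1;          /* no trailing garbage allowed */
    }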
diff --git a/fs/xfs/libxfs/xfs_inode_fork.c b/fs/xfs/libxfs/xfs_inode_fork.c
index 25c1e078aef6..8a37efe04de3 100644
--- a/fs/xfs/libxfs/xfs_inode_fork.c
+++ b/fs/xfs/libxfs/xfs_inode_fork.c
@@ -33,6 +33,8 @@
 #include "xfs_trace.h"
 #include "xfs_attr_sf.h"
 #include "xfs_da_format.h"
+#include "xfs_da_btree.h"
+#include "xfs_dir2_priv.h"
 
 kmem_zone_t *xfs_ifork_zone;
 
@@ -210,6 +212,16 @@ xfs_iformat_fork(
 	if (error)
 		return error;
 
+	/* Check inline dir contents. */
+	if (S_ISDIR(VFS_I(ip)->i_mode) &&
+	    dip->di_format == XFS_DINODE_FMT_LOCAL) {
+		error = xfs_dir2_sf_verify(ip);
+		if (error) {
+			xfs_idestroy_fork(ip, XFS_DATA_FORK);
+			return error;
+		}
+	}
+
 	if (xfs_is_reflink_inode(ip)) {
 		ASSERT(ip->i_cowfp == NULL);
 		xfs_ifork_init_cow(ip);
@@ -320,7 +332,6 @@ xfs_iformat_local(
 	int whichfork,
 	int size)
 {
-
 	/*
 	 * If the size is unreasonable, then something
 	 * is wrong and we just bail out rather than crash in
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index 8b75dcea5966..828532ce0adc 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -1311,8 +1311,16 @@ xfs_free_file_space(
 	/*
 	 * Now that we've unmapped all full blocks we'll have to zero out any
 	 * partial block at the beginning and/or end.  xfs_zero_range is
-	 * smart enough to skip any holes, including those we just created.
+	 * smart enough to skip any holes, including those we just created,
+	 * but we must take care not to zero beyond EOF and enlarge i_size.
 	 */
+
+	if (offset >= XFS_ISIZE(ip))
+		return 0;
+
+	if (offset + len > XFS_ISIZE(ip))
+		len = XFS_ISIZE(ip) - offset;
+
 	return xfs_zero_range(ip, offset, len, NULL);
 }
 
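The two added checks clamp the zeroing range so xfs_zero_range() never touches bytes past EOF and thereby grows i_size. A worked example of the arithmetic, assuming a cached inode size of 1000 bytes:

    /* isize = 1000, punch at offset 900 for len 300 */
    if (offset >= isize)            /* 900 < 1000: still something to zero */
        return 0;
    if (offset + len > isize)       /* 900 + 300 = 1200 > 1000 */
        len = isize - offset;       /* clamp to 100 bytes, up to EOF only */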
diff --git a/fs/xfs/xfs_dir2_readdir.c b/fs/xfs/xfs_dir2_readdir.c
index 003a99b83bd8..ad9396e516f6 100644
--- a/fs/xfs/xfs_dir2_readdir.c
+++ b/fs/xfs/xfs_dir2_readdir.c
@@ -71,22 +71,11 @@ xfs_dir2_sf_getdents(
 	struct xfs_da_geometry *geo = args->geo;
 
 	ASSERT(dp->i_df.if_flags & XFS_IFINLINE);
-	/*
-	 * Give up if the directory is way too short.
-	 */
-	if (dp->i_d.di_size < offsetof(xfs_dir2_sf_hdr_t, parent)) {
-		ASSERT(XFS_FORCED_SHUTDOWN(dp->i_mount));
-		return -EIO;
-	}
-
 	ASSERT(dp->i_df.if_bytes == dp->i_d.di_size);
 	ASSERT(dp->i_df.if_u1.if_data != NULL);
 
 	sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
 
-	if (dp->i_d.di_size < xfs_dir2_sf_hdr_size(sfp->i8count))
-		return -EFSCORRUPTED;
-
 	/*
 	 * If the block number in the offset is out of range, we're done.
 	 */
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 7eaf1ef74e3c..7605d8396596 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -50,6 +50,7 @@
 #include "xfs_log.h"
 #include "xfs_bmap_btree.h"
 #include "xfs_reflink.h"
+#include "xfs_dir2_priv.h"
 
 kmem_zone_t *xfs_inode_zone;
 
@@ -3546,6 +3547,12 @@ xfs_iflush_int(
 	if (ip->i_d.di_version < 3)
 		ip->i_d.di_flushiter++;
 
+	/* Check the inline directory data. */
+	if (S_ISDIR(VFS_I(ip)->i_mode) &&
+	    ip->i_d.di_format == XFS_DINODE_FMT_LOCAL &&
+	    xfs_dir2_sf_verify(ip))
+		goto corrupt_out;
+
 	/*
 	 * Copy the dirty parts of the inode into the on-disk inode.  We always
 	 * copy out the core of the inode, because if the inode is dirty at all
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index 229cc6a6d8ef..ebfc13350f9a 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -516,6 +516,20 @@ xfs_vn_getattr(
 	stat->blocks =
 		XFS_FSB_TO_BB(mp, ip->i_d.di_nblocks + ip->i_delayed_blks);
 
+	if (ip->i_d.di_version == 3) {
+		if (request_mask & STATX_BTIME) {
+			stat->result_mask |= STATX_BTIME;
+			stat->btime.tv_sec = ip->i_d.di_crtime.t_sec;
+			stat->btime.tv_nsec = ip->i_d.di_crtime.t_nsec;
+		}
+	}
+
+	if (ip->i_d.di_flags & XFS_DIFLAG_IMMUTABLE)
+		stat->attributes |= STATX_ATTR_IMMUTABLE;
+	if (ip->i_d.di_flags & XFS_DIFLAG_APPEND)
+		stat->attributes |= STATX_ATTR_APPEND;
+	if (ip->i_d.di_flags & XFS_DIFLAG_NODUMP)
+		stat->attributes |= STATX_ATTR_NODUMP;
 
 	switch (inode->i_mode & S_IFMT) {
 	case S_IFBLK:
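This hunk wires the v3 inode creation time and the DIFLAG bits into the fields that the new statx(2) system call reports. A hedged userspace sketch of reading them back (assumes a libc that wraps statx, e.g. glibc 2.28+):

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/stat.h>

    int main(void)
    {
        struct statx stx;

        /* Request the birth time; stx_mask reports what was actually filled in. */
        if (statx(AT_FDCWD, ".", 0, STATX_BTIME, &stx) != 0)
            return 1;
        if (stx.stx_mask & STATX_BTIME)
            printf("btime: %lld\n", (long long)stx.stx_btime.tv_sec);
        if (stx.stx_attributes & STATX_ATTR_IMMUTABLE)
            printf("file is immutable\n");
        return 0;
    }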
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index 2a6d9b1558e0..26d67ce3c18d 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -583,7 +583,7 @@ xfs_inumbers(
 		return error;
 
 	bcount = MIN(left, (int)(PAGE_SIZE / sizeof(*buffer)));
-	buffer = kmem_alloc(bcount * sizeof(*buffer), KM_SLEEP);
+	buffer = kmem_zalloc(bcount * sizeof(*buffer), KM_SLEEP);
 	do {
 		struct xfs_inobt_rec_incore r;
 		int stat;
diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h
index 4df64a1fc09e..532372c6cf15 100644
--- a/include/asm-generic/sections.h
+++ b/include/asm-generic/sections.h
@@ -14,8 +14,8 @@
  * [_sdata, _edata]: contains .data.* sections, may also contain .rodata.*
  *                   and/or .init.* sections.
  * [__start_rodata, __end_rodata]: contains .rodata.* sections
- * [__start_data_ro_after_init, __end_data_ro_after_init]:
- *                   contains data.ro_after_init section
+ * [__start_ro_after_init, __end_ro_after_init]:
+ *                   contains .data..ro_after_init section
  * [__init_begin, __init_end]: contains .init.* sections, but .init.text.*
  *                   may be out of this range on some architectures.
  * [_sinittext, _einittext]: contains .init.text.* sections
@@ -33,7 +33,7 @@ extern char _data[], _sdata[], _edata[];
 extern char __bss_start[], __bss_stop[];
 extern char __init_begin[], __init_end[];
 extern char _sinittext[], _einittext[];
-extern char __start_data_ro_after_init[], __end_data_ro_after_init[];
+extern char __start_ro_after_init[], __end_ro_after_init[];
 extern char _end[];
 extern char __per_cpu_load[], __per_cpu_start[], __per_cpu_end[];
 extern char __kprobes_text_start[], __kprobes_text_end[];
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 0968d13b3885..143db9c523e2 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -173,6 +173,7 @@
 	KEEP(*(__##name##_of_table_end))
 
 #define CLKSRC_OF_TABLES()	OF_TABLE(CONFIG_CLKSRC_OF, clksrc)
+#define CLKEVT_OF_TABLES()	OF_TABLE(CONFIG_CLKEVT_OF, clkevt)
 #define IRQCHIP_OF_MATCH_TABLE() OF_TABLE(CONFIG_IRQCHIP, irqchip)
 #define CLK_OF_TABLES()		OF_TABLE(CONFIG_COMMON_CLK, clk)
 #define IOMMU_OF_TABLES()	OF_TABLE(CONFIG_OF_IOMMU, iommu)
@@ -260,9 +261,9 @@
  */
 #ifndef RO_AFTER_INIT_DATA
 #define RO_AFTER_INIT_DATA						\
-	__start_data_ro_after_init = .;					\
+	VMLINUX_SYMBOL(__start_ro_after_init) = .;			\
 	*(.data..ro_after_init)						\
-	__end_data_ro_after_init = .;
+	VMLINUX_SYMBOL(__end_ro_after_init) = .;
 #endif
 
 /*
@@ -559,6 +560,7 @@
 	CLK_OF_TABLES()							\
 	RESERVEDMEM_OF_TABLES()						\
 	CLKSRC_OF_TABLES()						\
+	CLKEVT_OF_TABLES()						\
 	IOMMU_OF_TABLES()						\
 	CPU_METHOD_OF_TABLES()						\
 	CPUIDLE_METHOD_OF_TABLES()					\
diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h
index a2bfd7843f18..e2b9c6fe2714 100644
--- a/include/crypto/if_alg.h
+++ b/include/crypto/if_alg.h
@@ -73,7 +73,7 @@ int af_alg_unregister_type(const struct af_alg_type *type);
 
 int af_alg_release(struct socket *sock);
 void af_alg_release_parent(struct sock *sk);
-int af_alg_accept(struct sock *sk, struct socket *newsock);
+int af_alg_accept(struct sock *sk, struct socket *newsock, bool kern);
 
 int af_alg_make_sg(struct af_alg_sgl *sgl, struct iov_iter *iter, int len);
 void af_alg_free_sg(struct af_alg_sgl *sgl);
diff --git a/include/drm/ttm/ttm_object.h b/include/drm/ttm/ttm_object.h
index ed953f98f0e1..1487011fe057 100644
--- a/include/drm/ttm/ttm_object.h
+++ b/include/drm/ttm/ttm_object.h
@@ -229,6 +229,8 @@ extern void ttm_base_object_unref(struct ttm_base_object **p_base);
  * @ref_type: The type of reference.
  * @existed: Upon completion, indicates that an identical reference object
  * already existed, and the refcount was upped on that object instead.
+ * @require_existed: Fail with -EPERM if an identical ref object didn't
+ * already exist.
  *
  * Checks that the base object is shareable and adds a ref object to it.
  *
@@ -243,7 +245,8 @@ extern void ttm_base_object_unref(struct ttm_base_object **p_base);
  */
 extern int ttm_ref_object_add(struct ttm_object_file *tfile,
 			      struct ttm_base_object *base,
-			      enum ttm_ref_type ref_type, bool *existed);
+			      enum ttm_ref_type ref_type, bool *existed,
+			      bool require_existed);
 
 extern bool ttm_ref_object_exists(struct ttm_object_file *tfile,
 				  struct ttm_base_object *base);
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index b72dd2ad5f44..c0b3d999c266 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -295,6 +295,7 @@ void kvm_vgic_vcpu_early_init(struct kvm_vcpu *vcpu);
 void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu);
 int kvm_vgic_map_resources(struct kvm *kvm);
 int kvm_vgic_hyp_init(void);
+void kvm_vgic_init_cpu_hardware(void);
 
 int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
 			bool level);
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 673acda012af..9b05886f9773 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -287,18 +287,15 @@ static inline bool invalid_phys_cpuid(phys_cpuid_t phys_id)
 }
 
 /* Validate the processor object's proc_id */
-bool acpi_processor_validate_proc_id(int proc_id);
+bool acpi_duplicate_processor_id(int proc_id);
 
 #ifdef CONFIG_ACPI_HOTPLUG_CPU
 /* Arch dependent functions for cpu hotplug support */
 int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, u32 acpi_id,
 		 int *pcpu);
 int acpi_unmap_cpu(int cpu);
-int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid);
 #endif /* CONFIG_ACPI_HOTPLUG_CPU */
 
-void acpi_set_processor_mapping(void);
-
 #ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
 int acpi_get_ioapic_id(acpi_handle handle, u32 gsi_base, u64 *phys_addr);
 #endif
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index b296a9006117..9382c5da7a2e 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -51,6 +51,7 @@ struct blk_mq_hw_ctx {
 
 	atomic_t		nr_active;
 
+	struct delayed_work	delayed_run_work;
 	struct delayed_work	delay_work;
 
 	struct hlist_node	cpuhp_dead;
@@ -238,6 +239,7 @@ void blk_mq_stop_hw_queues(struct request_queue *q);
 void blk_mq_start_hw_queues(struct request_queue *q);
 void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
+void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
 void blk_mq_run_hw_queues(struct request_queue *q, bool async);
 void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
 void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 5a7da607ca04..7548f332121a 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -610,7 +610,6 @@ struct request_queue {
 #define QUEUE_FLAG_FLUSH_NQ    25	/* flush not queueable */
 #define QUEUE_FLAG_DAX         26	/* device supports DAX */
 #define QUEUE_FLAG_STATS       27	/* track rq completion times */
-#define QUEUE_FLAG_RESTART     28	/* queue needs restart at completion */
 
 #define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
 				 (1 << QUEUE_FLAG_STACKABLE)	|	\
diff --git a/include/linux/ccp.h b/include/linux/ccp.h
index c71dd8fa5764..c41b8d99dd0e 100644
--- a/include/linux/ccp.h
+++ b/include/linux/ccp.h
@@ -556,7 +556,7 @@ enum ccp_engine {
  * struct ccp_cmd - CCP operation request
  * @entry: list element (ccp driver use only)
  * @work: work element used for callbacks (ccp driver use only)
- * @ccp: CCP device to be run on (ccp driver use only)
+ * @ccp: CCP device to be run on
  * @ret: operation return code (ccp driver use only)
  * @flags: cmd processing flags
  * @engine: CCP operation to perform
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h
index 5d3053c34fb3..6d7edc3082f9 100644
--- a/include/linux/clockchips.h
+++ b/include/linux/clockchips.h
@@ -229,7 +229,7 @@ static inline void tick_setup_hrtimer_broadcast(void) { }
 
 #ifdef CONFIG_CLKEVT_PROBE
 extern int clockevent_probe(void);
-#els
+#else
 static inline int clockevent_probe(void) { return 0; }
 #endif
 
diff --git a/include/linux/dccp.h b/include/linux/dccp.h
index 61d042bbbf60..68449293c4b6 100644
--- a/include/linux/dccp.h
+++ b/include/linux/dccp.h
@@ -163,6 +163,7 @@ struct dccp_request_sock {
 	__u64			 dreq_isr;
 	__u64			 dreq_gsr;
 	__be32			 dreq_service;
+	spinlock_t		 dreq_lock;
 	struct list_head	 dreq_featneg;
 	__u32			 dreq_timestamp_echo;
 	__u32			 dreq_timestamp_time;
diff --git a/include/linux/device.h b/include/linux/device.h
index 30c4570e928d..9ef518af5515 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -1140,7 +1140,6 @@ static inline bool device_supports_offline(struct device *dev)
 extern void lock_device_hotplug(void);
 extern void unlock_device_hotplug(void);
 extern int lock_device_hotplug_sysfs(void);
-void assert_held_device_hotplug(void);
 extern int device_offline(struct device *dev);
 extern int device_online(struct device *dev);
 extern void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index aebecc4ed088..22d39e8d4de1 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -211,7 +211,7 @@ extern ssize_t elv_iosched_show(struct request_queue *, char *);
 extern ssize_t elv_iosched_store(struct request_queue *, const char *, size_t);
 
 extern int elevator_init(struct request_queue *, char *);
-extern void elevator_exit(struct elevator_queue *);
+extern void elevator_exit(struct request_queue *, struct elevator_queue *);
 extern int elevator_change(struct request_queue *, const char *);
 extern bool elv_bio_merge_ok(struct request *, struct bio *);
 extern struct elevator_queue *elevator_alloc(struct request_queue *,
diff --git a/include/linux/errqueue.h b/include/linux/errqueue.h
index 9ca23fcfb5d7..6fdfc884fdeb 100644
--- a/include/linux/errqueue.h
+++ b/include/linux/errqueue.h
@@ -20,6 +20,8 @@ struct sock_exterr_skb {
 	struct sock_extended_err	ee;
 	u16				addr_offset;
 	__be16				port;
+	u8				opt_stats:1,
+					unused:7;
 };
 
 #endif
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 0c167fdee5f7..fbf7b39e8103 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -409,6 +409,7 @@ struct bpf_prog {
 	u16			pages;		/* Number of allocated pages */
 	kmemcheck_bitfield_begin(meta);
 	u16			jited:1,	/* Is our filter JIT'ed? */
+				locked:1,	/* Program image locked? */
 				gpl_compatible:1, /* Is filter GPL compatible? */
 				cb_access:1,	/* Is control block accessed? */
 				dst_needed:1,	/* Do we need dst entry? */
@@ -554,22 +555,29 @@ static inline bool bpf_prog_was_classic(const struct bpf_prog *prog)
 #ifdef CONFIG_ARCH_HAS_SET_MEMORY
 static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
 {
-	set_memory_ro((unsigned long)fp, fp->pages);
+	fp->locked = 1;
+	WARN_ON_ONCE(set_memory_ro((unsigned long)fp, fp->pages));
 }
 
 static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
 {
-	set_memory_rw((unsigned long)fp, fp->pages);
+	if (fp->locked) {
+		WARN_ON_ONCE(set_memory_rw((unsigned long)fp, fp->pages));
+		/* In case set_memory_rw() fails, we want to be the first
+		 * to crash here instead of some random place later on.
+		 */
+		fp->locked = 0;
+	}
 }
 
 static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
 {
-	set_memory_ro((unsigned long)hdr, hdr->pages);
+	WARN_ON_ONCE(set_memory_ro((unsigned long)hdr, hdr->pages));
 }
 
 static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr)
 {
-	set_memory_rw((unsigned long)hdr, hdr->pages);
+	WARN_ON_ONCE(set_memory_rw((unsigned long)hdr, hdr->pages));
 }
 #else
 static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
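The new locked bit pairs every successful lock with at most one unlock, so a program image is never flipped back to read-write unless it was actually made read-only. A hedged userspace analogue of the same guard, built on mprotect() (the struct and names are illustrative, not kernel API):

    #include <stddef.h>
    #include <sys/mman.h>

    struct image {
        void   *mem;    /* page-aligned allocation */
        size_t  size;   /* multiple of the page size */
        int     locked; /* mirrors the bpf_prog::locked bit */
    };

    static void image_lock_ro(struct image *img)
    {
        img->locked = 1;
        mprotect(img->mem, img->size, PROT_READ);
    }

    static void image_unlock_ro(struct image *img)
    {
        /* Only flip pages back if we actually locked them earlier. */
        if (img->locked) {
            mprotect(img->mem, img->size, PROT_READ | PROT_WRITE);
            img->locked = 0;
        }
    }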
diff --git a/include/linux/fscrypt_common.h b/include/linux/fscrypt_common.h
index 547f81592ba1..10c1abfbac6c 100644
--- a/include/linux/fscrypt_common.h
+++ b/include/linux/fscrypt_common.h
@@ -87,7 +87,6 @@ struct fscrypt_operations {
 	unsigned int flags;
 	const char *key_prefix;
 	int (*get_context)(struct inode *, void *, size_t);
-	int (*prepare_context)(struct inode *);
 	int (*set_context)(struct inode *, const void *, size_t, void *);
 	int (*dummy_context)(struct inode *);
 	bool (*is_encrypted)(struct inode *);
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
index 2484b2fcc6eb..933d93656605 100644
--- a/include/linux/gpio/consumer.h
+++ b/include/linux/gpio/consumer.h
@@ -143,15 +143,6 @@ struct gpio_desc *devm_fwnode_get_index_gpiod_from_child(struct device *dev,
 						struct fwnode_handle *child,
 						enum gpiod_flags flags,
 						const char *label);
-/* FIXME: delete this helper when users are switched over */
-static inline struct gpio_desc *devm_get_gpiod_from_child(struct device *dev,
-		const char *con_id, struct fwnode_handle *child)
-{
-	return devm_fwnode_get_index_gpiod_from_child(dev, con_id,
-						      0, child,
-						      GPIOD_ASIS,
-						      "?");
-}
 
 #else /* CONFIG_GPIOLIB */
 
@@ -444,13 +435,6 @@ struct gpio_desc *devm_fwnode_get_index_gpiod_from_child(struct device *dev,
 	return ERR_PTR(-ENOSYS);
 }
 
-/* FIXME: delete this when all users are switched over */
-static inline struct gpio_desc *devm_get_gpiod_from_child(struct device *dev,
-		const char *con_id, struct fwnode_handle *child)
-{
-	return ERR_PTR(-ENOSYS);
-}
-
 #endif /* CONFIG_GPIOLIB */
 
 static inline
diff --git a/include/linux/hwmon.h b/include/linux/hwmon.h
index 78d59dba563e..88b673749121 100644
--- a/include/linux/hwmon.h
+++ b/include/linux/hwmon.h
@@ -88,6 +88,7 @@ enum hwmon_temp_attributes {
 #define HWMON_T_CRIT_HYST	BIT(hwmon_temp_crit_hyst)
 #define HWMON_T_EMERGENCY	BIT(hwmon_temp_emergency)
 #define HWMON_T_EMERGENCY_HYST	BIT(hwmon_temp_emergency_hyst)
+#define HWMON_T_ALARM		BIT(hwmon_temp_alarm)
 #define HWMON_T_MIN_ALARM	BIT(hwmon_temp_min_alarm)
 #define HWMON_T_MAX_ALARM	BIT(hwmon_temp_max_alarm)
 #define HWMON_T_CRIT_ALARM	BIT(hwmon_temp_crit_alarm)
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 62bbf3c1aa4a..970771a5f739 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -845,6 +845,13 @@ struct vmbus_channel {
845 * link up channels based on their CPU affinity. 845 * link up channels based on their CPU affinity.
846 */ 846 */
847 struct list_head percpu_list; 847 struct list_head percpu_list;
848
849 /*
850 * Defer freeing channel until after all cpu's have
851 * gone through grace period.
852 */
853 struct rcu_head rcu;
854
848 /* 855 /*
849 * For performance critical channels (storage, networking 856 * For performance critical channels (storage, networking
850 * etc,), Hyper-V has a mechanism to enhance the throughput 857 * etc,), Hyper-V has a mechanism to enhance the throughput
@@ -1430,9 +1437,6 @@ extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf,
 				const int *srv_version, int srv_vercnt,
 				int *nego_fw_version, int *nego_srv_version);
 
-void hv_event_tasklet_disable(struct vmbus_channel *channel);
-void hv_event_tasklet_enable(struct vmbus_channel *channel);
-
 void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid);
 
 void vmbus_setevent(struct vmbus_channel *channel);
diff --git a/include/linux/iio/sw_device.h b/include/linux/iio/sw_device.h
index 23ca41515527..fa7931933067 100644
--- a/include/linux/iio/sw_device.h
+++ b/include/linux/iio/sw_device.h
@@ -62,7 +62,7 @@ void iio_swd_group_init_type_name(struct iio_sw_device *d,
 					const char *name,
 					struct config_item_type *type)
 {
-#ifdef CONFIG_CONFIGFS_FS
+#if IS_ENABLED(CONFIG_CONFIGFS_FS)
 	config_group_init_type_name(&d->group, name, type);
 #endif
 }
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 6a6de187ddc0..2e4de0deee53 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -125,9 +125,16 @@ enum iommu_attr {
 };
 
 /* These are the possible reserved region types */
-#define IOMMU_RESV_DIRECT	(1 << 0)
-#define IOMMU_RESV_RESERVED	(1 << 1)
-#define IOMMU_RESV_MSI		(1 << 2)
+enum iommu_resv_type {
+	/* Memory regions which must be mapped 1:1 at all times */
+	IOMMU_RESV_DIRECT,
+	/* Arbitrary "never map this or give it to a device" address ranges */
+	IOMMU_RESV_RESERVED,
+	/* Hardware MSI region (untranslated) */
+	IOMMU_RESV_MSI,
+	/* Software-managed MSI translation window */
+	IOMMU_RESV_SW_MSI,
+};
 
 /**
  * struct iommu_resv_region - descriptor for a reserved memory region
@@ -142,7 +149,7 @@ struct iommu_resv_region {
 	phys_addr_t		start;
 	size_t			length;
 	int			prot;
-	int			type;
+	enum iommu_resv_type	type;
 };
 
 #ifdef CONFIG_IOMMU_API
@@ -288,7 +295,8 @@ extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
 extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
 extern int iommu_request_dm_for_dev(struct device *dev);
 extern struct iommu_resv_region *
-iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot, int type);
+iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot,
+			enum iommu_resv_type type);
 extern int iommu_get_group_resv_regions(struct iommu_group *group,
 					struct list_head *head);
 
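With the region types now an enum, a driver advertises its software-managed MSI window through the same callback as before. A hedged sketch of a get_resv_regions() implementation (driver name and addresses are made up; it assumes struct iommu_resv_region carries a list head, as the iommu core's region lists imply):

    static void foo_iommu_get_resv_regions(struct device *dev,
                                           struct list_head *head)
    {
        struct iommu_resv_region *region;

        /* Reserve a 1 MiB window for software-managed MSI translation. */
        region = iommu_alloc_resv_region(0x08000000, 0x00100000,
                                         IOMMU_READ | IOMMU_WRITE,
                                         IOMMU_RESV_SW_MSI);
        if (!region)
            return;
        list_add_tail(&region->list, head);
    }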
diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
index eafc965b3eb8..dc30f3d057eb 100644
--- a/include/linux/irqchip/arm-gic.h
+++ b/include/linux/irqchip/arm-gic.h
@@ -96,6 +96,9 @@
 #define GICH_MISR_EOI		(1 << 0)
 #define GICH_MISR_U		(1 << 1)
 
+#define GICV_PMR_PRIORITY_SHIFT	3
+#define GICV_PMR_PRIORITY_MASK	(0x1f << GICV_PMR_PRIORITY_SHIFT)
+
 #ifndef __ASSEMBLY__
 
 #include <linux/irqdomain.h>
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 1c823bef4c15..a5c7046f26b4 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -6,6 +6,7 @@
 struct kmem_cache;
 struct page;
 struct vm_struct;
+struct task_struct;
 
 #ifdef CONFIG_KASAN
 
@@ -75,6 +76,9 @@ size_t ksize(const void *);
 static inline void kasan_unpoison_slab(const void *ptr) { ksize(ptr); }
 size_t kasan_metadata_size(struct kmem_cache *cache);
 
+bool kasan_save_enable_multi_shot(void);
+void kasan_restore_multi_shot(bool enabled);
+
 #else /* CONFIG_KASAN */
 
 static inline void kasan_unpoison_shadow(const void *address, size_t size) {}
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 2c14ad9809da..d0250744507a 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -162,8 +162,8 @@ int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
162 int len, void *val); 162 int len, void *val);
163int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, 163int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
164 int len, struct kvm_io_device *dev); 164 int len, struct kvm_io_device *dev);
165int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, 165void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
166 struct kvm_io_device *dev); 166 struct kvm_io_device *dev);
167struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx, 167struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
168 gpa_t addr); 168 gpa_t addr);
169 169
diff --git a/include/linux/list_nulls.h b/include/linux/list_nulls.h
index b01fe1009084..87ff4f58a2f0 100644
--- a/include/linux/list_nulls.h
+++ b/include/linux/list_nulls.h
@@ -29,6 +29,11 @@ struct hlist_nulls_node {
 	((ptr)->first = (struct hlist_nulls_node *) NULLS_MARKER(nulls))
 
 #define hlist_nulls_entry(ptr, type, member) container_of(ptr,type,member)
+
+#define hlist_nulls_entry_safe(ptr, type, member) \
+	({ typeof(ptr) ____ptr = (ptr); \
+	   !is_a_nulls(____ptr) ? hlist_nulls_entry(____ptr, type, member) : NULL; \
+	})
 /**
  * ptr_is_a_nulls - Test if a ptr is a nulls
  * @ptr: ptr to be tested
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 5af377303880..bb7250c45cb8 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -740,6 +740,12 @@ static inline bool mem_cgroup_oom_synchronize(bool wait)
 	return false;
 }
 
+static inline void mem_cgroup_update_page_stat(struct page *page,
+					       enum mem_cgroup_stat_index idx,
+					       int nr)
+{
+}
+
 static inline void mem_cgroup_inc_page_stat(struct page *page,
 					    enum mem_cgroup_stat_index idx)
 {
diff --git a/include/linux/mfd/cros_ec.h b/include/linux/mfd/cros_ec.h
index 7a01c94496f1..3eef9fb9968a 100644
--- a/include/linux/mfd/cros_ec.h
+++ b/include/linux/mfd/cros_ec.h
@@ -35,10 +35,11 @@
  * Max bus-specific overhead incurred by request/responses.
  * I2C requires 1 additional byte for requests.
  * I2C requires 2 additional bytes for responses.
+ * SPI requires up to 32 additional bytes for responses.
  * */
 #define EC_PROTO_VERSION_UNKNOWN	0
 #define EC_MAX_REQUEST_OVERHEAD		1
-#define EC_MAX_RESPONSE_OVERHEAD	2
+#define EC_MAX_RESPONSE_OVERHEAD	32
 
 /*
  * Command interface between EC and AP, for LPC, I2C and SPI interfaces.
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 7e66e4f62858..1beb1ec2fbdf 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -476,6 +476,7 @@ enum {
 enum {
 	MLX4_INTERFACE_STATE_UP		= 1 << 0,
 	MLX4_INTERFACE_STATE_DELETION	= 1 << 1,
+	MLX4_INTERFACE_STATE_NOWAIT	= 1 << 2,
 };
 
 #define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 5f01c88f0800..00a8fa7e366a 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -32,6 +32,8 @@ struct user_struct;
 struct writeback_control;
 struct bdi_writeback;
 
+void init_mm_internals(void);
+
 #ifndef CONFIG_NEED_MULTIPLE_NODES	/* Don't use mapnrs, do it properly */
 extern unsigned long max_mapnr;
 
diff --git a/include/linux/net.h b/include/linux/net.h
index cd0c8bd0a1de..0620f5e18c96 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -146,7 +146,7 @@ struct proto_ops {
 	int		(*socketpair)(struct socket *sock1,
 				      struct socket *sock2);
 	int		(*accept)    (struct socket *sock,
-				      struct socket *newsock, int flags);
+				      struct socket *newsock, int flags, bool kern);
 	int		(*getname)   (struct socket *sock,
 				      struct sockaddr *addr,
 				      int *sockaddr_len, int peer);
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index c43d435d4225..9061780b141f 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -64,26 +64,26 @@ enum {
64 * RDMA_QPTYPE field 64 * RDMA_QPTYPE field
65 */ 65 */
66enum { 66enum {
67 NVMF_RDMA_QPTYPE_CONNECTED = 0, /* Reliable Connected */ 67 NVMF_RDMA_QPTYPE_CONNECTED = 1, /* Reliable Connected */
68 NVMF_RDMA_QPTYPE_DATAGRAM = 1, /* Reliable Datagram */ 68 NVMF_RDMA_QPTYPE_DATAGRAM = 2, /* Reliable Datagram */
69}; 69};
70 70
71/* RDMA QP Service Type codes for Discovery Log Page entry TSAS 71/* RDMA QP Service Type codes for Discovery Log Page entry TSAS
72 * RDMA_QPTYPE field 72 * RDMA_QPTYPE field
73 */ 73 */
74enum { 74enum {
75 NVMF_RDMA_PRTYPE_NOT_SPECIFIED = 0, /* No Provider Specified */ 75 NVMF_RDMA_PRTYPE_NOT_SPECIFIED = 1, /* No Provider Specified */
76 NVMF_RDMA_PRTYPE_IB = 1, /* InfiniBand */ 76 NVMF_RDMA_PRTYPE_IB = 2, /* InfiniBand */
77 NVMF_RDMA_PRTYPE_ROCE = 2, /* InfiniBand RoCE */ 77 NVMF_RDMA_PRTYPE_ROCE = 3, /* InfiniBand RoCE */
78 NVMF_RDMA_PRTYPE_ROCEV2 = 3, /* InfiniBand RoCEV2 */ 78 NVMF_RDMA_PRTYPE_ROCEV2 = 4, /* InfiniBand RoCEV2 */
79 NVMF_RDMA_PRTYPE_IWARP = 4, /* IWARP */ 79 NVMF_RDMA_PRTYPE_IWARP = 5, /* IWARP */
80}; 80};
81 81
82/* RDMA Connection Management Service Type codes for Discovery Log Page 82/* RDMA Connection Management Service Type codes for Discovery Log Page
83 * entry TSAS RDMA_CMS field 83 * entry TSAS RDMA_CMS field
84 */ 84 */
85enum { 85enum {
86 NVMF_RDMA_CMS_RDMA_CM = 0, /* Sockets based enpoint addressing */ 86 NVMF_RDMA_CMS_RDMA_CM = 1, /* Sockets based endpoint addressing */
87}; 87};
88 88
89#define NVMF_AQ_DEPTH 32 89#define NVMF_AQ_DEPTH 32
diff --git a/include/linux/omap-gpmc.h b/include/linux/omap-gpmc.h
index 35d0fd7a4948..fd0de00c0d77 100644
--- a/include/linux/omap-gpmc.h
+++ b/include/linux/omap-gpmc.h
@@ -76,22 +76,12 @@ struct gpmc_timings;
 struct omap_nand_platform_data;
 struct omap_onenand_platform_data;
 
-#if IS_ENABLED(CONFIG_MTD_NAND_OMAP2)
-extern int gpmc_nand_init(struct omap_nand_platform_data *d,
-			  struct gpmc_timings *gpmc_t);
-#else
-static inline int gpmc_nand_init(struct omap_nand_platform_data *d,
-				 struct gpmc_timings *gpmc_t)
-{
-	return 0;
-}
-#endif
-
 #if IS_ENABLED(CONFIG_MTD_ONENAND_OMAP2)
-extern void gpmc_onenand_init(struct omap_onenand_platform_data *d);
+extern int gpmc_onenand_init(struct omap_onenand_platform_data *d);
 #else
 #define board_onenand_data	NULL
-static inline void gpmc_onenand_init(struct omap_onenand_platform_data *d)
+static inline int gpmc_onenand_init(struct omap_onenand_platform_data *d)
 {
+	return 0;
 }
 #endif
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 772476028a65..43a774873aa9 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -837,6 +837,10 @@ int genphy_read_status(struct phy_device *phydev);
 int genphy_suspend(struct phy_device *phydev);
 int genphy_resume(struct phy_device *phydev);
 int genphy_soft_reset(struct phy_device *phydev);
+static inline int genphy_no_soft_reset(struct phy_device *phydev)
+{
+	return 0;
+}
 void phy_driver_unregister(struct phy_driver *drv);
 void phy_drivers_unregister(struct phy_driver *drv, int n);
 int phy_driver_register(struct phy_driver *new_driver, struct module *owner);
diff --git a/include/linux/pinctrl/pinctrl.h b/include/linux/pinctrl/pinctrl.h
index 8ce2d87a238b..5e45385c5bdc 100644
--- a/include/linux/pinctrl/pinctrl.h
+++ b/include/linux/pinctrl/pinctrl.h
@@ -145,8 +145,9 @@ struct pinctrl_desc {
 extern int pinctrl_register_and_init(struct pinctrl_desc *pctldesc,
 				     struct device *dev, void *driver_data,
 				     struct pinctrl_dev **pctldev);
+extern int pinctrl_enable(struct pinctrl_dev *pctldev);
 
-/* Please use pinctrl_register_and_init() instead */
+/* Please use pinctrl_register_and_init() and pinctrl_enable() instead */
 extern struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc,
 					    struct device *dev, void *driver_data);
 
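pinctrl_enable() completes the split begun by pinctrl_register_and_init(): registration no longer implicitly activates the controller, so a probe routine can finish its own setup in between. A hedged probe sketch (the foo_* names are hypothetical):

    static int foo_pinctrl_probe(struct platform_device *pdev)
    {
        struct foo_pinctrl *pctl;
        int ret;

        pctl = devm_kzalloc(&pdev->dev, sizeof(*pctl), GFP_KERNEL);
        if (!pctl)
            return -ENOMEM;

        ret = pinctrl_register_and_init(&foo_desc, &pdev->dev, pctl,
                                        &pctl->pctldev);
        if (ret)
            return ret;

        /* ... remaining hardware setup that must precede pin requests ... */

        return pinctrl_enable(pctl->pctldev);
    }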
diff --git a/include/linux/rculist_nulls.h b/include/linux/rculist_nulls.h
index 4ae95f7e8597..a23a33153180 100644
--- a/include/linux/rculist_nulls.h
+++ b/include/linux/rculist_nulls.h
@@ -156,5 +156,19 @@ static inline void hlist_nulls_add_tail_rcu(struct hlist_nulls_node *n,
 		({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); 1; }); \
 		pos = rcu_dereference_raw(hlist_nulls_next_rcu(pos)))
 
+/**
+ * hlist_nulls_for_each_entry_safe -
+ *   iterate over list of given type safe against removal of list entry
+ * @tpos:	the type * to use as a loop cursor.
+ * @pos:	the &struct hlist_nulls_node to use as a loop cursor.
+ * @head:	the head for your list.
+ * @member:	the name of the hlist_nulls_node within the struct.
+ */
+#define hlist_nulls_for_each_entry_safe(tpos, pos, head, member)		\
+	for (({barrier();}),							\
+	     pos = rcu_dereference_raw(hlist_nulls_first_rcu(head));		\
+	     (!is_a_nulls(pos)) &&						\
+	     ({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member);		\
+		pos = rcu_dereference_raw(hlist_nulls_next_rcu(pos)); 1; });)
 #endif
 #endif
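The _safe iterator caches the next pointer before the loop body runs, so the current entry may be unhashed and freed in place. A hedged usage sketch (struct item is illustrative and assumes an rcu_head member for kfree_rcu):

    struct item {
        struct hlist_nulls_node node;
        struct rcu_head         rcu;
        /* ... payload ... */
    };

    static void drain_chain(struct hlist_nulls_head *head)
    {
        struct item *it;
        struct hlist_nulls_node *pos;

        hlist_nulls_for_each_entry_safe(it, pos, head, node) {
            hlist_nulls_del_rcu(&it->node);
            kfree_rcu(it, rcu);     /* safe: pos already points past it */
        }
    }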
diff --git a/include/linux/reset.h b/include/linux/reset.h
index 86b4ed75359e..96fb139bdd08 100644
--- a/include/linux/reset.h
+++ b/include/linux/reset.h
@@ -31,31 +31,26 @@ static inline int device_reset_optional(struct device *dev)
 
 static inline int reset_control_reset(struct reset_control *rstc)
 {
-	WARN_ON(1);
 	return 0;
 }
 
 static inline int reset_control_assert(struct reset_control *rstc)
 {
-	WARN_ON(1);
 	return 0;
 }
 
 static inline int reset_control_deassert(struct reset_control *rstc)
 {
-	WARN_ON(1);
 	return 0;
 }
 
 static inline int reset_control_status(struct reset_control *rstc)
 {
-	WARN_ON(1);
 	return 0;
 }
 
 static inline void reset_control_put(struct reset_control *rstc)
 {
-	WARN_ON(1);
 }
 
 static inline int __must_check device_reset(struct device *dev)
@@ -74,14 +69,14 @@ static inline struct reset_control *__of_reset_control_get(
 		const char *id, int index, bool shared,
 		bool optional)
 {
-	return ERR_PTR(-ENOTSUPP);
+	return optional ? NULL : ERR_PTR(-ENOTSUPP);
 }
 
 static inline struct reset_control *__devm_reset_control_get(
 		struct device *dev, const char *id,
 		int index, bool shared, bool optional)
 {
-	return ERR_PTR(-ENOTSUPP);
+	return optional ? NULL : ERR_PTR(-ENOTSUPP);
 }
 
 #endif /* CONFIG_RESET_CONTROLLER */
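Dropping the WARN_ON(1) from the stubs and returning NULL for optional lookups makes the optional-reset pattern composable: when no controller is compiled in, or no reset line is wired up, the consumer just carries a NULL handle around. A hedged consumer sketch:

    struct reset_control *rst;

    rst = devm_reset_control_get_optional(dev, "phy");
    if (IS_ERR(rst))
        return PTR_ERR(rst);    /* a real error, e.g. -EPROBE_DEFER */

    /* rst may be NULL here; these calls then degrade to no-ops. */
    reset_control_assert(rst);
    usleep_range(10, 20);
    reset_control_deassert(rst);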
diff --git a/include/linux/sched/clock.h b/include/linux/sched/clock.h
index 4a68c6791207..34fe92ce1ebd 100644
--- a/include/linux/sched/clock.h
+++ b/include/linux/sched/clock.h
@@ -54,15 +54,16 @@ static inline u64 local_clock(void)
 }
 #else
 extern void sched_clock_init_late(void);
-/*
- * Architectures can set this to 1 if they have specified
- * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
- * but then during bootup it turns out that sched_clock()
- * is reliable after all:
- */
 extern int sched_clock_stable(void);
 extern void clear_sched_clock_stable(void);
 
+/*
+ * When sched_clock_stable(), __sched_clock_offset provides the offset
+ * between local_clock() and sched_clock().
+ */
+extern u64 __sched_clock_offset;
+
+
 extern void sched_clock_tick(void);
 extern void sched_clock_idle_sleep_event(void);
 extern void sched_clock_idle_wakeup_event(u64 delta_ns);
diff --git a/include/linux/stat.h b/include/linux/stat.h
index c76e524fb34b..64b6b3aece21 100644
--- a/include/linux/stat.h
+++ b/include/linux/stat.h
@@ -26,6 +26,7 @@ struct kstat {
 	unsigned int	nlink;
 	uint32_t	blksize;	/* Preferred I/O size */
 	u64		attributes;
+	u64		attributes_mask;
 #define KSTAT_ATTR_FS_IOC_FLAGS				\
 	(STATX_ATTR_COMPRESSED |			\
 	 STATX_ATTR_IMMUTABLE |				\
diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h
index 1d0043dc34e4..de2a722fe3cf 100644
--- a/include/linux/usb/quirks.h
+++ b/include/linux/usb/quirks.h
@@ -50,4 +50,10 @@
 /* device can't handle Link Power Management */
 #define USB_QUIRK_NO_LPM			BIT(10)
 
+/*
+ * Device reports its bInterval as linear frames instead of the
+ * USB 2.0 calculation.
+ */
+#define USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL	BIT(11)
+
 #endif  /* __LINUX_USB_QUIRKS_H */
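For a worked example of what this quirk distinguishes: the USB 2.0 spec encodes a high-speed interrupt endpoint's polling period as an exponent, while quirky devices report a linear frame count. A sketch of the two interpretations for bInterval = 5:

    unsigned int binterval = 5;

    unsigned int spec_uframes  = 1U << (binterval - 1);  /* 2^(5-1) = 16 microframes = 2 ms */
    unsigned int quirk_uframes = binterval * 8;          /* 5 frames = 40 microframes = 5 ms */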
diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h
index 9638bfeb0d1f..584f9a647ad4 100644
--- a/include/linux/virtio_vsock.h
+++ b/include/linux/virtio_vsock.h
@@ -48,6 +48,8 @@ struct virtio_vsock_pkt {
 	struct virtio_vsock_hdr	hdr;
 	struct work_struct work;
 	struct list_head list;
+	/* socket refcnt not held, only use for cancellation */
+	struct vsock_sock *vsk;
 	void *buf;
 	u32 len;
 	u32 off;
@@ -56,6 +58,7 @@ struct virtio_vsock_pkt {
 
 struct virtio_vsock_pkt_info {
 	u32 remote_cid, remote_port;
+	struct vsock_sock *vsk;
 	struct msghdr *msg;
 	u32 pkt_len;
 	u16 type;
diff --git a/include/net/af_vsock.h b/include/net/af_vsock.h
index f2758964ce6f..f32ed9ac181a 100644
--- a/include/net/af_vsock.h
+++ b/include/net/af_vsock.h
@@ -100,6 +100,9 @@ struct vsock_transport {
 	void (*destruct)(struct vsock_sock *);
 	void (*release)(struct vsock_sock *);
 
+	/* Cancel all pending packets sent on vsock. */
+	int (*cancel_pkt)(struct vsock_sock *vsk);
+
 	/* Connections. */
 	int (*connect)(struct vsock_sock *);
 
diff --git a/include/net/inet_common.h b/include/net/inet_common.h
index b7952d55b9c0..f39ae697347f 100644
--- a/include/net/inet_common.h
+++ b/include/net/inet_common.h
@@ -20,7 +20,8 @@ int __inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
 			    int addr_len, int flags, int is_sendmsg);
 int inet_dgram_connect(struct socket *sock, struct sockaddr *uaddr,
 		       int addr_len, int flags);
-int inet_accept(struct socket *sock, struct socket *newsock, int flags);
+int inet_accept(struct socket *sock, struct socket *newsock, int flags,
+		bool kern);
 int inet_sendmsg(struct socket *sock, struct msghdr *msg, size_t size);
 ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset,
 		      size_t size, int flags);
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 826f198374f8..c7a577976bec 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -258,7 +258,7 @@ inet_csk_rto_backoff(const struct inet_connection_sock *icsk,
 	return (unsigned long)min_t(u64, when, max_when);
 }
 
-struct sock *inet_csk_accept(struct sock *sk, int flags, int *err);
+struct sock *inet_csk_accept(struct sock *sk, int flags, int *err, bool kern);
 
 int inet_csk_get_port(struct sock *sk, unsigned short snum);
 
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index f540f9ad2af4..19605878da47 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -244,7 +244,7 @@ extern s32 (*nf_ct_nat_offset)(const struct nf_conn *ct,
 			       u32 seq);
 
 /* Fake conntrack entry for untracked connections */
-DECLARE_PER_CPU(struct nf_conn, nf_conntrack_untracked);
+DECLARE_PER_CPU_ALIGNED(struct nf_conn, nf_conntrack_untracked);
 static inline struct nf_conn *nf_ct_untracked_get(void)
 {
 	return raw_cpu_ptr(&nf_conntrack_untracked);
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 2aa8a9d80fbe..0136028652bd 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -103,6 +103,35 @@ struct nft_regs {
 	};
 };
 
+/* Store/load a u16 or u8 integer to/from the u32 data register.
+ *
+ * Note, when using concatenations, register allocation happens at the
+ * 32-bit level, so the store helpers pad the rest of the register with
+ * zeroes to avoid garbage values.
+ */
+
+static inline void nft_reg_store16(u32 *dreg, u16 val)
+{
+	*dreg = 0;
+	*(u16 *)dreg = val;
+}
+
+static inline void nft_reg_store8(u32 *dreg, u8 val)
+{
+	*dreg = 0;
+	*(u8 *)dreg = val;
+}
+
+static inline u16 nft_reg_load16(u32 *sreg)
+{
+	return *(u16 *)sreg;
+}
+
+static inline u8 nft_reg_load8(u32 *sreg)
+{
+	return *(u8 *)sreg;
+}
+
 static inline void nft_data_copy(u32 *dst, const struct nft_data *src,
 				 unsigned int len)
 {
@@ -203,7 +232,6 @@ struct nft_set_elem {
 struct nft_set;
 struct nft_set_iter {
 	u8		genmask;
-	bool		flush;
 	unsigned int	count;
 	unsigned int	skip;
 	int		err;
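A hedged sketch of the new store helpers in use inside an expression's eval hook: writing a 16-bit TCP port into a 32-bit register zeroes the whole register first, so a later 32-bit comparison (for instance under a concatenation) sees no stale upper bits. The priv/regs names follow the usual nft expression layout and tcph is assumed to point at a parsed TCP header:

    u32 *dest = &regs->data[priv->dreg];

    nft_reg_store16(dest, tcph->dest);  /* low 16 bits = port, upper bits zeroed */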
diff --git a/include/net/netfilter/nf_tables_ipv6.h b/include/net/netfilter/nf_tables_ipv6.h
index d150b5066201..97983d1c05e4 100644
--- a/include/net/netfilter/nf_tables_ipv6.h
+++ b/include/net/netfilter/nf_tables_ipv6.h
@@ -9,12 +9,13 @@ nft_set_pktinfo_ipv6(struct nft_pktinfo *pkt,
 				struct sk_buff *skb,
 				const struct nf_hook_state *state)
 {
+	unsigned int flags = IP6_FH_F_AUTH;
 	int protohdr, thoff = 0;
 	unsigned short frag_off;
 
 	nft_set_pktinfo(pkt, skb, state);
 
-	protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, NULL);
+	protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, &flags);
 	if (protohdr < 0) {
 		nft_set_pktinfo_proto_unspec(pkt, skb);
 		return;
@@ -32,6 +33,7 @@ __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt,
 				const struct nf_hook_state *state)
 {
 #if IS_ENABLED(CONFIG_IPV6)
+	unsigned int flags = IP6_FH_F_AUTH;
 	struct ipv6hdr *ip6h, _ip6h;
 	unsigned int thoff = 0;
 	unsigned short frag_off;
@@ -50,7 +52,7 @@ __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt,
 	if (pkt_len + sizeof(*ip6h) > skb->len)
 		return -1;
 
-	protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, NULL);
+	protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, &flags);
 	if (protohdr < 0)
 		return -1;
 
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 1f71ee5ab518..069582ee5d7f 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -448,10 +448,9 @@ static inline int sctp_frag_point(const struct sctp_association *asoc, int pmtu)
 	return frag;
 }
 
-static inline void sctp_assoc_pending_pmtu(struct sock *sk, struct sctp_association *asoc)
+static inline void sctp_assoc_pending_pmtu(struct sctp_association *asoc)
 {
-
-	sctp_assoc_sync_pmtu(sk, asoc);
+	sctp_assoc_sync_pmtu(asoc);
 	asoc->pmtu_pending = 0;
 }
 
@@ -596,12 +595,23 @@ static inline void sctp_v4_map_v6(union sctp_addr *addr)
596 */ 595 */
597static inline struct dst_entry *sctp_transport_dst_check(struct sctp_transport *t) 596static inline struct dst_entry *sctp_transport_dst_check(struct sctp_transport *t)
598{ 597{
599 if (t->dst && (!dst_check(t->dst, t->dst_cookie) || 598 if (t->dst && !dst_check(t->dst, t->dst_cookie))
600 t->pathmtu != max_t(size_t, SCTP_TRUNC4(dst_mtu(t->dst)),
601 SCTP_DEFAULT_MINSEGMENT)))
602 sctp_transport_dst_release(t); 599 sctp_transport_dst_release(t);
603 600
604 return t->dst; 601 return t->dst;
605} 602}
606 603
604static inline bool sctp_transport_pmtu_check(struct sctp_transport *t)
605{
606 __u32 pmtu = max_t(size_t, SCTP_TRUNC4(dst_mtu(t->dst)),
607 SCTP_DEFAULT_MINSEGMENT);
608
609 if (t->pathmtu == pmtu)
610 return true;
611
612 t->pathmtu = pmtu;
613
614 return false;
615}
616
607#endif /* __net_sctp_h__ */ 617#endif /* __net_sctp_h__ */
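The sctp change above splits two previously entangled checks: route validity (sctp_transport_dst_check) and path-MTU synchronization (sctp_transport_pmtu_check), with the latter updating t->pathmtu and reporting whether it was already in sync. A simplified standalone model of the new pmtu check (types and the 512-byte floor are illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    #define TRUNC4(x)   ((x) & ~3u)   /* cf. SCTP_TRUNC4() */
    #define MIN_SEGMENT 512u          /* cf. SCTP_DEFAULT_MINSEGMENT */

    struct transport { unsigned int pathmtu; };

    static bool pmtu_check(struct transport *t, unsigned int dst_mtu)
    {
        unsigned int pmtu = TRUNC4(dst_mtu);

        if (pmtu < MIN_SEGMENT)
            pmtu = MIN_SEGMENT;
        if (t->pathmtu == pmtu)
            return true;     /* already in sync */
        t->pathmtu = pmtu;   /* sync and report the change */
        return false;
    }

    int main(void)
    {
        struct transport t = { .pathmtu = 1500 };

        printf("in sync: %d\n", pmtu_check(&t, 1400)); /* 0: updated */
        printf("in sync: %d\n", pmtu_check(&t, 1400)); /* 1 */
        return 0;
    }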
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index a244db5e5ff7..138f8615acf0 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -83,6 +83,7 @@ struct sctp_bind_addr;
83struct sctp_ulpq; 83struct sctp_ulpq;
84struct sctp_ep_common; 84struct sctp_ep_common;
85struct crypto_shash; 85struct crypto_shash;
86struct sctp_stream;
86 87
87 88
88#include <net/sctp/tsnmap.h> 89#include <net/sctp/tsnmap.h>
@@ -376,7 +377,8 @@ typedef struct sctp_sender_hb_info {
376 __u64 hb_nonce; 377 __u64 hb_nonce;
377} sctp_sender_hb_info_t; 378} sctp_sender_hb_info_t;
378 379
379struct sctp_stream *sctp_stream_new(__u16 incnt, __u16 outcnt, gfp_t gfp); 380int sctp_stream_new(struct sctp_association *asoc, gfp_t gfp);
381int sctp_stream_init(struct sctp_association *asoc, gfp_t gfp);
380void sctp_stream_free(struct sctp_stream *stream); 382void sctp_stream_free(struct sctp_stream *stream);
381void sctp_stream_clear(struct sctp_stream *stream); 383void sctp_stream_clear(struct sctp_stream *stream);
382 384
@@ -476,7 +478,8 @@ struct sctp_pf {
476 int (*send_verify) (struct sctp_sock *, union sctp_addr *); 478 int (*send_verify) (struct sctp_sock *, union sctp_addr *);
477 int (*supported_addrs)(const struct sctp_sock *, __be16 *); 479 int (*supported_addrs)(const struct sctp_sock *, __be16 *);
478 struct sock *(*create_accept_sk) (struct sock *sk, 480 struct sock *(*create_accept_sk) (struct sock *sk,
479 struct sctp_association *asoc); 481 struct sctp_association *asoc,
482 bool kern);
480 int (*addr_to_user)(struct sctp_sock *sk, union sctp_addr *addr); 483 int (*addr_to_user)(struct sctp_sock *sk, union sctp_addr *addr);
481 void (*to_sk_saddr)(union sctp_addr *, struct sock *sk); 484 void (*to_sk_saddr)(union sctp_addr *, struct sock *sk);
482 void (*to_sk_daddr)(union sctp_addr *, struct sock *sk); 485 void (*to_sk_daddr)(union sctp_addr *, struct sock *sk);
@@ -497,7 +500,6 @@ struct sctp_datamsg {
497 /* Did the message fail to send? */ 500 /* Did the message fail to send? */
498 int send_error; 501 int send_error;
499 u8 send_failed:1, 502 u8 send_failed:1,
500 force_delay:1,
501 can_delay; /* should this message be Nagle delayed */ 503 can_delay; /* should this message be Nagle delayed */
502}; 504};
503 505
@@ -752,6 +754,8 @@ struct sctp_transport {
752 /* Is the Path MTU update pending on this transport */ 754 /* Is the Path MTU update pending on this transport */
753 pmtu_pending:1, 755 pmtu_pending:1,
754 756
757 dst_pending_confirm:1, /* need to confirm neighbour */
758
755 /* Has this transport moved the ctsn since we last sacked */ 759 /* Has this transport moved the ctsn since we last sacked */
756 sack_generation:1; 760 sack_generation:1;
757 u32 dst_cookie; 761 u32 dst_cookie;
@@ -805,8 +809,6 @@ struct sctp_transport {
805 809
806 __u32 burst_limited; /* Holds old cwnd when max.burst is applied */ 810 __u32 burst_limited; /* Holds old cwnd when max.burst is applied */
807 811
808 __u32 dst_pending_confirm; /* need to confirm neighbour */
809
810 /* Destination */ 812 /* Destination */
811 struct dst_entry *dst; 813 struct dst_entry *dst;
812 /* Source address. */ 814 /* Source address. */
@@ -950,8 +952,8 @@ void sctp_transport_lower_cwnd(struct sctp_transport *, sctp_lower_cwnd_t);
950void sctp_transport_burst_limited(struct sctp_transport *); 952void sctp_transport_burst_limited(struct sctp_transport *);
951void sctp_transport_burst_reset(struct sctp_transport *); 953void sctp_transport_burst_reset(struct sctp_transport *);
952unsigned long sctp_transport_timeout(struct sctp_transport *); 954unsigned long sctp_transport_timeout(struct sctp_transport *);
953void sctp_transport_reset(struct sctp_transport *); 955void sctp_transport_reset(struct sctp_transport *t);
954void sctp_transport_update_pmtu(struct sock *, struct sctp_transport *, u32); 956void sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu);
955void sctp_transport_immediate_rtx(struct sctp_transport *); 957void sctp_transport_immediate_rtx(struct sctp_transport *);
956void sctp_transport_dst_release(struct sctp_transport *t); 958void sctp_transport_dst_release(struct sctp_transport *t);
957void sctp_transport_dst_confirm(struct sctp_transport *t); 959void sctp_transport_dst_confirm(struct sctp_transport *t);
@@ -1876,6 +1878,7 @@ struct sctp_association {
1876 1878
1877 __u8 need_ecne:1, /* Need to send an ECNE Chunk? */ 1879 __u8 need_ecne:1, /* Need to send an ECNE Chunk? */
1878 temp:1, /* Is it a temporary association? */ 1880 temp:1, /* Is it a temporary association? */
1881 force_delay:1,
1879 prsctp_enable:1, 1882 prsctp_enable:1,
1880 reconf_enable:1; 1883 reconf_enable:1;
1881 1884
@@ -1951,7 +1954,7 @@ void sctp_assoc_update(struct sctp_association *old,
1951 1954
1952__u32 sctp_association_get_next_tsn(struct sctp_association *); 1955__u32 sctp_association_get_next_tsn(struct sctp_association *);
1953 1956
1954void sctp_assoc_sync_pmtu(struct sock *, struct sctp_association *); 1957void sctp_assoc_sync_pmtu(struct sctp_association *asoc);
1955void sctp_assoc_rwnd_increase(struct sctp_association *, unsigned int); 1958void sctp_assoc_rwnd_increase(struct sctp_association *, unsigned int);
1956void sctp_assoc_rwnd_decrease(struct sctp_association *, unsigned int); 1959void sctp_assoc_rwnd_decrease(struct sctp_association *, unsigned int);
1957void sctp_assoc_set_primary(struct sctp_association *, 1960void sctp_assoc_set_primary(struct sctp_association *,
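One pattern worth noting in the sctp_transport hunks above: dst_pending_confirm moves from a standalone __u32 into the existing one-bit flag group next to pmtu_pending. A quick (compiler-dependent) illustration of why a single-bit flag should live in the bitfield rather than in its own word:

    #include <stdio.h>

    struct with_u32 { unsigned int a:1, b:1; unsigned int flag; };
    struct with_bit { unsigned int a:1, b:1, flag:1; };

    int main(void)
    {
        printf("separate u32: %zu bytes\n", sizeof(struct with_u32));
        printf("packed bit:   %zu bytes\n", sizeof(struct with_bit));
        return 0;
    }

On common ABIs this prints 8 versus 4 bytes; in a structure as hot as struct sctp_transport the difference is worth keeping.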
diff --git a/include/net/sock.h b/include/net/sock.h
index 5e5997654db6..03252d53975d 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -236,6 +236,7 @@ struct sock_common {
236 * @sk_shutdown: mask of %SEND_SHUTDOWN and/or %RCV_SHUTDOWN 236 * @sk_shutdown: mask of %SEND_SHUTDOWN and/or %RCV_SHUTDOWN
237 * @sk_userlocks: %SO_SNDBUF and %SO_RCVBUF settings 237 * @sk_userlocks: %SO_SNDBUF and %SO_RCVBUF settings
238 * @sk_lock: synchronizer 238 * @sk_lock: synchronizer
239 * @sk_kern_sock: True if sock is using kernel lock classes
239 * @sk_rcvbuf: size of receive buffer in bytes 240 * @sk_rcvbuf: size of receive buffer in bytes
240 * @sk_wq: sock wait queue and async head 241 * @sk_wq: sock wait queue and async head
241 * @sk_rx_dst: receive input route used by early demux 242 * @sk_rx_dst: receive input route used by early demux
@@ -430,7 +431,8 @@ struct sock {
430#endif 431#endif
431 432
432 kmemcheck_bitfield_begin(flags); 433 kmemcheck_bitfield_begin(flags);
433 unsigned int sk_padding : 2, 434 unsigned int sk_padding : 1,
435 sk_kern_sock : 1,
434 sk_no_check_tx : 1, 436 sk_no_check_tx : 1,
435 sk_no_check_rx : 1, 437 sk_no_check_rx : 1,
436 sk_userlocks : 4, 438 sk_userlocks : 4,
@@ -1015,7 +1017,8 @@ struct proto {
1015 int addr_len); 1017 int addr_len);
1016 int (*disconnect)(struct sock *sk, int flags); 1018 int (*disconnect)(struct sock *sk, int flags);
1017 1019
1018 struct sock * (*accept)(struct sock *sk, int flags, int *err); 1020 struct sock * (*accept)(struct sock *sk, int flags, int *err,
1021 bool kern);
1019 1022
1020 int (*ioctl)(struct sock *sk, int cmd, 1023 int (*ioctl)(struct sock *sk, int cmd,
1021 unsigned long arg); 1024 unsigned long arg);
@@ -1573,7 +1576,7 @@ int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
1573int sock_no_bind(struct socket *, struct sockaddr *, int); 1576int sock_no_bind(struct socket *, struct sockaddr *, int);
1574int sock_no_connect(struct socket *, struct sockaddr *, int, int); 1577int sock_no_connect(struct socket *, struct sockaddr *, int, int);
1575int sock_no_socketpair(struct socket *, struct socket *); 1578int sock_no_socketpair(struct socket *, struct socket *);
1576int sock_no_accept(struct socket *, struct socket *, int); 1579int sock_no_accept(struct socket *, struct socket *, int, bool);
1577int sock_no_getname(struct socket *, struct sockaddr *, int *, int); 1580int sock_no_getname(struct socket *, struct sockaddr *, int *, int);
1578unsigned int sock_no_poll(struct file *, struct socket *, 1581unsigned int sock_no_poll(struct file *, struct socket *,
1579 struct poll_table_struct *); 1582 struct poll_table_struct *);
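The new "bool kern" threaded through proto->accept() and sock_no_accept() pairs with the sk_kern_sock bit added above: an accepted socket must know whether its listener was created by the kernel so it can pick the right lock class. A simplified model of passing that flag through an ops table (the types here are stand-ins, not the kernel's struct proto):

    #include <stdbool.h>
    #include <stdio.h>

    struct sock { bool kern_sock; };

    struct proto_ops {
        struct sock *(*accept)(struct sock *listener, bool kern);
    };

    static struct sock child;

    static struct sock *my_accept(struct sock *listener, bool kern)
    {
        (void)listener;
        child.kern_sock = kern;  /* classify like the caller */
        return &child;
    }

    int main(void)
    {
        struct proto_ops ops = { .accept = my_accept };
        struct sock listener = { .kern_sock = true };

        /* kernel-internal accept passes true; syscalls pass false */
        printf("kern: %d\n", ops.accept(&listener, true)->kern_sock);
        return 0;
    }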
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 0f1813c13687..99e4423eb2b8 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -1863,6 +1863,9 @@ struct ib_port_immutable {
1863}; 1863};
1864 1864
1865struct ib_device { 1865struct ib_device {
1866 /* Do not access @dma_device directly from ULP nor from HW drivers. */
1867 struct device *dma_device;
1868
1866 char name[IB_DEVICE_NAME_MAX]; 1869 char name[IB_DEVICE_NAME_MAX];
1867 1870
1868 struct list_head event_handler_list; 1871 struct list_head event_handler_list;
@@ -3007,7 +3010,7 @@ static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
3007 */ 3010 */
3008static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr) 3011static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
3009{ 3012{
3010 return dma_mapping_error(&dev->dev, dma_addr); 3013 return dma_mapping_error(dev->dma_device, dma_addr);
3011} 3014}
3012 3015
3013/** 3016/**
@@ -3021,7 +3024,7 @@ static inline u64 ib_dma_map_single(struct ib_device *dev,
3021 void *cpu_addr, size_t size, 3024 void *cpu_addr, size_t size,
3022 enum dma_data_direction direction) 3025 enum dma_data_direction direction)
3023{ 3026{
3024 return dma_map_single(&dev->dev, cpu_addr, size, direction); 3027 return dma_map_single(dev->dma_device, cpu_addr, size, direction);
3025} 3028}
3026 3029
3027/** 3030/**
@@ -3035,7 +3038,7 @@ static inline void ib_dma_unmap_single(struct ib_device *dev,
3035 u64 addr, size_t size, 3038 u64 addr, size_t size,
3036 enum dma_data_direction direction) 3039 enum dma_data_direction direction)
3037{ 3040{
3038 dma_unmap_single(&dev->dev, addr, size, direction); 3041 dma_unmap_single(dev->dma_device, addr, size, direction);
3039} 3042}
3040 3043
3041/** 3044/**
@@ -3052,7 +3055,7 @@ static inline u64 ib_dma_map_page(struct ib_device *dev,
3052 size_t size, 3055 size_t size,
3053 enum dma_data_direction direction) 3056 enum dma_data_direction direction)
3054{ 3057{
3055 return dma_map_page(&dev->dev, page, offset, size, direction); 3058 return dma_map_page(dev->dma_device, page, offset, size, direction);
3056} 3059}
3057 3060
3058/** 3061/**
@@ -3066,7 +3069,7 @@ static inline void ib_dma_unmap_page(struct ib_device *dev,
3066 u64 addr, size_t size, 3069 u64 addr, size_t size,
3067 enum dma_data_direction direction) 3070 enum dma_data_direction direction)
3068{ 3071{
3069 dma_unmap_page(&dev->dev, addr, size, direction); 3072 dma_unmap_page(dev->dma_device, addr, size, direction);
3070} 3073}
3071 3074
3072/** 3075/**
@@ -3080,7 +3083,7 @@ static inline int ib_dma_map_sg(struct ib_device *dev,
3080 struct scatterlist *sg, int nents, 3083 struct scatterlist *sg, int nents,
3081 enum dma_data_direction direction) 3084 enum dma_data_direction direction)
3082{ 3085{
3083 return dma_map_sg(&dev->dev, sg, nents, direction); 3086 return dma_map_sg(dev->dma_device, sg, nents, direction);
3084} 3087}
3085 3088
3086/** 3089/**
@@ -3094,7 +3097,7 @@ static inline void ib_dma_unmap_sg(struct ib_device *dev,
3094 struct scatterlist *sg, int nents, 3097 struct scatterlist *sg, int nents,
3095 enum dma_data_direction direction) 3098 enum dma_data_direction direction)
3096{ 3099{
3097 dma_unmap_sg(&dev->dev, sg, nents, direction); 3100 dma_unmap_sg(dev->dma_device, sg, nents, direction);
3098} 3101}
3099 3102
3100static inline int ib_dma_map_sg_attrs(struct ib_device *dev, 3103static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
@@ -3102,7 +3105,8 @@ static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
3102 enum dma_data_direction direction, 3105 enum dma_data_direction direction,
3103 unsigned long dma_attrs) 3106 unsigned long dma_attrs)
3104{ 3107{
3105 return dma_map_sg_attrs(&dev->dev, sg, nents, direction, dma_attrs); 3108 return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
3109 dma_attrs);
3106} 3110}
3107 3111
3108static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev, 3112static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
@@ -3110,7 +3114,7 @@ static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
3110 enum dma_data_direction direction, 3114 enum dma_data_direction direction,
3111 unsigned long dma_attrs) 3115 unsigned long dma_attrs)
3112{ 3116{
3113 dma_unmap_sg_attrs(&dev->dev, sg, nents, direction, dma_attrs); 3117 dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, dma_attrs);
3114} 3118}
3115/** 3119/**
3116 * ib_sg_dma_address - Return the DMA address from a scatter/gather entry 3120 * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
@@ -3152,7 +3156,7 @@ static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
3152 size_t size, 3156 size_t size,
3153 enum dma_data_direction dir) 3157 enum dma_data_direction dir)
3154{ 3158{
3155 dma_sync_single_for_cpu(&dev->dev, addr, size, dir); 3159 dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
3156} 3160}
3157 3161
3158/** 3162/**
@@ -3167,7 +3171,7 @@ static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
3167 size_t size, 3171 size_t size,
3168 enum dma_data_direction dir) 3172 enum dma_data_direction dir)
3169{ 3173{
3170 dma_sync_single_for_device(&dev->dev, addr, size, dir); 3174 dma_sync_single_for_device(dev->dma_device, addr, size, dir);
3171} 3175}
3172 3176
3173/** 3177/**
@@ -3182,7 +3186,7 @@ static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
3182 dma_addr_t *dma_handle, 3186 dma_addr_t *dma_handle,
3183 gfp_t flag) 3187 gfp_t flag)
3184{ 3188{
3185 return dma_alloc_coherent(&dev->dev, size, dma_handle, flag); 3189 return dma_alloc_coherent(dev->dma_device, size, dma_handle, flag);
3186} 3190}
3187 3191
3188/** 3192/**
@@ -3196,7 +3200,7 @@ static inline void ib_dma_free_coherent(struct ib_device *dev,
3196 size_t size, void *cpu_addr, 3200 size_t size, void *cpu_addr,
3197 dma_addr_t dma_handle) 3201 dma_addr_t dma_handle)
3198{ 3202{
3199 dma_free_coherent(&dev->dev, size, cpu_addr, dma_handle); 3203 dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
3200} 3204}
3201 3205
3202/** 3206/**
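All of the ib_dma_*() edits above are the same substitution: route DMA mapping through the explicitly-set dma_device pointer instead of assuming &dev->dev is the device that was configured for DMA (for many HCAs the DMA-capable device is the parent PCI device). A toy model of the indirection (simplified types, not the verbs API):

    #include <stdio.h>

    struct device { const char *name; };

    struct ib_device {
        struct device *dma_device;  /* may point at the PCI parent */
        struct device  dev;         /* the IB core's own device */
    };

    static void dma_map(struct device *d)
    {
        printf("map via %s\n", d->name);
    }

    int main(void)
    {
        struct device pci = { "pci-parent" };
        struct ib_device ib = { .dma_device = &pci,
                                .dev = { "ib-core" } };

        dma_map(ib.dma_device);  /* the DMA-configured device wins */
        return 0;
    }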
diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
index b0e275de6dec..583875ea136a 100644
--- a/include/scsi/libiscsi.h
+++ b/include/scsi/libiscsi.h
@@ -196,6 +196,7 @@ struct iscsi_conn {
196 struct iscsi_task *task; /* xmit task in progress */ 196 struct iscsi_task *task; /* xmit task in progress */
197 197
198 /* xmit */ 198 /* xmit */
199 spinlock_t taskqueuelock; /* protects the next three lists */
199 struct list_head mgmtqueue; /* mgmt (control) xmit queue */ 200 struct list_head mgmtqueue; /* mgmt (control) xmit queue */
200 struct list_head cmdqueue; /* data-path cmd queue */ 201 struct list_head cmdqueue; /* data-path cmd queue */
201 struct list_head requeue; /* tasks needing another run */ 202 struct list_head requeue; /* tasks needing another run */
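The new taskqueuelock above gives the mgmtqueue/cmdqueue/requeue lists a dedicated lock, so task submission and the xmit worker can no longer race on the list heads. A minimal userspace analogue of the locking discipline (a pthread mutex standing in for the kernel spinlock):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t taskqueuelock = PTHREAD_MUTEX_INITIALIZER;
    static int cmdqueue_len;  /* stand-in for the cmdqueue list */

    static void queue_cmd(void)
    {
        pthread_mutex_lock(&taskqueuelock);
        cmdqueue_len++;       /* list_add_tail() in the kernel */
        pthread_mutex_unlock(&taskqueuelock);
    }

    int main(void)
    {
        queue_cmd();
        printf("queued: %d\n", cmdqueue_len);
        return 0;
    }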
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 6f22b39f1b0c..080c7ce9bae8 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -472,6 +472,10 @@ static inline int scsi_device_created(struct scsi_device *sdev)
472 sdev->sdev_state == SDEV_CREATED_BLOCK; 472 sdev->sdev_state == SDEV_CREATED_BLOCK;
473} 473}
474 474
475int scsi_internal_device_block(struct scsi_device *sdev, bool wait);
476int scsi_internal_device_unblock(struct scsi_device *sdev,
477 enum scsi_device_state new_state);
478
475/* accessor functions for the SCSI parameters */ 479/* accessor functions for the SCSI parameters */
476static inline int scsi_device_sync(struct scsi_device *sdev) 480static inline int scsi_device_sync(struct scsi_device *sdev)
477{ 481{
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
index b54b98dc2d4a..1b0f447ce850 100644
--- a/include/target/target_core_backend.h
+++ b/include/target/target_core_backend.h
@@ -4,7 +4,12 @@
4#include <linux/types.h> 4#include <linux/types.h>
5#include <target/target_core_base.h> 5#include <target/target_core_base.h>
6 6
7#define TRANSPORT_FLAG_PASSTHROUGH 1 7#define TRANSPORT_FLAG_PASSTHROUGH 0x1
8/*
9 * ALUA commands, state checks and setup operations are handled by the
10 * backend module.
11 */
12#define TRANSPORT_FLAG_PASSTHROUGH_ALUA 0x2
8 13
9struct request_queue; 14struct request_queue;
10struct scatterlist; 15struct scatterlist;
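TRANSPORT_FLAG_PASSTHROUGH changes from 1 to 0x1 alongside the new TRANSPORT_FLAG_PASSTHROUGH_ALUA (0x2) so the two remain distinct, OR-able bits. The usual bitmask pattern, for reference:

    #include <stdio.h>

    #define FLAG_PASSTHROUGH      0x1
    #define FLAG_PASSTHROUGH_ALUA 0x2

    int main(void)
    {
        unsigned int flags = FLAG_PASSTHROUGH | FLAG_PASSTHROUGH_ALUA;

        if (flags & FLAG_PASSTHROUGH_ALUA)
            printf("backend handles ALUA itself\n");
        return 0;
    }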
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 37c274e61acc..4b784b6e21c0 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -299,7 +299,7 @@ struct t10_alua_tg_pt_gp {
299 struct list_head tg_pt_gp_lun_list; 299 struct list_head tg_pt_gp_lun_list;
300 struct se_lun *tg_pt_gp_alua_lun; 300 struct se_lun *tg_pt_gp_alua_lun;
301 struct se_node_acl *tg_pt_gp_alua_nacl; 301 struct se_node_acl *tg_pt_gp_alua_nacl;
302 struct delayed_work tg_pt_gp_transition_work; 302 struct work_struct tg_pt_gp_transition_work;
303 struct completion *tg_pt_gp_transition_complete; 303 struct completion *tg_pt_gp_transition_complete;
304}; 304};
305 305
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index 9b1462e38b82..a076cf1a3a23 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -730,9 +730,11 @@ __SYSCALL(__NR_pkey_mprotect, sys_pkey_mprotect)
730__SYSCALL(__NR_pkey_alloc, sys_pkey_alloc) 730__SYSCALL(__NR_pkey_alloc, sys_pkey_alloc)
731#define __NR_pkey_free 290 731#define __NR_pkey_free 290
732__SYSCALL(__NR_pkey_free, sys_pkey_free) 732__SYSCALL(__NR_pkey_free, sys_pkey_free)
733#define __NR_statx 291
734__SYSCALL(__NR_statx, sys_statx)
733 735
734#undef __NR_syscalls 736#undef __NR_syscalls
735#define __NR_syscalls 291 737#define __NR_syscalls 292
736 738
737/* 739/*
738 * All syscalls below here should go away really, 740 * All syscalls below here should go away really,
diff --git a/include/uapi/drm/omap_drm.h b/include/uapi/drm/omap_drm.h
index 407cb55df6ac..7fb97863c945 100644
--- a/include/uapi/drm/omap_drm.h
+++ b/include/uapi/drm/omap_drm.h
@@ -33,8 +33,8 @@ extern "C" {
33#define OMAP_PARAM_CHIPSET_ID 1 /* ie. 0x3430, 0x4430, etc */ 33#define OMAP_PARAM_CHIPSET_ID 1 /* ie. 0x3430, 0x4430, etc */
34 34
35struct drm_omap_param { 35struct drm_omap_param {
36 uint64_t param; /* in */ 36 __u64 param; /* in */
37 uint64_t value; /* in (set_param), out (get_param) */ 37 __u64 value; /* in (set_param), out (get_param) */
38}; 38};
39 39
40#define OMAP_BO_SCANOUT 0x00000001 /* scanout capable (phys contiguous) */ 40#define OMAP_BO_SCANOUT 0x00000001 /* scanout capable (phys contiguous) */
@@ -53,18 +53,18 @@ struct drm_omap_param {
53#define OMAP_BO_TILED (OMAP_BO_TILED_8 | OMAP_BO_TILED_16 | OMAP_BO_TILED_32) 53#define OMAP_BO_TILED (OMAP_BO_TILED_8 | OMAP_BO_TILED_16 | OMAP_BO_TILED_32)
54 54
55union omap_gem_size { 55union omap_gem_size {
56 uint32_t bytes; /* (for non-tiled formats) */ 56 __u32 bytes; /* (for non-tiled formats) */
57 struct { 57 struct {
58 uint16_t width; 58 __u16 width;
59 uint16_t height; 59 __u16 height;
60 } tiled; /* (for tiled formats) */ 60 } tiled; /* (for tiled formats) */
61}; 61};
62 62
63struct drm_omap_gem_new { 63struct drm_omap_gem_new {
64 union omap_gem_size size; /* in */ 64 union omap_gem_size size; /* in */
65 uint32_t flags; /* in */ 65 __u32 flags; /* in */
66 uint32_t handle; /* out */ 66 __u32 handle; /* out */
67 uint32_t __pad; 67 __u32 __pad;
68}; 68};
69 69
70/* mask of operations: */ 70/* mask of operations: */
@@ -74,33 +74,33 @@ enum omap_gem_op {
74}; 74};
75 75
76struct drm_omap_gem_cpu_prep { 76struct drm_omap_gem_cpu_prep {
77 uint32_t handle; /* buffer handle (in) */ 77 __u32 handle; /* buffer handle (in) */
78 uint32_t op; /* mask of omap_gem_op (in) */ 78 __u32 op; /* mask of omap_gem_op (in) */
79}; 79};
80 80
81struct drm_omap_gem_cpu_fini { 81struct drm_omap_gem_cpu_fini {
82 uint32_t handle; /* buffer handle (in) */ 82 __u32 handle; /* buffer handle (in) */
83 uint32_t op; /* mask of omap_gem_op (in) */ 83 __u32 op; /* mask of omap_gem_op (in) */
84 /* TODO maybe here we pass down info about what regions are touched 84 /* TODO maybe here we pass down info about what regions are touched
85 * by sw so we can be clever about cache ops? For now a placeholder, 85 * by sw so we can be clever about cache ops? For now a placeholder,
86 * set to zero and we just do full buffer flush.. 86 * set to zero and we just do full buffer flush..
87 */ 87 */
88 uint32_t nregions; 88 __u32 nregions;
89 uint32_t __pad; 89 __u32 __pad;
90}; 90};
91 91
92struct drm_omap_gem_info { 92struct drm_omap_gem_info {
93 uint32_t handle; /* buffer handle (in) */ 93 __u32 handle; /* buffer handle (in) */
94 uint32_t pad; 94 __u32 pad;
95 uint64_t offset; /* mmap offset (out) */ 95 __u64 offset; /* mmap offset (out) */
96 /* note: in case of tiled buffers, the user virtual size can be 96 /* note: in case of tiled buffers, the user virtual size can be
97 * different from the physical size (ie. how many pages are needed 97 * different from the physical size (ie. how many pages are needed
98 * to back the object) which is returned in DRM_IOCTL_GEM_OPEN.. 98 * to back the object) which is returned in DRM_IOCTL_GEM_OPEN..
99 * This size here is the one that should be used if you want to 99 * This size here is the one that should be used if you want to
100 * mmap() the buffer: 100 * mmap() the buffer:
101 */ 101 */
102 uint32_t size; /* virtual size for mmap'ing (out) */ 102 __u32 size; /* virtual size for mmap'ing (out) */
103 uint32_t __pad; 103 __u32 __pad;
104}; 104};
105 105
106#define DRM_OMAP_GET_PARAM 0x00 106#define DRM_OMAP_GET_PARAM 0x00
diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h
index db4c253f8011..dcfc3a5a9cb1 100644
--- a/include/uapi/linux/btrfs.h
+++ b/include/uapi/linux/btrfs.h
@@ -713,33 +713,6 @@ enum btrfs_err_code {
713 BTRFS_ERROR_DEV_ONLY_WRITABLE, 713 BTRFS_ERROR_DEV_ONLY_WRITABLE,
714 BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS 714 BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS
715}; 715};
716/* An error code to error string mapping for the kernel
717* error codes
718*/
719static inline char *btrfs_err_str(enum btrfs_err_code err_code)
720{
721 switch (err_code) {
722 case BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET:
723 return "unable to go below two devices on raid1";
724 case BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET:
725 return "unable to go below four devices on raid10";
726 case BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET:
727 return "unable to go below two devices on raid5";
728 case BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET:
729 return "unable to go below three devices on raid6";
730 case BTRFS_ERROR_DEV_TGT_REPLACE:
731 return "unable to remove the dev_replace target dev";
732 case BTRFS_ERROR_DEV_MISSING_NOT_FOUND:
733 return "no missing devices found to remove";
734 case BTRFS_ERROR_DEV_ONLY_WRITABLE:
735 return "unable to remove the only writeable device";
736 case BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS:
737 return "add/delete/balance/replace/resize operation "\
738 "in progress";
739 default:
740 return NULL;
741 }
742}
743 716
744#define BTRFS_IOC_SNAP_CREATE _IOW(BTRFS_IOCTL_MAGIC, 1, \ 717#define BTRFS_IOC_SNAP_CREATE _IOW(BTRFS_IOCTL_MAGIC, 1, \
745 struct btrfs_ioctl_vol_args) 718 struct btrfs_ioctl_vol_args)
diff --git a/include/uapi/linux/packet_diag.h b/include/uapi/linux/packet_diag.h
index d08c63f3dd6f..0c5d5dd61b6a 100644
--- a/include/uapi/linux/packet_diag.h
+++ b/include/uapi/linux/packet_diag.h
@@ -64,7 +64,7 @@ struct packet_diag_mclist {
64 __u32 pdmc_count; 64 __u32 pdmc_count;
65 __u16 pdmc_type; 65 __u16 pdmc_type;
66 __u16 pdmc_alen; 66 __u16 pdmc_alen;
67 __u8 pdmc_addr[MAX_ADDR_LEN]; 67 __u8 pdmc_addr[32]; /* MAX_ADDR_LEN */
68}; 68};
69 69
70struct packet_diag_ring { 70struct packet_diag_ring {
diff --git a/include/uapi/linux/stat.h b/include/uapi/linux/stat.h
index 51a6b86e3700..d538897b8e08 100644
--- a/include/uapi/linux/stat.h
+++ b/include/uapi/linux/stat.h
@@ -114,7 +114,7 @@ struct statx {
114 __u64 stx_ino; /* Inode number */ 114 __u64 stx_ino; /* Inode number */
115 __u64 stx_size; /* File size */ 115 __u64 stx_size; /* File size */
116 __u64 stx_blocks; /* Number of 512-byte blocks allocated */ 116 __u64 stx_blocks; /* Number of 512-byte blocks allocated */
117 __u64 __spare1[1]; 117 __u64 stx_attributes_mask; /* Mask to show what's supported in stx_attributes */
118 /* 0x40 */ 118 /* 0x40 */
119 struct statx_timestamp stx_atime; /* Last access time */ 119 struct statx_timestamp stx_atime; /* Last access time */
120 struct statx_timestamp stx_btime; /* File creation time */ 120 struct statx_timestamp stx_btime; /* File creation time */
@@ -152,9 +152,10 @@ struct statx {
152#define STATX_BASIC_STATS 0x000007ffU /* The stuff in the normal stat struct */ 152#define STATX_BASIC_STATS 0x000007ffU /* The stuff in the normal stat struct */
153#define STATX_BTIME 0x00000800U /* Want/got stx_btime */ 153#define STATX_BTIME 0x00000800U /* Want/got stx_btime */
154#define STATX_ALL 0x00000fffU /* All currently supported flags */ 154#define STATX_ALL 0x00000fffU /* All currently supported flags */
155#define STATX__RESERVED 0x80000000U /* Reserved for future struct statx expansion */
155 156
156/* 157/*
157 * Attributes to be found in stx_attributes 158 * Attributes to be found in stx_attributes and masked in stx_attributes_mask.
158 * 159 *
159 * These give information about the features or the state of a file that might 160 * These give information about the features or the state of a file that might
160 * be of use to ordinary userspace programs such as GUIs or ls rather than 161 * be of use to ordinary userspace programs such as GUIs or ls rather than
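The repurposed __spare1 slot above means userspace should consult stx_attributes_mask before trusting any bit in stx_attributes. A hedged userspace example, assuming a libc that already exposes the statx(2) wrapper and the STATX_* constants (older systems need syscall(__NR_statx, ...) instead):

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/stat.h>

    int main(void)
    {
        struct statx stx;

        if (statx(AT_FDCWD, ".", 0, STATX_BASIC_STATS, &stx) != 0) {
            perror("statx");
            return 1;
        }
        /* only read a bit the filesystem says it reports */
        if (stx.stx_attributes_mask & STATX_ATTR_IMMUTABLE)
            printf("immutable: %s\n",
                   (stx.stx_attributes & STATX_ATTR_IMMUTABLE) ?
                   "yes" : "no");
        else
            printf("immutable attribute not reported here\n");
        return 0;
    }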
diff --git a/include/uapi/rdma/mlx5-abi.h b/include/uapi/rdma/mlx5-abi.h
index da7cd62bace7..0b3d30837a9f 100644
--- a/include/uapi/rdma/mlx5-abi.h
+++ b/include/uapi/rdma/mlx5-abi.h
@@ -34,6 +34,7 @@
34#define MLX5_ABI_USER_H 34#define MLX5_ABI_USER_H
35 35
36#include <linux/types.h> 36#include <linux/types.h>
37#include <linux/if_ether.h> /* For ETH_ALEN. */
37 38
38enum { 39enum {
39 MLX5_QP_FLAG_SIGNATURE = 1 << 0, 40 MLX5_QP_FLAG_SIGNATURE = 1 << 0,
@@ -66,7 +67,7 @@ struct mlx5_ib_alloc_ucontext_req {
66}; 67};
67 68
68enum mlx5_lib_caps { 69enum mlx5_lib_caps {
69 MLX5_LIB_CAP_4K_UAR = (u64)1 << 0, 70 MLX5_LIB_CAP_4K_UAR = (__u64)1 << 0,
70}; 71};
71 72
72struct mlx5_ib_alloc_ucontext_req_v2 { 73struct mlx5_ib_alloc_ucontext_req_v2 {
diff --git a/include/video/exynos5433_decon.h b/include/video/exynos5433_decon.h
index ef8e2a8ad0af..6b083d327e98 100644
--- a/include/video/exynos5433_decon.h
+++ b/include/video/exynos5433_decon.h
@@ -46,6 +46,7 @@
46#define DECON_FRAMEFIFO_STATUS 0x0524 46#define DECON_FRAMEFIFO_STATUS 0x0524
47#define DECON_CMU 0x1404 47#define DECON_CMU 0x1404
48#define DECON_UPDATE 0x1410 48#define DECON_UPDATE 0x1410
49#define DECON_CRFMID 0x1414
49#define DECON_UPDATE_SCHEME 0x1438 50#define DECON_UPDATE_SCHEME 0x1438
50#define DECON_VIDCON1 0x2000 51#define DECON_VIDCON1 0x2000
51#define DECON_VIDCON2 0x2004 52#define DECON_VIDCON2 0x2004
@@ -126,6 +127,10 @@
126 127
127/* VIDINTCON0 */ 128/* VIDINTCON0 */
128#define VIDINTCON0_FRAMEDONE (1 << 17) 129#define VIDINTCON0_FRAMEDONE (1 << 17)
130#define VIDINTCON0_FRAMESEL_BP (0 << 15)
131#define VIDINTCON0_FRAMESEL_VS (1 << 15)
132#define VIDINTCON0_FRAMESEL_AC (2 << 15)
133#define VIDINTCON0_FRAMESEL_FP (3 << 15)
129#define VIDINTCON0_INTFRMEN (1 << 12) 134#define VIDINTCON0_INTFRMEN (1 << 12)
130#define VIDINTCON0_INTEN (1 << 0) 135#define VIDINTCON0_INTEN (1 << 0)
131 136
@@ -142,6 +147,13 @@
142#define STANDALONE_UPDATE_F (1 << 0) 147#define STANDALONE_UPDATE_F (1 << 0)
143 148
144/* DECON_VIDCON1 */ 149/* DECON_VIDCON1 */
150#define VIDCON1_LINECNT_MASK (0x0fff << 16)
151#define VIDCON1_I80_ACTIVE (1 << 15)
152#define VIDCON1_VSTATUS_MASK (0x3 << 13)
153#define VIDCON1_VSTATUS_VS (0 << 13)
154#define VIDCON1_VSTATUS_BP (1 << 13)
155#define VIDCON1_VSTATUS_AC (2 << 13)
156#define VIDCON1_VSTATUS_FP (3 << 13)
145#define VIDCON1_VCLK_MASK (0x3 << 9) 157#define VIDCON1_VCLK_MASK (0x3 << 9)
146#define VIDCON1_VCLK_RUN_VDEN_DISABLE (0x3 << 9) 158#define VIDCON1_VCLK_RUN_VDEN_DISABLE (0x3 << 9)
147#define VIDCON1_VCLK_HOLD (0x0 << 9) 159#define VIDCON1_VCLK_HOLD (0x0 << 9)
diff --git a/init/main.c b/init/main.c
index f9c9d9948203..b0c11cbf5ddf 100644
--- a/init/main.c
+++ b/init/main.c
@@ -1022,6 +1022,8 @@ static noinline void __init kernel_init_freeable(void)
1022 1022
1023 workqueue_init(); 1023 workqueue_init();
1024 1024
1025 init_mm_internals();
1026
1025 do_pre_smp_initcalls(); 1027 do_pre_smp_initcalls();
1026 lockup_detector_init(); 1028 lockup_detector_init();
1027 1029
diff --git a/kernel/audit.c b/kernel/audit.c
index e794544f5e63..2f4964cfde0b 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -54,6 +54,10 @@
54#include <linux/kthread.h> 54#include <linux/kthread.h>
55#include <linux/kernel.h> 55#include <linux/kernel.h>
56#include <linux/syscalls.h> 56#include <linux/syscalls.h>
57#include <linux/spinlock.h>
58#include <linux/rcupdate.h>
59#include <linux/mutex.h>
60#include <linux/gfp.h>
57 61
58#include <linux/audit.h> 62#include <linux/audit.h>
59 63
@@ -90,13 +94,34 @@ static u32 audit_default;
90/* If auditing cannot proceed, audit_failure selects what happens. */ 94/* If auditing cannot proceed, audit_failure selects what happens. */
91static u32 audit_failure = AUDIT_FAIL_PRINTK; 95static u32 audit_failure = AUDIT_FAIL_PRINTK;
92 96
93/* 97/* private audit network namespace index */
94 * If audit records are to be written to the netlink socket, audit_pid 98static unsigned int audit_net_id;
95 * contains the pid of the auditd process and audit_nlk_portid contains 99
96 * the portid to use to send netlink messages to that process. 100/**
101 * struct audit_net - audit private network namespace data
102 * @sk: communication socket
103 */
104struct audit_net {
105 struct sock *sk;
106};
107
108/**
109 * struct auditd_connection - kernel/auditd connection state
110 * @pid: auditd PID
111 * @portid: netlink portid
112 * @net: the associated network namespace
113 * @lock: spinlock to protect write access
114 *
115 * Description:
116 * This struct is RCU protected; you must either hold the RCU lock for reading
117 * or the included spinlock for writing.
97 */ 118 */
98int audit_pid; 119static struct auditd_connection {
99static __u32 audit_nlk_portid; 120 int pid;
121 u32 portid;
122 struct net *net;
123 spinlock_t lock;
124} auditd_conn;
100 125
101/* If audit_rate_limit is non-zero, limit the rate of sending audit records 126/* If audit_rate_limit is non-zero, limit the rate of sending audit records
102 * to that number per second. This prevents DoS attacks, but results in 127 * to that number per second. This prevents DoS attacks, but results in
@@ -123,10 +148,6 @@ u32 audit_sig_sid = 0;
123*/ 148*/
124static atomic_t audit_lost = ATOMIC_INIT(0); 149static atomic_t audit_lost = ATOMIC_INIT(0);
125 150
126/* The netlink socket. */
127static struct sock *audit_sock;
128static unsigned int audit_net_id;
129
130/* Hash for inode-based rules */ 151/* Hash for inode-based rules */
131struct list_head audit_inode_hash[AUDIT_INODE_BUCKETS]; 152struct list_head audit_inode_hash[AUDIT_INODE_BUCKETS];
132 153
@@ -139,6 +160,7 @@ static LIST_HEAD(audit_freelist);
139 160
140/* queue msgs to send via kauditd_task */ 161/* queue msgs to send via kauditd_task */
141static struct sk_buff_head audit_queue; 162static struct sk_buff_head audit_queue;
163static void kauditd_hold_skb(struct sk_buff *skb);
142/* queue msgs due to temporary unicast send problems */ 164/* queue msgs due to temporary unicast send problems */
143static struct sk_buff_head audit_retry_queue; 165static struct sk_buff_head audit_retry_queue;
144/* queue msgs waiting for new auditd connection */ 166/* queue msgs waiting for new auditd connection */
@@ -192,6 +214,43 @@ struct audit_reply {
192 struct sk_buff *skb; 214 struct sk_buff *skb;
193}; 215};
194 216
217/**
218 * auditd_test_task - Check to see if a given task is an audit daemon
219 * @task: the task to check
220 *
221 * Description:
222 * Return 1 if the task is a registered audit daemon, 0 otherwise.
223 */
224int auditd_test_task(const struct task_struct *task)
225{
226 int rc;
227
228 rcu_read_lock();
229 rc = (auditd_conn.pid && task->tgid == auditd_conn.pid ? 1 : 0);
230 rcu_read_unlock();
231
232 return rc;
233}
234
235/**
236 * audit_get_sk - Return the audit socket for the given network namespace
237 * @net: the destination network namespace
238 *
239 * Description:
240 * Returns the sock pointer if valid, NULL otherwise. The caller must ensure
241 * that a reference is held for the network namespace while the sock is in use.
242 */
243static struct sock *audit_get_sk(const struct net *net)
244{
245 struct audit_net *aunet;
246
247 if (!net)
248 return NULL;
249
250 aunet = net_generic(net, audit_net_id);
251 return aunet->sk;
252}
253
195static void audit_set_portid(struct audit_buffer *ab, __u32 portid) 254static void audit_set_portid(struct audit_buffer *ab, __u32 portid)
196{ 255{
197 if (ab) { 256 if (ab) {
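The access pattern for auditd_conn above is: readers snapshot the fields inside an RCU read-side section, and the single writer (auditd_set) serializes updates behind the spinlock. Userspace has no drop-in RCU, so the sketch below models the same copy-out discipline with an rwlock; the names are illustrative:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_rwlock_t conn_lock = PTHREAD_RWLOCK_INITIALIZER;
    static struct { int pid; unsigned int portid; } conn;

    static int conn_test_pid(int pid)  /* cf. auditd_test_task() */
    {
        int rc;

        pthread_rwlock_rdlock(&conn_lock);
        rc = (conn.pid && conn.pid == pid);  /* snapshot + compare */
        pthread_rwlock_unlock(&conn_lock);
        return rc;
    }

    static void conn_set(int pid, unsigned int portid)  /* cf. auditd_set() */
    {
        pthread_rwlock_wrlock(&conn_lock);
        conn.pid = pid;
        conn.portid = portid;
        pthread_rwlock_unlock(&conn_lock);
    }

    int main(void)
    {
        conn_set(1234, 42);
        printf("is auditd: %d\n", conn_test_pid(1234));
        return 0;
    }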
@@ -210,9 +269,7 @@ void audit_panic(const char *message)
210 pr_err("%s\n", message); 269 pr_err("%s\n", message);
211 break; 270 break;
212 case AUDIT_FAIL_PANIC: 271 case AUDIT_FAIL_PANIC:
213 /* test audit_pid since printk is always losey, why bother? */ 272 panic("audit: %s\n", message);
214 if (audit_pid)
215 panic("audit: %s\n", message);
216 break; 273 break;
217 } 274 }
218} 275}
@@ -370,21 +427,87 @@ static int audit_set_failure(u32 state)
370 return audit_do_config_change("audit_failure", &audit_failure, state); 427 return audit_do_config_change("audit_failure", &audit_failure, state);
371} 428}
372 429
373/* 430/**
374 * For one reason or another this nlh isn't getting delivered to the userspace 431 * auditd_set - Set/Reset the auditd connection state
375 * audit daemon, just send it to printk. 432 * @pid: auditd PID
433 * @portid: auditd netlink portid
434 * @net: auditd network namespace pointer
435 *
436 * Description:
437 * This function will obtain and drop network namespace references as
438 * necessary.
439 */
440static void auditd_set(int pid, u32 portid, struct net *net)
441{
442 unsigned long flags;
443
444 spin_lock_irqsave(&auditd_conn.lock, flags);
445 auditd_conn.pid = pid;
446 auditd_conn.portid = portid;
447 if (auditd_conn.net)
448 put_net(auditd_conn.net);
449 if (net)
450 auditd_conn.net = get_net(net);
451 else
452 auditd_conn.net = NULL;
453 spin_unlock_irqrestore(&auditd_conn.lock, flags);
454}
455
456/**
457 * auditd_reset - Disconnect the auditd connection
458 *
459 * Description:
460 * Break the auditd/kauditd connection and move all the queued records into the
461 * hold queue in case auditd reconnects.
462 */
463static void auditd_reset(void)
464{
465 struct sk_buff *skb;
466
467 /* if it isn't already broken, break the connection */
468 rcu_read_lock();
469 if (auditd_conn.pid)
470 auditd_set(0, 0, NULL);
471 rcu_read_unlock();
472
473 /* flush all of the main and retry queues to the hold queue */
474 while ((skb = skb_dequeue(&audit_retry_queue)))
475 kauditd_hold_skb(skb);
476 while ((skb = skb_dequeue(&audit_queue)))
477 kauditd_hold_skb(skb);
478}
479
480/**
481 * kauditd_print_skb - Print the audit record to the ring buffer
482 * @skb: audit record
483 *
484 * Whatever the reason, this packet may not make it to the auditd connection,
485 * so write it via printk to ensure the information isn't completely lost.
376 */ 486 */
377static void kauditd_printk_skb(struct sk_buff *skb) 487static void kauditd_printk_skb(struct sk_buff *skb)
378{ 488{
379 struct nlmsghdr *nlh = nlmsg_hdr(skb); 489 struct nlmsghdr *nlh = nlmsg_hdr(skb);
380 char *data = nlmsg_data(nlh); 490 char *data = nlmsg_data(nlh);
381 491
382 if (nlh->nlmsg_type != AUDIT_EOE) { 492 if (nlh->nlmsg_type != AUDIT_EOE && printk_ratelimit())
383 if (printk_ratelimit()) 493 pr_notice("type=%d %s\n", nlh->nlmsg_type, data);
384 pr_notice("type=%d %s\n", nlh->nlmsg_type, data); 494}
385 else 495
386 audit_log_lost("printk limit exceeded"); 496/**
387 } 497 * kauditd_rehold_skb - Handle an audit record send failure in the hold queue
498 * @skb: audit record
499 *
500 * Description:
501 * This should only be used by the kauditd_thread when it fails to flush the
502 * hold queue.
503 */
504static void kauditd_rehold_skb(struct sk_buff *skb)
505{
506 /* put the record back in the queue at the same place */
507 skb_queue_head(&audit_hold_queue, skb);
508
509 /* fail the auditd connection */
510 auditd_reset();
388} 511}
389 512
390/** 513/**
@@ -421,6 +544,9 @@ static void kauditd_hold_skb(struct sk_buff *skb)
421 /* we have no other options - drop the message */ 544 /* we have no other options - drop the message */
422 audit_log_lost("kauditd hold queue overflow"); 545 audit_log_lost("kauditd hold queue overflow");
423 kfree_skb(skb); 546 kfree_skb(skb);
547
548 /* fail the auditd connection */
549 auditd_reset();
424} 550}
425 551
426/** 552/**
@@ -441,51 +567,122 @@ static void kauditd_retry_skb(struct sk_buff *skb)
441} 567}
442 568
443/** 569/**
444 * auditd_reset - Disconnect the auditd connection 570 * auditd_send_unicast_skb - Send a record via unicast to auditd
571 * @skb: audit record
445 * 572 *
446 * Description: 573 * Description:
447 * Break the auditd/kauditd connection and move all the records in the retry 574 * Send a skb to the audit daemon, returns positive/zero values on success and
448 * queue into the hold queue in case auditd reconnects. The audit_cmd_mutex 575 * negative values on failure; in all cases the skb will be consumed by this
449 * must be held when calling this function. 576 * function. If the send results in -ECONNREFUSED the connection with auditd
577 * will be reset. This function may sleep so callers should not hold any locks
578 * where this would cause a problem.
450 */ 579 */
451static void auditd_reset(void) 580static int auditd_send_unicast_skb(struct sk_buff *skb)
452{ 581{
453 struct sk_buff *skb; 582 int rc;
454 583 u32 portid;
455 /* break the connection */ 584 struct net *net;
456 if (audit_sock) { 585 struct sock *sk;
457 sock_put(audit_sock); 586
458 audit_sock = NULL; 587 /* NOTE: we can't call netlink_unicast while in the RCU section so
588 * take a reference to the network namespace and grab local
589 * copies of the namespace, the sock, and the portid; the
590 * namespace and sock aren't going to go away while we hold a
591 * reference and if the portid does become invalid after the RCU
592 * section netlink_unicast() should safely return an error */
593
594 rcu_read_lock();
595 if (!auditd_conn.pid) {
596 rcu_read_unlock();
597 rc = -ECONNREFUSED;
598 goto err;
459 } 599 }
460 audit_pid = 0; 600 net = auditd_conn.net;
461 audit_nlk_portid = 0; 601 get_net(net);
602 sk = audit_get_sk(net);
603 portid = auditd_conn.portid;
604 rcu_read_unlock();
462 605
463 /* flush all of the retry queue to the hold queue */ 606 rc = netlink_unicast(sk, skb, portid, 0);
464 while ((skb = skb_dequeue(&audit_retry_queue))) 607 put_net(net);
465 kauditd_hold_skb(skb); 608 if (rc < 0)
609 goto err;
610
611 return rc;
612
613err:
614 if (rc == -ECONNREFUSED)
615 auditd_reset();
616 return rc;
466} 617}
467 618
468/** 619/**
469 * kauditd_send_unicast_skb - Send a record via unicast to auditd 620 * kauditd_send_queue - Helper for kauditd_thread to flush skb queues
470 * @skb: audit record 621 * @sk: the sending sock
622 * @portid: the netlink destination
623 * @queue: the skb queue to process
624 * @retry_limit: limit on number of netlink unicast failures
625 * @skb_hook: per-skb hook for additional processing
626 * @err_hook: hook called if the skb fails the netlink unicast send
627 *
628 * Description:
629 * Run through the given queue and attempt to send the audit records to auditd,
630 * returns zero on success, negative values on failure. It is up to the caller
631 * to ensure that the @sk is valid for the duration of this function.
632 *
471 */ 633 */
472static int kauditd_send_unicast_skb(struct sk_buff *skb) 634static int kauditd_send_queue(struct sock *sk, u32 portid,
635 struct sk_buff_head *queue,
636 unsigned int retry_limit,
637 void (*skb_hook)(struct sk_buff *skb),
638 void (*err_hook)(struct sk_buff *skb))
473{ 639{
474 int rc; 640 int rc = 0;
641 struct sk_buff *skb;
642 static unsigned int failed = 0;
475 643
476 /* if we know nothing is connected, don't even try the netlink call */ 644 /* NOTE: kauditd_thread takes care of all our locking, we just use
477 if (!audit_pid) 645 * the netlink info passed to us (e.g. sk and portid) */
478 return -ECONNREFUSED; 646
647 while ((skb = skb_dequeue(queue))) {
648 /* call the skb_hook for each skb we touch */
649 if (skb_hook)
650 (*skb_hook)(skb);
651
652 /* can we send to anyone via unicast? */
653 if (!sk) {
654 if (err_hook)
655 (*err_hook)(skb);
656 continue;
657 }
479 658
480 /* get an extra skb reference in case we fail to send */ 659 /* grab an extra skb reference in case of error */
481 skb_get(skb); 660 skb_get(skb);
482 rc = netlink_unicast(audit_sock, skb, audit_nlk_portid, 0); 661 rc = netlink_unicast(sk, skb, portid, 0);
483 if (rc >= 0) { 662 if (rc < 0) {
484 consume_skb(skb); 663 /* fatal failure for our queue flush attempt? */
485 rc = 0; 664 if (++failed >= retry_limit ||
665 rc == -ECONNREFUSED || rc == -EPERM) {
666 /* yes - error processing for the queue */
667 sk = NULL;
668 if (err_hook)
669 (*err_hook)(skb);
670 if (!skb_hook)
671 goto out;
672 /* keep processing with the skb_hook */
673 continue;
674 } else
675 /* no - requeue to preserve ordering */
676 skb_queue_head(queue, skb);
677 } else {
678 /* it worked - drop the extra reference and continue */
679 consume_skb(skb);
680 failed = 0;
681 }
486 } 682 }
487 683
488 return rc; 684out:
685 return (rc >= 0 ? 0 : rc);
489} 686}
490 687
491/* 688/*
@@ -493,16 +690,19 @@ static int kauditd_send_unicast_skb(struct sk_buff *skb)
493 * @skb: audit record 690 * @skb: audit record
494 * 691 *
495 * Description: 692 * Description:
496 * This function doesn't consume an skb as might be expected since it has to 693 * Write a multicast message to anyone listening in the initial network
497 * copy it anyways. 694 * namespace. This function doesn't consume an skb as might be expected since
695 * it has to copy it anyways.
498 */ 696 */
499static void kauditd_send_multicast_skb(struct sk_buff *skb) 697static void kauditd_send_multicast_skb(struct sk_buff *skb)
500{ 698{
501 struct sk_buff *copy; 699 struct sk_buff *copy;
502 struct audit_net *aunet = net_generic(&init_net, audit_net_id); 700 struct sock *sock = audit_get_sk(&init_net);
503 struct sock *sock = aunet->nlsk;
504 struct nlmsghdr *nlh; 701 struct nlmsghdr *nlh;
505 702
703 /* NOTE: we are not taking an additional reference for init_net since
704 * we don't have to worry about it going away */
705
506 if (!netlink_has_listeners(sock, AUDIT_NLGRP_READLOG)) 706 if (!netlink_has_listeners(sock, AUDIT_NLGRP_READLOG))
507 return; 707 return;
508 708
@@ -526,149 +726,75 @@ static void kauditd_send_multicast_skb(struct sk_buff *skb)
526} 726}
527 727
528/** 728/**
529 * kauditd_wake_condition - Return true when it is time to wake kauditd_thread 729 * kauditd_thread - Worker thread to send audit records to userspace
530 * 730 * @dummy: unused
531 * Description:
532 * This function is for use by the wait_event_freezable() call in
533 * kauditd_thread().
534 */ 731 */
535static int kauditd_wake_condition(void)
536{
537 static int pid_last = 0;
538 int rc;
539 int pid = audit_pid;
540
541 /* wake on new messages or a change in the connected auditd */
542 rc = skb_queue_len(&audit_queue) || (pid && pid != pid_last);
543 if (rc)
544 pid_last = pid;
545
546 return rc;
547}
548
549static int kauditd_thread(void *dummy) 732static int kauditd_thread(void *dummy)
550{ 733{
551 int rc; 734 int rc;
552 int auditd = 0; 735 u32 portid = 0;
553 int reschedule = 0; 736 struct net *net = NULL;
554 struct sk_buff *skb; 737 struct sock *sk = NULL;
555 struct nlmsghdr *nlh;
556 738
557#define UNICAST_RETRIES 5 739#define UNICAST_RETRIES 5
558#define AUDITD_BAD(x,y) \
559 ((x) == -ECONNREFUSED || (x) == -EPERM || ++(y) >= UNICAST_RETRIES)
560
561 /* NOTE: we do invalidate the auditd connection flag on any sending
562 * errors, but we only "restore" the connection flag at specific places
563 * in the loop in order to help ensure proper ordering of audit
564 * records */
565 740
566 set_freezable(); 741 set_freezable();
567 while (!kthread_should_stop()) { 742 while (!kthread_should_stop()) {
568 /* NOTE: possible area for future improvement is to look at 743 /* NOTE: see the lock comments in auditd_send_unicast_skb() */
569 * the hold and retry queues, since only this thread 744 rcu_read_lock();
570 * has access to these queues we might be able to do 745 if (!auditd_conn.pid) {
571 * our own queuing and skip some/all of the locking */ 746 rcu_read_unlock();
572 747 goto main_queue;
573 /* NOTE: it might be a fun experiment to split the hold and 748 }
574 * retry queue handling to another thread, but the 749 net = auditd_conn.net;
575 * synchronization issues and other overhead might kill 750 get_net(net);
576 * any performance gains */ 751 sk = audit_get_sk(net);
752 portid = auditd_conn.portid;
753 rcu_read_unlock();
577 754
578 /* attempt to flush the hold queue */ 755 /* attempt to flush the hold queue */
579 while (auditd && (skb = skb_dequeue(&audit_hold_queue))) { 756 rc = kauditd_send_queue(sk, portid,
580 rc = kauditd_send_unicast_skb(skb); 757 &audit_hold_queue, UNICAST_RETRIES,
581 if (rc) { 758 NULL, kauditd_rehold_skb);
582 /* requeue to the same spot */ 759 if (rc < 0) {
583 skb_queue_head(&audit_hold_queue, skb); 760 sk = NULL;
584 761 goto main_queue;
585 auditd = 0;
586 if (AUDITD_BAD(rc, reschedule)) {
587 mutex_lock(&audit_cmd_mutex);
588 auditd_reset();
589 mutex_unlock(&audit_cmd_mutex);
590 reschedule = 0;
591 }
592 } else
593 /* we were able to send successfully */
594 reschedule = 0;
595 } 762 }
596 763
597 /* attempt to flush the retry queue */ 764 /* attempt to flush the retry queue */
598 while (auditd && (skb = skb_dequeue(&audit_retry_queue))) { 765 rc = kauditd_send_queue(sk, portid,
599 rc = kauditd_send_unicast_skb(skb); 766 &audit_retry_queue, UNICAST_RETRIES,
600 if (rc) { 767 NULL, kauditd_hold_skb);
601 auditd = 0; 768 if (rc < 0) {
602 if (AUDITD_BAD(rc, reschedule)) { 769 sk = NULL;
603 kauditd_hold_skb(skb); 770 goto main_queue;
604 mutex_lock(&audit_cmd_mutex);
605 auditd_reset();
606 mutex_unlock(&audit_cmd_mutex);
607 reschedule = 0;
608 } else
609 /* temporary problem (we hope), queue
610 * to the same spot and retry */
611 skb_queue_head(&audit_retry_queue, skb);
612 } else
613 /* we were able to send successfully */
614 reschedule = 0;
615 } 771 }
616 772
617 /* standard queue processing, try to be as quick as possible */ 773main_queue:
618quick_loop: 774 /* process the main queue - do the multicast send and attempt
619 skb = skb_dequeue(&audit_queue); 775 * unicast, dump failed record sends to the retry queue; if
620 if (skb) { 776 * sk == NULL due to previous failures we will just do the
621 /* setup the netlink header, see the comments in 777 * multicast send and move the record to the retry queue */
622 * kauditd_send_multicast_skb() for length quirks */ 778 kauditd_send_queue(sk, portid, &audit_queue, 1,
623 nlh = nlmsg_hdr(skb); 779 kauditd_send_multicast_skb,
624 nlh->nlmsg_len = skb->len - NLMSG_HDRLEN; 780 kauditd_retry_skb);
625 781
626 /* attempt to send to any multicast listeners */ 782 /* drop our netns reference, no auditd sends past this line */
627 kauditd_send_multicast_skb(skb); 783 if (net) {
628 784 put_net(net);
629 /* attempt to send to auditd, queue on failure */ 785 net = NULL;
630 if (auditd) {
631 rc = kauditd_send_unicast_skb(skb);
632 if (rc) {
633 auditd = 0;
634 if (AUDITD_BAD(rc, reschedule)) {
635 mutex_lock(&audit_cmd_mutex);
636 auditd_reset();
637 mutex_unlock(&audit_cmd_mutex);
638 reschedule = 0;
639 }
640
641 /* move to the retry queue */
642 kauditd_retry_skb(skb);
643 } else
644 /* everything is working so go fast! */
645 goto quick_loop;
646 } else if (reschedule)
647 /* we are currently having problems, move to
648 * the retry queue */
649 kauditd_retry_skb(skb);
650 else
651 /* dump the message via printk and hold it */
652 kauditd_hold_skb(skb);
653 } else {
654 /* we have flushed the backlog so wake everyone */
655 wake_up(&audit_backlog_wait);
656
657 /* if everything is okay with auditd (if present), go
658 * to sleep until there is something new in the queue
659 * or we have a change in the connected auditd;
660 * otherwise simply reschedule to give things a chance
661 * to recover */
662 if (reschedule) {
663 set_current_state(TASK_INTERRUPTIBLE);
664 schedule();
665 } else
666 wait_event_freezable(kauditd_wait,
667 kauditd_wake_condition());
668
669 /* update the auditd connection status */
670 auditd = (audit_pid ? 1 : 0);
671 } 786 }
787 sk = NULL;
788
789 /* we have processed all the queues so wake everyone */
790 wake_up(&audit_backlog_wait);
791
792 /* NOTE: we want to wake up if there is anything on the queue,
793 * regardless of whether an auditd is connected, as we need to
794 * do the multicast send and rotate records from the
795 * main queue to the retry/hold queues */
796 wait_event_freezable(kauditd_wait,
797 (skb_queue_len(&audit_queue) ? 1 : 0));
672 } 798 }
673 799
674 return 0; 800 return 0;
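The control flow that replaces the old quick_loop is easiest to see in miniature: kauditd_send_queue() walks a queue, retries a failed send in place to preserve ordering, and once the retry limit (or a fatal errno) trips it diverts records to the err_hook. A standalone model (the fake send and the names are illustrative, not the kernel code):

    #include <stdio.h>

    #define RETRY_LIMIT 3

    static int send_item(int item, int fail)
    {
        return fail ? -1 : item;  /* fake netlink_unicast() */
    }

    static void to_hold(int item)  /* err_hook, cf. kauditd_hold_skb() */
    {
        printf("held %d\n", item);
    }

    static void flush_queue(int *queue, int n, int fail)
    {
        unsigned int failed = 0;
        int i = 0;

        while (i < n) {
            if (send_item(queue[i], fail) < 0) {
                if (++failed >= RETRY_LIMIT) {
                    to_hold(queue[i]);  /* divert the record */
                    i++;                /* keep draining */
                }
                continue;  /* otherwise retry in place */
            }
            failed = 0;
            i++;
        }
    }

    int main(void)
    {
        int q[] = { 1, 2 };

        flush_queue(q, 2, 1);  /* prints "held 1" then "held 2" */
        return 0;
    }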
@@ -678,17 +804,16 @@ int audit_send_list(void *_dest)
678{ 804{
679 struct audit_netlink_list *dest = _dest; 805 struct audit_netlink_list *dest = _dest;
680 struct sk_buff *skb; 806 struct sk_buff *skb;
681 struct net *net = dest->net; 807 struct sock *sk = audit_get_sk(dest->net);
682 struct audit_net *aunet = net_generic(net, audit_net_id);
683 808
684 /* wait for parent to finish and send an ACK */ 809 /* wait for parent to finish and send an ACK */
685 mutex_lock(&audit_cmd_mutex); 810 mutex_lock(&audit_cmd_mutex);
686 mutex_unlock(&audit_cmd_mutex); 811 mutex_unlock(&audit_cmd_mutex);
687 812
688 while ((skb = __skb_dequeue(&dest->q)) != NULL) 813 while ((skb = __skb_dequeue(&dest->q)) != NULL)
689 netlink_unicast(aunet->nlsk, skb, dest->portid, 0); 814 netlink_unicast(sk, skb, dest->portid, 0);
690 815
691 put_net(net); 816 put_net(dest->net);
692 kfree(dest); 817 kfree(dest);
693 818
694 return 0; 819 return 0;
@@ -722,16 +847,15 @@ out_kfree_skb:
722static int audit_send_reply_thread(void *arg) 847static int audit_send_reply_thread(void *arg)
723{ 848{
724 struct audit_reply *reply = (struct audit_reply *)arg; 849 struct audit_reply *reply = (struct audit_reply *)arg;
725 struct net *net = reply->net; 850 struct sock *sk = audit_get_sk(reply->net);
726 struct audit_net *aunet = net_generic(net, audit_net_id);
727 851
728 mutex_lock(&audit_cmd_mutex); 852 mutex_lock(&audit_cmd_mutex);
729 mutex_unlock(&audit_cmd_mutex); 853 mutex_unlock(&audit_cmd_mutex);
730 854
731 /* Ignore failure. It'll only happen if the sender goes away, 855 /* Ignore failure. It'll only happen if the sender goes away,
732 because our timeout is set to infinite. */ 856 because our timeout is set to infinite. */
733 netlink_unicast(aunet->nlsk , reply->skb, reply->portid, 0); 857 netlink_unicast(sk, reply->skb, reply->portid, 0);
734 put_net(net); 858 put_net(reply->net);
735 kfree(reply); 859 kfree(reply);
736 return 0; 860 return 0;
737} 861}
@@ -949,12 +1073,12 @@ static int audit_set_feature(struct sk_buff *skb)
949 1073
950static int audit_replace(pid_t pid) 1074static int audit_replace(pid_t pid)
951{ 1075{
952 struct sk_buff *skb = audit_make_reply(0, 0, AUDIT_REPLACE, 0, 0, 1076 struct sk_buff *skb;
953 &pid, sizeof(pid));
954 1077
1078 skb = audit_make_reply(0, 0, AUDIT_REPLACE, 0, 0, &pid, sizeof(pid));
955 if (!skb) 1079 if (!skb)
956 return -ENOMEM; 1080 return -ENOMEM;
957 return netlink_unicast(audit_sock, skb, audit_nlk_portid, 0); 1081 return auditd_send_unicast_skb(skb);
958} 1082}
959 1083
960static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) 1084static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
@@ -981,7 +1105,9 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
981 memset(&s, 0, sizeof(s)); 1105 memset(&s, 0, sizeof(s));
982 s.enabled = audit_enabled; 1106 s.enabled = audit_enabled;
983 s.failure = audit_failure; 1107 s.failure = audit_failure;
984 s.pid = audit_pid; 1108 rcu_read_lock();
1109 s.pid = auditd_conn.pid;
1110 rcu_read_unlock();
985 s.rate_limit = audit_rate_limit; 1111 s.rate_limit = audit_rate_limit;
986 s.backlog_limit = audit_backlog_limit; 1112 s.backlog_limit = audit_backlog_limit;
987 s.lost = atomic_read(&audit_lost); 1113 s.lost = atomic_read(&audit_lost);
@@ -1014,30 +1140,44 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
1014 * from the initial pid namespace, but something 1140 * from the initial pid namespace, but something
1015 * to keep in mind if this changes */ 1141 * to keep in mind if this changes */
1016 int new_pid = s.pid; 1142 int new_pid = s.pid;
1143 pid_t auditd_pid;
1017 pid_t requesting_pid = task_tgid_vnr(current); 1144 pid_t requesting_pid = task_tgid_vnr(current);
1018 1145
1019 if ((!new_pid) && (requesting_pid != audit_pid)) { 1146 /* test the auditd connection */
1020 audit_log_config_change("audit_pid", new_pid, audit_pid, 0); 1147 audit_replace(requesting_pid);
1148
1149 rcu_read_lock();
1150 auditd_pid = auditd_conn.pid;
1151 /* only the current auditd can unregister itself */
1152 if ((!new_pid) && (requesting_pid != auditd_pid)) {
1153 rcu_read_unlock();
1154 audit_log_config_change("audit_pid", new_pid,
1155 auditd_pid, 0);
1021 return -EACCES; 1156 return -EACCES;
1022 } 1157 }
1023 if (audit_pid && new_pid && 1158 /* replacing a healthy auditd is not allowed */
1024 audit_replace(requesting_pid) != -ECONNREFUSED) { 1159 if (auditd_pid && new_pid) {
1025 audit_log_config_change("audit_pid", new_pid, audit_pid, 0); 1160 rcu_read_unlock();
1161 audit_log_config_change("audit_pid", new_pid,
1162 auditd_pid, 0);
1026 return -EEXIST; 1163 return -EEXIST;
1027 } 1164 }
1165 rcu_read_unlock();
1166
1028 if (audit_enabled != AUDIT_OFF) 1167 if (audit_enabled != AUDIT_OFF)
1029 audit_log_config_change("audit_pid", new_pid, audit_pid, 1); 1168 audit_log_config_change("audit_pid", new_pid,
1169 auditd_pid, 1);
1170
1030 if (new_pid) { 1171 if (new_pid) {
1031 if (audit_sock) 1172 /* register a new auditd connection */
1032 sock_put(audit_sock); 1173 auditd_set(new_pid,
1033 audit_pid = new_pid; 1174 NETLINK_CB(skb).portid,
1034 audit_nlk_portid = NETLINK_CB(skb).portid; 1175 sock_net(NETLINK_CB(skb).sk));
1035 sock_hold(skb->sk); 1176 /* try to process any backlog */
1036 audit_sock = skb->sk; 1177 wake_up_interruptible(&kauditd_wait);
1037 } else { 1178 } else
1179 /* unregister the auditd connection */
1038 auditd_reset(); 1180 auditd_reset();
1039 }
1040 wake_up_interruptible(&kauditd_wait);
1041 } 1181 }
1042 if (s.mask & AUDIT_STATUS_RATE_LIMIT) { 1182 if (s.mask & AUDIT_STATUS_RATE_LIMIT) {
1043 err = audit_set_rate_limit(s.rate_limit); 1183 err = audit_set_rate_limit(s.rate_limit);
@@ -1090,7 +1230,6 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
1090 if (err) 1230 if (err)
1091 break; 1231 break;
1092 } 1232 }
1093 mutex_unlock(&audit_cmd_mutex);
1094 audit_log_common_recv_msg(&ab, msg_type); 1233 audit_log_common_recv_msg(&ab, msg_type);
1095 if (msg_type != AUDIT_USER_TTY) 1234 if (msg_type != AUDIT_USER_TTY)
1096 audit_log_format(ab, " msg='%.*s'", 1235 audit_log_format(ab, " msg='%.*s'",
@@ -1108,7 +1247,6 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
1108 } 1247 }
1109 audit_set_portid(ab, NETLINK_CB(skb).portid); 1248 audit_set_portid(ab, NETLINK_CB(skb).portid);
1110 audit_log_end(ab); 1249 audit_log_end(ab);
1111 mutex_lock(&audit_cmd_mutex);
1112 } 1250 }
1113 break; 1251 break;
1114 case AUDIT_ADD_RULE: 1252 case AUDIT_ADD_RULE:
@@ -1298,26 +1436,26 @@ static int __net_init audit_net_init(struct net *net)
1298 1436
1299 struct audit_net *aunet = net_generic(net, audit_net_id); 1437 struct audit_net *aunet = net_generic(net, audit_net_id);
1300 1438
1301 aunet->nlsk = netlink_kernel_create(net, NETLINK_AUDIT, &cfg); 1439 aunet->sk = netlink_kernel_create(net, NETLINK_AUDIT, &cfg);
1302 if (aunet->nlsk == NULL) { 1440 if (aunet->sk == NULL) {
1303 audit_panic("cannot initialize netlink socket in namespace"); 1441 audit_panic("cannot initialize netlink socket in namespace");
1304 return -ENOMEM; 1442 return -ENOMEM;
1305 } 1443 }
1306 aunet->nlsk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT; 1444 aunet->sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
1445
1307 return 0; 1446 return 0;
1308} 1447}
1309 1448
1310static void __net_exit audit_net_exit(struct net *net) 1449static void __net_exit audit_net_exit(struct net *net)
1311{ 1450{
1312 struct audit_net *aunet = net_generic(net, audit_net_id); 1451 struct audit_net *aunet = net_generic(net, audit_net_id);
1313 struct sock *sock = aunet->nlsk; 1452
1314 mutex_lock(&audit_cmd_mutex); 1453 rcu_read_lock();
1315 if (sock == audit_sock) 1454 if (net == auditd_conn.net)
1316 auditd_reset(); 1455 auditd_reset();
1317 mutex_unlock(&audit_cmd_mutex); 1456 rcu_read_unlock();
1318 1457
1319 netlink_kernel_release(sock); 1458 netlink_kernel_release(aunet->sk);
1320 aunet->nlsk = NULL;
1321} 1459}
1322 1460
1323static struct pernet_operations audit_net_ops __net_initdata = { 1461static struct pernet_operations audit_net_ops __net_initdata = {
@@ -1335,20 +1473,24 @@ static int __init audit_init(void)
1335 if (audit_initialized == AUDIT_DISABLED) 1473 if (audit_initialized == AUDIT_DISABLED)
1336 return 0; 1474 return 0;
1337 1475
1338 pr_info("initializing netlink subsys (%s)\n", 1476 memset(&auditd_conn, 0, sizeof(auditd_conn));
1339 audit_default ? "enabled" : "disabled"); 1477 spin_lock_init(&auditd_conn.lock);
1340 register_pernet_subsys(&audit_net_ops);
1341 1478
1342 skb_queue_head_init(&audit_queue); 1479 skb_queue_head_init(&audit_queue);
1343 skb_queue_head_init(&audit_retry_queue); 1480 skb_queue_head_init(&audit_retry_queue);
1344 skb_queue_head_init(&audit_hold_queue); 1481 skb_queue_head_init(&audit_hold_queue);
1345 audit_initialized = AUDIT_INITIALIZED;
1346 audit_enabled = audit_default;
1347 audit_ever_enabled |= !!audit_default;
1348 1482
1349 for (i = 0; i < AUDIT_INODE_BUCKETS; i++) 1483 for (i = 0; i < AUDIT_INODE_BUCKETS; i++)
1350 INIT_LIST_HEAD(&audit_inode_hash[i]); 1484 INIT_LIST_HEAD(&audit_inode_hash[i]);
1351 1485
1486 pr_info("initializing netlink subsys (%s)\n",
1487 audit_default ? "enabled" : "disabled");
1488 register_pernet_subsys(&audit_net_ops);
1489
1490 audit_initialized = AUDIT_INITIALIZED;
1491 audit_enabled = audit_default;
1492 audit_ever_enabled |= !!audit_default;
1493
1352 kauditd_task = kthread_run(kauditd_thread, NULL, "kauditd"); 1494 kauditd_task = kthread_run(kauditd_thread, NULL, "kauditd");
1353 if (IS_ERR(kauditd_task)) { 1495 if (IS_ERR(kauditd_task)) {
1354 int err = PTR_ERR(kauditd_task); 1496 int err = PTR_ERR(kauditd_task);
@@ -1519,20 +1661,16 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask,
1519 if (unlikely(!audit_filter(type, AUDIT_FILTER_TYPE))) 1661 if (unlikely(!audit_filter(type, AUDIT_FILTER_TYPE)))
1520 return NULL; 1662 return NULL;
1521 1663
1522 /* don't ever fail/sleep on these two conditions: 1664 /* NOTE: don't ever fail/sleep on these two conditions:
1523 * 1. auditd generated record - since we need auditd to drain the 1665 * 1. auditd generated record - since we need auditd to drain the
1524 * queue; also, when we are checking for auditd, compare PIDs using 1666 * queue; also, when we are checking for auditd, compare PIDs using
1525 * task_tgid_vnr() since auditd_pid is set in audit_receive_msg() 1667 * task_tgid_vnr() since auditd_pid is set in audit_receive_msg()
1526 * using a PID anchored in the caller's namespace 1668 * using a PID anchored in the caller's namespace
1527 * 2. audit command message - record types 1000 through 1099 inclusive 1669 * 2. generator holding the audit_cmd_mutex - we don't want to block
1528 * are command messages/records used to manage the kernel subsystem 1670 * while holding the mutex */
1529 * and the audit userspace, blocking on these messages could cause 1671 if (!(auditd_test_task(current) ||
1530 * problems under load so don't do it (note: not all of these 1672 (current == __mutex_owner(&audit_cmd_mutex)))) {
1531 * command types are valid as record types, but it is quicker to 1673 long stime = audit_backlog_wait_time;
1532 * just check two ints than a series of ints in a if/switch stmt) */
1533 if (!((audit_pid && audit_pid == task_tgid_vnr(current)) ||
1534 (type >= 1000 && type <= 1099))) {
1535 long sleep_time = audit_backlog_wait_time;
1536 1674
1537 while (audit_backlog_limit && 1675 while (audit_backlog_limit &&
1538 (skb_queue_len(&audit_queue) > audit_backlog_limit)) { 1676 (skb_queue_len(&audit_queue) > audit_backlog_limit)) {
@@ -1541,14 +1679,13 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask,
1541 1679
1542 /* sleep if we are allowed and we haven't exhausted our 1680 /* sleep if we are allowed and we haven't exhausted our
1543 * backlog wait limit */ 1681 * backlog wait limit */
1544 if ((gfp_mask & __GFP_DIRECT_RECLAIM) && 1682 if (gfpflags_allow_blocking(gfp_mask) && (stime > 0)) {
1545 (sleep_time > 0)) {
1546 DECLARE_WAITQUEUE(wait, current); 1683 DECLARE_WAITQUEUE(wait, current);
1547 1684
1548 add_wait_queue_exclusive(&audit_backlog_wait, 1685 add_wait_queue_exclusive(&audit_backlog_wait,
1549 &wait); 1686 &wait);
1550 set_current_state(TASK_UNINTERRUPTIBLE); 1687 set_current_state(TASK_UNINTERRUPTIBLE);
1551 sleep_time = schedule_timeout(sleep_time); 1688 stime = schedule_timeout(stime);
1552 remove_wait_queue(&audit_backlog_wait, &wait); 1689 remove_wait_queue(&audit_backlog_wait, &wait);
1553 } else { 1690 } else {
1554 if (audit_rate_check() && printk_ratelimit()) 1691 if (audit_rate_check() && printk_ratelimit())
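The gfpflags_allow_blocking() call above is a readability substitute for the open-coded __GFP_DIRECT_RECLAIM test it replaces; as of this era it amounts to the following (paraphrasing include/linux/gfp.h, modulo exact spelling):

    static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
    {
        return !!(gfp_flags & __GFP_DIRECT_RECLAIM);
    }

so the sleep-versus-drop decision in the backlog loop keys off the same bit as before, and the rename of sleep_time to stime changes nothing behaviorally.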
@@ -2127,15 +2264,27 @@ out:
2127 */ 2264 */
2128void audit_log_end(struct audit_buffer *ab) 2265void audit_log_end(struct audit_buffer *ab)
2129{ 2266{
2267 struct sk_buff *skb;
2268 struct nlmsghdr *nlh;
2269
2130 if (!ab) 2270 if (!ab)
2131 return; 2271 return;
2132 if (!audit_rate_check()) { 2272
2133 audit_log_lost("rate limit exceeded"); 2273 if (audit_rate_check()) {
2134 } else { 2274 skb = ab->skb;
2135 skb_queue_tail(&audit_queue, ab->skb);
2136 wake_up_interruptible(&kauditd_wait);
2137 ab->skb = NULL; 2275 ab->skb = NULL;
2138 } 2276
2277 /* setup the netlink header, see the comments in
2278 * kauditd_send_multicast_skb() for length quirks */
2279 nlh = nlmsg_hdr(skb);
2280 nlh->nlmsg_len = skb->len - NLMSG_HDRLEN;
2281
2282 /* queue the netlink packet and poke the kauditd thread */
2283 skb_queue_tail(&audit_queue, skb);
2284 wake_up_interruptible(&kauditd_wait);
2285 } else
2286 audit_log_lost("rate limit exceeded");
2287
2139 audit_buffer_free(ab); 2288 audit_buffer_free(ab);
2140} 2289}
2141 2290
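With the audit_log_end() rework above, the emitter itself finalizes the netlink length field just before queueing, and nlmsg_len deliberately carries the payload length rather than the usual header-inclusive length (the referenced kauditd_send_multicast_skb() comment covers the quirk). A standalone analogue of the build-payload-then-patch-length flow (struct layout and values are illustrative only):

    #include <stdio.h>

    struct msg_hdr { unsigned int len; unsigned short type; };

    int main(void)
    {
        struct {
            struct msg_hdr hdr;
            char payload[240];
        } msg = { .hdr = { .len = 0, .type = 1305 } }; /* 1305 = AUDIT_CONFIG_CHANGE */
        int used;

        /* build the payload first ... */
        used = snprintf(msg.payload, sizeof(msg.payload),
                        "audit_pid=%d old=%d", 4242, 0);
        /* ... then patch the length into the reserved header, as the hunk does */
        msg.hdr.len = (unsigned int)used;

        printf("queued %u payload bytes, type %u\n", msg.hdr.len, msg.hdr.type);
        return 0;
    }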
diff --git a/kernel/audit.h b/kernel/audit.h
index ca579880303a..0d87f8ab8778 100644
--- a/kernel/audit.h
+++ b/kernel/audit.h
@@ -218,7 +218,7 @@ extern void audit_log_name(struct audit_context *context,
218 struct audit_names *n, const struct path *path, 218 struct audit_names *n, const struct path *path,
219 int record_num, int *call_panic); 219 int record_num, int *call_panic);
220 220
221extern int audit_pid; 221extern int auditd_test_task(const struct task_struct *task);
222 222
223#define AUDIT_INODE_BUCKETS 32 223#define AUDIT_INODE_BUCKETS 32
224extern struct list_head audit_inode_hash[AUDIT_INODE_BUCKETS]; 224extern struct list_head audit_inode_hash[AUDIT_INODE_BUCKETS];
@@ -250,10 +250,6 @@ struct audit_netlink_list {
250 250
251int audit_send_list(void *); 251int audit_send_list(void *);
252 252
253struct audit_net {
254 struct sock *nlsk;
255};
256
257extern int selinux_audit_rule_update(void); 253extern int selinux_audit_rule_update(void);
258 254
259extern struct mutex audit_filter_mutex; 255extern struct mutex audit_filter_mutex;
@@ -337,14 +333,7 @@ extern u32 audit_sig_sid;
337extern int audit_filter(int msgtype, unsigned int listtype); 333extern int audit_filter(int msgtype, unsigned int listtype);
338 334
339#ifdef CONFIG_AUDITSYSCALL 335#ifdef CONFIG_AUDITSYSCALL
340extern int __audit_signal_info(int sig, struct task_struct *t); 336extern int audit_signal_info(int sig, struct task_struct *t);
341static inline int audit_signal_info(int sig, struct task_struct *t)
342{
343 if (unlikely((audit_pid && t->tgid == audit_pid) ||
344 (audit_signals && !audit_dummy_context())))
345 return __audit_signal_info(sig, t);
346 return 0;
347}
348extern void audit_filter_inodes(struct task_struct *, struct audit_context *); 337extern void audit_filter_inodes(struct task_struct *, struct audit_context *);
349extern struct list_head *audit_killed_trees(void); 338extern struct list_head *audit_killed_trees(void);
350#else 339#else
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index d6a8de5f8fa3..1c2333155893 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -762,7 +762,7 @@ static enum audit_state audit_filter_syscall(struct task_struct *tsk,
762 struct audit_entry *e; 762 struct audit_entry *e;
763 enum audit_state state; 763 enum audit_state state;
764 764
765 if (audit_pid && tsk->tgid == audit_pid) 765 if (auditd_test_task(tsk))
766 return AUDIT_DISABLED; 766 return AUDIT_DISABLED;
767 767
768 rcu_read_lock(); 768 rcu_read_lock();
@@ -816,7 +816,7 @@ void audit_filter_inodes(struct task_struct *tsk, struct audit_context *ctx)
816{ 816{
817 struct audit_names *n; 817 struct audit_names *n;
818 818
819 if (audit_pid && tsk->tgid == audit_pid) 819 if (auditd_test_task(tsk))
820 return; 820 return;
821 821
822 rcu_read_lock(); 822 rcu_read_lock();
@@ -2249,26 +2249,27 @@ void __audit_ptrace(struct task_struct *t)
2249 * If the audit subsystem is being terminated, record the task (pid) 2249 * If the audit subsystem is being terminated, record the task (pid)
2250 * and uid that is doing that. 2250 * and uid that is doing that.
2251 */ 2251 */
2252int __audit_signal_info(int sig, struct task_struct *t) 2252int audit_signal_info(int sig, struct task_struct *t)
2253{ 2253{
2254 struct audit_aux_data_pids *axp; 2254 struct audit_aux_data_pids *axp;
2255 struct task_struct *tsk = current; 2255 struct task_struct *tsk = current;
2256 struct audit_context *ctx = tsk->audit_context; 2256 struct audit_context *ctx = tsk->audit_context;
2257 kuid_t uid = current_uid(), t_uid = task_uid(t); 2257 kuid_t uid = current_uid(), t_uid = task_uid(t);
2258 2258
2259 if (audit_pid && t->tgid == audit_pid) { 2259 if (auditd_test_task(t) &&
2260 if (sig == SIGTERM || sig == SIGHUP || sig == SIGUSR1 || sig == SIGUSR2) { 2260 (sig == SIGTERM || sig == SIGHUP ||
2261 audit_sig_pid = task_tgid_nr(tsk); 2261 sig == SIGUSR1 || sig == SIGUSR2)) {
2262 if (uid_valid(tsk->loginuid)) 2262 audit_sig_pid = task_tgid_nr(tsk);
2263 audit_sig_uid = tsk->loginuid; 2263 if (uid_valid(tsk->loginuid))
2264 else 2264 audit_sig_uid = tsk->loginuid;
2265 audit_sig_uid = uid; 2265 else
2266 security_task_getsecid(tsk, &audit_sig_sid); 2266 audit_sig_uid = uid;
2267 } 2267 security_task_getsecid(tsk, &audit_sig_sid);
2268 if (!audit_signals || audit_dummy_context())
2269 return 0;
2270 } 2268 }
2271 2269
2270 if (!audit_signals || audit_dummy_context())
2271 return 0;
2272
2272 /* optimize the common case by putting first signal recipient directly 2273 /* optimize the common case by putting first signal recipient directly
2273 * in audit_context */ 2274 * in audit_context */
2274 if (!ctx->target_pid) { 2275 if (!ctx->target_pid) {
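Every audit_pid comparison in auditsc.c now funnels through auditd_test_task(), whose definition is not part of these hunks. A body consistent with the audit.h declaration and the RCU conventions used elsewhere in this patch would look roughly like the following -- an assumption for illustration, not text quoted from the diff:

    /* ASSUMED implementation: matches the declared signature and the
     * rcu_read_lock()-around-auditd_conn.pid pattern seen above */
    int auditd_test_task(const struct task_struct *task)
    {
        int rc;

        rcu_read_lock();
        rc = (auditd_conn.pid && task->tgid == auditd_conn.pid) ? 1 : 0;
        rcu_read_unlock();

        return rc;
    }

Note that the restructured audit_signal_info() also moves the "!audit_signals || audit_dummy_context()" early return out of the auditd-only branch so it applies to every caller, compensating for the inline fast-path wrapper deleted from audit.h.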
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 3ea87fb19a94..361a69dfe543 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -13,11 +13,12 @@
13#include <linux/bpf.h> 13#include <linux/bpf.h>
14#include <linux/jhash.h> 14#include <linux/jhash.h>
15#include <linux/filter.h> 15#include <linux/filter.h>
16#include <linux/rculist_nulls.h>
16#include "percpu_freelist.h" 17#include "percpu_freelist.h"
17#include "bpf_lru_list.h" 18#include "bpf_lru_list.h"
18 19
19struct bucket { 20struct bucket {
20 struct hlist_head head; 21 struct hlist_nulls_head head;
21 raw_spinlock_t lock; 22 raw_spinlock_t lock;
22}; 23};
23 24
@@ -29,28 +30,26 @@ struct bpf_htab {
29 struct pcpu_freelist freelist; 30 struct pcpu_freelist freelist;
30 struct bpf_lru lru; 31 struct bpf_lru lru;
31 }; 32 };
32 void __percpu *extra_elems; 33 struct htab_elem *__percpu *extra_elems;
33 atomic_t count; /* number of elements in this hashtable */ 34 atomic_t count; /* number of elements in this hashtable */
34 u32 n_buckets; /* number of hash buckets */ 35 u32 n_buckets; /* number of hash buckets */
35 u32 elem_size; /* size of each element in bytes */ 36 u32 elem_size; /* size of each element in bytes */
36}; 37};
37 38
38enum extra_elem_state {
39 HTAB_NOT_AN_EXTRA_ELEM = 0,
40 HTAB_EXTRA_ELEM_FREE,
41 HTAB_EXTRA_ELEM_USED
42};
43
44/* each htab element is struct htab_elem + key + value */ 39/* each htab element is struct htab_elem + key + value */
45struct htab_elem { 40struct htab_elem {
46 union { 41 union {
47 struct hlist_node hash_node; 42 struct hlist_nulls_node hash_node;
48 struct bpf_htab *htab; 43 struct {
49 struct pcpu_freelist_node fnode; 44 void *padding;
45 union {
46 struct bpf_htab *htab;
47 struct pcpu_freelist_node fnode;
48 };
49 };
50 }; 50 };
51 union { 51 union {
52 struct rcu_head rcu; 52 struct rcu_head rcu;
53 enum extra_elem_state state;
54 struct bpf_lru_node lru_node; 53 struct bpf_lru_node lru_node;
55 }; 54 };
56 u32 hash; 55 u32 hash;
@@ -71,6 +70,11 @@ static bool htab_is_percpu(const struct bpf_htab *htab)
71 htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH; 70 htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
72} 71}
73 72
73static bool htab_is_prealloc(const struct bpf_htab *htab)
74{
75 return !(htab->map.map_flags & BPF_F_NO_PREALLOC);
76}
77
74static inline void htab_elem_set_ptr(struct htab_elem *l, u32 key_size, 78static inline void htab_elem_set_ptr(struct htab_elem *l, u32 key_size,
75 void __percpu *pptr) 79 void __percpu *pptr)
76{ 80{
@@ -122,17 +126,20 @@ static struct htab_elem *prealloc_lru_pop(struct bpf_htab *htab, void *key,
122 126
123static int prealloc_init(struct bpf_htab *htab) 127static int prealloc_init(struct bpf_htab *htab)
124{ 128{
129 u32 num_entries = htab->map.max_entries;
125 int err = -ENOMEM, i; 130 int err = -ENOMEM, i;
126 131
127 htab->elems = bpf_map_area_alloc(htab->elem_size * 132 if (!htab_is_percpu(htab) && !htab_is_lru(htab))
128 htab->map.max_entries); 133 num_entries += num_possible_cpus();
134
135 htab->elems = bpf_map_area_alloc(htab->elem_size * num_entries);
129 if (!htab->elems) 136 if (!htab->elems)
130 return -ENOMEM; 137 return -ENOMEM;
131 138
132 if (!htab_is_percpu(htab)) 139 if (!htab_is_percpu(htab))
133 goto skip_percpu_elems; 140 goto skip_percpu_elems;
134 141
135 for (i = 0; i < htab->map.max_entries; i++) { 142 for (i = 0; i < num_entries; i++) {
136 u32 size = round_up(htab->map.value_size, 8); 143 u32 size = round_up(htab->map.value_size, 8);
137 void __percpu *pptr; 144 void __percpu *pptr;
138 145
@@ -160,10 +167,11 @@ skip_percpu_elems:
160 if (htab_is_lru(htab)) 167 if (htab_is_lru(htab))
161 bpf_lru_populate(&htab->lru, htab->elems, 168 bpf_lru_populate(&htab->lru, htab->elems,
162 offsetof(struct htab_elem, lru_node), 169 offsetof(struct htab_elem, lru_node),
163 htab->elem_size, htab->map.max_entries); 170 htab->elem_size, num_entries);
164 else 171 else
165 pcpu_freelist_populate(&htab->freelist, htab->elems, 172 pcpu_freelist_populate(&htab->freelist,
166 htab->elem_size, htab->map.max_entries); 173 htab->elems + offsetof(struct htab_elem, fnode),
174 htab->elem_size, num_entries);
167 175
168 return 0; 176 return 0;
169 177
@@ -184,16 +192,22 @@ static void prealloc_destroy(struct bpf_htab *htab)
184 192
185static int alloc_extra_elems(struct bpf_htab *htab) 193static int alloc_extra_elems(struct bpf_htab *htab)
186{ 194{
187 void __percpu *pptr; 195 struct htab_elem *__percpu *pptr, *l_new;
196 struct pcpu_freelist_node *l;
188 int cpu; 197 int cpu;
189 198
190 pptr = __alloc_percpu_gfp(htab->elem_size, 8, GFP_USER | __GFP_NOWARN); 199 pptr = __alloc_percpu_gfp(sizeof(struct htab_elem *), 8,
200 GFP_USER | __GFP_NOWARN);
191 if (!pptr) 201 if (!pptr)
192 return -ENOMEM; 202 return -ENOMEM;
193 203
194 for_each_possible_cpu(cpu) { 204 for_each_possible_cpu(cpu) {
195 ((struct htab_elem *)per_cpu_ptr(pptr, cpu))->state = 205 l = pcpu_freelist_pop(&htab->freelist);
196 HTAB_EXTRA_ELEM_FREE; 206 /* pop will succeed, since prealloc_init()
207 * preallocated extra num_possible_cpus elements
208 */
209 l_new = container_of(l, struct htab_elem, fnode);
210 *per_cpu_ptr(pptr, cpu) = l_new;
197 } 211 }
198 htab->extra_elems = pptr; 212 htab->extra_elems = pptr;
199 return 0; 213 return 0;
@@ -217,6 +231,11 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
217 int err, i; 231 int err, i;
218 u64 cost; 232 u64 cost;
219 233
234 BUILD_BUG_ON(offsetof(struct htab_elem, htab) !=
235 offsetof(struct htab_elem, hash_node.pprev));
236 BUILD_BUG_ON(offsetof(struct htab_elem, fnode.next) !=
237 offsetof(struct htab_elem, hash_node.pprev));
238
220 if (lru && !capable(CAP_SYS_ADMIN)) 239 if (lru && !capable(CAP_SYS_ADMIN))
221 /* LRU implementation is much complicated than other 240 /* LRU implementation is much complicated than other
222 * maps. Hence, limit to CAP_SYS_ADMIN for now. 241 * maps. Hence, limit to CAP_SYS_ADMIN for now.
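The two BUILD_BUG_ON checks above pin down the layout aliasing that makes the reworked htab_elem union safe: while an element is parked on the percpu freelist (or carries its htab back-pointer), that word must occupy exactly the bytes that hash_node.pprev uses once the element is linked into a bucket. The same style of invariant, demonstrated standalone with C11 _Static_assert in place of BUILD_BUG_ON (the struct here is a stand-in, not the kernel's):

    #include <stddef.h>
    #include <stdio.h>

    struct node { struct node *next, **pprev; };

    struct elem {
        union {
            struct node hash_node;          /* live: linked into a bucket */
            struct {
                void *padding;              /* overlays hash_node.next */
                struct node *free_next;     /* overlays hash_node.pprev */
            };
        };
    };

    /* the guarantee BUILD_BUG_ON() enforces in the hunk above */
    _Static_assert(offsetof(struct elem, free_next) ==
                   offsetof(struct elem, hash_node.pprev),
                   "freelist link must alias hash_node.pprev");

    int main(void)
    {
        printf("offsets agree at byte %zu\n",
               offsetof(struct elem, hash_node.pprev));
        return 0;
    }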
@@ -326,29 +345,29 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
326 goto free_htab; 345 goto free_htab;
327 346
328 for (i = 0; i < htab->n_buckets; i++) { 347 for (i = 0; i < htab->n_buckets; i++) {
329 INIT_HLIST_HEAD(&htab->buckets[i].head); 348 INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i);
330 raw_spin_lock_init(&htab->buckets[i].lock); 349 raw_spin_lock_init(&htab->buckets[i].lock);
331 } 350 }
332 351
333 if (!percpu && !lru) {
334 /* lru itself can remove the least used element, so
335 * there is no need for an extra elem during map_update.
336 */
337 err = alloc_extra_elems(htab);
338 if (err)
339 goto free_buckets;
340 }
341
342 if (prealloc) { 352 if (prealloc) {
343 err = prealloc_init(htab); 353 err = prealloc_init(htab);
344 if (err) 354 if (err)
345 goto free_extra_elems; 355 goto free_buckets;
356
357 if (!percpu && !lru) {
358 /* lru itself can remove the least used element, so
359 * there is no need for an extra elem during map_update.
360 */
361 err = alloc_extra_elems(htab);
362 if (err)
363 goto free_prealloc;
364 }
346 } 365 }
347 366
348 return &htab->map; 367 return &htab->map;
349 368
350free_extra_elems: 369free_prealloc:
351 free_percpu(htab->extra_elems); 370 prealloc_destroy(htab);
352free_buckets: 371free_buckets:
353 bpf_map_area_free(htab->buckets); 372 bpf_map_area_free(htab->buckets);
354free_htab: 373free_htab:
@@ -366,20 +385,44 @@ static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash)
366 return &htab->buckets[hash & (htab->n_buckets - 1)]; 385 return &htab->buckets[hash & (htab->n_buckets - 1)];
367} 386}
368 387
369static inline struct hlist_head *select_bucket(struct bpf_htab *htab, u32 hash) 388static inline struct hlist_nulls_head *select_bucket(struct bpf_htab *htab, u32 hash)
370{ 389{
371 return &__select_bucket(htab, hash)->head; 390 return &__select_bucket(htab, hash)->head;
372} 391}
373 392
374static struct htab_elem *lookup_elem_raw(struct hlist_head *head, u32 hash, 393/* this lookup function can only be called with bucket lock taken */
394static struct htab_elem *lookup_elem_raw(struct hlist_nulls_head *head, u32 hash,
375 void *key, u32 key_size) 395 void *key, u32 key_size)
376{ 396{
397 struct hlist_nulls_node *n;
398 struct htab_elem *l;
399
400 hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
401 if (l->hash == hash && !memcmp(&l->key, key, key_size))
402 return l;
403
404 return NULL;
405}
406
407/* can be called without bucket lock. it will repeat the loop in
408 * the unlikely event when elements moved from one bucket into another
409 * while link list is being walked
410 */
411static struct htab_elem *lookup_nulls_elem_raw(struct hlist_nulls_head *head,
412 u32 hash, void *key,
413 u32 key_size, u32 n_buckets)
414{
415 struct hlist_nulls_node *n;
377 struct htab_elem *l; 416 struct htab_elem *l;
378 417
379 hlist_for_each_entry_rcu(l, head, hash_node) 418again:
419 hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
380 if (l->hash == hash && !memcmp(&l->key, key, key_size)) 420 if (l->hash == hash && !memcmp(&l->key, key, key_size))
381 return l; 421 return l;
382 422
423 if (unlikely(get_nulls_value(n) != (hash & (n_buckets - 1))))
424 goto again;
425
383 return NULL; 426 return NULL;
384} 427}
385 428
@@ -387,7 +430,7 @@ static struct htab_elem *lookup_elem_raw(struct hlist_head *head, u32 hash,
387static void *__htab_map_lookup_elem(struct bpf_map *map, void *key) 430static void *__htab_map_lookup_elem(struct bpf_map *map, void *key)
388{ 431{
389 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); 432 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
390 struct hlist_head *head; 433 struct hlist_nulls_head *head;
391 struct htab_elem *l; 434 struct htab_elem *l;
392 u32 hash, key_size; 435 u32 hash, key_size;
393 436
@@ -400,7 +443,7 @@ static void *__htab_map_lookup_elem(struct bpf_map *map, void *key)
400 443
401 head = select_bucket(htab, hash); 444 head = select_bucket(htab, hash);
402 445
403 l = lookup_elem_raw(head, hash, key, key_size); 446 l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);
404 447
405 return l; 448 return l;
406} 449}
@@ -433,8 +476,9 @@ static void *htab_lru_map_lookup_elem(struct bpf_map *map, void *key)
433static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node) 476static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node)
434{ 477{
435 struct bpf_htab *htab = (struct bpf_htab *)arg; 478 struct bpf_htab *htab = (struct bpf_htab *)arg;
436 struct htab_elem *l, *tgt_l; 479 struct htab_elem *l = NULL, *tgt_l;
437 struct hlist_head *head; 480 struct hlist_nulls_head *head;
481 struct hlist_nulls_node *n;
438 unsigned long flags; 482 unsigned long flags;
439 struct bucket *b; 483 struct bucket *b;
440 484
@@ -444,9 +488,9 @@ static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node)
444 488
445 raw_spin_lock_irqsave(&b->lock, flags); 489 raw_spin_lock_irqsave(&b->lock, flags);
446 490
447 hlist_for_each_entry_rcu(l, head, hash_node) 491 hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
448 if (l == tgt_l) { 492 if (l == tgt_l) {
449 hlist_del_rcu(&l->hash_node); 493 hlist_nulls_del_rcu(&l->hash_node);
450 break; 494 break;
451 } 495 }
452 496
@@ -459,7 +503,7 @@ static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node)
459static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key) 503static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
460{ 504{
461 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); 505 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
462 struct hlist_head *head; 506 struct hlist_nulls_head *head;
463 struct htab_elem *l, *next_l; 507 struct htab_elem *l, *next_l;
464 u32 hash, key_size; 508 u32 hash, key_size;
465 int i; 509 int i;
@@ -473,7 +517,7 @@ static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
473 head = select_bucket(htab, hash); 517 head = select_bucket(htab, hash);
474 518
475 /* lookup the key */ 519 /* lookup the key */
476 l = lookup_elem_raw(head, hash, key, key_size); 520 l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);
477 521
478 if (!l) { 522 if (!l) {
479 i = 0; 523 i = 0;
@@ -481,7 +525,7 @@ static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
481 } 525 }
482 526
483 /* key was found, get next key in the same bucket */ 527 /* key was found, get next key in the same bucket */
484 next_l = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&l->hash_node)), 528 next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_next_rcu(&l->hash_node)),
485 struct htab_elem, hash_node); 529 struct htab_elem, hash_node);
486 530
487 if (next_l) { 531 if (next_l) {
@@ -500,7 +544,7 @@ find_first_elem:
500 head = select_bucket(htab, i); 544 head = select_bucket(htab, i);
501 545
502 /* pick first element in the bucket */ 546 /* pick first element in the bucket */
503 next_l = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)), 547 next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_first_rcu(head)),
504 struct htab_elem, hash_node); 548 struct htab_elem, hash_node);
505 if (next_l) { 549 if (next_l) {
506 /* if it's not empty, just return it */ 550 /* if it's not empty, just return it */
@@ -538,12 +582,7 @@ static void htab_elem_free_rcu(struct rcu_head *head)
538 582
539static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l) 583static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
540{ 584{
541 if (l->state == HTAB_EXTRA_ELEM_USED) { 585 if (htab_is_prealloc(htab)) {
542 l->state = HTAB_EXTRA_ELEM_FREE;
543 return;
544 }
545
546 if (!(htab->map.map_flags & BPF_F_NO_PREALLOC)) {
547 pcpu_freelist_push(&htab->freelist, &l->fnode); 586 pcpu_freelist_push(&htab->freelist, &l->fnode);
548 } else { 587 } else {
549 atomic_dec(&htab->count); 588 atomic_dec(&htab->count);
@@ -573,43 +612,43 @@ static void pcpu_copy_value(struct bpf_htab *htab, void __percpu *pptr,
573static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key, 612static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
574 void *value, u32 key_size, u32 hash, 613 void *value, u32 key_size, u32 hash,
575 bool percpu, bool onallcpus, 614 bool percpu, bool onallcpus,
576 bool old_elem_exists) 615 struct htab_elem *old_elem)
577{ 616{
578 u32 size = htab->map.value_size; 617 u32 size = htab->map.value_size;
579 bool prealloc = !(htab->map.map_flags & BPF_F_NO_PREALLOC); 618 bool prealloc = htab_is_prealloc(htab);
580 struct htab_elem *l_new; 619 struct htab_elem *l_new, **pl_new;
581 void __percpu *pptr; 620 void __percpu *pptr;
582 int err = 0;
583 621
584 if (prealloc) { 622 if (prealloc) {
585 l_new = (struct htab_elem *)pcpu_freelist_pop(&htab->freelist); 623 if (old_elem) {
586 if (!l_new) 624 /* if we're updating the existing element,
587 err = -E2BIG; 625 * use per-cpu extra elems to avoid freelist_pop/push
588 } else { 626 */
589 if (atomic_inc_return(&htab->count) > htab->map.max_entries) { 627 pl_new = this_cpu_ptr(htab->extra_elems);
590 atomic_dec(&htab->count); 628 l_new = *pl_new;
591 err = -E2BIG; 629 *pl_new = old_elem;
592 } else { 630 } else {
593 l_new = kmalloc(htab->elem_size, 631 struct pcpu_freelist_node *l;
594 GFP_ATOMIC | __GFP_NOWARN);
595 if (!l_new)
596 return ERR_PTR(-ENOMEM);
597 }
598 }
599 632
600 if (err) { 633 l = pcpu_freelist_pop(&htab->freelist);
601 if (!old_elem_exists) 634 if (!l)
602 return ERR_PTR(err); 635 return ERR_PTR(-E2BIG);
603 636 l_new = container_of(l, struct htab_elem, fnode);
604 /* if we're updating the existing element and the hash table 637 }
605 * is full, use per-cpu extra elems
606 */
607 l_new = this_cpu_ptr(htab->extra_elems);
608 if (l_new->state != HTAB_EXTRA_ELEM_FREE)
609 return ERR_PTR(-E2BIG);
610 l_new->state = HTAB_EXTRA_ELEM_USED;
611 } else { 638 } else {
612 l_new->state = HTAB_NOT_AN_EXTRA_ELEM; 639 if (atomic_inc_return(&htab->count) > htab->map.max_entries)
640 if (!old_elem) {
641 /* when map is full and update() is replacing
642 * old element, it's ok to allocate, since
643 * old element will be freed immediately.
644 * Otherwise return an error
645 */
646 atomic_dec(&htab->count);
647 return ERR_PTR(-E2BIG);
648 }
649 l_new = kmalloc(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN);
650 if (!l_new)
651 return ERR_PTR(-ENOMEM);
613 } 652 }
614 653
615 memcpy(l_new->key, key, key_size); 654 memcpy(l_new->key, key, key_size);
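The old_elem branch above is the heart of the rework: updating an existing key in a preallocated map no longer pops and pushes the freelist; it swaps through the CPU's spare element, so the spare becomes the new element and the displaced old element becomes the next spare. A standalone sketch of that swap (one spare per possible CPU in the kernel; a single global here):

    #include <stdio.h>

    struct elem { int key, val; };

    static struct elem spare_storage = { -1, 0 };
    static struct elem *percpu_spare = &spare_storage; /* this_cpu_ptr() in the kernel */

    static struct elem *update_in_place(struct elem *old, int key, int val)
    {
        struct elem *new = percpu_spare;  /* take the spare */

        percpu_spare = old;               /* old element is parked as the spare */
        new->key = key;
        new->val = val;
        return new;                       /* caller links new, unlinks old */
    }

    int main(void)
    {
        struct elem a = { 7, 1 };
        struct elem *b = update_in_place(&a, 7, 2);

        printf("new={%d,%d}, spare is the old element: %d\n",
               b->key, b->val, percpu_spare == &a);
        return 0;
    }

This is also why prealloc_init() now sizes the element area at max_entries + num_possible_cpus() for plain hash maps, and why htab_map_update_elem() skips free_htab_elem() when the map is preallocated.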
@@ -661,7 +700,7 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
661{ 700{
662 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); 701 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
663 struct htab_elem *l_new = NULL, *l_old; 702 struct htab_elem *l_new = NULL, *l_old;
664 struct hlist_head *head; 703 struct hlist_nulls_head *head;
665 unsigned long flags; 704 unsigned long flags;
666 struct bucket *b; 705 struct bucket *b;
667 u32 key_size, hash; 706 u32 key_size, hash;
@@ -690,7 +729,7 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
690 goto err; 729 goto err;
691 730
692 l_new = alloc_htab_elem(htab, key, value, key_size, hash, false, false, 731 l_new = alloc_htab_elem(htab, key, value, key_size, hash, false, false,
693 !!l_old); 732 l_old);
694 if (IS_ERR(l_new)) { 733 if (IS_ERR(l_new)) {
695 /* all pre-allocated elements are in use or memory exhausted */ 734 /* all pre-allocated elements are in use or memory exhausted */
696 ret = PTR_ERR(l_new); 735 ret = PTR_ERR(l_new);
@@ -700,10 +739,11 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
700 /* add new element to the head of the list, so that 739 /* add new element to the head of the list, so that
701 * concurrent search will find it before old elem 740 * concurrent search will find it before old elem
702 */ 741 */
703 hlist_add_head_rcu(&l_new->hash_node, head); 742 hlist_nulls_add_head_rcu(&l_new->hash_node, head);
704 if (l_old) { 743 if (l_old) {
705 hlist_del_rcu(&l_old->hash_node); 744 hlist_nulls_del_rcu(&l_old->hash_node);
706 free_htab_elem(htab, l_old); 745 if (!htab_is_prealloc(htab))
746 free_htab_elem(htab, l_old);
707 } 747 }
708 ret = 0; 748 ret = 0;
709err: 749err:
@@ -716,7 +756,7 @@ static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
716{ 756{
717 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); 757 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
718 struct htab_elem *l_new, *l_old = NULL; 758 struct htab_elem *l_new, *l_old = NULL;
719 struct hlist_head *head; 759 struct hlist_nulls_head *head;
720 unsigned long flags; 760 unsigned long flags;
721 struct bucket *b; 761 struct bucket *b;
722 u32 key_size, hash; 762 u32 key_size, hash;
@@ -757,10 +797,10 @@ static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
757 /* add new element to the head of the list, so that 797 /* add new element to the head of the list, so that
758 * concurrent search will find it before old elem 798 * concurrent search will find it before old elem
759 */ 799 */
760 hlist_add_head_rcu(&l_new->hash_node, head); 800 hlist_nulls_add_head_rcu(&l_new->hash_node, head);
761 if (l_old) { 801 if (l_old) {
762 bpf_lru_node_set_ref(&l_new->lru_node); 802 bpf_lru_node_set_ref(&l_new->lru_node);
763 hlist_del_rcu(&l_old->hash_node); 803 hlist_nulls_del_rcu(&l_old->hash_node);
764 } 804 }
765 ret = 0; 805 ret = 0;
766 806
@@ -781,7 +821,7 @@ static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
781{ 821{
782 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); 822 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
783 struct htab_elem *l_new = NULL, *l_old; 823 struct htab_elem *l_new = NULL, *l_old;
784 struct hlist_head *head; 824 struct hlist_nulls_head *head;
785 unsigned long flags; 825 unsigned long flags;
786 struct bucket *b; 826 struct bucket *b;
787 u32 key_size, hash; 827 u32 key_size, hash;
@@ -815,12 +855,12 @@ static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
815 value, onallcpus); 855 value, onallcpus);
816 } else { 856 } else {
817 l_new = alloc_htab_elem(htab, key, value, key_size, 857 l_new = alloc_htab_elem(htab, key, value, key_size,
818 hash, true, onallcpus, false); 858 hash, true, onallcpus, NULL);
819 if (IS_ERR(l_new)) { 859 if (IS_ERR(l_new)) {
820 ret = PTR_ERR(l_new); 860 ret = PTR_ERR(l_new);
821 goto err; 861 goto err;
822 } 862 }
823 hlist_add_head_rcu(&l_new->hash_node, head); 863 hlist_nulls_add_head_rcu(&l_new->hash_node, head);
824 } 864 }
825 ret = 0; 865 ret = 0;
826err: 866err:
@@ -834,7 +874,7 @@ static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
834{ 874{
835 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); 875 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
836 struct htab_elem *l_new = NULL, *l_old; 876 struct htab_elem *l_new = NULL, *l_old;
837 struct hlist_head *head; 877 struct hlist_nulls_head *head;
838 unsigned long flags; 878 unsigned long flags;
839 struct bucket *b; 879 struct bucket *b;
840 u32 key_size, hash; 880 u32 key_size, hash;
@@ -882,7 +922,7 @@ static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
882 } else { 922 } else {
883 pcpu_copy_value(htab, htab_elem_get_ptr(l_new, key_size), 923 pcpu_copy_value(htab, htab_elem_get_ptr(l_new, key_size),
884 value, onallcpus); 924 value, onallcpus);
885 hlist_add_head_rcu(&l_new->hash_node, head); 925 hlist_nulls_add_head_rcu(&l_new->hash_node, head);
886 l_new = NULL; 926 l_new = NULL;
887 } 927 }
888 ret = 0; 928 ret = 0;
@@ -910,7 +950,7 @@ static int htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
910static int htab_map_delete_elem(struct bpf_map *map, void *key) 950static int htab_map_delete_elem(struct bpf_map *map, void *key)
911{ 951{
912 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); 952 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
913 struct hlist_head *head; 953 struct hlist_nulls_head *head;
914 struct bucket *b; 954 struct bucket *b;
915 struct htab_elem *l; 955 struct htab_elem *l;
916 unsigned long flags; 956 unsigned long flags;
@@ -930,7 +970,7 @@ static int htab_map_delete_elem(struct bpf_map *map, void *key)
930 l = lookup_elem_raw(head, hash, key, key_size); 970 l = lookup_elem_raw(head, hash, key, key_size);
931 971
932 if (l) { 972 if (l) {
933 hlist_del_rcu(&l->hash_node); 973 hlist_nulls_del_rcu(&l->hash_node);
934 free_htab_elem(htab, l); 974 free_htab_elem(htab, l);
935 ret = 0; 975 ret = 0;
936 } 976 }
@@ -942,7 +982,7 @@ static int htab_map_delete_elem(struct bpf_map *map, void *key)
942static int htab_lru_map_delete_elem(struct bpf_map *map, void *key) 982static int htab_lru_map_delete_elem(struct bpf_map *map, void *key)
943{ 983{
944 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); 984 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
945 struct hlist_head *head; 985 struct hlist_nulls_head *head;
946 struct bucket *b; 986 struct bucket *b;
947 struct htab_elem *l; 987 struct htab_elem *l;
948 unsigned long flags; 988 unsigned long flags;
@@ -962,7 +1002,7 @@ static int htab_lru_map_delete_elem(struct bpf_map *map, void *key)
962 l = lookup_elem_raw(head, hash, key, key_size); 1002 l = lookup_elem_raw(head, hash, key, key_size);
963 1003
964 if (l) { 1004 if (l) {
965 hlist_del_rcu(&l->hash_node); 1005 hlist_nulls_del_rcu(&l->hash_node);
966 ret = 0; 1006 ret = 0;
967 } 1007 }
968 1008
@@ -977,14 +1017,13 @@ static void delete_all_elements(struct bpf_htab *htab)
977 int i; 1017 int i;
978 1018
979 for (i = 0; i < htab->n_buckets; i++) { 1019 for (i = 0; i < htab->n_buckets; i++) {
980 struct hlist_head *head = select_bucket(htab, i); 1020 struct hlist_nulls_head *head = select_bucket(htab, i);
981 struct hlist_node *n; 1021 struct hlist_nulls_node *n;
982 struct htab_elem *l; 1022 struct htab_elem *l;
983 1023
984 hlist_for_each_entry_safe(l, n, head, hash_node) { 1024 hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
985 hlist_del_rcu(&l->hash_node); 1025 hlist_nulls_del_rcu(&l->hash_node);
986 if (l->state != HTAB_EXTRA_ELEM_USED) 1026 htab_elem_free(htab, l);
987 htab_elem_free(htab, l);
988 } 1027 }
989 } 1028 }
990} 1029}
@@ -1004,7 +1043,7 @@ static void htab_map_free(struct bpf_map *map)
1004 * not have executed. Wait for them. 1043 * not have executed. Wait for them.
1005 */ 1044 */
1006 rcu_barrier(); 1045 rcu_barrier();
1007 if (htab->map.map_flags & BPF_F_NO_PREALLOC) 1046 if (!htab_is_prealloc(htab))
1008 delete_all_elements(htab); 1047 delete_all_elements(htab);
1009 else 1048 else
1010 prealloc_destroy(htab); 1049 prealloc_destroy(htab);
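From userspace, none of this changes the map-creation contract: plain hash maps still preallocate by default, and BPF_F_NO_PREALLOC opts into on-demand allocation, which is exactly the distinction htab_is_prealloc() now names. A minimal creation sketch against the raw syscall (assuming a 4.11-era linux/bpf.h; field names are from the uapi header):

    #include <linux/bpf.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
        union bpf_attr attr;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.map_type = BPF_MAP_TYPE_HASH;
        attr.key_size = 4;
        attr.value_size = 8;
        attr.max_entries = 128;
        attr.map_flags = 0;  /* default: fully preallocated elements */

        fd = (int)syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
        if (fd < 0)
            perror("BPF_MAP_CREATE");
        else
            printf("map fd %d (add BPF_F_NO_PREALLOC to allocate lazily)\n", fd);
        return 0;
    }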
diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c
index 8bfe0afaee10..b37bd9ab7f57 100644
--- a/kernel/bpf/lpm_trie.c
+++ b/kernel/bpf/lpm_trie.c
@@ -500,9 +500,15 @@ unlock:
500 raw_spin_unlock(&trie->lock); 500 raw_spin_unlock(&trie->lock);
501} 501}
502 502
503static int trie_get_next_key(struct bpf_map *map, void *key, void *next_key)
504{
505 return -ENOTSUPP;
506}
507
503static const struct bpf_map_ops trie_ops = { 508static const struct bpf_map_ops trie_ops = {
504 .map_alloc = trie_alloc, 509 .map_alloc = trie_alloc,
505 .map_free = trie_free, 510 .map_free = trie_free,
511 .map_get_next_key = trie_get_next_key,
506 .map_lookup_elem = trie_lookup_elem, 512 .map_lookup_elem = trie_lookup_elem,
507 .map_update_elem = trie_update_elem, 513 .map_update_elem = trie_update_elem,
508 .map_delete_elem = trie_delete_elem, 514 .map_delete_elem = trie_delete_elem,
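Before this stub, trie_ops had no .map_get_next_key at all, so a BPF_MAP_GET_NEXT_KEY call on an LPM trie dereferenced a NULL function pointer; now it fails cleanly with ENOTSUPP (524, a kernel-internal errno that reaches userspace unnamed). A sketch of what userspace observes (the map parameters are my reading of the 4.11 trie_alloc() checks -- an assumption, verify against your headers):

    #include <errno.h>
    #include <linux/bpf.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static int bpf(int cmd, union bpf_attr *attr)
    {
        return (int)syscall(__NR_bpf, cmd, attr, sizeof(*attr));
    }

    int main(void)
    {
        union bpf_attr attr;
        char key[8] = { 0 }, next[8];
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.map_type = BPF_MAP_TYPE_LPM_TRIE;
        attr.key_size = 8;   /* 4-byte prefixlen + 4-byte IPv4 data */
        attr.value_size = 1;
        attr.max_entries = 16;
        attr.map_flags = BPF_F_NO_PREALLOC;  /* required for LPM tries */
        fd = bpf(BPF_MAP_CREATE, &attr);
        if (fd < 0) {
            perror("BPF_MAP_CREATE");
            return 1;
        }

        memset(&attr, 0, sizeof(attr));
        attr.map_fd = fd;
        attr.key = (unsigned long)key;
        attr.next_key = (unsigned long)next;
        if (bpf(BPF_MAP_GET_NEXT_KEY, &attr) < 0)
            printf("get_next_key failed: errno %d (524 == ENOTSUPP)\n", errno);
        return 0;
    }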
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 796b68d00119..a834068a400e 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -765,38 +765,56 @@ static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
765 } 765 }
766} 766}
767 767
768static int check_ptr_alignment(struct bpf_verifier_env *env, 768static int check_pkt_ptr_alignment(const struct bpf_reg_state *reg,
769 struct bpf_reg_state *reg, int off, int size) 769 int off, int size)
770{ 770{
771 if (reg->type != PTR_TO_PACKET && reg->type != PTR_TO_MAP_VALUE_ADJ) {
772 if (off % size != 0) {
773 verbose("misaligned access off %d size %d\n",
774 off, size);
775 return -EACCES;
776 } else {
777 return 0;
778 }
779 }
780
781 if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
782 /* misaligned access to packet is ok on x86,arm,arm64 */
783 return 0;
784
785 if (reg->id && size != 1) { 771 if (reg->id && size != 1) {
786 verbose("Unknown packet alignment. Only byte-sized access allowed\n"); 772 verbose("Unknown alignment. Only byte-sized access allowed in packet access.\n");
787 return -EACCES; 773 return -EACCES;
788 } 774 }
789 775
790 /* skb->data is NET_IP_ALIGN-ed */ 776 /* skb->data is NET_IP_ALIGN-ed */
791 if (reg->type == PTR_TO_PACKET && 777 if ((NET_IP_ALIGN + reg->off + off) % size != 0) {
792 (NET_IP_ALIGN + reg->off + off) % size != 0) {
793 verbose("misaligned packet access off %d+%d+%d size %d\n", 778 verbose("misaligned packet access off %d+%d+%d size %d\n",
794 NET_IP_ALIGN, reg->off, off, size); 779 NET_IP_ALIGN, reg->off, off, size);
795 return -EACCES; 780 return -EACCES;
796 } 781 }
782
797 return 0; 783 return 0;
798} 784}
799 785
786static int check_val_ptr_alignment(const struct bpf_reg_state *reg,
787 int size)
788{
789 if (size != 1) {
790 verbose("Unknown alignment. Only byte-sized access allowed in value access.\n");
791 return -EACCES;
792 }
793
794 return 0;
795}
796
797static int check_ptr_alignment(const struct bpf_reg_state *reg,
798 int off, int size)
799{
800 switch (reg->type) {
801 case PTR_TO_PACKET:
802 return IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ? 0 :
803 check_pkt_ptr_alignment(reg, off, size);
804 case PTR_TO_MAP_VALUE_ADJ:
805 return IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ? 0 :
806 check_val_ptr_alignment(reg, size);
807 default:
808 if (off % size != 0) {
809 verbose("misaligned access off %d size %d\n",
810 off, size);
811 return -EACCES;
812 }
813
814 return 0;
815 }
816}
817
800/* check whether memory at (regno + off) is accessible for t = (read | write) 818/* check whether memory at (regno + off) is accessible for t = (read | write)
801 * if t==write, value_regno is a register which value is stored into memory 819 * if t==write, value_regno is a register which value is stored into memory
802 * if t==read, value_regno is a register which will receive the value from memory 820 * if t==read, value_regno is a register which will receive the value from memory
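The refactor above splits alignment policy by pointer class: packet and adjusted-map-value pointers skip the check entirely on CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS architectures, map-value-adjusted access is otherwise byte-only, and everything else keeps the plain off % size test. For the packet case, the interesting arithmetic is the NET_IP_ALIGN bias; a standalone check of that formula (the NET_IP_ALIGN value is assumed, as it is per-arch):

    #include <stdio.h>

    #define NET_IP_ALIGN 2  /* common value where unaligned loads are costly */

    static int pkt_access_misaligned(int reg_off, int off, int size)
    {
        /* mirrors the test in check_pkt_ptr_alignment() above */
        return (NET_IP_ALIGN + reg_off + off) % size != 0;
    }

    int main(void)
    {
        /* the 2-byte bias puts the IP header (packet offset 14) on a
         * 4-byte boundary, so a u32 load there is aligned */
        printf("off 14 size 4: %s\n",
               pkt_access_misaligned(0, 14, 4) ? "misaligned" : "aligned");
        printf("off 15 size 4: %s\n",
               pkt_access_misaligned(0, 15, 4) ? "misaligned" : "aligned");
        return 0;
    }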
@@ -818,7 +836,7 @@ static int check_mem_access(struct bpf_verifier_env *env, u32 regno, int off,
818 if (size < 0) 836 if (size < 0)
819 return size; 837 return size;
820 838
821 err = check_ptr_alignment(env, reg, off, size); 839 err = check_ptr_alignment(reg, off, size);
822 if (err) 840 if (err)
823 return err; 841 return err;
824 842
@@ -1925,6 +1943,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
1925 * register as unknown. 1943 * register as unknown.
1926 */ 1944 */
1927 if (env->allow_ptr_leaks && 1945 if (env->allow_ptr_leaks &&
1946 BPF_CLASS(insn->code) == BPF_ALU64 && opcode == BPF_ADD &&
1928 (dst_reg->type == PTR_TO_MAP_VALUE || 1947 (dst_reg->type == PTR_TO_MAP_VALUE ||
1929 dst_reg->type == PTR_TO_MAP_VALUE_ADJ)) 1948 dst_reg->type == PTR_TO_MAP_VALUE_ADJ))
1930 dst_reg->type = PTR_TO_MAP_VALUE_ADJ; 1949 dst_reg->type = PTR_TO_MAP_VALUE_ADJ;
@@ -1973,14 +1992,15 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *state,
1973 1992
1974 for (i = 0; i < MAX_BPF_REG; i++) 1993 for (i = 0; i < MAX_BPF_REG; i++)
1975 if (regs[i].type == PTR_TO_PACKET && regs[i].id == dst_reg->id) 1994 if (regs[i].type == PTR_TO_PACKET && regs[i].id == dst_reg->id)
1976 regs[i].range = dst_reg->off; 1995 /* keep the maximum range already checked */
1996 regs[i].range = max(regs[i].range, dst_reg->off);
1977 1997
1978 for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) { 1998 for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) {
1979 if (state->stack_slot_type[i] != STACK_SPILL) 1999 if (state->stack_slot_type[i] != STACK_SPILL)
1980 continue; 2000 continue;
1981 reg = &state->spilled_regs[i / BPF_REG_SIZE]; 2001 reg = &state->spilled_regs[i / BPF_REG_SIZE];
1982 if (reg->type == PTR_TO_PACKET && reg->id == dst_reg->id) 2002 if (reg->type == PTR_TO_PACKET && reg->id == dst_reg->id)
1983 reg->range = dst_reg->off; 2003 reg->range = max(reg->range, dst_reg->off);
1984 } 2004 }
1985} 2005}
1986 2006
diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c
index 56eba9caa632..1dc22f6b49f5 100644
--- a/kernel/cgroup/cgroup-v1.c
+++ b/kernel/cgroup/cgroup-v1.c
@@ -1329,7 +1329,7 @@ static int cgroup_css_links_read(struct seq_file *seq, void *v)
1329 struct task_struct *task; 1329 struct task_struct *task;
1330 int count = 0; 1330 int count = 0;
1331 1331
1332 seq_printf(seq, "css_set %p\n", cset); 1332 seq_printf(seq, "css_set %pK\n", cset);
1333 1333
1334 list_for_each_entry(task, &cset->tasks, cg_list) { 1334 list_for_each_entry(task, &cset->tasks, cg_list) {
1335 if (count++ > MAX_TASKS_SHOWN_PER_CSS) 1335 if (count++ > MAX_TASKS_SHOWN_PER_CSS)
diff --git a/kernel/cgroup/pids.c b/kernel/cgroup/pids.c
index e756dae49300..2237201d66d5 100644
--- a/kernel/cgroup/pids.c
+++ b/kernel/cgroup/pids.c
@@ -229,7 +229,7 @@ static int pids_can_fork(struct task_struct *task)
229 /* Only log the first time events_limit is incremented. */ 229 /* Only log the first time events_limit is incremented. */
230 if (atomic64_inc_return(&pids->events_limit) == 1) { 230 if (atomic64_inc_return(&pids->events_limit) == 1) {
231 pr_info("cgroup: fork rejected by pids controller in "); 231 pr_info("cgroup: fork rejected by pids controller in ");
232 pr_cont_cgroup_path(task_cgroup(current, pids_cgrp_id)); 232 pr_cont_cgroup_path(css->cgroup);
233 pr_cont("\n"); 233 pr_cont("\n");
234 } 234 }
235 cgroup_file_notify(&pids->events_file); 235 cgroup_file_notify(&pids->events_file);
diff --git a/kernel/cpu.c b/kernel/cpu.c
index f7c063239fa5..37b223e4fc05 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -1335,26 +1335,21 @@ static int cpuhp_store_callbacks(enum cpuhp_state state, const char *name,
1335 struct cpuhp_step *sp; 1335 struct cpuhp_step *sp;
1336 int ret = 0; 1336 int ret = 0;
1337 1337
1338 mutex_lock(&cpuhp_state_mutex);
1339
1340 if (state == CPUHP_AP_ONLINE_DYN || state == CPUHP_BP_PREPARE_DYN) { 1338 if (state == CPUHP_AP_ONLINE_DYN || state == CPUHP_BP_PREPARE_DYN) {
1341 ret = cpuhp_reserve_state(state); 1339 ret = cpuhp_reserve_state(state);
1342 if (ret < 0) 1340 if (ret < 0)
1343 goto out; 1341 return ret;
1344 state = ret; 1342 state = ret;
1345 } 1343 }
1346 sp = cpuhp_get_step(state); 1344 sp = cpuhp_get_step(state);
1347 if (name && sp->name) { 1345 if (name && sp->name)
1348 ret = -EBUSY; 1346 return -EBUSY;
1349 goto out; 1347
1350 }
1351 sp->startup.single = startup; 1348 sp->startup.single = startup;
1352 sp->teardown.single = teardown; 1349 sp->teardown.single = teardown;
1353 sp->name = name; 1350 sp->name = name;
1354 sp->multi_instance = multi_instance; 1351 sp->multi_instance = multi_instance;
1355 INIT_HLIST_HEAD(&sp->list); 1352 INIT_HLIST_HEAD(&sp->list);
1356out:
1357 mutex_unlock(&cpuhp_state_mutex);
1358 return ret; 1353 return ret;
1359} 1354}
1360 1355
@@ -1428,6 +1423,7 @@ int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
1428 return -EINVAL; 1423 return -EINVAL;
1429 1424
1430 get_online_cpus(); 1425 get_online_cpus();
1426 mutex_lock(&cpuhp_state_mutex);
1431 1427
1432 if (!invoke || !sp->startup.multi) 1428 if (!invoke || !sp->startup.multi)
1433 goto add_node; 1429 goto add_node;
@@ -1447,16 +1443,14 @@ int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
1447 if (ret) { 1443 if (ret) {
1448 if (sp->teardown.multi) 1444 if (sp->teardown.multi)
1449 cpuhp_rollback_install(cpu, state, node); 1445 cpuhp_rollback_install(cpu, state, node);
1450 goto err; 1446 goto unlock;
1451 } 1447 }
1452 } 1448 }
1453add_node: 1449add_node:
1454 ret = 0; 1450 ret = 0;
1455 mutex_lock(&cpuhp_state_mutex);
1456 hlist_add_head(node, &sp->list); 1451 hlist_add_head(node, &sp->list);
1452unlock:
1457 mutex_unlock(&cpuhp_state_mutex); 1453 mutex_unlock(&cpuhp_state_mutex);
1458
1459err:
1460 put_online_cpus(); 1454 put_online_cpus();
1461 return ret; 1455 return ret;
1462} 1456}
@@ -1491,6 +1485,7 @@ int __cpuhp_setup_state(enum cpuhp_state state,
1491 return -EINVAL; 1485 return -EINVAL;
1492 1486
1493 get_online_cpus(); 1487 get_online_cpus();
1488 mutex_lock(&cpuhp_state_mutex);
1494 1489
1495 ret = cpuhp_store_callbacks(state, name, startup, teardown, 1490 ret = cpuhp_store_callbacks(state, name, startup, teardown,
1496 multi_instance); 1491 multi_instance);
@@ -1524,6 +1519,7 @@ int __cpuhp_setup_state(enum cpuhp_state state,
1524 } 1519 }
1525 } 1520 }
1526out: 1521out:
1522 mutex_unlock(&cpuhp_state_mutex);
1527 put_online_cpus(); 1523 put_online_cpus();
1528 /* 1524 /*
1529 * If the requested state is CPUHP_AP_ONLINE_DYN, return the 1525 * If the requested state is CPUHP_AP_ONLINE_DYN, return the
@@ -1547,6 +1543,8 @@ int __cpuhp_state_remove_instance(enum cpuhp_state state,
1547 return -EINVAL; 1543 return -EINVAL;
1548 1544
1549 get_online_cpus(); 1545 get_online_cpus();
1546 mutex_lock(&cpuhp_state_mutex);
1547
1550 if (!invoke || !cpuhp_get_teardown_cb(state)) 1548 if (!invoke || !cpuhp_get_teardown_cb(state))
1551 goto remove; 1549 goto remove;
1552 /* 1550 /*
@@ -1563,7 +1561,6 @@ int __cpuhp_state_remove_instance(enum cpuhp_state state,
1563 } 1561 }
1564 1562
1565remove: 1563remove:
1566 mutex_lock(&cpuhp_state_mutex);
1567 hlist_del(node); 1564 hlist_del(node);
1568 mutex_unlock(&cpuhp_state_mutex); 1565 mutex_unlock(&cpuhp_state_mutex);
1569 put_online_cpus(); 1566 put_online_cpus();
@@ -1571,6 +1568,7 @@ remove:
1571 return 0; 1568 return 0;
1572} 1569}
1573EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance); 1570EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance);
1571
1574/** 1572/**
1575 * __cpuhp_remove_state - Remove the callbacks for an hotplug machine state 1573 * __cpuhp_remove_state - Remove the callbacks for an hotplug machine state
1576 * @state: The state to remove 1574 * @state: The state to remove
@@ -1589,6 +1587,7 @@ void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
1589 1587
1590 get_online_cpus(); 1588 get_online_cpus();
1591 1589
1590 mutex_lock(&cpuhp_state_mutex);
1592 if (sp->multi_instance) { 1591 if (sp->multi_instance) {
1593 WARN(!hlist_empty(&sp->list), 1592 WARN(!hlist_empty(&sp->list),
1594 "Error: Removing state %d which has instances left.\n", 1593 "Error: Removing state %d which has instances left.\n",
@@ -1613,6 +1612,7 @@ void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
1613 } 1612 }
1614remove: 1613remove:
1615 cpuhp_store_callbacks(state, NULL, NULL, NULL, false); 1614 cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
1615 mutex_unlock(&cpuhp_state_mutex);
1616 put_online_cpus(); 1616 put_online_cpus();
1617} 1617}
1618EXPORT_SYMBOL(__cpuhp_remove_state); 1618EXPORT_SYMBOL(__cpuhp_remove_state);
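The through-line of these cpu.c hunks: cpuhp_state_mutex moves out of cpuhp_store_callbacks() and into every external entry point, so callback storage, the startup/rollback invocations, and the instance-list edits all happen inside one critical section instead of several disjoint ones. A standalone sketch of that caller-holds-the-lock refactor (pthreads standing in for the kernel mutex):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t state_mutex = PTHREAD_MUTEX_INITIALIZER;
    static const char *state_name;  /* NULL == slot free */

    /* callee no longer locks; it relies on the caller holding state_mutex */
    static int store_callbacks(const char *name)
    {
        if (state_name)
            return -16;  /* -EBUSY analogue */
        state_name = name;
        return 0;
    }

    static int setup_state(const char *name)
    {
        int ret;

        pthread_mutex_lock(&state_mutex);  /* caller-side locking, as in the patch */
        ret = store_callbacks(name);
        /* ... invoke startup callbacks and roll back on failure, still locked ... */
        pthread_mutex_unlock(&state_mutex);
        return ret;
    }

    int main(void)
    {
        printf("first: %d, second: %d\n", setup_state("a"), setup_state("b"));
        return 0;
    }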
diff --git a/kernel/events/core.c b/kernel/events/core.c
index a17ed56c8ce1..ff01cba86f43 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4256,7 +4256,7 @@ int perf_event_release_kernel(struct perf_event *event)
4256 4256
4257 raw_spin_lock_irq(&ctx->lock); 4257 raw_spin_lock_irq(&ctx->lock);
4258 /* 4258 /*
4259 * Mark this even as STATE_DEAD, there is no external reference to it 4259 * Mark this event as STATE_DEAD, there is no external reference to it
4260 * anymore. 4260 * anymore.
4261 * 4261 *
4262 * Anybody acquiring event->child_mutex after the below loop _must_ 4262 * Anybody acquiring event->child_mutex after the below loop _must_
@@ -10417,21 +10417,22 @@ void perf_event_free_task(struct task_struct *task)
10417 continue; 10417 continue;
10418 10418
10419 mutex_lock(&ctx->mutex); 10419 mutex_lock(&ctx->mutex);
10420again: 10420 raw_spin_lock_irq(&ctx->lock);
10421 list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, 10421 /*
10422 group_entry) 10422 * Destroy the task <-> ctx relation and mark the context dead.
10423 perf_free_event(event, ctx); 10423 *
10424 * This is important because even though the task hasn't been
10425 * exposed yet the context has been (through child_list).
10426 */
10427 RCU_INIT_POINTER(task->perf_event_ctxp[ctxn], NULL);
10428 WRITE_ONCE(ctx->task, TASK_TOMBSTONE);
10429 put_task_struct(task); /* cannot be last */
10430 raw_spin_unlock_irq(&ctx->lock);
10424 10431
10425 list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, 10432 list_for_each_entry_safe(event, tmp, &ctx->event_list, event_entry)
10426 group_entry)
10427 perf_free_event(event, ctx); 10433 perf_free_event(event, ctx);
10428 10434
10429 if (!list_empty(&ctx->pinned_groups) ||
10430 !list_empty(&ctx->flexible_groups))
10431 goto again;
10432
10433 mutex_unlock(&ctx->mutex); 10435 mutex_unlock(&ctx->mutex);
10434
10435 put_ctx(ctx); 10436 put_ctx(ctx);
10436 } 10437 }
10437} 10438}
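The perf_event_free_task() rewrite replaces the pinned/flexible goto-again loop with a single event_list walk, made safe by first severing the task <-> ctx relation and publishing TASK_TOMBSTONE under ctx->lock so concurrent lookups see a dead context. The sentinel idiom in a standalone sketch (TASK_TOMBSTONE is kernel-internal; the -1 cast below only illustrates the idea):

    #include <stdio.h>

    struct task;
    #define TASK_TOMBSTONE ((struct task *)-1L)  /* dead-context sentinel */

    struct ctx { struct task *task; };

    static int ctx_is_dead(const struct ctx *c)
    {
        return c->task == TASK_TOMBSTONE;
    }

    int main(void)
    {
        struct ctx c = { .task = NULL };

        c.task = TASK_TOMBSTONE;  /* done under ctx->lock in the kernel */
        printf("dead=%d\n", ctx_is_dead(&c));
        return 0;
    }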
@@ -10469,7 +10470,12 @@ const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
10469} 10470}
10470 10471
10471/* 10472/*
10472 * inherit a event from parent task to child task: 10473 * Inherit a event from parent task to child task.
10474 *
10475 * Returns:
10476 * - valid pointer on success
10477 * - NULL for orphaned events
10478 * - IS_ERR() on error
10473 */ 10479 */
10474static struct perf_event * 10480static struct perf_event *
10475inherit_event(struct perf_event *parent_event, 10481inherit_event(struct perf_event *parent_event,
@@ -10563,6 +10569,16 @@ inherit_event(struct perf_event *parent_event,
10563 return child_event; 10569 return child_event;
10564} 10570}
10565 10571
10572/*
10573 * Inherits an event group.
10574 *
10575 * This will quietly suppress orphaned events; !inherit_event() is not an error.
10576 * This matches with perf_event_release_kernel() removing all child events.
10577 *
10578 * Returns:
10579 * - 0 on success
10580 * - <0 on error
10581 */
10566static int inherit_group(struct perf_event *parent_event, 10582static int inherit_group(struct perf_event *parent_event,
10567 struct task_struct *parent, 10583 struct task_struct *parent,
10568 struct perf_event_context *parent_ctx, 10584 struct perf_event_context *parent_ctx,
@@ -10577,6 +10593,11 @@ static int inherit_group(struct perf_event *parent_event,
10577 child, NULL, child_ctx); 10593 child, NULL, child_ctx);
10578 if (IS_ERR(leader)) 10594 if (IS_ERR(leader))
10579 return PTR_ERR(leader); 10595 return PTR_ERR(leader);
10596 /*
10597 * @leader can be NULL here because of is_orphaned_event(). In this
10598 * case inherit_event() will create individual events, similar to what
10599 * perf_group_detach() would do anyway.
10600 */
10580 list_for_each_entry(sub, &parent_event->sibling_list, group_entry) { 10601 list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
10581 child_ctr = inherit_event(sub, parent, parent_ctx, 10602 child_ctr = inherit_event(sub, parent, parent_ctx,
10582 child, leader, child_ctx); 10603 child, leader, child_ctx);
@@ -10586,6 +10607,17 @@ static int inherit_group(struct perf_event *parent_event,
10586 return 0; 10607 return 0;
10587} 10608}
10588 10609
10610/*
10611 * Creates the child task context and tries to inherit the event-group.
10612 *
10613 * Clears @inherited_all on !attr.inherited or error. Note that we'll leave
10614 * inherited_all set when we 'fail' to inherit an orphaned event; this is
10615 * consistent with perf_event_release_kernel() removing all child events.
10616 *
10617 * Returns:
10618 * - 0 on success
10619 * - <0 on error
10620 */
10589static int 10621static int
10590inherit_task_group(struct perf_event *event, struct task_struct *parent, 10622inherit_task_group(struct perf_event *event, struct task_struct *parent,
10591 struct perf_event_context *parent_ctx, 10623 struct perf_event_context *parent_ctx,
@@ -10608,7 +10640,6 @@ inherit_task_group(struct perf_event *event, struct task_struct *parent,
10608 * First allocate and initialize a context for the 10640 * First allocate and initialize a context for the
10609 * child. 10641 * child.
10610 */ 10642 */
10611
10612 child_ctx = alloc_perf_context(parent_ctx->pmu, child); 10643 child_ctx = alloc_perf_context(parent_ctx->pmu, child);
10613 if (!child_ctx) 10644 if (!child_ctx)
10614 return -ENOMEM; 10645 return -ENOMEM;
@@ -10670,7 +10701,7 @@ static int perf_event_init_context(struct task_struct *child, int ctxn)
10670 ret = inherit_task_group(event, parent, parent_ctx, 10701 ret = inherit_task_group(event, parent, parent_ctx,
10671 child, ctxn, &inherited_all); 10702 child, ctxn, &inherited_all);
10672 if (ret) 10703 if (ret)
10673 break; 10704 goto out_unlock;
10674 } 10705 }
10675 10706
10676 /* 10707 /*
@@ -10686,7 +10717,7 @@ static int perf_event_init_context(struct task_struct *child, int ctxn)
10686 ret = inherit_task_group(event, parent, parent_ctx, 10717 ret = inherit_task_group(event, parent, parent_ctx,
10687 child, ctxn, &inherited_all); 10718 child, ctxn, &inherited_all);
10688 if (ret) 10719 if (ret)
10689 break; 10720 goto out_unlock;
10690 } 10721 }
10691 10722
10692 raw_spin_lock_irqsave(&parent_ctx->lock, flags); 10723 raw_spin_lock_irqsave(&parent_ctx->lock, flags);
@@ -10714,6 +10745,7 @@ static int perf_event_init_context(struct task_struct *child, int ctxn)
10714 } 10745 }
10715 10746
10716 raw_spin_unlock_irqrestore(&parent_ctx->lock, flags); 10747 raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
10748out_unlock:
10717 mutex_unlock(&parent_ctx->mutex); 10749 mutex_unlock(&parent_ctx->mutex);
10718 10750
10719 perf_unpin_context(parent_ctx); 10751 perf_unpin_context(parent_ctx);
diff --git a/kernel/futex.c b/kernel/futex.c
index 229a744b1781..45858ec73941 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -2815,7 +2815,6 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
2815{ 2815{
2816 struct hrtimer_sleeper timeout, *to = NULL; 2816 struct hrtimer_sleeper timeout, *to = NULL;
2817 struct rt_mutex_waiter rt_waiter; 2817 struct rt_mutex_waiter rt_waiter;
2818 struct rt_mutex *pi_mutex = NULL;
2819 struct futex_hash_bucket *hb; 2818 struct futex_hash_bucket *hb;
2820 union futex_key key2 = FUTEX_KEY_INIT; 2819 union futex_key key2 = FUTEX_KEY_INIT;
2821 struct futex_q q = futex_q_init; 2820 struct futex_q q = futex_q_init;
@@ -2899,6 +2898,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
2899 if (q.pi_state && (q.pi_state->owner != current)) { 2898 if (q.pi_state && (q.pi_state->owner != current)) {
2900 spin_lock(q.lock_ptr); 2899 spin_lock(q.lock_ptr);
2901 ret = fixup_pi_state_owner(uaddr2, &q, current); 2900 ret = fixup_pi_state_owner(uaddr2, &q, current);
2901 if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current)
2902 rt_mutex_unlock(&q.pi_state->pi_mutex);
2902 /* 2903 /*
2903 * Drop the reference to the pi state which 2904 * Drop the reference to the pi state which
2904 * the requeue_pi() code acquired for us. 2905 * the requeue_pi() code acquired for us.
@@ -2907,6 +2908,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
2907 spin_unlock(q.lock_ptr); 2908 spin_unlock(q.lock_ptr);
2908 } 2909 }
2909 } else { 2910 } else {
2911 struct rt_mutex *pi_mutex;
2912
2910 /* 2913 /*
2911 * We have been woken up by futex_unlock_pi(), a timeout, or a 2914 * We have been woken up by futex_unlock_pi(), a timeout, or a
2912 * signal. futex_unlock_pi() will not destroy the lock_ptr nor 2915 * signal. futex_unlock_pi() will not destroy the lock_ptr nor
@@ -2930,18 +2933,19 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
2930 if (res) 2933 if (res)
2931 ret = (res < 0) ? res : 0; 2934 ret = (res < 0) ? res : 0;
2932 2935
2936 /*
2937 * If fixup_pi_state_owner() faulted and was unable to handle
2938 * the fault, unlock the rt_mutex and return the fault to
2939 * userspace.
2940 */
2941 if (ret && rt_mutex_owner(pi_mutex) == current)
2942 rt_mutex_unlock(pi_mutex);
2943
2933 /* Unqueue and drop the lock. */ 2944 /* Unqueue and drop the lock. */
2934 unqueue_me_pi(&q); 2945 unqueue_me_pi(&q);
2935 } 2946 }
2936 2947
2937 /* 2948 if (ret == -EINTR) {
2938 * If fixup_pi_state_owner() faulted and was unable to handle the
2939 * fault, unlock the rt_mutex and return the fault to userspace.
2940 */
2941 if (ret == -EFAULT) {
2942 if (pi_mutex && rt_mutex_owner(pi_mutex) == current)
2943 rt_mutex_unlock(pi_mutex);
2944 } else if (ret == -EINTR) {
2945 /* 2949 /*
2946 * We've already been requeued, but cannot restart by calling 2950 * We've already been requeued, but cannot restart by calling
2947 * futex_lock_pi() directly. We could restart this syscall, but 2951 * futex_lock_pi() directly. We could restart this syscall, but
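In the futex rework, the function-tail unlock that fired only for -EFAULT (and relied on a pi_mutex pointer remembered much earlier) moves into each branch, keyed on rt_mutex_owner() == current and on any nonzero ret, so the lock is dropped while the pi_state that embeds it is still known to be live. A hypothetical condensation of the new tail logic, not the kernel code itself:

    /* Sketch only: owner_is_me stands in for rt_mutex_owner(...) == current. */
    static void fixup_tail(int ret, int owner_is_me, void (*unlock_pi)(void))
    {
            /* Unlock for any failed fixup, not just -EFAULT, and do it
             * before the pi_state reference is dropped. */
            if (ret && owner_is_me)
                    unlock_pi();
            /* ...then unqueue and release the pi_state... */
    }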
diff --git a/kernel/locking/rwsem-spinlock.c b/kernel/locking/rwsem-spinlock.c
index 7bc24d477805..c65f7989f850 100644
--- a/kernel/locking/rwsem-spinlock.c
+++ b/kernel/locking/rwsem-spinlock.c
@@ -213,10 +213,9 @@ int __sched __down_write_common(struct rw_semaphore *sem, int state)
213 */ 213 */
214 if (sem->count == 0) 214 if (sem->count == 0)
215 break; 215 break;
216 if (signal_pending_state(state, current)) { 216 if (signal_pending_state(state, current))
217 ret = -EINTR; 217 goto out_nolock;
218 goto out; 218
219 }
220 set_current_state(state); 219 set_current_state(state);
221 raw_spin_unlock_irqrestore(&sem->wait_lock, flags); 220 raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
222 schedule(); 221 schedule();
@@ -224,12 +223,19 @@ int __sched __down_write_common(struct rw_semaphore *sem, int state)
224 } 223 }
225 /* got the lock */ 224 /* got the lock */
226 sem->count = -1; 225 sem->count = -1;
227out:
228 list_del(&waiter.list); 226 list_del(&waiter.list);
229 227
230 raw_spin_unlock_irqrestore(&sem->wait_lock, flags); 228 raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
231 229
232 return ret; 230 return ret;
231
232out_nolock:
233 list_del(&waiter.list);
234 if (!list_empty(&sem->wait_list))
235 __rwsem_do_wake(sem, 1);
236 raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
237
238 return -EINTR;
233} 239}
234 240
235void __sched __down_write(struct rw_semaphore *sem) 241void __sched __down_write(struct rw_semaphore *sem)
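The new out_nolock path exists because a writer that aborts on a signal may be consuming a wakeup a releaser already issued; if other tasks are still queued, the aborting waiter must pass that wakeup on or the semaphore stalls with waiters pending. A rough user-space analogue using a condition variable, where signalled is a stand-in for signal_pending_state():

    #include <errno.h>
    #include <pthread.h>
    #include <stdbool.h>

    struct sem {
            pthread_mutex_t lock;
            pthread_cond_t  wait;
            int             count;          /* 0 free, -1 write-held */
            int             waiters;
    };

    static int down_write_interruptible(struct sem *s, bool signalled)
    {
            pthread_mutex_lock(&s->lock);
            s->waiters++;
            while (s->count != 0) {
                    if (signalled) {
                            s->waiters--;
                            if (s->waiters)         /* __rwsem_do_wake analogue */
                                    pthread_cond_signal(&s->wait);
                            pthread_mutex_unlock(&s->lock);
                            return -EINTR;
                    }
                    pthread_cond_wait(&s->wait, &s->lock);
            }
            s->count = -1;                          /* got the lock */
            s->waiters--;
            pthread_mutex_unlock(&s->lock);
            return 0;
    }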
diff --git a/kernel/memremap.c b/kernel/memremap.c
index 06123234f118..07e85e5229da 100644
--- a/kernel/memremap.c
+++ b/kernel/memremap.c
@@ -247,11 +247,9 @@ static void devm_memremap_pages_release(struct device *dev, void *data)
247 align_start = res->start & ~(SECTION_SIZE - 1); 247 align_start = res->start & ~(SECTION_SIZE - 1);
248 align_size = ALIGN(resource_size(res), SECTION_SIZE); 248 align_size = ALIGN(resource_size(res), SECTION_SIZE);
249 249
250 lock_device_hotplug();
251 mem_hotplug_begin(); 250 mem_hotplug_begin();
252 arch_remove_memory(align_start, align_size); 251 arch_remove_memory(align_start, align_size);
253 mem_hotplug_done(); 252 mem_hotplug_done();
254 unlock_device_hotplug();
255 253
256 untrack_pfn(NULL, PHYS_PFN(align_start), align_size); 254 untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
257 pgmap_radix_release(res); 255 pgmap_radix_release(res);
@@ -364,11 +362,9 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
364 if (error) 362 if (error)
365 goto err_pfn_remap; 363 goto err_pfn_remap;
366 364
367 lock_device_hotplug();
368 mem_hotplug_begin(); 365 mem_hotplug_begin();
369 error = arch_add_memory(nid, align_start, align_size, true); 366 error = arch_add_memory(nid, align_start, align_size, true);
370 mem_hotplug_done(); 367 mem_hotplug_done();
371 unlock_device_hotplug();
372 if (error) 368 if (error)
373 goto err_add_memory; 369 goto err_add_memory;
374 370
diff --git a/kernel/padata.c b/kernel/padata.c
index 05316c9f32da..3202aa17492c 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -186,19 +186,20 @@ static struct padata_priv *padata_get_next(struct parallel_data *pd)
186 186
187 reorder = &next_queue->reorder; 187 reorder = &next_queue->reorder;
188 188
189 spin_lock(&reorder->lock);
189 if (!list_empty(&reorder->list)) { 190 if (!list_empty(&reorder->list)) {
190 padata = list_entry(reorder->list.next, 191 padata = list_entry(reorder->list.next,
191 struct padata_priv, list); 192 struct padata_priv, list);
192 193
193 spin_lock(&reorder->lock);
194 list_del_init(&padata->list); 194 list_del_init(&padata->list);
195 atomic_dec(&pd->reorder_objects); 195 atomic_dec(&pd->reorder_objects);
196 spin_unlock(&reorder->lock);
197 196
198 pd->processed++; 197 pd->processed++;
199 198
199 spin_unlock(&reorder->lock);
200 goto out; 200 goto out;
201 } 201 }
202 spin_unlock(&reorder->lock);
202 203
203 if (__this_cpu_read(pd->pqueue->cpu_index) == next_queue->cpu_index) { 204 if (__this_cpu_read(pd->pqueue->cpu_index) == next_queue->cpu_index) {
204 padata = ERR_PTR(-ENODATA); 205 padata = ERR_PTR(-ENODATA);
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 0af928712174..266ddcc1d8bb 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -184,11 +184,17 @@ static void ptrace_unfreeze_traced(struct task_struct *task)
184 184
185 WARN_ON(!task->ptrace || task->parent != current); 185 WARN_ON(!task->ptrace || task->parent != current);
186 186
187 /*
188 * PTRACE_LISTEN can allow ptrace_trap_notify to wake us up remotely.
189 * Recheck state under the lock to close this race.
190 */
187 spin_lock_irq(&task->sighand->siglock); 191 spin_lock_irq(&task->sighand->siglock);
188 if (__fatal_signal_pending(task)) 192 if (task->state == __TASK_TRACED) {
189 wake_up_state(task, __TASK_TRACED); 193 if (__fatal_signal_pending(task))
190 else 194 wake_up_state(task, __TASK_TRACED);
191 task->state = TASK_TRACED; 195 else
196 task->state = TASK_TRACED;
197 }
192 spin_unlock_irq(&task->sighand->siglock); 198 spin_unlock_irq(&task->sighand->siglock);
193} 199}
194 200
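The ptrace fix is a recheck-under-lock: the caller saw the task frozen, but PTRACE_LISTEN lets ptrace_trap_notify() wake it remotely before siglock is taken, so the state test must be repeated inside the lock. A compact sketch with made-up states:

    #include <pthread.h>

    enum st { FROZEN, TRACED, RUNNING };

    struct tsk {
            pthread_mutex_t siglock;
            enum st state;
            int fatal_pending;
    };

    static void unfreeze(struct tsk *t)
    {
            pthread_mutex_lock(&t->siglock);
            if (t->state == FROZEN) {       /* may have been woken remotely */
                    if (t->fatal_pending)
                            t->state = RUNNING;     /* wake_up_state() analogue */
                    else
                            t->state = TRACED;
            }
            pthread_mutex_unlock(&t->siglock);
    }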
diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c
index a08795e21628..00a45c45beca 100644
--- a/kernel/sched/clock.c
+++ b/kernel/sched/clock.c
@@ -96,10 +96,10 @@ static DEFINE_STATIC_KEY_FALSE(__sched_clock_stable);
96static int __sched_clock_stable_early = 1; 96static int __sched_clock_stable_early = 1;
97 97
98/* 98/*
99 * We want: ktime_get_ns() + gtod_offset == sched_clock() + raw_offset 99 * We want: ktime_get_ns() + __gtod_offset == sched_clock() + __sched_clock_offset
100 */ 100 */
101static __read_mostly u64 raw_offset; 101__read_mostly u64 __sched_clock_offset;
102static __read_mostly u64 gtod_offset; 102static __read_mostly u64 __gtod_offset;
103 103
104struct sched_clock_data { 104struct sched_clock_data {
105 u64 tick_raw; 105 u64 tick_raw;
@@ -131,17 +131,24 @@ static void __set_sched_clock_stable(void)
131 /* 131 /*
132 * Attempt to make the (initial) unstable->stable transition continuous. 132 * Attempt to make the (initial) unstable->stable transition continuous.
133 */ 133 */
134 raw_offset = (scd->tick_gtod + gtod_offset) - (scd->tick_raw); 134 __sched_clock_offset = (scd->tick_gtod + __gtod_offset) - (scd->tick_raw);
135 135
136 printk(KERN_INFO "sched_clock: Marking stable (%lld, %lld)->(%lld, %lld)\n", 136 printk(KERN_INFO "sched_clock: Marking stable (%lld, %lld)->(%lld, %lld)\n",
137 scd->tick_gtod, gtod_offset, 137 scd->tick_gtod, __gtod_offset,
138 scd->tick_raw, raw_offset); 138 scd->tick_raw, __sched_clock_offset);
139 139
140 static_branch_enable(&__sched_clock_stable); 140 static_branch_enable(&__sched_clock_stable);
141 tick_dep_clear(TICK_DEP_BIT_CLOCK_UNSTABLE); 141 tick_dep_clear(TICK_DEP_BIT_CLOCK_UNSTABLE);
142} 142}
143 143
144static void __clear_sched_clock_stable(struct work_struct *work) 144static void __sched_clock_work(struct work_struct *work)
145{
146 static_branch_disable(&__sched_clock_stable);
147}
148
149static DECLARE_WORK(sched_clock_work, __sched_clock_work);
150
151static void __clear_sched_clock_stable(void)
145{ 152{
146 struct sched_clock_data *scd = this_scd(); 153 struct sched_clock_data *scd = this_scd();
147 154
@@ -154,17 +161,17 @@ static void __clear_sched_clock_stable(struct work_struct *work)
154 * 161 *
155 * Still do what we can. 162 * Still do what we can.
156 */ 163 */
157 gtod_offset = (scd->tick_raw + raw_offset) - (scd->tick_gtod); 164 __gtod_offset = (scd->tick_raw + __sched_clock_offset) - (scd->tick_gtod);
158 165
159 printk(KERN_INFO "sched_clock: Marking unstable (%lld, %lld)<-(%lld, %lld)\n", 166 printk(KERN_INFO "sched_clock: Marking unstable (%lld, %lld)<-(%lld, %lld)\n",
160 scd->tick_gtod, gtod_offset, 167 scd->tick_gtod, __gtod_offset,
161 scd->tick_raw, raw_offset); 168 scd->tick_raw, __sched_clock_offset);
162 169
163 static_branch_disable(&__sched_clock_stable);
164 tick_dep_set(TICK_DEP_BIT_CLOCK_UNSTABLE); 170 tick_dep_set(TICK_DEP_BIT_CLOCK_UNSTABLE);
165}
166 171
167static DECLARE_WORK(sched_clock_work, __clear_sched_clock_stable); 172 if (sched_clock_stable())
173 schedule_work(&sched_clock_work);
174}
168 175
169void clear_sched_clock_stable(void) 176void clear_sched_clock_stable(void)
170{ 177{
@@ -173,7 +180,7 @@ void clear_sched_clock_stable(void)
173 smp_mb(); /* matches sched_clock_init_late() */ 180 smp_mb(); /* matches sched_clock_init_late() */
174 181
175 if (sched_clock_running == 2) 182 if (sched_clock_running == 2)
176 schedule_work(&sched_clock_work); 183 __clear_sched_clock_stable();
177} 184}
178 185
179void sched_clock_init_late(void) 186void sched_clock_init_late(void)
@@ -214,7 +221,7 @@ static inline u64 wrap_max(u64 x, u64 y)
214 */ 221 */
215static u64 sched_clock_local(struct sched_clock_data *scd) 222static u64 sched_clock_local(struct sched_clock_data *scd)
216{ 223{
217 u64 now, clock, old_clock, min_clock, max_clock; 224 u64 now, clock, old_clock, min_clock, max_clock, gtod;
218 s64 delta; 225 s64 delta;
219 226
220again: 227again:
@@ -231,9 +238,10 @@ again:
231 * scd->tick_gtod + TICK_NSEC); 238 * scd->tick_gtod + TICK_NSEC);
232 */ 239 */
233 240
234 clock = scd->tick_gtod + gtod_offset + delta; 241 gtod = scd->tick_gtod + __gtod_offset;
235 min_clock = wrap_max(scd->tick_gtod, old_clock); 242 clock = gtod + delta;
236 max_clock = wrap_max(old_clock, scd->tick_gtod + TICK_NSEC); 243 min_clock = wrap_max(gtod, old_clock);
244 max_clock = wrap_max(old_clock, gtod + TICK_NSEC);
237 245
238 clock = wrap_max(clock, min_clock); 246 clock = wrap_max(clock, min_clock);
239 clock = wrap_min(clock, max_clock); 247 clock = wrap_min(clock, max_clock);
@@ -317,7 +325,7 @@ u64 sched_clock_cpu(int cpu)
317 u64 clock; 325 u64 clock;
318 326
319 if (sched_clock_stable()) 327 if (sched_clock_stable())
320 return sched_clock() + raw_offset; 328 return sched_clock() + __sched_clock_offset;
321 329
322 if (unlikely(!sched_clock_running)) 330 if (unlikely(!sched_clock_running))
323 return 0ull; 331 return 0ull;
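The renamed offsets preserve one invariant, tick_gtod + __gtod_offset == tick_raw + __sched_clock_offset, so each stable/unstable transition solves for one offset from the other and the reported clock stays continuous across the switch. A worked example with made-up numbers:

    #include <stdio.h>

    int main(void)
    {
            unsigned long long tick_raw = 1000, tick_gtod = 4000;
            unsigned long long gtod_off = 0, sched_off;

            /* going stable: choose sched_off so the clock reads continuously */
            sched_off = (tick_gtod + gtod_off) - tick_raw;          /* 3000 */

            /* going unstable later: solve for gtod_off instead */
            gtod_off = (tick_raw + sched_off) - tick_gtod;          /* back to 0 */

            printf("stable:   sched_clock() + %llu\n", sched_off);
            printf("unstable: tick_gtod + %llu + delta\n", gtod_off);
            return 0;
    }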
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index cd7cd489f739..54c577578da6 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -584,20 +584,14 @@ static int sugov_start(struct cpufreq_policy *policy)
584 for_each_cpu(cpu, policy->cpus) { 584 for_each_cpu(cpu, policy->cpus) {
585 struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu); 585 struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);
586 586
587 memset(sg_cpu, 0, sizeof(*sg_cpu));
587 sg_cpu->sg_policy = sg_policy; 588 sg_cpu->sg_policy = sg_policy;
588 if (policy_is_shared(policy)) { 589 sg_cpu->flags = SCHED_CPUFREQ_RT;
589 sg_cpu->util = 0; 590 sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq;
590 sg_cpu->max = 0; 591 cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
591 sg_cpu->flags = SCHED_CPUFREQ_RT; 592 policy_is_shared(policy) ?
592 sg_cpu->last_update = 0; 593 sugov_update_shared :
593 sg_cpu->iowait_boost = 0; 594 sugov_update_single);
594 sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq;
595 cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
596 sugov_update_shared);
597 } else {
598 cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
599 sugov_update_single);
600 }
601 } 595 }
602 return 0; 596 return 0;
603} 597}
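The sugov_start() cleanup replaces a field-by-field reset, which the old code performed only for shared policies, with a single memset() over the whole per-CPU struct, so single-CPU policies no longer start with stale state either. The shape of the refactor, with hypothetical struct names:

    #include <string.h>

    struct cpu_state {
            void *policy;
            unsigned int flags, util, max, boost_max;
    };

    static void start_cpu(struct cpu_state *sc, void *pol, unsigned int max_freq)
    {
            memset(sc, 0, sizeof(*sc));     /* stale fields cleared on every path */
            sc->policy = pol;
            sc->boost_max = max_freq;
    }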
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 99b2c33a9fbc..a2ce59015642 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -445,13 +445,13 @@ static void replenish_dl_entity(struct sched_dl_entity *dl_se,
445 * 445 *
446 * This function returns true if: 446 * This function returns true if:
447 * 447 *
448 * runtime / (deadline - t) > dl_runtime / dl_period , 448 * runtime / (deadline - t) > dl_runtime / dl_deadline ,
449 * 449 *
450 * IOW we can't recycle current parameters. 450 * IOW we can't recycle current parameters.
451 * 451 *
452 * Notice that the bandwidth check is done against the period. For 452 * Notice that the bandwidth check is done against the deadline. For
453 * a task with deadline equal to period this is the same as using 453 * a task with deadline equal to period this is the same as using
454 * dl_deadline instead of dl_period in the equation above. 454 * dl_period instead of dl_deadline in the equation above.
455 */ 455 */
456static bool dl_entity_overflow(struct sched_dl_entity *dl_se, 456static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
457 struct sched_dl_entity *pi_se, u64 t) 457 struct sched_dl_entity *pi_se, u64 t)
@@ -476,7 +476,7 @@ static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
476 * of anything below microseconds resolution is actually fiction 476 * of anything below microseconds resolution is actually fiction
477 * (but still we want to give the user that illusion >;). 477 * (but still we want to give the user that illusion >;).
478 */ 478 */
479 left = (pi_se->dl_period >> DL_SCALE) * (dl_se->runtime >> DL_SCALE); 479 left = (pi_se->dl_deadline >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
480 right = ((dl_se->deadline - t) >> DL_SCALE) * 480 right = ((dl_se->deadline - t) >> DL_SCALE) *
481 (pi_se->dl_runtime >> DL_SCALE); 481 (pi_se->dl_runtime >> DL_SCALE);
482 482
@@ -505,10 +505,15 @@ static void update_dl_entity(struct sched_dl_entity *dl_se,
505 } 505 }
506} 506}
507 507
508static inline u64 dl_next_period(struct sched_dl_entity *dl_se)
509{
510 return dl_se->deadline - dl_se->dl_deadline + dl_se->dl_period;
511}
512
508/* 513/*
509 * If the entity depleted all its runtime, and if we want it to sleep 514 * If the entity depleted all its runtime, and if we want it to sleep
510 * while waiting for some new execution time to become available, we 515 * while waiting for some new execution time to become available, we
511 * set the bandwidth enforcement timer to the replenishment instant 516 * set the bandwidth replenishment timer to the replenishment instant
512 * and try to activate it. 517 * and try to activate it.
513 * 518 *
514 * Notice that it is important for the caller to know if the timer 519 * Notice that it is important for the caller to know if the timer
@@ -530,7 +535,7 @@ static int start_dl_timer(struct task_struct *p)
530 * that it is actually coming from rq->clock and not from 535 * that it is actually coming from rq->clock and not from
531 * hrtimer's time base reading. 536 * hrtimer's time base reading.
532 */ 537 */
533 act = ns_to_ktime(dl_se->deadline); 538 act = ns_to_ktime(dl_next_period(dl_se));
534 now = hrtimer_cb_get_time(timer); 539 now = hrtimer_cb_get_time(timer);
535 delta = ktime_to_ns(now) - rq_clock(rq); 540 delta = ktime_to_ns(now) - rq_clock(rq);
536 act = ktime_add_ns(act, delta); 541 act = ktime_add_ns(act, delta);
@@ -638,6 +643,7 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
638 lockdep_unpin_lock(&rq->lock, rf.cookie); 643 lockdep_unpin_lock(&rq->lock, rf.cookie);
639 rq = dl_task_offline_migration(rq, p); 644 rq = dl_task_offline_migration(rq, p);
640 rf.cookie = lockdep_pin_lock(&rq->lock); 645 rf.cookie = lockdep_pin_lock(&rq->lock);
646 update_rq_clock(rq);
641 647
642 /* 648 /*
643 * Now that the task has been migrated to the new RQ and we 649 * Now that the task has been migrated to the new RQ and we
@@ -689,6 +695,37 @@ void init_dl_task_timer(struct sched_dl_entity *dl_se)
689 timer->function = dl_task_timer; 695 timer->function = dl_task_timer;
690} 696}
691 697
698/*
699 * During the activation, CBS checks if it can reuse the current task's
700 * runtime and period. If the deadline of the task is in the past, CBS
701 * cannot use the runtime, and so it replenishes the task. This rule
702 * works fine for implicit deadline tasks (deadline == period), and the
703 * CBS was designed for implicit deadline tasks. However, a task with
704 * constrained deadline (deadine < period) might be awakened after the
705 * deadline, but before the next period. In this case, replenishing the
706 * task would allow it to run for runtime / deadline. As in this case
707 * deadline < period, CBS enables a task to run for more than the
708 * runtime / period. In a very loaded system, this can cause a domino
709 * effect, making other tasks miss their deadlines.
710 *
711 * To avoid this problem, in the activation of a constrained deadline
712 * task after the deadline but before the next period, throttle the
713 * task and set the replenishing timer to the beginning of the next period,
714 * unless it is boosted.
715 */
716static inline void dl_check_constrained_dl(struct sched_dl_entity *dl_se)
717{
718 struct task_struct *p = dl_task_of(dl_se);
719 struct rq *rq = rq_of_dl_rq(dl_rq_of_se(dl_se));
720
721 if (dl_time_before(dl_se->deadline, rq_clock(rq)) &&
722 dl_time_before(rq_clock(rq), dl_next_period(dl_se))) {
723 if (unlikely(dl_se->dl_boosted || !start_dl_timer(p)))
724 return;
725 dl_se->dl_throttled = 1;
726 }
727}
728
692static 729static
693int dl_runtime_exceeded(struct sched_dl_entity *dl_se) 730int dl_runtime_exceeded(struct sched_dl_entity *dl_se)
694{ 731{
@@ -922,6 +959,11 @@ static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
922 __dequeue_dl_entity(dl_se); 959 __dequeue_dl_entity(dl_se);
923} 960}
924 961
962static inline bool dl_is_constrained(struct sched_dl_entity *dl_se)
963{
964 return dl_se->dl_deadline < dl_se->dl_period;
965}
966
925static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags) 967static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
926{ 968{
927 struct task_struct *pi_task = rt_mutex_get_top_task(p); 969 struct task_struct *pi_task = rt_mutex_get_top_task(p);
@@ -948,6 +990,15 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
948 } 990 }
949 991
950 /* 992 /*
993 * Check if a constrained deadline task was activated
994 * after the deadline but before the next period.
995 * If that is the case, the task will be throttled and
996 * the replenishment timer will be set to the next period.
997 */
998 if (!p->dl.dl_throttled && dl_is_constrained(&p->dl))
999 dl_check_constrained_dl(&p->dl);
1000
1001 /*
951 * If p is throttled, we do nothing. In fact, if it exhausted 1002 * If p is throttled, we do nothing. In fact, if it exhausted
952 * its budget it needs a replenishment and, since it now is on 1003 * its budget it needs a replenishment and, since it now is on
953 * its rq, the bandwidth timer callback (which clearly has not 1004 * its rq, the bandwidth timer callback (which clearly has not
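dl_check_constrained_dl() throttles a constrained task woken inside the window (deadline, next_period), where dl_next_period() computes next_period as deadline - dl_deadline + dl_period. A worked check with made-up numbers, all in one time unit:

    #include <stdbool.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned long long dl_deadline = 30, dl_period = 100;
            unsigned long long deadline = 130;  /* absolute deadline, this job */
            unsigned long long next_period = deadline - dl_deadline + dl_period;
            unsigned long long now = 150;       /* wakeup after the deadline */

            bool throttle = deadline < now && now < next_period;
            printf("now=%llu in (%llu,%llu): throttle=%d\n",
                   now, deadline, next_period, (int)throttle);
            return 0;   /* this wakeup waits until 200 for replenishment */
    }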
diff --git a/kernel/sched/loadavg.c b/kernel/sched/loadavg.c
index 7296b7308eca..f15fb2bdbc0d 100644
--- a/kernel/sched/loadavg.c
+++ b/kernel/sched/loadavg.c
@@ -169,7 +169,7 @@ static inline int calc_load_write_idx(void)
169 * If the folding window started, make sure we start writing in the 169 * If the folding window started, make sure we start writing in the
170 * next idle-delta. 170 * next idle-delta.
171 */ 171 */
172 if (!time_before(jiffies, calc_load_update)) 172 if (!time_before(jiffies, READ_ONCE(calc_load_update)))
173 idx++; 173 idx++;
174 174
175 return idx & 1; 175 return idx & 1;
@@ -202,8 +202,9 @@ void calc_load_exit_idle(void)
202 struct rq *this_rq = this_rq(); 202 struct rq *this_rq = this_rq();
203 203
204 /* 204 /*
205 * If we're still before the sample window, we're done. 205 * If we're still before the pending sample window, we're done.
206 */ 206 */
207 this_rq->calc_load_update = READ_ONCE(calc_load_update);
207 if (time_before(jiffies, this_rq->calc_load_update)) 208 if (time_before(jiffies, this_rq->calc_load_update))
208 return; 209 return;
209 210
@@ -212,7 +213,6 @@ void calc_load_exit_idle(void)
212 * accounted through the nohz accounting, so skip the entire deal and 213 * accounted through the nohz accounting, so skip the entire deal and
213 * sync up for the next window. 214 * sync up for the next window.
214 */ 215 */
215 this_rq->calc_load_update = calc_load_update;
216 if (time_before(jiffies, this_rq->calc_load_update + 10)) 216 if (time_before(jiffies, this_rq->calc_load_update + 10))
217 this_rq->calc_load_update += LOAD_FREQ; 217 this_rq->calc_load_update += LOAD_FREQ;
218} 218}
@@ -308,13 +308,15 @@ calc_load_n(unsigned long load, unsigned long exp,
308 */ 308 */
309static void calc_global_nohz(void) 309static void calc_global_nohz(void)
310{ 310{
311 unsigned long sample_window;
311 long delta, active, n; 312 long delta, active, n;
312 313
313 if (!time_before(jiffies, calc_load_update + 10)) { 314 sample_window = READ_ONCE(calc_load_update);
315 if (!time_before(jiffies, sample_window + 10)) {
314 /* 316 /*
315 * Catch-up, fold however many we are behind still 317 * Catch-up, fold however many we are behind still
316 */ 318 */
317 delta = jiffies - calc_load_update - 10; 319 delta = jiffies - sample_window - 10;
318 n = 1 + (delta / LOAD_FREQ); 320 n = 1 + (delta / LOAD_FREQ);
319 321
320 active = atomic_long_read(&calc_load_tasks); 322 active = atomic_long_read(&calc_load_tasks);
@@ -324,7 +326,7 @@ static void calc_global_nohz(void)
324 avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n); 326 avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);
325 avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n); 327 avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n);
326 328
327 calc_load_update += n * LOAD_FREQ; 329 WRITE_ONCE(calc_load_update, sample_window + n * LOAD_FREQ);
328 } 330 }
329 331
330 /* 332 /*
@@ -352,9 +354,11 @@ static inline void calc_global_nohz(void) { }
352 */ 354 */
353void calc_global_load(unsigned long ticks) 355void calc_global_load(unsigned long ticks)
354{ 356{
357 unsigned long sample_window;
355 long active, delta; 358 long active, delta;
356 359
357 if (time_before(jiffies, calc_load_update + 10)) 360 sample_window = READ_ONCE(calc_load_update);
361 if (time_before(jiffies, sample_window + 10))
358 return; 362 return;
359 363
360 /* 364 /*
@@ -371,7 +375,7 @@ void calc_global_load(unsigned long ticks)
371 avenrun[1] = calc_load(avenrun[1], EXP_5, active); 375 avenrun[1] = calc_load(avenrun[1], EXP_5, active);
372 avenrun[2] = calc_load(avenrun[2], EXP_15, active); 376 avenrun[2] = calc_load(avenrun[2], EXP_15, active);
373 377
374 calc_load_update += LOAD_FREQ; 378 WRITE_ONCE(calc_load_update, sample_window + LOAD_FREQ);
375 379
376 /* 380 /*
377 * In case we idled for multiple LOAD_FREQ intervals, catch up in bulk. 381 * In case we idled for multiple LOAD_FREQ intervals, catch up in bulk.
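The loadavg changes apply the snapshot pattern: calc_load_update is read once through READ_ONCE() into sample_window, every test and the final WRITE_ONCE() work on that one value, and a concurrent writer advancing the deadline mid-function can no longer be half-observed. A user-space analogue with C11 atomics (the kernel additionally uses time_before() to survive jiffies wrap, which the plain comparison here does not):

    #include <stdatomic.h>

    static _Atomic unsigned long calc_load_update;

    static void calc_global(unsigned long jiffies_now, unsigned long load_freq)
    {
            unsigned long sample_window =
                    atomic_load_explicit(&calc_load_update, memory_order_relaxed);

            if (jiffies_now < sample_window + 10)   /* kernel: time_before() */
                    return;
            /* ...fold samples relative to sample_window... */
            atomic_store_explicit(&calc_load_update, sample_window + load_freq,
                                  memory_order_relaxed);
    }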
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index acf0a5a06da7..8c8714fcb53c 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -2133,9 +2133,12 @@ static int do_proc_douintvec_conv(bool *negp, unsigned long *lvalp,
2133 if (write) { 2133 if (write) {
2134 if (*negp) 2134 if (*negp)
2135 return -EINVAL; 2135 return -EINVAL;
2136 if (*lvalp > UINT_MAX)
2137 return -EINVAL;
2136 *valp = *lvalp; 2138 *valp = *lvalp;
2137 } else { 2139 } else {
2138 unsigned int val = *valp; 2140 unsigned int val = *valp;
2141 *negp = false;
2139 *lvalp = (unsigned long)val; 2142 *lvalp = (unsigned long)val;
2140 } 2143 }
2141 return 0; 2144 return 0;
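do_proc_douintvec_conv() now rejects parsed values above UINT_MAX instead of silently truncating them into the unsigned int target, and clears *negp on reads. The write-side check in isolation, as a runnable sketch:

    #include <errno.h>
    #include <limits.h>

    static int store_uint(unsigned long lval, int neg, unsigned int *valp)
    {
            if (neg)
                    return -EINVAL;
            if (lval > UINT_MAX)
                    return -EINVAL;         /* previously truncated silently */
            *valp = (unsigned int)lval;
            return 0;
    }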
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 96fc3c043ad6..54e7a90db848 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -4826,9 +4826,9 @@ static __init int test_ringbuffer(void)
4826 rb_data[cpu].cnt = cpu; 4826 rb_data[cpu].cnt = cpu;
4827 rb_threads[cpu] = kthread_create(rb_test, &rb_data[cpu], 4827 rb_threads[cpu] = kthread_create(rb_test, &rb_data[cpu],
4828 "rbtester/%d", cpu); 4828 "rbtester/%d", cpu);
4829 if (WARN_ON(!rb_threads[cpu])) { 4829 if (WARN_ON(IS_ERR(rb_threads[cpu]))) {
4830 pr_cont("FAILED\n"); 4830 pr_cont("FAILED\n");
4831 ret = -1; 4831 ret = PTR_ERR(rb_threads[cpu]);
4832 goto out_free; 4832 goto out_free;
4833 } 4833 }
4834 4834
@@ -4838,9 +4838,9 @@ static __init int test_ringbuffer(void)
4838 4838
4839 /* Now create the rb hammer! */ 4839 /* Now create the rb hammer! */
4840 rb_hammer = kthread_run(rb_hammer_test, NULL, "rbhammer"); 4840 rb_hammer = kthread_run(rb_hammer_test, NULL, "rbhammer");
4841 if (WARN_ON(!rb_hammer)) { 4841 if (WARN_ON(IS_ERR(rb_hammer))) {
4842 pr_cont("FAILED\n"); 4842 pr_cont("FAILED\n");
4843 ret = -1; 4843 ret = PTR_ERR(rb_hammer);
4844 goto out_free; 4844 goto out_free;
4845 } 4845 }
4846 4846
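kthread_create() and kthread_run() report failure as an ERR_PTR-encoded errno, never as NULL, so the old WARN_ON(!ptr) could never trip. The sketch below mirrors the include/linux/err.h encoding, where errnos live in the top 4095 values of the address space, to show why only IS_ERR() sees the failure:

    #include <stdio.h>

    #define MAX_ERRNO 4095
    #define IS_ERR(p)  ((unsigned long)(p) >= (unsigned long)-MAX_ERRNO)
    #define PTR_ERR(p) ((long)(p))
    #define ERR_PTR(e) ((void *)(long)(e))

    int main(void)
    {
            void *thr = ERR_PTR(-12);       /* what a failed create returns */

            if (!thr)
                    puts("never reached: the old NULL check");
            if (IS_ERR(thr))
                    printf("create failed: %ld\n", PTR_ERR(thr));
            return 0;
    }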
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 072cbc9b175d..c0168b7da1ea 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1507,6 +1507,7 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
1507 struct timer_list *timer = &dwork->timer; 1507 struct timer_list *timer = &dwork->timer;
1508 struct work_struct *work = &dwork->work; 1508 struct work_struct *work = &dwork->work;
1509 1509
1510 WARN_ON_ONCE(!wq);
1510 WARN_ON_ONCE(timer->function != delayed_work_timer_fn || 1511 WARN_ON_ONCE(timer->function != delayed_work_timer_fn ||
1511 timer->data != (unsigned long)dwork); 1512 timer->data != (unsigned long)dwork);
1512 WARN_ON_ONCE(timer_pending(timer)); 1513 WARN_ON_ONCE(timer_pending(timer));
diff --git a/lib/syscall.c b/lib/syscall.c
index 17d5ff5fa6a3..2c6cd1b5c3ea 100644
--- a/lib/syscall.c
+++ b/lib/syscall.c
@@ -12,6 +12,7 @@ static int collect_syscall(struct task_struct *target, long *callno,
12 12
13 if (!try_get_task_stack(target)) { 13 if (!try_get_task_stack(target)) {
14 /* Task has no stack, so the task isn't in a syscall. */ 14 /* Task has no stack, so the task isn't in a syscall. */
15 *sp = *pc = 0;
15 *callno = -1; 16 *callno = -1;
16 return 0; 17 return 0;
17 } 18 }
diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index 0b1d3140fbb8..a25c9763fce1 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -20,6 +20,7 @@
20#include <linux/string.h> 20#include <linux/string.h>
21#include <linux/uaccess.h> 21#include <linux/uaccess.h>
22#include <linux/module.h> 22#include <linux/module.h>
23#include <linux/kasan.h>
23 24
24/* 25/*
25 * Note: test functions are marked noinline so that their names appear in 26 * Note: test functions are marked noinline so that their names appear in
@@ -474,6 +475,12 @@ static noinline void __init use_after_scope_test(void)
474 475
475static int __init kmalloc_tests_init(void) 476static int __init kmalloc_tests_init(void)
476{ 477{
478 /*
479 * Temporarily enable multi-shot mode. Otherwise, we'd only get a
480 * report for the first case.
481 */
482 bool multishot = kasan_save_enable_multi_shot();
483
477 kmalloc_oob_right(); 484 kmalloc_oob_right();
478 kmalloc_oob_left(); 485 kmalloc_oob_left();
479 kmalloc_node_oob_right(); 486 kmalloc_node_oob_right();
@@ -499,6 +506,9 @@ static int __init kmalloc_tests_init(void)
499 ksize_unpoisons_memory(); 506 ksize_unpoisons_memory();
500 copy_user_test(); 507 copy_user_test();
501 use_after_scope_test(); 508 use_after_scope_test();
509
510 kasan_restore_multi_shot(multishot);
511
502 return -EAGAIN; 512 return -EAGAIN;
503} 513}
504 514
diff --git a/mm/gup.c b/mm/gup.c
index c74bad1bf6e8..04aa405350dc 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1455,7 +1455,7 @@ static int gup_p4d_range(pgd_t pgd, unsigned long addr, unsigned long end,
1455 if (!gup_huge_pd(__hugepd(p4d_val(p4d)), addr, 1455 if (!gup_huge_pd(__hugepd(p4d_val(p4d)), addr,
1456 P4D_SHIFT, next, write, pages, nr)) 1456 P4D_SHIFT, next, write, pages, nr))
1457 return 0; 1457 return 0;
1458 } else if (!gup_p4d_range(p4d, addr, next, write, pages, nr)) 1458 } else if (!gup_pud_range(p4d, addr, next, write, pages, nr))
1459 return 0; 1459 return 0;
1460 } while (p4dp++, addr = next, addr != end); 1460 } while (p4dp++, addr = next, addr != end);
1461 1461
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 1ebc93e179f3..fef4cf210cc7 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -240,18 +240,18 @@ static ssize_t defrag_store(struct kobject *kobj,
240 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags); 240 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
241 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags); 241 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
242 set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags); 242 set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
243 } else if (!memcmp("defer", buf,
244 min(sizeof("defer")-1, count))) {
245 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
246 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
247 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
248 set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
249 } else if (!memcmp("defer+madvise", buf, 243 } else if (!memcmp("defer+madvise", buf,
250 min(sizeof("defer+madvise")-1, count))) { 244 min(sizeof("defer+madvise")-1, count))) {
251 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags); 245 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
252 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags); 246 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
253 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags); 247 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
254 set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags); 248 set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
249 } else if (!memcmp("defer", buf,
250 min(sizeof("defer")-1, count))) {
251 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
252 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
253 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
254 set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
255 } else if (!memcmp("madvise", buf, 255 } else if (!memcmp("madvise", buf,
256 min(sizeof("madvise")-1, count))) { 256 min(sizeof("madvise")-1, count))) {
257 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags); 257 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
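Reordering the defrag_store() branches is a prefix-matching fix: the memcmp()/min() idiom matches the input "defer+madvise" against the shorter token "defer" too, so whichever test runs first wins and the longer literal must come first. A runnable demonstration of the idiom:

    #include <stdio.h>
    #include <string.h>

    static int matches(const char *tok, const char *buf, size_t count)
    {
            size_t n = strlen(tok) < count ? strlen(tok) : count;
            return !memcmp(tok, buf, n);
    }

    int main(void)
    {
            const char *buf = "defer+madvise";

            printf("matches defer:         %d\n", matches("defer", buf, strlen(buf)));
            printf("matches defer+madvise: %d\n", matches("defer+madvise", buf, strlen(buf)));
            return 0;       /* both print 1: test the longer token first */
    }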
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 3d0aab9ee80d..e5828875f7bb 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -4403,7 +4403,9 @@ int hugetlb_reserve_pages(struct inode *inode,
4403 return 0; 4403 return 0;
4404out_err: 4404out_err:
4405 if (!vma || vma->vm_flags & VM_MAYSHARE) 4405 if (!vma || vma->vm_flags & VM_MAYSHARE)
4406 region_abort(resv_map, from, to); 4406 /* Don't call region_abort if region_chg failed */
4407 if (chg >= 0)
4408 region_abort(resv_map, from, to);
4407 if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) 4409 if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
4408 kref_put(&resv_map->refs, resv_map_release); 4410 kref_put(&resv_map->refs, resv_map_release);
4409 return ret; 4411 return ret;
@@ -4651,6 +4653,7 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address,
4651{ 4653{
4652 struct page *page = NULL; 4654 struct page *page = NULL;
4653 spinlock_t *ptl; 4655 spinlock_t *ptl;
4656 pte_t pte;
4654retry: 4657retry:
4655 ptl = pmd_lockptr(mm, pmd); 4658 ptl = pmd_lockptr(mm, pmd);
4656 spin_lock(ptl); 4659 spin_lock(ptl);
@@ -4660,12 +4663,13 @@ retry:
4660 */ 4663 */
4661 if (!pmd_huge(*pmd)) 4664 if (!pmd_huge(*pmd))
4662 goto out; 4665 goto out;
4663 if (pmd_present(*pmd)) { 4666 pte = huge_ptep_get((pte_t *)pmd);
4667 if (pte_present(pte)) {
4664 page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT); 4668 page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
4665 if (flags & FOLL_GET) 4669 if (flags & FOLL_GET)
4666 get_page(page); 4670 get_page(page);
4667 } else { 4671 } else {
4668 if (is_hugetlb_entry_migration(huge_ptep_get((pte_t *)pmd))) { 4672 if (is_hugetlb_entry_migration(pte)) {
4669 spin_unlock(ptl); 4673 spin_unlock(ptl);
4670 __migration_entry_wait(mm, (pte_t *)pmd, ptl); 4674 __migration_entry_wait(mm, (pte_t *)pmd, ptl);
4671 goto retry; 4675 goto retry;
diff --git a/mm/internal.h b/mm/internal.h
index ccfc2a2969f4..266efaeaa370 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -481,6 +481,13 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
481enum ttu_flags; 481enum ttu_flags;
482struct tlbflush_unmap_batch; 482struct tlbflush_unmap_batch;
483 483
484
485/*
486 * only for MM internal work items which do not depend on
487 * any allocations or locks which might depend on allocations
488 */
489extern struct workqueue_struct *mm_percpu_wq;
490
484#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH 491#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
485void try_to_unmap_flush(void); 492void try_to_unmap_flush(void);
486void try_to_unmap_flush_dirty(void); 493void try_to_unmap_flush_dirty(void);
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index 1c260e6b3b3c..dd2dea8eb077 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -96,11 +96,6 @@ static inline const void *kasan_shadow_to_mem(const void *shadow_addr)
96 << KASAN_SHADOW_SCALE_SHIFT); 96 << KASAN_SHADOW_SCALE_SHIFT);
97} 97}
98 98
99static inline bool kasan_report_enabled(void)
100{
101 return !current->kasan_depth;
102}
103
104void kasan_report(unsigned long addr, size_t size, 99void kasan_report(unsigned long addr, size_t size,
105 bool is_write, unsigned long ip); 100 bool is_write, unsigned long ip);
106void kasan_report_double_free(struct kmem_cache *cache, void *object, 101void kasan_report_double_free(struct kmem_cache *cache, void *object,
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index f479365530b6..ab42a0803f16 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -13,7 +13,9 @@
13 * 13 *
14 */ 14 */
15 15
16#include <linux/bitops.h>
16#include <linux/ftrace.h> 17#include <linux/ftrace.h>
18#include <linux/init.h>
17#include <linux/kernel.h> 19#include <linux/kernel.h>
18#include <linux/mm.h> 20#include <linux/mm.h>
19#include <linux/printk.h> 21#include <linux/printk.h>
@@ -293,6 +295,40 @@ static void kasan_report_error(struct kasan_access_info *info)
293 kasan_end_report(&flags); 295 kasan_end_report(&flags);
294} 296}
295 297
298static unsigned long kasan_flags;
299
300#define KASAN_BIT_REPORTED 0
301#define KASAN_BIT_MULTI_SHOT 1
302
303bool kasan_save_enable_multi_shot(void)
304{
305 return test_and_set_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags);
306}
307EXPORT_SYMBOL_GPL(kasan_save_enable_multi_shot);
308
309void kasan_restore_multi_shot(bool enabled)
310{
311 if (!enabled)
312 clear_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags);
313}
314EXPORT_SYMBOL_GPL(kasan_restore_multi_shot);
315
316static int __init kasan_set_multi_shot(char *str)
317{
318 set_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags);
319 return 1;
320}
321__setup("kasan_multi_shot", kasan_set_multi_shot);
322
323static inline bool kasan_report_enabled(void)
324{
325 if (current->kasan_depth)
326 return false;
327 if (test_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags))
328 return true;
329 return !test_and_set_bit(KASAN_BIT_REPORTED, &kasan_flags);
330}
331
296void kasan_report(unsigned long addr, size_t size, 332void kasan_report(unsigned long addr, size_t size,
297 bool is_write, unsigned long ip) 333 bool is_write, unsigned long ip)
298{ 334{
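kasan_report_enabled() now implements one-shot reporting with an atomic test-and-set: the first report flips KASAN_BIT_REPORTED and goes through, later ones are suppressed unless multi-shot is on (via the boot parameter or the save/restore helpers used by test_kasan above). The gate in miniature, using C11 atomics as a user-space analogue:

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_flag reported = ATOMIC_FLAG_INIT;
    static atomic_bool multi_shot;

    static bool report_enabled(void)
    {
            if (atomic_load(&multi_shot))
                    return true;
            /* first caller sets the flag and reports; the rest stay quiet */
            return !atomic_flag_test_and_set(&reported);
    }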
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 26c874e90b12..20036d4f9f13 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -1416,7 +1416,7 @@ static void kmemleak_scan(void)
1416 /* data/bss scanning */ 1416 /* data/bss scanning */
1417 scan_large_block(_sdata, _edata); 1417 scan_large_block(_sdata, _edata);
1418 scan_large_block(__bss_start, __bss_stop); 1418 scan_large_block(__bss_start, __bss_stop);
1419 scan_large_block(__start_data_ro_after_init, __end_data_ro_after_init); 1419 scan_large_block(__start_ro_after_init, __end_ro_after_init);
1420 1420
1421#ifdef CONFIG_SMP 1421#ifdef CONFIG_SMP
1422 /* per-cpu sections scanning */ 1422 /* per-cpu sections scanning */
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 295479b792ec..6fa7208bcd56 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -125,9 +125,12 @@ void put_online_mems(void)
125 125
126} 126}
127 127
128/* Serializes write accesses to mem_hotplug.active_writer. */
129static DEFINE_MUTEX(memory_add_remove_lock);
130
128void mem_hotplug_begin(void) 131void mem_hotplug_begin(void)
129{ 132{
130 assert_held_device_hotplug(); 133 mutex_lock(&memory_add_remove_lock);
131 134
132 mem_hotplug.active_writer = current; 135 mem_hotplug.active_writer = current;
133 136
@@ -147,6 +150,7 @@ void mem_hotplug_done(void)
147 mem_hotplug.active_writer = NULL; 150 mem_hotplug.active_writer = NULL;
148 mutex_unlock(&mem_hotplug.lock); 151 mutex_unlock(&mem_hotplug.lock);
149 memhp_lock_release(); 152 memhp_lock_release();
153 mutex_unlock(&memory_add_remove_lock);
150} 154}
151 155
152/* add this memory to iomem resource */ 156/* add this memory to iomem resource */
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 75b2745bac41..37d0b334bfe9 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1529,7 +1529,6 @@ COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1529COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask, 1529COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
1530 compat_ulong_t, maxnode) 1530 compat_ulong_t, maxnode)
1531{ 1531{
1532 long err = 0;
1533 unsigned long __user *nm = NULL; 1532 unsigned long __user *nm = NULL;
1534 unsigned long nr_bits, alloc_size; 1533 unsigned long nr_bits, alloc_size;
1535 DECLARE_BITMAP(bm, MAX_NUMNODES); 1534 DECLARE_BITMAP(bm, MAX_NUMNODES);
@@ -1538,14 +1537,13 @@ COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
1538 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; 1537 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1539 1538
1540 if (nmask) { 1539 if (nmask) {
1541 err = compat_get_bitmap(bm, nmask, nr_bits); 1540 if (compat_get_bitmap(bm, nmask, nr_bits))
1541 return -EFAULT;
1542 nm = compat_alloc_user_space(alloc_size); 1542 nm = compat_alloc_user_space(alloc_size);
1543 err |= copy_to_user(nm, bm, alloc_size); 1543 if (copy_to_user(nm, bm, alloc_size))
1544 return -EFAULT;
1544 } 1545 }
1545 1546
1546 if (err)
1547 return -EFAULT;
1548
1549 return sys_set_mempolicy(mode, nm, nr_bits+1); 1547 return sys_set_mempolicy(mode, nm, nr_bits+1);
1550} 1548}
1551 1549
@@ -1553,7 +1551,6 @@ COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
1553 compat_ulong_t, mode, compat_ulong_t __user *, nmask, 1551 compat_ulong_t, mode, compat_ulong_t __user *, nmask,
1554 compat_ulong_t, maxnode, compat_ulong_t, flags) 1552 compat_ulong_t, maxnode, compat_ulong_t, flags)
1555{ 1553{
1556 long err = 0;
1557 unsigned long __user *nm = NULL; 1554 unsigned long __user *nm = NULL;
1558 unsigned long nr_bits, alloc_size; 1555 unsigned long nr_bits, alloc_size;
1559 nodemask_t bm; 1556 nodemask_t bm;
@@ -1562,14 +1559,13 @@ COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
1562 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; 1559 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1563 1560
1564 if (nmask) { 1561 if (nmask) {
1565 err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits); 1562 if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits))
1563 return -EFAULT;
1566 nm = compat_alloc_user_space(alloc_size); 1564 nm = compat_alloc_user_space(alloc_size);
1567 err |= copy_to_user(nm, nodes_addr(bm), alloc_size); 1565 if (copy_to_user(nm, nodes_addr(bm), alloc_size))
1566 return -EFAULT;
1568 } 1567 }
1569 1568
1570 if (err)
1571 return -EFAULT;
1572
1573 return sys_mbind(start, len, mode, nm, nr_bits+1, flags); 1569 return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
1574} 1570}
1575 1571
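Both compat wrappers previously OR-ed failures into err and pressed on, calling compat_alloc_user_space() and copy_to_user() with a bitmap that compat_get_bitmap() may never have filled; the rework returns -EFAULT at the first failed copy. Its shape, with stubbed hypothetical helpers:

    #include <errno.h>

    /* Stubs standing in for compat_get_bitmap()/copy_to_user(); 0 = ok. */
    static int get_bits(void *dst) { (void)dst; return 0; }
    static int put_bits(const void *src) { (void)src; return 0; }

    static int set_policy_compat(void *bm, int have_mask)
    {
            if (have_mask) {
                    if (get_bits(bm))
                            return -EFAULT; /* was: err |= ..., kept going */
                    if (put_bits(bm))
                            return -EFAULT;
            }
            return 0;       /* hand the sanitized mask to the native call */
    }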
diff --git a/mm/migrate.c b/mm/migrate.c
index 9a0897a14d37..ed97c2c14fa8 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -209,8 +209,11 @@ static int remove_migration_pte(struct page *page, struct vm_area_struct *vma,
209 209
210 VM_BUG_ON_PAGE(PageTail(page), page); 210 VM_BUG_ON_PAGE(PageTail(page), page);
211 while (page_vma_mapped_walk(&pvmw)) { 211 while (page_vma_mapped_walk(&pvmw)) {
212 new = page - pvmw.page->index + 212 if (PageKsm(page))
213 linear_page_index(vma, pvmw.address); 213 new = page;
214 else
215 new = page - pvmw.page->index +
216 linear_page_index(vma, pvmw.address);
214 217
215 get_page(new); 218 get_page(new);
216 pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot))); 219 pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot)));
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 6cbde310abed..f3d603cef2c0 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2373,6 +2373,13 @@ void drain_all_pages(struct zone *zone)
2373 */ 2373 */
2374 static cpumask_t cpus_with_pcps; 2374 static cpumask_t cpus_with_pcps;
2375 2375
2376 /*
2377 * Make sure nobody triggers this path before mm_percpu_wq is fully
2378 * initialized.
2379 */
2380 if (WARN_ON_ONCE(!mm_percpu_wq))
2381 return;
2382
2376 /* Workqueues cannot recurse */ 2383 /* Workqueues cannot recurse */
2377 if (current->flags & PF_WQ_WORKER) 2384 if (current->flags & PF_WQ_WORKER)
2378 return; 2385 return;
@@ -2422,7 +2429,7 @@ void drain_all_pages(struct zone *zone)
2422 for_each_cpu(cpu, &cpus_with_pcps) { 2429 for_each_cpu(cpu, &cpus_with_pcps) {
2423 struct work_struct *work = per_cpu_ptr(&pcpu_drain, cpu); 2430 struct work_struct *work = per_cpu_ptr(&pcpu_drain, cpu);
2424 INIT_WORK(work, drain_local_pages_wq); 2431 INIT_WORK(work, drain_local_pages_wq);
2425 schedule_work_on(cpu, work); 2432 queue_work_on(cpu, mm_percpu_wq, work);
2426 } 2433 }
2427 for_each_cpu(cpu, &cpus_with_pcps) 2434 for_each_cpu(cpu, &cpus_with_pcps)
2428 flush_work(per_cpu_ptr(&pcpu_drain, cpu)); 2435 flush_work(per_cpu_ptr(&pcpu_drain, cpu));
@@ -4519,13 +4526,13 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
4519 K(node_page_state(pgdat, NR_FILE_MAPPED)), 4526 K(node_page_state(pgdat, NR_FILE_MAPPED)),
4520 K(node_page_state(pgdat, NR_FILE_DIRTY)), 4527 K(node_page_state(pgdat, NR_FILE_DIRTY)),
4521 K(node_page_state(pgdat, NR_WRITEBACK)), 4528 K(node_page_state(pgdat, NR_WRITEBACK)),
4529 K(node_page_state(pgdat, NR_SHMEM)),
4522#ifdef CONFIG_TRANSPARENT_HUGEPAGE 4530#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4523 K(node_page_state(pgdat, NR_SHMEM_THPS) * HPAGE_PMD_NR), 4531 K(node_page_state(pgdat, NR_SHMEM_THPS) * HPAGE_PMD_NR),
4524 K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED) 4532 K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)
4525 * HPAGE_PMD_NR), 4533 * HPAGE_PMD_NR),
4526 K(node_page_state(pgdat, NR_ANON_THPS) * HPAGE_PMD_NR), 4534 K(node_page_state(pgdat, NR_ANON_THPS) * HPAGE_PMD_NR),
4527#endif 4535#endif
4528 K(node_page_state(pgdat, NR_SHMEM)),
4529 K(node_page_state(pgdat, NR_WRITEBACK_TEMP)), 4536 K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
4530 K(node_page_state(pgdat, NR_UNSTABLE_NFS)), 4537 K(node_page_state(pgdat, NR_UNSTABLE_NFS)),
4531 node_page_state(pgdat, NR_PAGES_SCANNED), 4538 node_page_state(pgdat, NR_PAGES_SCANNED),
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
index c4c9def8ffea..de9c40d7304a 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -111,12 +111,8 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
111 if (pvmw->pmd && !pvmw->pte) 111 if (pvmw->pmd && !pvmw->pte)
112 return not_found(pvmw); 112 return not_found(pvmw);
113 113
114 /* Only for THP, seek to next pte entry makes sense */ 114 if (pvmw->pte)
115 if (pvmw->pte) {
116 if (!PageTransHuge(pvmw->page) || PageHuge(pvmw->page))
117 return not_found(pvmw);
118 goto next_pte; 115 goto next_pte;
119 }
120 116
121 if (unlikely(PageHuge(pvmw->page))) { 117 if (unlikely(PageHuge(pvmw->page))) {
122 /* when pud is not present, pte will be NULL */ 118 /* when pud is not present, pte will be NULL */
@@ -165,9 +161,14 @@ restart:
165 while (1) { 161 while (1) {
166 if (check_pte(pvmw)) 162 if (check_pte(pvmw))
167 return true; 163 return true;
168next_pte: do { 164next_pte:
165 /* Seek to next pte only makes sense for THP */
166 if (!PageTransHuge(pvmw->page) || PageHuge(pvmw->page))
167 return not_found(pvmw);
168 do {
169 pvmw->address += PAGE_SIZE; 169 pvmw->address += PAGE_SIZE;
170 if (pvmw->address >= 170 if (pvmw->address >= pvmw->vma->vm_end ||
171 pvmw->address >=
171 __vma_address(pvmw->page, pvmw->vma) + 172 __vma_address(pvmw->page, pvmw->vma) +
172 hpage_nr_pages(pvmw->page) * PAGE_SIZE) 173 hpage_nr_pages(pvmw->page) * PAGE_SIZE)
173 return not_found(pvmw); 174 return not_found(pvmw);
diff --git a/mm/percpu-vm.c b/mm/percpu-vm.c
index 538998a137d2..9ac639499bd1 100644
--- a/mm/percpu-vm.c
+++ b/mm/percpu-vm.c
@@ -21,7 +21,6 @@ static struct page *pcpu_chunk_page(struct pcpu_chunk *chunk,
21 21
22/** 22/**
23 * pcpu_get_pages - get temp pages array 23 * pcpu_get_pages - get temp pages array
24 * @chunk: chunk of interest
25 * 24 *
26 * Returns pointer to array of pointers to struct page which can be indexed 25 * Returns pointer to array of pointers to struct page which can be indexed
27 * with pcpu_page_idx(). Note that there is only one array and accesses 26 * with pcpu_page_idx(). Note that there is only one array and accesses
@@ -30,7 +29,7 @@ static struct page *pcpu_chunk_page(struct pcpu_chunk *chunk,
30 * RETURNS: 29 * RETURNS:
31 * Pointer to temp pages array on success. 30 * Pointer to temp pages array on success.
32 */ 31 */
33static struct page **pcpu_get_pages(struct pcpu_chunk *chunk_alloc) 32static struct page **pcpu_get_pages(void)
34{ 33{
35 static struct page **pages; 34 static struct page **pages;
36 size_t pages_size = pcpu_nr_units * pcpu_unit_pages * sizeof(pages[0]); 35 size_t pages_size = pcpu_nr_units * pcpu_unit_pages * sizeof(pages[0]);
@@ -275,7 +274,7 @@ static int pcpu_populate_chunk(struct pcpu_chunk *chunk,
275{ 274{
276 struct page **pages; 275 struct page **pages;
277 276
278 pages = pcpu_get_pages(chunk); 277 pages = pcpu_get_pages();
279 if (!pages) 278 if (!pages)
280 return -ENOMEM; 279 return -ENOMEM;
281 280
@@ -313,7 +312,7 @@ static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk,
313 * successful population attempt so the temp pages array must 312 * successful population attempt so the temp pages array must
314 * be available now. 313 * be available now.
315 */ 314 */
316 pages = pcpu_get_pages(chunk); 315 pages = pcpu_get_pages();
317 BUG_ON(!pages); 316 BUG_ON(!pages);
318 317
319 /* unmap and free */ 318 /* unmap and free */
diff --git a/mm/percpu.c b/mm/percpu.c
index 5696039b5c07..60a6488e9e6d 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1011,8 +1011,11 @@ area_found:
1011 mutex_unlock(&pcpu_alloc_mutex); 1011 mutex_unlock(&pcpu_alloc_mutex);
1012 } 1012 }
1013 1013
1014 if (chunk != pcpu_reserved_chunk) 1014 if (chunk != pcpu_reserved_chunk) {
1015 spin_lock_irqsave(&pcpu_lock, flags);
1015 pcpu_nr_empty_pop_pages -= occ_pages; 1016 pcpu_nr_empty_pop_pages -= occ_pages;
1017 spin_unlock_irqrestore(&pcpu_lock, flags);
1018 }
1016 1019
1017 if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_LOW) 1020 if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_LOW)
1018 pcpu_schedule_balance_work(); 1021 pcpu_schedule_balance_work();
diff --git a/mm/rmap.c b/mm/rmap.c
index 49ed681ccc7b..f6838015810f 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1159,7 +1159,7 @@ void page_add_file_rmap(struct page *page, bool compound)
1159 goto out; 1159 goto out;
1160 } 1160 }
1161 __mod_node_page_state(page_pgdat(page), NR_FILE_MAPPED, nr); 1161 __mod_node_page_state(page_pgdat(page), NR_FILE_MAPPED, nr);
1162 mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED); 1162 mem_cgroup_update_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED, nr);
1163out: 1163out:
1164 unlock_page_memcg(page); 1164 unlock_page_memcg(page);
1165} 1165}
@@ -1199,7 +1199,7 @@ static void page_remove_file_rmap(struct page *page, bool compound)
1199 * pte lock(a spinlock) is held, which implies preemption disabled. 1199 * pte lock(a spinlock) is held, which implies preemption disabled.
1200 */ 1200 */
1201 __mod_node_page_state(page_pgdat(page), NR_FILE_MAPPED, -nr); 1201 __mod_node_page_state(page_pgdat(page), NR_FILE_MAPPED, -nr);
1202 mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED); 1202 mem_cgroup_update_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED, -nr);
1203 1203
1204 if (unlikely(PageMlocked(page))) 1204 if (unlikely(PageMlocked(page)))
1205 clear_page_mlock(page); 1205 clear_page_mlock(page);
diff --git a/mm/swap.c b/mm/swap.c
index c4910f14f957..5dabf444d724 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -670,30 +670,19 @@ static void lru_add_drain_per_cpu(struct work_struct *dummy)
670 670
671static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work); 671static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
672 672
673/*
674 * lru_add_drain_wq is used to do lru_add_drain_all() from a WQ_MEM_RECLAIM
675 * workqueue, aiding in getting memory freed.
676 */
677static struct workqueue_struct *lru_add_drain_wq;
678
679static int __init lru_init(void)
680{
681 lru_add_drain_wq = alloc_workqueue("lru-add-drain", WQ_MEM_RECLAIM, 0);
682
683 if (WARN(!lru_add_drain_wq,
684 "Failed to create workqueue lru_add_drain_wq"))
685 return -ENOMEM;
686
687 return 0;
688}
689early_initcall(lru_init);
690
691void lru_add_drain_all(void) 673void lru_add_drain_all(void)
692{ 674{
693 static DEFINE_MUTEX(lock); 675 static DEFINE_MUTEX(lock);
694 static struct cpumask has_work; 676 static struct cpumask has_work;
695 int cpu; 677 int cpu;
696 678
679 /*
680 * Make sure nobody triggers this path before mm_percpu_wq is fully
681 * initialized.
682 */
683 if (WARN_ON(!mm_percpu_wq))
684 return;
685
697 mutex_lock(&lock); 686 mutex_lock(&lock);
698 get_online_cpus(); 687 get_online_cpus();
699 cpumask_clear(&has_work); 688 cpumask_clear(&has_work);
@@ -707,7 +696,7 @@ void lru_add_drain_all(void)
707 pagevec_count(&per_cpu(lru_deactivate_pvecs, cpu)) || 696 pagevec_count(&per_cpu(lru_deactivate_pvecs, cpu)) ||
708 need_activate_page_drain(cpu)) { 697 need_activate_page_drain(cpu)) {
709 INIT_WORK(work, lru_add_drain_per_cpu); 698 INIT_WORK(work, lru_add_drain_per_cpu);
710 queue_work_on(cpu, lru_add_drain_wq, work); 699 queue_work_on(cpu, mm_percpu_wq, work);
711 cpumask_set_cpu(cpu, &has_work); 700 cpumask_set_cpu(cpu, &has_work);
712 } 701 }
713 } 702 }
diff --git a/mm/swap_cgroup.c b/mm/swap_cgroup.c
index 310ac0b8f974..ac6318a064d3 100644
--- a/mm/swap_cgroup.c
+++ b/mm/swap_cgroup.c
@@ -201,6 +201,8 @@ void swap_cgroup_swapoff(int type)
201 struct page *page = map[i]; 201 struct page *page = map[i];
202 if (page) 202 if (page)
203 __free_page(page); 203 __free_page(page);
204 if (!(i % SWAP_CLUSTER_MAX))
205 cond_resched();
204 } 206 }
205 vfree(map); 207 vfree(map);
206 } 208 }
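swap_cgroup_swapoff() walks a potentially huge page array at swapoff, so the loop now yields every SWAP_CLUSTER_MAX iterations instead of hogging the CPU for the whole teardown. The cadence as a user-space sketch, where 32 mirrors the kernel's SWAP_CLUSTER_MAX and sched_yield() stands in for cond_resched():

    #include <sched.h>
    #include <stdlib.h>

    static void free_map(void **map, unsigned long len)
    {
            for (unsigned long i = 0; i < len; i++) {
                    free(map[i]);
                    if (!(i % 32))
                            sched_yield();  /* cond_resched() analogue */
            }
    }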
diff --git a/mm/swap_slots.c b/mm/swap_slots.c
index 9b5bc86f96ad..b1ccb58ad397 100644
--- a/mm/swap_slots.c
+++ b/mm/swap_slots.c
@@ -267,8 +267,6 @@ int free_swap_slot(swp_entry_t entry)
267{ 267{
268 struct swap_slots_cache *cache; 268 struct swap_slots_cache *cache;
269 269
270 BUG_ON(!swap_slot_cache_initialized);
271
272 cache = &get_cpu_var(swp_slots); 270 cache = &get_cpu_var(swp_slots);
273 if (use_swap_slot_cache && cache->slots_ret) { 271 if (use_swap_slot_cache && cache->slots_ret) {
274 spin_lock_irq(&cache->free_lock); 272 spin_lock_irq(&cache->free_lock);
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 0dd80222b20b..0b057628a7ba 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1683,7 +1683,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
1683 1683
1684 if (fatal_signal_pending(current)) { 1684 if (fatal_signal_pending(current)) {
1685 area->nr_pages = i; 1685 area->nr_pages = i;
1686 goto fail; 1686 goto fail_no_warn;
1687 } 1687 }
1688 1688
1689 if (node == NUMA_NO_NODE) 1689 if (node == NUMA_NO_NODE)
@@ -1709,6 +1709,7 @@ fail:
1709 warn_alloc(gfp_mask, NULL, 1709 warn_alloc(gfp_mask, NULL,
1710 "vmalloc: allocation failure, allocated %ld of %ld bytes", 1710 "vmalloc: allocation failure, allocated %ld of %ld bytes",
1711 (area->nr_pages*PAGE_SIZE), area->size); 1711 (area->nr_pages*PAGE_SIZE), area->size);
1712fail_no_warn:
1712 vfree(area->addr); 1713 vfree(area->addr);
1713 return NULL; 1714 return NULL;
1714} 1715}
diff --git a/mm/vmstat.c b/mm/vmstat.c
index b1947f0cbee2..809025ed97ea 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1552,7 +1552,6 @@ static const struct file_operations proc_vmstat_file_operations = {
1552#endif /* CONFIG_PROC_FS */ 1552#endif /* CONFIG_PROC_FS */
1553 1553
1554#ifdef CONFIG_SMP 1554#ifdef CONFIG_SMP
1555static struct workqueue_struct *vmstat_wq;
1556static DEFINE_PER_CPU(struct delayed_work, vmstat_work); 1555static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
1557int sysctl_stat_interval __read_mostly = HZ; 1556int sysctl_stat_interval __read_mostly = HZ;
1558 1557
@@ -1623,7 +1622,7 @@ static void vmstat_update(struct work_struct *w)
1623 * to occur in the future. Keep on running the 1622 * to occur in the future. Keep on running the
1624 * update worker thread. 1623 * update worker thread.
1625 */ 1624 */
1626 queue_delayed_work_on(smp_processor_id(), vmstat_wq, 1625 queue_delayed_work_on(smp_processor_id(), mm_percpu_wq,
1627 this_cpu_ptr(&vmstat_work), 1626 this_cpu_ptr(&vmstat_work),
1628 round_jiffies_relative(sysctl_stat_interval)); 1627 round_jiffies_relative(sysctl_stat_interval));
1629 } 1628 }
@@ -1702,7 +1701,7 @@ static void vmstat_shepherd(struct work_struct *w)
1702 struct delayed_work *dw = &per_cpu(vmstat_work, cpu); 1701 struct delayed_work *dw = &per_cpu(vmstat_work, cpu);
1703 1702
1704 if (!delayed_work_pending(dw) && need_update(cpu)) 1703 if (!delayed_work_pending(dw) && need_update(cpu))
1705 queue_delayed_work_on(cpu, vmstat_wq, dw, 0); 1704 queue_delayed_work_on(cpu, mm_percpu_wq, dw, 0);
1706 } 1705 }
1707 put_online_cpus(); 1706 put_online_cpus();
1708 1707
@@ -1718,7 +1717,6 @@ static void __init start_shepherd_timer(void)
1718 INIT_DEFERRABLE_WORK(per_cpu_ptr(&vmstat_work, cpu), 1717 INIT_DEFERRABLE_WORK(per_cpu_ptr(&vmstat_work, cpu),
1719 vmstat_update); 1718 vmstat_update);
1720 1719
1721 vmstat_wq = alloc_workqueue("vmstat", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1722 schedule_delayed_work(&shepherd, 1720 schedule_delayed_work(&shepherd,
1723 round_jiffies_relative(sysctl_stat_interval)); 1721 round_jiffies_relative(sysctl_stat_interval));
1724} 1722}
@@ -1764,11 +1762,16 @@ static int vmstat_cpu_dead(unsigned int cpu)
1764 1762
1765#endif 1763#endif
1766 1764
1767static int __init setup_vmstat(void) 1765struct workqueue_struct *mm_percpu_wq;
1766
1767void __init init_mm_internals(void)
1768{ 1768{
1769#ifdef CONFIG_SMP 1769 int ret __maybe_unused;
1770 int ret; 1770
1771 mm_percpu_wq = alloc_workqueue("mm_percpu_wq",
1772 WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1771 1773
1774#ifdef CONFIG_SMP
1772 ret = cpuhp_setup_state_nocalls(CPUHP_MM_VMSTAT_DEAD, "mm/vmstat:dead", 1775 ret = cpuhp_setup_state_nocalls(CPUHP_MM_VMSTAT_DEAD, "mm/vmstat:dead",
1773 NULL, vmstat_cpu_dead); 1776 NULL, vmstat_cpu_dead);
1774 if (ret < 0) 1777 if (ret < 0)
@@ -1792,9 +1795,7 @@ static int __init setup_vmstat(void)
1792 proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations); 1795 proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
1793 proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations); 1796 proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
1794#endif 1797#endif
1795 return 0;
1796} 1798}
1797module_init(setup_vmstat)
1798 1799
1799#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION) 1800#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)
1800 1801
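These vmstat hunks retire the subsystem-private vmstat_wq in favour of a shared mm_percpu_wq, allocated once in the new init_mm_internals() early enough that users like vmstat_update() find it ready. A kernel-context sketch of the pattern (not the exact mm code; the real init_mm_internals() returns void and does not propagate the failure):

    #include <linux/workqueue.h>

    /* One shared freezable, memory-reclaim-safe workqueue for several
     * per-cpu workers, created during early init. */
    struct workqueue_struct *mm_percpu_wq;

    static int __init shared_wq_init(void)
    {
            mm_percpu_wq = alloc_workqueue("mm_percpu_wq",
                                           WQ_FREEZABLE | WQ_MEM_RECLAIM, 0);
            return mm_percpu_wq ? 0 : -ENOMEM;
    }

WQ_MEM_RECLAIM gives the queue a rescuer thread, so the per-cpu work can still make progress while the machine is under memory pressure.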
diff --git a/mm/workingset.c b/mm/workingset.c
index ac839fca0e76..eda05c71fa49 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -532,7 +532,7 @@ static int __init workingset_init(void)
532 pr_info("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n", 532 pr_info("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n",
533 timestamp_bits, max_order, bucket_order); 533 timestamp_bits, max_order, bucket_order);
534 534
535 ret = list_lru_init_key(&shadow_nodes, &shadow_nodes_key); 535 ret = __list_lru_init(&shadow_nodes, true, &shadow_nodes_key);
536 if (ret) 536 if (ret)
537 goto err; 537 goto err;
538 ret = register_shrinker(&workingset_shadow_shrinker); 538 ret = register_shrinker(&workingset_shadow_shrinker);
diff --git a/mm/z3fold.c b/mm/z3fold.c
index 8970a2fd3b1a..f9492bccfd79 100644
--- a/mm/z3fold.c
+++ b/mm/z3fold.c
@@ -667,6 +667,7 @@ next:
667 z3fold_page_unlock(zhdr); 667 z3fold_page_unlock(zhdr);
668 spin_lock(&pool->lock); 668 spin_lock(&pool->lock);
669 if (kref_put(&zhdr->refcount, release_z3fold_page)) { 669 if (kref_put(&zhdr->refcount, release_z3fold_page)) {
670 spin_unlock(&pool->lock);
670 atomic64_dec(&pool->pages_nr); 671 atomic64_dec(&pool->pages_nr);
671 return 0; 672 return 0;
672 } 673 }
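The z3fold fix is a classic lock leak: when kref_put() released the page, the function returned with pool->lock still held. Every exit from a locked region has to drop the lock; a standalone sketch of the shape (pthread stands in for the spinlock):

    #include <pthread.h>

    static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;

    static int reclaim_one(int last_ref)
    {
            pthread_mutex_lock(&pool_lock);
            if (last_ref) {
                    /* the unlock this patch adds before returning */
                    pthread_mutex_unlock(&pool_lock);
                    return 0;
            }
            /* ... more work under the lock ... */
            pthread_mutex_unlock(&pool_lock);
            return 1;
    }

    int main(void)
    {
            return reclaim_one(1);
    }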
diff --git a/net/atm/svc.c b/net/atm/svc.c
index db9794ec61d8..5589de7086af 100644
--- a/net/atm/svc.c
+++ b/net/atm/svc.c
@@ -318,7 +318,8 @@ out:
318 return error; 318 return error;
319} 319}
320 320
321static int svc_accept(struct socket *sock, struct socket *newsock, int flags) 321static int svc_accept(struct socket *sock, struct socket *newsock, int flags,
322 bool kern)
322{ 323{
323 struct sock *sk = sock->sk; 324 struct sock *sk = sock->sk;
324 struct sk_buff *skb; 325 struct sk_buff *skb;
@@ -329,7 +330,7 @@ static int svc_accept(struct socket *sock, struct socket *newsock, int flags)
329 330
330 lock_sock(sk); 331 lock_sock(sk);
331 332
332 error = svc_create(sock_net(sk), newsock, 0, 0); 333 error = svc_create(sock_net(sk), newsock, 0, kern);
333 if (error) 334 if (error)
334 goto out; 335 goto out;
335 336
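This is the first of many hunks threading a `kern` flag through proto_ops->accept(). The point is visible in the net/core/sock.c hunks further down: kernel-internal sockets now get their own lockdep classes, so a child accepted on a kernel socket must itself be created as a kernel socket, instead of the hard-coded 0 that svc_create() used to receive. The new shape, as a kernel-context sketch (my_create() is hypothetical; here the real callee is svc_create()):

    static int my_accept(struct socket *sock, struct socket *newsock,
                         int flags, bool kern)
    {
            /* forward the listener's kern flag to the child's creator */
            return my_create(sock_net(sock->sk), newsock, 0, kern);
    }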
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index a8e42cedf1db..b7c486752b3a 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -1320,7 +1320,8 @@ out_release:
1320 return err; 1320 return err;
1321} 1321}
1322 1322
1323static int ax25_accept(struct socket *sock, struct socket *newsock, int flags) 1323static int ax25_accept(struct socket *sock, struct socket *newsock, int flags,
1324 bool kern)
1324{ 1325{
1325 struct sk_buff *skb; 1326 struct sk_buff *skb;
1326 struct sock *newsk; 1327 struct sock *newsk;
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index 7c3d994e90d8..71343d0fec94 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -2477,6 +2477,16 @@ static void batadv_iv_iface_activate(struct batadv_hard_iface *hard_iface)
2477 batadv_iv_ogm_schedule(hard_iface); 2477 batadv_iv_ogm_schedule(hard_iface);
2478} 2478}
2479 2479
2480/**
2481 * batadv_iv_init_sel_class - initialize GW selection class
2482 * @bat_priv: the bat priv with all the soft interface information
2483 */
2484static void batadv_iv_init_sel_class(struct batadv_priv *bat_priv)
2485{
2486 /* set default TQ difference threshold to 20 */
2487 atomic_set(&bat_priv->gw.sel_class, 20);
2488}
2489
2480static struct batadv_gw_node * 2490static struct batadv_gw_node *
2481batadv_iv_gw_get_best_gw_node(struct batadv_priv *bat_priv) 2491batadv_iv_gw_get_best_gw_node(struct batadv_priv *bat_priv)
2482{ 2492{
@@ -2823,6 +2833,7 @@ static struct batadv_algo_ops batadv_batman_iv __read_mostly = {
2823 .del_if = batadv_iv_ogm_orig_del_if, 2833 .del_if = batadv_iv_ogm_orig_del_if,
2824 }, 2834 },
2825 .gw = { 2835 .gw = {
2836 .init_sel_class = batadv_iv_init_sel_class,
2826 .get_best_gw_node = batadv_iv_gw_get_best_gw_node, 2837 .get_best_gw_node = batadv_iv_gw_get_best_gw_node,
2827 .is_eligible = batadv_iv_gw_is_eligible, 2838 .is_eligible = batadv_iv_gw_is_eligible,
2828#ifdef CONFIG_BATMAN_ADV_DEBUGFS 2839#ifdef CONFIG_BATMAN_ADV_DEBUGFS
diff --git a/net/batman-adv/bat_v.c b/net/batman-adv/bat_v.c
index 0acd081dd286..a36c8e7291d6 100644
--- a/net/batman-adv/bat_v.c
+++ b/net/batman-adv/bat_v.c
@@ -668,6 +668,16 @@ err_ifinfo1:
668 return ret; 668 return ret;
669} 669}
670 670
671/**
672 * batadv_v_init_sel_class - initialize GW selection class
673 * @bat_priv: the bat priv with all the soft interface information
674 */
675static void batadv_v_init_sel_class(struct batadv_priv *bat_priv)
676{
677 /* set default throughput difference threshold to 5Mbps */
678 atomic_set(&bat_priv->gw.sel_class, 50);
679}
680
671static ssize_t batadv_v_store_sel_class(struct batadv_priv *bat_priv, 681static ssize_t batadv_v_store_sel_class(struct batadv_priv *bat_priv,
672 char *buff, size_t count) 682 char *buff, size_t count)
673{ 683{
@@ -1052,6 +1062,7 @@ static struct batadv_algo_ops batadv_batman_v __read_mostly = {
1052 .dump = batadv_v_orig_dump, 1062 .dump = batadv_v_orig_dump,
1053 }, 1063 },
1054 .gw = { 1064 .gw = {
1065 .init_sel_class = batadv_v_init_sel_class,
1055 .store_sel_class = batadv_v_store_sel_class, 1066 .store_sel_class = batadv_v_store_sel_class,
1056 .show_sel_class = batadv_v_show_sel_class, 1067 .show_sel_class = batadv_v_show_sel_class,
1057 .get_best_gw_node = batadv_v_gw_get_best_gw_node, 1068 .get_best_gw_node = batadv_v_gw_get_best_gw_node,
@@ -1092,9 +1103,6 @@ int batadv_v_mesh_init(struct batadv_priv *bat_priv)
1092 if (ret < 0) 1103 if (ret < 0)
1093 return ret; 1104 return ret;
1094 1105
1095 /* set default throughput difference threshold to 5Mbps */
1096 atomic_set(&bat_priv->gw.sel_class, 50);
1097
1098 return 0; 1106 return 0;
1099} 1107}
1100 1108
diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
index 11a23fd6e1a0..8f964beaac28 100644
--- a/net/batman-adv/fragmentation.c
+++ b/net/batman-adv/fragmentation.c
@@ -404,7 +404,7 @@ out:
404 * batadv_frag_create - create a fragment from skb 404 * batadv_frag_create - create a fragment from skb
405 * @skb: skb to create fragment from 405 * @skb: skb to create fragment from
406 * @frag_head: header to use in new fragment 406 * @frag_head: header to use in new fragment
407 * @mtu: size of new fragment 407 * @fragment_size: size of new fragment
408 * 408 *
409 * Split the passed skb into two fragments: A new one with size matching the 409 * Split the passed skb into two fragments: A new one with size matching the
410 * passed mtu and the old one with the rest. The new skb contains data from the 410 * passed mtu and the old one with the rest. The new skb contains data from the
@@ -414,11 +414,11 @@ out:
414 */ 414 */
415static struct sk_buff *batadv_frag_create(struct sk_buff *skb, 415static struct sk_buff *batadv_frag_create(struct sk_buff *skb,
416 struct batadv_frag_packet *frag_head, 416 struct batadv_frag_packet *frag_head,
417 unsigned int mtu) 417 unsigned int fragment_size)
418{ 418{
419 struct sk_buff *skb_fragment; 419 struct sk_buff *skb_fragment;
420 unsigned int header_size = sizeof(*frag_head); 420 unsigned int header_size = sizeof(*frag_head);
421 unsigned int fragment_size = mtu - header_size; 421 unsigned int mtu = fragment_size + header_size;
422 422
423 skb_fragment = netdev_alloc_skb(NULL, mtu + ETH_HLEN); 423 skb_fragment = netdev_alloc_skb(NULL, mtu + ETH_HLEN);
424 if (!skb_fragment) 424 if (!skb_fragment)
@@ -456,7 +456,7 @@ int batadv_frag_send_packet(struct sk_buff *skb,
456 struct sk_buff *skb_fragment; 456 struct sk_buff *skb_fragment;
457 unsigned int mtu = neigh_node->if_incoming->net_dev->mtu; 457 unsigned int mtu = neigh_node->if_incoming->net_dev->mtu;
458 unsigned int header_size = sizeof(frag_header); 458 unsigned int header_size = sizeof(frag_header);
459 unsigned int max_fragment_size, max_packet_size; 459 unsigned int max_fragment_size, num_fragments;
460 int ret; 460 int ret;
461 461
462 /* To avoid merge and refragmentation at next-hops we never send 462 /* To avoid merge and refragmentation at next-hops we never send
@@ -464,10 +464,15 @@ int batadv_frag_send_packet(struct sk_buff *skb,
464 */ 464 */
465 mtu = min_t(unsigned int, mtu, BATADV_FRAG_MAX_FRAG_SIZE); 465 mtu = min_t(unsigned int, mtu, BATADV_FRAG_MAX_FRAG_SIZE);
466 max_fragment_size = mtu - header_size; 466 max_fragment_size = mtu - header_size;
467 max_packet_size = max_fragment_size * BATADV_FRAG_MAX_FRAGMENTS; 467
468 if (skb->len == 0 || max_fragment_size == 0)
469 return -EINVAL;
470
471 num_fragments = (skb->len - 1) / max_fragment_size + 1;
472 max_fragment_size = (skb->len - 1) / num_fragments + 1;
468 473
469 /* Don't even try to fragment, if we need more than 16 fragments */ 474 /* Don't even try to fragment, if we need more than 16 fragments */
470 if (skb->len > max_packet_size) { 475 if (num_fragments > BATADV_FRAG_MAX_FRAGMENTS) {
471 ret = -EAGAIN; 476 ret = -EAGAIN;
472 goto free_skb; 477 goto free_skb;
473 } 478 }
@@ -507,7 +512,8 @@ int batadv_frag_send_packet(struct sk_buff *skb,
507 goto put_primary_if; 512 goto put_primary_if;
508 } 513 }
509 514
510 skb_fragment = batadv_frag_create(skb, &frag_header, mtu); 515 skb_fragment = batadv_frag_create(skb, &frag_header,
516 max_fragment_size);
511 if (!skb_fragment) { 517 if (!skb_fragment) {
512 ret = -ENOMEM; 518 ret = -ENOMEM;
513 goto put_primary_if; 519 goto put_primary_if;
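The fragmentation rework stops filling every fragment to the MTU and leaving a runt at the end; it first computes how many fragments are needed, then spreads the payload evenly across them, with ceiling division written as (a - 1) / b + 1 in both steps. A standalone sketch of the arithmetic with illustrative numbers:

    #include <stdio.h>

    int main(void)
    {
            unsigned int skb_len = 2805;            /* payload to split */
            unsigned int max_fragment_size = 1400;  /* mtu - header_size */

            /* ceil(skb_len / max_fragment_size) */
            unsigned int num_fragments =
                    (skb_len - 1) / max_fragment_size + 1;
            /* spread evenly: ceil(skb_len / num_fragments) */
            unsigned int even_size = (skb_len - 1) / num_fragments + 1;

            /* prints "3 fragments, up to 935 bytes each" -- the old code
             * would have produced 1400 + 1400 + 5 */
            printf("%u fragments, up to %u bytes each\n",
                   num_fragments, even_size);
            return 0;
    }

The new skb->len == 0 / max_fragment_size == 0 check guards the unsigned underflow in `skb->len - 1` before the division.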
diff --git a/net/batman-adv/gateway_common.c b/net/batman-adv/gateway_common.c
index 5db2e43e3775..33940c5c74a8 100644
--- a/net/batman-adv/gateway_common.c
+++ b/net/batman-adv/gateway_common.c
@@ -253,6 +253,11 @@ static void batadv_gw_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
253 */ 253 */
254void batadv_gw_init(struct batadv_priv *bat_priv) 254void batadv_gw_init(struct batadv_priv *bat_priv)
255{ 255{
256 if (bat_priv->algo_ops->gw.init_sel_class)
257 bat_priv->algo_ops->gw.init_sel_class(bat_priv);
258 else
259 atomic_set(&bat_priv->gw.sel_class, 1);
260
256 batadv_tvlv_handler_register(bat_priv, batadv_gw_tvlv_ogm_handler_v1, 261 batadv_tvlv_handler_register(bat_priv, batadv_gw_tvlv_ogm_handler_v1,
257 NULL, BATADV_TVLV_GW, 1, 262 NULL, BATADV_TVLV_GW, 1,
258 BATADV_TVLV_HANDLER_OGM_CIFNOTFND); 263 BATADV_TVLV_HANDLER_OGM_CIFNOTFND);
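batadv_gw_init() now dispatches through an optional per-algorithm init_sel_class callback and falls back to a neutral default of 1, so the B.A.T.M.A.N. IV default (TQ difference 20) no longer leaks into the generic soft-interface code also used by B.A.T.M.A.N. V. The optional-callback dispatch, as a standalone sketch:

    #include <stdio.h>

    struct algo_gw_ops {
            /* optional: may be NULL, then the generic default applies */
            void (*init_sel_class)(int *sel_class);
    };

    static void iv_init_sel_class(int *sel_class) { *sel_class = 20; }

    static void gw_init(const struct algo_gw_ops *ops, int *sel_class)
    {
            if (ops->init_sel_class)
                    ops->init_sel_class(sel_class);
            else
                    *sel_class = 1;   /* as in batadv_gw_init() */
    }

    int main(void)
    {
            struct algo_gw_ops iv = { .init_sel_class = iv_init_sel_class };
            struct algo_gw_ops plain = { 0 };
            int a, b;

            gw_init(&iv, &a);
            gw_init(&plain, &b);
            printf("iv=%d default=%d\n", a, b);   /* iv=20 default=1 */
            return 0;
    }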
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 5d099b2e6cfc..d042c99af028 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -819,7 +819,6 @@ static int batadv_softif_init_late(struct net_device *dev)
819 atomic_set(&bat_priv->mcast.num_want_all_ipv6, 0); 819 atomic_set(&bat_priv->mcast.num_want_all_ipv6, 0);
820#endif 820#endif
821 atomic_set(&bat_priv->gw.mode, BATADV_GW_MODE_OFF); 821 atomic_set(&bat_priv->gw.mode, BATADV_GW_MODE_OFF);
822 atomic_set(&bat_priv->gw.sel_class, 20);
823 atomic_set(&bat_priv->gw.bandwidth_down, 100); 822 atomic_set(&bat_priv->gw.bandwidth_down, 100);
824 atomic_set(&bat_priv->gw.bandwidth_up, 20); 823 atomic_set(&bat_priv->gw.bandwidth_up, 20);
825 atomic_set(&bat_priv->orig_interval, 1000); 824 atomic_set(&bat_priv->orig_interval, 1000);
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index 66b25e410a41..246f21b4973b 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -1489,6 +1489,7 @@ struct batadv_algo_orig_ops {
1489 1489
1490/** 1490/**
1491 * struct batadv_algo_gw_ops - mesh algorithm callbacks (GW specific) 1491 * struct batadv_algo_gw_ops - mesh algorithm callbacks (GW specific)
1492 * @init_sel_class: initialize GW selection class (optional)
1492 * @store_sel_class: parse and stores a new GW selection class (optional) 1493 * @store_sel_class: parse and stores a new GW selection class (optional)
1493 * @show_sel_class: prints the current GW selection class (optional) 1494 * @show_sel_class: prints the current GW selection class (optional)
1494 * @get_best_gw_node: select the best GW from the list of available nodes 1495 * @get_best_gw_node: select the best GW from the list of available nodes
@@ -1499,6 +1500,7 @@ struct batadv_algo_orig_ops {
1499 * @dump: dump gateways to a netlink socket (optional) 1500 * @dump: dump gateways to a netlink socket (optional)
1500 */ 1501 */
1501struct batadv_algo_gw_ops { 1502struct batadv_algo_gw_ops {
1503 void (*init_sel_class)(struct batadv_priv *bat_priv);
1502 ssize_t (*store_sel_class)(struct batadv_priv *bat_priv, char *buff, 1504 ssize_t (*store_sel_class)(struct batadv_priv *bat_priv, char *buff,
1503 size_t count); 1505 size_t count);
1504 ssize_t (*show_sel_class)(struct batadv_priv *bat_priv, char *buff); 1506 ssize_t (*show_sel_class)(struct batadv_priv *bat_priv, char *buff);
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index f307b145ea54..507b80d59dec 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -301,7 +301,7 @@ done:
301} 301}
302 302
303static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, 303static int l2cap_sock_accept(struct socket *sock, struct socket *newsock,
304 int flags) 304 int flags, bool kern)
305{ 305{
306 DEFINE_WAIT_FUNC(wait, woken_wake_function); 306 DEFINE_WAIT_FUNC(wait, woken_wake_function);
307 struct sock *sk = sock->sk, *nsk; 307 struct sock *sk = sock->sk, *nsk;
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index aa1a814ceddc..ac3c650cb234 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -471,7 +471,8 @@ done:
471 return err; 471 return err;
472} 472}
473 473
474static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int flags) 474static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int flags,
475 bool kern)
475{ 476{
476 DEFINE_WAIT_FUNC(wait, woken_wake_function); 477 DEFINE_WAIT_FUNC(wait, woken_wake_function);
477 struct sock *sk = sock->sk, *nsk; 478 struct sock *sk = sock->sk, *nsk;
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index e4e9a2da1e7e..728e0c8dc8e7 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -627,7 +627,7 @@ done:
627} 627}
628 628
629static int sco_sock_accept(struct socket *sock, struct socket *newsock, 629static int sco_sock_accept(struct socket *sock, struct socket *newsock,
630 int flags) 630 int flags, bool kern)
631{ 631{
632 DEFINE_WAIT_FUNC(wait, woken_wake_function); 632 DEFINE_WAIT_FUNC(wait, woken_wake_function);
633 struct sock *sk = sock->sk, *ch; 633 struct sock *sk = sock->sk, *ch;
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 4f598dc2d916..6e08b7199dd7 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -106,7 +106,7 @@ static struct net_bridge_fdb_entry *br_fdb_find(struct net_bridge *br,
106 struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)]; 106 struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)];
107 struct net_bridge_fdb_entry *fdb; 107 struct net_bridge_fdb_entry *fdb;
108 108
109 WARN_ON_ONCE(!br_hash_lock_held(br)); 109 lockdep_assert_held_once(&br->hash_lock);
110 110
111 rcu_read_lock(); 111 rcu_read_lock();
112 fdb = fdb_find_rcu(head, addr, vid); 112 fdb = fdb_find_rcu(head, addr, vid);
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 236f34244dbe..013f2290bfa5 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -30,6 +30,7 @@ EXPORT_SYMBOL(br_should_route_hook);
30static int 30static int
31br_netif_receive_skb(struct net *net, struct sock *sk, struct sk_buff *skb) 31br_netif_receive_skb(struct net *net, struct sock *sk, struct sk_buff *skb)
32{ 32{
33 br_drop_fake_rtable(skb);
33 return netif_receive_skb(skb); 34 return netif_receive_skb(skb);
34} 35}
35 36
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index 95087e6e8258..1f1e62095464 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -521,21 +521,6 @@ static unsigned int br_nf_pre_routing(void *priv,
521} 521}
522 522
523 523
524/* PF_BRIDGE/LOCAL_IN ************************************************/
525/* The packet is locally destined, which requires a real
526 * dst_entry, so detach the fake one. On the way up, the
527 * packet would pass through PRE_ROUTING again (which already
528 * took place when the packet entered the bridge), but we
529 * register an IPv4 PRE_ROUTING 'sabotage' hook that will
530 * prevent this from happening. */
531static unsigned int br_nf_local_in(void *priv,
532 struct sk_buff *skb,
533 const struct nf_hook_state *state)
534{
535 br_drop_fake_rtable(skb);
536 return NF_ACCEPT;
537}
538
539/* PF_BRIDGE/FORWARD *************************************************/ 524/* PF_BRIDGE/FORWARD *************************************************/
540static int br_nf_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb) 525static int br_nf_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
541{ 526{
@@ -721,18 +706,20 @@ static unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb)
721 706
722static int br_nf_dev_queue_xmit(struct net *net, struct sock *sk, struct sk_buff *skb) 707static int br_nf_dev_queue_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
723{ 708{
724 struct nf_bridge_info *nf_bridge; 709 struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
725 unsigned int mtu_reserved; 710 unsigned int mtu, mtu_reserved;
726 711
727 mtu_reserved = nf_bridge_mtu_reduction(skb); 712 mtu_reserved = nf_bridge_mtu_reduction(skb);
713 mtu = skb->dev->mtu;
728 714
729 if (skb_is_gso(skb) || skb->len + mtu_reserved <= skb->dev->mtu) { 715 if (nf_bridge->frag_max_size && nf_bridge->frag_max_size < mtu)
716 mtu = nf_bridge->frag_max_size;
717
718 if (skb_is_gso(skb) || skb->len + mtu_reserved <= mtu) {
730 nf_bridge_info_free(skb); 719 nf_bridge_info_free(skb);
731 return br_dev_queue_push_xmit(net, sk, skb); 720 return br_dev_queue_push_xmit(net, sk, skb);
732 } 721 }
733 722
734 nf_bridge = nf_bridge_info_get(skb);
735
736 /* This is wrong! We should preserve the original fragment 723 /* This is wrong! We should preserve the original fragment
737 * boundaries by preserving frag_list rather than refragmenting. 724 * boundaries by preserving frag_list rather than refragmenting.
738 */ 725 */
@@ -908,12 +895,6 @@ static struct nf_hook_ops br_nf_ops[] __read_mostly = {
908 .priority = NF_BR_PRI_BRNF, 895 .priority = NF_BR_PRI_BRNF,
909 }, 896 },
910 { 897 {
911 .hook = br_nf_local_in,
912 .pf = NFPROTO_BRIDGE,
913 .hooknum = NF_BR_LOCAL_IN,
914 .priority = NF_BR_PRI_BRNF,
915 },
916 {
917 .hook = br_nf_forward_ip, 898 .hook = br_nf_forward_ip,
918 .pf = NFPROTO_BRIDGE, 899 .pf = NFPROTO_BRIDGE,
919 .hooknum = NF_BR_FORWARD, 900 .hooknum = NF_BR_FORWARD,
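Two things happen in br_netfilter_hooks.c: the LOCAL_IN hook disappears because br_netif_receive_skb() now drops the fake rtable unconditionally (the br_input.c hunk above), and br_nf_dev_queue_xmit() clamps the effective MTU to the recorded frag_max_size, so bridged traffic is refragmented to the size the original sender used rather than to the device MTU. The clamp as a standalone sketch:

    #include <stdio.h>

    static unsigned int effective_mtu(unsigned int dev_mtu,
                                      unsigned int frag_max_size)
    {
            unsigned int mtu = dev_mtu;

            /* prefer the original fragment size when one was recorded */
            if (frag_max_size && frag_max_size < mtu)
                    mtu = frag_max_size;
            return mtu;
    }

    int main(void)
    {
            printf("%u\n", effective_mtu(1500, 0));     /* 1500 */
            printf("%u\n", effective_mtu(1500, 1280));  /* 1280 */
            return 0;
    }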
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 2288fca7756c..61368186edea 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -531,15 +531,6 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
531int br_fdb_external_learn_del(struct net_bridge *br, struct net_bridge_port *p, 531int br_fdb_external_learn_del(struct net_bridge *br, struct net_bridge_port *p,
532 const unsigned char *addr, u16 vid); 532 const unsigned char *addr, u16 vid);
533 533
534static inline bool br_hash_lock_held(struct net_bridge *br)
535{
536#ifdef CONFIG_LOCKDEP
537 return lockdep_is_held(&br->hash_lock);
538#else
539 return true;
540#endif
541}
542
543/* br_forward.c */ 534/* br_forward.c */
544enum br_pkt_type { 535enum br_pkt_type {
545 BR_PKT_UNICAST, 536 BR_PKT_UNICAST,
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 38dcf1eb427d..f76bb3332613 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -7,6 +7,7 @@
7#include <linux/kthread.h> 7#include <linux/kthread.h>
8#include <linux/net.h> 8#include <linux/net.h>
9#include <linux/nsproxy.h> 9#include <linux/nsproxy.h>
10#include <linux/sched/mm.h>
10#include <linux/slab.h> 11#include <linux/slab.h>
11#include <linux/socket.h> 12#include <linux/socket.h>
12#include <linux/string.h> 13#include <linux/string.h>
@@ -469,11 +470,16 @@ static int ceph_tcp_connect(struct ceph_connection *con)
469{ 470{
470 struct sockaddr_storage *paddr = &con->peer_addr.in_addr; 471 struct sockaddr_storage *paddr = &con->peer_addr.in_addr;
471 struct socket *sock; 472 struct socket *sock;
473 unsigned int noio_flag;
472 int ret; 474 int ret;
473 475
474 BUG_ON(con->sock); 476 BUG_ON(con->sock);
477
478 /* sock_create_kern() allocates with GFP_KERNEL */
479 noio_flag = memalloc_noio_save();
475 ret = sock_create_kern(read_pnet(&con->msgr->net), paddr->ss_family, 480 ret = sock_create_kern(read_pnet(&con->msgr->net), paddr->ss_family,
476 SOCK_STREAM, IPPROTO_TCP, &sock); 481 SOCK_STREAM, IPPROTO_TCP, &sock);
482 memalloc_noio_restore(noio_flag);
477 if (ret) 483 if (ret)
478 return ret; 484 return ret;
479 sock->sk->sk_allocation = GFP_NOFS; 485 sock->sk->sk_allocation = GFP_NOFS;
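The added comment says it all: sock_create_kern() allocates with GFP_KERNEL, and for a Ceph connection that allocation can recurse into reclaim and back into the very filesystem being reconnected. memalloc_noio_save()/restore() scopes GFP_NOIO behaviour over the whole call. A kernel-context sketch of the pattern (do_gfp_kernel_work() is hypothetical):

    #include <linux/sched/mm.h>

    static int connect_without_io_recursion(void)
    {
            unsigned int noio_flag;
            int ret;

            /* every allocation below behaves as GFP_NOIO */
            noio_flag = memalloc_noio_save();
            ret = do_gfp_kernel_work();
            memalloc_noio_restore(noio_flag);
            return ret;
    }

The save/restore pair nests correctly, which is why the old flag value is kept and restored rather than simply cleared.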
diff --git a/net/core/dev.c b/net/core/dev.c
index 8637b2b71f3d..7869ae3837ca 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1304,6 +1304,7 @@ void netdev_notify_peers(struct net_device *dev)
1304{ 1304{
1305 rtnl_lock(); 1305 rtnl_lock();
1306 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev); 1306 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
1307 call_netdevice_notifiers(NETDEV_RESEND_IGMP, dev);
1307 rtnl_unlock(); 1308 rtnl_unlock();
1308} 1309}
1309EXPORT_SYMBOL(netdev_notify_peers); 1310EXPORT_SYMBOL(netdev_notify_peers);
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index c35aae13c8d2..d98d4998213d 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -390,7 +390,7 @@ mpls:
390 unsigned char ar_tip[4]; 390 unsigned char ar_tip[4];
391 } *arp_eth, _arp_eth; 391 } *arp_eth, _arp_eth;
392 const struct arphdr *arp; 392 const struct arphdr *arp;
393 struct arphdr *_arp; 393 struct arphdr _arp;
394 394
395 arp = __skb_header_pointer(skb, nhoff, sizeof(_arp), data, 395 arp = __skb_header_pointer(skb, nhoff, sizeof(_arp), data,
396 hlen, &_arp); 396 hlen, &_arp);
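The flow-dissector bug declared `struct arphdr *_arp`, so sizeof(_arp) was the size of a pointer and &_arp handed __skb_header_pointer() the pointer variable itself as backing storage. The fix declares real storage. A userspace analog of the contract (header_pointer() and struct hdr are illustrative):

    #include <stdio.h>
    #include <string.h>

    /* Analog of __skb_header_pointer(): copy `len` bytes at `off` into
     * caller-provided storage and return it. */
    static const void *header_pointer(const void *pkt, size_t pkt_len,
                                      size_t off, size_t len, void *buf)
    {
            if (off + len > pkt_len)
                    return NULL;
            memcpy(buf, (const char *)pkt + off, len);
            return buf;
    }

    struct hdr {
            unsigned short op;
    };

    int main(void)
    {
            unsigned char pkt[64] = { [14] = 0xab };
            struct hdr _h;  /* real storage, as the fix declares _arp */
            const struct hdr *h;

            /* With `struct hdr *_h`, sizeof(_h) and &_h would describe
             * the pointer variable, not a header-sized buffer. */
            h = header_pointer(pkt, sizeof(pkt), 14, sizeof(_h), &_h);
            if (h)
                    printf("op=0x%x\n", h->op);
            return 0;
    }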
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index e7c12caa20c8..4526cbd7e28a 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -860,7 +860,8 @@ static void neigh_probe(struct neighbour *neigh)
860 if (skb) 860 if (skb)
861 skb = skb_clone(skb, GFP_ATOMIC); 861 skb = skb_clone(skb, GFP_ATOMIC);
862 write_unlock(&neigh->lock); 862 write_unlock(&neigh->lock);
863 neigh->ops->solicit(neigh, skb); 863 if (neigh->ops->solicit)
864 neigh->ops->solicit(neigh, skb);
864 atomic_inc(&neigh->probes); 865 atomic_inc(&neigh->probes);
865 kfree_skb(skb); 866 kfree_skb(skb);
866} 867}
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 3945821e9c1f..65ea0ff4017c 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -953,7 +953,7 @@ net_rx_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
953 while (--i >= new_num) { 953 while (--i >= new_num) {
954 struct kobject *kobj = &dev->_rx[i].kobj; 954 struct kobject *kobj = &dev->_rx[i].kobj;
955 955
956 if (!list_empty(&dev_net(dev)->exit_list)) 956 if (!atomic_read(&dev_net(dev)->count))
957 kobj->uevent_suppress = 1; 957 kobj->uevent_suppress = 1;
958 if (dev->sysfs_rx_queue_group) 958 if (dev->sysfs_rx_queue_group)
959 sysfs_remove_group(kobj, dev->sysfs_rx_queue_group); 959 sysfs_remove_group(kobj, dev->sysfs_rx_queue_group);
@@ -1371,7 +1371,7 @@ netdev_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
1371 while (--i >= new_num) { 1371 while (--i >= new_num) {
1372 struct netdev_queue *queue = dev->_tx + i; 1372 struct netdev_queue *queue = dev->_tx + i;
1373 1373
1374 if (!list_empty(&dev_net(dev)->exit_list)) 1374 if (!atomic_read(&dev_net(dev)->count))
1375 queue->kobj.uevent_suppress = 1; 1375 queue->kobj.uevent_suppress = 1;
1376#ifdef CONFIG_BQL 1376#ifdef CONFIG_BQL
1377 sysfs_remove_group(&queue->kobj, &dql_group); 1377 sysfs_remove_group(&queue->kobj, &dql_group);
@@ -1558,7 +1558,7 @@ void netdev_unregister_kobject(struct net_device *ndev)
1558{ 1558{
1559 struct device *dev = &(ndev->dev); 1559 struct device *dev = &(ndev->dev);
1560 1560
1561 if (!list_empty(&dev_net(ndev)->exit_list)) 1561 if (!atomic_read(&dev_net(ndev)->count))
1562 dev_set_uevent_suppress(dev, 1); 1562 dev_set_uevent_suppress(dev, 1);
1563 1563
1564 kobject_get(&dev->kobj); 1564 kobject_get(&dev->kobj);
diff --git a/net/core/netclassid_cgroup.c b/net/core/netclassid_cgroup.c
index 6ae56037bb13..029a61ac6cdd 100644
--- a/net/core/netclassid_cgroup.c
+++ b/net/core/netclassid_cgroup.c
@@ -71,27 +71,17 @@ static int update_classid_sock(const void *v, struct file *file, unsigned n)
71 return 0; 71 return 0;
72} 72}
73 73
74static void update_classid(struct cgroup_subsys_state *css, void *v) 74static void cgrp_attach(struct cgroup_taskset *tset)
75{ 75{
76 struct css_task_iter it; 76 struct cgroup_subsys_state *css;
77 struct task_struct *p; 77 struct task_struct *p;
78 78
79 css_task_iter_start(css, &it); 79 cgroup_taskset_for_each(p, css, tset) {
80 while ((p = css_task_iter_next(&it))) {
81 task_lock(p); 80 task_lock(p);
82 iterate_fd(p->files, 0, update_classid_sock, v); 81 iterate_fd(p->files, 0, update_classid_sock,
82 (void *)(unsigned long)css_cls_state(css)->classid);
83 task_unlock(p); 83 task_unlock(p);
84 } 84 }
85 css_task_iter_end(&it);
86}
87
88static void cgrp_attach(struct cgroup_taskset *tset)
89{
90 struct cgroup_subsys_state *css;
91
92 cgroup_taskset_first(tset, &css);
93 update_classid(css,
94 (void *)(unsigned long)css_cls_state(css)->classid);
95} 85}
96 86
97static u64 read_classid(struct cgroup_subsys_state *css, struct cftype *cft) 87static u64 read_classid(struct cgroup_subsys_state *css, struct cftype *cft)
@@ -103,12 +93,22 @@ static int write_classid(struct cgroup_subsys_state *css, struct cftype *cft,
103 u64 value) 93 u64 value)
104{ 94{
105 struct cgroup_cls_state *cs = css_cls_state(css); 95 struct cgroup_cls_state *cs = css_cls_state(css);
96 struct css_task_iter it;
97 struct task_struct *p;
106 98
107 cgroup_sk_alloc_disable(); 99 cgroup_sk_alloc_disable();
108 100
109 cs->classid = (u32)value; 101 cs->classid = (u32)value;
110 102
111 update_classid(css, (void *)(unsigned long)cs->classid); 103 css_task_iter_start(css, &it);
104 while ((p = css_task_iter_next(&it))) {
105 task_lock(p);
106 iterate_fd(p->files, 0, update_classid_sock,
107 (void *)(unsigned long)cs->classid);
108 task_unlock(p);
109 }
110 css_task_iter_end(&it);
111
112 return 0; 112 return 0;
113} 113}
114 114
diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
index 758f140b6bed..d28da7d363f1 100644
--- a/net/core/secure_seq.c
+++ b/net/core/secure_seq.c
@@ -20,9 +20,11 @@
20#include <net/tcp.h> 20#include <net/tcp.h>
21 21
22static siphash_key_t net_secret __read_mostly; 22static siphash_key_t net_secret __read_mostly;
23static siphash_key_t ts_secret __read_mostly;
23 24
24static __always_inline void net_secret_init(void) 25static __always_inline void net_secret_init(void)
25{ 26{
27 net_get_random_once(&ts_secret, sizeof(ts_secret));
26 net_get_random_once(&net_secret, sizeof(net_secret)); 28 net_get_random_once(&net_secret, sizeof(net_secret));
27} 29}
28#endif 30#endif
@@ -45,6 +47,23 @@ static u32 seq_scale(u32 seq)
45#endif 47#endif
46 48
47#if IS_ENABLED(CONFIG_IPV6) 49#if IS_ENABLED(CONFIG_IPV6)
50static u32 secure_tcpv6_ts_off(const __be32 *saddr, const __be32 *daddr)
51{
52 const struct {
53 struct in6_addr saddr;
54 struct in6_addr daddr;
55 } __aligned(SIPHASH_ALIGNMENT) combined = {
56 .saddr = *(struct in6_addr *)saddr,
57 .daddr = *(struct in6_addr *)daddr,
58 };
59
60 if (sysctl_tcp_timestamps != 1)
61 return 0;
62
63 return siphash(&combined, offsetofend(typeof(combined), daddr),
64 &ts_secret);
65}
66
48u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr, 67u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr,
49 __be16 sport, __be16 dport, u32 *tsoff) 68 __be16 sport, __be16 dport, u32 *tsoff)
50{ 69{
@@ -63,7 +82,7 @@ u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr,
63 net_secret_init(); 82 net_secret_init();
64 hash = siphash(&combined, offsetofend(typeof(combined), dport), 83 hash = siphash(&combined, offsetofend(typeof(combined), dport),
65 &net_secret); 84 &net_secret);
66 *tsoff = sysctl_tcp_timestamps == 1 ? (hash >> 32) : 0; 85 *tsoff = secure_tcpv6_ts_off(saddr, daddr);
67 return seq_scale(hash); 86 return seq_scale(hash);
68} 87}
69EXPORT_SYMBOL(secure_tcpv6_sequence_number); 88EXPORT_SYMBOL(secure_tcpv6_sequence_number);
@@ -88,6 +107,14 @@ EXPORT_SYMBOL(secure_ipv6_port_ephemeral);
88#endif 107#endif
89 108
90#ifdef CONFIG_INET 109#ifdef CONFIG_INET
110static u32 secure_tcp_ts_off(__be32 saddr, __be32 daddr)
111{
112 if (sysctl_tcp_timestamps != 1)
113 return 0;
114
115 return siphash_2u32((__force u32)saddr, (__force u32)daddr,
116 &ts_secret);
117}
91 118
92/* secure_tcp_sequence_number(a, b, 0, d) == secure_ipv4_port_ephemeral(a, b, d), 119/* secure_tcp_sequence_number(a, b, 0, d) == secure_ipv4_port_ephemeral(a, b, d),
93 * but fortunately, `sport' cannot be 0 in any circumstances. If this changes, 120 * but fortunately, `sport' cannot be 0 in any circumstances. If this changes,
@@ -103,7 +130,7 @@ u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
103 hash = siphash_3u32((__force u32)saddr, (__force u32)daddr, 130 hash = siphash_3u32((__force u32)saddr, (__force u32)daddr,
104 (__force u32)sport << 16 | (__force u32)dport, 131 (__force u32)sport << 16 | (__force u32)dport,
105 &net_secret); 132 &net_secret);
106 *tsoff = sysctl_tcp_timestamps == 1 ? (hash >> 32) : 0; 133 *tsoff = secure_tcp_ts_off(saddr, daddr);
107 return seq_scale(hash); 134 return seq_scale(hash);
108} 135}
109 136
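Timestamp offsets get their own ts_secret instead of reusing the upper 32 bits of the ISN hash, presumably so the publicly visible timestamp offset no longer reveals half of the keyed hash the sequence number is derived from; when sysctl_tcp_timestamps is not in its per-connection-randomization mode the offset is simply 0. The IPv4 helper's shape, as a kernel-context sketch mirroring the hunk:

    #include <linux/siphash.h>

    static siphash_key_t ts_secret __read_mostly;

    /* keyed, per-address-pair offset, independent of the ISN secret */
    static u32 ts_off(__be32 saddr, __be32 daddr)
    {
            if (sysctl_tcp_timestamps != 1)
                    return 0;
            return siphash_2u32((__force u32)saddr, (__force u32)daddr,
                                &ts_secret);
    }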
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index f3557958e9bf..9f781092fda9 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3694,6 +3694,15 @@ static void sock_rmem_free(struct sk_buff *skb)
3694 atomic_sub(skb->truesize, &sk->sk_rmem_alloc); 3694 atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
3695} 3695}
3696 3696
3697static void skb_set_err_queue(struct sk_buff *skb)
3698{
3699 /* pkt_type of skbs received on local sockets is never PACKET_OUTGOING.
3700 * So, it is safe to (mis)use it to mark skbs on the error queue.
3701 */
3702 skb->pkt_type = PACKET_OUTGOING;
3703 BUILD_BUG_ON(PACKET_OUTGOING == 0);
3704}
3705
3697/* 3706/*
3698 * Note: We dont mem charge error packets (no sk_forward_alloc changes) 3707 * Note: We dont mem charge error packets (no sk_forward_alloc changes)
3699 */ 3708 */
@@ -3707,6 +3716,7 @@ int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
3707 skb->sk = sk; 3716 skb->sk = sk;
3708 skb->destructor = sock_rmem_free; 3717 skb->destructor = sock_rmem_free;
3709 atomic_add(skb->truesize, &sk->sk_rmem_alloc); 3718 atomic_add(skb->truesize, &sk->sk_rmem_alloc);
3719 skb_set_err_queue(skb);
3710 3720
3711 /* before exiting rcu section, make sure dst is refcounted */ 3721 /* before exiting rcu section, make sure dst is refcounted */
3712 skb_dst_force(skb); 3722 skb_dst_force(skb);
@@ -3783,16 +3793,20 @@ EXPORT_SYMBOL(skb_clone_sk);
3783 3793
3784static void __skb_complete_tx_timestamp(struct sk_buff *skb, 3794static void __skb_complete_tx_timestamp(struct sk_buff *skb,
3785 struct sock *sk, 3795 struct sock *sk,
3786 int tstype) 3796 int tstype,
3797 bool opt_stats)
3787{ 3798{
3788 struct sock_exterr_skb *serr; 3799 struct sock_exterr_skb *serr;
3789 int err; 3800 int err;
3790 3801
3802 BUILD_BUG_ON(sizeof(struct sock_exterr_skb) > sizeof(skb->cb));
3803
3791 serr = SKB_EXT_ERR(skb); 3804 serr = SKB_EXT_ERR(skb);
3792 memset(serr, 0, sizeof(*serr)); 3805 memset(serr, 0, sizeof(*serr));
3793 serr->ee.ee_errno = ENOMSG; 3806 serr->ee.ee_errno = ENOMSG;
3794 serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; 3807 serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
3795 serr->ee.ee_info = tstype; 3808 serr->ee.ee_info = tstype;
3809 serr->opt_stats = opt_stats;
3796 if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) { 3810 if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) {
3797 serr->ee.ee_data = skb_shinfo(skb)->tskey; 3811 serr->ee.ee_data = skb_shinfo(skb)->tskey;
3798 if (sk->sk_protocol == IPPROTO_TCP && 3812 if (sk->sk_protocol == IPPROTO_TCP &&
@@ -3828,13 +3842,14 @@ void skb_complete_tx_timestamp(struct sk_buff *skb,
3828 if (!skb_may_tx_timestamp(sk, false)) 3842 if (!skb_may_tx_timestamp(sk, false))
3829 return; 3843 return;
3830 3844
3831 /* take a reference to prevent skb_orphan() from freeing the socket */ 3845 /* Take a reference to prevent skb_orphan() from freeing the socket,
3832 sock_hold(sk); 3846 * but only if the socket refcount is not zero.
3833 3847 */
3834 *skb_hwtstamps(skb) = *hwtstamps; 3848 if (likely(atomic_inc_not_zero(&sk->sk_refcnt))) {
3835 __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND); 3849 *skb_hwtstamps(skb) = *hwtstamps;
3836 3850 __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND, false);
3837 sock_put(sk); 3851 sock_put(sk);
3852 }
3838} 3853}
3839EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp); 3854EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp);
3840 3855
@@ -3843,7 +3858,7 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb,
3843 struct sock *sk, int tstype) 3858 struct sock *sk, int tstype)
3844{ 3859{
3845 struct sk_buff *skb; 3860 struct sk_buff *skb;
3846 bool tsonly; 3861 bool tsonly, opt_stats = false;
3847 3862
3848 if (!sk) 3863 if (!sk)
3849 return; 3864 return;
@@ -3856,9 +3871,10 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb,
3856#ifdef CONFIG_INET 3871#ifdef CONFIG_INET
3857 if ((sk->sk_tsflags & SOF_TIMESTAMPING_OPT_STATS) && 3872 if ((sk->sk_tsflags & SOF_TIMESTAMPING_OPT_STATS) &&
3858 sk->sk_protocol == IPPROTO_TCP && 3873 sk->sk_protocol == IPPROTO_TCP &&
3859 sk->sk_type == SOCK_STREAM) 3874 sk->sk_type == SOCK_STREAM) {
3860 skb = tcp_get_timestamping_opt_stats(sk); 3875 skb = tcp_get_timestamping_opt_stats(sk);
3861 else 3876 opt_stats = true;
3877 } else
3862#endif 3878#endif
3863 skb = alloc_skb(0, GFP_ATOMIC); 3879 skb = alloc_skb(0, GFP_ATOMIC);
3864 } else { 3880 } else {
@@ -3877,7 +3893,7 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb,
3877 else 3893 else
3878 skb->tstamp = ktime_get_real(); 3894 skb->tstamp = ktime_get_real();
3879 3895
3880 __skb_complete_tx_timestamp(skb, sk, tstype); 3896 __skb_complete_tx_timestamp(skb, sk, tstype, opt_stats);
3881} 3897}
3882EXPORT_SYMBOL_GPL(__skb_tstamp_tx); 3898EXPORT_SYMBOL_GPL(__skb_tstamp_tx);
3883 3899
@@ -3893,7 +3909,7 @@ void skb_complete_wifi_ack(struct sk_buff *skb, bool acked)
3893{ 3909{
3894 struct sock *sk = skb->sk; 3910 struct sock *sk = skb->sk;
3895 struct sock_exterr_skb *serr; 3911 struct sock_exterr_skb *serr;
3896 int err; 3912 int err = 1;
3897 3913
3898 skb->wifi_acked_valid = 1; 3914 skb->wifi_acked_valid = 1;
3899 skb->wifi_acked = acked; 3915 skb->wifi_acked = acked;
@@ -3903,14 +3919,15 @@ void skb_complete_wifi_ack(struct sk_buff *skb, bool acked)
3903 serr->ee.ee_errno = ENOMSG; 3919 serr->ee.ee_errno = ENOMSG;
3904 serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS; 3920 serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS;
3905 3921
3906 /* take a reference to prevent skb_orphan() from freeing the socket */ 3922 /* Take a reference to prevent skb_orphan() from freeing the socket,
3907 sock_hold(sk); 3923 * but only if the socket refcount is not zero.
3908 3924 */
3909 err = sock_queue_err_skb(sk, skb); 3925 if (likely(atomic_inc_not_zero(&sk->sk_refcnt))) {
3926 err = sock_queue_err_skb(sk, skb);
3927 sock_put(sk);
3928 }
3910 if (err) 3929 if (err)
3911 kfree_skb(skb); 3930 kfree_skb(skb);
3912
3913 sock_put(sk);
3914} 3931}
3915EXPORT_SYMBOL_GPL(skb_complete_wifi_ack); 3932EXPORT_SYMBOL_GPL(skb_complete_wifi_ack);
3916 3933
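Both timestamp-completion paths used sock_hold() on a socket to which the skb holds no reference, which can resurrect a socket whose refcount has already hit zero. The fix takes the reference only when the count is still non-zero. The try-get primitive in portable C11 (a sketch of what atomic_inc_not_zero() does):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static bool get_ref_not_zero(atomic_int *refcnt)
    {
            int old = atomic_load(refcnt);

            while (old != 0) {
                    /* on failure, `old` is reloaded automatically */
                    if (atomic_compare_exchange_weak(refcnt, &old, old + 1))
                            return true;   /* reference taken */
            }
            return false;                  /* object is dying: hands off */
    }

    int main(void)
    {
            atomic_int live = 2, dying = 0;

            printf("%d %d\n", get_ref_not_zero(&live),
                   get_ref_not_zero(&dying));   /* prints "1 0" */
            return 0;
    }

Note how skb_complete_wifi_ack() initializes err to 1, so the skb is still freed when the reference could not be taken.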
diff --git a/net/core/sock.c b/net/core/sock.c
index f6fd79f33097..2c4f574168fb 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -197,66 +197,55 @@ EXPORT_SYMBOL(sk_net_capable);
197 197
198/* 198/*
199 * Each address family might have different locking rules, so we have 199 * Each address family might have different locking rules, so we have
200 * one slock key per address family: 200 * one slock key per address family and separate keys for internal and
201 * userspace sockets.
201 */ 202 */
202static struct lock_class_key af_family_keys[AF_MAX]; 203static struct lock_class_key af_family_keys[AF_MAX];
204static struct lock_class_key af_family_kern_keys[AF_MAX];
203static struct lock_class_key af_family_slock_keys[AF_MAX]; 205static struct lock_class_key af_family_slock_keys[AF_MAX];
206static struct lock_class_key af_family_kern_slock_keys[AF_MAX];
204 207
205/* 208/*
206 * Make lock validator output more readable. (we pre-construct these 209 * Make lock validator output more readable. (we pre-construct these
207 * strings build-time, so that runtime initialization of socket 210 * strings build-time, so that runtime initialization of socket
208 * locks is fast): 211 * locks is fast):
209 */ 212 */
213
214#define _sock_locks(x) \
215 x "AF_UNSPEC", x "AF_UNIX" , x "AF_INET" , \
216 x "AF_AX25" , x "AF_IPX" , x "AF_APPLETALK", \
217 x "AF_NETROM", x "AF_BRIDGE" , x "AF_ATMPVC" , \
218 x "AF_X25" , x "AF_INET6" , x "AF_ROSE" , \
219 x "AF_DECnet", x "AF_NETBEUI" , x "AF_SECURITY" , \
220 x "AF_KEY" , x "AF_NETLINK" , x "AF_PACKET" , \
221 x "AF_ASH" , x "AF_ECONET" , x "AF_ATMSVC" , \
222 x "AF_RDS" , x "AF_SNA" , x "AF_IRDA" , \
223 x "AF_PPPOX" , x "AF_WANPIPE" , x "AF_LLC" , \
224 x "27" , x "28" , x "AF_CAN" , \
225 x "AF_TIPC" , x "AF_BLUETOOTH", x "IUCV" , \
226 x "AF_RXRPC" , x "AF_ISDN" , x "AF_PHONET" , \
227 x "AF_IEEE802154", x "AF_CAIF" , x "AF_ALG" , \
228 x "AF_NFC" , x "AF_VSOCK" , x "AF_KCM" , \
229 x "AF_QIPCRTR", x "AF_SMC" , x "AF_MAX"
230
210static const char *const af_family_key_strings[AF_MAX+1] = { 231static const char *const af_family_key_strings[AF_MAX+1] = {
211 "sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX" , "sk_lock-AF_INET" , 232 _sock_locks("sk_lock-")
212 "sk_lock-AF_AX25" , "sk_lock-AF_IPX" , "sk_lock-AF_APPLETALK",
213 "sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE" , "sk_lock-AF_ATMPVC" ,
214 "sk_lock-AF_X25" , "sk_lock-AF_INET6" , "sk_lock-AF_ROSE" ,
215 "sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI" , "sk_lock-AF_SECURITY" ,
216 "sk_lock-AF_KEY" , "sk_lock-AF_NETLINK" , "sk_lock-AF_PACKET" ,
217 "sk_lock-AF_ASH" , "sk_lock-AF_ECONET" , "sk_lock-AF_ATMSVC" ,
218 "sk_lock-AF_RDS" , "sk_lock-AF_SNA" , "sk_lock-AF_IRDA" ,
219 "sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE" , "sk_lock-AF_LLC" ,
220 "sk_lock-27" , "sk_lock-28" , "sk_lock-AF_CAN" ,
221 "sk_lock-AF_TIPC" , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV" ,
222 "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN" , "sk_lock-AF_PHONET" ,
223 "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG" ,
224 "sk_lock-AF_NFC" , "sk_lock-AF_VSOCK" , "sk_lock-AF_KCM" ,
225 "sk_lock-AF_QIPCRTR", "sk_lock-AF_SMC" , "sk_lock-AF_MAX"
226}; 233};
227static const char *const af_family_slock_key_strings[AF_MAX+1] = { 234static const char *const af_family_slock_key_strings[AF_MAX+1] = {
228 "slock-AF_UNSPEC", "slock-AF_UNIX" , "slock-AF_INET" , 235 _sock_locks("slock-")
229 "slock-AF_AX25" , "slock-AF_IPX" , "slock-AF_APPLETALK",
230 "slock-AF_NETROM", "slock-AF_BRIDGE" , "slock-AF_ATMPVC" ,
231 "slock-AF_X25" , "slock-AF_INET6" , "slock-AF_ROSE" ,
232 "slock-AF_DECnet", "slock-AF_NETBEUI" , "slock-AF_SECURITY" ,
233 "slock-AF_KEY" , "slock-AF_NETLINK" , "slock-AF_PACKET" ,
234 "slock-AF_ASH" , "slock-AF_ECONET" , "slock-AF_ATMSVC" ,
235 "slock-AF_RDS" , "slock-AF_SNA" , "slock-AF_IRDA" ,
236 "slock-AF_PPPOX" , "slock-AF_WANPIPE" , "slock-AF_LLC" ,
237 "slock-27" , "slock-28" , "slock-AF_CAN" ,
238 "slock-AF_TIPC" , "slock-AF_BLUETOOTH", "slock-AF_IUCV" ,
239 "slock-AF_RXRPC" , "slock-AF_ISDN" , "slock-AF_PHONET" ,
240 "slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG" ,
241 "slock-AF_NFC" , "slock-AF_VSOCK" ,"slock-AF_KCM" ,
242 "slock-AF_QIPCRTR", "slock-AF_SMC" , "slock-AF_MAX"
243}; 236};
244static const char *const af_family_clock_key_strings[AF_MAX+1] = { 237static const char *const af_family_clock_key_strings[AF_MAX+1] = {
245 "clock-AF_UNSPEC", "clock-AF_UNIX" , "clock-AF_INET" , 238 _sock_locks("clock-")
246 "clock-AF_AX25" , "clock-AF_IPX" , "clock-AF_APPLETALK", 239};
247 "clock-AF_NETROM", "clock-AF_BRIDGE" , "clock-AF_ATMPVC" , 240
248 "clock-AF_X25" , "clock-AF_INET6" , "clock-AF_ROSE" , 241static const char *const af_family_kern_key_strings[AF_MAX+1] = {
249 "clock-AF_DECnet", "clock-AF_NETBEUI" , "clock-AF_SECURITY" , 242 _sock_locks("k-sk_lock-")
250 "clock-AF_KEY" , "clock-AF_NETLINK" , "clock-AF_PACKET" , 243};
251 "clock-AF_ASH" , "clock-AF_ECONET" , "clock-AF_ATMSVC" , 244static const char *const af_family_kern_slock_key_strings[AF_MAX+1] = {
252 "clock-AF_RDS" , "clock-AF_SNA" , "clock-AF_IRDA" , 245 _sock_locks("k-slock-")
253 "clock-AF_PPPOX" , "clock-AF_WANPIPE" , "clock-AF_LLC" , 246};
254 "clock-27" , "clock-28" , "clock-AF_CAN" , 247static const char *const af_family_kern_clock_key_strings[AF_MAX+1] = {
255 "clock-AF_TIPC" , "clock-AF_BLUETOOTH", "clock-AF_IUCV" , 248 _sock_locks("k-clock-")
256 "clock-AF_RXRPC" , "clock-AF_ISDN" , "clock-AF_PHONET" ,
257 "clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG" ,
258 "clock-AF_NFC" , "clock-AF_VSOCK" , "clock-AF_KCM" ,
259 "clock-AF_QIPCRTR", "clock-AF_SMC" , "clock-AF_MAX"
260}; 249};
261 250
262/* 251/*
@@ -264,6 +253,7 @@ static const char *const af_family_clock_key_strings[AF_MAX+1] = {
264 * so split the lock classes by using a per-AF key: 253 * so split the lock classes by using a per-AF key:
265 */ 254 */
266static struct lock_class_key af_callback_keys[AF_MAX]; 255static struct lock_class_key af_callback_keys[AF_MAX];
256static struct lock_class_key af_kern_callback_keys[AF_MAX];
267 257
268/* Take into consideration the size of the struct sk_buff overhead in the 258/* Take into consideration the size of the struct sk_buff overhead in the
269 * determination of these values, since that is non-constant across 259 * determination of these values, since that is non-constant across
@@ -1293,7 +1283,16 @@ lenout:
1293 */ 1283 */
1294static inline void sock_lock_init(struct sock *sk) 1284static inline void sock_lock_init(struct sock *sk)
1295{ 1285{
1296 sock_lock_init_class_and_name(sk, 1286 if (sk->sk_kern_sock)
1287 sock_lock_init_class_and_name(
1288 sk,
1289 af_family_kern_slock_key_strings[sk->sk_family],
1290 af_family_kern_slock_keys + sk->sk_family,
1291 af_family_kern_key_strings[sk->sk_family],
1292 af_family_kern_keys + sk->sk_family);
1293 else
1294 sock_lock_init_class_and_name(
1295 sk,
1297 af_family_slock_key_strings[sk->sk_family], 1296 af_family_slock_key_strings[sk->sk_family],
1298 af_family_slock_keys + sk->sk_family, 1297 af_family_slock_keys + sk->sk_family,
1299 af_family_key_strings[sk->sk_family], 1298 af_family_key_strings[sk->sk_family],
@@ -1399,6 +1398,7 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
1399 * why we need sk_prot_creator -acme 1398 * why we need sk_prot_creator -acme
1400 */ 1399 */
1401 sk->sk_prot = sk->sk_prot_creator = prot; 1400 sk->sk_prot = sk->sk_prot_creator = prot;
1401 sk->sk_kern_sock = kern;
1402 sock_lock_init(sk); 1402 sock_lock_init(sk);
1403 sk->sk_net_refcnt = kern ? 0 : 1; 1403 sk->sk_net_refcnt = kern ? 0 : 1;
1404 if (likely(sk->sk_net_refcnt)) 1404 if (likely(sk->sk_net_refcnt))
@@ -1442,6 +1442,11 @@ static void __sk_destruct(struct rcu_head *head)
1442 pr_debug("%s: optmem leakage (%d bytes) detected\n", 1442 pr_debug("%s: optmem leakage (%d bytes) detected\n",
1443 __func__, atomic_read(&sk->sk_omem_alloc)); 1443 __func__, atomic_read(&sk->sk_omem_alloc));
1444 1444
1445 if (sk->sk_frag.page) {
1446 put_page(sk->sk_frag.page);
1447 sk->sk_frag.page = NULL;
1448 }
1449
1445 if (sk->sk_peer_cred) 1450 if (sk->sk_peer_cred)
1446 put_cred(sk->sk_peer_cred); 1451 put_cred(sk->sk_peer_cred);
1447 put_pid(sk->sk_peer_pid); 1452 put_pid(sk->sk_peer_pid);
@@ -1539,6 +1544,12 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
1539 is_charged = sk_filter_charge(newsk, filter); 1544 is_charged = sk_filter_charge(newsk, filter);
1540 1545
1541 if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) { 1546 if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) {
1547 /* We need to make sure that we don't uncharge the new
1548 * socket if we couldn't charge it in the first place
1549 * as otherwise we uncharge the parent's filter.
1550 */
1551 if (!is_charged)
1552 RCU_INIT_POINTER(newsk->sk_filter, NULL);
1542 sk_free_unlock_clone(newsk); 1553 sk_free_unlock_clone(newsk);
1543 newsk = NULL; 1554 newsk = NULL;
1544 goto out; 1555 goto out;
@@ -2277,7 +2288,8 @@ int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
2277} 2288}
2278EXPORT_SYMBOL(sock_no_socketpair); 2289EXPORT_SYMBOL(sock_no_socketpair);
2279 2290
2280int sock_no_accept(struct socket *sock, struct socket *newsock, int flags) 2291int sock_no_accept(struct socket *sock, struct socket *newsock, int flags,
2292 bool kern)
2281{ 2293{
2282 return -EOPNOTSUPP; 2294 return -EOPNOTSUPP;
2283} 2295}
@@ -2481,7 +2493,14 @@ void sock_init_data(struct socket *sock, struct sock *sk)
2481 } 2493 }
2482 2494
2483 rwlock_init(&sk->sk_callback_lock); 2495 rwlock_init(&sk->sk_callback_lock);
2484 lockdep_set_class_and_name(&sk->sk_callback_lock, 2496 if (sk->sk_kern_sock)
2497 lockdep_set_class_and_name(
2498 &sk->sk_callback_lock,
2499 af_kern_callback_keys + sk->sk_family,
2500 af_family_kern_clock_key_strings[sk->sk_family]);
2501 else
2502 lockdep_set_class_and_name(
2503 &sk->sk_callback_lock,
2485 af_callback_keys + sk->sk_family, 2504 af_callback_keys + sk->sk_family,
2486 af_family_clock_key_strings[sk->sk_family]); 2505 af_family_clock_key_strings[sk->sk_family]);
2487 2506
@@ -2779,11 +2798,6 @@ void sk_common_release(struct sock *sk)
2779 2798
2780 sk_refcnt_debug_release(sk); 2799 sk_refcnt_debug_release(sk);
2781 2800
2782 if (sk->sk_frag.page) {
2783 put_page(sk->sk_frag.page);
2784 sk->sk_frag.page = NULL;
2785 }
2786
2787 sock_put(sk); 2801 sock_put(sk);
2788} 2802}
2789EXPORT_SYMBOL(sk_common_release); 2803EXPORT_SYMBOL(sk_common_release);
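The three hand-maintained AF_* name tables collapse into one _sock_locks() macro that leans on C adjacent-string-literal concatenation: each expansion pastes a caller-supplied prefix onto every family name, and the new "k-" expansions give kernel sockets (sk->sk_kern_sock) lockdep classes separate from user sockets of the same family, so a kernel socket locked inside a user socket's lock section stops looking like a self-deadlock. The string trick in isolation, as a runnable sketch:

    #include <stdio.h>

    /* `x "AF_UNIX"` expands to e.g. "sk_lock-" "AF_UNIX", which the
     * compiler concatenates into "sk_lock-AF_UNIX". */
    #define SOCK_LOCKS(x) \
            x "AF_UNSPEC", x "AF_UNIX", x "AF_INET"

    static const char *const key_strings[]      = { SOCK_LOCKS("sk_lock-") };
    static const char *const kern_key_strings[] = { SOCK_LOCKS("k-sk_lock-") };

    int main(void)
    {
            /* prints "sk_lock-AF_UNIX / k-sk_lock-AF_UNIX" */
            printf("%s / %s\n", key_strings[1], kern_key_strings[1]);
            return 0;
    }

The same file also moves the sk_frag.page release from sk_common_release() into __sk_destruct(), so the page is dropped on every socket-free path rather than only in protocols that call sk_common_release().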
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 4ead336e14ea..7f9cc400eca0 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -408,14 +408,16 @@ static struct ctl_table net_core_table[] = {
408 .data = &sysctl_net_busy_poll, 408 .data = &sysctl_net_busy_poll,
409 .maxlen = sizeof(unsigned int), 409 .maxlen = sizeof(unsigned int),
410 .mode = 0644, 410 .mode = 0644,
411 .proc_handler = proc_dointvec 411 .proc_handler = proc_dointvec_minmax,
412 .extra1 = &zero,
412 }, 413 },
413 { 414 {
414 .procname = "busy_read", 415 .procname = "busy_read",
415 .data = &sysctl_net_busy_read, 416 .data = &sysctl_net_busy_read,
416 .maxlen = sizeof(unsigned int), 417 .maxlen = sizeof(unsigned int),
417 .mode = 0644, 418 .mode = 0644,
418 .proc_handler = proc_dointvec 419 .proc_handler = proc_dointvec_minmax,
420 .extra1 = &zero,
419 }, 421 },
420#endif 422#endif
421#ifdef CONFIG_NET_SCHED 423#ifdef CONFIG_NET_SCHED
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
index f053198e730c..5e3a7302f774 100644
--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c
@@ -749,6 +749,7 @@ static void ccid2_hc_tx_exit(struct sock *sk)
749 for (i = 0; i < hc->tx_seqbufc; i++) 749 for (i = 0; i < hc->tx_seqbufc; i++)
750 kfree(hc->tx_seqbuf[i]); 750 kfree(hc->tx_seqbuf[i]);
751 hc->tx_seqbufc = 0; 751 hc->tx_seqbufc = 0;
752 dccp_ackvec_parsed_cleanup(&hc->tx_av_chunks);
752} 753}
753 754
754static void ccid2_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) 755static void ccid2_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 409d0cfd3447..b99168b0fabf 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -289,7 +289,8 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
289 289
290 switch (type) { 290 switch (type) {
291 case ICMP_REDIRECT: 291 case ICMP_REDIRECT:
292 dccp_do_redirect(skb, sk); 292 if (!sock_owned_by_user(sk))
293 dccp_do_redirect(skb, sk);
293 goto out; 294 goto out;
294 case ICMP_SOURCE_QUENCH: 295 case ICMP_SOURCE_QUENCH:
295 /* Just silently ignore these. */ 296 /* Just silently ignore these. */
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 233b57367758..d9b6a4e403e7 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -122,10 +122,12 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
122 np = inet6_sk(sk); 122 np = inet6_sk(sk);
123 123
124 if (type == NDISC_REDIRECT) { 124 if (type == NDISC_REDIRECT) {
125 struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie); 125 if (!sock_owned_by_user(sk)) {
126 struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
126 127
127 if (dst) 128 if (dst)
128 dst->ops->redirect(dst, sk, skb); 129 dst->ops->redirect(dst, sk, skb);
130 }
129 goto out; 131 goto out;
130 } 132 }
131 133
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
index e267e6f4c9a5..abd07a443219 100644
--- a/net/dccp/minisocks.c
+++ b/net/dccp/minisocks.c
@@ -142,6 +142,13 @@ struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
142 struct dccp_request_sock *dreq = dccp_rsk(req); 142 struct dccp_request_sock *dreq = dccp_rsk(req);
143 bool own_req; 143 bool own_req;
144 144
145 /* TCP/DCCP listeners became lockless.
146 * DCCP stores complex state in its request_sock, so we need
147 * a protection for them, now this code runs without being protected
148 * by the parent (listener) lock.
149 */
150 spin_lock_bh(&dreq->dreq_lock);
151
145 /* Check for retransmitted REQUEST */ 152 /* Check for retransmitted REQUEST */
146 if (dccp_hdr(skb)->dccph_type == DCCP_PKT_REQUEST) { 153 if (dccp_hdr(skb)->dccph_type == DCCP_PKT_REQUEST) {
147 154
@@ -156,7 +163,7 @@ struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
156 inet_rtx_syn_ack(sk, req); 163 inet_rtx_syn_ack(sk, req);
157 } 164 }
158 /* Network Duplicate, discard packet */ 165 /* Network Duplicate, discard packet */
159 return NULL; 166 goto out;
160 } 167 }
161 168
162 DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_PACKET_ERROR; 169 DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_PACKET_ERROR;
@@ -182,20 +189,20 @@ struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
182 189
183 child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL, 190 child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
184 req, &own_req); 191 req, &own_req);
185 if (!child) 192 if (child) {
186 goto listen_overflow; 193 child = inet_csk_complete_hashdance(sk, child, req, own_req);
187 194 goto out;
188 return inet_csk_complete_hashdance(sk, child, req, own_req); 195 }
189 196
190listen_overflow:
191 dccp_pr_debug("listen_overflow!\n");
192 DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_TOO_BUSY; 197 DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_TOO_BUSY;
193drop: 198drop:
194 if (dccp_hdr(skb)->dccph_type != DCCP_PKT_RESET) 199 if (dccp_hdr(skb)->dccph_type != DCCP_PKT_RESET)
195 req->rsk_ops->send_reset(sk, skb); 200 req->rsk_ops->send_reset(sk, skb);
196 201
197 inet_csk_reqsk_queue_drop(sk, req); 202 inet_csk_reqsk_queue_drop(sk, req);
198 return NULL; 203out:
204 spin_unlock_bh(&dreq->dreq_lock);
205 return child;
199} 206}
200 207
201EXPORT_SYMBOL_GPL(dccp_check_req); 208EXPORT_SYMBOL_GPL(dccp_check_req);
@@ -246,6 +253,7 @@ int dccp_reqsk_init(struct request_sock *req,
246{ 253{
247 struct dccp_request_sock *dreq = dccp_rsk(req); 254 struct dccp_request_sock *dreq = dccp_rsk(req);
248 255
256 spin_lock_init(&dreq->dreq_lock);
249 inet_rsk(req)->ir_rmt_port = dccp_hdr(skb)->dccph_sport; 257 inet_rsk(req)->ir_rmt_port = dccp_hdr(skb)->dccph_sport;
250 inet_rsk(req)->ir_num = ntohs(dccp_hdr(skb)->dccph_dport); 258 inet_rsk(req)->ir_num = ntohs(dccp_hdr(skb)->dccph_dport);
251 inet_rsk(req)->acked = 0; 259 inet_rsk(req)->acked = 0;
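The added comment explains the race: TCP/DCCP listeners became lockless, so dccp_check_req() can run for the same request socket without the listener lock, and the DCCP-specific request state gets its own dreq_lock, initialized in dccp_reqsk_init(). All exits funnel through one label that drops it. The single-exit shape, as a standalone sketch (pthread stands in for the spinlock):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t dreq_lock = PTHREAD_MUTEX_INITIALIZER;
    static int child_token;

    static void *check_req(int retransmit, int overflow)
    {
            void *child = NULL;

            pthread_mutex_lock(&dreq_lock);
            if (retransmit)
                    goto out;       /* duplicate REQUEST: discard */
            if (overflow)
                    goto out;       /* too busy: reset already sent */
            child = &child_token;   /* stand-in for the child socket */
    out:
            pthread_mutex_unlock(&dreq_lock);
            return child;
    }

    int main(void)
    {
            printf("%p %p\n", check_req(1, 0), check_req(0, 0));
            return 0;
    }

Folding the old listen_overflow label into the main flow is what lets `child` carry the result across every path.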
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index e6e79eda9763..7de5b40a5d0d 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -1070,7 +1070,8 @@ static struct sk_buff *dn_wait_for_connect(struct sock *sk, long *timeo)
1070 return skb == NULL ? ERR_PTR(err) : skb; 1070 return skb == NULL ? ERR_PTR(err) : skb;
1071} 1071}
1072 1072
1073static int dn_accept(struct socket *sock, struct socket *newsock, int flags) 1073static int dn_accept(struct socket *sock, struct socket *newsock, int flags,
1074 bool kern)
1074{ 1075{
1075 struct sock *sk = sock->sk, *newsk; 1076 struct sock *sk = sock->sk, *newsk;
1076 struct sk_buff *skb = NULL; 1077 struct sk_buff *skb = NULL;
@@ -1099,7 +1100,7 @@ static int dn_accept(struct socket *sock, struct socket *newsock, int flags)
1099 1100
1100 cb = DN_SKB_CB(skb); 1101 cb = DN_SKB_CB(skb);
1101 sk->sk_ack_backlog--; 1102 sk->sk_ack_backlog--;
1102 newsk = dn_alloc_sock(sock_net(sk), newsock, sk->sk_allocation, 0); 1103 newsk = dn_alloc_sock(sock_net(sk), newsock, sk->sk_allocation, kern);
1103 if (newsk == NULL) { 1104 if (newsk == NULL) {
1104 release_sock(sk); 1105 release_sock(sk);
1105 kfree_skb(skb); 1106 kfree_skb(skb);
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 602d40f43687..6b1fc6e4278e 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -689,11 +689,12 @@ EXPORT_SYMBOL(inet_stream_connect);
689 * Accept a pending connection. The TCP layer now gives BSD semantics. 689 * Accept a pending connection. The TCP layer now gives BSD semantics.
690 */ 690 */
691 691
692int inet_accept(struct socket *sock, struct socket *newsock, int flags) 692int inet_accept(struct socket *sock, struct socket *newsock, int flags,
693 bool kern)
693{ 694{
694 struct sock *sk1 = sock->sk; 695 struct sock *sk1 = sock->sk;
695 int err = -EINVAL; 696 int err = -EINVAL;
696 struct sock *sk2 = sk1->sk_prot->accept(sk1, flags, &err); 697 struct sock *sk2 = sk1->sk_prot->accept(sk1, flags, &err, kern);
697 698
698 if (!sk2) 699 if (!sk2)
699 goto do_err; 700 goto do_err;
@@ -1487,8 +1488,10 @@ int inet_gro_complete(struct sk_buff *skb, int nhoff)
1487 int proto = iph->protocol; 1488 int proto = iph->protocol;
1488 int err = -ENOSYS; 1489 int err = -ENOSYS;
1489 1490
1490 if (skb->encapsulation) 1491 if (skb->encapsulation) {
1492 skb_set_inner_protocol(skb, cpu_to_be16(ETH_P_IP));
1491 skb_set_inner_network_header(skb, nhoff); 1493 skb_set_inner_network_header(skb, nhoff);
1494 }
1492 1495
1493 csum_replace2(&iph->check, iph->tot_len, newlen); 1496 csum_replace2(&iph->check, iph->tot_len, newlen);
1494 iph->tot_len = newlen; 1497 iph->tot_len = newlen;
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 42bfd08109dd..8f2133ffc2ff 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -1083,7 +1083,8 @@ static void nl_fib_input(struct sk_buff *skb)
1083 1083
1084 net = sock_net(skb->sk); 1084 net = sock_net(skb->sk);
1085 nlh = nlmsg_hdr(skb); 1085 nlh = nlmsg_hdr(skb);
1086 if (skb->len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len || 1086 if (skb->len < nlmsg_total_size(sizeof(*frn)) ||
1087 skb->len < nlh->nlmsg_len ||
1087 nlmsg_len(nlh) < sizeof(*frn)) 1088 nlmsg_len(nlh) < sizeof(*frn))
1088 return; 1089 return;
1089 1090
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index b4d5980ade3b..5e313c1ac94f 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -424,7 +424,7 @@ static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
 /*
  * This will accept the next outstanding connection.
  */
-struct sock *inet_csk_accept(struct sock *sk, int flags, int *err)
+struct sock *inet_csk_accept(struct sock *sk, int flags, int *err, bool kern)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
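The accept() hunks above and below (dn_accept(), inet_accept(), inet_csk_accept(), and later irda_accept()/iucv_sock_accept()) all thread a new `kern` flag from the socket layer down to the protocol's sock allocator, so sockets accepted on behalf of the kernel can be told apart from user-triggered ones at allocation time (the upstream motivation was lockdep socket-lock classification). A minimal userspace sketch of the same flag-threading shape; sys_accept_like(), proto_accept() and alloc_sock() are invented names, not kernel APIs:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct sock_like {
	bool kern;	/* records who asked for the socket */
};

/* Lowest layer: the allocator is the only place that needs the flag. */
static struct sock_like *alloc_sock(bool kern)
{
	struct sock_like *sk = malloc(sizeof(*sk));

	if (sk)
		sk->kern = kern;
	return sk;
}

/* Protocol accept: before the fix, this layer passed a hard-coded 0. */
static struct sock_like *proto_accept(bool kern)
{
	return alloc_sock(kern);
}

/* Socket layer: only the caller knows whether the request is kernel-internal. */
static struct sock_like *sys_accept_like(bool kern)
{
	return proto_accept(kern);
}

int main(void)
{
	struct sock_like *user_sk = sys_accept_like(false);
	struct sock_like *kern_sk = sys_accept_like(true);

	printf("user socket kern=%d, kernel socket kern=%d\n",
	       user_sk ? user_sk->kern : -1, kern_sk ? kern_sk->kern : -1);
	free(user_sk);
	free(kern_sk);
	return 0;
}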
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index bbe7f72db9c1..b3cdeec85f1f 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -198,6 +198,7 @@ static void ip_expire(unsigned long arg)
 	qp = container_of((struct inet_frag_queue *) arg, struct ipq, q);
 	net = container_of(qp->q.net, struct net, ipv4.frags);
 
+	rcu_read_lock();
 	spin_lock(&qp->q.lock);
 
 	if (qp->q.flags & INET_FRAG_COMPLETE)
@@ -207,7 +208,7 @@ static void ip_expire(unsigned long arg)
 	__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
 
 	if (!inet_frag_evicting(&qp->q)) {
-		struct sk_buff *head = qp->q.fragments;
+		struct sk_buff *clone, *head = qp->q.fragments;
 		const struct iphdr *iph;
 		int err;
 
@@ -216,32 +217,40 @@ static void ip_expire(unsigned long arg)
 		if (!(qp->q.flags & INET_FRAG_FIRST_IN) || !qp->q.fragments)
 			goto out;
 
-		rcu_read_lock();
 		head->dev = dev_get_by_index_rcu(net, qp->iif);
 		if (!head->dev)
-			goto out_rcu_unlock;
+			goto out;
+
 
 		/* skb has no dst, perform route lookup again */
 		iph = ip_hdr(head);
 		err = ip_route_input_noref(head, iph->daddr, iph->saddr,
 					   iph->tos, head->dev);
 		if (err)
-			goto out_rcu_unlock;
+			goto out;
 
 		/* Only an end host needs to send an ICMP
 		 * "Fragment Reassembly Timeout" message, per RFC792.
 		 */
 		if (frag_expire_skip_icmp(qp->user) &&
 		    (skb_rtable(head)->rt_type != RTN_LOCAL))
-			goto out_rcu_unlock;
+			goto out;
+
+		clone = skb_clone(head, GFP_ATOMIC);
 
 		/* Send an ICMP "Fragment Reassembly Timeout" message. */
-		icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
-out_rcu_unlock:
-		rcu_read_unlock();
+		if (clone) {
+			spin_unlock(&qp->q.lock);
+			icmp_send(clone, ICMP_TIME_EXCEEDED,
+				  ICMP_EXC_FRAGTIME, 0);
+			consume_skb(clone);
+			goto out_rcu_unlock;
+		}
 	}
 out:
 	spin_unlock(&qp->q.lock);
+out_rcu_unlock:
+	rcu_read_unlock();
 	ipq_put(qp);
 }
 
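In the ip_expire() hunks above, the ICMP "Fragment Reassembly Timeout" message is now sent from a clone of the head skb after qp->q.lock has been dropped, instead of under the spinlock. A loose userspace analogue of the copy-under-lock, work-outside-lock pattern (a pthread mutex stands in for the spinlock; all names are invented for illustration):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static char queue_head[64] = "fragment data";

/* Stand-in for icmp_send(): potentially slow, must not run under the lock. */
static void slow_send(const char *pkt)
{
	printf("sending ICMP for: %s\n", pkt);
}

static void expire(void)
{
	char *clone;

	pthread_mutex_lock(&queue_lock);
	/* Copy what we need while the queue is stable... */
	clone = strdup(queue_head);
	if (clone) {
		/* ...then drop the lock before doing the slow work. */
		pthread_mutex_unlock(&queue_lock);
		slow_send(clone);
		free(clone);
		return;
	}
	pthread_mutex_unlock(&queue_lock);
}

int main(void)
{
	expire();
	return 0;
}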
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 737ce826d7ec..7a3fd25e8913 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -966,7 +966,7 @@ static int __ip_append_data(struct sock *sk,
 	cork->length += length;
 	if ((((length + fragheaderlen) > mtu) || (skb && skb_is_gso(skb))) &&
 	    (sk->sk_protocol == IPPROTO_UDP) &&
-	    (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len &&
+	    (rt->dst.dev->features & NETIF_F_UFO) && !dst_xfrm(&rt->dst) &&
 	    (sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx) {
 		err = ip_ufo_append_data(sk, queue, getfrag, from, length,
 					 hh_len, fragheaderlen, transhdrlen,
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index fd9f34bbd740..dfb2ab2dd3c8 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -306,7 +306,7 @@ static void __init ic_close_devs(void)
 	while ((d = next)) {
 		next = d->next;
 		dev = d->dev;
-		if ((!ic_dev || dev != ic_dev->dev) && !netdev_uses_dsa(dev)) {
+		if (d != ic_dev && !netdev_uses_dsa(dev)) {
 			pr_debug("IP-Config: Downing %s\n", dev->name);
 			dev_change_flags(dev, d->flags);
 		}
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index bc1486f2c064..2e14ed11a35c 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -165,6 +165,10 @@ static unsigned int ipv4_conntrack_local(void *priv,
 	if (skb->len < sizeof(struct iphdr) ||
 	    ip_hdrlen(skb) < sizeof(struct iphdr))
 		return NF_ACCEPT;
+
+	if (ip_is_fragment(ip_hdr(skb))) /* IP_NODEFRAG setsockopt set */
+		return NF_ACCEPT;
+
 	return nf_conntrack_in(state->net, PF_INET, state->hook, skb);
 }
 
diff --git a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
index f8aad03d674b..6f5e8d01b876 100644
--- a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
@@ -255,11 +255,6 @@ nf_nat_ipv4_fn(void *priv, struct sk_buff *skb,
 	/* maniptype == SRC for postrouting. */
 	enum nf_nat_manip_type maniptype = HOOK2MANIP(state->hook);
 
-	/* We never see fragments: conntrack defrags on pre-routing
-	 * and local-out, and nf_nat_out protects post-routing.
-	 */
-	NF_CT_ASSERT(!ip_is_fragment(ip_hdr(skb)));
-
 	ct = nf_ct_get(skb, &ctinfo);
 	/* Can't track?  It's not due to stress, or conntrack would
 	 * have dropped it.  Hence it's the user's responsibilty to
diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
index c9b52c361da2..53e49f5011d3 100644
--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
+++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
@@ -1260,16 +1260,6 @@ static const struct nf_conntrack_expect_policy snmp_exp_policy = {
 	.timeout	= 180,
 };
 
-static struct nf_conntrack_helper snmp_helper __read_mostly = {
-	.me			= THIS_MODULE,
-	.help			= help,
-	.expect_policy		= &snmp_exp_policy,
-	.name			= "snmp",
-	.tuple.src.l3num	= AF_INET,
-	.tuple.src.u.udp.port	= cpu_to_be16(SNMP_PORT),
-	.tuple.dst.protonum	= IPPROTO_UDP,
-};
-
 static struct nf_conntrack_helper snmp_trap_helper __read_mostly = {
 	.me			= THIS_MODULE,
 	.help			= help,
@@ -1288,22 +1278,16 @@ static struct nf_conntrack_helper snmp_trap_helper __read_mostly = {
 
 static int __init nf_nat_snmp_basic_init(void)
 {
-	int ret = 0;
-
 	BUG_ON(nf_nat_snmp_hook != NULL);
 	RCU_INIT_POINTER(nf_nat_snmp_hook, help);
 
-	ret = nf_conntrack_helper_register(&snmp_trap_helper);
-	if (ret < 0) {
-		nf_conntrack_helper_unregister(&snmp_helper);
-		return ret;
-	}
-	return ret;
+	return nf_conntrack_helper_register(&snmp_trap_helper);
 }
 
 static void __exit nf_nat_snmp_basic_fini(void)
 {
 	RCU_INIT_POINTER(nf_nat_snmp_hook, NULL);
+	synchronize_rcu();
 	nf_conntrack_helper_unregister(&snmp_trap_helper);
 }
 
diff --git a/net/ipv4/netfilter/nft_masq_ipv4.c b/net/ipv4/netfilter/nft_masq_ipv4.c
index a0ea8aad1bf1..f18677277119 100644
--- a/net/ipv4/netfilter/nft_masq_ipv4.c
+++ b/net/ipv4/netfilter/nft_masq_ipv4.c
@@ -26,10 +26,10 @@ static void nft_masq_ipv4_eval(const struct nft_expr *expr,
 	memset(&range, 0, sizeof(range));
 	range.flags = priv->flags;
 	if (priv->sreg_proto_min) {
-		range.min_proto.all =
-			*(__be16 *)&regs->data[priv->sreg_proto_min];
-		range.max_proto.all =
-			*(__be16 *)&regs->data[priv->sreg_proto_max];
+		range.min_proto.all = (__force __be16)nft_reg_load16(
+			&regs->data[priv->sreg_proto_min]);
+		range.max_proto.all = (__force __be16)nft_reg_load16(
+			&regs->data[priv->sreg_proto_max]);
 	}
 	regs->verdict.code = nf_nat_masquerade_ipv4(pkt->skb, nft_hook(pkt),
 						    &range, nft_out(pkt));
diff --git a/net/ipv4/netfilter/nft_redir_ipv4.c b/net/ipv4/netfilter/nft_redir_ipv4.c
index 1650ed23c15d..5120be1d3118 100644
--- a/net/ipv4/netfilter/nft_redir_ipv4.c
+++ b/net/ipv4/netfilter/nft_redir_ipv4.c
@@ -26,10 +26,10 @@ static void nft_redir_ipv4_eval(const struct nft_expr *expr,
 
 	memset(&mr, 0, sizeof(mr));
 	if (priv->sreg_proto_min) {
-		mr.range[0].min.all =
-			*(__be16 *)&regs->data[priv->sreg_proto_min];
-		mr.range[0].max.all =
-			*(__be16 *)&regs->data[priv->sreg_proto_max];
+		mr.range[0].min.all = (__force __be16)nft_reg_load16(
+			&regs->data[priv->sreg_proto_min]);
+		mr.range[0].max.all = (__force __be16)nft_reg_load16(
+			&regs->data[priv->sreg_proto_max]);
 		mr.range[0].flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
 	}
 
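The nft_masq/nft_redir hunks above (and the matching nft_masq_ipv6/nft_redir_ipv6 hunks further down) replace open-coded __be16 loads from the 32-bit register file with nft_reg_load16(), so 16-bit values are stored into and read back from a register slot through one agreed convention regardless of host endianness. A rough userspace analogue of such a helper; reg_load16() here is invented for illustration, not the kernel function:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Read exactly 16 bits out of a 32-bit register slot with a
 * defined-behaviour copy instead of type-punning the slot's address. */
static uint16_t reg_load16(const uint32_t *sreg)
{
	uint16_t val;

	memcpy(&val, sreg, sizeof(val));
	return val;
}

int main(void)
{
	uint32_t reg = 0;
	uint16_t port = 8080;

	/* Store through the same convention the loader uses. */
	memcpy(&reg, &port, sizeof(port));
	printf("port=%u\n", reg_load16(&reg));
	return 0;
}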
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 2af6244b83e2..ccfbce13a633 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -156,17 +156,18 @@ int ping_hash(struct sock *sk)
 void ping_unhash(struct sock *sk)
 {
 	struct inet_sock *isk = inet_sk(sk);
+
 	pr_debug("ping_unhash(isk=%p,isk->num=%u)\n", isk, isk->inet_num);
+	write_lock_bh(&ping_table.lock);
 	if (sk_hashed(sk)) {
-		write_lock_bh(&ping_table.lock);
 		hlist_nulls_del(&sk->sk_nulls_node);
 		sk_nulls_node_init(&sk->sk_nulls_node);
 		sock_put(sk);
 		isk->inet_num = 0;
 		isk->inet_sport = 0;
 		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
-		write_unlock_bh(&ping_table.lock);
 	}
+	write_unlock_bh(&ping_table.lock);
 }
 EXPORT_SYMBOL_GPL(ping_unhash);
 
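The ping_unhash() change widens the write lock so the sk_hashed() test itself runs under ping_table.lock; previously two racing callers could both pass the test and both run the unhash sequence. A userspace sketch of the test-inside-lock shape (pthread mutex in place of the rwlock, names invented; build with -lpthread):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static bool hashed = true;
static int unhash_count;

/* Test the flag only while holding the lock: two racing callers can no
 * longer both see hashed==true and both run the teardown. */
static void unhash(void)
{
	pthread_mutex_lock(&table_lock);
	if (hashed) {
		hashed = false;
		unhash_count++;	/* teardown runs exactly once */
	}
	pthread_mutex_unlock(&table_lock);
}

static void *racer(void *arg)
{
	(void)arg;
	unhash();
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, racer, NULL);
	pthread_create(&b, NULL, racer, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	printf("unhash ran %d time(s)\n", unhash_count);	/* always 1 */
	return 0;
}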
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index cf4555581282..1e319a525d51 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2770,7 +2770,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
 {
 	const struct tcp_sock *tp = tcp_sk(sk); /* iff sk_type == SOCK_STREAM */
 	const struct inet_connection_sock *icsk = inet_csk(sk);
-	u32 now = tcp_time_stamp, intv;
+	u32 now, intv;
 	u64 rate64;
 	bool slow;
 	u32 rate;
@@ -2839,6 +2839,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
 	info->tcpi_retrans = tp->retrans_out;
 	info->tcpi_fackets = tp->fackets_out;
 
+	now = tcp_time_stamp;
 	info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime);
 	info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime);
 	info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 39c393cc0fd3..2c1f59386a7b 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -126,7 +126,8 @@ int sysctl_tcp_invalid_ratelimit __read_mostly = HZ/2;
 #define REXMIT_LOST	1 /* retransmit packets marked lost */
 #define REXMIT_NEW	2 /* FRTO-style transmit of unsent/new packets */
 
-static void tcp_gro_dev_warn(struct sock *sk, const struct sk_buff *skb)
+static void tcp_gro_dev_warn(struct sock *sk, const struct sk_buff *skb,
+			     unsigned int len)
 {
 	static bool __once __read_mostly;
 
@@ -137,8 +138,9 @@ static void tcp_gro_dev_warn(struct sock *sk, const struct sk_buff *skb)
 
 		rcu_read_lock();
 		dev = dev_get_by_index_rcu(sock_net(sk), skb->skb_iif);
-		pr_warn("%s: Driver has suspect GRO implementation, TCP performance may be compromised.\n",
-			dev ? dev->name : "Unknown driver");
+		if (!dev || len >= dev->mtu)
+			pr_warn("%s: Driver has suspect GRO implementation, TCP performance may be compromised.\n",
+				dev ? dev->name : "Unknown driver");
 		rcu_read_unlock();
 	}
 }
@@ -161,8 +163,10 @@ static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb)
 	if (len >= icsk->icsk_ack.rcv_mss) {
 		icsk->icsk_ack.rcv_mss = min_t(unsigned int, len,
 					       tcp_sk(sk)->advmss);
-		if (unlikely(icsk->icsk_ack.rcv_mss != len))
-			tcp_gro_dev_warn(sk, skb);
+		/* Account for possibly-removed options */
+		if (unlikely(len > icsk->icsk_ack.rcv_mss +
+				   MAX_TCP_OPTION_SPACE))
+			tcp_gro_dev_warn(sk, skb, len);
 	} else {
 		/* Otherwise, we make more careful check taking into account,
 		 * that SACKs block is variable.
@@ -874,22 +878,11 @@ static void tcp_update_reordering(struct sock *sk, const int metric,
 			       const int ts)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
-	if (metric > tp->reordering) {
-		int mib_idx;
+	int mib_idx;
 
+	if (metric > tp->reordering) {
 		tp->reordering = min(sysctl_tcp_max_reordering, metric);
 
-		/* This exciting event is worth to be remembered. 8) */
-		if (ts)
-			mib_idx = LINUX_MIB_TCPTSREORDER;
-		else if (tcp_is_reno(tp))
-			mib_idx = LINUX_MIB_TCPRENOREORDER;
-		else if (tcp_is_fack(tp))
-			mib_idx = LINUX_MIB_TCPFACKREORDER;
-		else
-			mib_idx = LINUX_MIB_TCPSACKREORDER;
-
-		NET_INC_STATS(sock_net(sk), mib_idx);
 #if FASTRETRANS_DEBUG > 1
 		pr_debug("Disorder%d %d %u f%u s%u rr%d\n",
 			 tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state,
@@ -902,6 +895,18 @@ static void tcp_update_reordering(struct sock *sk, const int metric,
 	}
 
 	tp->rack.reord = 1;
+
+	/* This exciting event is worth to be remembered. 8) */
+	if (ts)
+		mib_idx = LINUX_MIB_TCPTSREORDER;
+	else if (tcp_is_reno(tp))
+		mib_idx = LINUX_MIB_TCPRENOREORDER;
+	else if (tcp_is_fack(tp))
+		mib_idx = LINUX_MIB_TCPFACKREORDER;
+	else
+		mib_idx = LINUX_MIB_TCPSACKREORDER;
+
+	NET_INC_STATS(sock_net(sk), mib_idx);
 }
 
 /* This must be called before lost_out is incremented */
@@ -5541,6 +5546,7 @@ void tcp_finish_connect(struct sock *sk, struct sk_buff *skb)
 	struct inet_connection_sock *icsk = inet_csk(sk);
 
 	tcp_set_state(sk, TCP_ESTABLISHED);
+	icsk->icsk_ack.lrcvtime = tcp_time_stamp;
 
 	if (skb) {
 		icsk->icsk_af_ops->sk_rx_dst_set(sk, skb);
@@ -5759,7 +5765,6 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
 		 * to stand against the temptation 8)     --ANK
 		 */
 		inet_csk_schedule_ack(sk);
-		icsk->icsk_ack.lrcvtime = tcp_time_stamp;
 		tcp_enter_quickack_mode(sk);
 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
 					  TCP_DELACK_MAX, TCP_RTO_MAX);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 9a89b8deafae..575e19dcc017 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -279,10 +279,13 @@ EXPORT_SYMBOL(tcp_v4_connect);
  */
 void tcp_v4_mtu_reduced(struct sock *sk)
 {
-	struct dst_entry *dst;
 	struct inet_sock *inet = inet_sk(sk);
-	u32 mtu = tcp_sk(sk)->mtu_info;
+	struct dst_entry *dst;
+	u32 mtu;
 
+	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
+		return;
+	mtu = tcp_sk(sk)->mtu_info;
 	dst = inet_csk_update_pmtu(sk, mtu);
 	if (!dst)
 		return;
@@ -428,7 +431,8 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
 
 	switch (type) {
 	case ICMP_REDIRECT:
-		do_redirect(icmp_skb, sk);
+		if (!sock_owned_by_user(sk))
+			do_redirect(icmp_skb, sk);
 		goto out;
 	case ICMP_SOURCE_QUENCH:
 		/* Just silently ignore these. */
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 7e16243cdb58..65c0f3d13eca 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -460,6 +460,7 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
 		newtp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
 		minmax_reset(&newtp->rtt_min, tcp_time_stamp, ~0U);
 		newicsk->icsk_rto = TCP_TIMEOUT_INIT;
+		newicsk->icsk_ack.lrcvtime = tcp_time_stamp;
 
 		newtp->packets_out = 0;
 		newtp->retrans_out = 0;
diff --git a/net/ipv4/tcp_recovery.c b/net/ipv4/tcp_recovery.c
index 4ecb38ae8504..d8acbd9f477a 100644
--- a/net/ipv4/tcp_recovery.c
+++ b/net/ipv4/tcp_recovery.c
@@ -12,7 +12,8 @@ static void tcp_rack_mark_skb_lost(struct sock *sk, struct sk_buff *skb)
 		/* Account for retransmits that are lost again */
 		TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
 		tp->retrans_out -= tcp_skb_pcount(skb);
-		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT);
+		NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT,
+			      tcp_skb_pcount(skb));
 	}
 }
 
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 40d893556e67..b2ab411c6d37 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -249,7 +249,8 @@ void tcp_delack_timer_handler(struct sock *sk)
 
 	sk_mem_reclaim_partial(sk);
 
-	if (sk->sk_state == TCP_CLOSE || !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
+	if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
+	    !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
 		goto out;
 
 	if (time_after(icsk->icsk_ack.timeout, jiffies)) {
@@ -552,7 +553,8 @@ void tcp_write_timer_handler(struct sock *sk)
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	int event;
 
-	if (sk->sk_state == TCP_CLOSE || !icsk->icsk_pending)
+	if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
+	    !icsk->icsk_pending)
 		goto out;
 
 	if (time_after(icsk->icsk_timeout, jiffies)) {
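Both timer-handler hunks above replace an equality test against TCP_CLOSE with a bitmask test over (1 << sk->sk_state), which also covers the LISTEN state. A tiny standalone illustration of why the mask form scales to several states; the state numbering below is invented, only the (1 << state) & mask shape mirrors the kernel's TCPF_* convention:

#include <stdio.h>

enum { ST_ESTABLISHED = 1, ST_CLOSE = 7, ST_LISTEN = 10 };
#define F_CLOSE		(1 << ST_CLOSE)
#define F_LISTEN	(1 << ST_LISTEN)

/* One bitmask test covers every state the timer must ignore; comparing
 * against a single state, as the old code did, missed LISTEN sockets. */
static int timer_should_skip(int state)
{
	return ((1 << state) & (F_CLOSE | F_LISTEN)) != 0;
}

int main(void)
{
	printf("ESTABLISHED skipped? %d\n", timer_should_skip(ST_ESTABLISHED));
	printf("LISTEN      skipped? %d\n", timer_should_skip(ST_LISTEN));
	printf("CLOSE       skipped? %d\n", timer_should_skip(ST_CLOSE));
	return 0;
}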
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 04db40620ea6..a9a9553ee63d 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -920,12 +920,12 @@ static int __init inet6_init(void)
 	err = register_pernet_subsys(&inet6_net_ops);
 	if (err)
 		goto register_pernet_fail;
-	err = icmpv6_init();
-	if (err)
-		goto icmp_fail;
 	err = ip6_mr_init();
 	if (err)
 		goto ipmr_fail;
+	err = icmpv6_init();
+	if (err)
+		goto icmp_fail;
 	err = ndisc_init();
 	if (err)
 		goto ndisc_fail;
@@ -1061,10 +1061,10 @@ igmp_fail:
 	ndisc_cleanup();
 ndisc_fail:
 	ip6_mr_cleanup();
-ipmr_fail:
-	icmpv6_cleanup();
 icmp_fail:
 	unregister_pernet_subsys(&inet6_net_ops);
+ipmr_fail:
+	icmpv6_cleanup();
 register_pernet_fail:
 	sock_unregister(PF_INET6);
 	rtnl_unregister_all(PF_INET6);
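The inet6_init() hunks reorder ip6_mr_init() ahead of icmpv6_init() and move the corresponding unwind labels so the goto ladder still mirrors the init order exactly, newest first. A standalone sketch of that ladder convention; the subsystem names are invented:

#include <stdio.h>

static int init_a(void) { puts("init a"); return 0; }
static int init_b(void) { puts("init b"); return -1; }	/* force failure */
static void cleanup_a(void) { puts("cleanup a"); }

static int subsystem_init(void)
{
	int err;

	err = init_a();
	if (err)
		goto a_fail;
	err = init_b();
	if (err)
		goto b_fail;
	return 0;

b_fail:
	cleanup_a();	/* undo only what succeeded, in reverse order */
a_fail:
	return err;
}

int main(void)
{
	return subsystem_init() ? 1 : 0;
}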
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index e4266746e4a2..d4bf2c68a545 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -923,6 +923,8 @@ add:
 		ins = &rt->dst.rt6_next;
 		iter = *ins;
 		while (iter) {
+			if (iter->rt6i_metric > rt->rt6i_metric)
+				break;
 			if (rt6_qualify_for_ecmp(iter)) {
 				*ins = iter->dst.rt6_next;
 				fib6_purge_rt(iter, fn, info->nl_net);
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index 0838e6d01d2e..93e58a5e1837 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -294,8 +294,10 @@ static int ipv6_gro_complete(struct sk_buff *skb, int nhoff)
 	struct ipv6hdr *iph = (struct ipv6hdr *)(skb->data + nhoff);
 	int err = -ENOSYS;
 
-	if (skb->encapsulation)
+	if (skb->encapsulation) {
+		skb_set_inner_protocol(skb, cpu_to_be16(ETH_P_IPV6));
 		skb_set_inner_network_header(skb, nhoff);
+	}
 
 	iph->payload_len = htons(skb->len - nhoff - sizeof(*iph));
 
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 528b3c1f3fde..58f6288e9ba5 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -768,13 +768,14 @@ slow_path:
 	 *	Fragment the datagram.
 	 */
 
-	*prevhdr = NEXTHDR_FRAGMENT;
 	troom = rt->dst.dev->needed_tailroom;
 
 	/*
 	 *	Keep copying data until we run out.
 	 */
 	while (left > 0)	{
+		u8 *fragnexthdr_offset;
+
 		len = left;
 		/* IF: it doesn't fit, use 'mtu' - the data space left */
 		if (len > mtu)
@@ -819,6 +820,10 @@ slow_path:
 		 */
 		skb_copy_from_linear_data(skb, skb_network_header(frag), hlen);
 
+		fragnexthdr_offset = skb_network_header(frag);
+		fragnexthdr_offset += prevhdr - skb_network_header(skb);
+		*fragnexthdr_offset = NEXTHDR_FRAGMENT;
+
 		/*
 		 *	Build fragment header.
 		 */
@@ -1385,7 +1390,7 @@ emsgsize:
 	if ((((length + fragheaderlen) > mtu) ||
 	     (skb && skb_is_gso(skb))) &&
 	    (sk->sk_protocol == IPPROTO_UDP) &&
-	    (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len &&
+	    (rt->dst.dev->features & NETIF_F_UFO) && !dst_xfrm(&rt->dst) &&
 	    (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk)) {
 		err = ip6_ufo_append_data(sk, queue, getfrag, from, length,
 					  hh_len, fragheaderlen, exthdrlen,
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index 644ba59fbd9d..3d8a3b63b4fd 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -485,11 +485,15 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
 	if (!skb->ignore_df && skb->len > mtu) {
 		skb_dst(skb)->ops->update_pmtu(dst, NULL, skb, mtu);
 
-		if (skb->protocol == htons(ETH_P_IPV6))
+		if (skb->protocol == htons(ETH_P_IPV6)) {
+			if (mtu < IPV6_MIN_MTU)
+				mtu = IPV6_MIN_MTU;
+
 			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
-		else
+		} else {
 			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
 				  htonl(mtu));
+		}
 
 		return -EMSGSIZE;
 	}
diff --git a/net/ipv6/netfilter/nft_masq_ipv6.c b/net/ipv6/netfilter/nft_masq_ipv6.c
index 6c5b5b1830a7..4146536e9c15 100644
--- a/net/ipv6/netfilter/nft_masq_ipv6.c
+++ b/net/ipv6/netfilter/nft_masq_ipv6.c
@@ -27,10 +27,10 @@ static void nft_masq_ipv6_eval(const struct nft_expr *expr,
 	memset(&range, 0, sizeof(range));
 	range.flags = priv->flags;
 	if (priv->sreg_proto_min) {
-		range.min_proto.all =
-			*(__be16 *)&regs->data[priv->sreg_proto_min];
-		range.max_proto.all =
-			*(__be16 *)&regs->data[priv->sreg_proto_max];
+		range.min_proto.all = (__force __be16)nft_reg_load16(
+			&regs->data[priv->sreg_proto_min]);
+		range.max_proto.all = (__force __be16)nft_reg_load16(
+			&regs->data[priv->sreg_proto_max]);
 	}
 	regs->verdict.code = nf_nat_masquerade_ipv6(pkt->skb, &range,
 						    nft_out(pkt));
diff --git a/net/ipv6/netfilter/nft_redir_ipv6.c b/net/ipv6/netfilter/nft_redir_ipv6.c
index f5ac080fc084..a27e424f690d 100644
--- a/net/ipv6/netfilter/nft_redir_ipv6.c
+++ b/net/ipv6/netfilter/nft_redir_ipv6.c
@@ -26,10 +26,10 @@ static void nft_redir_ipv6_eval(const struct nft_expr *expr,
 
 	memset(&range, 0, sizeof(range));
 	if (priv->sreg_proto_min) {
-		range.min_proto.all =
-			*(__be16 *)&regs->data[priv->sreg_proto_min],
-		range.max_proto.all =
-			*(__be16 *)&regs->data[priv->sreg_proto_max],
+		range.min_proto.all = (__force __be16)nft_reg_load16(
+			&regs->data[priv->sreg_proto_min]);
+		range.max_proto.all = (__force __be16)nft_reg_load16(
+			&regs->data[priv->sreg_proto_max]);
 		range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
 	}
 
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 229bfcc451ef..9db1418993f2 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -3299,7 +3299,6 @@ static size_t rt6_nlmsg_size(struct rt6_info *rt)
 		nexthop_len = nla_total_size(0)	 /* RTA_MULTIPATH */
 			    + NLA_ALIGN(sizeof(struct rtnexthop))
 			    + nla_total_size(16) /* RTA_GATEWAY */
-			    + nla_total_size(4)  /* RTA_OIF */
 			    + lwtunnel_get_encap_size(rt->dst.lwtstate);
 
 		nexthop_len *= rt->rt6i_nsiblings;
@@ -3323,7 +3322,7 @@ static size_t rt6_nlmsg_size(struct rt6_info *rt)
 }
 
 static int rt6_nexthop_info(struct sk_buff *skb, struct rt6_info *rt,
-			    unsigned int *flags)
+			    unsigned int *flags, bool skip_oif)
 {
 	if (!netif_running(rt->dst.dev) || !netif_carrier_ok(rt->dst.dev)) {
 		*flags |= RTNH_F_LINKDOWN;
@@ -3336,7 +3335,8 @@ static int rt6_nexthop_info(struct sk_buff *skb, struct rt6_info *rt,
 			goto nla_put_failure;
 	}
 
-	if (rt->dst.dev &&
+	/* not needed for multipath encoding b/c it has a rtnexthop struct */
+	if (!skip_oif && rt->dst.dev &&
 	    nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
 		goto nla_put_failure;
 
@@ -3350,6 +3350,7 @@ nla_put_failure:
 	return -EMSGSIZE;
 }
 
+/* add multipath next hop */
 static int rt6_add_nexthop(struct sk_buff *skb, struct rt6_info *rt)
 {
 	struct rtnexthop *rtnh;
@@ -3362,7 +3363,7 @@ static int rt6_add_nexthop(struct sk_buff *skb, struct rt6_info *rt)
 	rtnh->rtnh_hops = 0;
 	rtnh->rtnh_ifindex = rt->dst.dev ? rt->dst.dev->ifindex : 0;
 
-	if (rt6_nexthop_info(skb, rt, &flags) < 0)
+	if (rt6_nexthop_info(skb, rt, &flags, true) < 0)
 		goto nla_put_failure;
 
 	rtnh->rtnh_flags = flags;
@@ -3422,6 +3423,8 @@ static int rt6_fill_node(struct net *net,
 	}
 	else if (rt->rt6i_flags & RTF_LOCAL)
 		rtm->rtm_type = RTN_LOCAL;
+	else if (rt->rt6i_flags & RTF_ANYCAST)
+		rtm->rtm_type = RTN_ANYCAST;
 	else if (rt->dst.dev && (rt->dst.dev->flags & IFF_LOOPBACK))
 		rtm->rtm_type = RTN_LOCAL;
 	else
@@ -3515,7 +3518,7 @@ static int rt6_fill_node(struct net *net,
 
 		nla_nest_end(skb, mp);
 	} else {
-		if (rt6_nexthop_info(skb, rt, &rtm->rtm_flags) < 0)
+		if (rt6_nexthop_info(skb, rt, &rtm->rtm_flags, false) < 0)
 			goto nla_put_failure;
 	}
 
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 60a5295a7de6..49fa2e8c3fa9 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -391,10 +391,12 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 	np = inet6_sk(sk);
 
 	if (type == NDISC_REDIRECT) {
-		struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
+		if (!sock_owned_by_user(sk)) {
+			struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
 
-		if (dst)
-			dst->ops->redirect(dst, sk, skb);
+			if (dst)
+				dst->ops->redirect(dst, sk, skb);
+		}
 		goto out;
 	}
 
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 4e4c401e3bc6..e28082f0a307 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1035,6 +1035,7 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 	ipc6.hlimit = -1;
 	ipc6.tclass = -1;
 	ipc6.dontfrag = -1;
+	sockc.tsflags = sk->sk_tsflags;
 
 	/* destination address check */
 	if (sin6) {
@@ -1159,7 +1160,6 @@ do_udp_sendmsg:
 
 	fl6.flowi6_mark = sk->sk_mark;
 	fl6.flowi6_uid = sk->sk_uid;
-	sockc.tsflags = sk->sk_tsflags;
 
 	if (msg->msg_controllen) {
 		opt = &opt_space;
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index 81adc29a448d..8d77ad5cadaf 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -828,7 +828,8 @@ out:
  *    Wait for incoming connection
  *
  */
-static int irda_accept(struct socket *sock, struct socket *newsock, int flags)
+static int irda_accept(struct socket *sock, struct socket *newsock, int flags,
+		       bool kern)
 {
 	struct sock *sk = sock->sk;
 	struct irda_sock *new, *self = irda_sk(sk);
@@ -836,7 +837,7 @@ static int irda_accept(struct socket *sock, struct socket *newsock, int flags)
 	struct sk_buff *skb = NULL;
 	int err;
 
-	err = irda_create(sock_net(sk), newsock, sk->sk_protocol, 0);
+	err = irda_create(sock_net(sk), newsock, sk->sk_protocol, kern);
 	if (err)
 		return err;
 
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 89bbde1081ce..84de7b6326dc 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -938,7 +938,7 @@ done:
 
 /* Accept a pending connection */
 static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
-			    int flags)
+			    int flags, bool kern)
 {
 	DECLARE_WAITQUEUE(wait, current);
 	struct sock *sk = sock->sk, *nsk;
diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
index 309062f3debe..31762f76cdb5 100644
--- a/net/kcm/kcmsock.c
+++ b/net/kcm/kcmsock.c
@@ -1687,7 +1687,7 @@ static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
 		struct kcm_attach info;
 
 		if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
-			err = -EFAULT;
+			return -EFAULT;
 
 		err = kcm_attach_ioctl(sock, &info);
 
@@ -1697,7 +1697,7 @@ static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
 		struct kcm_unattach info;
 
 		if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
-			err = -EFAULT;
+			return -EFAULT;
 
 		err = kcm_unattach_ioctl(sock, &info);
 
@@ -1708,7 +1708,7 @@ static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
 		struct socket *newsock = NULL;
 
 		if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
-			return -EFAULT;
+			return -EFAULT;
 
 		err = kcm_clone(sock, &info, &newsock);
 
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 8adab6335ced..e37d9554da7b 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -278,7 +278,57 @@ struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunn
 }
 EXPORT_SYMBOL_GPL(l2tp_session_find);
 
-struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth)
+/* Like l2tp_session_find() but takes a reference on the returned session.
+ * Optionally calls session->ref() too if do_ref is true.
+ */
+struct l2tp_session *l2tp_session_get(struct net *net,
+				      struct l2tp_tunnel *tunnel,
+				      u32 session_id, bool do_ref)
+{
+	struct hlist_head *session_list;
+	struct l2tp_session *session;
+
+	if (!tunnel) {
+		struct l2tp_net *pn = l2tp_pernet(net);
+
+		session_list = l2tp_session_id_hash_2(pn, session_id);
+
+		rcu_read_lock_bh();
+		hlist_for_each_entry_rcu(session, session_list, global_hlist) {
+			if (session->session_id == session_id) {
+				l2tp_session_inc_refcount(session);
+				if (do_ref && session->ref)
+					session->ref(session);
+				rcu_read_unlock_bh();
+
+				return session;
+			}
+		}
+		rcu_read_unlock_bh();
+
+		return NULL;
+	}
+
+	session_list = l2tp_session_id_hash(tunnel, session_id);
+	read_lock_bh(&tunnel->hlist_lock);
+	hlist_for_each_entry(session, session_list, hlist) {
+		if (session->session_id == session_id) {
+			l2tp_session_inc_refcount(session);
+			if (do_ref && session->ref)
+				session->ref(session);
+			read_unlock_bh(&tunnel->hlist_lock);
+
+			return session;
+		}
+	}
+	read_unlock_bh(&tunnel->hlist_lock);
+
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(l2tp_session_get);
+
+struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth,
+					  bool do_ref)
 {
 	int hash;
 	struct l2tp_session *session;
@@ -288,6 +338,9 @@ struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth)
 	for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
 		hlist_for_each_entry(session, &tunnel->session_hlist[hash], hlist) {
 			if (++count > nth) {
+				l2tp_session_inc_refcount(session);
+				if (do_ref && session->ref)
+					session->ref(session);
 				read_unlock_bh(&tunnel->hlist_lock);
 				return session;
 			}
@@ -298,12 +351,13 @@ struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth)
 
 	return NULL;
 }
-EXPORT_SYMBOL_GPL(l2tp_session_find_nth);
+EXPORT_SYMBOL_GPL(l2tp_session_get_nth);
 
 /* Lookup a session by interface name.
  * This is very inefficient but is only used by management interfaces.
  */
-struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname)
+struct l2tp_session *l2tp_session_get_by_ifname(struct net *net, char *ifname,
+						bool do_ref)
 {
 	struct l2tp_net *pn = l2tp_pernet(net);
 	int hash;
@@ -313,7 +367,11 @@ struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname)
 	for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++) {
 		hlist_for_each_entry_rcu(session, &pn->l2tp_session_hlist[hash], global_hlist) {
 			if (!strcmp(session->ifname, ifname)) {
+				l2tp_session_inc_refcount(session);
+				if (do_ref && session->ref)
+					session->ref(session);
 				rcu_read_unlock_bh();
+
 				return session;
 			}
 		}
@@ -323,7 +381,49 @@ struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname)
 
 	return NULL;
 }
-EXPORT_SYMBOL_GPL(l2tp_session_find_by_ifname);
+EXPORT_SYMBOL_GPL(l2tp_session_get_by_ifname);
+
+static int l2tp_session_add_to_tunnel(struct l2tp_tunnel *tunnel,
+				      struct l2tp_session *session)
+{
+	struct l2tp_session *session_walk;
+	struct hlist_head *g_head;
+	struct hlist_head *head;
+	struct l2tp_net *pn;
+
+	head = l2tp_session_id_hash(tunnel, session->session_id);
+
+	write_lock_bh(&tunnel->hlist_lock);
+	hlist_for_each_entry(session_walk, head, hlist)
+		if (session_walk->session_id == session->session_id)
+			goto exist;
+
+	if (tunnel->version == L2TP_HDR_VER_3) {
+		pn = l2tp_pernet(tunnel->l2tp_net);
+		g_head = l2tp_session_id_hash_2(l2tp_pernet(tunnel->l2tp_net),
+						session->session_id);
+
+		spin_lock_bh(&pn->l2tp_session_hlist_lock);
+		hlist_for_each_entry(session_walk, g_head, global_hlist)
+			if (session_walk->session_id == session->session_id)
+				goto exist_glob;
+
+		hlist_add_head_rcu(&session->global_hlist, g_head);
+		spin_unlock_bh(&pn->l2tp_session_hlist_lock);
+	}
+
+	hlist_add_head(&session->hlist, head);
+	write_unlock_bh(&tunnel->hlist_lock);
+
+	return 0;
+
+exist_glob:
+	spin_unlock_bh(&pn->l2tp_session_hlist_lock);
+exist:
+	write_unlock_bh(&tunnel->hlist_lock);
+
+	return -EEXIST;
+}
 
 /* Lookup a tunnel by id
  */
@@ -633,6 +733,9 @@ discard:
  * a data (not control) frame before coming here.  Fields up to the
  * session-id have already been parsed and ptr points to the data
  * after the session-id.
+ *
+ * session->ref() must have been called prior to l2tp_recv_common().
+ * session->deref() will be called automatically after skb is processed.
  */
 void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
 		      unsigned char *ptr, unsigned char *optr, u16 hdrflags,
@@ -642,14 +745,6 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
 	int offset;
 	u32 ns, nr;
 
-	/* The ref count is increased since we now hold a pointer to
-	 * the session. Take care to decrement the refcnt when exiting
-	 * this function from now on...
-	 */
-	l2tp_session_inc_refcount(session);
-	if (session->ref)
-		(*session->ref)(session);
-
 	/* Parse and check optional cookie */
 	if (session->peer_cookie_len > 0) {
 		if (memcmp(ptr, &session->peer_cookie[0], session->peer_cookie_len)) {
@@ -802,8 +897,6 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
 	/* Try to dequeue as many skbs from reorder_q as we can. */
 	l2tp_recv_dequeue(session);
 
-	l2tp_session_dec_refcount(session);
-
 	return;
 
 discard:
@@ -812,8 +905,6 @@ discard:
 
 	if (session->deref)
 		(*session->deref)(session);
-
-	l2tp_session_dec_refcount(session);
 }
 EXPORT_SYMBOL(l2tp_recv_common);
 
@@ -920,8 +1011,14 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
 	}
 
 	/* Find the session context */
-	session = l2tp_session_find(tunnel->l2tp_net, tunnel, session_id);
+	session = l2tp_session_get(tunnel->l2tp_net, tunnel, session_id, true);
 	if (!session || !session->recv_skb) {
+		if (session) {
+			if (session->deref)
+				session->deref(session);
+			l2tp_session_dec_refcount(session);
+		}
+
 		/* Not found? Pass to userspace to deal with */
 		l2tp_info(tunnel, L2TP_MSG_DATA,
 			  "%s: no session found (%u/%u). Passing up.\n",
@@ -930,6 +1027,7 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
 	}
 
 	l2tp_recv_common(session, skb, ptr, optr, hdrflags, length, payload_hook);
+	l2tp_session_dec_refcount(session);
 
 	return 0;
 
@@ -1738,6 +1836,7 @@ EXPORT_SYMBOL_GPL(l2tp_session_set_header_len);
 struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg)
 {
 	struct l2tp_session *session;
+	int err;
 
 	session = kzalloc(sizeof(struct l2tp_session) + priv_size, GFP_KERNEL);
 	if (session != NULL) {
@@ -1793,6 +1892,13 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunn
 
 		l2tp_session_set_header_len(session, tunnel->version);
 
+		err = l2tp_session_add_to_tunnel(tunnel, session);
+		if (err) {
+			kfree(session);
+
+			return ERR_PTR(err);
+		}
+
 		/* Bump the reference count. The session context is deleted
 		 * only when this drops to zero.
 		 */
@@ -1802,28 +1908,14 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunn
 		/* Ensure tunnel socket isn't deleted */
 		sock_hold(tunnel->sock);
 
-		/* Add session to the tunnel's hash list */
-		write_lock_bh(&tunnel->hlist_lock);
-		hlist_add_head(&session->hlist,
-			       l2tp_session_id_hash(tunnel, session_id));
-		write_unlock_bh(&tunnel->hlist_lock);
-
-		/* And to the global session list if L2TPv3 */
-		if (tunnel->version != L2TP_HDR_VER_2) {
-			struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
-
-			spin_lock_bh(&pn->l2tp_session_hlist_lock);
-			hlist_add_head_rcu(&session->global_hlist,
-					   l2tp_session_id_hash_2(pn, session_id));
-			spin_unlock_bh(&pn->l2tp_session_hlist_lock);
-		}
-
 		/* Ignore management session in session count value */
 		if (session->session_id != 0)
 			atomic_inc(&l2tp_session_count);
+
+		return session;
 	}
 
-	return session;
+	return ERR_PTR(-ENOMEM);
 }
 EXPORT_SYMBOL_GPL(l2tp_session_create);
 
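The l2tp_core.c changes above convert the session lookups into *_get() variants that take the reference (and the optional session->ref() callback) while the hash lock or RCU read side is still held, so a session can no longer be freed between lookup and use; callers drop the reference when done. A userspace sketch of a lock-protected get/put lookup (a pthread mutex stands in for the hash lock; all names invented):

#include <pthread.h>
#include <stdio.h>

struct session {
	int id;
	int refcount;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct session the_session = { .id = 42, .refcount = 1 };
static struct session *session_list = &the_session;

/* The lookup takes its reference while the list lock is held, so the
 * session cannot disappear before the caller gets to use it. */
static struct session *session_get(int id)
{
	struct session *s = NULL;

	pthread_mutex_lock(&list_lock);
	if (session_list && session_list->id == id) {
		s = session_list;
		s->refcount++;	/* pinned before the lock is dropped */
	}
	pthread_mutex_unlock(&list_lock);
	return s;
}

static void session_put(struct session *s)
{
	pthread_mutex_lock(&list_lock);
	s->refcount--;	/* a real implementation would free at zero */
	pthread_mutex_unlock(&list_lock);
}

int main(void)
{
	struct session *s = session_get(42);

	if (s) {
		printf("got session %d, refcount now %d\n", s->id, s->refcount);
		session_put(s);
	}
	return 0;
}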
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index aebf281d09ee..8ce7818c7a9d 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -230,11 +230,16 @@ out:
 	return tunnel;
 }
 
+struct l2tp_session *l2tp_session_get(struct net *net,
+				      struct l2tp_tunnel *tunnel,
+				      u32 session_id, bool do_ref);
 struct l2tp_session *l2tp_session_find(struct net *net,
 				       struct l2tp_tunnel *tunnel,
 				       u32 session_id);
-struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth);
-struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname);
+struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth,
+					  bool do_ref);
+struct l2tp_session *l2tp_session_get_by_ifname(struct net *net, char *ifname,
+						bool do_ref);
 struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id);
 struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth);
 
diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c
index 2d6760a2ae34..d100aed3d06f 100644
--- a/net/l2tp/l2tp_debugfs.c
+++ b/net/l2tp/l2tp_debugfs.c
@@ -53,7 +53,7 @@ static void l2tp_dfs_next_tunnel(struct l2tp_dfs_seq_data *pd)
 
 static void l2tp_dfs_next_session(struct l2tp_dfs_seq_data *pd)
 {
-	pd->session = l2tp_session_find_nth(pd->tunnel, pd->session_idx);
+	pd->session = l2tp_session_get_nth(pd->tunnel, pd->session_idx, true);
 	pd->session_idx++;
 
 	if (pd->session == NULL) {
@@ -238,10 +238,14 @@ static int l2tp_dfs_seq_show(struct seq_file *m, void *v)
 	}
 
 	/* Show the tunnel or session context */
-	if (pd->session == NULL)
+	if (!pd->session) {
 		l2tp_dfs_seq_tunnel_show(m, pd->tunnel);
-	else
+	} else {
 		l2tp_dfs_seq_session_show(m, pd->session);
+		if (pd->session->deref)
+			pd->session->deref(pd->session);
+		l2tp_session_dec_refcount(pd->session);
+	}
 
 out:
 	return 0;
diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
index 8bf18a5f66e0..6fd41d7afe1e 100644
--- a/net/l2tp/l2tp_eth.c
+++ b/net/l2tp/l2tp_eth.c
@@ -221,12 +221,6 @@ static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 p
 		goto out;
 	}
 
-	session = l2tp_session_find(net, tunnel, session_id);
-	if (session) {
-		rc = -EEXIST;
-		goto out;
-	}
-
 	if (cfg->ifname) {
 		dev = dev_get_by_name(net, cfg->ifname);
 		if (dev) {
@@ -240,8 +234,8 @@ static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 p
 
 	session = l2tp_session_create(sizeof(*spriv), tunnel, session_id,
 				      peer_session_id, cfg);
-	if (!session) {
-		rc = -ENOMEM;
+	if (IS_ERR(session)) {
+		rc = PTR_ERR(session);
 		goto out;
 	}
 
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index d25038cfd64e..4d322c1b7233 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -143,19 +143,19 @@ static int l2tp_ip_recv(struct sk_buff *skb)
 	}
 
 	/* Ok, this is a data packet. Lookup the session. */
-	session = l2tp_session_find(net, NULL, session_id);
-	if (session == NULL)
+	session = l2tp_session_get(net, NULL, session_id, true);
+	if (!session)
 		goto discard;
 
 	tunnel = session->tunnel;
-	if (tunnel == NULL)
-		goto discard;
+	if (!tunnel)
+		goto discard_sess;
 
 	/* Trace packet contents, if enabled */
 	if (tunnel->debug & L2TP_MSG_DATA) {
 		length = min(32u, skb->len);
 		if (!pskb_may_pull(skb, length))
-			goto discard;
+			goto discard_sess;
 
 		/* Point to L2TP header */
 		optr = ptr = skb->data;
@@ -165,6 +165,7 @@ static int l2tp_ip_recv(struct sk_buff *skb)
 	}
 
 	l2tp_recv_common(session, skb, ptr, optr, 0, skb->len, tunnel->recv_payload_hook);
+	l2tp_session_dec_refcount(session);
 
 	return 0;
 
@@ -178,9 +179,10 @@ pass_up:
 
 	tunnel_id = ntohl(*(__be32 *) &skb->data[4]);
 	tunnel = l2tp_tunnel_find(net, tunnel_id);
-	if (tunnel != NULL)
+	if (tunnel) {
 		sk = tunnel->sock;
-	else {
+		sock_hold(sk);
+	} else {
 		struct iphdr *iph = (struct iphdr *) skb_network_header(skb);
 
 		read_lock_bh(&l2tp_ip_lock);
@@ -202,6 +204,12 @@ pass_up:
 
 	return sk_receive_skb(sk, skb, 1);
 
+discard_sess:
+	if (session->deref)
+		session->deref(session);
+	l2tp_session_dec_refcount(session);
+	goto discard;
+
 discard_put:
 	sock_put(sk);
 
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index a4abcbc4c09a..88b397c30d86 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -156,19 +156,19 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
 	}
 
 	/* Ok, this is a data packet. Lookup the session. */
-	session = l2tp_session_find(net, NULL, session_id);
-	if (session == NULL)
+	session = l2tp_session_get(net, NULL, session_id, true);
+	if (!session)
 		goto discard;
 
 	tunnel = session->tunnel;
-	if (tunnel == NULL)
-		goto discard;
+	if (!tunnel)
+		goto discard_sess;
 
 	/* Trace packet contents, if enabled */
 	if (tunnel->debug & L2TP_MSG_DATA) {
 		length = min(32u, skb->len);
 		if (!pskb_may_pull(skb, length))
-			goto discard;
+			goto discard_sess;
 
 		/* Point to L2TP header */
 		optr = ptr = skb->data;
@@ -179,6 +179,8 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
 
 	l2tp_recv_common(session, skb, ptr, optr, 0, skb->len,
 			 tunnel->recv_payload_hook);
+	l2tp_session_dec_refcount(session);
+
 	return 0;
 
 pass_up:
@@ -191,9 +193,10 @@ pass_up:
 
 	tunnel_id = ntohl(*(__be32 *) &skb->data[4]);
 	tunnel = l2tp_tunnel_find(net, tunnel_id);
-	if (tunnel != NULL)
+	if (tunnel) {
 		sk = tunnel->sock;
-	else {
+		sock_hold(sk);
+	} else {
 		struct ipv6hdr *iph = ipv6_hdr(skb);
 
 		read_lock_bh(&l2tp_ip6_lock);
@@ -215,6 +218,12 @@ pass_up:
 
 	return sk_receive_skb(sk, skb, 1);
 
+discard_sess:
+	if (session->deref)
+		session->deref(session);
+	l2tp_session_dec_refcount(session);
+	goto discard;
+
 discard_put:
 	sock_put(sk);
 
220 229
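Both l2tp_ip_recv() and l2tp_ip6_recv() guard the trace path with pskb_may_pull() before touching packet bytes. Stripped of skb nonlinearity, the underlying rule is simply "never read past the packet end"; a runnable sketch of that bounds check (parse_session_id is an invented name):

#include <stdint.h>
#include <stdio.h>

/* Parse a 4-byte big-endian session ID from a packet, refusing to
 * read past the buffer end -- the userspace analogue of the
 * pskb_may_pull() checks in the hunks above. */
static int parse_session_id(const uint8_t *pkt, size_t len, uint32_t *id)
{
	if (len < 4)              /* would read past the packet: discard */
		return -1;
	*id = (uint32_t)pkt[0] << 24 | (uint32_t)pkt[1] << 16 |
	      (uint32_t)pkt[2] << 8  | (uint32_t)pkt[3];
	return 0;
}

int main(void)
{
	const uint8_t pkt[] = { 0x00, 0x00, 0x00, 0x2a };
	uint32_t id;

	if (parse_session_id(pkt, sizeof(pkt), &id) == 0)
		printf("session %u\n", id);     /* prints: session 42 */

	if (parse_session_id(pkt, 2, &id) != 0)
		puts("short packet discarded");
	return 0;
}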
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
index 3620fba31786..7e3e669baac4 100644
--- a/net/l2tp/l2tp_netlink.c
+++ b/net/l2tp/l2tp_netlink.c
@@ -48,7 +48,8 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq,
48/* Accessed under genl lock */ 48/* Accessed under genl lock */
49static const struct l2tp_nl_cmd_ops *l2tp_nl_cmd_ops[__L2TP_PWTYPE_MAX]; 49static const struct l2tp_nl_cmd_ops *l2tp_nl_cmd_ops[__L2TP_PWTYPE_MAX];
50 50
51static struct l2tp_session *l2tp_nl_session_find(struct genl_info *info) 51static struct l2tp_session *l2tp_nl_session_get(struct genl_info *info,
52 bool do_ref)
52{ 53{
53 u32 tunnel_id; 54 u32 tunnel_id;
54 u32 session_id; 55 u32 session_id;
@@ -59,14 +60,15 @@ static struct l2tp_session *l2tp_nl_session_find(struct genl_info *info)
59 60
60 if (info->attrs[L2TP_ATTR_IFNAME]) { 61 if (info->attrs[L2TP_ATTR_IFNAME]) {
61 ifname = nla_data(info->attrs[L2TP_ATTR_IFNAME]); 62 ifname = nla_data(info->attrs[L2TP_ATTR_IFNAME]);
62 session = l2tp_session_find_by_ifname(net, ifname); 63 session = l2tp_session_get_by_ifname(net, ifname, do_ref);
63 } else if ((info->attrs[L2TP_ATTR_SESSION_ID]) && 64 } else if ((info->attrs[L2TP_ATTR_SESSION_ID]) &&
64 (info->attrs[L2TP_ATTR_CONN_ID])) { 65 (info->attrs[L2TP_ATTR_CONN_ID])) {
65 tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]); 66 tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);
66 session_id = nla_get_u32(info->attrs[L2TP_ATTR_SESSION_ID]); 67 session_id = nla_get_u32(info->attrs[L2TP_ATTR_SESSION_ID]);
67 tunnel = l2tp_tunnel_find(net, tunnel_id); 68 tunnel = l2tp_tunnel_find(net, tunnel_id);
68 if (tunnel) 69 if (tunnel)
69 session = l2tp_session_find(net, tunnel, session_id); 70 session = l2tp_session_get(net, tunnel, session_id,
71 do_ref);
70 } 72 }
71 73
72 return session; 74 return session;
@@ -642,10 +644,12 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf
642 session_id, peer_session_id, &cfg); 644 session_id, peer_session_id, &cfg);
643 645
644 if (ret >= 0) { 646 if (ret >= 0) {
645 session = l2tp_session_find(net, tunnel, session_id); 647 session = l2tp_session_get(net, tunnel, session_id, false);
646 if (session) 648 if (session) {
647 ret = l2tp_session_notify(&l2tp_nl_family, info, session, 649 ret = l2tp_session_notify(&l2tp_nl_family, info, session,
648 L2TP_CMD_SESSION_CREATE); 650 L2TP_CMD_SESSION_CREATE);
651 l2tp_session_dec_refcount(session);
652 }
649 } 653 }
650 654
651out: 655out:
@@ -658,7 +662,7 @@ static int l2tp_nl_cmd_session_delete(struct sk_buff *skb, struct genl_info *inf
658 struct l2tp_session *session; 662 struct l2tp_session *session;
659 u16 pw_type; 663 u16 pw_type;
660 664
661 session = l2tp_nl_session_find(info); 665 session = l2tp_nl_session_get(info, true);
662 if (session == NULL) { 666 if (session == NULL) {
663 ret = -ENODEV; 667 ret = -ENODEV;
664 goto out; 668 goto out;
@@ -672,6 +676,10 @@ static int l2tp_nl_cmd_session_delete(struct sk_buff *skb, struct genl_info *inf
672 if (l2tp_nl_cmd_ops[pw_type] && l2tp_nl_cmd_ops[pw_type]->session_delete) 676 if (l2tp_nl_cmd_ops[pw_type] && l2tp_nl_cmd_ops[pw_type]->session_delete)
673 ret = (*l2tp_nl_cmd_ops[pw_type]->session_delete)(session); 677 ret = (*l2tp_nl_cmd_ops[pw_type]->session_delete)(session);
674 678
679 if (session->deref)
680 session->deref(session);
681 l2tp_session_dec_refcount(session);
682
675out: 683out:
676 return ret; 684 return ret;
677} 685}
@@ -681,7 +689,7 @@ static int l2tp_nl_cmd_session_modify(struct sk_buff *skb, struct genl_info *inf
681 int ret = 0; 689 int ret = 0;
682 struct l2tp_session *session; 690 struct l2tp_session *session;
683 691
684 session = l2tp_nl_session_find(info); 692 session = l2tp_nl_session_get(info, false);
685 if (session == NULL) { 693 if (session == NULL) {
686 ret = -ENODEV; 694 ret = -ENODEV;
687 goto out; 695 goto out;
@@ -716,6 +724,8 @@ static int l2tp_nl_cmd_session_modify(struct sk_buff *skb, struct genl_info *inf
716 ret = l2tp_session_notify(&l2tp_nl_family, info, 724 ret = l2tp_session_notify(&l2tp_nl_family, info,
717 session, L2TP_CMD_SESSION_MODIFY); 725 session, L2TP_CMD_SESSION_MODIFY);
718 726
727 l2tp_session_dec_refcount(session);
728
719out: 729out:
720 return ret; 730 return ret;
721} 731}
@@ -811,29 +821,34 @@ static int l2tp_nl_cmd_session_get(struct sk_buff *skb, struct genl_info *info)
811 struct sk_buff *msg; 821 struct sk_buff *msg;
812 int ret; 822 int ret;
813 823
814 session = l2tp_nl_session_find(info); 824 session = l2tp_nl_session_get(info, false);
815 if (session == NULL) { 825 if (session == NULL) {
816 ret = -ENODEV; 826 ret = -ENODEV;
817 goto out; 827 goto err;
818 } 828 }
819 829
820 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 830 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
821 if (!msg) { 831 if (!msg) {
822 ret = -ENOMEM; 832 ret = -ENOMEM;
823 goto out; 833 goto err_ref;
824 } 834 }
825 835
826 ret = l2tp_nl_session_send(msg, info->snd_portid, info->snd_seq, 836 ret = l2tp_nl_session_send(msg, info->snd_portid, info->snd_seq,
827 0, session, L2TP_CMD_SESSION_GET); 837 0, session, L2TP_CMD_SESSION_GET);
828 if (ret < 0) 838 if (ret < 0)
829 goto err_out; 839 goto err_ref_msg;
830 840
831 return genlmsg_unicast(genl_info_net(info), msg, info->snd_portid); 841 ret = genlmsg_unicast(genl_info_net(info), msg, info->snd_portid);
832 842
833err_out: 843 l2tp_session_dec_refcount(session);
834 nlmsg_free(msg);
835 844
836out: 845 return ret;
846
847err_ref_msg:
848 nlmsg_free(msg);
849err_ref:
850 l2tp_session_dec_refcount(session);
851err:
837 return ret; 852 return ret;
838} 853}
839 854
@@ -852,7 +867,7 @@ static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback
852 goto out; 867 goto out;
853 } 868 }
854 869
855 session = l2tp_session_find_nth(tunnel, si); 870 session = l2tp_session_get_nth(tunnel, si, false);
856 if (session == NULL) { 871 if (session == NULL) {
857 ti++; 872 ti++;
858 tunnel = NULL; 873 tunnel = NULL;
@@ -862,8 +877,11 @@ static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback
862 877
863 if (l2tp_nl_session_send(skb, NETLINK_CB(cb->skb).portid, 878 if (l2tp_nl_session_send(skb, NETLINK_CB(cb->skb).portid,
864 cb->nlh->nlmsg_seq, NLM_F_MULTI, 879 cb->nlh->nlmsg_seq, NLM_F_MULTI,
865 session, L2TP_CMD_SESSION_GET) < 0) 880 session, L2TP_CMD_SESSION_GET) < 0) {
881 l2tp_session_dec_refcount(session);
866 break; 882 break;
883 }
884 l2tp_session_dec_refcount(session);
867 885
868 si++; 886 si++;
869 } 887 }
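The reworked l2tp_nl_cmd_session_get() above replaces the flat out/err_out labels with a layered chain (err_ref_msg frees the message, falls through to err_ref which drops the session reference, then err returns), so each failure releases exactly what is held at that point. A self-contained sketch of the idiom under toy names (acquire/release stand in for the real reference and nlmsg calls):

#include <stdio.h>
#include <stdlib.h>

struct res { const char *name; };

static struct res *acquire(const char *name)
{
	struct res *r = malloc(sizeof(*r));

	if (r)
		r->name = name;
	return r;
}

static void release(struct res *r)
{
	printf("released %s\n", r->name);
	free(r);
}

/* Acquire resources in order; on failure, jump to the label that
 * releases only what is already held, in reverse order. */
static int do_request(int fail_late)
{
	struct res *ref, *msg;
	int ret;

	ref = acquire("session ref");
	if (!ref) {
		ret = -1;
		goto err;
	}

	msg = acquire("message");
	if (!msg) {
		ret = -2;
		goto err_ref;
	}

	if (fail_late) {                 /* e.g. the send failing */
		ret = -3;
		goto err_ref_msg;
	}

	release(msg);                    /* success: message consumed... */
	release(ref);                    /* ...and the reference dropped */
	return 0;

err_ref_msg:
	release(msg);
err_ref:
	release(ref);
err:
	return ret;
}

int main(void)
{
	do_request(1);                   /* walks the full unwind chain */
	return do_request(0) ? 1 : 0;
}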
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 36cc56fd0418..861b255a2d51 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -450,6 +450,10 @@ static void pppol2tp_session_close(struct l2tp_session *session)
450static void pppol2tp_session_destruct(struct sock *sk) 450static void pppol2tp_session_destruct(struct sock *sk)
451{ 451{
452 struct l2tp_session *session = sk->sk_user_data; 452 struct l2tp_session *session = sk->sk_user_data;
453
454 skb_queue_purge(&sk->sk_receive_queue);
455 skb_queue_purge(&sk->sk_write_queue);
456
453 if (session) { 457 if (session) {
454 sk->sk_user_data = NULL; 458 sk->sk_user_data = NULL;
455 BUG_ON(session->magic != L2TP_SESSION_MAGIC); 459 BUG_ON(session->magic != L2TP_SESSION_MAGIC);
@@ -488,9 +492,6 @@ static int pppol2tp_release(struct socket *sock)
488 l2tp_session_queue_purge(session); 492 l2tp_session_queue_purge(session);
489 sock_put(sk); 493 sock_put(sk);
490 } 494 }
491 skb_queue_purge(&sk->sk_receive_queue);
492 skb_queue_purge(&sk->sk_write_queue);
493
494 release_sock(sk); 495 release_sock(sk);
495 496
496 /* This will delete the session context via 497 /* This will delete the session context via
@@ -582,6 +583,7 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
582 int error = 0; 583 int error = 0;
583 u32 tunnel_id, peer_tunnel_id; 584 u32 tunnel_id, peer_tunnel_id;
584 u32 session_id, peer_session_id; 585 u32 session_id, peer_session_id;
586 bool drop_refcnt = false;
585 int ver = 2; 587 int ver = 2;
586 int fd; 588 int fd;
587 589
@@ -683,36 +685,36 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
683 if (tunnel->peer_tunnel_id == 0) 685 if (tunnel->peer_tunnel_id == 0)
684 tunnel->peer_tunnel_id = peer_tunnel_id; 686 tunnel->peer_tunnel_id = peer_tunnel_id;
685 687
686 /* Create session if it doesn't already exist. We handle the 688 session = l2tp_session_get(sock_net(sk), tunnel, session_id, false);
687 * case where a session was previously created by the netlink 689 if (session) {
688 * interface by checking that the session doesn't already have 690 drop_refcnt = true;
689 * a socket and its tunnel socket are what we expect. If any 691 ps = l2tp_session_priv(session);
690 * of those checks fail, return EEXIST to the caller. 692
691 */ 693 /* Using a pre-existing session is fine as long as it hasn't
692 session = l2tp_session_find(sock_net(sk), tunnel, session_id); 694 * been connected yet.
693 if (session == NULL) {
694 /* Default MTU must allow space for UDP/L2TP/PPP
695 * headers.
696 */ 695 */
697 cfg.mtu = cfg.mru = 1500 - PPPOL2TP_HEADER_OVERHEAD; 696 if (ps->sock) {
697 error = -EEXIST;
698 goto end;
699 }
698 700
699 /* Allocate and initialize a new session context. */ 701 /* consistency checks */
700 session = l2tp_session_create(sizeof(struct pppol2tp_session), 702 if (ps->tunnel_sock != tunnel->sock) {
701 tunnel, session_id, 703 error = -EEXIST;
702 peer_session_id, &cfg);
703 if (session == NULL) {
704 error = -ENOMEM;
705 goto end; 704 goto end;
706 } 705 }
707 } else { 706 } else {
708 ps = l2tp_session_priv(session); 707 /* Default MTU must allow space for UDP/L2TP/PPP headers */
709 error = -EEXIST; 708 cfg.mtu = 1500 - PPPOL2TP_HEADER_OVERHEAD;
710 if (ps->sock != NULL) 709 cfg.mru = cfg.mtu;
711 goto end;
712 710
713 /* consistency checks */ 711 session = l2tp_session_create(sizeof(struct pppol2tp_session),
714 if (ps->tunnel_sock != tunnel->sock) 712 tunnel, session_id,
713 peer_session_id, &cfg);
714 if (IS_ERR(session)) {
715 error = PTR_ERR(session);
715 goto end; 716 goto end;
717 }
716 } 718 }
717 719
718 /* Associate session with its PPPoL2TP socket */ 720 /* Associate session with its PPPoL2TP socket */
@@ -777,6 +779,8 @@ out_no_ppp:
777 session->name); 779 session->name);
778 780
779end: 781end:
782 if (drop_refcnt)
783 l2tp_session_dec_refcount(session);
780 release_sock(sk); 784 release_sock(sk);
781 785
782 return error; 786 return error;
@@ -804,12 +808,6 @@ static int pppol2tp_session_create(struct net *net, u32 tunnel_id, u32 session_i
804 if (tunnel->sock == NULL) 808 if (tunnel->sock == NULL)
805 goto out; 809 goto out;
806 810
807 /* Check that this session doesn't already exist */
808 error = -EEXIST;
809 session = l2tp_session_find(net, tunnel, session_id);
810 if (session != NULL)
811 goto out;
812
813 /* Default MTU values. */ 811 /* Default MTU values. */
814 if (cfg->mtu == 0) 812 if (cfg->mtu == 0)
815 cfg->mtu = 1500 - PPPOL2TP_HEADER_OVERHEAD; 813 cfg->mtu = 1500 - PPPOL2TP_HEADER_OVERHEAD;
@@ -817,12 +815,13 @@ static int pppol2tp_session_create(struct net *net, u32 tunnel_id, u32 session_i
817 cfg->mru = cfg->mtu; 815 cfg->mru = cfg->mtu;
818 816
819 /* Allocate and initialize a new session context. */ 817 /* Allocate and initialize a new session context. */
820 error = -ENOMEM;
821 session = l2tp_session_create(sizeof(struct pppol2tp_session), 818 session = l2tp_session_create(sizeof(struct pppol2tp_session),
822 tunnel, session_id, 819 tunnel, session_id,
823 peer_session_id, cfg); 820 peer_session_id, cfg);
824 if (session == NULL) 821 if (IS_ERR(session)) {
822 error = PTR_ERR(session);
825 goto out; 823 goto out;
824 }
826 825
827 ps = l2tp_session_priv(session); 826 ps = l2tp_session_priv(session);
828 ps->tunnel_sock = tunnel->sock; 827 ps->tunnel_sock = tunnel->sock;
@@ -1140,11 +1139,18 @@ static int pppol2tp_tunnel_ioctl(struct l2tp_tunnel *tunnel,
1140 if (stats.session_id != 0) { 1139 if (stats.session_id != 0) {
1141 /* resend to session ioctl handler */ 1140 /* resend to session ioctl handler */
1142 struct l2tp_session *session = 1141 struct l2tp_session *session =
1143 l2tp_session_find(sock_net(sk), tunnel, stats.session_id); 1142 l2tp_session_get(sock_net(sk), tunnel,
1144 if (session != NULL) 1143 stats.session_id, true);
1145 err = pppol2tp_session_ioctl(session, cmd, arg); 1144
1146 else 1145 if (session) {
1146 err = pppol2tp_session_ioctl(session, cmd,
1147 arg);
1148 if (session->deref)
1149 session->deref(session);
1150 l2tp_session_dec_refcount(session);
1151 } else {
1147 err = -EBADR; 1152 err = -EBADR;
1153 }
1148 break; 1154 break;
1149 } 1155 }
1150#ifdef CONFIG_XFRM 1156#ifdef CONFIG_XFRM
@@ -1554,7 +1560,7 @@ static void pppol2tp_next_tunnel(struct net *net, struct pppol2tp_seq_data *pd)
1554 1560
1555static void pppol2tp_next_session(struct net *net, struct pppol2tp_seq_data *pd) 1561static void pppol2tp_next_session(struct net *net, struct pppol2tp_seq_data *pd)
1556{ 1562{
1557 pd->session = l2tp_session_find_nth(pd->tunnel, pd->session_idx); 1563 pd->session = l2tp_session_get_nth(pd->tunnel, pd->session_idx, true);
1558 pd->session_idx++; 1564 pd->session_idx++;
1559 1565
1560 if (pd->session == NULL) { 1566 if (pd->session == NULL) {
@@ -1681,10 +1687,14 @@ static int pppol2tp_seq_show(struct seq_file *m, void *v)
1681 1687
1682 /* Show the tunnel or session context. 1688 /* Show the tunnel or session context.
1683 */ 1689 */
1684 if (pd->session == NULL) 1690 if (!pd->session) {
1685 pppol2tp_seq_tunnel_show(m, pd->tunnel); 1691 pppol2tp_seq_tunnel_show(m, pd->tunnel);
1686 else 1692 } else {
1687 pppol2tp_seq_session_show(m, pd->session); 1693 pppol2tp_seq_session_show(m, pd->session);
1694 if (pd->session->deref)
1695 pd->session->deref(pd->session);
1696 l2tp_session_dec_refcount(pd->session);
1697 }
1688 1698
1689out: 1699out:
1690 return 0; 1700 return 0;
@@ -1843,4 +1853,4 @@ MODULE_DESCRIPTION("PPP over L2TP over UDP");
1843MODULE_LICENSE("GPL"); 1853MODULE_LICENSE("GPL");
1844MODULE_VERSION(PPPOL2TP_DRV_VERSION); 1854MODULE_VERSION(PPPOL2TP_DRV_VERSION);
1845MODULE_ALIAS_NET_PF_PROTO(PF_PPPOX, PX_PROTO_OL2TP); 1855MODULE_ALIAS_NET_PF_PROTO(PF_PPPOX, PX_PROTO_OL2TP);
1846MODULE_ALIAS_L2TP_PWTYPE(11); 1856MODULE_ALIAS_L2TP_PWTYPE(7);
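The l2tp_ppp.c hunks switch callers of l2tp_session_create() from NULL checks to IS_ERR()/PTR_ERR(), i.e. the error code now travels inside the pointer itself. A userspace re-creation of that convention (the kernel keeps the top 4095 negative values of the address space reserved for error codes; session_create here is an invented stand-in):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long err)     { return (void *)(intptr_t)err; }
static inline long PTR_ERR(const void *p) { return (long)(intptr_t)p; }
static inline int IS_ERR(const void *p)
{
	return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

struct session { int id; };

static struct session *session_create(int id, int simulate_enomem)
{
	if (simulate_enomem)
		return ERR_PTR(-ENOMEM);

	struct session *s = malloc(sizeof(*s));
	if (!s)
		return ERR_PTR(-ENOMEM);
	s->id = id;
	return s;
}

int main(void)
{
	struct session *s = session_create(1, 1);

	if (IS_ERR(s)) {
		printf("create failed: %ld\n", PTR_ERR(s));  /* -12 */
		return 1;
	}
	free(s);
	return 0;
}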
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 06186d608a27..cb4fff785cbf 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -641,11 +641,13 @@ static void llc_cmsg_rcv(struct msghdr *msg, struct sk_buff *skb)
641 * @sock: Socket which connections arrive on. 641 * @sock: Socket which connections arrive on.
642 * @newsock: Socket to move incoming connection to. 642 * @newsock: Socket to move incoming connection to.
643 * @flags: User specified operational flags. 643 * @flags: User specified operational flags.
644 * @kern: If the socket is kernel internal
644 * 645 *
645 * Accept a new incoming connection. 646 * Accept a new incoming connection.
646 * Returns 0 upon success, negative otherwise. 647 * Returns 0 upon success, negative otherwise.
647 */ 648 */
648static int llc_ui_accept(struct socket *sock, struct socket *newsock, int flags) 649static int llc_ui_accept(struct socket *sock, struct socket *newsock, int flags,
650 bool kern)
649{ 651{
650 struct sock *sk = sock->sk, *newsk; 652 struct sock *sk = sock->sk, *newsk;
651 struct llc_sock *llc, *newllc; 653 struct llc_sock *llc, *newllc;
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 40813dd3301c..5bb0c5012819 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -718,7 +718,8 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
718 ieee80211_recalc_ps(local); 718 ieee80211_recalc_ps(local);
719 719
720 if (sdata->vif.type == NL80211_IFTYPE_MONITOR || 720 if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
721 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) { 721 sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
722 local->ops->wake_tx_queue) {
722 /* XXX: for AP_VLAN, actually track AP queues */ 723 /* XXX: for AP_VLAN, actually track AP queues */
723 netif_tx_start_all_queues(dev); 724 netif_tx_start_all_queues(dev);
724 } else if (dev) { 725 } else if (dev) {
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index 3818686182b2..6414079aa729 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -1269,6 +1269,8 @@ static void mpls_ifdown(struct net_device *dev, int event)
1269{ 1269{
1270 struct mpls_route __rcu **platform_label; 1270 struct mpls_route __rcu **platform_label;
1271 struct net *net = dev_net(dev); 1271 struct net *net = dev_net(dev);
1272 unsigned int nh_flags = RTNH_F_DEAD | RTNH_F_LINKDOWN;
1273 unsigned int alive;
1272 unsigned index; 1274 unsigned index;
1273 1275
1274 platform_label = rtnl_dereference(net->mpls.platform_label); 1276 platform_label = rtnl_dereference(net->mpls.platform_label);
@@ -1278,9 +1280,11 @@ static void mpls_ifdown(struct net_device *dev, int event)
1278 if (!rt) 1280 if (!rt)
1279 continue; 1281 continue;
1280 1282
1283 alive = 0;
1281 change_nexthops(rt) { 1284 change_nexthops(rt) {
1282 if (rtnl_dereference(nh->nh_dev) != dev) 1285 if (rtnl_dereference(nh->nh_dev) != dev)
1283 continue; 1286 goto next;
1287
1284 switch (event) { 1288 switch (event) {
1285 case NETDEV_DOWN: 1289 case NETDEV_DOWN:
1286 case NETDEV_UNREGISTER: 1290 case NETDEV_UNREGISTER:
@@ -1288,12 +1292,16 @@ static void mpls_ifdown(struct net_device *dev, int event)
1288 /* fall through */ 1292 /* fall through */
1289 case NETDEV_CHANGE: 1293 case NETDEV_CHANGE:
1290 nh->nh_flags |= RTNH_F_LINKDOWN; 1294 nh->nh_flags |= RTNH_F_LINKDOWN;
1291 ACCESS_ONCE(rt->rt_nhn_alive) = rt->rt_nhn_alive - 1;
1292 break; 1295 break;
1293 } 1296 }
1294 if (event == NETDEV_UNREGISTER) 1297 if (event == NETDEV_UNREGISTER)
1295 RCU_INIT_POINTER(nh->nh_dev, NULL); 1298 RCU_INIT_POINTER(nh->nh_dev, NULL);
1299next:
1300 if (!(nh->nh_flags & nh_flags))
1301 alive++;
1296 } endfor_nexthops(rt); 1302 } endfor_nexthops(rt);
1303
1304 WRITE_ONCE(rt->rt_nhn_alive, alive);
1297 } 1305 }
1298} 1306}
1299 1307
@@ -2028,6 +2036,7 @@ static void mpls_net_exit(struct net *net)
2028 for (index = 0; index < platform_labels; index++) { 2036 for (index = 0; index < platform_labels; index++) {
2029 struct mpls_route *rt = rtnl_dereference(platform_label[index]); 2037 struct mpls_route *rt = rtnl_dereference(platform_label[index]);
2030 RCU_INIT_POINTER(platform_label[index], NULL); 2038 RCU_INIT_POINTER(platform_label[index], NULL);
2039 mpls_notify_route(net, index, rt, NULL, NULL);
2031 mpls_rt_free(rt); 2040 mpls_rt_free(rt);
2032 } 2041 }
2033 rtnl_unlock(); 2042 rtnl_unlock();
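The mpls_ifdown() rework above stops decrementing rt_nhn_alive per nexthop (which could double-count if a flag was already set) and instead recounts the alive nexthops on every pass, publishing the result with a single WRITE_ONCE(). A sketch of the same recount-then-publish shape, using C11 atomics as a stand-in for WRITE_ONCE:

#include <stdatomic.h>
#include <stdio.h>

#define RTNH_F_DEAD     0x1
#define RTNH_F_LINKDOWN 0x2

struct nexthop { unsigned int flags; };

struct route {
	struct nexthop nh[4];
	_Atomic unsigned int nhn_alive;  /* read locklessly by the datapath */
};

/* Walk all nexthops, recount the alive ones, publish once. */
static void route_recount_alive(struct route *rt)
{
	unsigned int alive = 0;

	for (int i = 0; i < 4; i++)
		if (!(rt->nh[i].flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN)))
			alive++;

	/* single store, analogous to WRITE_ONCE(rt->rt_nhn_alive, alive) */
	atomic_store_explicit(&rt->nhn_alive, alive, memory_order_relaxed);
}

int main(void)
{
	struct route rt = {
		.nh = { {0}, {RTNH_F_LINKDOWN}, {0}, {RTNH_F_DEAD} },
	};

	route_recount_alive(&rt);
	printf("alive nexthops: %u\n", atomic_load(&rt.nhn_alive)); /* 2 */
	return 0;
}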
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 071b97fcbefb..ffb78e5f7b70 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -181,7 +181,11 @@ EXPORT_SYMBOL_GPL(nf_conntrack_htable_size);
181unsigned int nf_conntrack_max __read_mostly; 181unsigned int nf_conntrack_max __read_mostly;
182seqcount_t nf_conntrack_generation __read_mostly; 182seqcount_t nf_conntrack_generation __read_mostly;
183 183
184DEFINE_PER_CPU(struct nf_conn, nf_conntrack_untracked); 184/* nf_conn must be 8 bytes aligned, as the 3 LSB bits are used
185 * for the nfctinfo. We cheat by (ab)using the PER CPU cache line
186 * alignment to enforce this.
187 */
188DEFINE_PER_CPU_ALIGNED(struct nf_conn, nf_conntrack_untracked);
185EXPORT_PER_CPU_SYMBOL(nf_conntrack_untracked); 189EXPORT_PER_CPU_SYMBOL(nf_conntrack_untracked);
186 190
187static unsigned int nf_conntrack_hash_rnd __read_mostly; 191static unsigned int nf_conntrack_hash_rnd __read_mostly;
diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
index da9df2d56e66..22fc32143e9c 100644
--- a/net/netfilter/nf_conntrack_ecache.c
+++ b/net/netfilter/nf_conntrack_ecache.c
@@ -290,6 +290,7 @@ void nf_conntrack_unregister_notifier(struct net *net,
290 BUG_ON(notify != new); 290 BUG_ON(notify != new);
291 RCU_INIT_POINTER(net->ct.nf_conntrack_event_cb, NULL); 291 RCU_INIT_POINTER(net->ct.nf_conntrack_event_cb, NULL);
292 mutex_unlock(&nf_ct_ecache_mutex); 292 mutex_unlock(&nf_ct_ecache_mutex);
293 /* synchronize_rcu() is called from ctnetlink_exit. */
293} 294}
294EXPORT_SYMBOL_GPL(nf_conntrack_unregister_notifier); 295EXPORT_SYMBOL_GPL(nf_conntrack_unregister_notifier);
295 296
@@ -326,6 +327,7 @@ void nf_ct_expect_unregister_notifier(struct net *net,
326 BUG_ON(notify != new); 327 BUG_ON(notify != new);
327 RCU_INIT_POINTER(net->ct.nf_expect_event_cb, NULL); 328 RCU_INIT_POINTER(net->ct.nf_expect_event_cb, NULL);
328 mutex_unlock(&nf_ct_ecache_mutex); 329 mutex_unlock(&nf_ct_ecache_mutex);
330 /* synchronize_rcu() is called from ctnetlink_exit. */
329} 331}
330EXPORT_SYMBOL_GPL(nf_ct_expect_unregister_notifier); 332EXPORT_SYMBOL_GPL(nf_ct_expect_unregister_notifier);
331 333
diff --git a/net/netfilter/nf_conntrack_extend.c b/net/netfilter/nf_conntrack_extend.c
index 02bcf00c2492..008299b7f78f 100644
--- a/net/netfilter/nf_conntrack_extend.c
+++ b/net/netfilter/nf_conntrack_extend.c
@@ -53,7 +53,11 @@ nf_ct_ext_create(struct nf_ct_ext **ext, enum nf_ct_ext_id id,
53 53
54 rcu_read_lock(); 54 rcu_read_lock();
55 t = rcu_dereference(nf_ct_ext_types[id]); 55 t = rcu_dereference(nf_ct_ext_types[id]);
56 BUG_ON(t == NULL); 56 if (!t) {
57 rcu_read_unlock();
58 return NULL;
59 }
60
57 off = ALIGN(sizeof(struct nf_ct_ext), t->align); 61 off = ALIGN(sizeof(struct nf_ct_ext), t->align);
58 len = off + t->len + var_alloc_len; 62 len = off + t->len + var_alloc_len;
59 alloc_size = t->alloc_size + var_alloc_len; 63 alloc_size = t->alloc_size + var_alloc_len;
@@ -88,7 +92,10 @@ void *__nf_ct_ext_add_length(struct nf_conn *ct, enum nf_ct_ext_id id,
88 92
89 rcu_read_lock(); 93 rcu_read_lock();
90 t = rcu_dereference(nf_ct_ext_types[id]); 94 t = rcu_dereference(nf_ct_ext_types[id]);
91 BUG_ON(t == NULL); 95 if (!t) {
96 rcu_read_unlock();
97 return NULL;
98 }
92 99
93 newoff = ALIGN(old->len, t->align); 100 newoff = ALIGN(old->len, t->align);
94 newlen = newoff + t->len + var_alloc_len; 101 newlen = newoff + t->len + var_alloc_len;
@@ -175,6 +182,6 @@ void nf_ct_extend_unregister(struct nf_ct_ext_type *type)
175 RCU_INIT_POINTER(nf_ct_ext_types[type->id], NULL); 182 RCU_INIT_POINTER(nf_ct_ext_types[type->id], NULL);
176 update_alloc_size(type); 183 update_alloc_size(type);
177 mutex_unlock(&nf_ct_ext_type_mutex); 184 mutex_unlock(&nf_ct_ext_type_mutex);
178 rcu_barrier(); /* Wait for completion of call_rcu()'s */ 185 synchronize_rcu();
179} 186}
180EXPORT_SYMBOL_GPL(nf_ct_extend_unregister); 187EXPORT_SYMBOL_GPL(nf_ct_extend_unregister);
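The nf_conntrack_extend hunks replace BUG_ON(t == NULL) with a graceful bail-out: an extension type can legitimately be unregistered concurrently, so a lookup must tolerate a NULL slot rather than crash. Toy version of that shape (no real RCU here; a plain pointer stands in for the rcu-protected slot, and ext_create is an invented name):

#include <stddef.h>
#include <stdio.h>

struct ext_type { const char *name; size_t len; };

static struct ext_type *ext_types[4];   /* registry; slots may be NULL */

static void *ext_create(int id)
{
	struct ext_type *t;

	if (id < 0 || id >= 4)
		return NULL;
	t = ext_types[id];
	if (!t)              /* was: BUG_ON(t == NULL) -- now just fail */
		return NULL;
	printf("allocating %zu bytes for %s\n", t->len, t->name);
	return t;            /* placeholder for the real allocation */
}

int main(void)
{
	struct ext_type helper = { "helper", 32 };

	ext_types[1] = &helper;
	ext_create(1);                       /* succeeds */
	if (!ext_create(2))
		puts("unregistered extension: caller sees NULL, no crash");
	return 0;
}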
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 6806b5e73567..908d858034e4 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -3442,6 +3442,7 @@ static void __exit ctnetlink_exit(void)
3442#ifdef CONFIG_NETFILTER_NETLINK_GLUE_CT 3442#ifdef CONFIG_NETFILTER_NETLINK_GLUE_CT
3443 RCU_INIT_POINTER(nfnl_ct_hook, NULL); 3443 RCU_INIT_POINTER(nfnl_ct_hook, NULL);
3444#endif 3444#endif
3445 synchronize_rcu();
3445} 3446}
3446 3447
3447module_init(ctnetlink_init); 3448module_init(ctnetlink_init);
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index 94b14c5a8b17..82802e4a6640 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -903,6 +903,8 @@ static void __exit nf_nat_cleanup(void)
903#ifdef CONFIG_XFRM 903#ifdef CONFIG_XFRM
904 RCU_INIT_POINTER(nf_nat_decode_session_hook, NULL); 904 RCU_INIT_POINTER(nf_nat_decode_session_hook, NULL);
905#endif 905#endif
906 synchronize_rcu();
907
906 for (i = 0; i < NFPROTO_NUMPROTO; i++) 908 for (i = 0; i < NFPROTO_NUMPROTO; i++)
907 kfree(nf_nat_l4protos[i]); 909 kfree(nf_nat_l4protos[i]);
908 910
diff --git a/net/netfilter/nf_nat_proto_sctp.c b/net/netfilter/nf_nat_proto_sctp.c
index 31d358691af0..804e8a0ab36e 100644
--- a/net/netfilter/nf_nat_proto_sctp.c
+++ b/net/netfilter/nf_nat_proto_sctp.c
@@ -33,8 +33,16 @@ sctp_manip_pkt(struct sk_buff *skb,
33 enum nf_nat_manip_type maniptype) 33 enum nf_nat_manip_type maniptype)
34{ 34{
35 sctp_sctphdr_t *hdr; 35 sctp_sctphdr_t *hdr;
36 int hdrsize = 8;
36 37
37 if (!skb_make_writable(skb, hdroff + sizeof(*hdr))) 38 /* This could be an inner header returned in icmp packet; in such
39 * cases we cannot update the checksum field since it is outside
40 * of the 8 bytes of transport layer headers we are guaranteed.
41 */
42 if (skb->len >= hdroff + sizeof(*hdr))
43 hdrsize = sizeof(*hdr);
44
45 if (!skb_make_writable(skb, hdroff + hdrsize))
38 return false; 46 return false;
39 47
40 hdr = (struct sctphdr *)(skb->data + hdroff); 48 hdr = (struct sctphdr *)(skb->data + hdroff);
@@ -47,6 +55,9 @@ sctp_manip_pkt(struct sk_buff *skb,
47 hdr->dest = tuple->dst.u.sctp.port; 55 hdr->dest = tuple->dst.u.sctp.port;
48 } 56 }
49 57
58 if (hdrsize < sizeof(*hdr))
59 return true;
60
50 if (skb->ip_summed != CHECKSUM_PARTIAL) { 61 if (skb->ip_summed != CHECKSUM_PARTIAL) {
51 hdr->checksum = sctp_compute_cksum(skb, hdroff); 62 hdr->checksum = sctp_compute_cksum(skb, hdroff);
52 skb->ip_summed = CHECKSUM_NONE; 63 skb->ip_summed = CHECKSUM_NONE;
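The sctp_manip_pkt() change above handles SCTP headers quoted inside ICMP errors, where only 8 bytes of transport header are guaranteed: the ports (first 4 bytes) may be rewritten, but the checksum at offset 8 is touched only when the full header is present. The same guard, standalone (manip_sctp is an invented name and the zero "checksum" is a stand-in for the real CRC32c):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct sctp_hdr {
	uint16_t src, dst;
	uint32_t vtag;
	uint32_t checksum;
};                                          /* 12 bytes total */

static int manip_sctp(uint8_t *pkt, size_t len, uint16_t new_src)
{
	size_t hdrsize = 8;                     /* guaranteed minimum */

	if (len >= sizeof(struct sctp_hdr))     /* full header available */
		hdrsize = sizeof(struct sctp_hdr);
	if (len < hdrsize)
		return -1;

	memcpy(pkt, &new_src, sizeof(new_src)); /* rewrite source port */

	if (hdrsize < sizeof(struct sctp_hdr))
		return 0;                       /* checksum not ours to touch */

	uint32_t csum = 0;                      /* stand-in for CRC32c */
	memcpy(pkt + 8, &csum, sizeof(csum));
	return 0;
}

int main(void)
{
	uint8_t full[12] = {0}, inner[8] = {0};

	manip_sctp(full, sizeof(full), 9);   /* ports + checksum updated */
	manip_sctp(inner, sizeof(inner), 9); /* ports only */
	puts("ok");
	return 0;
}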
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 5e0ccfd5bb37..434c739dfeca 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -3145,7 +3145,6 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
3145 iter.count = 0; 3145 iter.count = 0;
3146 iter.err = 0; 3146 iter.err = 0;
3147 iter.fn = nf_tables_bind_check_setelem; 3147 iter.fn = nf_tables_bind_check_setelem;
3148 iter.flush = false;
3149 3148
3150 set->ops->walk(ctx, set, &iter); 3149 set->ops->walk(ctx, set, &iter);
3151 if (iter.err < 0) 3150 if (iter.err < 0)
@@ -3399,7 +3398,6 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
3399 args.iter.count = 0; 3398 args.iter.count = 0;
3400 args.iter.err = 0; 3399 args.iter.err = 0;
3401 args.iter.fn = nf_tables_dump_setelem; 3400 args.iter.fn = nf_tables_dump_setelem;
3402 args.iter.flush = false;
3403 set->ops->walk(&ctx, set, &args.iter); 3401 set->ops->walk(&ctx, set, &args.iter);
3404 3402
3405 nla_nest_end(skb, nest); 3403 nla_nest_end(skb, nest);
@@ -3963,7 +3961,6 @@ static int nf_tables_delsetelem(struct net *net, struct sock *nlsk,
3963 struct nft_set_iter iter = { 3961 struct nft_set_iter iter = {
3964 .genmask = genmask, 3962 .genmask = genmask,
3965 .fn = nft_flush_set, 3963 .fn = nft_flush_set,
3966 .flush = true,
3967 }; 3964 };
3968 set->ops->walk(&ctx, set, &iter); 3965 set->ops->walk(&ctx, set, &iter);
3969 3966
@@ -5114,7 +5111,6 @@ static int nf_tables_check_loops(const struct nft_ctx *ctx,
5114 iter.count = 0; 5111 iter.count = 0;
5115 iter.err = 0; 5112 iter.err = 0;
5116 iter.fn = nf_tables_loop_check_setelem; 5113 iter.fn = nf_tables_loop_check_setelem;
5117 iter.flush = false;
5118 5114
5119 set->ops->walk(ctx, set, &iter); 5115 set->ops->walk(ctx, set, &iter);
5120 if (iter.err < 0) 5116 if (iter.err < 0)
diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c
index de8782345c86..d45558178da5 100644
--- a/net/netfilter/nfnetlink_cthelper.c
+++ b/net/netfilter/nfnetlink_cthelper.c
@@ -32,6 +32,13 @@ MODULE_LICENSE("GPL");
32MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>"); 32MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
33MODULE_DESCRIPTION("nfnl_cthelper: User-space connection tracking helpers"); 33MODULE_DESCRIPTION("nfnl_cthelper: User-space connection tracking helpers");
34 34
35struct nfnl_cthelper {
36 struct list_head list;
37 struct nf_conntrack_helper helper;
38};
39
40static LIST_HEAD(nfnl_cthelper_list);
41
35static int 42static int
36nfnl_userspace_cthelper(struct sk_buff *skb, unsigned int protoff, 43nfnl_userspace_cthelper(struct sk_buff *skb, unsigned int protoff,
37 struct nf_conn *ct, enum ip_conntrack_info ctinfo) 44 struct nf_conn *ct, enum ip_conntrack_info ctinfo)
@@ -161,6 +168,7 @@ nfnl_cthelper_parse_expect_policy(struct nf_conntrack_helper *helper,
161 int i, ret; 168 int i, ret;
162 struct nf_conntrack_expect_policy *expect_policy; 169 struct nf_conntrack_expect_policy *expect_policy;
163 struct nlattr *tb[NFCTH_POLICY_SET_MAX+1]; 170 struct nlattr *tb[NFCTH_POLICY_SET_MAX+1];
171 unsigned int class_max;
164 172
165 ret = nla_parse_nested(tb, NFCTH_POLICY_SET_MAX, attr, 173 ret = nla_parse_nested(tb, NFCTH_POLICY_SET_MAX, attr,
166 nfnl_cthelper_expect_policy_set); 174 nfnl_cthelper_expect_policy_set);
@@ -170,19 +178,18 @@ nfnl_cthelper_parse_expect_policy(struct nf_conntrack_helper *helper,
170 if (!tb[NFCTH_POLICY_SET_NUM]) 178 if (!tb[NFCTH_POLICY_SET_NUM])
171 return -EINVAL; 179 return -EINVAL;
172 180
173 helper->expect_class_max = 181 class_max = ntohl(nla_get_be32(tb[NFCTH_POLICY_SET_NUM]));
174 ntohl(nla_get_be32(tb[NFCTH_POLICY_SET_NUM])); 182 if (class_max == 0)
175 183 return -EINVAL;
176 if (helper->expect_class_max != 0 && 184 if (class_max > NF_CT_MAX_EXPECT_CLASSES)
177 helper->expect_class_max > NF_CT_MAX_EXPECT_CLASSES)
178 return -EOVERFLOW; 185 return -EOVERFLOW;
179 186
180 expect_policy = kzalloc(sizeof(struct nf_conntrack_expect_policy) * 187 expect_policy = kzalloc(sizeof(struct nf_conntrack_expect_policy) *
181 helper->expect_class_max, GFP_KERNEL); 188 class_max, GFP_KERNEL);
182 if (expect_policy == NULL) 189 if (expect_policy == NULL)
183 return -ENOMEM; 190 return -ENOMEM;
184 191
185 for (i=0; i<helper->expect_class_max; i++) { 192 for (i = 0; i < class_max; i++) {
186 if (!tb[NFCTH_POLICY_SET+i]) 193 if (!tb[NFCTH_POLICY_SET+i])
187 goto err; 194 goto err;
188 195
@@ -191,6 +198,8 @@ nfnl_cthelper_parse_expect_policy(struct nf_conntrack_helper *helper,
191 if (ret < 0) 198 if (ret < 0)
192 goto err; 199 goto err;
193 } 200 }
201
202 helper->expect_class_max = class_max - 1;
194 helper->expect_policy = expect_policy; 203 helper->expect_policy = expect_policy;
195 return 0; 204 return 0;
196err: 205err:
@@ -203,18 +212,20 @@ nfnl_cthelper_create(const struct nlattr * const tb[],
203 struct nf_conntrack_tuple *tuple) 212 struct nf_conntrack_tuple *tuple)
204{ 213{
205 struct nf_conntrack_helper *helper; 214 struct nf_conntrack_helper *helper;
215 struct nfnl_cthelper *nfcth;
206 int ret; 216 int ret;
207 217
208 if (!tb[NFCTH_TUPLE] || !tb[NFCTH_POLICY] || !tb[NFCTH_PRIV_DATA_LEN]) 218 if (!tb[NFCTH_TUPLE] || !tb[NFCTH_POLICY] || !tb[NFCTH_PRIV_DATA_LEN])
209 return -EINVAL; 219 return -EINVAL;
210 220
211 helper = kzalloc(sizeof(struct nf_conntrack_helper), GFP_KERNEL); 221 nfcth = kzalloc(sizeof(*nfcth), GFP_KERNEL);
212 if (helper == NULL) 222 if (nfcth == NULL)
213 return -ENOMEM; 223 return -ENOMEM;
224 helper = &nfcth->helper;
214 225
215 ret = nfnl_cthelper_parse_expect_policy(helper, tb[NFCTH_POLICY]); 226 ret = nfnl_cthelper_parse_expect_policy(helper, tb[NFCTH_POLICY]);
216 if (ret < 0) 227 if (ret < 0)
217 goto err; 228 goto err1;
218 229
219 strncpy(helper->name, nla_data(tb[NFCTH_NAME]), NF_CT_HELPER_NAME_LEN); 230 strncpy(helper->name, nla_data(tb[NFCTH_NAME]), NF_CT_HELPER_NAME_LEN);
220 helper->data_len = ntohl(nla_get_be32(tb[NFCTH_PRIV_DATA_LEN])); 231 helper->data_len = ntohl(nla_get_be32(tb[NFCTH_PRIV_DATA_LEN]));
@@ -245,15 +256,101 @@ nfnl_cthelper_create(const struct nlattr * const tb[],
245 256
246 ret = nf_conntrack_helper_register(helper); 257 ret = nf_conntrack_helper_register(helper);
247 if (ret < 0) 258 if (ret < 0)
248 goto err; 259 goto err2;
249 260
261 list_add_tail(&nfcth->list, &nfnl_cthelper_list);
250 return 0; 262 return 0;
251err: 263err2:
252 kfree(helper); 264 kfree(helper->expect_policy);
265err1:
266 kfree(nfcth);
253 return ret; 267 return ret;
254} 268}
255 269
256static int 270static int
271nfnl_cthelper_update_policy_one(const struct nf_conntrack_expect_policy *policy,
272 struct nf_conntrack_expect_policy *new_policy,
273 const struct nlattr *attr)
274{
275 struct nlattr *tb[NFCTH_POLICY_MAX + 1];
276 int err;
277
278 err = nla_parse_nested(tb, NFCTH_POLICY_MAX, attr,
279 nfnl_cthelper_expect_pol);
280 if (err < 0)
281 return err;
282
283 if (!tb[NFCTH_POLICY_NAME] ||
284 !tb[NFCTH_POLICY_EXPECT_MAX] ||
285 !tb[NFCTH_POLICY_EXPECT_TIMEOUT])
286 return -EINVAL;
287
288 if (nla_strcmp(tb[NFCTH_POLICY_NAME], policy->name))
289 return -EBUSY;
290
291 new_policy->max_expected =
292 ntohl(nla_get_be32(tb[NFCTH_POLICY_EXPECT_MAX]));
293 new_policy->timeout =
294 ntohl(nla_get_be32(tb[NFCTH_POLICY_EXPECT_TIMEOUT]));
295
296 return 0;
297}
298
299static int nfnl_cthelper_update_policy_all(struct nlattr *tb[],
300 struct nf_conntrack_helper *helper)
301{
302 struct nf_conntrack_expect_policy new_policy[helper->expect_class_max + 1];
303 struct nf_conntrack_expect_policy *policy;
304 int i, err;
305
306 /* Check first that all policy attributes are well-formed, so we don't
307 * leave things in an inconsistent state on errors.
308 */
309 for (i = 0; i < helper->expect_class_max + 1; i++) {
310
311 if (!tb[NFCTH_POLICY_SET + i])
312 return -EINVAL;
313
314 err = nfnl_cthelper_update_policy_one(&helper->expect_policy[i],
315 &new_policy[i],
316 tb[NFCTH_POLICY_SET + i]);
317 if (err < 0)
318 return err;
319 }
320 /* Now we can safely update them. */
321 for (i = 0; i < helper->expect_class_max + 1; i++) {
322 policy = (struct nf_conntrack_expect_policy *)
323 &helper->expect_policy[i];
324 policy->max_expected = new_policy->max_expected;
325 policy->timeout = new_policy->timeout;
326 }
327
328 return 0;
329}
330
331static int nfnl_cthelper_update_policy(struct nf_conntrack_helper *helper,
332 const struct nlattr *attr)
333{
334 struct nlattr *tb[NFCTH_POLICY_SET_MAX + 1];
335 unsigned int class_max;
336 int err;
337
338 err = nla_parse_nested(tb, NFCTH_POLICY_SET_MAX, attr,
339 nfnl_cthelper_expect_policy_set);
340 if (err < 0)
341 return err;
342
343 if (!tb[NFCTH_POLICY_SET_NUM])
344 return -EINVAL;
345
346 class_max = ntohl(nla_get_be32(tb[NFCTH_POLICY_SET_NUM]));
347 if (helper->expect_class_max + 1 != class_max)
348 return -EBUSY;
349
350 return nfnl_cthelper_update_policy_all(tb, helper);
351}
352
353static int
257nfnl_cthelper_update(const struct nlattr * const tb[], 354nfnl_cthelper_update(const struct nlattr * const tb[],
258 struct nf_conntrack_helper *helper) 355 struct nf_conntrack_helper *helper)
259{ 356{
@@ -263,8 +360,7 @@ nfnl_cthelper_update(const struct nlattr * const tb[],
263 return -EBUSY; 360 return -EBUSY;
264 361
265 if (tb[NFCTH_POLICY]) { 362 if (tb[NFCTH_POLICY]) {
266 ret = nfnl_cthelper_parse_expect_policy(helper, 363 ret = nfnl_cthelper_update_policy(helper, tb[NFCTH_POLICY]);
267 tb[NFCTH_POLICY]);
268 if (ret < 0) 364 if (ret < 0)
269 return ret; 365 return ret;
270 } 366 }
@@ -293,7 +389,8 @@ static int nfnl_cthelper_new(struct net *net, struct sock *nfnl,
293 const char *helper_name; 389 const char *helper_name;
294 struct nf_conntrack_helper *cur, *helper = NULL; 390 struct nf_conntrack_helper *cur, *helper = NULL;
295 struct nf_conntrack_tuple tuple; 391 struct nf_conntrack_tuple tuple;
296 int ret = 0, i; 392 struct nfnl_cthelper *nlcth;
393 int ret = 0;
297 394
298 if (!tb[NFCTH_NAME] || !tb[NFCTH_TUPLE]) 395 if (!tb[NFCTH_NAME] || !tb[NFCTH_TUPLE])
299 return -EINVAL; 396 return -EINVAL;
@@ -304,31 +401,22 @@ static int nfnl_cthelper_new(struct net *net, struct sock *nfnl,
304 if (ret < 0) 401 if (ret < 0)
305 return ret; 402 return ret;
306 403
307 rcu_read_lock(); 404 list_for_each_entry(nlcth, &nfnl_cthelper_list, list) {
308 for (i = 0; i < nf_ct_helper_hsize && !helper; i++) { 405 cur = &nlcth->helper;
309 hlist_for_each_entry_rcu(cur, &nf_ct_helper_hash[i], hnode) {
310 406
311 /* skip non-userspace conntrack helpers. */ 407 if (strncmp(cur->name, helper_name, NF_CT_HELPER_NAME_LEN))
312 if (!(cur->flags & NF_CT_HELPER_F_USERSPACE)) 408 continue;
313 continue;
314 409
315 if (strncmp(cur->name, helper_name, 410 if ((tuple.src.l3num != cur->tuple.src.l3num ||
316 NF_CT_HELPER_NAME_LEN) != 0) 411 tuple.dst.protonum != cur->tuple.dst.protonum))
317 continue; 412 continue;
318 413
319 if ((tuple.src.l3num != cur->tuple.src.l3num || 414 if (nlh->nlmsg_flags & NLM_F_EXCL)
320 tuple.dst.protonum != cur->tuple.dst.protonum)) 415 return -EEXIST;
321 continue;
322 416
323 if (nlh->nlmsg_flags & NLM_F_EXCL) { 417 helper = cur;
324 ret = -EEXIST; 418 break;
325 goto err;
326 }
327 helper = cur;
328 break;
329 }
330 } 419 }
331 rcu_read_unlock();
332 420
333 if (helper == NULL) 421 if (helper == NULL)
334 ret = nfnl_cthelper_create(tb, &tuple); 422 ret = nfnl_cthelper_create(tb, &tuple);
@@ -336,9 +424,6 @@ static int nfnl_cthelper_new(struct net *net, struct sock *nfnl,
336 ret = nfnl_cthelper_update(tb, helper); 424 ret = nfnl_cthelper_update(tb, helper);
337 425
338 return ret; 426 return ret;
339err:
340 rcu_read_unlock();
341 return ret;
342} 427}
343 428
344static int 429static int
@@ -377,10 +462,10 @@ nfnl_cthelper_dump_policy(struct sk_buff *skb,
377 goto nla_put_failure; 462 goto nla_put_failure;
378 463
379 if (nla_put_be32(skb, NFCTH_POLICY_SET_NUM, 464 if (nla_put_be32(skb, NFCTH_POLICY_SET_NUM,
380 htonl(helper->expect_class_max))) 465 htonl(helper->expect_class_max + 1)))
381 goto nla_put_failure; 466 goto nla_put_failure;
382 467
383 for (i=0; i<helper->expect_class_max; i++) { 468 for (i = 0; i < helper->expect_class_max + 1; i++) {
384 nest_parms2 = nla_nest_start(skb, 469 nest_parms2 = nla_nest_start(skb,
385 (NFCTH_POLICY_SET+i) | NLA_F_NESTED); 470 (NFCTH_POLICY_SET+i) | NLA_F_NESTED);
386 if (nest_parms2 == NULL) 471 if (nest_parms2 == NULL)
@@ -502,11 +587,12 @@ static int nfnl_cthelper_get(struct net *net, struct sock *nfnl,
502 struct sk_buff *skb, const struct nlmsghdr *nlh, 587 struct sk_buff *skb, const struct nlmsghdr *nlh,
503 const struct nlattr * const tb[]) 588 const struct nlattr * const tb[])
504{ 589{
505 int ret = -ENOENT, i; 590 int ret = -ENOENT;
506 struct nf_conntrack_helper *cur; 591 struct nf_conntrack_helper *cur;
507 struct sk_buff *skb2; 592 struct sk_buff *skb2;
508 char *helper_name = NULL; 593 char *helper_name = NULL;
509 struct nf_conntrack_tuple tuple; 594 struct nf_conntrack_tuple tuple;
595 struct nfnl_cthelper *nlcth;
510 bool tuple_set = false; 596 bool tuple_set = false;
511 597
512 if (nlh->nlmsg_flags & NLM_F_DUMP) { 598 if (nlh->nlmsg_flags & NLM_F_DUMP) {
@@ -527,45 +613,39 @@ static int nfnl_cthelper_get(struct net *net, struct sock *nfnl,
527 tuple_set = true; 613 tuple_set = true;
528 } 614 }
529 615
530 for (i = 0; i < nf_ct_helper_hsize; i++) { 616 list_for_each_entry(nlcth, &nfnl_cthelper_list, list) {
531 hlist_for_each_entry_rcu(cur, &nf_ct_helper_hash[i], hnode) { 617 cur = &nlcth->helper;
618 if (helper_name &&
619 strncmp(cur->name, helper_name, NF_CT_HELPER_NAME_LEN))
620 continue;
532 621
533 /* skip non-userspace conntrack helpers. */ 622 if (tuple_set &&
534 if (!(cur->flags & NF_CT_HELPER_F_USERSPACE)) 623 (tuple.src.l3num != cur->tuple.src.l3num ||
535 continue; 624 tuple.dst.protonum != cur->tuple.dst.protonum))
625 continue;
536 626
537 if (helper_name && strncmp(cur->name, helper_name, 627 skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
538 NF_CT_HELPER_NAME_LEN) != 0) { 628 if (skb2 == NULL) {
539 continue; 629 ret = -ENOMEM;
540 } 630 break;
541 if (tuple_set && 631 }
542 (tuple.src.l3num != cur->tuple.src.l3num ||
543 tuple.dst.protonum != cur->tuple.dst.protonum))
544 continue;
545
546 skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
547 if (skb2 == NULL) {
548 ret = -ENOMEM;
549 break;
550 }
551 632
552 ret = nfnl_cthelper_fill_info(skb2, NETLINK_CB(skb).portid, 633 ret = nfnl_cthelper_fill_info(skb2, NETLINK_CB(skb).portid,
553 nlh->nlmsg_seq, 634 nlh->nlmsg_seq,
554 NFNL_MSG_TYPE(nlh->nlmsg_type), 635 NFNL_MSG_TYPE(nlh->nlmsg_type),
555 NFNL_MSG_CTHELPER_NEW, cur); 636 NFNL_MSG_CTHELPER_NEW, cur);
556 if (ret <= 0) { 637 if (ret <= 0) {
557 kfree_skb(skb2); 638 kfree_skb(skb2);
558 break; 639 break;
559 } 640 }
560 641
561 ret = netlink_unicast(nfnl, skb2, NETLINK_CB(skb).portid, 642 ret = netlink_unicast(nfnl, skb2, NETLINK_CB(skb).portid,
562 MSG_DONTWAIT); 643 MSG_DONTWAIT);
563 if (ret > 0) 644 if (ret > 0)
564 ret = 0; 645 ret = 0;
565 646
566 /* this avoids a loop in nfnetlink. */ 647 /* this avoids a loop in nfnetlink. */
567 return ret == -EAGAIN ? -ENOBUFS : ret; 648 return ret == -EAGAIN ? -ENOBUFS : ret;
568 }
569 } 649 }
570 return ret; 650 return ret;
571} 651}
@@ -576,10 +656,10 @@ static int nfnl_cthelper_del(struct net *net, struct sock *nfnl,
576{ 656{
577 char *helper_name = NULL; 657 char *helper_name = NULL;
578 struct nf_conntrack_helper *cur; 658 struct nf_conntrack_helper *cur;
579 struct hlist_node *tmp;
580 struct nf_conntrack_tuple tuple; 659 struct nf_conntrack_tuple tuple;
581 bool tuple_set = false, found = false; 660 bool tuple_set = false, found = false;
582 int i, j = 0, ret; 661 struct nfnl_cthelper *nlcth, *n;
662 int j = 0, ret;
583 663
584 if (tb[NFCTH_NAME]) 664 if (tb[NFCTH_NAME])
585 helper_name = nla_data(tb[NFCTH_NAME]); 665 helper_name = nla_data(tb[NFCTH_NAME]);
@@ -592,28 +672,27 @@ static int nfnl_cthelper_del(struct net *net, struct sock *nfnl,
592 tuple_set = true; 672 tuple_set = true;
593 } 673 }
594 674
595 for (i = 0; i < nf_ct_helper_hsize; i++) { 675 list_for_each_entry_safe(nlcth, n, &nfnl_cthelper_list, list) {
596 hlist_for_each_entry_safe(cur, tmp, &nf_ct_helper_hash[i], 676 cur = &nlcth->helper;
597 hnode) { 677 j++;
598 /* skip non-userspace conntrack helpers. */
599 if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
600 continue;
601 678
602 j++; 679 if (helper_name &&
680 strncmp(cur->name, helper_name, NF_CT_HELPER_NAME_LEN))
681 continue;
603 682
604 if (helper_name && strncmp(cur->name, helper_name, 683 if (tuple_set &&
605 NF_CT_HELPER_NAME_LEN) != 0) { 684 (tuple.src.l3num != cur->tuple.src.l3num ||
606 continue; 685 tuple.dst.protonum != cur->tuple.dst.protonum))
607 } 686 continue;
608 if (tuple_set &&
609 (tuple.src.l3num != cur->tuple.src.l3num ||
610 tuple.dst.protonum != cur->tuple.dst.protonum))
611 continue;
612 687
613 found = true; 688 found = true;
614 nf_conntrack_helper_unregister(cur); 689 nf_conntrack_helper_unregister(cur);
615 } 690 kfree(cur->expect_policy);
691
692 list_del(&nlcth->list);
693 kfree(nlcth);
616 } 694 }
695
617 /* Make sure we return success if we flush and there is no helpers */ 696 /* Make sure we return success if we flush and there is no helpers */
618 return (found || j == 0) ? 0 : -ENOENT; 697 return (found || j == 0) ? 0 : -ENOENT;
619} 698}
@@ -662,20 +741,16 @@ err_out:
662static void __exit nfnl_cthelper_exit(void) 741static void __exit nfnl_cthelper_exit(void)
663{ 742{
664 struct nf_conntrack_helper *cur; 743 struct nf_conntrack_helper *cur;
665 struct hlist_node *tmp; 744 struct nfnl_cthelper *nlcth, *n;
666 int i;
667 745
668 nfnetlink_subsys_unregister(&nfnl_cthelper_subsys); 746 nfnetlink_subsys_unregister(&nfnl_cthelper_subsys);
669 747
670 for (i=0; i<nf_ct_helper_hsize; i++) { 748 list_for_each_entry_safe(nlcth, n, &nfnl_cthelper_list, list) {
671 hlist_for_each_entry_safe(cur, tmp, &nf_ct_helper_hash[i], 749 cur = &nlcth->helper;
672 hnode) {
673 /* skip non-userspace conntrack helpers. */
674 if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
675 continue;
676 750
677 nf_conntrack_helper_unregister(cur); 751 nf_conntrack_helper_unregister(cur);
678 } 752 kfree(cur->expect_policy);
753 kfree(nlcth);
679 } 754 }
680} 755}
681 756
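The cthelper rework above stops scanning the global helper hash (which mixes kernel and userspace helpers) and keeps userspace helpers on a private nfnl_cthelper_list; teardown then needs safe iteration because entries are freed mid-walk. A toy singly linked list with the same "remember next before freeing" idiom that list_for_each_entry_safe() provides:

#include <stdio.h>
#include <stdlib.h>

struct nl_helper {
	char name[16];
	struct nl_helper *next;
};

static struct nl_helper *helpers;

static void helper_add(const char *name)
{
	struct nl_helper *h = calloc(1, sizeof(*h));

	snprintf(h->name, sizeof(h->name), "%s", name);
	h->next = helpers;
	helpers = h;
}

static void helpers_destroy_all(void)
{
	struct nl_helper *h = helpers, *n;

	while (h) {
		n = h->next;         /* like list_for_each_entry_safe() */
		printf("unregistering %s\n", h->name);
		free(h);             /* h is gone; n is still valid */
		h = n;
	}
	helpers = NULL;
}

int main(void)
{
	helper_add("ftp-user");
	helper_add("sip-user");
	helpers_destroy_all();
	return 0;
}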
diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c
index 139e0867e56e..47d6656c9119 100644
--- a/net/netfilter/nfnetlink_cttimeout.c
+++ b/net/netfilter/nfnetlink_cttimeout.c
@@ -646,8 +646,8 @@ static void __exit cttimeout_exit(void)
646#ifdef CONFIG_NF_CONNTRACK_TIMEOUT 646#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
647 RCU_INIT_POINTER(nf_ct_timeout_find_get_hook, NULL); 647 RCU_INIT_POINTER(nf_ct_timeout_find_get_hook, NULL);
648 RCU_INIT_POINTER(nf_ct_timeout_put_hook, NULL); 648 RCU_INIT_POINTER(nf_ct_timeout_put_hook, NULL);
649 synchronize_rcu();
649#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ 650#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
650 rcu_barrier();
651} 651}
652 652
653module_init(cttimeout_init); 653module_init(cttimeout_init);
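Several hunks above (ecache, ctnetlink, nf_nat, cttimeout) add synchronize_rcu() between clearing a hook pointer with RCU_INIT_POINTER(..., NULL) and freeing or unloading what it pointed to. The ordering they enforce, unpublish first, wait out in-flight readers, only then free, can be modelled in userspace; the toy grace period below just spins on a reader counter and is far weaker than real RCU (which is wait-free for readers), so treat it as a sketch of the ordering only:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

struct hook { const char *name; };

static _Atomic(struct hook *) hook_ptr;
static atomic_int active_readers;

static void reader_use_hook(void)
{
	atomic_fetch_add(&active_readers, 1);
	struct hook *h = atomic_load(&hook_ptr);
	if (h)
		printf("calling hook %s\n", h->name);
	atomic_fetch_sub(&active_readers, 1);
}

static void toy_synchronize(void)
{
	while (atomic_load(&active_readers) > 0)
		usleep(1000);        /* wait for in-flight readers */
}

int main(void)
{
	struct hook *h = malloc(sizeof(*h));

	h->name = "ct_event";
	atomic_store(&hook_ptr, h);
	reader_use_hook();

	atomic_store(&hook_ptr, NULL);  /* unpublish first */
	toy_synchronize();              /* then wait out readers */
	free(h);                        /* now nobody can still hold h */
	return 0;
}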
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index 3ee0b8a000a4..933509ebf3d3 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -443,7 +443,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
443 skb = alloc_skb(size, GFP_ATOMIC); 443 skb = alloc_skb(size, GFP_ATOMIC);
444 if (!skb) { 444 if (!skb) {
445 skb_tx_error(entskb); 445 skb_tx_error(entskb);
446 return NULL; 446 goto nlmsg_failure;
447 } 447 }
448 448
449 nlh = nlmsg_put(skb, 0, 0, 449 nlh = nlmsg_put(skb, 0, 0,
@@ -452,7 +452,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
452 if (!nlh) { 452 if (!nlh) {
453 skb_tx_error(entskb); 453 skb_tx_error(entskb);
454 kfree_skb(skb); 454 kfree_skb(skb);
455 return NULL; 455 goto nlmsg_failure;
456 } 456 }
457 nfmsg = nlmsg_data(nlh); 457 nfmsg = nlmsg_data(nlh);
458 nfmsg->nfgen_family = entry->state.pf; 458 nfmsg->nfgen_family = entry->state.pf;
@@ -598,12 +598,17 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
598 } 598 }
599 599
600 nlh->nlmsg_len = skb->len; 600 nlh->nlmsg_len = skb->len;
601 if (seclen)
602 security_release_secctx(secdata, seclen);
601 return skb; 603 return skb;
602 604
603nla_put_failure: 605nla_put_failure:
604 skb_tx_error(entskb); 606 skb_tx_error(entskb);
605 kfree_skb(skb); 607 kfree_skb(skb);
606 net_err_ratelimited("nf_queue: error creating packet message\n"); 608 net_err_ratelimited("nf_queue: error creating packet message\n");
609nlmsg_failure:
610 if (seclen)
611 security_release_secctx(secdata, seclen);
607 return NULL; 612 return NULL;
608} 613}
609 614
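The nfqnl_build_packet_message() fix above makes the secctx buffer freed on the early failure paths too, not only after nla_put_failure: the bare "return NULL" exits leaked it. The pattern is a conditionally acquired resource with one release point that every exit path, success included, runs through; a sketch under invented names (malloc stands in for security_secid_to_secctx()):

#include <stdio.h>
#include <stdlib.h>

static int build_message(int with_secctx, int fail_early)
{
	char *secdata = NULL;
	size_t seclen = 0;
	int ret = 0;

	if (with_secctx) {
		secdata = malloc(32);  /* stand-in for the secctx lookup */
		seclen = 32;
	}

	if (fail_early) {              /* was: "return NULL", leaking secdata */
		ret = -1;
		goto out;
	}

	printf("message built (seclen=%zu)\n", seclen);

out:
	if (seclen)                    /* mirrors security_release_secctx() */
		free(secdata);
	return ret;
}

int main(void)
{
	build_message(1, 0);
	build_message(1, 1);           /* no leak on the failure path */
	return 0;
}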
diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c
index bf548a7a71ec..0264258c46fe 100644
--- a/net/netfilter/nft_ct.c
+++ b/net/netfilter/nft_ct.c
@@ -83,7 +83,7 @@ static void nft_ct_get_eval(const struct nft_expr *expr,
83 83
84 switch (priv->key) { 84 switch (priv->key) {
85 case NFT_CT_DIRECTION: 85 case NFT_CT_DIRECTION:
86 *dest = CTINFO2DIR(ctinfo); 86 nft_reg_store8(dest, CTINFO2DIR(ctinfo));
87 return; 87 return;
88 case NFT_CT_STATUS: 88 case NFT_CT_STATUS:
89 *dest = ct->status; 89 *dest = ct->status;
@@ -151,20 +151,22 @@ static void nft_ct_get_eval(const struct nft_expr *expr,
151 return; 151 return;
152 } 152 }
153 case NFT_CT_L3PROTOCOL: 153 case NFT_CT_L3PROTOCOL:
154 *dest = nf_ct_l3num(ct); 154 nft_reg_store8(dest, nf_ct_l3num(ct));
155 return; 155 return;
156 case NFT_CT_PROTOCOL: 156 case NFT_CT_PROTOCOL:
157 *dest = nf_ct_protonum(ct); 157 nft_reg_store8(dest, nf_ct_protonum(ct));
158 return; 158 return;
159#ifdef CONFIG_NF_CONNTRACK_ZONES 159#ifdef CONFIG_NF_CONNTRACK_ZONES
160 case NFT_CT_ZONE: { 160 case NFT_CT_ZONE: {
161 const struct nf_conntrack_zone *zone = nf_ct_zone(ct); 161 const struct nf_conntrack_zone *zone = nf_ct_zone(ct);
162 u16 zoneid;
162 163
163 if (priv->dir < IP_CT_DIR_MAX) 164 if (priv->dir < IP_CT_DIR_MAX)
164 *dest = nf_ct_zone_id(zone, priv->dir); 165 zoneid = nf_ct_zone_id(zone, priv->dir);
165 else 166 else
166 *dest = zone->id; 167 zoneid = zone->id;
167 168
169 nft_reg_store16(dest, zoneid);
168 return; 170 return;
169 } 171 }
170#endif 172#endif
@@ -183,10 +185,10 @@ static void nft_ct_get_eval(const struct nft_expr *expr,
183 nf_ct_l3num(ct) == NFPROTO_IPV4 ? 4 : 16); 185 nf_ct_l3num(ct) == NFPROTO_IPV4 ? 4 : 16);
184 return; 186 return;
185 case NFT_CT_PROTO_SRC: 187 case NFT_CT_PROTO_SRC:
186 *dest = (__force __u16)tuple->src.u.all; 188 nft_reg_store16(dest, (__force u16)tuple->src.u.all);
187 return; 189 return;
188 case NFT_CT_PROTO_DST: 190 case NFT_CT_PROTO_DST:
189 *dest = (__force __u16)tuple->dst.u.all; 191 nft_reg_store16(dest, (__force u16)tuple->dst.u.all);
190 return; 192 return;
191 default: 193 default:
192 break; 194 break;
@@ -205,7 +207,7 @@ static void nft_ct_set_zone_eval(const struct nft_expr *expr,
205 const struct nft_ct *priv = nft_expr_priv(expr); 207 const struct nft_ct *priv = nft_expr_priv(expr);
206 struct sk_buff *skb = pkt->skb; 208 struct sk_buff *skb = pkt->skb;
207 enum ip_conntrack_info ctinfo; 209 enum ip_conntrack_info ctinfo;
208 u16 value = regs->data[priv->sreg]; 210 u16 value = nft_reg_load16(&regs->data[priv->sreg]);
209 struct nf_conn *ct; 211 struct nf_conn *ct;
210 212
211 ct = nf_ct_get(skb, &ctinfo); 213 ct = nf_ct_get(skb, &ctinfo);
@@ -542,7 +544,8 @@ static int nft_ct_set_init(const struct nft_ctx *ctx,
542 case IP_CT_DIR_REPLY: 544 case IP_CT_DIR_REPLY:
543 break; 545 break;
544 default: 546 default:
545 return -EINVAL; 547 err = -EINVAL;
548 goto err1;
546 } 549 }
547 } 550 }
548 551
diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c
index e1f5ca9b423b..7b60e01f38ff 100644
--- a/net/netfilter/nft_meta.c
+++ b/net/netfilter/nft_meta.c
@@ -45,16 +45,15 @@ void nft_meta_get_eval(const struct nft_expr *expr,
45 *dest = skb->len; 45 *dest = skb->len;
46 break; 46 break;
47 case NFT_META_PROTOCOL: 47 case NFT_META_PROTOCOL:
48 *dest = 0; 48 nft_reg_store16(dest, (__force u16)skb->protocol);
49 *(__be16 *)dest = skb->protocol;
50 break; 49 break;
51 case NFT_META_NFPROTO: 50 case NFT_META_NFPROTO:
52 *dest = nft_pf(pkt); 51 nft_reg_store8(dest, nft_pf(pkt));
53 break; 52 break;
54 case NFT_META_L4PROTO: 53 case NFT_META_L4PROTO:
55 if (!pkt->tprot_set) 54 if (!pkt->tprot_set)
56 goto err; 55 goto err;
57 *dest = pkt->tprot; 56 nft_reg_store8(dest, pkt->tprot);
58 break; 57 break;
59 case NFT_META_PRIORITY: 58 case NFT_META_PRIORITY:
60 *dest = skb->priority; 59 *dest = skb->priority;
@@ -85,14 +84,12 @@ void nft_meta_get_eval(const struct nft_expr *expr,
85 case NFT_META_IIFTYPE: 84 case NFT_META_IIFTYPE:
86 if (in == NULL) 85 if (in == NULL)
87 goto err; 86 goto err;
88 *dest = 0; 87 nft_reg_store16(dest, in->type);
89 *(u16 *)dest = in->type;
90 break; 88 break;
91 case NFT_META_OIFTYPE: 89 case NFT_META_OIFTYPE:
92 if (out == NULL) 90 if (out == NULL)
93 goto err; 91 goto err;
94 *dest = 0; 92 nft_reg_store16(dest, out->type);
95 *(u16 *)dest = out->type;
96 break; 93 break;
97 case NFT_META_SKUID: 94 case NFT_META_SKUID:
98 sk = skb_to_full_sk(skb); 95 sk = skb_to_full_sk(skb);
@@ -142,19 +139,19 @@ void nft_meta_get_eval(const struct nft_expr *expr,
142#endif 139#endif
143 case NFT_META_PKTTYPE: 140 case NFT_META_PKTTYPE:
144 if (skb->pkt_type != PACKET_LOOPBACK) { 141 if (skb->pkt_type != PACKET_LOOPBACK) {
145 *dest = skb->pkt_type; 142 nft_reg_store8(dest, skb->pkt_type);
146 break; 143 break;
147 } 144 }
148 145
149 switch (nft_pf(pkt)) { 146 switch (nft_pf(pkt)) {
150 case NFPROTO_IPV4: 147 case NFPROTO_IPV4:
151 if (ipv4_is_multicast(ip_hdr(skb)->daddr)) 148 if (ipv4_is_multicast(ip_hdr(skb)->daddr))
152 *dest = PACKET_MULTICAST; 149 nft_reg_store8(dest, PACKET_MULTICAST);
153 else 150 else
154 *dest = PACKET_BROADCAST; 151 nft_reg_store8(dest, PACKET_BROADCAST);
155 break; 152 break;
156 case NFPROTO_IPV6: 153 case NFPROTO_IPV6:
157 *dest = PACKET_MULTICAST; 154 nft_reg_store8(dest, PACKET_MULTICAST);
158 break; 155 break;
159 case NFPROTO_NETDEV: 156 case NFPROTO_NETDEV:
160 switch (skb->protocol) { 157 switch (skb->protocol) {
@@ -168,14 +165,14 @@ void nft_meta_get_eval(const struct nft_expr *expr,
168 goto err; 165 goto err;
169 166
170 if (ipv4_is_multicast(iph->daddr)) 167 if (ipv4_is_multicast(iph->daddr))
171 *dest = PACKET_MULTICAST; 168 nft_reg_store8(dest, PACKET_MULTICAST);
172 else 169 else
173 *dest = PACKET_BROADCAST; 170 nft_reg_store8(dest, PACKET_BROADCAST);
174 171
175 break; 172 break;
176 } 173 }
177 case htons(ETH_P_IPV6): 174 case htons(ETH_P_IPV6):
178 *dest = PACKET_MULTICAST; 175 nft_reg_store8(dest, PACKET_MULTICAST);
179 break; 176 break;
180 default: 177 default:
181 WARN_ON_ONCE(1); 178 WARN_ON_ONCE(1);
@@ -230,7 +227,9 @@ void nft_meta_set_eval(const struct nft_expr *expr,
230{ 227{
231 const struct nft_meta *meta = nft_expr_priv(expr); 228 const struct nft_meta *meta = nft_expr_priv(expr);
232 struct sk_buff *skb = pkt->skb; 229 struct sk_buff *skb = pkt->skb;
233 u32 value = regs->data[meta->sreg]; 230 u32 *sreg = &regs->data[meta->sreg];
231 u32 value = *sreg;
232 u8 pkt_type;
234 233
235 switch (meta->key) { 234 switch (meta->key) {
236 case NFT_META_MARK: 235 case NFT_META_MARK:
@@ -240,9 +239,12 @@ void nft_meta_set_eval(const struct nft_expr *expr,
240 skb->priority = value; 239 skb->priority = value;
241 break; 240 break;
242 case NFT_META_PKTTYPE: 241 case NFT_META_PKTTYPE:
243 if (skb->pkt_type != value && 242 pkt_type = nft_reg_load8(sreg);
244 skb_pkt_type_ok(value) && skb_pkt_type_ok(skb->pkt_type)) 243
245 skb->pkt_type = value; 244 if (skb->pkt_type != pkt_type &&
245 skb_pkt_type_ok(pkt_type) &&
246 skb_pkt_type_ok(skb->pkt_type))
247 skb->pkt_type = pkt_type;
246 break; 248 break;
247 case NFT_META_NFTRACE: 249 case NFT_META_NFTRACE:
248 skb->nf_trace = !!value; 250 skb->nf_trace = !!value;
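The nft_ct and nft_meta hunks above (and the nft_nat hunk that follows) route all sub-32-bit values through nft_reg_store8()/nft_reg_store16()-style helpers: zero the whole 32-bit register first, then write the narrow value at its base address, so the padding bytes are defined and full-register comparisons behave the same on big- and little-endian machines. Reimplemented for userspace (reg_* names chosen to mirror the kernel helpers):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static inline void reg_store8(uint32_t *dreg, uint8_t val)
{
	*dreg = 0;              /* clear padding before the narrow store */
	memcpy(dreg, &val, sizeof(val));
}

static inline void reg_store16(uint32_t *dreg, uint16_t val)
{
	*dreg = 0;
	memcpy(dreg, &val, sizeof(val));
}

static inline uint8_t reg_load8(const uint32_t *sreg)
{
	uint8_t val;

	memcpy(&val, sreg, sizeof(val));
	return val;
}

int main(void)
{
	uint32_t reg = 0xdeadbeef;   /* stale data from a previous rule */

	reg_store8(&reg, 6);         /* e.g. storing IPPROTO_TCP */
	printf("reg=0x%08x load8=%u\n", reg, reg_load8(&reg));
	return 0;
}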
diff --git a/net/netfilter/nft_nat.c b/net/netfilter/nft_nat.c
index 19a7bf3236f9..439e0bd152a0 100644
--- a/net/netfilter/nft_nat.c
+++ b/net/netfilter/nft_nat.c
@@ -65,10 +65,10 @@ static void nft_nat_eval(const struct nft_expr *expr,
65 } 65 }
66 66
67 if (priv->sreg_proto_min) { 67 if (priv->sreg_proto_min) {
68 range.min_proto.all = 68 range.min_proto.all = (__force __be16)nft_reg_load16(
69 *(__be16 *)&regs->data[priv->sreg_proto_min]; 69 &regs->data[priv->sreg_proto_min]);
70 range.max_proto.all = 70 range.max_proto.all = (__force __be16)nft_reg_load16(
71 *(__be16 *)&regs->data[priv->sreg_proto_max]; 71 &regs->data[priv->sreg_proto_max]);
72 range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED; 72 range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
73 } 73 }
74 74
diff --git a/net/netfilter/nft_set_bitmap.c b/net/netfilter/nft_set_bitmap.c
index 152d226552c1..8ebbc2940f4c 100644
--- a/net/netfilter/nft_set_bitmap.c
+++ b/net/netfilter/nft_set_bitmap.c
@@ -15,6 +15,11 @@
15#include <linux/netfilter/nf_tables.h> 15#include <linux/netfilter/nf_tables.h>
16#include <net/netfilter/nf_tables.h> 16#include <net/netfilter/nf_tables.h>
17 17
18struct nft_bitmap_elem {
19 struct list_head head;
20 struct nft_set_ext ext;
21};
22
18/* This bitmap uses two bits to represent one element. These two bits determine 23/* This bitmap uses two bits to represent one element. These two bits determine
19 * the element state in the current and the future generation. 24 * the element state in the current and the future generation.
20 * 25 *
@@ -41,13 +46,22 @@
41 * restore its previous state. 46 * restore its previous state.
42 */ 47 */
43struct nft_bitmap { 48struct nft_bitmap {
44 u16 bitmap_size; 49 struct list_head list;
45 u8 bitmap[]; 50 u16 bitmap_size;
51 u8 bitmap[];
46}; 52};
47 53
48static inline void nft_bitmap_location(u32 key, u32 *idx, u32 *off) 54static inline void nft_bitmap_location(const struct nft_set *set,
55 const void *key,
56 u32 *idx, u32 *off)
49{ 57{
50 u32 k = (key << 1); 58 u32 k;
59
60 if (set->klen == 2)
61 k = *(u16 *)key;
62 else
63 k = *(u8 *)key;
64 k <<= 1;
51 65
52 *idx = k / BITS_PER_BYTE; 66 *idx = k / BITS_PER_BYTE;
53 *off = k % BITS_PER_BYTE; 67 *off = k % BITS_PER_BYTE;
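nft_bitmap_location() above maps a key to a byte index and bit offset with two bits stored per element, encoding its state in the current and next generation. The arithmetic, extracted into a standalone program:

#include <stdint.h>
#include <stdio.h>

#define BITS_PER_BYTE 8

static void bitmap_location(uint32_t key, uint32_t *idx, uint32_t *off)
{
	uint32_t k = key << 1;        /* two bits per element */

	*idx = k / BITS_PER_BYTE;     /* which byte */
	*off = k % BITS_PER_BYTE;     /* which bit pair inside it */
}

int main(void)
{
	uint32_t idx, off;

	for (uint32_t key = 0; key < 6; key++) {
		bitmap_location(key, &idx, &off);
		printf("key %u -> byte %u, bit %u\n", key, idx, off);
	}
	return 0;
}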
@@ -69,26 +83,48 @@ static bool nft_bitmap_lookup(const struct net *net, const struct nft_set *set,
69 u8 genmask = nft_genmask_cur(net); 83 u8 genmask = nft_genmask_cur(net);
70 u32 idx, off; 84 u32 idx, off;
71 85
72 nft_bitmap_location(*key, &idx, &off); 86 nft_bitmap_location(set, key, &idx, &off);
73 87
74 return nft_bitmap_active(priv->bitmap, idx, off, genmask); 88 return nft_bitmap_active(priv->bitmap, idx, off, genmask);
75} 89}
76 90
91static struct nft_bitmap_elem *
92nft_bitmap_elem_find(const struct nft_set *set, struct nft_bitmap_elem *this,
93 u8 genmask)
94{
95 const struct nft_bitmap *priv = nft_set_priv(set);
96 struct nft_bitmap_elem *be;
97
98 list_for_each_entry_rcu(be, &priv->list, head) {
99 if (memcmp(nft_set_ext_key(&be->ext),
100 nft_set_ext_key(&this->ext), set->klen) ||
101 !nft_set_elem_active(&be->ext, genmask))
102 continue;
103
104 return be;
105 }
106 return NULL;
107}
108
77static int nft_bitmap_insert(const struct net *net, const struct nft_set *set, 109static int nft_bitmap_insert(const struct net *net, const struct nft_set *set,
78 const struct nft_set_elem *elem, 110 const struct nft_set_elem *elem,
79 struct nft_set_ext **_ext) 111 struct nft_set_ext **ext)
80{ 112{
81 struct nft_bitmap *priv = nft_set_priv(set); 113 struct nft_bitmap *priv = nft_set_priv(set);
82 struct nft_set_ext *ext = elem->priv; 114 struct nft_bitmap_elem *new = elem->priv, *be;
83 u8 genmask = nft_genmask_next(net); 115 u8 genmask = nft_genmask_next(net);
84 u32 idx, off; 116 u32 idx, off;
85 117
86 nft_bitmap_location(nft_set_ext_key(ext)->data[0], &idx, &off); 118 be = nft_bitmap_elem_find(set, new, genmask);
87 if (nft_bitmap_active(priv->bitmap, idx, off, genmask)) 119 if (be) {
120 *ext = &be->ext;
88 return -EEXIST; 121 return -EEXIST;
122 }
89 123
124 nft_bitmap_location(set, nft_set_ext_key(&new->ext), &idx, &off);
90 /* Enter 01 state. */ 125 /* Enter 01 state. */
91 priv->bitmap[idx] |= (genmask << off); 126 priv->bitmap[idx] |= (genmask << off);
127 list_add_tail_rcu(&new->head, &priv->list);
92 128
93 return 0; 129 return 0;
94} 130}
@@ -98,13 +134,14 @@ static void nft_bitmap_remove(const struct net *net,
98 const struct nft_set_elem *elem) 134 const struct nft_set_elem *elem)
99{ 135{
100 struct nft_bitmap *priv = nft_set_priv(set); 136 struct nft_bitmap *priv = nft_set_priv(set);
101 struct nft_set_ext *ext = elem->priv; 137 struct nft_bitmap_elem *be = elem->priv;
102 u8 genmask = nft_genmask_next(net); 138 u8 genmask = nft_genmask_next(net);
103 u32 idx, off; 139 u32 idx, off;
104 140
105 nft_bitmap_location(nft_set_ext_key(ext)->data[0], &idx, &off); 141 nft_bitmap_location(set, nft_set_ext_key(&be->ext), &idx, &off);
106 /* Enter 00 state. */ 142 /* Enter 00 state. */
107 priv->bitmap[idx] &= ~(genmask << off); 143 priv->bitmap[idx] &= ~(genmask << off);
144 list_del_rcu(&be->head);
108} 145}
109 146
110static void nft_bitmap_activate(const struct net *net, 147static void nft_bitmap_activate(const struct net *net,
@@ -112,74 +149,52 @@ static void nft_bitmap_activate(const struct net *net,
112 const struct nft_set_elem *elem) 149 const struct nft_set_elem *elem)
113{ 150{
114 struct nft_bitmap *priv = nft_set_priv(set); 151 struct nft_bitmap *priv = nft_set_priv(set);
115 struct nft_set_ext *ext = elem->priv; 152 struct nft_bitmap_elem *be = elem->priv;
116 u8 genmask = nft_genmask_next(net); 153 u8 genmask = nft_genmask_next(net);
117 u32 idx, off; 154 u32 idx, off;
118 155
119 nft_bitmap_location(nft_set_ext_key(ext)->data[0], &idx, &off); 156 nft_bitmap_location(set, nft_set_ext_key(&be->ext), &idx, &off);
120 /* Enter 11 state. */ 157 /* Enter 11 state. */
121 priv->bitmap[idx] |= (genmask << off); 158 priv->bitmap[idx] |= (genmask << off);
159 nft_set_elem_change_active(net, set, &be->ext);
122} 160}
123 161
124static bool nft_bitmap_flush(const struct net *net, 162static bool nft_bitmap_flush(const struct net *net,
125 const struct nft_set *set, void *ext) 163 const struct nft_set *set, void *_be)
126{ 164{
127 struct nft_bitmap *priv = nft_set_priv(set); 165 struct nft_bitmap *priv = nft_set_priv(set);
128 u8 genmask = nft_genmask_next(net); 166 u8 genmask = nft_genmask_next(net);
167 struct nft_bitmap_elem *be = _be;
129 u32 idx, off; 168 u32 idx, off;
130 169
131 nft_bitmap_location(nft_set_ext_key(ext)->data[0], &idx, &off); 170 nft_bitmap_location(set, nft_set_ext_key(&be->ext), &idx, &off);
132 /* Enter 10 state, similar to deactivation. */ 171 /* Enter 10 state, similar to deactivation. */
133 priv->bitmap[idx] &= ~(genmask << off); 172 priv->bitmap[idx] &= ~(genmask << off);
173 nft_set_elem_change_active(net, set, &be->ext);
134 174
135 return true; 175 return true;
136} 176}
137 177
138static struct nft_set_ext *nft_bitmap_ext_alloc(const struct nft_set *set,
139 const struct nft_set_elem *elem)
140{
141 struct nft_set_ext_tmpl tmpl;
142 struct nft_set_ext *ext;
143
144 nft_set_ext_prepare(&tmpl);
145 nft_set_ext_add_length(&tmpl, NFT_SET_EXT_KEY, set->klen);
146
147 ext = kzalloc(tmpl.len, GFP_KERNEL);
148 if (!ext)
149 return NULL;
150
151 nft_set_ext_init(ext, &tmpl);
152 memcpy(nft_set_ext_key(ext), elem->key.val.data, set->klen);
153
154 return ext;
155}
156
157static void *nft_bitmap_deactivate(const struct net *net, 178static void *nft_bitmap_deactivate(const struct net *net,
158 const struct nft_set *set, 179 const struct nft_set *set,
159 const struct nft_set_elem *elem) 180 const struct nft_set_elem *elem)
160{ 181{
161 struct nft_bitmap *priv = nft_set_priv(set); 182 struct nft_bitmap *priv = nft_set_priv(set);
183 struct nft_bitmap_elem *this = elem->priv, *be;
162 u8 genmask = nft_genmask_next(net); 184 u8 genmask = nft_genmask_next(net);
163 struct nft_set_ext *ext; 185 u32 idx, off;
164 u32 idx, off, key = 0;
165
166 memcpy(&key, elem->key.val.data, set->klen);
167 nft_bitmap_location(key, &idx, &off);
168 186
169 if (!nft_bitmap_active(priv->bitmap, idx, off, genmask)) 187 nft_bitmap_location(set, elem->key.val.data, &idx, &off);
170 return NULL;
171 188
172 /* We have no real set extension since this is a bitmap, allocate this 189 be = nft_bitmap_elem_find(set, this, genmask);
173 * dummy object that is released from the commit/abort path. 190 if (!be)
174 */
175 ext = nft_bitmap_ext_alloc(set, elem);
176 if (!ext)
177 return NULL; 191 return NULL;
178 192
179 /* Enter 10 state. */ 193 /* Enter 10 state. */
180 priv->bitmap[idx] &= ~(genmask << off); 194 priv->bitmap[idx] &= ~(genmask << off);
195 nft_set_elem_change_active(net, set, &be->ext);
181 196
182 return ext; 197 return be;
183} 198}
184 199
185static void nft_bitmap_walk(const struct nft_ctx *ctx, 200static void nft_bitmap_walk(const struct nft_ctx *ctx,
@@ -187,47 +202,23 @@ static void nft_bitmap_walk(const struct nft_ctx *ctx,
187 struct nft_set_iter *iter) 202 struct nft_set_iter *iter)
188{ 203{
189 const struct nft_bitmap *priv = nft_set_priv(set); 204 const struct nft_bitmap *priv = nft_set_priv(set);
190 struct nft_set_ext_tmpl tmpl; 205 struct nft_bitmap_elem *be;
191 struct nft_set_elem elem; 206 struct nft_set_elem elem;
192 struct nft_set_ext *ext; 207
193 int idx, off; 208 list_for_each_entry_rcu(be, &priv->list, head) {
194 u16 key; 209 if (iter->count < iter->skip)
195 210 goto cont;
196 nft_set_ext_prepare(&tmpl); 211 if (!nft_set_elem_active(&be->ext, iter->genmask))
197 nft_set_ext_add_length(&tmpl, NFT_SET_EXT_KEY, set->klen); 212 goto cont;
198 213
199 for (idx = 0; idx < priv->bitmap_size; idx++) { 214 elem.priv = be;
200 for (off = 0; off < BITS_PER_BYTE; off += 2) { 215
201 if (iter->count < iter->skip) 216 iter->err = iter->fn(ctx, set, iter, &elem);
202 goto cont; 217
203 218 if (iter->err < 0)
204 if (!nft_bitmap_active(priv->bitmap, idx, off, 219 return;
205 iter->genmask))
206 goto cont;
207
208 ext = kzalloc(tmpl.len, GFP_KERNEL);
209 if (!ext) {
210 iter->err = -ENOMEM;
211 return;
212 }
213 nft_set_ext_init(ext, &tmpl);
214 key = ((idx * BITS_PER_BYTE) + off) >> 1;
215 memcpy(nft_set_ext_key(ext), &key, set->klen);
216
217 elem.priv = ext;
218 iter->err = iter->fn(ctx, set, iter, &elem);
219
220 /* On set flush, this dummy extension object is released
221 * from the commit/abort path.
222 */
223 if (!iter->flush)
224 kfree(ext);
225
226 if (iter->err < 0)
227 return;
228cont: 220cont:
229 iter->count++; 221 iter->count++;
230 }
231 } 222 }
232} 223}
233 224
@@ -258,6 +249,7 @@ static int nft_bitmap_init(const struct nft_set *set,
258{ 249{
259 struct nft_bitmap *priv = nft_set_priv(set); 250 struct nft_bitmap *priv = nft_set_priv(set);
260 251
252 INIT_LIST_HEAD(&priv->list);
261 priv->bitmap_size = nft_bitmap_size(set->klen); 253 priv->bitmap_size = nft_bitmap_size(set->klen);
262 254
263 return 0; 255 return 0;
@@ -283,6 +275,7 @@ static bool nft_bitmap_estimate(const struct nft_set_desc *desc, u32 features,
283 275
284static struct nft_set_ops nft_bitmap_ops __read_mostly = { 276static struct nft_set_ops nft_bitmap_ops __read_mostly = {
285 .privsize = nft_bitmap_privsize, 277 .privsize = nft_bitmap_privsize,
278 .elemsize = offsetof(struct nft_bitmap_elem, ext),
286 .estimate = nft_bitmap_estimate, 279 .estimate = nft_bitmap_estimate,
287 .init = nft_bitmap_init, 280 .init = nft_bitmap_init,
288 .destroy = nft_bitmap_destroy, 281 .destroy = nft_bitmap_destroy,
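[note] The bitmap set keeps its two-bit-per-element genmask encoding, but elements are now real objects carrying a list head plus the extension area, so walk and flush no longer fabricate dummy extension objects. The new .elemsize = offsetof(struct nft_bitmap_elem, ext) tells the core how many bytes precede the variable-size extension. The addressing scheme itself is unchanged; a sketch:

	/* sketch: key k occupies bits 2k and 2k+1; the pair encodes
	 * (active in current generation, active in next generation).
	 */
	static void nft_bitmap_location_sketch(u32 key, u32 *idx, u32 *off)
	{
		u32 k = key << 1;

		*idx = k / BITS_PER_BYTE;	/* byte holding the element */
		*off = k % BITS_PER_BYTE;	/* bit offset in that byte  */
	}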
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 7b73c7c161a9..596eaff66649 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -96,6 +96,44 @@ EXPORT_SYMBOL_GPL(nl_table);
96 96
97static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait); 97static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);
98 98
99static struct lock_class_key nlk_cb_mutex_keys[MAX_LINKS];
100
101static const char *const nlk_cb_mutex_key_strings[MAX_LINKS + 1] = {
102 "nlk_cb_mutex-ROUTE",
103 "nlk_cb_mutex-1",
104 "nlk_cb_mutex-USERSOCK",
105 "nlk_cb_mutex-FIREWALL",
106 "nlk_cb_mutex-SOCK_DIAG",
107 "nlk_cb_mutex-NFLOG",
108 "nlk_cb_mutex-XFRM",
109 "nlk_cb_mutex-SELINUX",
110 "nlk_cb_mutex-ISCSI",
111 "nlk_cb_mutex-AUDIT",
112 "nlk_cb_mutex-FIB_LOOKUP",
113 "nlk_cb_mutex-CONNECTOR",
114 "nlk_cb_mutex-NETFILTER",
115 "nlk_cb_mutex-IP6_FW",
116 "nlk_cb_mutex-DNRTMSG",
117 "nlk_cb_mutex-KOBJECT_UEVENT",
118 "nlk_cb_mutex-GENERIC",
119 "nlk_cb_mutex-17",
120 "nlk_cb_mutex-SCSITRANSPORT",
121 "nlk_cb_mutex-ECRYPTFS",
122 "nlk_cb_mutex-RDMA",
123 "nlk_cb_mutex-CRYPTO",
124 "nlk_cb_mutex-SMC",
125 "nlk_cb_mutex-23",
126 "nlk_cb_mutex-24",
127 "nlk_cb_mutex-25",
128 "nlk_cb_mutex-26",
129 "nlk_cb_mutex-27",
130 "nlk_cb_mutex-28",
131 "nlk_cb_mutex-29",
132 "nlk_cb_mutex-30",
133 "nlk_cb_mutex-31",
134 "nlk_cb_mutex-MAX_LINKS"
135};
136
99static int netlink_dump(struct sock *sk); 137static int netlink_dump(struct sock *sk);
100static void netlink_skb_destructor(struct sk_buff *skb); 138static void netlink_skb_destructor(struct sk_buff *skb);
101 139
@@ -585,6 +623,9 @@ static int __netlink_create(struct net *net, struct socket *sock,
585 } else { 623 } else {
586 nlk->cb_mutex = &nlk->cb_def_mutex; 624 nlk->cb_mutex = &nlk->cb_def_mutex;
587 mutex_init(nlk->cb_mutex); 625 mutex_init(nlk->cb_mutex);
626 lockdep_set_class_and_name(nlk->cb_mutex,
627 nlk_cb_mutex_keys + protocol,
628 nlk_cb_mutex_key_strings[protocol]);
588 } 629 }
589 init_waitqueue_head(&nlk->wait); 630 init_waitqueue_head(&nlk->wait);
590 631
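[note] Every netlink socket that falls back to the embedded cb_def_mutex used to share one lockdep class, so lockdep could report false deadlocks between unrelated protocols (a GENERIC dump nesting inside a ROUTE dump, say). Giving each protocol number its own static key and name splits the classes. The shape of the pattern, as a sketch:

	/* sketch: one lockdep class per netlink protocol number */
	static struct lock_class_key cb_keys[MAX_LINKS];

	static void nlk_set_cb_class(struct mutex *m, int protocol,
				     const char *const names[])
	{
		mutex_init(m);
		lockdep_set_class_and_name(m, &cb_keys[protocol],
					   names[protocol]);
	}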
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index fb6e10fdb217..92e0981f7404 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -783,8 +783,10 @@ static int ctrl_dumpfamily(struct sk_buff *skb, struct netlink_callback *cb)
783 783
784 if (ctrl_fill_info(rt, NETLINK_CB(cb->skb).portid, 784 if (ctrl_fill_info(rt, NETLINK_CB(cb->skb).portid,
785 cb->nlh->nlmsg_seq, NLM_F_MULTI, 785 cb->nlh->nlmsg_seq, NLM_F_MULTI,
786 skb, CTRL_CMD_NEWFAMILY) < 0) 786 skb, CTRL_CMD_NEWFAMILY) < 0) {
787 n--;
787 break; 788 break;
789 }
788 } 790 }
789 791
790 cb->args[0] = n; 792 cb->args[0] = n;
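[note] ctrl_dumpfamily() records in cb->args[0] how many families have already been dumped and resumes there on the next invocation. Without the n-- above, the entry whose fill failed for lack of skb space would still be counted as dumped and silently skipped on resume. A sketch of the idiom (family_get() is a hypothetical accessor, not the real API):

	/* sketch: netlink dump resume with retry of the failed entry */
	for (n = cb->args[0]; (rt = family_get(n)) != NULL; n++) {
		if (fill_one(skb, rt) < 0) {
			n--;	/* redo this family on the next call */
			break;
		}
	}
	cb->args[0] = n;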
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 4bbf4526b885..ebf16f7f9089 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -765,7 +765,8 @@ out_release:
765 return err; 765 return err;
766} 766}
767 767
768static int nr_accept(struct socket *sock, struct socket *newsock, int flags) 768static int nr_accept(struct socket *sock, struct socket *newsock, int flags,
769 bool kern)
769{ 770{
770 struct sk_buff *skb; 771 struct sk_buff *skb;
771 struct sock *newsk; 772 struct sock *newsk;
diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c
index 879885b31cce..2ffb18e73df6 100644
--- a/net/nfc/llcp_sock.c
+++ b/net/nfc/llcp_sock.c
@@ -441,7 +441,7 @@ struct sock *nfc_llcp_accept_dequeue(struct sock *parent,
441} 441}
442 442
443static int llcp_sock_accept(struct socket *sock, struct socket *newsock, 443static int llcp_sock_accept(struct socket *sock, struct socket *newsock,
444 int flags) 444 int flags, bool kern)
445{ 445{
446 DECLARE_WAITQUEUE(wait, current); 446 DECLARE_WAITQUEUE(wait, current);
447 struct sock *sk = sock->sk, *new_sk; 447 struct sock *sk = sock->sk, *new_sk;
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index e0a87776a010..7b2c2fce408a 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -643,8 +643,8 @@ static bool skb_nfct_cached(struct net *net,
643 */ 643 */
644 if (nf_ct_is_confirmed(ct)) 644 if (nf_ct_is_confirmed(ct))
645 nf_ct_delete(ct, 0, 0); 645 nf_ct_delete(ct, 0, 0);
646 else 646
647 nf_conntrack_put(&ct->ct_general); 647 nf_conntrack_put(&ct->ct_general);
648 nf_ct_set(skb, NULL, 0); 648 nf_ct_set(skb, NULL, 0);
649 return false; 649 return false;
650 } 650 }
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index 9d4bb8eb63f2..3f76cb765e5b 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -527,7 +527,7 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
527 527
528 /* Link layer. */ 528 /* Link layer. */
529 clear_vlan(key); 529 clear_vlan(key);
530 if (key->mac_proto == MAC_PROTO_NONE) { 530 if (ovs_key_mac_proto(key) == MAC_PROTO_NONE) {
531 if (unlikely(eth_type_vlan(skb->protocol))) 531 if (unlikely(eth_type_vlan(skb->protocol)))
532 return -EINVAL; 532 return -EINVAL;
533 533
@@ -745,7 +745,13 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
745 745
746int ovs_flow_key_update(struct sk_buff *skb, struct sw_flow_key *key) 746int ovs_flow_key_update(struct sk_buff *skb, struct sw_flow_key *key)
747{ 747{
748 return key_extract(skb, key); 748 int res;
749
750 res = key_extract(skb, key);
751 if (!res)
752 key->mac_proto &= ~SW_FLOW_KEY_INVALID;
753
754 return res;
749} 755}
750 756
751static int key_extract_mac_proto(struct sk_buff *skb) 757static int key_extract_mac_proto(struct sk_buff *skb)
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index 6f5fa50f716d..1105a838bab8 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -604,7 +604,7 @@ static int ip_tun_from_nlattr(const struct nlattr *attr,
604 ipv4 = true; 604 ipv4 = true;
605 break; 605 break;
606 case OVS_TUNNEL_KEY_ATTR_IPV6_SRC: 606 case OVS_TUNNEL_KEY_ATTR_IPV6_SRC:
607 SW_FLOW_KEY_PUT(match, tun_key.u.ipv6.dst, 607 SW_FLOW_KEY_PUT(match, tun_key.u.ipv6.src,
608 nla_get_in6_addr(a), is_mask); 608 nla_get_in6_addr(a), is_mask);
609 ipv6 = true; 609 ipv6 = true;
610 break; 610 break;
@@ -665,6 +665,8 @@ static int ip_tun_from_nlattr(const struct nlattr *attr,
665 tun_flags |= TUNNEL_VXLAN_OPT; 665 tun_flags |= TUNNEL_VXLAN_OPT;
666 opts_type = type; 666 opts_type = type;
667 break; 667 break;
668 case OVS_TUNNEL_KEY_ATTR_PAD:
669 break;
668 default: 670 default:
669 OVS_NLERR(log, "Unknown IP tunnel attribute %d", 671 OVS_NLERR(log, "Unknown IP tunnel attribute %d",
670 type); 672 type);
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index a0dbe7ca8f72..8489beff5c25 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -3665,6 +3665,8 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
3665 return -EBUSY; 3665 return -EBUSY;
3666 if (copy_from_user(&val, optval, sizeof(val))) 3666 if (copy_from_user(&val, optval, sizeof(val)))
3667 return -EFAULT; 3667 return -EFAULT;
3668 if (val > INT_MAX)
3669 return -EINVAL;
3668 po->tp_reserve = val; 3670 po->tp_reserve = val;
3669 return 0; 3671 return 0;
3670 } 3672 }
@@ -4193,8 +4195,8 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
4193 if (unlikely(!PAGE_ALIGNED(req->tp_block_size))) 4195 if (unlikely(!PAGE_ALIGNED(req->tp_block_size)))
4194 goto out; 4196 goto out;
4195 if (po->tp_version >= TPACKET_V3 && 4197 if (po->tp_version >= TPACKET_V3 &&
4196 (int)(req->tp_block_size - 4198 req->tp_block_size <=
4197 BLK_PLUS_PRIV(req_u->req3.tp_sizeof_priv)) <= 0) 4199 BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv))
4198 goto out; 4200 goto out;
4199 if (unlikely(req->tp_frame_size < po->tp_hdrlen + 4201 if (unlikely(req->tp_frame_size < po->tp_hdrlen +
4200 po->tp_reserve)) 4202 po->tp_reserve))
@@ -4205,6 +4207,8 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
4205 rb->frames_per_block = req->tp_block_size / req->tp_frame_size; 4207 rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
4206 if (unlikely(rb->frames_per_block == 0)) 4208 if (unlikely(rb->frames_per_block == 0))
4207 goto out; 4209 goto out;
4210 if (unlikely(req->tp_block_size > UINT_MAX / req->tp_block_nr))
4211 goto out;
4208 if (unlikely((rb->frames_per_block * req->tp_block_nr) != 4212 if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
4209 req->tp_frame_nr)) 4213 req->tp_frame_nr))
4210 goto out; 4214 goto out;
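[note] Both af_packet checks close integer-width holes in user-controlled ring parameters: tp_reserve is a u32 later used in signed arithmetic, hence the INT_MAX cap, and tp_block_size * tp_block_nr could wrap before the frames_per_block consistency check. The multiplication guard is the usual divide-before-multiply test; a sketch:

	/* sketch: reject a*b overflow without ever computing a*b */
	static bool mul_fits_u32(u32 a, u32 b)
	{
		return b == 0 || a <= UINT_MAX / b;
	}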
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index 222bedcd9575..e81537991ddf 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -772,7 +772,8 @@ static void pep_sock_close(struct sock *sk, long timeout)
772 sock_put(sk); 772 sock_put(sk);
773} 773}
774 774
775static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp) 775static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp,
776 bool kern)
776{ 777{
777 struct pep_sock *pn = pep_sk(sk), *newpn; 778 struct pep_sock *pn = pep_sk(sk), *newpn;
778 struct sock *newsk = NULL; 779 struct sock *newsk = NULL;
@@ -846,7 +847,8 @@ static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp)
846 } 847 }
847 848
848 /* Create a new to-be-accepted sock */ 849 /* Create a new to-be-accepted sock */
849 newsk = sk_alloc(sock_net(sk), PF_PHONET, GFP_KERNEL, sk->sk_prot, 0); 850 newsk = sk_alloc(sock_net(sk), PF_PHONET, GFP_KERNEL, sk->sk_prot,
851 kern);
850 if (!newsk) { 852 if (!newsk) {
851 pep_reject_conn(sk, skb, PN_PIPE_ERR_OVERLOAD, GFP_KERNEL); 853 pep_reject_conn(sk, skb, PN_PIPE_ERR_OVERLOAD, GFP_KERNEL);
852 err = -ENOBUFS; 854 err = -ENOBUFS;
diff --git a/net/phonet/socket.c b/net/phonet/socket.c
index a6c8da3ee893..64634e3ec2fc 100644
--- a/net/phonet/socket.c
+++ b/net/phonet/socket.c
@@ -305,7 +305,7 @@ out:
305} 305}
306 306
307static int pn_socket_accept(struct socket *sock, struct socket *newsock, 307static int pn_socket_accept(struct socket *sock, struct socket *newsock,
308 int flags) 308 int flags, bool kern)
309{ 309{
310 struct sock *sk = sock->sk; 310 struct sock *sk = sock->sk;
311 struct sock *newsk; 311 struct sock *newsk;
@@ -314,7 +314,7 @@ static int pn_socket_accept(struct socket *sock, struct socket *newsock,
314 if (unlikely(sk->sk_state != TCP_LISTEN)) 314 if (unlikely(sk->sk_state != TCP_LISTEN))
315 return -EINVAL; 315 return -EINVAL;
316 316
317 newsk = sk->sk_prot->accept(sk, flags, &err); 317 newsk = sk->sk_prot->accept(sk, flags, &err, kern);
318 if (!newsk) 318 if (!newsk)
319 return err; 319 return err;
320 320
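[note] The bool kern parameter threaded through every accept() implementation in this series (netrom, nfc/llcp, phonet, and rose, rds and sctp below) records whether the accept is performed by the kernel itself rather than by user space. Passing it through to sk_alloc() lets the child socket get kernel-socket treatment; the assumed rationale is that kernel sockets must not pin the netns refcount and want a distinct lockdep class. The shape of the change is always the same (PF_FOO and foo_accept are placeholders):

	/* sketch: propagate the kern flag from accept() into sk_alloc() */
	static int foo_accept(struct socket *sock, struct socket *newsock,
			      int flags, bool kern)
	{
		struct sock *newsk;

		newsk = sk_alloc(sock_net(sock->sk), PF_FOO, GFP_KERNEL,
				 sock->sk->sk_prot, kern);
		if (!newsk)
			return -ENOBUFS;
		/* ... protocol-specific handshake ... */
		return 0;
	}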
diff --git a/net/rds/connection.c b/net/rds/connection.c
index 0e04dcceb1d4..1fa75ab7b733 100644
--- a/net/rds/connection.c
+++ b/net/rds/connection.c
@@ -429,6 +429,7 @@ void rds_conn_destroy(struct rds_connection *conn)
429 */ 429 */
430 rds_cong_remove_conn(conn); 430 rds_cong_remove_conn(conn);
431 431
432 put_net(conn->c_net);
432 kmem_cache_free(rds_conn_slab, conn); 433 kmem_cache_free(rds_conn_slab, conn);
433 434
434 spin_lock_irqsave(&rds_conn_lock, flags); 435 spin_lock_irqsave(&rds_conn_lock, flags);
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index ce3775abc6e7..1c38d2c7caa8 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -442,7 +442,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
442 ic->i_send_cq = NULL; 442 ic->i_send_cq = NULL;
443 ibdev_put_vector(rds_ibdev, ic->i_scq_vector); 443 ibdev_put_vector(rds_ibdev, ic->i_scq_vector);
444 rdsdebug("ib_create_cq send failed: %d\n", ret); 444 rdsdebug("ib_create_cq send failed: %d\n", ret);
445 goto out; 445 goto rds_ibdev_out;
446 } 446 }
447 447
448 ic->i_rcq_vector = ibdev_get_unused_vector(rds_ibdev); 448 ic->i_rcq_vector = ibdev_get_unused_vector(rds_ibdev);
@@ -456,19 +456,19 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
456 ic->i_recv_cq = NULL; 456 ic->i_recv_cq = NULL;
457 ibdev_put_vector(rds_ibdev, ic->i_rcq_vector); 457 ibdev_put_vector(rds_ibdev, ic->i_rcq_vector);
458 rdsdebug("ib_create_cq recv failed: %d\n", ret); 458 rdsdebug("ib_create_cq recv failed: %d\n", ret);
459 goto out; 459 goto send_cq_out;
460 } 460 }
461 461
462 ret = ib_req_notify_cq(ic->i_send_cq, IB_CQ_NEXT_COMP); 462 ret = ib_req_notify_cq(ic->i_send_cq, IB_CQ_NEXT_COMP);
463 if (ret) { 463 if (ret) {
464 rdsdebug("ib_req_notify_cq send failed: %d\n", ret); 464 rdsdebug("ib_req_notify_cq send failed: %d\n", ret);
465 goto out; 465 goto recv_cq_out;
466 } 466 }
467 467
468 ret = ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED); 468 ret = ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED);
469 if (ret) { 469 if (ret) {
470 rdsdebug("ib_req_notify_cq recv failed: %d\n", ret); 470 rdsdebug("ib_req_notify_cq recv failed: %d\n", ret);
471 goto out; 471 goto recv_cq_out;
472 } 472 }
473 473
474 /* XXX negotiate max send/recv with remote? */ 474 /* XXX negotiate max send/recv with remote? */
@@ -494,7 +494,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
494 ret = rdma_create_qp(ic->i_cm_id, ic->i_pd, &attr); 494 ret = rdma_create_qp(ic->i_cm_id, ic->i_pd, &attr);
495 if (ret) { 495 if (ret) {
496 rdsdebug("rdma_create_qp failed: %d\n", ret); 496 rdsdebug("rdma_create_qp failed: %d\n", ret);
497 goto out; 497 goto recv_cq_out;
498 } 498 }
499 499
500 ic->i_send_hdrs = ib_dma_alloc_coherent(dev, 500 ic->i_send_hdrs = ib_dma_alloc_coherent(dev,
@@ -504,7 +504,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
504 if (!ic->i_send_hdrs) { 504 if (!ic->i_send_hdrs) {
505 ret = -ENOMEM; 505 ret = -ENOMEM;
506 rdsdebug("ib_dma_alloc_coherent send failed\n"); 506 rdsdebug("ib_dma_alloc_coherent send failed\n");
507 goto out; 507 goto qp_out;
508 } 508 }
509 509
510 ic->i_recv_hdrs = ib_dma_alloc_coherent(dev, 510 ic->i_recv_hdrs = ib_dma_alloc_coherent(dev,
@@ -514,7 +514,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
514 if (!ic->i_recv_hdrs) { 514 if (!ic->i_recv_hdrs) {
515 ret = -ENOMEM; 515 ret = -ENOMEM;
516 rdsdebug("ib_dma_alloc_coherent recv failed\n"); 516 rdsdebug("ib_dma_alloc_coherent recv failed\n");
517 goto out; 517 goto send_hdrs_dma_out;
518 } 518 }
519 519
520 ic->i_ack = ib_dma_alloc_coherent(dev, sizeof(struct rds_header), 520 ic->i_ack = ib_dma_alloc_coherent(dev, sizeof(struct rds_header),
@@ -522,7 +522,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
522 if (!ic->i_ack) { 522 if (!ic->i_ack) {
523 ret = -ENOMEM; 523 ret = -ENOMEM;
524 rdsdebug("ib_dma_alloc_coherent ack failed\n"); 524 rdsdebug("ib_dma_alloc_coherent ack failed\n");
525 goto out; 525 goto recv_hdrs_dma_out;
526 } 526 }
527 527
528 ic->i_sends = vzalloc_node(ic->i_send_ring.w_nr * sizeof(struct rds_ib_send_work), 528 ic->i_sends = vzalloc_node(ic->i_send_ring.w_nr * sizeof(struct rds_ib_send_work),
@@ -530,7 +530,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
530 if (!ic->i_sends) { 530 if (!ic->i_sends) {
531 ret = -ENOMEM; 531 ret = -ENOMEM;
532 rdsdebug("send allocation failed\n"); 532 rdsdebug("send allocation failed\n");
533 goto out; 533 goto ack_dma_out;
534 } 534 }
535 535
536 ic->i_recvs = vzalloc_node(ic->i_recv_ring.w_nr * sizeof(struct rds_ib_recv_work), 536 ic->i_recvs = vzalloc_node(ic->i_recv_ring.w_nr * sizeof(struct rds_ib_recv_work),
@@ -538,7 +538,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
538 if (!ic->i_recvs) { 538 if (!ic->i_recvs) {
539 ret = -ENOMEM; 539 ret = -ENOMEM;
540 rdsdebug("recv allocation failed\n"); 540 rdsdebug("recv allocation failed\n");
541 goto out; 541 goto sends_out;
542 } 542 }
543 543
544 rds_ib_recv_init_ack(ic); 544 rds_ib_recv_init_ack(ic);
@@ -546,8 +546,33 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
546 rdsdebug("conn %p pd %p cq %p %p\n", conn, ic->i_pd, 546 rdsdebug("conn %p pd %p cq %p %p\n", conn, ic->i_pd,
547 ic->i_send_cq, ic->i_recv_cq); 547 ic->i_send_cq, ic->i_recv_cq);
548 548
549out: 549 return ret;
550
551sends_out:
552 vfree(ic->i_sends);
553ack_dma_out:
554 ib_dma_free_coherent(dev, sizeof(struct rds_header),
555 ic->i_ack, ic->i_ack_dma);
556recv_hdrs_dma_out:
557 ib_dma_free_coherent(dev, ic->i_recv_ring.w_nr *
558 sizeof(struct rds_header),
559 ic->i_recv_hdrs, ic->i_recv_hdrs_dma);
560send_hdrs_dma_out:
561 ib_dma_free_coherent(dev, ic->i_send_ring.w_nr *
562 sizeof(struct rds_header),
563 ic->i_send_hdrs, ic->i_send_hdrs_dma);
564qp_out:
565 rdma_destroy_qp(ic->i_cm_id);
566recv_cq_out:
567 if (!ib_destroy_cq(ic->i_recv_cq))
568 ic->i_recv_cq = NULL;
569send_cq_out:
570 if (!ib_destroy_cq(ic->i_send_cq))
571 ic->i_send_cq = NULL;
572rds_ibdev_out:
573 rds_ib_remove_conn(rds_ibdev, conn);
550 rds_ib_dev_put(rds_ibdev); 574 rds_ib_dev_put(rds_ibdev);
575
551 return ret; 576 return ret;
552} 577}
553 578
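[note] The rds_ib_setup_qp() rework replaces a single out: label, which leaked every resource allocated before the failing step, with a reverse-order unwind ladder: each allocation gets its own label, and a failure at step N jumps to the label that releases steps N-1 down to 1. The skeleton of the idiom (alloc_a/alloc_b/free_a/free_b are placeholders):

	/* sketch of the kernel's reverse-order error-unwind idiom */
	static int setup(void)
	{
		int ret;

		ret = alloc_a();
		if (ret)
			goto out;
		ret = alloc_b();
		if (ret)
			goto free_a;	/* undo only what succeeded */
		return 0;

	free_a:
		free_a();
	out:
		return ret;
	}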
diff --git a/net/rds/rds.h b/net/rds/rds.h
index 39518ef7af4d..82d38ccf5e8b 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -147,7 +147,7 @@ struct rds_connection {
147 147
148 /* Protocol version */ 148 /* Protocol version */
149 unsigned int c_version; 149 unsigned int c_version;
150 possible_net_t c_net; 150 struct net *c_net;
151 151
152 struct list_head c_map_item; 152 struct list_head c_map_item;
153 unsigned long c_map_queued; 153 unsigned long c_map_queued;
@@ -162,13 +162,13 @@ struct rds_connection {
162static inline 162static inline
163struct net *rds_conn_net(struct rds_connection *conn) 163struct net *rds_conn_net(struct rds_connection *conn)
164{ 164{
165 return read_pnet(&conn->c_net); 165 return conn->c_net;
166} 166}
167 167
168static inline 168static inline
169void rds_conn_net_set(struct rds_connection *conn, struct net *net) 169void rds_conn_net_set(struct rds_connection *conn, struct net *net)
170{ 170{
171 write_pnet(&conn->c_net, net); 171 conn->c_net = get_net(net);
172} 172}
173 173
174#define RDS_FLAG_CONG_BITMAP 0x01 174#define RDS_FLAG_CONG_BITMAP 0x01
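[note] Switching c_net from possible_net_t to a plain struct net * paired with get_net()/put_net() makes each RDS connection hold a real reference on its namespace, so rds_conn_destroy() can safely dereference conn->c_net even when it races with netns teardown. The obligation is the usual one-for-one pairing; a sketch (conn_net_drop is a hypothetical name for the destroy-side half):

	/* sketch: every get_net() is balanced by exactly one put_net() */
	static void conn_net_set(struct rds_connection *conn, struct net *net)
	{
		conn->c_net = get_net(net);	/* take a namespace ref */
	}

	static void conn_net_drop(struct rds_connection *conn)
	{
		put_net(conn->c_net);		/* release it on destroy */
	}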
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index a973d3b4dff0..225690076773 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -484,9 +484,10 @@ static void __net_exit rds_tcp_exit_net(struct net *net)
484 * we do need to clean up the listen socket here. 484 * we do need to clean up the listen socket here.
485 */ 485 */
486 if (rtn->rds_tcp_listen_sock) { 486 if (rtn->rds_tcp_listen_sock) {
487 rds_tcp_listen_stop(rtn->rds_tcp_listen_sock); 487 struct socket *lsock = rtn->rds_tcp_listen_sock;
488
488 rtn->rds_tcp_listen_sock = NULL; 489 rtn->rds_tcp_listen_sock = NULL;
489 flush_work(&rtn->rds_tcp_accept_w); 490 rds_tcp_listen_stop(lsock, &rtn->rds_tcp_accept_w);
490 } 491 }
491} 492}
492 493
@@ -523,13 +524,13 @@ static void rds_tcp_kill_sock(struct net *net)
523 struct rds_tcp_connection *tc, *_tc; 524 struct rds_tcp_connection *tc, *_tc;
524 LIST_HEAD(tmp_list); 525 LIST_HEAD(tmp_list);
525 struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid); 526 struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);
527 struct socket *lsock = rtn->rds_tcp_listen_sock;
526 528
527 rds_tcp_listen_stop(rtn->rds_tcp_listen_sock);
528 rtn->rds_tcp_listen_sock = NULL; 529 rtn->rds_tcp_listen_sock = NULL;
529 flush_work(&rtn->rds_tcp_accept_w); 530 rds_tcp_listen_stop(lsock, &rtn->rds_tcp_accept_w);
530 spin_lock_irq(&rds_tcp_conn_lock); 531 spin_lock_irq(&rds_tcp_conn_lock);
531 list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) { 532 list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
532 struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net); 533 struct net *c_net = tc->t_cpath->cp_conn->c_net;
533 534
534 if (net != c_net || !tc->t_sock) 535 if (net != c_net || !tc->t_sock)
535 continue; 536 continue;
@@ -546,8 +547,12 @@ static void rds_tcp_kill_sock(struct net *net)
546void *rds_tcp_listen_sock_def_readable(struct net *net) 547void *rds_tcp_listen_sock_def_readable(struct net *net)
547{ 548{
548 struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid); 549 struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);
550 struct socket *lsock = rtn->rds_tcp_listen_sock;
551
552 if (!lsock)
553 return NULL;
549 554
550 return rtn->rds_tcp_listen_sock->sk->sk_user_data; 555 return lsock->sk->sk_user_data;
551} 556}
552 557
553static int rds_tcp_dev_event(struct notifier_block *this, 558static int rds_tcp_dev_event(struct notifier_block *this,
@@ -584,7 +589,7 @@ static void rds_tcp_sysctl_reset(struct net *net)
584 589
585 spin_lock_irq(&rds_tcp_conn_lock); 590 spin_lock_irq(&rds_tcp_conn_lock);
586 list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) { 591 list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
587 struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net); 592 struct net *c_net = tc->t_cpath->cp_conn->c_net;
588 593
589 if (net != c_net || !tc->t_sock) 594 if (net != c_net || !tc->t_sock)
590 continue; 595 continue;
@@ -638,19 +643,19 @@ static int rds_tcp_init(void)
638 goto out; 643 goto out;
639 } 644 }
640 645
641 ret = register_netdevice_notifier(&rds_tcp_dev_notifier); 646 ret = rds_tcp_recv_init();
642 if (ret) { 647 if (ret)
643 pr_warn("could not register rds_tcp_dev_notifier\n");
644 goto out_slab; 648 goto out_slab;
645 }
646 649
647 ret = register_pernet_subsys(&rds_tcp_net_ops); 650 ret = register_pernet_subsys(&rds_tcp_net_ops);
648 if (ret) 651 if (ret)
649 goto out_notifier; 652 goto out_recv;
650 653
651 ret = rds_tcp_recv_init(); 654 ret = register_netdevice_notifier(&rds_tcp_dev_notifier);
652 if (ret) 655 if (ret) {
656 pr_warn("could not register rds_tcp_dev_notifier\n");
653 goto out_pernet; 657 goto out_pernet;
658 }
654 659
655 rds_trans_register(&rds_tcp_transport); 660 rds_trans_register(&rds_tcp_transport);
656 661
@@ -660,9 +665,8 @@ static int rds_tcp_init(void)
660 665
661out_pernet: 666out_pernet:
662 unregister_pernet_subsys(&rds_tcp_net_ops); 667 unregister_pernet_subsys(&rds_tcp_net_ops);
663out_notifier: 668out_recv:
664 if (unregister_netdevice_notifier(&rds_tcp_dev_notifier)) 669 rds_tcp_recv_exit();
665 pr_warn("could not unregister rds_tcp_dev_notifier\n");
666out_slab: 670out_slab:
667 kmem_cache_destroy(rds_tcp_conn_slab); 671 kmem_cache_destroy(rds_tcp_conn_slab);
668out: 672out:
diff --git a/net/rds/tcp.h b/net/rds/tcp.h
index 9a1cc8906576..56ea6620fcf9 100644
--- a/net/rds/tcp.h
+++ b/net/rds/tcp.h
@@ -66,7 +66,7 @@ void rds_tcp_state_change(struct sock *sk);
66 66
67/* tcp_listen.c */ 67/* tcp_listen.c */
68struct socket *rds_tcp_listen_init(struct net *); 68struct socket *rds_tcp_listen_init(struct net *);
69void rds_tcp_listen_stop(struct socket *); 69void rds_tcp_listen_stop(struct socket *sock, struct work_struct *acceptor);
70void rds_tcp_listen_data_ready(struct sock *sk); 70void rds_tcp_listen_data_ready(struct sock *sk);
71int rds_tcp_accept_one(struct socket *sock); 71int rds_tcp_accept_one(struct socket *sock);
72int rds_tcp_keepalive(struct socket *sock); 72int rds_tcp_keepalive(struct socket *sock);
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
index 67d0929c7d3d..507678853e6c 100644
--- a/net/rds/tcp_listen.c
+++ b/net/rds/tcp_listen.c
@@ -133,7 +133,7 @@ int rds_tcp_accept_one(struct socket *sock)
133 133
134 new_sock->type = sock->type; 134 new_sock->type = sock->type;
135 new_sock->ops = sock->ops; 135 new_sock->ops = sock->ops;
136 ret = sock->ops->accept(sock, new_sock, O_NONBLOCK); 136 ret = sock->ops->accept(sock, new_sock, O_NONBLOCK, true);
137 if (ret < 0) 137 if (ret < 0)
138 goto out; 138 goto out;
139 139
@@ -223,6 +223,9 @@ void rds_tcp_listen_data_ready(struct sock *sk)
223 * before it has been accepted and the accepter has set up their 223 * before it has been accepted and the accepter has set up their
224 * data_ready.. we only want to queue listen work for our listening 224 * data_ready.. we only want to queue listen work for our listening
225 * socket 225 * socket
226 *
227 * (*ready)() may be null if we are racing with netns delete, and
228 * the listen socket is being torn down.
226 */ 229 */
227 if (sk->sk_state == TCP_LISTEN) 230 if (sk->sk_state == TCP_LISTEN)
228 rds_tcp_accept_work(sk); 231 rds_tcp_accept_work(sk);
@@ -231,7 +234,8 @@ void rds_tcp_listen_data_ready(struct sock *sk)
231 234
232out: 235out:
233 read_unlock_bh(&sk->sk_callback_lock); 236 read_unlock_bh(&sk->sk_callback_lock);
234 ready(sk); 237 if (ready)
238 ready(sk);
235} 239}
236 240
237struct socket *rds_tcp_listen_init(struct net *net) 241struct socket *rds_tcp_listen_init(struct net *net)
@@ -271,7 +275,7 @@ out:
271 return NULL; 275 return NULL;
272} 276}
273 277
274void rds_tcp_listen_stop(struct socket *sock) 278void rds_tcp_listen_stop(struct socket *sock, struct work_struct *acceptor)
275{ 279{
276 struct sock *sk; 280 struct sock *sk;
277 281
@@ -292,5 +296,6 @@ void rds_tcp_listen_stop(struct socket *sock)
292 296
293 /* wait for accepts to stop and close the socket */ 297 /* wait for accepts to stop and close the socket */
294 flush_workqueue(rds_wq); 298 flush_workqueue(rds_wq);
299 flush_work(acceptor);
295 sock_release(sock); 300 sock_release(sock);
296} 301}
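[note] rds_tcp_listen_stop() now receives the acceptor work struct so the teardown order is fully contained in one place: clear rtn->rds_tcp_listen_sock first (racing data_ready callbacks then see NULL, which is why the (*ready)() NULL check was added), quiesce any accept work already queued, and only then release the socket. Condensed sketch of the assumed ordering:

	/* sketch: teardown order for a kernel listen socket whose
	 * acceptor work item may still be in flight.
	 */
	static void listen_stop(struct socket *lsock,
				struct work_struct *acceptor)
	{
		/* 1. detach sk callbacks so no new work is queued */
		/* 2. wait out accepts that were already queued    */
		flush_work(acceptor);
		/* 3. now the socket can be freed safely           */
		sock_release(lsock);
	}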
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index b8a1df2c9785..4a9729257023 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -871,7 +871,8 @@ out_release:
871 return err; 871 return err;
872} 872}
873 873
874static int rose_accept(struct socket *sock, struct socket *newsock, int flags) 874static int rose_accept(struct socket *sock, struct socket *newsock, int flags,
875 bool kern)
875{ 876{
876 struct sk_buff *skb; 877 struct sk_buff *skb;
877 struct sock *newsk; 878 struct sock *newsk;
diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c
index 3f9d8d7ec632..b099b64366f3 100644
--- a/net/rxrpc/conn_event.c
+++ b/net/rxrpc/conn_event.c
@@ -275,6 +275,10 @@ static int rxrpc_process_event(struct rxrpc_connection *conn,
275 rxrpc_conn_retransmit_call(conn, skb); 275 rxrpc_conn_retransmit_call(conn, skb);
276 return 0; 276 return 0;
277 277
278 case RXRPC_PACKET_TYPE_BUSY:
279 /* Just ignore BUSY packets for now. */
280 return 0;
281
278 case RXRPC_PACKET_TYPE_ABORT: 282 case RXRPC_PACKET_TYPE_ABORT:
279 if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header), 283 if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header),
280 &wtmp, sizeof(wtmp)) < 0) 284 &wtmp, sizeof(wtmp)) < 0)
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
index 9f4cfa25af7c..18b2ad8be8e2 100644
--- a/net/rxrpc/input.c
+++ b/net/rxrpc/input.c
@@ -420,6 +420,7 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb,
420 u16 skew) 420 u16 skew)
421{ 421{
422 struct rxrpc_skb_priv *sp = rxrpc_skb(skb); 422 struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
423 enum rxrpc_call_state state;
423 unsigned int offset = sizeof(struct rxrpc_wire_header); 424 unsigned int offset = sizeof(struct rxrpc_wire_header);
424 unsigned int ix; 425 unsigned int ix;
425 rxrpc_serial_t serial = sp->hdr.serial, ack_serial = 0; 426 rxrpc_serial_t serial = sp->hdr.serial, ack_serial = 0;
@@ -434,14 +435,15 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb,
434 _proto("Rx DATA %%%u { #%u f=%02x }", 435 _proto("Rx DATA %%%u { #%u f=%02x }",
435 sp->hdr.serial, seq, sp->hdr.flags); 436 sp->hdr.serial, seq, sp->hdr.flags);
436 437
437 if (call->state >= RXRPC_CALL_COMPLETE) 438 state = READ_ONCE(call->state);
439 if (state >= RXRPC_CALL_COMPLETE)
438 return; 440 return;
439 441
440 /* Received data implicitly ACKs all of the request packets we sent 442 /* Received data implicitly ACKs all of the request packets we sent
441 * when we're acting as a client. 443 * when we're acting as a client.
442 */ 444 */
443 if ((call->state == RXRPC_CALL_CLIENT_SEND_REQUEST || 445 if ((state == RXRPC_CALL_CLIENT_SEND_REQUEST ||
444 call->state == RXRPC_CALL_CLIENT_AWAIT_REPLY) && 446 state == RXRPC_CALL_CLIENT_AWAIT_REPLY) &&
445 !rxrpc_receiving_reply(call)) 447 !rxrpc_receiving_reply(call))
446 return; 448 return;
447 449
@@ -650,6 +652,7 @@ static void rxrpc_input_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
650 struct rxrpc_skb_priv *sp = rxrpc_skb(skb); 652 struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
651 struct rxrpc_peer *peer; 653 struct rxrpc_peer *peer;
652 unsigned int mtu; 654 unsigned int mtu;
655 bool wake = false;
653 u32 rwind = ntohl(ackinfo->rwind); 656 u32 rwind = ntohl(ackinfo->rwind);
654 657
655 _proto("Rx ACK %%%u Info { rx=%u max=%u rwin=%u jm=%u }", 658 _proto("Rx ACK %%%u Info { rx=%u max=%u rwin=%u jm=%u }",
@@ -657,9 +660,14 @@ static void rxrpc_input_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
657 ntohl(ackinfo->rxMTU), ntohl(ackinfo->maxMTU), 660 ntohl(ackinfo->rxMTU), ntohl(ackinfo->maxMTU),
658 rwind, ntohl(ackinfo->jumbo_max)); 661 rwind, ntohl(ackinfo->jumbo_max));
659 662
660 if (rwind > RXRPC_RXTX_BUFF_SIZE - 1) 663 if (call->tx_winsize != rwind) {
661 rwind = RXRPC_RXTX_BUFF_SIZE - 1; 664 if (rwind > RXRPC_RXTX_BUFF_SIZE - 1)
662 call->tx_winsize = rwind; 665 rwind = RXRPC_RXTX_BUFF_SIZE - 1;
666 if (rwind > call->tx_winsize)
667 wake = true;
668 call->tx_winsize = rwind;
669 }
670
663 if (call->cong_ssthresh > rwind) 671 if (call->cong_ssthresh > rwind)
664 call->cong_ssthresh = rwind; 672 call->cong_ssthresh = rwind;
665 673
@@ -673,6 +681,9 @@ static void rxrpc_input_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
673 spin_unlock_bh(&peer->lock); 681 spin_unlock_bh(&peer->lock);
674 _net("Net MTU %u (maxdata %u)", peer->mtu, peer->maxdata); 682 _net("Net MTU %u (maxdata %u)", peer->mtu, peer->maxdata);
675 } 683 }
684
685 if (wake)
686 wake_up(&call->waitq);
676} 687}
677 688
678/* 689/*
@@ -799,7 +810,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
799 return rxrpc_proto_abort("AK0", call, 0); 810 return rxrpc_proto_abort("AK0", call, 0);
800 811
801 /* Ignore ACKs unless we are or have just been transmitting. */ 812 /* Ignore ACKs unless we are or have just been transmitting. */
802 switch (call->state) { 813 switch (READ_ONCE(call->state)) {
803 case RXRPC_CALL_CLIENT_SEND_REQUEST: 814 case RXRPC_CALL_CLIENT_SEND_REQUEST:
804 case RXRPC_CALL_CLIENT_AWAIT_REPLY: 815 case RXRPC_CALL_CLIENT_AWAIT_REPLY:
805 case RXRPC_CALL_SERVER_SEND_REPLY: 816 case RXRPC_CALL_SERVER_SEND_REPLY:
@@ -940,7 +951,7 @@ static void rxrpc_input_call_packet(struct rxrpc_call *call,
940static void rxrpc_input_implicit_end_call(struct rxrpc_connection *conn, 951static void rxrpc_input_implicit_end_call(struct rxrpc_connection *conn,
941 struct rxrpc_call *call) 952 struct rxrpc_call *call)
942{ 953{
943 switch (call->state) { 954 switch (READ_ONCE(call->state)) {
944 case RXRPC_CALL_SERVER_AWAIT_ACK: 955 case RXRPC_CALL_SERVER_AWAIT_ACK:
945 rxrpc_call_completed(call); 956 rxrpc_call_completed(call);
946 break; 957 break;
diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c
index 6491ca46a03f..3e2f1a8e9c5b 100644
--- a/net/rxrpc/recvmsg.c
+++ b/net/rxrpc/recvmsg.c
@@ -527,7 +527,7 @@ try_again:
527 msg->msg_namelen = len; 527 msg->msg_namelen = len;
528 } 528 }
529 529
530 switch (call->state) { 530 switch (READ_ONCE(call->state)) {
531 case RXRPC_CALL_SERVER_ACCEPTING: 531 case RXRPC_CALL_SERVER_ACCEPTING:
532 ret = rxrpc_recvmsg_new_call(rx, call, msg, flags); 532 ret = rxrpc_recvmsg_new_call(rx, call, msg, flags);
533 break; 533 break;
@@ -640,7 +640,7 @@ int rxrpc_kernel_recv_data(struct socket *sock, struct rxrpc_call *call,
640 640
641 mutex_lock(&call->user_mutex); 641 mutex_lock(&call->user_mutex);
642 642
643 switch (call->state) { 643 switch (READ_ONCE(call->state)) {
644 case RXRPC_CALL_CLIENT_RECV_REPLY: 644 case RXRPC_CALL_CLIENT_RECV_REPLY:
645 case RXRPC_CALL_SERVER_RECV_REQUEST: 645 case RXRPC_CALL_SERVER_RECV_REQUEST:
646 case RXRPC_CALL_SERVER_ACK_REQUEST: 646 case RXRPC_CALL_SERVER_ACK_REQUEST:
diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
index bc2d3dcff9de..97ab214ca411 100644
--- a/net/rxrpc/sendmsg.c
+++ b/net/rxrpc/sendmsg.c
@@ -488,6 +488,7 @@ rxrpc_new_client_call_for_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg,
488int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len) 488int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
489 __releases(&rx->sk.sk_lock.slock) 489 __releases(&rx->sk.sk_lock.slock)
490{ 490{
491 enum rxrpc_call_state state;
491 enum rxrpc_command cmd; 492 enum rxrpc_command cmd;
492 struct rxrpc_call *call; 493 struct rxrpc_call *call;
493 unsigned long user_call_ID = 0; 494 unsigned long user_call_ID = 0;
@@ -526,13 +527,17 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
526 return PTR_ERR(call); 527 return PTR_ERR(call);
527 /* ... and we have the call lock. */ 528 /* ... and we have the call lock. */
528 } else { 529 } else {
529 ret = -EBUSY; 530 switch (READ_ONCE(call->state)) {
530 if (call->state == RXRPC_CALL_UNINITIALISED || 531 case RXRPC_CALL_UNINITIALISED:
531 call->state == RXRPC_CALL_CLIENT_AWAIT_CONN || 532 case RXRPC_CALL_CLIENT_AWAIT_CONN:
532 call->state == RXRPC_CALL_SERVER_PREALLOC || 533 case RXRPC_CALL_SERVER_PREALLOC:
533 call->state == RXRPC_CALL_SERVER_SECURING || 534 case RXRPC_CALL_SERVER_SECURING:
534 call->state == RXRPC_CALL_SERVER_ACCEPTING) 535 case RXRPC_CALL_SERVER_ACCEPTING:
536 ret = -EBUSY;
535 goto error_release_sock; 537 goto error_release_sock;
538 default:
539 break;
540 }
536 541
537 ret = mutex_lock_interruptible(&call->user_mutex); 542 ret = mutex_lock_interruptible(&call->user_mutex);
538 release_sock(&rx->sk); 543 release_sock(&rx->sk);
@@ -542,10 +547,11 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
542 } 547 }
543 } 548 }
544 549
550 state = READ_ONCE(call->state);
545 _debug("CALL %d USR %lx ST %d on CONN %p", 551 _debug("CALL %d USR %lx ST %d on CONN %p",
546 call->debug_id, call->user_call_ID, call->state, call->conn); 552 call->debug_id, call->user_call_ID, state, call->conn);
547 553
548 if (call->state >= RXRPC_CALL_COMPLETE) { 554 if (state >= RXRPC_CALL_COMPLETE) {
549 /* it's too late for this call */ 555 /* it's too late for this call */
550 ret = -ESHUTDOWN; 556 ret = -ESHUTDOWN;
551 } else if (cmd == RXRPC_CMD_SEND_ABORT) { 557 } else if (cmd == RXRPC_CMD_SEND_ABORT) {
@@ -555,12 +561,12 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
555 } else if (cmd != RXRPC_CMD_SEND_DATA) { 561 } else if (cmd != RXRPC_CMD_SEND_DATA) {
556 ret = -EINVAL; 562 ret = -EINVAL;
557 } else if (rxrpc_is_client_call(call) && 563 } else if (rxrpc_is_client_call(call) &&
558 call->state != RXRPC_CALL_CLIENT_SEND_REQUEST) { 564 state != RXRPC_CALL_CLIENT_SEND_REQUEST) {
559 /* request phase complete for this client call */ 565 /* request phase complete for this client call */
560 ret = -EPROTO; 566 ret = -EPROTO;
561 } else if (rxrpc_is_service_call(call) && 567 } else if (rxrpc_is_service_call(call) &&
562 call->state != RXRPC_CALL_SERVER_ACK_REQUEST && 568 state != RXRPC_CALL_SERVER_ACK_REQUEST &&
563 call->state != RXRPC_CALL_SERVER_SEND_REPLY) { 569 state != RXRPC_CALL_SERVER_SEND_REPLY) {
564 /* Reply phase not begun or not complete for service call. */ 570 /* Reply phase not begun or not complete for service call. */
565 ret = -EPROTO; 571 ret = -EPROTO;
566 } else { 572 } else {
@@ -605,14 +611,21 @@ int rxrpc_kernel_send_data(struct socket *sock, struct rxrpc_call *call,
605 _debug("CALL %d USR %lx ST %d on CONN %p", 611 _debug("CALL %d USR %lx ST %d on CONN %p",
606 call->debug_id, call->user_call_ID, call->state, call->conn); 612 call->debug_id, call->user_call_ID, call->state, call->conn);
607 613
608 if (call->state >= RXRPC_CALL_COMPLETE) { 614 switch (READ_ONCE(call->state)) {
609 ret = -ESHUTDOWN; /* it's too late for this call */ 615 case RXRPC_CALL_CLIENT_SEND_REQUEST:
610 } else if (call->state != RXRPC_CALL_CLIENT_SEND_REQUEST && 616 case RXRPC_CALL_SERVER_ACK_REQUEST:
611 call->state != RXRPC_CALL_SERVER_ACK_REQUEST && 617 case RXRPC_CALL_SERVER_SEND_REPLY:
612 call->state != RXRPC_CALL_SERVER_SEND_REPLY) {
613 ret = -EPROTO; /* request phase complete for this client call */
614 } else {
615 ret = rxrpc_send_data(rxrpc_sk(sock->sk), call, msg, len); 618 ret = rxrpc_send_data(rxrpc_sk(sock->sk), call, msg, len);
619 break;
620 case RXRPC_CALL_COMPLETE:
621 read_lock_bh(&call->state_lock);
622 ret = -call->error;
623 read_unlock_bh(&call->state_lock);
624 break;
625 default:
626 /* Request phase complete for this client call */
627 ret = -EPROTO;
628 break;
616 } 629 }
617 630
618 mutex_unlock(&call->user_mutex); 631 mutex_unlock(&call->user_mutex);
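[note] All the rxrpc hunks above share one pattern: call->state is written under call->state_lock but read locklessly on the ingress, recvmsg and sendmsg paths, so each such read becomes READ_ONCE() and, where the value feeds several decisions, is snapshotted into a local. That stops the compiler from re-reading a field another CPU may change mid-decision. Sketch (handle_request_phase is a placeholder):

	/* sketch: snapshot a lockless shared field once, then decide */
	enum rxrpc_call_state state = READ_ONCE(call->state);

	if (state >= RXRPC_CALL_COMPLETE)
		return;				/* one coherent decision */
	if (state == RXRPC_CALL_CLIENT_SEND_REQUEST)
		handle_request_phase(call);	/* based on one read */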
diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c
index ab8062909962..f9bb43c25697 100644
--- a/net/sched/act_connmark.c
+++ b/net/sched/act_connmark.c
@@ -113,6 +113,9 @@ static int tcf_connmark_init(struct net *net, struct nlattr *nla,
113 if (ret < 0) 113 if (ret < 0)
114 return ret; 114 return ret;
115 115
116 if (!tb[TCA_CONNMARK_PARMS])
117 return -EINVAL;
118
116 parm = nla_data(tb[TCA_CONNMARK_PARMS]); 119 parm = nla_data(tb[TCA_CONNMARK_PARMS]);
117 120
118 if (!tcf_hash_check(tn, parm->index, a, bind)) { 121 if (!tcf_hash_check(tn, parm->index, a, bind)) {
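[note] Parsing a nested attribute table only validates the attributes user space chose to send; it does not guarantee that TCA_CONNMARK_PARMS is present, so calling nla_data() on the missing slot would dereference NULL. Checking the slot before use is the standard shape; a sketch (argument list as assumed for this tree):

	/* sketch: a parsed nested-attribute table can have NULL slots */
	err = nla_parse_nested(tb, TCA_CONNMARK_MAX, nla, connmark_policy);
	if (err < 0)
		return err;
	if (!tb[TCA_CONNMARK_PARMS])	/* attribute may simply be absent */
		return -EINVAL;
	parm = nla_data(tb[TCA_CONNMARK_PARMS]);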
diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c
index 3b7074e23024..c736627f8f4a 100644
--- a/net/sched/act_skbmod.c
+++ b/net/sched/act_skbmod.c
@@ -228,7 +228,6 @@ static int tcf_skbmod_dump(struct sk_buff *skb, struct tc_action *a,
228 228
229 return skb->len; 229 return skb->len;
230nla_put_failure: 230nla_put_failure:
231 rcu_read_unlock();
232 nlmsg_trim(skb, b); 231 nlmsg_trim(skb, b);
233 return -1; 232 return -1;
234} 233}
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index 802ac7c2e5e8..5334e309f17f 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -201,9 +201,13 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch,
201 pr_debug("%s(skb %p,sch %p,[qdisc %p])\n", __func__, skb, sch, p); 201 pr_debug("%s(skb %p,sch %p,[qdisc %p])\n", __func__, skb, sch, p);
202 202
203 if (p->set_tc_index) { 203 if (p->set_tc_index) {
204 int wlen = skb_network_offset(skb);
205
204 switch (tc_skb_protocol(skb)) { 206 switch (tc_skb_protocol(skb)) {
205 case htons(ETH_P_IP): 207 case htons(ETH_P_IP):
206 if (skb_cow_head(skb, sizeof(struct iphdr))) 208 wlen += sizeof(struct iphdr);
209 if (!pskb_may_pull(skb, wlen) ||
210 skb_try_make_writable(skb, wlen))
207 goto drop; 211 goto drop;
208 212
209 skb->tc_index = ipv4_get_dsfield(ip_hdr(skb)) 213 skb->tc_index = ipv4_get_dsfield(ip_hdr(skb))
@@ -211,7 +215,9 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch,
211 break; 215 break;
212 216
213 case htons(ETH_P_IPV6): 217 case htons(ETH_P_IPV6):
214 if (skb_cow_head(skb, sizeof(struct ipv6hdr))) 218 wlen += sizeof(struct ipv6hdr);
219 if (!pskb_may_pull(skb, wlen) ||
220 skb_try_make_writable(skb, wlen))
215 goto drop; 221 goto drop;
216 222
217 skb->tc_index = ipv6_get_dsfield(ipv6_hdr(skb)) 223 skb->tc_index = ipv6_get_dsfield(ipv6_hdr(skb))
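[note] skb_cow_head() only guarantees headroom plus an unshared header, but dsmark rewrites the DS field at the network header, which may lie beyond what has been pulled into the linear area. The replacement therefore first makes the span linear (pskb_may_pull) and then writable (skb_try_make_writable), both measured from the network offset. A sketch of that two-step check:

	/* sketch: make the header span linear and safely writable
	 * before editing the IP/IPv6 DS field in place.
	 */
	static int make_header_writable(struct sk_buff *skb,
					unsigned int hdrlen)
	{
		int wlen = skb_network_offset(skb) + hdrlen;

		if (!pskb_may_pull(skb, wlen) ||	/* linearize span */
		    skb_try_make_writable(skb, wlen))	/* un-clone/share */
			return -1;	/* caller drops the skb */
		return 0;
	}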
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 2a6835b4562b..a9708da28eb5 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -71,9 +71,8 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
71{ 71{
72 struct net *net = sock_net(sk); 72 struct net *net = sock_net(sk);
73 struct sctp_sock *sp; 73 struct sctp_sock *sp;
74 int i;
75 sctp_paramhdr_t *p; 74 sctp_paramhdr_t *p;
76 int err; 75 int i;
77 76
78 /* Retrieve the SCTP per socket area. */ 77 /* Retrieve the SCTP per socket area. */
79 sp = sctp_sk((struct sock *)sk); 78 sp = sctp_sk((struct sock *)sk);
@@ -247,6 +246,9 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
247 if (!sctp_ulpq_init(&asoc->ulpq, asoc)) 246 if (!sctp_ulpq_init(&asoc->ulpq, asoc))
248 goto fail_init; 247 goto fail_init;
249 248
249 if (sctp_stream_new(asoc, gfp))
250 goto fail_init;
251
250 /* Assume that peer would support both address types unless we are 252 /* Assume that peer would support both address types unless we are
251 * told otherwise. 253 * told otherwise.
252 */ 254 */
@@ -264,9 +266,8 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
264 266
265 /* AUTH related initializations */ 267 /* AUTH related initializations */
266 INIT_LIST_HEAD(&asoc->endpoint_shared_keys); 268 INIT_LIST_HEAD(&asoc->endpoint_shared_keys);
267 err = sctp_auth_asoc_copy_shkeys(ep, asoc, gfp); 269 if (sctp_auth_asoc_copy_shkeys(ep, asoc, gfp))
268 if (err) 270 goto stream_free;
269 goto fail_init;
270 271
271 asoc->active_key_id = ep->active_key_id; 272 asoc->active_key_id = ep->active_key_id;
272 asoc->prsctp_enable = ep->prsctp_enable; 273 asoc->prsctp_enable = ep->prsctp_enable;
@@ -289,6 +290,8 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
289 290
290 return asoc; 291 return asoc;
291 292
293stream_free:
294 sctp_stream_free(asoc->stream);
292fail_init: 295fail_init:
293 sock_put(asoc->base.sk); 296 sock_put(asoc->base.sk);
294 sctp_endpoint_put(asoc->ep); 297 sctp_endpoint_put(asoc->ep);
@@ -1409,7 +1412,7 @@ sctp_assoc_choose_alter_transport(struct sctp_association *asoc,
1409/* Update the association's pmtu and frag_point by going through all the 1412/* Update the association's pmtu and frag_point by going through all the
1410 * transports. This routine is called when a transport's PMTU has changed. 1413 * transports. This routine is called when a transport's PMTU has changed.
1411 */ 1414 */
1412void sctp_assoc_sync_pmtu(struct sock *sk, struct sctp_association *asoc) 1415void sctp_assoc_sync_pmtu(struct sctp_association *asoc)
1413{ 1416{
1414 struct sctp_transport *t; 1417 struct sctp_transport *t;
1415 __u32 pmtu = 0; 1418 __u32 pmtu = 0;
@@ -1421,8 +1424,8 @@ void sctp_assoc_sync_pmtu(struct sock *sk, struct sctp_association *asoc)
1421 list_for_each_entry(t, &asoc->peer.transport_addr_list, 1424 list_for_each_entry(t, &asoc->peer.transport_addr_list,
1422 transports) { 1425 transports) {
1423 if (t->pmtu_pending && t->dst) { 1426 if (t->pmtu_pending && t->dst) {
1424 sctp_transport_update_pmtu(sk, t, 1427 sctp_transport_update_pmtu(
1425 SCTP_TRUNC4(dst_mtu(t->dst))); 1428 t, SCTP_TRUNC4(dst_mtu(t->dst)));
1426 t->pmtu_pending = 0; 1429 t->pmtu_pending = 0;
1427 } 1430 }
1428 if (!pmtu || (t->pathmtu < pmtu)) 1431 if (!pmtu || (t->pathmtu < pmtu))
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 2a28ab20487f..0e06a278d2a9 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -401,10 +401,10 @@ void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc,
401 401
402 if (t->param_flags & SPP_PMTUD_ENABLE) { 402 if (t->param_flags & SPP_PMTUD_ENABLE) {
403 /* Update transports view of the MTU */ 403 /* Update transports view of the MTU */
404 sctp_transport_update_pmtu(sk, t, pmtu); 404 sctp_transport_update_pmtu(t, pmtu);
405 405
406 /* Update association pmtu. */ 406 /* Update association pmtu. */
407 sctp_assoc_sync_pmtu(sk, asoc); 407 sctp_assoc_sync_pmtu(asoc);
408 } 408 }
409 409
410 /* Retransmit with the new pmtu setting. 410 /* Retransmit with the new pmtu setting.
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 063baac5b9fe..961ee59f696a 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -640,14 +640,15 @@ static sctp_scope_t sctp_v6_scope(union sctp_addr *addr)
640 640
641/* Create and initialize a new sk for the socket to be returned by accept(). */ 641/* Create and initialize a new sk for the socket to be returned by accept(). */
642static struct sock *sctp_v6_create_accept_sk(struct sock *sk, 642static struct sock *sctp_v6_create_accept_sk(struct sock *sk,
643 struct sctp_association *asoc) 643 struct sctp_association *asoc,
644 bool kern)
644{ 645{
645 struct sock *newsk; 646 struct sock *newsk;
646 struct ipv6_pinfo *newnp, *np = inet6_sk(sk); 647 struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
647 struct sctp6_sock *newsctp6sk; 648 struct sctp6_sock *newsctp6sk;
648 struct ipv6_txoptions *opt; 649 struct ipv6_txoptions *opt;
649 650
650 newsk = sk_alloc(sock_net(sk), PF_INET6, GFP_KERNEL, sk->sk_prot, 0); 651 newsk = sk_alloc(sock_net(sk), PF_INET6, GFP_KERNEL, sk->sk_prot, kern);
651 if (!newsk) 652 if (!newsk)
652 goto out; 653 goto out;
653 654
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 71ce6b945dcb..1409a875ad8e 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -86,43 +86,53 @@ void sctp_packet_config(struct sctp_packet *packet, __u32 vtag,
86{ 86{
87 struct sctp_transport *tp = packet->transport; 87 struct sctp_transport *tp = packet->transport;
88 struct sctp_association *asoc = tp->asoc; 88 struct sctp_association *asoc = tp->asoc;
89 struct sock *sk;
89 90
90 pr_debug("%s: packet:%p vtag:0x%x\n", __func__, packet, vtag); 91 pr_debug("%s: packet:%p vtag:0x%x\n", __func__, packet, vtag);
91
92 packet->vtag = vtag; 92 packet->vtag = vtag;
93 93
94 if (asoc && tp->dst) { 94 /* do the following jobs only once for a flush schedule */
95 struct sock *sk = asoc->base.sk; 95 if (!sctp_packet_empty(packet))
96 96 return;
97 rcu_read_lock();
98 if (__sk_dst_get(sk) != tp->dst) {
99 dst_hold(tp->dst);
100 sk_setup_caps(sk, tp->dst);
101 }
102
103 if (sk_can_gso(sk)) {
104 struct net_device *dev = tp->dst->dev;
105 97
106 packet->max_size = dev->gso_max_size; 98 /* set packet max_size with pathmtu */
107 } else { 99 packet->max_size = tp->pathmtu;
108 packet->max_size = asoc->pathmtu; 100 if (!asoc)
109 } 101 return;
110 rcu_read_unlock();
111 102
112 } else { 103 /* update dst or transport pathmtu if in need */
113 packet->max_size = tp->pathmtu; 104 sk = asoc->base.sk;
105 if (!sctp_transport_dst_check(tp)) {
106 sctp_transport_route(tp, NULL, sctp_sk(sk));
107 if (asoc->param_flags & SPP_PMTUD_ENABLE)
108 sctp_assoc_sync_pmtu(asoc);
109 } else if (!sctp_transport_pmtu_check(tp)) {
110 if (asoc->param_flags & SPP_PMTUD_ENABLE)
111 sctp_assoc_sync_pmtu(asoc);
114 } 112 }
115 113
116 if (ecn_capable && sctp_packet_empty(packet)) { 114 /* If there a is a prepend chunk stick it on the list before
117 struct sctp_chunk *chunk; 115 * any other chunks get appended.
116 */
117 if (ecn_capable) {
118 struct sctp_chunk *chunk = sctp_get_ecne_prepend(asoc);
118 119
119 /* If there a is a prepend chunk stick it on the list before
120 * any other chunks get appended.
121 */
122 chunk = sctp_get_ecne_prepend(asoc);
123 if (chunk) 120 if (chunk)
124 sctp_packet_append_chunk(packet, chunk); 121 sctp_packet_append_chunk(packet, chunk);
125 } 122 }
123
124 if (!tp->dst)
125 return;
126
127 /* set packet max_size with gso_max_size if gso is enabled*/
128 rcu_read_lock();
129 if (__sk_dst_get(sk) != tp->dst) {
130 dst_hold(tp->dst);
131 sk_setup_caps(sk, tp->dst);
132 }
133 packet->max_size = sk_can_gso(sk) ? tp->dst->dev->gso_max_size
134 : asoc->pathmtu;
135 rcu_read_unlock();
126} 136}
127 137
128/* Initialize the packet structure. */ 138/* Initialize the packet structure. */
@@ -546,7 +556,6 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
546 struct sctp_association *asoc = tp->asoc; 556 struct sctp_association *asoc = tp->asoc;
547 struct sctp_chunk *chunk, *tmp; 557 struct sctp_chunk *chunk, *tmp;
548 int pkt_count, gso = 0; 558 int pkt_count, gso = 0;
549 int confirm;
550 struct dst_entry *dst; 559 struct dst_entry *dst;
551 struct sk_buff *head; 560 struct sk_buff *head;
552 struct sctphdr *sh; 561 struct sctphdr *sh;
@@ -583,12 +592,7 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
583 sh->vtag = htonl(packet->vtag); 592 sh->vtag = htonl(packet->vtag);
584 sh->checksum = 0; 593 sh->checksum = 0;
585 594
586 /* update dst if in need */ 595 /* drop packet if no dst */
587 if (!sctp_transport_dst_check(tp)) {
588 sctp_transport_route(tp, NULL, sctp_sk(sk));
589 if (asoc && asoc->param_flags & SPP_PMTUD_ENABLE)
590 sctp_assoc_sync_pmtu(sk, asoc);
591 }
592 dst = dst_clone(tp->dst); 596 dst = dst_clone(tp->dst);
593 if (!dst) { 597 if (!dst) {
594 IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES); 598 IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
@@ -625,13 +629,13 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
625 asoc->peer.last_sent_to = tp; 629 asoc->peer.last_sent_to = tp;
626 } 630 }
627 head->ignore_df = packet->ipfragok; 631 head->ignore_df = packet->ipfragok;
628 confirm = tp->dst_pending_confirm; 632 if (tp->dst_pending_confirm)
629 if (confirm)
630 skb_set_dst_pending_confirm(head, 1); 633 skb_set_dst_pending_confirm(head, 1);
631 /* neighbour should be confirmed on successful transmission or 634 /* neighbour should be confirmed on successful transmission or
632 * positive error 635 * positive error
633 */ 636 */
634 if (tp->af_specific->sctp_xmit(head, tp) >= 0 && confirm) 637 if (tp->af_specific->sctp_xmit(head, tp) >= 0 &&
638 tp->dst_pending_confirm)
635 tp->dst_pending_confirm = 0; 639 tp->dst_pending_confirm = 0;
636 640
637out: 641out:
@@ -705,7 +709,7 @@ static sctp_xmit_t sctp_packet_can_append_data(struct sctp_packet *packet,
705 */ 709 */
706 710
707 if ((sctp_sk(asoc->base.sk)->nodelay || inflight == 0) && 711 if ((sctp_sk(asoc->base.sk)->nodelay || inflight == 0) &&
708 !chunk->msg->force_delay) 712 !asoc->force_delay)
709 /* Nothing unacked */ 713 /* Nothing unacked */
710 return SCTP_XMIT_OK; 714 return SCTP_XMIT_OK;
711 715
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index db352e5d61f8..8081476ed313 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -382,17 +382,18 @@ static int sctp_prsctp_prune_sent(struct sctp_association *asoc,
382} 382}
383 383
384static int sctp_prsctp_prune_unsent(struct sctp_association *asoc, 384static int sctp_prsctp_prune_unsent(struct sctp_association *asoc,
385 struct sctp_sndrcvinfo *sinfo, 385 struct sctp_sndrcvinfo *sinfo, int msg_len)
386 struct list_head *queue, int msg_len)
387{ 386{
387 struct sctp_outq *q = &asoc->outqueue;
388 struct sctp_chunk *chk, *temp; 388 struct sctp_chunk *chk, *temp;
389 389
390 list_for_each_entry_safe(chk, temp, queue, list) { 390 list_for_each_entry_safe(chk, temp, &q->out_chunk_list, list) {
391 if (!SCTP_PR_PRIO_ENABLED(chk->sinfo.sinfo_flags) || 391 if (!SCTP_PR_PRIO_ENABLED(chk->sinfo.sinfo_flags) ||
392 chk->sinfo.sinfo_timetolive <= sinfo->sinfo_timetolive) 392 chk->sinfo.sinfo_timetolive <= sinfo->sinfo_timetolive)
393 continue; 393 continue;
394 394
395 list_del_init(&chk->list); 395 list_del_init(&chk->list);
396 q->out_qlen -= chk->skb->len;
396 asoc->sent_cnt_removable--; 397 asoc->sent_cnt_removable--;
397 asoc->abandoned_unsent[SCTP_PR_INDEX(PRIO)]++; 398 asoc->abandoned_unsent[SCTP_PR_INDEX(PRIO)]++;
398 399
@@ -431,9 +432,7 @@ void sctp_prsctp_prune(struct sctp_association *asoc,
431 return; 432 return;
432 } 433 }
433 434
434 sctp_prsctp_prune_unsent(asoc, sinfo, 435 sctp_prsctp_prune_unsent(asoc, sinfo, msg_len);
435 &asoc->outqueue.out_chunk_list,
436 msg_len);
437} 436}
438 437
439/* Mark all the eligible packets on a transport for retransmission. */ 438/* Mark all the eligible packets on a transport for retransmission. */
@@ -1027,8 +1026,7 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
1027 /* RFC 2960 6.5 Every DATA chunk MUST carry a valid 1026 /* RFC 2960 6.5 Every DATA chunk MUST carry a valid
1028 * stream identifier. 1027 * stream identifier.
1029 */ 1028 */
1030 if (chunk->sinfo.sinfo_stream >= 1029 if (chunk->sinfo.sinfo_stream >= asoc->stream->outcnt) {
1031 asoc->c.sinit_num_ostreams) {
1032 1030
1033 /* Mark as failed send. */ 1031 /* Mark as failed send. */
1034 sctp_chunk_fail(chunk, SCTP_ERROR_INV_STRM); 1032 sctp_chunk_fail(chunk, SCTP_ERROR_INV_STRM);
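The prune_unsent rework above does two things: it derives the outqueue from the association instead of taking the list as a parameter, and it starts charging q->out_qlen for every unsent chunk it abandons ("q->out_qlen -= chk->skb->len"). A minimal userspace sketch of that second point, pruning a pending list while keeping the queued-byte counter in sync (struct and field names here are illustrative, not the kernel's):

    #include <stdio.h>
    #include <stdlib.h>

    /* Illustrative stand-ins for the kernel structures. */
    struct chunk {
            size_t len;             /* bytes this chunk adds to the queue */
            unsigned int ttl;       /* abandon threshold, like sinfo_timetolive */
            struct chunk *next;
    };

    struct outq {
            struct chunk *head;
            size_t out_qlen;        /* must equal the sum of queued lens */
    };

    /* Drop every chunk whose ttl exceeds the new message's ttl, keeping the
     * byte counter in step with the list, which is the accounting the patch
     * adds with "q->out_qlen -= chk->skb->len". */
    static void prune_unsent(struct outq *q, unsigned int new_ttl)
    {
            struct chunk **pp = &q->head;

            while (*pp) {
                    struct chunk *c = *pp;

                    if (c->ttl <= new_ttl) {        /* not prunable, keep it */
                            pp = &c->next;
                            continue;
                    }
                    *pp = c->next;                  /* unlink... */
                    q->out_qlen -= c->len;          /* ...and un-account it */
                    free(c);
            }
    }

    int main(void)
    {
            struct outq q = { NULL, 0 };

            for (int i = 0; i < 4; i++) {
                    struct chunk *c = malloc(sizeof(*c));
                    c->len = 100;
                    c->ttl = i;
                    c->next = q.head;
                    q.head = c;
                    q.out_qlen += c->len;
            }
            prune_unsent(&q, 1);
            printf("out_qlen after prune: %zu\n", q.out_qlen); /* 200 */
            while (q.head) {
                    struct chunk *c = q.head;
                    q.head = c->next;
                    free(c);
            }
            return 0;
    }

Without the counter adjustment the queue's byte count drifts away from what the list actually holds, which is the accounting bug this hunk closes.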
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index 206377fe91ec..a0b29d43627f 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -361,8 +361,8 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
361 sctp_seq_dump_remote_addrs(seq, assoc); 361 sctp_seq_dump_remote_addrs(seq, assoc);
362 seq_printf(seq, "\t%8lu %5d %5d %4d %4d %4d %8d " 362 seq_printf(seq, "\t%8lu %5d %5d %4d %4d %4d %8d "
363 "%8d %8d %8d %8d", 363 "%8d %8d %8d %8d",
364 assoc->hbinterval, assoc->c.sinit_max_instreams, 364 assoc->hbinterval, assoc->stream->incnt,
365 assoc->c.sinit_num_ostreams, assoc->max_retrans, 365 assoc->stream->outcnt, assoc->max_retrans,
366 assoc->init_retries, assoc->shutdown_retries, 366 assoc->init_retries, assoc->shutdown_retries,
367 assoc->rtx_data_chunks, 367 assoc->rtx_data_chunks,
368 atomic_read(&sk->sk_wmem_alloc), 368 atomic_read(&sk->sk_wmem_alloc),
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 1b6d4574d2b0..989a900383b5 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -575,10 +575,11 @@ static int sctp_v4_is_ce(const struct sk_buff *skb)
575 575
576/* Create and initialize a new sk for the socket returned by accept(). */ 576/* Create and initialize a new sk for the socket returned by accept(). */
577static struct sock *sctp_v4_create_accept_sk(struct sock *sk, 577static struct sock *sctp_v4_create_accept_sk(struct sock *sk,
578 struct sctp_association *asoc) 578 struct sctp_association *asoc,
579 bool kern)
579{ 580{
580 struct sock *newsk = sk_alloc(sock_net(sk), PF_INET, GFP_KERNEL, 581 struct sock *newsk = sk_alloc(sock_net(sk), PF_INET, GFP_KERNEL,
581 sk->sk_prot, 0); 582 sk->sk_prot, kern);
582 struct inet_sock *newinet; 583 struct inet_sock *newinet;
583 584
584 if (!newsk) 585 if (!newsk)
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 969a30c7bb54..118faff6a332 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -2460,15 +2460,10 @@ int sctp_process_init(struct sctp_association *asoc, struct sctp_chunk *chunk,
2460 * association. 2460 * association.
2461 */ 2461 */
2462 if (!asoc->temp) { 2462 if (!asoc->temp) {
2463 int error; 2463 if (sctp_stream_init(asoc, gfp))
2464
2465 asoc->stream = sctp_stream_new(asoc->c.sinit_max_instreams,
2466 asoc->c.sinit_num_ostreams, gfp);
2467 if (!asoc->stream)
2468 goto clean_up; 2464 goto clean_up;
2469 2465
2470 error = sctp_assoc_set_id(asoc, gfp); 2466 if (sctp_assoc_set_id(asoc, gfp))
2471 if (error)
2472 goto clean_up; 2467 goto clean_up;
2473 } 2468 }
2474 2469
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index e03bb1aab4d0..24c6ccce7539 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -3946,7 +3946,7 @@ sctp_disposition_t sctp_sf_eat_fwd_tsn(struct net *net,
3946 3946
3947 /* Silently discard the chunk if stream-id is not valid */ 3947 /* Silently discard the chunk if stream-id is not valid */
3948 sctp_walk_fwdtsn(skip, chunk) { 3948 sctp_walk_fwdtsn(skip, chunk) {
3949 if (ntohs(skip->stream) >= asoc->c.sinit_max_instreams) 3949 if (ntohs(skip->stream) >= asoc->stream->incnt)
3950 goto discard_noforce; 3950 goto discard_noforce;
3951 } 3951 }
3952 3952
@@ -4017,7 +4017,7 @@ sctp_disposition_t sctp_sf_eat_fwd_tsn_fast(
4017 4017
4018 /* Silently discard the chunk if stream-id is not valid */ 4018 /* Silently discard the chunk if stream-id is not valid */
4019 sctp_walk_fwdtsn(skip, chunk) { 4019 sctp_walk_fwdtsn(skip, chunk) {
4020 if (ntohs(skip->stream) >= asoc->c.sinit_max_instreams) 4020 if (ntohs(skip->stream) >= asoc->stream->incnt)
4021 goto gen_shutdown; 4021 goto gen_shutdown;
4022 } 4022 }
4023 4023
@@ -6353,7 +6353,7 @@ static int sctp_eat_data(const struct sctp_association *asoc,
6353 * and discard the DATA chunk. 6353 * and discard the DATA chunk.
6354 */ 6354 */
6355 sid = ntohs(data_hdr->stream); 6355 sid = ntohs(data_hdr->stream);
6356 if (sid >= asoc->c.sinit_max_instreams) { 6356 if (sid >= asoc->stream->incnt) {
6357 /* Mark tsn as received even though we drop it */ 6357 /* Mark tsn as received even though we drop it */
6358 sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_TSN, SCTP_U32(tsn)); 6358 sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_TSN, SCTP_U32(tsn));
6359 6359
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 6f0a9be50f50..c1401f43d40f 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -1907,7 +1907,7 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
1907 } 1907 }
1908 1908
1909 if (asoc->pmtu_pending) 1909 if (asoc->pmtu_pending)
1910 sctp_assoc_pending_pmtu(sk, asoc); 1910 sctp_assoc_pending_pmtu(asoc);
1911 1911
1912 /* If fragmentation is disabled and the message length exceeds the 1912 /* If fragmentation is disabled and the message length exceeds the
1913 * association fragmentation point, return EMSGSIZE. The I-D 1913 * association fragmentation point, return EMSGSIZE. The I-D
@@ -1920,7 +1920,7 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
1920 } 1920 }
1921 1921
1922 /* Check for invalid stream. */ 1922 /* Check for invalid stream. */
1923 if (sinfo->sinfo_stream >= asoc->c.sinit_num_ostreams) { 1923 if (sinfo->sinfo_stream >= asoc->stream->outcnt) {
1924 err = -EINVAL; 1924 err = -EINVAL;
1925 goto out_free; 1925 goto out_free;
1926 } 1926 }
@@ -1965,7 +1965,7 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
1965 err = PTR_ERR(datamsg); 1965 err = PTR_ERR(datamsg);
1966 goto out_free; 1966 goto out_free;
1967 } 1967 }
1968 datamsg->force_delay = !!(msg->msg_flags & MSG_MORE); 1968 asoc->force_delay = !!(msg->msg_flags & MSG_MORE);
1969 1969
1970 /* Now send the (possibly) fragmented message. */ 1970 /* Now send the (possibly) fragmented message. */
1971 list_for_each_entry(chunk, &datamsg->chunks, frag_list) { 1971 list_for_each_entry(chunk, &datamsg->chunks, frag_list) {
@@ -2435,7 +2435,7 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
2435 if ((params->spp_flags & SPP_PMTUD_DISABLE) && params->spp_pathmtu) { 2435 if ((params->spp_flags & SPP_PMTUD_DISABLE) && params->spp_pathmtu) {
2436 if (trans) { 2436 if (trans) {
2437 trans->pathmtu = params->spp_pathmtu; 2437 trans->pathmtu = params->spp_pathmtu;
2438 sctp_assoc_sync_pmtu(sctp_opt2sk(sp), asoc); 2438 sctp_assoc_sync_pmtu(asoc);
2439 } else if (asoc) { 2439 } else if (asoc) {
2440 asoc->pathmtu = params->spp_pathmtu; 2440 asoc->pathmtu = params->spp_pathmtu;
2441 } else { 2441 } else {
@@ -2451,7 +2451,7 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
2451 (trans->param_flags & ~SPP_PMTUD) | pmtud_change; 2451 (trans->param_flags & ~SPP_PMTUD) | pmtud_change;
2452 if (update) { 2452 if (update) {
2453 sctp_transport_pmtu(trans, sctp_opt2sk(sp)); 2453 sctp_transport_pmtu(trans, sctp_opt2sk(sp));
2454 sctp_assoc_sync_pmtu(sctp_opt2sk(sp), asoc); 2454 sctp_assoc_sync_pmtu(asoc);
2455 } 2455 }
2456 } else if (asoc) { 2456 } else if (asoc) {
2457 asoc->param_flags = 2457 asoc->param_flags =
@@ -4116,7 +4116,7 @@ static int sctp_disconnect(struct sock *sk, int flags)
4116 * descriptor will be returned from accept() to represent the newly 4116 * descriptor will be returned from accept() to represent the newly
4117 * formed association. 4117 * formed association.
4118 */ 4118 */
4119static struct sock *sctp_accept(struct sock *sk, int flags, int *err) 4119static struct sock *sctp_accept(struct sock *sk, int flags, int *err, bool kern)
4120{ 4120{
4121 struct sctp_sock *sp; 4121 struct sctp_sock *sp;
4122 struct sctp_endpoint *ep; 4122 struct sctp_endpoint *ep;
@@ -4151,7 +4151,7 @@ static struct sock *sctp_accept(struct sock *sk, int flags, int *err)
4151 */ 4151 */
4152 asoc = list_entry(ep->asocs.next, struct sctp_association, asocs); 4152 asoc = list_entry(ep->asocs.next, struct sctp_association, asocs);
4153 4153
4154 newsk = sp->pf->create_accept_sk(sk, asoc); 4154 newsk = sp->pf->create_accept_sk(sk, asoc, kern);
4155 if (!newsk) { 4155 if (!newsk) {
4156 error = -ENOMEM; 4156 error = -ENOMEM;
4157 goto out; 4157 goto out;
@@ -4461,8 +4461,8 @@ int sctp_get_sctp_info(struct sock *sk, struct sctp_association *asoc,
4461 info->sctpi_rwnd = asoc->a_rwnd; 4461 info->sctpi_rwnd = asoc->a_rwnd;
4462 info->sctpi_unackdata = asoc->unack_data; 4462 info->sctpi_unackdata = asoc->unack_data;
4463 info->sctpi_penddata = sctp_tsnmap_pending(&asoc->peer.tsn_map); 4463 info->sctpi_penddata = sctp_tsnmap_pending(&asoc->peer.tsn_map);
4464 info->sctpi_instrms = asoc->c.sinit_max_instreams; 4464 info->sctpi_instrms = asoc->stream->incnt;
4465 info->sctpi_outstrms = asoc->c.sinit_num_ostreams; 4465 info->sctpi_outstrms = asoc->stream->outcnt;
4466 list_for_each(pos, &asoc->base.inqueue.in_chunk_list) 4466 list_for_each(pos, &asoc->base.inqueue.in_chunk_list)
4467 info->sctpi_inqueue++; 4467 info->sctpi_inqueue++;
4468 list_for_each(pos, &asoc->outqueue.out_chunk_list) 4468 list_for_each(pos, &asoc->outqueue.out_chunk_list)
@@ -4691,8 +4691,8 @@ static int sctp_getsockopt_sctp_status(struct sock *sk, int len,
4691 status.sstat_unackdata = asoc->unack_data; 4691 status.sstat_unackdata = asoc->unack_data;
4692 4692
4693 status.sstat_penddata = sctp_tsnmap_pending(&asoc->peer.tsn_map); 4693 status.sstat_penddata = sctp_tsnmap_pending(&asoc->peer.tsn_map);
4694 status.sstat_instrms = asoc->c.sinit_max_instreams; 4694 status.sstat_instrms = asoc->stream->incnt;
4695 status.sstat_outstrms = asoc->c.sinit_num_ostreams; 4695 status.sstat_outstrms = asoc->stream->outcnt;
4696 status.sstat_fragmentation_point = asoc->frag_point; 4696 status.sstat_fragmentation_point = asoc->frag_point;
4697 status.sstat_primary.spinfo_assoc_id = sctp_assoc2id(transport->asoc); 4697 status.sstat_primary.spinfo_assoc_id = sctp_assoc2id(transport->asoc);
4698 memcpy(&status.sstat_primary.spinfo_address, &transport->ipaddr, 4698 memcpy(&status.sstat_primary.spinfo_address, &transport->ipaddr,
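The socket.c hunks above follow the same theme as the proc.c and sm_statefuns.c changes: every user-visible stream bound that used to come from the INIT request (sinit_num_ostreams / sinit_max_instreams) now comes from the negotiated counts in asoc->stream. The effect is easiest to see on the sendmsg check; a toy version (hypothetical names, not kernel API):

    #include <stdio.h>
    #include <stdint.h>
    #include <errno.h>

    struct toy_assoc {
            uint16_t requested_ostreams;    /* what we asked for in INIT */
            uint16_t outcnt;                /* what the peer agreed to */
    };

    /* The patched check bounds the stream id by the negotiated count,
     * not by the (possibly larger) requested one. */
    static int check_stream(const struct toy_assoc *a, uint16_t sid)
    {
            return sid < a->outcnt ? 0 : -EINVAL;
    }

    int main(void)
    {
            struct toy_assoc a = { .requested_ostreams = 100, .outcnt = 10 };

            /* Stream 50 passed the old request-based check but could index
             * past a 10-entry negotiated stream array; it is now rejected. */
            printf("sid 50 -> %d, sid 5 -> %d\n",
                   check_stream(&a, 50), check_stream(&a, 5));
            return 0;
    }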
diff --git a/net/sctp/stream.c b/net/sctp/stream.c
index 1c6cc04fa3a4..bbed997e1c5f 100644
--- a/net/sctp/stream.c
+++ b/net/sctp/stream.c
@@ -35,33 +35,60 @@
35#include <net/sctp/sctp.h> 35#include <net/sctp/sctp.h>
36#include <net/sctp/sm.h> 36#include <net/sctp/sm.h>
37 37
38struct sctp_stream *sctp_stream_new(__u16 incnt, __u16 outcnt, gfp_t gfp) 38int sctp_stream_new(struct sctp_association *asoc, gfp_t gfp)
39{ 39{
40 struct sctp_stream *stream; 40 struct sctp_stream *stream;
41 int i; 41 int i;
42 42
43 stream = kzalloc(sizeof(*stream), gfp); 43 stream = kzalloc(sizeof(*stream), gfp);
44 if (!stream) 44 if (!stream)
45 return NULL; 45 return -ENOMEM;
46 46
47 stream->outcnt = outcnt; 47 stream->outcnt = asoc->c.sinit_num_ostreams;
48 stream->out = kcalloc(stream->outcnt, sizeof(*stream->out), gfp); 48 stream->out = kcalloc(stream->outcnt, sizeof(*stream->out), gfp);
49 if (!stream->out) { 49 if (!stream->out) {
50 kfree(stream); 50 kfree(stream);
51 return NULL; 51 return -ENOMEM;
52 } 52 }
53 for (i = 0; i < stream->outcnt; i++) 53 for (i = 0; i < stream->outcnt; i++)
54 stream->out[i].state = SCTP_STREAM_OPEN; 54 stream->out[i].state = SCTP_STREAM_OPEN;
55 55
56 stream->incnt = incnt; 56 asoc->stream = stream;
57
58 return 0;
59}
60
61int sctp_stream_init(struct sctp_association *asoc, gfp_t gfp)
62{
63 struct sctp_stream *stream = asoc->stream;
64 int i;
65
66 /* Initial stream->out size may be very big, so free it and alloc
67 * a new one with new outcnt to save memory.
68 */
69 kfree(stream->out);
70 stream->outcnt = asoc->c.sinit_num_ostreams;
71 stream->out = kcalloc(stream->outcnt, sizeof(*stream->out), gfp);
72 if (!stream->out)
73 goto nomem;
74
75 for (i = 0; i < stream->outcnt; i++)
76 stream->out[i].state = SCTP_STREAM_OPEN;
77
78 stream->incnt = asoc->c.sinit_max_instreams;
57 stream->in = kcalloc(stream->incnt, sizeof(*stream->in), gfp); 79 stream->in = kcalloc(stream->incnt, sizeof(*stream->in), gfp);
58 if (!stream->in) { 80 if (!stream->in) {
59 kfree(stream->out); 81 kfree(stream->out);
60 kfree(stream); 82 goto nomem;
61 return NULL;
62 } 83 }
63 84
64 return stream; 85 return 0;
86
87nomem:
88 asoc->stream = NULL;
89 kfree(stream);
90
91 return -ENOMEM;
65} 92}
66 93
67void sctp_stream_free(struct sctp_stream *stream) 94void sctp_stream_free(struct sctp_stream *stream)
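The new/init split above allocates the outbound stream array twice on purpose: sctp_stream_new() sizes ->out from the locally requested sinit_num_ostreams when the association is created, and sctp_stream_init() later frees that array and reallocates it with the count the peer actually accepted, as the in-code comment notes, to save memory. A rough userspace analogue of the allocate-then-shrink idea (names illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    struct stream_out { int state; };

    struct stream {
            struct stream_out *out;
            unsigned short outcnt;
    };

    /* First pass: size the array from what we asked for. */
    static int stream_new(struct stream *s, unsigned short requested)
    {
            s->outcnt = requested;
            s->out = calloc(s->outcnt, sizeof(*s->out));
            return s->out ? 0 : -1;
    }

    /* Second pass: the peer's INIT told us how many streams we really get;
     * drop the possibly huge first array and allocate the real one. */
    static int stream_init(struct stream *s, unsigned short negotiated)
    {
            free(s->out);
            s->outcnt = negotiated;
            s->out = calloc(s->outcnt, sizeof(*s->out));
            return s->out ? 0 : -1;
    }

    int main(void)
    {
            struct stream s;

            if (stream_new(&s, 65535))      /* optimistic request */
                    return 1;
            if (stream_init(&s, 10))        /* what the peer agreed to */
                    return 1;
            printf("kept %u out streams\n", s.outcnt);
            free(s.out);
            return 0;
    }

Note the nomem path in the real patch also clears asoc->stream before freeing, so a failed init cannot leave a dangling pointer behind.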
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 3379668af368..721eeebfcd8a 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -251,14 +251,13 @@ void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk)
251 transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT; 251 transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
252} 252}
253 253
254void sctp_transport_update_pmtu(struct sock *sk, struct sctp_transport *t, u32 pmtu) 254void sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
255{ 255{
256 struct dst_entry *dst; 256 struct dst_entry *dst = sctp_transport_dst_check(t);
257 257
258 if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) { 258 if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) {
259 pr_warn("%s: Reported pmtu %d too low, using default minimum of %d\n", 259 pr_warn("%s: Reported pmtu %d too low, using default minimum of %d\n",
260 __func__, pmtu, 260 __func__, pmtu, SCTP_DEFAULT_MINSEGMENT);
261 SCTP_DEFAULT_MINSEGMENT);
262 /* Use default minimum segment size and disable 261 /* Use default minimum segment size and disable
263 * pmtu discovery on this transport. 262 * pmtu discovery on this transport.
264 */ 263 */
@@ -267,17 +266,13 @@ void sctp_transport_update_pmtu(struct sock *sk, struct sctp_transport *t, u32 p
267 t->pathmtu = pmtu; 266 t->pathmtu = pmtu;
268 } 267 }
269 268
270 dst = sctp_transport_dst_check(t);
271 if (!dst)
272 t->af_specific->get_dst(t, &t->saddr, &t->fl, sk);
273
274 if (dst) { 269 if (dst) {
275 dst->ops->update_pmtu(dst, sk, NULL, pmtu); 270 dst->ops->update_pmtu(dst, t->asoc->base.sk, NULL, pmtu);
276
277 dst = sctp_transport_dst_check(t); 271 dst = sctp_transport_dst_check(t);
278 if (!dst)
279 t->af_specific->get_dst(t, &t->saddr, &t->fl, sk);
280 } 272 }
273
274 if (!dst)
275 t->af_specific->get_dst(t, &t->saddr, &t->fl, t->asoc->base.sk);
281} 276}
282 277
283/* Caches the dst entry and source address for a transport's destination 278/* Caches the dst entry and source address for a transport's destination
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 85837ab90e89..093803786eac 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -944,7 +944,7 @@ out:
944} 944}
945 945
946static int smc_accept(struct socket *sock, struct socket *new_sock, 946static int smc_accept(struct socket *sock, struct socket *new_sock,
947 int flags) 947 int flags, bool kern)
948{ 948{
949 struct sock *sk = sock->sk, *nsk; 949 struct sock *sk = sock->sk, *nsk;
950 DECLARE_WAITQUEUE(wait, current); 950 DECLARE_WAITQUEUE(wait, current);
diff --git a/net/socket.c b/net/socket.c
index 2c1e8677ff2d..985ef06792d6 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -652,6 +652,16 @@ int kernel_sendmsg(struct socket *sock, struct msghdr *msg,
652} 652}
653EXPORT_SYMBOL(kernel_sendmsg); 653EXPORT_SYMBOL(kernel_sendmsg);
654 654
655static bool skb_is_err_queue(const struct sk_buff *skb)
656{
657 /* pkt_type of skbs enqueued on the error queue are set to
658 * PACKET_OUTGOING in skb_set_err_queue(). This is only safe to do
659 * in recvmsg, since skbs received on a local socket will never
660 * have a pkt_type of PACKET_OUTGOING.
661 */
662 return skb->pkt_type == PACKET_OUTGOING;
663}
664
655/* 665/*
656 * called from sock_recv_timestamp() if sock_flag(sk, SOCK_RCVTSTAMP) 666 * called from sock_recv_timestamp() if sock_flag(sk, SOCK_RCVTSTAMP)
657 */ 667 */
@@ -695,7 +705,8 @@ void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
695 put_cmsg(msg, SOL_SOCKET, 705 put_cmsg(msg, SOL_SOCKET,
696 SCM_TIMESTAMPING, sizeof(tss), &tss); 706 SCM_TIMESTAMPING, sizeof(tss), &tss);
697 707
698 if (skb->len && (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_STATS)) 708 if (skb_is_err_queue(skb) && skb->len &&
709 SKB_EXT_ERR(skb)->opt_stats)
699 put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMPING_OPT_STATS, 710 put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMPING_OPT_STATS,
700 skb->len, skb->data); 711 skb->len, skb->data);
701 } 712 }
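skb_is_err_queue() above lets __sock_recv_timestamp() attach the SCM_TIMESTAMPING_OPT_STATS control message only to genuine error-queue skbs that actually carry opt_stats, rather than keying on the socket flag alone. On the consuming side, a hedged sketch of how userspace would read that cmsg, assuming a Linux kernel new enough to support SOF_TIMESTAMPING_OPT_STATS (4.10+); the fallback defines cover older headers:

    #include <stdio.h>
    #include <sys/socket.h>
    #include <sys/uio.h>
    #include <linux/net_tstamp.h>

    #ifndef SO_TIMESTAMPING
    #define SO_TIMESTAMPING 37
    #endif
    #ifndef SCM_TIMESTAMPING_OPT_STATS
    #define SCM_TIMESTAMPING_OPT_STATS 54
    #endif

    /* Drain one error-queue message and report whether the kernel attached
     * a stats cmsg: payload length if present, 0 if absent, -1 if the
     * queue was empty. */
    static int read_opt_stats(int fd)
    {
            char data[512], control[512];
            struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
            struct msghdr msg = {
                    .msg_iov = &iov, .msg_iovlen = 1,
                    .msg_control = control, .msg_controllen = sizeof(control),
            };
            struct cmsghdr *cm;

            if (recvmsg(fd, &msg, MSG_ERRQUEUE | MSG_DONTWAIT) < 0)
                    return -1;

            for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm))
                    if (cm->cmsg_level == SOL_SOCKET &&
                        cm->cmsg_type == SCM_TIMESTAMPING_OPT_STATS)
                            /* payload: a run of netlink TCP_NLA_* attrs */
                            return (int)(cm->cmsg_len - CMSG_LEN(0));
            return 0;
    }

    int main(void)
    {
            int fd = socket(AF_INET, SOCK_STREAM, 0);
            unsigned int val = SOF_TIMESTAMPING_TX_ACK |
                               SOF_TIMESTAMPING_SOFTWARE |
                               SOF_TIMESTAMPING_OPT_STATS;

            if (setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &val, sizeof(val)))
                    perror("SO_TIMESTAMPING"); /* kernel too old, most likely */
            printf("stats bytes: %d\n", read_opt_stats(fd));
            return 0;
    }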
@@ -1506,7 +1517,7 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
1506 if (err) 1517 if (err)
1507 goto out_fd; 1518 goto out_fd;
1508 1519
1509 err = sock->ops->accept(sock, newsock, sock->file->f_flags); 1520 err = sock->ops->accept(sock, newsock, sock->file->f_flags, false);
1510 if (err < 0) 1521 if (err < 0)
1511 goto out_fd; 1522 goto out_fd;
1512 1523
@@ -1731,6 +1742,7 @@ SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size,
1731 /* We assume all kernel code knows the size of sockaddr_storage */ 1742 /* We assume all kernel code knows the size of sockaddr_storage */
1732 msg.msg_namelen = 0; 1743 msg.msg_namelen = 0;
1733 msg.msg_iocb = NULL; 1744 msg.msg_iocb = NULL;
1745 msg.msg_flags = 0;
1734 if (sock->file->f_flags & O_NONBLOCK) 1746 if (sock->file->f_flags & O_NONBLOCK)
1735 flags |= MSG_DONTWAIT; 1747 flags |= MSG_DONTWAIT;
1736 err = sock_recvmsg(sock, &msg, flags); 1748 err = sock_recvmsg(sock, &msg, flags);
@@ -3238,7 +3250,7 @@ int kernel_accept(struct socket *sock, struct socket **newsock, int flags)
3238 if (err < 0) 3250 if (err < 0)
3239 goto done; 3251 goto done;
3240 3252
3241 err = sock->ops->accept(sock, *newsock, flags); 3253 err = sock->ops->accept(sock, *newsock, flags, true);
3242 if (err < 0) { 3254 if (err < 0) {
3243 sock_release(*newsock); 3255 sock_release(*newsock);
3244 *newsock = NULL; 3256 *newsock = NULL;
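The new bool on ->accept(), visible in the smc, sctp, tipc, unix, vsock and x25 hunks of this series, exists so that sockets accepted by in-kernel users (kernel_accept() passes true) can be allocated differently from ones accepted via the accept4() syscall (false); the sctp hunk shows this by forwarding the flag straight into sk_alloc(). A toy illustration of threading such a flag through an ops table (names hypothetical):

    #include <stdbool.h>
    #include <stdio.h>

    /* Toy version of the proto_ops change: ->accept() now receives a
     * "kern" flag so the protocol can mark the child socket's ownership. */
    struct toy_sock { bool kern; };

    struct toy_ops {
            struct toy_sock *(*accept)(bool kern);
    };

    static struct toy_sock child;

    static struct toy_sock *toy_accept(bool kern)
    {
            child.kern = kern;      /* would be sk_alloc(..., kern) in-kernel */
            return &child;
    }

    static const struct toy_ops ops = { .accept = toy_accept };

    /* kernel_accept() path: in-kernel users pass true... */
    static struct toy_sock *kernel_accept(void) { return ops.accept(true); }
    /* ...while the accept4() syscall path passes false. */
    static struct toy_sock *sys_accept(void)    { return ops.accept(false); }

    int main(void)
    {
            printf("kernel_accept -> kern=%d\n", kernel_accept()->kern);
            printf("sys_accept    -> kern=%d\n", sys_accept()->kern);
            return 0;
    }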
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 8931e33b6541..2b720fa35c4f 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -1635,6 +1635,7 @@ static struct svc_xprt *svc_bc_create_socket(struct svc_serv *serv,
1635 1635
1636 xprt = &svsk->sk_xprt; 1636 xprt = &svsk->sk_xprt;
1637 svc_xprt_init(net, &svc_tcp_bc_class, xprt, serv); 1637 svc_xprt_init(net, &svc_tcp_bc_class, xprt, serv);
1638 set_bit(XPT_CONG_CTRL, &svsk->sk_xprt.xpt_flags);
1638 1639
1639 serv->sv_bc_xprt = xprt; 1640 serv->sv_bc_xprt = xprt;
1640 1641
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index c13a5c35ce14..fc8f14c7bfec 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -127,6 +127,7 @@ static struct svc_xprt *svc_rdma_bc_create(struct svc_serv *serv,
127 xprt = &cma_xprt->sc_xprt; 127 xprt = &cma_xprt->sc_xprt;
128 128
129 svc_xprt_init(net, &svc_rdma_bc_class, xprt, serv); 129 svc_xprt_init(net, &svc_rdma_bc_class, xprt, serv);
130 set_bit(XPT_CONG_CTRL, &xprt->xpt_flags);
130 serv->sv_bc_xprt = xprt; 131 serv->sv_bc_xprt = xprt;
131 132
132 dprintk("svcrdma: %s(%p)\n", __func__, xprt); 133 dprintk("svcrdma: %s(%p)\n", __func__, xprt);
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 81cd31acf690..3b332b395045 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -503,7 +503,8 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
503 struct ib_cq *sendcq, *recvcq; 503 struct ib_cq *sendcq, *recvcq;
504 int rc; 504 int rc;
505 505
506 max_sge = min(ia->ri_device->attrs.max_sge, RPCRDMA_MAX_SEND_SGES); 506 max_sge = min_t(unsigned int, ia->ri_device->attrs.max_sge,
507 RPCRDMA_MAX_SEND_SGES);
507 if (max_sge < RPCRDMA_MIN_SEND_SGES) { 508 if (max_sge < RPCRDMA_MIN_SEND_SGES) {
508 pr_warn("rpcrdma: HCA provides only %d send SGEs\n", max_sge); 509 pr_warn("rpcrdma: HCA provides only %d send SGEs\n", max_sge);
509 return -ENOMEM; 510 return -ENOMEM;
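The min() to min_t(unsigned int, ...) switch above is about operand types: the device attribute and RPCRDMA_MAX_SEND_SGES do not share a type, and the kernel's min() rejects mixed types precisely because a plain C comparison quietly promotes a signed operand to unsigned. A short demonstration of the trap that min_t() makes explicit:

    #include <stdio.h>

    int main(void)
    {
            int a = -1;             /* a signed value, e.g. a device attribute */
            unsigned int b = 8;

            if (a < b)              /* usual arithmetic conversions apply... */
                    printf("-1 compares as less than 8\n");
            else                    /* ...so -1 becomes UINT_MAX and this runs */
                    printf("-1 was promoted to %u\n", (unsigned int)a);

            /* min_t(unsigned int, a, b) spells the conversion out instead: */
            unsigned int ua = (unsigned int)a;
            printf("min_t picks %u\n", ua < b ? ua : b);
            return 0;
    }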
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 43e4045e72bc..7130e73bd42c 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -115,7 +115,8 @@ static void tipc_data_ready(struct sock *sk);
115static void tipc_write_space(struct sock *sk); 115static void tipc_write_space(struct sock *sk);
116static void tipc_sock_destruct(struct sock *sk); 116static void tipc_sock_destruct(struct sock *sk);
117static int tipc_release(struct socket *sock); 117static int tipc_release(struct socket *sock);
118static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags); 118static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
119 bool kern);
119static void tipc_sk_timeout(unsigned long data); 120static void tipc_sk_timeout(unsigned long data);
120static int tipc_sk_publish(struct tipc_sock *tsk, uint scope, 121static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
121 struct tipc_name_seq const *seq); 122 struct tipc_name_seq const *seq);
@@ -2029,7 +2030,8 @@ static int tipc_wait_for_accept(struct socket *sock, long timeo)
2029 * 2030 *
2030 * Returns 0 on success, errno otherwise 2031 * Returns 0 on success, errno otherwise
2031 */ 2032 */
2032static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags) 2033static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
2034 bool kern)
2033{ 2035{
2034 struct sock *new_sk, *sk = sock->sk; 2036 struct sock *new_sk, *sk = sock->sk;
2035 struct sk_buff *buf; 2037 struct sk_buff *buf;
@@ -2051,7 +2053,7 @@ static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags)
2051 2053
2052 buf = skb_peek(&sk->sk_receive_queue); 2054 buf = skb_peek(&sk->sk_receive_queue);
2053 2055
2054 res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, 0); 2056 res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, kern);
2055 if (res) 2057 if (res)
2056 goto exit; 2058 goto exit;
2057 security_sk_clone(sock->sk, new_sock->sk); 2059 security_sk_clone(sock->sk, new_sock->sk);
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index 9d94e65d0894..271cd66e4b3b 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -141,6 +141,11 @@ void tipc_subscrp_report_overlap(struct tipc_subscription *sub, u32 found_lower,
141static void tipc_subscrp_timeout(unsigned long data) 141static void tipc_subscrp_timeout(unsigned long data)
142{ 142{
143 struct tipc_subscription *sub = (struct tipc_subscription *)data; 143 struct tipc_subscription *sub = (struct tipc_subscription *)data;
144 struct tipc_subscriber *subscriber = sub->subscriber;
145
146 spin_lock_bh(&subscriber->lock);
147 tipc_nametbl_unsubscribe(sub);
148 spin_unlock_bh(&subscriber->lock);
144 149
145 /* Notify subscriber of timeout */ 150 /* Notify subscriber of timeout */
146 tipc_subscrp_send_event(sub, sub->evt.s.seq.lower, sub->evt.s.seq.upper, 151 tipc_subscrp_send_event(sub, sub->evt.s.seq.lower, sub->evt.s.seq.upper,
@@ -173,7 +178,6 @@ static void tipc_subscrp_kref_release(struct kref *kref)
173 struct tipc_subscriber *subscriber = sub->subscriber; 178 struct tipc_subscriber *subscriber = sub->subscriber;
174 179
175 spin_lock_bh(&subscriber->lock); 180 spin_lock_bh(&subscriber->lock);
176 tipc_nametbl_unsubscribe(sub);
177 list_del(&sub->subscrp_list); 181 list_del(&sub->subscrp_list);
178 atomic_dec(&tn->subscription_count); 182 atomic_dec(&tn->subscription_count);
179 spin_unlock_bh(&subscriber->lock); 183 spin_unlock_bh(&subscriber->lock);
@@ -205,6 +209,7 @@ static void tipc_subscrb_subscrp_delete(struct tipc_subscriber *subscriber,
205 if (s && memcmp(s, &sub->evt.s, sizeof(struct tipc_subscr))) 209 if (s && memcmp(s, &sub->evt.s, sizeof(struct tipc_subscr)))
206 continue; 210 continue;
207 211
212 tipc_nametbl_unsubscribe(sub);
208 tipc_subscrp_get(sub); 213 tipc_subscrp_get(sub);
209 spin_unlock_bh(&subscriber->lock); 214 spin_unlock_bh(&subscriber->lock);
210 tipc_subscrp_delete(sub); 215 tipc_subscrp_delete(sub);
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index ee37b390260a..928691c43408 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -636,7 +636,7 @@ static int unix_bind(struct socket *, struct sockaddr *, int);
636static int unix_stream_connect(struct socket *, struct sockaddr *, 636static int unix_stream_connect(struct socket *, struct sockaddr *,
637 int addr_len, int flags); 637 int addr_len, int flags);
638static int unix_socketpair(struct socket *, struct socket *); 638static int unix_socketpair(struct socket *, struct socket *);
639static int unix_accept(struct socket *, struct socket *, int); 639static int unix_accept(struct socket *, struct socket *, int, bool);
640static int unix_getname(struct socket *, struct sockaddr *, int *, int); 640static int unix_getname(struct socket *, struct sockaddr *, int *, int);
641static unsigned int unix_poll(struct file *, struct socket *, poll_table *); 641static unsigned int unix_poll(struct file *, struct socket *, poll_table *);
642static unsigned int unix_dgram_poll(struct file *, struct socket *, 642static unsigned int unix_dgram_poll(struct file *, struct socket *,
@@ -1402,7 +1402,8 @@ static void unix_sock_inherit_flags(const struct socket *old,
1402 set_bit(SOCK_PASSSEC, &new->flags); 1402 set_bit(SOCK_PASSSEC, &new->flags);
1403} 1403}
1404 1404
1405static int unix_accept(struct socket *sock, struct socket *newsock, int flags) 1405static int unix_accept(struct socket *sock, struct socket *newsock, int flags,
1406 bool kern)
1406{ 1407{
1407 struct sock *sk = sock->sk; 1408 struct sock *sk = sock->sk;
1408 struct sock *tsk; 1409 struct sock *tsk;
diff --git a/net/unix/garbage.c b/net/unix/garbage.c
index 6a0d48525fcf..c36757e72844 100644
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -146,6 +146,7 @@ void unix_notinflight(struct user_struct *user, struct file *fp)
146 if (s) { 146 if (s) {
147 struct unix_sock *u = unix_sk(s); 147 struct unix_sock *u = unix_sk(s);
148 148
149 BUG_ON(!atomic_long_read(&u->inflight));
149 BUG_ON(list_empty(&u->link)); 150 BUG_ON(list_empty(&u->link));
150 151
151 if (atomic_long_dec_and_test(&u->inflight)) 152 if (atomic_long_dec_and_test(&u->inflight))
@@ -341,6 +342,14 @@ void unix_gc(void)
341 } 342 }
342 list_del(&cursor); 343 list_del(&cursor);
343 344
345 /* Now gc_candidates contains only garbage. Restore original
346 * inflight counters for these as well, and remove the skbuffs
347 * which are creating the cycle(s).
348 */
349 skb_queue_head_init(&hitlist);
350 list_for_each_entry(u, &gc_candidates, link)
351 scan_children(&u->sk, inc_inflight, &hitlist);
352
344 /* not_cycle_list contains those sockets which do not make up a 353 /* not_cycle_list contains those sockets which do not make up a
345 * cycle. Restore these to the inflight list. 354 * cycle. Restore these to the inflight list.
346 */ 355 */
@@ -350,14 +359,6 @@ void unix_gc(void)
350 list_move_tail(&u->link, &gc_inflight_list); 359 list_move_tail(&u->link, &gc_inflight_list);
351 } 360 }
352 361
353 /* Now gc_candidates contains only garbage. Restore original
354 * inflight counters for these as well, and remove the skbuffs
355 * which are creating the cycle(s).
356 */
357 skb_queue_head_init(&hitlist);
358 list_for_each_entry(u, &gc_candidates, link)
359 scan_children(&u->sk, inc_inflight, &hitlist);
360
361 spin_unlock(&unix_gc_lock); 362 spin_unlock(&unix_gc_lock);
362 363
363 /* Here we are. Hitlist is filled. Die. */ 364 /* Here we are. Hitlist is filled. Die. */
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index 9192ead66751..6f7f6757ceef 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -1102,10 +1102,19 @@ static const struct proto_ops vsock_dgram_ops = {
1102 .sendpage = sock_no_sendpage, 1102 .sendpage = sock_no_sendpage,
1103}; 1103};
1104 1104
1105static int vsock_transport_cancel_pkt(struct vsock_sock *vsk)
1106{
1107 if (!transport->cancel_pkt)
1108 return -EOPNOTSUPP;
1109
1110 return transport->cancel_pkt(vsk);
1111}
1112
1105static void vsock_connect_timeout(struct work_struct *work) 1113static void vsock_connect_timeout(struct work_struct *work)
1106{ 1114{
1107 struct sock *sk; 1115 struct sock *sk;
1108 struct vsock_sock *vsk; 1116 struct vsock_sock *vsk;
1117 int cancel = 0;
1109 1118
1110 vsk = container_of(work, struct vsock_sock, dwork.work); 1119 vsk = container_of(work, struct vsock_sock, dwork.work);
1111 sk = sk_vsock(vsk); 1120 sk = sk_vsock(vsk);
@@ -1116,8 +1125,11 @@ static void vsock_connect_timeout(struct work_struct *work)
1116 sk->sk_state = SS_UNCONNECTED; 1125 sk->sk_state = SS_UNCONNECTED;
1117 sk->sk_err = ETIMEDOUT; 1126 sk->sk_err = ETIMEDOUT;
1118 sk->sk_error_report(sk); 1127 sk->sk_error_report(sk);
1128 cancel = 1;
1119 } 1129 }
1120 release_sock(sk); 1130 release_sock(sk);
1131 if (cancel)
1132 vsock_transport_cancel_pkt(vsk);
1121 1133
1122 sock_put(sk); 1134 sock_put(sk);
1123} 1135}
@@ -1224,11 +1236,13 @@ static int vsock_stream_connect(struct socket *sock, struct sockaddr *addr,
1224 err = sock_intr_errno(timeout); 1236 err = sock_intr_errno(timeout);
1225 sk->sk_state = SS_UNCONNECTED; 1237 sk->sk_state = SS_UNCONNECTED;
1226 sock->state = SS_UNCONNECTED; 1238 sock->state = SS_UNCONNECTED;
1239 vsock_transport_cancel_pkt(vsk);
1227 goto out_wait; 1240 goto out_wait;
1228 } else if (timeout == 0) { 1241 } else if (timeout == 0) {
1229 err = -ETIMEDOUT; 1242 err = -ETIMEDOUT;
1230 sk->sk_state = SS_UNCONNECTED; 1243 sk->sk_state = SS_UNCONNECTED;
1231 sock->state = SS_UNCONNECTED; 1244 sock->state = SS_UNCONNECTED;
1245 vsock_transport_cancel_pkt(vsk);
1232 goto out_wait; 1246 goto out_wait;
1233 } 1247 }
1234 1248
@@ -1250,7 +1264,8 @@ out:
1250 return err; 1264 return err;
1251} 1265}
1252 1266
1253static int vsock_accept(struct socket *sock, struct socket *newsock, int flags) 1267static int vsock_accept(struct socket *sock, struct socket *newsock, int flags,
1268 bool kern)
1254{ 1269{
1255 struct sock *listener; 1270 struct sock *listener;
1256 int err; 1271 int err;
diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
index 9d24c0e958b1..68675a151f22 100644
--- a/net/vmw_vsock/virtio_transport.c
+++ b/net/vmw_vsock/virtio_transport.c
@@ -213,6 +213,47 @@ virtio_transport_send_pkt(struct virtio_vsock_pkt *pkt)
213 return len; 213 return len;
214} 214}
215 215
216static int
217virtio_transport_cancel_pkt(struct vsock_sock *vsk)
218{
219 struct virtio_vsock *vsock;
220 struct virtio_vsock_pkt *pkt, *n;
221 int cnt = 0;
222 LIST_HEAD(freeme);
223
224 vsock = virtio_vsock_get();
225 if (!vsock) {
226 return -ENODEV;
227 }
228
229 spin_lock_bh(&vsock->send_pkt_list_lock);
230 list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) {
231 if (pkt->vsk != vsk)
232 continue;
233 list_move(&pkt->list, &freeme);
234 }
235 spin_unlock_bh(&vsock->send_pkt_list_lock);
236
237 list_for_each_entry_safe(pkt, n, &freeme, list) {
238 if (pkt->reply)
239 cnt++;
240 list_del(&pkt->list);
241 virtio_transport_free_pkt(pkt);
242 }
243
244 if (cnt) {
245 struct virtqueue *rx_vq = vsock->vqs[VSOCK_VQ_RX];
246 int new_cnt;
247
248 new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
249 if (new_cnt + cnt >= virtqueue_get_vring_size(rx_vq) &&
250 new_cnt < virtqueue_get_vring_size(rx_vq))
251 queue_work(virtio_vsock_workqueue, &vsock->rx_work);
252 }
253
254 return 0;
255}
256
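virtio_transport_cancel_pkt() above uses a common kernel shape: walk the shared send list under its spinlock, list_move() this socket's packets onto a private freeme list, then free them after the lock is dropped; if any were reply packets, queued_replies is lowered and the rx worker may be re-kicked. A userspace analogue of the collect-under-lock, free-outside pattern, with a pthread mutex standing in for the spinlock and a hand-rolled list (build with -pthread):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Minimal doubly-linked list, enough to mirror list_move() usage. */
    struct node { struct node *prev, *next; int owner; };

    static void list_init(struct node *h) { h->prev = h->next = h; }
    static void list_del(struct node *n)
    {
            n->prev->next = n->next;
            n->next->prev = n->prev;
    }
    static void list_add_tail(struct node *n, struct node *h)
    {
            n->prev = h->prev; n->next = h;
            h->prev->next = n; h->prev = n;
    }
    static void list_move(struct node *n, struct node *h)
    {
            list_del(n); list_add_tail(n, h);
    }

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static struct node pending;     /* like vsock->send_pkt_list */

    /* Collect matching entries onto a private list while holding the
     * lock, then free them with the lock dropped. */
    static void cancel_for_owner(int owner)
    {
            struct node freeme, *n, *tmp;

            list_init(&freeme);
            pthread_mutex_lock(&lock);
            for (n = pending.next; n != &pending; n = tmp) {
                    tmp = n->next;
                    if (n->owner == owner)
                            list_move(n, &freeme);
            }
            pthread_mutex_unlock(&lock);

            for (n = freeme.next; n != &freeme; n = tmp) {
                    tmp = n->next;
                    list_del(n);
                    free(n);        /* virtio_transport_free_pkt() equivalent */
            }
    }

    int main(void)
    {
            list_init(&pending);
            for (int i = 0; i < 6; i++) {
                    struct node *n = malloc(sizeof(*n));
                    n->owner = i % 2;
                    list_add_tail(n, &pending);
            }
            cancel_for_owner(1);    /* drops the three owner-1 packets */
            int left = 0;
            for (struct node *n = pending.next; n != &pending; n = n->next)
                    left++;
            printf("%d packets still queued\n", left);
            while (pending.next != &pending) {
                    struct node *n = pending.next;
                    list_del(n);
                    free(n);
            }
            return 0;
    }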
216static void virtio_vsock_rx_fill(struct virtio_vsock *vsock) 257static void virtio_vsock_rx_fill(struct virtio_vsock *vsock)
217{ 258{
218 int buf_len = VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE; 259 int buf_len = VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE;
@@ -462,6 +503,7 @@ static struct virtio_transport virtio_transport = {
462 .release = virtio_transport_release, 503 .release = virtio_transport_release,
463 .connect = virtio_transport_connect, 504 .connect = virtio_transport_connect,
464 .shutdown = virtio_transport_shutdown, 505 .shutdown = virtio_transport_shutdown,
506 .cancel_pkt = virtio_transport_cancel_pkt,
465 507
466 .dgram_bind = virtio_transport_dgram_bind, 508 .dgram_bind = virtio_transport_dgram_bind,
467 .dgram_dequeue = virtio_transport_dgram_dequeue, 509 .dgram_dequeue = virtio_transport_dgram_dequeue,
diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
index 8d592a45b597..af087b44ceea 100644
--- a/net/vmw_vsock/virtio_transport_common.c
+++ b/net/vmw_vsock/virtio_transport_common.c
@@ -58,6 +58,7 @@ virtio_transport_alloc_pkt(struct virtio_vsock_pkt_info *info,
58 pkt->len = len; 58 pkt->len = len;
59 pkt->hdr.len = cpu_to_le32(len); 59 pkt->hdr.len = cpu_to_le32(len);
60 pkt->reply = info->reply; 60 pkt->reply = info->reply;
61 pkt->vsk = info->vsk;
61 62
62 if (info->msg && len > 0) { 63 if (info->msg && len > 0) {
63 pkt->buf = kmalloc(len, GFP_KERNEL); 64 pkt->buf = kmalloc(len, GFP_KERNEL);
@@ -180,6 +181,7 @@ static int virtio_transport_send_credit_update(struct vsock_sock *vsk,
180 struct virtio_vsock_pkt_info info = { 181 struct virtio_vsock_pkt_info info = {
181 .op = VIRTIO_VSOCK_OP_CREDIT_UPDATE, 182 .op = VIRTIO_VSOCK_OP_CREDIT_UPDATE,
182 .type = type, 183 .type = type,
184 .vsk = vsk,
183 }; 185 };
184 186
185 return virtio_transport_send_pkt_info(vsk, &info); 187 return virtio_transport_send_pkt_info(vsk, &info);
@@ -519,6 +521,7 @@ int virtio_transport_connect(struct vsock_sock *vsk)
519 struct virtio_vsock_pkt_info info = { 521 struct virtio_vsock_pkt_info info = {
520 .op = VIRTIO_VSOCK_OP_REQUEST, 522 .op = VIRTIO_VSOCK_OP_REQUEST,
521 .type = VIRTIO_VSOCK_TYPE_STREAM, 523 .type = VIRTIO_VSOCK_TYPE_STREAM,
524 .vsk = vsk,
522 }; 525 };
523 526
524 return virtio_transport_send_pkt_info(vsk, &info); 527 return virtio_transport_send_pkt_info(vsk, &info);
@@ -534,6 +537,7 @@ int virtio_transport_shutdown(struct vsock_sock *vsk, int mode)
534 VIRTIO_VSOCK_SHUTDOWN_RCV : 0) | 537 VIRTIO_VSOCK_SHUTDOWN_RCV : 0) |
535 (mode & SEND_SHUTDOWN ? 538 (mode & SEND_SHUTDOWN ?
536 VIRTIO_VSOCK_SHUTDOWN_SEND : 0), 539 VIRTIO_VSOCK_SHUTDOWN_SEND : 0),
540 .vsk = vsk,
537 }; 541 };
538 542
539 return virtio_transport_send_pkt_info(vsk, &info); 543 return virtio_transport_send_pkt_info(vsk, &info);
@@ -560,6 +564,7 @@ virtio_transport_stream_enqueue(struct vsock_sock *vsk,
560 .type = VIRTIO_VSOCK_TYPE_STREAM, 564 .type = VIRTIO_VSOCK_TYPE_STREAM,
561 .msg = msg, 565 .msg = msg,
562 .pkt_len = len, 566 .pkt_len = len,
567 .vsk = vsk,
563 }; 568 };
564 569
565 return virtio_transport_send_pkt_info(vsk, &info); 570 return virtio_transport_send_pkt_info(vsk, &info);
@@ -581,6 +586,7 @@ static int virtio_transport_reset(struct vsock_sock *vsk,
581 .op = VIRTIO_VSOCK_OP_RST, 586 .op = VIRTIO_VSOCK_OP_RST,
582 .type = VIRTIO_VSOCK_TYPE_STREAM, 587 .type = VIRTIO_VSOCK_TYPE_STREAM,
583 .reply = !!pkt, 588 .reply = !!pkt,
589 .vsk = vsk,
584 }; 590 };
585 591
586 /* Send RST only if the original pkt is not a RST pkt */ 592 /* Send RST only if the original pkt is not a RST pkt */
@@ -826,6 +832,7 @@ virtio_transport_send_response(struct vsock_sock *vsk,
826 .remote_cid = le64_to_cpu(pkt->hdr.src_cid), 832 .remote_cid = le64_to_cpu(pkt->hdr.src_cid),
827 .remote_port = le32_to_cpu(pkt->hdr.src_port), 833 .remote_port = le32_to_cpu(pkt->hdr.src_port),
828 .reply = true, 834 .reply = true,
835 .vsk = vsk,
829 }; 836 };
830 837
831 return virtio_transport_send_pkt_info(vsk, &info); 838 return virtio_transport_send_pkt_info(vsk, &info);
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index d7f8be4e321a..2312dc2ffdb9 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -545,22 +545,18 @@ static int nl80211_prepare_wdev_dump(struct sk_buff *skb,
545{ 545{
546 int err; 546 int err;
547 547
548 rtnl_lock();
549
550 if (!cb->args[0]) { 548 if (!cb->args[0]) {
551 err = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize, 549 err = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize,
552 genl_family_attrbuf(&nl80211_fam), 550 genl_family_attrbuf(&nl80211_fam),
553 nl80211_fam.maxattr, nl80211_policy); 551 nl80211_fam.maxattr, nl80211_policy);
554 if (err) 552 if (err)
555 goto out_unlock; 553 return err;
556 554
557 *wdev = __cfg80211_wdev_from_attrs( 555 *wdev = __cfg80211_wdev_from_attrs(
558 sock_net(skb->sk), 556 sock_net(skb->sk),
559 genl_family_attrbuf(&nl80211_fam)); 557 genl_family_attrbuf(&nl80211_fam));
560 if (IS_ERR(*wdev)) { 558 if (IS_ERR(*wdev))
561 err = PTR_ERR(*wdev); 559 return PTR_ERR(*wdev);
562 goto out_unlock;
563 }
564 *rdev = wiphy_to_rdev((*wdev)->wiphy); 560 *rdev = wiphy_to_rdev((*wdev)->wiphy);
565 /* 0 is the first index - add 1 to parse only once */ 561 /* 0 is the first index - add 1 to parse only once */
566 cb->args[0] = (*rdev)->wiphy_idx + 1; 562 cb->args[0] = (*rdev)->wiphy_idx + 1;
@@ -570,10 +566,8 @@ static int nl80211_prepare_wdev_dump(struct sk_buff *skb,
570 struct wiphy *wiphy = wiphy_idx_to_wiphy(cb->args[0] - 1); 566 struct wiphy *wiphy = wiphy_idx_to_wiphy(cb->args[0] - 1);
571 struct wireless_dev *tmp; 567 struct wireless_dev *tmp;
572 568
573 if (!wiphy) { 569 if (!wiphy)
574 err = -ENODEV; 570 return -ENODEV;
575 goto out_unlock;
576 }
577 *rdev = wiphy_to_rdev(wiphy); 571 *rdev = wiphy_to_rdev(wiphy);
578 *wdev = NULL; 572 *wdev = NULL;
579 573
@@ -584,21 +578,11 @@ static int nl80211_prepare_wdev_dump(struct sk_buff *skb,
584 } 578 }
585 } 579 }
586 580
587 if (!*wdev) { 581 if (!*wdev)
588 err = -ENODEV; 582 return -ENODEV;
589 goto out_unlock;
590 }
591 } 583 }
592 584
593 return 0; 585 return 0;
594 out_unlock:
595 rtnl_unlock();
596 return err;
597}
598
599static void nl80211_finish_wdev_dump(struct cfg80211_registered_device *rdev)
600{
601 rtnl_unlock();
602} 586}
603 587
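nl80211_prepare_wdev_dump() no longer takes the RTNL itself, and nl80211_finish_wdev_dump() is gone; as the later dump_station/dump_mpath/dump_scan hunks show, each dumpit callback now takes rtnl_lock() up front and releases it at a single out_err label, so every early exit unlocks exactly once. A compact pthread analogue of that caller-owns-the-lock shape (illustrative only; build with -pthread):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t rtnl = PTHREAD_MUTEX_INITIALIZER;

    /* After the patch: the helper assumes the lock is already held and
     * simply returns an error code; it never unlocks. */
    static int prepare_dump(int arg)
    {
            return arg < 0 ? -1 : 0;        /* stand-in for -ENODEV etc. */
    }

    /* Each dumpit owns the lock for its whole body and has exactly one
     * unlock site, so every early exit funnels through out_err. */
    static int dump_station(int arg)
    {
            int err;

            pthread_mutex_lock(&rtnl);
            err = prepare_dump(arg);
            if (err)
                    goto out_err;

            err = 42;                       /* ...do the actual dump... */
    out_err:
            pthread_mutex_unlock(&rtnl);
            return err;
    }

    int main(void)
    {
            printf("ok path: %d\n", dump_station(1));
            printf("error path: %d\n", dump_station(-1));
            return 0;
    }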
604/* IE validation */ 588/* IE validation */
@@ -2608,17 +2592,17 @@ static int nl80211_dump_interface(struct sk_buff *skb, struct netlink_callback *
2608 int filter_wiphy = -1; 2592 int filter_wiphy = -1;
2609 struct cfg80211_registered_device *rdev; 2593 struct cfg80211_registered_device *rdev;
2610 struct wireless_dev *wdev; 2594 struct wireless_dev *wdev;
2595 int ret;
2611 2596
2612 rtnl_lock(); 2597 rtnl_lock();
2613 if (!cb->args[2]) { 2598 if (!cb->args[2]) {
2614 struct nl80211_dump_wiphy_state state = { 2599 struct nl80211_dump_wiphy_state state = {
2615 .filter_wiphy = -1, 2600 .filter_wiphy = -1,
2616 }; 2601 };
2617 int ret;
2618 2602
2619 ret = nl80211_dump_wiphy_parse(skb, cb, &state); 2603 ret = nl80211_dump_wiphy_parse(skb, cb, &state);
2620 if (ret) 2604 if (ret)
2621 return ret; 2605 goto out_unlock;
2622 2606
2623 filter_wiphy = state.filter_wiphy; 2607 filter_wiphy = state.filter_wiphy;
2624 2608
@@ -2663,12 +2647,14 @@ static int nl80211_dump_interface(struct sk_buff *skb, struct netlink_callback *
2663 wp_idx++; 2647 wp_idx++;
2664 } 2648 }
2665 out: 2649 out:
2666 rtnl_unlock();
2667
2668 cb->args[0] = wp_idx; 2650 cb->args[0] = wp_idx;
2669 cb->args[1] = if_idx; 2651 cb->args[1] = if_idx;
2670 2652
2671 return skb->len; 2653 ret = skb->len;
2654 out_unlock:
2655 rtnl_unlock();
2656
2657 return ret;
2672} 2658}
2673 2659
2674static int nl80211_get_interface(struct sk_buff *skb, struct genl_info *info) 2660static int nl80211_get_interface(struct sk_buff *skb, struct genl_info *info)
@@ -4452,9 +4438,10 @@ static int nl80211_dump_station(struct sk_buff *skb,
4452 int sta_idx = cb->args[2]; 4438 int sta_idx = cb->args[2];
4453 int err; 4439 int err;
4454 4440
4441 rtnl_lock();
4455 err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev); 4442 err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
4456 if (err) 4443 if (err)
4457 return err; 4444 goto out_err;
4458 4445
4459 if (!wdev->netdev) { 4446 if (!wdev->netdev) {
4460 err = -EINVAL; 4447 err = -EINVAL;
@@ -4489,7 +4476,7 @@ static int nl80211_dump_station(struct sk_buff *skb,
4489 cb->args[2] = sta_idx; 4476 cb->args[2] = sta_idx;
4490 err = skb->len; 4477 err = skb->len;
4491 out_err: 4478 out_err:
4492 nl80211_finish_wdev_dump(rdev); 4479 rtnl_unlock();
4493 4480
4494 return err; 4481 return err;
4495} 4482}
@@ -5275,9 +5262,10 @@ static int nl80211_dump_mpath(struct sk_buff *skb,
5275 int path_idx = cb->args[2]; 5262 int path_idx = cb->args[2];
5276 int err; 5263 int err;
5277 5264
5265 rtnl_lock();
5278 err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev); 5266 err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
5279 if (err) 5267 if (err)
5280 return err; 5268 goto out_err;
5281 5269
5282 if (!rdev->ops->dump_mpath) { 5270 if (!rdev->ops->dump_mpath) {
5283 err = -EOPNOTSUPP; 5271 err = -EOPNOTSUPP;
@@ -5310,7 +5298,7 @@ static int nl80211_dump_mpath(struct sk_buff *skb,
5310 cb->args[2] = path_idx; 5298 cb->args[2] = path_idx;
5311 err = skb->len; 5299 err = skb->len;
5312 out_err: 5300 out_err:
5313 nl80211_finish_wdev_dump(rdev); 5301 rtnl_unlock();
5314 return err; 5302 return err;
5315} 5303}
5316 5304
@@ -5470,9 +5458,10 @@ static int nl80211_dump_mpp(struct sk_buff *skb,
5470 int path_idx = cb->args[2]; 5458 int path_idx = cb->args[2];
5471 int err; 5459 int err;
5472 5460
5461 rtnl_lock();
5473 err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev); 5462 err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
5474 if (err) 5463 if (err)
5475 return err; 5464 goto out_err;
5476 5465
5477 if (!rdev->ops->dump_mpp) { 5466 if (!rdev->ops->dump_mpp) {
5478 err = -EOPNOTSUPP; 5467 err = -EOPNOTSUPP;
@@ -5505,7 +5494,7 @@ static int nl80211_dump_mpp(struct sk_buff *skb,
5505 cb->args[2] = path_idx; 5494 cb->args[2] = path_idx;
5506 err = skb->len; 5495 err = skb->len;
5507 out_err: 5496 out_err:
5508 nl80211_finish_wdev_dump(rdev); 5497 rtnl_unlock();
5509 return err; 5498 return err;
5510} 5499}
5511 5500
@@ -7674,9 +7663,12 @@ static int nl80211_dump_scan(struct sk_buff *skb, struct netlink_callback *cb)
7674 int start = cb->args[2], idx = 0; 7663 int start = cb->args[2], idx = 0;
7675 int err; 7664 int err;
7676 7665
7666 rtnl_lock();
7677 err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev); 7667 err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
7678 if (err) 7668 if (err) {
7669 rtnl_unlock();
7679 return err; 7670 return err;
7671 }
7680 7672
7681 wdev_lock(wdev); 7673 wdev_lock(wdev);
7682 spin_lock_bh(&rdev->bss_lock); 7674 spin_lock_bh(&rdev->bss_lock);
@@ -7699,7 +7691,7 @@ static int nl80211_dump_scan(struct sk_buff *skb, struct netlink_callback *cb)
7699 wdev_unlock(wdev); 7691 wdev_unlock(wdev);
7700 7692
7701 cb->args[2] = idx; 7693 cb->args[2] = idx;
7702 nl80211_finish_wdev_dump(rdev); 7694 rtnl_unlock();
7703 7695
7704 return skb->len; 7696 return skb->len;
7705} 7697}
@@ -7784,9 +7776,10 @@ static int nl80211_dump_survey(struct sk_buff *skb, struct netlink_callback *cb)
7784 int res; 7776 int res;
7785 bool radio_stats; 7777 bool radio_stats;
7786 7778
7779 rtnl_lock();
7787 res = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev); 7780 res = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
7788 if (res) 7781 if (res)
7789 return res; 7782 goto out_err;
7790 7783
7791 /* prepare_wdev_dump parsed the attributes */ 7784 /* prepare_wdev_dump parsed the attributes */
7792 radio_stats = attrbuf[NL80211_ATTR_SURVEY_RADIO_STATS]; 7785 radio_stats = attrbuf[NL80211_ATTR_SURVEY_RADIO_STATS];
@@ -7827,7 +7820,7 @@ static int nl80211_dump_survey(struct sk_buff *skb, struct netlink_callback *cb)
7827 cb->args[2] = survey_idx; 7820 cb->args[2] = survey_idx;
7828 res = skb->len; 7821 res = skb->len;
7829 out_err: 7822 out_err:
7830 nl80211_finish_wdev_dump(rdev); 7823 rtnl_unlock();
7831 return res; 7824 return res;
7832} 7825}
7833 7826
@@ -11508,17 +11501,13 @@ static int nl80211_prepare_vendor_dump(struct sk_buff *skb,
11508 void *data = NULL; 11501 void *data = NULL;
11509 unsigned int data_len = 0; 11502 unsigned int data_len = 0;
11510 11503
11511 rtnl_lock();
11512
11513 if (cb->args[0]) { 11504 if (cb->args[0]) {
11514 /* subtract the 1 again here */ 11505 /* subtract the 1 again here */
11515 struct wiphy *wiphy = wiphy_idx_to_wiphy(cb->args[0] - 1); 11506 struct wiphy *wiphy = wiphy_idx_to_wiphy(cb->args[0] - 1);
11516 struct wireless_dev *tmp; 11507 struct wireless_dev *tmp;
11517 11508
11518 if (!wiphy) { 11509 if (!wiphy)
11519 err = -ENODEV; 11510 return -ENODEV;
11520 goto out_unlock;
11521 }
11522 *rdev = wiphy_to_rdev(wiphy); 11511 *rdev = wiphy_to_rdev(wiphy);
11523 *wdev = NULL; 11512 *wdev = NULL;
11524 11513
@@ -11538,23 +11527,19 @@ static int nl80211_prepare_vendor_dump(struct sk_buff *skb,
11538 err = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize, 11527 err = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize,
11539 attrbuf, nl80211_fam.maxattr, nl80211_policy); 11528 attrbuf, nl80211_fam.maxattr, nl80211_policy);
11540 if (err) 11529 if (err)
11541 goto out_unlock; 11530 return err;
11542 11531
11543 if (!attrbuf[NL80211_ATTR_VENDOR_ID] || 11532 if (!attrbuf[NL80211_ATTR_VENDOR_ID] ||
11544 !attrbuf[NL80211_ATTR_VENDOR_SUBCMD]) { 11533 !attrbuf[NL80211_ATTR_VENDOR_SUBCMD])
11545 err = -EINVAL; 11534 return -EINVAL;
11546 goto out_unlock;
11547 }
11548 11535
11549 *wdev = __cfg80211_wdev_from_attrs(sock_net(skb->sk), attrbuf); 11536 *wdev = __cfg80211_wdev_from_attrs(sock_net(skb->sk), attrbuf);
11550 if (IS_ERR(*wdev)) 11537 if (IS_ERR(*wdev))
11551 *wdev = NULL; 11538 *wdev = NULL;
11552 11539
11553 *rdev = __cfg80211_rdev_from_attrs(sock_net(skb->sk), attrbuf); 11540 *rdev = __cfg80211_rdev_from_attrs(sock_net(skb->sk), attrbuf);
11554 if (IS_ERR(*rdev)) { 11541 if (IS_ERR(*rdev))
11555 err = PTR_ERR(*rdev); 11542 return PTR_ERR(*rdev);
11556 goto out_unlock;
11557 }
11558 11543
11559 vid = nla_get_u32(attrbuf[NL80211_ATTR_VENDOR_ID]); 11544 vid = nla_get_u32(attrbuf[NL80211_ATTR_VENDOR_ID]);
11560 subcmd = nla_get_u32(attrbuf[NL80211_ATTR_VENDOR_SUBCMD]); 11545 subcmd = nla_get_u32(attrbuf[NL80211_ATTR_VENDOR_SUBCMD]);
@@ -11567,19 +11552,15 @@ static int nl80211_prepare_vendor_dump(struct sk_buff *skb,
11567 if (vcmd->info.vendor_id != vid || vcmd->info.subcmd != subcmd) 11552 if (vcmd->info.vendor_id != vid || vcmd->info.subcmd != subcmd)
11568 continue; 11553 continue;
11569 11554
11570 if (!vcmd->dumpit) { 11555 if (!vcmd->dumpit)
11571 err = -EOPNOTSUPP; 11556 return -EOPNOTSUPP;
11572 goto out_unlock;
11573 }
11574 11557
11575 vcmd_idx = i; 11558 vcmd_idx = i;
11576 break; 11559 break;
11577 } 11560 }
11578 11561
11579 if (vcmd_idx < 0) { 11562 if (vcmd_idx < 0)
11580 err = -EOPNOTSUPP; 11563 return -EOPNOTSUPP;
11581 goto out_unlock;
11582 }
11583 11564
11584 if (attrbuf[NL80211_ATTR_VENDOR_DATA]) { 11565 if (attrbuf[NL80211_ATTR_VENDOR_DATA]) {
11585 data = nla_data(attrbuf[NL80211_ATTR_VENDOR_DATA]); 11566 data = nla_data(attrbuf[NL80211_ATTR_VENDOR_DATA]);
@@ -11596,9 +11577,6 @@ static int nl80211_prepare_vendor_dump(struct sk_buff *skb,
11596 11577
11597 /* keep rtnl locked in successful case */ 11578 /* keep rtnl locked in successful case */
11598 return 0; 11579 return 0;
11599 out_unlock:
11600 rtnl_unlock();
11601 return err;
11602} 11580}
11603 11581
11604static int nl80211_vendor_cmd_dump(struct sk_buff *skb, 11582static int nl80211_vendor_cmd_dump(struct sk_buff *skb,
@@ -11613,9 +11591,10 @@ static int nl80211_vendor_cmd_dump(struct sk_buff *skb,
11613 int err; 11591 int err;
11614 struct nlattr *vendor_data; 11592 struct nlattr *vendor_data;
11615 11593
11594 rtnl_lock();
11616 err = nl80211_prepare_vendor_dump(skb, cb, &rdev, &wdev); 11595 err = nl80211_prepare_vendor_dump(skb, cb, &rdev, &wdev);
11617 if (err) 11596 if (err)
11618 return err; 11597 goto out;
11619 11598
11620 vcmd_idx = cb->args[2]; 11599 vcmd_idx = cb->args[2];
11621 data = (void *)cb->args[3]; 11600 data = (void *)cb->args[3];
@@ -11624,15 +11603,21 @@ static int nl80211_vendor_cmd_dump(struct sk_buff *skb,
11624 11603
11625 if (vcmd->flags & (WIPHY_VENDOR_CMD_NEED_WDEV | 11604 if (vcmd->flags & (WIPHY_VENDOR_CMD_NEED_WDEV |
11626 WIPHY_VENDOR_CMD_NEED_NETDEV)) { 11605 WIPHY_VENDOR_CMD_NEED_NETDEV)) {
11627 if (!wdev) 11606 if (!wdev) {
11628 return -EINVAL; 11607 err = -EINVAL;
11608 goto out;
11609 }
11629 if (vcmd->flags & WIPHY_VENDOR_CMD_NEED_NETDEV && 11610 if (vcmd->flags & WIPHY_VENDOR_CMD_NEED_NETDEV &&
11630 !wdev->netdev) 11611 !wdev->netdev) {
11631 return -EINVAL; 11612 err = -EINVAL;
11613 goto out;
11614 }
11632 11615
11633 if (vcmd->flags & WIPHY_VENDOR_CMD_NEED_RUNNING) { 11616 if (vcmd->flags & WIPHY_VENDOR_CMD_NEED_RUNNING) {
11634 if (!wdev_running(wdev)) 11617 if (!wdev_running(wdev)) {
11635 return -ENETDOWN; 11618 err = -ENETDOWN;
11619 goto out;
11620 }
11636 } 11621 }
11637 } 11622 }
11638 11623
diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c
index 16b6b5988be9..570a2b67ca10 100644
--- a/net/wireless/sysfs.c
+++ b/net/wireless/sysfs.c
@@ -132,12 +132,10 @@ static int wiphy_resume(struct device *dev)
132 /* Age scan results with time spent in suspend */ 132 /* Age scan results with time spent in suspend */
133 cfg80211_bss_age(rdev, get_seconds() - rdev->suspend_at); 133 cfg80211_bss_age(rdev, get_seconds() - rdev->suspend_at);
134 134
135 if (rdev->ops->resume) { 135 rtnl_lock();
136 rtnl_lock(); 136 if (rdev->wiphy.registered && rdev->ops->resume)
137 if (rdev->wiphy.registered) 137 ret = rdev_resume(rdev);
138 ret = rdev_resume(rdev); 138 rtnl_unlock();
139 rtnl_unlock();
140 }
141 139
142 return ret; 140 return ret;
143} 141}
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index fd28a49dbe8f..8b911c29860e 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -852,7 +852,8 @@ static int x25_wait_for_data(struct sock *sk, long timeout)
852 return rc; 852 return rc;
853} 853}
854 854
855static int x25_accept(struct socket *sock, struct socket *newsock, int flags) 855static int x25_accept(struct socket *sock, struct socket *newsock, int flags,
856 bool kern)
856{ 857{
857 struct sock *sk = sock->sk; 858 struct sock *sk = sock->sk;
858 struct sock *newsk; 859 struct sock *newsk;
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 0806dccdf507..236cbbc0ab9c 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1243,7 +1243,7 @@ static inline int policy_to_flow_dir(int dir)
1243} 1243}
1244 1244
1245static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir, 1245static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
1246 const struct flowi *fl) 1246 const struct flowi *fl, u16 family)
1247{ 1247{
1248 struct xfrm_policy *pol; 1248 struct xfrm_policy *pol;
1249 1249
@@ -1251,8 +1251,7 @@ static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
1251 again: 1251 again:
1252 pol = rcu_dereference(sk->sk_policy[dir]); 1252 pol = rcu_dereference(sk->sk_policy[dir]);
1253 if (pol != NULL) { 1253 if (pol != NULL) {
1254 bool match = xfrm_selector_match(&pol->selector, fl, 1254 bool match = xfrm_selector_match(&pol->selector, fl, family);
1255 sk->sk_family);
1256 int err = 0; 1255 int err = 0;
1257 1256
1258 if (match) { 1257 if (match) {
@@ -2239,7 +2238,7 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
2239 sk = sk_const_to_full_sk(sk); 2238 sk = sk_const_to_full_sk(sk);
2240 if (sk && sk->sk_policy[XFRM_POLICY_OUT]) { 2239 if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
2241 num_pols = 1; 2240 num_pols = 1;
2242 pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl); 2241 pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl, family);
2243 err = xfrm_expand_policies(fl, family, pols, 2242 err = xfrm_expand_policies(fl, family, pols,
2244 &num_pols, &num_xfrms); 2243 &num_pols, &num_xfrms);
2245 if (err < 0) 2244 if (err < 0)
@@ -2518,7 +2517,7 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
2518 pol = NULL; 2517 pol = NULL;
2519 sk = sk_to_full_sk(sk); 2518 sk = sk_to_full_sk(sk);
2520 if (sk && sk->sk_policy[dir]) { 2519 if (sk && sk->sk_policy[dir]) {
2521 pol = xfrm_sk_policy_lookup(sk, dir, &fl); 2520 pol = xfrm_sk_policy_lookup(sk, dir, &fl, family);
2522 if (IS_ERR(pol)) { 2521 if (IS_ERR(pol)) {
2523 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR); 2522 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
2524 return 0; 2523 return 0;
@@ -3069,6 +3068,11 @@ static int __net_init xfrm_net_init(struct net *net)
3069{ 3068{
3070 int rv; 3069 int rv;
3071 3070
3071 /* Initialize the per-net locks here */
3072 spin_lock_init(&net->xfrm.xfrm_state_lock);
3073 spin_lock_init(&net->xfrm.xfrm_policy_lock);
3074 mutex_init(&net->xfrm.xfrm_cfg_mutex);
3075
3072 rv = xfrm_statistics_init(net); 3076 rv = xfrm_statistics_init(net);
3073 if (rv < 0) 3077 if (rv < 0)
3074 goto out_statistics; 3078 goto out_statistics;
@@ -3085,11 +3089,6 @@ static int __net_init xfrm_net_init(struct net *net)
3085 if (rv < 0) 3089 if (rv < 0)
3086 goto out; 3090 goto out;
3087 3091
3088 /* Initialize the per-net locks here */
3089 spin_lock_init(&net->xfrm.xfrm_state_lock);
3090 spin_lock_init(&net->xfrm.xfrm_policy_lock);
3091 mutex_init(&net->xfrm.xfrm_cfg_mutex);
3092
3093 return 0; 3092 return 0;
3094 3093
3095out: 3094out:
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 9705c279494b..40a8aa39220d 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -412,7 +412,14 @@ static inline int xfrm_replay_verify_len(struct xfrm_replay_state_esn *replay_es
412 up = nla_data(rp); 412 up = nla_data(rp);
413 ulen = xfrm_replay_state_esn_len(up); 413 ulen = xfrm_replay_state_esn_len(up);
414 414
415 if (nla_len(rp) < ulen || xfrm_replay_state_esn_len(replay_esn) != ulen) 415 /* Check the overall length and the internal bitmap length to avoid
416 * potential overflow. */
417 if (nla_len(rp) < ulen ||
418 xfrm_replay_state_esn_len(replay_esn) != ulen ||
419 replay_esn->bmp_len != up->bmp_len)
420 return -EINVAL;
421
422 if (up->replay_window > up->bmp_len * sizeof(__u32) * 8)
416 return -EINVAL; 423 return -EINVAL;
417 424
418 return 0; 425 return 0;
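The extra conditions above reject ESN state whose bitmap length disagrees with the kernel's copy, or whose replay_window does not fit in bmp_len 32-bit words, a bound the old length-only check missed. A simplified standalone version of the arithmetic (struct reduced to the two fields that matter):

    #include <stdio.h>
    #include <stdint.h>

    /* Simplified shape of xfrm_replay_state_esn, for illustration. */
    struct replay_esn {
            uint32_t bmp_len;       /* bitmap length in 32-bit words */
            uint32_t replay_window; /* window size in bits */
    };

    /* Mirrors the hardened check: the window may not index past the
     * bitmap, which holds bmp_len * 32 bits. */
    static int replay_verify(const struct replay_esn *kern,
                             const struct replay_esn *user)
    {
            if (kern->bmp_len != user->bmp_len)
                    return -1;      /* -EINVAL: sizes must agree */
            if (user->replay_window > user->bmp_len * sizeof(uint32_t) * 8)
                    return -1;      /* -EINVAL: window overflows the bitmap */
            return 0;
    }

    int main(void)
    {
            struct replay_esn k   = { .bmp_len = 4, .replay_window = 128 };
            struct replay_esn bad = { .bmp_len = 4, .replay_window = 129 };

            printf("sane: %d, oversized window: %d\n",
                   replay_verify(&k, &k), replay_verify(&k, &bad));
            return 0;
    }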
diff --git a/samples/statx/test-statx.c b/samples/statx/test-statx.c
index 8571d766331d..d4d77b09412c 100644
--- a/samples/statx/test-statx.c
+++ b/samples/statx/test-statx.c
@@ -141,8 +141,8 @@ static void dump_statx(struct statx *stx)
141 if (stx->stx_mask & STATX_BTIME) 141 if (stx->stx_mask & STATX_BTIME)
142 print_time(" Birth: ", &stx->stx_btime); 142 print_time(" Birth: ", &stx->stx_btime);
143 143
144 if (stx->stx_attributes) { 144 if (stx->stx_attributes_mask) {
145 unsigned char bits; 145 unsigned char bits, mbits;
146 int loop, byte; 146 int loop, byte;
147 147
148 static char attr_representation[64 + 1] = 148 static char attr_representation[64 + 1] =
@@ -160,14 +160,18 @@ static void dump_statx(struct statx *stx)
160 printf("Attributes: %016llx (", stx->stx_attributes); 160 printf("Attributes: %016llx (", stx->stx_attributes);
161 for (byte = 64 - 8; byte >= 0; byte -= 8) { 161 for (byte = 64 - 8; byte >= 0; byte -= 8) {
162 bits = stx->stx_attributes >> byte; 162 bits = stx->stx_attributes >> byte;
163 mbits = stx->stx_attributes_mask >> byte;
163 for (loop = 7; loop >= 0; loop--) { 164 for (loop = 7; loop >= 0; loop--) {
164 int bit = byte + loop; 165 int bit = byte + loop;
165 166
166 if (bits & 0x80) 167 if (!(mbits & 0x80))
168 putchar('.'); /* Not supported */
169 else if (bits & 0x80)
167 putchar(attr_representation[63 - bit]); 170 putchar(attr_representation[63 - bit]);
168 else 171 else
169 putchar('-'); 172 putchar('-'); /* Not set */
170 bits <<= 1; 173 bits <<= 1;
174 mbits <<= 1;
171 } 175 }
172 if (byte) 176 if (byte)
173 putchar(' '); 177 putchar(' ');
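The test-statx.c change consults stx_attributes_mask alongside stx_attributes, so each bit now renders three ways: '.' when the filesystem does not report the attribute at all, '-' when it is supported but clear, and its letter when set. The decision table in a compilable nutshell:

#include <stdio.h>

static void render(unsigned long long attrs, unsigned long long mask,
                   const char *names)
{
        for (int bit = 7; bit >= 0; bit--) {
                if (!(mask >> bit & 1))
                        putchar('.');                 /* not supported */
                else if (attrs >> bit & 1)
                        putchar(names[7 - bit]);      /* supported and set */
                else
                        putchar('-');                 /* supported, not set */
        }
        putchar('\n');
}

int main(void)
{
        render(0x05, 0x0f, "abcdefgh");  /* prints "....-f-h" */
        return 0;
}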
diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include
index d6ca649cb0e9..afe3fd3af1e4 100644
--- a/scripts/Kbuild.include
+++ b/scripts/Kbuild.include
@@ -148,6 +148,10 @@ cc-fullversion = $(shell $(CONFIG_SHELL) \
148# Usage: EXTRA_CFLAGS += $(call cc-ifversion, -lt, 0402, -O1) 148# Usage: EXTRA_CFLAGS += $(call cc-ifversion, -lt, 0402, -O1)
149cc-ifversion = $(shell [ $(cc-version) $(1) $(2) ] && echo $(3) || echo $(4)) 149cc-ifversion = $(shell [ $(cc-version) $(1) $(2) ] && echo $(3) || echo $(4))
150 150
151# cc-if-fullversion
152# Usage: EXTRA_CFLAGS += $(call cc-if-fullversion, -lt, 040502, -O1)
153cc-if-fullversion = $(shell [ $(cc-fullversion) $(1) $(2) ] && echo $(3) || echo $(4))
154
151# cc-ldoption 155# cc-ldoption
152# Usage: ldflags += $(call cc-ldoption, -Wl$(comma)--hash-style=both) 156# Usage: ldflags += $(call cc-ldoption, -Wl$(comma)--hash-style=both)
153cc-ldoption = $(call try-run,\ 157cc-ldoption = $(call try-run,\
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index 0a07f9014944..7234e61e7ce3 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -155,7 +155,7 @@ else
155# $(call addtree,-I$(obj)) locates .h files in srctree, from generated .c files 155# $(call addtree,-I$(obj)) locates .h files in srctree, from generated .c files
156# and locates generated .h files 156# and locates generated .h files
157# FIXME: Replace both with specific CFLAGS* statements in the makefiles 157# FIXME: Replace both with specific CFLAGS* statements in the makefiles
158__c_flags = $(if $(obj),-I$(srctree)/$(src) -I$(obj)) \ 158__c_flags = $(if $(obj),$(call addtree,-I$(src)) -I$(obj)) \
159 $(call flags,_c_flags) 159 $(call flags,_c_flags)
160__a_flags = $(call flags,_a_flags) 160__a_flags = $(call flags,_a_flags)
161__cpp_flags = $(call flags,_cpp_flags) 161__cpp_flags = $(call flags,_cpp_flags)
diff --git a/scripts/kconfig/gconf.c b/scripts/kconfig/gconf.c
index 26d208b435a0..cfddddb9c9d7 100644
--- a/scripts/kconfig/gconf.c
+++ b/scripts/kconfig/gconf.c
@@ -914,7 +914,7 @@ on_treeview2_button_press_event(GtkWidget * widget,
914 current = menu; 914 current = menu;
915 display_tree_part(); 915 display_tree_part();
916 gtk_widget_set_sensitive(back_btn, TRUE); 916 gtk_widget_set_sensitive(back_btn, TRUE);
917 } else if ((col == COL_OPTION)) { 917 } else if (col == COL_OPTION) {
918 toggle_sym_value(menu); 918 toggle_sym_value(menu);
919 gtk_tree_view_expand_row(view, path, TRUE); 919 gtk_tree_view_expand_row(view, path, TRUE);
920 } 920 }
diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
index 4c935202ce23..f3b1d7f50b81 100644
--- a/sound/core/seq/seq_clientmgr.c
+++ b/sound/core/seq/seq_clientmgr.c
@@ -1832,6 +1832,7 @@ static int snd_seq_ioctl_set_client_pool(struct snd_seq_client *client,
1832 info->output_pool != client->pool->size)) { 1832 info->output_pool != client->pool->size)) {
1833 if (snd_seq_write_pool_allocated(client)) { 1833 if (snd_seq_write_pool_allocated(client)) {
1834 /* remove all existing cells */ 1834 /* remove all existing cells */
1835 snd_seq_pool_mark_closing(client->pool);
1835 snd_seq_queue_client_leave_cells(client->number); 1836 snd_seq_queue_client_leave_cells(client->number);
1836 snd_seq_pool_done(client->pool); 1837 snd_seq_pool_done(client->pool);
1837 } 1838 }
diff --git a/sound/core/seq/seq_fifo.c b/sound/core/seq/seq_fifo.c
index 448efd4e980e..01c4cfe30c9f 100644
--- a/sound/core/seq/seq_fifo.c
+++ b/sound/core/seq/seq_fifo.c
@@ -72,6 +72,9 @@ void snd_seq_fifo_delete(struct snd_seq_fifo **fifo)
72 return; 72 return;
73 *fifo = NULL; 73 *fifo = NULL;
74 74
75 if (f->pool)
76 snd_seq_pool_mark_closing(f->pool);
77
75 snd_seq_fifo_clear(f); 78 snd_seq_fifo_clear(f);
76 79
77 /* wake up clients if any */ 80 /* wake up clients if any */
@@ -264,6 +267,10 @@ int snd_seq_fifo_resize(struct snd_seq_fifo *f, int poolsize)
264 /* NOTE: overflow flag is not cleared */ 267 /* NOTE: overflow flag is not cleared */
265 spin_unlock_irqrestore(&f->lock, flags); 268 spin_unlock_irqrestore(&f->lock, flags);
266 269
270 /* close the old pool and wait until all users are gone */
271 snd_seq_pool_mark_closing(oldpool);
272 snd_use_lock_sync(&f->use_lock);
273
267 /* release cells in old pool */ 274 /* release cells in old pool */
268 for (cell = oldhead; cell; cell = next) { 275 for (cell = oldhead; cell; cell = next) {
269 next = cell->next; 276 next = cell->next;
diff --git a/sound/core/seq/seq_memory.c b/sound/core/seq/seq_memory.c
index 1a1acf3ddda4..d4c61ec9be13 100644
--- a/sound/core/seq/seq_memory.c
+++ b/sound/core/seq/seq_memory.c
@@ -415,6 +415,18 @@ int snd_seq_pool_init(struct snd_seq_pool *pool)
415 return 0; 415 return 0;
416} 416}
417 417
418/* refuse the further insertion to the pool */
419void snd_seq_pool_mark_closing(struct snd_seq_pool *pool)
420{
421 unsigned long flags;
422
423 if (snd_BUG_ON(!pool))
424 return;
425 spin_lock_irqsave(&pool->lock, flags);
426 pool->closing = 1;
427 spin_unlock_irqrestore(&pool->lock, flags);
428}
429
418/* remove events */ 430/* remove events */
419int snd_seq_pool_done(struct snd_seq_pool *pool) 431int snd_seq_pool_done(struct snd_seq_pool *pool)
420{ 432{
@@ -425,10 +437,6 @@ int snd_seq_pool_done(struct snd_seq_pool *pool)
425 return -EINVAL; 437 return -EINVAL;
426 438
427 /* wait for closing all threads */ 439 /* wait for closing all threads */
428 spin_lock_irqsave(&pool->lock, flags);
429 pool->closing = 1;
430 spin_unlock_irqrestore(&pool->lock, flags);
431
432 if (waitqueue_active(&pool->output_sleep)) 440 if (waitqueue_active(&pool->output_sleep))
433 wake_up(&pool->output_sleep); 441 wake_up(&pool->output_sleep);
434 442
@@ -485,6 +493,7 @@ int snd_seq_pool_delete(struct snd_seq_pool **ppool)
485 *ppool = NULL; 493 *ppool = NULL;
486 if (pool == NULL) 494 if (pool == NULL)
487 return 0; 495 return 0;
496 snd_seq_pool_mark_closing(pool);
488 snd_seq_pool_done(pool); 497 snd_seq_pool_done(pool);
489 kfree(pool); 498 kfree(pool);
490 return 0; 499 return 0;
diff --git a/sound/core/seq/seq_memory.h b/sound/core/seq/seq_memory.h
index 4a2ec779b8a7..32f959c17786 100644
--- a/sound/core/seq/seq_memory.h
+++ b/sound/core/seq/seq_memory.h
@@ -84,6 +84,7 @@ static inline int snd_seq_total_cells(struct snd_seq_pool *pool)
84int snd_seq_pool_init(struct snd_seq_pool *pool); 84int snd_seq_pool_init(struct snd_seq_pool *pool);
85 85
86/* done pool - free events */ 86/* done pool - free events */
87void snd_seq_pool_mark_closing(struct snd_seq_pool *pool);
87int snd_seq_pool_done(struct snd_seq_pool *pool); 88int snd_seq_pool_done(struct snd_seq_pool *pool);
88 89
89/* create pool */ 90/* create pool */
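All four ALSA sequencer hunks above are one fix: snd_seq_pool_mark_closing() is split out of snd_seq_pool_done() and called before any draining starts, so concurrent writers observe the closing flag and stop inserting cells while the pool is being torn down. The shape of that two-phase teardown, reduced to a standalone sketch:

#include <pthread.h>
#include <stdio.h>

struct pool_stub {
        pthread_mutex_t lock;
        int closing;
        int cells;
};

static void pool_mark_closing(struct pool_stub *p)
{
        pthread_mutex_lock(&p->lock);
        p->closing = 1;                  /* refuse further insertion */
        pthread_mutex_unlock(&p->lock);
}

static int pool_insert(struct pool_stub *p)
{
        int ok = 0;

        pthread_mutex_lock(&p->lock);
        if (!p->closing) {
                p->cells++;
                ok = 1;
        }
        pthread_mutex_unlock(&p->lock);
        return ok;
}

static void pool_done(struct pool_stub *p)
{
        p->cells = 0;                    /* drain; no new cells can appear */
}

int main(void)
{
        struct pool_stub p = { PTHREAD_MUTEX_INITIALIZER, 0, 0 };

        pool_insert(&p);
        pool_mark_closing(&p);           /* phase 1: stop writers */
        printf("insert after closing: %d\n", pool_insert(&p)); /* 0 */
        pool_done(&p);                   /* phase 2: drain safely */
        return 0;
}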
diff --git a/sound/pci/ctxfi/cthw20k1.c b/sound/pci/ctxfi/cthw20k1.c
index ab4cdab5cfa5..79edd88d5cd0 100644
--- a/sound/pci/ctxfi/cthw20k1.c
+++ b/sound/pci/ctxfi/cthw20k1.c
@@ -1905,7 +1905,7 @@ static int hw_card_start(struct hw *hw)
1905 return err; 1905 return err;
1906 1906
1907 /* Set DMA transfer mask */ 1907 /* Set DMA transfer mask */
1908 if (dma_set_mask(&pci->dev, DMA_BIT_MASK(dma_bits))) { 1908 if (!dma_set_mask(&pci->dev, DMA_BIT_MASK(dma_bits))) {
1909 dma_set_coherent_mask(&pci->dev, DMA_BIT_MASK(dma_bits)); 1909 dma_set_coherent_mask(&pci->dev, DMA_BIT_MASK(dma_bits));
1910 } else { 1910 } else {
1911 dma_set_mask(&pci->dev, DMA_BIT_MASK(32)); 1911 dma_set_mask(&pci->dev, DMA_BIT_MASK(32));
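The cthw20k1 fix is a polarity bug: dma_set_mask() returns 0 on success, so the unfixed `if (dma_set_mask(...))` took the wide-mask branch exactly when the mask had been rejected. The corrected pattern, with a hypothetical 0-on-success stand-in:

#include <stdio.h>

static int set_mask(int bits)            /* 0 = success, like dma_set_mask() */
{
        return bits <= 32 ? 0 : -5;      /* pretend only <=32-bit masks work */
}

int main(void)
{
        int dma_bits = 64;

        if (!set_mask(dma_bits)) {       /* negate the 0-on-success return */
                printf("using %d-bit DMA mask\n", dma_bits);
        } else {
                set_mask(32);            /* fall back to the 32-bit mask */
                printf("fell back to 32-bit DMA mask\n");
        }
        return 0;
}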
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index c15c51bea26d..69266b8ea2ad 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -261,6 +261,7 @@ enum {
261 CXT_FIXUP_HP_530, 261 CXT_FIXUP_HP_530,
262 CXT_FIXUP_CAP_MIX_AMP_5047, 262 CXT_FIXUP_CAP_MIX_AMP_5047,
263 CXT_FIXUP_MUTE_LED_EAPD, 263 CXT_FIXUP_MUTE_LED_EAPD,
264 CXT_FIXUP_HP_DOCK,
264 CXT_FIXUP_HP_SPECTRE, 265 CXT_FIXUP_HP_SPECTRE,
265 CXT_FIXUP_HP_GATE_MIC, 266 CXT_FIXUP_HP_GATE_MIC,
266}; 267};
@@ -778,6 +779,14 @@ static const struct hda_fixup cxt_fixups[] = {
778 .type = HDA_FIXUP_FUNC, 779 .type = HDA_FIXUP_FUNC,
779 .v.func = cxt_fixup_mute_led_eapd, 780 .v.func = cxt_fixup_mute_led_eapd,
780 }, 781 },
782 [CXT_FIXUP_HP_DOCK] = {
783 .type = HDA_FIXUP_PINS,
784 .v.pins = (const struct hda_pintbl[]) {
785 { 0x16, 0x21011020 }, /* line-out */
786 { 0x18, 0x2181103f }, /* line-in */
787 { }
788 }
789 },
781 [CXT_FIXUP_HP_SPECTRE] = { 790 [CXT_FIXUP_HP_SPECTRE] = {
782 .type = HDA_FIXUP_PINS, 791 .type = HDA_FIXUP_PINS,
783 .v.pins = (const struct hda_pintbl[]) { 792 .v.pins = (const struct hda_pintbl[]) {
@@ -839,6 +848,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
839 SND_PCI_QUIRK(0x1025, 0x0543, "Acer Aspire One 522", CXT_FIXUP_STEREO_DMIC), 848 SND_PCI_QUIRK(0x1025, 0x0543, "Acer Aspire One 522", CXT_FIXUP_STEREO_DMIC),
840 SND_PCI_QUIRK(0x1025, 0x054c, "Acer Aspire 3830TG", CXT_FIXUP_ASPIRE_DMIC), 849 SND_PCI_QUIRK(0x1025, 0x054c, "Acer Aspire 3830TG", CXT_FIXUP_ASPIRE_DMIC),
841 SND_PCI_QUIRK(0x1025, 0x054f, "Acer Aspire 4830T", CXT_FIXUP_ASPIRE_DMIC), 850 SND_PCI_QUIRK(0x1025, 0x054f, "Acer Aspire 4830T", CXT_FIXUP_ASPIRE_DMIC),
851 SND_PCI_QUIRK(0x103c, 0x8079, "HP EliteBook 840 G3", CXT_FIXUP_HP_DOCK),
842 SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE), 852 SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE),
843 SND_PCI_QUIRK(0x103c, 0x8115, "HP Z1 Gen3", CXT_FIXUP_HP_GATE_MIC), 853 SND_PCI_QUIRK(0x103c, 0x8115, "HP Z1 Gen3", CXT_FIXUP_HP_GATE_MIC),
844 SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN), 854 SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN),
@@ -871,6 +881,7 @@ static const struct hda_model_fixup cxt5066_fixup_models[] = {
871 { .id = CXT_PINCFG_LEMOTE_A1205, .name = "lemote-a1205" }, 881 { .id = CXT_PINCFG_LEMOTE_A1205, .name = "lemote-a1205" },
872 { .id = CXT_FIXUP_OLPC_XO, .name = "olpc-xo" }, 882 { .id = CXT_FIXUP_OLPC_XO, .name = "olpc-xo" },
873 { .id = CXT_FIXUP_MUTE_LED_EAPD, .name = "mute-led-eapd" }, 883 { .id = CXT_FIXUP_MUTE_LED_EAPD, .name = "mute-led-eapd" },
884 { .id = CXT_FIXUP_HP_DOCK, .name = "hp-dock" },
874 {} 885 {}
875}; 886};
876 887
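The new CXT_FIXUP_HP_DOCK entry overrides two pin default configurations; its line-out/line-in comments follow from the "default device" field in bits 23:20 of each dword. A small decoder for just that field, assuming the standard HDA default-configuration layout:

#include <stdio.h>

static const char *defcfg_device(unsigned int cfg)
{
        static const char *names[16] = {
                [0x0] = "Line Out",   [0x1] = "Speaker",
                [0x2] = "HP Out",     [0x3] = "CD",
                [0x4] = "SPDIF Out",  [0x5] = "Digital Other Out",
                [0x6] = "Modem Line", [0x7] = "Modem Handset",
                [0x8] = "Line In",    [0x9] = "Aux",
                [0xa] = "Mic In",     [0xb] = "Telephony",
                [0xc] = "SPDIF In",   [0xd] = "Digital Other In",
                [0xe] = "Reserved",   [0xf] = "Other",
        };

        return names[(cfg >> 20) & 0xf];
}

int main(void)
{
        printf("0x21011020 -> %s\n", defcfg_device(0x21011020)); /* Line Out */
        printf("0x2181103f -> %s\n", defcfg_device(0x2181103f)); /* Line In */
        return 0;
}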
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 4e112221d825..299835d1fbaa 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -4847,6 +4847,7 @@ enum {
4847 ALC286_FIXUP_HP_GPIO_LED, 4847 ALC286_FIXUP_HP_GPIO_LED,
4848 ALC280_FIXUP_HP_GPIO2_MIC_HOTKEY, 4848 ALC280_FIXUP_HP_GPIO2_MIC_HOTKEY,
4849 ALC280_FIXUP_HP_DOCK_PINS, 4849 ALC280_FIXUP_HP_DOCK_PINS,
4850 ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED,
4850 ALC280_FIXUP_HP_9480M, 4851 ALC280_FIXUP_HP_9480M,
4851 ALC288_FIXUP_DELL_HEADSET_MODE, 4852 ALC288_FIXUP_DELL_HEADSET_MODE,
4852 ALC288_FIXUP_DELL1_MIC_NO_PRESENCE, 4853 ALC288_FIXUP_DELL1_MIC_NO_PRESENCE,
@@ -4857,6 +4858,7 @@ enum {
4857 ALC292_FIXUP_DISABLE_AAMIX, 4858 ALC292_FIXUP_DISABLE_AAMIX,
4858 ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK, 4859 ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK,
4859 ALC298_FIXUP_DELL1_MIC_NO_PRESENCE, 4860 ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
4861 ALC298_FIXUP_DELL_AIO_MIC_NO_PRESENCE,
4860 ALC275_FIXUP_DELL_XPS, 4862 ALC275_FIXUP_DELL_XPS,
4861 ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE, 4863 ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE,
4862 ALC293_FIXUP_LENOVO_SPK_NOISE, 4864 ALC293_FIXUP_LENOVO_SPK_NOISE,
@@ -5388,6 +5390,16 @@ static const struct hda_fixup alc269_fixups[] = {
5388 .chained = true, 5390 .chained = true,
5389 .chain_id = ALC280_FIXUP_HP_GPIO4 5391 .chain_id = ALC280_FIXUP_HP_GPIO4
5390 }, 5392 },
5393 [ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED] = {
5394 .type = HDA_FIXUP_PINS,
5395 .v.pins = (const struct hda_pintbl[]) {
5396 { 0x1b, 0x21011020 }, /* line-out */
5397 { 0x18, 0x2181103f }, /* line-in */
5398 { },
5399 },
5400 .chained = true,
5401 .chain_id = ALC269_FIXUP_HP_GPIO_MIC1_LED
5402 },
5391 [ALC280_FIXUP_HP_9480M] = { 5403 [ALC280_FIXUP_HP_9480M] = {
5392 .type = HDA_FIXUP_FUNC, 5404 .type = HDA_FIXUP_FUNC,
5393 .v.func = alc280_fixup_hp_9480m, 5405 .v.func = alc280_fixup_hp_9480m,
@@ -5459,6 +5471,15 @@ static const struct hda_fixup alc269_fixups[] = {
5459 .chained = true, 5471 .chained = true,
5460 .chain_id = ALC269_FIXUP_HEADSET_MODE 5472 .chain_id = ALC269_FIXUP_HEADSET_MODE
5461 }, 5473 },
5474 [ALC298_FIXUP_DELL_AIO_MIC_NO_PRESENCE] = {
5475 .type = HDA_FIXUP_PINS,
5476 .v.pins = (const struct hda_pintbl[]) {
5477 { 0x18, 0x01a1913c }, /* use as headset mic, without its own jack detect */
5478 { }
5479 },
5480 .chained = true,
5481 .chain_id = ALC269_FIXUP_HEADSET_MODE
5482 },
5462 [ALC275_FIXUP_DELL_XPS] = { 5483 [ALC275_FIXUP_DELL_XPS] = {
5463 .type = HDA_FIXUP_VERBS, 5484 .type = HDA_FIXUP_VERBS,
5464 .v.verbs = (const struct hda_verb[]) { 5485 .v.verbs = (const struct hda_verb[]) {
@@ -5531,7 +5552,7 @@ static const struct hda_fixup alc269_fixups[] = {
5531 .type = HDA_FIXUP_FUNC, 5552 .type = HDA_FIXUP_FUNC,
5532 .v.func = alc298_fixup_speaker_volume, 5553 .v.func = alc298_fixup_speaker_volume,
5533 .chained = true, 5554 .chained = true,
5534 .chain_id = ALC298_FIXUP_DELL1_MIC_NO_PRESENCE, 5555 .chain_id = ALC298_FIXUP_DELL_AIO_MIC_NO_PRESENCE,
5535 }, 5556 },
5536 [ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER] = { 5557 [ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER] = {
5537 .type = HDA_FIXUP_PINS, 5558 .type = HDA_FIXUP_PINS,
@@ -5647,7 +5668,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5647 SND_PCI_QUIRK(0x103c, 0x2256, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), 5668 SND_PCI_QUIRK(0x103c, 0x2256, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
5648 SND_PCI_QUIRK(0x103c, 0x2257, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), 5669 SND_PCI_QUIRK(0x103c, 0x2257, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
5649 SND_PCI_QUIRK(0x103c, 0x2259, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), 5670 SND_PCI_QUIRK(0x103c, 0x2259, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
5650 SND_PCI_QUIRK(0x103c, 0x225a, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), 5671 SND_PCI_QUIRK(0x103c, 0x225a, "HP", ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED),
5651 SND_PCI_QUIRK(0x103c, 0x2260, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 5672 SND_PCI_QUIRK(0x103c, 0x2260, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
5652 SND_PCI_QUIRK(0x103c, 0x2263, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 5673 SND_PCI_QUIRK(0x103c, 0x2263, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
5653 SND_PCI_QUIRK(0x103c, 0x2264, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 5674 SND_PCI_QUIRK(0x103c, 0x2264, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
@@ -5816,6 +5837,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
5816 {.id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC, .name = "headset-mode-no-hp-mic"}, 5837 {.id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC, .name = "headset-mode-no-hp-mic"},
5817 {.id = ALC269_FIXUP_LENOVO_DOCK, .name = "lenovo-dock"}, 5838 {.id = ALC269_FIXUP_LENOVO_DOCK, .name = "lenovo-dock"},
5818 {.id = ALC269_FIXUP_HP_GPIO_LED, .name = "hp-gpio-led"}, 5839 {.id = ALC269_FIXUP_HP_GPIO_LED, .name = "hp-gpio-led"},
5840 {.id = ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED, .name = "hp-dock-gpio-mic1-led"},
5819 {.id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "dell-headset-multi"}, 5841 {.id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "dell-headset-multi"},
5820 {.id = ALC269_FIXUP_DELL2_MIC_NO_PRESENCE, .name = "dell-headset-dock"}, 5842 {.id = ALC269_FIXUP_DELL2_MIC_NO_PRESENCE, .name = "dell-headset-dock"},
5821 {.id = ALC283_FIXUP_CHROME_BOOK, .name = "alc283-dac-wcaps"}, 5843 {.id = ALC283_FIXUP_CHROME_BOOK, .name = "alc283-dac-wcaps"},
@@ -6090,6 +6112,8 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
6090 ALC295_STANDARD_PINS, 6112 ALC295_STANDARD_PINS,
6091 {0x17, 0x21014040}, 6113 {0x17, 0x21014040},
6092 {0x18, 0x21a19050}), 6114 {0x18, 0x21a19050}),
6115 SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
6116 ALC295_STANDARD_PINS),
6093 SND_HDA_PIN_QUIRK(0x10ec0298, 0x1028, "Dell", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE, 6117 SND_HDA_PIN_QUIRK(0x10ec0298, 0x1028, "Dell", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
6094 ALC298_STANDARD_PINS, 6118 ALC298_STANDARD_PINS,
6095 {0x17, 0x90170110}), 6119 {0x17, 0x90170110}),
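Several of the patch_realtek.c additions lean on fixup chaining: the new ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED applies its own pin overrides and then, via .chained/.chain_id, pulls in ALC269_FIXUP_HP_GPIO_MIC1_LED as well. A sketch of how such a chain walk behaves (hypothetical table, not the driver's code):

#include <stdio.h>

struct fixup_stub {
        const char *name;
        int chained;
        int chain_id;
};

static void apply_fixup(const struct fixup_stub *tbl, int id)
{
        for (;;) {
                printf("applying %s\n", tbl[id].name);
                if (!tbl[id].chained)
                        break;
                id = tbl[id].chain_id;   /* follow the chain to the next fixup */
        }
}

int main(void)
{
        enum { DOCK_PINS, GPIO_MIC1_LED };
        const struct fixup_stub tbl[] = {
                [DOCK_PINS]     = { "hp-dock-pins",      1, GPIO_MIC1_LED },
                [GPIO_MIC1_LED] = { "hp-gpio-mic1-led",  0, 0 },
        };

        apply_fixup(tbl, DOCK_PINS);     /* prints both entries, in order */
        return 0;
}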
diff --git a/sound/soc/atmel/atmel-classd.c b/sound/soc/atmel/atmel-classd.c
index 89ac5f5a93eb..7ae46c2647d4 100644
--- a/sound/soc/atmel/atmel-classd.c
+++ b/sound/soc/atmel/atmel-classd.c
@@ -349,7 +349,7 @@ static int atmel_classd_codec_dai_digital_mute(struct snd_soc_dai *codec_dai,
349} 349}
350 350
351#define CLASSD_ACLK_RATE_11M2896_MPY_8 (112896 * 100 * 8) 351#define CLASSD_ACLK_RATE_11M2896_MPY_8 (112896 * 100 * 8)
352#define CLASSD_ACLK_RATE_12M288_MPY_8 (12228 * 1000 * 8) 352#define CLASSD_ACLK_RATE_12M288_MPY_8 (12288 * 1000 * 8)
353 353
354static struct { 354static struct {
355 int rate; 355 int rate;
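The atmel-classd change is a transposed-digit constant: the 12.288 MHz clock-rate family had been computed from 12228. The difference is easy to see numerically:

#include <stdio.h>

int main(void)
{
        printf("%d\n", 12288 * 1000 * 8);  /* 98304000 = 8 x 12.288 MHz */
        printf("%d\n", 12228 * 1000 * 8);  /* 97824000: the old typo'd rate */
        return 0;
}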
diff --git a/sound/soc/codecs/hdac_hdmi.c b/sound/soc/codecs/hdac_hdmi.c
index 78fca8acd3ec..fd272a40485b 100644
--- a/sound/soc/codecs/hdac_hdmi.c
+++ b/sound/soc/codecs/hdac_hdmi.c
@@ -1534,21 +1534,20 @@ static void hdac_hdmi_eld_notify_cb(void *aptr, int port, int pipe)
1534 pin->mst_capable = false; 1534 pin->mst_capable = false;
1535 /* if not MST, default is port[0] */ 1535 /* if not MST, default is port[0] */
1536 hport = &pin->ports[0]; 1536 hport = &pin->ports[0];
1537 goto out;
1538 } else { 1537 } else {
1539 for (i = 0; i < pin->num_ports; i++) { 1538 for (i = 0; i < pin->num_ports; i++) {
1540 pin->mst_capable = true; 1539 pin->mst_capable = true;
1541 if (pin->ports[i].id == pipe) { 1540 if (pin->ports[i].id == pipe) {
1542 hport = &pin->ports[i]; 1541 hport = &pin->ports[i];
1543 goto out; 1542 break;
1544 } 1543 }
1545 } 1544 }
1546 } 1545 }
1546
1547 if (hport)
1548 hdac_hdmi_present_sense(pin, hport);
1547 } 1549 }
1548 1550
1549out:
1550 if (pin && hport)
1551 hdac_hdmi_present_sense(pin, hport);
1552} 1551}
1553 1552
1554static struct i915_audio_component_audio_ops aops = { 1553static struct i915_audio_component_audio_ops aops = {
@@ -1998,7 +1997,7 @@ static int hdac_hdmi_dev_remove(struct hdac_ext_device *edev)
1998 struct hdac_hdmi_pin *pin, *pin_next; 1997 struct hdac_hdmi_pin *pin, *pin_next;
1999 struct hdac_hdmi_cvt *cvt, *cvt_next; 1998 struct hdac_hdmi_cvt *cvt, *cvt_next;
2000 struct hdac_hdmi_pcm *pcm, *pcm_next; 1999 struct hdac_hdmi_pcm *pcm, *pcm_next;
2001 struct hdac_hdmi_port *port; 2000 struct hdac_hdmi_port *port, *port_next;
2002 int i; 2001 int i;
2003 2002
2004 snd_soc_unregister_codec(&edev->hdac.dev); 2003 snd_soc_unregister_codec(&edev->hdac.dev);
@@ -2008,8 +2007,9 @@ static int hdac_hdmi_dev_remove(struct hdac_ext_device *edev)
2008 if (list_empty(&pcm->port_list)) 2007 if (list_empty(&pcm->port_list))
2009 continue; 2008 continue;
2010 2009
2011 list_for_each_entry(port, &pcm->port_list, head) 2010 list_for_each_entry_safe(port, port_next,
2012 port = NULL; 2011 &pcm->port_list, head)
2012 list_del(&port->head);
2013 2013
2014 list_del(&pcm->head); 2014 list_del(&pcm->head);
2015 kfree(pcm); 2015 kfree(pcm);
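The hdac_hdmi_dev_remove() hunk replaces a loop that merely NULLed its cursor with list_for_each_entry_safe() plus list_del(), the canonical way to unlink entries while iterating: the next pointer is captured before the current node is removed. The same idea on a plain singly linked list:

#include <stdio.h>
#include <stdlib.h>

struct node { struct node *next; int id; };

static void delete_all(struct node **head)
{
        struct node *n = *head, *next;

        for (; n; n = next) {   /* "safe" walk: save next before freeing */
                next = n->next;
                printf("freeing %d\n", n->id);
                free(n);
        }
        *head = NULL;
}

int main(void)
{
        struct node *head = NULL;

        for (int i = 0; i < 3; i++) {
                struct node *n = malloc(sizeof(*n));
                n->id = i;
                n->next = head;
                head = n;
        }
        delete_all(&head);
        return 0;
}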
diff --git a/sound/soc/codecs/rt5665.c b/sound/soc/codecs/rt5665.c
index 324461e985b3..476135ec5726 100644
--- a/sound/soc/codecs/rt5665.c
+++ b/sound/soc/codecs/rt5665.c
@@ -1241,7 +1241,7 @@ static irqreturn_t rt5665_irq(int irq, void *data)
1241static void rt5665_jd_check_handler(struct work_struct *work) 1241static void rt5665_jd_check_handler(struct work_struct *work)
1242{ 1242{
1243 struct rt5665_priv *rt5665 = container_of(work, struct rt5665_priv, 1243 struct rt5665_priv *rt5665 = container_of(work, struct rt5665_priv,
1244 calibrate_work.work); 1244 jd_check_work.work);
1245 1245
1246 if (snd_soc_read(rt5665->codec, RT5665_AJD1_CTRL) & 0x0010) { 1246 if (snd_soc_read(rt5665->codec, RT5665_AJD1_CTRL) & 0x0010) {
1247 /* jack out */ 1247 /* jack out */
@@ -2252,7 +2252,7 @@ static const char * const rt5665_if2_1_adc_in_src[] = {
2252 2252
2253static const SOC_ENUM_SINGLE_DECL( 2253static const SOC_ENUM_SINGLE_DECL(
2254 rt5665_if2_1_adc_in_enum, RT5665_DIG_INF2_DATA, 2254 rt5665_if2_1_adc_in_enum, RT5665_DIG_INF2_DATA,
2255 RT5665_IF3_ADC_IN_SFT, rt5665_if2_1_adc_in_src); 2255 RT5665_IF2_1_ADC_IN_SFT, rt5665_if2_1_adc_in_src);
2256 2256
2257static const struct snd_kcontrol_new rt5665_if2_1_adc_in_mux = 2257static const struct snd_kcontrol_new rt5665_if2_1_adc_in_mux =
2258 SOC_DAPM_ENUM("IF2_1 ADC IN Source", rt5665_if2_1_adc_in_enum); 2258 SOC_DAPM_ENUM("IF2_1 ADC IN Source", rt5665_if2_1_adc_in_enum);
@@ -3178,6 +3178,9 @@ static const struct snd_soc_dapm_route rt5665_dapm_routes[] = {
3178 {"DAC Mono Right Filter", NULL, "DAC Mono R ASRC", is_using_asrc}, 3178 {"DAC Mono Right Filter", NULL, "DAC Mono R ASRC", is_using_asrc},
3179 {"DAC Stereo1 Filter", NULL, "DAC STO1 ASRC", is_using_asrc}, 3179 {"DAC Stereo1 Filter", NULL, "DAC STO1 ASRC", is_using_asrc},
3180 {"DAC Stereo2 Filter", NULL, "DAC STO2 ASRC", is_using_asrc}, 3180 {"DAC Stereo2 Filter", NULL, "DAC STO2 ASRC", is_using_asrc},
3181 {"I2S1 ASRC", NULL, "CLKDET"},
3182 {"I2S2 ASRC", NULL, "CLKDET"},
3183 {"I2S3 ASRC", NULL, "CLKDET"},
3181 3184
3182 /*Vref*/ 3185 /*Vref*/
3183 {"Mic Det Power", NULL, "Vref2"}, 3186 {"Mic Det Power", NULL, "Vref2"},
@@ -3912,6 +3915,7 @@ static const struct snd_soc_dapm_route rt5665_dapm_routes[] = {
3912 {"Mono MIX", "MONOVOL Switch", "MONOVOL"}, 3915 {"Mono MIX", "MONOVOL Switch", "MONOVOL"},
3913 {"Mono Amp", NULL, "Mono MIX"}, 3916 {"Mono Amp", NULL, "Mono MIX"},
3914 {"Mono Amp", NULL, "Vref2"}, 3917 {"Mono Amp", NULL, "Vref2"},
3918 {"Mono Amp", NULL, "Vref3"},
3915 {"Mono Amp", NULL, "CLKDET SYS"}, 3919 {"Mono Amp", NULL, "CLKDET SYS"},
3916 {"Mono Amp", NULL, "CLKDET MONO"}, 3920 {"Mono Amp", NULL, "CLKDET MONO"},
3917 {"Mono Playback", "Switch", "Mono Amp"}, 3921 {"Mono Playback", "Switch", "Mono Amp"},
@@ -4798,7 +4802,7 @@ static int rt5665_i2c_probe(struct i2c_client *i2c,
4798 /* Enhance performance*/ 4802 /* Enhance performance*/
4799 regmap_update_bits(rt5665->regmap, RT5665_PWR_ANLG_1, 4803 regmap_update_bits(rt5665->regmap, RT5665_PWR_ANLG_1,
4800 RT5665_HP_DRIVER_MASK | RT5665_LDO1_DVO_MASK, 4804 RT5665_HP_DRIVER_MASK | RT5665_LDO1_DVO_MASK,
4801 RT5665_HP_DRIVER_5X | RT5665_LDO1_DVO_09); 4805 RT5665_HP_DRIVER_5X | RT5665_LDO1_DVO_12);
4802 4806
4803 INIT_DELAYED_WORK(&rt5665->jack_detect_work, 4807 INIT_DELAYED_WORK(&rt5665->jack_detect_work,
4804 rt5665_jack_detect_handler); 4808 rt5665_jack_detect_handler);
diff --git a/sound/soc/codecs/rt5665.h b/sound/soc/codecs/rt5665.h
index 12f7080a0d3c..a30f5e6d0628 100644
--- a/sound/soc/codecs/rt5665.h
+++ b/sound/soc/codecs/rt5665.h
@@ -1106,7 +1106,7 @@
1106#define RT5665_HP_DRIVER_MASK (0x3 << 2) 1106#define RT5665_HP_DRIVER_MASK (0x3 << 2)
1107#define RT5665_HP_DRIVER_1X (0x0 << 2) 1107#define RT5665_HP_DRIVER_1X (0x0 << 2)
1108#define RT5665_HP_DRIVER_3X (0x1 << 2) 1108#define RT5665_HP_DRIVER_3X (0x1 << 2)
1109#define RT5665_HP_DRIVER_5X (0x2 << 2) 1109#define RT5665_HP_DRIVER_5X (0x3 << 2)
1110#define RT5665_LDO1_DVO_MASK (0x3) 1110#define RT5665_LDO1_DVO_MASK (0x3)
1111#define RT5665_LDO1_DVO_09 (0x0) 1111#define RT5665_LDO1_DVO_09 (0x0)
1112#define RT5665_LDO1_DVO_10 (0x1) 1112#define RT5665_LDO1_DVO_10 (0x1)
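The first rt5665 hunk matters because container_of() arithmetic is only correct when the named member is the one the callback was actually scheduled on; computing from calibrate_work inside the jd_check handler yields a pointer skewed into the wrong part of the private struct. Illustrated standalone:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct work { int pending; };

struct priv {
        struct work calibrate_work;
        struct work jd_check_work;
        int id;
};

static void jd_check_handler(struct work *w)
{
        /* must name the member this handler is attached to */
        struct priv *p = container_of(w, struct priv, jd_check_work);

        printf("priv id = %d\n", p->id);
}

int main(void)
{
        struct priv p = { .id = 42 };

        jd_check_handler(&p.jd_check_work);  /* prints 42; the wrong member
                                                name would misalign p */
        return 0;
}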
diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
index d151224ffcca..bbdb72f73df1 100644
--- a/sound/soc/codecs/wm_adsp.c
+++ b/sound/soc/codecs/wm_adsp.c
@@ -899,7 +899,10 @@ static int wm_coeff_put(struct snd_kcontrol *kctl,
899 899
900 mutex_lock(&ctl->dsp->pwr_lock); 900 mutex_lock(&ctl->dsp->pwr_lock);
901 901
902 memcpy(ctl->cache, p, ctl->len); 902 if (ctl->flags & WMFW_CTL_FLAG_VOLATILE)
903 ret = -EPERM;
904 else
905 memcpy(ctl->cache, p, ctl->len);
903 906
904 ctl->set = 1; 907 ctl->set = 1;
905 if (ctl->enabled && ctl->dsp->running) 908 if (ctl->enabled && ctl->dsp->running)
@@ -926,6 +929,8 @@ static int wm_coeff_tlv_put(struct snd_kcontrol *kctl,
926 ctl->set = 1; 929 ctl->set = 1;
927 if (ctl->enabled && ctl->dsp->running) 930 if (ctl->enabled && ctl->dsp->running)
928 ret = wm_coeff_write_control(ctl, ctl->cache, size); 931 ret = wm_coeff_write_control(ctl, ctl->cache, size);
932 else if (ctl->flags & WMFW_CTL_FLAG_VOLATILE)
933 ret = -EPERM;
929 } 934 }
930 935
931 mutex_unlock(&ctl->dsp->pwr_lock); 936 mutex_unlock(&ctl->dsp->pwr_lock);
@@ -947,7 +952,7 @@ static int wm_coeff_put_acked(struct snd_kcontrol *kctl,
947 952
948 mutex_lock(&ctl->dsp->pwr_lock); 953 mutex_lock(&ctl->dsp->pwr_lock);
949 954
950 if (ctl->enabled) 955 if (ctl->enabled && ctl->dsp->running)
951 ret = wm_coeff_write_acked_control(ctl, val); 956 ret = wm_coeff_write_acked_control(ctl, val);
952 else 957 else
953 ret = -EPERM; 958 ret = -EPERM;
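The wm_adsp hunks encode an ownership rule: a control flagged WMFW_CTL_FLAG_VOLATILE belongs to the running DSP, so host-side writes into the cached copy are refused with -EPERM rather than silently staged. Reduced to a sketch with stand-in types:

#include <errno.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define CTL_FLAG_VOLATILE (1 << 0)  /* stand-in for WMFW_CTL_FLAG_VOLATILE */

struct ctl { unsigned int flags; char cache[16]; };

static int coeff_put(struct ctl *c, const char *data, size_t len)
{
        if (c->flags & CTL_FLAG_VOLATILE)
                return -EPERM;          /* DSP owns this control */
        memcpy(c->cache, data, len < sizeof(c->cache) ? len : sizeof(c->cache));
        return 0;
}

int main(void)
{
        struct ctl c = { .flags = CTL_FLAG_VOLATILE };

        printf("write: %s\n", strerror(-coeff_put(&c, "coef", 4)));
        return 0;
}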
diff --git a/sound/soc/generic/simple-card-utils.c b/sound/soc/generic/simple-card-utils.c
index 4924575d2e95..343b291fc372 100644
--- a/sound/soc/generic/simple-card-utils.c
+++ b/sound/soc/generic/simple-card-utils.c
@@ -115,6 +115,7 @@ int asoc_simple_card_parse_clk(struct device *dev,
115 clk = devm_get_clk_from_child(dev, node, NULL); 115 clk = devm_get_clk_from_child(dev, node, NULL);
116 if (!IS_ERR(clk)) { 116 if (!IS_ERR(clk)) {
117 simple_dai->sysclk = clk_get_rate(clk); 117 simple_dai->sysclk = clk_get_rate(clk);
118 simple_dai->clk = clk;
118 } else if (!of_property_read_u32(node, "system-clock-frequency", &val)) { 119 } else if (!of_property_read_u32(node, "system-clock-frequency", &val)) {
119 simple_dai->sysclk = val; 120 simple_dai->sysclk = val;
120 } else { 121 } else {
diff --git a/sound/soc/intel/skylake/skl-topology.c b/sound/soc/intel/skylake/skl-topology.c
index ed58b5b3555a..2dbfb1b24ef4 100644
--- a/sound/soc/intel/skylake/skl-topology.c
+++ b/sound/soc/intel/skylake/skl-topology.c
@@ -512,7 +512,7 @@ static int skl_tplg_set_module_init_data(struct snd_soc_dapm_widget *w)
512 if (bc->set_params != SKL_PARAM_INIT) 512 if (bc->set_params != SKL_PARAM_INIT)
513 continue; 513 continue;
514 514
515 mconfig->formats_config.caps = (u32 *)&bc->params; 515 mconfig->formats_config.caps = (u32 *)bc->params;
516 mconfig->formats_config.caps_size = bc->size; 516 mconfig->formats_config.caps_size = bc->size;
517 517
518 break; 518 break;
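The skl-topology fix is a single ampersand: with a pointer-typed params member, (u32 *)&bc->params aims at the storage of the pointer variable itself, while (u32 *)bc->params aims at the payload it references. Demonstrated with stand-in types:

#include <stdint.h>
#include <stdio.h>

struct binary_ctl {
        uint32_t size;
        char *params;             /* payload lives elsewhere */
};

int main(void)
{
        char payload[] = "caps-data";
        struct binary_ctl bc = { sizeof(payload), payload };

        uint32_t *right = (uint32_t *)bc.params;   /* the payload */
        uint32_t *wrong = (uint32_t *)&bc.params;  /* the pointer's own storage */

        printf("payload at %p, right=%p, wrong=%p\n",
               (void *)payload, (void *)right, (void *)wrong);
        return 0;
}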
diff --git a/sound/soc/mediatek/Kconfig b/sound/soc/mediatek/Kconfig
index 05cf809cf9e1..d7013bde6f45 100644
--- a/sound/soc/mediatek/Kconfig
+++ b/sound/soc/mediatek/Kconfig
@@ -13,7 +13,7 @@ config SND_SOC_MT2701
13 13
14config SND_SOC_MT2701_CS42448 14config SND_SOC_MT2701_CS42448
15 tristate "ASoc Audio driver for MT2701 with CS42448 codec" 15 tristate "ASoc Audio driver for MT2701 with CS42448 codec"
16 depends on SND_SOC_MT2701 16 depends on SND_SOC_MT2701 && I2C
17 select SND_SOC_CS42XX8_I2C 17 select SND_SOC_CS42XX8_I2C
18 select SND_SOC_BT_SCO 18 select SND_SOC_BT_SCO
19 help 19 help
diff --git a/sound/soc/sh/rcar/cmd.c b/sound/soc/sh/rcar/cmd.c
index abb5eaac854a..7d92a24b7cfa 100644
--- a/sound/soc/sh/rcar/cmd.c
+++ b/sound/soc/sh/rcar/cmd.c
@@ -31,23 +31,24 @@ static int rsnd_cmd_init(struct rsnd_mod *mod,
31 struct rsnd_mod *mix = rsnd_io_to_mod_mix(io); 31 struct rsnd_mod *mix = rsnd_io_to_mod_mix(io);
32 struct device *dev = rsnd_priv_to_dev(priv); 32 struct device *dev = rsnd_priv_to_dev(priv);
33 u32 data; 33 u32 data;
34 u32 path[] = {
35 [1] = 1 << 0,
36 [5] = 1 << 8,
37 [6] = 1 << 12,
38 [9] = 1 << 15,
39 };
34 40
35 if (!mix && !dvc) 41 if (!mix && !dvc)
36 return 0; 42 return 0;
37 43
44 if (ARRAY_SIZE(path) < rsnd_mod_id(mod) + 1)
45 return -ENXIO;
46
38 if (mix) { 47 if (mix) {
39 struct rsnd_dai *rdai; 48 struct rsnd_dai *rdai;
40 struct rsnd_mod *src; 49 struct rsnd_mod *src;
41 struct rsnd_dai_stream *tio; 50 struct rsnd_dai_stream *tio;
42 int i; 51 int i;
43 u32 path[] = {
44 [0] = 0,
45 [1] = 1 << 0,
46 [2] = 0,
47 [3] = 0,
48 [4] = 0,
49 [5] = 1 << 8
50 };
51 52
52 /* 53 /*
53 * it is assuming that integrater is well understanding about 54 * it is assuming that integrater is well understanding about
@@ -70,16 +71,19 @@ static int rsnd_cmd_init(struct rsnd_mod *mod,
70 } else { 71 } else {
71 struct rsnd_mod *src = rsnd_io_to_mod_src(io); 72 struct rsnd_mod *src = rsnd_io_to_mod_src(io);
72 73
73 u32 path[] = { 74 u8 cmd_case[] = {
74 [0] = 0x30000, 75 [0] = 0x3,
75 [1] = 0x30001, 76 [1] = 0x3,
76 [2] = 0x40000, 77 [2] = 0x4,
77 [3] = 0x10000, 78 [3] = 0x1,
78 [4] = 0x20000, 79 [4] = 0x2,
79 [5] = 0x40100 80 [5] = 0x4,
81 [6] = 0x1,
82 [9] = 0x2,
80 }; 83 };
81 84
82 data = path[rsnd_mod_id(src)]; 85 data = path[rsnd_mod_id(src)] |
86 cmd_case[rsnd_mod_id(src)] << 16;
83 } 87 }
84 88
85 dev_dbg(dev, "ctu/mix path = 0x%08x", data); 89 dev_dbg(dev, "ctu/mix path = 0x%08x", data);
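The rcar cmd.c rework leans on two C details: designated array initializers zero-fill the unnamed slots (so [2]..[4] and [7]..[8] need not be spelled out), and the new ARRAY_SIZE() guard rejects module ids beyond the table instead of reading past it. Both in a standalone sketch:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

int main(void)
{
        unsigned int path[] = {
                [1] = 1 << 0,
                [5] = 1 << 8,
                [6] = 1 << 12,
                [9] = 1 << 15,   /* indices 0..9; the holes default to 0 */
        };
        unsigned int id = 7;

        if (ARRAY_SIZE(path) < id + 1) {
                printf("id %u out of range\n", id);
                return 1;
        }
        printf("path[%u] = 0x%x\n", id, path[id]);  /* 0: hole, zero-filled */
        return 0;
}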
diff --git a/sound/soc/sh/rcar/dma.c b/sound/soc/sh/rcar/dma.c
index 1f405c833867..241cb3b08a07 100644
--- a/sound/soc/sh/rcar/dma.c
+++ b/sound/soc/sh/rcar/dma.c
@@ -454,6 +454,20 @@ static u32 rsnd_dmapp_read(struct rsnd_dma *dma, u32 reg)
454 return ioread32(rsnd_dmapp_addr(dmac, dma, reg)); 454 return ioread32(rsnd_dmapp_addr(dmac, dma, reg));
455} 455}
456 456
457static void rsnd_dmapp_bset(struct rsnd_dma *dma, u32 data, u32 mask, u32 reg)
458{
459 struct rsnd_mod *mod = rsnd_mod_get(dma);
460 struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
461 struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
462 void __iomem *addr = rsnd_dmapp_addr(dmac, dma, reg);
463 u32 val = ioread32(addr);
464
465 val &= ~mask;
466 val |= (data & mask);
467
468 iowrite32(val, addr);
469}
470
457static int rsnd_dmapp_stop(struct rsnd_mod *mod, 471static int rsnd_dmapp_stop(struct rsnd_mod *mod,
458 struct rsnd_dai_stream *io, 472 struct rsnd_dai_stream *io,
459 struct rsnd_priv *priv) 473 struct rsnd_priv *priv)
@@ -461,10 +475,10 @@ static int rsnd_dmapp_stop(struct rsnd_mod *mod,
461 struct rsnd_dma *dma = rsnd_mod_to_dma(mod); 475 struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
462 int i; 476 int i;
463 477
464 rsnd_dmapp_write(dma, 0, PDMACHCR); 478 rsnd_dmapp_bset(dma, 0, PDMACHCR_DE, PDMACHCR);
465 479
466 for (i = 0; i < 1024; i++) { 480 for (i = 0; i < 1024; i++) {
467 if (0 == rsnd_dmapp_read(dma, PDMACHCR)) 481 if (0 == (rsnd_dmapp_read(dma, PDMACHCR) & PDMACHCR_DE))
468 return 0; 482 return 0;
469 udelay(1); 483 udelay(1);
470 } 484 }
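rsnd_dmapp_bset() is a read-modify-write helper so that stopping the DMA clears only PDMACHCR's DE bit, and the stop loop polls only that bit, instead of zeroing and testing the whole register as before. The helper's core, as standalone C with the MMIO accessors stubbed out:

#include <stdint.h>
#include <stdio.h>

#define CHCR_DE (1u << 0)        /* stand-in for PDMACHCR_DE */

static uint32_t chcr = 0x00001200 | CHCR_DE;  /* other config bits set */

static void bset(uint32_t data, uint32_t mask)
{
        uint32_t val = chcr;     /* ioread32() stand-in */

        val &= ~mask;
        val |= data & mask;
        chcr = val;              /* iowrite32() stand-in */
}

int main(void)
{
        bset(0, CHCR_DE);        /* stop: clear DE only */
        printf("chcr = 0x%08x, DE clear = %d\n",
               chcr, !(chcr & CHCR_DE));  /* config bits survive the stop */
        return 0;
}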
diff --git a/sound/soc/sh/rcar/ssiu.c b/sound/soc/sh/rcar/ssiu.c
index 4e817c8a18c0..14fafdaf1395 100644
--- a/sound/soc/sh/rcar/ssiu.c
+++ b/sound/soc/sh/rcar/ssiu.c
@@ -64,7 +64,11 @@ static int rsnd_ssiu_init(struct rsnd_mod *mod,
64 mask1 = (1 << 4) | (1 << 20); /* mask sync bit */ 64 mask1 = (1 << 4) | (1 << 20); /* mask sync bit */
65 mask2 = (1 << 4); /* mask sync bit */ 65 mask2 = (1 << 4); /* mask sync bit */
66 val1 = val2 = 0; 66 val1 = val2 = 0;
67 if (rsnd_ssi_is_pin_sharing(io)) { 67 if (id == 8) {
68 /*
69 * SSI8 pin is sharing with SSI7, nothing to do.
70 */
71 } else if (rsnd_ssi_is_pin_sharing(io)) {
68 int shift = -1; 72 int shift = -1;
69 73
70 switch (id) { 74 switch (id) {
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 6dca408faae3..2722bb0c5573 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -3326,7 +3326,10 @@ static int snd_soc_platform_drv_pcm_new(struct snd_soc_pcm_runtime *rtd)
3326{ 3326{
3327 struct snd_soc_platform *platform = rtd->platform; 3327 struct snd_soc_platform *platform = rtd->platform;
3328 3328
3329 return platform->driver->pcm_new(rtd); 3329 if (platform->driver->pcm_new)
3330 return platform->driver->pcm_new(rtd);
3331 else
3332 return 0;
3330} 3333}
3331 3334
3332static void snd_soc_platform_drv_pcm_free(struct snd_pcm *pcm) 3335static void snd_soc_platform_drv_pcm_free(struct snd_pcm *pcm)
@@ -3334,7 +3337,8 @@ static void snd_soc_platform_drv_pcm_free(struct snd_pcm *pcm)
3334 struct snd_soc_pcm_runtime *rtd = pcm->private_data; 3337 struct snd_soc_pcm_runtime *rtd = pcm->private_data;
3335 struct snd_soc_platform *platform = rtd->platform; 3338 struct snd_soc_platform *platform = rtd->platform;
3336 3339
3337 platform->driver->pcm_free(pcm); 3340 if (platform->driver->pcm_free)
3341 platform->driver->pcm_free(pcm);
3338} 3342}
3339 3343
3340/** 3344/**
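The soc-core hunks make pcm_new and pcm_free optional: the wrappers now test the function pointer before calling, returning success (or doing nothing) when a platform driver leaves the callback unset. The guard pattern, trivially:

#include <stdio.h>

struct platform_drv { int (*pcm_new)(void); };

static int drv_pcm_new(const struct platform_drv *d)
{
        if (d->pcm_new)
                return d->pcm_new();
        return 0;               /* callback is optional */
}

int main(void)
{
        struct platform_drv no_cb = { .pcm_new = NULL };

        printf("pcm_new: %d\n", drv_pcm_new(&no_cb));  /* 0, no NULL call */
        return 0;
}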
diff --git a/sound/soc/sti/uniperif_reader.c b/sound/soc/sti/uniperif_reader.c
index 5992c6ab3833..93a8df6ed880 100644
--- a/sound/soc/sti/uniperif_reader.c
+++ b/sound/soc/sti/uniperif_reader.c
@@ -349,6 +349,8 @@ static int uni_reader_startup(struct snd_pcm_substream *substream,
349 struct uniperif *reader = priv->dai_data.uni; 349 struct uniperif *reader = priv->dai_data.uni;
350 int ret; 350 int ret;
351 351
352 reader->substream = substream;
353
352 if (!UNIPERIF_TYPE_IS_TDM(reader)) 354 if (!UNIPERIF_TYPE_IS_TDM(reader))
353 return 0; 355 return 0;
354 356
@@ -378,6 +380,7 @@ static void uni_reader_shutdown(struct snd_pcm_substream *substream,
378 /* Stop the reader */ 380 /* Stop the reader */
379 uni_reader_stop(reader); 381 uni_reader_stop(reader);
380 } 382 }
383 reader->substream = NULL;
381} 384}
382 385
383static const struct snd_soc_dai_ops uni_reader_dai_ops = { 386static const struct snd_soc_dai_ops uni_reader_dai_ops = {
diff --git a/sound/soc/sunxi/sun8i-codec.c b/sound/soc/sunxi/sun8i-codec.c
index b92bdc8361af..7527ba29a5a0 100644
--- a/sound/soc/sunxi/sun8i-codec.c
+++ b/sound/soc/sunxi/sun8i-codec.c
@@ -259,25 +259,20 @@ static int sun8i_codec_hw_params(struct snd_pcm_substream *substream,
259 return 0; 259 return 0;
260} 260}
261 261
262static const struct snd_kcontrol_new sun8i_output_left_mixer_controls[] = { 262static const struct snd_kcontrol_new sun8i_dac_mixer_controls[] = {
263 SOC_DAPM_SINGLE("LSlot 0", SUN8I_DAC_MXR_SRC, 263 SOC_DAPM_DOUBLE("AIF1 Slot 0 Digital DAC Playback Switch",
264 SUN8I_DAC_MXR_SRC_DACL_MXR_SRC_AIF1DA0L, 1, 0), 264 SUN8I_DAC_MXR_SRC,
265 SOC_DAPM_SINGLE("LSlot 1", SUN8I_DAC_MXR_SRC, 265 SUN8I_DAC_MXR_SRC_DACL_MXR_SRC_AIF1DA0L,
266 SUN8I_DAC_MXR_SRC_DACL_MXR_SRC_AIF1DA1L, 1, 0),
267 SOC_DAPM_SINGLE("DACL", SUN8I_DAC_MXR_SRC,
268 SUN8I_DAC_MXR_SRC_DACL_MXR_SRC_AIF2DACL, 1, 0),
269 SOC_DAPM_SINGLE("ADCL", SUN8I_DAC_MXR_SRC,
270 SUN8I_DAC_MXR_SRC_DACL_MXR_SRC_ADCL, 1, 0),
271};
272
273static const struct snd_kcontrol_new sun8i_output_right_mixer_controls[] = {
274 SOC_DAPM_SINGLE("RSlot 0", SUN8I_DAC_MXR_SRC,
275 SUN8I_DAC_MXR_SRC_DACR_MXR_SRC_AIF1DA0R, 1, 0), 266 SUN8I_DAC_MXR_SRC_DACR_MXR_SRC_AIF1DA0R, 1, 0),
276 SOC_DAPM_SINGLE("RSlot 1", SUN8I_DAC_MXR_SRC, 267 SOC_DAPM_DOUBLE("AIF1 Slot 1 Digital DAC Playback Switch",
268 SUN8I_DAC_MXR_SRC,
269 SUN8I_DAC_MXR_SRC_DACL_MXR_SRC_AIF1DA1L,
277 SUN8I_DAC_MXR_SRC_DACR_MXR_SRC_AIF1DA1R, 1, 0), 270 SUN8I_DAC_MXR_SRC_DACR_MXR_SRC_AIF1DA1R, 1, 0),
278 SOC_DAPM_SINGLE("DACR", SUN8I_DAC_MXR_SRC, 271 SOC_DAPM_DOUBLE("AIF2 Digital DAC Playback Switch", SUN8I_DAC_MXR_SRC,
272 SUN8I_DAC_MXR_SRC_DACL_MXR_SRC_AIF2DACL,
279 SUN8I_DAC_MXR_SRC_DACR_MXR_SRC_AIF2DACR, 1, 0), 273 SUN8I_DAC_MXR_SRC_DACR_MXR_SRC_AIF2DACR, 1, 0),
280 SOC_DAPM_SINGLE("ADCR", SUN8I_DAC_MXR_SRC, 274 SOC_DAPM_DOUBLE("ADC Digital DAC Playback Switch", SUN8I_DAC_MXR_SRC,
275 SUN8I_DAC_MXR_SRC_DACL_MXR_SRC_ADCL,
281 SUN8I_DAC_MXR_SRC_DACR_MXR_SRC_ADCR, 1, 0), 276 SUN8I_DAC_MXR_SRC_DACR_MXR_SRC_ADCR, 1, 0),
282}; 277};
283 278
@@ -286,19 +281,21 @@ static const struct snd_soc_dapm_widget sun8i_codec_dapm_widgets[] = {
286 SND_SOC_DAPM_SUPPLY("DAC", SUN8I_DAC_DIG_CTRL, SUN8I_DAC_DIG_CTRL_ENDA, 281 SND_SOC_DAPM_SUPPLY("DAC", SUN8I_DAC_DIG_CTRL, SUN8I_DAC_DIG_CTRL_ENDA,
287 0, NULL, 0), 282 0, NULL, 0),
288 283
289 /* Analog DAC */ 284 /* Analog DAC AIF */
290 SND_SOC_DAPM_DAC("Digital Left DAC", "Playback", SUN8I_AIF1_DACDAT_CTRL, 285 SND_SOC_DAPM_AIF_IN("AIF1 Slot 0 Left", "Playback", 0,
291 SUN8I_AIF1_DACDAT_CTRL_AIF1_DA0L_ENA, 0), 286 SUN8I_AIF1_DACDAT_CTRL,
292 SND_SOC_DAPM_DAC("Digital Right DAC", "Playback", SUN8I_AIF1_DACDAT_CTRL, 287 SUN8I_AIF1_DACDAT_CTRL_AIF1_DA0L_ENA, 0),
293 SUN8I_AIF1_DACDAT_CTRL_AIF1_DA0R_ENA, 0), 288 SND_SOC_DAPM_AIF_IN("AIF1 Slot 0 Right", "Playback", 0,
289 SUN8I_AIF1_DACDAT_CTRL,
290 SUN8I_AIF1_DACDAT_CTRL_AIF1_DA0R_ENA, 0),
294 291
295 /* DAC Mixers */ 292 /* DAC Mixers */
296 SND_SOC_DAPM_MIXER("Left DAC Mixer", SND_SOC_NOPM, 0, 0, 293 SND_SOC_DAPM_MIXER("Left Digital DAC Mixer", SND_SOC_NOPM, 0, 0,
297 sun8i_output_left_mixer_controls, 294 sun8i_dac_mixer_controls,
298 ARRAY_SIZE(sun8i_output_left_mixer_controls)), 295 ARRAY_SIZE(sun8i_dac_mixer_controls)),
299 SND_SOC_DAPM_MIXER("Right DAC Mixer", SND_SOC_NOPM, 0, 0, 296 SND_SOC_DAPM_MIXER("Right Digital DAC Mixer", SND_SOC_NOPM, 0, 0,
300 sun8i_output_right_mixer_controls, 297 sun8i_dac_mixer_controls,
301 ARRAY_SIZE(sun8i_output_right_mixer_controls)), 298 ARRAY_SIZE(sun8i_dac_mixer_controls)),
302 299
303 /* Clocks */ 300 /* Clocks */
304 SND_SOC_DAPM_SUPPLY("MODCLK AFI1", SUN8I_MOD_CLK_ENA, 301 SND_SOC_DAPM_SUPPLY("MODCLK AFI1", SUN8I_MOD_CLK_ENA,
@@ -321,8 +318,6 @@ static const struct snd_soc_dapm_widget sun8i_codec_dapm_widgets[] = {
321 SUN8I_MOD_RST_CTL_AIF1, 0, NULL, 0), 318 SUN8I_MOD_RST_CTL_AIF1, 0, NULL, 0),
322 SND_SOC_DAPM_SUPPLY("RST DAC", SUN8I_MOD_RST_CTL, 319 SND_SOC_DAPM_SUPPLY("RST DAC", SUN8I_MOD_RST_CTL,
323 SUN8I_MOD_RST_CTL_DAC, 0, NULL, 0), 320 SUN8I_MOD_RST_CTL_DAC, 0, NULL, 0),
324
325 SND_SOC_DAPM_OUTPUT("HP"),
326}; 321};
327 322
328static const struct snd_soc_dapm_route sun8i_codec_dapm_routes[] = { 323static const struct snd_soc_dapm_route sun8i_codec_dapm_routes[] = {
@@ -338,16 +333,14 @@ static const struct snd_soc_dapm_route sun8i_codec_dapm_routes[] = {
338 { "DAC", NULL, "MODCLK DAC" }, 333 { "DAC", NULL, "MODCLK DAC" },
339 334
340 /* DAC Routes */ 335 /* DAC Routes */
341 { "Digital Left DAC", NULL, "DAC" }, 336 { "AIF1 Slot 0 Right", NULL, "DAC" },
342 { "Digital Right DAC", NULL, "DAC" }, 337 { "AIF1 Slot 0 Left", NULL, "DAC" },
343 338
344 /* DAC Mixer Routes */ 339 /* DAC Mixer Routes */
345 { "Left DAC Mixer", "LSlot 0", "Digital Left DAC"}, 340 { "Left Digital DAC Mixer", "AIF1 Slot 0 Digital DAC Playback Switch",
346 { "Right DAC Mixer", "RSlot 0", "Digital Right DAC"}, 341 "AIF1 Slot 0 Left"},
347 342 { "Right Digital DAC Mixer", "AIF1 Slot 0 Digital DAC Playback Switch",
348 /* End of route : HP out */ 343 "AIF1 Slot 0 Right"},
349 { "HP", NULL, "Left DAC Mixer" },
350 { "HP", NULL, "Right DAC Mixer" },
351}; 344};
352 345
353static struct snd_soc_dai_ops sun8i_codec_dai_ops = { 346static struct snd_soc_dai_ops sun8i_codec_dai_ops = {
diff --git a/sound/x86/Kconfig b/sound/x86/Kconfig
index 84c8f8fc597c..8adf4d1bd46e 100644
--- a/sound/x86/Kconfig
+++ b/sound/x86/Kconfig
@@ -1,6 +1,7 @@
1menuconfig SND_X86 1menuconfig SND_X86
2 tristate "X86 sound devices" 2 bool "X86 sound devices"
3 depends on X86 3 depends on X86
4 default y
4 ---help--- 5 ---help---
5 X86 sound devices that don't fall under SoC or PCI categories 6 X86 sound devices that don't fall under SoC or PCI categories
6 7
diff --git a/tools/include/linux/filter.h b/tools/include/linux/filter.h
index 122153b16ea4..390d7c9685fd 100644
--- a/tools/include/linux/filter.h
+++ b/tools/include/linux/filter.h
@@ -168,6 +168,16 @@
168 .off = OFF, \ 168 .off = OFF, \
169 .imm = 0 }) 169 .imm = 0 })
170 170
171/* Atomic memory add, *(uint *)(dst_reg + off16) += src_reg */
172
173#define BPF_STX_XADD(SIZE, DST, SRC, OFF) \
174 ((struct bpf_insn) { \
175 .code = BPF_STX | BPF_SIZE(SIZE) | BPF_XADD, \
176 .dst_reg = DST, \
177 .src_reg = SRC, \
178 .off = OFF, \
179 .imm = 0 })
180
171/* Memory store, *(uint *) (dst_reg + off16) = imm32 */ 181/* Memory store, *(uint *) (dst_reg + off16) = imm32 */
172 182
173#define BPF_ST_MEM(SIZE, DST, OFF, IMM) \ 183#define BPF_ST_MEM(SIZE, DST, OFF, IMM) \
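The new BPF_STX_XADD() macro emits the eBPF atomic-add store: class BPF_STX with the BPF_XADD mode bit, meaning *(u64 *)(dst_reg + off) += src_reg for BPF_DW. A standalone encoder using the same construction, with the opcode constants copied from their uapi values:

#include <stdint.h>
#include <stdio.h>

struct bpf_insn {
        uint8_t  code;
        uint8_t  dst_reg:4;
        uint8_t  src_reg:4;
        int16_t  off;
        int32_t  imm;
};

#define BPF_STX   0x03
#define BPF_DW    0x18
#define BPF_XADD  0xc0

#define BPF_STX_XADD(SIZE, DST, SRC, OFF)               \
        ((struct bpf_insn) {                            \
                .code = BPF_STX | (SIZE) | BPF_XADD,    \
                .dst_reg = (DST), .src_reg = (SRC),     \
                .off = (OFF), .imm = 0 })

int main(void)
{
        struct bpf_insn insn = BPF_STX_XADD(BPF_DW, 4, 5, 0);

        printf("code=0x%02x dst=r%d src=r%d off=%d\n",   /* code=0xdb */
               insn.code, insn.dst_reg, insn.src_reg, insn.off);
        return 0;
}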
diff --git a/tools/include/uapi/linux/bpf_perf_event.h b/tools/include/uapi/linux/bpf_perf_event.h
new file mode 100644
index 000000000000..067427259820
--- /dev/null
+++ b/tools/include/uapi/linux/bpf_perf_event.h
@@ -0,0 +1,18 @@
1/* Copyright (c) 2016 Facebook
2 *
3 * This program is free software; you can redistribute it and/or
4 * modify it under the terms of version 2 of the GNU General Public
5 * License as published by the Free Software Foundation.
6 */
7#ifndef _UAPI__LINUX_BPF_PERF_EVENT_H__
8#define _UAPI__LINUX_BPF_PERF_EVENT_H__
9
10#include <linux/types.h>
11#include <linux/ptrace.h>
12
13struct bpf_perf_event_data {
14 struct pt_regs regs;
15 __u64 sample_period;
16};
17
18#endif /* _UAPI__LINUX_BPF_PERF_EVENT_H__ */
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 70e389bc4af7..9b4d8ba22fed 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -202,7 +202,7 @@ void symbols__fixup_end(struct rb_root *symbols)
202 202
203 /* Last entry */ 203 /* Last entry */
204 if (curr->end == curr->start) 204 if (curr->end == curr->start)
205 curr->end = roundup(curr->start, 4096); 205 curr->end = roundup(curr->start, 4096) + 4096;
206} 206}
207 207
208void __map_groups__fixup_end(struct map_groups *mg, enum map_type type) 208void __map_groups__fixup_end(struct map_groups *mg, enum map_type type)
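The symbols__fixup_end() change handles the corner where the last symbol's start is already page aligned: roundup() then returns the start unchanged and the symbol ends up zero-sized, so the fix pads a full page past the rounded value. Numerically:

#include <stdio.h>

#define roundup(x, y) ((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
        unsigned long start = 0x10000;   /* already page aligned */

        printf("old end: 0x%lx\n", roundup(start, 4096));        /* == start */
        printf("new end: 0x%lx\n", roundup(start, 4096) + 4096); /* start + 4k */
        return 0;
}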
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index 4b498265dae6..9af09e8099c0 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -1,20 +1,30 @@
1LIBDIR := ../../../lib 1LIBDIR := ../../../lib
2BPFOBJ := $(LIBDIR)/bpf/bpf.o 2BPFDIR := $(LIBDIR)/bpf
3APIDIR := ../../../include/uapi
4GENDIR := ../../../../include/generated
5GENHDR := $(GENDIR)/autoconf.h
3 6
4CFLAGS += -Wall -O2 -lcap -I../../../include/uapi -I$(LIBDIR) 7ifneq ($(wildcard $(GENHDR)),)
8 GENFLAGS := -DHAVE_GENHDR
9endif
10
11CFLAGS += -Wall -O2 -I$(APIDIR) -I$(LIBDIR) -I$(GENDIR) $(GENFLAGS)
12LDLIBS += -lcap
5 13
6TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map 14TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map
7 15
8TEST_PROGS := test_kmod.sh 16TEST_PROGS := test_kmod.sh
9 17
10.PHONY: all clean force 18include ../lib.mk
19
20BPFOBJ := $(OUTPUT)/bpf.o
21
22$(TEST_GEN_PROGS): $(BPFOBJ)
23
24.PHONY: force
11 25
12# force a rebuild of BPFOBJ when its dependencies are updated 26# force a rebuild of BPFOBJ when its dependencies are updated
13force: 27force:
14 28
15$(BPFOBJ): force 29$(BPFOBJ): force
16 $(MAKE) -C $(dir $(BPFOBJ)) 30 $(MAKE) -C $(BPFDIR) OUTPUT=$(OUTPUT)/
17
18$(test_objs): $(BPFOBJ)
19
20include ../lib.mk
diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c
index cada17ac00b8..a0aa2009b0e0 100644
--- a/tools/testing/selftests/bpf/test_maps.c
+++ b/tools/testing/selftests/bpf/test_maps.c
@@ -80,8 +80,9 @@ static void test_hashmap(int task, void *data)
80 assert(bpf_map_update_elem(fd, &key, &value, BPF_EXIST) == 0); 80 assert(bpf_map_update_elem(fd, &key, &value, BPF_EXIST) == 0);
81 key = 2; 81 key = 2;
82 assert(bpf_map_update_elem(fd, &key, &value, BPF_ANY) == 0); 82 assert(bpf_map_update_elem(fd, &key, &value, BPF_ANY) == 0);
83 key = 1; 83 key = 3;
84 assert(bpf_map_update_elem(fd, &key, &value, BPF_ANY) == 0); 84 assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) == -1 &&
85 errno == E2BIG);
85 86
86 /* Check that key = 0 doesn't exist. */ 87 /* Check that key = 0 doesn't exist. */
87 key = 0; 88 key = 0;
@@ -110,6 +111,24 @@ static void test_hashmap(int task, void *data)
110 close(fd); 111 close(fd);
111} 112}
112 113
114static void test_hashmap_sizes(int task, void *data)
115{
116 int fd, i, j;
117
118 for (i = 1; i <= 512; i <<= 1)
119 for (j = 1; j <= 1 << 18; j <<= 1) {
120 fd = bpf_create_map(BPF_MAP_TYPE_HASH, i, j,
121 2, map_flags);
122 if (fd < 0) {
123 printf("Failed to create hashmap key=%d value=%d '%s'\n",
124 i, j, strerror(errno));
125 exit(1);
126 }
127 close(fd);
128 usleep(10); /* give kernel time to destroy */
129 }
130}
131
113static void test_hashmap_percpu(int task, void *data) 132static void test_hashmap_percpu(int task, void *data)
114{ 133{
115 unsigned int nr_cpus = bpf_num_possible_cpus(); 134 unsigned int nr_cpus = bpf_num_possible_cpus();
@@ -317,7 +336,10 @@ static void test_arraymap_percpu(int task, void *data)
317static void test_arraymap_percpu_many_keys(void) 336static void test_arraymap_percpu_many_keys(void)
318{ 337{
319 unsigned int nr_cpus = bpf_num_possible_cpus(); 338 unsigned int nr_cpus = bpf_num_possible_cpus();
320 unsigned int nr_keys = 20000; 339 /* nr_keys is not too large otherwise the test stresses percpu
340 * allocator more than anything else
341 */
342 unsigned int nr_keys = 2000;
321 long values[nr_cpus]; 343 long values[nr_cpus];
322 int key, fd, i; 344 int key, fd, i;
323 345
@@ -419,6 +441,7 @@ static void test_map_stress(void)
419{ 441{
420 run_parallel(100, test_hashmap, NULL); 442 run_parallel(100, test_hashmap, NULL);
421 run_parallel(100, test_hashmap_percpu, NULL); 443 run_parallel(100, test_hashmap_percpu, NULL);
444 run_parallel(100, test_hashmap_sizes, NULL);
422 445
423 run_parallel(100, test_arraymap, NULL); 446 run_parallel(100, test_arraymap, NULL);
424 run_parallel(100, test_arraymap_percpu, NULL); 447 run_parallel(100, test_arraymap_percpu, NULL);
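The tightened test_hashmap assertions pin down capacity semantics: with max_entries already filled, inserting a genuinely new key under BPF_NOEXIST must fail with E2BIG rather than grow or evict. A miniature model of that contract:

#include <errno.h>
#include <stdio.h>
#include <string.h>

#define MAX_ENTRIES 2

static long long keys[MAX_ENTRIES];
static int used;

static int map_update_noexist(long long key)
{
        for (int i = 0; i < used; i++)
                if (keys[i] == key)
                        return -EEXIST;   /* NOEXIST: key must be new */
        if (used == MAX_ENTRIES)
                return -E2BIG;            /* full: reject, don't evict */
        keys[used++] = key;
        return 0;
}

int main(void)
{
        map_update_noexist(1);
        map_update_noexist(2);
        printf("third insert: %s\n",      /* "Argument list too long" */
               strerror(-map_update_noexist(3)));
        return 0;
}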
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index e1f5b9eea1e8..c848e90b6421 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -8,6 +8,8 @@
8 * License as published by the Free Software Foundation. 8 * License as published by the Free Software Foundation.
9 */ 9 */
10 10
11#include <asm/types.h>
12#include <linux/types.h>
11#include <stdint.h> 13#include <stdint.h>
12#include <stdio.h> 14#include <stdio.h>
13#include <stdlib.h> 15#include <stdlib.h>
@@ -28,6 +30,14 @@
28 30
29#include <bpf/bpf.h> 31#include <bpf/bpf.h>
30 32
33#ifdef HAVE_GENHDR
34# include "autoconf.h"
35#else
36# if defined(__i386) || defined(__x86_64) || defined(__s390x__) || defined(__aarch64__)
37# define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
38# endif
39#endif
40
31#include "../../../include/linux/filter.h" 41#include "../../../include/linux/filter.h"
32 42
33#ifndef ARRAY_SIZE 43#ifndef ARRAY_SIZE
@@ -37,6 +47,8 @@
37#define MAX_INSNS 512 47#define MAX_INSNS 512
38#define MAX_FIXUPS 8 48#define MAX_FIXUPS 8
39 49
50#define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS (1 << 0)
51
40struct bpf_test { 52struct bpf_test {
41 const char *descr; 53 const char *descr;
42 struct bpf_insn insns[MAX_INSNS]; 54 struct bpf_insn insns[MAX_INSNS];
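The test_verifier.c preamble introduces F_NEEDS_EFFICIENT_UNALIGNED_ACCESS together with a CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS guess (taken from autoconf.h when HAVE_GENHDR is set, otherwise from the architecture list), so tests that deliberately perform unaligned map accesses can be treated differently on strict-alignment machines. One plausible way a runner could consume such a flag; this is an assumption for illustration, not necessarily this harness's exact policy:

#include <stdio.h>

#define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS (1 << 0)

#if defined(__i386) || defined(__x86_64) || defined(__s390x__) || defined(__aarch64__)
# define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
#endif

static int test_runnable(unsigned char flags)
{
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
        (void)flags;
        return 1;                /* unaligned access is cheap here */
#else
        return !(flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS);
#endif
}

int main(void)
{
        printf("unaligned test runs here: %d\n",
               test_runnable(F_NEEDS_EFFICIENT_UNALIGNED_ACCESS));
        return 0;
}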
@@ -51,6 +63,7 @@ struct bpf_test {
51 REJECT 63 REJECT
52 } result, result_unpriv; 64 } result, result_unpriv;
53 enum bpf_prog_type prog_type; 65 enum bpf_prog_type prog_type;
66 uint8_t flags;
54}; 67};
55 68
56/* Note we want this to be 64 bit aligned so that the end of our array is 69/* Note we want this to be 64 bit aligned so that the end of our array is
@@ -2430,6 +2443,30 @@ static struct bpf_test tests[] = {
2430 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2443 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2431 }, 2444 },
2432 { 2445 {
2446 "direct packet access: test15 (spill with xadd)",
2447 .insns = {
2448 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2449 offsetof(struct __sk_buff, data)),
2450 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2451 offsetof(struct __sk_buff, data_end)),
2452 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2453 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2454 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
2455 BPF_MOV64_IMM(BPF_REG_5, 4096),
2456 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
2457 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
2458 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
2459 BPF_STX_XADD(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
2460 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
2461 BPF_STX_MEM(BPF_W, BPF_REG_2, BPF_REG_5, 0),
2462 BPF_MOV64_IMM(BPF_REG_0, 0),
2463 BPF_EXIT_INSN(),
2464 },
2465 .errstr = "R2 invalid mem access 'inv'",
2466 .result = REJECT,
2467 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2468 },
2469 {
2433 "helper access to packet: test1, valid packet_ptr range", 2470 "helper access to packet: test1, valid packet_ptr range",
2434 .insns = { 2471 .insns = {
2435 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 2472 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
@@ -2932,6 +2969,7 @@ static struct bpf_test tests[] = {
2932 .errstr_unpriv = "R0 pointer arithmetic prohibited", 2969 .errstr_unpriv = "R0 pointer arithmetic prohibited",
2933 .result_unpriv = REJECT, 2970 .result_unpriv = REJECT,
2934 .result = ACCEPT, 2971 .result = ACCEPT,
2972 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
2935 }, 2973 },
2936 { 2974 {
2937 "valid map access into an array with a variable", 2975 "valid map access into an array with a variable",
@@ -2955,6 +2993,7 @@ static struct bpf_test tests[] = {
2955 .errstr_unpriv = "R0 pointer arithmetic prohibited", 2993 .errstr_unpriv = "R0 pointer arithmetic prohibited",
2956 .result_unpriv = REJECT, 2994 .result_unpriv = REJECT,
2957 .result = ACCEPT, 2995 .result = ACCEPT,
2996 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
2958 }, 2997 },
2959 { 2998 {
2960 "valid map access into an array with a signed variable", 2999 "valid map access into an array with a signed variable",
@@ -2982,6 +3021,7 @@ static struct bpf_test tests[] = {
2982 .errstr_unpriv = "R0 pointer arithmetic prohibited", 3021 .errstr_unpriv = "R0 pointer arithmetic prohibited",
2983 .result_unpriv = REJECT, 3022 .result_unpriv = REJECT,
2984 .result = ACCEPT, 3023 .result = ACCEPT,
3024 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
2985 }, 3025 },
2986 { 3026 {
2987 "invalid map access into an array with a constant", 3027 "invalid map access into an array with a constant",
@@ -3023,6 +3063,7 @@ static struct bpf_test tests[] = {
3023 .errstr = "R0 min value is outside of the array range", 3063 .errstr = "R0 min value is outside of the array range",
3024 .result_unpriv = REJECT, 3064 .result_unpriv = REJECT,
3025 .result = REJECT, 3065 .result = REJECT,
3066 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3026 }, 3067 },
3027 { 3068 {
3028 "invalid map access into an array with a variable", 3069 "invalid map access into an array with a variable",
@@ -3046,6 +3087,7 @@ static struct bpf_test tests[] = {
3046 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.", 3087 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
3047 .result_unpriv = REJECT, 3088 .result_unpriv = REJECT,
3048 .result = REJECT, 3089 .result = REJECT,
3090 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3049 }, 3091 },
3050 { 3092 {
3051 "invalid map access into an array with no floor check", 3093 "invalid map access into an array with no floor check",
@@ -3072,6 +3114,7 @@ static struct bpf_test tests[] = {
3072 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.", 3114 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
3073 .result_unpriv = REJECT, 3115 .result_unpriv = REJECT,
3074 .result = REJECT, 3116 .result = REJECT,
3117 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3075 }, 3118 },
3076 { 3119 {
3077 "invalid map access into an array with a invalid max check", 3120 "invalid map access into an array with a invalid max check",
@@ -3098,6 +3141,7 @@ static struct bpf_test tests[] = {
3098 .errstr = "invalid access to map value, value_size=48 off=44 size=8", 3141 .errstr = "invalid access to map value, value_size=48 off=44 size=8",
3099 .result_unpriv = REJECT, 3142 .result_unpriv = REJECT,
3100 .result = REJECT, 3143 .result = REJECT,
3144 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3101 }, 3145 },
3102 { 3146 {
3103 "invalid map access into an array with a invalid max check", 3147 "invalid map access into an array with a invalid max check",
@@ -3127,6 +3171,7 @@ static struct bpf_test tests[] = {
3127 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.", 3171 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
3128 .result_unpriv = REJECT, 3172 .result_unpriv = REJECT,
3129 .result = REJECT, 3173 .result = REJECT,
3174 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3130 }, 3175 },
3131 { 3176 {
3132 "multiple registers share map_lookup_elem result", 3177 "multiple registers share map_lookup_elem result",
@@ -3250,6 +3295,7 @@ static struct bpf_test tests[] = {
3250 .result = REJECT, 3295 .result = REJECT,
3251 .errstr_unpriv = "R0 pointer arithmetic prohibited", 3296 .errstr_unpriv = "R0 pointer arithmetic prohibited",
3252 .result_unpriv = REJECT, 3297 .result_unpriv = REJECT,
3298 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3253 }, 3299 },
3254 { 3300 {
3255 "constant register |= constant should keep constant type", 3301 "constant register |= constant should keep constant type",
@@ -3416,6 +3462,26 @@ static struct bpf_test tests[] = {
3416 .prog_type = BPF_PROG_TYPE_LWT_XMIT, 3462 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
3417 }, 3463 },
3418 { 3464 {
3465 "overlapping checks for direct packet access",
3466 .insns = {
3467 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3468 offsetof(struct __sk_buff, data)),
3469 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3470 offsetof(struct __sk_buff, data_end)),
3471 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3472 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3473 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
3474 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
3475 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
3476 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
3477 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
3478 BPF_MOV64_IMM(BPF_REG_0, 0),
3479 BPF_EXIT_INSN(),
3480 },
3481 .result = ACCEPT,
3482 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
3483 },
3484 {
3419 "invalid access of tc_classid for LWT_IN", 3485 "invalid access of tc_classid for LWT_IN",
3420 .insns = { 3486 .insns = {
3421 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 3487 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
@@ -3959,7 +4025,208 @@ static struct bpf_test tests[] = {
3959 .result_unpriv = REJECT, 4025 .result_unpriv = REJECT,
3960 }, 4026 },
3961 { 4027 {
3962 "map element value (adjusted) is preserved across register spilling", 4028 "map element value or null is marked on register spilling",
4029 .insns = {
4030 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4031 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4032 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4033 BPF_LD_MAP_FD(BPF_REG_1, 0),
4034 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4035 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4036 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -152),
4037 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
4038 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
4039 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
4040 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
4041 BPF_EXIT_INSN(),
4042 },
4043 .fixup_map2 = { 3 },
4044 .errstr_unpriv = "R0 leaks addr",
4045 .result = ACCEPT,
4046 .result_unpriv = REJECT,
4047 },
4048 {
4049 "map element value store of cleared call register",
4050 .insns = {
4051 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4052 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4053 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4054 BPF_LD_MAP_FD(BPF_REG_1, 0),
4055 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4056 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4057 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
4058 BPF_EXIT_INSN(),
4059 },
4060 .fixup_map2 = { 3 },
4061 .errstr_unpriv = "R1 !read_ok",
4062 .errstr = "R1 !read_ok",
4063 .result = REJECT,
4064 .result_unpriv = REJECT,
4065 },
4066 {
4067 "map element value with unaligned store",
4068 .insns = {
4069 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4070 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4071 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4072 BPF_LD_MAP_FD(BPF_REG_1, 0),
4073 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4074 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 17),
4075 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
4076 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
4077 BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 43),
4078 BPF_ST_MEM(BPF_DW, BPF_REG_0, -2, 44),
4079 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
4080 BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 32),
4081 BPF_ST_MEM(BPF_DW, BPF_REG_8, 2, 33),
4082 BPF_ST_MEM(BPF_DW, BPF_REG_8, -2, 34),
4083 BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 5),
4084 BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 22),
4085 BPF_ST_MEM(BPF_DW, BPF_REG_8, 4, 23),
4086 BPF_ST_MEM(BPF_DW, BPF_REG_8, -7, 24),
4087 BPF_MOV64_REG(BPF_REG_7, BPF_REG_8),
4088 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 3),
4089 BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 22),
4090 BPF_ST_MEM(BPF_DW, BPF_REG_7, 4, 23),
4091 BPF_ST_MEM(BPF_DW, BPF_REG_7, -4, 24),
4092 BPF_EXIT_INSN(),
4093 },
4094 .fixup_map2 = { 3 },
4095 .errstr_unpriv = "R0 pointer arithmetic prohibited",
4096 .result = ACCEPT,
4097 .result_unpriv = REJECT,
4098 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4099 },
4100 {
4101 "map element value with unaligned load",
4102 .insns = {
4103 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4104 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4105 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4106 BPF_LD_MAP_FD(BPF_REG_1, 0),
4107 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4108 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
4109 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4110 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 9),
4111 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
4112 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
4113 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 2),
4114 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
4115 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 0),
4116 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 2),
4117 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 5),
4118 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
4119 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 4),
4120 BPF_EXIT_INSN(),
4121 },
4122 .fixup_map2 = { 3 },
4123 .errstr_unpriv = "R0 pointer arithmetic prohibited",
4124 .result = ACCEPT,
4125 .result_unpriv = REJECT,
4126 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4127 },
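
Both unaligned-access tests bump the map-value pointer to offsets such as +3 and +5 before BPF_DW loads and stores, so the resulting accesses are not naturally aligned. They are expected to be ACCEPTed only where unaligned access is efficient, which is exactly what the new F_NEEDS_EFFICIENT_UNALIGNED_ACCESS flag communicates to the runner. A hedged C picture of one such store:

    /* illustration only: an 8-byte store at value + 3, misaligned
     * for BPF_DW on strict-alignment machines */
    char *p = (char *)value + 3;
    *(unsigned long long *)p = 42;
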
4128 {
4129 "map element value illegal alu op, 1",
4130 .insns = {
4131 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4132 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4133 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4134 BPF_LD_MAP_FD(BPF_REG_1, 0),
4135 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4136 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
4137 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 8),
4138 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
4139 BPF_EXIT_INSN(),
4140 },
4141 .fixup_map2 = { 3 },
4142 .errstr_unpriv = "R0 pointer arithmetic prohibited",
4143 .errstr = "invalid mem access 'inv'",
4144 .result = REJECT,
4145 .result_unpriv = REJECT,
4146 },
4147 {
4148 "map element value illegal alu op, 2",
4149 .insns = {
4150 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4151 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4152 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4153 BPF_LD_MAP_FD(BPF_REG_1, 0),
4154 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4155 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
4156 BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 0),
4157 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
4158 BPF_EXIT_INSN(),
4159 },
4160 .fixup_map2 = { 3 },
4161 .errstr_unpriv = "R0 pointer arithmetic prohibited",
4162 .errstr = "invalid mem access 'inv'",
4163 .result = REJECT,
4164 .result_unpriv = REJECT,
4165 },
4166 {
4167 "map element value illegal alu op, 3",
4168 .insns = {
4169 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4170 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4171 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4172 BPF_LD_MAP_FD(BPF_REG_1, 0),
4173 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4174 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
4175 BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, 42),
4176 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
4177 BPF_EXIT_INSN(),
4178 },
4179 .fixup_map2 = { 3 },
4180 .errstr_unpriv = "R0 pointer arithmetic prohibited",
4181 .errstr = "invalid mem access 'inv'",
4182 .result = REJECT,
4183 .result_unpriv = REJECT,
4184 },
4185 {
4186 "map element value illegal alu op, 4",
4187 .insns = {
4188 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4189 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4190 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4191 BPF_LD_MAP_FD(BPF_REG_1, 0),
4192 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4193 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
4194 BPF_ENDIAN(BPF_FROM_BE, BPF_REG_0, 64),
4195 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
4196 BPF_EXIT_INSN(),
4197 },
4198 .fixup_map2 = { 3 },
4199 .errstr_unpriv = "R0 pointer arithmetic prohibited",
4200 .errstr = "invalid mem access 'inv'",
4201 .result = REJECT,
4202 .result_unpriv = REJECT,
4203 },
4204 {
4205 "map element value illegal alu op, 5",
4206 .insns = {
4207 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4208 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4209 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4210 BPF_LD_MAP_FD(BPF_REG_1, 0),
4211 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4212 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4213 BPF_MOV64_IMM(BPF_REG_3, 4096),
4214 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4215 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4216 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
4217 BPF_STX_XADD(BPF_DW, BPF_REG_2, BPF_REG_3, 0),
4218 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0),
4219 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
4220 BPF_EXIT_INSN(),
4221 },
4222 .fixup_map2 = { 3 },
4223 .errstr_unpriv = "R0 invalid mem access 'inv'",
4224 .errstr = "R0 invalid mem access 'inv'",
4225 .result = REJECT,
4226 .result_unpriv = REJECT,
4227 },
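
Taken together, the five "illegal alu op" cases enumerate operations that demote a map-value pointer to an unknown scalar: bitwise AND (1), a 32-bit ALU op (2), division (3), a byte swap (4), and an atomic add through a spilled stack slot (5). Dereferencing the result then fails with "invalid mem access 'inv'". A hedged C analogy for the first case:

    long *value;                                 /* PTR_TO_MAP_VALUE */
    value = (long *)((unsigned long)value & 8);  /* now a scalar ('inv') */
    *value = 22;                                 /* rejected */
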
4228 {
4229 "map element value is preserved across register spilling",
3963 .insns = { 4230 .insns = {
3964 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 4231 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3965 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 4232 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
@@ -3981,6 +4248,7 @@ static struct bpf_test tests[] = {
3981 .errstr_unpriv = "R0 pointer arithmetic prohibited", 4248 .errstr_unpriv = "R0 pointer arithmetic prohibited",
3982 .result = ACCEPT, 4249 .result = ACCEPT,
3983 .result_unpriv = REJECT, 4250 .result_unpriv = REJECT,
4251 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3984 }, 4252 },
3985 { 4253 {
3986 "helper access to variable memory: stack, bitwise AND + JMP, correct bounds", 4254 "helper access to variable memory: stack, bitwise AND + JMP, correct bounds",
@@ -4419,6 +4687,7 @@ static struct bpf_test tests[] = {
4419 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.", 4687 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
4420 .result = REJECT, 4688 .result = REJECT,
4421 .result_unpriv = REJECT, 4689 .result_unpriv = REJECT,
4690 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4422 }, 4691 },
4423 { 4692 {
4424 "invalid range check", 4693 "invalid range check",
@@ -4450,6 +4719,7 @@ static struct bpf_test tests[] = {
4450 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.", 4719 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
4451 .result = REJECT, 4720 .result = REJECT,
4452 .result_unpriv = REJECT, 4721 .result_unpriv = REJECT,
4722 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4453 } 4723 }
4454}; 4724};
4455 4725
@@ -4528,11 +4798,11 @@ static void do_test_fixup(struct bpf_test *test, struct bpf_insn *prog,
4528static void do_test_single(struct bpf_test *test, bool unpriv, 4798static void do_test_single(struct bpf_test *test, bool unpriv,
4529 int *passes, int *errors) 4799 int *passes, int *errors)
4530{ 4800{
4801 int fd_prog, expected_ret, reject_from_alignment;
4531 struct bpf_insn *prog = test->insns; 4802 struct bpf_insn *prog = test->insns;
4532 int prog_len = probe_filter_length(prog); 4803 int prog_len = probe_filter_length(prog);
4533 int prog_type = test->prog_type; 4804 int prog_type = test->prog_type;
4534 int fd_f1 = -1, fd_f2 = -1, fd_f3 = -1; 4805 int fd_f1 = -1, fd_f2 = -1, fd_f3 = -1;
4535 int fd_prog, expected_ret;
4536 const char *expected_err; 4806 const char *expected_err;
4537 4807
4538 do_test_fixup(test, prog, &fd_f1, &fd_f2, &fd_f3); 4808 do_test_fixup(test, prog, &fd_f1, &fd_f2, &fd_f3);
@@ -4545,8 +4815,19 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
4545 test->result_unpriv : test->result; 4815 test->result_unpriv : test->result;
4546 expected_err = unpriv && test->errstr_unpriv ? 4816 expected_err = unpriv && test->errstr_unpriv ?
4547 test->errstr_unpriv : test->errstr; 4817 test->errstr_unpriv : test->errstr;
4818
4819 reject_from_alignment = fd_prog < 0 &&
4820 (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS) &&
4821 strstr(bpf_vlog, "Unknown alignment.");
4822#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
4823 if (reject_from_alignment) {
4824 printf("FAIL\nFailed due to alignment despite having efficient unaligned access: '%s'!\n",
4825 strerror(errno));
4826 goto fail_log;
4827 }
4828#endif
4548 if (expected_ret == ACCEPT) { 4829 if (expected_ret == ACCEPT) {
4549 if (fd_prog < 0) { 4830 if (fd_prog < 0 && !reject_from_alignment) {
4550 printf("FAIL\nFailed to load prog '%s'!\n", 4831 printf("FAIL\nFailed to load prog '%s'!\n",
4551 strerror(errno)); 4832 strerror(errno));
4552 goto fail_log; 4833 goto fail_log;
@@ -4556,14 +4837,15 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
4556 printf("FAIL\nUnexpected success to load!\n"); 4837 printf("FAIL\nUnexpected success to load!\n");
4557 goto fail_log; 4838 goto fail_log;
4558 } 4839 }
4559 if (!strstr(bpf_vlog, expected_err)) { 4840 if (!strstr(bpf_vlog, expected_err) && !reject_from_alignment) {
4560 printf("FAIL\nUnexpected error message!\n"); 4841 printf("FAIL\nUnexpected error message!\n");
4561 goto fail_log; 4842 goto fail_log;
4562 } 4843 }
4563 } 4844 }
4564 4845
4565 (*passes)++; 4846 (*passes)++;
4566 printf("OK\n"); 4847 printf("OK%s\n", reject_from_alignment ?
4848 " (NOTE: reject due to unknown alignment)" : "");
4567close_fds: 4849close_fds:
4568 close(fd_prog); 4850 close(fd_prog);
4569 close(fd_f1); 4851 close(fd_f1);
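
A hedged restatement of the gating added above, purely for reading convenience: a failed load is tolerated only when the test carries the alignment flag and the verifier log blames alignment, and even then it counts as a failure on kernels built with efficient unaligned access:

    bool tolerated = fd_prog < 0 &&
                     (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS) &&
                     strstr(bpf_vlog, "Unknown alignment.") != NULL;
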
@@ -4583,10 +4865,12 @@ static bool is_admin(void)
4583 cap_flag_value_t sysadmin = CAP_CLEAR; 4865 cap_flag_value_t sysadmin = CAP_CLEAR;
4584 const cap_value_t cap_val = CAP_SYS_ADMIN; 4866 const cap_value_t cap_val = CAP_SYS_ADMIN;
4585 4867
4868#ifdef CAP_IS_SUPPORTED
4586 if (!CAP_IS_SUPPORTED(CAP_SETFCAP)) { 4869 if (!CAP_IS_SUPPORTED(CAP_SETFCAP)) {
4587 perror("cap_get_flag"); 4870 perror("cap_get_flag");
4588 return false; 4871 return false;
4589 } 4872 }
4873#endif
4590 caps = cap_get_proc(); 4874 caps = cap_get_proc();
4591 if (!caps) { 4875 if (!caps) {
4592 perror("cap_get_proc"); 4876 perror("cap_get_proc");

diff --git a/tools/testing/selftests/powerpc/Makefile b/tools/testing/selftests/powerpc/Makefile
index 1c5d0575802e..bf13fc2297aa 100644
--- a/tools/testing/selftests/powerpc/Makefile
+++ b/tools/testing/selftests/powerpc/Makefile
@@ -34,34 +34,34 @@ endif
34all: $(SUB_DIRS) 34all: $(SUB_DIRS)
35 35
36$(SUB_DIRS): 36$(SUB_DIRS):
37 BUILD_TARGET=$$OUTPUT/$@; mkdir -p $$BUILD_TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -k -C $@ all 37 BUILD_TARGET=$(OUTPUT)/$@; mkdir -p $$BUILD_TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -k -C $@ all
38 38
39include ../lib.mk 39include ../lib.mk
40 40
41override define RUN_TESTS 41override define RUN_TESTS
42 @for TARGET in $(SUB_DIRS); do \ 42 @for TARGET in $(SUB_DIRS); do \
43 BUILD_TARGET=$$OUTPUT/$$TARGET; \ 43 BUILD_TARGET=$(OUTPUT)/$$TARGET; \
44 $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET run_tests;\ 44 $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET run_tests;\
45 done; 45 done;
46endef 46endef
47 47
48override define INSTALL_RULE 48override define INSTALL_RULE
49 @for TARGET in $(SUB_DIRS); do \ 49 @for TARGET in $(SUB_DIRS); do \
50 BUILD_TARGET=$$OUTPUT/$$TARGET; \ 50 BUILD_TARGET=$(OUTPUT)/$$TARGET; \
51 $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET install;\ 51 $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET install;\
52 done; 52 done;
53endef 53endef
54 54
55override define EMIT_TESTS 55override define EMIT_TESTS
56 @for TARGET in $(SUB_DIRS); do \ 56 @for TARGET in $(SUB_DIRS); do \
57 BUILD_TARGET=$$OUTPUT/$$TARGET; \ 57 BUILD_TARGET=$(OUTPUT)/$$TARGET; \
58 $(MAKE) OUTPUT=$$BUILD_TARGET -s -C $$TARGET emit_tests;\ 58 $(MAKE) OUTPUT=$$BUILD_TARGET -s -C $$TARGET emit_tests;\
59 done; 59 done;
60endef 60endef
61 61
62clean: 62clean:
63 @for TARGET in $(SUB_DIRS); do \ 63 @for TARGET in $(SUB_DIRS); do \
64 BUILD_TARGET=$$OUTPUT/$$TARGET; \ 64 BUILD_TARGET=$(OUTPUT)/$$TARGET; \
65 $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET clean; \ 65 $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET clean; \
66 done; 66 done;
67 rm -f tags 67 rm -f tags
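
The Makefile change matters because `$$OUTPUT` reaches the shell as the environment variable OUTPUT, which is empty when OUTPUT is assigned inside a makefile (as the selftests' lib.mk does) without being exported, so BUILD_TARGET silently collapsed to an absolute path like /<subdir>. `$(OUTPUT)` is expanded by make itself before the recipe runs. A minimal illustration, not part of the patch:

    OUTPUT := /tmp/out        # assigned in a makefile, not exported
    demo:
    	@echo "make expands: $(OUTPUT)"   # prints /tmp/out
    	@echo "shell expands: $$OUTPUT"   # prints an empty line
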
diff --git a/tools/testing/selftests/powerpc/include/vsx_asm.h b/tools/testing/selftests/powerpc/include/vsx_asm.h
index d828bfb6ef2d..54064ced9e95 100644
--- a/tools/testing/selftests/powerpc/include/vsx_asm.h
+++ b/tools/testing/selftests/powerpc/include/vsx_asm.h
@@ -16,56 +16,56 @@
16 */ 16 */
17FUNC_START(load_vsx) 17FUNC_START(load_vsx)
18 li r5,0 18 li r5,0
19 lxvx vs20,r5,r3 19 lxvd2x vs20,r5,r3
20 addi r5,r5,16 20 addi r5,r5,16
21 lxvx vs21,r5,r3 21 lxvd2x vs21,r5,r3
22 addi r5,r5,16 22 addi r5,r5,16
23 lxvx vs22,r5,r3 23 lxvd2x vs22,r5,r3
24 addi r5,r5,16 24 addi r5,r5,16
25 lxvx vs23,r5,r3 25 lxvd2x vs23,r5,r3
26 addi r5,r5,16 26 addi r5,r5,16
27 lxvx vs24,r5,r3 27 lxvd2x vs24,r5,r3
28 addi r5,r5,16 28 addi r5,r5,16
29 lxvx vs25,r5,r3 29 lxvd2x vs25,r5,r3
30 addi r5,r5,16 30 addi r5,r5,16
31 lxvx vs26,r5,r3 31 lxvd2x vs26,r5,r3
32 addi r5,r5,16 32 addi r5,r5,16
33 lxvx vs27,r5,r3 33 lxvd2x vs27,r5,r3
34 addi r5,r5,16 34 addi r5,r5,16
35 lxvx vs28,r5,r3 35 lxvd2x vs28,r5,r3
36 addi r5,r5,16 36 addi r5,r5,16
37 lxvx vs29,r5,r3 37 lxvd2x vs29,r5,r3
38 addi r5,r5,16 38 addi r5,r5,16
39 lxvx vs30,r5,r3 39 lxvd2x vs30,r5,r3
40 addi r5,r5,16 40 addi r5,r5,16
41 lxvx vs31,r5,r3 41 lxvd2x vs31,r5,r3
42 blr 42 blr
43FUNC_END(load_vsx) 43FUNC_END(load_vsx)
44 44
45FUNC_START(store_vsx) 45FUNC_START(store_vsx)
46 li r5,0 46 li r5,0
47 stxvx vs20,r5,r3 47 stxvd2x vs20,r5,r3
48 addi r5,r5,16 48 addi r5,r5,16
49 stxvx vs21,r5,r3 49 stxvd2x vs21,r5,r3
50 addi r5,r5,16 50 addi r5,r5,16
51 stxvx vs22,r5,r3 51 stxvd2x vs22,r5,r3
52 addi r5,r5,16 52 addi r5,r5,16
53 stxvx vs23,r5,r3 53 stxvd2x vs23,r5,r3
54 addi r5,r5,16 54 addi r5,r5,16
55 stxvx vs24,r5,r3 55 stxvd2x vs24,r5,r3
56 addi r5,r5,16 56 addi r5,r5,16
57 stxvx vs25,r5,r3 57 stxvd2x vs25,r5,r3
58 addi r5,r5,16 58 addi r5,r5,16
59 stxvx vs26,r5,r3 59 stxvd2x vs26,r5,r3
60 addi r5,r5,16 60 addi r5,r5,16
61 stxvx vs27,r5,r3 61 stxvd2x vs27,r5,r3
62 addi r5,r5,16 62 addi r5,r5,16
63 stxvx vs28,r5,r3 63 stxvd2x vs28,r5,r3
64 addi r5,r5,16 64 addi r5,r5,16
65 stxvx vs29,r5,r3 65 stxvd2x vs29,r5,r3
66 addi r5,r5,16 66 addi r5,r5,16
67 stxvx vs30,r5,r3 67 stxvd2x vs30,r5,r3
68 addi r5,r5,16 68 addi r5,r5,16
69 stxvx vs31,r5,r3 69 stxvd2x vs31,r5,r3
70 blr 70 blr
71FUNC_END(store_vsx) 71FUNC_END(store_vsx)
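
Replacing lxvx/stxvx with lxvd2x/stxvd2x swaps ISA 3.0 instructions (POWER9-only, and unknown to older binutils) for forms available since ISA 2.06, so the test builds and runs on earlier hardware. lxvd2x loads the two doublewords in big-endian element order, but because the very same instruction pair performs both the save and the restore here, the ordering cancels out. For reference, a hedged sketch of the usual little-endian fixup when element order does matter (not needed in this test):

    lxvd2x  vs20,r5,r3    # doublewords land in big-endian order
    xxswapd vs20,vs20     # swap to match lxvx ordering on LE
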
diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c
index 276139a24e6f..702f8108608d 100644
--- a/virt/kvm/arm/vgic/vgic-init.c
+++ b/virt/kvm/arm/vgic/vgic-init.c
@@ -392,6 +392,25 @@ static irqreturn_t vgic_maintenance_handler(int irq, void *data)
392} 392}
393 393
394/** 394/**
395 * kvm_vgic_init_cpu_hardware - initialize the GIC VE hardware
396 *
397 * For a specific CPU, initialize the GIC VE hardware.
398 */
399void kvm_vgic_init_cpu_hardware(void)
400{
401 BUG_ON(preemptible());
402
403 /*
404 * We want to make sure the list registers start out clear so that we
405 * only have to program the used registers.
406 */
407 if (kvm_vgic_global_state.type == VGIC_V2)
408 vgic_v2_init_lrs();
409 else
410 kvm_call_hyp(__vgic_v3_init_lrs);
411}
412
413/**
395 * kvm_vgic_hyp_init: populates the kvm_vgic_global_state variable 414 * kvm_vgic_hyp_init: populates the kvm_vgic_global_state variable
396 * according to the host GIC model. Accordingly calls either 415 * according to the host GIC model. Accordingly calls either
397 * vgic_v2/v3_probe which registers the KVM_DEVICE that can be 416 * vgic_v2/v3_probe which registers the KVM_DEVICE that can be
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v2.c b/virt/kvm/arm/vgic/vgic-mmio-v2.c
index a3ad7ff95c9b..0a4283ed9aa7 100644
--- a/virt/kvm/arm/vgic/vgic-mmio-v2.c
+++ b/virt/kvm/arm/vgic/vgic-mmio-v2.c
@@ -229,7 +229,15 @@ static unsigned long vgic_mmio_read_vcpuif(struct kvm_vcpu *vcpu,
229 val = vmcr.ctlr; 229 val = vmcr.ctlr;
230 break; 230 break;
231 case GIC_CPU_PRIMASK: 231 case GIC_CPU_PRIMASK:
232 val = vmcr.pmr; 232 /*
233 * Our KVM_DEV_TYPE_ARM_VGIC_V2 device ABI exports
234 * the PMR field as GICH_VMCR.VMPriMask rather than
235 * GICC_PMR.Priority, so we expose the upper five bits of
236 * priority mask to userspace using the lower bits in the
237 * unsigned long.
238 */
239 val = (vmcr.pmr & GICV_PMR_PRIORITY_MASK) >>
240 GICV_PMR_PRIORITY_SHIFT;
233 break; 241 break;
234 case GIC_CPU_BINPOINT: 242 case GIC_CPU_BINPOINT:
235 val = vmcr.bpr; 243 val = vmcr.bpr;
@@ -262,7 +270,15 @@ static void vgic_mmio_write_vcpuif(struct kvm_vcpu *vcpu,
262 vmcr.ctlr = val; 270 vmcr.ctlr = val;
263 break; 271 break;
264 case GIC_CPU_PRIMASK: 272 case GIC_CPU_PRIMASK:
265 vmcr.pmr = val; 273 /*
274 * Our KVM_DEV_TYPE_ARM_VGIC_V2 device ABI exports
275 * the PMR field as GICH_VMCR.VMPriMask rather than
276 * GICC_PMR.Priority, so we expose the upper five bits of
277 * priority mask to userspace using the lower bits in the
278 * unsigned long.
279 */
280 vmcr.pmr = (val << GICV_PMR_PRIORITY_SHIFT) &
281 GICV_PMR_PRIORITY_MASK;
266 break; 282 break;
267 case GIC_CPU_BINPOINT: 283 case GIC_CPU_BINPOINT:
268 vmcr.bpr = val; 284 vmcr.bpr = val;
diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c
index b834ecdf3225..b637d9c7afe3 100644
--- a/virt/kvm/arm/vgic/vgic-v2.c
+++ b/virt/kvm/arm/vgic/vgic-v2.c
@@ -36,6 +36,21 @@ static unsigned long *u64_to_bitmask(u64 *val)
36 return (unsigned long *)val; 36 return (unsigned long *)val;
37} 37}
38 38
39static inline void vgic_v2_write_lr(int lr, u32 val)
40{
41 void __iomem *base = kvm_vgic_global_state.vctrl_base;
42
43 writel_relaxed(val, base + GICH_LR0 + (lr * 4));
44}
45
46void vgic_v2_init_lrs(void)
47{
48 int i;
49
50 for (i = 0; i < kvm_vgic_global_state.nr_lr; i++)
51 vgic_v2_write_lr(i, 0);
52}
53
39void vgic_v2_process_maintenance(struct kvm_vcpu *vcpu) 54void vgic_v2_process_maintenance(struct kvm_vcpu *vcpu)
40{ 55{
41 struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2; 56 struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;
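
A hedged note on the LR write helper above: the GICH_LRn list registers sit at consecutive 32-bit MMIO offsets, hence the GICH_LR0 + (lr * 4) addressing, and writing zero marks a list register as empty:

    /* sketch of the same offset math, assuming the GICv2 layout */
    static inline u32 vgic_v2_lr_offset(int lr)
    {
            return GICH_LR0 + 4 * lr;   /* 32-bit stride between LRs */
    }
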
@@ -191,8 +206,8 @@ void vgic_v2_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
191 GICH_VMCR_ALIAS_BINPOINT_MASK; 206 GICH_VMCR_ALIAS_BINPOINT_MASK;
192 vmcr |= (vmcrp->bpr << GICH_VMCR_BINPOINT_SHIFT) & 207 vmcr |= (vmcrp->bpr << GICH_VMCR_BINPOINT_SHIFT) &
193 GICH_VMCR_BINPOINT_MASK; 208 GICH_VMCR_BINPOINT_MASK;
194 vmcr |= (vmcrp->pmr << GICH_VMCR_PRIMASK_SHIFT) & 209 vmcr |= ((vmcrp->pmr >> GICV_PMR_PRIORITY_SHIFT) <<
195 GICH_VMCR_PRIMASK_MASK; 210 GICH_VMCR_PRIMASK_SHIFT) & GICH_VMCR_PRIMASK_MASK;
196 211
197 vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = vmcr; 212 vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = vmcr;
198} 213}
@@ -207,8 +222,8 @@ void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
207 GICH_VMCR_ALIAS_BINPOINT_SHIFT; 222 GICH_VMCR_ALIAS_BINPOINT_SHIFT;
208 vmcrp->bpr = (vmcr & GICH_VMCR_BINPOINT_MASK) >> 223 vmcrp->bpr = (vmcr & GICH_VMCR_BINPOINT_MASK) >>
209 GICH_VMCR_BINPOINT_SHIFT; 224 GICH_VMCR_BINPOINT_SHIFT;
210 vmcrp->pmr = (vmcr & GICH_VMCR_PRIMASK_MASK) >> 225 vmcrp->pmr = ((vmcr & GICH_VMCR_PRIMASK_MASK) >>
211 GICH_VMCR_PRIMASK_SHIFT; 226 GICH_VMCR_PRIMASK_SHIFT) << GICV_PMR_PRIORITY_SHIFT;
212} 227}
213 228
214void vgic_v2_enable(struct kvm_vcpu *vcpu) 229void vgic_v2_enable(struct kvm_vcpu *vcpu)
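
The PMR conversions above exist because GICH_VMCR.VMPriMask holds only the upper five bits of the eight-bit GICC_PMR priority. A hedged worked example, assuming GICV_PMR_PRIORITY_SHIFT == 3 and GICV_PMR_PRIORITY_MASK == (0x1f << 3) as in the GICv2 headers:

    u32 vmprimask = (0xf8 & GICV_PMR_PRIORITY_MASK)
                    >> GICV_PMR_PRIORITY_SHIFT;          /* 0x1f */
    u32 pmr = (vmprimask << GICV_PMR_PRIORITY_SHIFT)
              & GICV_PMR_PRIORITY_MASK;                  /* 0xf8 */
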
diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h
index db28f7cadab2..6cf557e9f718 100644
--- a/virt/kvm/arm/vgic/vgic.h
+++ b/virt/kvm/arm/vgic/vgic.h
@@ -81,11 +81,18 @@ static inline bool irq_is_pending(struct vgic_irq *irq)
81 return irq->pending_latch || irq->line_level; 81 return irq->pending_latch || irq->line_level;
82} 82}
83 83
84/*
85 * This struct provides an intermediate representation of the fields contained
86 * in the GICH_VMCR and ICH_VMCR registers, such that code exporting the GIC
87 * state to userspace can generate either GICv2 or GICv3 CPU interface
88 * registers regardless of the hardware backed GIC used.
89 */
84struct vgic_vmcr { 90struct vgic_vmcr {
85 u32 ctlr; 91 u32 ctlr;
86 u32 abpr; 92 u32 abpr;
87 u32 bpr; 93 u32 bpr;
88 u32 pmr; 94 u32 pmr; /* Priority mask field in the GICC_PMR and
95 * ICC_PMR_EL1 priority field format */
89 /* Below member variables are valid only for GICv3 */ 96 /* Below member variables are valid only for GICv3 */
90 u32 grpen0; 97 u32 grpen0;
91 u32 grpen1; 98 u32 grpen1;
@@ -130,6 +137,8 @@ int vgic_v2_map_resources(struct kvm *kvm);
130int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address, 137int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
131 enum vgic_type); 138 enum vgic_type);
132 139
140void vgic_v2_init_lrs(void);
141
133static inline void vgic_get_irq_kref(struct vgic_irq *irq) 142static inline void vgic_get_irq_kref(struct vgic_irq *irq)
134{ 143{
135 if (irq->intid < VGIC_MIN_LPI) 144 if (irq->intid < VGIC_MIN_LPI)
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index a29786dd9522..4d28a9ddbee0 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -870,7 +870,8 @@ kvm_deassign_ioeventfd_idx(struct kvm *kvm, enum kvm_bus bus_idx,
870 continue; 870 continue;
871 871
872 kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev); 872 kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
873 kvm->buses[bus_idx]->ioeventfd_count--; 873 if (kvm->buses[bus_idx])
874 kvm->buses[bus_idx]->ioeventfd_count--;
874 ioeventfd_release(p); 875 ioeventfd_release(p);
875 ret = 0; 876 ret = 0;
876 break; 877 break;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index a17d78759727..88257b311cb5 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -727,8 +727,11 @@ static void kvm_destroy_vm(struct kvm *kvm)
727 list_del(&kvm->vm_list); 727 list_del(&kvm->vm_list);
728 spin_unlock(&kvm_lock); 728 spin_unlock(&kvm_lock);
729 kvm_free_irq_routing(kvm); 729 kvm_free_irq_routing(kvm);
730 for (i = 0; i < KVM_NR_BUSES; i++) 730 for (i = 0; i < KVM_NR_BUSES; i++) {
731 kvm_io_bus_destroy(kvm->buses[i]); 731 if (kvm->buses[i])
732 kvm_io_bus_destroy(kvm->buses[i]);
733 kvm->buses[i] = NULL;
734 }
732 kvm_coalesced_mmio_free(kvm); 735 kvm_coalesced_mmio_free(kvm);
733#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) 736#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
734 mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm); 737 mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
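
Setting each kvm->buses[i] to NULL here is what gives the NULL checks added to the I/O-bus accessors below something to observe: a reader racing with VM destruction re-fetches the pointer under SRCU and backs off instead of touching a freed bus. A hedged sketch of the consumer-side pattern (mirroring the later hunks):

    bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
    if (!bus)
            return -ENOMEM;   /* bus already torn down */
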
@@ -1062,7 +1065,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
1062 * changes) is disallowed above, so any other attribute changes getting 1065 * changes) is disallowed above, so any other attribute changes getting
1063 * here can be skipped. 1066 * here can be skipped.
1064 */ 1067 */
1065 if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) { 1068 if (as_id == 0 && (change == KVM_MR_CREATE || change == KVM_MR_MOVE)) {
1066 r = kvm_iommu_map_pages(kvm, &new); 1069 r = kvm_iommu_map_pages(kvm, &new);
1067 return r; 1070 return r;
1068 } 1071 }
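
The added as_id test confines the legacy IOMMU mapping to the default address space. As a hedged restatement of KVM's convention (established elsewhere in this function, not in this hunk), the address-space id arrives encoded in the upper half of the userspace slot number:

    u16 as_id = mem->slot >> 16;   /* address space id; 0 = default */
    u16 id    = (u16)mem->slot;    /* slot index within that space */
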
@@ -3474,6 +3477,8 @@ int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
3474 }; 3477 };
3475 3478
3476 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); 3479 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
3480 if (!bus)
3481 return -ENOMEM;
3477 r = __kvm_io_bus_write(vcpu, bus, &range, val); 3482 r = __kvm_io_bus_write(vcpu, bus, &range, val);
3478 return r < 0 ? r : 0; 3483 return r < 0 ? r : 0;
3479} 3484}
@@ -3491,6 +3496,8 @@ int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
3491 }; 3496 };
3492 3497
3493 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); 3498 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
3499 if (!bus)
3500 return -ENOMEM;
3494 3501
3495 /* First try the device referenced by cookie. */ 3502 /* First try the device referenced by cookie. */
3496 if ((cookie >= 0) && (cookie < bus->dev_count) && 3503 if ((cookie >= 0) && (cookie < bus->dev_count) &&
@@ -3541,6 +3548,8 @@ int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
3541 }; 3548 };
3542 3549
3543 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); 3550 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
3551 if (!bus)
3552 return -ENOMEM;
3544 r = __kvm_io_bus_read(vcpu, bus, &range, val); 3553 r = __kvm_io_bus_read(vcpu, bus, &range, val);
3545 return r < 0 ? r : 0; 3554 return r < 0 ? r : 0;
3546} 3555}
@@ -3553,6 +3562,9 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
3553 struct kvm_io_bus *new_bus, *bus; 3562 struct kvm_io_bus *new_bus, *bus;
3554 3563
3555 bus = kvm->buses[bus_idx]; 3564 bus = kvm->buses[bus_idx];
3565 if (!bus)
3566 return -ENOMEM;
3567
3556 /* exclude ioeventfd which is limited by maximum fd */ 3568 /* exclude ioeventfd which is limited by maximum fd */
3557 if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1) 3569 if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1)
3558 return -ENOSPC; 3570 return -ENOSPC;
@@ -3572,37 +3584,41 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
3572} 3584}
3573 3585
3574/* Caller must hold slots_lock. */ 3586/* Caller must hold slots_lock. */
3575int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, 3587void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
3576 struct kvm_io_device *dev) 3588 struct kvm_io_device *dev)
3577{ 3589{
3578 int i, r; 3590 int i;
3579 struct kvm_io_bus *new_bus, *bus; 3591 struct kvm_io_bus *new_bus, *bus;
3580 3592
3581 bus = kvm->buses[bus_idx]; 3593 bus = kvm->buses[bus_idx];
3582 r = -ENOENT; 3594 if (!bus)
3595 return;
3596
3583 for (i = 0; i < bus->dev_count; i++) 3597 for (i = 0; i < bus->dev_count; i++)
3584 if (bus->range[i].dev == dev) { 3598 if (bus->range[i].dev == dev) {
3585 r = 0;
3586 break; 3599 break;
3587 } 3600 }
3588 3601
3589 if (r) 3602 if (i == bus->dev_count)
3590 return r; 3603 return;
3591 3604
3592 new_bus = kmalloc(sizeof(*bus) + ((bus->dev_count - 1) * 3605 new_bus = kmalloc(sizeof(*bus) + ((bus->dev_count - 1) *
3593 sizeof(struct kvm_io_range)), GFP_KERNEL); 3606 sizeof(struct kvm_io_range)), GFP_KERNEL);
3594 if (!new_bus) 3607 if (!new_bus) {
3595 return -ENOMEM; 3608 pr_err("kvm: failed to shrink bus, removing it completely\n");
3609 goto broken;
3610 }
3596 3611
3597 memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range)); 3612 memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range));
3598 new_bus->dev_count--; 3613 new_bus->dev_count--;
3599 memcpy(new_bus->range + i, bus->range + i + 1, 3614 memcpy(new_bus->range + i, bus->range + i + 1,
3600 (new_bus->dev_count - i) * sizeof(struct kvm_io_range)); 3615 (new_bus->dev_count - i) * sizeof(struct kvm_io_range));
3601 3616
3617broken:
3602 rcu_assign_pointer(kvm->buses[bus_idx], new_bus); 3618 rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
3603 synchronize_srcu_expedited(&kvm->srcu); 3619 synchronize_srcu_expedited(&kvm->srcu);
3604 kfree(bus); 3620 kfree(bus);
3605 return r; 3621 return;
3606} 3622}
3607 3623
3608struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx, 3624struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
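
The now-void unregister path shrinks the bus array with two copies; a hedged annotation of the index math, for a bus of dev_count == n with the match at index i:

    /* header plus entries [0, i) are kept by the first memcpy;
     * entries [i+1, n) slide down one slot via the second, giving
     * new_bus->dev_count == n - 1. On kmalloc failure the bus is
     * published as NULL instead, so later accesses see no bus at all. */
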
@@ -3615,6 +3631,8 @@ struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
3615 srcu_idx = srcu_read_lock(&kvm->srcu); 3631 srcu_idx = srcu_read_lock(&kvm->srcu);
3616 3632
3617 bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu); 3633 bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
3634 if (!bus)
3635 goto out_unlock;
3618 3636
3619 dev_idx = kvm_io_bus_get_first_dev(bus, addr, 1); 3637 dev_idx = kvm_io_bus_get_first_dev(bus, addr, 1);
3620 if (dev_idx < 0) 3638 if (dev_idx < 0)