Diffstat (file mode, path, lines changed):
-rw-r--r--  Documentation/acpi/acpi-lid.txt | 16
-rw-r--r--  Documentation/admin-guide/kernel-parameters.txt | 16
-rw-r--r--  Documentation/admin-guide/pm/cpufreq.rst | 19
-rw-r--r--  Documentation/admin-guide/pm/index.rst | 1
-rw-r--r--  Documentation/admin-guide/pm/intel_pstate.rst | 755
-rw-r--r--  Documentation/cpu-freq/intel-pstate.txt | 281
-rw-r--r--  Documentation/devicetree/bindings/clock/sunxi-ccu.txt | 7
-rw-r--r--  Documentation/devicetree/bindings/gpio/gpio-mvebu.txt | 6
-rw-r--r--  Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/mfd/hisilicon,hi655x.txt | 6
-rw-r--r--  Documentation/devicetree/bindings/mfd/stm32-timers.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/mmc/mmc-pwrseq-simple.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/net/dsa/b53.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/net/dsa/marvell.txt | 4
-rw-r--r--  Documentation/devicetree/bindings/net/fsl-fec.txt | 4
-rw-r--r--  Documentation/devicetree/bindings/net/smsc911x.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/spi/spi-bus.txt | 76
-rw-r--r--  Documentation/devicetree/bindings/spi/spi-meson.txt | 31
-rw-r--r--  Documentation/devicetree/bindings/spi/spi-mt65xx.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/staging/ion/hi6220-ion.txt | 31
-rw-r--r--  Documentation/devicetree/bindings/usb/dwc2.txt | 1
-rw-r--r--  Documentation/input/devices/edt-ft5x06.rst | 2
-rw-r--r--  Documentation/networking/dpaa.txt | 194
-rw-r--r--  Documentation/networking/scaling.txt | 2
-rw-r--r--  Documentation/networking/tcp.txt | 31
-rw-r--r--  Documentation/sound/hd-audio/models.rst | 114
-rw-r--r--  Documentation/spi/spi-summary | 27
-rw-r--r--  Documentation/usb/typec.rst | 6
-rw-r--r--  Documentation/watchdog/watchdog-parameters.txt | 2
-rw-r--r--  MAINTAINERS | 52
-rw-r--r--  Makefile | 6
-rw-r--r--  arch/alpha/kernel/osf_sys.c | 6
-rw-r--r--  arch/arc/include/asm/processor.h | 2
-rw-r--r--  arch/arc/mm/mmap.c | 2
-rw-r--r--  arch/arm/Kconfig | 1
-rw-r--r--  arch/arm/boot/compressed/efi-header.S | 5
-rw-r--r--  arch/arm/boot/compressed/head.S | 17
-rw-r--r--  arch/arm/boot/dts/am335x-sl50.dts | 8
-rw-r--r--  arch/arm/boot/dts/bcm283x-rpi-smsc9512.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/bcm283x-rpi-smsc9514.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/bcm283x.dtsi | 27
-rw-r--r--  arch/arm/boot/dts/dra7-evm.dts | 2
-rw-r--r--  arch/arm/boot/dts/dra7.dtsi | 4
-rw-r--r--  arch/arm/boot/dts/imx53-qsrb.dts | 2
-rw-r--r--  arch/arm/boot/dts/imx6sx-sdb.dts | 17
-rw-r--r--  arch/arm/boot/dts/imx6ul-14x14-evk.dts | 6
l---------  arch/arm/boot/dts/include/arm | 1
l---------  arch/arm/boot/dts/include/arm64 | 1
l---------  arch/arm/boot/dts/include/dt-bindings | 1
-rw-r--r--  arch/arm/boot/dts/keystone-k2l-netcp.dtsi | 4
-rw-r--r--  arch/arm/boot/dts/keystone-k2l.dtsi | 8
-rw-r--r--  arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts | 6
-rw-r--r--  arch/arm/boot/dts/mt7623.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/omap3-gta04.dtsi | 3
-rw-r--r--  arch/arm/boot/dts/omap4-panda-a4.dts | 2
-rw-r--r--  arch/arm/boot/dts/omap4-panda-es.dts | 2
-rw-r--r--  arch/arm/boot/dts/sunxi-h3-h5.dtsi | 7
-rw-r--r--  arch/arm/boot/dts/versatile-pb.dts | 2
-rw-r--r--  arch/arm/common/mcpm_entry.c | 6
-rw-r--r--  arch/arm/configs/gemini_defconfig | 68
-rw-r--r--  arch/arm/include/asm/device.h | 3
-rw-r--r--  arch/arm/include/asm/kvm_coproc.h | 3
-rw-r--r--  arch/arm/include/asm/pgtable-nommu.h | 1
-rw-r--r--  arch/arm/kernel/setup.c | 2
-rw-r--r--  arch/arm/kvm/coproc.c | 106
-rw-r--r--  arch/arm/kvm/handle_exit.c | 4
-rw-r--r--  arch/arm/kvm/hyp/Makefile | 2
-rw-r--r--  arch/arm/kvm/hyp/switch.c | 4
-rw-r--r--  arch/arm/kvm/init.S | 5
-rw-r--r--  arch/arm/kvm/trace.h | 8
-rw-r--r--  arch/arm/mach-at91/Kconfig | 1
-rw-r--r--  arch/arm/mach-at91/pm.c | 2
-rw-r--r--  arch/arm/mach-bcm/bcm_kona_smc.c | 2
-rw-r--r--  arch/arm/mach-cns3xxx/core.c | 2
-rw-r--r--  arch/arm/mach-davinci/pm.c | 7
-rw-r--r--  arch/arm/mach-omap2/common.h | 3
-rw-r--r--  arch/arm/mach-omap2/omap-mpuss-lowpower.c | 10
-rw-r--r--  arch/arm/mach-omap2/omap-smp.c | 11
-rw-r--r--  arch/arm/mach-omap2/prm_common.c | 2
-rw-r--r--  arch/arm/mach-omap2/vc.c | 2
-rw-r--r--  arch/arm/mach-spear/time.c | 2
-rw-r--r--  arch/arm/mm/dma-mapping.c | 29
-rw-r--r--  arch/arm/mm/mmap.c | 4
-rw-r--r--  arch/arm/mm/mmu.c | 8
-rw-r--r--  arch/arm64/Kconfig | 4
-rw-r--r--  arch/arm64/Kconfig.platforms | 5
-rw-r--r--  arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi | 5
-rw-r--r--  arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi | 2
l---------  arch/arm64/boot/dts/allwinner/sunxi-h3-h5.dtsi | 1
-rw-r--r--  arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts | 78
-rw-r--r--  arch/arm64/boot/dts/hisilicon/hi6220.dtsi | 31
l---------  arch/arm64/boot/dts/include/arm | 1
l---------  arch/arm64/boot/dts/include/arm64 | 1
l---------  arch/arm64/boot/dts/include/dt-bindings | 1
-rw-r--r--  arch/arm64/boot/dts/marvell/armada-3720-db.dts | 8
-rw-r--r--  arch/arm64/boot/dts/marvell/armada-37xx.dtsi | 73
-rw-r--r--  arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi | 3
-rw-r--r--  arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi | 3
-rw-r--r--  arch/arm64/boot/dts/mediatek/mt8173-evb.dts | 3
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts | 2
-rw-r--r--  arch/arm64/configs/defconfig | 116
-rw-r--r--  arch/arm64/include/asm/acpi.h | 6
-rw-r--r--  arch/arm64/include/asm/atomic_ll_sc.h | 1
-rw-r--r--  arch/arm64/include/asm/cpufeature.h | 12
-rw-r--r--  arch/arm64/include/asm/kvm_host.h | 8
-rw-r--r--  arch/arm64/include/asm/sysreg.h | 4
-rw-r--r--  arch/arm64/kernel/cpufeature.c | 23
-rw-r--r--  arch/arm64/kernel/pci.c | 4
-rw-r--r--  arch/arm64/kernel/perf_event.c | 23
-rw-r--r--  arch/arm64/kernel/vdso.c | 5
-rw-r--r--  arch/arm64/kernel/vdso/gettimeofday.S | 1
-rw-r--r--  arch/arm64/kvm/hyp-init.S | 11
-rw-r--r--  arch/arm64/kvm/hyp/Makefile | 2
-rw-r--r--  arch/arm64/kvm/vgic-sys-reg-v3.c | 10
-rw-r--r--  arch/arm64/net/bpf_jit_comp.c | 12
-rw-r--r--  arch/blackfin/include/asm/processor.h | 5
-rw-r--r--  arch/c6x/include/asm/processor.h | 5
-rw-r--r--  arch/cris/arch-v10/kernel/process.c | 8
-rw-r--r--  arch/cris/arch-v32/kernel/process.c | 8
l---------  arch/cris/boot/dts/include/dt-bindings | 1
-rw-r--r--  arch/cris/include/asm/processor.h | 2
-rw-r--r--  arch/frv/include/asm/processor.h | 5
-rw-r--r--  arch/frv/include/asm/timex.h | 6
-rw-r--r--  arch/frv/kernel/process.c | 9
-rw-r--r--  arch/frv/mm/elf-fdpic.c | 2
-rw-r--r--  arch/h8300/include/asm/processor.h | 4
-rw-r--r--  arch/h8300/kernel/process.c | 5
-rw-r--r--  arch/hexagon/include/asm/processor.h | 3
-rw-r--r--  arch/hexagon/kernel/process.c | 8
-rw-r--r--  arch/hexagon/mm/uaccess.c | 5
-rw-r--r--  arch/ia64/include/asm/processor.h | 17
-rw-r--r--  arch/m32r/include/asm/processor.h | 2
-rw-r--r--  arch/m32r/kernel/process.c | 8
-rw-r--r--  arch/m68k/include/asm/processor.h | 2
-rw-r--r--  arch/m68k/kernel/process.c | 14
l---------  arch/metag/boot/dts/include/dt-bindings | 1
-rw-r--r--  arch/microblaze/include/asm/processor.h | 6
-rw-r--r--  arch/microblaze/kernel/process.c | 17
-rw-r--r--  arch/mips/boot/Makefile | 10
l---------  arch/mips/boot/dts/include/dt-bindings | 1
-rw-r--r--  arch/mips/include/asm/highmem.h | 5
-rw-r--r--  arch/mips/include/asm/kprobes.h | 3
-rw-r--r--  arch/mips/include/asm/pgtable-32.h | 7
-rw-r--r--  arch/mips/kernel/branch.c | 4
-rw-r--r--  arch/mips/kernel/entry.S | 3
-rw-r--r--  arch/mips/kernel/ftrace.c | 24
-rw-r--r--  arch/mips/kernel/head.S | 2
-rw-r--r--  arch/mips/kernel/perf_event_mipsxx.c | 6
-rw-r--r--  arch/mips/kernel/pm-cps.c | 9
-rw-r--r--  arch/mips/kernel/process.c | 1
-rw-r--r--  arch/mips/kernel/traps.c | 2
-rw-r--r--  arch/mips/kvm/tlb.c | 6
-rw-r--r--  arch/mips/math-emu/dp_maddf.c | 5
-rw-r--r--  arch/mips/math-emu/sp_maddf.c | 5
-rw-r--r--  arch/mips/mm/dma-default.c | 23
-rw-r--r--  arch/mips/mm/mmap.c | 2
-rw-r--r--  arch/mips/mm/pgtable-32.c | 6
-rw-r--r--  arch/mn10300/include/asm/processor.h | 5
-rw-r--r--  arch/mn10300/kernel/process.c | 8
-rw-r--r--  arch/nios2/include/asm/processor.h | 3
-rw-r--r--  arch/openrisc/include/asm/processor.h | 5
-rw-r--r--  arch/openrisc/kernel/process.c | 7
-rw-r--r--  arch/parisc/include/asm/processor.h | 5
-rw-r--r--  arch/parisc/kernel/process.c | 5
-rw-r--r--  arch/parisc/kernel/sys_parisc.c | 15
-rw-r--r--  arch/powerpc/Kconfig | 21
l---------  arch/powerpc/boot/dts/include/dt-bindings | 1
-rw-r--r--  arch/powerpc/include/asm/book3s/64/hash-4k.h | 2
-rw-r--r--  arch/powerpc/include/asm/bug.h | 2
-rw-r--r--  arch/powerpc/include/asm/cputable.h | 3
-rw-r--r--  arch/powerpc/include/asm/kprobes.h | 1
-rw-r--r--  arch/powerpc/include/asm/module.h | 4
-rw-r--r--  arch/powerpc/include/asm/page.h | 12
-rw-r--r--  arch/powerpc/include/asm/processor.h | 31
-rw-r--r--  arch/powerpc/include/asm/topology.h | 14
-rw-r--r--  arch/powerpc/include/asm/uaccess.h | 8
-rw-r--r--  arch/powerpc/include/asm/xive.h | 12
-rw-r--r--  arch/powerpc/include/uapi/asm/cputable.h | 2
-rw-r--r--  arch/powerpc/kernel/cputable.c | 3
-rw-r--r--  arch/powerpc/kernel/dt_cpu_ftrs.c | 58
-rw-r--r--  arch/powerpc/kernel/exceptions-64s.S | 11
-rw-r--r--  arch/powerpc/kernel/idle_book3s.S | 2
-rw-r--r--  arch/powerpc/kernel/kprobes.c | 20
-rw-r--r--  arch/powerpc/kernel/process.c | 22
-rw-r--r--  arch/powerpc/kernel/prom.c | 2
-rw-r--r--  arch/powerpc/kernel/setup-common.c | 2
-rw-r--r--  arch/powerpc/kernel/setup_64.c | 35
-rw-r--r--  arch/powerpc/kernel/trace/ftrace_64_mprofile.S | 59
-rw-r--r--  arch/powerpc/kvm/Kconfig | 2
-rw-r--r--  arch/powerpc/kvm/Makefile | 4
-rw-r--r--  arch/powerpc/kvm/book3s_64_vio_hv.c | 13
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c | 51
-rw-r--r--  arch/powerpc/kvm/book3s_hv_builtin.c | 9
-rw-r--r--  arch/powerpc/kvm/book3s_hv_interrupts.S | 12
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rmhandlers.S | 75
-rw-r--r--  arch/powerpc/kvm/book3s_pr_papr.c | 70
-rw-r--r--  arch/powerpc/kvm/book3s_xive_template.c | 4
-rw-r--r--  arch/powerpc/kvm/powerpc.c | 4
-rw-r--r--  arch/powerpc/mm/dump_linuxpagetables.c | 7
-rw-r--r--  arch/powerpc/mm/hugetlbpage-radix.c | 2
-rw-r--r--  arch/powerpc/mm/mmap.c | 4
-rw-r--r--  arch/powerpc/mm/mmu_context_book3s64.c | 2
-rw-r--r--  arch/powerpc/mm/slice.c | 2
-rw-r--r--  arch/powerpc/perf/perf_regs.c | 3
-rw-r--r--  arch/powerpc/perf/power9-pmu.c | 4
-rw-r--r--  arch/powerpc/platforms/Kconfig | 11
-rw-r--r--  arch/powerpc/platforms/cell/spu_base.c | 4
-rw-r--r--  arch/powerpc/platforms/cell/spufs/coredump.c | 2
-rw-r--r--  arch/powerpc/platforms/powernv/npu-dma.c | 102
-rw-r--r--  arch/powerpc/platforms/powernv/subcore.c | 8
-rw-r--r--  arch/powerpc/platforms/pseries/hotplug-memory.c | 2
-rw-r--r--  arch/powerpc/sysdev/simple_gpio.c | 3
-rw-r--r--  arch/powerpc/sysdev/xive/common.c | 2
-rw-r--r--  arch/s390/Kconfig | 3
-rw-r--r--  arch/s390/configs/default_defconfig | 39
-rw-r--r--  arch/s390/configs/gcov_defconfig | 28
-rw-r--r--  arch/s390/configs/performance_defconfig | 27
-rw-r--r--  arch/s390/configs/zfcpdump_defconfig | 6
-rw-r--r--  arch/s390/defconfig | 8
-rw-r--r--  arch/s390/include/asm/debug.h | 3
-rw-r--r--  arch/s390/include/asm/dis.h | 2
-rw-r--r--  arch/s390/include/asm/kprobes.h | 20
-rw-r--r--  arch/s390/include/asm/kvm_host.h | 1
-rw-r--r--  arch/s390/include/asm/processor.h | 5
-rw-r--r--  arch/s390/include/asm/sysinfo.h | 2
-rw-r--r--  arch/s390/kernel/debug.c | 8
-rw-r--r--  arch/s390/kernel/entry.S | 40
-rw-r--r--  arch/s390/kernel/ftrace.c | 4
-rw-r--r--  arch/s390/kernel/ipl.c | 7
-rw-r--r--  arch/s390/kernel/process.c | 25
-rw-r--r--  arch/s390/kernel/vmlinux.lds.S | 8
-rw-r--r--  arch/s390/kvm/gaccess.c | 15
-rw-r--r--  arch/s390/kvm/interrupt.c | 4
-rw-r--r--  arch/s390/kvm/kvm-s390.c | 2
-rw-r--r--  arch/s390/lib/probes.c | 1
-rw-r--r--  arch/s390/lib/uaccess.c | 4
-rw-r--r--  arch/s390/mm/mmap.c | 4
-rw-r--r--  arch/score/include/asm/processor.h | 1
-rw-r--r--  arch/score/kernel/process.c | 5
-rw-r--r--  arch/sh/mm/mmap.c | 4
-rw-r--r--  arch/sparc/Kconfig | 15
-rw-r--r--  arch/sparc/include/asm/hugetlb.h | 6
-rw-r--r--  arch/sparc/include/asm/mmu_64.h | 2
-rw-r--r--  arch/sparc/include/asm/mmu_context_64.h | 32
-rw-r--r--  arch/sparc/include/asm/pgtable_32.h | 4
-rw-r--r--  arch/sparc/include/asm/pil.h | 1
-rw-r--r--  arch/sparc/include/asm/processor_32.h | 3
-rw-r--r--  arch/sparc/include/asm/processor_64.h | 2
-rw-r--r--  arch/sparc/include/asm/setup.h | 2
-rw-r--r--  arch/sparc/include/asm/vio.h | 1
-rw-r--r--  arch/sparc/kernel/ds.c | 2
-rw-r--r--  arch/sparc/kernel/ftrace.c | 13
-rw-r--r--  arch/sparc/kernel/irq_64.c | 17
-rw-r--r--  arch/sparc/kernel/kernel.h | 1
-rw-r--r--  arch/sparc/kernel/process_32.c | 8
-rw-r--r--  arch/sparc/kernel/process_64.c | 19
-rw-r--r--  arch/sparc/kernel/smp_64.c | 31
-rw-r--r--  arch/sparc/kernel/sys_sparc_64.c | 4
-rw-r--r--  arch/sparc/kernel/tsb.S | 11
-rw-r--r--  arch/sparc/kernel/ttable_64.S | 2
-rw-r--r--  arch/sparc/kernel/vio.c | 68
-rw-r--r--  arch/sparc/lib/Makefile | 1
-rw-r--r--  arch/sparc/lib/multi3.S | 35
-rw-r--r--  arch/sparc/mm/hugetlbpage.c | 2
-rw-r--r--  arch/sparc/mm/init_32.c | 2
-rw-r--r--  arch/sparc/mm/init_64.c | 89
-rw-r--r--  arch/sparc/mm/tsb.c | 7
-rw-r--r--  arch/sparc/mm/ultra.S | 5
-rw-r--r--  arch/tile/include/asm/processor.h | 7
-rw-r--r--  arch/tile/mm/hugetlbpage.c | 2
-rw-r--r--  arch/um/include/asm/processor-generic.h | 2
-rw-r--r--  arch/um/kernel/um_arch.c | 6
-rw-r--r--  arch/x86/Kconfig | 6
-rw-r--r--  arch/x86/Makefile | 2
-rw-r--r--  arch/x86/boot/compressed/Makefile | 2
-rw-r--r--  arch/x86/boot/compressed/kaslr.c | 3
-rw-r--r--  arch/x86/boot/compressed/misc.c | 6
-rw-r--r--  arch/x86/boot/compressed/misc.h | 2
-rw-r--r--  arch/x86/entry/entry_32.S | 30
-rw-r--r--  arch/x86/entry/entry_64.S | 11
-rw-r--r--  arch/x86/events/intel/core.c | 4
-rw-r--r--  arch/x86/events/intel/uncore.c | 2
-rw-r--r--  arch/x86/include/asm/extable.h | 1
-rw-r--r--  arch/x86/include/asm/kvm_emulate.h | 1
-rw-r--r--  arch/x86/include/asm/kvm_host.h | 2
-rw-r--r--  arch/x86/include/asm/mce.h | 1
-rw-r--r--  arch/x86/include/asm/mshyperv.h | 3
-rw-r--r--  arch/x86/include/asm/processor.h | 2
-rw-r--r--  arch/x86/include/asm/uaccess.h | 11
-rw-r--r--  arch/x86/kernel/alternative.c | 9
-rw-r--r--  arch/x86/kernel/cpu/cyrix.c | 1
-rw-r--r--  arch/x86/kernel/cpu/intel_rdt_rdtgroup.c | 4
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce.c | 13
-rw-r--r--  arch/x86/kernel/cpu/microcode/amd.c | 16
-rw-r--r--  arch/x86/kernel/cpu/microcode/intel.c | 3
-rw-r--r--  arch/x86/kernel/fpu/init.c | 1
-rw-r--r--  arch/x86/kernel/ftrace.c | 20
-rw-r--r--  arch/x86/kernel/kprobes/core.c | 9
-rw-r--r--  arch/x86/kernel/kvm.c | 2
-rw-r--r--  arch/x86/kernel/process.c | 11
-rw-r--r--  arch/x86/kernel/process_32.c | 2
-rw-r--r--  arch/x86/kernel/setup.c | 4
-rw-r--r--  arch/x86/kernel/sys_x86_64.c | 4
-rw-r--r--  arch/x86/kernel/tboot.c | 2
-rw-r--r--  arch/x86/kernel/traps.c | 2
-rw-r--r--  arch/x86/kernel/unwind_frame.c | 49
-rw-r--r--  arch/x86/kvm/cpuid.c | 20
-rw-r--r--  arch/x86/kvm/emulate.c | 3
-rw-r--r--  arch/x86/kvm/lapic.c | 5
-rw-r--r--  arch/x86/kvm/mmu.c | 7
-rw-r--r--  arch/x86/kvm/mmu.h | 1
-rw-r--r--  arch/x86/kvm/paging_tmpl.h | 35
-rw-r--r--  arch/x86/kvm/pmu_intel.c | 2
-rw-r--r--  arch/x86/kvm/svm.c | 29
-rw-r--r--  arch/x86/kvm/vmx.c | 153
-rw-r--r--  arch/x86/kvm/x86.c | 117
-rw-r--r--  arch/x86/mm/extable.c | 3
-rw-r--r--  arch/x86/mm/hugetlbpage.c | 2
-rw-r--r--  arch/x86/mm/init.c | 6
-rw-r--r--  arch/x86/mm/init_64.c | 8
-rw-r--r--  arch/x86/mm/pageattr.c | 2
-rw-r--r--  arch/x86/platform/efi/efi.c | 6
-rw-r--r--  arch/x86/platform/efi/efi_64.c | 79
-rw-r--r--  arch/x86/platform/efi/quirks.c | 3
-rw-r--r--  arch/x86/xen/enlighten_pv.c | 15
-rw-r--r--  arch/x86/xen/mmu.c | 2
-rw-r--r--  arch/x86/xen/mmu_pv.c | 102
-rw-r--r--  arch/xtensa/include/asm/irq.h | 3
-rw-r--r--  arch/xtensa/include/asm/processor.h | 2
-rw-r--r--  arch/xtensa/kernel/irq.c | 5
-rw-r--r--  arch/xtensa/kernel/setup.c | 3
-rw-r--r--  arch/xtensa/kernel/syscall.c | 2
-rw-r--r--  arch/xtensa/kernel/vmlinux.lds.S | 6
-rw-r--r--  arch/xtensa/platforms/iss/simdisk.c | 3
-rw-r--r--  arch/xtensa/platforms/xtfpga/include/platform/hardware.h | 6
-rw-r--r--  arch/xtensa/platforms/xtfpga/setup.c | 10
-rw-r--r--  block/bfq-cgroup.c | 116
-rw-r--r--  block/bfq-iosched.c | 2
-rw-r--r--  block/bfq-iosched.h | 23
-rw-r--r--  block/bio-integrity.c | 3
-rw-r--r--  block/bio.c | 12
-rw-r--r--  block/blk-cgroup.c | 2
-rw-r--r--  block/blk-core.c | 10
-rw-r--r--  block/blk-mq-sched.c | 58
-rw-r--r--  block/blk-mq-sched.h | 9
-rw-r--r--  block/blk-mq.c | 70
-rw-r--r--  block/blk-sysfs.c | 42
-rw-r--r--  block/blk-throttle.c | 184
-rw-r--r--  block/blk.h | 2
-rw-r--r--  block/cfq-iosched.c | 17
-rw-r--r--  block/partition-generic.c | 4
-rw-r--r--  block/partitions/msdos.c | 2
-rw-r--r--  crypto/asymmetric_keys/public_key.c | 2
-rw-r--r--  crypto/asymmetric_keys/verify_pefile.c | 4
-rw-r--r--  crypto/asymmetric_keys/x509_cert_parser.c | 1
-rw-r--r--  crypto/drbg.c | 5
-rw-r--r--  crypto/gcm.c | 6
-rw-r--r--  crypto/skcipher.c | 40
-rw-r--r--  drivers/acpi/acpica/tbutils.c | 38
-rw-r--r--  drivers/acpi/acpica/utresrc.c | 9
-rw-r--r--  drivers/acpi/arm64/iort.c | 22
-rw-r--r--  drivers/acpi/battery.c | 2
-rw-r--r--  drivers/acpi/button.c | 16
-rw-r--r--  drivers/acpi/device_pm.c | 3
-rw-r--r--  drivers/acpi/nfit/mce.c | 2
-rw-r--r--  drivers/acpi/scan.c | 71
-rw-r--r--  drivers/acpi/sleep.c | 28
-rw-r--r--  drivers/acpi/sysfs.c | 7
-rw-r--r--  drivers/ata/ahci.c | 38
-rw-r--r--  drivers/ata/libahci_platform.c | 5
-rw-r--r--  drivers/ata/libata-core.c | 2
-rw-r--r--  drivers/ata/sata_mv.c | 13
-rw-r--r--  drivers/ata/sata_rcar.c | 15
-rw-r--r--  drivers/base/power/main.c | 5
-rw-r--r--  drivers/base/power/wakeup.c | 29
-rw-r--r--  drivers/block/drbd/drbd_req.c | 27
-rw-r--r--  drivers/block/loop.c | 3
-rw-r--r--  drivers/block/nbd.c | 15
-rw-r--r--  drivers/block/rbd.c | 2
-rw-r--r--  drivers/block/xen-blkback/blkback.c | 26
-rw-r--r--  drivers/block/xen-blkback/common.h | 26
-rw-r--r--  drivers/block/xen-blkback/xenbus.c | 23
-rw-r--r--  drivers/char/lp.c | 6
-rw-r--r--  drivers/char/mem.c | 5
-rw-r--r--  drivers/char/pcmcia/cm4040_cs.c | 6
-rw-r--r--  drivers/char/random.c | 51
-rw-r--r--  drivers/clk/meson/Kconfig | 1
-rw-r--r--  drivers/clk/sunxi-ng/Kconfig | 1
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun50i-a64.h | 4
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun5i.c | 2
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun6i-a31.c | 2
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun8i-h3.h | 4
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun8i-v3s.c | 2
-rw-r--r--  drivers/clocksource/arm_arch_timer.c | 4
-rw-r--r--  drivers/clocksource/cadence_ttc_timer.c | 1
-rw-r--r--  drivers/clocksource/timer-sun5i.c | 1
-rw-r--r--  drivers/cpufreq/Kconfig.arm | 9
-rw-r--r--  drivers/cpufreq/Makefile | 2
-rw-r--r--  drivers/cpufreq/cpufreq.c | 1
-rw-r--r--  drivers/cpufreq/cpufreq_conservative.c | 4
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 5
-rw-r--r--  drivers/cpufreq/kirkwood-cpufreq.c | 19
-rw-r--r--  drivers/cpuidle/dt_idle_states.c | 4
-rw-r--r--  drivers/dax/super.c | 11
-rw-r--r--  drivers/devfreq/event/exynos-nocp.c | 6
-rw-r--r--  drivers/devfreq/event/exynos-ppmu.c | 8
-rw-r--r--  drivers/dma/ep93xx_dma.c | 39
-rw-r--r--  drivers/dma/mv_xor_v2.c | 109
-rw-r--r--  drivers/dma/pl330.c | 3
-rw-r--r--  drivers/dma/sh/rcar-dmac.c | 3
-rw-r--r--  drivers/dma/sh/usb-dmac.c | 2
-rw-r--r--  drivers/edac/amd64_edac.c | 40
-rw-r--r--  drivers/firmware/dmi-id.c | 2
-rw-r--r--  drivers/firmware/dmi_scan.c | 50
-rw-r--r--  drivers/firmware/efi/efi-bgrt.c | 27
-rw-r--r--  drivers/firmware/efi/efi-pstore.c | 29
-rw-r--r--  drivers/firmware/efi/libstub/secureboot.c | 4
-rw-r--r--  drivers/firmware/google/vpd.c | 29
-rw-r--r--  drivers/firmware/ti_sci.c | 3
-rw-r--r--  drivers/gpio/gpio-aspeed.c | 3
-rw-r--r--  drivers/gpio/gpio-crystalcove.c | 54
-rw-r--r--  drivers/gpio/gpio-mvebu.c | 15
-rw-r--r--  drivers/gpio/gpiolib-acpi.c | 2
-rw-r--r--  drivers/gpio/gpiolib.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 24
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/atombios_crtc.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/ci_dpm.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 15
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 15
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 15
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | 95
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 32
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c | 20
-rw-r--r--  drivers/gpu/drm/arm/hdlcd_crtc.c | 47
-rw-r--r--  drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c | 36
-rw-r--r--  drivers/gpu/drm/bridge/synopsys/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/drm_atomic_helper.c | 11
-rw-r--r--  drivers/gpu/drm/drm_connector.c | 38
-rw-r--r--  drivers/gpu/drm/drm_dp_helper.c | 83
-rw-r--r--  drivers/gpu/drm/drm_drv.c | 7
-rw-r--r--  drivers/gpu/drm/drm_plane.c | 5
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem.h | 3
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c | 6
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.c | 8
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.h | 5
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dsi.c | 26
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_lvds.c | 18
-rw-r--r--  drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/execlist.c | 30
-rw-r--r--  drivers/gpu/drm/i915/gvt/handlers.c | 32
-rw-r--r--  drivers/gpu/drm/i915/gvt/render.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gvt/sched_policy.c | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 6
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 13
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 13
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 67
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 17
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 120
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_request.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_shrinker.c | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_guc_submission.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 15
-rw-r--r--  drivers/gpu/drm/i915/i915_pci.c | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_pvinfo.h | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 12
-rw-r--r--  drivers/gpu/drm/i915/i915_vgpu.c | 10
-rw-r--r--  drivers/gpu/drm/i915/i915_vma.c | 5
-rw-r--r--  drivers/gpu/drm/i915/intel_cdclk.c | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 75
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 45
-rw-r--r--  drivers/gpu/drm/i915/intel_dp_aux_backlight.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_dp_mst.c | 5
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 13
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi.c | 7
-rw-r--r--  drivers/gpu/drm/i915/intel_engine_cs.c | 20
-rw-r--r--  drivers/gpu/drm/i915/intel_fbc.c | 19
-rw-r--r--  drivers/gpu/drm/i915/intel_lpe_audio.c | 41
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_lspcon.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 45
-rw-r--r--  drivers/gpu/drm/i915/intel_psr.c | 5
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 41
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 19
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c | 21
-rw-r--r--  drivers/gpu/drm/i915/intel_uc.h | 2
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_context.c | 8
-rw-r--r--  drivers/gpu/drm/imx/imx-ldb.c | 2
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_dsi.c | 15
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_hdmi.c | 2
-rw-r--r--  drivers/gpu/drm/meson/meson_drv.c | 20
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_mode.c | 9
-rw-r--r--  drivers/gpu/drm/msm/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c | 2
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c | 9
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c | 1
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.h | 1
-rw-r--r--  drivers/gpu/drm/msm/msm_fence.c | 10
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c | 6
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_prime.c | 7
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_submit.c | 14
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.c | 4
-rw-r--r--  drivers/gpu/drm/mxsfb/mxsfb_crtc.c | 42
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c | 44
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_vga.c | 13
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c | 7
-rw-r--r--  drivers/gpu/drm/qxl/qxl_display.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/ci_dpm.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/cik.c | 11
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 11
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_combios.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_uvd.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/si.c | 11
-rw-r--r--  drivers/gpu/drm/rockchip/analogix_dp-rockchip.c | 12
-rw-r--r--  drivers/gpu/drm/rockchip/cdn-dp-core.c | 9
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_vop.c | 8
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_vop.h | 3
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_vop_reg.c | 2
-rw-r--r--  drivers/gpu/drm/tegra/drm.c | 22
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c | 1
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 115
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.h | 15
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c | 27
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | 23
-rw-r--r--  drivers/gpu/host1x/Kconfig | 1
-rw-r--r--  drivers/gpu/host1x/dev.c | 2
-rw-r--r--  drivers/gpu/ipu-v3/ipu-common.c | 15
-rw-r--r--  drivers/gpu/ipu-v3/ipu-pre.c | 13
-rw-r--r--  drivers/hid/Kconfig | 6
-rw-r--r--  drivers/hid/hid-asus.c | 12
-rw-r--r--  drivers/hid/hid-core.c | 285
-rw-r--r--  drivers/hid/hid-elecom.c | 62
-rw-r--r--  drivers/hid/hid-ids.h | 6
-rw-r--r--  drivers/hid/i2c-hid/i2c-hid.c | 13
-rw-r--r--  drivers/hid/usbhid/hid-quirks.c | 1
-rw-r--r--  drivers/hid/wacom_wac.c | 45
-rw-r--r--  drivers/hsi/clients/ssi_protocol.c | 2
-rw-r--r--  drivers/hwmon/Kconfig | 1
-rw-r--r--  drivers/hwmon/aspeed-pwm-tacho.c | 65
-rw-r--r--  drivers/hwmon/coretemp.c | 14
-rw-r--r--  drivers/i2c/busses/i2c-designware-platdrv.c | 18
-rw-r--r--  drivers/i2c/busses/i2c-imx.c | 8
-rw-r--r--  drivers/i2c/busses/i2c-ismt.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-mv64xxx.c | 6
-rw-r--r--  drivers/i2c/busses/i2c-rcar.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-tiny-usb.c | 25
-rw-r--r--  drivers/i2c/busses/i2c-xgene-slimpro.c | 1
-rw-r--r--  drivers/i2c/i2c-mux.c | 26
-rw-r--r--  drivers/i2c/muxes/i2c-mux-reg.c | 21
-rw-r--r--  drivers/iio/adc/bcm_iproc_adc.c | 8
-rw-r--r--  drivers/iio/adc/max9611.c | 10
-rw-r--r--  drivers/iio/adc/meson_saradc.c | 4
-rw-r--r--  drivers/iio/adc/mxs-lradc-adc.c | 7
-rw-r--r--  drivers/iio/adc/sun4i-gpadc-iio.c | 38
-rw-r--r--  drivers/iio/adc/ti_am335x_adc.c | 2
-rw-r--r--  drivers/iio/buffer/industrialio-buffer-dma.c | 1
-rw-r--r--  drivers/iio/buffer/industrialio-buffer-dmaengine.c | 1
-rw-r--r--  drivers/iio/imu/inv_mpu6050/inv_mpu_core.c | 39
-rw-r--r--  drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h | 3
-rw-r--r--  drivers/iio/industrialio-trigger.c | 3
-rw-r--r--  drivers/iio/light/ltr501.c | 4
-rw-r--r--  drivers/iio/proximity/as3935.c | 14
-rw-r--r--  drivers/infiniband/core/addr.c | 10
-rw-r--r--  drivers/infiniband/core/cm.c | 4
-rw-r--r--  drivers/infiniband/core/cma.c | 13
-rw-r--r--  drivers/infiniband/core/core_priv.h | 10
-rw-r--r--  drivers/infiniband/core/netlink.c | 2
-rw-r--r--  drivers/infiniband/core/sa_query.c | 6
-rw-r--r--  drivers/infiniband/core/umem.c | 2
-rw-r--r--  drivers/infiniband/core/umem_odp.c | 6
-rw-r--r--  drivers/infiniband/core/uverbs_marshall.c | 8
-rw-r--r--  drivers/infiniband/hw/bnxt_re/bnxt_re.h | 4
-rw-r--r--  drivers/infiniband/hw/bnxt_re/ib_verbs.c | 471
-rw-r--r--  drivers/infiniband/hw/bnxt_re/ib_verbs.h | 22
-rw-r--r--  drivers/infiniband/hw/bnxt_re/main.c | 4
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_fp.c | 384
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_fp.h | 18
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_rcfw.c | 314
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_rcfw.h | 61
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_res.h | 4
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_sp.c | 333
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_sp.h | 2
-rw-r--r--  drivers/infiniband/hw/cxgb4/cm.c | 9
-rw-r--r--  drivers/infiniband/hw/cxgb4/device.c | 12
-rw-r--r--  drivers/infiniband/hw/hfi1/chip.c | 67
-rw-r--r--  drivers/infiniband/hw/hfi1/chip_registers.h | 2
-rw-r--r--  drivers/infiniband/hw/hfi1/hfi.h | 11
-rw-r--r--  drivers/infiniband/hw/hfi1/intr.c | 3
-rw-r--r--  drivers/infiniband/hw/hfi1/pcie.c | 4
-rw-r--r--  drivers/infiniband/hw/hfi1/rc.c | 5
-rw-r--r--  drivers/infiniband/hw/hfi1/sysfs.c | 3
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_cm.c | 3
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_ctrl.c | 12
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_main.c | 20
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_osdep.h | 1
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_type.h | 2
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_utils.c | 17
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_virtchnl.c | 5
-rw-r--r--  drivers/infiniband/hw/mlx4/mad.c | 1
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c | 20
-rw-r--r--  drivers/infiniband/hw/mlx5/mlx5_ib.h | 3
-rw-r--r--  drivers/infiniband/hw/mlx5/qp.c | 59
-rw-r--r--  drivers/infiniband/hw/nes/nes_cm.c | 3
-rw-r--r--  drivers/infiniband/hw/qedr/qedr.h | 5
-rw-r--r--  drivers/infiniband/hw/qedr/qedr_cm.c | 10
-rw-r--r--  drivers/infiniband/hw/qedr/verbs.c | 68
-rw-r--r--  drivers/infiniband/hw/qib/qib_rc.c | 4
-rw-r--r--  drivers/infiniband/sw/rxe/rxe.h | 5
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_verbs.c | 9
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_ethtool.c | 2
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_ib.c | 1
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c | 17
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_vlan.c | 11
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.c | 4
-rw-r--r--  drivers/input/keyboard/tm2-touchkey.c | 2
-rw-r--r--  drivers/input/misc/axp20x-pek.c | 44
-rw-r--r--  drivers/input/misc/soc_button_array.c | 20
-rw-r--r--  drivers/input/mouse/elan_i2c_i2c.c | 30
-rw-r--r--  drivers/input/mouse/elantech.c | 16
-rw-r--r--  drivers/input/mouse/synaptics.c | 37
-rw-r--r--  drivers/input/rmi4/rmi_f03.c | 2
-rw-r--r--  drivers/input/rmi4/rmi_f54.c | 17
-rw-r--r--  drivers/input/serio/i8042-x86ia64io.h | 7
-rw-r--r--  drivers/input/touchscreen/atmel_mxt_ts.c | 1
-rw-r--r--  drivers/input/touchscreen/edt-ft5x06.c | 2
-rw-r--r--  drivers/input/touchscreen/silead.c | 3
-rw-r--r--  drivers/iommu/amd_iommu.c | 6
-rw-r--r--  drivers/iommu/dma-iommu.c | 13
-rw-r--r--  drivers/iommu/intel-iommu.c | 5
-rw-r--r--  drivers/iommu/mtk_iommu_v1.c | 1
-rw-r--r--  drivers/iommu/of_iommu.c | 7
-rw-r--r--  drivers/irqchip/irq-mbigen.c | 17
-rw-r--r--  drivers/irqchip/irq-mips-gic.c | 6
-rw-r--r--  drivers/irqchip/irq-xtensa-mx.c | 2
-rw-r--r--  drivers/irqchip/irq-xtensa-pic.c | 2
-rw-r--r--  drivers/isdn/i4l/isdn_ppp.c | 2
-rw-r--r--  drivers/isdn/mISDN/stack.c | 2
-rw-r--r--  drivers/leds/leds-bcm6328.c | 4
-rw-r--r--  drivers/leds/leds-pca955x.c | 2
-rw-r--r--  drivers/leds/trigger/ledtrig-heartbeat.c | 31
-rw-r--r--  drivers/md/bitmap.c | 8
-rw-r--r--  drivers/md/dm-bufio.c | 18
-rw-r--r--  drivers/md/dm-cache-background-tracker.c | 5
-rw-r--r--  drivers/md/dm-cache-policy-smq.c | 31
-rw-r--r--  drivers/md/dm-cache-target.c | 27
-rw-r--r--  drivers/md/dm-integrity.c | 42
-rw-r--r--  drivers/md/dm-io.c | 4
-rw-r--r--  drivers/md/dm-ioctl.c | 5
-rw-r--r--  drivers/md/dm-mpath.c | 19
-rw-r--r--  drivers/md/dm-raid.c | 17
-rw-r--r--  drivers/md/dm-raid1.c | 23
-rw-r--r--  drivers/md/dm-rq.c | 1
-rw-r--r--  drivers/md/dm-snap-persistent.c | 3
-rw-r--r--  drivers/md/dm-thin-metadata.c | 4
-rw-r--r--  drivers/md/dm-thin.c | 26
-rw-r--r--  drivers/md/dm-verity-target.c | 4
-rw-r--r--  drivers/md/dm.c | 2
-rw-r--r--  drivers/md/md-cluster.c | 4
-rw-r--r--  drivers/md/md.c | 38
-rw-r--r--  drivers/md/md.h | 3
-rw-r--r--  drivers/md/persistent-data/dm-space-map-disk.c | 15
-rw-r--r--  drivers/md/raid0.c | 116
-rw-r--r--  drivers/md/raid1.c | 23
-rw-r--r--  drivers/md/raid10.c | 10
-rw-r--r--  drivers/md/raid5-cache.c | 51
-rw-r--r--  drivers/md/raid5-log.h | 3
-rw-r--r--  drivers/md/raid5-ppl.c | 4
-rw-r--r--  drivers/md/raid5.c | 100
-rw-r--r--  drivers/media/Kconfig | 6
-rw-r--r--  drivers/media/Makefile | 4
-rw-r--r--  drivers/media/cec/Kconfig | 15
-rw-r--r--  drivers/media/cec/Makefile | 2
-rw-r--r--  drivers/media/cec/cec-adap.c | 2
-rw-r--r--  drivers/media/cec/cec-api.c | 8
-rw-r--r--  drivers/media/cec/cec-core.c | 8
-rw-r--r--  drivers/media/i2c/Kconfig | 9
-rw-r--r--  drivers/media/i2c/tc358743.c | 2
-rw-r--r--  drivers/media/platform/Kconfig | 10
-rw-r--r--  drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c | 8
-rw-r--r--  drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c | 8
-rw-r--r--  drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c | 8
-rw-r--r--  drivers/media/platform/vivid/Kconfig | 3
-rw-r--r--  drivers/media/rc/rc-ir-raw.c | 13
-rw-r--r--  drivers/media/rc/sir_ir.c | 6
-rw-r--r--  drivers/media/usb/pulse8-cec/Kconfig | 3
-rw-r--r--  drivers/media/usb/rainshadow-cec/Kconfig | 3
-rw-r--r--  drivers/media/usb/rainshadow-cec/rainshadow-cec.c | 3
-rw-r--r--  drivers/media/v4l2-core/videobuf2-core.c | 2
-rw-r--r--  drivers/memory/atmel-ebi.c | 2
-rw-r--r--  drivers/memory/omap-gpmc.c | 2
-rw-r--r--  drivers/mfd/arizona-core.c | 3
-rw-r--r--  drivers/misc/Kconfig | 1
-rw-r--r--  drivers/misc/cxl/context.c | 6
-rw-r--r--  drivers/misc/cxl/cxl.h | 18
-rw-r--r--  drivers/misc/cxl/fault.c | 23
-rw-r--r--  drivers/misc/cxl/file.c | 7
-rw-r--r--  drivers/misc/cxl/main.c | 17
-rw-r--r--  drivers/misc/cxl/native.c | 43
-rw-r--r--  drivers/misc/cxl/pci.c | 11
-rw-r--r--  drivers/misc/mei/bus.c | 4
-rw-r--r--  drivers/misc/sgi-xp/xp.h | 12
-rw-r--r--  drivers/misc/sgi-xp/xp_main.c | 36
-rw-r--r--  drivers/mmc/core/pwrseq_simple.c | 7
-rw-r--r--  drivers/mmc/host/cavium-octeon.c | 15
-rw-r--r--  drivers/mmc/host/cavium-thunderx.c | 6
-rw-r--r--  drivers/mmc/host/cavium.c | 25
-rw-r--r--  drivers/mmc/host/meson-gx-mmc.c | 9
-rw-r--r--  drivers/mmc/host/sdhci-iproc.c | 3
-rw-r--r--  drivers/mmc/host/sdhci-xenon-phy.c | 14
-rw-r--r--  drivers/mmc/host/sdhci-xenon.c | 6
-rw-r--r--  drivers/mmc/host/sdhci-xenon.h | 1
-rw-r--r--  drivers/mtd/nand/nand_base.c | 46
-rw-r--r--  drivers/mtd/nand/nand_ids.c | 1
-rw-r--r--  drivers/mtd/nand/nand_samsung.c | 3
-rw-r--r--  drivers/mtd/nand/tango_nand.c | 23
-rw-r--r--  drivers/net/arcnet/arcnet.c | 7
-rw-r--r--  drivers/net/arcnet/capmode.c | 2
-rw-r--r--  drivers/net/arcnet/com20020-pci.c | 6
-rw-r--r--  drivers/net/arcnet/com20020.c | 2
-rw-r--r--  drivers/net/bonding/bond_3ad.c | 29
-rw-r--r--  drivers/net/bonding/bond_main.c | 22
-rw-r--r--  drivers/net/caif/caif_hsi.c | 2
-rw-r--r--  drivers/net/caif/caif_serial.c | 2
-rw-r--r--  drivers/net/caif/caif_spi.c | 2
-rw-r--r--  drivers/net/caif/caif_virtio.c | 2
-rw-r--r--  drivers/net/can/dev.c | 3
-rw-r--r--  drivers/net/can/peak_canfd/peak_canfd.c | 2
-rw-r--r--  drivers/net/can/slcan.c | 7
-rw-r--r--  drivers/net/can/usb/gs_usb.c | 2
-rw-r--r--  drivers/net/can/usb/peak_usb/pcan_usb_core.c | 4
-rw-r--r--  drivers/net/can/vcan.c | 4
-rw-r--r--  drivers/net/can/vxcan.c | 4
-rw-r--r--  drivers/net/dsa/mv88e6xxx/chip.c | 3
-rw-r--r--  drivers/net/dsa/mv88e6xxx/global2.h | 6
-rw-r--r--  drivers/net/dummy.c | 4
-rw-r--r--  drivers/net/ethernet/8390/ax88796.c | 7
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_com.c | 35
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_netdev.c | 179
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_netdev.h | 18
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-desc.c | 5
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c | 13
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c | 12
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h | 3
-rw-r--r--  drivers/net/ethernet/atheros/atlx/atl2.c | 8
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.c | 7
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 21
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 15
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h | 1
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 64
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h | 6
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c | 6
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 35
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 9
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h | 6
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 4
-rw-r--r--  drivers/net/ethernet/ethoc.c | 3
-rw-r--r--  drivers/net/ethernet/faraday/ftmac100.c | 7
-rw-r--r--  drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 16
-rw-r--r--  drivers/net/ethernet/freescale/fman/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/freescale/fman/mac.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/fsl_pq_mdio.c | 9
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_ethtool.c | 16
-rw-r--r--  drivers/net/ethernet/ibm/emac/core.c | 67
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c | 8
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e.h | 1
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 4
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 43
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c | 7
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 2
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 3
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2.c | 76
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mcg.c | 15
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/qp.c | 19
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 16
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 41
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 17
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_fs.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c | 45
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 61
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eq.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 77
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 27
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/health.c | 13
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/ipoib.c | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 34
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | 6
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_debug.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_main.c | 3
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 30
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | 36
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h | 1
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c | 3
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c | 2
-rw-r--r--  drivers/net/ethernet/qualcomm/emac/emac-mac.c | 2
-rw-r--r--  drivers/net/ethernet/qualcomm/emac/emac-phy.c | 75
-rw-r--r--  drivers/net/ethernet/qualcomm/emac/emac.c | 22
-rw-r--r--  drivers/net/ethernet/qualcomm/qca_spi.c | 10
-rw-r--r--  drivers/net/ethernet/renesas/ravb_main.c | 24
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c | 3
-rw-r--r--  drivers/net/ethernet/rocker/rocker_ofdpa.c | 2
-rw-r--r--  drivers/net/ethernet/sfc/ef10.c | 15
-rw-r--r--  drivers/net/ethernet/sfc/ef10_sriov.c | 2
-rw-r--r--  drivers/net/ethernet/sfc/nic.h | 8
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c | 6
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c | 11
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 54
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h | 3
-rw-r--r--  drivers/net/ethernet/sun/ldmvsw.c | 4
-rw-r--r--  drivers/net/ethernet/ti/cpsw-common.c | 2
-rw-r--r--  drivers/net/ethernet/ti/netcp_core.c | 6
-rw-r--r--  drivers/net/ethernet/ti/netcp_ethss.c | 1
-rw-r--r--  drivers/net/geneve.c | 12
-rw-r--r--  drivers/net/gtp.c | 4
-rw-r--r--  drivers/net/hamradio/6pack.c | 2
-rw-r--r--  drivers/net/hamradio/bpqether.c | 2
-rw-r--r--  drivers/net/hamradio/hdlcdrv.c | 2
-rw-r--r--  drivers/net/hyperv/hyperv_net.h | 5
-rw-r--r--  drivers/net/hyperv/netvsc_drv.c | 58
-rw-r--r--  drivers/net/hyperv/rndis_filter.c | 30
-rw-r--r--  drivers/net/ifb.c | 4
-rw-r--r--  drivers/net/ipvlan/ipvlan_main.c | 2
-rw-r--r--  drivers/net/irda/irda-usb.c | 2
-rw-r--r--  drivers/net/loopback.c | 4
-rw-r--r--  drivers/net/macsec.c | 4
-rw-r--r--  drivers/net/macvlan.c | 94
-rw-r--r--  drivers/net/netconsole.c | 2
-rw-r--r--  drivers/net/nlmon.c | 2
-rw-r--r--  drivers/net/phy/Kconfig | 3
-rw-r--r--  drivers/net/phy/dp83640.c | 2
-rw-r--r--  drivers/net/phy/marvell.c | 68
-rw-r--r--  drivers/net/phy/mdio-mux.c | 11
-rw-r--r--  drivers/net/phy/mdio_bus.c | 19
-rw-r--r--  drivers/net/phy/micrel.c | 44
-rw-r--r--  drivers/net/phy/phy.c | 4
-rw-r--r--  drivers/net/slip/slip.c | 7
-rw-r--r--  drivers/net/team/team.c | 4
-rw-r--r--  drivers/net/tun.c | 4
-rw-r--r--  drivers/net/usb/ax88179_178a.c | 16
-rw-r--r--  drivers/net/usb/cdc-phonet.c | 2
-rw-r--r--  drivers/net/usb/cdc_ether.c | 31
-rw-r--r--  drivers/net/usb/ch9200.c | 4
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 8
-rw-r--r--  drivers/net/usb/r8152.c | 2
-rw-r--r--  drivers/net/usb/smsc95xx.c | 13
-rw-r--r--  drivers/net/veth.c | 8
-rw-r--r--  drivers/net/virtio_net.c | 7
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_drv.c | 5
-rw-r--r--  drivers/net/vrf.c | 41
-rw-r--r--  drivers/net/vsockmon.c | 2
-rw-r--r--  drivers/net/vxlan.c | 41
-rw-r--r--  drivers/net/wan/dlci.c | 2
-rw-r--r--  drivers/net/wan/hdlc_fr.c | 2
-rw-r--r--  drivers/net/wan/lapbether.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath6kl/main.c | 2
-rw-r--r--  drivers/net/wireless/ath/wcn36xx/main.c | 2
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c | 1
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c | 3
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c | 35
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h | 4
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c | 2
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c | 17
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c | 20
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c | 9
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-7000.c | 4
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-8000.c | 4
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-prph.h | 1
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rs.h | 5
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h | 3
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c | 12
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 6
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 32
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/rs.c | 46
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/rs.h | 15
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/sta.c | 26
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/sta.h | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/tt.c | 8
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/tx.c | 12
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/trans.c | 6
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c | 9
-rw-r--r--  drivers/net/wireless/intersil/hostap/hostap_main.c | 2
-rw-r--r--  drivers/net/wireless/mac80211_hwsim.c | 2
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/main.c | 2
-rw-r--r--  drivers/net/xen-netback/common.h | 1
-rw-r--r--  drivers/net/xen-netback/interface.c | 6
-rw-r--r--  drivers/net/xen-netback/netback.c | 6
-rw-r--r--  drivers/net/xen-netfront.c | 3
-rw-r--r--  drivers/ntb/hw/intel/ntb_hw_intel.c | 2
-rw-r--r--  drivers/ntb/ntb_transport.c | 58
-rw-r--r--  drivers/ntb/test/ntb_perf.c | 4
-rw-r--r--  drivers/nvme/host/core.c | 86
-rw-r--r--  drivers/nvme/host/fc.c | 175
-rw-r--r--  drivers/nvme/host/nvme.h | 4
-rw-r--r--  drivers/nvme/host/pci.c | 38
-rw-r--r--  drivers/nvme/host/rdma.c | 64
-rw-r--r--  drivers/nvme/target/core.c | 6
-rw-r--r--  drivers/nvme/target/fc.c | 4
-rw-r--r--  drivers/nvme/target/fcloop.c | 1
-rw-r--r--  drivers/nvme/target/loop.c | 2
-rw-r--r--  drivers/nvme/target/nvmet.h | 1
-rw-r--r--  drivers/nvme/target/rdma.c | 1
-rw-r--r--  drivers/of/device.c | 4
-rw-r--r--  drivers/of/fdt.c | 3
-rw-r--r--  drivers/of/of_reserved_mem.c | 2
-rw-r--r--  drivers/of/platform.c | 3
-rw-r--r--  drivers/pci/access.c | 12
-rw-r--r--  drivers/pci/dwc/pci-imx6.c | 33
-rw-r--r--  drivers/pci/endpoint/Kconfig | 1
-rw-r--r--  drivers/pci/endpoint/functions/Kconfig | 1
-rw-r--r--  drivers/pci/pci.c | 3
-rw-r--r--  drivers/pci/switch/switchtec.c | 16
-rw-r--r--  drivers/perf/arm_pmu_acpi.c | 11
-rw-r--r--  drivers/phy/phy-qcom-qmp.c | 14
-rw-r--r--  drivers/pinctrl/core.c | 20
-rw-r--r--  drivers/pinctrl/freescale/pinctrl-mxs.c | 16
-rw-r--r--  drivers/pinctrl/intel/pinctrl-cherryview.c | 24
-rw-r--r--  drivers/pinctrl/pinconf-generic.c | 3
-rw-r--r--  drivers/pinctrl/pinctrl-amd.c | 91
-rw-r--r--  drivers/pinctrl/pinctrl-rockchip.c | 44
-rw-r--r--  drivers/pinctrl/pinmux.c | 21
-rw-r--r--  drivers/pinctrl/stm32/pinctrl-stm32.c | 2
-rw-r--r--  drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c | 2
-rw-r--r--  drivers/platform/goldfish/goldfish_pipe.c | 2
-rw-r--r--  drivers/platform/x86/intel_telemetry_debugfs.c | 16
-rw-r--r--  drivers/powercap/powercap_sys.c | 1
-rw-r--r--  drivers/reset/hisilicon/hi6220_reset.c | 2
-rw-r--r--  drivers/rtc/rtc-cmos.c | 2
-rw-r--r--  drivers/s390/cio/ccwgroup.c | 4
-rw-r--r--  drivers/s390/cio/qdio_debug.h | 2
-rw-r--r--  drivers/s390/cio/vfio_ccw_ops.c | 12
-rw-r--r--  drivers/s390/crypto/ap_bus.c | 38
-rw-r--r--  drivers/s390/crypto/ap_card.c | 9
-rw-r--r--  drivers/s390/crypto/ap_queue.c | 9
-rw-r--r--  drivers/s390/net/netiucv.c | 4
-rw-r--r--  drivers/s390/net/qeth_core.h | 4
-rw-r--r--  drivers/s390/net/qeth_core_main.c | 21
-rw-r--r--  drivers/s390/net/qeth_core_sys.c | 24
-rw-r--r--  drivers/s390/net/qeth_l2.h | 2
-rw-r--r--  drivers/s390/net/qeth_l2_main.c | 26
-rw-r--r--  drivers/s390/net/qeth_l2_sys.c | 8
-rw-r--r--  drivers/s390/net/qeth_l3_main.c | 8
-rw-r--r--  drivers/s390/virtio/virtio_ccw.c | 2
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc.h | 1
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_fcoe.c | 10
-rw-r--r--  drivers/scsi/csiostor/csio_hw.c | 5
-rw-r--r--  drivers/scsi/cxgbi/cxgb4i/cxgb4i.c | 1
-rw-r--r--  drivers/scsi/cxgbi/libcxgbi.c | 54
-rw-r--r--  drivers/scsi/cxgbi/libcxgbi.h | 17
-rw-r--r--  drivers/scsi/cxlflash/Kconfig | 1
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_rdac.c | 10
-rw-r--r--  drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c | 27
-rw-r--r--  drivers/scsi/libfc/fc_fcp.c | 15
-rw-r--r--  drivers/scsi/libfc/fc_rport.c | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc.h | 23
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c | 47
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h | 14
-rw-r--r--  drivers/scsi/lpfc/lpfc_ct.c | 8
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.c | 69
-rw-r--r--  drivers/scsi/lpfc/lpfc_disc.h | 1
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c | 26
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c | 9
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw4.h | 16
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 146
-rw-r--r--  drivers/scsi/lpfc/lpfc_mem.c | 100
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c | 13
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvmet.c | 419
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvmet.h | 14
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c | 376
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli4.h | 19
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h | 2
-rw-r--r--  drivers/scsi/pmcraid.c | 3
-rw-r--r--  drivers/scsi/qedf/qedf.h | 2
-rw-r--r--  drivers/scsi/qedf/qedf_els.c | 2
-rw-r--r--  drivers/scsi/qedf/qedf_main.c | 2
-rw-r--r--  drivers/scsi/qedi/qedi.h | 3
-rw-r--r--  drivers/scsi/qedi/qedi_fw.c | 3
-rw-r--r--  drivers/scsi/qedi/qedi_iscsi.c | 7
-rw-r--r--  drivers/scsi/qedi/qedi_main.c | 29
-rw-r--r--  drivers/scsi/qla2xxx/qla_bsg.c | 9
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.c | 4
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h | 1
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c | 5
-rw-r--r--  drivers/scsi/qla2xxx/qla_inline.h | 26
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c | 13
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 46
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.c | 8
-rw-r--r--  drivers/scsi/qla2xxx/qla_tmpl.c | 2
-rw-r--r--  drivers/scsi/scsi.c | 2
-rw-r--r--  drivers/scsi/scsi_debug.c | 2
-rw-r--r--  drivers/scsi/scsi_lib.c | 3
-rw-r--r--  drivers/scsi/sd.c | 63
-rw-r--r--  drivers/scsi/sg.c | 5
-rw-r--r--  drivers/scsi/ufs/ufshcd.c | 7
-rw-r--r--  drivers/soc/bcm/brcmstb/common.c | 2
-rw-r--r--  drivers/soc/imx/Kconfig | 3
-rw-r--r--  drivers/soc/ti/knav_dma.c | 2
-rw-r--r--  drivers/spi/Kconfig | 34
-rw-r--r--  drivers/spi/Makefile | 5
-rw-r--r--  drivers/spi/spi-atmel.c | 30
-rw-r--r--  drivers/spi/spi-bcm63xx-hsspi.c | 1
-rw-r--r--  drivers/spi/spi-bcm63xx.c | 4
-rw-r--r--  drivers/spi/spi-davinci.c | 9
-rw-r--r--  drivers/spi/spi-fsl-dspi.c | 3
-rw-r--r--  drivers/spi/spi-imx.c | 92
-rw-r--r--  drivers/spi/spi-loopback-test.c | 14
-rw-r--r--  drivers/spi/spi-meson-spicc.c | 619
-rw-r--r--  drivers/spi/spi-mt65xx.c | 61
-rw-r--r--  drivers/spi/spi-slave-system-control.c | 154
-rw-r--r--  drivers/spi/spi-slave-time.c | 129
-rw-r--r--  drivers/spi/spi.c | 1220
-rw-r--r--  drivers/staging/android/ion/devicetree.txt | 51
-rw-r--r--  drivers/staging/ccree/Kconfig | 2
-rw-r--r--  drivers/staging/ccree/ssi_buffer_mgr.c | 3
-rw-r--r--  drivers/staging/ccree/ssi_request_mgr.c | 1
-rw-r--r--  drivers/staging/fsl-dpaa2/Kconfig | 1
-rw-r--r--  drivers/staging/iio/cdc/ad7152.c | 6
-rw-r--r--  drivers/staging/lustre/lustre/lov/lov_pack.c | 9
-rw-r--r--  drivers/staging/media/atomisp/i2c/Makefile | 2
-rw-r--r--  drivers/staging/media/atomisp/i2c/imx/Makefile | 2
-rw-r--r--  drivers/staging/media/atomisp/i2c/ov5693/Makefile | 2
-rw-r--r--  drivers/staging/media/atomisp/pci/atomisp2/Makefile | 2
-rw-r--r--  drivers/staging/rtl8188eu/os_dep/mon.c | 2
-rw-r--r--  drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c | 24
-rw-r--r--  drivers/staging/rtl8192e/rtl819x_TSProc.c | 15
-rw-r--r--  drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c | 4
-rw-r--r--  drivers/staging/rtl8723bs/os_dep/os_intfs.c | 2
-rw-r--r--  drivers/staging/rtl8723bs/os_dep/osdep_service.c | 2
-rw-r--r--  drivers/staging/typec/fusb302/fusb302.c | 86
-rw-r--r--  drivers/staging/typec/pd.h | 10
-rw-r--r--  drivers/staging/typec/pd_vdo.h | 4
-rw-r--r--  drivers/staging/typec/tcpci.c | 2
-rw-r--r--  drivers/staging/typec/tcpm.c | 77
-rw-r--r--  drivers/staging/typec/tcpm.h | 3
-rw-r--r--  drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c | 31
-rw-r--r--  drivers/target/iscsi/iscsi_target.c | 52
-rw-r--r--  drivers/target/iscsi/iscsi_target_erl0.c | 6
-rw-r--r--  drivers/target/iscsi/iscsi_target_erl0.h | 2
-rw-r--r--  drivers/target/iscsi/iscsi_target_login.c | 4
-rw-r--r--  drivers/target/iscsi/iscsi_target_nego.c | 194
-rw-r--r--  drivers/target/target_core_internal.h | 2
-rw-r--r--  drivers/target/target_core_tmr.c | 16
-rw-r--r--  drivers/target/target_core_transport.c | 32
-rw-r--r--  drivers/target/target_core_user.c | 46
-rw-r--r--  drivers/tee/Kconfig | 1
-rw-r--r--  drivers/thermal/broadcom/Kconfig | 9
-rw-r--r--  drivers/thermal/qoriq_thermal.c | 3
-rw-r--r--  drivers/thermal/thermal_core.c | 2
-rw-r--r--  drivers/thermal/ti-soc-thermal/ti-bandgap.c | 14
-rw-r--r--  drivers/tty/ehv_bytechan.c | 17
-rw-r--r--  drivers/tty/serdev/core.c | 12
-rw-r--r--  drivers/tty/serdev/serdev-ttyport.c | 21
-rw-r--r--  drivers/tty/serial/8250/8250_port.c | 21
-rw-r--r--  drivers/tty/serial/altera_jtaguart.c | 1
-rw-r--r--  drivers/tty/serial/altera_uart.c | 1
-rw-r--r--  drivers/tty/serial/efm32-uart.c | 11
-rw-r--r--  drivers/tty/serial/ifx6x60.c | 2
-rw-r--r--  drivers/tty/serial/imx.c | 14
-rw-r--r--  drivers/tty/serial/serial_core.c | 6
-rw-r--r--  drivers/tty/tty_port.c | 73
-rw-r--r--  drivers/uio/uio.c | 8
-rw-r--r--  drivers/usb/chipidea/core.c | 5
-rw-r--r--  drivers/usb/chipidea/debug.c | 3
-rw-r--r--  drivers/usb/chipidea/udc.c | 8
-rw-r--r--  drivers/usb/chipidea/usbmisc_imx.c | 41
-rw-r--r--  drivers/usb/core/devio.c | 14
-rw-r--r--  drivers/usb/core/hcd.c | 5
-rw-r--r--  drivers/usb/core/hub.c | 27
-rw-r--r--  drivers/usb/core/of.c | 3
-rw-r--r--  drivers/usb/core/urb.c | 2
-rw-r--r--  drivers/usb/dwc2/params.c | 2
-rw-r--r--  drivers/usb/dwc3/dwc3-keystone.c | 4
-rw-r--r--  drivers/usb/dwc3/dwc3-pci.c | 4
-rw-r--r--  drivers/usb/dwc3/gadget.c | 21
-rw-r--r--  drivers/usb/gadget/composite.c | 11
-rw-r--r--  drivers/usb/gadget/function/f_fs.c | 10
-rw-r--r--  drivers/usb/gadget/function/f_mass_storage.c | 13
-rw-r--r--  drivers/usb/gadget/function/f_phonet.c | 2
-rw-r--r--  drivers/usb/gadget/function/u_serial.c | 2
-rw-r--r--  drivers/usb/gadget/legacy/inode.c | 9
-rw-r--r--  drivers/usb/gadget/udc/dummy_hcd.c | 19
-rw-r--r--  drivers/usb/gadget/udc/net2280.c | 9
-rw-r--r--  drivers/usb/gadget/udc/renesas_usb3.c | 45
-rw-r--r--  drivers/usb/host/ehci-platform.c | 4
-rw-r--r--  drivers/usb/host/r8a66597-hcd.c | 6
-rw-r--r--  drivers/usb/host/xhci-hub.c | 2
-rw-r--r--  drivers/usb/host/xhci-mem.c | 18
-rw-r--r--  drivers/usb/host/xhci-pci.c | 10
-rw-r--r--  drivers/usb/host/xhci-plat.c | 2
-rw-r--r--  drivers/usb/host/xhci-ring.c | 20
-rw-r--r--  drivers/usb/host/xhci.c | 13
-rw-r--r--  drivers/usb/misc/chaoskey.c | 2
-rw-r--r--  drivers/usb/misc/iowarrior.c | 2
-rw-r--r--  drivers/usb/misc/legousbtower.c | 1
-rw-r--r--  drivers/usb/misc/sisusbvga/sisusb_con.c | 2
-rw-r--r--  drivers/usb/musb/musb_dsps.c | 5
-rw-r--r--  drivers/usb/musb/musb_host.c | 9
-rw-r--r--  drivers/usb/musb/tusb6010_omap.c | 13
-rw-r--r--  drivers/usb/serial/ftdi_sio.c | 10
-rw-r--r--  drivers/usb/serial/ftdi_sio_ids.h | 2
-rw-r--r--  drivers/usb/serial/io_ti.c | 5
-rw-r--r--  drivers/usb/serial/ir-usb.c | 21
-rw-r--r--  drivers/usb/serial/mct_u232.c | 2
-rw-r--r--  drivers/usb/serial/option.c | 8
-rw-r--r--  drivers/usb/serial/qcserial.c | 2
-rw-r--r--  drivers/usb/storage/ene_ub6250.c | 90
-rw-r--r--  drivers/usb/usbip/vhci_hcd.c | 11
-rw-r--r--  drivers/uwb/i1480/dfu/usb.c | 5
-rw-r--r--  drivers/video/fbdev/core/fbmon.c | 2
-rw-r--r--  drivers/video/fbdev/smscufx.c | 5
-rw-r--r--  drivers/video/fbdev/udlfb.c | 9
-rw-r--r--  drivers/video/fbdev/via/viafbdev.c | 8
-rw-r--r--  drivers/virtio/virtio_balloon.c | 7
-rw-r--r--  drivers/watchdog/Kconfig | 2
-rw-r--r--  drivers/watchdog/bcm_kona_wdt.c | 3
-rw-r--r--  drivers/watchdog/cadence_wdt.c | 2
-rw-r--r--  drivers/watchdog/iTCO_wdt.c | 22
-rw-r--r--  drivers/watchdog/pcwd_usb.c | 3
-rw-r--r--  drivers/watchdog/sama5d4_wdt.c | 77
-rw-r--r--  drivers/watchdog/wdt_pci.c | 2
-rw-r--r--  drivers/watchdog/zx2967_wdt.c | 4
-rw-r--r--  drivers/xen/privcmd.c | 4
-rw-r--r--  fs/autofs4/dev-ioctl.c | 2
-rw-r--r--  fs/block_dev.c | 5
-rw-r--r--  fs/btrfs/ctree.h | 4
-rw-r--r--  fs/btrfs/dir-item.c | 2
-rw-r--r--  fs/btrfs/disk-io.c | 10
-rw-r--r--  fs/btrfs/extent-tree.c | 7
-rw-r--r--  fs/btrfs/extent_io.c | 126
-rw-r--r--  fs/btrfs/hash.c | 5
-rw-r--r--  fs/btrfs/inode.c | 6
-rw-r--r--  fs/ceph/acl.c | 1
-rw-r--r--  fs/ceph/export.c | 4
-rw-r--r--  fs/ceph/file.c | 6
-rw-r--r--  fs/ceph/inode.c | 5
-rw-r--r--  fs/ceph/mds_client.c | 4
-rw-r--r--  fs/cifs/cifsacl.c | 30
-rw-r--r--  fs/cifs/cifsglob.h | 2
-rw-r--r--  fs/cifs/cifsproto.h | 3
-rw-r--r--  fs/cifs/cifssmb.c | 11
-rw-r--r--  fs/cifs/file.c | 4
-rw-r--r--  fs/cifs/inode.c | 3
-rw-r--r--  fs/cifs/misc.c | 2
-rw-r--r--  fs/cifs/smb1ops.c | 9
-rw-r--r--  fs/cifs/smb2ops.c | 8
-rw-r--r--  fs/cifs/smb2pdu.c | 21
-rw-r--r--  fs/cifs/transport.c | 4
-rw-r--r--  fs/cifs/xattr.c | 8
-rw-r--r--  fs/configfs/item.c | 8
-rw-r--r--  fs/configfs/symlink.c | 3
-rw-r--r--  fs/dax.c | 24
-rw-r--r--  fs/dcache.c | 10
-rw-r--r--  fs/exec.c | 28
-rw-r--r--  fs/ext2/inode.c | 4
-rw-r--r--  fs/ext4/acl.c | 4
-rw-r--r--  fs/ext4/ext4.h | 2
-rw-r--r--  fs/ext4/extents.c | 85
-rw-r--r--  fs/ext4/file.c | 54
-rw-r--r--  fs/ext4/inline.c | 5
-rw-r--r--  fs/ext4/inode.c | 34
-rw-r--r--  fs/ext4/mballoc.c | 23
-rw-r--r--  fs/ext4/namei.c | 13
-rw-r--r--  fs/ext4/super.c | 17
-rw-r--r--  fs/ext4/xattr.c | 8
-rw-r--r--  fs/f2fs/f2fs.h | 5
-rw-r--r--  fs/fuse/inode.c | 9
-rw-r--r--  fs/gfs2/log.c | 2
-rw-r--r--  fs/hugetlbfs/inode.c | 2
-rw-r--r--  fs/jbd2/transaction.c | 6
-rw-r--r--  fs/namespace.c | 2
-rw-r--r--  fs/nfs/callback_xdr.c | 1
-rw-r--r--  fs/nfs/dir.c | 51
-rw-r--r--  fs/nfs/flexfilelayout/flexfilelayout.c | 1
-rw-r--r--  fs/nfs/internal.h | 2
-rw-r--r--  fs/nfs/namespace.c | 2
-rw-r--r--  fs/nfs/nfs42proc.c | 2
-rw-r--r--  fs/nfs/nfs4client.c | 1
-rw-r--r--  fs/nfs/nfs4proc.c | 5
-rw-r--r--  fs/nfs/nfs4state.c | 2
-rw-r--r--  fs/nfs/pnfs.c | 25
-rw-r--r--  fs/nfs/pnfs.h | 10
-rw-r--r--  fs/nfs/super.c | 5
-rw-r--r--  fs/nfsd/nfs3xdr.c | 23
-rw-r--r--  fs/nfsd/nfs4proc.c | 13
-rw-r--r--  fs/nfsd/nfsxdr.c | 13
-rw-r--r--  fs/ntfs/namei.c | 2
-rw-r--r--  fs/ocfs2/dlmglue.c | 4
-rw-r--r--  fs/ocfs2/export.c | 2
-rw-r--r--  fs/ocfs2/xattr.c | 23
-rw-r--r--  fs/overlayfs/Kconfig | 1
-rw-r--r--  fs/overlayfs/copy_up.c | 57
-rw-r--r--  fs/overlayfs/dir.c | 61
-rw-r--r--  fs/overlayfs/inode.c | 12
-rw-r--r--  fs/overlayfs/namei.c | 16
-rw-r--r--  fs/overlayfs/overlayfs.h | 16
-rw-r--r--  fs/overlayfs/ovl_entry.h | 2
-rw-r--r--  fs/overlayfs/super.c | 18
-rw-r--r--  fs/overlayfs/util.c | 72
-rw-r--r--  fs/proc/base.c | 2
-rw-r--r--  fs/proc/task_mmu.c | 4
-rw-r--r--  fs/quota/dquot.c | 16
-rw-r--r--  fs/read_write.c | 2
-rw-r--r--  fs/reiserfs/journal.c | 4
-rw-r--r--  fs/stat.c | 1
-rw-r--r--  fs/ufs/balloc.c | 70
-rw-r--r--  fs/ufs/inode.c | 96
-rw-r--r--  fs/ufs/super.c | 96
-rw-r--r--  fs/ufs/ufs_fs.h | 9
-rw-r--r--  fs/ufs/util.c | 17
-rw-r--r--  fs/ufs/util.h | 19
-rw-r--r--  fs/userfaultfd.c | 29
-rw-r--r--  fs/xfs/libxfs/xfs_bmap.c | 9
-rw-r--r--  fs/xfs/libxfs/xfs_btree.c | 2
-rw-r--r--  fs/xfs/libxfs/xfs_refcount.c | 43
-rw-r--r--  fs/xfs/xfs_aops.c | 7
-rw-r--r--  fs/xfs/xfs_bmap_util.c | 10
-rw-r--r--  fs/xfs/xfs_buf.c | 38
-rw-r--r--  fs/xfs/xfs_buf.h | 5
-rw-r--r--  fs/xfs/xfs_file.c | 71
-rw-r--r--  fs/xfs/xfs_fsmap.c | 5
-rw-r--r--  fs/xfs/xfs_icache.c | 5
-rw-r--r--  fs/xfs/xfs_iomap.c | 4
-rw-r--r--  include/acpi/acpi_bus.h | 3
-rw-r--r--  include/acpi/actbl.h | 14
-rw-r--r--  include/drm/drm_dp_helper.h | 51
-rw-r--r--  include/dt-bindings/clock/sun50i-a64-ccu.h | 2
-rw-r--r--  include/dt-bindings/clock/sun8i-h3-ccu.h | 2
-rw-r--r--  include/kvm/arm_vgic.h | 5
-rw-r--r--  include/linux/bio.h | 1
-rw-r--r--  include/linux/blk-mq.h | 1
-rw-r--r--  include/linux/blkdev.h | 4
-rw-r--r--  include/linux/bpf_verifier.h | 4
-rw-r--r--  include/linux/ceph/ceph_debug.h | 6
-rw-r--r--  include/linux/cgroup-defs.h | 1
-rw-r--r--  include/linux/cgroup.h | 20
-rw-r--r--  include/linux/compiler-clang.h | 8
-rw-r--r--  include/linux/configfs.h | 3
-rw-r--r--  include/linux/dax.h | 34
-rw-r--r--  include/linux/dma-iommu.h | 1
-rw-r--r--  include/linux/dmi.h | 2
-rw-r--r--  include/linux/elevator.h | 2
-rw-r--r--  include/linux/filter.h | 10
-rw-r--r--  include/linux/gfp.h | 2
-rw-r--r--  include/linux/gpio/machine.h | 7
-rw-r--r--  include/linux/hashtable.h | 1
-rw-r--r--  include/linux/if_vlan.h | 18
-rw-r--r--  include/linux/irqchip/arm-gic-v3.h | 4
-rw-r--r--  include/linux/irqchip/arm-gic.h | 28
-rw-r--r--  include/linux/jiffies.h | 6
-rw-r--r--  include/linux/key.h | 1
-rw-r--r--  include/linux/kprobes.h | 3
-rw-r--r--  include/linux/memblock.h | 8
-rw-r--r--  include/linux/mlx4/qp.h | 1
-rw-r--r--  include/linux/mlx5/device.h | 10
-rw-r--r--  include/linux/mlx5/driver.h | 7
-rw-r--r--  include/linux/mlx5/fs.h | 4
-rw-r--r--  include/linux/mlx5/mlx5_ifc.h | 10
-rw-r--r--  include/linux/mm.h | 64
-rw-r--r--  include/linux/mmzone.h | 1
-rw-r--r--  include/linux/mod_devicetable.h | 1
-rw-r--r--  include/linux/moduleparam.h | 2
-rw-r--r--  include/linux/netdevice.h | 23
-rw-r--r--  include/linux/netfilter/x_tables.h | 2
-rw-r--r--  include/linux/netfilter_bridge/ebtables.h | 5
-rw-r--r--  include/linux/nvme-fc-driver.h | 16
-rw-r--r--  include/linux/of_irq.h | 2
-rw-r--r--  include/linux/of_platform.h | 1
-rw-r--r--  include/linux/pci.h | 11
-rw-r--r--  include/linux/pinctrl/pinconf-generic.h | 3
-rw-r--r--  include/linux/platform_data/spi-mt65xx.h | 2
-rw-r--r--  include/linux/ptrace.h | 7
-rw-r--r--  include/linux/quotaops.h | 6
-rw-r--r--  include/linux/serdev.h | 19
-rw-r--r--  include/linux/slub_def.h | 1
-rw-r--r--  include/linux/soc/renesas/rcar-rst.h | 5
-rw-r--r--  include/linux/spi/spi.h | 217
-rw-r--r--  include/linux/srcu.h | 2
-rw-r--r--  include/linux/sunrpc/svc.h | 3
-rw-r--r--  include/linux/suspend.h | 7
-rw-r--r--  include/linux/timekeeper_internal.h | 5
-rw-r--r--  include/linux/tty.h | 9
-rw-r--r--  include/linux/usb/hcd.h | 1
-rw-r--r--  include/linux/usb/usbnet.h | 1
-rw-r--r--  include/media/cec-notifier.h | 12
-rw-r--r--  include/media/cec.h | 6
-rw-r--r--  include/net/dst.h | 8
-rw-r--r--  include/net/ip_fib.h | 10
-rw-r--r--  include/net/ipv6.h | 1
-rw-r--r--  include/net/netfilter/nf_conntrack_helper.h | 4
-rw-r--r--  include/net/netfilter/nf_tables.h | 2
-rw-r--r--  include/net/tc_act/tc_csum.h | 15
-rw-r--r--  include/net/tcp.h | 2
-rw-r--r--  include/net/wext.h | 4
-rw-r--r--  include/net/x25.h | 4
-rw-r--r--  include/net/xfrm.h | 17
-rw-r--r--  include/rdma/ib_sa.h | 25
-rw-r--r--  include/rdma/rdma_netlink.h | 10
-rw-r--r--  include/target/iscsi/iscsi_target_core.h | 1
-rw-r--r--  include/trace/events/spi.h | 26
-rw-r--r--  include/uapi/linux/a.out.h | 26
-rw-r--r--  include/uapi/linux/bpf.h | 8
-rw-r--r--  include/uapi/linux/ethtool.h | 6
-rw-r--r--  include/uapi/linux/if_link.h | 13
-rw-r--r--  include/uapi/linux/keyctl.h | 4
-rw-r--r--  include/uapi/linux/openvswitch.h | 1
-rw-r--r--  include/uapi/linux/usb/ch11.h | 3
-rw-r--r--  kernel/bpf/arraymap.c | 1
-rw-r--r--  kernel/bpf/lpm_trie.c | 1
-rw-r--r--  kernel/bpf/stackmap.c | 1
-rw-r--r--  kernel/bpf/syscall.c | 5
-rw-r--r--  kernel/bpf/verifier.c | 192
-rw-r--r--  kernel/cgroup/cgroup.c | 5
-rw-r--r--  kernel/cgroup/cpuset.c | 4
-rw-r--r--  kernel/cpu.c | 4
-rw-r--r--  kernel/events/core.c | 21
-rw-r--r--  kernel/events/ring_buffer.c | 2
-rw-r--r--  kernel/fork.c | 25
-rw-r--r--  kernel/irq/chip.c | 2
-rw-r--r--  kernel/irq/manage.c | 4
-rw-r--r--  kernel/kprobes.c | 10
-rw-r--r--  kernel/livepatch/Kconfig | 1
-rw-r--r--  kernel/livepatch/patch.c | 8
-rw-r--r--  kernel/livepatch/transition.c | 36
-rw-r--r--  kernel/locking/rtmutex.c | 24
-rw-r--r--  kernel/pid_namespace.c | 2
-rw-r--r--  kernel/power/process.c | 2
-rw-r--r--  kernel/power/snapshot.c | 2
-rw-r--r--  kernel/power/suspend.c | 29
-rw-r--r--  kernel/printk/printk.c | 46
-rw-r--r--  kernel/ptrace.c | 20
-rw-r--r--  kernel/rcu/srcu.c | 5
-rw-r--r--  kernel/rcu/srcutiny.c | 7
-rw-r--r--  kernel/rcu/srcutree.c | 5
-rw-r--r--  kernel/sched/core.c | 27
-rw-r--r--  kernel/sched/cpufreq_schedutil.c | 10
-rw-r--r--  kernel/sched/fair.c | 2
-rw-r--r--  kernel/sched/idle.c | 2
-rw-r--r--  kernel/sched/sched.h | 2
-rw-r--r--  kernel/signal.c | 20
-rw-r--r--  kernel/time/alarmtimer.c | 14
-rw-r--r--  kernel/time/posix-cpu-timers.c | 24
-rw-r--r--  kernel/time/tick-broadcast.c | 4
-rw-r--r--  kernel/time/tick-internal.h | 2
-rw-r--r--  kernel/time/timekeeping.c | 71
-rw-r--r--  kernel/trace/blktrace.c | 4
-rw-r--r--  kernel/trace/ftrace.c | 17
-rw-r--r--  kernel/trace/trace.c | 37
-rw-r--r--  kernel/trace/trace.h | 5
-rw-r--r--  kernel/trace/trace_functions.c | 12
-rw-r--r--  kernel/trace/trace_kprobe.c | 19
-rw-r--r--  kernel/trace/trace_stack.c | 6
-rw-r--r--  lib/cmdline.c | 6
-rw-r--r--  lib/libcrc32c.c | 6
-rw-r--r--  lib/test_bpf.c | 38
-rw-r--r--  mm/gup.c | 25
-rw-r--r--  mm/huge_memory.c | 8
-rw-r--r--  mm/hugetlb.c | 5
-rw-r--r--  mm/khugepaged.c | 1
-rw-r--r--  mm/ksm.c | 3
-rw-r--r--  mm/memblock.c | 23
-rw-r--r--  mm/memory-failure.c | 13
-rw-r--r--  mm/memory.c | 78
-rw-r--r--  mm/mlock.c | 5
-rw-r--r--  mm/mmap.c | 160
-rw-r--r--  mm/page_alloc.c | 37
-rw-r--r--  mm/slub.c | 46
-rw-r--r--  mm/swap_cgroup.c | 3
-rw-r--r--  mm/util.c | 7
-rw-r--r--  mm/vmalloc.c | 15
-rw-r--r--  mm/vmpressure.c | 6
-rw-r--r--  net/8021q/vlan.c | 3
-rw-r--r--  net/8021q/vlan_dev.c | 4
-rw-r--r--net/9p/trans_xen.c8
-rw-r--r--net/batman-adv/distributed-arp-table.c5
-rw-r--r--net/batman-adv/routing.c2
-rw-r--r--net/batman-adv/soft-interface.c5
-rw-r--r--net/bluetooth/6lowpan.c2
-rw-r--r--net/bridge/br_device.c2
-rw-r--r--net/bridge/br_netlink.c9
-rw-r--r--net/bridge/br_stp_if.c2
-rw-r--r--net/bridge/br_stp_timer.c2
-rw-r--r--net/bridge/netfilter/ebt_arpreply.c3
-rw-r--r--net/bridge/netfilter/ebtables.c9
-rw-r--r--net/caif/caif_socket.c4
-rw-r--r--net/caif/cfpkt_skbuff.c6
-rw-r--r--net/caif/chnl_net.c4
-rw-r--r--net/can/af_can.c3
-rw-r--r--net/ceph/auth_x.c13
-rw-r--r--net/ceph/ceph_common.c13
-rw-r--r--net/ceph/messenger.c26
-rw-r--r--net/ceph/mon_client.c4
-rw-r--r--net/ceph/osdmap.c1
-rw-r--r--net/core/dev.c131
-rw-r--r--net/core/dev_ioctl.c19
-rw-r--r--net/core/devlink.c8
-rw-r--r--net/core/dst.c37
-rw-r--r--net/core/fib_rules.c21
-rw-r--r--net/core/filter.c1
-rw-r--r--net/core/neighbour.c14
-rw-r--r--net/core/net_namespace.c19
-rw-r--r--net/core/rtnetlink.c93
-rw-r--r--net/core/skbuff.c5
-rw-r--r--net/core/sock.c23
-rw-r--r--net/core/sysctl_net_core.c2
-rw-r--r--net/dccp/ipv6.c6
-rw-r--r--net/decnet/dn_route.c14
-rw-r--r--net/decnet/netfilter/dn_rtmsg.c4
-rw-r--r--net/dsa/dsa.c47
-rw-r--r--net/dsa/dsa2.c4
-rw-r--r--net/dsa/legacy.c47
-rw-r--r--net/hsr/hsr_device.c4
-rw-r--r--net/hsr/hsr_forward.c3
-rw-r--r--net/hsr/hsr_framereg.c9
-rw-r--r--net/hsr/hsr_framereg.h2
-rw-r--r--net/ieee802154/6lowpan/core.c2
-rw-r--r--net/ipv4/af_inet.c2
-rw-r--r--net/ipv4/arp.c48
-rw-r--r--net/ipv4/esp4.c5
-rw-r--r--net/ipv4/fib_frontend.c15
-rw-r--r--net/ipv4/fib_semantics.c17
-rw-r--r--net/ipv4/fib_trie.c26
-rw-r--r--net/ipv4/icmp.c8
-rw-r--r--net/ipv4/igmp.c22
-rw-r--r--net/ipv4/ip_output.c3
-rw-r--r--net/ipv4/ip_tunnel.c6
-rw-r--r--net/ipv4/ipmr.c52
-rw-r--r--net/ipv4/route.c10
-rw-r--r--net/ipv4/tcp.c19
-rw-r--r--net/ipv4/tcp_cong.c1
-rw-r--r--net/ipv4/tcp_input.c11
-rw-r--r--net/ipv4/udp.c4
-rw-r--r--net/ipv4/udp_impl.h1
-rw-r--r--net/ipv6/addrconf.c16
-rw-r--r--net/ipv6/calipso.c6
-rw-r--r--net/ipv6/datagram.c8
-rw-r--r--net/ipv6/esp6_offload.c25
-rw-r--r--net/ipv6/fib6_rules.c22
-rw-r--r--net/ipv6/icmp.c2
-rw-r--r--net/ipv6/ila/ila_xlat.c1
-rw-r--r--net/ipv6/ip6_fib.c3
-rw-r--r--net/ipv6/ip6_gre.c22
-rw-r--r--net/ipv6/ip6_offload.c9
-rw-r--r--net/ipv6/ip6_output.c22
-rw-r--r--net/ipv6/ip6_tunnel.c34
-rw-r--r--net/ipv6/ip6_vti.c8
-rw-r--r--net/ipv6/ip6mr.c2
-rw-r--r--net/ipv6/output_core.c14
-rw-r--r--net/ipv6/ping.c2
-rw-r--r--net/ipv6/proc.c2
-rw-r--r--net/ipv6/raw.c2
-rw-r--r--net/ipv6/route.c7
-rw-r--r--net/ipv6/sit.c8
-rw-r--r--net/ipv6/tcp_ipv6.c2
-rw-r--r--net/ipv6/udp.c7
-rw-r--r--net/ipv6/udp_impl.h1
-rw-r--r--net/ipv6/udp_offload.c6
-rw-r--r--net/ipv6/xfrm6_input.c2
-rw-r--r--net/ipv6/xfrm6_mode_ro.c2
-rw-r--r--net/ipv6/xfrm6_mode_transport.c2
-rw-r--r--net/irda/irlan/irlan_eth.c2
-rw-r--r--net/key/af_key.c21
-rw-r--r--net/l2tp/l2tp_eth.c15
-rw-r--r--net/llc/af_llc.c3
-rw-r--r--net/mac80211/agg-tx.c128
-rw-r--r--net/mac80211/cfg.c2
-rw-r--r--net/mac80211/ht.c16
-rw-r--r--net/mac80211/ieee80211_i.h16
-rw-r--r--net/mac80211/iface.c18
-rw-r--r--net/mac80211/mlme.c62
-rw-r--r--net/mac80211/rx.c9
-rw-r--r--net/mac80211/sta_info.c2
-rw-r--r--net/mac80211/sta_info.h2
-rw-r--r--net/mac80211/wpa.c9
-rw-r--r--net/mac802154/iface.c7
-rw-r--r--net/mpls/af_mpls.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_core.c19
-rw-r--r--net/netfilter/nf_conntrack_helper.c12
-rw-r--r--net/netfilter/nf_conntrack_netlink.c18
-rw-r--r--net/netfilter/nf_conntrack_proto_sctp.c9
-rw-r--r--net/netfilter/nf_nat_core.c6
-rw-r--r--net/netfilter/nf_tables_api.c160
-rw-r--r--net/netfilter/nfnetlink_cthelper.c17
-rw-r--r--net/netfilter/nft_bitwise.c19
-rw-r--r--net/netfilter/nft_cmp.c12
-rw-r--r--net/netfilter/nft_ct.c4
-rw-r--r--net/netfilter/nft_immediate.c5
-rw-r--r--net/netfilter/nft_range.c4
-rw-r--r--net/netfilter/nft_set_hash.c2
-rw-r--r--net/netfilter/nft_set_rbtree.c22
-rw-r--r--net/netfilter/x_tables.c24
-rw-r--r--net/netfilter/xt_CT.c6
-rw-r--r--net/netlink/af_netlink.c4
-rw-r--r--net/openvswitch/conntrack.c4
-rw-r--r--net/openvswitch/vport-internal_dev.c4
-rw-r--r--net/packet/af_packet.c14
-rw-r--r--net/phonet/pep-gprs.c2
-rw-r--r--net/rxrpc/key.c64
-rw-r--r--net/sched/act_pedit.c4
-rw-r--r--net/sched/act_police.c8
-rw-r--r--net/sched/cls_matchall.c1
-rw-r--r--net/sched/sch_api.c9
-rw-r--r--net/sctp/associola.c4
-rw-r--r--net/sctp/endpointola.c1
-rw-r--r--net/sctp/input.c16
-rw-r--r--net/sctp/ipv6.c49
-rw-r--r--net/sctp/sctp_diag.c5
-rw-r--r--net/sctp/sm_make_chunk.c13
-rw-r--r--net/sctp/sm_statefuns.c3
-rw-r--r--net/sctp/socket.c9
-rw-r--r--net/smc/Kconfig4
-rw-r--r--net/smc/smc_clc.c4
-rw-r--r--net/smc/smc_core.c16
-rw-r--r--net/smc/smc_core.h2
-rw-r--r--net/smc/smc_ib.c21
-rw-r--r--net/smc/smc_ib.h2
-rw-r--r--net/sunrpc/xprtrdma/backchannel.c6
-rw-r--r--net/sunrpc/xprtsock.c7
-rw-r--r--net/tipc/msg.c2
-rw-r--r--net/tipc/socket.c38
-rw-r--r--net/unix/af_unix.c7
-rw-r--r--net/vmw_vsock/af_vsock.c21
-rw-r--r--net/wireless/scan.c8
-rw-r--r--net/wireless/util.c10
-rw-r--r--net/wireless/wext-core.c22
-rw-r--r--net/x25/af_x25.c24
-rw-r--r--net/x25/sysctl_net_x25.c5
-rw-r--r--net/xfrm/Makefile3
-rw-r--r--net/xfrm/xfrm_device.c4
-rw-r--r--net/xfrm/xfrm_policy.c51
-rw-r--r--net/xfrm/xfrm_state.c2
-rw-r--r--net/xfrm/xfrm_user.c1
-rw-r--r--samples/bpf/cookie_uid_helper_example.c4
-rw-r--r--samples/bpf/offwaketime_user.c1
-rw-r--r--samples/bpf/sampleip_user.c1
-rw-r--r--samples/bpf/trace_event_user.c1
-rw-r--r--samples/bpf/tracex2_user.c1
-rw-r--r--samples/bpf/xdp1_user.c9
-rw-r--r--samples/bpf/xdp_tx_iptunnel_user.c8
-rw-r--r--scripts/Makefile.headersinst51
-rw-r--r--scripts/Makefile.lib2
-rw-r--r--scripts/dtc/checks.c2
l---------scripts/dtc/include-prefixes/arc1
l---------scripts/dtc/include-prefixes/arm1
l---------scripts/dtc/include-prefixes/arm641
l---------scripts/dtc/include-prefixes/c6x1
l---------scripts/dtc/include-prefixes/cris1
l---------scripts/dtc/include-prefixes/dt-bindings1
l---------scripts/dtc/include-prefixes/h83001
l---------scripts/dtc/include-prefixes/metag1
l---------scripts/dtc/include-prefixes/microblaze1
l---------scripts/dtc/include-prefixes/mips1
l---------scripts/dtc/include-prefixes/nios21
l---------scripts/dtc/include-prefixes/openrisc1
l---------scripts/dtc/include-prefixes/powerpc1
l---------scripts/dtc/include-prefixes/sh1
l---------scripts/dtc/include-prefixes/xtensa1
-rw-r--r--scripts/gdb/linux/dmesg.py9
-rw-r--r--scripts/genksyms/genksyms.h2
-rw-r--r--scripts/kconfig/Makefile2
-rw-r--r--scripts/kconfig/nconf.c12
-rw-r--r--scripts/kconfig/nconf.gui.c4
-rwxr-xr-xscripts/tags.sh1
-rw-r--r--security/keys/Kconfig6
-rw-r--r--security/keys/dh.c300
-rw-r--r--security/keys/encrypted-keys/encrypted.c204
-rw-r--r--security/keys/gc.c4
-rw-r--r--security/keys/key.c16
-rw-r--r--security/keys/keyctl.c16
-rw-r--r--security/keys/keyring.c12
-rw-r--r--security/keys/process_keys.c7
-rw-r--r--security/keys/trusted.c50
-rw-r--r--security/keys/user_defined.c16
-rw-r--r--security/selinux/hooks.c5
-rw-r--r--sound/core/pcm_lib.c4
-rw-r--r--sound/core/timer.c7
-rw-r--r--sound/firewire/amdtp-stream.c8
-rw-r--r--sound/firewire/amdtp-stream.h2
-rw-r--r--sound/pci/hda/hda_codec.h2
-rw-r--r--sound/pci/hda/hda_controller.c8
-rw-r--r--sound/pci/hda/hda_generic.c1
-rw-r--r--sound/pci/hda/hda_intel.c11
-rw-r--r--sound/pci/hda/patch_realtek.c22
-rw-r--r--sound/pci/hda/patch_sigmatel.c2
-rw-r--r--sound/soc/atmel/atmel-classd.c9
-rw-r--r--sound/soc/codecs/da7213.c2
-rw-r--r--sound/soc/codecs/rt286.c7
-rw-r--r--sound/soc/generic/simple-card.c2
-rw-r--r--sound/soc/intel/skylake/skl-sst-ipc.c5
-rw-r--r--sound/soc/intel/skylake/skl-topology.c2
-rw-r--r--sound/soc/intel/skylake/skl.c162
-rw-r--r--sound/soc/intel/skylake/skl.h4
-rw-r--r--sound/soc/sh/rcar/adg.c6
-rw-r--r--sound/soc/sh/rcar/cmd.c1
-rw-r--r--sound/soc/sh/rcar/core.c51
-rw-r--r--sound/soc/sh/rcar/gen.c1
-rw-r--r--sound/soc/sh/rcar/rsnd.h2
-rw-r--r--sound/soc/sh/rcar/src.c12
-rw-r--r--sound/soc/sh/rcar/ssi.c18
-rw-r--r--sound/soc/sh/rcar/ssiu.c3
-rw-r--r--sound/soc/soc-core.c5
-rw-r--r--sound/usb/mixer_us16x08.c19
-rw-r--r--sound/usb/quirks.c2
-rw-r--r--sound/x86/intel_hdmi_audio.c4
-rw-r--r--tools/arch/arm/include/uapi/asm/kvm.h10
-rw-r--r--tools/arch/arm64/include/uapi/asm/kvm.h10
-rw-r--r--tools/arch/powerpc/include/uapi/asm/kvm.h3
-rw-r--r--tools/arch/s390/include/uapi/asm/kvm.h26
-rw-r--r--tools/arch/x86/include/asm/cpufeatures.h2
-rw-r--r--tools/arch/x86/include/asm/disabled-features.h8
-rw-r--r--tools/arch/x86/include/asm/required-features.h8
-rw-r--r--tools/arch/x86/include/uapi/asm/kvm.h3
-rw-r--r--tools/arch/x86/include/uapi/asm/vmx.h25
-rw-r--r--tools/build/feature/test-bpf.c1
-rw-r--r--tools/include/linux/filter.h10
-rw-r--r--tools/include/uapi/linux/bpf.h11
-rw-r--r--tools/include/uapi/linux/stat.h8
-rw-r--r--tools/lib/bpf/bpf.c22
-rw-r--r--tools/lib/bpf/bpf.h4
-rw-r--r--tools/objtool/builtin-check.c3
-rw-r--r--tools/perf/Documentation/perf-probe.txt8
-rw-r--r--tools/perf/Documentation/perf-script-perl.txt2
-rw-r--r--tools/perf/Documentation/perf-script-python.txt23
-rw-r--r--tools/perf/Documentation/perf-script.txt4
-rw-r--r--tools/perf/Makefile.config38
-rw-r--r--tools/perf/Makefile.perf2
-rw-r--r--tools/perf/arch/Build2
-rw-r--r--tools/perf/arch/common.c1
-rw-r--r--tools/perf/builtin-script.c2
-rw-r--r--tools/perf/builtin-stat.c5
-rw-r--r--tools/perf/builtin-trace.c4
-rw-r--r--tools/perf/pmu-events/Build4
-rw-r--r--tools/perf/tests/Build2
-rw-r--r--tools/perf/tests/bp_signal.c14
-rw-r--r--tools/perf/tests/builtin-test.c7
-rw-r--r--tools/perf/tests/code-reading.c20
-rw-r--r--tools/perf/tests/task-exit.c2
-rw-r--r--tools/perf/tests/tests.h3
-rw-r--r--tools/perf/ui/hist.c2
-rw-r--r--tools/perf/util/annotate.c72
-rw-r--r--tools/perf/util/build-id.c45
-rw-r--r--tools/perf/util/build-id.h1
-rw-r--r--tools/perf/util/callchain.c13
-rw-r--r--tools/perf/util/dso.c100
-rw-r--r--tools/perf/util/dso.h9
-rw-r--r--tools/perf/util/evsel.c12
-rw-r--r--tools/perf/util/evsel_fprintf.c33
-rw-r--r--tools/perf/util/header.c14
-rw-r--r--tools/perf/util/machine.c21
-rw-r--r--tools/perf/util/probe-event.c2
-rw-r--r--tools/perf/util/scripting-engines/trace-event-python.c2
-rw-r--r--tools/perf/util/srcline.c49
-rw-r--r--tools/perf/util/symbol-elf.c41
-rw-r--r--tools/perf/util/symbol.c4
-rw-r--r--tools/perf/util/unwind-libdw.c22
-rw-r--r--tools/perf/util/unwind-libunwind-local.c11
-rw-r--r--tools/power/acpi/.gitignore4
-rw-r--r--tools/testing/selftests/bpf/Makefile6
-rw-r--r--tools/testing/selftests/bpf/bpf_endian.h41
-rw-r--r--tools/testing/selftests/bpf/include/uapi/linux/types.h22
-rw-r--r--tools/testing/selftests/bpf/test_align.c453
-rw-r--r--tools/testing/selftests/bpf/test_pkt_access.c1
-rw-r--r--tools/testing/selftests/bpf/test_verifier.c305
-rwxr-xr-xtools/testing/selftests/ftrace/ftracetest2
-rw-r--r--tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc2
-rw-r--r--tools/testing/selftests/ftrace/test.d/functions4
-rw-r--r--tools/testing/selftests/ftrace/test.d/instances/instance-event.tc8
-rw-r--r--tools/testing/selftests/ftrace/test.d/kprobe/multiple_kprobes.tc21
-rwxr-xr-xtools/testing/selftests/ntb/ntb_test.sh2
-rw-r--r--tools/testing/selftests/powerpc/tm/.gitignore1
-rw-r--r--tools/testing/selftests/powerpc/tm/Makefile4
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-resched-dscr.c2
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-vmx-unavail.c118
-rw-r--r--usr/Kconfig1
-rw-r--r--virt/kvm/arm/hyp/vgic-v3-sr.c18
-rw-r--r--virt/kvm/arm/mmu.c36
-rw-r--r--virt/kvm/arm/vgic/vgic-init.c5
-rw-r--r--virt/kvm/arm/vgic/vgic-mmio-v2.c16
-rw-r--r--virt/kvm/arm/vgic/vgic-mmio-v3.c12
-rw-r--r--virt/kvm/arm/vgic/vgic-v2.c35
-rw-r--r--virt/kvm/arm/vgic/vgic-v3.c54
-rw-r--r--virt/kvm/arm/vgic/vgic.h12
1724 files changed, 20176 insertions, 11030 deletions
diff --git a/Documentation/acpi/acpi-lid.txt b/Documentation/acpi/acpi-lid.txt
index 22cb3091f297..effe7af3a5af 100644
--- a/Documentation/acpi/acpi-lid.txt
+++ b/Documentation/acpi/acpi-lid.txt
@@ -59,20 +59,28 @@ button driver uses the following 3 modes in order not to trigger issues.
 If the userspace hasn't been prepared to ignore the unreliable "opened"
 events and the unreliable initial state notification, Linux users can use
 the following kernel parameters to handle the possible issues:
-A. button.lid_init_state=open:
+A. button.lid_init_state=method:
+   When this option is specified, the ACPI button driver reports the
+   initial lid state using the returning value of the _LID control method
+   and whether the "opened"/"closed" events are paired fully relies on the
+   firmware implementation.
+   This option can be used to fix some platforms where the returning value
+   of the _LID control method is reliable but the initial lid state
+   notification is missing.
+   This option is the default behavior during the period the userspace
+   isn't ready to handle the buggy AML tables.
+B. button.lid_init_state=open:
    When this option is specified, the ACPI button driver always reports the
    initial lid state as "opened" and whether the "opened"/"closed" events
    are paired fully relies on the firmware implementation.
    This may fix some platforms where the returning value of the _LID
    control method is not reliable and the initial lid state notification is
    missing.
-   This option is the default behavior during the period the userspace
-   isn't ready to handle the buggy AML tables.
 
 If the userspace has been prepared to ignore the unreliable "opened" events
 and the unreliable initial state notification, Linux users should always
 use the following kernel parameter:
-B. button.lid_init_state=ignore:
+C. button.lid_init_state=ignore:
    When this option is specified, the ACPI button driver never reports the
    initial lid state and there is a compensation mechanism implemented to
    ensure that the reliable "closed" notifications can always be delivered
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 15f79c27748d..7737ab5d04b2 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -866,6 +866,15 @@
 
 	dscc4.setup=	[NET]
 
+	dt_cpu_ftrs=	[PPC]
+			Format: {"off" | "known"}
+			Control how the dt_cpu_ftrs device-tree binding is
+			used for CPU feature discovery and setup (if it
+			exists).
+			off: Do not use it, fall back to legacy cpu table.
+			known: Do not pass through unknown features to guests
+			or userspace, only those that the kernel is aware of.
+
 	dump_apple_properties	[X86]
 			Dump name and content of EFI device properties on
 			x86 Macs. Useful for driver authors to determine
@@ -3802,6 +3811,13 @@
 			expediting. Set to zero to disable automatic
 			expediting.
 
+	stack_guard_gap=	[MM]
+			override the default stack gap protection. The value
+			is in page units and it defines how many pages prior
+			to (for stacks growing down) resp. after (for stacks
+			growing up) the main stack are reserved for no other
+			mapping. Default value is 256 pages.
+
 	stacktrace	[FTRACE]
 			Enable the stack tracer on boot up.
 
diff --git a/Documentation/admin-guide/pm/cpufreq.rst b/Documentation/admin-guide/pm/cpufreq.rst
index 289c80f7760e..09aa2e949787 100644
--- a/Documentation/admin-guide/pm/cpufreq.rst
+++ b/Documentation/admin-guide/pm/cpufreq.rst
@@ -1,4 +1,5 @@
 .. |struct cpufreq_policy| replace:: :c:type:`struct cpufreq_policy <cpufreq_policy>`
+.. |intel_pstate| replace:: :doc:`intel_pstate <intel_pstate>`
 
 =======================
 CPU Performance Scaling
@@ -75,7 +76,7 @@ feedback registers, as that information is typically specific to the hardware
 interface it comes from and may not be easily represented in an abstract,
 platform-independent way. For this reason, ``CPUFreq`` allows scaling drivers
 to bypass the governor layer and implement their own performance scaling
-algorithms. That is done by the ``intel_pstate`` scaling driver.
+algorithms. That is done by the |intel_pstate| scaling driver.
 
 
 ``CPUFreq`` Policy Objects
@@ -174,13 +175,13 @@ necessary to restart the scaling governor so that it can take the new online CPU
 into account. That is achieved by invoking the governor's ``->stop`` and
 ``->start()`` callbacks, in this order, for the entire policy.
 
-As mentioned before, the ``intel_pstate`` scaling driver bypasses the scaling
+As mentioned before, the |intel_pstate| scaling driver bypasses the scaling
 governor layer of ``CPUFreq`` and provides its own P-state selection algorithms.
-Consequently, if ``intel_pstate`` is used, scaling governors are not attached to
+Consequently, if |intel_pstate| is used, scaling governors are not attached to
 new policy objects. Instead, the driver's ``->setpolicy()`` callback is invoked
 to register per-CPU utilization update callbacks for each policy. These
 callbacks are invoked by the CPU scheduler in the same way as for scaling
-governors, but in the ``intel_pstate`` case they both determine the P-state to
+governors, but in the |intel_pstate| case they both determine the P-state to
 use and change the hardware configuration accordingly in one go from scheduler
 context.
 
@@ -257,7 +258,7 @@ are the following:
 
 ``scaling_available_governors``
 	List of ``CPUFreq`` scaling governors present in the kernel that can
-	be attached to this policy or (if the ``intel_pstate`` scaling driver is
+	be attached to this policy or (if the |intel_pstate| scaling driver is
 	in use) list of scaling algorithms provided by the driver that can be
 	applied to this policy.
 
@@ -274,7 +275,7 @@ are the following:
 	the CPU is actually running at (due to hardware design and other
 	limitations).
 
-	Some scaling drivers (e.g. ``intel_pstate``) attempt to provide
+	Some scaling drivers (e.g. |intel_pstate|) attempt to provide
 	information more precisely reflecting the current CPU frequency through
 	this attribute, but that still may not be the exact current CPU
 	frequency as seen by the hardware at the moment.
@@ -284,13 +285,13 @@ are the following:
 
 ``scaling_governor``
 	The scaling governor currently attached to this policy or (if the
-	``intel_pstate`` scaling driver is in use) the scaling algorithm
+	|intel_pstate| scaling driver is in use) the scaling algorithm
 	provided by the driver that is currently applied to this policy.
 
 	This attribute is read-write and writing to it will cause a new scaling
 	governor to be attached to this policy or a new scaling algorithm
 	provided by the scaling driver to be applied to it (in the
-	``intel_pstate`` case), as indicated by the string written to this
+	|intel_pstate| case), as indicated by the string written to this
 	attribute (which must be one of the names listed by the
 	``scaling_available_governors`` attribute described above).
 
@@ -619,7 +620,7 @@ This file is located under :file:`/sys/devices/system/cpu/cpufreq/` and controls
619the "boost" setting for the whole system. It is not present if the underlying 620the "boost" setting for the whole system. It is not present if the underlying
620scaling driver does not support the frequency boost mechanism (or supports it, 621scaling driver does not support the frequency boost mechanism (or supports it,
621but provides a driver-specific interface for controlling it, like 622but provides a driver-specific interface for controlling it, like
622``intel_pstate``). 623|intel_pstate|).
623 624
624If the value in this file is 1, the frequency boost mechanism is enabled. This 625If the value in this file is 1, the frequency boost mechanism is enabled. This
625means that either the hardware can be put into states in which it is able to 626means that either the hardware can be put into states in which it is able to
diff --git a/Documentation/admin-guide/pm/index.rst b/Documentation/admin-guide/pm/index.rst
index c80f087321fc..7f148f76f432 100644
--- a/Documentation/admin-guide/pm/index.rst
+++ b/Documentation/admin-guide/pm/index.rst
@@ -6,6 +6,7 @@ Power Management
    :maxdepth: 2
 
    cpufreq
+   intel_pstate
 
 .. only:: subproject and html
 
diff --git a/Documentation/admin-guide/pm/intel_pstate.rst b/Documentation/admin-guide/pm/intel_pstate.rst
new file mode 100644
index 000000000000..33d703989ea8
--- /dev/null
+++ b/Documentation/admin-guide/pm/intel_pstate.rst
@@ -0,0 +1,755 @@
1===============================================
2``intel_pstate`` CPU Performance Scaling Driver
3===============================================
4
5::
6
7 Copyright (c) 2017 Intel Corp., Rafael J. Wysocki <rafael.j.wysocki@intel.com>
8
9
10General Information
11===================
12
13``intel_pstate`` is a part of the
14:doc:`CPU performance scaling subsystem <cpufreq>` in the Linux kernel
15(``CPUFreq``). It is a scaling driver for the Sandy Bridge and later
16generations of Intel processors. Note, however, that some of those processors
17may not be supported. [To understand ``intel_pstate`` it is necessary to know
18how ``CPUFreq`` works in general, so this is the time to read :doc:`cpufreq` if
19you have not done that yet.]
20
21For the processors supported by ``intel_pstate``, the P-state concept is broader
22than just an operating frequency or an operating performance point (see the
23`LinuxCon Europe 2015 presentation by Kristen Accardi <LCEU2015_>`_ for more
24information about that). For this reason, the representation of P-states used
25by ``intel_pstate`` internally follows the hardware specification (for details
26refer to `Intel® 64 and IA-32 Architectures Software Developer’s Manual
27Volume 3: System Programming Guide <SDM_>`_). However, the ``CPUFreq`` core
28uses frequencies for identifying operating performance points of CPUs and
29frequencies are involved in the user space interface exposed by it, so
30``intel_pstate`` maps its internal representation of P-states to frequencies too
31(fortunately, that mapping is unambiguous). At the same time, it would not be
32practical for ``intel_pstate`` to supply the ``CPUFreq`` core with a table of
33available frequencies due to the possible size of it, so the driver does not do
34that. Some functionality of the core is limited by that.
35
36Since the hardware P-state selection interface used by ``intel_pstate`` is
37available at the logical CPU level, the driver always works with individual
38CPUs. Consequently, if ``intel_pstate`` is in use, every ``CPUFreq`` policy
39object corresponds to one logical CPU and ``CPUFreq`` policies are effectively
40equivalent to CPUs. In particular, this means that they become "inactive" every
41time the corresponding CPU is taken offline and need to be re-initialized when
42it goes back online.
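
For example, on a hypothetical system with four logical CPUs, this
one-policy-per-CPU layout is directly visible in ``sysfs`` (the CPU numbers
below are illustrative only)::

 # ls -d /sys/devices/system/cpu/cpu[0-3]/cpufreq
 /sys/devices/system/cpu/cpu0/cpufreq  /sys/devices/system/cpu/cpu2/cpufreq
 /sys/devices/system/cpu/cpu1/cpufreq  /sys/devices/system/cpu/cpu3/cpufreq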
43
44``intel_pstate`` is not modular, so it cannot be unloaded, which means that the
45only way to pass early-configuration-time parameters to it is via the kernel
46command line. However, its configuration can be adjusted via ``sysfs`` to a
47great extent. In some configurations it even is possible to unregister it via
48``sysfs`` which allows another ``CPUFreq`` scaling driver to be loaded and
49registered (see `below <status_attr_>`_).
50
51
52Operation Modes
53===============
54
55``intel_pstate`` can operate in three different modes: in the active mode with
56or without hardware-managed P-states support and in the passive mode. Which of
57them will be in effect depends on what kernel command line options are used and
58on the capabilities of the processor.
59
60Active Mode
61-----------
62
63This is the default operation mode of ``intel_pstate``. If it works in this
64mode, the ``scaling_driver`` policy attribute in ``sysfs`` for all ``CPUFreq``
65policies contains the string "intel_pstate".
66
67In this mode the driver bypasses the scaling governors layer of ``CPUFreq`` and
68provides its own scaling algorithms for P-state selection. Those algorithms
69can be applied to ``CPUFreq`` policies in the same way as generic scaling
70governors (that is, through the ``scaling_governor`` policy attribute in
71``sysfs``). [Note that different P-state selection algorithms may be chosen for
72different policies, but that is not recommended.]
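
For example, the P-state selection algorithm applied to a given policy can be
inspected and changed as follows (a minimal illustrative session assuming the
active mode and CPU 0)::

 # cat /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
 powersave
 # echo performance > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor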
73
74They are not generic scaling governors, but their names are the same as the
75names of some of those governors. Moreover, confusingly enough, they generally
76do not work in the same way as the generic governors they share the names with.
77For example, the ``powersave`` P-state selection algorithm provided by
78``intel_pstate`` is not a counterpart of the generic ``powersave`` governor
79(roughly, it corresponds to the ``schedutil`` and ``ondemand`` governors).
80
81There are two P-state selection algorithms provided by ``intel_pstate`` in the
82active mode: ``powersave`` and ``performance``. The way they both operate
83depends on whether or not the hardware-managed P-states (HWP) feature has been
84enabled in the processor and possibly on the processor model.
85
86Which of the P-state selection algorithms is used by default depends on the
87:c:macro:`CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE` kernel configuration option.
88Namely, if that option is set, the ``performance`` algorithm will be used by
89default, and the ``powersave`` algorithm will be used by default otherwise.
90
91Active Mode With HWP
92~~~~~~~~~~~~~~~~~~~~
93
94If the processor supports the HWP feature, it will be enabled during the
95processor initialization and cannot be disabled after that. It is possible
96to avoid enabling it by passing the ``intel_pstate=no_hwp`` argument to the
97kernel in the command line.
98
99If the HWP feature has been enabled, ``intel_pstate`` relies on the processor to
100select P-states by itself, but still it can give hints to the processor's
101internal P-state selection logic. What those hints are depends on which P-state
102selection algorithm has been applied to the given policy (or to the CPU it
103corresponds to).
104
105Even though the P-state selection is carried out by the processor automatically,
106``intel_pstate`` registers utilization update callbacks with the CPU scheduler
107in this mode. However, they are not used for running a P-state selection
108algorithm, but for periodic updates of the current CPU frequency information to
109be made available from the ``scaling_cur_freq`` policy attribute in ``sysfs``.
110
111HWP + ``performance``
112.....................
113
114In this configuration ``intel_pstate`` will write 0 to the processor's
115Energy-Performance Preference (EPP) knob (if supported) or its
116Energy-Performance Bias (EPB) knob (otherwise), which means that the processor's
117internal P-state selection logic is expected to focus entirely on performance.
118
119This will override the EPP/EPB setting coming from the ``sysfs`` interface
120(see `Energy vs Performance Hints`_ below).
121
122Also, in this configuration the range of P-states available to the processor's
123internal P-state selection logic is always restricted to the upper boundary
124(that is, the maximum P-state that the driver is allowed to use).
125
126HWP + ``powersave``
127...................
128
129In this configuration ``intel_pstate`` will set the processor's
130Energy-Performance Preference (EPP) knob (if supported) or its
131Energy-Performance Bias (EPB) knob (otherwise) to whatever value it was
132previously set to via ``sysfs`` (or whatever default value it was
133set to by the platform firmware). This usually causes the processor's
134internal P-state selection logic to be less performance-focused.
135
136Active Mode Without HWP
137~~~~~~~~~~~~~~~~~~~~~~~
138
139This is the default operation mode for processors that do not support the HWP
140feature. It also is used by default with the ``intel_pstate=no_hwp`` argument
141in the kernel command line. However, in this mode ``intel_pstate`` may refuse
142to work with the given processor if it does not recognize it. [Note that
143``intel_pstate`` will never refuse to work with any processor with the HWP
144feature enabled.]
145
146In this mode ``intel_pstate`` registers utilization update callbacks with the
147CPU scheduler in order to run a P-state selection algorithm, either
148``powersave`` or ``performance``, depending on the ``scaling_governor`` policy
149setting in ``sysfs``. The current CPU frequency information to be made
150available from the ``scaling_cur_freq`` policy attribute in ``sysfs`` is
151periodically updated by those utilization update callbacks too.
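
For example, the frequency information maintained this way can be read in the
usual manner (the value below is illustrative only)::

 # cat /sys/devices/system/cpu/cpu0/cpufreq/scaling_cur_freq
 1862000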
152
153``performance``
154...............
155
156Without HWP, this P-state selection algorithm is always the same regardless of
157the processor model and platform configuration.
158
159It selects the maximum P-state it is allowed to use, subject to limits set via
160``sysfs``, every time the P-state selection computations are carried out by the
161driver's utilization update callback for the given CPU (that does not happen
162more often than every 10 ms), but the hardware configuration will not be changed
163if the new P-state is the same as the current one.
164
165This is the default P-state selection algorithm if the
166:c:macro:`CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE` kernel configuration option
167is set.
168
169``powersave``
170.............
171
172Without HWP, this P-state selection algorithm generally depends on the
173processor model and/or the system profile setting in the ACPI tables and there
174are two variants of it.
175
176One of them is used with processors from the Atom line and (regardless of the
177processor model) on platforms with the system profile in the ACPI tables set to
178"mobile" (laptops mostly), "tablet", "appliance PC", "desktop", or
179"workstation". It is also used with processors supporting the HWP feature if
180that feature has not been enabled (that is, with the ``intel_pstate=no_hwp``
181argument in the kernel command line). It is similar to the algorithm
182implemented by the generic ``schedutil`` scaling governor except that the
183utilization metric used by it is based on numbers coming from feedback
184registers of the CPU. It generally selects P-states proportional to the
185current CPU utilization, so it is referred to as the "proportional" algorithm.
186
187The second variant of the ``powersave`` P-state selection algorithm, used in all
188of the other cases (generally, on processors from the Core line, so it is
189referred to as the "Core" algorithm), is based on the values read from the APERF
190and MPERF feedback registers and the previously requested target P-state.
191It does not really take CPU utilization into account explicitly, but as a rule
192it causes the CPU P-state to ramp up very quickly in response to increased
193utilization which is generally desirable in server environments.
194
195Regardless of the variant, this algorithm is run by the driver's utilization
196update callback for the given CPU when it is invoked by the CPU scheduler, but
197not more often than every 10 ms (that can be tweaked via ``debugfs`` in `this
198particular case <Tuning Interface in debugfs_>`_). Like in the ``performance``
199case, the hardware configuration is not touched if the new P-state turns out to
200be the same as the current one.
201
202This is the default P-state selection algorithm if the
203:c:macro:`CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE` kernel configuration option
204is not set.
205
206Passive Mode
207------------
208
209This mode is used if the ``intel_pstate=passive`` argument is passed to the
210kernel in the command line (it implies the ``intel_pstate=no_hwp`` setting too).
211Like in the active mode without HWP support, in this mode ``intel_pstate`` may
212refuse to work with the given processor if it does not recognize it.
213
214If the driver works in this mode, the ``scaling_driver`` policy attribute in
215``sysfs`` for all ``CPUFreq`` policies contains the string "intel_cpufreq".
216Then, the driver behaves like a regular ``CPUFreq`` scaling driver. That is,
217it is invoked by generic scaling governors when necessary to talk to the
218hardware in order to change the P-state of a CPU (in particular, the
219``schedutil`` governor can invoke it directly from scheduler context).
220
221While in this mode, ``intel_pstate`` can be used with all of the (generic)
222scaling governors listed by the ``scaling_available_governors`` policy attribute
223in ``sysfs`` (and the P-state selection algorithms described above are not
224used). Then, it is responsible for the configuration of policy objects
225corresponding to CPUs and provides the ``CPUFreq`` core (and the scaling
226governors attached to the policy objects) with accurate information on the
227maximum and minimum operating frequencies supported by the hardware (including
228the so-called "turbo" frequency ranges). In other words, in the passive mode
229the entire range of available P-states is exposed by ``intel_pstate`` to the
230``CPUFreq`` core. However, in this mode the driver does not register
231utilization update callbacks with the CPU scheduler and the ``scaling_cur_freq``
232information comes from the ``CPUFreq`` core (and is the last frequency selected
233by the current scaling governor for the given policy).
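
For example, the passive mode can be recognized by looking at the
``scaling_driver`` policy attribute, after which any generic scaling governor
may be selected (an illustrative session)::

 # cat /sys/devices/system/cpu/cpu0/cpufreq/scaling_driver
 intel_cpufreq
 # echo schedutil > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor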
234
235
236.. _turbo:
237
238Turbo P-states Support
239======================
240
241In the majority of cases, the entire range of P-states available to
242``intel_pstate`` can be divided into two sub-ranges that correspond to
243different types of processor behavior, above and below a boundary that
244will be referred to as the "turbo threshold" in what follows.
245
246The P-states above the turbo threshold are referred to as "turbo P-states" and
247the whole sub-range of P-states they belong to is referred to as the "turbo
248range". These names are related to the Turbo Boost technology allowing a
249multicore processor to opportunistically increase the P-state of one or more
250cores if there is enough power to do that and if that is not going to cause the
251thermal envelope of the processor package to be exceeded.
252
253Specifically, if software sets the P-state of a CPU core within the turbo range
254(that is, above the turbo threshold), the processor is permitted to take over
255performance scaling control for that core and put it into turbo P-states of its
256choice going forward. However, that permission is interpreted differently by
257different processor generations. Namely, the Sandy Bridge generation of
258processors will never use any P-states above the last one set by software for
259the given core, even if it is within the turbo range, whereas all of the later
260processor generations will take it as a license to use any P-states from the
261turbo range, even above the one set by software. In other words, on those
262processors setting any P-state from the turbo range will enable the processor
263to put the given core into all turbo P-states up to and including the maximum
264supported one as it sees fit.
265
266One important property of turbo P-states is that they are not sustainable. More
267precisely, there is no guarantee that any CPUs will be able to stay in any of
268those states indefinitely, because the power distribution within the processor
269package may change over time or the thermal envelope it was designed for might
270be exceeded if a turbo P-state was used for too long.
271
272In turn, the P-states below the turbo threshold generally are sustainable. In
273fact, if one of them is set by software, the processor is not expected to change
274it to a lower one unless in a thermal stress or a power limit violation
275situation (a higher P-state may still be used if it is set for another CPU in
276the same package at the same time, for example).
277
278Some processors allow multiple cores to be in turbo P-states at the same time,
279but the maximum P-state that can be set for them generally depends on the number
280of cores running concurrently. The maximum turbo P-state that can be set for 3
281cores at the same time usually is lower than the analogous maximum P-state for
2822 cores, which in turn usually is lower than the maximum turbo P-state that can
283be set for 1 core. The one-core maximum turbo P-state is thus the maximum
284supported one overall.
285
286The maximum supported turbo P-state, the turbo threshold (the maximum supported
287non-turbo P-state) and the minimum supported P-state are specific to the
288processor model and can be determined by reading the processor's model-specific
289registers (MSRs). Moreover, some processors support the Configurable TDP
290(Thermal Design Power) feature and, when that feature is enabled, the turbo
291threshold effectively becomes a configurable value that can be set by the
292platform firmware.
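
For instance, on many processors from the Core line those values can be read
with the ``rdmsr`` utility from msr-tools (the MSR layout is model-specific,
so the hardware manuals should be consulted for the given processor; the
register numbers, bit fields and values below are illustrative only)::

 # rdmsr -p 0 -f 15:8 0xce    # MSR_PLATFORM_INFO: maximum non-turbo ratio
 24
 # rdmsr -p 0 -f 7:0 0x1ad    # MSR_TURBO_RATIO_LIMIT: maximum 1-core turbo ratio
 2a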
293
294Unlike ``_PSS`` objects in the ACPI tables, ``intel_pstate`` always exposes
295the entire range of available P-states, including the whole turbo range, to the
296``CPUFreq`` core and (in the passive mode) to generic scaling governors. This
297generally causes turbo P-states to be set more often when ``intel_pstate`` is
298used relative to ACPI-based CPU performance scaling (see `below <acpi-cpufreq_>`_
299for more information).
300
301Moreover, since ``intel_pstate`` always knows what the real turbo threshold is
302(even if the Configurable TDP feature is enabled in the processor), its
303``no_turbo`` attribute in ``sysfs`` (described `below <no_turbo_attr_>`_) should
304work as expected in all cases (that is, if set to disable turbo P-states, it
305always should prevent ``intel_pstate`` from using them).
306
307
308Processor Support
309=================
310
311To handle a given processor, ``intel_pstate`` requires a number of different
312pieces of information about it to be known, including:
313
314 * The minimum supported P-state.
315
316 * The maximum supported `non-turbo P-state <turbo_>`_.
317
318 * Whether or not turbo P-states are supported at all.
319
320 * The maximum supported `one-core turbo P-state <turbo_>`_ (if turbo P-states
321 are supported).
322
323 * The scaling formula to translate the driver's internal representation
324 of P-states into frequencies and the other way around.
325
326Generally, ways to obtain that information are specific to the processor model
327or family. Although it often is possible to obtain all of it from the processor
328itself (using model-specific registers), there are cases in which hardware
329manuals need to be consulted to get to it too.
330
331For this reason, there is a list of supported processors in ``intel_pstate`` and
332the driver initialization will fail if the detected processor is not in that
333list, unless it supports the `HWP feature <Active Mode_>`_. [The interface to
334obtain all of the information listed above is the same for all of the processors
335supporting the HWP feature, which is why they all are supported by
336``intel_pstate``.]
337
338
339User Space Interface in ``sysfs``
340=================================
341
342Global Attributes
343-----------------
344
345``intel_pstate`` exposes several global attributes (files) in ``sysfs`` to
346control its functionality at the system level. They are located in the
347``/sys/devices/system/cpu/intel_pstate/`` directory and affect all
348CPUs.
349
350Some of them are not present if the ``intel_pstate=per_cpu_perf_limits``
351argument is passed to the kernel in the command line.
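
For example (assuming the driver is registered; the exact set of files
depends on the configuration, as described above and below)::

 # ls /sys/devices/system/cpu/intel_pstate
 max_perf_pct  min_perf_pct  no_turbo  num_pstates  status  turbo_pct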
352
353``max_perf_pct``
354 Maximum P-state the driver is allowed to set in percent of the
355 maximum supported performance level (the highest supported `turbo
356 P-state <turbo_>`_).
357
358 This attribute will not be exposed if the
359 ``intel_pstate=per_cpu_perf_limits`` argument is present in the kernel
360 command line.
361
362``min_perf_pct``
363 Minimum P-state the driver is allowed to set in percent of the
364 maximum supported performance level (the highest supported `turbo
365 P-state <turbo_>`_).
366
367 This attribute will not be exposed if the
368 ``intel_pstate=per_cpu_perf_limits`` argument is present in the kernel
369 command line.
370
371``num_pstates``
372 Number of P-states supported by the processor (between 0 and 255
373 inclusive) including both turbo and non-turbo P-states (see
374 `Turbo P-states Support`_).
375
376 The value of this attribute is not affected by the ``no_turbo``
377 setting described `below <no_turbo_attr_>`_.
378
379 This attribute is read-only.
380
381``turbo_pct``
382 Ratio of the `turbo range <turbo_>`_ size to the size of the entire
383 range of supported P-states, in percent.
384
385 This attribute is read-only.
386
387.. _no_turbo_attr:
388
389``no_turbo``
390 If set (equal to 1), the driver is not allowed to set any turbo P-states
391 (see `Turbo P-states Support`_). If unset (equal to 0, which is the
392 default), turbo P-states can be set by the driver.
393 [Note that ``intel_pstate`` does not support the general ``boost``
394 attribute (supported by some other scaling drivers) which is replaced
395 by this one.]
396
397 This attribute does not affect the maximum supported frequency value
398 supplied to the ``CPUFreq`` core and exposed via the policy interface,
399 but it affects the maximum possible value of per-policy P-state limits
400 (see `Interpretation of Policy Attributes`_ below for details).
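
	For example, turbo P-states can be disabled at run time as follows
	(an illustrative session assuming sufficient privileges)::

	 # echo 1 > /sys/devices/system/cpu/intel_pstate/no_turbo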
401
402.. _status_attr:
403
404``status``
405 Operation mode of the driver: "active", "passive" or "off".
406
407 "active"
408 The driver is functional and in the `active mode
409 <Active Mode_>`_.
410
411 "passive"
412 The driver is functional and in the `passive mode
413 <Passive Mode_>`_.
414
415 "off"
416 The driver is not functional (it is not registered as a scaling
417 driver with the ``CPUFreq`` core).
418
419 This attribute can be written to in order to change the driver's
420 operation mode or to unregister it. The string written to it must be
421 one of the possible values of it and, if successful, the write will
422 cause the driver to switch over to the operation mode represented by
423 that string - or to be unregistered in the "off" case. [Actually,
424 switching over from the active mode to the passive mode or the other
425 way around causes the driver to be unregistered and registered again
426 with a different set of callbacks, so all of its settings (the global
427 as well as the per-policy ones) are then reset to their default
428 values, possibly depending on the target operation mode.]
429
430 That only is supported in some configurations, though (for example, if
431 the `HWP feature is enabled in the processor <Active Mode With HWP_>`_,
432 the operation mode of the driver cannot be changed), and if it is not
433 supported in the current configuration, writes to this attribute will
434 fail with an appropriate error.
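
	For example, the driver can be switched over to the passive mode at
	run time this way (an illustrative session; the write will fail with
	an error in configurations where the mode cannot be changed)::

	 # echo passive > /sys/devices/system/cpu/intel_pstate/status
	 # cat /sys/devices/system/cpu/intel_pstate/status
	 passive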
435
436Interpretation of Policy Attributes
437-----------------------------------
438
439The interpretation of some ``CPUFreq`` policy attributes described in
440:doc:`cpufreq` is special with ``intel_pstate`` as the current scaling driver
441and it generally depends on the driver's `operation mode <Operation Modes_>`_.
442
443First of all, the values of the ``cpuinfo_max_freq``, ``cpuinfo_min_freq`` and
444``scaling_cur_freq`` attributes are produced by applying a processor-specific
445multiplier to the internal P-state representation used by ``intel_pstate``.
446Also, the values of the ``scaling_max_freq`` and ``scaling_min_freq``
447attributes are capped by the frequency corresponding to the maximum P-state that
448the driver is allowed to set.
449
450If the ``no_turbo`` `global attribute <no_turbo_attr_>`_ is set, the driver is
451not allowed to use turbo P-states, so the maximum value of ``scaling_max_freq``
452and ``scaling_min_freq`` is limited to the maximum non-turbo P-state frequency.
453Accordingly, setting ``no_turbo`` causes ``scaling_max_freq`` and
454``scaling_min_freq`` to go down to that value if they were above it before.
455However, the old values of ``scaling_max_freq`` and ``scaling_min_freq`` will be
456restored after unsetting ``no_turbo``, unless these attributes have been written
457to after ``no_turbo`` was set.
458
459If ``no_turbo`` is not set, the maximum possible value of ``scaling_max_freq``
460and ``scaling_min_freq`` corresponds to the maximum supported turbo P-state,
461which also is the value of ``cpuinfo_max_freq`` in either case.
462
463Next, the following policy attributes have special meaning if
464``intel_pstate`` works in the `active mode <Active Mode_>`_:
465
466``scaling_available_governors``
467 List of P-state selection algorithms provided by ``intel_pstate``.
468
469``scaling_governor``
470 P-state selection algorithm provided by ``intel_pstate`` currently in
471 use with the given policy.
472
473``scaling_cur_freq``
474 Frequency of the average P-state of the CPU represented by the given
475 policy for the time interval between the last two invocations of the
476 driver's utilization update callback by the CPU scheduler for that CPU.
477
478The meaning of these attributes in the `passive mode <Passive Mode_>`_ is the
479same as for other scaling drivers.
480
481Additionally, the value of the ``scaling_driver`` attribute for ``intel_pstate``
482depends on the operation mode of the driver. Namely, it is either
483"intel_pstate" (in the `active mode <Active Mode_>`_) or "intel_cpufreq" (in the
484`passive mode <Passive Mode_>`_).
485
486Coordination of P-State Limits
487------------------------------
488
489``intel_pstate`` allows P-state limits to be set in two ways: with the help of
490the ``max_perf_pct`` and ``min_perf_pct`` `global attributes
491<Global Attributes_>`_ or via the ``scaling_max_freq`` and ``scaling_min_freq``
492``CPUFreq`` policy attributes. The coordination between those limits is based
493on the following rules, regardless of the current operation mode of the driver:
494
495 1. All CPUs are affected by the global limits (that is, none of them can be
496 requested to run faster than the global maximum and none of them can be
497 requested to run slower than the global minimum).
498
499 2. Each individual CPU is affected by its own per-policy limits (that is, it
500 cannot be requested to run faster than its own per-policy maximum and it
501 cannot be requested to run slower than its own per-policy minimum).
502
503 3. The global and per-policy limits can be set independently.
504
505If the `HWP feature is enabled in the processor <Active Mode With HWP_>`_, the
506resulting effective values are written into its registers whenever the limits
507change in order to request its internal P-state selection logic to always set
508P-states within these limits. Otherwise, the limits are taken into account by
509scaling governors (in the `passive mode <Passive Mode_>`_) and by the driver
510every time before setting a new P-state for a CPU.
511
512Additionally, if the ``intel_pstate=per_cpu_perf_limits`` command line argument
513is passed to the kernel, ``max_perf_pct`` and ``min_perf_pct`` are not exposed
514at all and the only way to set the limits is by using the policy attributes.
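
For example, a global limit and a tighter per-policy limit can be combined (an
illustrative session; the frequency value is hypothetical)::

 # echo 90 > /sys/devices/system/cpu/intel_pstate/max_perf_pct
 # echo 2400000 > /sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq

In that case CPU 0 is constrained by whichever of the two limits turns out to
be more restrictive, in accordance with the rules above.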
515
516
517Energy vs Performance Hints
518---------------------------
519
520If ``intel_pstate`` works in the `active mode with the HWP feature enabled
521<Active Mode With HWP_>`_ in the processor, additional attributes are present
522in every ``CPUFreq`` policy directory in ``sysfs``. They are intended to allow
523user space to help ``intel_pstate`` to adjust the processor's internal P-state
524selection logic by focusing it on performance or on energy-efficiency, or
525somewhere between the two extremes:
526
527``energy_performance_preference``
528 Current value of the energy vs performance hint for the given policy
529 (or the CPU represented by it).
530
531 The hint can be changed by writing to this attribute.
532
533``energy_performance_available_preferences``
534 List of strings that can be written to the
535 ``energy_performance_preference`` attribute.
536
537 They represent different energy vs performance hints and should be
538 self-explanatory, except that ``default`` represents whatever hint
539 value was set by the platform firmware.
540
541Strings written to the ``energy_performance_preference`` attribute are
542internally translated to integer values written to the processor's
543Energy-Performance Preference (EPP) knob (if supported) or its
544Energy-Performance Bias (EPB) knob.
545
546[Note that tasks may be migrated from one CPU to another by the scheduler's
547load-balancing algorithm and if different energy vs performance hints are
548set for those CPUs, that may lead to undesirable outcomes. To avoid such
549issues it is better to set the same energy vs performance hint for all CPUs
550or to pin every task potentially sensitive to them to a specific CPU.]
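
For example (an illustrative session; the list of available hints depends on
the processor)::

 # cat /sys/devices/system/cpu/cpu0/cpufreq/energy_performance_available_preferences
 default performance balance_performance balance_power power
 # echo balance_power > /sys/devices/system/cpu/cpu0/cpufreq/energy_performance_preference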
551
552.. _acpi-cpufreq:
553
554``intel_pstate`` vs ``acpi-cpufreq``
555====================================
556
557On the majority of systems supported by ``intel_pstate``, the ACPI tables
558provided by the platform firmware contain ``_PSS`` objects returning information
559that can be used for CPU performance scaling (refer to the `ACPI specification`_
560for details on the ``_PSS`` objects and the format of the information returned
561by them).
562
563The information returned by the ACPI ``_PSS`` objects is used by the
564``acpi-cpufreq`` scaling driver. On systems supported by ``intel_pstate``
565the ``acpi-cpufreq`` driver uses the same hardware CPU performance scaling
566interface, but the set of P-states it can use is limited by the ``_PSS``
567output.
568
569On those systems each ``_PSS`` object returns a list of P-states supported by
570the corresponding CPU which basically is a subset of the P-states range that can
571be used by ``intel_pstate`` on the same system, with one exception: the whole
572`turbo range <turbo_>`_ is represented by one item in it (the topmost one). By
573convention, the frequency returned by ``_PSS`` for that item is greater by 1 MHz
574than the frequency of the highest non-turbo P-state listed by it, but the
575corresponding P-state representation (following the hardware specification)
576returned for it matches the maximum supported turbo P-state (or is the
577special value 255 meaning essentially "go as high as you can get").
578
579The list of P-states returned by ``_PSS`` is reflected by the table of
580available frequencies supplied by ``acpi-cpufreq`` to the ``CPUFreq`` core and
581scaling governors, and the minimum and maximum supported frequencies reported by
582it come from that list as well. In particular, given the special representation
583of the turbo range described above, this means that the maximum supported
584frequency reported by ``acpi-cpufreq`` is higher by 1 MHz than the frequency
585of the highest supported non-turbo P-state listed by ``_PSS``, which, of course,
586affects decisions made by the scaling governors, except for ``powersave`` and
587``performance``.
588
589For example, if a given governor attempts to select a frequency proportional to
590estimated CPU load and maps the load of 100% to the maximum supported frequency
591(possibly multiplied by a constant), then it will tend to choose P-states below
592the turbo threshold if ``acpi-cpufreq`` is used as the scaling driver, because
593in that case the turbo range corresponds to a small fraction of the frequency
594band it can use (1 MHz vs 1 GHz or more). In consequence, it will only go to
595the turbo range for the highest loads, and the other loads above 50% that might
596benefit from running at turbo frequencies will be given non-turbo P-states
597instead.
598
599One more issue related to that may appear on systems supporting the
600`Configurable TDP feature <turbo_>`_ allowing the platform firmware to set the
601turbo threshold.  Namely, if that is not properly coordinated with the lists of
602P-states returned by ``_PSS``, there may be more than one item corresponding to
603a turbo P-state in those lists, which makes it difficult to avoid the turbo
604range (if that is desirable or necessary).  Usually, to avoid using turbo
605P-states overall, ``acpi-cpufreq`` simply avoids using the topmost state listed
606by ``_PSS``, but that is not sufficient when there are other turbo P-states in
607the list returned by it.
608
609Apart from the above, ``acpi-cpufreq`` works like ``intel_pstate`` in the
610`passive mode <Passive Mode_>`_, except that the number of P-states it can set
611is limited to the ones listed by the ACPI ``_PSS`` objects.
612
613
614Kernel Command Line Options for ``intel_pstate``
615================================================
616
617Several kernel command line options can be used to pass early-configuration-time
618parameters to ``intel_pstate`` in order to enforce specific behavior.  All of
619them have to be prefixed with ``intel_pstate=``.
620
621``disable``
622 Do not register ``intel_pstate`` as the scaling driver even if the
623 processor is supported by it.
624
625``passive``
626 Register ``intel_pstate`` in the `passive mode <Passive Mode_>`_ to
627 start with.
628
629 This option implies the ``no_hwp`` one described below.
630
631``force``
632 Register ``intel_pstate`` as the scaling driver instead of
633 ``acpi-cpufreq`` even if the latter is preferred on the given system.
634
635 This may prevent some platform features (such as thermal controls and
636 power capping) that rely on the availability of ACPI P-states
637 information from functioning as expected, so it should be used with
638 caution.
639
640 This option does not work with processors that are not supported by
641 ``intel_pstate`` and on platforms where the ``pcc-cpufreq`` scaling
642 driver is used instead of ``acpi-cpufreq``.
643
644``no_hwp``
645 Do not enable the `hardware-managed P-states (HWP) feature
646 <Active Mode With HWP_>`_ even if it is supported by the processor.
647
648``hwp_only``
649 Register ``intel_pstate`` as the scaling driver only if the
650 `hardware-managed P-states (HWP) feature <Active Mode With HWP_>`_ is
651 supported by the processor.
652
653``support_acpi_ppc``
654 Take ACPI ``_PPC`` performance limits into account.
655
656 If the preferred power management profile in the FADT (Fixed ACPI
657 Description Table) is set to "Enterprise Server" or "Performance
658 Server", the ACPI ``_PPC`` limits are taken into account by default
659 and this option has no effect.
660
661``per_cpu_perf_limits``
662 Use per-logical-CPU P-State limits (see `Coordination of P-state
663 Limits`_ for details).
664
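For example (an illustrative kernel command line fragment, not a
recommendation for any particular system), booting with::

 intel_pstate=passive

causes the driver to start in the `passive mode <Passive Mode_>`_ (which, as
noted above, also implies ``no_hwp``).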
665
666Diagnostics and Tuning
667======================
668
669Trace Events
670------------
671
672There are two static trace events that can be used for ``intel_pstate``
673diagnostics. One of them is the ``cpu_frequency`` trace event generally used
674by ``CPUFreq``, and the other one is the ``pstate_sample`` trace event specific
675to ``intel_pstate``. Both of them are triggered by ``intel_pstate`` only if
676it works in the `active mode <Active Mode_>`_.
677
678The following sequence of shell commands can be used to enable them and see
679their output (if the kernel is generally configured to support event tracing)::
680
681 # cd /sys/kernel/debug/tracing/
682 # echo 1 > events/power/pstate_sample/enable
683 # echo 1 > events/power/cpu_frequency/enable
684 # cat trace
685 gnome-terminal--4510 [001] ..s. 1177.680733: pstate_sample: core_busy=107 scaled=94 from=26 to=26 mperf=1143818 aperf=1230607 tsc=29838618 freq=2474476
686 cat-5235 [002] ..s. 1177.681723: cpu_frequency: state=2900000 cpu_id=2
687
688If ``intel_pstate`` works in the `passive mode <Passive Mode_>`_, the
689``cpu_frequency`` trace event will be triggered either by the ``schedutil``
690scaling governor (for the policies it is attached to), or by the ``CPUFreq``
691core (for the policies with other scaling governors).
692
693``ftrace``
694----------
695
696The ``ftrace`` interface can be used for low-level diagnostics of
697``intel_pstate``. For example, to check how often the function to set a
698 P-state is called, the ``ftrace`` filter can be set to
699:c:func:`intel_pstate_set_pstate`::
700
701 # cd /sys/kernel/debug/tracing/
702 # cat available_filter_functions | grep -i pstate
703 intel_pstate_set_pstate
704 intel_pstate_cpu_init
705 ...
706 # echo intel_pstate_set_pstate > set_ftrace_filter
707 # echo function > current_tracer
708 # cat trace | head -15
709 # tracer: function
710 #
711 # entries-in-buffer/entries-written: 80/80 #P:4
712 #
713 # _-----=> irqs-off
714 # / _----=> need-resched
715 # | / _---=> hardirq/softirq
716 # || / _--=> preempt-depth
717 # ||| / delay
718 # TASK-PID CPU# |||| TIMESTAMP FUNCTION
719 # | | | |||| | |
720 Xorg-3129 [000] ..s. 2537.644844: intel_pstate_set_pstate <-intel_pstate_timer_func
721 gnome-terminal--4510 [002] ..s. 2537.649844: intel_pstate_set_pstate <-intel_pstate_timer_func
722 gnome-shell-3409 [001] ..s. 2537.650850: intel_pstate_set_pstate <-intel_pstate_timer_func
723 <idle>-0 [000] ..s. 2537.654843: intel_pstate_set_pstate <-intel_pstate_timer_func
724
725Tuning Interface in ``debugfs``
726-------------------------------
727
728The ``powersave`` algorithm provided by ``intel_pstate`` for `the Core line of
729processors in the active mode <powersave_>`_ is based on a `PID controller`_
730whose parameters were chosen to address a number of different use cases at the
731same time.  However, it is still possible to fine-tune it to a specific workload
732and the ``debugfs`` interface under ``/sys/kernel/debug/pstate_snb/`` is
733provided for this purpose.  [Note that the ``pstate_snb`` directory is present
734only if the specific P-state selection algorithm matching the interface in it
735is actually in use.]
736
737The following files present in that directory can be used to modify the PID
738controller parameters at run time:
739
740| ``deadband``
741| ``d_gain_pct``
742| ``i_gain_pct``
743| ``p_gain_pct``
744| ``sample_rate_ms``
745| ``setpoint``
746
747Note, however, that achieving desirable results this way generally requires
748expert-level understanding of the power vs performance tradeoff, so extra care
749is recommended when attempting to do that.
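
For illustration only (the values below reflect the historical defaults for
Core processors, such as a ``setpoint`` of 97, and may differ on any given
system), the parameters can be inspected and adjusted from the shell like
this::

 # cd /sys/kernel/debug/pstate_snb/
 # cat setpoint
 97
 # echo 60 > setpoint

Lowering the setpoint this way makes the driver request higher P-states for
the same load and ramp up to the maximum P-state faster, at the cost of
energy-efficiency.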
750
751
752.. _LCEU2015: http://events.linuxfoundation.org/sites/events/files/slides/LinuxConEurope_2015.pdf
753.. _SDM: http://www.intel.com/content/www/us/en/architecture-and-technology/64-ia-32-architectures-software-developer-system-programming-manual-325384.html
754.. _ACPI specification: http://www.uefi.org/sites/default/files/resources/ACPI_6_1.pdf
755.. _PID controller: https://en.wikipedia.org/wiki/PID_controller
diff --git a/Documentation/cpu-freq/intel-pstate.txt b/Documentation/cpu-freq/intel-pstate.txt
deleted file mode 100644
index 3fdcdfd968ba..000000000000
--- a/Documentation/cpu-freq/intel-pstate.txt
+++ /dev/null
@@ -1,281 +0,0 @@
1Intel P-State driver
2--------------------
3
4This driver provides an interface to control the P-State selection for the
5SandyBridge+ Intel processors.
6
7The following document explains P-States:
8http://events.linuxfoundation.org/sites/events/files/slides/LinuxConEurope_2015.pdf
9As stated in the document, P-State doesn’t exactly mean a frequency. However, for
10the sake of the relationship with cpufreq, P-State and frequency are used
11interchangeably.
12
13Understanding the cpufreq core governors and policies is important before
14discussing more details about the Intel P-State driver. Based on what callbacks
15a cpufreq driver provides to the cpufreq core, it can support two types of
16drivers:
17- with target_index() callback: In this mode, the drivers using cpufreq core
18simply provide the minimum and maximum frequency limits and an additional
19interface target_index() to set the current frequency. The cpufreq subsystem
20has a number of scaling governors ("performance", "powersave", "ondemand",
21etc.). Depending on which governor is in use, cpufreq core will call for
22transitions to a specific frequency using target_index() callback.
23- setpolicy() callback: In this mode, drivers do not provide target_index()
24callback, so cpufreq core can't request a transition to a specific frequency.
25The driver provides minimum and maximum frequency limits and callbacks to set a
26policy. The policy in cpufreq sysfs is referred to as the "scaling governor".
27The cpufreq core can request the driver to operate in either of the two policies:
28"performance" and "powersave". The driver decides which frequency to use based
29on the above policy selection considering minimum and maximum frequency limits.
30
31The Intel P-State driver falls under the latter category, which implements the
32setpolicy() callback. This driver decides what P-State to use based on the
33requested policy from the cpufreq core. If the processor is capable of
34selecting its next P-State internally, then the driver will offload this
35responsibility to the processor (aka HWP: Hardware P-States). If not, the
36driver implements algorithms to select the next P-State.
37
38Since these policies are implemented in the driver, they are not the same as the
39cpufreq scaling governors implementation, even if they have the same name in
40the cpufreq sysfs (scaling_governors). For example the "performance" policy is
41similar to cpufreq’s "performance" governor, but "powersave" is completely
42different than the cpufreq "powersave" governor. The strategy here is similar
43to cpufreq "ondemand", where the requested P-State is related to the system load.
44
45Sysfs Interface
46
47In addition to the frequency-controlling interfaces provided by the cpufreq
48core, the driver provides its own sysfs files to control the P-State selection.
49These files have been added to /sys/devices/system/cpu/intel_pstate/.
50Any changes made to these files are applicable to all CPUs (even in a
51multi-package system; refer to the later section on "Per-CPU limits").
52
53 max_perf_pct: Limits the maximum P-State that will be requested by
54 the driver. It states it as a percentage of the available performance. The
55 available (P-State) performance may be reduced by the no_turbo
56 setting described below.
57
58 min_perf_pct: Limits the minimum P-State that will be requested by
59 the driver. It states it as a percentage of the max (non-turbo)
60 performance level.
61
62	no_turbo: Limits the driver to selecting P-States below the turbo
63 frequency range.
64
65	turbo_pct: Displays the percentage of the total hardware-supported
66	performance that is in the turbo range. This number
67 is independent of whether turbo has been disabled or not.
68
69 num_pstates: Displays the number of P-States that are supported
70 by hardware. This number is independent of whether turbo has
71 been disabled or not.
72
73For example, if a system has these parameters:
74 Max 1 core turbo ratio: 0x21 (Max 1 core ratio is the maximum P-State)
75 Max non turbo ratio: 0x17
76 Minimum ratio : 0x08 (Here the ratio is called max efficiency ratio)
77
78Sysfs will show :
79 max_perf_pct:100, which corresponds to 1 core ratio
80 min_perf_pct:24, max_efficiency_ratio / max 1 Core ratio
81 no_turbo:0, turbo is not disabled
82 num_pstates:26 = (max 1 Core ratio - Max Efficiency Ratio + 1)
83 turbo_pct:39 = (max 1 core ratio - max non turbo ratio) / num_pstates
84
85Refer to "Intel® 64 and IA-32 Architectures Software Developer’s Manual
86Volume 3: System Programming Guide" to understand ratios.
87
88There is one more sysfs attribute in /sys/devices/system/cpu/intel_pstate/
89that can be used for controlling the operation mode of the driver:
90
91 status: Three settings are possible:
92 "off" - The driver is not in use at this time.
93 "active" - The driver works as a P-state governor (default).
94 "passive" - The driver works as a regular cpufreq one and collaborates
95 with the generic cpufreq governors (it sets P-states as
96 requested by those governors).
97 The current setting is returned by reads from this attribute. Writing one
98 of the above strings to it changes the operation mode as indicated by that
99 string, if possible. If HW-managed P-states (HWP) are enabled, it is not
100 possible to change the driver's operation mode and attempts to write to
101 this attribute will fail.
102
103cpufreq sysfs for Intel P-State
104
105Since this driver registers with cpufreq, cpufreq sysfs is also presented.
106There are some important differences, which need to be considered.
107
108scaling_cur_freq: This displays the real frequency which was used during
109the last sample period instead of what is requested. Some other cpufreq drivers,
110like acpi-cpufreq, display what is requested (some changes are on the
111way to fix this for the acpi-cpufreq driver). The same is true for frequencies
112displayed at /proc/cpuinfo.
113
114scaling_governor: This displays the current active policy. Since each CPU has a
115cpufreq sysfs, it is possible to set a scaling governor to each CPU. But this
116is not possible with Intel P-States, as there is one common policy for all
117CPUs. Here, the last requested policy will be applicable to all CPUs. It is
118suggested that one use the cpupower utility to change policy to all CPUs at the
119same time.
120
121scaling_setspeed: This attribute can never be used with Intel P-State.
122
123scaling_max_freq/scaling_min_freq: This interface can be used similarly to
124the max_perf_pct/min_perf_pct of Intel P-State sysfs. However, since frequencies
125are converted to the nearest possible P-State, this is prone to rounding errors.
126This method is not the preferred way to limit performance.
127
128affected_cpus: Not used
129related_cpus: Not used
130
131For contemporary Intel processors, the frequency is controlled by the
132processor itself and the P-State exposed to software is related to
133performance levels. The idea that the processor can be set to run at a
134single frequency is fictional for Intel Core processors. Even if the scaling
135driver selects a single P-State, the actual frequency the processor
136will run at is selected by the processor itself.
137
138Per-CPU limits
139
140The kernel command line option "intel_pstate=per_cpu_perf_limits" forces
141the intel_pstate driver to use per-CPU performance limits. When it is set,
142the sysfs control interface described above is subject to limitations.
143- The following controls are not available for either read or write:
144 /sys/devices/system/cpu/intel_pstate/max_perf_pct
145 /sys/devices/system/cpu/intel_pstate/min_perf_pct
146- The following controls can be used to set performance limits, as far as the
147architecture of the processor permits:
148 /sys/devices/system/cpu/cpu*/cpufreq/scaling_max_freq
149 /sys/devices/system/cpu/cpu*/cpufreq/scaling_min_freq
150 /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor
151- User can still observe turbo percent and number of P-States from
152 /sys/devices/system/cpu/intel_pstate/turbo_pct
153 /sys/devices/system/cpu/intel_pstate/num_pstates
154- User can read/write the system-wide turbo status
155	/sys/devices/system/cpu/intel_pstate/no_turbo
156
157Support of energy performance hints
158It is possible to provide hints to the HWP algorithms in the processor
159to be more performance centric or more energy centric. When the driver
160is using HWP, two additional cpufreq sysfs attributes are presented for
161each logical CPU.
162These attributes are:
163 - energy_performance_available_preferences
164 - energy_performance_preference
165
166To get list of supported hints:
167$ cat energy_performance_available_preferences
168 default performance balance_performance balance_power power
169
170The current preference can be read or changed via the cpufreq sysfs
171attribute "energy_performance_preference". Reading from this attribute
172will display the current effective setting. The user can write any of the
173valid preference strings to this attribute, and can always restore the
174power-on default by writing "default".
175
176Since threads can migrate to different CPUs, it is possible that the
177new CPU has a different energy performance preference than the previous
178one. To avoid such issues, either pin threads to specific CPUs or set
179the same energy performance preference value for all CPUs.
180
181Tuning Intel P-State driver
182
183When performance can be tuned using a PID (Proportional Integral
184Derivative) controller, debugfs files are provided for adjusting its parameters.
185They are presented under:
186/sys/kernel/debug/pstate_snb/
187
188The PID tunable parameters are:
189 deadband
190 d_gain_pct
191 i_gain_pct
192 p_gain_pct
193 sample_rate_ms
194 setpoint
195
196To adjust these parameters, some understanding of the driver implementation is
197necessary. Some tweaks are described here, but be very careful. Adjusting
198them requires an expert-level understanding of the power and performance
199relationship. These limits are only useful when the "powersave" policy is active.
200
201-To make the system more responsive to load changes, sample_rate_ms can
202be adjusted (current default is 10ms).
203-To make the system use higher performance, even if the load is lower, setpoint
204can be adjusted to a lower number. This will also lead to a faster ramp-up time
205to reach the maximum P-State.
206If there are no derivative and integral coefficients, the next P-State will be
207equal to:
208 current P-State - ((setpoint - current cpu load) * p_gain_pct)
209
210For example, if the current PID parameters are (which are the defaults for Core
211processors like SandyBridge):
212 deadband = 0
213 d_gain_pct = 0
214 i_gain_pct = 0
215 p_gain_pct = 20
216 sample_rate_ms = 10
217 setpoint = 97
218
219If the current P-State = 0x08 and current load = 100, this will result in the
220next P-State = 0x08 - ((97 - 100) * 0.2) = 8.6 (rounded to 9). Here the P-State
221goes up by only 1. If during the next sample interval the current load doesn't
222change and is still 100, then the P-State goes up by one again. This process will
223continue as long as the load is more than the setpoint until the maximum P-State
224is reached.
225
226For the same load at setpoint = 60, this will result in the next P-State
227= 0x08 - ((60 - 100) * 0.2) = 16
228So by changing the setpoint from 97 to 60, the next P-State increases
229from 9 to 16. This will make the processor execute at a higher
230P-State for the same CPU load. If the load continues to be more than the
231setpoint during the next sample intervals, then the P-State will go up again until
232the maximum P-State is reached. But the ramp-up time to reach the maximum P-State
233will be much faster when the setpoint is 60 compared to 97.
234
235Debugging Intel P-State driver
236
237Event tracing
238To debug P-State transitions, the Linux event tracing interface can be used.
239There are two specific events, which can be enabled (provided the kernel
240configs related to event tracing are enabled).
241
242# cd /sys/kernel/debug/tracing/
243# echo 1 > events/power/pstate_sample/enable
244# echo 1 > events/power/cpu_frequency/enable
245# cat trace
246gnome-terminal--4510 [001] ..s. 1177.680733: pstate_sample: core_busy=107
247 scaled=94 from=26 to=26 mperf=1143818 aperf=1230607 tsc=29838618
248 freq=2474476
249cat-5235 [002] ..s. 1177.681723: cpu_frequency: state=2900000 cpu_id=2
250
251
252Using ftrace
253
254If function level tracing is required, the Linux ftrace interface can be used.
255For example, if we want to check how often the function to set a P-State is
256called, we can set the ftrace filter to intel_pstate_set_pstate.
257
258# cd /sys/kernel/debug/tracing/
259# cat available_filter_functions | grep -i pstate
260intel_pstate_set_pstate
261intel_pstate_cpu_init
262...
263
264# echo intel_pstate_set_pstate > set_ftrace_filter
265# echo function > current_tracer
266# cat trace | head -15
267# tracer: function
268#
269# entries-in-buffer/entries-written: 80/80 #P:4
270#
271# _-----=> irqs-off
272# / _----=> need-resched
273# | / _---=> hardirq/softirq
274# || / _--=> preempt-depth
275# ||| / delay
276# TASK-PID CPU# |||| TIMESTAMP FUNCTION
277# | | | |||| | |
278 Xorg-3129 [000] ..s. 2537.644844: intel_pstate_set_pstate <-intel_pstate_timer_func
279 gnome-terminal--4510 [002] ..s. 2537.649844: intel_pstate_set_pstate <-intel_pstate_timer_func
280 gnome-shell-3409 [001] ..s. 2537.650850: intel_pstate_set_pstate <-intel_pstate_timer_func
281 <idle>-0 [000] ..s. 2537.654843: intel_pstate_set_pstate <-intel_pstate_timer_func
diff --git a/Documentation/devicetree/bindings/clock/sunxi-ccu.txt b/Documentation/devicetree/bindings/clock/sunxi-ccu.txt
index e9c5a1d9834a..f465647a4dd2 100644
--- a/Documentation/devicetree/bindings/clock/sunxi-ccu.txt
+++ b/Documentation/devicetree/bindings/clock/sunxi-ccu.txt
@@ -22,7 +22,8 @@ Required properties :
22- #clock-cells : must contain 1 22- #clock-cells : must contain 1
23- #reset-cells : must contain 1 23- #reset-cells : must contain 1
24 24
25For the PRCM CCUs on H3/A64, one more clock is needed: 25For the PRCM CCUs on H3/A64, two more clocks are needed:
26- "pll-periph": the SoC's peripheral PLL from the main CCU
26- "iosc": the SoC's internal frequency oscillator 27- "iosc": the SoC's internal frequency oscillator
27 28
28Example for generic CCU: 29Example for generic CCU:
@@ -39,8 +40,8 @@ Example for PRCM CCU:
39r_ccu: clock@01f01400 { 40r_ccu: clock@01f01400 {
40 compatible = "allwinner,sun50i-a64-r-ccu"; 41 compatible = "allwinner,sun50i-a64-r-ccu";
41 reg = <0x01f01400 0x100>; 42 reg = <0x01f01400 0x100>;
42 clocks = <&osc24M>, <&osc32k>, <&iosc>; 43 clocks = <&osc24M>, <&osc32k>, <&iosc>, <&ccu CLK_PLL_PERIPH0>;
43 clock-names = "hosc", "losc", "iosc"; 44 clock-names = "hosc", "losc", "iosc", "pll-periph";
44 #clock-cells = <1>; 45 #clock-cells = <1>;
45 #reset-cells = <1>; 46 #reset-cells = <1>;
46}; 47};
diff --git a/Documentation/devicetree/bindings/gpio/gpio-mvebu.txt b/Documentation/devicetree/bindings/gpio/gpio-mvebu.txt
index 42c3bb2d53e8..01e331a5f3e7 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-mvebu.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio-mvebu.txt
@@ -41,9 +41,9 @@ Required properties:
41Optional properties: 41Optional properties:
42 42
43In order to use the GPIO lines in PWM mode, some additional optional 43In order to use the GPIO lines in PWM mode, some additional optional
44properties are required. Only Armada 370 and XP support these properties. 44properties are required.
45 45
46- compatible: Must contain "marvell,armada-370-xp-gpio" 46- compatible: Must contain "marvell,armada-370-gpio"
47 47
48- reg: an additional register set is needed, for the GPIO Blink 48- reg: an additional register set is needed, for the GPIO Blink
49 Counter on/off registers. 49 Counter on/off registers.
@@ -71,7 +71,7 @@ Example:
71 }; 71 };
72 72
73 gpio1: gpio@18140 { 73 gpio1: gpio@18140 {
74 compatible = "marvell,armada-370-xp-gpio"; 74 compatible = "marvell,armada-370-gpio";
75 reg = <0x18140 0x40>, <0x181c8 0x08>; 75 reg = <0x18140 0x40>, <0x181c8 0x08>;
76 reg-names = "gpio", "pwm"; 76 reg-names = "gpio", "pwm";
77 ngpios = <17>; 77 ngpios = <17>;
diff --git a/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.txt b/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.txt
index 6db22103e2dd..025cf8c9324a 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.txt
@@ -36,7 +36,7 @@ Optional properties:
36 control gpios 36 control gpios
37 37
38 - threshold: allows setting the "click"-threshold in the range 38 - threshold: allows setting the "click"-threshold in the range
39 from 20 to 80. 39 from 0 to 80.
40 40
41 - gain: allows setting the sensitivity in the range from 0 to 41 - gain: allows setting the sensitivity in the range from 0 to
42 31. Note that lower values indicate higher 42 31. Note that lower values indicate higher
diff --git a/Documentation/devicetree/bindings/mfd/hisilicon,hi655x.txt b/Documentation/devicetree/bindings/mfd/hisilicon,hi655x.txt
index 05485699d70e..9630ac0e4b56 100644
--- a/Documentation/devicetree/bindings/mfd/hisilicon,hi655x.txt
+++ b/Documentation/devicetree/bindings/mfd/hisilicon,hi655x.txt
@@ -16,6 +16,11 @@ Required properties:
16- reg: Base address of PMIC on Hi6220 SoC. 16- reg: Base address of PMIC on Hi6220 SoC.
17- interrupt-controller: Hi655x has internal IRQs (has own IRQ domain). 17- interrupt-controller: Hi655x has internal IRQs (has own IRQ domain).
18- pmic-gpios: The GPIO used by PMIC IRQ. 18- pmic-gpios: The GPIO used by PMIC IRQ.
19- #clock-cells: From common clock binding; shall be set to 0
20
21Optional properties:
22- clock-output-names: From common clock binding to override the
23 default output clock name
19 24
20Example: 25Example:
21 pmic: pmic@f8000000 { 26 pmic: pmic@f8000000 {
@@ -24,4 +29,5 @@ Example:
24 interrupt-controller; 29 interrupt-controller;
25 #interrupt-cells = <2>; 30 #interrupt-cells = <2>;
26 pmic-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>; 31 pmic-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>;
32 #clock-cells = <0>;
27 } 33 }
diff --git a/Documentation/devicetree/bindings/mfd/stm32-timers.txt b/Documentation/devicetree/bindings/mfd/stm32-timers.txt
index bbd083f5600a..1db6e0057a63 100644
--- a/Documentation/devicetree/bindings/mfd/stm32-timers.txt
+++ b/Documentation/devicetree/bindings/mfd/stm32-timers.txt
@@ -31,7 +31,7 @@ Example:
31 compatible = "st,stm32-timers"; 31 compatible = "st,stm32-timers";
32 reg = <0x40010000 0x400>; 32 reg = <0x40010000 0x400>;
33 clocks = <&rcc 0 160>; 33 clocks = <&rcc 0 160>;
34 clock-names = "clk_int"; 34 clock-names = "int";
35 35
36 pwm { 36 pwm {
37 compatible = "st,stm32-pwm"; 37 compatible = "st,stm32-pwm";
diff --git a/Documentation/devicetree/bindings/mmc/mmc-pwrseq-simple.txt b/Documentation/devicetree/bindings/mmc/mmc-pwrseq-simple.txt
index e25436861867..9029b45b8a22 100644
--- a/Documentation/devicetree/bindings/mmc/mmc-pwrseq-simple.txt
+++ b/Documentation/devicetree/bindings/mmc/mmc-pwrseq-simple.txt
@@ -18,6 +18,8 @@ Optional properties:
18 "ext_clock" (External clock provided to the card). 18 "ext_clock" (External clock provided to the card).
19- post-power-on-delay-ms : Delay in ms after powering the card and 19- post-power-on-delay-ms : Delay in ms after powering the card and
20 de-asserting the reset-gpios (if any) 20 de-asserting the reset-gpios (if any)
21- power-off-delay-us : Delay in us after asserting the reset-gpios (if any)
22 during power off of the card.
21 23
22Example: 24Example:
23 25
diff --git a/Documentation/devicetree/bindings/net/dsa/b53.txt b/Documentation/devicetree/bindings/net/dsa/b53.txt
index d6c6e41648d4..8ec2ca21adeb 100644
--- a/Documentation/devicetree/bindings/net/dsa/b53.txt
+++ b/Documentation/devicetree/bindings/net/dsa/b53.txt
@@ -34,7 +34,7 @@ Required properties:
34 "brcm,bcm6328-switch" 34 "brcm,bcm6328-switch"
35 "brcm,bcm6368-switch" and the mandatory "brcm,bcm63xx-switch" 35 "brcm,bcm6368-switch" and the mandatory "brcm,bcm63xx-switch"
36 36
37See Documentation/devicetree/bindings/dsa/dsa.txt for a list of additional 37See Documentation/devicetree/bindings/net/dsa/dsa.txt for a list of additional
38required and optional properties. 38required and optional properties.
39 39
40Examples: 40Examples:
diff --git a/Documentation/devicetree/bindings/net/dsa/marvell.txt b/Documentation/devicetree/bindings/net/dsa/marvell.txt
index 7ef9dbb08957..1d4d0f49c9d0 100644
--- a/Documentation/devicetree/bindings/net/dsa/marvell.txt
+++ b/Documentation/devicetree/bindings/net/dsa/marvell.txt
@@ -26,6 +26,10 @@ Optional properties:
26- interrupt-controller : Indicates the switch is itself an interrupt 26- interrupt-controller : Indicates the switch is itself an interrupt
27 controller. This is used for the PHY interrupts. 27 controller. This is used for the PHY interrupts.
28#interrupt-cells = <2> : Controller uses two cells, number and flag 28#interrupt-cells = <2> : Controller uses two cells, number and flag
29- eeprom-length : Set to the length of an EEPROM connected to the
30 switch. Must be set if the switch can not detect
31 the presence and/or size of a connected EEPROM,
32 otherwise optional.
29- mdio : Container of PHY and devices on the switches MDIO 33- mdio : Container of PHY and devices on the switches MDIO
30 bus. 34 bus.
31- mdio? : Container of PHYs and devices on the external MDIO 35- mdio? : Container of PHYs and devices on the external MDIO
diff --git a/Documentation/devicetree/bindings/net/fsl-fec.txt b/Documentation/devicetree/bindings/net/fsl-fec.txt
index a1e3693cca16..6f55bdd52f8a 100644
--- a/Documentation/devicetree/bindings/net/fsl-fec.txt
+++ b/Documentation/devicetree/bindings/net/fsl-fec.txt
@@ -15,6 +15,10 @@ Optional properties:
15- phy-reset-active-high : If present then the reset sequence using the GPIO 15- phy-reset-active-high : If present then the reset sequence using the GPIO
16 specified in the "phy-reset-gpios" property is reversed (H=reset state, 16 specified in the "phy-reset-gpios" property is reversed (H=reset state,
17 L=operation state). 17 L=operation state).
18- phy-reset-post-delay : Post reset delay in milliseconds. If present then
19 a delay of phy-reset-post-delay milliseconds will be observed after the
20	phy-reset-gpios has been toggled. If omitted, no delay is
21	observed. The delay must be in the range of 1ms to 1000ms; other values are invalid.
18- phy-supply : regulator that powers the Ethernet PHY. 22- phy-supply : regulator that powers the Ethernet PHY.
19- phy-handle : phandle to the PHY device connected to this device. 23- phy-handle : phandle to the PHY device connected to this device.
20- fixed-link : Assume a fixed link. See fixed-link.txt in the same directory. 24- fixed-link : Assume a fixed link. See fixed-link.txt in the same directory.
diff --git a/Documentation/devicetree/bindings/net/smsc911x.txt b/Documentation/devicetree/bindings/net/smsc911x.txt
index 16c3a9501f5d..acfafc8e143c 100644
--- a/Documentation/devicetree/bindings/net/smsc911x.txt
+++ b/Documentation/devicetree/bindings/net/smsc911x.txt
@@ -27,6 +27,7 @@ Optional properties:
27 of the device. On many systems this is wired high so the device goes 27 of the device. On many systems this is wired high so the device goes
28 out of reset at power-on, but if it is under program control, this 28 out of reset at power-on, but if it is under program control, this
29 optional GPIO can wake up in response to it. 29 optional GPIO can wake up in response to it.
30- vdd33a-supply, vddvario-supply : 3.3V analog and IO logic power supplies
30 31
31Examples: 32Examples:
32 33
diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
index 71a3c134af1b..f01d154090da 100644
--- a/Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
+++ b/Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
@@ -247,7 +247,6 @@ bias-bus-hold - latch weakly
247bias-pull-up - pull up the pin 247bias-pull-up - pull up the pin
248bias-pull-down - pull down the pin 248bias-pull-down - pull down the pin
249bias-pull-pin-default - use pin-default pull state 249bias-pull-pin-default - use pin-default pull state
250bi-directional - pin supports simultaneous input/output operations
251drive-push-pull - drive actively high and low 250drive-push-pull - drive actively high and low
252drive-open-drain - drive with open drain 251drive-open-drain - drive with open drain
253drive-open-source - drive with open source 252drive-open-source - drive with open source
@@ -260,7 +259,6 @@ input-debounce - debounce mode with debound time X
260power-source - select between different power supplies 259power-source - select between different power supplies
261low-power-enable - enable low power mode 260low-power-enable - enable low power mode
262low-power-disable - disable low power mode 261low-power-disable - disable low power mode
263output-enable - enable output on pin regardless of output value
264output-low - set the pin to output mode with low level 262output-low - set the pin to output mode with low level
265output-high - set the pin to output mode with high level 263output-high - set the pin to output mode with high level
266slew-rate - set the slew rate 264slew-rate - set the slew rate
diff --git a/Documentation/devicetree/bindings/spi/spi-bus.txt b/Documentation/devicetree/bindings/spi/spi-bus.txt
index 4b1d6e74c744..1f6e86f787ef 100644
--- a/Documentation/devicetree/bindings/spi/spi-bus.txt
+++ b/Documentation/devicetree/bindings/spi/spi-bus.txt
@@ -1,17 +1,23 @@
1SPI (Serial Peripheral Interface) busses 1SPI (Serial Peripheral Interface) busses
2 2
3SPI busses can be described with a node for the SPI master device 3SPI busses can be described with a node for the SPI controller device
4and a set of child nodes for each SPI slave on the bus. For this 4and a set of child nodes for each SPI slave on the bus. The system's SPI
5discussion, it is assumed that the system's SPI controller is in 5controller may be described for use in SPI master mode or in SPI slave mode,
6SPI master mode. This binding does not describe SPI controllers 6but not for both at the same time.
7in slave mode.
8 7
9The SPI master node requires the following properties: 8The SPI controller node requires the following properties:
9- compatible - Name of SPI bus controller following generic names
10 recommended practice.
11
12In master mode, the SPI controller node requires the following additional
13properties:
10- #address-cells - number of cells required to define a chip select 14- #address-cells - number of cells required to define a chip select
11 address on the SPI bus. 15 address on the SPI bus.
12- #size-cells - should be zero. 16- #size-cells - should be zero.
13- compatible - name of SPI bus controller following generic names 17
14 recommended practice. 18In slave mode, the SPI controller node requires one additional property:
19- spi-slave - Empty property.
20
15No other properties are required in the SPI bus node. It is assumed 21No other properties are required in the SPI bus node. It is assumed
16that a driver for an SPI bus device will understand that it is an SPI bus. 22that a driver for an SPI bus device will understand that it is an SPI bus.
17However, the binding does not attempt to define the specific method for 23However, the binding does not attempt to define the specific method for
@@ -21,7 +27,7 @@ assumption that board specific platform code will be used to manage
21chip selects. Individual drivers can define additional properties to 27chip selects. Individual drivers can define additional properties to
22support describing the chip select layout. 28support describing the chip select layout.
23 29
24Optional properties: 30Optional properties (master mode only):
25- cs-gpios - gpios chip select. 31- cs-gpios - gpios chip select.
26- num-cs - total number of chipselects. 32- num-cs - total number of chipselects.
27 33
@@ -41,28 +47,36 @@ cs1 : native
41cs2 : &gpio1 1 0 47cs2 : &gpio1 1 0
42cs3 : &gpio1 2 0 48cs3 : &gpio1 2 0
43 49
44SPI slave nodes must be children of the SPI master node and can 50
45contain the following properties. 51SPI slave nodes must be children of the SPI controller node.
46- reg - (required) chip select address of device. 52
47- compatible - (required) name of SPI device following generic names 53In master mode, one or more slave nodes (up to the number of chip selects) can
48 recommended practice. 54be present. Required properties are:
49- spi-max-frequency - (required) Maximum SPI clocking speed of device in Hz. 55- compatible - Name of SPI device following generic names recommended
50- spi-cpol - (optional) Empty property indicating device requires 56 practice.
51 inverse clock polarity (CPOL) mode. 57- reg - Chip select address of device.
52- spi-cpha - (optional) Empty property indicating device requires 58- spi-max-frequency - Maximum SPI clocking speed of device in Hz.
53 shifted clock phase (CPHA) mode. 59
54- spi-cs-high - (optional) Empty property indicating device requires 60In slave mode, the (single) slave node is optional.
55 chip select active high. 61If present, it must be called "slave". Required properties are:
56- spi-3wire - (optional) Empty property indicating device requires 62- compatible - Name of SPI device following generic names recommended
57 3-wire mode. 63 practice.
58- spi-lsb-first - (optional) Empty property indicating device requires 64
59 LSB first mode. 65All slave nodes can contain the following optional properties:
60- spi-tx-bus-width - (optional) The bus width (number of data wires) that is 66- spi-cpol - Empty property indicating device requires inverse clock
61 used for MOSI. Defaults to 1 if not present. 67 polarity (CPOL) mode.
62- spi-rx-bus-width - (optional) The bus width (number of data wires) that is 68- spi-cpha - Empty property indicating device requires shifted clock
63 used for MISO. Defaults to 1 if not present. 69 phase (CPHA) mode.
64- spi-rx-delay-us - (optional) Microsecond delay after a read transfer. 70- spi-cs-high - Empty property indicating device requires chip select
65- spi-tx-delay-us - (optional) Microsecond delay after a write transfer. 71 active high.
72- spi-3wire - Empty property indicating device requires 3-wire mode.
73- spi-lsb-first - Empty property indicating device requires LSB first mode.
74- spi-tx-bus-width - The bus width (number of data wires) that is used for MOSI.
75 Defaults to 1 if not present.
76- spi-rx-bus-width - The bus width (number of data wires) that is used for MISO.
77 Defaults to 1 if not present.
78- spi-rx-delay-us - Microsecond delay after a read transfer.
79- spi-tx-delay-us - Microsecond delay after a write transfer.
66 80
67Some SPI controllers and devices support Dual and Quad SPI transfer mode. 81Some SPI controllers and devices support Dual and Quad SPI transfer mode.
68It allows data in the SPI system to be transferred using 2 wires (DUAL) or 4 82It allows data in the SPI system to be transferred using 2 wires (DUAL) or 4
diff --git a/Documentation/devicetree/bindings/spi/spi-meson.txt b/Documentation/devicetree/bindings/spi/spi-meson.txt
index dc6d0313324a..825c39cae74a 100644
--- a/Documentation/devicetree/bindings/spi/spi-meson.txt
+++ b/Documentation/devicetree/bindings/spi/spi-meson.txt
@@ -20,3 +20,34 @@ Required properties:
20 #address-cells = <1>; 20 #address-cells = <1>;
21 #size-cells = <0>; 21 #size-cells = <0>;
22 }; 22 };
23
24* SPICC (SPI Communication Controller)
25
26The Meson SPICC is a generic SPI controller for general purpose Full-Duplex
27communications with dedicated 16-word RX/TX PIO FIFOs.
28
29Required properties:
30 - compatible: should be "amlogic,meson-gx-spicc" on Amlogic GX SoCs.
31 - reg: physical base address and length of the controller registers
32 - interrupts: The interrupt specifier
33 - clock-names: Must contain "core"
34 - clocks: phandle of the input clock for the baud rate generator
35 - #address-cells: should be 1
36 - #size-cells: should be 0
37
38Optional properties:
39 - resets: phandle of the internal reset line
40
41See ../spi/spi-bus.txt for more details on the required and optional
42properties of SPI bus master and slave devices.
43
44Example :
45 spi@c1108d80 {
46 compatible = "amlogic,meson-gx-spicc";
47 reg = <0xc1108d80 0x80>;
48 interrupts = <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>;
49 clock-names = "core";
50 clocks = <&clk81>;
51 #address-cells = <1>;
52 #size-cells = <0>;
53 };
diff --git a/Documentation/devicetree/bindings/spi/spi-mt65xx.txt b/Documentation/devicetree/bindings/spi/spi-mt65xx.txt
index e43f4cf4cf35..e0318cf92d73 100644
--- a/Documentation/devicetree/bindings/spi/spi-mt65xx.txt
+++ b/Documentation/devicetree/bindings/spi/spi-mt65xx.txt
@@ -3,7 +3,9 @@ Binding for MTK SPI controller
3Required properties: 3Required properties:
4- compatible: should be one of the following. 4- compatible: should be one of the following.
5 - mediatek,mt2701-spi: for mt2701 platforms 5 - mediatek,mt2701-spi: for mt2701 platforms
6 - mediatek,mt2712-spi: for mt2712 platforms
6 - mediatek,mt6589-spi: for mt6589 platforms 7 - mediatek,mt6589-spi: for mt6589 platforms
8 - mediatek,mt7622-spi: for mt7622 platforms
7 - mediatek,mt8135-spi: for mt8135 platforms 9 - mediatek,mt8135-spi: for mt8135 platforms
8 - mediatek,mt8173-spi: for mt8173 platforms 10 - mediatek,mt8173-spi: for mt8173 platforms
9 11
diff --git a/Documentation/devicetree/bindings/staging/ion/hi6220-ion.txt b/Documentation/devicetree/bindings/staging/ion/hi6220-ion.txt
deleted file mode 100644
index c59e27c632c1..000000000000
--- a/Documentation/devicetree/bindings/staging/ion/hi6220-ion.txt
+++ /dev/null
@@ -1,31 +0,0 @@
1Hi6220 SoC ION
2===================================================================
3Required properties:
4- compatible : "hisilicon,hi6220-ion"
5- list of the ION heaps
6 - heap name : maybe heap_sys_user@0
7 - heap id : id should be unique in the system.
8 - heap base : base ddr address of the heap,0 means that
9 it is dynamic.
10 - heap size : memory size and 0 means it is dynamic.
11 - heap type : the heap type of the heap, please also
12 see the define in ion.h(drivers/staging/android/uapi/ion.h)
13-------------------------------------------------------------------
14Example:
15 hi6220-ion {
16 compatible = "hisilicon,hi6220-ion";
17 heap_sys_user@0 {
18 heap-name = "sys_user";
19 heap-id = <0x0>;
20 heap-base = <0x0>;
21 heap-size = <0x0>;
22 heap-type = "ion_system";
23 };
24 heap_sys_contig@0 {
25 heap-name = "sys_contig";
26 heap-id = <0x1>;
27 heap-base = <0x0>;
28 heap-size = <0x0>;
29 heap-type = "ion_system_contig";
30 };
31 };
diff --git a/Documentation/devicetree/bindings/usb/dwc2.txt b/Documentation/devicetree/bindings/usb/dwc2.txt
index 00bea038639e..fcf199b64d3d 100644
--- a/Documentation/devicetree/bindings/usb/dwc2.txt
+++ b/Documentation/devicetree/bindings/usb/dwc2.txt
@@ -10,6 +10,7 @@ Required properties:
10 - "rockchip,rk3288-usb", "rockchip,rk3066-usb", "snps,dwc2": for rk3288 Soc; 10 - "rockchip,rk3288-usb", "rockchip,rk3066-usb", "snps,dwc2": for rk3288 Soc;
11 - "lantiq,arx100-usb": The DWC2 USB controller instance in Lantiq ARX SoCs; 11 - "lantiq,arx100-usb": The DWC2 USB controller instance in Lantiq ARX SoCs;
12 - "lantiq,xrx200-usb": The DWC2 USB controller instance in Lantiq XRX SoCs; 12 - "lantiq,xrx200-usb": The DWC2 USB controller instance in Lantiq XRX SoCs;
13 - "amlogic,meson8-usb": The DWC2 USB controller instance in Amlogic Meson8 SoCs;
13 - "amlogic,meson8b-usb": The DWC2 USB controller instance in Amlogic Meson8b SoCs; 14 - "amlogic,meson8b-usb": The DWC2 USB controller instance in Amlogic Meson8b SoCs;
14 - "amlogic,meson-gxbb-usb": The DWC2 USB controller instance in Amlogic S905 SoCs; 15 - "amlogic,meson-gxbb-usb": The DWC2 USB controller instance in Amlogic S905 SoCs;
15 - "amcc,dwc-otg": The DWC2 USB controller instance in AMCC Canyonlands 460EX SoCs; 16 - "amcc,dwc-otg": The DWC2 USB controller instance in AMCC Canyonlands 460EX SoCs;
diff --git a/Documentation/input/devices/edt-ft5x06.rst b/Documentation/input/devices/edt-ft5x06.rst
index 2032f0b7a8fa..1ccc94b192b7 100644
--- a/Documentation/input/devices/edt-ft5x06.rst
+++ b/Documentation/input/devices/edt-ft5x06.rst
@@ -15,7 +15,7 @@ It has been tested with the following devices:
15The driver allows configuration of the touch screen via a set of sysfs files: 15The driver allows configuration of the touch screen via a set of sysfs files:
16 16
17/sys/class/input/eventX/device/device/threshold: 17/sys/class/input/eventX/device/device/threshold:
18 allows setting the "click"-threshold in the range from 20 to 80. 18 allows setting the "click"-threshold in the range from 0 to 80.
19 19
20/sys/class/input/eventX/device/device/gain: 20/sys/class/input/eventX/device/device/gain:
21 allows setting the sensitivity in the range from 0 to 31. Note that 21 allows setting the sensitivity in the range from 0 to 31. Note that
diff --git a/Documentation/networking/dpaa.txt b/Documentation/networking/dpaa.txt
new file mode 100644
index 000000000000..76e016d4d344
--- /dev/null
+++ b/Documentation/networking/dpaa.txt
@@ -0,0 +1,194 @@
1The QorIQ DPAA Ethernet Driver
2==============================
3
4Authors:
5Madalin Bucur <madalin.bucur@nxp.com>
6Camelia Groza <camelia.groza@nxp.com>
7
8Contents
9========
10
11 - DPAA Ethernet Overview
12 - DPAA Ethernet Supported SoCs
13 - Configuring DPAA Ethernet in your kernel
14 - DPAA Ethernet Frame Processing
15 - DPAA Ethernet Features
16 - Debugging
17
18DPAA Ethernet Overview
19======================
20
21DPAA stands for Data Path Acceleration Architecture and it is a
22set of networking acceleration IPs that are available on several
23generations of SoCs, both on PowerPC and ARM64.
24
25The Freescale DPAA architecture consists of a series of hardware blocks
26that support Ethernet connectivity. The Ethernet driver depends upon the
27following drivers in the Linux kernel:
28
29 - Peripheral Access Memory Unit (PAMU) (* needed only for PPC platforms)
30 drivers/iommu/fsl_*
31 - Frame Manager (FMan)
32 drivers/net/ethernet/freescale/fman
33 - Queue Manager (QMan), Buffer Manager (BMan)
34 drivers/soc/fsl/qbman
35
36A simplified view of the dpaa_eth interfaces mapped to FMan MACs:
37
38 dpaa_eth /eth0\ ... /ethN\
39 driver | | | |
40 ------------- ---- ----------- ---- -------------
41 -Ports / Tx Rx \ ... / Tx Rx \
42 FMan | | | |
43 -MACs | MAC0 | | MACN |
44 / dtsec0 \ ... / dtsecN \ (or tgec)
45 / \ / \(or memac)
46 --------- -------------- --- -------------- ---------
47 FMan, FMan Port, FMan SP, FMan MURAM drivers
48 ---------------------------------------------------------
49 FMan HW blocks: MURAM, MACs, Ports, SP
50 ---------------------------------------------------------
51
52The dpaa_eth relation to the QMan, BMan and FMan:
53 ________________________________
54 dpaa_eth / eth0 \
55 driver / \
56 --------- -^- -^- -^- --- ---------
57 QMan driver / \ / \ / \ \ / | BMan |
58 |Rx | |Rx | |Tx | |Tx | | driver |
59 --------- |Dfl| |Err| |Cnf| |FQs| | |
60 QMan HW |FQ | |FQ | |FQs| | | | |
61 / \ / \ / \ \ / | |
62 --------- --- --- --- -v- ---------
63 | FMan QMI | |
64 | FMan HW FMan BMI | BMan HW |
65 ----------------------- --------
66
67where the acronyms used above (and in the code) are:
68DPAA = Data Path Acceleration Architecture
69FMan = DPAA Frame Manager
70QMan = DPAA Queue Manager
71BMan = DPAA Buffer Manager
72QMI = QMan interface in FMan
73BMI = BMan interface in FMan
74FMan SP = FMan Storage Profiles
75MURAM = Multi-user RAM in FMan
76FQ = QMan Frame Queue
77Rx Dfl FQ = default reception FQ
78Rx Err FQ = Rx error frames FQ
79Tx Cnf FQ = Tx confirmation FQs
80Tx FQs = transmission frame queues
81dtsec = datapath three speed Ethernet controller (10/100/1000 Mbps)
82tgec = ten gigabit Ethernet controller (10 Gbps)
83memac = multirate Ethernet MAC (10/100/1000/10000)
84
85DPAA Ethernet Supported SoCs
86============================
87
88The DPAA drivers enable the Ethernet controllers present on the following SoCs:
89
90# PPC
91P1023
92P2041
93P3041
94P4080
95P5020
96P5040
97T1023
98T1024
99T1040
100T1042
101T2080
102T4240
103B4860
104
105# ARM
106LS1043A
107LS1046A
108
109Configuring DPAA Ethernet in your kernel
110========================================
111
112To enable the DPAA Ethernet driver, the following Kconfig options are required:
113
114# common for arch/arm64 and arch/powerpc platforms
115CONFIG_FSL_DPAA=y
116CONFIG_FSL_FMAN=y
117CONFIG_FSL_DPAA_ETH=y
118CONFIG_FSL_XGMAC_MDIO=y
119
120# for arch/powerpc only
121CONFIG_FSL_PAMU=y
122
123# common options needed for the PHYs used on the RDBs
124CONFIG_VITESSE_PHY=y
125CONFIG_REALTEK_PHY=y
126CONFIG_AQUANTIA_PHY=y
127
128DPAA Ethernet Frame Processing
129==============================
130
131On Rx, buffers for the incoming frames are retrieved from one of the three
132existing buffer pools. The driver initializes and seeds these, each with
133buffers of different sizes: 1KB, 2KB and 4KB.
134
135On Tx, all transmitted frames are returned to the driver through Tx
136confirmation frame queues. The driver is then responsible for freeing the
137buffers. In order to do this properly, a backpointer is added to the buffer
138before transmission that points to the skb. When the buffer returns to the
139driver on a confirmation FQ, the skb can be correctly consumed.
140
141DPAA Ethernet Features
142======================
143
144Currently the DPAA Ethernet driver enables the basic features required for
145a Linux Ethernet driver. The support for advanced features will be added
146gradually.
147
148The driver has Rx and Tx checksum offloading for UDP and TCP. Currently the Rx
149checksum offload feature is enabled by default and cannot be controlled through
150ethtool.
151
152The driver has support for multiple prioritized Tx traffic classes. Priorities
153range from 0 (lowest) to 3 (highest). These are mapped to HW workqueues with
154strict priority levels. Each traffic class contains NR_CPU TX queues. By
155default, only one traffic class is enabled and the lowest priority Tx queues
156are used. Higher priority traffic classes can be enabled with the mqprio
157qdisc. For example, all four traffic classes are enabled on an interface with
158the command shown after the list below. skb priority levels are mapped to
159traffic classes as follows:
160
161 * priorities 0 to 3 - traffic class 0 (low priority)
162 * priorities 4 to 7 - traffic class 1 (medium-low priority)
163 * priorities 8 to 11 - traffic class 2 (medium-high priority)
164 * priorities 12 to 15 - traffic class 3 (high priority)
165
166tc qdisc add dev <int> root handle 1: \
167 mqprio num_tc 4 map 0 0 0 0 1 1 1 1 2 2 2 2 3 3 3 3 hw 1
168
169Debugging
170=========
171
172The following statistics are exported for each interface through ethtool:
173
174 - interrupt count per CPU
175 - Rx packets count per CPU
176 - Tx packets count per CPU
177 - Tx confirmed packets count per CPU
178 - Tx S/G frames count per CPU
179 - Tx error count per CPU
180 - Rx error count per CPU
181 - Rx error count per type
182 - congestion related statistics:
183 - congestion status
184 - time spent in congestion
185      - number of times the device entered congestion
186 - dropped packets count per cause
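
For example, these counters can be read with ethtool (fm1-mac1 below is only
an illustrative interface name; the actual names depend on the platform):

ethtool -S fm1-mac1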
187
188The driver also exports the following information in sysfs:
189
190 - the FQ IDs for each FQ type
191 /sys/devices/platform/dpaa-ethernet.0/net/<int>/fqids
192
193 - the IDs of the buffer pools in use
194 /sys/devices/platform/dpaa-ethernet.0/net/<int>/bpids
diff --git a/Documentation/networking/scaling.txt b/Documentation/networking/scaling.txt
index 59f4db2a0c85..f55639d71d35 100644
--- a/Documentation/networking/scaling.txt
+++ b/Documentation/networking/scaling.txt
@@ -122,7 +122,7 @@ associated flow of the packet. The hash is either provided by hardware
122or will be computed in the stack. Capable hardware can pass the hash in 122or will be computed in the stack. Capable hardware can pass the hash in
123the receive descriptor for the packet; this would usually be the same 123the receive descriptor for the packet; this would usually be the same
124hash used for RSS (e.g. computed Toeplitz hash). The hash is saved in 124hash used for RSS (e.g. computed Toeplitz hash). The hash is saved in
125skb->rx_hash and can be used elsewhere in the stack as a hash of the 125skb->hash and can be used elsewhere in the stack as a hash of the
126packet’s flow. 126packet’s flow.
127 127
128Each receive hardware queue has an associated list of CPUs to which 128Each receive hardware queue has an associated list of CPUs to which
diff --git a/Documentation/networking/tcp.txt b/Documentation/networking/tcp.txt
index bdc4c0db51e1..9c7139d57e57 100644
--- a/Documentation/networking/tcp.txt
+++ b/Documentation/networking/tcp.txt
@@ -1,7 +1,7 @@
1TCP protocol 1TCP protocol
2============ 2============
3 3
4Last updated: 9 February 2008 4Last updated: 3 June 2017
5 5
6Contents 6Contents
7======== 7========
@@ -29,18 +29,19 @@ As of 2.6.13, Linux supports pluggable congestion control algorithms.
29A congestion control mechanism can be registered through functions in 29A congestion control mechanism can be registered through functions in
30tcp_cong.c. The functions used by the congestion control mechanism are 30tcp_cong.c. The functions used by the congestion control mechanism are
31registered via passing a tcp_congestion_ops struct to 31registered via passing a tcp_congestion_ops struct to
32tcp_register_congestion_control. As a minimum name, ssthresh, 32tcp_register_congestion_control. As a minimum, the congestion control
33cong_avoid must be valid. 33mechanism must provide a valid name and must implement either ssthresh,
34cong_avoid and undo_cwnd hooks or the "omnipotent" cong_control hook.
34 35
35Private data for a congestion control mechanism is stored in tp->ca_priv. 36Private data for a congestion control mechanism is stored in tp->ca_priv.
36tcp_ca(tp) returns a pointer to this space. This is preallocated space - it 37tcp_ca(tp) returns a pointer to this space. This is preallocated space - it
37is important to check the size of your private data will fit this space, or 38is important to check the size of your private data will fit this space, or
38alternatively space could be allocated elsewhere and a pointer to it could 39alternatively, space could be allocated elsewhere and a pointer to it could
39be stored here. 40be stored here.
40 41
41There are three kinds of congestion control algorithms currently: The 42There are three kinds of congestion control algorithms currently: The
42simplest ones are derived from TCP reno (highspeed, scalable) and just 43simplest ones are derived from TCP reno (highspeed, scalable) and just
43provide an alternative the congestion window calculation. More complex 44provide an alternative congestion window calculation. More complex
44ones like BIC try to look at other events to provide better 45ones like BIC try to look at other events to provide better
45heuristics. There are also round trip time based algorithms like 46heuristics. There are also round trip time based algorithms like
46Vegas and Westwood+. 47Vegas and Westwood+.
@@ -49,21 +50,15 @@ Good TCP congestion control is a complex problem because the algorithm
49needs to maintain fairness and performance. Please review current 50needs to maintain fairness and performance. Please review current
50research and RFC's before developing new modules. 51research and RFC's before developing new modules.
51 52
52The method that is used to determine which congestion control mechanism is 53The default congestion control mechanism is chosen based on the
53determined by the setting of the sysctl net.ipv4.tcp_congestion_control. 54DEFAULT_TCP_CONG Kconfig parameter. If you really want a particular default
54The default congestion control will be the last one registered (LIFO); 55value then you can set it using sysctl net.ipv4.tcp_congestion_control. The
55so if you built everything as modules, the default will be reno. If you 56module will be autoloaded if needed and you will get the expected protocol. If
56build with the defaults from Kconfig, then CUBIC will be builtin (not a 57you ask for an unknown congestion method, then the sysctl attempt will fail.
57module) and it will end up the default.
58 58
59If you really want a particular default value then you will need 59If you remove a TCP congestion control module, then you will get the next
60to set it with the sysctl. If you use a sysctl, the module will be autoloaded
61if needed and you will get the expected protocol. If you ask for an
62unknown congestion method, then the sysctl attempt will fail.
63
64If you remove a tcp congestion control module, then you will get the next
65available one. Since reno cannot be built as a module, and cannot be 60available one. Since reno cannot be built as a module, and cannot be
66deleted, it will always be available. 61removed, it will always be available.
67 62
68How the new TCP output machine [nyi] works. 63How the new TCP output machine [nyi] works.
69=========================================== 64===========================================
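A minimal sketch of the registration interface described in the hunk above: the name is mandatory, and the ssthresh/cong_avoid/undo_cwnd trio is satisfied here with the reno helpers the kernel exports (a real module would supply its own hooks, or the "omnipotent" cong_control hook instead of the trio). The module name "doc_example" is made up for illustration.

/*
 * Sketch only: not a useful algorithm, just the minimum shape of a
 * pluggable congestion control module per the text above.
 */
#include <linux/module.h>
#include <net/tcp.h>

static struct tcp_congestion_ops doc_example __read_mostly = {
	.name		= "doc_example",
	.owner		= THIS_MODULE,
	.ssthresh	= tcp_reno_ssthresh,
	.cong_avoid	= tcp_reno_cong_avoid,
	.undo_cwnd	= tcp_reno_undo_cwnd,
};

static int __init doc_example_init(void)
{
	/* Fails (e.g. -EEXIST) if the name is already registered. */
	return tcp_register_congestion_control(&doc_example);
}

static void __exit doc_example_exit(void)
{
	tcp_unregister_congestion_control(&doc_example);
}

module_init(doc_example_init);
module_exit(doc_example_exit);
MODULE_LICENSE("GPL");

Once built and available, it could be selected with sysctl net.ipv4.tcp_congestion_control=doc_example, which also triggers the module autoloading described above.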
diff --git a/Documentation/sound/hd-audio/models.rst b/Documentation/sound/hd-audio/models.rst
index 5338673c88d9..773d2bfacc6c 100644
--- a/Documentation/sound/hd-audio/models.rst
+++ b/Documentation/sound/hd-audio/models.rst
@@ -16,6 +16,8 @@ ALC880
16 6-jack in back, 2-jack in front 16 6-jack in back, 2-jack in front
176stack-digout 176stack-digout
18 6-jack with a SPDIF out 18 6-jack with a SPDIF out
196stack-automute
20 6-jack with headphone jack detection
19 21
20ALC260 22ALC260
21====== 23======
@@ -62,6 +64,8 @@ lenovo-dock
62 Enables docking station I/O for some Lenovos 64 Enables docking station I/O for some Lenovos
63hp-gpio-led 65hp-gpio-led
64 GPIO LED support on HP laptops 66 GPIO LED support on HP laptops
67hp-dock-gpio-mic1-led
68 HP dock with mic LED support
65dell-headset-multi 69dell-headset-multi
66 Headset jack, which can also be used as mic-in 70 Headset jack, which can also be used as mic-in
67dell-headset-dock 71dell-headset-dock
@@ -72,6 +76,12 @@ alc283-sense-combo
72 Combo jack sensing on ALC283 76 Combo jack sensing on ALC283
73tpt440-dock 77tpt440-dock
74 Pin configs for Lenovo Thinkpad Dock support 78 Pin configs for Lenovo Thinkpad Dock support
79tpt440
80 Lenovo Thinkpad T440s setup
81tpt460
82 Lenovo Thinkpad T460/560 setup
83dual-codecs
84 Lenovo laptops with dual codecs
75 85
76ALC66x/67x/892 86ALC66x/67x/892
77============== 87==============
@@ -97,6 +107,8 @@ inv-dmic
97 Inverted internal mic workaround 107 Inverted internal mic workaround
98dell-headset-multi 108dell-headset-multi
99 Headset jack, which can also be used as mic-in 109 Headset jack, which can also be used as mic-in
110dual-codecs
111 Lenovo laptops with dual codecs
100 112
101ALC680 113ALC680
102====== 114======
@@ -114,6 +126,8 @@ inv-dmic
114 Inverted internal mic workaround 126 Inverted internal mic workaround
115no-primary-hp 127no-primary-hp
116 VAIO Z/VGC-LN51JGB workaround (for fixed speaker DAC) 128 VAIO Z/VGC-LN51JGB workaround (for fixed speaker DAC)
129dual-codecs
130 ALC1220 dual codecs for Gaming mobos
117 131
118ALC861/660 132ALC861/660
119========== 133==========
@@ -206,65 +220,47 @@ auto
206 220
207Conexant 5045 221Conexant 5045
208============= 222=============
209laptop-hpsense 223cap-mix-amp
210 Laptop with HP sense (old model laptop) 224 Fix max input level on mixer widget
211laptop-micsense 225toshiba-p105
212 Laptop with Mic sense (old model fujitsu) 226 Toshiba P105 quirk
213laptop-hpmicsense 227hp-530
214 Laptop with HP and Mic senses 228 HP 530 quirk
215benq
216 Benq R55E
217laptop-hp530
218 HP 530 laptop
219test
220 for testing/debugging purpose, almost all controls can be
221 adjusted. Appearing only when compiled with $CONFIG_SND_DEBUG=y
222 229
223Conexant 5047 230Conexant 5047
224============= 231=============
225laptop 232cap-mix-amp
226 Basic Laptop config 233 Fix max input level on mixer widget
227laptop-hp
228 Laptop config for some HP models (subdevice 30A5)
229laptop-eapd
230 Laptop config with EAPD support
231test
232 for testing/debugging purpose, almost all controls can be
233 adjusted. Appearing only when compiled with $CONFIG_SND_DEBUG=y
234 234
235Conexant 5051 235Conexant 5051
236============= 236=============
237laptop 237lenovo-x200
238 Basic Laptop config (default) 238 Lenovo X200 quirk
239hp
240 HP Spartan laptop
241hp-dv6736
242 HP dv6736
243hp-f700
244 HP Compaq Presario F700
245ideapad
246 Lenovo IdeaPad laptop
247toshiba
248 Toshiba Satellite M300
249 239
250Conexant 5066 240Conexant 5066
251============= 241=============
252laptop 242stereo-dmic
253 Basic Laptop config (default) 243 Workaround for inverted stereo digital mic
254hp-laptop 244gpio1
255 HP laptops, e g G60 245 Enable GPIO1 pin
256asus 246headphone-mic-pin
257 Asus K52JU, Lenovo G560 247 Enable headphone mic NID 0x18 without detection
258dell-laptop 248tp410
259 Dell laptops 249 Thinkpad T400 & co quirks
260dell-vostro
261 Dell Vostro
262olpc-xo-1_5
263 OLPC XO 1.5
264ideapad
265 Lenovo IdeaPad U150
266thinkpad 250thinkpad
267 Lenovo Thinkpad 251 Thinkpad mute/mic LED quirk
252lemote-a1004
253 Lemote A1004 quirk
254lemote-a1205
255 Lemote A1205 quirk
256olpc-xo
257 OLPC XO quirk
258mute-led-eapd
259 Mute LED control via EAPD
260hp-dock
261 HP dock support
262mute-led-gpio
263 Mute LED control via GPIO
268 264
269STAC9200 265STAC9200
270======== 266========
@@ -444,6 +440,8 @@ dell-eq
444 Dell desktops/laptops 440 Dell desktops/laptops
445alienware 441alienware
446 Alienware M17x 442 Alienware M17x
443asus-mobo
444 Pin configs for ASUS mobo with 5.1/SPDIF out
447auto 445auto
448 BIOS setup (default) 446 BIOS setup (default)
449 447
@@ -477,6 +475,8 @@ hp-envy-ts-bass
477 Pin fixup for HP Envy TS bass speaker (NID 0x10) 475 Pin fixup for HP Envy TS bass speaker (NID 0x10)
478hp-bnb13-eq 476hp-bnb13-eq
479 Hardware equalizer setup for HP laptops 477 Hardware equalizer setup for HP laptops
478hp-envy-ts-bass
479 HP Envy TS bass support
480auto 480auto
481 BIOS setup (default) 481 BIOS setup (default)
482 482
@@ -496,10 +496,22 @@ auto
496 496
497Cirrus Logic CS4206/4207 497Cirrus Logic CS4206/4207
498======================== 498========================
499mbp53
500 MacBook Pro 5,3
499mbp55 501mbp55
500 MacBook Pro 5,5 502 MacBook Pro 5,5
501imac27 503imac27
502 IMac 27 Inch 504 IMac 27 Inch
505imac27_122
506 iMac 12,2
507apple
508 Generic Apple quirk
509mbp101
510 MacBookPro 10,1
511mbp81
512 MacBookPro 8,1
513mba42
514 MacBookAir 4,2
503auto 515auto
504 BIOS setup (default) 516 BIOS setup (default)
505 517
@@ -509,6 +521,10 @@ mba6
509 MacBook Air 6,1 and 6,2 521 MacBook Air 6,1 and 6,2
510gpio0 522gpio0
511 Enable GPIO 0 amp 523 Enable GPIO 0 amp
524mbp11
525 MacBookPro 11,2
526macmini
527 MacMini 7,1
512auto 528auto
513 BIOS setup (default) 529 BIOS setup (default)
514 530
diff --git a/Documentation/spi/spi-summary b/Documentation/spi/spi-summary
index d1824b399b2d..1721c1b570c3 100644
--- a/Documentation/spi/spi-summary
+++ b/Documentation/spi/spi-summary
@@ -62,8 +62,8 @@ chips described as using "three wire" signaling: SCK, data, nCSx.
62(That data line is sometimes called MOMI or SISO.) 62(That data line is sometimes called MOMI or SISO.)
63 63
64Microcontrollers often support both master and slave sides of the SPI 64Microcontrollers often support both master and slave sides of the SPI
65protocol. This document (and Linux) currently only supports the master 65protocol. This document (and Linux) supports both the master and slave
66side of SPI interactions. 66sides of SPI interactions.
67 67
68 68
69Who uses it? On what kinds of systems? 69Who uses it? On what kinds of systems?
@@ -154,9 +154,8 @@ control audio interfaces, present touchscreen sensors as input interfaces,
154or monitor temperature and voltage levels during industrial processing. 154or monitor temperature and voltage levels during industrial processing.
155And those might all be sharing the same controller driver. 155And those might all be sharing the same controller driver.
156 156
157A "struct spi_device" encapsulates the master-side interface between 157A "struct spi_device" encapsulates the controller-side interface between
158those two types of driver. At this writing, Linux has no slave side 158those two types of drivers.
159programming interface.
160 159
161There is a minimal core of SPI programming interfaces, focussing on 160There is a minimal core of SPI programming interfaces, focussing on
162using the driver model to connect controller and protocol drivers using 161using the driver model to connect controller and protocol drivers using
@@ -177,10 +176,24 @@ shows up in sysfs in several locations:
177 /sys/bus/spi/drivers/D ... driver for one or more spi*.* devices 176 /sys/bus/spi/drivers/D ... driver for one or more spi*.* devices
178 177
179 /sys/class/spi_master/spiB ... symlink (or actual device node) to 178 /sys/class/spi_master/spiB ... symlink (or actual device node) to
180 a logical node which could hold class related state for the 179 a logical node which could hold class related state for the SPI
181 controller managing bus "B". All spiB.* devices share one 180 master controller managing bus "B". All spiB.* devices share one
182 physical SPI bus segment, with SCLK, MOSI, and MISO. 181 physical SPI bus segment, with SCLK, MOSI, and MISO.
183 182
183 /sys/devices/.../CTLR/slave ... virtual file for (un)registering the
184 slave device for an SPI slave controller.
185 Writing the driver name of an SPI slave handler to this file
186 registers the slave device; writing "(null)" unregisters the slave
187 device.
188 Reading from this file shows the name of the slave device ("(null)"
189 if not registered).
190
191 /sys/class/spi_slave/spiB ... symlink (or actual device node) to
192 a logical node which could hold class related state for the SPI
193 slave controller on bus "B". When registered, a single spiB.*
194 device is present here, possibly sharing the physical SPI bus
195 segment with other SPI slave devices.
196
184Note that the actual location of the controller's class state depends 197Note that the actual location of the controller's class state depends
185on whether you enabled CONFIG_SYSFS_DEPRECATED or not. At this time, 198on whether you enabled CONFIG_SYSFS_DEPRECATED or not. At this time,
186the only class-specific state is the bus number ("B" in "spiB"), so 199the only class-specific state is the bus number ("B" in "spiB"), so
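A sketch of driving the new "slave" attribute from user space, assuming only the semantics given in the hunk above; the controller sysfs path and the handler name are placeholders, not real values, and must be substituted with the actual controller path and the name of an SPI slave protocol driver.

/*
 * Sketch only: (un)register an SPI slave handler via the virtual
 * "slave" file described above.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int set_spi_slave(const char *slave_attr_path, const char *name)
{
	int fd = open(slave_attr_path, O_WRONLY);
	ssize_t ret;

	if (fd < 0) {
		perror("open");
		return -1;
	}
	/* Write a driver name to register, or "(null)" to unregister. */
	ret = write(fd, name, strlen(name));
	close(fd);
	return ret < 0 ? -1 : 0;
}

/* Usage (paths and names are placeholders):
 *   set_spi_slave("/sys/devices/platform/CTLR/slave", "slave-handler");
 *   set_spi_slave("/sys/devices/platform/CTLR/slave", "(null)");
 */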
diff --git a/Documentation/usb/typec.rst b/Documentation/usb/typec.rst
index b67a46779de9..8a7249f2ff04 100644
--- a/Documentation/usb/typec.rst
+++ b/Documentation/usb/typec.rst
@@ -114,8 +114,7 @@ the details during registration. The class offers the following API for
114registering/unregistering cables and their plugs: 114registering/unregistering cables and their plugs:
115 115
116.. kernel-doc:: drivers/usb/typec/typec.c 116.. kernel-doc:: drivers/usb/typec/typec.c
117 :functions: typec_register_cable typec_unregister_cable typec_register_plug 117 :functions: typec_register_cable typec_unregister_cable typec_register_plug typec_unregister_plug
118 typec_unregister_plug
119 118
120The class will provide a handle to struct typec_cable and struct typec_plug if 119The class will provide a handle to struct typec_cable and struct typec_plug if
121the registration is successful, or NULL if it isn't. 120the registration is successful, or NULL if it isn't.
@@ -137,8 +136,7 @@ during connection of a partner or cable, the port driver must use the following
137APIs to report it to the class: 136APIs to report it to the class:
138 137
139.. kernel-doc:: drivers/usb/typec/typec.c 138.. kernel-doc:: drivers/usb/typec/typec.c
140 :functions: typec_set_data_role typec_set_pwr_role typec_set_vconn_role 139 :functions: typec_set_data_role typec_set_pwr_role typec_set_vconn_role typec_set_pwr_opmode
141 typec_set_pwr_opmode
142 140
143Alternate Modes 141Alternate Modes
144~~~~~~~~~~~~~~~ 142~~~~~~~~~~~~~~~
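As a rough sketch of the role-reporting calls named in the kernel-doc directive in the preceding hunk: a port driver that has just negotiated the source/host side of a PD contract might report it as below. The scenario and the caller context are assumed; the typec_set_* functions and the enum values come from <linux/usb/typec.h>.

/*
 * Sketch only: report negotiated roles to the Type-C class.  When and
 * why these are called is up to the (hypothetical) port driver.
 */
#include <linux/usb/typec.h>

static void report_attach_as_source(struct typec_port *port)
{
	typec_set_data_role(port, TYPEC_HOST);
	typec_set_pwr_role(port, TYPEC_SOURCE);
	typec_set_vconn_role(port, TYPEC_SOURCE);
	typec_set_pwr_opmode(port, TYPEC_PWR_MODE_PD);
}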
diff --git a/Documentation/watchdog/watchdog-parameters.txt b/Documentation/watchdog/watchdog-parameters.txt
index 4f7d86dd0a5d..914518aeb972 100644
--- a/Documentation/watchdog/watchdog-parameters.txt
+++ b/Documentation/watchdog/watchdog-parameters.txt
@@ -117,7 +117,7 @@ nowayout: Watchdog cannot be stopped once started
117------------------------------------------------- 117-------------------------------------------------
118iTCO_wdt: 118iTCO_wdt:
119heartbeat: Watchdog heartbeat in seconds. 119heartbeat: Watchdog heartbeat in seconds.
120 (2<heartbeat<39 (TCO v1) or 613 (TCO v2), default=30) 120 (5<=heartbeat<=74 (TCO v1) or 1226 (TCO v2), default=30)
121nowayout: Watchdog cannot be stopped once started 121nowayout: Watchdog cannot be stopped once started
122 (default=kernel config parameter) 122 (default=kernel config parameter)
123------------------------------------------------- 123-------------------------------------------------
diff --git a/MAINTAINERS b/MAINTAINERS
index f7d568b8f133..767e9d202adf 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -846,7 +846,6 @@ M: Laura Abbott <labbott@redhat.com>
846M: Sumit Semwal <sumit.semwal@linaro.org> 846M: Sumit Semwal <sumit.semwal@linaro.org>
847L: devel@driverdev.osuosl.org 847L: devel@driverdev.osuosl.org
848S: Supported 848S: Supported
849F: Documentation/devicetree/bindings/staging/ion/
850F: drivers/staging/android/ion 849F: drivers/staging/android/ion
851F: drivers/staging/android/uapi/ion.h 850F: drivers/staging/android/uapi/ion.h
852F: drivers/staging/android/uapi/ion_test.h 851F: drivers/staging/android/uapi/ion_test.h
@@ -1173,7 +1172,7 @@ N: clps711x
1173 1172
1174ARM/CIRRUS LOGIC EP93XX ARM ARCHITECTURE 1173ARM/CIRRUS LOGIC EP93XX ARM ARCHITECTURE
1175M: Hartley Sweeten <hsweeten@visionengravers.com> 1174M: Hartley Sweeten <hsweeten@visionengravers.com>
1176M: Ryan Mallon <rmallon@gmail.com> 1175M: Alexander Sverdlin <alexander.sverdlin@gmail.com>
1177L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 1176L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
1178S: Maintained 1177S: Maintained
1179F: arch/arm/mach-ep93xx/ 1178F: arch/arm/mach-ep93xx/
@@ -1490,13 +1489,15 @@ M: Gregory Clement <gregory.clement@free-electrons.com>
1490M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com> 1489M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
1491L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 1490L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
1492S: Maintained 1491S: Maintained
1493F: arch/arm/mach-mvebu/
1494F: drivers/rtc/rtc-armada38x.c
1495F: arch/arm/boot/dts/armada* 1492F: arch/arm/boot/dts/armada*
1496F: arch/arm/boot/dts/kirkwood* 1493F: arch/arm/boot/dts/kirkwood*
1494F: arch/arm/configs/mvebu_*_defconfig
1495F: arch/arm/mach-mvebu/
1497F: arch/arm64/boot/dts/marvell/armada* 1496F: arch/arm64/boot/dts/marvell/armada*
1498F: drivers/cpufreq/mvebu-cpufreq.c 1497F: drivers/cpufreq/mvebu-cpufreq.c
1499F: arch/arm/configs/mvebu_*_defconfig 1498F: drivers/irqchip/irq-armada-370-xp.c
1499F: drivers/irqchip/irq-mvebu-*
1500F: drivers/rtc/rtc-armada38x.c
1500 1501
1501ARM/Marvell Berlin SoC support 1502ARM/Marvell Berlin SoC support
1502M: Jisheng Zhang <jszhang@marvell.com> 1503M: Jisheng Zhang <jszhang@marvell.com>
@@ -1722,7 +1723,6 @@ N: rockchip
1722ARM/SAMSUNG EXYNOS ARM ARCHITECTURES 1723ARM/SAMSUNG EXYNOS ARM ARCHITECTURES
1723M: Kukjin Kim <kgene@kernel.org> 1724M: Kukjin Kim <kgene@kernel.org>
1724M: Krzysztof Kozlowski <krzk@kernel.org> 1725M: Krzysztof Kozlowski <krzk@kernel.org>
1725R: Javier Martinez Canillas <javier@osg.samsung.com>
1726L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 1726L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
1727L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers) 1727L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
1728Q: https://patchwork.kernel.org/project/linux-samsung-soc/list/ 1728Q: https://patchwork.kernel.org/project/linux-samsung-soc/list/
@@ -1830,7 +1830,6 @@ F: drivers/edac/altera_edac.
1830ARM/STI ARCHITECTURE 1830ARM/STI ARCHITECTURE
1831M: Patrice Chotard <patrice.chotard@st.com> 1831M: Patrice Chotard <patrice.chotard@st.com>
1832L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 1832L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
1833L: kernel@stlinux.com
1834W: http://www.stlinux.com 1833W: http://www.stlinux.com
1835S: Maintained 1834S: Maintained
1836F: arch/arm/mach-sti/ 1835F: arch/arm/mach-sti/
@@ -2965,7 +2964,7 @@ F: sound/pci/oxygen/
2965 2964
2966C6X ARCHITECTURE 2965C6X ARCHITECTURE
2967M: Mark Salter <msalter@redhat.com> 2966M: Mark Salter <msalter@redhat.com>
2968M: Aurelien Jacquiot <a-jacquiot@ti.com> 2967M: Aurelien Jacquiot <jacquiot.aurelien@gmail.com>
2969L: linux-c6x-dev@linux-c6x.org 2968L: linux-c6x-dev@linux-c6x.org
2970W: http://www.linux-c6x.org/wiki/index.php/Main_Page 2969W: http://www.linux-c6x.org/wiki/index.php/Main_Page
2971S: Maintained 2970S: Maintained
@@ -3116,6 +3115,14 @@ F: drivers/net/ieee802154/cc2520.c
3116F: include/linux/spi/cc2520.h 3115F: include/linux/spi/cc2520.h
3117F: Documentation/devicetree/bindings/net/ieee802154/cc2520.txt 3116F: Documentation/devicetree/bindings/net/ieee802154/cc2520.txt
3118 3117
3118CCREE ARM TRUSTZONE CRYPTOCELL 700 REE DRIVER
3119M: Gilad Ben-Yossef <gilad@benyossef.com>
3120L: linux-crypto@vger.kernel.org
3121L: driverdev-devel@linuxdriverproject.org
3122S: Supported
3123F: drivers/staging/ccree/
3124W: https://developer.arm.com/products/system-ip/trustzone-cryptocell/cryptocell-700-family
3125
3119CEC FRAMEWORK 3126CEC FRAMEWORK
3120M: Hans Verkuil <hans.verkuil@cisco.com> 3127M: Hans Verkuil <hans.verkuil@cisco.com>
3121L: linux-media@vger.kernel.org 3128L: linux-media@vger.kernel.org
@@ -5615,7 +5622,7 @@ F: scripts/get_maintainer.pl
5615 5622
5616GENWQE (IBM Generic Workqueue Card) 5623GENWQE (IBM Generic Workqueue Card)
5617M: Frank Haverkamp <haver@linux.vnet.ibm.com> 5624M: Frank Haverkamp <haver@linux.vnet.ibm.com>
5618M: Gabriel Krisman Bertazi <krisman@linux.vnet.ibm.com> 5625M: Guilherme G. Piccoli <gpiccoli@linux.vnet.ibm.com>
5619S: Supported 5626S: Supported
5620F: drivers/misc/genwqe/ 5627F: drivers/misc/genwqe/
5621 5628
@@ -5660,7 +5667,6 @@ F: tools/testing/selftests/gpio/
5660 5667
5661GPIO SUBSYSTEM 5668GPIO SUBSYSTEM
5662M: Linus Walleij <linus.walleij@linaro.org> 5669M: Linus Walleij <linus.walleij@linaro.org>
5663M: Alexandre Courbot <gnurou@gmail.com>
5664L: linux-gpio@vger.kernel.org 5670L: linux-gpio@vger.kernel.org
5665T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-gpio.git 5671T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-gpio.git
5666S: Maintained 5672S: Maintained
@@ -5695,7 +5701,7 @@ M: Alex Elder <elder@kernel.org>
5695M: Greg Kroah-Hartman <gregkh@linuxfoundation.org> 5701M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
5696S: Maintained 5702S: Maintained
5697F: drivers/staging/greybus/ 5703F: drivers/staging/greybus/
5698L: greybus-dev@lists.linaro.org 5704L: greybus-dev@lists.linaro.org (moderated for non-subscribers)
5699 5705
5700GREYBUS AUDIO PROTOCOLS DRIVERS 5706GREYBUS AUDIO PROTOCOLS DRIVERS
5701M: Vaibhav Agarwal <vaibhav.sr@gmail.com> 5707M: Vaibhav Agarwal <vaibhav.sr@gmail.com>
@@ -7136,7 +7142,7 @@ S: Maintained
7136F: drivers/media/platform/rcar_jpu.c 7142F: drivers/media/platform/rcar_jpu.c
7137 7143
7138JSM Neo PCI based serial card 7144JSM Neo PCI based serial card
7139M: Gabriel Krisman Bertazi <krisman@linux.vnet.ibm.com> 7145M: Guilherme G. Piccoli <gpiccoli@linux.vnet.ibm.com>
7140L: linux-serial@vger.kernel.org 7146L: linux-serial@vger.kernel.org
7141S: Maintained 7147S: Maintained
7142F: drivers/tty/serial/jsm/ 7148F: drivers/tty/serial/jsm/
@@ -7700,7 +7706,7 @@ F: drivers/platform/x86/hp_accel.c
7700 7706
7701LIVE PATCHING 7707LIVE PATCHING
7702M: Josh Poimboeuf <jpoimboe@redhat.com> 7708M: Josh Poimboeuf <jpoimboe@redhat.com>
7703M: Jessica Yu <jeyu@redhat.com> 7709M: Jessica Yu <jeyu@kernel.org>
7704M: Jiri Kosina <jikos@kernel.org> 7710M: Jiri Kosina <jikos@kernel.org>
7705M: Miroslav Benes <mbenes@suse.cz> 7711M: Miroslav Benes <mbenes@suse.cz>
7706R: Petr Mladek <pmladek@suse.com> 7712R: Petr Mladek <pmladek@suse.com>
@@ -8501,7 +8507,7 @@ S: Odd Fixes
8501F: drivers/media/radio/radio-miropcm20* 8507F: drivers/media/radio/radio-miropcm20*
8502 8508
8503MELLANOX MLX4 core VPI driver 8509MELLANOX MLX4 core VPI driver
8504M: Yishai Hadas <yishaih@mellanox.com> 8510M: Tariq Toukan <tariqt@mellanox.com>
8505L: netdev@vger.kernel.org 8511L: netdev@vger.kernel.org
8506L: linux-rdma@vger.kernel.org 8512L: linux-rdma@vger.kernel.org
8507W: http://www.mellanox.com 8513W: http://www.mellanox.com
@@ -8509,7 +8515,6 @@ Q: http://patchwork.ozlabs.org/project/netdev/list/
8509S: Supported 8515S: Supported
8510F: drivers/net/ethernet/mellanox/mlx4/ 8516F: drivers/net/ethernet/mellanox/mlx4/
8511F: include/linux/mlx4/ 8517F: include/linux/mlx4/
8512F: include/uapi/rdma/mlx4-abi.h
8513 8518
8514MELLANOX MLX4 IB driver 8519MELLANOX MLX4 IB driver
8515M: Yishai Hadas <yishaih@mellanox.com> 8520M: Yishai Hadas <yishaih@mellanox.com>
@@ -8519,6 +8524,7 @@ Q: http://patchwork.kernel.org/project/linux-rdma/list/
8519S: Supported 8524S: Supported
8520F: drivers/infiniband/hw/mlx4/ 8525F: drivers/infiniband/hw/mlx4/
8521F: include/linux/mlx4/ 8526F: include/linux/mlx4/
8527F: include/uapi/rdma/mlx4-abi.h
8522 8528
8523MELLANOX MLX5 core VPI driver 8529MELLANOX MLX5 core VPI driver
8524M: Saeed Mahameed <saeedm@mellanox.com> 8530M: Saeed Mahameed <saeedm@mellanox.com>
@@ -8531,7 +8537,6 @@ Q: http://patchwork.ozlabs.org/project/netdev/list/
8531S: Supported 8537S: Supported
8532F: drivers/net/ethernet/mellanox/mlx5/core/ 8538F: drivers/net/ethernet/mellanox/mlx5/core/
8533F: include/linux/mlx5/ 8539F: include/linux/mlx5/
8534F: include/uapi/rdma/mlx5-abi.h
8535 8540
8536MELLANOX MLX5 IB driver 8541MELLANOX MLX5 IB driver
8537M: Matan Barak <matanb@mellanox.com> 8542M: Matan Barak <matanb@mellanox.com>
@@ -8542,6 +8547,7 @@ Q: http://patchwork.kernel.org/project/linux-rdma/list/
8542S: Supported 8547S: Supported
8543F: drivers/infiniband/hw/mlx5/ 8548F: drivers/infiniband/hw/mlx5/
8544F: include/linux/mlx5/ 8549F: include/linux/mlx5/
8550F: include/uapi/rdma/mlx5-abi.h
8545 8551
8546MELEXIS MLX90614 DRIVER 8552MELEXIS MLX90614 DRIVER
8547M: Crt Mori <cmo@melexis.com> 8553M: Crt Mori <cmo@melexis.com>
@@ -8581,7 +8587,7 @@ S: Maintained
8581F: drivers/media/dvb-frontends/mn88473* 8587F: drivers/media/dvb-frontends/mn88473*
8582 8588
8583MODULE SUPPORT 8589MODULE SUPPORT
8584M: Jessica Yu <jeyu@redhat.com> 8590M: Jessica Yu <jeyu@kernel.org>
8585M: Rusty Russell <rusty@rustcorp.com.au> 8591M: Rusty Russell <rusty@rustcorp.com.au>
8586T: git git://git.kernel.org/pub/scm/linux/kernel/git/jeyu/linux.git modules-next 8592T: git git://git.kernel.org/pub/scm/linux/kernel/git/jeyu/linux.git modules-next
8587S: Maintained 8593S: Maintained
@@ -9553,10 +9559,6 @@ F: drivers/net/wireless/intersil/orinoco/
9553 9559
9554OSD LIBRARY and FILESYSTEM 9560OSD LIBRARY and FILESYSTEM
9555M: Boaz Harrosh <ooo@electrozaur.com> 9561M: Boaz Harrosh <ooo@electrozaur.com>
9556M: Benny Halevy <bhalevy@primarydata.com>
9557L: osd-dev@open-osd.org
9558W: http://open-osd.org
9559T: git git://git.open-osd.org/open-osd.git
9560S: Maintained 9562S: Maintained
9561F: drivers/scsi/osd/ 9563F: drivers/scsi/osd/
9562F: include/scsi/osd_* 9564F: include/scsi/osd_*
@@ -10447,7 +10449,7 @@ S: Orphan
10447 10449
10448PXA RTC DRIVER 10450PXA RTC DRIVER
10449M: Robert Jarzmik <robert.jarzmik@free.fr> 10451M: Robert Jarzmik <robert.jarzmik@free.fr>
10450L: rtc-linux@googlegroups.com 10452L: linux-rtc@vger.kernel.org
10451S: Maintained 10453S: Maintained
10452 10454
10453QAT DRIVER 10455QAT DRIVER
@@ -10754,7 +10756,7 @@ X: kernel/torture.c
10754REAL TIME CLOCK (RTC) SUBSYSTEM 10756REAL TIME CLOCK (RTC) SUBSYSTEM
10755M: Alessandro Zummo <a.zummo@towertech.it> 10757M: Alessandro Zummo <a.zummo@towertech.it>
10756M: Alexandre Belloni <alexandre.belloni@free-electrons.com> 10758M: Alexandre Belloni <alexandre.belloni@free-electrons.com>
10757L: rtc-linux@googlegroups.com 10759L: linux-rtc@vger.kernel.org
10758Q: http://patchwork.ozlabs.org/project/rtc-linux/list/ 10760Q: http://patchwork.ozlabs.org/project/rtc-linux/list/
10759T: git git://git.kernel.org/pub/scm/linux/kernel/git/abelloni/linux.git 10761T: git git://git.kernel.org/pub/scm/linux/kernel/git/abelloni/linux.git
10760S: Maintained 10762S: Maintained
@@ -11265,7 +11267,6 @@ F: drivers/media/rc/serial_ir.c
11265 11267
11266STI CEC DRIVER 11268STI CEC DRIVER
11267M: Benjamin Gaignard <benjamin.gaignard@linaro.org> 11269M: Benjamin Gaignard <benjamin.gaignard@linaro.org>
11268L: kernel@stlinux.com
11269S: Maintained 11270S: Maintained
11270F: drivers/staging/media/st-cec/ 11271F: drivers/staging/media/st-cec/
11271F: Documentation/devicetree/bindings/media/stih-cec.txt 11272F: Documentation/devicetree/bindings/media/stih-cec.txt
@@ -11775,6 +11776,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/nsekhar/linux-davinci.git
11775S: Supported 11776S: Supported
11776F: arch/arm/mach-davinci/ 11777F: arch/arm/mach-davinci/
11777F: drivers/i2c/busses/i2c-davinci.c 11778F: drivers/i2c/busses/i2c-davinci.c
11779F: arch/arm/boot/dts/da850*
11778 11780
11779TI DAVINCI SERIES MEDIA DRIVER 11781TI DAVINCI SERIES MEDIA DRIVER
11780M: "Lad, Prabhakar" <prabhakar.csengg@gmail.com> 11782M: "Lad, Prabhakar" <prabhakar.csengg@gmail.com>
@@ -13858,7 +13860,7 @@ S: Odd fixes
13858F: drivers/net/wireless/wl3501* 13860F: drivers/net/wireless/wl3501*
13859 13861
13860WOLFSON MICROELECTRONICS DRIVERS 13862WOLFSON MICROELECTRONICS DRIVERS
13861L: patches@opensource.wolfsonmicro.com 13863L: patches@opensource.cirrus.com
13862T: git https://github.com/CirrusLogic/linux-drivers.git 13864T: git https://github.com/CirrusLogic/linux-drivers.git
13863W: https://github.com/CirrusLogic/linux-drivers/wiki 13865W: https://github.com/CirrusLogic/linux-drivers/wiki
13864S: Supported 13866S: Supported
diff --git a/Makefile b/Makefile
index b400c0604fac..283c6236438e 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 4 1VERSION = 4
2PATCHLEVEL = 12 2PATCHLEVEL = 12
3SUBLEVEL = 0 3SUBLEVEL = 0
4EXTRAVERSION = -rc1 4EXTRAVERSION =
5NAME = Fearless Coyote 5NAME = Fearless Coyote
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
@@ -1172,7 +1172,7 @@ headers_check_all: headers_install_all
1172PHONY += headers_check 1172PHONY += headers_check
1173headers_check: headers_install 1173headers_check: headers_install
1174 $(Q)$(MAKE) $(hdr-inst)=include/uapi HDRCHECK=1 1174 $(Q)$(MAKE) $(hdr-inst)=include/uapi HDRCHECK=1
1175 $(Q)$(MAKE) $(hdr-inst)=arch/$(hdr-arch)/include/uapi/ $(hdr-dst) HDRCHECK=1 1175 $(Q)$(MAKE) $(hdr-inst)=arch/$(hdr-arch)/include/uapi $(hdr-dst) HDRCHECK=1
1176 1176
1177# --------------------------------------------------------------------------- 1177# ---------------------------------------------------------------------------
1178# Kernel selftest 1178# Kernel selftest
@@ -1437,7 +1437,7 @@ help:
1437 @echo ' make V=0|1 [targets] 0 => quiet build (default), 1 => verbose build' 1437 @echo ' make V=0|1 [targets] 0 => quiet build (default), 1 => verbose build'
1438 @echo ' make V=2 [targets] 2 => give reason for rebuild of target' 1438 @echo ' make V=2 [targets] 2 => give reason for rebuild of target'
1439 @echo ' make O=dir [targets] Locate all output files in "dir", including .config' 1439 @echo ' make O=dir [targets] Locate all output files in "dir", including .config'
1440 @echo ' make C=1 [targets] Check all c source with $$CHECK (sparse by default)' 1440 @echo ' make C=1 [targets] Check re-compiled c source with $$CHECK (sparse by default)'
1441 @echo ' make C=2 [targets] Force check of all c source with $$CHECK' 1441 @echo ' make C=2 [targets] Force check of all c source with $$CHECK'
1442 @echo ' make RECORDMCOUNT_WARN=1 [targets] Warn about ignored mcount sections' 1442 @echo ' make RECORDMCOUNT_WARN=1 [targets] Warn about ignored mcount sections'
1443 @echo ' make W=n [targets] Enable extra gcc checks, n=1,2,3 where' 1443 @echo ' make W=n [targets] Enable extra gcc checks, n=1,2,3 where'
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index 9ec56dc97374..ce93124a850b 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -1201,8 +1201,10 @@ SYSCALL_DEFINE4(osf_wait4, pid_t, pid, int __user *, ustatus, int, options,
1201 if (!access_ok(VERIFY_WRITE, ur, sizeof(*ur))) 1201 if (!access_ok(VERIFY_WRITE, ur, sizeof(*ur)))
1202 return -EFAULT; 1202 return -EFAULT;
1203 1203
1204 err = 0; 1204 err = put_user(status, ustatus);
1205 err |= put_user(status, ustatus); 1205 if (ret < 0)
1206 return err ? err : ret;
1207
1206 err |= __put_user(r.ru_utime.tv_sec, &ur->ru_utime.tv_sec); 1208 err |= __put_user(r.ru_utime.tv_sec, &ur->ru_utime.tv_sec);
1207 err |= __put_user(r.ru_utime.tv_usec, &ur->ru_utime.tv_usec); 1209 err |= __put_user(r.ru_utime.tv_usec, &ur->ru_utime.tv_usec);
1208 err |= __put_user(r.ru_stime.tv_sec, &ur->ru_stime.tv_sec); 1210 err |= __put_user(r.ru_stime.tv_sec, &ur->ru_stime.tv_sec);
diff --git a/arch/arc/include/asm/processor.h b/arch/arc/include/asm/processor.h
index 6e1242da0159..4104a0839214 100644
--- a/arch/arc/include/asm/processor.h
+++ b/arch/arc/include/asm/processor.h
@@ -86,8 +86,6 @@ struct task_struct;
86#define TSK_K_BLINK(tsk) TSK_K_REG(tsk, 4) 86#define TSK_K_BLINK(tsk) TSK_K_REG(tsk, 4)
87#define TSK_K_FP(tsk) TSK_K_REG(tsk, 0) 87#define TSK_K_FP(tsk) TSK_K_REG(tsk, 0)
88 88
89#define thread_saved_pc(tsk) TSK_K_BLINK(tsk)
90
91extern void start_thread(struct pt_regs * regs, unsigned long pc, 89extern void start_thread(struct pt_regs * regs, unsigned long pc,
92 unsigned long usp); 90 unsigned long usp);
93 91
diff --git a/arch/arc/mm/mmap.c b/arch/arc/mm/mmap.c
index 3e25e8d6486b..2e13683dfb24 100644
--- a/arch/arc/mm/mmap.c
+++ b/arch/arc/mm/mmap.c
@@ -65,7 +65,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
65 65
66 vma = find_vma(mm, addr); 66 vma = find_vma(mm, addr);
67 if (TASK_SIZE - len >= addr && 67 if (TASK_SIZE - len >= addr &&
68 (!vma || addr + len <= vma->vm_start)) 68 (!vma || addr + len <= vm_start_gap(vma)))
69 return addr; 69 return addr;
70 } 70 }
71 71
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 4c1a35f15838..c0fcab6a5504 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1416,6 +1416,7 @@ choice
1416 config VMSPLIT_3G 1416 config VMSPLIT_3G
1417 bool "3G/1G user/kernel split" 1417 bool "3G/1G user/kernel split"
1418 config VMSPLIT_3G_OPT 1418 config VMSPLIT_3G_OPT
1419 depends on !ARM_LPAE
1419 bool "3G/1G user/kernel split (for full 1G low memory)" 1420 bool "3G/1G user/kernel split (for full 1G low memory)"
1420 config VMSPLIT_2G 1421 config VMSPLIT_2G
1421 bool "2G/2G user/kernel split" 1422 bool "2G/2G user/kernel split"
diff --git a/arch/arm/boot/compressed/efi-header.S b/arch/arm/boot/compressed/efi-header.S
index 9d5dc4fda3c1..a17ca8d78656 100644
--- a/arch/arm/boot/compressed/efi-header.S
+++ b/arch/arm/boot/compressed/efi-header.S
@@ -17,14 +17,13 @@
17 @ there. 17 @ there.
18 .inst 'M' | ('Z' << 8) | (0x1310 << 16) @ tstne r0, #0x4d000 18 .inst 'M' | ('Z' << 8) | (0x1310 << 16) @ tstne r0, #0x4d000
19#else 19#else
20 mov r0, r0 20 AR_CLASS( mov r0, r0 )
21 M_CLASS( nop.w )
21#endif 22#endif
22 .endm 23 .endm
23 24
24 .macro __EFI_HEADER 25 .macro __EFI_HEADER
25#ifdef CONFIG_EFI_STUB 26#ifdef CONFIG_EFI_STUB
26 b __efi_start
27
28 .set start_offset, __efi_start - start 27 .set start_offset, __efi_start - start
29 .org start + 0x3c 28 .org start + 0x3c
30 @ 29 @
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 7c711ba61417..8a756870c238 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -130,19 +130,22 @@ start:
130 .rept 7 130 .rept 7
131 __nop 131 __nop
132 .endr 132 .endr
133 ARM( mov r0, r0 ) 133#ifndef CONFIG_THUMB2_KERNEL
134 ARM( b 1f ) 134 mov r0, r0
135 THUMB( badr r12, 1f ) 135#else
136 THUMB( bx r12 ) 136 AR_CLASS( sub pc, pc, #3 ) @ A/R: switch to Thumb2 mode
137 M_CLASS( nop.w ) @ M: already in Thumb2 mode
138 .thumb
139#endif
140 W(b) 1f
137 141
138 .word _magic_sig @ Magic numbers to help the loader 142 .word _magic_sig @ Magic numbers to help the loader
139 .word _magic_start @ absolute load/run zImage address 143 .word _magic_start @ absolute load/run zImage address
140 .word _magic_end @ zImage end address 144 .word _magic_end @ zImage end address
141 .word 0x04030201 @ endianness flag 145 .word 0x04030201 @ endianness flag
142 146
143 THUMB( .thumb ) 147 __EFI_HEADER
1441: __EFI_HEADER 1481:
145
146 ARM_BE8( setend be ) @ go BE8 if compiled for BE8 149 ARM_BE8( setend be ) @ go BE8 if compiled for BE8
147 AR_CLASS( mrs r9, cpsr ) 150 AR_CLASS( mrs r9, cpsr )
148#ifdef CONFIG_ARM_VIRT_EXT 151#ifdef CONFIG_ARM_VIRT_EXT
diff --git a/arch/arm/boot/dts/am335x-sl50.dts b/arch/arm/boot/dts/am335x-sl50.dts
index c5d2589c55fc..fc864a855991 100644
--- a/arch/arm/boot/dts/am335x-sl50.dts
+++ b/arch/arm/boot/dts/am335x-sl50.dts
@@ -220,7 +220,7 @@
220 220
221 mmc1_pins: pinmux_mmc1_pins { 221 mmc1_pins: pinmux_mmc1_pins {
222 pinctrl-single,pins = < 222 pinctrl-single,pins = <
223 AM33XX_IOPAD(0x960, PIN_INPUT | MUX_MODE7) /* spi0_cs1.gpio0_6 */ 223 AM33XX_IOPAD(0x96c, PIN_INPUT | MUX_MODE7) /* uart0_rtsn.gpio1_9 */
224 >; 224 >;
225 }; 225 };
226 226
@@ -280,10 +280,6 @@
280 AM33XX_IOPAD(0x834, PIN_INPUT_PULLUP | MUX_MODE7) /* nKbdReset - gpmc_ad13.gpio1_13 */ 280 AM33XX_IOPAD(0x834, PIN_INPUT_PULLUP | MUX_MODE7) /* nKbdReset - gpmc_ad13.gpio1_13 */
281 AM33XX_IOPAD(0x838, PIN_INPUT_PULLUP | MUX_MODE7) /* nDispReset - gpmc_ad14.gpio1_14 */ 281 AM33XX_IOPAD(0x838, PIN_INPUT_PULLUP | MUX_MODE7) /* nDispReset - gpmc_ad14.gpio1_14 */
282 AM33XX_IOPAD(0x844, PIN_INPUT_PULLUP | MUX_MODE7) /* USB1_enPower - gpmc_a1.gpio1_17 */ 282 AM33XX_IOPAD(0x844, PIN_INPUT_PULLUP | MUX_MODE7) /* USB1_enPower - gpmc_a1.gpio1_17 */
283 /* AVR Programming - SPI Bus (bit bang) - Screen and Keyboard */
284 AM33XX_IOPAD(0x954, PIN_INPUT_PULLUP | MUX_MODE7) /* Kbd/Disp/BattMOSI spi0_d0.gpio0_3 */
285 AM33XX_IOPAD(0x958, PIN_INPUT_PULLUP | MUX_MODE7) /* Kbd/Disp/BattMISO spi0_d1.gpio0_4 */
286 AM33XX_IOPAD(0x950, PIN_INPUT_PULLUP | MUX_MODE7) /* Kbd/Disp/BattSCLK spi0_clk.gpio0_2 */
287 /* PDI Bus - Battery system */ 283 /* PDI Bus - Battery system */
288 AM33XX_IOPAD(0x840, PIN_INPUT_PULLUP | MUX_MODE7) /* nBattReset gpmc_a0.gpio1_16 */ 284 AM33XX_IOPAD(0x840, PIN_INPUT_PULLUP | MUX_MODE7) /* nBattReset gpmc_a0.gpio1_16 */
289 AM33XX_IOPAD(0x83c, PIN_INPUT_PULLUP | MUX_MODE7) /* BattPDIData gpmc_ad15.gpio1_15 */ 285 AM33XX_IOPAD(0x83c, PIN_INPUT_PULLUP | MUX_MODE7) /* BattPDIData gpmc_ad15.gpio1_15 */
@@ -384,7 +380,7 @@
384 pinctrl-names = "default"; 380 pinctrl-names = "default";
385 pinctrl-0 = <&mmc1_pins>; 381 pinctrl-0 = <&mmc1_pins>;
386 bus-width = <4>; 382 bus-width = <4>;
387 cd-gpios = <&gpio0 6 GPIO_ACTIVE_LOW>; 383 cd-gpios = <&gpio1 9 GPIO_ACTIVE_LOW>;
388 vmmc-supply = <&vmmcsd_fixed>; 384 vmmc-supply = <&vmmcsd_fixed>;
389}; 385};
390 386
diff --git a/arch/arm/boot/dts/bcm283x-rpi-smsc9512.dtsi b/arch/arm/boot/dts/bcm283x-rpi-smsc9512.dtsi
index 12c981e51134..9a0599f711ff 100644
--- a/arch/arm/boot/dts/bcm283x-rpi-smsc9512.dtsi
+++ b/arch/arm/boot/dts/bcm283x-rpi-smsc9512.dtsi
@@ -1,6 +1,6 @@
1/ { 1/ {
2 aliases { 2 aliases {
3 ethernet = &ethernet; 3 ethernet0 = &ethernet;
4 }; 4 };
5}; 5};
6 6
diff --git a/arch/arm/boot/dts/bcm283x-rpi-smsc9514.dtsi b/arch/arm/boot/dts/bcm283x-rpi-smsc9514.dtsi
index 3f0a56ebcf1f..dc7ae776db5f 100644
--- a/arch/arm/boot/dts/bcm283x-rpi-smsc9514.dtsi
+++ b/arch/arm/boot/dts/bcm283x-rpi-smsc9514.dtsi
@@ -1,6 +1,6 @@
1/ { 1/ {
2 aliases { 2 aliases {
3 ethernet = &ethernet; 3 ethernet0 = &ethernet;
4 }; 4 };
5}; 5};
6 6
diff --git a/arch/arm/boot/dts/bcm283x.dtsi b/arch/arm/boot/dts/bcm283x.dtsi
index 35cea3fcaf5c..9444a9a9ba10 100644
--- a/arch/arm/boot/dts/bcm283x.dtsi
+++ b/arch/arm/boot/dts/bcm283x.dtsi
@@ -3,6 +3,11 @@
3#include <dt-bindings/clock/bcm2835-aux.h> 3#include <dt-bindings/clock/bcm2835-aux.h>
4#include <dt-bindings/gpio/gpio.h> 4#include <dt-bindings/gpio/gpio.h>
5 5
6/* firmware-provided startup stubs live here, where the secondary CPUs are
7 * spinning.
8 */
9/memreserve/ 0x00000000 0x00001000;
10
6/* This include file covers the common peripherals and configuration between 11/* This include file covers the common peripherals and configuration between
7 * bcm2835 and bcm2836 implementations, leaving the CPU configuration to 12 * bcm2835 and bcm2836 implementations, leaving the CPU configuration to
8 * bcm2835.dtsi and bcm2836.dtsi. 13 * bcm2835.dtsi and bcm2836.dtsi.
@@ -198,8 +203,8 @@
198 brcm,pins = <0 1>; 203 brcm,pins = <0 1>;
199 brcm,function = <BCM2835_FSEL_ALT0>; 204 brcm,function = <BCM2835_FSEL_ALT0>;
200 }; 205 };
201 i2c0_gpio32: i2c0_gpio32 { 206 i2c0_gpio28: i2c0_gpio28 {
202 brcm,pins = <32 34>; 207 brcm,pins = <28 29>;
203 brcm,function = <BCM2835_FSEL_ALT0>; 208 brcm,function = <BCM2835_FSEL_ALT0>;
204 }; 209 };
205 i2c0_gpio44: i2c0_gpio44 { 210 i2c0_gpio44: i2c0_gpio44 {
@@ -295,20 +300,28 @@
295 /* Separate from the uart0_gpio14 group 300 /* Separate from the uart0_gpio14 group
296 * because it conflicts with spi1_gpio16, and 301 * because it conflicts with spi1_gpio16, and
297 * people often run uart0 on the two pins 302 * people often run uart0 on the two pins
298 * without flow contrl. 303 * without flow control.
299 */ 304 */
300 uart0_ctsrts_gpio16: uart0_ctsrts_gpio16 { 305 uart0_ctsrts_gpio16: uart0_ctsrts_gpio16 {
301 brcm,pins = <16 17>; 306 brcm,pins = <16 17>;
302 brcm,function = <BCM2835_FSEL_ALT3>; 307 brcm,function = <BCM2835_FSEL_ALT3>;
303 }; 308 };
304 uart0_gpio30: uart0_gpio30 { 309 uart0_ctsrts_gpio30: uart0_ctsrts_gpio30 {
305 brcm,pins = <30 31>; 310 brcm,pins = <30 31>;
306 brcm,function = <BCM2835_FSEL_ALT3>; 311 brcm,function = <BCM2835_FSEL_ALT3>;
307 }; 312 };
308 uart0_ctsrts_gpio32: uart0_ctsrts_gpio32 { 313 uart0_gpio32: uart0_gpio32 {
309 brcm,pins = <32 33>; 314 brcm,pins = <32 33>;
310 brcm,function = <BCM2835_FSEL_ALT3>; 315 brcm,function = <BCM2835_FSEL_ALT3>;
311 }; 316 };
317 uart0_gpio36: uart0_gpio36 {
318 brcm,pins = <36 37>;
319 brcm,function = <BCM2835_FSEL_ALT2>;
320 };
321 uart0_ctsrts_gpio38: uart0_ctsrts_gpio38 {
322 brcm,pins = <38 39>;
323 brcm,function = <BCM2835_FSEL_ALT2>;
324 };
312 325
313 uart1_gpio14: uart1_gpio14 { 326 uart1_gpio14: uart1_gpio14 {
314 brcm,pins = <14 15>; 327 brcm,pins = <14 15>;
@@ -326,10 +339,6 @@
326 brcm,pins = <30 31>; 339 brcm,pins = <30 31>;
327 brcm,function = <BCM2835_FSEL_ALT5>; 340 brcm,function = <BCM2835_FSEL_ALT5>;
328 }; 341 };
329 uart1_gpio36: uart1_gpio36 {
330 brcm,pins = <36 37 38 39>;
331 brcm,function = <BCM2835_FSEL_ALT2>;
332 };
333 uart1_gpio40: uart1_gpio40 { 342 uart1_gpio40: uart1_gpio40 {
334 brcm,pins = <40 41>; 343 brcm,pins = <40 41>;
335 brcm,function = <BCM2835_FSEL_ALT5>; 344 brcm,function = <BCM2835_FSEL_ALT5>;
diff --git a/arch/arm/boot/dts/dra7-evm.dts b/arch/arm/boot/dts/dra7-evm.dts
index 4bc4b575c99b..31a9e061ddd0 100644
--- a/arch/arm/boot/dts/dra7-evm.dts
+++ b/arch/arm/boot/dts/dra7-evm.dts
@@ -204,6 +204,8 @@
204 tps659038: tps659038@58 { 204 tps659038: tps659038@58 {
205 compatible = "ti,tps659038"; 205 compatible = "ti,tps659038";
206 reg = <0x58>; 206 reg = <0x58>;
207 ti,palmas-override-powerhold;
208 ti,system-power-controller;
207 209
208 tps659038_pmic { 210 tps659038_pmic {
209 compatible = "ti,tps659038-pmic"; 211 compatible = "ti,tps659038-pmic";
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index 57892f264cea..e7144662af45 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -2017,4 +2017,8 @@
2017 coefficients = <0 2000>; 2017 coefficients = <0 2000>;
2018}; 2018};
2019 2019
2020&cpu_crit {
2021 temperature = <120000>; /* milli Celsius */
2022};
2023
2020/include/ "dra7xx-clocks.dtsi" 2024/include/ "dra7xx-clocks.dtsi"
diff --git a/arch/arm/boot/dts/imx53-qsrb.dts b/arch/arm/boot/dts/imx53-qsrb.dts
index de2215832372..4e103a905dc9 100644
--- a/arch/arm/boot/dts/imx53-qsrb.dts
+++ b/arch/arm/boot/dts/imx53-qsrb.dts
@@ -23,7 +23,7 @@
23 imx53-qsrb { 23 imx53-qsrb {
24 pinctrl_pmic: pmicgrp { 24 pinctrl_pmic: pmicgrp {
25 fsl,pins = < 25 fsl,pins = <
26 MX53_PAD_CSI0_DAT5__GPIO5_23 0x1e4 /* IRQ */ 26 MX53_PAD_CSI0_DAT5__GPIO5_23 0x1c4 /* IRQ */
27 >; 27 >;
28 }; 28 };
29 }; 29 };
diff --git a/arch/arm/boot/dts/imx6sx-sdb.dts b/arch/arm/boot/dts/imx6sx-sdb.dts
index 5bb8fd57e7f5..d71da30c9cff 100644
--- a/arch/arm/boot/dts/imx6sx-sdb.dts
+++ b/arch/arm/boot/dts/imx6sx-sdb.dts
@@ -12,23 +12,6 @@
12 model = "Freescale i.MX6 SoloX SDB RevB Board"; 12 model = "Freescale i.MX6 SoloX SDB RevB Board";
13}; 13};
14 14
15&cpu0 {
16 operating-points = <
17 /* kHz uV */
18 996000 1250000
19 792000 1175000
20 396000 1175000
21 198000 1175000
22 >;
23 fsl,soc-operating-points = <
24 /* ARM kHz SOC uV */
25 996000 1250000
26 792000 1175000
27 396000 1175000
28 198000 1175000
29 >;
30};
31
32&i2c1 { 15&i2c1 {
33 clock-frequency = <100000>; 16 clock-frequency = <100000>;
34 pinctrl-names = "default"; 17 pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/imx6ul-14x14-evk.dts b/arch/arm/boot/dts/imx6ul-14x14-evk.dts
index f18e1f1d0ce2..d2be8aa3370b 100644
--- a/arch/arm/boot/dts/imx6ul-14x14-evk.dts
+++ b/arch/arm/boot/dts/imx6ul-14x14-evk.dts
@@ -120,10 +120,16 @@
120 120
121 ethphy0: ethernet-phy@2 { 121 ethphy0: ethernet-phy@2 {
122 reg = <2>; 122 reg = <2>;
123 micrel,led-mode = <1>;
124 clocks = <&clks IMX6UL_CLK_ENET_REF>;
125 clock-names = "rmii-ref";
123 }; 126 };
124 127
125 ethphy1: ethernet-phy@1 { 128 ethphy1: ethernet-phy@1 {
126 reg = <1>; 129 reg = <1>;
130 micrel,led-mode = <1>;
131 clocks = <&clks IMX6UL_CLK_ENET2_REF>;
132 clock-names = "rmii-ref";
127 }; 133 };
128 }; 134 };
129}; 135};
diff --git a/arch/arm/boot/dts/include/arm b/arch/arm/boot/dts/include/arm
deleted file mode 120000
index a96aa0ea9d8c..000000000000
--- a/arch/arm/boot/dts/include/arm
+++ /dev/null
@@ -1 +0,0 @@
1.. \ No newline at end of file
diff --git a/arch/arm/boot/dts/include/arm64 b/arch/arm/boot/dts/include/arm64
deleted file mode 120000
index 074a835fca3e..000000000000
--- a/arch/arm/boot/dts/include/arm64
+++ /dev/null
@@ -1 +0,0 @@
1../../../../arm64/boot/dts \ No newline at end of file
diff --git a/arch/arm/boot/dts/include/dt-bindings b/arch/arm/boot/dts/include/dt-bindings
deleted file mode 120000
index 08c00e4972fa..000000000000
--- a/arch/arm/boot/dts/include/dt-bindings
+++ /dev/null
@@ -1 +0,0 @@
1../../../../../include/dt-bindings \ No newline at end of file
diff --git a/arch/arm/boot/dts/keystone-k2l-netcp.dtsi b/arch/arm/boot/dts/keystone-k2l-netcp.dtsi
index b6f26824e83a..66f615a74118 100644
--- a/arch/arm/boot/dts/keystone-k2l-netcp.dtsi
+++ b/arch/arm/boot/dts/keystone-k2l-netcp.dtsi
@@ -137,8 +137,8 @@ netcp: netcp@26000000 {
137 /* NetCP address range */ 137 /* NetCP address range */
138 ranges = <0 0x26000000 0x1000000>; 138 ranges = <0 0x26000000 0x1000000>;
139 139
140 clocks = <&clkpa>, <&clkcpgmac>, <&chipclk12>, <&clkosr>; 140 clocks = <&clkpa>, <&clkcpgmac>, <&chipclk12>;
141 clock-names = "pa_clk", "ethss_clk", "cpts", "osr_clk"; 141 clock-names = "pa_clk", "ethss_clk", "cpts";
142 dma-coherent; 142 dma-coherent;
143 143
144 ti,navigator-dmas = <&dma_gbe 0>, 144 ti,navigator-dmas = <&dma_gbe 0>,
diff --git a/arch/arm/boot/dts/keystone-k2l.dtsi b/arch/arm/boot/dts/keystone-k2l.dtsi
index b58e7ebc0919..148650406cf7 100644
--- a/arch/arm/boot/dts/keystone-k2l.dtsi
+++ b/arch/arm/boot/dts/keystone-k2l.dtsi
@@ -232,6 +232,14 @@
232 }; 232 };
233 }; 233 };
234 234
235 osr: sram@70000000 {
236 compatible = "mmio-sram";
237 reg = <0x70000000 0x10000>;
238 #address-cells = <1>;
239 #size-cells = <1>;
240 clocks = <&clkosr>;
241 };
242
235 dspgpio0: keystone_dsp_gpio@02620240 { 243 dspgpio0: keystone_dsp_gpio@02620240 {
236 compatible = "ti,keystone-dsp-gpio"; 244 compatible = "ti,keystone-dsp-gpio";
237 gpio-controller; 245 gpio-controller;
diff --git a/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts b/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts
index 08cce17a25a0..43e9364083de 100644
--- a/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts
+++ b/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts
@@ -249,9 +249,9 @@
249 OMAP3_CORE1_IOPAD(0x2110, PIN_INPUT | MUX_MODE0) /* cam_xclka.cam_xclka */ 249 OMAP3_CORE1_IOPAD(0x2110, PIN_INPUT | MUX_MODE0) /* cam_xclka.cam_xclka */
250 OMAP3_CORE1_IOPAD(0x2112, PIN_INPUT | MUX_MODE0) /* cam_pclk.cam_pclk */ 250 OMAP3_CORE1_IOPAD(0x2112, PIN_INPUT | MUX_MODE0) /* cam_pclk.cam_pclk */
251 251
252 OMAP3_CORE1_IOPAD(0x2114, PIN_INPUT | MUX_MODE0) /* cam_d0.cam_d0 */ 252 OMAP3_CORE1_IOPAD(0x2116, PIN_INPUT | MUX_MODE0) /* cam_d0.cam_d0 */
253 OMAP3_CORE1_IOPAD(0x2116, PIN_INPUT | MUX_MODE0) /* cam_d1.cam_d1 */ 253 OMAP3_CORE1_IOPAD(0x2118, PIN_INPUT | MUX_MODE0) /* cam_d1.cam_d1 */
254 OMAP3_CORE1_IOPAD(0x2118, PIN_INPUT | MUX_MODE0) /* cam_d2.cam_d2 */ 254 OMAP3_CORE1_IOPAD(0x211a, PIN_INPUT | MUX_MODE0) /* cam_d2.cam_d2 */
255 OMAP3_CORE1_IOPAD(0x211c, PIN_INPUT | MUX_MODE0) /* cam_d3.cam_d3 */ 255 OMAP3_CORE1_IOPAD(0x211c, PIN_INPUT | MUX_MODE0) /* cam_d3.cam_d3 */
256 OMAP3_CORE1_IOPAD(0x211e, PIN_INPUT | MUX_MODE0) /* cam_d4.cam_d4 */ 256 OMAP3_CORE1_IOPAD(0x211e, PIN_INPUT | MUX_MODE0) /* cam_d4.cam_d4 */
257 OMAP3_CORE1_IOPAD(0x2120, PIN_INPUT | MUX_MODE0) /* cam_d5.cam_d5 */ 257 OMAP3_CORE1_IOPAD(0x2120, PIN_INPUT | MUX_MODE0) /* cam_d5.cam_d5 */
diff --git a/arch/arm/boot/dts/mt7623.dtsi b/arch/arm/boot/dts/mt7623.dtsi
index 402579ab70d2..3a9e9b6aea68 100644
--- a/arch/arm/boot/dts/mt7623.dtsi
+++ b/arch/arm/boot/dts/mt7623.dtsi
@@ -72,6 +72,8 @@
72 <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>, 72 <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>,
73 <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>, 73 <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>,
74 <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>; 74 <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
75 clock-frequency = <13000000>;
76 arm,cpu-registers-not-fw-configured;
75 }; 77 };
76 78
77 watchdog: watchdog@10007000 { 79 watchdog: watchdog@10007000 {
diff --git a/arch/arm/boot/dts/omap3-gta04.dtsi b/arch/arm/boot/dts/omap3-gta04.dtsi
index b3a8b1f24499..9ec737069369 100644
--- a/arch/arm/boot/dts/omap3-gta04.dtsi
+++ b/arch/arm/boot/dts/omap3-gta04.dtsi
@@ -55,7 +55,8 @@
55 simple-audio-card,bitclock-master = <&telephony_link_master>; 55 simple-audio-card,bitclock-master = <&telephony_link_master>;
56 simple-audio-card,frame-master = <&telephony_link_master>; 56 simple-audio-card,frame-master = <&telephony_link_master>;
57 simple-audio-card,format = "i2s"; 57 simple-audio-card,format = "i2s";
58 58 simple-audio-card,bitclock-inversion;
59 simple-audio-card,frame-inversion;
59 simple-audio-card,cpu { 60 simple-audio-card,cpu {
60 sound-dai = <&mcbsp4>; 61 sound-dai = <&mcbsp4>;
61 }; 62 };
diff --git a/arch/arm/boot/dts/omap4-panda-a4.dts b/arch/arm/boot/dts/omap4-panda-a4.dts
index 78d363177762..f1a6476af371 100644
--- a/arch/arm/boot/dts/omap4-panda-a4.dts
+++ b/arch/arm/boot/dts/omap4-panda-a4.dts
@@ -13,7 +13,7 @@
13/* Pandaboard Rev A4+ have external pullups on SCL & SDA */ 13/* Pandaboard Rev A4+ have external pullups on SCL & SDA */
14&dss_hdmi_pins { 14&dss_hdmi_pins {
15 pinctrl-single,pins = < 15 pinctrl-single,pins = <
16 OMAP4_IOPAD(0x09a, PIN_INPUT_PULLUP | MUX_MODE0) /* hdmi_cec.hdmi_cec */ 16 OMAP4_IOPAD(0x09a, PIN_INPUT | MUX_MODE0) /* hdmi_cec.hdmi_cec */
17 OMAP4_IOPAD(0x09c, PIN_INPUT | MUX_MODE0) /* hdmi_scl.hdmi_scl */ 17 OMAP4_IOPAD(0x09c, PIN_INPUT | MUX_MODE0) /* hdmi_scl.hdmi_scl */
18 OMAP4_IOPAD(0x09e, PIN_INPUT | MUX_MODE0) /* hdmi_sda.hdmi_sda */ 18 OMAP4_IOPAD(0x09e, PIN_INPUT | MUX_MODE0) /* hdmi_sda.hdmi_sda */
19 >; 19 >;
diff --git a/arch/arm/boot/dts/omap4-panda-es.dts b/arch/arm/boot/dts/omap4-panda-es.dts
index 119f8e657edc..940fe4f7c5f6 100644
--- a/arch/arm/boot/dts/omap4-panda-es.dts
+++ b/arch/arm/boot/dts/omap4-panda-es.dts
@@ -34,7 +34,7 @@
34/* PandaboardES has external pullups on SCL & SDA */ 34/* PandaboardES has external pullups on SCL & SDA */
35&dss_hdmi_pins { 35&dss_hdmi_pins {
36 pinctrl-single,pins = < 36 pinctrl-single,pins = <
37 OMAP4_IOPAD(0x09a, PIN_INPUT_PULLUP | MUX_MODE0) /* hdmi_cec.hdmi_cec */ 37 OMAP4_IOPAD(0x09a, PIN_INPUT | MUX_MODE0) /* hdmi_cec.hdmi_cec */
38 OMAP4_IOPAD(0x09c, PIN_INPUT | MUX_MODE0) /* hdmi_scl.hdmi_scl */ 38 OMAP4_IOPAD(0x09c, PIN_INPUT | MUX_MODE0) /* hdmi_scl.hdmi_scl */
39 OMAP4_IOPAD(0x09e, PIN_INPUT | MUX_MODE0) /* hdmi_sda.hdmi_sda */ 39 OMAP4_IOPAD(0x09e, PIN_INPUT | MUX_MODE0) /* hdmi_sda.hdmi_sda */
40 >; 40 >;
diff --git a/arch/arm/boot/dts/sunxi-h3-h5.dtsi b/arch/arm/boot/dts/sunxi-h3-h5.dtsi
index 1aeeacb3a884..d4f600dbb7eb 100644
--- a/arch/arm/boot/dts/sunxi-h3-h5.dtsi
+++ b/arch/arm/boot/dts/sunxi-h3-h5.dtsi
@@ -558,10 +558,11 @@
558 }; 558 };
559 559
560 r_ccu: clock@1f01400 { 560 r_ccu: clock@1f01400 {
561 compatible = "allwinner,sun50i-a64-r-ccu"; 561 compatible = "allwinner,sun8i-h3-r-ccu";
562 reg = <0x01f01400 0x100>; 562 reg = <0x01f01400 0x100>;
563 clocks = <&osc24M>, <&osc32k>, <&iosc>; 563 clocks = <&osc24M>, <&osc32k>, <&iosc>,
564 clock-names = "hosc", "losc", "iosc"; 564 <&ccu 9>;
565 clock-names = "hosc", "losc", "iosc", "pll-periph";
565 #clock-cells = <1>; 566 #clock-cells = <1>;
566 #reset-cells = <1>; 567 #reset-cells = <1>;
567 }; 568 };
diff --git a/arch/arm/boot/dts/versatile-pb.dts b/arch/arm/boot/dts/versatile-pb.dts
index 33a8eb28374e..06e2331f666d 100644
--- a/arch/arm/boot/dts/versatile-pb.dts
+++ b/arch/arm/boot/dts/versatile-pb.dts
@@ -1,4 +1,4 @@
1#include <versatile-ab.dts> 1#include "versatile-ab.dts"
2 2
3/ { 3/ {
4 model = "ARM Versatile PB"; 4 model = "ARM Versatile PB";
diff --git a/arch/arm/common/mcpm_entry.c b/arch/arm/common/mcpm_entry.c
index cf062472e07b..2b913f17d50f 100644
--- a/arch/arm/common/mcpm_entry.c
+++ b/arch/arm/common/mcpm_entry.c
@@ -235,7 +235,7 @@ int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster)
235 return ret; 235 return ret;
236} 236}
237 237
238typedef void (*phys_reset_t)(unsigned long); 238typedef typeof(cpu_reset) phys_reset_t;
239 239
240void mcpm_cpu_power_down(void) 240void mcpm_cpu_power_down(void)
241{ 241{
@@ -300,7 +300,7 @@ void mcpm_cpu_power_down(void)
300 * on the CPU. 300 * on the CPU.
301 */ 301 */
302 phys_reset = (phys_reset_t)(unsigned long)__pa_symbol(cpu_reset); 302 phys_reset = (phys_reset_t)(unsigned long)__pa_symbol(cpu_reset);
303 phys_reset(__pa_symbol(mcpm_entry_point)); 303 phys_reset(__pa_symbol(mcpm_entry_point), false);
304 304
305 /* should never get here */ 305 /* should never get here */
306 BUG(); 306 BUG();
@@ -389,7 +389,7 @@ static int __init nocache_trampoline(unsigned long _arg)
389 __mcpm_cpu_down(cpu, cluster); 389 __mcpm_cpu_down(cpu, cluster);
390 390
391 phys_reset = (phys_reset_t)(unsigned long)__pa_symbol(cpu_reset); 391 phys_reset = (phys_reset_t)(unsigned long)__pa_symbol(cpu_reset);
392 phys_reset(__pa_symbol(mcpm_entry_point)); 392 phys_reset(__pa_symbol(mcpm_entry_point), false);
393 BUG(); 393 BUG();
394} 394}
395 395
diff --git a/arch/arm/configs/gemini_defconfig b/arch/arm/configs/gemini_defconfig
new file mode 100644
index 000000000000..d2d75fa664a6
--- /dev/null
+++ b/arch/arm/configs/gemini_defconfig
@@ -0,0 +1,68 @@
1# CONFIG_LOCALVERSION_AUTO is not set
2CONFIG_SYSVIPC=y
3CONFIG_NO_HZ_IDLE=y
4CONFIG_BSD_PROCESS_ACCT=y
5CONFIG_USER_NS=y
6CONFIG_RELAY=y
7CONFIG_BLK_DEV_INITRD=y
8CONFIG_PARTITION_ADVANCED=y
9CONFIG_ARCH_MULTI_V4=y
10# CONFIG_ARCH_MULTI_V7 is not set
11CONFIG_ARCH_GEMINI=y
12CONFIG_PCI=y
13CONFIG_PREEMPT=y
14CONFIG_AEABI=y
15CONFIG_CMDLINE="console=ttyS0,115200n8"
16CONFIG_KEXEC=y
17CONFIG_BINFMT_MISC=y
18CONFIG_PM=y
19CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
20CONFIG_DEVTMPFS=y
21CONFIG_MTD=y
22CONFIG_MTD_BLOCK=y
23CONFIG_MTD_CFI=y
24CONFIG_MTD_CFI_INTELEXT=y
25CONFIG_MTD_CFI_AMDSTD=y
26CONFIG_MTD_CFI_STAA=y
27CONFIG_MTD_PHYSMAP=y
28CONFIG_MTD_PHYSMAP_OF=y
29CONFIG_BLK_DEV_RAM=y
30CONFIG_BLK_DEV_RAM_SIZE=16384
31# CONFIG_SCSI_PROC_FS is not set
32CONFIG_BLK_DEV_SD=y
33# CONFIG_SCSI_LOWLEVEL is not set
34CONFIG_ATA=y
35CONFIG_INPUT_EVDEV=y
36CONFIG_KEYBOARD_GPIO=y
37# CONFIG_INPUT_MOUSE is not set
38# CONFIG_LEGACY_PTYS is not set
39CONFIG_SERIAL_8250=y
40CONFIG_SERIAL_8250_CONSOLE=y
41CONFIG_SERIAL_8250_NR_UARTS=1
42CONFIG_SERIAL_8250_RUNTIME_UARTS=1
43CONFIG_SERIAL_OF_PLATFORM=y
44# CONFIG_HW_RANDOM is not set
45# CONFIG_HWMON is not set
46CONFIG_WATCHDOG=y
47CONFIG_GEMINI_WATCHDOG=y
48CONFIG_USB=y
49CONFIG_USB_MON=y
50CONFIG_USB_FOTG210_HCD=y
51CONFIG_USB_STORAGE=y
52CONFIG_NEW_LEDS=y
53CONFIG_LEDS_CLASS=y
54CONFIG_LEDS_GPIO=y
55CONFIG_LEDS_TRIGGERS=y
56CONFIG_LEDS_TRIGGER_HEARTBEAT=y
57CONFIG_RTC_CLASS=y
58CONFIG_RTC_DRV_GEMINI=y
59CONFIG_DMADEVICES=y
60# CONFIG_DNOTIFY is not set
61CONFIG_TMPFS=y
62CONFIG_TMPFS_POSIX_ACL=y
63CONFIG_ROMFS_FS=y
64CONFIG_NLS_CODEPAGE_437=y
65CONFIG_NLS_ISO8859_1=y
66# CONFIG_ENABLE_WARN_DEPRECATED is not set
67# CONFIG_ENABLE_MUST_CHECK is not set
68CONFIG_DEBUG_FS=y
diff --git a/arch/arm/include/asm/device.h b/arch/arm/include/asm/device.h
index 36ec9c8f6e16..3234fe9bba6e 100644
--- a/arch/arm/include/asm/device.h
+++ b/arch/arm/include/asm/device.h
@@ -19,7 +19,8 @@ struct dev_archdata {
19#ifdef CONFIG_XEN 19#ifdef CONFIG_XEN
20 const struct dma_map_ops *dev_dma_ops; 20 const struct dma_map_ops *dev_dma_ops;
21#endif 21#endif
22 bool dma_coherent; 22 unsigned int dma_coherent:1;
23 unsigned int dma_ops_setup:1;
23}; 24};
24 25
25struct omap_device; 26struct omap_device;
diff --git a/arch/arm/include/asm/kvm_coproc.h b/arch/arm/include/asm/kvm_coproc.h
index 4917c2f7e459..e74ab0fbab79 100644
--- a/arch/arm/include/asm/kvm_coproc.h
+++ b/arch/arm/include/asm/kvm_coproc.h
@@ -31,7 +31,8 @@ void kvm_register_target_coproc_table(struct kvm_coproc_target_table *table);
31int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run); 31int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run);
32int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run); 32int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run);
33int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run); 33int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run);
34int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run); 34int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
35int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
35int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run); 36int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
36int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run); 37int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
37 38
diff --git a/arch/arm/include/asm/pgtable-nommu.h b/arch/arm/include/asm/pgtable-nommu.h
index 302240c19a5a..a0d726a47c8a 100644
--- a/arch/arm/include/asm/pgtable-nommu.h
+++ b/arch/arm/include/asm/pgtable-nommu.h
@@ -66,6 +66,7 @@ typedef pte_t *pte_addr_t;
66#define pgprot_noncached(prot) (prot) 66#define pgprot_noncached(prot) (prot)
67#define pgprot_writecombine(prot) (prot) 67#define pgprot_writecombine(prot) (prot)
68#define pgprot_dmacoherent(prot) (prot) 68#define pgprot_dmacoherent(prot) (prot)
69#define pgprot_device(prot) (prot)
69 70
70 71
71/* 72/*
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 32e1a9513dc7..4e80bf7420d4 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -315,7 +315,7 @@ static void __init cacheid_init(void)
315 if (arch >= CPU_ARCH_ARMv6) { 315 if (arch >= CPU_ARCH_ARMv6) {
316 unsigned int cachetype = read_cpuid_cachetype(); 316 unsigned int cachetype = read_cpuid_cachetype();
317 317
318 if ((arch == CPU_ARCH_ARMv7M) && !cachetype) { 318 if ((arch == CPU_ARCH_ARMv7M) && !(cachetype & 0xf000f)) {
319 cacheid = 0; 319 cacheid = 0;
320 } else if ((cachetype & (7 << 29)) == 4 << 29) { 320 } else if ((cachetype & (7 << 29)) == 4 << 29) {
321 /* ARMv7 register format */ 321 /* ARMv7 register format */
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
index 2c14b69511e9..6d1d2e26dfe5 100644
--- a/arch/arm/kvm/coproc.c
+++ b/arch/arm/kvm/coproc.c
@@ -32,6 +32,7 @@
 #include <asm/vfp.h>
 #include "../vfp/vfpinstr.h"
 
+#define CREATE_TRACE_POINTS
 #include "trace.h"
 #include "coproc.h"
 
@@ -111,12 +112,6 @@ int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	return 1;
 }
 
-int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
-	kvm_inject_undefined(vcpu);
-	return 1;
-}
-
 static void reset_mpidr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
 {
 	/*
@@ -284,7 +279,7 @@ static bool access_gic_sre(struct kvm_vcpu *vcpu,
  * must always support PMCCNTR (the cycle counter): we just RAZ/WI for
  * all PM registers, which doesn't crash the guest kernel at least.
  */
-static bool pm_fake(struct kvm_vcpu *vcpu,
+static bool trap_raz_wi(struct kvm_vcpu *vcpu,
 		    const struct coproc_params *p,
 		    const struct coproc_reg *r)
 {
@@ -294,19 +289,19 @@ static bool pm_fake(struct kvm_vcpu *vcpu,
 	return read_zero(vcpu, p);
 }
 
-#define access_pmcr pm_fake
-#define access_pmcntenset pm_fake
-#define access_pmcntenclr pm_fake
-#define access_pmovsr pm_fake
-#define access_pmselr pm_fake
-#define access_pmceid0 pm_fake
-#define access_pmceid1 pm_fake
-#define access_pmccntr pm_fake
-#define access_pmxevtyper pm_fake
-#define access_pmxevcntr pm_fake
-#define access_pmuserenr pm_fake
-#define access_pmintenset pm_fake
-#define access_pmintenclr pm_fake
+#define access_pmcr trap_raz_wi
+#define access_pmcntenset trap_raz_wi
+#define access_pmcntenclr trap_raz_wi
+#define access_pmovsr trap_raz_wi
+#define access_pmselr trap_raz_wi
+#define access_pmceid0 trap_raz_wi
+#define access_pmceid1 trap_raz_wi
+#define access_pmccntr trap_raz_wi
+#define access_pmxevtyper trap_raz_wi
+#define access_pmxevcntr trap_raz_wi
+#define access_pmuserenr trap_raz_wi
+#define access_pmintenset trap_raz_wi
+#define access_pmintenclr trap_raz_wi
 
 /* Architected CP15 registers.
  * CRn denotes the primary register number, but is copied to the CRm in the
@@ -532,12 +527,7 @@ static int emulate_cp15(struct kvm_vcpu *vcpu,
 	return 1;
 }
 
-/**
- * kvm_handle_cp15_64 -- handles a mrrc/mcrr trap on a guest CP15 access
- * @vcpu: The VCPU pointer
- * @run: The kvm_run struct
- */
-int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
+static struct coproc_params decode_64bit_hsr(struct kvm_vcpu *vcpu)
 {
 	struct coproc_params params;
 
@@ -551,9 +541,38 @@ int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	params.Rt2 = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf;
 	params.CRm = 0;
 
+	return params;
+}
+
+/**
+ * kvm_handle_cp15_64 -- handles a mrrc/mcrr trap on a guest CP15 access
+ * @vcpu: The VCPU pointer
+ * @run: The kvm_run struct
+ */
+int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	struct coproc_params params = decode_64bit_hsr(vcpu);
+
 	return emulate_cp15(vcpu, &params);
 }
 
+/**
+ * kvm_handle_cp14_64 -- handles a mrrc/mcrr trap on a guest CP14 access
+ * @vcpu: The VCPU pointer
+ * @run: The kvm_run struct
+ */
+int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	struct coproc_params params = decode_64bit_hsr(vcpu);
+
+	/* raz_wi cp14 */
+	trap_raz_wi(vcpu, &params, NULL);
+
+	/* handled */
+	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+	return 1;
+}
+
 static void reset_coproc_regs(struct kvm_vcpu *vcpu,
 			      const struct coproc_reg *table, size_t num)
 {
@@ -564,12 +583,7 @@ static void reset_coproc_regs(struct kvm_vcpu *vcpu,
 		table[i].reset(vcpu, &table[i]);
 }
 
-/**
- * kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access
- * @vcpu: The VCPU pointer
- * @run: The kvm_run struct
- */
-int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
+static struct coproc_params decode_32bit_hsr(struct kvm_vcpu *vcpu)
 {
 	struct coproc_params params;
 
@@ -583,9 +597,37 @@ int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	params.Op2 = (kvm_vcpu_get_hsr(vcpu) >> 17) & 0x7;
 	params.Rt2 = 0;
 
+	return params;
+}
+
+/**
+ * kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access
+ * @vcpu: The VCPU pointer
+ * @run: The kvm_run struct
+ */
+int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	struct coproc_params params = decode_32bit_hsr(vcpu);
+	return emulate_cp15(vcpu, &params);
+}
+
+/**
+ * kvm_handle_cp14_32 -- handles a mrc/mcr trap on a guest CP14 access
+ * @vcpu: The VCPU pointer
+ * @run: The kvm_run struct
+ */
+int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	struct coproc_params params = decode_32bit_hsr(vcpu);
+
+	/* raz_wi cp14 */
+	trap_raz_wi(vcpu, &params, NULL);
+
+	/* handled */
+	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+	return 1;
+}
+
 /******************************************************************************
  * Userspace API
  *****************************************************************************/
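
A minimal sketch of the RAZ/WI idiom the rename makes explicit, mirroring the pm_fake/trap_raz_wi body shown above (ignore_write() and read_zero() are the helpers coproc.c already uses):

	/* Guest writes are silently ignored (WI) and guest reads return
	 * zero (RAZ); either way the trapped access counts as handled. */
	static bool trap_raz_wi(struct kvm_vcpu *vcpu,
				const struct coproc_params *p,
				const struct coproc_reg *r)
	{
		if (p->is_write)
			return ignore_write(vcpu, p);
		else
			return read_zero(vcpu, p);
	}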
diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c
index 5fd7968cdae9..f86a9aaef462 100644
--- a/arch/arm/kvm/handle_exit.c
+++ b/arch/arm/kvm/handle_exit.c
@@ -95,9 +95,9 @@ static exit_handle_fn arm_exit_handlers[] = {
 	[HSR_EC_WFI]		= kvm_handle_wfx,
 	[HSR_EC_CP15_32]	= kvm_handle_cp15_32,
 	[HSR_EC_CP15_64]	= kvm_handle_cp15_64,
-	[HSR_EC_CP14_MR]	= kvm_handle_cp14_access,
+	[HSR_EC_CP14_MR]	= kvm_handle_cp14_32,
 	[HSR_EC_CP14_LS]	= kvm_handle_cp14_load_store,
-	[HSR_EC_CP14_64]	= kvm_handle_cp14_access,
+	[HSR_EC_CP14_64]	= kvm_handle_cp14_64,
 	[HSR_EC_CP_0_13]	= kvm_handle_cp_0_13_access,
 	[HSR_EC_CP10_ID]	= kvm_handle_cp10_id,
 	[HSR_EC_HVC]		= handle_hvc,
diff --git a/arch/arm/kvm/hyp/Makefile b/arch/arm/kvm/hyp/Makefile
index 3023bb530edf..8679405b0b2b 100644
--- a/arch/arm/kvm/hyp/Makefile
+++ b/arch/arm/kvm/hyp/Makefile
@@ -2,6 +2,8 @@
 # Makefile for Kernel-based Virtual Machine module, HYP part
 #
 
+ccflags-y += -fno-stack-protector
+
 KVM=../../../../virt/kvm
 
 obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v2-sr.o
diff --git a/arch/arm/kvm/hyp/switch.c b/arch/arm/kvm/hyp/switch.c
index 92678b7bd046..624a510d31df 100644
--- a/arch/arm/kvm/hyp/switch.c
+++ b/arch/arm/kvm/hyp/switch.c
@@ -48,7 +48,9 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu, u32 *fpexc_host)
 	write_sysreg(HSTR_T(15), HSTR);
 	write_sysreg(HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11), HCPTR);
 	val = read_sysreg(HDCR);
-	write_sysreg(val | HDCR_TPM | HDCR_TPMCR, HDCR);
+	val |= HDCR_TPM | HDCR_TPMCR;		/* trap performance monitors */
+	val |= HDCR_TDRA | HDCR_TDOSA | HDCR_TDA; /* trap debug regs */
+	write_sysreg(val, HDCR);
 }
 
 static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
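
The hunk's shape, reduced to a standalone sketch: accumulate every trap bit in val and perform a single register write. The bit positions below are illustrative constants, not copied from the kernel headers:

	#include <stdint.h>

	#define HDCR_TPMCR	(1u << 5)	/* illustrative positions only */
	#define HDCR_TPM	(1u << 6)
	#define HDCR_TDA	(1u << 9)
	#define HDCR_TDOSA	(1u << 10)
	#define HDCR_TDRA	(1u << 11)

	uint32_t build_hdcr(uint32_t val)
	{
		val |= HDCR_TPM | HDCR_TPMCR;			/* trap performance monitors */
		val |= HDCR_TDRA | HDCR_TDOSA | HDCR_TDA;	/* trap debug registers */
		return val;					/* written back once */
	}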
diff --git a/arch/arm/kvm/init.S b/arch/arm/kvm/init.S
index 570ed4a9c261..5386528665b5 100644
--- a/arch/arm/kvm/init.S
+++ b/arch/arm/kvm/init.S
@@ -104,7 +104,6 @@ __do_hyp_init:
 	@  - Write permission implies XN: disabled
 	@  - Instruction cache: enabled
 	@  - Data/Unified cache: enabled
-	@  - Memory alignment checks: enabled
 	@  - MMU: enabled (this code must be run from an identity mapping)
 	mrc	p15, 4, r0, c1, c0, 0	@ HSCR
 	ldr	r2, =HSCTLR_MASK
@@ -112,8 +111,8 @@
 	mrc	p15, 0, r1, c1, c0, 0	@ SCTLR
 	ldr	r2, =(HSCTLR_EE | HSCTLR_FI | HSCTLR_I | HSCTLR_C)
 	and	r1, r1, r2
- ARM(	ldr	r2, =(HSCTLR_M | HSCTLR_A)			)
- THUMB(	ldr	r2, =(HSCTLR_M | HSCTLR_A | HSCTLR_TE)		)
+ ARM(	ldr	r2, =(HSCTLR_M)					)
+ THUMB(	ldr	r2, =(HSCTLR_M | HSCTLR_TE)			)
 	orr	r1, r1, r2
 	orr	r0, r0, r1
 	mcr	p15, 4, r0, c1, c0, 0	@ HSCR
diff --git a/arch/arm/kvm/trace.h b/arch/arm/kvm/trace.h
index fc0943776db2..b0d10648c486 100644
--- a/arch/arm/kvm/trace.h
+++ b/arch/arm/kvm/trace.h
@@ -1,5 +1,5 @@
-#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_KVM_H
+#if !defined(_TRACE_ARM_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_ARM_KVM_H
 
 #include <linux/tracepoint.h>
 
@@ -74,10 +74,10 @@ TRACE_EVENT(kvm_hvc,
 		  __entry->vcpu_pc, __entry->r0, __entry->imm)
 );
 
-#endif /* _TRACE_KVM_H */
+#endif /* _TRACE_ARM_KVM_H */
 
 #undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH arch/arm/kvm
+#define TRACE_INCLUDE_PATH .
 #undef TRACE_INCLUDE_FILE
 #define TRACE_INCLUDE_FILE trace
 
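
These changes follow the usual tracepoint convention; a sketch of the consuming side, as the coproc.c hunk above now does it:

	/* Exactly one .c file expands the event definitions; every other
	 * user just includes the header.  With TRACE_INCLUDE_PATH set to
	 * ".", the header is located relative to the including file. */
	#define CREATE_TRACE_POINTS
	#include "trace.h"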
diff --git a/arch/arm/mach-at91/Kconfig b/arch/arm/mach-at91/Kconfig
index 841e924143f9..cbd959b73654 100644
--- a/arch/arm/mach-at91/Kconfig
+++ b/arch/arm/mach-at91/Kconfig
@@ -1,6 +1,7 @@
 menuconfig ARCH_AT91
 	bool "Atmel SoCs"
 	depends on ARCH_MULTI_V4T || ARCH_MULTI_V5 || ARCH_MULTI_V7
+	select ARM_CPU_SUSPEND if PM
 	select COMMON_CLK_AT91
 	select GPIOLIB
 	select PINCTRL
diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
index 2cd27c830ab6..283e79ab587d 100644
--- a/arch/arm/mach-at91/pm.c
+++ b/arch/arm/mach-at91/pm.c
@@ -335,7 +335,7 @@ static const struct ramc_info ramc_infos[] __initconst = {
 	{ .idle = sama5d3_ddr_standby, .memctrl = AT91_MEMCTRL_DDRSDR},
 };
 
-static const struct of_device_id const ramc_ids[] __initconst = {
+static const struct of_device_id ramc_ids[] __initconst = {
 	{ .compatible = "atmel,at91rm9200-sdramc", .data = &ramc_infos[0] },
 	{ .compatible = "atmel,at91sam9260-sdramc", .data = &ramc_infos[1] },
 	{ .compatible = "atmel,at91sam9g45-ddramc", .data = &ramc_infos[2] },
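
For context, a standalone illustration (hypothetical type) of the defect fixed here and in the sibling hunks below: in such a declaration both const qualifiers apply to the element type, so the second one is redundant and gcc diagnoses it under -Wduplicate-decl-specifier:

	struct id_sketch {
		const char *compatible;
	};

	/* Was: static const struct id_sketch const ids[] = ...
	 * The duplicate qualifier adds nothing; one const already makes
	 * the array elements read-only. */
	static const struct id_sketch ids[] = {
		{ .compatible = "vendor,device" },
		{ /* sentinel */ },
	};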
diff --git a/arch/arm/mach-bcm/bcm_kona_smc.c b/arch/arm/mach-bcm/bcm_kona_smc.c
index cf3f8658f0e5..a55a7ecf146a 100644
--- a/arch/arm/mach-bcm/bcm_kona_smc.c
+++ b/arch/arm/mach-bcm/bcm_kona_smc.c
@@ -33,7 +33,7 @@ struct bcm_kona_smc_data {
 	unsigned result;
 };
 
-static const struct of_device_id const bcm_kona_smc_ids[] __initconst = {
+static const struct of_device_id bcm_kona_smc_ids[] __initconst = {
 	{.compatible = "brcm,kona-smc"},
 	{.compatible = "bcm,kona-smc"}, /* deprecated name */
 	{},
diff --git a/arch/arm/mach-cns3xxx/core.c b/arch/arm/mach-cns3xxx/core.c
index 03da3813f1ab..7d5a44a06648 100644
--- a/arch/arm/mach-cns3xxx/core.c
+++ b/arch/arm/mach-cns3xxx/core.c
@@ -346,7 +346,7 @@ static struct usb_ohci_pdata cns3xxx_usb_ohci_pdata = {
 	.power_off = csn3xxx_usb_power_off,
 };
 
-static const struct of_dev_auxdata const cns3xxx_auxdata[] __initconst = {
+static const struct of_dev_auxdata cns3xxx_auxdata[] __initconst = {
 	{ "intel,usb-ehci", CNS3XXX_USB_BASE, "ehci-platform", &cns3xxx_usb_ehci_pdata },
 	{ "intel,usb-ohci", CNS3XXX_USB_OHCI_BASE, "ohci-platform", &cns3xxx_usb_ohci_pdata },
 	{ "cavium,cns3420-ahci", CNS3XXX_SATA2_BASE, "ahci", NULL },
diff --git a/arch/arm/mach-davinci/pm.c b/arch/arm/mach-davinci/pm.c
index efb80354f303..b5cc05dc2cb2 100644
--- a/arch/arm/mach-davinci/pm.c
+++ b/arch/arm/mach-davinci/pm.c
@@ -153,7 +153,8 @@ int __init davinci_pm_init(void)
 	davinci_sram_suspend = sram_alloc(davinci_cpu_suspend_sz, NULL);
 	if (!davinci_sram_suspend) {
 		pr_err("PM: cannot allocate SRAM memory\n");
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto no_sram_mem;
 	}
 
 	davinci_sram_push(davinci_sram_suspend, davinci_cpu_suspend,
@@ -161,6 +162,10 @@
 
 	suspend_set_ops(&davinci_pm_ops);
 
+	return 0;
+
+no_sram_mem:
+	iounmap(pm_config.ddrpsc_reg_base);
 no_ddrpsc_mem:
 	iounmap(pm_config.ddrpll_reg_base);
 no_ddrpll_mem:
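
A compilable sketch of the unwind pattern this fix completes, with hypothetical acquire()/release() standing in for sram_alloc() and the ioremap()ed regions:

	#include <stdlib.h>

	static void *acquire(void)	{ return malloc(1); }
	static void release(void *p)	{ free(p); }

	int init_sketch(void)
	{
		void *a, *b;
		int ret;

		a = acquire();
		if (!a)
			return -1;	/* nothing to undo yet */

		b = acquire();
		if (!b) {
			ret = -1;
			goto err_a;	/* undo only what already succeeded */
		}
		return 0;

	err_a:
		release(a);
		return ret;
	}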
diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h
index 3089d3bfa19b..8cc6338fcb12 100644
--- a/arch/arm/mach-omap2/common.h
+++ b/arch/arm/mach-omap2/common.h
@@ -266,11 +266,12 @@ extern int omap4_cpu_kill(unsigned int cpu);
 extern const struct smp_operations omap4_smp_ops;
 #endif
 
+extern u32 omap4_get_cpu1_ns_pa_addr(void);
+
 #if defined(CONFIG_SMP) && defined(CONFIG_PM)
 extern int omap4_mpuss_init(void);
 extern int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state);
 extern int omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state);
-extern u32 omap4_get_cpu1_ns_pa_addr(void);
 #else
 static inline int omap4_enter_lowpower(unsigned int cpu,
 					unsigned int power_state)
diff --git a/arch/arm/mach-omap2/omap-mpuss-lowpower.c b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
index 03ec6d307c82..4cfc4f9b2c69 100644
--- a/arch/arm/mach-omap2/omap-mpuss-lowpower.c
+++ b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
@@ -213,11 +213,6 @@ static void __init save_l2x0_context(void)
 {}
 #endif
 
-u32 omap4_get_cpu1_ns_pa_addr(void)
-{
-	return old_cpu1_ns_pa_addr;
-}
-
 /**
  * omap4_enter_lowpower: OMAP4 MPUSS Low Power Entry Function
  * The purpose of this function is to manage low power programming
@@ -457,6 +452,11 @@ int __init omap4_mpuss_init(void)
 
 #endif
 
+u32 omap4_get_cpu1_ns_pa_addr(void)
+{
+	return old_cpu1_ns_pa_addr;
+}
+
 /*
  * For kexec, we must set CPU1_WAKEUP_NS_PA_ADDR to point to
  * current kernel's secondary_startup() early before
diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
index 3faf454ba487..33e4953c61a8 100644
--- a/arch/arm/mach-omap2/omap-smp.c
+++ b/arch/arm/mach-omap2/omap-smp.c
@@ -306,7 +306,6 @@ static void __init omap4_smp_maybe_reset_cpu1(struct omap_smp_config *c)
 
 	cpu1_startup_pa = readl_relaxed(cfg.wakeupgen_base +
 					OMAP_AUX_CORE_BOOT_1);
-	cpu1_ns_pa_addr = omap4_get_cpu1_ns_pa_addr();
 
 	/* Did the configured secondary_startup() get overwritten? */
 	if (!omap4_smp_cpu1_startup_valid(cpu1_startup_pa))
@@ -316,9 +315,13 @@ static void __init omap4_smp_maybe_reset_cpu1(struct omap_smp_config *c)
 	 * If omap4 or 5 has NS_PA_ADDR configured, CPU1 may be in a
 	 * deeper idle state in WFI and will wake to an invalid address.
 	 */
-	if ((soc_is_omap44xx() || soc_is_omap54xx()) &&
-	    !omap4_smp_cpu1_startup_valid(cpu1_ns_pa_addr))
-		needs_reset = true;
+	if ((soc_is_omap44xx() || soc_is_omap54xx())) {
+		cpu1_ns_pa_addr = omap4_get_cpu1_ns_pa_addr();
+		if (!omap4_smp_cpu1_startup_valid(cpu1_ns_pa_addr))
+			needs_reset = true;
+	} else {
+		cpu1_ns_pa_addr = 0;
+	}
 
 	if (!needs_reset || !c->cpu1_rstctrl_va)
 		return;
diff --git a/arch/arm/mach-omap2/prm_common.c b/arch/arm/mach-omap2/prm_common.c
index 2b138b65129a..dc11841ca334 100644
--- a/arch/arm/mach-omap2/prm_common.c
+++ b/arch/arm/mach-omap2/prm_common.c
@@ -711,7 +711,7 @@ static struct omap_prcm_init_data scrm_data __initdata = {
 };
 #endif
 
-static const struct of_device_id const omap_prcm_dt_match_table[] __initconst = {
+static const struct of_device_id omap_prcm_dt_match_table[] __initconst = {
 #ifdef CONFIG_SOC_AM33XX
 	{ .compatible = "ti,am3-prcm", .data = &am3_prm_data },
 #endif
diff --git a/arch/arm/mach-omap2/vc.c b/arch/arm/mach-omap2/vc.c
index 2028167fff31..d76b1e5eb8ba 100644
--- a/arch/arm/mach-omap2/vc.c
+++ b/arch/arm/mach-omap2/vc.c
@@ -559,7 +559,7 @@ struct i2c_init_data {
 	u8 hsscll_12;
 };
 
-static const struct i2c_init_data const omap4_i2c_timing_data[] __initconst = {
+static const struct i2c_init_data omap4_i2c_timing_data[] __initconst = {
 	{
 		.load = 50,
 		.loadbits = 0x3,
diff --git a/arch/arm/mach-spear/time.c b/arch/arm/mach-spear/time.c
index 4878ba90026d..289e036c9c30 100644
--- a/arch/arm/mach-spear/time.c
+++ b/arch/arm/mach-spear/time.c
@@ -204,7 +204,7 @@ static void __init spear_clockevent_init(int irq)
 	setup_irq(irq, &spear_timer_irq);
 }
 
-static const struct of_device_id const timer_of_match[] __initconst = {
+static const struct of_device_id timer_of_match[] __initconst = {
 	{ .compatible = "st,spear-timer", },
 	{ },
 };
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index c742dfd2967b..bd83c531828a 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -2311,7 +2311,14 @@ int arm_iommu_attach_device(struct device *dev,
 }
 EXPORT_SYMBOL_GPL(arm_iommu_attach_device);
 
-static void __arm_iommu_detach_device(struct device *dev)
+/**
+ * arm_iommu_detach_device
+ * @dev: valid struct device pointer
+ *
+ * Detaches the provided device from a previously attached map.
+ * This voids the dma operations (dma_map_ops pointer)
+ */
+void arm_iommu_detach_device(struct device *dev)
 {
 	struct dma_iommu_mapping *mapping;
 
@@ -2324,22 +2331,10 @@ static void __arm_iommu_detach_device(struct device *dev)
 	iommu_detach_device(mapping->domain, dev);
 	kref_put(&mapping->kref, release_iommu_mapping);
 	to_dma_iommu_mapping(dev) = NULL;
+	set_dma_ops(dev, NULL);
 
 	pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev));
 }
-
-/**
- * arm_iommu_detach_device
- * @dev: valid struct device pointer
- *
- * Detaches the provided device from a previously attached map.
- * This voids the dma operations (dma_map_ops pointer)
- */
-void arm_iommu_detach_device(struct device *dev)
-{
-	__arm_iommu_detach_device(dev);
-	set_dma_ops(dev, NULL);
-}
 EXPORT_SYMBOL_GPL(arm_iommu_detach_device);
 
 static const struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent)
@@ -2379,7 +2374,7 @@ static void arm_teardown_iommu_dma_ops(struct device *dev)
 	if (!mapping)
 		return;
 
-	__arm_iommu_detach_device(dev);
+	arm_iommu_detach_device(dev);
 	arm_iommu_release_mapping(mapping);
 }
 
@@ -2430,9 +2425,13 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 		dev->dma_ops = xen_dma_ops;
 	}
 #endif
+	dev->archdata.dma_ops_setup = true;
 }
 
 void arch_teardown_dma_ops(struct device *dev)
 {
+	if (!dev->archdata.dma_ops_setup)
+		return;
+
 	arm_teardown_iommu_dma_ops(dev);
 }
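
The arch_teardown_dma_ops() guard, reduced to a standalone sketch (the struct here is hypothetical; the kernel tracks the flag in dev->archdata, as set up in the device.h hunk earlier):

	#include <stdbool.h>

	struct dev_sketch {
		bool dma_ops_setup;
	};

	void teardown_sketch(struct dev_sketch *dev)
	{
		if (!dev->dma_ops_setup)
			return;		/* setup never ran; nothing to undo */
		/* ...detach the IOMMU mapping and clear the dma_map_ops here... */
		dev->dma_ops_setup = false;
	}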
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index 2239fde10b80..f0701d8d24df 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -90,7 +90,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 
@@ -141,7 +141,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 		addr = PAGE_ALIGN(addr);
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 
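
A userspace model of the changed check, assuming a guard gap of 1 MiB purely for illustration (the real value is the tunable stack_guard_gap):

	#include <stdbool.h>

	#define GUARD_GAP	0x100000UL	/* hypothetical; kernel uses stack_guard_gap */

	struct vma_sketch {
		unsigned long	vm_start;
		bool		grows_down;	/* a VM_GROWSDOWN stack */
	};

	/* vm_start_gap() lowers the effective start of a downward-growing
	 * stack so nothing can be mapped flush against it. */
	static unsigned long vm_start_gap(const struct vma_sketch *vma)
	{
		return vma->grows_down ? vma->vm_start - GUARD_GAP : vma->vm_start;
	}

	static bool request_fits(unsigned long addr, unsigned long len,
				 const struct vma_sketch *next)
	{
		return !next || addr + len <= vm_start_gap(next);
	}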
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 31af3cb59a60..e46a6a446cdd 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -1218,15 +1218,15 @@ void __init adjust_lowmem_bounds(void)
 
 	high_memory = __va(arm_lowmem_limit - 1) + 1;
 
+	if (!memblock_limit)
+		memblock_limit = arm_lowmem_limit;
+
 	/*
 	 * Round the memblock limit down to a pmd size.  This
 	 * helps to ensure that we will allocate memory from the
 	 * last full pmd, which should be mapped.
 	 */
-	if (memblock_limit)
-		memblock_limit = round_down(memblock_limit, PMD_SIZE);
-	if (!memblock_limit)
-		memblock_limit = arm_lowmem_limit;
+	memblock_limit = round_down(memblock_limit, PMD_SIZE);
 
 	if (!IS_ENABLED(CONFIG_HIGHMEM) || cache_is_vipt_aliasing()) {
 		if (memblock_end_of_DRAM() > arm_lowmem_limit) {
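
A worked example of why the default has to be applied before the rounding (the address values below are made up):

	#include <stdio.h>

	#define PMD_SIZE	0x200000UL	/* 2 MiB sections on ARM */

	int main(void)
	{
		unsigned long arm_lowmem_limit = 0x2ff00000;	/* hypothetical */
		unsigned long memblock_limit   = 0;		/* "no limit found" */

		/* New order: pick the default first, then round it like
		 * any other value, guaranteeing a PMD-aligned result. */
		if (!memblock_limit)
			memblock_limit = arm_lowmem_limit;
		memblock_limit &= ~(PMD_SIZE - 1);		/* round_down() */

		printf("%#lx\n", memblock_limit);		/* prints 0x2fe00000 */
		return 0;
	}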
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 3dcd7ec69bca..b2024db225a9 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1084,10 +1084,6 @@ config SYSVIPC_COMPAT
 	def_bool y
 	depends on COMPAT && SYSVIPC
 
-config KEYS_COMPAT
-	def_bool y
-	depends on COMPAT && KEYS
-
 endmenu
 
 menu "Power management options"
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index 4afcffcb46cb..73272f43ca01 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -106,8 +106,13 @@ config ARCH_MVEBU
 	select ARMADA_AP806_SYSCON
 	select ARMADA_CP110_SYSCON
 	select ARMADA_37XX_CLK
+	select GPIOLIB
+	select GPIOLIB_IRQCHIP
 	select MVEBU_ODMI
 	select MVEBU_PIC
+	select OF_GPIO
+	select PINCTRL
+	select PINCTRL_ARMADA_37XX
 	help
 	  This enables support for Marvell EBU familly, including:
 	   - Armada 3700 SoC Family
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
index c7f669f5884f..166c9ef884dc 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
@@ -406,8 +406,9 @@
 		r_ccu: clock@1f01400 {
 			compatible = "allwinner,sun50i-a64-r-ccu";
 			reg = <0x01f01400 0x100>;
-			clocks = <&osc24M>, <&osc32k>, <&iosc>;
-			clock-names = "hosc", "losc", "iosc";
+			clocks = <&osc24M>, <&osc32k>, <&iosc>,
+				 <&ccu 11>;
+			clock-names = "hosc", "losc", "iosc", "pll-periph";
 			#clock-cells = <1>;
 			#reset-cells = <1>;
 		};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi
index 4d314a253fd9..732e2e06f503 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi
@@ -40,7 +40,7 @@
  * OTHER DEALINGS IN THE SOFTWARE.
  */
 
-#include "sunxi-h3-h5.dtsi"
+#include <arm/sunxi-h3-h5.dtsi>
 
 / {
 	cpus {
diff --git a/arch/arm64/boot/dts/allwinner/sunxi-h3-h5.dtsi b/arch/arm64/boot/dts/allwinner/sunxi-h3-h5.dtsi
deleted file mode 120000
index 036f01dc2b9b..000000000000
--- a/arch/arm64/boot/dts/allwinner/sunxi-h3-h5.dtsi
+++ /dev/null
@@ -1 +0,0 @@
-../../../../arm/boot/dts/sunxi-h3-h5.dtsi
\ No newline at end of file
diff --git a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
index 75bce2d0b1a8..49f6a6242cf9 100644
--- a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
+++ b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
@@ -81,6 +81,45 @@
 		};
 	};
 
+	reg_sys_5v: regulator@0 {
+		compatible = "regulator-fixed";
+		regulator-name = "SYS_5V";
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+		regulator-boot-on;
+		regulator-always-on;
+	};
+
+	reg_vdd_3v3: regulator@1 {
+		compatible = "regulator-fixed";
+		regulator-name = "VDD_3V3";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		regulator-boot-on;
+		regulator-always-on;
+		vin-supply = <&reg_sys_5v>;
+	};
+
+	reg_5v_hub: regulator@2 {
+		compatible = "regulator-fixed";
+		regulator-name = "5V_HUB";
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+		regulator-boot-on;
+		gpio = <&gpio0 7 0>;
+		regulator-always-on;
+		vin-supply = <&reg_sys_5v>;
+	};
+
+	wl1835_pwrseq: wl1835-pwrseq {
+		compatible = "mmc-pwrseq-simple";
+		/* WLAN_EN GPIO */
+		reset-gpios = <&gpio0 5 GPIO_ACTIVE_LOW>;
+		clocks = <&pmic>;
+		clock-names = "ext_clock";
+		power-off-delay-us = <10>;
+	};
+
 	soc {
 		spi0: spi@f7106000 {
 			status = "ok";
@@ -256,11 +295,31 @@
 
 		/* GPIO blocks 16 thru 19 do not appear to be routed to pins */
 
+		dwmmc_0: dwmmc0@f723d000 {
+			cap-mmc-highspeed;
+			non-removable;
+			bus-width = <0x8>;
+			vmmc-supply = <&ldo19>;
+		};
+
+		dwmmc_1: dwmmc1@f723e000 {
+			card-detect-delay = <200>;
+			cap-sd-highspeed;
+			sd-uhs-sdr12;
+			sd-uhs-sdr25;
+			sd-uhs-sdr50;
+			vqmmc-supply = <&ldo7>;
+			vmmc-supply = <&ldo10>;
+			bus-width = <0x4>;
+			disable-wp;
+			cd-gpios = <&gpio1 0 1>;
+		};
+
 		dwmmc_2: dwmmc2@f723f000 {
-			ti,non-removable;
+			bus-width = <0x4>;
 			non-removable;
-			/* WL_EN */
-			vmmc-supply = <&wlan_en_reg>;
+			vmmc-supply = <&reg_vdd_3v3>;
+			mmc-pwrseq = <&wl1835_pwrseq>;
 
 			#address-cells = <0x1>;
 			#size-cells = <0x0>;
@@ -272,18 +331,6 @@
 				interrupts = <3 IRQ_TYPE_EDGE_RISING>;
 			};
 		};
-
-		wlan_en_reg: regulator@1 {
-			compatible = "regulator-fixed";
-			regulator-name = "wlan-en-regulator";
-			regulator-min-microvolt = <1800000>;
-			regulator-max-microvolt = <1800000>;
-			/* WLAN_EN GPIO */
-			gpio = <&gpio0 5 0>;
-			/* WLAN card specific delay */
-			startup-delay-us = <70000>;
-			enable-active-high;
-		};
 	};
 
 	leds {
@@ -330,6 +377,7 @@
 	pmic: pmic@f8000000 {
 		compatible = "hisilicon,hi655x-pmic";
 		reg = <0x0 0xf8000000 0x0 0x1000>;
+		#clock-cells = <0>;
 		interrupt-controller;
 		#interrupt-cells = <2>;
 		pmic-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>;
diff --git a/arch/arm64/boot/dts/hisilicon/hi6220.dtsi b/arch/arm64/boot/dts/hisilicon/hi6220.dtsi
index 1e5129b19280..5013e4b2ea71 100644
--- a/arch/arm64/boot/dts/hisilicon/hi6220.dtsi
+++ b/arch/arm64/boot/dts/hisilicon/hi6220.dtsi
@@ -725,20 +725,10 @@
 			status = "disabled";
 		};
 
-		fixed_5v_hub: regulator@0 {
-			compatible = "regulator-fixed";
-			regulator-name = "fixed_5v_hub";
-			regulator-min-microvolt = <5000000>;
-			regulator-max-microvolt = <5000000>;
-			regulator-boot-on;
-			gpio = <&gpio0 7 0>;
-			regulator-always-on;
-		};
-
 		usb_phy: usbphy {
 			compatible = "hisilicon,hi6220-usb-phy";
 			#phy-cells = <0>;
-			phy-supply = <&fixed_5v_hub>;
+			phy-supply = <&reg_5v_hub>;
 			hisilicon,peripheral-syscon = <&sys_ctrl>;
 		};
 
@@ -766,17 +756,12 @@
 
 		dwmmc_0: dwmmc0@f723d000 {
 			compatible = "hisilicon,hi6220-dw-mshc";
-			num-slots = <0x1>;
-			cap-mmc-highspeed;
-			non-removable;
 			reg = <0x0 0xf723d000 0x0 0x1000>;
 			interrupts = <0x0 0x48 0x4>;
 			clocks = <&sys_ctrl 2>, <&sys_ctrl 1>;
 			clock-names = "ciu", "biu";
 			resets = <&sys_ctrl PERIPH_RSTDIS0_MMC0>;
 			reset-names = "reset";
-			bus-width = <0x8>;
-			vmmc-supply = <&ldo19>;
 			pinctrl-names = "default";
 			pinctrl-0 = <&emmc_pmx_func &emmc_clk_cfg_func
 				     &emmc_cfg_func &emmc_rst_cfg_func>;
@@ -784,13 +769,7 @@
 
 		dwmmc_1: dwmmc1@f723e000 {
 			compatible = "hisilicon,hi6220-dw-mshc";
-			num-slots = <0x1>;
-			card-detect-delay = <200>;
 			hisilicon,peripheral-syscon = <&ao_ctrl>;
-			cap-sd-highspeed;
-			sd-uhs-sdr12;
-			sd-uhs-sdr25;
-			sd-uhs-sdr50;
 			reg = <0x0 0xf723e000 0x0 0x1000>;
 			interrupts = <0x0 0x49 0x4>;
 			#address-cells = <0x1>;
@@ -799,11 +778,6 @@
 			clock-names = "ciu", "biu";
 			resets = <&sys_ctrl PERIPH_RSTDIS0_MMC1>;
 			reset-names = "reset";
-			vqmmc-supply = <&ldo7>;
-			vmmc-supply = <&ldo10>;
-			bus-width = <0x4>;
-			disable-wp;
-			cd-gpios = <&gpio1 0 1>;
 			pinctrl-names = "default", "idle";
 			pinctrl-0 = <&sd_pmx_func &sd_clk_cfg_func &sd_cfg_func>;
 			pinctrl-1 = <&sd_pmx_idle &sd_clk_cfg_idle &sd_cfg_idle>;
@@ -811,15 +785,12 @@
 
 		dwmmc_2: dwmmc2@f723f000 {
 			compatible = "hisilicon,hi6220-dw-mshc";
-			num-slots = <0x1>;
 			reg = <0x0 0xf723f000 0x0 0x1000>;
 			interrupts = <0x0 0x4a 0x4>;
 			clocks = <&sys_ctrl HI6220_MMC2_CIUCLK>, <&sys_ctrl HI6220_MMC2_CLK>;
 			clock-names = "ciu", "biu";
 			resets = <&sys_ctrl PERIPH_RSTDIS0_MMC2>;
 			reset-names = "reset";
-			bus-width = <0x4>;
-			broken-cd;
 			pinctrl-names = "default", "idle";
 			pinctrl-0 = <&sdio_pmx_func &sdio_clk_cfg_func &sdio_cfg_func>;
 			pinctrl-1 = <&sdio_pmx_idle &sdio_clk_cfg_idle &sdio_cfg_idle>;
diff --git a/arch/arm64/boot/dts/include/arm b/arch/arm64/boot/dts/include/arm
deleted file mode 120000
index cf63d80e2b93..000000000000
--- a/arch/arm64/boot/dts/include/arm
+++ /dev/null
@@ -1 +0,0 @@
-../../../../arm/boot/dts
\ No newline at end of file
diff --git a/arch/arm64/boot/dts/include/arm64 b/arch/arm64/boot/dts/include/arm64
deleted file mode 120000
index a96aa0ea9d8c..000000000000
--- a/arch/arm64/boot/dts/include/arm64
+++ /dev/null
@@ -1 +0,0 @@
-..
\ No newline at end of file
diff --git a/arch/arm64/boot/dts/include/dt-bindings b/arch/arm64/boot/dts/include/dt-bindings
deleted file mode 120000
index 08c00e4972fa..000000000000
--- a/arch/arm64/boot/dts/include/dt-bindings
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../include/dt-bindings
\ No newline at end of file
diff --git a/arch/arm64/boot/dts/marvell/armada-3720-db.dts b/arch/arm64/boot/dts/marvell/armada-3720-db.dts
index cef5f976bc0f..a89855f57091 100644
--- a/arch/arm64/boot/dts/marvell/armada-3720-db.dts
+++ b/arch/arm64/boot/dts/marvell/armada-3720-db.dts
@@ -79,6 +79,8 @@
 };
 
 &i2c0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&i2c1_pins>;
 	status = "okay";
 
 	gpio_exp: pca9555@22 {
@@ -113,6 +115,8 @@
 
 &spi0 {
 	status = "okay";
+	pinctrl-names = "default";
+	pinctrl-0 = <&spi_quad_pins>;
 
 	m25p80@0 {
 		compatible = "jedec,spi-nor";
@@ -143,6 +147,8 @@
 
 /* Exported on the micro USB connector CON32 through an FTDI */
 &uart0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&uart1_pins>;
 	status = "okay";
 };
 
@@ -184,6 +190,8 @@
 };
 
 &eth0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&rgmii_pins>;
 	phy-mode = "rgmii-id";
 	phy = <&phy0>;
 	status = "okay";
diff --git a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
index 58ae9e095af2..4d495ec39202 100644
--- a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
@@ -161,16 +161,83 @@
 			#clock-cells = <1>;
 		};
 
-		gpio1: gpio@13800 {
-			compatible = "marvell,mvebu-gpio-3700",
+		pinctrl_nb: pinctrl@13800 {
+			compatible = "marvell,armada3710-nb-pinctrl",
 			"syscon", "simple-mfd";
-			reg = <0x13800 0x500>;
+			reg = <0x13800 0x100>, <0x13C00 0x20>;
+			gpionb: gpio {
+				#gpio-cells = <2>;
+				gpio-ranges = <&pinctrl_nb 0 0 36>;
+				gpio-controller;
+				interrupts =
+				<GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 152 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 153 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 154 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 155 IRQ_TYPE_LEVEL_HIGH>;
+
+			};
 
 			xtalclk: xtal-clk {
 				compatible = "marvell,armada-3700-xtal-clock";
 				clock-output-names = "xtal";
 				#clock-cells = <0>;
 			};
+
+			spi_quad_pins: spi-quad-pins {
+				groups = "spi_quad";
+				function = "spi";
+			};
+
+			i2c1_pins: i2c1-pins {
+				groups = "i2c1";
+				function = "i2c";
+			};
+
+			i2c2_pins: i2c2-pins {
+				groups = "i2c2";
+				function = "i2c";
+			};
+
+			uart1_pins: uart1-pins {
+				groups = "uart1";
+				function = "uart";
+			};
+
+			uart2_pins: uart2-pins {
+				groups = "uart2";
+				function = "uart";
+			};
+		};
+
+		pinctrl_sb: pinctrl@18800 {
+			compatible = "marvell,armada3710-sb-pinctrl",
+			"syscon", "simple-mfd";
+			reg = <0x18800 0x100>, <0x18C00 0x20>;
+			gpiosb: gpio {
+				#gpio-cells = <2>;
+				gpio-ranges = <&pinctrl_sb 0 0 29>;
+				gpio-controller;
+				interrupts =
+				<GIC_SPI 160 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 159 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 158 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 157 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 156 IRQ_TYPE_LEVEL_HIGH>;
+			};
+
+			rgmii_pins: mii-pins {
+				groups = "rgmii";
+				function = "mii";
+			};
+
 		};
 
 		eth0: ethernet@30000 {
diff --git a/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi b/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
index ac8df5201cd6..b4bc42ece754 100644
--- a/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
@@ -231,8 +231,7 @@
 		cpm_crypto: crypto@800000 {
 			compatible = "inside-secure,safexcel-eip197";
 			reg = <0x800000 0x200000>;
-			interrupts = <GIC_SPI 34 (IRQ_TYPE_EDGE_RISING
-			| IRQ_TYPE_LEVEL_HIGH)>,
+			interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>,
 				     <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>,
 				     <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>,
 				     <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi b/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi
index 7740a75a8230..6e2058847ddc 100644
--- a/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi
@@ -221,8 +221,7 @@
 		cps_crypto: crypto@800000 {
 			compatible = "inside-secure,safexcel-eip197";
 			reg = <0x800000 0x200000>;
-			interrupts = <GIC_SPI 34 (IRQ_TYPE_EDGE_RISING
-			| IRQ_TYPE_LEVEL_HIGH)>,
+			interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>,
 				     <GIC_SPI 278 IRQ_TYPE_LEVEL_HIGH>,
 				     <GIC_SPI 279 IRQ_TYPE_LEVEL_HIGH>,
 				     <GIC_SPI 280 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/arch/arm64/boot/dts/mediatek/mt8173-evb.dts b/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
index 0ecaad4333a7..1c3634fa94bf 100644
--- a/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
@@ -134,6 +134,9 @@
 		bus-width = <8>;
 		max-frequency = <50000000>;
 		cap-mmc-highspeed;
+		mediatek,hs200-cmd-int-delay=<26>;
+		mediatek,hs400-cmd-int-delay=<14>;
+		mediatek,hs400-cmd-resp-sel-rising;
 		vmmc-supply = <&mt6397_vemc_3v3_reg>;
 		vqmmc-supply = <&mt6397_vio18_reg>;
 		non-removable;
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts b/arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts
index 658bb9dc9dfd..7bd31066399b 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts
@@ -44,7 +44,7 @@
 
 /dts-v1/;
 #include "rk3399-gru.dtsi"
-#include <include/dt-bindings/input/linux-event-codes.h>
+#include <dt-bindings/input/linux-event-codes.h>
 
 /*
  * Kevin-specific things
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index ce072859e3b2..97c123e09e45 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -30,7 +30,6 @@ CONFIG_PROFILING=y
 CONFIG_JUMP_LABEL=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
 # CONFIG_IOSCHED_DEADLINE is not set
 CONFIG_ARCH_SUNXI=y
 CONFIG_ARCH_ALPINE=y
@@ -62,16 +61,16 @@ CONFIG_ARCH_XGENE=y
 CONFIG_ARCH_ZX=y
 CONFIG_ARCH_ZYNQMP=y
 CONFIG_PCI=y
-CONFIG_PCI_MSI=y
 CONFIG_PCI_IOV=y
-CONFIG_PCI_AARDVARK=y
-CONFIG_PCIE_RCAR=y
-CONFIG_PCI_HOST_GENERIC=y
-CONFIG_PCI_XGENE=y
 CONFIG_PCI_LAYERSCAPE=y
 CONFIG_PCI_HISI=y
 CONFIG_PCIE_QCOM=y
 CONFIG_PCIE_ARMADA_8K=y
+CONFIG_PCI_AARDVARK=y
+CONFIG_PCIE_RCAR=y
+CONFIG_PCIE_ROCKCHIP=m
+CONFIG_PCI_HOST_GENERIC=y
+CONFIG_PCI_XGENE=y
 CONFIG_ARM64_VA_BITS_48=y
 CONFIG_SCHED_MC=y
 CONFIG_NUMA=y
@@ -80,12 +79,11 @@ CONFIG_KSM=y
 CONFIG_TRANSPARENT_HUGEPAGE=y
 CONFIG_CMA=y
 CONFIG_SECCOMP=y
-CONFIG_XEN=y
 CONFIG_KEXEC=y
 CONFIG_CRASH_DUMP=y
+CONFIG_XEN=y
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_COMPAT=y
-CONFIG_CPU_IDLE=y
 CONFIG_HIBERNATION=y
 CONFIG_ARM_CPUIDLE=y
 CONFIG_CPU_FREQ=y
@@ -155,8 +153,8 @@ CONFIG_MTD_SPI_NOR=y
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_NBD=m
 CONFIG_VIRTIO_BLK=y
-CONFIG_EEPROM_AT25=m
 CONFIG_SRAM=y
+CONFIG_EEPROM_AT25=m
 # CONFIG_SCSI_PROC_FS is not set
 CONFIG_BLK_DEV_SD=y
 CONFIG_SCSI_SAS_ATA=y
@@ -168,8 +166,8 @@ CONFIG_AHCI_CEVA=y
 CONFIG_AHCI_MVEBU=y
 CONFIG_AHCI_XGENE=y
 CONFIG_AHCI_QORIQ=y
-CONFIG_SATA_RCAR=y
 CONFIG_SATA_SIL24=y
+CONFIG_SATA_RCAR=y
 CONFIG_PATA_PLATFORM=y
 CONFIG_PATA_OF_PLATFORM=y
 CONFIG_NETDEVICES=y
@@ -186,18 +184,17 @@ CONFIG_HNS_ENET=y
 CONFIG_E1000E=y
 CONFIG_IGB=y
 CONFIG_IGBVF=y
-CONFIG_MVPP2=y
 CONFIG_MVNETA=y
+CONFIG_MVPP2=y
 CONFIG_SKY2=y
 CONFIG_RAVB=y
 CONFIG_SMC91X=y
 CONFIG_SMSC911X=y
 CONFIG_STMMAC_ETH=m
-CONFIG_REALTEK_PHY=m
+CONFIG_MDIO_BUS_MUX_MMIOREG=y
 CONFIG_MESON_GXL_PHY=m
 CONFIG_MICREL_PHY=y
-CONFIG_MDIO_BUS_MUX=y
-CONFIG_MDIO_BUS_MUX_MMIOREG=y
+CONFIG_REALTEK_PHY=m
 CONFIG_USB_PEGASUS=m
 CONFIG_USB_RTL8150=m
 CONFIG_USB_RTL8152=m
@@ -212,6 +209,8 @@ CONFIG_BRCMFMAC=m
 CONFIG_WL18XX=m
 CONFIG_WLCORE_SDIO=m
 CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_ADC=m
+CONFIG_KEYBOARD_CROS_EC=y
 CONFIG_KEYBOARD_GPIO=y
 CONFIG_INPUT_MISC=y
 CONFIG_INPUT_PM8941_PWRKEY=y
@@ -230,14 +229,14 @@ CONFIG_SERIAL_8250_UNIPHIER=y
 CONFIG_SERIAL_OF_PLATFORM=y
 CONFIG_SERIAL_AMBA_PL011=y
 CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+CONFIG_SERIAL_MESON=y
+CONFIG_SERIAL_MESON_CONSOLE=y
 CONFIG_SERIAL_SAMSUNG=y
 CONFIG_SERIAL_SAMSUNG_CONSOLE=y
 CONFIG_SERIAL_TEGRA=y
 CONFIG_SERIAL_SH_SCI=y
 CONFIG_SERIAL_SH_SCI_NR_UARTS=11
 CONFIG_SERIAL_SH_SCI_CONSOLE=y
-CONFIG_SERIAL_MESON=y
-CONFIG_SERIAL_MESON_CONSOLE=y
 CONFIG_SERIAL_MSM=y
 CONFIG_SERIAL_MSM_CONSOLE=y
 CONFIG_SERIAL_XILINX_PS_UART=y
@@ -261,14 +260,15 @@ CONFIG_I2C_UNIPHIER_F=y
 CONFIG_I2C_RCAR=y
 CONFIG_I2C_CROS_EC_TUNNEL=y
 CONFIG_SPI=y
-CONFIG_SPI_MESON_SPIFC=m
 CONFIG_SPI_BCM2835=m
 CONFIG_SPI_BCM2835AUX=m
+CONFIG_SPI_MESON_SPIFC=m
 CONFIG_SPI_ORION=y
 CONFIG_SPI_PL022=y
 CONFIG_SPI_QUP=y
-CONFIG_SPI_SPIDEV=m
+CONFIG_SPI_ROCKCHIP=y
 CONFIG_SPI_S3C64XX=y
+CONFIG_SPI_SPIDEV=m
 CONFIG_SPMI=y
 CONFIG_PINCTRL_SINGLE=y
 CONFIG_PINCTRL_MAX77620=y
@@ -286,33 +286,33 @@ CONFIG_GPIO_PCA953X=y
 CONFIG_GPIO_PCA953X_IRQ=y
 CONFIG_GPIO_MAX77620=y
 CONFIG_POWER_RESET_MSM=y
-CONFIG_BATTERY_BQ27XXX=y
 CONFIG_POWER_RESET_XGENE=y
 CONFIG_POWER_RESET_SYSCON=y
+CONFIG_BATTERY_BQ27XXX=y
+CONFIG_SENSORS_ARM_SCPI=y
 CONFIG_SENSORS_LM90=m
 CONFIG_SENSORS_INA2XX=m
-CONFIG_SENSORS_ARM_SCPI=y
-CONFIG_THERMAL=y
-CONFIG_THERMAL_EMULATION=y
 CONFIG_THERMAL_GOV_POWER_ALLOCATOR=y
 CONFIG_CPU_THERMAL=y
-CONFIG_BCM2835_THERMAL=y
+CONFIG_THERMAL_EMULATION=y
 CONFIG_EXYNOS_THERMAL=y
+CONFIG_ROCKCHIP_THERMAL=m
 CONFIG_WATCHDOG=y
-CONFIG_BCM2835_WDT=y
-CONFIG_RENESAS_WDT=y
 CONFIG_S3C2410_WATCHDOG=y
 CONFIG_MESON_GXBB_WATCHDOG=m
 CONFIG_MESON_WATCHDOG=m
+CONFIG_RENESAS_WDT=y
+CONFIG_BCM2835_WDT=y
+CONFIG_MFD_CROS_EC=y
+CONFIG_MFD_CROS_EC_I2C=y
+CONFIG_MFD_CROS_EC_SPI=y
 CONFIG_MFD_EXYNOS_LPASS=m
+CONFIG_MFD_HI655X_PMIC=y
 CONFIG_MFD_MAX77620=y
-CONFIG_MFD_RK808=y
 CONFIG_MFD_SPMI_PMIC=y
+CONFIG_MFD_RK808=y
 CONFIG_MFD_SEC_CORE=y
-CONFIG_MFD_HI655X_PMIC=y
-CONFIG_REGULATOR=y
-CONFIG_MFD_CROS_EC=y
-CONFIG_MFD_CROS_EC_I2C=y
+CONFIG_REGULATOR_FAN53555=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_REGULATOR_GPIO=y
 CONFIG_REGULATOR_HI655X=y
@@ -345,13 +345,12 @@ CONFIG_DRM_EXYNOS_DSI=y
 CONFIG_DRM_EXYNOS_HDMI=y
 CONFIG_DRM_EXYNOS_MIC=y
 CONFIG_DRM_RCAR_DU=m
-CONFIG_DRM_RCAR_HDMI=y
 CONFIG_DRM_RCAR_LVDS=y
 CONFIG_DRM_RCAR_VSP=y
 CONFIG_DRM_TEGRA=m
-CONFIG_DRM_VC4=m
 CONFIG_DRM_PANEL_SIMPLE=m
 CONFIG_DRM_I2C_ADV7511=m
+CONFIG_DRM_VC4=m
 CONFIG_DRM_HISI_KIRIN=m
 CONFIG_DRM_MESON=m
 CONFIG_FB=y
@@ -366,39 +365,37 @@ CONFIG_SOUND=y
 CONFIG_SND=y
 CONFIG_SND_SOC=y
 CONFIG_SND_BCM2835_SOC_I2S=m
-CONFIG_SND_SOC_RCAR=y
 CONFIG_SND_SOC_SAMSUNG=y
+CONFIG_SND_SOC_RCAR=y
 CONFIG_SND_SOC_AK4613=y
 CONFIG_USB=y
 CONFIG_USB_OTG=y
 CONFIG_USB_XHCI_HCD=y
-CONFIG_USB_XHCI_PLATFORM=y
-CONFIG_USB_XHCI_RCAR=y
-CONFIG_USB_EHCI_EXYNOS=y
 CONFIG_USB_XHCI_TEGRA=y
 CONFIG_USB_EHCI_HCD=y
 CONFIG_USB_EHCI_MSM=y
+CONFIG_USB_EHCI_EXYNOS=y
 CONFIG_USB_EHCI_HCD_PLATFORM=y
-CONFIG_USB_OHCI_EXYNOS=y
 CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_EXYNOS=y
 CONFIG_USB_OHCI_HCD_PLATFORM=y
 CONFIG_USB_RENESAS_USBHS=m
 CONFIG_USB_STORAGE=y
-CONFIG_USB_DWC2=y
 CONFIG_USB_DWC3=y
+CONFIG_USB_DWC2=y
 CONFIG_USB_CHIPIDEA=y
 CONFIG_USB_CHIPIDEA_UDC=y
 CONFIG_USB_CHIPIDEA_HOST=y
 CONFIG_USB_ISP1760=y
 CONFIG_USB_HSIC_USB3503=y
 CONFIG_USB_MSM_OTG=y
+CONFIG_USB_QCOM_8X16_PHY=y
 CONFIG_USB_ULPI=y
 CONFIG_USB_GADGET=y
 CONFIG_USB_RENESAS_USBHS_UDC=m
 CONFIG_MMC=y
 CONFIG_MMC_BLOCK_MINORS=32
 CONFIG_MMC_ARMMMCI=y
-CONFIG_MMC_MESON_GX=y
 CONFIG_MMC_SDHCI=y
 CONFIG_MMC_SDHCI_ACPI=y
 CONFIG_MMC_SDHCI_PLTFM=y
@@ -406,6 +403,7 @@ CONFIG_MMC_SDHCI_OF_ARASAN=y
 CONFIG_MMC_SDHCI_OF_ESDHC=y
 CONFIG_MMC_SDHCI_CADENCE=y
 CONFIG_MMC_SDHCI_TEGRA=y
+CONFIG_MMC_MESON_GX=y
 CONFIG_MMC_SDHCI_MSM=y
 CONFIG_MMC_SPI=y
 CONFIG_MMC_SDHI=y
@@ -414,32 +412,31 @@ CONFIG_MMC_DW_EXYNOS=y
 CONFIG_MMC_DW_K3=y
 CONFIG_MMC_DW_ROCKCHIP=y
 CONFIG_MMC_SUNXI=y
-CONFIG_MMC_SDHCI_XENON=y
 CONFIG_MMC_BCM2835=y
+CONFIG_MMC_SDHCI_XENON=y
 CONFIG_NEW_LEDS=y
 CONFIG_LEDS_CLASS=y
 CONFIG_LEDS_GPIO=y
 CONFIG_LEDS_PWM=y
 CONFIG_LEDS_SYSCON=y
-CONFIG_LEDS_TRIGGERS=y
-CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
 CONFIG_LEDS_TRIGGER_HEARTBEAT=y
 CONFIG_LEDS_TRIGGER_CPU=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_MAX77686=y
+CONFIG_RTC_DRV_RK808=m
 CONFIG_RTC_DRV_S5M=y
 CONFIG_RTC_DRV_DS3232=y
 CONFIG_RTC_DRV_EFI=y
+CONFIG_RTC_DRV_S3C=y
 CONFIG_RTC_DRV_PL031=y
 CONFIG_RTC_DRV_SUN6I=y
-CONFIG_RTC_DRV_RK808=m
 CONFIG_RTC_DRV_TEGRA=y
 CONFIG_RTC_DRV_XGENE=y
-CONFIG_RTC_DRV_S3C=y
 CONFIG_DMADEVICES=y
+CONFIG_DMA_BCM2835=m
 CONFIG_MV_XOR_V2=y
 CONFIG_PL330_DMA=y
-CONFIG_DMA_BCM2835=m
 CONFIG_TEGRA20_APB_DMA=y
 CONFIG_QCOM_BAM_DMA=y
 CONFIG_QCOM_HIDMA_MGMT=y
@@ -452,52 +449,56 @@ CONFIG_VIRTIO_BALLOON=y
 CONFIG_VIRTIO_MMIO=y
 CONFIG_XEN_GNTDEV=y
 CONFIG_XEN_GRANT_DEV_ALLOC=y
+CONFIG_COMMON_CLK_RK808=y
 CONFIG_COMMON_CLK_SCPI=y
 CONFIG_COMMON_CLK_CS2000_CP=y
 CONFIG_COMMON_CLK_S2MPS11=y
-CONFIG_COMMON_CLK_PWM=y
-CONFIG_COMMON_CLK_RK808=y
 CONFIG_CLK_QORIQ=y
+CONFIG_COMMON_CLK_PWM=y
 CONFIG_COMMON_CLK_QCOM=y
+CONFIG_QCOM_CLK_SMD_RPM=y
 CONFIG_MSM_GCC_8916=y
 CONFIG_MSM_GCC_8994=y
 CONFIG_MSM_MMCC_8996=y
 CONFIG_HWSPINLOCK_QCOM=y
-CONFIG_MAILBOX=y
 CONFIG_ARM_MHU=y
 CONFIG_PLATFORM_MHU=y
 CONFIG_BCM2835_MBOX=y
 CONFIG_HI6220_MBOX=y
 CONFIG_ARM_SMMU=y
 CONFIG_ARM_SMMU_V3=y
+CONFIG_RPMSG_QCOM_SMD=y
 CONFIG_RASPBERRYPI_POWER=y
 CONFIG_QCOM_SMEM=y
-CONFIG_QCOM_SMD=y
 CONFIG_QCOM_SMD_RPM=y
+CONFIG_QCOM_SMP2P=y
+CONFIG_QCOM_SMSM=y
 CONFIG_ROCKCHIP_PM_DOMAINS=y
 CONFIG_ARCH_TEGRA_132_SOC=y
 CONFIG_ARCH_TEGRA_210_SOC=y
 CONFIG_ARCH_TEGRA_186_SOC=y
 CONFIG_EXTCON_USB_GPIO=y
+CONFIG_IIO=y
+CONFIG_EXYNOS_ADC=y
+CONFIG_ROCKCHIP_SARADC=m
 CONFIG_PWM=y
 CONFIG_PWM_BCM2835=m
+CONFIG_PWM_CROS_EC=m
+CONFIG_PWM_MESON=m
 CONFIG_PWM_ROCKCHIP=y
+CONFIG_PWM_SAMSUNG=y
 CONFIG_PWM_TEGRA=m
-CONFIG_PWM_MESON=m
-CONFIG_COMMON_RESET_HI6220=y
 CONFIG_PHY_RCAR_GEN3_USB2=y
 CONFIG_PHY_HI6220_USB=y
+CONFIG_PHY_SUN4I_USB=y
 CONFIG_PHY_ROCKCHIP_INNO_USB2=y
 CONFIG_PHY_ROCKCHIP_EMMC=y
-CONFIG_PHY_SUN4I_USB=y
+CONFIG_PHY_ROCKCHIP_PCIE=m
 CONFIG_PHY_XGENE=y
 CONFIG_PHY_TEGRA_XUSB=y
 CONFIG_ARM_SCPI_PROTOCOL=y
-CONFIG_ACPI=y
-CONFIG_IIO=y
-CONFIG_EXYNOS_ADC=y
-CONFIG_PWM_SAMSUNG=y
 CONFIG_RASPBERRYPI_FIRMWARE=y
+CONFIG_ACPI=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
 CONFIG_EXT4_FS_POSIX_ACL=y
@@ -511,7 +512,6 @@ CONFIG_FUSE_FS=m
 CONFIG_CUSE=m
 CONFIG_OVERLAY_FS=m
 CONFIG_VFAT_FS=y
-CONFIG_TMPFS=y
 CONFIG_HUGETLBFS=y
 CONFIG_CONFIGFS_FS=y
 CONFIG_EFIVAR_FS=y
@@ -539,11 +539,9 @@ CONFIG_MEMTEST=y
 CONFIG_SECURITY=y
 CONFIG_CRYPTO_ECHAINIV=y
 CONFIG_CRYPTO_ANSI_CPRNG=y
-CONFIG_CRYPTO_DEV_SAFEXCEL=m
 CONFIG_ARM64_CRYPTO=y
 CONFIG_CRYPTO_SHA1_ARM64_CE=y
 CONFIG_CRYPTO_SHA2_ARM64_CE=y
 CONFIG_CRYPTO_GHASH_ARM64_CE=y
 CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
 CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
-# CONFIG_CRYPTO_AES_ARM64_NEON_BLK is not set
diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h
index 0e99978da3f0..59cca1d6ec54 100644
--- a/arch/arm64/include/asm/acpi.h
+++ b/arch/arm64/include/asm/acpi.h
@@ -23,9 +23,9 @@
 #define ACPI_MADT_GICC_LENGTH	\
 	(acpi_gbl_FADT.header.revision < 6 ? 76 : 80)
 
 #define BAD_MADT_GICC_ENTRY(entry, end)					\
-	(!(entry) || (unsigned long)(entry) + sizeof(*(entry)) > (end) ||	\
-	 (entry)->header.length != ACPI_MADT_GICC_LENGTH)
+	(!(entry) || (entry)->header.length != ACPI_MADT_GICC_LENGTH ||	\
+	 (unsigned long)(entry) + ACPI_MADT_GICC_LENGTH > (end))
 
 /* Basic configuration for ACPI */
 #ifdef CONFIG_ACPI
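Note: the reordered macro validates the entry's declared header.length before using that same spec-defined length for the end-of-table bounds check; sizeof(*(entry)) can be larger than what older firmware actually provides. A minimal userspace sketch of the idea (the struct layout, GICC_LENGTH and bad_gicc_entry() are illustrative stand-ins, not the kernel's):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Illustrative stand-ins for the ACPI structures. */
    struct header { uint8_t type; uint8_t length; };
    struct gicc   { struct header header; uint8_t payload[78]; }; /* sizeof == 80 */

    #define GICC_LENGTH 76 /* e.g. FADT revision < 6 */

    /* Mirrors the fixed macro: check the declared length first, then use
     * that same length (not sizeof) for the end-of-table bounds check. */
    static int bad_gicc_entry(struct gicc *entry, uintptr_t end)
    {
        return !entry ||
               entry->header.length != GICC_LENGTH ||
               (uintptr_t)entry + GICC_LENGTH > end;
    }

    int main(void)
    {
        /* A 76-byte entry flush against the end of the table: a
         * sizeof-based bound (80 bytes) would wrongly reject it. */
        unsigned char table[76];
        struct gicc *e = (struct gicc *)table;

        memset(table, 0, sizeof(table));
        e->header.length = GICC_LENGTH;

        printf("bad=%d\n", bad_gicc_entry(e, (uintptr_t)table + sizeof(table)));
        return 0;
    }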
diff --git a/arch/arm64/include/asm/atomic_ll_sc.h b/arch/arm64/include/asm/atomic_ll_sc.h
index f819fdcff1ac..f5a2d09afb38 100644
--- a/arch/arm64/include/asm/atomic_ll_sc.h
+++ b/arch/arm64/include/asm/atomic_ll_sc.h
@@ -264,7 +264,6 @@ __LL_SC_PREFIX(__cmpxchg_case_##name(volatile void *ptr, \
 	"	st" #rel "xr" #sz "\t%w[tmp], %" #w "[new], %[v]\n"	\
 	"	cbnz	%w[tmp], 1b\n"					\
 	"	" #mb "\n"						\
-	"	mov	%" #w "[oldval], %" #w "[old]\n"		\
 	"2:"								\
 	: [tmp] "=&r" (tmp), [oldval] "=&r" (oldval),			\
 	  [v] "+Q" (*(unsigned long *)ptr)				\
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index e7f84a7b4465..428ee1f2468c 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -115,6 +115,7 @@ struct arm64_cpu_capabilities {
 
 extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
 extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS];
+extern struct static_key_false arm64_const_caps_ready;
 
 bool this_cpu_has_cap(unsigned int cap);
 
@@ -124,7 +125,7 @@ static inline bool cpu_have_feature(unsigned int num)
 }
 
 /* System capability check for constant caps */
-static inline bool cpus_have_const_cap(int num)
+static inline bool __cpus_have_const_cap(int num)
 {
 	if (num >= ARM64_NCAPS)
 		return false;
@@ -138,6 +139,14 @@ static inline bool cpus_have_cap(unsigned int num)
 	return test_bit(num, cpu_hwcaps);
 }
 
+static inline bool cpus_have_const_cap(int num)
+{
+	if (static_branch_likely(&arm64_const_caps_ready))
+		return __cpus_have_const_cap(num);
+	else
+		return cpus_have_cap(num);
+}
+
 static inline void cpus_set_cap(unsigned int num)
 {
 	if (num >= ARM64_NCAPS) {
@@ -145,7 +154,6 @@ static inline void cpus_set_cap(unsigned int num)
 			num, ARM64_NCAPS);
 	} else {
 		__set_bit(num, cpu_hwcaps);
-		static_branch_enable(&cpu_hwcap_keys[num]);
 	}
 }
 
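Note: the hunks above split the capability check in two: __cpus_have_const_cap() reads the static keys, while cpus_have_const_cap() falls back to the cpu_hwcaps bitmap until arm64_const_caps_ready is flipped. A minimal userspace model of that two-phase pattern (plain bools stand in for the static keys; all names here are illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    static bool const_caps_ready;      /* models arm64_const_caps_ready */
    static bool hwcap_key[4];          /* models cpu_hwcap_keys[]       */
    static unsigned long hwcap_bitmap; /* models the cpu_hwcaps bitmap  */

    static bool have_cap(unsigned int num)       /* cpus_have_cap() */
    {
        return hwcap_bitmap & (1UL << num);
    }

    static bool have_const_cap(unsigned int num) /* cpus_have_const_cap() */
    {
        if (const_caps_ready)
            return hwcap_key[num];  /* fast path: the static branch */
        return have_cap(num);       /* boot-time fallback           */
    }

    int main(void)
    {
        hwcap_bitmap |= 1UL << 2;          /* cpus_set_cap(2): bitmap only */
        printf("%d\n", have_const_cap(2)); /* 1, via the bitmap fallback   */

        hwcap_key[2] = true;               /* enable_cpu_capabilities()    */
        const_caps_ready = true;           /* mark_const_caps_ready()      */
        printf("%d\n", have_const_cap(2)); /* 1, via the fast path         */
        return 0;
    }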
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 5e19165c5fa8..1f252a95bc02 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -24,6 +24,7 @@
 
 #include <linux/types.h>
 #include <linux/kvm_types.h>
+#include <asm/cpufeature.h>
 #include <asm/kvm.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_mmio.h>
@@ -355,9 +356,12 @@ static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
 				       unsigned long vector_ptr)
 {
 	/*
-	 * Call initialization code, and switch to the full blown
-	 * HYP code.
+	 * Call initialization code, and switch to the full blown HYP code.
+	 * If the cpucaps haven't been finalized yet, something has gone very
+	 * wrong, and hyp will crash and burn when it uses any
+	 * cpus_have_const_cap() wrapper.
 	 */
+	BUG_ON(!static_branch_likely(&arm64_const_caps_ready));
 	__kvm_call_hyp((void *)pgd_ptr, hyp_stack_ptr, vector_ptr);
 }
 
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 15c142ce991c..b4d13d9267ff 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -286,6 +286,10 @@
 #define SCTLR_ELx_A	(1 << 1)
 #define SCTLR_ELx_M	1
 
+#define SCTLR_EL2_RES1	((1 << 4)  | (1 << 5)  | (1 << 11) | (1 << 16) |	\
+			 (1 << 16) | (1 << 18) | (1 << 22) | (1 << 23) |	\
+			 (1 << 28) | (1 << 29))
+
 #define SCTLR_ELx_FLAGS	(SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | \
 			 SCTLR_ELx_SA | SCTLR_ELx_I)
 
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 94b8f7fc3310..817ce3365e20 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -985,8 +985,16 @@ void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
 	 */
 void __init enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
 {
-	for (; caps->matches; caps++)
-		if (caps->enable && cpus_have_cap(caps->capability))
+	for (; caps->matches; caps++) {
+		unsigned int num = caps->capability;
+
+		if (!cpus_have_cap(num))
+			continue;
+
+		/* Ensure cpus_have_const_cap(num) works */
+		static_branch_enable(&cpu_hwcap_keys[num]);
+
+		if (caps->enable) {
 			/*
 			 * Use stop_machine() as it schedules the work allowing
 			 * us to modify PSTATE, instead of on_each_cpu() which
@@ -994,6 +1002,8 @@ void __init enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
 			 * we return.
 			 */
 			stop_machine(caps->enable, NULL, cpu_online_mask);
+		}
+	}
 }
 
 /*
@@ -1096,6 +1106,14 @@ static void __init setup_feature_capabilities(void)
 	enable_cpu_capabilities(arm64_features);
 }
 
+DEFINE_STATIC_KEY_FALSE(arm64_const_caps_ready);
+EXPORT_SYMBOL(arm64_const_caps_ready);
+
+static void __init mark_const_caps_ready(void)
+{
+	static_branch_enable(&arm64_const_caps_ready);
+}
+
 /*
  * Check if the current CPU has a given feature capability.
  * Should be called from non-preemptible context.
@@ -1131,6 +1149,7 @@ void __init setup_cpu_features(void)
 	/* Set the CPU feature capabilies */
 	setup_feature_capabilities();
 	enable_errata_workarounds();
+	mark_const_caps_ready();
 	setup_elf_hwcaps(arm64_elf_hwcaps);
 
 	if (system_supports_32bit_el0())
diff --git a/arch/arm64/kernel/pci.c b/arch/arm64/kernel/pci.c
index 4f0e3ebfea4b..c7e3e6387a49 100644
--- a/arch/arm64/kernel/pci.c
+++ b/arch/arm64/kernel/pci.c
@@ -191,8 +191,10 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
 		return NULL;
 
 	root_ops = kzalloc_node(sizeof(*root_ops), GFP_KERNEL, node);
-	if (!root_ops)
+	if (!root_ops) {
+		kfree(ri);
 		return NULL;
+	}
 
 	ri->cfg = pci_acpi_setup_ecam_mapping(root);
 	if (!ri->cfg) {
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index bcc79471b38e..83a1b1ad189f 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -877,15 +877,24 @@ static int armv8pmu_set_event_filter(struct hw_perf_event *event,
 
 	if (attr->exclude_idle)
 		return -EPERM;
-	if (is_kernel_in_hyp_mode() &&
-	    attr->exclude_kernel != attr->exclude_hv)
-		return -EINVAL;
+
+	/*
+	 * If we're running in hyp mode, then we *are* the hypervisor.
+	 * Therefore we ignore exclude_hv in this configuration, since
+	 * there's no hypervisor to sample anyway. This is consistent
+	 * with other architectures (x86 and Power).
+	 */
+	if (is_kernel_in_hyp_mode()) {
+		if (!attr->exclude_kernel)
+			config_base |= ARMV8_PMU_INCLUDE_EL2;
+	} else {
+		if (attr->exclude_kernel)
+			config_base |= ARMV8_PMU_EXCLUDE_EL1;
+		if (!attr->exclude_hv)
+			config_base |= ARMV8_PMU_INCLUDE_EL2;
+	}
 	if (attr->exclude_user)
 		config_base |= ARMV8_PMU_EXCLUDE_EL0;
 
 	/*
 	 * Install the filter into config_base as this is used to
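Note: with VHE the kernel itself runs at EL2, so the rework above stops rejecting exclude_kernel != exclude_hv and instead lets exclude_kernel govern EL2 counting while exclude_hv is ignored. A sketch of the resulting filter logic as a standalone function (the bit values are made up for illustration):

    #include <stdbool.h>
    #include <stdio.h>

    #define INCLUDE_EL2 (1u << 0) /* illustrative bits, not the real ones */
    #define EXCLUDE_EL1 (1u << 1)
    #define EXCLUDE_EL0 (1u << 2)

    /* Model of the reworked filter: under VHE (kernel at EL2),
     * exclude_kernel controls EL2 and exclude_hv is ignored. */
    static unsigned filter(bool vhe, bool excl_kernel, bool excl_hv,
                           bool excl_user)
    {
        unsigned config = 0;

        if (vhe) {
            if (!excl_kernel)
                config |= INCLUDE_EL2;
        } else {
            if (excl_kernel)
                config |= EXCLUDE_EL1;
            if (!excl_hv)
                config |= INCLUDE_EL2;
        }
        if (excl_user)
            config |= EXCLUDE_EL0;
        return config;
    }

    int main(void)
    {
        /* Previously rejected with -EINVAL; now simply counts EL2. */
        printf("%#x\n", filter(true, false, true, false));
        return 0;
    }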
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index 41b6e31f8f55..d0cb007fa482 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -221,10 +221,11 @@ void update_vsyscall(struct timekeeper *tk)
 	/* tkr_mono.cycle_last == tkr_raw.cycle_last */
 	vdso_data->cs_cycle_last	= tk->tkr_mono.cycle_last;
 	vdso_data->raw_time_sec		= tk->raw_time.tv_sec;
-	vdso_data->raw_time_nsec	= tk->raw_time.tv_nsec;
+	vdso_data->raw_time_nsec	= (tk->raw_time.tv_nsec <<
+					   tk->tkr_raw.shift) +
+					  tk->tkr_raw.xtime_nsec;
 	vdso_data->xtime_clock_sec	= tk->xtime_sec;
 	vdso_data->xtime_clock_nsec	= tk->tkr_mono.xtime_nsec;
-	/* tkr_raw.xtime_nsec == 0 */
 	vdso_data->cs_mono_mult		= tk->tkr_mono.mult;
 	vdso_data->cs_raw_mult		= tk->tkr_raw.mult;
 	/* tkr_mono.shift == tkr_raw.shift */
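Note: the published raw_time_nsec is now kept in left-shifted (fixed-point) units and includes tkr_raw.xtime_nsec, matching how the vDSO accumulates shifted nanoseconds before shifting back; the old code assumed tkr_raw.xtime_nsec == 0, which no longer holds. A toy model of that arithmetic, with made-up values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t shift = 8;
        uint64_t raw_sec = 100, raw_nsec = 500; /* tk->raw_time            */
        uint64_t xtime_nsec = 37;               /* tkr_raw.xtime_nsec,
                                                   already in shifted units */
        uint64_t cycles = 0;                    /* delta since last update  */
        uint64_t mult = 1;

        /* What the kernel now publishes to the vDSO data page: */
        uint64_t vdso_raw_nsec = (raw_nsec << shift) + xtime_nsec;

        /* vDSO side: accumulate shifted nsecs, shift back at the end. */
        uint64_t nsec = (vdso_raw_nsec + cycles * mult) >> shift;

        printf("%llu.%09llu\n",
               (unsigned long long)raw_sec, (unsigned long long)nsec);
        return 0;
    }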
diff --git a/arch/arm64/kernel/vdso/gettimeofday.S b/arch/arm64/kernel/vdso/gettimeofday.S
index e00b4671bd7c..76320e920965 100644
--- a/arch/arm64/kernel/vdso/gettimeofday.S
+++ b/arch/arm64/kernel/vdso/gettimeofday.S
@@ -256,7 +256,6 @@ monotonic_raw:
 	seqcnt_check fail=monotonic_raw
 
 	/* All computations are done with left-shifted nsecs. */
-	lsl	x14, x14, x12
 	get_nsec_per_sec res=x9
 	lsl	x9, x9, x12
 
diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S
index 839425c24b1c..3f9615582377 100644
--- a/arch/arm64/kvm/hyp-init.S
+++ b/arch/arm64/kvm/hyp-init.S
@@ -106,10 +106,13 @@ __do_hyp_init:
 	tlbi	alle2
 	dsb	sy
 
-	mrs	x4, sctlr_el2
-	and	x4, x4, #SCTLR_ELx_EE	// preserve endianness of EL2
-	ldr	x5, =SCTLR_ELx_FLAGS
-	orr	x4, x4, x5
+	/*
+	 * Preserve all the RES1 bits while setting the default flags,
+	 * as well as the EE bit on BE. Drop the A flag since the compiler
+	 * is allowed to generate unaligned accesses.
+	 */
+	ldr	x4, =(SCTLR_EL2_RES1 | (SCTLR_ELx_FLAGS & ~SCTLR_ELx_A))
+CPU_BE(	orr	x4, x4, #SCTLR_ELx_EE)
 	msr	sctlr_el2, x4
 	isb
 
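Note: the new sequence builds SCTLR_EL2 from scratch: all architecturally RES1 bits, plus the usual flags minus the alignment-check bit, with EE OR'd in only on big-endian. A quick evaluation of the constant (the C, SA and I bit positions are assumed here, only A and M appear in the hunk; the duplicated (1 << 16) in the patch is harmless since OR is idempotent):

    #include <stdio.h>

    #define SCTLR_ELx_M   (1UL << 0)
    #define SCTLR_ELx_A   (1UL << 1)
    #define SCTLR_ELx_C   (1UL << 2)   /* assumed */
    #define SCTLR_ELx_SA  (1UL << 3)   /* assumed */
    #define SCTLR_ELx_I   (1UL << 12)  /* assumed */

    #define SCTLR_EL2_RES1 ((1UL << 4) | (1UL << 5) | (1UL << 11) | \
                            (1UL << 16) | (1UL << 18) | (1UL << 22) | \
                            (1UL << 23) | (1UL << 28) | (1UL << 29))

    #define SCTLR_ELx_FLAGS (SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | \
                             SCTLR_ELx_SA | SCTLR_ELx_I)

    int main(void)
    {
        /* The value the fixed code loads before msr sctlr_el2 (LE case). */
        printf("%#lx\n", SCTLR_EL2_RES1 | (SCTLR_ELx_FLAGS & ~SCTLR_ELx_A));
        return 0;
    }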
diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile
index aaf42ae8d8c3..14c4e3b14bcb 100644
--- a/arch/arm64/kvm/hyp/Makefile
+++ b/arch/arm64/kvm/hyp/Makefile
@@ -2,6 +2,8 @@
 # Makefile for Kernel-based Virtual Machine module, HYP part
 #
 
+ccflags-y += -fno-stack-protector
+
 KVM=../../../../virt/kvm
 
 obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v2-sr.o
diff --git a/arch/arm64/kvm/vgic-sys-reg-v3.c b/arch/arm64/kvm/vgic-sys-reg-v3.c
index 79f37e37d367..6260b69e5622 100644
--- a/arch/arm64/kvm/vgic-sys-reg-v3.c
+++ b/arch/arm64/kvm/vgic-sys-reg-v3.c
@@ -65,8 +65,8 @@ static bool access_gic_ctlr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 		 * Here set VMCR.CTLR in ICC_CTLR_EL1 layout.
 		 * The vgic_set_vmcr() will convert to ICH_VMCR layout.
 		 */
-		vmcr.ctlr = val & ICC_CTLR_EL1_CBPR_MASK;
-		vmcr.ctlr |= val & ICC_CTLR_EL1_EOImode_MASK;
+		vmcr.cbpr = (val & ICC_CTLR_EL1_CBPR_MASK) >> ICC_CTLR_EL1_CBPR_SHIFT;
+		vmcr.eoim = (val & ICC_CTLR_EL1_EOImode_MASK) >> ICC_CTLR_EL1_EOImode_SHIFT;
 		vgic_set_vmcr(vcpu, &vmcr);
 	} else {
 		val = 0;
@@ -83,8 +83,8 @@ static bool access_gic_ctlr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 		 * The VMCR.CTLR value is in ICC_CTLR_EL1 layout.
 		 * Extract it directly using ICC_CTLR_EL1 reg definitions.
 		 */
-		val |= vmcr.ctlr & ICC_CTLR_EL1_CBPR_MASK;
-		val |= vmcr.ctlr & ICC_CTLR_EL1_EOImode_MASK;
+		val |= (vmcr.cbpr << ICC_CTLR_EL1_CBPR_SHIFT) & ICC_CTLR_EL1_CBPR_MASK;
+		val |= (vmcr.eoim << ICC_CTLR_EL1_EOImode_SHIFT) & ICC_CTLR_EL1_EOImode_MASK;
 
 		p->regval = val;
 	}
@@ -135,7 +135,7 @@ static bool access_gic_bpr1(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 		p->regval = 0;
 
 	vgic_get_vmcr(vcpu, &vmcr);
-	if (!((vmcr.ctlr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT)) {
+	if (!vmcr.cbpr) {
 		if (p->is_write) {
 			vmcr.abpr = (p->regval & ICC_BPR1_EL1_MASK) >>
 				    ICC_BPR1_EL1_SHIFT;
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index c6e53580aefe..c870d6f01ac2 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -36,6 +36,7 @@ int bpf_jit_enable __read_mostly;
 #define TMP_REG_1 (MAX_BPF_JIT_REG + 0)
 #define TMP_REG_2 (MAX_BPF_JIT_REG + 1)
 #define TCALL_CNT (MAX_BPF_JIT_REG + 2)
+#define TMP_REG_3 (MAX_BPF_JIT_REG + 3)
 
 /* Map BPF registers to A64 registers */
 static const int bpf2a64[] = {
@@ -57,6 +58,7 @@ static const int bpf2a64[] = {
 	/* temporary registers for internal BPF JIT */
 	[TMP_REG_1] = A64_R(10),
 	[TMP_REG_2] = A64_R(11),
+	[TMP_REG_3] = A64_R(12),
 	/* tail_call_cnt */
 	[TCALL_CNT] = A64_R(26),
 	/* temporary register for blinding constants */
@@ -253,8 +255,9 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
 	 */
 	off = offsetof(struct bpf_array, ptrs);
 	emit_a64_mov_i64(tmp, off, ctx);
-	emit(A64_LDR64(tmp, r2, tmp), ctx);
-	emit(A64_LDR64(prg, tmp, r3), ctx);
+	emit(A64_ADD(1, tmp, r2, tmp), ctx);
+	emit(A64_LSL(1, prg, r3, 3), ctx);
+	emit(A64_LDR64(prg, tmp, prg), ctx);
 	emit(A64_CBZ(1, prg, jmp_offset), ctx);
 
 	/* goto *(prog->bpf_func + prologue_size); */
@@ -318,6 +321,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
 	const u8 src = bpf2a64[insn->src_reg];
 	const u8 tmp = bpf2a64[TMP_REG_1];
 	const u8 tmp2 = bpf2a64[TMP_REG_2];
+	const u8 tmp3 = bpf2a64[TMP_REG_3];
 	const s16 off = insn->off;
 	const s32 imm = insn->imm;
 	const int i = insn - ctx->prog->insnsi;
@@ -688,10 +692,10 @@ emit_cond_jmp:
 	emit(A64_PRFM(tmp, PST, L1, STRM), ctx);
 	emit(A64_LDXR(isdw, tmp2, tmp), ctx);
 	emit(A64_ADD(isdw, tmp2, tmp2, src), ctx);
-	emit(A64_STXR(isdw, tmp2, tmp, tmp2), ctx);
+	emit(A64_STXR(isdw, tmp2, tmp, tmp3), ctx);
 	jmp_offset = -3;
 	check_imm19(jmp_offset);
-	emit(A64_CBNZ(0, tmp2, jmp_offset), ctx);
+	emit(A64_CBNZ(0, tmp3, jmp_offset), ctx);
 	break;
 
 	/* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + imm)) */
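Note: the STXR fix matters because a store-exclusive writes its success flag into the status register, and the architecture requires that register to be distinct from the register holding the value being stored (using the same register, as the old tmp2-for-both code did, is unpredictable). The JIT now reserves tmp3 for the flag. A userspace model of the fixed LL/SC retry loop (store_exclusive() is a stand-in that fails once to force a retry):

    #include <stdbool.h>
    #include <stdio.h>

    static long mem = 40;
    static int attempts;

    /* Stand-in for stxr: loses the reservation on the first try. */
    static bool store_exclusive(long *addr, long val)
    {
        if (attempts++ == 0)
            return false;   /* retry needed */
        *addr = val;
        return true;
    }

    int main(void)
    {
        long src = 2, tmp2, status;

        /* Fixed sequence: a dedicated status flag (tmp3 in the JIT). */
        do {
            tmp2 = mem;                                   /* ldxr      */
            tmp2 += src;                                  /* add       */
            status = store_exclusive(&mem, tmp2) ? 0 : 1; /* stxr tmp3 */
        } while (status);                                 /* cbnz tmp3 */

        printf("%ld\n", mem); /* 42: the sum is recomputed cleanly on
                                 each retry; the flag never aliases it */
        return 0;
    }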
diff --git a/arch/blackfin/include/asm/processor.h b/arch/blackfin/include/asm/processor.h
index 85d4af97c986..dbdbb8a558df 100644
--- a/arch/blackfin/include/asm/processor.h
+++ b/arch/blackfin/include/asm/processor.h
@@ -75,11 +75,6 @@ static inline void release_thread(struct task_struct *dead_task)
 {
 }
 
-/*
- * Return saved PC of a blocked thread.
- */
-#define thread_saved_pc(tsk)	(tsk->thread.pc)
-
 unsigned long get_wchan(struct task_struct *p);
 
 #define	KSTK_EIP(tsk)							\
diff --git a/arch/c6x/include/asm/processor.h b/arch/c6x/include/asm/processor.h
index b9eb3da7f278..7c87b5be53b5 100644
--- a/arch/c6x/include/asm/processor.h
+++ b/arch/c6x/include/asm/processor.h
@@ -96,11 +96,6 @@ static inline void release_thread(struct task_struct *dead_task)
 #define release_segments(mm)		do { } while (0)
 
 /*
- * saved PC of a blocked thread.
- */
-#define thread_saved_pc(tsk) (task_pt_regs(tsk)->pc)
-
-/*
  * saved kernel SP and DP of a blocked thread.
  */
 #ifdef _BIG_ENDIAN
diff --git a/arch/cris/arch-v10/kernel/process.c b/arch/cris/arch-v10/kernel/process.c
index e299d30105b5..a2cdb1521aca 100644
--- a/arch/cris/arch-v10/kernel/process.c
+++ b/arch/cris/arch-v10/kernel/process.c
@@ -69,14 +69,6 @@ void hard_reset_now (void)
 	while(1) /* waiting for RETRIBUTION! */ ;
 }
 
-/*
- * Return saved PC of a blocked thread.
- */
-unsigned long thread_saved_pc(struct task_struct *t)
-{
-	return task_pt_regs(t)->irp;
-}
-
 /* setup the child's kernel stack with a pt_regs and switch_stack on it.
  * it will be un-nested during _resume and _ret_from_sys_call when the
  * new thread is scheduled.
diff --git a/arch/cris/arch-v32/kernel/process.c b/arch/cris/arch-v32/kernel/process.c
index c530a8fa87ce..fe87b383fbf3 100644
--- a/arch/cris/arch-v32/kernel/process.c
+++ b/arch/cris/arch-v32/kernel/process.c
@@ -85,14 +85,6 @@ hard_reset_now(void)
 }
 
 /*
- * Return saved PC of a blocked thread.
- */
-unsigned long thread_saved_pc(struct task_struct *t)
-{
-	return task_pt_regs(t)->erp;
-}
-
-/*
  * Setup the child's kernel stack with a pt_regs and call switch_stack() on it.
  * It will be unnested during _resume and _ret_from_sys_call when the new thread
  * is scheduled.
diff --git a/arch/cris/boot/dts/include/dt-bindings b/arch/cris/boot/dts/include/dt-bindings
deleted file mode 120000
index 08c00e4972fa..000000000000
--- a/arch/cris/boot/dts/include/dt-bindings
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../include/dt-bindings
\ No newline at end of file
diff --git a/arch/cris/include/asm/processor.h b/arch/cris/include/asm/processor.h
index 15b815df29c1..bc2729e4b2c9 100644
--- a/arch/cris/include/asm/processor.h
+++ b/arch/cris/include/asm/processor.h
@@ -52,8 +52,6 @@ unsigned long get_wchan(struct task_struct *p);
 
 #define KSTK_ESP(tsk)	((tsk) == current ? rdusp() : (tsk)->thread.usp)
 
-extern unsigned long thread_saved_pc(struct task_struct *tsk);
-
 /* Free all resources held by a thread. */
 static inline void release_thread(struct task_struct *dead_task)
 {
diff --git a/arch/frv/include/asm/processor.h b/arch/frv/include/asm/processor.h
index ddaeb9cc9143..e4d08d74ed9f 100644
--- a/arch/frv/include/asm/processor.h
+++ b/arch/frv/include/asm/processor.h
@@ -96,11 +96,6 @@ extern asmlinkage void *restore_user_regs(const struct user_context *target, ...
 #define release_segments(mm)		do { } while (0)
 #define forget_segments()		do { } while (0)
 
-/*
- * Return saved PC of a blocked thread.
- */
-extern unsigned long thread_saved_pc(struct task_struct *tsk);
-
 unsigned long get_wchan(struct task_struct *p);
 
 #define	KSTK_EIP(tsk)	((tsk)->thread.frame0->pc)
diff --git a/arch/frv/include/asm/timex.h b/arch/frv/include/asm/timex.h
index a89bddefdacf..139093fab326 100644
--- a/arch/frv/include/asm/timex.h
+++ b/arch/frv/include/asm/timex.h
@@ -16,5 +16,11 @@ static inline cycles_t get_cycles(void)
 #define vxtime_lock()		do {} while (0)
 #define vxtime_unlock()		do {} while (0)
 
+/* This attribute is used in include/linux/jiffies.h alongside with
+ * __cacheline_aligned_in_smp. It is assumed that __cacheline_aligned_in_smp
+ * for frv does not contain another section specification.
+ */
+#define __jiffy_arch_data __attribute__((__section__(".data")))
+
 #endif
 
diff --git a/arch/frv/kernel/process.c b/arch/frv/kernel/process.c
index 5a4c92abc99e..a957b374e3a6 100644
--- a/arch/frv/kernel/process.c
+++ b/arch/frv/kernel/process.c
@@ -198,15 +198,6 @@ unsigned long get_wchan(struct task_struct *p)
 	return 0;
 }
 
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
-	/* Check whether the thread is blocked in resume() */
-	if (in_sched_functions(tsk->thread.pc))
-		return ((unsigned long *)tsk->thread.fp)[2];
-	else
-		return tsk->thread.pc;
-}
-
 int elf_check_arch(const struct elf32_hdr *hdr)
 {
 	unsigned long hsr0 = __get_HSR(0);
diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
index da82c25301e7..46aa289c5102 100644
--- a/arch/frv/mm/elf-fdpic.c
+++ b/arch/frv/mm/elf-fdpic.c
@@ -75,7 +75,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
 		addr = PAGE_ALIGN(addr);
 		vma = find_vma(current->mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			goto success;
 	}
 
diff --git a/arch/h8300/include/asm/processor.h b/arch/h8300/include/asm/processor.h
index 65132d7ae9e5..afa53147e66a 100644
--- a/arch/h8300/include/asm/processor.h
+++ b/arch/h8300/include/asm/processor.h
@@ -110,10 +110,6 @@ static inline void release_thread(struct task_struct *dead_task)
 {
 }
 
-/*
- * Return saved PC of a blocked thread.
- */
-unsigned long thread_saved_pc(struct task_struct *tsk);
 unsigned long get_wchan(struct task_struct *p);
 
 #define	KSTK_EIP(tsk)	\
diff --git a/arch/h8300/kernel/process.c b/arch/h8300/kernel/process.c
index 0f5db5bb561b..d1ddcabbbe83 100644
--- a/arch/h8300/kernel/process.c
+++ b/arch/h8300/kernel/process.c
@@ -129,11 +129,6 @@ int copy_thread(unsigned long clone_flags,
 	return 0;
 }
 
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
-	return ((struct pt_regs *)tsk->thread.esp0)->pc;
-}
-
 unsigned long get_wchan(struct task_struct *p)
 {
 	unsigned long fp, pc;
diff --git a/arch/hexagon/include/asm/processor.h b/arch/hexagon/include/asm/processor.h
index 45a825402f63..ce67940860a5 100644
--- a/arch/hexagon/include/asm/processor.h
+++ b/arch/hexagon/include/asm/processor.h
@@ -33,9 +33,6 @@
 /*  task_struct, defined elsewhere, is the "process descriptor" */
 struct task_struct;
 
-/*  this is defined in arch/process.c  */
-extern unsigned long thread_saved_pc(struct task_struct *tsk);
-
 extern void start_thread(struct pt_regs *, unsigned long, unsigned long);
 
 /*
diff --git a/arch/hexagon/kernel/process.c b/arch/hexagon/kernel/process.c
index de715bab7956..656050c2e6a0 100644
--- a/arch/hexagon/kernel/process.c
+++ b/arch/hexagon/kernel/process.c
@@ -61,14 +61,6 @@ void arch_cpu_idle(void)
 }
 
 /*
- * Return saved PC of a blocked thread
- */
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
-	return 0;
-}
-
-/*
  * Copy architecture-specific thread state
  */
 int copy_thread(unsigned long clone_flags, unsigned long usp,
diff --git a/arch/hexagon/mm/uaccess.c b/arch/hexagon/mm/uaccess.c
index ec90afdb3ad0..c599eb126c9e 100644
--- a/arch/hexagon/mm/uaccess.c
+++ b/arch/hexagon/mm/uaccess.c
@@ -37,15 +37,14 @@ __kernel_size_t __clear_user_hexagon(void __user *dest, unsigned long count)
 	long uncleared;
 
 	while (count > PAGE_SIZE) {
-		uncleared = __copy_to_user_hexagon(dest, &empty_zero_page,
-						   PAGE_SIZE);
+		uncleared = raw_copy_to_user(dest, &empty_zero_page, PAGE_SIZE);
 		if (uncleared)
 			return count - (PAGE_SIZE - uncleared);
 		count -= PAGE_SIZE;
 		dest += PAGE_SIZE;
 	}
 	if (count)
-		count = __copy_to_user_hexagon(dest, &empty_zero_page, count);
+		count = raw_copy_to_user(dest, &empty_zero_page, count);
 
 	return count;
 }
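Note: raw_copy_to_user() returns the number of bytes it could not copy, so on a partial copy the bytes actually cleared are PAGE_SIZE - uncleared, and the function reports count minus that. A quick model of the accounting (copy_out() is a stand-in for the user copy; the fault point is made up):

    #include <stdio.h>
    #include <string.h>

    #define PAGE_SIZE 4096UL

    /* Stand-in for raw_copy_to_user(): returns bytes NOT copied. */
    static unsigned long copy_out(char *dst, const char *src, unsigned long n,
                                  unsigned long fail_after)
    {
        unsigned long done = n < fail_after ? n : fail_after;

        memcpy(dst, src, done);
        return n - done;
    }

    int main(void)
    {
        static char zeros[PAGE_SIZE], dest[PAGE_SIZE];
        unsigned long count = 3 * PAGE_SIZE;

        /* Fault partway through the first page: 100 bytes uncopied. */
        unsigned long uncleared = copy_out(dest, zeros, PAGE_SIZE,
                                           PAGE_SIZE - 100);
        if (uncleared) /* bytes still not cleared out of count */
            printf("%lu\n", count - (PAGE_SIZE - uncleared));
        return 0;
    }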
diff --git a/arch/ia64/include/asm/processor.h b/arch/ia64/include/asm/processor.h
index 26a63d69c599..ab982f07ea68 100644
--- a/arch/ia64/include/asm/processor.h
+++ b/arch/ia64/include/asm/processor.h
@@ -602,23 +602,6 @@ ia64_set_unat (__u64 *unat, void *spill_addr, unsigned long nat)
 }
 
 /*
- * Return saved PC of a blocked thread.
- * Note that the only way T can block is through a call to schedule() -> switch_to().
- */
-static inline unsigned long
-thread_saved_pc (struct task_struct *t)
-{
-	struct unw_frame_info info;
-	unsigned long ip;
-
-	unw_init_from_blocked_task(&info, t);
-	if (unw_unwind(&info) < 0)
-		return 0;
-	unw_get_ip(&info, &ip);
-	return ip;
-}
-
-/*
  * Get the current instruction/program counter value.
  */
 #define current_text_addr() \
diff --git a/arch/m32r/include/asm/processor.h b/arch/m32r/include/asm/processor.h
index 5767367550c6..657874eeeccc 100644
--- a/arch/m32r/include/asm/processor.h
+++ b/arch/m32r/include/asm/processor.h
@@ -122,8 +122,6 @@ extern void release_thread(struct task_struct *);
 extern void copy_segments(struct task_struct *p, struct mm_struct * mm);
 extern void release_segments(struct mm_struct * mm);
 
-extern unsigned long thread_saved_pc(struct task_struct *);
-
 /* Copy and release all segment info associated with a VM */
 #define copy_segments(p, mm)		do { } while (0)
 #define release_segments(mm)		do { } while (0)
diff --git a/arch/m32r/kernel/process.c b/arch/m32r/kernel/process.c
index d8ffcfec599c..8cd7e03f4370 100644
--- a/arch/m32r/kernel/process.c
+++ b/arch/m32r/kernel/process.c
@@ -39,14 +39,6 @@
 
 #include <linux/err.h>
 
-/*
- * Return saved PC of a blocked thread.
- */
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
-	return tsk->thread.lr;
-}
-
 void (*pm_power_off)(void) = NULL;
 EXPORT_SYMBOL(pm_power_off);
 
diff --git a/arch/m68k/include/asm/processor.h b/arch/m68k/include/asm/processor.h
index 77239e81379b..94c36030440c 100644
--- a/arch/m68k/include/asm/processor.h
+++ b/arch/m68k/include/asm/processor.h
@@ -130,8 +130,6 @@ static inline void release_thread(struct task_struct *dead_task)
 {
 }
 
-extern unsigned long thread_saved_pc(struct task_struct *tsk);
-
 unsigned long get_wchan(struct task_struct *p);
 
 #define	KSTK_EIP(tsk)	\
diff --git a/arch/m68k/kernel/process.c b/arch/m68k/kernel/process.c
index e475c945c8b2..7df92f8b0781 100644
--- a/arch/m68k/kernel/process.c
+++ b/arch/m68k/kernel/process.c
@@ -40,20 +40,6 @@
 asmlinkage void ret_from_fork(void);
 asmlinkage void ret_from_kernel_thread(void);
 
-
-/*
- * Return saved PC from a blocked thread
- */
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
-	struct switch_stack *sw = (struct switch_stack *)tsk->thread.ksp;
-	/* Check whether the thread is blocked in resume() */
-	if (in_sched_functions(sw->retpc))
-		return ((unsigned long *)sw->a6)[1];
-	else
-		return sw->retpc;
-}
-
 void arch_cpu_idle(void)
 {
 #if defined(MACH_ATARI_ONLY)
diff --git a/arch/metag/boot/dts/include/dt-bindings b/arch/metag/boot/dts/include/dt-bindings
deleted file mode 120000
index 08c00e4972fa..000000000000
--- a/arch/metag/boot/dts/include/dt-bindings
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../include/dt-bindings
\ No newline at end of file
diff --git a/arch/microblaze/include/asm/processor.h b/arch/microblaze/include/asm/processor.h
index 37ef196e4519..330d556860ba 100644
--- a/arch/microblaze/include/asm/processor.h
+++ b/arch/microblaze/include/asm/processor.h
@@ -69,8 +69,6 @@ static inline void release_thread(struct task_struct *dead_task)
 {
 }
 
-extern unsigned long thread_saved_pc(struct task_struct *t);
-
 extern unsigned long get_wchan(struct task_struct *p);
 
 # define KSTK_EIP(tsk)	(0)
@@ -121,10 +119,6 @@ static inline void release_thread(struct task_struct *dead_task)
 {
 }
 
-/* Return saved (kernel) PC of a blocked thread.  */
-#  define thread_saved_pc(tsk)	\
-	((tsk)->thread.regs ? (tsk)->thread.regs->r15 : 0)
-
 unsigned long get_wchan(struct task_struct *p);
 
 /* The size allocated for kernel stacks. This _must_ be a power of two! */
diff --git a/arch/microblaze/kernel/process.c b/arch/microblaze/kernel/process.c
index e92a817e645f..6527ec22f158 100644
--- a/arch/microblaze/kernel/process.c
+++ b/arch/microblaze/kernel/process.c
@@ -119,23 +119,6 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 	return 0;
 }
 
-#ifndef CONFIG_MMU
-/*
- * Return saved PC of a blocked thread.
- */
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
-	struct cpu_context *ctx =
-		&(((struct thread_info *)(tsk->stack))->cpu_context);
-
-	/* Check whether the thread is blocked in resume() */
-	if (in_sched_functions(ctx->r15))
-		return (unsigned long)ctx->r15;
-	else
-		return ctx->r14;
-}
-#endif
-
 unsigned long get_wchan(struct task_struct *p)
 {
 /* TBD (used by procfs) */
diff --git a/arch/mips/boot/Makefile b/arch/mips/boot/Makefile
index 2728a9a9c7c5..145b5ce8eb7e 100644
--- a/arch/mips/boot/Makefile
+++ b/arch/mips/boot/Makefile
@@ -128,19 +128,19 @@ quiet_cmd_cpp_its_S = ITS $@
 		-DADDR_BITS=$(ADDR_BITS) \
 		-DADDR_CELLS=$(itb_addr_cells)
 
-$(obj)/vmlinux.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE
+$(obj)/vmlinux.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE
 	$(call if_changed_dep,cpp_its_S,none,vmlinux.bin)
 
-$(obj)/vmlinux.gz.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE
+$(obj)/vmlinux.gz.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE
 	$(call if_changed_dep,cpp_its_S,gzip,vmlinux.bin.gz)
 
-$(obj)/vmlinux.bz2.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE
+$(obj)/vmlinux.bz2.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE
 	$(call if_changed_dep,cpp_its_S,bzip2,vmlinux.bin.bz2)
 
-$(obj)/vmlinux.lzma.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE
+$(obj)/vmlinux.lzma.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE
 	$(call if_changed_dep,cpp_its_S,lzma,vmlinux.bin.lzma)
 
-$(obj)/vmlinux.lzo.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE
+$(obj)/vmlinux.lzo.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE
 	$(call if_changed_dep,cpp_its_S,lzo,vmlinux.bin.lzo)
 
 quiet_cmd_itb-image = ITB $@
diff --git a/arch/mips/boot/dts/include/dt-bindings b/arch/mips/boot/dts/include/dt-bindings
deleted file mode 120000
index 08c00e4972fa..000000000000
--- a/arch/mips/boot/dts/include/dt-bindings
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../include/dt-bindings
\ No newline at end of file
diff --git a/arch/mips/include/asm/highmem.h b/arch/mips/include/asm/highmem.h
index d34536e7653f..279b6d14ffeb 100644
--- a/arch/mips/include/asm/highmem.h
+++ b/arch/mips/include/asm/highmem.h
@@ -35,7 +35,12 @@ extern pte_t *pkmap_page_table;
  * easily, subsequent pte tables have to be allocated in one physical
  * chunk of RAM.
  */
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+#define LAST_PKMAP 512
+#else
 #define LAST_PKMAP 1024
+#endif
+
 #define LAST_PKMAP_MASK (LAST_PKMAP-1)
 #define PKMAP_NR(virt)	((virt-PKMAP_BASE) >> PAGE_SHIFT)
 #define PKMAP_ADDR(nr)	(PKMAP_BASE + ((nr) << PAGE_SHIFT))
diff --git a/arch/mips/include/asm/kprobes.h b/arch/mips/include/asm/kprobes.h
index 291846d9ba83..ad1a99948f27 100644
--- a/arch/mips/include/asm/kprobes.h
+++ b/arch/mips/include/asm/kprobes.h
@@ -43,7 +43,8 @@ typedef union mips_instruction kprobe_opcode_t;
 
 #define flush_insn_slot(p)						\
 do {									\
-	flush_icache_range((unsigned long)p->addr,			\
+	if (p->addr)							\
+		flush_icache_range((unsigned long)p->addr,		\
 			   (unsigned long)p->addr +			\
 			   (MAX_INSN_SIZE * sizeof(kprobe_opcode_t)));	\
 } while (0)
diff --git a/arch/mips/include/asm/pgtable-32.h b/arch/mips/include/asm/pgtable-32.h
index 6f94bed571c4..74afe8c76bdd 100644
--- a/arch/mips/include/asm/pgtable-32.h
+++ b/arch/mips/include/asm/pgtable-32.h
@@ -19,6 +19,10 @@
 #define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopmd.h>
 
+#ifdef CONFIG_HIGHMEM
+#include <asm/highmem.h>
+#endif
+
 extern int temp_tlb_entry;
 
 /*
@@ -62,7 +66,8 @@ extern int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
 
 #define VMALLOC_START MAP_BASE
 
-#define PKMAP_BASE		(0xfe000000UL)
+#define PKMAP_END	((FIXADDR_START) & ~((LAST_PKMAP << PAGE_SHIFT)-1))
+#define PKMAP_BASE	(PKMAP_END - PAGE_SIZE * LAST_PKMAP)
 
 #ifdef CONFIG_HIGHMEM
 # define VMALLOC_END	(PKMAP_BASE-2*PAGE_SIZE)
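Note: the new PKMAP_END/PKMAP_BASE arithmetic aligns the kmap window to its own size and hangs it just below the fixmap instead of hard-coding 0xfe000000, which is what lets LAST_PKMAP shrink to 512 in the 64-bit-phys case without leaving a hole. A quick check of the arithmetic with an assumed FIXADDR_START (the real value is platform-dependent):

    #include <stdio.h>

    #define PAGE_SHIFT    12
    #define PAGE_SIZE     (1UL << PAGE_SHIFT)
    #define LAST_PKMAP    512             /* CONFIG_PHYS_ADDR_T_64BIT case */
    #define FIXADDR_START 0xfefe1000UL    /* assumed for the example */

    #define PKMAP_END  ((FIXADDR_START) & ~((LAST_PKMAP << PAGE_SHIFT) - 1))
    #define PKMAP_BASE (PKMAP_END - PAGE_SIZE * LAST_PKMAP)

    int main(void)
    {
        /* The window is aligned to its 2 MiB size, just below the fixmap. */
        printf("PKMAP_BASE=%#lx PKMAP_END=%#lx size=%#lx\n",
               PKMAP_BASE, PKMAP_END, PAGE_SIZE * LAST_PKMAP);
        return 0;
    }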
diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c
index b11facd11c9d..f702a459a830 100644
--- a/arch/mips/kernel/branch.c
+++ b/arch/mips/kernel/branch.c
@@ -804,8 +804,10 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
 			break;
 		}
 		/* Compact branch: BNEZC || JIALC */
-		if (insn.i_format.rs)
+		if (!insn.i_format.rs) {
+			/* JIALC: set $31/ra */
 			regs->regs[31] = epc + 4;
+		}
 		regs->cp0_epc += 8;
 		break;
 #endif
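Note: BNEZC and JIALC share a major opcode and are distinguished by the rs field; only JIALC (rs == 0) writes the link register, which is the condition the fixed emulation now tests (the old code linked on rs != 0, i.e. for BNEZC). A tiny model of the corrected decode (struct insn and emulate() are illustrative):

    #include <stdio.h>

    struct insn { unsigned rs; };

    static void emulate(struct insn i, unsigned long *ra, unsigned long epc)
    {
        if (!i.rs)
            *ra = epc + 4;  /* JIALC: set $31 */
        /* both forms: advance past the 8-byte branch + forbidden slot */
    }

    int main(void)
    {
        unsigned long ra = 0;

        emulate((struct insn){ .rs = 0 }, &ra, 0x1000); /* JIALC links */
        printf("ra=%#lx\n", ra);
        return 0;
    }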
diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S
index 8d83fc2a96b7..38a302919e6b 100644
--- a/arch/mips/kernel/entry.S
+++ b/arch/mips/kernel/entry.S
@@ -11,6 +11,7 @@
 #include <asm/asm.h>
 #include <asm/asmmacro.h>
 #include <asm/compiler.h>
+#include <asm/irqflags.h>
 #include <asm/regdef.h>
 #include <asm/mipsregs.h>
 #include <asm/stackframe.h>
@@ -119,6 +120,7 @@ work_pending:
 	andi	t0, a2, _TIF_NEED_RESCHED # a2 is preloaded with TI_FLAGS
 	beqz	t0, work_notifysig
 work_resched:
+	TRACE_IRQS_OFF
 	jal	schedule
 
 	local_irq_disable		# make sure need_resched and
@@ -155,6 +157,7 @@ syscall_exit_work:
 	beqz	t0, work_pending	# trace bit set?
 	local_irq_enable		# could let syscall_trace_leave()
 					# call schedule() instead
+	TRACE_IRQS_ON
 	move	a0, sp
 	jal	syscall_trace_leave
 	b	resume_userspace
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c
index 30a3b75e88eb..9d9b8fbae202 100644
--- a/arch/mips/kernel/ftrace.c
+++ b/arch/mips/kernel/ftrace.c
@@ -38,20 +38,6 @@ void arch_ftrace_update_code(int command)
 
 #endif
 
-/*
- * Check if the address is in kernel space
- *
- * Clone core_kernel_text() from kernel/extable.c, but doesn't call
- * init_kernel_text() for Ftrace doesn't trace functions in init sections.
- */
-static inline int in_kernel_space(unsigned long ip)
-{
-	if (ip >= (unsigned long)_stext &&
-	    ip <= (unsigned long)_etext)
-		return 1;
-	return 0;
-}
-
 #ifdef CONFIG_DYNAMIC_FTRACE
 
 #define JAL 0x0c000000		/* jump & link: ip --> ra, jump to target */
@@ -198,7 +184,7 @@ int ftrace_make_nop(struct module *mod,
 	 * If ip is in kernel space, no long call, otherwise, long call is
 	 * needed.
 	 */
-	new = in_kernel_space(ip) ? INSN_NOP : INSN_B_1F;
+	new = core_kernel_text(ip) ? INSN_NOP : INSN_B_1F;
 #ifdef CONFIG_64BIT
 	return ftrace_modify_code(ip, new);
 #else
@@ -218,12 +204,12 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 	unsigned int new;
 	unsigned long ip = rec->ip;
 
-	new = in_kernel_space(ip) ? insn_jal_ftrace_caller : insn_la_mcount[0];
+	new = core_kernel_text(ip) ? insn_jal_ftrace_caller : insn_la_mcount[0];
 
 #ifdef CONFIG_64BIT
 	return ftrace_modify_code(ip, new);
 #else
-	return ftrace_modify_code_2r(ip, new, in_kernel_space(ip) ?
+	return ftrace_modify_code_2r(ip, new, core_kernel_text(ip) ?
 				     INSN_NOP : insn_la_mcount[1]);
 #endif
 }
@@ -289,7 +275,7 @@ unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long
 	 * instruction "lui v1, hi_16bit_of_mcount"(offset is 24), but for
 	 * kernel, move after the instruction "move ra, at"(offset is 16)
 	 */
-	ip = self_ra - (in_kernel_space(self_ra) ? 16 : 24);
+	ip = self_ra - (core_kernel_text(self_ra) ? 16 : 24);
 
 	/*
 	 * search the text until finding the non-store instruction or "s{d,w}
@@ -394,7 +380,7 @@ void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
 	 * entries configured through the tracing/set_graph_function interface.
 	 */
 
-	insns = in_kernel_space(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1;
+	insns = core_kernel_text(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1;
 	trace.func = self_ra - (MCOUNT_INSN_SIZE * insns);
 
 	/* Only trace if the calling function expects to */
diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S
index cf052204eb0a..d1bb506adc10 100644
--- a/arch/mips/kernel/head.S
+++ b/arch/mips/kernel/head.S
@@ -106,8 +106,8 @@ NESTED(kernel_entry, 16, sp) # kernel entry point
 		beq	t0, t1, dtb_found
 #endif
 	li	t1, -2
-	beq	a0, t1, dtb_found
 	move	t2, a1
+	beq	a0, t1, dtb_found
 
 	li	t2, 0
 dtb_found:
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index 313a88b2973f..f3e301f95aef 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -1597,7 +1597,6 @@ static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
 		break;
 	case CPU_P5600:
 	case CPU_P6600:
-	case CPU_I6400:
 		/* 8-bit event numbers */
 		raw_id = config & 0x1ff;
 		base_id = raw_id & 0xff;
@@ -1610,6 +1609,11 @@ static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
 		raw_event.range = P;
 #endif
 		break;
+	case CPU_I6400:
+		/* 8-bit event numbers */
+		base_id = config & 0xff;
+		raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
+		break;
 	case CPU_1004K:
 		if (IS_BOTH_COUNTERS_1004K_EVENT(base_id))
 			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
index 5f928c34c148..d99416094ba9 100644
--- a/arch/mips/kernel/pm-cps.c
+++ b/arch/mips/kernel/pm-cps.c
@@ -56,7 +56,6 @@ DECLARE_BITMAP(state_support, CPS_PM_STATE_COUNT);
  * state. Actually per-core rather than per-CPU.
  */
 static DEFINE_PER_CPU_ALIGNED(u32*, ready_count);
-static DEFINE_PER_CPU_ALIGNED(void*, ready_count_alloc);
 
 /* Indicates online CPUs coupled with the current CPU */
 static DEFINE_PER_CPU_ALIGNED(cpumask_t, online_coupled);
@@ -642,7 +641,6 @@ static int cps_pm_online_cpu(unsigned int cpu)
 {
 	enum cps_pm_state state;
 	unsigned core = cpu_data[cpu].core;
-	unsigned dlinesz = cpu_data[cpu].dcache.linesz;
 	void *entry_fn, *core_rc;
 
 	for (state = CPS_PM_NC_WAIT; state < CPS_PM_STATE_COUNT; state++) {
@@ -662,16 +660,11 @@ static int cps_pm_online_cpu(unsigned int cpu)
 	}
 
 	if (!per_cpu(ready_count, core)) {
-		core_rc = kmalloc(dlinesz * 2, GFP_KERNEL);
+		core_rc = kmalloc(sizeof(u32), GFP_KERNEL);
 		if (!core_rc) {
 			pr_err("Failed allocate core %u ready_count\n", core);
 			return -ENOMEM;
 		}
-		per_cpu(ready_count_alloc, core) = core_rc;
-
-		/* Ensure ready_count is aligned to a cacheline boundary */
-		core_rc += dlinesz - 1;
-		core_rc = (void *)((unsigned long)core_rc & ~(dlinesz - 1));
 		per_cpu(ready_count, core) = core_rc;
 	}
 
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index 918d4c73e951..5351e1f3950d 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -120,7 +120,6 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long usp,
 	struct thread_info *ti = task_thread_info(p);
 	struct pt_regs *childregs, *regs = current_pt_regs();
 	unsigned long childksp;
-	p->set_child_tid = p->clear_child_tid = NULL;
 
 	childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32;
 
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 9681b5877140..38dfa27730ff 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -201,6 +201,8 @@ void show_stack(struct task_struct *task, unsigned long *sp)
 {
 	struct pt_regs regs;
 	mm_segment_t old_fs = get_fs();
+
+	regs.cp0_status = KSU_KERNEL;
 	if (sp) {
 		regs.regs[29] = (unsigned long)sp;
 		regs.regs[31] = 0;
diff --git a/arch/mips/kvm/tlb.c b/arch/mips/kvm/tlb.c
index 7c6336dd2638..7cd92166a0b9 100644
--- a/arch/mips/kvm/tlb.c
+++ b/arch/mips/kvm/tlb.c
@@ -166,7 +166,11 @@ static int _kvm_mips_host_tlb_inv(unsigned long entryhi)
 int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va,
 			  bool user, bool kernel)
 {
-	int idx_user, idx_kernel;
+	/*
+	 * Initialize idx_user and idx_kernel to workaround bogus
+	 * maybe-initialized warning when using GCC 6.
+	 */
+	int idx_user = 0, idx_kernel = 0;
 	unsigned long flags, old_entryhi;
 
 	local_irq_save(flags);
diff --git a/arch/mips/math-emu/dp_maddf.c b/arch/mips/math-emu/dp_maddf.c
index 4a2d03c72959..caa62f20a888 100644
--- a/arch/mips/math-emu/dp_maddf.c
+++ b/arch/mips/math-emu/dp_maddf.c
@@ -54,7 +54,7 @@ static union ieee754dp _dp_maddf(union ieee754dp z, union ieee754dp x,
 		return ieee754dp_nanxcpt(z);
 	case IEEE754_CLASS_DNORM:
 		DPDNORMZ;
-	/* QNAN is handled separately below */
+	/* QNAN and ZERO cases are handled separately below */
 	}
 
 	switch (CLPAIR(xc, yc)) {
@@ -210,6 +210,9 @@ static union ieee754dp _dp_maddf(union ieee754dp z, union ieee754dp x,
 	}
 	assert(rm & (DP_HIDDEN_BIT << 3));
 
+	if (zc == IEEE754_CLASS_ZERO)
+		return ieee754dp_format(rs, re, rm);
+
 	/* And now the addition */
 	assert(zm & DP_HIDDEN_BIT);
 
diff --git a/arch/mips/math-emu/sp_maddf.c b/arch/mips/math-emu/sp_maddf.c
index a8cd8b4f235e..c91d5e5d9b5f 100644
--- a/arch/mips/math-emu/sp_maddf.c
+++ b/arch/mips/math-emu/sp_maddf.c
@@ -54,7 +54,7 @@ static union ieee754sp _sp_maddf(union ieee754sp z, union ieee754sp x,
 		return ieee754sp_nanxcpt(z);
 	case IEEE754_CLASS_DNORM:
 		SPDNORMZ;
-	/* QNAN is handled separately below */
+	/* QNAN and ZERO cases are handled separately below */
 	}
 
 	switch (CLPAIR(xc, yc)) {
@@ -203,6 +203,9 @@ static union ieee754sp _sp_maddf(union ieee754sp z, union ieee754sp x,
 	}
 	assert(rm & (SP_HIDDEN_BIT << 3));
 
+	if (zc == IEEE754_CLASS_ZERO)
+		return ieee754sp_format(rs, re, rm);
+
 	/* And now the addition */
 
 	assert(zm & SP_HIDDEN_BIT);
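Note: the early return added in both the single- and double-precision paths handles a zero addend: x * y + 0.0 is just the correctly rounded product, and falling through to the addition path would trip the assert on the (unnormalized) zero mantissa. The same semantics in plain C via fma() (link with -lm):

    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
        double x = 1.5, y = 2.0, z = 0.0;

        /* fused multiply-add with a zero addend == rounded product */
        printf("%g %g\n", fma(x, y, z), x * y);
        return 0;
    }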
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index fe8df14b6169..e08598c70b3e 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -68,12 +68,25 @@ static inline struct page *dma_addr_to_page(struct device *dev,
  * systems and only the R10000 and R12000 are used in such systems, the
  * SGI IP28 Indigo² rsp. SGI IP32 aka O2.
  */
-static inline int cpu_needs_post_dma_flush(struct device *dev)
+static inline bool cpu_needs_post_dma_flush(struct device *dev)
 {
-	return !plat_device_is_coherent(dev) &&
-	       (boot_cpu_type() == CPU_R10000 ||
-		boot_cpu_type() == CPU_R12000 ||
-		boot_cpu_type() == CPU_BMIPS5000);
+	if (plat_device_is_coherent(dev))
+		return false;
+
+	switch (boot_cpu_type()) {
+	case CPU_R10000:
+	case CPU_R12000:
+	case CPU_BMIPS5000:
+		return true;
+
+	default:
+		/*
+		 * Presence of MAARs suggests that the CPU supports
+		 * speculatively prefetching data, and therefore requires
+		 * the post-DMA flush/invalidate.
+		 */
+		return cpu_has_maar;
+	}
 }
 
 static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
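A standalone model of the new predicate's shape, with stub inputs standing in for plat_device_is_coherent(), boot_cpu_type() and cpu_has_maar (assumptions, not kernel code):

	#include <stdbool.h>
	#include <stdio.h>

	enum cpu_type { CPU_OTHER, CPU_R10000, CPU_R12000, CPU_BMIPS5000 };

	static bool needs_post_dma_flush(bool coherent, enum cpu_type cpu,
					 bool has_maar)
	{
		if (coherent)
			return false;	/* coherent devices never need it */

		switch (cpu) {
		case CPU_R10000:
		case CPU_R12000:
		case CPU_BMIPS5000:
			return true;	/* known speculative CPUs */
		default:
			/* MAARs imply speculative prefetch, hence the flush. */
			return has_maar;
		}
	}

	int main(void)
	{
		printf("%d %d %d\n",
		       needs_post_dma_flush(true, CPU_R10000, false),  /* 0 */
		       needs_post_dma_flush(false, CPU_R12000, false), /* 1 */
		       needs_post_dma_flush(false, CPU_OTHER, true));  /* 1 */
		return 0;
	}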
diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
index 64dd8bdd92c3..28adeabe851f 100644
--- a/arch/mips/mm/mmap.c
+++ b/arch/mips/mm/mmap.c
@@ -93,7 +93,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
 
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 
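This hunk (and the parisc changes further down) is part of the stack guard gap series: vm_start_gap() lowers the effective start of a downward-growing VMA so an address hint cannot land inside the gap. A rough standalone model (illustrative constants, not kernel code):

	#include <stdbool.h>
	#include <stdio.h>

	#define PAGE_SIZE	4096UL
	#define STACK_GUARD_GAP	(256UL << 12)	/* 256 pages, as in the kernel */

	struct vma { unsigned long vm_start; bool grows_down; };

	static unsigned long vm_start_gap(const struct vma *v)
	{
		return v->grows_down ? v->vm_start - STACK_GUARD_GAP
				     : v->vm_start;
	}

	static bool hint_ok(unsigned long addr, unsigned long len,
			    const struct vma *next)
	{
		return !next || addr + len <= vm_start_gap(next);
	}

	int main(void)
	{
		struct vma stack = { 0x7fff00000000UL, true };

		/* 0: one page below the stack is inside the guard gap */
		printf("%d\n", hint_ok(stack.vm_start - PAGE_SIZE,
				       PAGE_SIZE, &stack));
		/* 1: well below the gap is fine */
		printf("%d\n", hint_ok(stack.vm_start - 2 * STACK_GUARD_GAP,
				       PAGE_SIZE, &stack));
		return 0;
	}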
diff --git a/arch/mips/mm/pgtable-32.c b/arch/mips/mm/pgtable-32.c
index adc6911ba748..b19a3c506b1e 100644
--- a/arch/mips/mm/pgtable-32.c
+++ b/arch/mips/mm/pgtable-32.c
@@ -51,15 +51,15 @@ void __init pagetable_init(void)
 	/*
 	 * Fixed mappings:
 	 */
-	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
-	fixrange_init(vaddr, vaddr + FIXADDR_SIZE, pgd_base);
+	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1);
+	fixrange_init(vaddr & PMD_MASK, vaddr + FIXADDR_SIZE, pgd_base);
 
 #ifdef CONFIG_HIGHMEM
 	/*
 	 * Permanent kmaps:
 	 */
 	vaddr = PKMAP_BASE;
-	fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
+	fixrange_init(vaddr & PMD_MASK, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
 
 	pgd = swapper_pg_dir + __pgd_offset(vaddr);
 	pud = pud_offset(pgd, vaddr);
diff --git a/arch/mn10300/include/asm/processor.h b/arch/mn10300/include/asm/processor.h
index 18e17abf7664..3ae479117b42 100644
--- a/arch/mn10300/include/asm/processor.h
+++ b/arch/mn10300/include/asm/processor.h
@@ -132,11 +132,6 @@ static inline void start_thread(struct pt_regs *regs,
 /* Free all resources held by a thread. */
 extern void release_thread(struct task_struct *);
 
-/*
- * Return saved PC of a blocked thread.
- */
-extern unsigned long thread_saved_pc(struct task_struct *tsk);
-
 unsigned long get_wchan(struct task_struct *p);
 
 #define task_pt_regs(task) ((task)->thread.uregs)
diff --git a/arch/mn10300/kernel/process.c b/arch/mn10300/kernel/process.c
index c9fa42619c6a..89e8027e07fb 100644
--- a/arch/mn10300/kernel/process.c
+++ b/arch/mn10300/kernel/process.c
@@ -40,14 +40,6 @@
 #include "internal.h"
 
 /*
- * return saved PC of a blocked thread.
- */
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
-	return ((unsigned long *) tsk->thread.sp)[3];
-}
-
-/*
  * power off function, if any
  */
 void (*pm_power_off)(void);
diff --git a/arch/nios2/include/asm/processor.h b/arch/nios2/include/asm/processor.h
index 3bbbc3d798e5..4944e2e1d8b0 100644
--- a/arch/nios2/include/asm/processor.h
+++ b/arch/nios2/include/asm/processor.h
@@ -75,9 +75,6 @@ static inline void release_thread(struct task_struct *dead_task)
 {
 }
 
-/* Return saved PC of a blocked thread. */
-#define thread_saved_pc(tsk)	((tsk)->thread.kregs->ea)
-
 extern unsigned long get_wchan(struct task_struct *p);
 
 #define task_pt_regs(p) \
diff --git a/arch/openrisc/include/asm/processor.h b/arch/openrisc/include/asm/processor.h
index a908e6c30a00..396d8f306c21 100644
--- a/arch/openrisc/include/asm/processor.h
+++ b/arch/openrisc/include/asm/processor.h
@@ -84,11 +84,6 @@ void start_thread(struct pt_regs *regs, unsigned long nip, unsigned long sp);
 void release_thread(struct task_struct *);
 unsigned long get_wchan(struct task_struct *p);
 
-/*
- * Return saved PC of a blocked thread. For now, this is the "user" PC
- */
-extern unsigned long thread_saved_pc(struct task_struct *t);
-
 #define init_stack	(init_thread_union.stack)
 
 #define cpu_relax()	barrier()
diff --git a/arch/openrisc/kernel/process.c b/arch/openrisc/kernel/process.c
index f8da545854f9..f9b77003f113 100644
--- a/arch/openrisc/kernel/process.c
+++ b/arch/openrisc/kernel/process.c
@@ -110,11 +110,6 @@ void show_regs(struct pt_regs *regs)
 	show_registers(regs);
 }
 
-unsigned long thread_saved_pc(struct task_struct *t)
-{
-	return (unsigned long)user_regs(t->stack)->pc;
-}
-
 void release_thread(struct task_struct *dead_task)
 {
 }
@@ -167,8 +162,6 @@ copy_thread(unsigned long clone_flags, unsigned long usp,
 
 	top_of_kernel_stack = sp;
 
-	p->set_child_tid = p->clear_child_tid = NULL;
-
 	/* Locate userspace context on stack... */
 	sp -= STACK_FRAME_OVERHEAD;	/* redzone */
 	sp -= sizeof(struct pt_regs);
diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h
index a3661ee6b060..4c6694b4e77e 100644
--- a/arch/parisc/include/asm/processor.h
+++ b/arch/parisc/include/asm/processor.h
@@ -163,12 +163,7 @@ struct thread_struct {
 	.flags		= 0 \
 	}
 
-/*
- * Return saved PC of a blocked thread. This is used by ps mostly.
- */
-
 struct task_struct;
-unsigned long thread_saved_pc(struct task_struct *t);
 void show_trace(struct task_struct *task, unsigned long *stack);
 
 /*
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
index 4516a5b53f38..b64d7d21646e 100644
--- a/arch/parisc/kernel/process.c
+++ b/arch/parisc/kernel/process.c
@@ -239,11 +239,6 @@ copy_thread(unsigned long clone_flags, unsigned long usp,
 	return 0;
 }
 
-unsigned long thread_saved_pc(struct task_struct *t)
-{
-	return t->thread.regs.kpc;
-}
-
 unsigned long
 get_wchan(struct task_struct *p)
 {
diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
index e5288638a1d9..378a754ca186 100644
--- a/arch/parisc/kernel/sys_parisc.c
+++ b/arch/parisc/kernel/sys_parisc.c
@@ -90,7 +90,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 			unsigned long len, unsigned long pgoff, unsigned long flags)
 {
 	struct mm_struct *mm = current->mm;
-	struct vm_area_struct *vma;
+	struct vm_area_struct *vma, *prev;
 	unsigned long task_size = TASK_SIZE;
 	int do_color_align, last_mmap;
 	struct vm_unmapped_area_info info;
@@ -117,9 +117,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 		else
 			addr = PAGE_ALIGN(addr);
 
-		vma = find_vma(mm, addr);
+		vma = find_vma_prev(mm, addr, &prev);
 		if (task_size - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)) &&
+		    (!prev || addr >= vm_end_gap(prev)))
 			goto found_addr;
 	}
 
@@ -143,7 +144,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 		const unsigned long len, const unsigned long pgoff,
 		const unsigned long flags)
 {
-	struct vm_area_struct *vma;
+	struct vm_area_struct *vma, *prev;
 	struct mm_struct *mm = current->mm;
 	unsigned long addr = addr0;
 	int do_color_align, last_mmap;
@@ -177,9 +178,11 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 			addr = COLOR_ALIGN(addr, last_mmap, pgoff);
 		else
 			addr = PAGE_ALIGN(addr);
-		vma = find_vma(mm, addr);
+
+		vma = find_vma_prev(mm, addr, &prev);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)) &&
+		    (!prev || addr >= vm_end_gap(prev)))
 			goto found_addr;
 	}
 
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index f7c8f9972f61..bf4391d18923 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -380,22 +380,6 @@ source "arch/powerpc/platforms/Kconfig"
 
 menu "Kernel options"
 
-config PPC_DT_CPU_FTRS
-	bool "Device-tree based CPU feature discovery & setup"
-	depends on PPC_BOOK3S_64
-	default n
-	help
-	  This enables code to use a new device tree binding for describing CPU
-	  compatibility and features. Saying Y here will attempt to use the new
-	  binding if the firmware provides it. Currently only the skiboot
-	  firmware provides this binding.
-	  If you're not sure say Y.
-
-config PPC_CPUFEATURES_ENABLE_UNKNOWN
-	bool "cpufeatures pass through unknown features to guest/userspace"
-	depends on PPC_DT_CPU_FTRS
-	default y
-
 config HIGHMEM
 	bool "High memory support"
 	depends on PPC32
@@ -1215,11 +1199,6 @@ source "arch/powerpc/Kconfig.debug"
 
 source "security/Kconfig"
 
-config KEYS_COMPAT
-	bool
-	depends on COMPAT && KEYS
-	default y
-
 source "crypto/Kconfig"
 
 config PPC_LIB_RHEAP
diff --git a/arch/powerpc/boot/dts/include/dt-bindings b/arch/powerpc/boot/dts/include/dt-bindings
deleted file mode 120000
index 08c00e4972fa..000000000000
--- a/arch/powerpc/boot/dts/include/dt-bindings
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../include/dt-bindings
\ No newline at end of file
diff --git a/arch/powerpc/include/asm/book3s/64/hash-4k.h b/arch/powerpc/include/asm/book3s/64/hash-4k.h
index b4b5e6b671ca..0c4e470571ca 100644
--- a/arch/powerpc/include/asm/book3s/64/hash-4k.h
+++ b/arch/powerpc/include/asm/book3s/64/hash-4k.h
@@ -8,7 +8,7 @@
 #define H_PTE_INDEX_SIZE  9
 #define H_PMD_INDEX_SIZE  7
 #define H_PUD_INDEX_SIZE  9
-#define H_PGD_INDEX_SIZE  12
+#define H_PGD_INDEX_SIZE  9
 
 #ifndef __ASSEMBLY__
 #define H_PTE_TABLE_SIZE	(sizeof(pte_t) << H_PTE_INDEX_SIZE)
diff --git a/arch/powerpc/include/asm/bug.h b/arch/powerpc/include/asm/bug.h
index f2c562a0a427..0151af6c2a50 100644
--- a/arch/powerpc/include/asm/bug.h
+++ b/arch/powerpc/include/asm/bug.h
@@ -104,7 +104,7 @@
 		"1:	"PPC_TLNEI"	%4,0\n"			\
 		_EMIT_BUG_ENTRY				\
 		: : "i" (__FILE__), "i" (__LINE__),	\
-		  "i" (BUGFLAG_TAINT(TAINT_WARN)),	\
+		  "i" (BUGFLAG_WARNING|BUGFLAG_TAINT(TAINT_WARN)),\
 		  "i" (sizeof(struct bug_entry)),	\
 		  "r" (__ret_warn_on));	\
 	}					\
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index c2d509584a98..d02ad93bf708 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -214,7 +214,6 @@ enum {
 #define CPU_FTR_DAWR			LONG_ASM_CONST(0x0400000000000000)
 #define CPU_FTR_DABRX			LONG_ASM_CONST(0x0800000000000000)
 #define CPU_FTR_PMAO_BUG		LONG_ASM_CONST(0x1000000000000000)
-#define CPU_FTR_SUBCORE			LONG_ASM_CONST(0x2000000000000000)
 #define CPU_FTR_POWER9_DD1		LONG_ASM_CONST(0x4000000000000000)
 
 #ifndef __ASSEMBLY__
@@ -463,7 +462,7 @@ enum {
 	    CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
 	    CPU_FTR_ICSWX | CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \
 	    CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_DAWR | \
-	    CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP | CPU_FTR_SUBCORE)
+	    CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP)
 #define CPU_FTRS_POWER8E (CPU_FTRS_POWER8 | CPU_FTR_PMAO_BUG)
 #define CPU_FTRS_POWER8_DD1 (CPU_FTRS_POWER8 & ~CPU_FTR_DBELL)
 #define CPU_FTRS_POWER9 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
diff --git a/arch/powerpc/include/asm/kprobes.h b/arch/powerpc/include/asm/kprobes.h
index a83821f33ea3..8814a7249ceb 100644
--- a/arch/powerpc/include/asm/kprobes.h
+++ b/arch/powerpc/include/asm/kprobes.h
@@ -103,6 +103,7 @@ extern int kprobe_exceptions_notify(struct notifier_block *self,
 extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
 extern int kprobe_handler(struct pt_regs *regs);
 extern int kprobe_post_handler(struct pt_regs *regs);
+extern int is_current_kprobe_addr(unsigned long addr);
 #ifdef CONFIG_KPROBES_ON_FTRACE
 extern int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
 			   struct kprobe_ctlblk *kcb);
diff --git a/arch/powerpc/include/asm/module.h b/arch/powerpc/include/asm/module.h
index 53885512b8d3..6c0132c7212f 100644
--- a/arch/powerpc/include/asm/module.h
+++ b/arch/powerpc/include/asm/module.h
@@ -14,6 +14,10 @@
 #include <asm-generic/module.h>
 
 
+#ifdef CC_USING_MPROFILE_KERNEL
+#define MODULE_ARCH_VERMAGIC	"mprofile-kernel"
+#endif
+
 #ifndef __powerpc64__
 /*
  * Thanks to Paul M for explaining this.
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index 2a32483c7b6c..8da5d4c1cab2 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -132,7 +132,19 @@ extern long long virt_phys_offset;
 #define virt_to_pfn(kaddr)	(__pa(kaddr) >> PAGE_SHIFT)
 #define virt_to_page(kaddr)	pfn_to_page(virt_to_pfn(kaddr))
 #define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
+
+#ifdef CONFIG_PPC_BOOK3S_64
+/*
+ * On hash the vmalloc and other regions alias to the kernel region when passed
+ * through __pa(), which virt_to_pfn() uses. That means virt_addr_valid() can
+ * return true for some vmalloc addresses, which is incorrect. So explicitly
+ * check that the address is in the kernel region.
+ */
+#define virt_addr_valid(kaddr)	(REGION_ID(kaddr) == KERNEL_REGION_ID && \
+				 pfn_valid(virt_to_pfn(kaddr)))
+#else
 #define virt_addr_valid(kaddr)	pfn_valid(virt_to_pfn(kaddr))
+#endif
 
 /*
  * On Book-E parts we need __va to parse the device tree and we can't
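A standalone sketch of why the region check matters: the pfn test alone would accept vmalloc addresses whose __pa() aliases into the linear map. The region values below are simplified stand-ins for the hash-MMU layout (not kernel code):

	#include <stdbool.h>
	#include <stdio.h>

	#define REGION_SHIFT		60
	#define REGION_ID(addr)		((addr) >> REGION_SHIFT)
	#define KERNEL_REGION_ID	0xcUL	/* illustrative */

	static bool pfn_valid_stub(unsigned long addr)
	{
		(void)addr;
		return true;	/* pretend the aliased pfn is backed */
	}

	static bool virt_addr_valid(unsigned long addr)
	{
		return REGION_ID(addr) == KERNEL_REGION_ID &&
		       pfn_valid_stub(addr);
	}

	int main(void)
	{
		printf("%d\n", virt_addr_valid(0xc000000000001000UL)); /* 1 */
		printf("%d\n", virt_addr_valid(0xd000000000001000UL)); /* 0 */
		return 0;
	}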
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index a2123f291ab0..1189d04f3bd1 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -110,13 +110,18 @@ void release_thread(struct task_struct *);
 #define TASK_SIZE_128TB (0x0000800000000000UL)
 #define TASK_SIZE_512TB (0x0002000000000000UL)
 
-#ifdef CONFIG_PPC_BOOK3S_64
+/*
+ * For now 512TB is only supported with book3s and 64K linux page size.
+ */
+#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_PPC_64K_PAGES)
 /*
  * Max value currently used:
  */
 #define TASK_SIZE_USER64		TASK_SIZE_512TB
+#define DEFAULT_MAP_WINDOW_USER64	TASK_SIZE_128TB
 #else
 #define TASK_SIZE_USER64		TASK_SIZE_64TB
+#define DEFAULT_MAP_WINDOW_USER64	TASK_SIZE_64TB
 #endif
 
 /*
@@ -132,7 +137,7 @@ void release_thread(struct task_struct *);
  * space during mmap's.
  */
 #define TASK_UNMAPPED_BASE_USER32 (PAGE_ALIGN(TASK_SIZE_USER32 / 4))
-#define TASK_UNMAPPED_BASE_USER64 (PAGE_ALIGN(TASK_SIZE_128TB / 4))
+#define TASK_UNMAPPED_BASE_USER64 (PAGE_ALIGN(DEFAULT_MAP_WINDOW_USER64 / 4))
 
 #define TASK_UNMAPPED_BASE ((is_32bit_task()) ? \
 		TASK_UNMAPPED_BASE_USER32 : TASK_UNMAPPED_BASE_USER64 )
@@ -143,21 +148,15 @@ void release_thread(struct task_struct *);
  * with 128TB and conditionally enable upto 512TB
  */
 #ifdef CONFIG_PPC_BOOK3S_64
 #define DEFAULT_MAP_WINDOW	((is_32bit_task()) ? \
-				 TASK_SIZE_USER32 : TASK_SIZE_128TB)
+				 TASK_SIZE_USER32 : DEFAULT_MAP_WINDOW_USER64)
 #else
 #define DEFAULT_MAP_WINDOW	TASK_SIZE
 #endif
 
 #ifdef __powerpc64__
 
-#ifdef CONFIG_PPC_BOOK3S_64
-/* Limit stack to 128TB */
-#define STACK_TOP_USER64 TASK_SIZE_128TB
-#else
-#define STACK_TOP_USER64 TASK_SIZE_USER64
-#endif
-
+#define STACK_TOP_USER64 DEFAULT_MAP_WINDOW_USER64
 #define STACK_TOP_USER32 TASK_SIZE_USER32
 
 #define STACK_TOP	(is_32bit_task() ? \
@@ -379,12 +378,6 @@ struct thread_struct {
 }
 #endif
 
-/*
- * Return saved PC of a blocked thread. For now, this is the "user" PC
- */
-#define thread_saved_pc(tsk)	\
-	((tsk)->thread.regs? (tsk)->thread.regs->nip: 0)
-
 #define task_pt_regs(tsk)	((struct pt_regs *)(tsk)->thread.regs)
 
 unsigned long get_wchan(struct task_struct *p);
diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
index 8b3b46b7b0f2..329771559cbb 100644
--- a/arch/powerpc/include/asm/topology.h
+++ b/arch/powerpc/include/asm/topology.h
@@ -44,8 +44,22 @@ extern void __init dump_numa_cpu_topology(void);
 extern int sysfs_add_device_to_node(struct device *dev, int nid);
 extern void sysfs_remove_device_from_node(struct device *dev, int nid);
 
+static inline int early_cpu_to_node(int cpu)
+{
+	int nid;
+
+	nid = numa_cpu_lookup_table[cpu];
+
+	/*
+	 * Fall back to node 0 if nid is unset (it should be, except bugs).
+	 * This allows callers to safely do NODE_DATA(early_cpu_to_node(cpu)).
+	 */
+	return (nid < 0) ? 0 : nid;
+}
 #else
 
+static inline int early_cpu_to_node(int cpu) { return 0; }
+
 static inline void dump_numa_cpu_topology(void) {}
 
 static inline int sysfs_add_device_to_node(struct device *dev, int nid)
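A standalone model of the early_cpu_to_node() fallback this hunk adds — before the per-cpu areas exist, cpu_to_node() is not usable, so the early lookup table is consulted and unset entries fall back to node 0 (stub table, not kernel code):

	#include <stdio.h>

	static int numa_cpu_lookup_table[4] = { 0, 1, -1, -1 }; /* -1 = unset */

	static int early_cpu_to_node(int cpu)
	{
		int nid = numa_cpu_lookup_table[cpu];

		return (nid < 0) ? 0 : nid; /* safe for NODE_DATA() callers */
	}

	int main(void)
	{
		for (int cpu = 0; cpu < 4; cpu++)
			printf("cpu%d -> node %d\n", cpu, early_cpu_to_node(cpu));
		return 0;
	}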
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index 5c0d8a8cdae5..41e88d3ce36b 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -267,13 +267,7 @@ do { \
 extern unsigned long __copy_tofrom_user(void __user *to,
 		const void __user *from, unsigned long size);
 
-#ifndef __powerpc64__
-
-#define INLINE_COPY_FROM_USER
-#define INLINE_COPY_TO_USER
-
-#else /* __powerpc64__ */
-
+#ifdef __powerpc64__
 static inline unsigned long
 raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
 {
diff --git a/arch/powerpc/include/asm/xive.h b/arch/powerpc/include/asm/xive.h
index c8a822acf962..c23ff4389ca2 100644
--- a/arch/powerpc/include/asm/xive.h
+++ b/arch/powerpc/include/asm/xive.h
@@ -94,11 +94,13 @@ struct xive_q {
  * store at 0 and some ESBs support doing a trigger via a
  * separate trigger page.
  */
-#define XIVE_ESB_GET		0x800
-#define XIVE_ESB_SET_PQ_00	0xc00
-#define XIVE_ESB_SET_PQ_01	0xd00
-#define XIVE_ESB_SET_PQ_10	0xe00
-#define XIVE_ESB_SET_PQ_11	0xf00
+#define XIVE_ESB_STORE_EOI	0x400 /* Store */
+#define XIVE_ESB_LOAD_EOI	0x000 /* Load */
+#define XIVE_ESB_GET		0x800 /* Load */
+#define XIVE_ESB_SET_PQ_00	0xc00 /* Load */
+#define XIVE_ESB_SET_PQ_01	0xd00 /* Load */
+#define XIVE_ESB_SET_PQ_10	0xe00 /* Load */
+#define XIVE_ESB_SET_PQ_11	0xf00 /* Load */
 
 #define XIVE_ESB_VAL_P		0x2
 #define XIVE_ESB_VAL_Q		0x1
diff --git a/arch/powerpc/include/uapi/asm/cputable.h b/arch/powerpc/include/uapi/asm/cputable.h
index 3e7ce86d5c13..4d877144f377 100644
--- a/arch/powerpc/include/uapi/asm/cputable.h
+++ b/arch/powerpc/include/uapi/asm/cputable.h
@@ -46,6 +46,8 @@
 #define PPC_FEATURE2_HTM_NOSC		0x01000000
 #define PPC_FEATURE2_ARCH_3_00		0x00800000 /* ISA 3.00 */
 #define PPC_FEATURE2_HAS_IEEE128	0x00400000 /* VSX IEEE Binary Float 128-bit */
+#define PPC_FEATURE2_DARN		0x00200000 /* darn random number insn */
+#define PPC_FEATURE2_SCV		0x00100000 /* scv syscall */
 
 /*
  * IMPORTANT!
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 9b3e88b1a9c8..6f849832a669 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -124,7 +124,8 @@ extern void __restore_cpu_e6500(void);
 #define COMMON_USER_POWER9	COMMON_USER_POWER8
 #define COMMON_USER2_POWER9	(COMMON_USER2_POWER8 | \
 				 PPC_FEATURE2_ARCH_3_00 | \
-				 PPC_FEATURE2_HAS_IEEE128)
+				 PPC_FEATURE2_HAS_IEEE128 | \
+				 PPC_FEATURE2_DARN )
 
 #ifdef CONFIG_PPC_BOOK3E_64
 #define COMMON_USER_BOOKE	(COMMON_USER_PPC64 | PPC_FEATURE_BOOKE)
diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c
index fcc7588a96d6..4c7656dc4e04 100644
--- a/arch/powerpc/kernel/dt_cpu_ftrs.c
+++ b/arch/powerpc/kernel/dt_cpu_ftrs.c
@@ -8,6 +8,7 @@
 #include <linux/export.h>
 #include <linux/init.h>
 #include <linux/jump_label.h>
+#include <linux/libfdt.h>
 #include <linux/memblock.h>
 #include <linux/printk.h>
 #include <linux/sched.h>
@@ -642,7 +643,6 @@ static struct dt_cpu_feature_match __initdata
 	{"processor-control-facility", feat_enable_dbell, CPU_FTR_DBELL},
 	{"processor-control-facility-v3", feat_enable_dbell, CPU_FTR_DBELL},
 	{"processor-utilization-of-resources-register", feat_enable_purr, 0},
-	{"subcore", feat_enable, CPU_FTR_SUBCORE},
 	{"no-execute", feat_enable, 0},
 	{"strong-access-ordering", feat_enable, CPU_FTR_SAO},
 	{"cache-inhibited-large-page", feat_enable_large_ci, 0},
@@ -671,12 +671,24 @@ static struct dt_cpu_feature_match __initdata
 	{"wait-v3", feat_enable, 0},
 };
 
-/* XXX: how to configure this? Default + boot time? */
-#ifdef CONFIG_PPC_CPUFEATURES_ENABLE_UNKNOWN
-#define CPU_FEATURE_ENABLE_UNKNOWN 1
-#else
-#define CPU_FEATURE_ENABLE_UNKNOWN 0
-#endif
+static bool __initdata using_dt_cpu_ftrs;
+static bool __initdata enable_unknown = true;
+
+static int __init dt_cpu_ftrs_parse(char *str)
+{
+	if (!str)
+		return 0;
+
+	if (!strcmp(str, "off"))
+		using_dt_cpu_ftrs = false;
+	else if (!strcmp(str, "known"))
+		enable_unknown = false;
+	else
+		return 1;
+
+	return 0;
+}
+early_param("dt_cpu_ftrs", dt_cpu_ftrs_parse);
 
 static void __init cpufeatures_setup_start(u32 isa)
 {
@@ -707,7 +719,7 @@ static bool __init cpufeatures_process_feature(struct dt_cpu_feature *f)
 		}
 	}
 
-	if (!known && CPU_FEATURE_ENABLE_UNKNOWN) {
+	if (!known && enable_unknown) {
 		if (!feat_try_enable_unknown(f)) {
 			pr_info("not enabling: %s (unknown and unsupported by kernel)\n",
 				f->name);
@@ -756,6 +768,26 @@ static void __init cpufeatures_setup_finished(void)
 		cur_cpu_spec->cpu_features, cur_cpu_spec->mmu_features);
 }
 
+static int __init disabled_on_cmdline(void)
+{
+	unsigned long root, chosen;
+	const char *p;
+
+	root = of_get_flat_dt_root();
+	chosen = of_get_flat_dt_subnode_by_name(root, "chosen");
+	if (chosen == -FDT_ERR_NOTFOUND)
+		return false;
+
+	p = of_get_flat_dt_prop(chosen, "bootargs", NULL);
+	if (!p)
+		return false;
+
+	if (strstr(p, "dt_cpu_ftrs=off"))
+		return true;
+
+	return false;
+}
+
 static int __init fdt_find_cpu_features(unsigned long node, const char *uname,
 					int depth, void *data)
 {
@@ -766,8 +798,6 @@ static int __init fdt_find_cpu_features(unsigned long node, const char *uname,
 	return 0;
 }
 
-static bool __initdata using_dt_cpu_ftrs = false;
-
 bool __init dt_cpu_ftrs_in_use(void)
 {
 	return using_dt_cpu_ftrs;
@@ -775,6 +805,8 @@ bool __init dt_cpu_ftrs_in_use(void)
 
 bool __init dt_cpu_ftrs_init(void *fdt)
 {
+	using_dt_cpu_ftrs = false;
+
 	/* Setup and verify the FDT, if it fails we just bail */
 	if (!early_init_dt_verify(fdt))
 		return false;
@@ -782,6 +814,9 @@ bool __init dt_cpu_ftrs_init(void *fdt)
 	if (!of_scan_flat_dt(fdt_find_cpu_features, NULL))
 		return false;
 
+	if (disabled_on_cmdline())
+		return false;
+
 	cpufeatures_setup_cpu();
 
 	using_dt_cpu_ftrs = true;
@@ -1027,5 +1062,8 @@ static int __init dt_cpu_ftrs_scan_callback(unsigned long node, const char
 
 void __init dt_cpu_ftrs_scan(void)
 {
+	if (!using_dt_cpu_ftrs)
+		return;
+
 	of_scan_flat_dt(dt_cpu_ftrs_scan_callback, NULL);
 }
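The net effect of this file's hunks is a new dt_cpu_ftrs= boot parameter replacing the two Kconfig options removed earlier in the patch. A standalone model of the parser's behavior (not kernel code):

	/* "dt_cpu_ftrs=off" disables the binding, "dt_cpu_ftrs=known" keeps
	 * it but refuses unknown features; anything else is rejected. */
	#include <stdbool.h>
	#include <stdio.h>
	#include <string.h>

	static bool using_dt_cpu_ftrs;
	static bool enable_unknown = true;

	static int dt_cpu_ftrs_parse(const char *str)
	{
		if (!str)
			return 0;

		if (!strcmp(str, "off"))
			using_dt_cpu_ftrs = false;
		else if (!strcmp(str, "known"))
			enable_unknown = false;
		else
			return 1;	/* unrecognized value */

		return 0;
	}

	int main(void)
	{
		using_dt_cpu_ftrs = true;
		dt_cpu_ftrs_parse("known");
		printf("using=%d unknown=%d\n",
		       using_dt_cpu_ftrs, enable_unknown); /* 1 0 */
		return 0;
	}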
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index ae418b85c17c..b886795060fd 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -1411,10 +1411,8 @@ USE_TEXT_SECTION()
 	.balign	IFETCH_ALIGN_BYTES
 do_hash_page:
 #ifdef CONFIG_PPC_STD_MMU_64
-	andis.	r0,r4,0xa410		/* weird error? */
+	andis.	r0,r4,0xa450		/* weird error? */
 	bne-	handle_page_fault	/* if not, try to insert a HPTE */
-	andis.	r0,r4,DSISR_DABRMATCH@h
-	bne-	handle_dabr_fault
 	CURRENT_THREAD_INFO(r11, r1)
 	lwz	r0,TI_PREEMPT(r11)	/* If we're in an "NMI" */
 	andis.	r0,r0,NMI_MASK@h	/* (i.e. an irq when soft-disabled) */
@@ -1438,11 +1436,16 @@ do_hash_page:
 
 	/* Error */
 	blt-	13f
+
+	/* Reload DSISR into r4 for the DABR check below */
+	ld	r4,_DSISR(r1)
 #endif /* CONFIG_PPC_STD_MMU_64 */
 
 /* Here we have a page fault that hash_page can't handle. */
 handle_page_fault:
-11:	ld	r4,_DAR(r1)
+11:	andis.	r0,r4,DSISR_DABRMATCH@h
+	bne-	handle_dabr_fault
+	ld	r4,_DAR(r1)
 	ld	r5,_DSISR(r1)
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	bl	do_page_fault
diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S
index 07d4e0ad60db..4898d676dcae 100644
--- a/arch/powerpc/kernel/idle_book3s.S
+++ b/arch/powerpc/kernel/idle_book3s.S
@@ -416,7 +416,7 @@ power9_dd1_recover_paca:
416 * which needs to be restored from the stack. 416 * which needs to be restored from the stack.
417 */ 417 */
418 li r3, 1 418 li r3, 1
419 stb r0,PACA_NAPSTATELOST(r13) 419 stb r3,PACA_NAPSTATELOST(r13)
420 blr 420 blr
421 421
422/* 422/*
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index 160ae0fa7d0d..01addfb0ed0a 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -43,6 +43,12 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
 
 struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};
 
+int is_current_kprobe_addr(unsigned long addr)
+{
+	struct kprobe *p = kprobe_running();
+	return (p && (unsigned long)p->addr == addr) ? 1 : 0;
+}
+
 bool arch_within_kprobe_blacklist(unsigned long addr)
 {
 	return  (addr >= (unsigned long)__kprobes_text_start &&
@@ -305,16 +311,17 @@ int kprobe_handler(struct pt_regs *regs)
 			save_previous_kprobe(kcb);
 			set_current_kprobe(p, regs, kcb);
 			kprobes_inc_nmissed_count(p);
-			prepare_singlestep(p, regs);
 			kcb->kprobe_status = KPROBE_REENTER;
 			if (p->ainsn.boostable >= 0) {
 				ret = try_to_emulate(p, regs);
 
 				if (ret > 0) {
 					restore_previous_kprobe(kcb);
+					preempt_enable_no_resched();
 					return 1;
 				}
 			}
+			prepare_singlestep(p, regs);
 			return 1;
 		} else {
 			if (*addr != BREAKPOINT_INSTRUCTION) {
@@ -616,6 +623,15 @@ int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
 	regs->gpr[2] = (unsigned long)(((func_descr_t *)jp->entry)->toc);
 #endif
 
+	/*
+	 * jprobes use jprobe_return() which skips the normal return
+	 * path of the function, and this messes up the accounting of the
+	 * function graph tracer.
+	 *
+	 * Pause function graph tracing while performing the jprobe function.
+	 */
+	pause_graph_tracing();
+
 	return 1;
 }
 NOKPROBE_SYMBOL(setjmp_pre_handler);
@@ -641,6 +657,8 @@ int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
 	 * saved regs...
 	 */
 	memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
+	/* It's OK to start function graph tracing again */
+	unpause_graph_tracing();
 	preempt_enable_no_resched();
 	return 1;
 }
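A standalone model of the is_current_kprobe_addr() helper added above, with a stub in place of the kernel's per-CPU kprobe_running() (not kernel code):

	#include <stdio.h>

	struct kprobe { unsigned long addr; };

	static struct kprobe *current_kprobe;	/* per-CPU in the kernel */

	static struct kprobe *kprobe_running(void)
	{
		return current_kprobe;
	}

	static int is_current_kprobe_addr(unsigned long addr)
	{
		struct kprobe *p = kprobe_running();

		return (p && p->addr == addr) ? 1 : 0;
	}

	int main(void)
	{
		struct kprobe kp = { 0xc000000000abcd00UL };

		current_kprobe = &kp;
		printf("%d %d\n", is_current_kprobe_addr(kp.addr),
		       is_current_kprobe_addr(kp.addr + 4));	/* 1 0 */
		return 0;
	}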
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index d645da302bf2..2ad725ef4368 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -864,6 +864,25 @@ static void tm_reclaim_thread(struct thread_struct *thr,
 	if (!MSR_TM_SUSPENDED(mfmsr()))
 		return;
 
+	/*
+	 * If we are in a transaction and FP is off then we can't have
+	 * used FP inside that transaction. Hence the checkpointed
+	 * state is the same as the live state. We need to copy the
+	 * live state to the checkpointed state so that when the
+	 * transaction is restored, the checkpointed state is correct
+	 * and the aborted transaction sees the correct state. We use
+	 * ckpt_regs.msr here as that's what tm_reclaim will use to
+	 * determine if it's going to write the checkpointed state or
+	 * not. So either this will write the checkpointed registers,
+	 * or reclaim will. Similarly for VMX.
+	 */
+	if ((thr->ckpt_regs.msr & MSR_FP) == 0)
+		memcpy(&thr->ckfp_state, &thr->fp_state,
+		       sizeof(struct thread_fp_state));
+	if ((thr->ckpt_regs.msr & MSR_VEC) == 0)
+		memcpy(&thr->ckvr_state, &thr->vr_state,
+		       sizeof(struct thread_vr_state));
+
 	giveup_all(container_of(thr, struct task_struct, thread));
 
 	tm_reclaim(thr, thr->ckpt_regs.msr, cause);
@@ -1647,6 +1666,7 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
 #ifdef CONFIG_VSX
 	current->thread.used_vsr = 0;
 #endif
+	current->thread.load_fp = 0;
 	memset(&current->thread.fp_state, 0, sizeof(current->thread.fp_state));
 	current->thread.fp_save_area = NULL;
 #ifdef CONFIG_ALTIVEC
@@ -1655,6 +1675,7 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
 	current->thread.vr_save_area = NULL;
 	current->thread.vrsave = 0;
 	current->thread.used_vr = 0;
+	current->thread.load_vec = 0;
 #endif /* CONFIG_ALTIVEC */
 #ifdef CONFIG_SPE
 	memset(current->thread.evr, 0, sizeof(current->thread.evr));
@@ -1666,6 +1687,7 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
 	current->thread.tm_tfhar = 0;
 	current->thread.tm_texasr = 0;
 	current->thread.tm_tfiar = 0;
+	current->thread.load_tm = 0;
 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
 }
 EXPORT_SYMBOL(start_thread);
1671EXPORT_SYMBOL(start_thread); 1693EXPORT_SYMBOL(start_thread);
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 40c4887c27b6..f83056297441 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -161,7 +161,9 @@ static struct ibm_pa_feature {
 	{ .pabyte = 0,  .pabit = 3, .cpu_features  = CPU_FTR_CTRL },
 	{ .pabyte = 0,  .pabit = 6, .cpu_features  = CPU_FTR_NOEXECUTE },
 	{ .pabyte = 1,  .pabit = 2, .mmu_features  = MMU_FTR_CI_LARGE_PAGE },
+#ifdef CONFIG_PPC_RADIX_MMU
 	{ .pabyte = 40, .pabit = 0, .mmu_features  = MMU_FTR_TYPE_RADIX },
+#endif
 	{ .pabyte = 1,  .pabit = 1, .invert = 1, .cpu_features = CPU_FTR_NODSISRALIGN },
 	{ .pabyte = 5,  .pabit = 0, .cpu_features  = CPU_FTR_REAL_LE,
 				    .cpu_user_ftrs = PPC_FEATURE_TRUE_LE },
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 71dcda91755d..857129acf960 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -928,7 +928,7 @@ void __init setup_arch(char **cmdline_p)
 
 #ifdef CONFIG_PPC_MM_SLICES
 #ifdef CONFIG_PPC64
-	init_mm.context.addr_limit = TASK_SIZE_128TB;
+	init_mm.context.addr_limit = DEFAULT_MAP_WINDOW_USER64;
 #else
 #error	"context.addr_limit not initialized."
 #endif
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index f35ff9dea4fb..4640f6d64f8b 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -616,6 +616,24 @@ void __init exc_lvl_early_init(void)
 #endif
 
 /*
+ * Emergency stacks are used for a range of things, from asynchronous
+ * NMIs (system reset, machine check) to synchronous, process context.
+ * We set preempt_count to zero, even though that isn't necessarily correct. To
+ * get the right value we'd need to copy it from the previous thread_info, but
+ * doing that might fault causing more problems.
+ * TODO: what to do with accounting?
+ */
+static void emerg_stack_init_thread_info(struct thread_info *ti, int cpu)
+{
+	ti->task = NULL;
+	ti->cpu = cpu;
+	ti->preempt_count = 0;
+	ti->local_flags = 0;
+	ti->flags = 0;
+	klp_init_thread_info(ti);
+}
+
+/*
  * Stack space used when we detect a bad kernel stack pointer, and
  * early in SMP boots before relocation is enabled. Exclusive emergency
  * stack for machine checks.
@@ -633,24 +651,31 @@ void __init emergency_stack_init(void)
 	 * Since we use these as temporary stacks during secondary CPU
 	 * bringup, we need to get at them in real mode. This means they
 	 * must also be within the RMO region.
+	 *
+	 * The IRQ stacks allocated elsewhere in this file are zeroed and
+	 * initialized in kernel/irq.c. These are initialized here in order
+	 * to have emergency stacks available as early as possible.
 	 */
 	limit = min(safe_stack_limit(), ppc64_rma_size);
 
 	for_each_possible_cpu(i) {
 		struct thread_info *ti;
 		ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
-		klp_init_thread_info(ti);
+		memset(ti, 0, THREAD_SIZE);
+		emerg_stack_init_thread_info(ti, i);
 		paca[i].emergency_sp = (void *)ti + THREAD_SIZE;
 
 #ifdef CONFIG_PPC_BOOK3S_64
 		/* emergency stack for NMI exception handling. */
 		ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
-		klp_init_thread_info(ti);
+		memset(ti, 0, THREAD_SIZE);
+		emerg_stack_init_thread_info(ti, i);
 		paca[i].nmi_emergency_sp = (void *)ti + THREAD_SIZE;
 
 		/* emergency stack for machine check exception handling. */
 		ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
-		klp_init_thread_info(ti);
+		memset(ti, 0, THREAD_SIZE);
+		emerg_stack_init_thread_info(ti, i);
 		paca[i].mc_emergency_sp = (void *)ti + THREAD_SIZE;
 #endif
 	}
@@ -661,7 +686,7 @@ void __init emergency_stack_init(void)
 
 static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
 {
-	return __alloc_bootmem_node(NODE_DATA(cpu_to_node(cpu)), size, align,
+	return __alloc_bootmem_node(NODE_DATA(early_cpu_to_node(cpu)), size, align,
 				    __pa(MAX_DMA_ADDRESS));
 }
 
@@ -672,7 +697,7 @@ static void __init pcpu_fc_free(void *ptr, size_t size)
 
 static int pcpu_cpu_distance(unsigned int from, unsigned int to)
 {
-	if (cpu_to_node(from) == cpu_to_node(to))
+	if (early_cpu_to_node(from) == early_cpu_to_node(to))
 		return LOCAL_DISTANCE;
 	else
 		return REMOTE_DISTANCE;
diff --git a/arch/powerpc/kernel/trace/ftrace_64_mprofile.S b/arch/powerpc/kernel/trace/ftrace_64_mprofile.S
index 7c933a99f5d5..c98e90b4ea7b 100644
--- a/arch/powerpc/kernel/trace/ftrace_64_mprofile.S
+++ b/arch/powerpc/kernel/trace/ftrace_64_mprofile.S
@@ -45,10 +45,14 @@ _GLOBAL(ftrace_caller)
 	stdu	r1,-SWITCH_FRAME_SIZE(r1)
 
 	/* Save all gprs to pt_regs */
-	SAVE_8GPRS(0,r1)
-	SAVE_8GPRS(8,r1)
-	SAVE_8GPRS(16,r1)
-	SAVE_8GPRS(24,r1)
+	SAVE_GPR(0, r1)
+	SAVE_10GPRS(2, r1)
+	SAVE_10GPRS(12, r1)
+	SAVE_10GPRS(22, r1)
+
+	/* Save previous stack pointer (r1) */
+	addi	r8, r1, SWITCH_FRAME_SIZE
+	std	r8, GPR1(r1)
 
 	/* Load special regs for save below */
 	mfmsr	r8
@@ -95,18 +99,44 @@ ftrace_call:
 	bl	ftrace_stub
 	nop
 
-	/* Load ctr with the possibly modified NIP */
-	ld	r3, _NIP(r1)
-	mtctr	r3
+	/* Load the possibly modified NIP */
+	ld	r15, _NIP(r1)
+
 #ifdef CONFIG_LIVEPATCH
-	cmpd	r14,r3		/* has NIP been altered? */
+	cmpd	r14, r15	/* has NIP been altered? */
+#endif
+
+#if defined(CONFIG_LIVEPATCH) && defined(CONFIG_KPROBES_ON_FTRACE)
+	/* NIP has not been altered, skip over further checks */
+	beq	1f
+
+	/* Check if there is an active kprobe on us */
+	subi	r3, r14, 4
+	bl	is_current_kprobe_addr
+	nop
+
+	/*
+	 * If r3 == 1, then this is a kprobe/jprobe.
+	 * else, this is livepatched function.
+	 *
+	 * The conditional branch for livepatch_handler below will use the
+	 * result of this comparison. For kprobe/jprobe, we just need to branch to
+	 * the new NIP, not call livepatch_handler. The branch below is bne, so we
+	 * want CR0[EQ] to be true if this is a kprobe/jprobe. Which means we want
+	 * CR0[EQ] = (r3 == 1).
+	 */
+	cmpdi	r3, 1
+1:
 #endif
 
+	/* Load CTR with the possibly modified NIP */
+	mtctr	r15
+
 	/* Restore gprs */
-	REST_8GPRS(0,r1)
-	REST_8GPRS(8,r1)
-	REST_8GPRS(16,r1)
-	REST_8GPRS(24,r1)
+	REST_GPR(0,r1)
+	REST_10GPRS(2,r1)
+	REST_10GPRS(12,r1)
+	REST_10GPRS(22,r1)
 
 	/* Restore possibly modified LR */
 	ld	r0, _LINK(r1)
@@ -119,7 +149,10 @@ ftrace_call:
 	addi r1, r1, SWITCH_FRAME_SIZE
 
 #ifdef CONFIG_LIVEPATCH
-	/* Based on the cmpd above, if the NIP was altered handle livepatch */
+	/*
+	 * Based on the cmpd or cmpdi above, if the NIP was altered and we're
+	 * not on a kprobe/jprobe, then handle livepatch.
+	 */
 	bne-	livepatch_handler
 #endif
 
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index 24de532c1736..0c52cb5d43f5 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -67,7 +67,7 @@ config KVM_BOOK3S_64
 	select KVM_BOOK3S_64_HANDLER
 	select KVM
 	select KVM_BOOK3S_PR_POSSIBLE if !KVM_BOOK3S_HV_POSSIBLE
-	select SPAPR_TCE_IOMMU if IOMMU_SUPPORT
+	select SPAPR_TCE_IOMMU if IOMMU_SUPPORT && (PPC_PSERIES || PPC_POWERNV)
 	---help---
 	  Support running unmodified book3s_64 and book3s_32 guest kernels
 	  in virtual machines on book3s_64 host processors.
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index d91a2604c496..381a6ec0ff3b 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -46,7 +46,7 @@ kvm-e500mc-objs := \
 	e500_emulate.o
 kvm-objs-$(CONFIG_KVM_E500MC) := $(kvm-e500mc-objs)
 
-kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) := \
+kvm-book3s_64-builtin-objs-$(CONFIG_SPAPR_TCE_IOMMU) := \
 	book3s_64_vio_hv.o
 
 kvm-pr-y := \
@@ -90,11 +90,11 @@ kvm-book3s_64-objs-$(CONFIG_KVM_XICS) += \
 	book3s_xics.o
 
 kvm-book3s_64-objs-$(CONFIG_KVM_XIVE) += book3s_xive.o
+kvm-book3s_64-objs-$(CONFIG_SPAPR_TCE_IOMMU) += book3s_64_vio.o
 
 kvm-book3s_64-module-objs := \
 	$(common-objs-y) \
 	book3s.o \
-	book3s_64_vio.o \
 	book3s_rtas.o \
 	$(kvm-book3s_64-objs-y)
 
diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
index eda0a8f6fae8..3adfd2f5301c 100644
--- a/arch/powerpc/kvm/book3s_64_vio_hv.c
+++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
@@ -301,6 +301,10 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
 	/* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
 	/* 	    liobn, ioba, tce); */
 
+	/* For radix, we might be in virtual mode, so punt */
+	if (kvm_is_radix(vcpu->kvm))
+		return H_TOO_HARD;
+
 	stt = kvmppc_find_table(vcpu->kvm, liobn);
 	if (!stt)
 		return H_TOO_HARD;
@@ -381,6 +385,10 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
 	bool prereg = false;
 	struct kvmppc_spapr_tce_iommu_table *stit;
 
+	/* For radix, we might be in virtual mode, so punt */
+	if (kvm_is_radix(vcpu->kvm))
+		return H_TOO_HARD;
+
 	stt = kvmppc_find_table(vcpu->kvm, liobn);
 	if (!stt)
 		return H_TOO_HARD;
@@ -491,6 +499,10 @@ long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
 	long i, ret;
 	struct kvmppc_spapr_tce_iommu_table *stit;
 
+	/* For radix, we might be in virtual mode, so punt */
+	if (kvm_is_radix(vcpu->kvm))
+		return H_TOO_HARD;
+
 	stt = kvmppc_find_table(vcpu->kvm, liobn);
 	if (!stt)
 		return H_TOO_HARD;
@@ -527,6 +539,7 @@ long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
 	return H_SUCCESS;
 }
 
+/* This can be called in either virtual mode or real mode */
 long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
 		      unsigned long ioba)
 {
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 42b7a4fd57d9..8d1a365b8edc 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -1486,6 +1486,14 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
 		r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len);
 		break;
 	case KVM_REG_PPC_TB_OFFSET:
+		/*
+		 * POWER9 DD1 has an erratum where writing TBU40 causes
+		 * the timebase to lose ticks. So we don't let the
+		 * timebase offset be changed on P9 DD1. (It is
+		 * initialized to zero.)
+		 */
+		if (cpu_has_feature(CPU_FTR_POWER9_DD1))
+			break;
 		/* round up to multiple of 2^24 */
 		vcpu->arch.vcore->tb_offset =
 			ALIGN(set_reg_val(id, *val), 1UL << 24);
@@ -2907,12 +2915,36 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
 {
 	int r;
 	int srcu_idx;
+	unsigned long ebb_regs[3] = {};	/* shut up GCC */
+	unsigned long user_tar = 0;
+	unsigned int user_vrsave;
 
 	if (!vcpu->arch.sane) {
 		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		return -EINVAL;
 	}
 
+	/*
+	 * Don't allow entry with a suspended transaction, because
+	 * the guest entry/exit code will lose it.
+	 * If the guest has TM enabled, save away their TM-related SPRs
+	 * (they will get restored by the TM unavailable interrupt).
+	 */
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+	if (cpu_has_feature(CPU_FTR_TM) && current->thread.regs &&
+	    (current->thread.regs->msr & MSR_TM)) {
+		if (MSR_TM_ACTIVE(current->thread.regs->msr)) {
+			run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+			run->fail_entry.hardware_entry_failure_reason = 0;
+			return -EINVAL;
+		}
+		current->thread.tm_tfhar = mfspr(SPRN_TFHAR);
+		current->thread.tm_tfiar = mfspr(SPRN_TFIAR);
+		current->thread.tm_texasr = mfspr(SPRN_TEXASR);
+		current->thread.regs->msr &= ~MSR_TM;
+	}
+#endif
+
 	kvmppc_core_prepare_to_enter(vcpu);
 
 	/* No need to go into the guest when all we'll do is come back out */
@@ -2934,6 +2966,15 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
 
 	flush_all_to_thread(current);
 
+	/* Save userspace EBB and other register values */
+	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
+		ebb_regs[0] = mfspr(SPRN_EBBHR);
+		ebb_regs[1] = mfspr(SPRN_EBBRR);
+		ebb_regs[2] = mfspr(SPRN_BESCR);
+		user_tar = mfspr(SPRN_TAR);
+	}
+	user_vrsave = mfspr(SPRN_VRSAVE);
+
 	vcpu->arch.wqp = &vcpu->arch.vcore->wq;
 	vcpu->arch.pgdir = current->mm->pgd;
 	vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
@@ -2960,6 +3001,16 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		}
 	} while (is_kvmppc_resume_guest(r));
 
+	/* Restore userspace EBB and other register values */
+	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
+		mtspr(SPRN_EBBHR, ebb_regs[0]);
+		mtspr(SPRN_EBBRR, ebb_regs[1]);
+		mtspr(SPRN_BESCR, ebb_regs[2]);
+		mtspr(SPRN_TAR, user_tar);
+		mtspr(SPRN_FSCR, current->thread.fscr);
+	}
+	mtspr(SPRN_VRSAVE, user_vrsave);
+
 out:
 	vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
 	atomic_dec(&vcpu->kvm->arch.vcpus_running);
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index 88a65923c649..ee4c2558c305 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -207,7 +207,14 @@ EXPORT_SYMBOL_GPL(kvmppc_hwrng_present);
 
 long kvmppc_h_random(struct kvm_vcpu *vcpu)
 {
-	if (powernv_get_random_real_mode(&vcpu->arch.gpr[4]))
+	int r;
+
+	/* Only need to do the expensive mfmsr() on radix */
+	if (kvm_is_radix(vcpu->kvm) && (mfmsr() & MSR_IR))
+		r = powernv_get_random_long(&vcpu->arch.gpr[4]);
+	else
+		r = powernv_get_random_real_mode(&vcpu->arch.gpr[4]);
+	if (r)
 		return H_SUCCESS;
 
 	return H_HARDWARE;
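A standalone model of the mode selection this hunk introduces — on radix the hcall may arrive with the MMU on, where the real-mode path cannot be used. The two helpers are stubs for the OPAL-backed functions (not kernel code):

	#include <stdbool.h>
	#include <stdio.h>

	static int get_random_virtual(unsigned long *v)
	{
		*v = 0x1234; return 1;
	}

	static int get_random_real_mode(unsigned long *v)
	{
		*v = 0x5678; return 1;
	}

	static long h_random(bool radix, bool mmu_on, unsigned long *out)
	{
		int r;

		if (radix && mmu_on)		/* MSR[IR] set: virtual mode */
			r = get_random_virtual(out);
		else
			r = get_random_real_mode(out);

		return r ? 0 /* H_SUCCESS */ : -1 /* H_HARDWARE */;
	}

	int main(void)
	{
		unsigned long v;

		printf("%ld %lx\n", h_random(true, true, &v), v);
		printf("%ld %lx\n", h_random(false, false, &v), v);
		return 0;
	}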
diff --git a/arch/powerpc/kvm/book3s_hv_interrupts.S b/arch/powerpc/kvm/book3s_hv_interrupts.S
index 0fdc4a28970b..404deb512844 100644
--- a/arch/powerpc/kvm/book3s_hv_interrupts.S
+++ b/arch/powerpc/kvm/book3s_hv_interrupts.S
@@ -121,10 +121,20 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 	 * Put whatever is in the decrementer into the
 	 * hypervisor decrementer.
 	 */
+BEGIN_FTR_SECTION
+	ld	r5, HSTATE_KVM_VCORE(r13)
+	ld	r6, VCORE_KVM(r5)
+	ld	r9, KVM_HOST_LPCR(r6)
+	andis.	r9, r9, LPCR_LD@h
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
 	mfspr	r8,SPRN_DEC
 	mftb	r7
-	mtspr	SPRN_HDEC,r8
+BEGIN_FTR_SECTION
+	/* On POWER9, don't sign-extend if host LPCR[LD] bit is set */
+	bne	32f
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
 	extsw	r8,r8
+32:	mtspr	SPRN_HDEC,r8
 	add	r8,r8,r7
 	std	r8,HSTATE_DECEXP(r13)
 
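
Note: the arithmetic behind the hunk above, as a C sketch. Before POWER9 the hypervisor decrementer is a 32-bit down-counter, so turning its remaining ticks into a 64-bit expiry timestamp requires sign extension (a counter that has just gone negative must stay negative). On POWER9 with the host in large-decrementer mode (LPCR[LD] set), HDEC is already full width and sign-extending it would corrupt large values. host_lpcr_ld() below is a hypothetical stand-in for the LPCR test done in the assembly.

	bool large_dec = host_lpcr_ld();  /* hypothetical: tests LPCR[LD] */
	u64 tb = mftb();                  /* current timebase */
	u64 hdec = mfspr(SPRN_HDEC);      /* remaining HDEC ticks */
	s64 remaining = large_dec ? (s64)hdec      /* already full width */
				  : (s64)(s32)hdec; /* 32-bit: sign-extend */
	u64 host_dec_expiry = remaining + tb;  /* stored in HSTATE_DECEXP */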
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index bdb3f76ceb6b..4888dd494604 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -32,12 +32,29 @@
 #include <asm/opal.h>
 #include <asm/xive-regs.h>
 
+/* Sign-extend HDEC if not on POWER9 */
+#define EXTEND_HDEC(reg)			\
+BEGIN_FTR_SECTION;				\
+	extsw	reg, reg;			\
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
+
 #define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM)
 
 /* Values in HSTATE_NAPPING(r13) */
 #define NAPPING_CEDE	1
 #define NAPPING_NOVCPU	2
 
+/* Stack frame offsets for kvmppc_hv_entry */
+#define SFS			144
+#define STACK_SLOT_TRAP		(SFS-4)
+#define STACK_SLOT_TID		(SFS-16)
+#define STACK_SLOT_PSSCR	(SFS-24)
+#define STACK_SLOT_PID		(SFS-32)
+#define STACK_SLOT_IAMR		(SFS-40)
+#define STACK_SLOT_CIABR	(SFS-48)
+#define STACK_SLOT_DAWR		(SFS-56)
+#define STACK_SLOT_DAWRX	(SFS-64)
+
 /*
  * Call kvmppc_hv_entry in real mode.
  * Must be called with interrupts hard-disabled.
@@ -214,6 +231,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 kvmppc_primary_no_guest:
 	/* We handle this much like a ceded vcpu */
 	/* put the HDEC into the DEC, since HDEC interrupts don't wake us */
+	/* HDEC may be larger than DEC for arch >= v3.00, but since the */
+	/* HDEC value came from DEC in the first place, it will fit */
 	mfspr	r3, SPRN_HDEC
 	mtspr	SPRN_DEC, r3
 	/*
@@ -295,8 +314,9 @@ kvm_novcpu_wakeup:
 
 	/* See if our timeslice has expired (HDEC is negative) */
 	mfspr	r0, SPRN_HDEC
+	EXTEND_HDEC(r0)
 	li	r12, BOOK3S_INTERRUPT_HV_DECREMENTER
-	cmpwi	r0, 0
+	cmpdi	r0, 0
 	blt	kvm_novcpu_exit
 
 	/* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
@@ -319,10 +339,10 @@ kvm_novcpu_exit:
 	bl	kvmhv_accumulate_time
 #endif
 13:	mr	r3, r12
-	stw	r12, 112-4(r1)
+	stw	r12, STACK_SLOT_TRAP(r1)
 	bl	kvmhv_commence_exit
 	nop
-	lwz	r12, 112-4(r1)
+	lwz	r12, STACK_SLOT_TRAP(r1)
 	b	kvmhv_switch_to_host
 
 /*
@@ -390,8 +410,8 @@ kvm_secondary_got_guest:
 	lbz	r4, HSTATE_PTID(r13)
 	cmpwi	r4, 0
 	bne	63f
-	lis	r6, 0x7fff
-	ori	r6, r6, 0xffff
+	LOAD_REG_ADDR(r6, decrementer_max)
+	ld	r6, 0(r6)
 	mtspr	SPRN_HDEC, r6
 	/* and set per-LPAR registers, if doing dynamic micro-threading */
 	ld	r6, HSTATE_SPLIT_MODE(r13)
@@ -545,11 +565,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 *                                                                            *
 *****************************************************************************/
 
-/* Stack frame offsets */
-#define STACK_SLOT_TID		(112-16)
-#define STACK_SLOT_PSSCR	(112-24)
-#define STACK_SLOT_PID		(112-32)
-
 .global kvmppc_hv_entry
 kvmppc_hv_entry:
 
@@ -565,7 +580,7 @@ kvmppc_hv_entry:
 	 */
 	mflr	r0
 	std	r0, PPC_LR_STKOFF(r1)
-	stdu	r1, -112(r1)
+	stdu	r1, -SFS(r1)
 
 	/* Save R1 in the PACA */
 	std	r1, HSTATE_HOST_R1(r13)
@@ -749,10 +764,20 @@ BEGIN_FTR_SECTION
 	mfspr	r5, SPRN_TIDR
 	mfspr	r6, SPRN_PSSCR
 	mfspr	r7, SPRN_PID
+	mfspr	r8, SPRN_IAMR
 	std	r5, STACK_SLOT_TID(r1)
 	std	r6, STACK_SLOT_PSSCR(r1)
 	std	r7, STACK_SLOT_PID(r1)
+	std	r8, STACK_SLOT_IAMR(r1)
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
+BEGIN_FTR_SECTION
+	mfspr	r5, SPRN_CIABR
+	mfspr	r6, SPRN_DAWR
+	mfspr	r7, SPRN_DAWRX
+	std	r5, STACK_SLOT_CIABR(r1)
+	std	r6, STACK_SLOT_DAWR(r1)
+	std	r7, STACK_SLOT_DAWRX(r1)
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 
 BEGIN_FTR_SECTION
 	/* Set partition DABR */
@@ -968,7 +993,8 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
 
 	/* Check if HDEC expires soon */
 	mfspr	r3, SPRN_HDEC
-	cmpwi	r3, 512		/* 1 microsecond */
+	EXTEND_HDEC(r3)
+	cmpdi	r3, 512		/* 1 microsecond */
 	blt	hdec_soon
 
 #ifdef CONFIG_KVM_XICS
@@ -1505,11 +1531,10 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
 	 * set by the guest could disrupt the host.
 	 */
 	li	r0, 0
-	mtspr	SPRN_IAMR, r0
-	mtspr	SPRN_CIABR, r0
-	mtspr	SPRN_DAWRX, r0
+	mtspr	SPRN_PSPB, r0
 	mtspr	SPRN_WORT, r0
 BEGIN_FTR_SECTION
+	mtspr	SPRN_IAMR, r0
 	mtspr	SPRN_TCSCR, r0
 	/* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
 	li	r0, 1
@@ -1525,6 +1550,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
 	std	r6,VCPU_UAMOR(r9)
 	li	r6,0
 	mtspr	SPRN_AMR,r6
+	mtspr	SPRN_UAMOR, r6
 
 	/* Switch DSCR back to host value */
 	mfspr	r8, SPRN_DSCR
@@ -1670,12 +1696,22 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 
 	/* Restore host values of some registers */
 BEGIN_FTR_SECTION
+	ld	r5, STACK_SLOT_CIABR(r1)
+	ld	r6, STACK_SLOT_DAWR(r1)
+	ld	r7, STACK_SLOT_DAWRX(r1)
+	mtspr	SPRN_CIABR, r5
+	mtspr	SPRN_DAWR, r6
+	mtspr	SPRN_DAWRX, r7
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
+BEGIN_FTR_SECTION
 	ld	r5, STACK_SLOT_TID(r1)
 	ld	r6, STACK_SLOT_PSSCR(r1)
 	ld	r7, STACK_SLOT_PID(r1)
+	ld	r8, STACK_SLOT_IAMR(r1)
 	mtspr	SPRN_TIDR, r5
 	mtspr	SPRN_PSSCR, r6
 	mtspr	SPRN_PID, r7
+	mtspr	SPRN_IAMR, r8
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
 BEGIN_FTR_SECTION
 	PPC_INVALIDATE_ERAT
@@ -1819,8 +1855,8 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
 	li	r0, KVM_GUEST_MODE_NONE
 	stb	r0, HSTATE_IN_GUEST(r13)
 
-	ld	r0, 112+PPC_LR_STKOFF(r1)
-	addi	r1, r1, 112
+	ld	r0, SFS+PPC_LR_STKOFF(r1)
+	addi	r1, r1, SFS
 	mtlr	r0
 	blr
 
@@ -2366,12 +2402,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_TM)
 	mfspr	r3, SPRN_DEC
 	mfspr	r4, SPRN_HDEC
 	mftb	r5
-	cmpw	r3, r4
+	extsw	r3, r3
+	EXTEND_HDEC(r4)
+	cmpd	r3, r4
 	ble	67f
 	mtspr	SPRN_DEC, r4
 67:
 	/* save expiry time of guest decrementer */
-	extsw	r3, r3
 	add	r3, r3, r5
 	ld	r4, HSTATE_KVM_VCPU(r13)
 	ld	r5, HSTATE_KVM_VCORE(r13)
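
Note: the final hunk above fixes a mixed-width comparison. In C terms the sequence is roughly the following sketch, where extend_hdec() stands for the EXTEND_HDEC macro (a sign extension that becomes a no-op on POWER9, whose HDEC is full width), and dec_expires is the guest decrementer expiry the code saves:

	s64 dec = (s64)(s32)mfspr(SPRN_DEC);  /* guest DEC is 32-bit here */
	s64 hdec = extend_hdec(mfspr(SPRN_HDEC));
	u64 tb = mftb();

	if (dec > hdec)			/* clamp DEC so it can't outlive HDEC */
		mtspr(SPRN_DEC, hdec);
	vcpu->arch.dec_expires = dec + tb;  /* save guest decrementer expiry */

Comparing a sign-extended 64-bit value against an unextended one with a 32-bit compare (the old cmpw) gives the wrong answer once either counter goes negative, which is exactly the expired case this code cares about.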
diff --git a/arch/powerpc/kvm/book3s_pr_papr.c b/arch/powerpc/kvm/book3s_pr_papr.c
index bcbeeb62dd13..8a4205fa774f 100644
--- a/arch/powerpc/kvm/book3s_pr_papr.c
+++ b/arch/powerpc/kvm/book3s_pr_papr.c
@@ -50,7 +50,9 @@ static int kvmppc_h_pr_enter(struct kvm_vcpu *vcpu)
 	pteg_addr = get_pteg_addr(vcpu, pte_index);
 
 	mutex_lock(&vcpu->kvm->arch.hpt_mutex);
-	copy_from_user(pteg, (void __user *)pteg_addr, sizeof(pteg));
+	ret = H_FUNCTION;
+	if (copy_from_user(pteg, (void __user *)pteg_addr, sizeof(pteg)))
+		goto done;
 	hpte = pteg;
 
 	ret = H_PTEG_FULL;
@@ -71,7 +73,9 @@ static int kvmppc_h_pr_enter(struct kvm_vcpu *vcpu)
 	hpte[0] = cpu_to_be64(kvmppc_get_gpr(vcpu, 6));
 	hpte[1] = cpu_to_be64(kvmppc_get_gpr(vcpu, 7));
 	pteg_addr += i * HPTE_SIZE;
-	copy_to_user((void __user *)pteg_addr, hpte, HPTE_SIZE);
+	ret = H_FUNCTION;
+	if (copy_to_user((void __user *)pteg_addr, hpte, HPTE_SIZE))
+		goto done;
 	kvmppc_set_gpr(vcpu, 4, pte_index | i);
 	ret = H_SUCCESS;
 
@@ -93,7 +97,9 @@ static int kvmppc_h_pr_remove(struct kvm_vcpu *vcpu)
 
 	pteg = get_pteg_addr(vcpu, pte_index);
 	mutex_lock(&vcpu->kvm->arch.hpt_mutex);
-	copy_from_user(pte, (void __user *)pteg, sizeof(pte));
+	ret = H_FUNCTION;
+	if (copy_from_user(pte, (void __user *)pteg, sizeof(pte)))
+		goto done;
 	pte[0] = be64_to_cpu((__force __be64)pte[0]);
 	pte[1] = be64_to_cpu((__force __be64)pte[1]);
 
@@ -103,7 +109,9 @@ static int kvmppc_h_pr_remove(struct kvm_vcpu *vcpu)
 	    ((flags & H_ANDCOND) && (pte[0] & avpn) != 0))
 		goto done;
 
-	copy_to_user((void __user *)pteg, &v, sizeof(v));
+	ret = H_FUNCTION;
+	if (copy_to_user((void __user *)pteg, &v, sizeof(v)))
+		goto done;
 
 	rb = compute_tlbie_rb(pte[0], pte[1], pte_index);
 	vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);
@@ -171,7 +179,10 @@ static int kvmppc_h_pr_bulk_remove(struct kvm_vcpu *vcpu)
 		}
 
 		pteg = get_pteg_addr(vcpu, tsh & H_BULK_REMOVE_PTEX);
-		copy_from_user(pte, (void __user *)pteg, sizeof(pte));
+		if (copy_from_user(pte, (void __user *)pteg, sizeof(pte))) {
+			ret = H_FUNCTION;
+			break;
+		}
 		pte[0] = be64_to_cpu((__force __be64)pte[0]);
 		pte[1] = be64_to_cpu((__force __be64)pte[1]);
 
@@ -184,7 +195,10 @@ static int kvmppc_h_pr_bulk_remove(struct kvm_vcpu *vcpu)
 			tsh |= H_BULK_REMOVE_NOT_FOUND;
 		} else {
 			/* Splat the pteg in (userland) hpt */
-			copy_to_user((void __user *)pteg, &v, sizeof(v));
+			if (copy_to_user((void __user *)pteg, &v, sizeof(v))) {
+				ret = H_FUNCTION;
+				break;
+			}
 
 			rb = compute_tlbie_rb(pte[0], pte[1],
 					      tsh & H_BULK_REMOVE_PTEX);
@@ -211,7 +225,9 @@ static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu)
 
 	pteg = get_pteg_addr(vcpu, pte_index);
 	mutex_lock(&vcpu->kvm->arch.hpt_mutex);
-	copy_from_user(pte, (void __user *)pteg, sizeof(pte));
+	ret = H_FUNCTION;
+	if (copy_from_user(pte, (void __user *)pteg, sizeof(pte)))
+		goto done;
 	pte[0] = be64_to_cpu((__force __be64)pte[0]);
 	pte[1] = be64_to_cpu((__force __be64)pte[1]);
 
@@ -234,7 +250,9 @@ static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu)
 	vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);
 	pte[0] = (__force u64)cpu_to_be64(pte[0]);
 	pte[1] = (__force u64)cpu_to_be64(pte[1]);
-	copy_to_user((void __user *)pteg, pte, sizeof(pte));
+	ret = H_FUNCTION;
+	if (copy_to_user((void __user *)pteg, pte, sizeof(pte)))
+		goto done;
 	ret = H_SUCCESS;
 
 done:
@@ -244,36 +262,37 @@ static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu)
 	return EMULATE_DONE;
 }
 
-static int kvmppc_h_pr_put_tce(struct kvm_vcpu *vcpu)
+static int kvmppc_h_pr_logical_ci_load(struct kvm_vcpu *vcpu)
 {
-	unsigned long liobn = kvmppc_get_gpr(vcpu, 4);
-	unsigned long ioba = kvmppc_get_gpr(vcpu, 5);
-	unsigned long tce = kvmppc_get_gpr(vcpu, 6);
 	long rc;
 
-	rc = kvmppc_h_put_tce(vcpu, liobn, ioba, tce);
+	rc = kvmppc_h_logical_ci_load(vcpu);
 	if (rc == H_TOO_HARD)
 		return EMULATE_FAIL;
 	kvmppc_set_gpr(vcpu, 3, rc);
 	return EMULATE_DONE;
 }
 
-static int kvmppc_h_pr_logical_ci_load(struct kvm_vcpu *vcpu)
+static int kvmppc_h_pr_logical_ci_store(struct kvm_vcpu *vcpu)
 {
 	long rc;
 
-	rc = kvmppc_h_logical_ci_load(vcpu);
+	rc = kvmppc_h_logical_ci_store(vcpu);
 	if (rc == H_TOO_HARD)
 		return EMULATE_FAIL;
 	kvmppc_set_gpr(vcpu, 3, rc);
 	return EMULATE_DONE;
 }
 
-static int kvmppc_h_pr_logical_ci_store(struct kvm_vcpu *vcpu)
+#ifdef CONFIG_SPAPR_TCE_IOMMU
+static int kvmppc_h_pr_put_tce(struct kvm_vcpu *vcpu)
 {
+	unsigned long liobn = kvmppc_get_gpr(vcpu, 4);
+	unsigned long ioba = kvmppc_get_gpr(vcpu, 5);
+	unsigned long tce = kvmppc_get_gpr(vcpu, 6);
 	long rc;
 
-	rc = kvmppc_h_logical_ci_store(vcpu);
+	rc = kvmppc_h_put_tce(vcpu, liobn, ioba, tce);
 	if (rc == H_TOO_HARD)
 		return EMULATE_FAIL;
 	kvmppc_set_gpr(vcpu, 3, rc);
@@ -311,6 +330,23 @@ static int kvmppc_h_pr_stuff_tce(struct kvm_vcpu *vcpu)
 	return EMULATE_DONE;
 }
 
+#else /* CONFIG_SPAPR_TCE_IOMMU */
+static int kvmppc_h_pr_put_tce(struct kvm_vcpu *vcpu)
+{
+	return EMULATE_FAIL;
+}
+
+static int kvmppc_h_pr_put_tce_indirect(struct kvm_vcpu *vcpu)
+{
+	return EMULATE_FAIL;
+}
+
+static int kvmppc_h_pr_stuff_tce(struct kvm_vcpu *vcpu)
+{
+	return EMULATE_FAIL;
+}
+#endif /* CONFIG_SPAPR_TCE_IOMMU */
+
 static int kvmppc_h_pr_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
 {
 	long rc = kvmppc_xics_hcall(vcpu, cmd);
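
Note: every conversion in this file follows one pattern, which leans on the copy_{from,to}_user() contract: they return the number of bytes left uncopied, so any non-zero result means the access faulted and the hcall must fail rather than operate on a half-filled buffer. The core of the pattern:

	ret = H_FUNCTION;	/* assume failure until the copy succeeds */
	if (copy_from_user(pte, (void __user *)pteg, sizeof(pte)))
		goto done;	/* fault: unlock and return H_FUNCTION */

Setting ret before the copy keeps the shared "done:" unlock path usable for both the fault case and the later H_NOT_FOUND/H_SUCCESS cases.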
diff --git a/arch/powerpc/kvm/book3s_xive_template.c b/arch/powerpc/kvm/book3s_xive_template.c
index 023a31133c37..4636ca6e7d38 100644
--- a/arch/powerpc/kvm/book3s_xive_template.c
+++ b/arch/powerpc/kvm/book3s_xive_template.c
@@ -69,7 +69,7 @@ static void GLUE(X_PFX,source_eoi)(u32 hw_irq, struct xive_irq_data *xd)
 {
	/* If the XIVE supports the new "store EOI" facility, use it */
 	if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
-		__x_writeq(0, __x_eoi_page(xd));
+		__x_writeq(0, __x_eoi_page(xd) + XIVE_ESB_STORE_EOI);
 	else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW) {
 		opal_int_eoi(hw_irq);
 	} else {
@@ -89,7 +89,7 @@ static void GLUE(X_PFX,source_eoi)(u32 hw_irq, struct xive_irq_data *xd)
 	 * properly.
 	 */
 	if (xd->flags & XIVE_IRQ_FLAG_LSI)
-		__x_readq(__x_eoi_page(xd));
+		__x_readq(__x_eoi_page(xd) + XIVE_ESB_LOAD_EOI);
 	else {
 		eoi_val = GLUE(X_PFX,esb_load)(xd, XIVE_ESB_SET_PQ_00);
 
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index f7cf2cd564ef..7f71ab5fcad1 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -1749,7 +1749,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
 		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
 		break;
 	}
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_SPAPR_TCE_IOMMU
 	case KVM_CREATE_SPAPR_TCE_64: {
 		struct kvm_create_spapr_tce_64 create_tce_64;
 
@@ -1780,6 +1780,8 @@ long kvm_arch_vm_ioctl(struct file *filp,
 		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
 		goto out;
 	}
+#endif
+#ifdef CONFIG_PPC_BOOK3S_64
 	case KVM_PPC_GET_SMMU_INFO: {
 		struct kvm_ppc_smmu_info info;
 		struct kvm *kvm = filp->private_data;
diff --git a/arch/powerpc/mm/dump_linuxpagetables.c b/arch/powerpc/mm/dump_linuxpagetables.c
index d659345a98d6..44fe4833910f 100644
--- a/arch/powerpc/mm/dump_linuxpagetables.c
+++ b/arch/powerpc/mm/dump_linuxpagetables.c
@@ -16,6 +16,7 @@
  */
 #include <linux/debugfs.h>
 #include <linux/fs.h>
+#include <linux/hugetlb.h>
 #include <linux/io.h>
 #include <linux/mm.h>
 #include <linux/sched.h>
@@ -391,7 +392,7 @@ static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
 
 	for (i = 0; i < PTRS_PER_PMD; i++, pmd++) {
 		addr = start + i * PMD_SIZE;
-		if (!pmd_none(*pmd))
+		if (!pmd_none(*pmd) && !pmd_huge(*pmd))
 			/* pmd exists */
 			walk_pte(st, pmd, addr);
 		else
@@ -407,7 +408,7 @@ static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start)
 
 	for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
 		addr = start + i * PUD_SIZE;
-		if (!pud_none(*pud))
+		if (!pud_none(*pud) && !pud_huge(*pud))
 			/* pud exists */
 			walk_pmd(st, pud, addr);
 		else
@@ -427,7 +428,7 @@ static void walk_pagetables(struct pg_state *st)
 	 */
 	for (i = 0; i < PTRS_PER_PGD; i++, pgd++) {
 		addr = KERN_VIRT_START + i * PGDIR_SIZE;
-		if (!pgd_none(*pgd))
+		if (!pgd_none(*pgd) && !pgd_huge(*pgd))
 			/* pgd exists */
 			walk_pud(st, pgd, addr);
 		else
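
Note: the reason for the added pmd_huge()/pud_huge()/pgd_huge() tests is that, at any level, an entry may be a leaf (huge-page) mapping rather than a pointer to a next-level table; descending through a leaf would reinterpret mapped memory as a page table. A sketch of the guarded descent at the PMD level (walk_pte() and note_page() as used in this file):

	for (i = 0; i < PTRS_PER_PMD; i++, pmd++) {
		addr = start + i * PMD_SIZE;
		if (!pmd_none(*pmd) && !pmd_huge(*pmd))
			walk_pte(st, pmd, addr); /* a PTE table: descend */
		/* otherwise the entry is empty or a huge-page leaf and is
		 * recorded at this level (note_page() in the real code) */
	}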
diff --git a/arch/powerpc/mm/hugetlbpage-radix.c b/arch/powerpc/mm/hugetlbpage-radix.c
index 6575b9aabef4..a12e86395025 100644
--- a/arch/powerpc/mm/hugetlbpage-radix.c
+++ b/arch/powerpc/mm/hugetlbpage-radix.c
@@ -68,7 +68,7 @@ radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 		addr = ALIGN(addr, huge_page_size(h));
 		vma = find_vma(mm, addr);
 		if (mm->task_size - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 	/*
diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
index 9dbd2a733d6b..0ee6be4f1ba4 100644
--- a/arch/powerpc/mm/mmap.c
+++ b/arch/powerpc/mm/mmap.c
@@ -112,7 +112,7 @@ radix__arch_get_unmapped_area(struct file *filp, unsigned long addr,
 		addr = PAGE_ALIGN(addr);
 		vma = find_vma(mm, addr);
 		if (mm->task_size - len >= addr && addr >= mmap_min_addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 
@@ -157,7 +157,7 @@ radix__arch_get_unmapped_area_topdown(struct file *filp,
 		addr = PAGE_ALIGN(addr);
 		vma = find_vma(mm, addr);
 		if (mm->task_size - len >= addr && addr >= mmap_min_addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 
diff --git a/arch/powerpc/mm/mmu_context_book3s64.c b/arch/powerpc/mm/mmu_context_book3s64.c
index c6dca2ae78ef..a3edf813d455 100644
--- a/arch/powerpc/mm/mmu_context_book3s64.c
+++ b/arch/powerpc/mm/mmu_context_book3s64.c
@@ -99,7 +99,7 @@ static int hash__init_new_context(struct mm_struct *mm)
 	 * mm->context.addr_limit. Default to max task size so that we copy the
 	 * default values to paca which will help us to handle slb miss early.
 	 */
-	mm->context.addr_limit = TASK_SIZE_128TB;
+	mm->context.addr_limit = DEFAULT_MAP_WINDOW_USER64;
 
 	/*
 	 * The old code would re-promote on fork, we don't do that when using
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index 966b9fccfa66..45f6740dd407 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -99,7 +99,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
 	if ((mm->task_size - len) < addr)
 		return 0;
 	vma = find_vma(mm, addr);
-	return (!vma || (addr + len) <= vma->vm_start);
+	return (!vma || (addr + len) <= vm_start_gap(vma));
 }
 
 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
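
Note: the three hunks above replace vma->vm_start with vm_start_gap(vma) in free-area checks, so that a new mapping can no longer be placed flush against the bottom of a stack vma. The helper itself is defined elsewhere (by the stack-guard-gap series in include/linux/mm.h, not in this diff); its assumed shape, sketched here for context:

	extern unsigned long stack_guard_gap;

	static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
	{
		unsigned long vm_start = vma->vm_start;

		if (vma->vm_flags & VM_GROWSDOWN) {
			vm_start -= stack_guard_gap;	/* reserve the gap */
			if (vm_start > vma->vm_start)	/* underflow check */
				vm_start = 0;
		}
		return vm_start;
	}

For non-stack vmas it degenerates to vma->vm_start, so only mappings adjacent to a VM_GROWSDOWN vma behave differently.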
diff --git a/arch/powerpc/perf/perf_regs.c b/arch/powerpc/perf/perf_regs.c
index cbd82fde5770..09ceea6175ba 100644
--- a/arch/powerpc/perf/perf_regs.c
+++ b/arch/powerpc/perf/perf_regs.c
@@ -101,5 +101,6 @@ void perf_get_regs_user(struct perf_regs *regs_user,
 			struct pt_regs *regs_user_copy)
 {
 	regs_user->regs = task_pt_regs(current);
-	regs_user->abi = perf_reg_abi(current);
+	regs_user->abi = (regs_user->regs) ? perf_reg_abi(current) :
+			 PERF_SAMPLE_REGS_ABI_NONE;
 }
diff --git a/arch/powerpc/perf/power9-pmu.c b/arch/powerpc/perf/power9-pmu.c
index 018f8e90ac35..bb28e1a41257 100644
--- a/arch/powerpc/perf/power9-pmu.c
+++ b/arch/powerpc/perf/power9-pmu.c
@@ -402,7 +402,7 @@ static struct power_pmu power9_isa207_pmu = {
 	.name			= "POWER9",
 	.n_counter		= MAX_PMU_COUNTERS,
 	.add_fields		= ISA207_ADD_FIELDS,
-	.test_adder		= ISA207_TEST_ADDER,
+	.test_adder		= P9_DD1_TEST_ADDER,
 	.compute_mmcr		= isa207_compute_mmcr,
 	.config_bhrb		= power9_config_bhrb,
 	.bhrb_filter_map	= power9_bhrb_filter_map,
@@ -421,7 +421,7 @@ static struct power_pmu power9_pmu = {
 	.name			= "POWER9",
 	.n_counter		= MAX_PMU_COUNTERS,
 	.add_fields		= ISA207_ADD_FIELDS,
-	.test_adder		= P9_DD1_TEST_ADDER,
+	.test_adder		= ISA207_TEST_ADDER,
 	.compute_mmcr		= isa207_compute_mmcr,
 	.config_bhrb		= power9_config_bhrb,
 	.bhrb_filter_map	= power9_bhrb_filter_map,
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index 33244e3d9375..4fd64d3f5c44 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -59,6 +59,17 @@ config PPC_OF_BOOT_TRAMPOLINE
 
 	  In case of doubt, say Y
 
+config PPC_DT_CPU_FTRS
+	bool "Device-tree based CPU feature discovery & setup"
+	depends on PPC_BOOK3S_64
+	default y
+	help
+	  This enables code to use a new device tree binding for describing CPU
+	  compatibility and features. Saying Y here will attempt to use the new
+	  binding if the firmware provides it. Currently only the skiboot
+	  firmware provides this binding.
+	  If you're not sure, say Y.
+
 config UDBG_RTAS_CONSOLE
 	bool "RTAS based debug console"
 	depends on PPC_RTAS
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index 96c2b8a40630..0c45cdbac4cf 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -197,7 +197,9 @@ static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
 	    (REGION_ID(ea) != USER_REGION_ID)) {
 
 		spin_unlock(&spu->register_lock);
-		ret = hash_page(ea, _PAGE_PRESENT | _PAGE_READ, 0x300, dsisr);
+		ret = hash_page(ea,
+				_PAGE_PRESENT | _PAGE_READ | _PAGE_PRIVILEGED,
+				0x300, dsisr);
 		spin_lock(&spu->register_lock);
 
 		if (!ret) {
diff --git a/arch/powerpc/platforms/cell/spufs/coredump.c b/arch/powerpc/platforms/cell/spufs/coredump.c
index e5a891ae80ee..84b7ac926ce6 100644
--- a/arch/powerpc/platforms/cell/spufs/coredump.c
+++ b/arch/powerpc/platforms/cell/spufs/coredump.c
@@ -175,6 +175,8 @@ static int spufs_arch_write_note(struct spu_context *ctx, int i,
 	skip = roundup(cprm->pos - total + sz, 4) - cprm->pos;
 	if (!dump_skip(cprm, skip))
 		goto Eio;
+
+	rc = 0;
 out:
 	free_page((unsigned long)buf);
 	return rc;
diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c
index 067defeea691..b5d960d6db3d 100644
--- a/arch/powerpc/platforms/powernv/npu-dma.c
+++ b/arch/powerpc/platforms/powernv/npu-dma.c
@@ -75,7 +75,8 @@ struct pci_dev *pnv_pci_get_npu_dev(struct pci_dev *gpdev, int index)
 	if (WARN_ON(!gpdev))
 		return NULL;
 
-	if (WARN_ON(!gpdev->dev.of_node))
+	/* Not all PCI devices have device-tree nodes */
+	if (!gpdev->dev.of_node)
 		return NULL;
 
 	/* Get associated PCI device */
@@ -448,7 +449,7 @@ static int mmio_launch_invalidate(struct npu *npu, unsigned long launch,
 	return mmio_atsd_reg;
 }
 
-static int mmio_invalidate_pid(struct npu *npu, unsigned long pid)
+static int mmio_invalidate_pid(struct npu *npu, unsigned long pid, bool flush)
 {
 	unsigned long launch;
 
@@ -464,12 +465,15 @@ static int mmio_invalidate_pid(struct npu *npu, unsigned long pid)
 	/* PID */
 	launch |= pid << PPC_BITLSHIFT(38);
 
+	/* No flush */
+	launch |= !flush << PPC_BITLSHIFT(39);
+
 	/* Invalidating the entire process doesn't use a va */
 	return mmio_launch_invalidate(npu, launch, 0);
 }
 
 static int mmio_invalidate_va(struct npu *npu, unsigned long va,
-			unsigned long pid)
+			unsigned long pid, bool flush)
 {
 	unsigned long launch;
 
@@ -485,26 +489,60 @@ static int mmio_invalidate_va(struct npu *npu, unsigned long va,
 	/* PID */
 	launch |= pid << PPC_BITLSHIFT(38);
 
+	/* No flush */
+	launch |= !flush << PPC_BITLSHIFT(39);
+
 	return mmio_launch_invalidate(npu, launch, va);
 }
 
 #define mn_to_npu_context(x) container_of(x, struct npu_context, mn)
 
+struct mmio_atsd_reg {
+	struct npu *npu;
+	int reg;
+};
+
+static void mmio_invalidate_wait(
+	struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS], bool flush)
+{
+	struct npu *npu;
+	int i, reg;
+
+	/* Wait for all invalidations to complete */
+	for (i = 0; i <= max_npu2_index; i++) {
+		if (mmio_atsd_reg[i].reg < 0)
+			continue;
+
+		/* Wait for completion */
+		npu = mmio_atsd_reg[i].npu;
+		reg = mmio_atsd_reg[i].reg;
+		while (__raw_readq(npu->mmio_atsd_regs[reg] + XTS_ATSD_STAT))
+			cpu_relax();
+
+		put_mmio_atsd_reg(npu, reg);
+
+		/*
+		 * The GPU requires two flush ATSDs to ensure all entries have
+		 * been flushed. We use PID 0 as it will never be used for a
+		 * process on the GPU.
+		 */
+		if (flush)
+			mmio_invalidate_pid(npu, 0, true);
+	}
+}
+
 /*
  * Invalidate either a single address or an entire PID depending on
  * the value of va.
  */
 static void mmio_invalidate(struct npu_context *npu_context, int va,
-			unsigned long address)
+			unsigned long address, bool flush)
 {
-	int i, j, reg;
+	int i, j;
 	struct npu *npu;
 	struct pnv_phb *nphb;
 	struct pci_dev *npdev;
-	struct {
-		struct npu *npu;
-		int reg;
-	} mmio_atsd_reg[NV_MAX_NPUS];
+	struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS];
 	unsigned long pid = npu_context->mm->context.id;
 
 	/*
@@ -524,10 +562,11 @@ static void mmio_invalidate(struct npu_context *npu_context, int va,
 
 		if (va)
 			mmio_atsd_reg[i].reg =
-				mmio_invalidate_va(npu, address, pid);
+				mmio_invalidate_va(npu, address, pid,
+						flush);
 		else
 			mmio_atsd_reg[i].reg =
-				mmio_invalidate_pid(npu, pid);
+				mmio_invalidate_pid(npu, pid, flush);
 
 		/*
 		 * The NPU hardware forwards the shootdown to all GPUs
@@ -543,18 +582,10 @@ static void mmio_invalidate(struct npu_context *npu_context, int va,
 	 */
 	flush_tlb_mm(npu_context->mm);
 
-	/* Wait for all invalidations to complete */
-	for (i = 0; i <= max_npu2_index; i++) {
-		if (mmio_atsd_reg[i].reg < 0)
-			continue;
-
-		/* Wait for completion */
-		npu = mmio_atsd_reg[i].npu;
-		reg = mmio_atsd_reg[i].reg;
-		while (__raw_readq(npu->mmio_atsd_regs[reg] + XTS_ATSD_STAT))
-			cpu_relax();
-		put_mmio_atsd_reg(npu, reg);
-	}
+	mmio_invalidate_wait(mmio_atsd_reg, flush);
+	if (flush)
+		/* Wait for the flush to complete */
+		mmio_invalidate_wait(mmio_atsd_reg, false);
 }
 
 static void pnv_npu2_mn_release(struct mmu_notifier *mn,
@@ -570,7 +601,7 @@ static void pnv_npu2_mn_release(struct mmu_notifier *mn,
 	 * There should be no more translation requests for this PID, but we
 	 * need to ensure any entries for it are removed from the TLB.
 	 */
-	mmio_invalidate(npu_context, 0, 0);
+	mmio_invalidate(npu_context, 0, 0, true);
 }
 
 static void pnv_npu2_mn_change_pte(struct mmu_notifier *mn,
@@ -580,7 +611,7 @@ static void pnv_npu2_mn_change_pte(struct mmu_notifier *mn,
 {
 	struct npu_context *npu_context = mn_to_npu_context(mn);
 
-	mmio_invalidate(npu_context, 1, address);
+	mmio_invalidate(npu_context, 1, address, true);
 }
 
 static void pnv_npu2_mn_invalidate_page(struct mmu_notifier *mn,
@@ -589,7 +620,7 @@ static void pnv_npu2_mn_invalidate_page(struct mmu_notifier *mn,
 {
 	struct npu_context *npu_context = mn_to_npu_context(mn);
 
-	mmio_invalidate(npu_context, 1, address);
+	mmio_invalidate(npu_context, 1, address, true);
 }
 
 static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn,
@@ -599,8 +630,11 @@ static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn,
 	struct npu_context *npu_context = mn_to_npu_context(mn);
 	unsigned long address;
 
-	for (address = start; address <= end; address += PAGE_SIZE)
-		mmio_invalidate(npu_context, 1, address);
+	for (address = start; address < end; address += PAGE_SIZE)
+		mmio_invalidate(npu_context, 1, address, false);
+
+	/* Do the flush only on the final address == end */
+	mmio_invalidate(npu_context, 1, address, true);
 }
 
 static const struct mmu_notifier_ops nv_nmmu_notifier_ops = {
@@ -650,8 +684,11 @@ struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
 		/* No nvlink associated with this GPU device */
 		return ERR_PTR(-ENODEV);
 
-	if (!mm) {
-		/* kernel thread contexts are not supported */
+	if (!mm || mm->context.id == 0) {
+		/*
+		 * Kernel thread contexts are not supported and context id 0 is
+		 * reserved on the GPU.
+		 */
 		return ERR_PTR(-EINVAL);
 	}
 
@@ -714,7 +751,7 @@ static void pnv_npu2_release_context(struct kref *kref)
 void pnv_npu2_destroy_context(struct npu_context *npu_context,
 			struct pci_dev *gpdev)
 {
-	struct pnv_phb *nphb, *phb;
+	struct pnv_phb *nphb;
 	struct npu *npu;
 	struct pci_dev *npdev = pnv_pci_get_npu_dev(gpdev, 0);
 	struct device_node *nvlink_dn;
@@ -728,13 +765,12 @@ void pnv_npu2_destroy_context(struct npu_context *npu_context,
 
 	nphb = pci_bus_to_host(npdev->bus)->private_data;
 	npu = &nphb->npu;
-	phb = pci_bus_to_host(gpdev->bus)->private_data;
 	nvlink_dn = of_parse_phandle(npdev->dev.of_node, "ibm,nvlink", 0);
 	if (WARN_ON(of_property_read_u32(nvlink_dn, "ibm,npu-link-index",
 							&nvlink_index)))
 		return;
 	npu_context->npdev[npu->index][nvlink_index] = NULL;
-	opal_npu_destroy_context(phb->opal_id, npu_context->mm->context.id,
+	opal_npu_destroy_context(nphb->opal_id, npu_context->mm->context.id,
 			PCI_DEVID(gpdev->bus->number, gpdev->devfn));
 	kref_put(&npu_context->kref, pnv_npu2_release_context);
 }
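
Note: the range-invalidate change above does two things at once: the old loop used <=, so it issued one invalidate past the end of the range, and every invalidate carried the expensive flush semantics. The batching idea, as used in the new pnv_npu2_mn_invalidate_range():

	/* cheap, non-flushing ATSDs for every page in [start, end) */
	for (address = start; address < end; address += PAGE_SIZE)
		mmio_invalidate(npu_context, 1, address, false);
	/* after the loop address == end: pay for the flush exactly once */
	mmio_invalidate(npu_context, 1, address, true);

The flush variant is what triggers the two follow-up PID-0 ATSDs in mmio_invalidate_wait(), so issuing it once per range instead of once per page is the performance win.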
diff --git a/arch/powerpc/platforms/powernv/subcore.c b/arch/powerpc/platforms/powernv/subcore.c
index 0babef11136f..8c6119280c13 100644
--- a/arch/powerpc/platforms/powernv/subcore.c
+++ b/arch/powerpc/platforms/powernv/subcore.c
@@ -407,7 +407,13 @@ static DEVICE_ATTR(subcores_per_core, 0644,
 
 static int subcore_init(void)
 {
-	if (!cpu_has_feature(CPU_FTR_SUBCORE))
+	unsigned pvr_ver;
+
+	pvr_ver = PVR_VER(mfspr(SPRN_PVR));
+
+	if (pvr_ver != PVR_POWER8 &&
+	    pvr_ver != PVR_POWER8E &&
+	    pvr_ver != PVR_POWER8NVL)
 		return 0;
 
 	/*
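
Note: the gate is now keyed off the processor version register rather than a CPU feature bit. PVR_VER() extracts the version field from SPRN_PVR, and the three PVR_POWER8* values cover the POWER8 variants that implement dynamic subcore splitting, so in effect:

	unsigned int pvr_ver = PVR_VER(mfspr(SPRN_PVR));

	/* subcores exist only on POWER8-family parts */
	if (pvr_ver != PVR_POWER8 && pvr_ver != PVR_POWER8E &&
	    pvr_ver != PVR_POWER8NVL)
		return 0;

Checking the PVR directly keeps the test correct regardless of which mechanism (legacy tables or device-tree discovery) populated the CPU feature bits.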
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index e104c71ea44a..1fb162ba9d1c 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -124,6 +124,7 @@ static struct property *dlpar_clone_drconf_property(struct device_node *dn)
 	for (i = 0; i < num_lmbs; i++) {
 		lmbs[i].base_addr = be64_to_cpu(lmbs[i].base_addr);
 		lmbs[i].drc_index = be32_to_cpu(lmbs[i].drc_index);
+		lmbs[i].aa_index = be32_to_cpu(lmbs[i].aa_index);
 		lmbs[i].flags = be32_to_cpu(lmbs[i].flags);
 	}
 
@@ -147,6 +148,7 @@ static void dlpar_update_drconf_property(struct device_node *dn,
 	for (i = 0; i < num_lmbs; i++) {
 		lmbs[i].base_addr = cpu_to_be64(lmbs[i].base_addr);
 		lmbs[i].drc_index = cpu_to_be32(lmbs[i].drc_index);
+		lmbs[i].aa_index = cpu_to_be32(lmbs[i].aa_index);
 		lmbs[i].flags = cpu_to_be32(lmbs[i].flags);
 	}
 
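
Note: why the aa_index conversions matter: the dynamic-memory property is big-endian in the device tree, and these two functions byte-swap every LMB field on the way in and back out. A field converted in only one direction, or in neither direction as aa_index was, reaches the hypervisor byte-swapped on little-endian hosts. The layout below is the assumed shape of one LMB entry as used by this code (see of_drconf_cell; treat the exact field list as an assumption, it is not part of this diff):

	struct of_drconf_cell {		/* one LMB entry, stored BE in the DT */
		u64 base_addr;
		u32 drc_index;
		u32 reserved;
		u32 aa_index;		/* associativity array index */
		u32 flags;
	};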
diff --git a/arch/powerpc/sysdev/simple_gpio.c b/arch/powerpc/sysdev/simple_gpio.c
index ef470b470b04..6afddae2fb47 100644
--- a/arch/powerpc/sysdev/simple_gpio.c
+++ b/arch/powerpc/sysdev/simple_gpio.c
@@ -75,7 +75,8 @@ static int u8_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
 
 static void u8_gpio_save_regs(struct of_mm_gpio_chip *mm_gc)
 {
-	struct u8_gpio_chip *u8_gc = gpiochip_get_data(&mm_gc->gc);
+	struct u8_gpio_chip *u8_gc =
+		container_of(mm_gc, struct u8_gpio_chip, mm_gc);
 
 	u8_gc->data = in_8(mm_gc->regs);
 }
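
Note: gpiochip_get_data() only returns valid data once the chip has been registered, and the save_regs callback runs during registration, before the chip's private-data pointer is set (a reading of the upstream fix; treat as context, it is not stated in this diff). container_of() sidesteps the ordering problem entirely by recovering the wrapper from the embedded member with pure pointer arithmetic:

	struct u8_gpio_chip {
		struct of_mm_gpio_chip mm_gc;	/* embedded member */
		u8 data;			/* fields abbreviated */
	};

	/* no registration needed: subtract the member's offset */
	struct u8_gpio_chip *u8_gc =
		container_of(mm_gc, struct u8_gpio_chip, mm_gc);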
diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c
index 913825086b8d..8f5e3035483b 100644
--- a/arch/powerpc/sysdev/xive/common.c
+++ b/arch/powerpc/sysdev/xive/common.c
@@ -297,7 +297,7 @@ void xive_do_source_eoi(u32 hw_irq, struct xive_irq_data *xd)
 {
	/* If the XIVE supports the new "store EOI" facility, use it */
 	if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
-		out_be64(xd->eoi_mmio, 0);
+		out_be64(xd->eoi_mmio + XIVE_ESB_STORE_EOI, 0);
 	else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW) {
 		/*
 		 * The FW told us to call it. This happens for some
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index e161fafb495b..6967addc6a89 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -363,9 +363,6 @@ config COMPAT
 config SYSVIPC_COMPAT
 	def_bool y if COMPAT && SYSVIPC
 
-config KEYS_COMPAT
-	def_bool y if COMPAT && KEYS
-
 config SMP
 	def_bool y
 	prompt "Symmetric multi-processing support"
diff --git a/arch/s390/configs/default_defconfig b/arch/s390/configs/default_defconfig
index a5039fa89314..282072206df7 100644
--- a/arch/s390/configs/default_defconfig
+++ b/arch/s390/configs/default_defconfig
@@ -30,6 +30,7 @@ CONFIG_USER_NS=y
 CONFIG_SCHED_AUTOGROUP=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_EXPERT=y
+# CONFIG_SYSFS_SYSCALL is not set
 CONFIG_BPF_SYSCALL=y
 CONFIG_USERFAULTFD=y
 # CONFIG_COMPAT_BRK is not set
@@ -44,7 +45,10 @@ CONFIG_MODULE_UNLOAD=y
 CONFIG_MODULE_FORCE_UNLOAD=y
 CONFIG_MODVERSIONS=y
 CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_BLK_DEV_INTEGRITY=y
 CONFIG_BLK_DEV_THROTTLING=y
+CONFIG_BLK_WBT=y
+CONFIG_BLK_WBT_SQ=y
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_IBM_PARTITION=y
 CONFIG_BSD_DISKLABEL=y
@@ -90,6 +94,8 @@ CONFIG_UNIX=y
 CONFIG_UNIX_DIAG=m
 CONFIG_XFRM_USER=m
 CONFIG_NET_KEY=m
+CONFIG_SMC=m
+CONFIG_SMC_DIAG=m
 CONFIG_INET=y
 CONFIG_IP_MULTICAST=y
 CONFIG_IP_ADVANCED_ROUTER=y
@@ -359,6 +365,7 @@ CONFIG_NET_ACT_SIMP=m
 CONFIG_NET_ACT_SKBEDIT=m
 CONFIG_NET_ACT_CSUM=m
 CONFIG_DNS_RESOLVER=y
+CONFIG_NETLINK_DIAG=m
 CONFIG_CGROUP_NET_PRIO=y
 CONFIG_BPF_JIT=y
 CONFIG_NET_PKTGEN=m
@@ -367,16 +374,19 @@ CONFIG_DEVTMPFS=y
 CONFIG_DMA_CMA=y
 CONFIG_CMA_SIZE_MBYTES=0
 CONFIG_CONNECTOR=y
+CONFIG_ZRAM=m
 CONFIG_BLK_DEV_LOOP=m
 CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_DRBD=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_OSD=m
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=32768
-CONFIG_CDROM_PKTCDVD=m
-CONFIG_ATA_OVER_ETH=m
+CONFIG_BLK_DEV_RAM_DAX=y
 CONFIG_VIRTIO_BLK=y
+CONFIG_BLK_DEV_RBD=m
 CONFIG_ENCLOSURE_SERVICES=m
+CONFIG_GENWQE=m
 CONFIG_RAID_ATTRS=m
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
@@ -442,6 +452,8 @@ CONFIG_NLMON=m
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 CONFIG_MLX4_EN=m
+CONFIG_MLX5_CORE=m
+CONFIG_MLX5_CORE_EN=y
 # CONFIG_NET_VENDOR_NATSEMI is not set
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
@@ -452,7 +464,6 @@ CONFIG_PPTP=m
 CONFIG_PPPOL2TP=m
 CONFIG_PPP_ASYNC=m
 CONFIG_PPP_SYNC_TTY=m
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
 # CONFIG_INPUT_KEYBOARD is not set
 # CONFIG_INPUT_MOUSE is not set
 # CONFIG_SERIO is not set
@@ -471,6 +482,7 @@ CONFIG_DIAG288_WATCHDOG=m
 CONFIG_INFINIBAND=m
 CONFIG_INFINIBAND_USER_ACCESS=m
 CONFIG_MLX4_INFINIBAND=m
+CONFIG_MLX5_INFINIBAND=m
 CONFIG_VIRTIO_BALLOON=m
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_POSIX_ACL=y
@@ -487,12 +499,18 @@ CONFIG_XFS_POSIX_ACL=y
 CONFIG_XFS_RT=y
 CONFIG_XFS_DEBUG=y
 CONFIG_GFS2_FS=m
+CONFIG_GFS2_FS_LOCKING_DLM=y
 CONFIG_OCFS2_FS=m
 CONFIG_BTRFS_FS=y
 CONFIG_BTRFS_FS_POSIX_ACL=y
+CONFIG_BTRFS_DEBUG=y
 CONFIG_NILFS2_FS=m
+CONFIG_FS_DAX=y
+CONFIG_EXPORTFS_BLOCK_OPS=y
 CONFIG_FANOTIFY=y
+CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_QUOTA_DEBUG=y
 CONFIG_QFMT_V1=m
 CONFIG_QFMT_V2=m
 CONFIG_AUTOFS4_FS=m
@@ -558,6 +576,7 @@ CONFIG_HEADERS_CHECK=y
 CONFIG_DEBUG_SECTION_MISMATCH=y
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_PAGEALLOC=y
+CONFIG_DEBUG_RODATA_TEST=y
 CONFIG_DEBUG_OBJECTS=y
 CONFIG_DEBUG_OBJECTS_SELFTEST=y
 CONFIG_DEBUG_OBJECTS_FREE=y
@@ -580,7 +599,6 @@ CONFIG_DETECT_HUNG_TASK=y
 CONFIG_WQ_WATCHDOG=y
 CONFIG_PANIC_ON_OOPS=y
 CONFIG_DEBUG_TIMEKEEPING=y
-CONFIG_TIMER_STATS=y
 CONFIG_DEBUG_RT_MUTEXES=y
 CONFIG_DEBUG_WW_MUTEX_SLOWPATH=y
 CONFIG_PROVE_LOCKING=y
@@ -595,6 +613,7 @@ CONFIG_RCU_TORTURE_TEST=m
 CONFIG_RCU_CPU_STALL_TIMEOUT=300
 CONFIG_NOTIFIER_ERROR_INJECTION=m
 CONFIG_PM_NOTIFIER_ERROR_INJECT=m
+CONFIG_NETDEV_NOTIFIER_ERROR_INJECT=m
 CONFIG_FAULT_INJECTION=y
 CONFIG_FAILSLAB=y
 CONFIG_FAIL_PAGE_ALLOC=y
@@ -616,13 +635,12 @@ CONFIG_HIST_TRIGGERS=y
 CONFIG_TRACE_ENUM_MAP_FILE=y
 CONFIG_LKDTM=m
 CONFIG_TEST_LIST_SORT=y
+CONFIG_TEST_SORT=y
 CONFIG_KPROBES_SANITY_TEST=y
 CONFIG_RBTREE_TEST=y
 CONFIG_INTERVAL_TREE_TEST=m
 CONFIG_PERCPU_TEST=m
 CONFIG_ATOMIC64_SELFTEST=y
-CONFIG_TEST_STRING_HELPERS=y
-CONFIG_TEST_KSTRTOX=y
 CONFIG_DMA_API_DEBUG=y
 CONFIG_TEST_BPF=m
 CONFIG_BUG_ON_DATA_CORRUPTION=y
@@ -630,6 +648,7 @@ CONFIG_S390_PTDUMP=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_SECURITY=y
 CONFIG_SECURITY_NETWORK=y
+CONFIG_HARDENED_USERCOPY=y
 CONFIG_SECURITY_SELINUX=y
 CONFIG_SECURITY_SELINUX_BOOTPARAM=y
 CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
@@ -640,7 +659,9 @@ CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_DH=m
 CONFIG_CRYPTO_ECDH=m
 CONFIG_CRYPTO_USER=m
+CONFIG_CRYPTO_PCRYPT=m
 CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
 CONFIG_CRYPTO_GCM=m
@@ -648,6 +669,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_CRC32=m
@@ -657,8 +679,10 @@ CONFIG_CRYPTO_RMD160=m
 CONFIG_CRYPTO_RMD256=m
 CONFIG_CRYPTO_RMD320=m
 CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_SHA3=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
 CONFIG_CRYPTO_ANUBIS=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
@@ -674,6 +698,7 @@ CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ANSI_CPRNG=m
 CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
@@ -685,6 +710,7 @@ CONFIG_CRYPTO_SHA256_S390=m
 CONFIG_CRYPTO_SHA512_S390=m
 CONFIG_CRYPTO_DES_S390=m
 CONFIG_CRYPTO_AES_S390=m
+CONFIG_CRYPTO_PAES_S390=m
 CONFIG_CRYPTO_GHASH_S390=m
 CONFIG_CRYPTO_CRC32_S390=y
 CONFIG_ASYMMETRIC_KEY_TYPE=y
@@ -692,6 +718,7 @@ CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
 CONFIG_X509_CERTIFICATE_PARSER=m
 CONFIG_CRC7=m
 CONFIG_CRC8=m
+CONFIG_RANDOM32_SELFTEST=y
 CONFIG_CORDIC=m
 CONFIG_CMM=m
 CONFIG_APPLDATA_BASE=y
diff --git a/arch/s390/configs/gcov_defconfig b/arch/s390/configs/gcov_defconfig
index 83970b5afb2b..3c6b78189fbc 100644
--- a/arch/s390/configs/gcov_defconfig
+++ b/arch/s390/configs/gcov_defconfig
@@ -31,6 +31,7 @@ CONFIG_USER_NS=y
 CONFIG_SCHED_AUTOGROUP=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_EXPERT=y
+# CONFIG_SYSFS_SYSCALL is not set
 CONFIG_BPF_SYSCALL=y
 CONFIG_USERFAULTFD=y
 # CONFIG_COMPAT_BRK is not set
@@ -46,7 +47,10 @@ CONFIG_MODULE_UNLOAD=y
 CONFIG_MODULE_FORCE_UNLOAD=y
 CONFIG_MODVERSIONS=y
 CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_BLK_DEV_INTEGRITY=y
 CONFIG_BLK_DEV_THROTTLING=y
+CONFIG_BLK_WBT=y
+CONFIG_BLK_WBT_SQ=y
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_IBM_PARTITION=y
 CONFIG_BSD_DISKLABEL=y
@@ -88,6 +92,8 @@ CONFIG_UNIX=y
 CONFIG_UNIX_DIAG=m
 CONFIG_XFRM_USER=m
 CONFIG_NET_KEY=m
+CONFIG_SMC=m
+CONFIG_SMC_DIAG=m
 CONFIG_INET=y
 CONFIG_IP_MULTICAST=y
 CONFIG_IP_ADVANCED_ROUTER=y
@@ -356,6 +362,7 @@ CONFIG_NET_ACT_SIMP=m
 CONFIG_NET_ACT_SKBEDIT=m
 CONFIG_NET_ACT_CSUM=m
 CONFIG_DNS_RESOLVER=y
+CONFIG_NETLINK_DIAG=m
 CONFIG_CGROUP_NET_PRIO=y
 CONFIG_BPF_JIT=y
 CONFIG_NET_PKTGEN=m
@@ -364,16 +371,18 @@ CONFIG_DEVTMPFS=y
 CONFIG_DMA_CMA=y
 CONFIG_CMA_SIZE_MBYTES=0
 CONFIG_CONNECTOR=y
+CONFIG_ZRAM=m
 CONFIG_BLK_DEV_LOOP=m
 CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_DRBD=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_OSD=m
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=32768
-CONFIG_CDROM_PKTCDVD=m
-CONFIG_ATA_OVER_ETH=m
+CONFIG_BLK_DEV_RAM_DAX=y
 CONFIG_VIRTIO_BLK=y
 CONFIG_ENCLOSURE_SERVICES=m
+CONFIG_GENWQE=m
 CONFIG_RAID_ATTRS=m
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
@@ -439,6 +448,8 @@ CONFIG_NLMON=m
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 CONFIG_MLX4_EN=m
+CONFIG_MLX5_CORE=m
+CONFIG_MLX5_CORE_EN=y
 # CONFIG_NET_VENDOR_NATSEMI is not set
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
@@ -449,7 +460,6 @@ CONFIG_PPTP=m
 CONFIG_PPPOL2TP=m
 CONFIG_PPP_ASYNC=m
 CONFIG_PPP_SYNC_TTY=m
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
 # CONFIG_INPUT_KEYBOARD is not set
 # CONFIG_INPUT_MOUSE is not set
 # CONFIG_SERIO is not set
@@ -468,6 +478,7 @@ CONFIG_DIAG288_WATCHDOG=m
 CONFIG_INFINIBAND=m
 CONFIG_INFINIBAND_USER_ACCESS=m
 CONFIG_MLX4_INFINIBAND=m
+CONFIG_MLX5_INFINIBAND=m
 CONFIG_VIRTIO_BALLOON=m
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_POSIX_ACL=y
@@ -483,11 +494,15 @@ CONFIG_XFS_QUOTA=y
 CONFIG_XFS_POSIX_ACL=y
 CONFIG_XFS_RT=y
 CONFIG_GFS2_FS=m
+CONFIG_GFS2_FS_LOCKING_DLM=y
 CONFIG_OCFS2_FS=m
 CONFIG_BTRFS_FS=y
 CONFIG_BTRFS_FS_POSIX_ACL=y
 CONFIG_NILFS2_FS=m
+CONFIG_FS_DAX=y
+CONFIG_EXPORTFS_BLOCK_OPS=y
 CONFIG_FANOTIFY=y
+CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 CONFIG_QFMT_V1=m
 CONFIG_QFMT_V2=m
@@ -553,7 +568,6 @@ CONFIG_UNUSED_SYMBOLS=y
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_MEMORY_INIT=y
 CONFIG_PANIC_ON_OOPS=y
-CONFIG_TIMER_STATS=y
 CONFIG_RCU_TORTURE_TEST=m
 CONFIG_RCU_CPU_STALL_TIMEOUT=60
 CONFIG_LATENCYTOP=y
@@ -576,6 +590,7 @@ CONFIG_BIG_KEYS=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_SECURITY=y
 CONFIG_SECURITY_NETWORK=y
+CONFIG_HARDENED_USERCOPY=y
 CONFIG_SECURITY_SELINUX=y
 CONFIG_SECURITY_SELINUX_BOOTPARAM=y
 CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
@@ -599,6 +614,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_CRC32=m
@@ -611,6 +627,7 @@ CONFIG_CRYPTO_SHA512=m
 CONFIG_CRYPTO_SHA3=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
 CONFIG_CRYPTO_ANUBIS=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
@@ -626,16 +643,19 @@ CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ANSI_CPRNG=m
 CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_CRYPTO_USER_API_AEAD=m
 CONFIG_ZCRYPT=m
+CONFIG_PKEY=m
 CONFIG_CRYPTO_SHA1_S390=m
 CONFIG_CRYPTO_SHA256_S390=m
 CONFIG_CRYPTO_SHA512_S390=m
 CONFIG_CRYPTO_DES_S390=m
 CONFIG_CRYPTO_AES_S390=m
+CONFIG_CRYPTO_PAES_S390=m
 CONFIG_CRYPTO_GHASH_S390=m
 CONFIG_CRYPTO_CRC32_S390=y
 CONFIG_CRC7=m
diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig
index fbc6542aaf59..653d72bcc007 100644
--- a/arch/s390/configs/performance_defconfig
+++ b/arch/s390/configs/performance_defconfig
@@ -31,6 +31,7 @@ CONFIG_USER_NS=y
 CONFIG_SCHED_AUTOGROUP=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_EXPERT=y
+# CONFIG_SYSFS_SYSCALL is not set
 CONFIG_BPF_SYSCALL=y
 CONFIG_USERFAULTFD=y
 # CONFIG_COMPAT_BRK is not set
@@ -44,7 +45,10 @@ CONFIG_MODULE_UNLOAD=y
 CONFIG_MODULE_FORCE_UNLOAD=y
 CONFIG_MODVERSIONS=y
 CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_BLK_DEV_INTEGRITY=y
 CONFIG_BLK_DEV_THROTTLING=y
+CONFIG_BLK_WBT=y
+CONFIG_BLK_WBT_SQ=y
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_IBM_PARTITION=y
 CONFIG_BSD_DISKLABEL=y
@@ -86,6 +90,8 @@ CONFIG_UNIX=y
 CONFIG_UNIX_DIAG=m
 CONFIG_XFRM_USER=m
 CONFIG_NET_KEY=m
+CONFIG_SMC=m
+CONFIG_SMC_DIAG=m
 CONFIG_INET=y
 CONFIG_IP_MULTICAST=y
 CONFIG_IP_ADVANCED_ROUTER=y
@@ -354,6 +360,7 @@ CONFIG_NET_ACT_SIMP=m
 CONFIG_NET_ACT_SKBEDIT=m
 CONFIG_NET_ACT_CSUM=m
 CONFIG_DNS_RESOLVER=y
+CONFIG_NETLINK_DIAG=m
 CONFIG_CGROUP_NET_PRIO=y
 CONFIG_BPF_JIT=y
 CONFIG_NET_PKTGEN=m
@@ -362,16 +369,18 @@ CONFIG_DEVTMPFS=y
 CONFIG_DMA_CMA=y
 CONFIG_CMA_SIZE_MBYTES=0
 CONFIG_CONNECTOR=y
+CONFIG_ZRAM=m
 CONFIG_BLK_DEV_LOOP=m
 CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_DRBD=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_OSD=m
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=32768
-CONFIG_CDROM_PKTCDVD=m
-CONFIG_ATA_OVER_ETH=m
+CONFIG_BLK_DEV_RAM_DAX=y
 CONFIG_VIRTIO_BLK=y
 CONFIG_ENCLOSURE_SERVICES=m
+CONFIG_GENWQE=m
 CONFIG_RAID_ATTRS=m
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
@@ -437,6 +446,8 @@ CONFIG_NLMON=m
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 CONFIG_MLX4_EN=m
+CONFIG_MLX5_CORE=m
+CONFIG_MLX5_CORE_EN=y
 # CONFIG_NET_VENDOR_NATSEMI is not set
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
@@ -447,7 +458,6 @@ CONFIG_PPTP=m
 CONFIG_PPPOL2TP=m
 CONFIG_PPP_ASYNC=m
 CONFIG_PPP_SYNC_TTY=m
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
 # CONFIG_INPUT_KEYBOARD is not set
 # CONFIG_INPUT_MOUSE is not set
 # CONFIG_SERIO is not set
@@ -466,6 +476,7 @@ CONFIG_DIAG288_WATCHDOG=m
 CONFIG_INFINIBAND=m
 CONFIG_INFINIBAND_USER_ACCESS=m
 CONFIG_MLX4_INFINIBAND=m
+CONFIG_MLX5_INFINIBAND=m
 CONFIG_VIRTIO_BALLOON=m
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_POSIX_ACL=y
@@ -481,11 +492,15 @@ CONFIG_XFS_QUOTA=y
 CONFIG_XFS_POSIX_ACL=y
 CONFIG_XFS_RT=y
 CONFIG_GFS2_FS=m
+CONFIG_GFS2_FS_LOCKING_DLM=y
 CONFIG_OCFS2_FS=m
 CONFIG_BTRFS_FS=y
 CONFIG_BTRFS_FS_POSIX_ACL=y
 CONFIG_NILFS2_FS=m
+CONFIG_FS_DAX=y
+CONFIG_EXPORTFS_BLOCK_OPS=y
 CONFIG_FANOTIFY=y
+CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 CONFIG_QFMT_V1=m
 CONFIG_QFMT_V2=m
@@ -551,7 +566,6 @@ CONFIG_UNUSED_SYMBOLS=y
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_MEMORY_INIT=y
 CONFIG_PANIC_ON_OOPS=y
-CONFIG_TIMER_STATS=y
 CONFIG_RCU_TORTURE_TEST=m
 CONFIG_RCU_CPU_STALL_TIMEOUT=60
 CONFIG_LATENCYTOP=y
@@ -574,6 +588,7 @@ CONFIG_BIG_KEYS=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_SECURITY=y
 CONFIG_SECURITY_NETWORK=y
+CONFIG_HARDENED_USERCOPY=y
 CONFIG_SECURITY_SELINUX=y
 CONFIG_SECURITY_SELINUX_BOOTPARAM=y
 CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
@@ -597,6 +612,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_CRC32=m
@@ -609,6 +625,7 @@ CONFIG_CRYPTO_SHA512=m
 CONFIG_CRYPTO_SHA3=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
 CONFIG_CRYPTO_ANUBIS=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
@@ -624,6 +641,7 @@ CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ANSI_CPRNG=m
 CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
@@ -635,6 +653,7 @@ CONFIG_CRYPTO_SHA256_S390=m
 CONFIG_CRYPTO_SHA512_S390=m
 CONFIG_CRYPTO_DES_S390=m
 CONFIG_CRYPTO_AES_S390=m
+CONFIG_CRYPTO_PAES_S390=m
 CONFIG_CRYPTO_GHASH_S390=m
 CONFIG_CRYPTO_CRC32_S390=y
 CONFIG_CRC7=m
diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig
index e23d97c13735..afa46a7406ea 100644
--- a/arch/s390/configs/zfcpdump_defconfig
+++ b/arch/s390/configs/zfcpdump_defconfig
@@ -12,8 +12,10 @@ CONFIG_TUNE_ZEC12=y
 CONFIG_NR_CPUS=2
 # CONFIG_HOTPLUG_CPU is not set
 CONFIG_HZ_100=y
+# CONFIG_ARCH_RANDOM is not set
 # CONFIG_COMPACTION is not set
 # CONFIG_MIGRATION is not set
+# CONFIG_BOUNCE is not set
 # CONFIG_CHECK_STACK is not set
 # CONFIG_CHSC_SCH is not set
 # CONFIG_SCM_BUS is not set
@@ -36,11 +38,11 @@ CONFIG_SCSI_CONSTANTS=y
 CONFIG_SCSI_LOGGING=y
 CONFIG_SCSI_FC_ATTRS=y
 CONFIG_ZFCP=y
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
 # CONFIG_INPUT_KEYBOARD is not set
 # CONFIG_INPUT_MOUSE is not set
 # CONFIG_SERIO is not set
 # CONFIG_HVC_IUCV is not set
+# CONFIG_HW_RANDOM_S390 is not set
 CONFIG_RAW_DRIVER=y
 # CONFIG_SCLP_ASYNC is not set
 # CONFIG_HMC_DRV is not set
@@ -54,9 +56,9 @@ CONFIG_RAW_DRIVER=y
 # CONFIG_INOTIFY_USER is not set
 CONFIG_CONFIGFS_FS=y
 # CONFIG_MISC_FILESYSTEMS is not set
+# CONFIG_NETWORK_FILESYSTEMS is not set
 CONFIG_PRINTK_TIME=y
 CONFIG_DEBUG_INFO=y
-CONFIG_DEBUG_FS=y
 CONFIG_DEBUG_KERNEL=y
 CONFIG_PANIC_ON_OOPS=y
 # CONFIG_SCHED_DEBUG is not set
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index 97189dbaf34b..20244a38c886 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -28,6 +28,7 @@ CONFIG_NAMESPACES=y
 CONFIG_USER_NS=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_EXPERT=y
+# CONFIG_SYSFS_SYSCALL is not set
 CONFIG_BPF_SYSCALL=y
 CONFIG_USERFAULTFD=y
 # CONFIG_COMPAT_BRK is not set
@@ -108,7 +109,6 @@ CONFIG_ZFCP=y
 CONFIG_SCSI_VIRTIO=y
 CONFIG_MD=y
 CONFIG_MD_LINEAR=m
-CONFIG_MD_RAID0=m
 CONFIG_MD_MULTIPATH=m
 CONFIG_BLK_DEV_DM=y
 CONFIG_DM_CRYPT=m
@@ -131,6 +131,7 @@ CONFIG_TUN=m
 CONFIG_VIRTIO_NET=y
 # CONFIG_NET_VENDOR_ALACRITECH is not set
 # CONFIG_NET_VENDOR_SOLARFLARE is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_INPUT is not set
 # CONFIG_SERIO is not set
 CONFIG_DEVKMEM=y
@@ -162,7 +163,6 @@ CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_PAGEALLOC=y
 CONFIG_DETECT_HUNG_TASK=y
 CONFIG_PANIC_ON_OOPS=y
-CONFIG_TIMER_STATS=y
 CONFIG_DEBUG_RT_MUTEXES=y
 CONFIG_PROVE_LOCKING=y
 CONFIG_LOCK_STAT=y
@@ -172,14 +172,12 @@ CONFIG_DEBUG_LIST=y
 CONFIG_DEBUG_SG=y
 CONFIG_DEBUG_NOTIFIERS=y
 CONFIG_RCU_CPU_STALL_TIMEOUT=60
-CONFIG_RCU_TRACE=y
 CONFIG_LATENCYTOP=y
 CONFIG_SCHED_TRACER=y
 CONFIG_FTRACE_SYSCALLS=y
 CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y
 CONFIG_STACK_TRACER=y
 CONFIG_BLK_DEV_IO_TRACE=y
-CONFIG_UPROBE_EVENTS=y
 CONFIG_FUNCTION_PROFILER=y
 CONFIG_TRACE_ENUM_MAP_FILE=y
 CONFIG_KPROBES_SANITY_TEST=y
@@ -190,7 +188,6 @@ CONFIG_CRYPTO_CCM=m
 CONFIG_CRYPTO_GCM=m
 CONFIG_CRYPTO_CBC=y
 CONFIG_CRYPTO_CTS=m
-CONFIG_CRYPTO_ECB=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_XTS=m
@@ -230,6 +227,7 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_ZCRYPT=m
 CONFIG_PKEY=m
+CONFIG_CRYPTO_PAES_S390=m
 CONFIG_CRYPTO_SHA1_S390=m
 CONFIG_CRYPTO_SHA256_S390=m
 CONFIG_CRYPTO_SHA512_S390=m
diff --git a/arch/s390/include/asm/debug.h b/arch/s390/include/asm/debug.h
index 0206c8052328..df7b54ea956d 100644
--- a/arch/s390/include/asm/debug.h
+++ b/arch/s390/include/asm/debug.h
@@ -10,6 +10,7 @@
 #include <linux/spinlock.h>
 #include <linux/kernel.h>
 #include <linux/time.h>
+#include <linux/refcount.h>
 #include <uapi/asm/debug.h>
 
 #define DEBUG_MAX_LEVEL 6	/* debug levels range from 0 to 6 */
@@ -31,7 +32,7 @@ struct debug_view;
 typedef struct debug_info {
 	struct debug_info* next;
 	struct debug_info* prev;
-	atomic_t ref_count;
+	refcount_t ref_count;
 	spinlock_t lock;
 	int level;
 	int nr_areas;
diff --git a/arch/s390/include/asm/dis.h b/arch/s390/include/asm/dis.h
index 60323c21938b..37f617dfbede 100644
--- a/arch/s390/include/asm/dis.h
+++ b/arch/s390/include/asm/dis.h
@@ -40,6 +40,8 @@ static inline int insn_length(unsigned char code)
 	return ((((int) code + 64) >> 7) + 1) << 1;
 }
 
+struct pt_regs;
+
 void show_code(struct pt_regs *regs);
 void print_fn_code(unsigned char *code, unsigned long len);
 int insn_to_mnemonic(unsigned char *instruction, char *buf, unsigned int len);
diff --git a/arch/s390/include/asm/kprobes.h b/arch/s390/include/asm/kprobes.h
index 1293c4066cfc..28792ef82c83 100644
--- a/arch/s390/include/asm/kprobes.h
+++ b/arch/s390/include/asm/kprobes.h
@@ -27,12 +27,21 @@
  * 2005-Dec	Used as a template for s390 by Mike Grundy
  *		<grundym@us.ibm.com>
  */
+#include <linux/types.h>
 #include <asm-generic/kprobes.h>
 
 #define BREAKPOINT_INSTRUCTION	0x0002
 
+#define FIXUP_PSW_NORMAL	0x08
+#define FIXUP_BRANCH_NOT_TAKEN	0x04
+#define FIXUP_RETURN_REGISTER	0x02
+#define FIXUP_NOT_REQUIRED	0x01
+
+int probe_is_prohibited_opcode(u16 *insn);
+int probe_get_fixup_type(u16 *insn);
+int probe_is_insn_relative_long(u16 *insn);
+
 #ifdef CONFIG_KPROBES
-#include <linux/types.h>
 #include <linux/ptrace.h>
 #include <linux/percpu.h>
 #include <linux/sched/task_stack.h>
@@ -56,11 +65,6 @@ typedef u16 kprobe_opcode_t;
 
 #define KPROBE_SWAP_INST	0x10
 
-#define FIXUP_PSW_NORMAL	0x08
-#define FIXUP_BRANCH_NOT_TAKEN	0x04
-#define FIXUP_RETURN_REGISTER	0x02
-#define FIXUP_NOT_REQUIRED	0x01
-
 /* Architecture specific copy of original instruction */
 struct arch_specific_insn {
 	/* copy of original instruction */
@@ -90,10 +94,6 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
 int kprobe_exceptions_notify(struct notifier_block *self,
 			     unsigned long val, void *data);
 
-int probe_is_prohibited_opcode(u16 *insn);
-int probe_get_fixup_type(u16 *insn);
-int probe_is_insn_relative_long(u16 *insn);
-
 #define flush_insn_slot(p)	do { } while (0)
 
 #endif /* CONFIG_KPROBES */
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 426614a882a9..65d07ac34647 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -541,7 +541,6 @@ struct kvm_s390_float_interrupt {
 	struct mutex ais_lock;
 	u8 simm;
 	u8 nimm;
-	int ais_enabled;
 };
 
 struct kvm_hw_wp_info_arch {
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 60d395fdc864..aeac013968f2 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -221,11 +221,6 @@ extern void release_thread(struct task_struct *);
 /* Free guarded storage control block for current */
 void exit_thread_gs(void);
 
-/*
- * Return saved PC of a blocked thread.
- */
-extern unsigned long thread_saved_pc(struct task_struct *t);
-
 unsigned long get_wchan(struct task_struct *p);
 #define task_pt_regs(tsk) ((struct pt_regs *) \
         (task_stack_page(tsk) + THREAD_SIZE) - 1)
diff --git a/arch/s390/include/asm/sysinfo.h b/arch/s390/include/asm/sysinfo.h
index 73bff45ced55..e784bed6ed7f 100644
--- a/arch/s390/include/asm/sysinfo.h
+++ b/arch/s390/include/asm/sysinfo.h
@@ -146,7 +146,7 @@ extern int topology_max_mnest;
  * Returns the maximum nesting level supported by the cpu topology code.
  * The current maximum level is 4 which is the drawer level.
  */
-static inline int topology_mnest_limit(void)
+static inline unsigned char topology_mnest_limit(void)
 {
 	return min(topology_max_mnest, 4);
 }
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index 530226b6cb19..86b3e74f569e 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -277,7 +277,7 @@ debug_info_alloc(const char *name, int pages_per_area, int nr_areas,
 	memset(rc->views, 0, DEBUG_MAX_VIEWS * sizeof(struct debug_view *));
 	memset(rc->debugfs_entries, 0 ,DEBUG_MAX_VIEWS *
 		sizeof(struct dentry*));
-	atomic_set(&(rc->ref_count), 0);
+	refcount_set(&(rc->ref_count), 0);
 
 	return rc;
 
@@ -361,7 +361,7 @@ debug_info_create(const char *name, int pages_per_area, int nr_areas,
 	debug_area_last = rc;
 	rc->next = NULL;
 
-	debug_info_get(rc);
+	refcount_set(&rc->ref_count, 1);
 out:
 	return rc;
 }
@@ -416,7 +416,7 @@ static void
 debug_info_get(debug_info_t * db_info)
 {
 	if (db_info)
-		atomic_inc(&db_info->ref_count);
+		refcount_inc(&db_info->ref_count);
 }
 
 /*
@@ -431,7 +431,7 @@ debug_info_put(debug_info_t *db_info)
 
 	if (!db_info)
 		return;
-	if (atomic_dec_and_test(&db_info->ref_count)) {
+	if (refcount_dec_and_test(&db_info->ref_count)) {
 		for (i = 0; i < DEBUG_MAX_VIEWS; i++) {
 			if (!db_info->views[i])
 				continue;
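
The debug.c hunks above are a mechanical atomic_t -> refcount_t conversion. For reference, the object-lifetime pattern being converted looks roughly like this (illustrative struct and function names, not code from this patch):

	#include <linux/refcount.h>
	#include <linux/slab.h>

	/* Illustrative object with a saturating reference count. Unlike a
	 * bare atomic_t, refcount_t refuses to increment from zero and
	 * saturates instead of wrapping, turning overflow and
	 * use-after-free bugs into warnings rather than silent corruption.
	 */
	struct obj {
		refcount_t ref;
	};

	static struct obj *obj_create(void)
	{
		struct obj *o = kzalloc(sizeof(*o), GFP_KERNEL);

		if (o)
			refcount_set(&o->ref, 1);	/* creator holds one reference */
		return o;
	}

	static void obj_get(struct obj *o)
	{
		refcount_inc(&o->ref);			/* WARNs if the count was zero */
	}

	static void obj_put(struct obj *o)
	{
		if (refcount_dec_and_test(&o->ref))	/* true once the count hits zero */
			kfree(o);
	}

This is also why debug_info_create() above now does refcount_set(..., 1) instead of calling debug_info_get(): an increment on a zero-initialized refcount_t would trip the saturation warning.
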
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index a5f5d3bb3dbc..6315037335ba 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -231,12 +231,17 @@ ENTRY(sie64a)
 	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
 .Lsie_done:
 # some program checks are suppressing. C code (e.g. do_protection_exception)
-# will rewind the PSW by the ILC, which is 4 bytes in case of SIE. Other
-# instructions between sie64a and .Lsie_done should not cause program
-# interrupts. So lets use a nop (47 00 00 00) as a landing pad.
+# will rewind the PSW by the ILC, which is often 4 bytes in case of SIE. There
+# are some corner cases (e.g. runtime instrumentation) where ILC is unpredictable.
+# Other instructions between sie64a and .Lsie_done should not cause program
+# interrupts. So lets use 3 nops as a landing pad for all possible rewinds.
 # See also .Lcleanup_sie
-.Lrewind_pad:
-	nop	0
+.Lrewind_pad6:
+	nopr	7
+.Lrewind_pad4:
+	nopr	7
+.Lrewind_pad2:
+	nopr	7
 	.globl sie_exit
 sie_exit:
 	lg	%r14,__SF_EMPTY+8(%r15)		# load guest register save area
@@ -249,7 +254,9 @@ sie_exit:
 	stg	%r14,__SF_EMPTY+16(%r15)	# set exit reason code
 	j	sie_exit
 
-	EX_TABLE(.Lrewind_pad,.Lsie_fault)
+	EX_TABLE(.Lrewind_pad6,.Lsie_fault)
+	EX_TABLE(.Lrewind_pad4,.Lsie_fault)
+	EX_TABLE(.Lrewind_pad2,.Lsie_fault)
 	EX_TABLE(sie_exit,.Lsie_fault)
 EXPORT_SYMBOL(sie64a)
 EXPORT_SYMBOL(sie_exit)
@@ -312,6 +319,7 @@ ENTRY(system_call)
 	lg	%r14,__LC_VDSO_PER_CPU
 	lmg	%r0,%r10,__PT_R0(%r11)
 	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r11)
+.Lsysc_exit_timer:
 	stpt	__LC_EXIT_TIMER
 	mvc	__VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
 	lmg	%r11,%r15,__PT_R11(%r11)
@@ -623,6 +631,7 @@ ENTRY(io_int_handler)
 	lg	%r14,__LC_VDSO_PER_CPU
 	lmg	%r0,%r10,__PT_R0(%r11)
 	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r11)
+.Lio_exit_timer:
 	stpt	__LC_EXIT_TIMER
 	mvc	__VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
 	lmg	%r11,%r15,__PT_R11(%r11)
@@ -1174,15 +1183,23 @@ cleanup_critical:
 	br	%r14
 
 .Lcleanup_sysc_restore:
+	# check if stpt has been executed
 	clg	%r9,BASED(.Lcleanup_sysc_restore_insn)
+	jh	0f
+	mvc	__LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
+	cghi	%r11,__LC_SAVE_AREA_ASYNC
 	je	0f
+	mvc	__LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
+0:	clg	%r9,BASED(.Lcleanup_sysc_restore_insn+8)
+	je	1f
 	lg	%r9,24(%r11)		# get saved pointer to pt_regs
 	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r9)
 	mvc	0(64,%r11),__PT_R8(%r9)
 	lmg	%r0,%r7,__PT_R0(%r9)
-0:	lmg	%r8,%r9,__LC_RETURN_PSW
+1:	lmg	%r8,%r9,__LC_RETURN_PSW
 	br	%r14
 .Lcleanup_sysc_restore_insn:
+	.quad	.Lsysc_exit_timer
 	.quad	.Lsysc_done - 4
 
 .Lcleanup_io_tif:
@@ -1190,15 +1207,20 @@ cleanup_critical:
 	br	%r14
 
 .Lcleanup_io_restore:
+	# check if stpt has been executed
 	clg	%r9,BASED(.Lcleanup_io_restore_insn)
-	je	0f
+	jh	0f
+	mvc	__LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
+0:	clg	%r9,BASED(.Lcleanup_io_restore_insn+8)
+	je	1f
 	lg	%r9,24(%r11)		# get saved r11 pointer to pt_regs
 	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r9)
 	mvc	0(64,%r11),__PT_R8(%r9)
 	lmg	%r0,%r7,__PT_R0(%r9)
-0:	lmg	%r8,%r9,__LC_RETURN_PSW
+1:	lmg	%r8,%r9,__LC_RETURN_PSW
 	br	%r14
 .Lcleanup_io_restore_insn:
+	.quad	.Lio_exit_timer
 	.quad	.Lio_done - 4
 
 .Lcleanup_idle:
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index 27477f34cc0a..d03a6d12c4bd 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -173,6 +173,8 @@ int __init ftrace_dyn_arch_init(void)
 	return 0;
 }
 
+#ifdef CONFIG_MODULES
+
 static int __init ftrace_plt_init(void)
 {
 	unsigned int *ip;
@@ -191,6 +193,8 @@ static int __init ftrace_plt_init(void)
 }
 device_initcall(ftrace_plt_init);
 
+#endif /* CONFIG_MODULES */
+
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 /*
  * Hook the return address and push it in the stack of return addresses
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index e545ffe5155a..8e622bb52f7a 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -564,8 +564,6 @@ static struct kset *ipl_kset;
 
 static void __ipl_run(void *unused)
 {
-	if (MACHINE_IS_LPAR && ipl_info.type == IPL_TYPE_CCW)
-		diag308(DIAG308_LOAD_NORMAL_DUMP, NULL);
 	diag308(DIAG308_LOAD_CLEAR, NULL);
 	if (MACHINE_IS_VM)
 		__cpcmd("IPL", NULL, 0, NULL);
@@ -1088,10 +1086,7 @@ static void __reipl_run(void *unused)
 		break;
 	case REIPL_METHOD_CCW_DIAG:
 		diag308(DIAG308_SET, reipl_block_ccw);
-		if (MACHINE_IS_LPAR)
-			diag308(DIAG308_LOAD_NORMAL_DUMP, NULL);
-		else
-			diag308(DIAG308_LOAD_CLEAR, NULL);
+		diag308(DIAG308_LOAD_CLEAR, NULL);
 		break;
 	case REIPL_METHOD_FCP_RW_DIAG:
 		diag308(DIAG308_SET, reipl_block_fcp);
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 999d7154bbdc..bb32b8618bf6 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -41,31 +41,6 @@
 
 asmlinkage void ret_from_fork(void) asm ("ret_from_fork");
 
-/*
- * Return saved PC of a blocked thread. used in kernel/sched.
- * resume in entry.S does not create a new stack frame, it
- * just stores the registers %r6-%r15 to the frame given by
- * schedule. We want to return the address of the caller of
- * schedule, so we have to walk the backchain one time to
- * find the frame schedule() store its return address.
- */
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
-	struct stack_frame *sf, *low, *high;
-
-	if (!tsk || !task_stack_page(tsk))
-		return 0;
-	low = task_stack_page(tsk);
-	high = (struct stack_frame *) task_pt_regs(tsk);
-	sf = (struct stack_frame *) tsk->thread.ksp;
-	if (sf <= low || sf > high)
-		return 0;
-	sf = (struct stack_frame *) sf->back_chain;
-	if (sf <= low || sf > high)
-		return 0;
-	return sf->gprs[8];
-}
-
 extern void kernel_thread_starter(void);
 
 /*
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 72307f108c40..6e2c42bd1c3b 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -31,8 +31,14 @@ SECTIONS
 {
 	. = 0x00000000;
 	.text : {
-		_text = .;	/* Text and read-only data */
+		/* Text and read-only data */
 		HEAD_TEXT
+		/*
+		 * E.g. perf doesn't like symbols starting at address zero,
+		 * therefore skip the initial PSW and channel program located
+		 * at address zero and let _text start at 0x200.
+		 */
+		_text = 0x200;
 		TEXT_TEXT
 		SCHED_TEXT
 		CPUIDLE_TEXT
diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
index 9da243d94cc3..3b297fa3aa67 100644
--- a/arch/s390/kvm/gaccess.c
+++ b/arch/s390/kvm/gaccess.c
@@ -977,11 +977,12 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
 	ptr = asce.origin * 4096;
 	if (asce.r) {
 		*fake = 1;
+		ptr = 0;
 		asce.dt = ASCE_TYPE_REGION1;
 	}
 	switch (asce.dt) {
 	case ASCE_TYPE_REGION1:
-		if (vaddr.rfx01 > asce.tl && !asce.r)
+		if (vaddr.rfx01 > asce.tl && !*fake)
 			return PGM_REGION_FIRST_TRANS;
 		break;
 	case ASCE_TYPE_REGION2:
@@ -1009,8 +1010,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
 	union region1_table_entry rfte;
 
 	if (*fake) {
-		/* offset in 16EB guest memory block */
-		ptr = ptr + ((unsigned long) vaddr.rsx << 53UL);
+		ptr += (unsigned long) vaddr.rfx << 53;
 		rfte.val = ptr;
 		goto shadow_r2t;
 	}
@@ -1036,8 +1036,7 @@ shadow_r2t:
 	union region2_table_entry rste;
 
 	if (*fake) {
-		/* offset in 8PB guest memory block */
-		ptr = ptr + ((unsigned long) vaddr.rtx << 42UL);
+		ptr += (unsigned long) vaddr.rsx << 42;
 		rste.val = ptr;
 		goto shadow_r3t;
 	}
@@ -1064,8 +1063,7 @@ shadow_r3t:
 	union region3_table_entry rtte;
 
 	if (*fake) {
-		/* offset in 4TB guest memory block */
-		ptr = ptr + ((unsigned long) vaddr.sx << 31UL);
+		ptr += (unsigned long) vaddr.rtx << 31;
 		rtte.val = ptr;
 		goto shadow_sgt;
 	}
@@ -1101,8 +1099,7 @@ shadow_sgt:
 	union segment_table_entry ste;
 
 	if (*fake) {
-		/* offset in 2G guest memory block */
-		ptr = ptr + ((unsigned long) vaddr.sx << 20UL);
+		ptr += (unsigned long) vaddr.sx << 20;
 		ste.val = ptr;
 		goto shadow_pgt;
 	}
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index caf15c8a8948..2d120fef7d90 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -2160,7 +2160,7 @@ static int modify_ais_mode(struct kvm *kvm, struct kvm_device_attr *attr)
 	struct kvm_s390_ais_req req;
 	int ret = 0;
 
-	if (!fi->ais_enabled)
+	if (!test_kvm_facility(kvm, 72))
 		return -ENOTSUPP;
 
 	if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
@@ -2204,7 +2204,7 @@ static int kvm_s390_inject_airq(struct kvm *kvm,
 	};
 	int ret = 0;
 
-	if (!fi->ais_enabled || !adapter->suppressible)
+	if (!test_kvm_facility(kvm, 72) || !adapter->suppressible)
 		return kvm_s390_inject_vm(kvm, &s390int);
 
 	mutex_lock(&fi->ais_lock);
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 689ac48361c6..f28e2e776931 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -558,7 +558,6 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
 	} else {
 		set_kvm_facility(kvm->arch.model.fac_mask, 72);
 		set_kvm_facility(kvm->arch.model.fac_list, 72);
-		kvm->arch.float_int.ais_enabled = 1;
 		r = 0;
 	}
 	mutex_unlock(&kvm->lock);
@@ -1533,7 +1532,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 	mutex_init(&kvm->arch.float_int.ais_lock);
 	kvm->arch.float_int.simm = 0;
 	kvm->arch.float_int.nimm = 0;
-	kvm->arch.float_int.ais_enabled = 0;
 	spin_lock_init(&kvm->arch.float_int.lock);
 	for (i = 0; i < FIRQ_LIST_COUNT; i++)
 		INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
diff --git a/arch/s390/lib/probes.c b/arch/s390/lib/probes.c
index ae90e1ae3607..1963ddbf4ab3 100644
--- a/arch/s390/lib/probes.c
+++ b/arch/s390/lib/probes.c
@@ -4,6 +4,7 @@
  * Copyright IBM Corp. 2014
  */
 
+#include <linux/errno.h>
 #include <asm/kprobes.h>
 #include <asm/dis.h>
 
diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c
index 1e5bb2b86c42..b3bd3f23b8e8 100644
--- a/arch/s390/lib/uaccess.c
+++ b/arch/s390/lib/uaccess.c
@@ -337,8 +337,8 @@ long __strncpy_from_user(char *dst, const char __user *src, long size)
 		return 0;
 	done = 0;
 	do {
-		offset = (size_t)src & ~PAGE_MASK;
-		len = min(size - done, PAGE_SIZE - offset);
+		offset = (size_t)src & (L1_CACHE_BYTES - 1);
+		len = min(size - done, L1_CACHE_BYTES - offset);
 		if (copy_from_user(dst, src, len))
 			return -EFAULT;
 		len_str = strnlen(dst, len);
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index b017daed6887..b854b1da281a 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -101,7 +101,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 		addr = PAGE_ALIGN(addr);
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			goto check_asce_limit;
 	}
 
@@ -151,7 +151,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 		addr = PAGE_ALIGN(addr);
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
-		    (!vma || addr + len <= vm_start_gap(vma)))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			goto check_asce_limit;
 	}
 
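
These mmap hunks (and the matching sh and sparc ones further down) replace vma->vm_start with vm_start_gap(vma) in the free-area checks. A simplified sketch of that helper, assuming the generic implementation added to include/linux/mm.h by the stack-guard-gap rework (the real code also handles VM_GROWSUP architectures):

	/* Sketch: effective start of a VMA once the stack guard gap is
	 * accounted for. For a downward-growing stack VMA the usable start
	 * is pulled down by stack_guard_gap (default 256 pages, tunable via
	 * the stack_guard_gap= boot parameter), so new mappings can no
	 * longer be placed flush against the stack.
	 */
	static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
	{
		unsigned long vm_start = vma->vm_start;

		if (vma->vm_flags & VM_GROWSDOWN) {
			vm_start -= stack_guard_gap;
			if (vm_start > vma->vm_start)	/* clamp on underflow */
				vm_start = 0;
		}
		return vm_start;
	}
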
diff --git a/arch/score/include/asm/processor.h b/arch/score/include/asm/processor.h
index d9a922d8711b..299274581968 100644
--- a/arch/score/include/asm/processor.h
+++ b/arch/score/include/asm/processor.h
@@ -13,7 +13,6 @@ struct task_struct;
  */
 extern void (*cpu_wait)(void);
 
-extern unsigned long thread_saved_pc(struct task_struct *tsk);
 extern void start_thread(struct pt_regs *regs,
 			unsigned long pc, unsigned long sp);
 extern unsigned long get_wchan(struct task_struct *p);
diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
index eb64d7a677cb..6e20241a1ed4 100644
--- a/arch/score/kernel/process.c
+++ b/arch/score/kernel/process.c
@@ -101,11 +101,6 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *r)
 	return 1;
 }
 
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
-	return task_pt_regs(tsk)->cp0_epc;
-}
-
 unsigned long get_wchan(struct task_struct *task)
 {
 	if (!task || task == current || task->state == TASK_RUNNING)
diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
index 08e7af0be4a7..6a1a1297baae 100644
--- a/arch/sh/mm/mmap.c
+++ b/arch/sh/mm/mmap.c
@@ -64,7 +64,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 
@@ -114,7 +114,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 58243b0d21c0..5639c9fe5b55 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -192,9 +192,9 @@ config NR_CPUS
 	int "Maximum number of CPUs"
 	depends on SMP
 	range 2 32 if SPARC32
-	range 2 1024 if SPARC64
+	range 2 4096 if SPARC64
 	default 32 if SPARC32
-	default 64 if SPARC64
+	default 4096 if SPARC64
 
 source kernel/Kconfig.hz
 
@@ -295,9 +295,13 @@ config NUMA
 	depends on SPARC64 && SMP
 
 config NODES_SHIFT
-	int
-	default "4"
+	int "Maximum NUMA Nodes (as a power of 2)"
+	range 4 5 if SPARC64
+	default "5"
 	depends on NEED_MULTIPLE_NODES
+	help
+	  Specify the maximum number of NUMA Nodes available on the target
+	  system.  Increases memory reserved to accommodate various tables.
 
 # Some NUMA nodes have memory ranges that span
 # other nodes.  Even though a pfn is valid and
@@ -573,9 +577,6 @@ config SYSVIPC_COMPAT
 	depends on COMPAT && SYSVIPC
 	default y
 
-config KEYS_COMPAT
-	def_bool y if COMPAT && KEYS
-
 endmenu
 
 source "net/Kconfig"
diff --git a/arch/sparc/include/asm/hugetlb.h b/arch/sparc/include/asm/hugetlb.h
index dcbf985ab243..d1f837dc77a4 100644
--- a/arch/sparc/include/asm/hugetlb.h
+++ b/arch/sparc/include/asm/hugetlb.h
@@ -24,9 +24,11 @@ static inline int is_hugepage_only_range(struct mm_struct *mm,
 static inline int prepare_hugepage_range(struct file *file,
 			unsigned long addr, unsigned long len)
 {
-	if (len & ~HPAGE_MASK)
+	struct hstate *h = hstate_file(file);
+
+	if (len & ~huge_page_mask(h))
 		return -EINVAL;
-	if (addr & ~HPAGE_MASK)
+	if (addr & ~huge_page_mask(h))
 		return -EINVAL;
 	return 0;
 }
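
This change matters once a platform supports more than one hugepage size: the alignment mask has to come from the hstate bound to the specific hugetlbfs file, not from the compile-time HPAGE_MASK. An illustrative helper built on the same generic accessors (hypothetical function, not part of the patch):

	#include <linux/hugetlb.h>

	/* Hypothetical helper: check addr/len alignment against the
	 * hugepage size actually backing this file, which may be any of
	 * the sizes the platform supports rather than the default one.
	 */
	static bool huge_range_aligned(struct file *file, unsigned long addr,
				       unsigned long len)
	{
		struct hstate *h = hstate_file(file);

		return !(len & ~huge_page_mask(h)) &&
		       !(addr & ~huge_page_mask(h));
	}
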
diff --git a/arch/sparc/include/asm/mmu_64.h b/arch/sparc/include/asm/mmu_64.h
index f7de0dbc38af..83b36a5371ff 100644
--- a/arch/sparc/include/asm/mmu_64.h
+++ b/arch/sparc/include/asm/mmu_64.h
@@ -52,7 +52,7 @@
 #define CTX_NR_MASK		TAG_CONTEXT_BITS
 #define CTX_HW_MASK		(CTX_NR_MASK | CTX_PGSZ_MASK)
 
-#define CTX_FIRST_VERSION	((_AC(1,UL) << CTX_VERSION_SHIFT) + _AC(1,UL))
+#define CTX_FIRST_VERSION	BIT(CTX_VERSION_SHIFT)
 #define CTX_VALID(__ctx)	\
 	 (!(((__ctx.sparc64_ctx_val) ^ tlb_context_cache) & CTX_VERSION_MASK))
 #define CTX_HWBITS(__ctx)	((__ctx.sparc64_ctx_val) & CTX_HW_MASK)
diff --git a/arch/sparc/include/asm/mmu_context_64.h b/arch/sparc/include/asm/mmu_context_64.h
index 22fede6eba11..2cddcda4f85f 100644
--- a/arch/sparc/include/asm/mmu_context_64.h
+++ b/arch/sparc/include/asm/mmu_context_64.h
@@ -19,13 +19,8 @@ extern spinlock_t ctx_alloc_lock;
 extern unsigned long tlb_context_cache;
 extern unsigned long mmu_context_bmap[];
 
+DECLARE_PER_CPU(struct mm_struct *, per_cpu_secondary_mm);
 void get_new_mmu_context(struct mm_struct *mm);
-#ifdef CONFIG_SMP
-void smp_new_mmu_context_version(void);
-#else
-#define smp_new_mmu_context_version() do { } while (0)
-#endif
-
 int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
 void destroy_context(struct mm_struct *mm);
 
@@ -76,8 +71,9 @@ void __flush_tlb_mm(unsigned long, unsigned long);
 static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk)
 {
 	unsigned long ctx_valid, flags;
-	int cpu;
+	int cpu = smp_processor_id();
 
+	per_cpu(per_cpu_secondary_mm, cpu) = mm;
 	if (unlikely(mm == &init_mm))
 		return;
 
@@ -123,7 +119,6 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
 	 * for the first time, we must flush that context out of the
 	 * local TLB.
 	 */
-	cpu = smp_processor_id();
 	if (!ctx_valid || !cpumask_test_cpu(cpu, mm_cpumask(mm))) {
 		cpumask_set_cpu(cpu, mm_cpumask(mm));
 		__flush_tlb_mm(CTX_HWBITS(mm->context),
@@ -133,26 +128,7 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
 }
 
 #define deactivate_mm(tsk,mm)	do { } while (0)
-
-/* Activate a new MM instance for the current task. */
-static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm)
-{
-	unsigned long flags;
-	int cpu;
-
-	spin_lock_irqsave(&mm->context.lock, flags);
-	if (!CTX_VALID(mm->context))
-		get_new_mmu_context(mm);
-	cpu = smp_processor_id();
-	if (!cpumask_test_cpu(cpu, mm_cpumask(mm)))
-		cpumask_set_cpu(cpu, mm_cpumask(mm));
-
-	load_secondary_context(mm);
-	__flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT);
-	tsb_context_switch(mm);
-	spin_unlock_irqrestore(&mm->context.lock, flags);
-}
-
+#define activate_mm(active_mm, mm) switch_mm(active_mm, mm, NULL)
 #endif /* !(__ASSEMBLY__) */
 
 #endif /* !(__SPARC64_MMU_CONTEXT_H) */
diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
index ce6f56980aef..cf190728360b 100644
--- a/arch/sparc/include/asm/pgtable_32.h
+++ b/arch/sparc/include/asm/pgtable_32.h
@@ -91,9 +91,9 @@ extern unsigned long pfn_base;
  * ZERO_PAGE is a global shared page that is always zero: used
  * for zero-mapped memory areas etc..
  */
-extern unsigned long empty_zero_page;
+extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
 
-#define ZERO_PAGE(vaddr) (virt_to_page(&empty_zero_page))
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
 
 /*
  * In general all page table modifications should use the V8 atomic
diff --git a/arch/sparc/include/asm/pil.h b/arch/sparc/include/asm/pil.h
index 266937030546..522b43db2ed3 100644
--- a/arch/sparc/include/asm/pil.h
+++ b/arch/sparc/include/asm/pil.h
@@ -20,7 +20,6 @@
 #define PIL_SMP_CALL_FUNC	1
 #define PIL_SMP_RECEIVE_SIGNAL	2
 #define PIL_SMP_CAPTURE		3
-#define PIL_SMP_CTX_NEW_VERSION	4
 #define PIL_DEVICE_IRQ		5
 #define PIL_SMP_CALL_FUNC_SNGL	6
 #define PIL_DEFERRED_PCR_WORK	7
diff --git a/arch/sparc/include/asm/processor_32.h b/arch/sparc/include/asm/processor_32.h
index dd27159819eb..b395e5620c0b 100644
--- a/arch/sparc/include/asm/processor_32.h
+++ b/arch/sparc/include/asm/processor_32.h
@@ -67,9 +67,6 @@ struct thread_struct {
 	.current_ds = KERNEL_DS, \
 }
 
-/* Return saved PC of a blocked thread. */
-unsigned long thread_saved_pc(struct task_struct *t);
-
 /* Do necessary setup to start up a newly executed thread. */
 static inline void start_thread(struct pt_regs * regs, unsigned long pc,
 				    unsigned long sp)
diff --git a/arch/sparc/include/asm/processor_64.h b/arch/sparc/include/asm/processor_64.h
index b58ee9018433..f04dc5a43062 100644
--- a/arch/sparc/include/asm/processor_64.h
+++ b/arch/sparc/include/asm/processor_64.h
@@ -89,9 +89,7 @@ struct thread_struct {
 #include <linux/types.h>
 #include <asm/fpumacro.h>
 
-/* Return saved PC of a blocked thread. */
 struct task_struct;
-unsigned long thread_saved_pc(struct task_struct *);
 
 /* On Uniprocessor, even in RMO processes see TSO semantics */
 #ifdef CONFIG_SMP
diff --git a/arch/sparc/include/asm/setup.h b/arch/sparc/include/asm/setup.h
index 478bf6bb4598..3fae200dd251 100644
--- a/arch/sparc/include/asm/setup.h
+++ b/arch/sparc/include/asm/setup.h
@@ -16,7 +16,7 @@ extern char reboot_command[];
  */
 extern unsigned char boot_cpu_id;
 
-extern unsigned long empty_zero_page;
+extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
 
 extern int serial_console;
 static inline int con_is_present(void)
diff --git a/arch/sparc/include/asm/vio.h b/arch/sparc/include/asm/vio.h
index 8174f6cdbbbb..9dca7a892978 100644
--- a/arch/sparc/include/asm/vio.h
+++ b/arch/sparc/include/asm/vio.h
@@ -327,6 +327,7 @@ struct vio_dev {
 	int			compat_len;
 
 	u64			dev_no;
+	u64			id;
 
 	unsigned long		channel_id;
 
diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c
index b542cc7c8d94..f87265afb175 100644
--- a/arch/sparc/kernel/ds.c
+++ b/arch/sparc/kernel/ds.c
@@ -909,7 +909,7 @@ static int register_services(struct ds_info *dp)
 	pbuf.req.handle = cp->handle;
 	pbuf.req.major = 1;
 	pbuf.req.minor = 0;
-	strcpy(pbuf.req.svc_id, cp->service_id);
+	strcpy(pbuf.id_buf, cp->service_id);
 
 	err = __ds_send(lp, &pbuf, msg_len);
 	if (err > 0)
diff --git a/arch/sparc/kernel/ftrace.c b/arch/sparc/kernel/ftrace.c
index 6bcff698069b..cec54dc4ab81 100644
--- a/arch/sparc/kernel/ftrace.c
+++ b/arch/sparc/kernel/ftrace.c
@@ -130,17 +130,16 @@ unsigned long prepare_ftrace_return(unsigned long parent,
 	if (unlikely(atomic_read(&current->tracing_graph_pause)))
 		return parent + 8UL;
 
-	if (ftrace_push_return_trace(parent, self_addr, &trace.depth,
-				     frame_pointer, NULL) == -EBUSY)
-		return parent + 8UL;
-
 	trace.func = self_addr;
+	trace.depth = current->curr_ret_stack + 1;
 
 	/* Only trace if the calling function expects to */
-	if (!ftrace_graph_entry(&trace)) {
-		current->curr_ret_stack--;
+	if (!ftrace_graph_entry(&trace))
+		return parent + 8UL;
+
+	if (ftrace_push_return_trace(parent, self_addr, &trace.depth,
+				     frame_pointer, NULL) == -EBUSY)
 		return parent + 8UL;
-	}
 
 	return return_hooker;
 }
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index 4d0248aa0928..99dd133a029f 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -1034,17 +1034,26 @@ static void __init init_cpu_send_mondo_info(struct trap_per_cpu *tb)
 {
 #ifdef CONFIG_SMP
 	unsigned long page;
+	void *mondo, *p;
 
-	BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64));
+	BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > PAGE_SIZE);
+
+	/* Make sure mondo block is 64byte aligned */
+	p = kzalloc(127, GFP_KERNEL);
+	if (!p) {
+		prom_printf("SUN4V: Error, cannot allocate mondo block.\n");
+		prom_halt();
+	}
+	mondo = (void *)(((unsigned long)p + 63) & ~0x3f);
+	tb->cpu_mondo_block_pa = __pa(mondo);
 
 	page = get_zeroed_page(GFP_KERNEL);
 	if (!page) {
-		prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n");
+		prom_printf("SUN4V: Error, cannot allocate cpu list page.\n");
 		prom_halt();
 	}
 
-	tb->cpu_mondo_block_pa = __pa(page);
-	tb->cpu_list_pa = __pa(page + 64);
+	tb->cpu_list_pa = __pa(page);
 #endif
 }
 
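
The hunk above over-allocates and rounds the pointer up by hand because kzalloc() alone does not guarantee 64-byte alignment for arbitrary sizes. The arithmetic generalizes roughly as follows (illustrative helper, not part of the patch; note that kfree() would need the raw pointer rather than the aligned one, which is harmless here since the boot-time mondo block is never freed):

	#include <linux/slab.h>

	/* Illustrative: return a 64-byte-aligned block of at least size
	 * bytes. Allocating size + 63 bytes guarantees that an aligned
	 * address exists inside the buffer; masking rounds the pointer up
	 * to it. With size = 64 this is exactly the kzalloc(127) above.
	 */
	static void *alloc_aligned64(size_t size)
	{
		void *p = kzalloc(size + 63, GFP_KERNEL);

		if (!p)
			return NULL;
		return (void *)(((unsigned long)p + 63) & ~0x3fUL);
	}
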
diff --git a/arch/sparc/kernel/kernel.h b/arch/sparc/kernel/kernel.h
index c9804551262c..6ae1e77be0bf 100644
--- a/arch/sparc/kernel/kernel.h
+++ b/arch/sparc/kernel/kernel.h
@@ -37,7 +37,6 @@ void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr
 /* smp_64.c */
 void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs);
 void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs);
-void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs);
 void __irq_entry smp_penguin_jailcell(int irq, struct pt_regs *regs);
 void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs);
 
diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
index b6dac8e980f0..9245f93398c7 100644
--- a/arch/sparc/kernel/process_32.c
+++ b/arch/sparc/kernel/process_32.c
@@ -177,14 +177,6 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
 }
 
 /*
- * Note: sparc64 has a pretty intricated thread_saved_pc, check it out.
- */
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
-	return task_thread_info(tsk)->kpc;
-}
-
-/*
  * Free current thread data structures etc..
  */
 void exit_thread(struct task_struct *tsk)
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index 1badc493e62e..b96104da5bd6 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -400,25 +400,6 @@ core_initcall(sparc_sysrq_init);
 
 #endif
 
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
-	struct thread_info *ti = task_thread_info(tsk);
-	unsigned long ret = 0xdeadbeefUL;
-
-	if (ti && ti->ksp) {
-		unsigned long *sp;
-		sp = (unsigned long *)(ti->ksp + STACK_BIAS);
-		if (((unsigned long)sp & (sizeof(long) - 1)) == 0UL &&
-		    sp[14]) {
-			unsigned long *fp;
-			fp = (unsigned long *)(sp[14] + STACK_BIAS);
-			if (((unsigned long)fp & (sizeof(long) - 1)) == 0UL)
-				ret = fp[15];
-		}
-	}
-	return ret;
-}
-
 /* Free current thread data structures etc.. */
 void exit_thread(struct task_struct *tsk)
 {
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index b3bc0ac757cc..fdf31040a7dc 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -964,37 +964,6 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
964 preempt_enable(); 964 preempt_enable();
965} 965}
966 966
967void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
968{
969 struct mm_struct *mm;
970 unsigned long flags;
971
972 clear_softint(1 << irq);
973
974 /* See if we need to allocate a new TLB context because
975 * the version of the one we are using is now out of date.
976 */
977 mm = current->active_mm;
978 if (unlikely(!mm || (mm == &init_mm)))
979 return;
980
981 spin_lock_irqsave(&mm->context.lock, flags);
982
983 if (unlikely(!CTX_VALID(mm->context)))
984 get_new_mmu_context(mm);
985
986 spin_unlock_irqrestore(&mm->context.lock, flags);
987
988 load_secondary_context(mm);
989 __flush_tlb_mm(CTX_HWBITS(mm->context),
990 SECONDARY_CONTEXT);
991}
992
993void smp_new_mmu_context_version(void)
994{
995 smp_cross_call(&xcall_new_mmu_context_version, 0, 0, 0);
996}
997
998#ifdef CONFIG_KGDB 967#ifdef CONFIG_KGDB
999void kgdb_roundup_cpus(unsigned long flags) 968void kgdb_roundup_cpus(unsigned long flags)
1000{ 969{
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
index ef4520efc813..043544d0cda3 100644
--- a/arch/sparc/kernel/sys_sparc_64.c
+++ b/arch/sparc/kernel/sys_sparc_64.c
@@ -120,7 +120,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
120 120
121 vma = find_vma(mm, addr); 121 vma = find_vma(mm, addr);
122 if (task_size - len >= addr && 122 if (task_size - len >= addr &&
123 (!vma || addr + len <= vma->vm_start)) 123 (!vma || addr + len <= vm_start_gap(vma)))
124 return addr; 124 return addr;
125 } 125 }
126 126
@@ -183,7 +183,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
183 183
184 vma = find_vma(mm, addr); 184 vma = find_vma(mm, addr);
185 if (task_size - len >= addr && 185 if (task_size - len >= addr &&
186 (!vma || addr + len <= vma->vm_start)) 186 (!vma || addr + len <= vm_start_gap(vma)))
187 return addr; 187 return addr;
188 } 188 }
189 189
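
These two hunks (and the matching ones in hugetlbpage.c here and on x86 and
tile below) are this architecture's side of the stack guard gap rework: an
explicit address hint is validated against vm_start_gap(vma) instead of
vma->vm_start, so the hint cannot land inside the gap a downward-growing
stack may still consume. A simplified sketch of the helper, assuming the
<linux/mm.h> definitions from this series (stack_guard_gap is the tunable
gap size, 256 pages by default):

#include <linux/mm.h>

static unsigned long vm_start_gap_sketch(struct vm_area_struct *vma)
{
	unsigned long start = vma->vm_start;

	if (vma->vm_flags & VM_GROWSDOWN) {
		start -= stack_guard_gap;
		if (start > vma->vm_start)	/* underflow check */
			start = 0;
	}
	return start;
}
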
diff --git a/arch/sparc/kernel/tsb.S b/arch/sparc/kernel/tsb.S
index 10689cfd0ad4..07c0df924960 100644
--- a/arch/sparc/kernel/tsb.S
+++ b/arch/sparc/kernel/tsb.S
@@ -455,13 +455,16 @@ __tsb_context_switch:
455 .type copy_tsb,#function 455 .type copy_tsb,#function
456copy_tsb: /* %o0=old_tsb_base, %o1=old_tsb_size 456copy_tsb: /* %o0=old_tsb_base, %o1=old_tsb_size
457 * %o2=new_tsb_base, %o3=new_tsb_size 457 * %o2=new_tsb_base, %o3=new_tsb_size
458 * %o4=page_size_shift
458 */ 459 */
459 sethi %uhi(TSB_PASS_BITS), %g7 460 sethi %uhi(TSB_PASS_BITS), %g7
460 srlx %o3, 4, %o3 461 srlx %o3, 4, %o3
461 add %o0, %o1, %g1 /* end of old tsb */ 462 add %o0, %o1, %o1 /* end of old tsb */
462 sllx %g7, 32, %g7 463 sllx %g7, 32, %g7
463 sub %o3, 1, %o3 /* %o3 == new tsb hash mask */ 464 sub %o3, 1, %o3 /* %o3 == new tsb hash mask */
464 465
466 mov %o4, %g1 /* page_size_shift */
467
465661: prefetcha [%o0] ASI_N, #one_read 468661: prefetcha [%o0] ASI_N, #one_read
466 .section .tsb_phys_patch, "ax" 469 .section .tsb_phys_patch, "ax"
467 .word 661b 470 .word 661b
@@ -486,9 +489,9 @@ copy_tsb: /* %o0=old_tsb_base, %o1=old_tsb_size
486 /* This can definitely be computed faster... */ 489 /* This can definitely be computed faster... */
487 srlx %o0, 4, %o5 /* Build index */ 490 srlx %o0, 4, %o5 /* Build index */
488 and %o5, 511, %o5 /* Mask index */ 491 and %o5, 511, %o5 /* Mask index */
489 sllx %o5, PAGE_SHIFT, %o5 /* Put into vaddr position */ 492 sllx %o5, %g1, %o5 /* Put into vaddr position */
490 or %o4, %o5, %o4 /* Full VADDR. */ 493 or %o4, %o5, %o4 /* Full VADDR. */
491 srlx %o4, PAGE_SHIFT, %o4 /* Shift down to create index */ 494 srlx %o4, %g1, %o4 /* Shift down to create index */
492 and %o4, %o3, %o4 /* Mask with new_tsb_nents-1 */ 495 and %o4, %o3, %o4 /* Mask with new_tsb_nents-1 */
493 sllx %o4, 4, %o4 /* Shift back up into tsb ent offset */ 496 sllx %o4, 4, %o4 /* Shift back up into tsb ent offset */
494 TSB_STORE(%o2 + %o4, %g2) /* Store TAG */ 497 TSB_STORE(%o2 + %o4, %g2) /* Store TAG */
@@ -496,7 +499,7 @@ copy_tsb: /* %o0=old_tsb_base, %o1=old_tsb_size
496 TSB_STORE(%o2 + %o4, %g3) /* Store TTE */ 499 TSB_STORE(%o2 + %o4, %g3) /* Store TTE */
497 500
49880: add %o0, 16, %o0 50180: add %o0, 16, %o0
499 cmp %o0, %g1 502 cmp %o0, %o1
500 bne,pt %xcc, 90b 503 bne,pt %xcc, 90b
501 nop 504 nop
502 505
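
copy_tsb used to hard-code PAGE_SHIFT when rebuilding a virtual address from
an old TSB index, which scrambles entries whenever the huge-page TSB (hashed
by REAL_HPAGE_SHIFT) is resized; the shift is now a fifth argument, passed in
%o4 and parked in %g1. A C restatement of the per-entry rehash, as a sketch
only (the real code is the assembly above, and va_high stands in for the
upper address bits recovered from the TSB tag):

static unsigned long rehash_tsb_entry(unsigned long old_index,
				      unsigned long va_high,
				      unsigned long shift,
				      unsigned long new_hash_mask)
{
	/* Rebuild the virtual address from tag bits plus old index... */
	unsigned long vaddr = va_high | ((old_index & 511) << shift);

	/* ...then hash it into the new table at its new size. */
	return (vaddr >> shift) & new_hash_mask;
}
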
diff --git a/arch/sparc/kernel/ttable_64.S b/arch/sparc/kernel/ttable_64.S
index 7bd8f6556352..efe93ab4a9c0 100644
--- a/arch/sparc/kernel/ttable_64.S
+++ b/arch/sparc/kernel/ttable_64.S
@@ -50,7 +50,7 @@ tl0_resv03e: BTRAP(0x3e) BTRAP(0x3f) BTRAP(0x40)
50tl0_irq1: TRAP_IRQ(smp_call_function_client, 1) 50tl0_irq1: TRAP_IRQ(smp_call_function_client, 1)
51tl0_irq2: TRAP_IRQ(smp_receive_signal_client, 2) 51tl0_irq2: TRAP_IRQ(smp_receive_signal_client, 2)
52tl0_irq3: TRAP_IRQ(smp_penguin_jailcell, 3) 52tl0_irq3: TRAP_IRQ(smp_penguin_jailcell, 3)
53tl0_irq4: TRAP_IRQ(smp_new_mmu_context_version_client, 4) 53tl0_irq4: BTRAP(0x44)
54#else 54#else
55tl0_irq1: BTRAP(0x41) 55tl0_irq1: BTRAP(0x41)
56tl0_irq2: BTRAP(0x42) 56tl0_irq2: BTRAP(0x42)
diff --git a/arch/sparc/kernel/vio.c b/arch/sparc/kernel/vio.c
index f6bb857254fc..075d38980dee 100644
--- a/arch/sparc/kernel/vio.c
+++ b/arch/sparc/kernel/vio.c
@@ -302,13 +302,16 @@ static struct vio_dev *vio_create_one(struct mdesc_handle *hp, u64 mp,
302 if (!id) { 302 if (!id) {
303 dev_set_name(&vdev->dev, "%s", bus_id_name); 303 dev_set_name(&vdev->dev, "%s", bus_id_name);
304 vdev->dev_no = ~(u64)0; 304 vdev->dev_no = ~(u64)0;
305 vdev->id = ~(u64)0;
305 } else if (!cfg_handle) { 306 } else if (!cfg_handle) {
306 dev_set_name(&vdev->dev, "%s-%llu", bus_id_name, *id); 307 dev_set_name(&vdev->dev, "%s-%llu", bus_id_name, *id);
307 vdev->dev_no = *id; 308 vdev->dev_no = *id;
309 vdev->id = ~(u64)0;
308 } else { 310 } else {
309 dev_set_name(&vdev->dev, "%s-%llu-%llu", bus_id_name, 311 dev_set_name(&vdev->dev, "%s-%llu-%llu", bus_id_name,
310 *cfg_handle, *id); 312 *cfg_handle, *id);
311 vdev->dev_no = *cfg_handle; 313 vdev->dev_no = *cfg_handle;
314 vdev->id = *id;
312 } 315 }
313 316
314 vdev->dev.parent = parent; 317 vdev->dev.parent = parent;
@@ -351,27 +354,84 @@ static void vio_add(struct mdesc_handle *hp, u64 node)
351 (void) vio_create_one(hp, node, &root_vdev->dev); 354 (void) vio_create_one(hp, node, &root_vdev->dev);
352} 355}
353 356
357struct vio_md_node_query {
358 const char *type;
359 u64 dev_no;
360 u64 id;
361};
362
354static int vio_md_node_match(struct device *dev, void *arg) 363static int vio_md_node_match(struct device *dev, void *arg)
355{ 364{
365 struct vio_md_node_query *query = (struct vio_md_node_query *) arg;
356 struct vio_dev *vdev = to_vio_dev(dev); 366 struct vio_dev *vdev = to_vio_dev(dev);
357 367
358 if (vdev->mp == (u64) arg) 368 if (vdev->dev_no != query->dev_no)
359 return 1; 369 return 0;
370 if (vdev->id != query->id)
371 return 0;
372 if (strcmp(vdev->type, query->type))
373 return 0;
360 374
361 return 0; 375 return 1;
362} 376}
363 377
364static void vio_remove(struct mdesc_handle *hp, u64 node) 378static void vio_remove(struct mdesc_handle *hp, u64 node)
365{ 379{
380 const char *type;
381 const u64 *id, *cfg_handle;
382 u64 a;
383 struct vio_md_node_query query;
366 struct device *dev; 384 struct device *dev;
367 385
368 dev = device_find_child(&root_vdev->dev, (void *) node, 386 type = mdesc_get_property(hp, node, "device-type", NULL);
387 if (!type) {
388 type = mdesc_get_property(hp, node, "name", NULL);
389 if (!type)
390 type = mdesc_node_name(hp, node);
391 }
392
393 query.type = type;
394
395 id = mdesc_get_property(hp, node, "id", NULL);
396 cfg_handle = NULL;
397 mdesc_for_each_arc(a, hp, node, MDESC_ARC_TYPE_BACK) {
398 u64 target;
399
400 target = mdesc_arc_target(hp, a);
401 cfg_handle = mdesc_get_property(hp, target,
402 "cfg-handle", NULL);
403 if (cfg_handle)
404 break;
405 }
406
407 if (!id) {
408 query.dev_no = ~(u64)0;
409 query.id = ~(u64)0;
410 } else if (!cfg_handle) {
411 query.dev_no = *id;
412 query.id = ~(u64)0;
413 } else {
414 query.dev_no = *cfg_handle;
415 query.id = *id;
416 }
417
418 dev = device_find_child(&root_vdev->dev, &query,
369 vio_md_node_match); 419 vio_md_node_match);
370 if (dev) { 420 if (dev) {
371 printk(KERN_INFO "VIO: Removing device %s\n", dev_name(dev)); 421 printk(KERN_INFO "VIO: Removing device %s\n", dev_name(dev));
372 422
373 device_unregister(dev); 423 device_unregister(dev);
374 put_device(dev); 424 put_device(dev);
425 } else {
426 if (!id)
427 printk(KERN_ERR "VIO: Removed unknown %s node.\n",
428 type);
429 else if (!cfg_handle)
430 printk(KERN_ERR "VIO: Removed unknown %s node %llu.\n",
431 type, *id);
432 else
433 printk(KERN_ERR "VIO: Removed unknown %s node %llu-%llu.\n",
434 type, *cfg_handle, *id);
375 } 435 }
376} 436}
377 437
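
The old vio_remove() looked the child up by machine-description node pointer
(vdev->mp), which is not stable across MD updates, so removal could miss the
device entirely. The rework keys the lookup on the same (type, cfg-handle,
id) identity that vio_create_one() uses to name the device. Both sites
open-code the three-way rule; a hypothetical helper (not in the patch) makes
it explicit, using the query struct added above:

static void vio_fill_identity(struct vio_md_node_query *q,
			      const u64 *id, const u64 *cfg_handle)
{
	if (!id) {			/* no "id" property at all */
		q->dev_no = ~(u64)0;
		q->id = ~(u64)0;
	} else if (!cfg_handle) {	/* id only */
		q->dev_no = *id;
		q->id = ~(u64)0;
	} else {			/* cfg-handle + id */
		q->dev_no = *cfg_handle;
		q->id = *id;
	}
}
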
diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
index 69912d2f8b54..07c03e72d812 100644
--- a/arch/sparc/lib/Makefile
+++ b/arch/sparc/lib/Makefile
@@ -15,6 +15,7 @@ lib-$(CONFIG_SPARC32) += copy_user.o locks.o
15lib-$(CONFIG_SPARC64) += atomic_64.o 15lib-$(CONFIG_SPARC64) += atomic_64.o
16lib-$(CONFIG_SPARC32) += lshrdi3.o ashldi3.o 16lib-$(CONFIG_SPARC32) += lshrdi3.o ashldi3.o
17lib-$(CONFIG_SPARC32) += muldi3.o bitext.o cmpdi2.o 17lib-$(CONFIG_SPARC32) += muldi3.o bitext.o cmpdi2.o
18lib-$(CONFIG_SPARC64) += multi3.o
18 19
19lib-$(CONFIG_SPARC64) += copy_page.o clear_page.o bzero.o 20lib-$(CONFIG_SPARC64) += copy_page.o clear_page.o bzero.o
20lib-$(CONFIG_SPARC64) += csum_copy.o csum_copy_from_user.o csum_copy_to_user.o 21lib-$(CONFIG_SPARC64) += csum_copy.o csum_copy_from_user.o csum_copy_to_user.o
diff --git a/arch/sparc/lib/multi3.S b/arch/sparc/lib/multi3.S
new file mode 100644
index 000000000000..d6b6c97fe3c7
--- /dev/null
+++ b/arch/sparc/lib/multi3.S
@@ -0,0 +1,35 @@
1#include <linux/linkage.h>
2#include <asm/export.h>
3
4 .text
5 .align 4
6ENTRY(__multi3) /* %o0 = u, %o1 = v */
7 mov %o1, %g1
8 srl %o3, 0, %g4
9 mulx %g4, %g1, %o1
10 srlx %g1, 0x20, %g3
11 mulx %g3, %g4, %g5
12 sllx %g5, 0x20, %o5
13 srl %g1, 0, %g4
14 sub %o1, %o5, %o5
15 srlx %o5, 0x20, %o5
16 addcc %g5, %o5, %g5
17 srlx %o3, 0x20, %o5
18 mulx %g4, %o5, %g4
19 mulx %g3, %o5, %o5
20 sethi %hi(0x80000000), %g3
21 addcc %g5, %g4, %g5
22 srlx %g5, 0x20, %g5
23 add %g3, %g3, %g3
24 movcc %xcc, %g0, %g3
25 addcc %o5, %g5, %o5
26 sllx %g4, 0x20, %g4
27 add %o1, %g4, %o1
28 add %o5, %g3, %g2
29 mulx %g1, %o2, %g1
30 add %g1, %g2, %g1
31 mulx %o0, %o3, %o0
32 retl
33 add %g1, %o0, %o0
34ENDPROC(__multi3)
35EXPORT_SYMBOL(__multi3)
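
__multi3 is the libgcc helper for a 128-bit (TImode) multiply; newer GCC
releases emit calls to it on sparc64, so the kernel must carry its own copy.
The assembly forms the 64x64->128 product of the low halves and folds in the
cross terms, which can only affect the upper 64 bits. A C model of the same
computation, for reference only (the kernel ships the assembly):

#include <stdint.h>

struct u128 { uint64_t hi, lo; };

static struct u128 multi3(struct u128 u, struct u128 v)
{
	uint64_t u0 = u.lo & 0xffffffffu, u1 = u.lo >> 32;
	uint64_t v0 = v.lo & 0xffffffffu, v1 = v.lo >> 32;
	uint64_t lo, mid, hi;
	struct u128 r;

	/* Schoolbook 64x64 -> 128 product of the low halves. */
	lo  = u0 * v0;
	mid = u1 * v0 + (lo >> 32);		/* cannot overflow */
	hi  = u1 * v1 + (mid >> 32);
	mid = (mid & 0xffffffffu) + u0 * v1;
	hi += mid >> 32;

	r.lo = (mid << 32) | (lo & 0xffffffffu);
	/* Cross terms wrap mod 2^128, so they land in the high word. */
	r.hi = hi + u.lo * v.hi + u.hi * v.lo;
	return r;
}
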
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
index 7c29d38e6b99..88855e383b34 100644
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -120,7 +120,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
120 addr = ALIGN(addr, huge_page_size(h)); 120 addr = ALIGN(addr, huge_page_size(h));
121 vma = find_vma(mm, addr); 121 vma = find_vma(mm, addr);
122 if (task_size - len >= addr && 122 if (task_size - len >= addr &&
123 (!vma || addr + len <= vma->vm_start)) 123 (!vma || addr + len <= vm_start_gap(vma)))
124 return addr; 124 return addr;
125 } 125 }
126 if (mm->get_unmapped_area == arch_get_unmapped_area) 126 if (mm->get_unmapped_area == arch_get_unmapped_area)
diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
index c6afe98de4d9..3bd0d513bddb 100644
--- a/arch/sparc/mm/init_32.c
+++ b/arch/sparc/mm/init_32.c
@@ -290,7 +290,7 @@ void __init mem_init(void)
290 290
291 291
292 /* Saves us work later. */ 292 /* Saves us work later. */
293 memset((void *)&empty_zero_page, 0, PAGE_SIZE); 293 memset((void *)empty_zero_page, 0, PAGE_SIZE);
294 294
295 i = last_valid_pfn >> ((20 - PAGE_SHIFT) + 5); 295 i = last_valid_pfn >> ((20 - PAGE_SHIFT) + 5);
296 i += 1; 296 i += 1;
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 0cda653ae007..3c40ebd50f92 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -358,7 +358,8 @@ static int __init setup_hugepagesz(char *string)
358 } 358 }
359 359
360 if ((hv_pgsz_mask & cpu_pgsz_mask) == 0U) { 360 if ((hv_pgsz_mask & cpu_pgsz_mask) == 0U) {
361 pr_warn("hugepagesz=%llu not supported by MMU.\n", 361 hugetlb_bad_size();
362 pr_err("hugepagesz=%llu not supported by MMU.\n",
362 hugepage_size); 363 hugepage_size);
363 goto out; 364 goto out;
364 } 365 }
@@ -706,10 +707,58 @@ EXPORT_SYMBOL(__flush_dcache_range);
706 707
707/* get_new_mmu_context() uses "cache + 1". */ 708/* get_new_mmu_context() uses "cache + 1". */
708DEFINE_SPINLOCK(ctx_alloc_lock); 709DEFINE_SPINLOCK(ctx_alloc_lock);
709unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1; 710unsigned long tlb_context_cache = CTX_FIRST_VERSION;
710#define MAX_CTX_NR (1UL << CTX_NR_BITS) 711#define MAX_CTX_NR (1UL << CTX_NR_BITS)
711#define CTX_BMAP_SLOTS BITS_TO_LONGS(MAX_CTX_NR) 712#define CTX_BMAP_SLOTS BITS_TO_LONGS(MAX_CTX_NR)
712DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR); 713DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR);
714DEFINE_PER_CPU(struct mm_struct *, per_cpu_secondary_mm) = {0};
715
716static void mmu_context_wrap(void)
717{
718 unsigned long old_ver = tlb_context_cache & CTX_VERSION_MASK;
719 unsigned long new_ver, new_ctx, old_ctx;
720 struct mm_struct *mm;
721 int cpu;
722
723 bitmap_zero(mmu_context_bmap, 1 << CTX_NR_BITS);
724
725 /* Reserve kernel context */
726 set_bit(0, mmu_context_bmap);
727
728 new_ver = (tlb_context_cache & CTX_VERSION_MASK) + CTX_FIRST_VERSION;
729 if (unlikely(new_ver == 0))
730 new_ver = CTX_FIRST_VERSION;
731 tlb_context_cache = new_ver;
732
733 /*
734	 * Make sure that any new mm added into per_cpu_secondary_mm
735	 * goes through the get_new_mmu_context() path.
736 */
737 mb();
738
739 /*
740	 * Update the version to current on those CPUs that have a valid
741	 * secondary context.
742 */
743 for_each_online_cpu(cpu) {
744 /*
745 * If a new mm is stored after we took this mm from the array,
746	 * it will go through the get_new_mmu_context() path, because we
747 * already bumped the version in tlb_context_cache.
748 */
749 mm = per_cpu(per_cpu_secondary_mm, cpu);
750
751 if (unlikely(!mm || mm == &init_mm))
752 continue;
753
754 old_ctx = mm->context.sparc64_ctx_val;
755 if (likely((old_ctx & CTX_VERSION_MASK) == old_ver)) {
756 new_ctx = (old_ctx & ~CTX_VERSION_MASK) | new_ver;
757 set_bit(new_ctx & CTX_NR_MASK, mmu_context_bmap);
758 mm->context.sparc64_ctx_val = new_ctx;
759 }
760 }
761}
713 762
714/* Caller does TLB context flushing on local CPU if necessary. 763/* Caller does TLB context flushing on local CPU if necessary.
715 * The caller also ensures that CTX_VALID(mm->context) is false. 764 * The caller also ensures that CTX_VALID(mm->context) is false.
@@ -725,48 +774,30 @@ void get_new_mmu_context(struct mm_struct *mm)
725{ 774{
726 unsigned long ctx, new_ctx; 775 unsigned long ctx, new_ctx;
727 unsigned long orig_pgsz_bits; 776 unsigned long orig_pgsz_bits;
728 int new_version;
729 777
730 spin_lock(&ctx_alloc_lock); 778 spin_lock(&ctx_alloc_lock);
779retry:
780 /* wrap might have happened, test again if our context became valid */
781 if (unlikely(CTX_VALID(mm->context)))
782 goto out;
731 orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK); 783 orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
732 ctx = (tlb_context_cache + 1) & CTX_NR_MASK; 784 ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
733 new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx); 785 new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
734 new_version = 0;
735 if (new_ctx >= (1 << CTX_NR_BITS)) { 786 if (new_ctx >= (1 << CTX_NR_BITS)) {
736 new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1); 787 new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
737 if (new_ctx >= ctx) { 788 if (new_ctx >= ctx) {
738 int i; 789 mmu_context_wrap();
739 new_ctx = (tlb_context_cache & CTX_VERSION_MASK) + 790 goto retry;
740 CTX_FIRST_VERSION;
741 if (new_ctx == 1)
742 new_ctx = CTX_FIRST_VERSION;
743
744 /* Don't call memset, for 16 entries that's just
745 * plain silly...
746 */
747 mmu_context_bmap[0] = 3;
748 mmu_context_bmap[1] = 0;
749 mmu_context_bmap[2] = 0;
750 mmu_context_bmap[3] = 0;
751 for (i = 4; i < CTX_BMAP_SLOTS; i += 4) {
752 mmu_context_bmap[i + 0] = 0;
753 mmu_context_bmap[i + 1] = 0;
754 mmu_context_bmap[i + 2] = 0;
755 mmu_context_bmap[i + 3] = 0;
756 }
757 new_version = 1;
758 goto out;
759 } 791 }
760 } 792 }
793 if (mm->context.sparc64_ctx_val)
794 cpumask_clear(mm_cpumask(mm));
761 mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63)); 795 mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
762 new_ctx |= (tlb_context_cache & CTX_VERSION_MASK); 796 new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
763out:
764 tlb_context_cache = new_ctx; 797 tlb_context_cache = new_ctx;
765 mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits; 798 mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
799out:
766 spin_unlock(&ctx_alloc_lock); 800 spin_unlock(&ctx_alloc_lock);
767
768 if (unlikely(new_version))
769 smp_new_mmu_context_version();
770} 801}
771 802
772static int numa_enabled = 1; 803static int numa_enabled = 1;
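
This is the heart of the series: instead of broadcasting a "new MMU context
version" cross-call (deleted from smp_64.c, ttable_64.S and ultra.S in this
patch) and trusting every CPU to react in time, a version wrap is handled
synchronously under ctx_alloc_lock. mmu_context_wrap() bumps the version,
re-reserves the context of every mm currently installed as a secondary
context (tracked in the new per_cpu_secondary_mm array), and
get_new_mmu_context() just retries; the CTX_VALID() recheck under the lock
catches the case where another thread already revalidated this mm. A toy
model of the retry shape, with hypothetical names and none of the locking or
per-CPU bookkeeping:

#include <stdbool.h>

#define NCTX 4

static bool used[NCTX];
static int version;

static int find_free_context(void)
{
	int i;

	for (i = 1; i < NCTX; i++)	/* slot 0 is the kernel context */
		if (!used[i])
			return i;
	return -1;
}

static void wrap(void)
{
	int i;

	for (i = 1; i < NCTX; i++)	/* the real code re-reserves live mms */
		used[i] = false;
	version++;			/* new generation of contexts */
}

static int get_new_context(void)
{
	int ctx;

	while ((ctx = find_free_context()) < 0)
		wrap();			/* bump version, then retry */
	used[ctx] = true;
	return ctx;
}
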
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
index bedf08b22a47..0d4b998c7d7b 100644
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -496,7 +496,8 @@ retry_tsb_alloc:
496 extern void copy_tsb(unsigned long old_tsb_base, 496 extern void copy_tsb(unsigned long old_tsb_base,
497 unsigned long old_tsb_size, 497 unsigned long old_tsb_size,
498 unsigned long new_tsb_base, 498 unsigned long new_tsb_base,
499 unsigned long new_tsb_size); 499 unsigned long new_tsb_size,
500 unsigned long page_size_shift);
500 unsigned long old_tsb_base = (unsigned long) old_tsb; 501 unsigned long old_tsb_base = (unsigned long) old_tsb;
501 unsigned long new_tsb_base = (unsigned long) new_tsb; 502 unsigned long new_tsb_base = (unsigned long) new_tsb;
502 503
@@ -504,7 +505,9 @@ retry_tsb_alloc:
504 old_tsb_base = __pa(old_tsb_base); 505 old_tsb_base = __pa(old_tsb_base);
505 new_tsb_base = __pa(new_tsb_base); 506 new_tsb_base = __pa(new_tsb_base);
506 } 507 }
507 copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size); 508 copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size,
509 tsb_index == MM_TSB_BASE ?
510 PAGE_SHIFT : REAL_HPAGE_SHIFT);
508 } 511 }
509 512
510 mm->context.tsb_block[tsb_index].tsb = new_tsb; 513 mm->context.tsb_block[tsb_index].tsb = new_tsb;
diff --git a/arch/sparc/mm/ultra.S b/arch/sparc/mm/ultra.S
index 5d2fd6cd3189..fcf4d27a38fb 100644
--- a/arch/sparc/mm/ultra.S
+++ b/arch/sparc/mm/ultra.S
@@ -971,11 +971,6 @@ xcall_capture:
971 wr %g0, (1 << PIL_SMP_CAPTURE), %set_softint 971 wr %g0, (1 << PIL_SMP_CAPTURE), %set_softint
972 retry 972 retry
973 973
974 .globl xcall_new_mmu_context_version
975xcall_new_mmu_context_version:
976 wr %g0, (1 << PIL_SMP_CTX_NEW_VERSION), %set_softint
977 retry
978
979#ifdef CONFIG_KGDB 974#ifdef CONFIG_KGDB
980 .globl xcall_kgdb_capture 975 .globl xcall_kgdb_capture
981xcall_kgdb_capture: 976xcall_kgdb_capture:
diff --git a/arch/tile/include/asm/processor.h b/arch/tile/include/asm/processor.h
index 0bc9968b97a1..f71e5206650b 100644
--- a/arch/tile/include/asm/processor.h
+++ b/arch/tile/include/asm/processor.h
@@ -214,13 +214,6 @@ static inline void release_thread(struct task_struct *dead_task)
214 214
215extern void prepare_exit_to_usermode(struct pt_regs *regs, u32 flags); 215extern void prepare_exit_to_usermode(struct pt_regs *regs, u32 flags);
216 216
217
218/*
219 * Return saved (kernel) PC of a blocked thread.
220 * Only used in a printk() in kernel/sched/core.c, so don't work too hard.
221 */
222#define thread_saved_pc(t) ((t)->thread.pc)
223
224unsigned long get_wchan(struct task_struct *p); 217unsigned long get_wchan(struct task_struct *p);
225 218
226/* Return initial ksp value for given task. */ 219/* Return initial ksp value for given task. */
diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
index cb10153b5c9f..03e5cc4e76e4 100644
--- a/arch/tile/mm/hugetlbpage.c
+++ b/arch/tile/mm/hugetlbpage.c
@@ -233,7 +233,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
233 addr = ALIGN(addr, huge_page_size(h)); 233 addr = ALIGN(addr, huge_page_size(h));
234 vma = find_vma(mm, addr); 234 vma = find_vma(mm, addr);
235 if (TASK_SIZE - len >= addr && 235 if (TASK_SIZE - len >= addr &&
236 (!vma || addr + len <= vma->vm_start)) 236 (!vma || addr + len <= vm_start_gap(vma)))
237 return addr; 237 return addr;
238 } 238 }
239 if (current->mm->get_unmapped_area == arch_get_unmapped_area) 239 if (current->mm->get_unmapped_area == arch_get_unmapped_area)
diff --git a/arch/um/include/asm/processor-generic.h b/arch/um/include/asm/processor-generic.h
index 2d1e0dd5bb0b..f6d1a3f747a9 100644
--- a/arch/um/include/asm/processor-generic.h
+++ b/arch/um/include/asm/processor-generic.h
@@ -58,8 +58,6 @@ static inline void release_thread(struct task_struct *task)
58{ 58{
59} 59}
60 60
61extern unsigned long thread_saved_pc(struct task_struct *t);
62
63static inline void mm_copy_segments(struct mm_struct *from_mm, 61static inline void mm_copy_segments(struct mm_struct *from_mm,
64 struct mm_struct *new_mm) 62 struct mm_struct *new_mm)
65{ 63{
diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c
index 64a1fd06f3fd..7b5640117325 100644
--- a/arch/um/kernel/um_arch.c
+++ b/arch/um/kernel/um_arch.c
@@ -56,12 +56,6 @@ union thread_union cpu0_irqstack
56 __attribute__((__section__(".data..init_irqstack"))) = 56 __attribute__((__section__(".data..init_irqstack"))) =
57 { INIT_THREAD_INFO(init_task) }; 57 { INIT_THREAD_INFO(init_task) };
58 58
59unsigned long thread_saved_pc(struct task_struct *task)
60{
61 /* FIXME: Need to look up userspace_pid by cpu */
62 return os_process_pc(userspace_pid[0]);
63}
64
65/* Changed in setup_arch, which is called in early boot */ 59/* Changed in setup_arch, which is called in early boot */
66static char host_info[(__NEW_UTS_LEN + 1) * 5]; 60static char host_info[(__NEW_UTS_LEN + 1) * 5];
67 61
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index cd18994a9555..0efb4c9497bc 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -360,7 +360,7 @@ config SMP
360 Management" code will be disabled if you say Y here. 360 Management" code will be disabled if you say Y here.
361 361
362 See also <file:Documentation/x86/i386/IO-APIC.txt>, 362 See also <file:Documentation/x86/i386/IO-APIC.txt>,
363 <file:Documentation/nmi_watchdog.txt> and the SMP-HOWTO available at 363 <file:Documentation/lockup-watchdogs.txt> and the SMP-HOWTO available at
364 <http://www.tldp.org/docs.html#howto>. 364 <http://www.tldp.org/docs.html#howto>.
365 365
366 If you don't know what to do here, say N. 366 If you don't know what to do here, say N.
@@ -2776,10 +2776,6 @@ config COMPAT_FOR_U64_ALIGNMENT
2776config SYSVIPC_COMPAT 2776config SYSVIPC_COMPAT
2777 def_bool y 2777 def_bool y
2778 depends on SYSVIPC 2778 depends on SYSVIPC
2779
2780config KEYS_COMPAT
2781 def_bool y
2782 depends on KEYS
2783endif 2779endif
2784 2780
2785endmenu 2781endmenu
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 5851411e60fb..bf240b920473 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -159,7 +159,7 @@ ifdef CONFIG_FUNCTION_GRAPH_TRACER
159 # If '-Os' is enabled, disable it and print a warning. 159 # If '-Os' is enabled, disable it and print a warning.
160 ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE 160 ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
161 undefine CONFIG_CC_OPTIMIZE_FOR_SIZE 161 undefine CONFIG_CC_OPTIMIZE_FOR_SIZE
162 $(warning Disabling CONFIG_CC_OPTIMIZE_FOR_SIZE. Your compiler does not have -mfentry so you cannot optimize for size with CONFIG_FUNCTION_GRAPH_TRACER.) 162 $(warning Disabling CONFIG_CC_OPTIMIZE_FOR_SIZE. Your compiler does not have -mfentry so you cannot optimize for size with CONFIG_FUNCTION_GRAPH_TRACER.)
163 endif 163 endif
164 164
165 endif 165 endif
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index 44163e8c3868..2c860ad4fe06 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -94,7 +94,7 @@ vmlinux-objs-$(CONFIG_EFI_MIXED) += $(obj)/efi_thunk_$(BITS).o
94quiet_cmd_check_data_rel = DATAREL $@ 94quiet_cmd_check_data_rel = DATAREL $@
95define cmd_check_data_rel 95define cmd_check_data_rel
96 for obj in $(filter %.o,$^); do \ 96 for obj in $(filter %.o,$^); do \
97 readelf -S $$obj | grep -qF .rel.local && { \ 97 ${CROSS_COMPILE}readelf -S $$obj | grep -qF .rel.local && { \
98 echo "error: $$obj has data relocations!" >&2; \ 98 echo "error: $$obj has data relocations!" >&2; \
99 exit 1; \ 99 exit 1; \
100 } || true; \ 100 } || true; \
diff --git a/arch/x86/boot/compressed/kaslr.c b/arch/x86/boot/compressed/kaslr.c
index 54c24f0a43d3..56a7e9201741 100644
--- a/arch/x86/boot/compressed/kaslr.c
+++ b/arch/x86/boot/compressed/kaslr.c
@@ -564,9 +564,6 @@ void choose_random_location(unsigned long input,
564{ 564{
565 unsigned long random_addr, min_addr; 565 unsigned long random_addr, min_addr;
566 566
567 /* By default, keep output position unchanged. */
568 *virt_addr = *output;
569
570 if (cmdline_find_option_bool("nokaslr")) { 567 if (cmdline_find_option_bool("nokaslr")) {
571 warn("KASLR disabled: 'nokaslr' on cmdline."); 568 warn("KASLR disabled: 'nokaslr' on cmdline.");
572 return; 569 return;
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index b3c5a5f030ce..00241c815524 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -338,7 +338,7 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap,
338 unsigned long output_len) 338 unsigned long output_len)
339{ 339{
340 const unsigned long kernel_total_size = VO__end - VO__text; 340 const unsigned long kernel_total_size = VO__end - VO__text;
341 unsigned long virt_addr = (unsigned long)output; 341 unsigned long virt_addr = LOAD_PHYSICAL_ADDR;
342 342
343 /* Retain x86 boot parameters pointer passed from startup_32/64. */ 343 /* Retain x86 boot parameters pointer passed from startup_32/64. */
344 boot_params = rmode; 344 boot_params = rmode;
@@ -390,6 +390,8 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap,
390#ifdef CONFIG_X86_64 390#ifdef CONFIG_X86_64
391 if (heap > 0x3fffffffffffUL) 391 if (heap > 0x3fffffffffffUL)
392 error("Destination address too large"); 392 error("Destination address too large");
393 if (virt_addr + max(output_len, kernel_total_size) > KERNEL_IMAGE_SIZE)
394 error("Destination virtual address is beyond the kernel mapping area");
393#else 395#else
394 if (heap > ((-__PAGE_OFFSET-(128<<20)-1) & 0x7fffffff)) 396 if (heap > ((-__PAGE_OFFSET-(128<<20)-1) & 0x7fffffff))
395 error("Destination address too large"); 397 error("Destination address too large");
@@ -397,7 +399,7 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap,
397#ifndef CONFIG_RELOCATABLE 399#ifndef CONFIG_RELOCATABLE
398 if ((unsigned long)output != LOAD_PHYSICAL_ADDR) 400 if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
399 error("Destination address does not match LOAD_PHYSICAL_ADDR"); 401 error("Destination address does not match LOAD_PHYSICAL_ADDR");
400 if ((unsigned long)output != virt_addr) 402 if (virt_addr != LOAD_PHYSICAL_ADDR)
401 error("Destination virtual address changed when not relocatable"); 403 error("Destination virtual address changed when not relocatable");
402#endif 404#endif
403 405
diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h
index 1c8355eadbd1..766a5211f827 100644
--- a/arch/x86/boot/compressed/misc.h
+++ b/arch/x86/boot/compressed/misc.h
@@ -81,8 +81,6 @@ static inline void choose_random_location(unsigned long input,
81 unsigned long output_size, 81 unsigned long output_size,
82 unsigned long *virt_addr) 82 unsigned long *virt_addr)
83{ 83{
84 /* No change from existing output location. */
85 *virt_addr = *output;
86} 84}
87#endif 85#endif
88 86
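
Taken together, the kaslr.c, misc.c and misc.h hunks invert the ownership of
virt_addr: extract_kernel() now seeds it with LOAD_PHYSICAL_ADDR and only
choose_random_location() may move it, so the !CONFIG_RANDOMIZE_BASE stub no
longer writes through the pointer, and the new check rejects a randomized
virtual base whose image would spill past KERNEL_IMAGE_SIZE. A sketch of
that bound with a placeholder constant (the real value is config-dependent):

#define KERNEL_IMAGE_SIZE_X	(1024UL << 20)	/* assumed: 1 GiB window */

/* Mirrors the new check in extract_kernel(): the chosen virtual
 * offset plus the larger of the compressed output and the final
 * image size must stay inside the fixed kernel mapping window.
 */
static int virt_addr_in_range(unsigned long virt_addr,
			      unsigned long output_len,
			      unsigned long kernel_total_size)
{
	unsigned long len = output_len > kernel_total_size ?
			    output_len : kernel_total_size;

	return virt_addr + len <= KERNEL_IMAGE_SIZE_X;
}
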
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index 50bc26949e9e..48ef7bb32c42 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -252,6 +252,23 @@ ENTRY(__switch_to_asm)
252END(__switch_to_asm) 252END(__switch_to_asm)
253 253
254/* 254/*
255 * The unwinder expects the last frame on the stack to always be at the same
256 * offset from the end of the page, which allows it to validate the stack.
257 * Calling schedule_tail() directly would break that convention because it's an
258 * asmlinkage function so its argument has to be pushed on the stack. This
259 * wrapper creates a proper "end of stack" frame header before the call.
260 */
261ENTRY(schedule_tail_wrapper)
262 FRAME_BEGIN
263
264 pushl %eax
265 call schedule_tail
266 popl %eax
267
268 FRAME_END
269 ret
270ENDPROC(schedule_tail_wrapper)
271/*
255 * A newly forked process directly context switches into this address. 272 * A newly forked process directly context switches into this address.
256 * 273 *
257 * eax: prev task we switched from 274 * eax: prev task we switched from
@@ -259,24 +276,15 @@ END(__switch_to_asm)
259 * edi: kernel thread arg 276 * edi: kernel thread arg
260 */ 277 */
261ENTRY(ret_from_fork) 278ENTRY(ret_from_fork)
262 FRAME_BEGIN /* help unwinder find end of stack */ 279 call schedule_tail_wrapper
263
264 /*
265 * schedule_tail() is asmlinkage so we have to put its 'prev' argument
266 * on the stack.
267 */
268 pushl %eax
269 call schedule_tail
270 popl %eax
271 280
272 testl %ebx, %ebx 281 testl %ebx, %ebx
273 jnz 1f /* kernel threads are uncommon */ 282 jnz 1f /* kernel threads are uncommon */
274 283
2752: 2842:
276 /* When we fork, we trace the syscall return in the child, too. */ 285 /* When we fork, we trace the syscall return in the child, too. */
277 leal FRAME_OFFSET(%esp), %eax 286 movl %esp, %eax
278 call syscall_return_slowpath 287 call syscall_return_slowpath
279 FRAME_END
280 jmp restore_all 288 jmp restore_all
281 289
282 /* kernel thread */ 290 /* kernel thread */
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 607d72c4a485..4a4c0834f965 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -36,7 +36,6 @@
36#include <asm/smap.h> 36#include <asm/smap.h>
37#include <asm/pgtable_types.h> 37#include <asm/pgtable_types.h>
38#include <asm/export.h> 38#include <asm/export.h>
39#include <asm/frame.h>
40#include <linux/err.h> 39#include <linux/err.h>
41 40
42.code64 41.code64
@@ -406,19 +405,17 @@ END(__switch_to_asm)
406 * r12: kernel thread arg 405 * r12: kernel thread arg
407 */ 406 */
408ENTRY(ret_from_fork) 407ENTRY(ret_from_fork)
409 FRAME_BEGIN /* help unwinder find end of stack */
410 movq %rax, %rdi 408 movq %rax, %rdi
411 call schedule_tail /* rdi: 'prev' task parameter */ 409 call schedule_tail /* rdi: 'prev' task parameter */
412 410
413 testq %rbx, %rbx /* from kernel_thread? */ 411 testq %rbx, %rbx /* from kernel_thread? */
414 jnz 1f /* kernel threads are uncommon */ 412 jnz 1f /* kernel threads are uncommon */
415 413
4162: 4142:
417 leaq FRAME_OFFSET(%rsp),%rdi /* pt_regs pointer */ 415 movq %rsp, %rdi
418 call syscall_return_slowpath /* returns with IRQs disabled */ 416 call syscall_return_slowpath /* returns with IRQs disabled */
419 TRACE_IRQS_ON /* user mode is traced as IRQS on */ 417 TRACE_IRQS_ON /* user mode is traced as IRQS on */
420 SWAPGS 418 SWAPGS
421 FRAME_END
422 jmp restore_regs_and_iret 419 jmp restore_regs_and_iret
423 420
4241: 4211:
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index a6d91d4e37a1..110ce8238466 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -431,11 +431,11 @@ static __initconst const u64 skl_hw_cache_event_ids
431 [ C(DTLB) ] = { 431 [ C(DTLB) ] = {
432 [ C(OP_READ) ] = { 432 [ C(OP_READ) ] = {
433 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_INST_RETIRED.ALL_LOADS */ 433 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_INST_RETIRED.ALL_LOADS */
434 [ C(RESULT_MISS) ] = 0x608, /* DTLB_LOAD_MISSES.WALK_COMPLETED */ 434 [ C(RESULT_MISS) ] = 0xe08, /* DTLB_LOAD_MISSES.WALK_COMPLETED */
435 }, 435 },
436 [ C(OP_WRITE) ] = { 436 [ C(OP_WRITE) ] = {
437 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_INST_RETIRED.ALL_STORES */ 437 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_INST_RETIRED.ALL_STORES */
438 [ C(RESULT_MISS) ] = 0x649, /* DTLB_STORE_MISSES.WALK_COMPLETED */ 438 [ C(RESULT_MISS) ] = 0xe49, /* DTLB_STORE_MISSES.WALK_COMPLETED */
439 }, 439 },
440 [ C(OP_PREFETCH) ] = { 440 [ C(OP_PREFETCH) ] = {
441 [ C(RESULT_ACCESS) ] = 0x0, 441 [ C(RESULT_ACCESS) ] = 0x0,
diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
index 758c1aa5009d..44ec523287f6 100644
--- a/arch/x86/events/intel/uncore.c
+++ b/arch/x86/events/intel/uncore.c
@@ -1170,7 +1170,7 @@ static int uncore_event_cpu_online(unsigned int cpu)
1170 pmu = type->pmus; 1170 pmu = type->pmus;
1171 for (i = 0; i < type->num_boxes; i++, pmu++) { 1171 for (i = 0; i < type->num_boxes; i++, pmu++) {
1172 box = pmu->boxes[pkg]; 1172 box = pmu->boxes[pkg];
1173 if (!box && atomic_inc_return(&box->refcnt) == 1) 1173 if (box && atomic_inc_return(&box->refcnt) == 1)
1174 uncore_box_init(box); 1174 uncore_box_init(box);
1175 } 1175 }
1176 } 1176 }
diff --git a/arch/x86/include/asm/extable.h b/arch/x86/include/asm/extable.h
index b8ad261d11dc..c66d19e3c23e 100644
--- a/arch/x86/include/asm/extable.h
+++ b/arch/x86/include/asm/extable.h
@@ -29,6 +29,7 @@ struct pt_regs;
29 } while (0) 29 } while (0)
30 30
31extern int fixup_exception(struct pt_regs *regs, int trapnr); 31extern int fixup_exception(struct pt_regs *regs, int trapnr);
32extern int fixup_bug(struct pt_regs *regs, int trapnr);
32extern bool ex_has_fault_handler(unsigned long ip); 33extern bool ex_has_fault_handler(unsigned long ip);
33extern void early_fixup_exception(struct pt_regs *regs, int trapnr); 34extern void early_fixup_exception(struct pt_regs *regs, int trapnr);
34 35
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index 055962615779..722d0e568863 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -296,6 +296,7 @@ struct x86_emulate_ctxt {
296 296
297 bool perm_ok; /* do not check permissions if true */ 297 bool perm_ok; /* do not check permissions if true */
298 bool ud; /* inject an #UD if host doesn't support insn */ 298 bool ud; /* inject an #UD if host doesn't support insn */
299 bool tf; /* TF value before instruction (after for syscall/sysret) */
299 300
300 bool have_exception; 301 bool have_exception;
301 struct x86_exception exception; 302 struct x86_exception exception;
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 9c761fea0c98..695605eb1dfb 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -43,7 +43,7 @@
43#define KVM_PRIVATE_MEM_SLOTS 3 43#define KVM_PRIVATE_MEM_SLOTS 3
44#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS) 44#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS)
45 45
46#define KVM_HALT_POLL_NS_DEFAULT 400000 46#define KVM_HALT_POLL_NS_DEFAULT 200000
47 47
48#define KVM_IRQCHIP_NUM_PINS KVM_IOAPIC_NUM_PINS 48#define KVM_IRQCHIP_NUM_PINS KVM_IOAPIC_NUM_PINS
49 49
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 4fd5195deed0..3f9a3d2a5209 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -266,6 +266,7 @@ static inline int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *s
266#endif 266#endif
267 267
268int mce_available(struct cpuinfo_x86 *c); 268int mce_available(struct cpuinfo_x86 *c);
269bool mce_is_memory_error(struct mce *m);
269 270
270DECLARE_PER_CPU(unsigned, mce_exception_count); 271DECLARE_PER_CPU(unsigned, mce_exception_count);
271DECLARE_PER_CPU(unsigned, mce_poll_count); 272DECLARE_PER_CPU(unsigned, mce_poll_count);
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
index fba100713924..d5acc27ed1cc 100644
--- a/arch/x86/include/asm/mshyperv.h
+++ b/arch/x86/include/asm/mshyperv.h
@@ -2,8 +2,7 @@
2#define _ASM_X86_MSHYPER_H 2#define _ASM_X86_MSHYPER_H
3 3
4#include <linux/types.h> 4#include <linux/types.h>
5#include <linux/interrupt.h> 5#include <linux/atomic.h>
6#include <linux/clocksource.h>
7#include <asm/hyperv.h> 6#include <asm/hyperv.h>
8 7
9/* 8/*
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 3cada998a402..a28b671f1549 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -860,8 +860,6 @@ extern unsigned long KSTK_ESP(struct task_struct *task);
860 860
861#endif /* CONFIG_X86_64 */ 861#endif /* CONFIG_X86_64 */
862 862
863extern unsigned long thread_saved_pc(struct task_struct *tsk);
864
865extern void start_thread(struct pt_regs *regs, unsigned long new_ip, 863extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
866 unsigned long new_sp); 864 unsigned long new_sp);
867 865
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 68766b276d9e..a059aac9e937 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -319,10 +319,10 @@ do { \
319#define __get_user_asm_u64(x, ptr, retval, errret) \ 319#define __get_user_asm_u64(x, ptr, retval, errret) \
320({ \ 320({ \
321 __typeof__(ptr) __ptr = (ptr); \ 321 __typeof__(ptr) __ptr = (ptr); \
322 asm volatile(ASM_STAC "\n" \ 322 asm volatile("\n" \
323 "1: movl %2,%%eax\n" \ 323 "1: movl %2,%%eax\n" \
324 "2: movl %3,%%edx\n" \ 324 "2: movl %3,%%edx\n" \
325 "3: " ASM_CLAC "\n" \ 325 "3:\n" \
326 ".section .fixup,\"ax\"\n" \ 326 ".section .fixup,\"ax\"\n" \
327 "4: mov %4,%0\n" \ 327 "4: mov %4,%0\n" \
328 " xorl %%eax,%%eax\n" \ 328 " xorl %%eax,%%eax\n" \
@@ -331,7 +331,7 @@ do { \
331 ".previous\n" \ 331 ".previous\n" \
332 _ASM_EXTABLE(1b, 4b) \ 332 _ASM_EXTABLE(1b, 4b) \
333 _ASM_EXTABLE(2b, 4b) \ 333 _ASM_EXTABLE(2b, 4b) \
334 : "=r" (retval), "=A"(x) \ 334 : "=r" (retval), "=&A"(x) \
335 : "m" (__m(__ptr)), "m" __m(((u32 *)(__ptr)) + 1), \ 335 : "m" (__m(__ptr)), "m" __m(((u32 *)(__ptr)) + 1), \
336 "i" (errret), "0" (retval)); \ 336 "i" (errret), "0" (retval)); \
337}) 337})
@@ -703,14 +703,15 @@ extern struct movsl_mask {
703#define unsafe_put_user(x, ptr, err_label) \ 703#define unsafe_put_user(x, ptr, err_label) \
704do { \ 704do { \
705 int __pu_err; \ 705 int __pu_err; \
706 __put_user_size((x), (ptr), sizeof(*(ptr)), __pu_err, -EFAULT); \ 706 __typeof__(*(ptr)) __pu_val = (x); \
707 __put_user_size(__pu_val, (ptr), sizeof(*(ptr)), __pu_err, -EFAULT); \
707 if (unlikely(__pu_err)) goto err_label; \ 708 if (unlikely(__pu_err)) goto err_label; \
708} while (0) 709} while (0)
709 710
710#define unsafe_get_user(x, ptr, err_label) \ 711#define unsafe_get_user(x, ptr, err_label) \
711do { \ 712do { \
712 int __gu_err; \ 713 int __gu_err; \
713 unsigned long __gu_val; \ 714 __inttype(*(ptr)) __gu_val; \
714 __get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err, -EFAULT); \ 715 __get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err, -EFAULT); \
715 (x) = (__force __typeof__(*(ptr)))__gu_val; \ 716 (x) = (__force __typeof__(*(ptr)))__gu_val; \
716 if (unlikely(__gu_err)) goto err_label; \ 717 if (unlikely(__gu_err)) goto err_label; \
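
Two distinct fixes sit in this file. The unsafe_put_user()/unsafe_get_user()
hunks evaluate x once and size the temporary with __inttype so 64-bit values
are no longer truncated through an unsigned long. The __get_user_asm_u64
hunk adds an earlyclobber: the asm writes %eax in its first instruction,
before the second instruction has consumed its inputs, so a plain "=A" lets
the compiler satisfy an input's address with %eax or %edx and then read a
clobbered register (the ASM_STAC/ASM_CLAC pair also drops out, since the
unsafe_*() callers already run inside user_access_begin()/user_access_end()).
A minimal 32-bit illustration of why the '&' matters (sketch; assumes the
i386 "A" constraint, i.e. the %edx:%eax pair):

static inline unsigned long long load_u64_sketch(const unsigned int *p)
{
	unsigned long long val;

	asm ("movl 0(%1), %%eax\n\t"
	     "movl 4(%1), %%edx"
	     : "=&A" (val)	/* '&' keeps %1 out of %eax/%edx */
	     : "r" (p)
	     : "memory");
	return val;
}
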
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index c5b8f760473c..32e14d137416 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -409,8 +409,13 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start,
409 memcpy(insnbuf, replacement, a->replacementlen); 409 memcpy(insnbuf, replacement, a->replacementlen);
410 insnbuf_sz = a->replacementlen; 410 insnbuf_sz = a->replacementlen;
411 411
412 /* 0xe8 is a relative jump; fix the offset. */ 412 /*
413 if (*insnbuf == 0xe8 && a->replacementlen == 5) { 413 * 0xe8 is a relative jump; fix the offset.
414 *
415 * Instruction length is checked before the opcode to avoid
416 * accessing uninitialized bytes for zero-length replacements.
417 */
418 if (a->replacementlen == 5 && *insnbuf == 0xe8) {
414 *(s32 *)(insnbuf + 1) += replacement - instr; 419 *(s32 *)(insnbuf + 1) += replacement - instr;
415 DPRINTK("Fix CALL offset: 0x%x, CALL 0x%lx", 420 DPRINTK("Fix CALL offset: 0x%x, CALL 0x%lx",
416 *(s32 *)(insnbuf + 1), 421 *(s32 *)(insnbuf + 1),
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c
index a70fd61095f8..6f077445647a 100644
--- a/arch/x86/kernel/cpu/cyrix.c
+++ b/arch/x86/kernel/cpu/cyrix.c
@@ -255,6 +255,7 @@ static void init_cyrix(struct cpuinfo_x86 *c)
255 break; 255 break;
256 256
257 case 4: /* MediaGX/GXm or Geode GXM/GXLV/GX1 */ 257 case 4: /* MediaGX/GXm or Geode GXM/GXLV/GX1 */
258 case 11: /* GX1 with inverted Device ID */
258#ifdef CONFIG_PCI 259#ifdef CONFIG_PCI
259 { 260 {
260 u32 vendor, device; 261 u32 vendor, device;
diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
index f5af0cc7eb0d..9257bd9dc664 100644
--- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
+++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
@@ -856,11 +856,13 @@ static struct dentry *rdt_mount(struct file_system_type *fs_type,
856 dentry = kernfs_mount(fs_type, flags, rdt_root, 856 dentry = kernfs_mount(fs_type, flags, rdt_root,
857 RDTGROUP_SUPER_MAGIC, NULL); 857 RDTGROUP_SUPER_MAGIC, NULL);
858 if (IS_ERR(dentry)) 858 if (IS_ERR(dentry))
859 goto out_cdp; 859 goto out_destroy;
860 860
861 static_branch_enable(&rdt_enable_key); 861 static_branch_enable(&rdt_enable_key);
862 goto out; 862 goto out;
863 863
864out_destroy:
865 kernfs_remove(kn_info);
864out_cdp: 866out_cdp:
865 cdp_disable(); 867 cdp_disable();
866out: 868out:
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 5abd4bf73d6e..5cfbaeb6529a 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -499,16 +499,14 @@ static int mce_usable_address(struct mce *m)
499 return 1; 499 return 1;
500} 500}
501 501
502static bool memory_error(struct mce *m) 502bool mce_is_memory_error(struct mce *m)
503{ 503{
504 struct cpuinfo_x86 *c = &boot_cpu_data; 504 if (m->cpuvendor == X86_VENDOR_AMD) {
505
506 if (c->x86_vendor == X86_VENDOR_AMD) {
507 /* ErrCodeExt[20:16] */ 505 /* ErrCodeExt[20:16] */
508 u8 xec = (m->status >> 16) & 0x1f; 506 u8 xec = (m->status >> 16) & 0x1f;
509 507
510 return (xec == 0x0 || xec == 0x8); 508 return (xec == 0x0 || xec == 0x8);
511 } else if (c->x86_vendor == X86_VENDOR_INTEL) { 509 } else if (m->cpuvendor == X86_VENDOR_INTEL) {
512 /* 510 /*
513 * Intel SDM Volume 3B - 15.9.2 Compound Error Codes 511 * Intel SDM Volume 3B - 15.9.2 Compound Error Codes
514 * 512 *
@@ -529,6 +527,7 @@ static bool memory_error(struct mce *m)
529 527
530 return false; 528 return false;
531} 529}
530EXPORT_SYMBOL_GPL(mce_is_memory_error);
532 531
533static bool cec_add_mce(struct mce *m) 532static bool cec_add_mce(struct mce *m)
534{ 533{
@@ -536,7 +535,7 @@ static bool cec_add_mce(struct mce *m)
536 return false; 535 return false;
537 536
538 /* We eat only correctable DRAM errors with usable addresses. */ 537 /* We eat only correctable DRAM errors with usable addresses. */
539 if (memory_error(m) && 538 if (mce_is_memory_error(m) &&
540 !(m->status & MCI_STATUS_UC) && 539 !(m->status & MCI_STATUS_UC) &&
541 mce_usable_address(m)) 540 mce_usable_address(m))
542 if (!cec_add_elem(m->addr >> PAGE_SHIFT)) 541 if (!cec_add_elem(m->addr >> PAGE_SHIFT))
@@ -713,7 +712,7 @@ bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
713 712
714 severity = mce_severity(&m, mca_cfg.tolerant, NULL, false); 713 severity = mce_severity(&m, mca_cfg.tolerant, NULL, false);
715 714
716 if (severity == MCE_DEFERRED_SEVERITY && memory_error(&m)) 715 if (severity == MCE_DEFERRED_SEVERITY && mce_is_memory_error(&m))
717 if (m.status & MCI_STATUS_ADDRV) 716 if (m.status & MCI_STATUS_ADDRV)
718 m.severity = severity; 717 m.severity = severity;
719 718
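
memory_error() becomes mce_is_memory_error() and is exported for use outside
mce.c; note it now tests m->cpuvendor, the vendor recorded in the MCE record
itself, rather than boot_cpu_data. A hypothetical consumer on the MCE decode
chain (illustrative only, not part of the patch):

#include <linux/notifier.h>
#include <asm/mce.h>

static int dram_err_notify(struct notifier_block *nb,
			   unsigned long val, void *data)
{
	struct mce *m = data;

	if (!m || !mce_is_memory_error(m))
		return NOTIFY_DONE;

	/* m->addr holds the faulting address when MCI_STATUS_ADDRV
	 * is set in m->status. */
	return NOTIFY_OK;
}
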
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index 45db4d2ebd01..e9f4d762aa5b 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -320,7 +320,7 @@ void load_ucode_amd_ap(unsigned int cpuid_1_eax)
320} 320}
321 321
322static enum ucode_state 322static enum ucode_state
323load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size); 323load_microcode_amd(bool save, u8 family, const u8 *data, size_t size);
324 324
325int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax) 325int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax)
326{ 326{
@@ -338,8 +338,7 @@ int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax)
338 if (!desc.mc) 338 if (!desc.mc)
339 return -EINVAL; 339 return -EINVAL;
340 340
341 ret = load_microcode_amd(smp_processor_id(), x86_family(cpuid_1_eax), 341 ret = load_microcode_amd(true, x86_family(cpuid_1_eax), desc.data, desc.size);
342 desc.data, desc.size);
343 if (ret != UCODE_OK) 342 if (ret != UCODE_OK)
344 return -EINVAL; 343 return -EINVAL;
345 344
@@ -675,7 +674,7 @@ static enum ucode_state __load_microcode_amd(u8 family, const u8 *data,
675} 674}
676 675
677static enum ucode_state 676static enum ucode_state
678load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size) 677load_microcode_amd(bool save, u8 family, const u8 *data, size_t size)
679{ 678{
680 enum ucode_state ret; 679 enum ucode_state ret;
681 680
@@ -689,8 +688,8 @@ load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size)
689 688
690#ifdef CONFIG_X86_32 689#ifdef CONFIG_X86_32
691 /* save BSP's matching patch for early load */ 690 /* save BSP's matching patch for early load */
692 if (cpu_data(cpu).cpu_index == boot_cpu_data.cpu_index) { 691 if (save) {
693 struct ucode_patch *p = find_patch(cpu); 692 struct ucode_patch *p = find_patch(0);
694 if (p) { 693 if (p) {
695 memset(amd_ucode_patch, 0, PATCH_MAX_SIZE); 694 memset(amd_ucode_patch, 0, PATCH_MAX_SIZE);
696 memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data), 695 memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data),
@@ -722,11 +721,12 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device,
722{ 721{
723 char fw_name[36] = "amd-ucode/microcode_amd.bin"; 722 char fw_name[36] = "amd-ucode/microcode_amd.bin";
724 struct cpuinfo_x86 *c = &cpu_data(cpu); 723 struct cpuinfo_x86 *c = &cpu_data(cpu);
724 bool bsp = c->cpu_index == boot_cpu_data.cpu_index;
725 enum ucode_state ret = UCODE_NFOUND; 725 enum ucode_state ret = UCODE_NFOUND;
726 const struct firmware *fw; 726 const struct firmware *fw;
727 727
728 /* reload ucode container only on the boot cpu */ 728 /* reload ucode container only on the boot cpu */
729 if (!refresh_fw || c->cpu_index != boot_cpu_data.cpu_index) 729 if (!refresh_fw || !bsp)
730 return UCODE_OK; 730 return UCODE_OK;
731 731
732 if (c->x86 >= 0x15) 732 if (c->x86 >= 0x15)
@@ -743,7 +743,7 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device,
743 goto fw_release; 743 goto fw_release;
744 } 744 }
745 745
746 ret = load_microcode_amd(cpu, c->x86, fw->data, fw->size); 746 ret = load_microcode_amd(bsp, c->x86, fw->data, fw->size);
747 747
748 fw_release: 748 fw_release:
749 release_firmware(fw); 749 release_firmware(fw);
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index afdfd237b59f..f522415bf9e5 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -619,6 +619,9 @@ int __init save_microcode_in_initrd_intel(void)
619 619
620 show_saved_mc(); 620 show_saved_mc();
621 621
622 /* initrd is going away, clear patch ptr. */
623 intel_ucode_patch = NULL;
624
622 return 0; 625 return 0;
623} 626}
624 627
diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c
index c2f8dde3255c..d5d44c452624 100644
--- a/arch/x86/kernel/fpu/init.c
+++ b/arch/x86/kernel/fpu/init.c
@@ -90,6 +90,7 @@ static void fpu__init_system_early_generic(struct cpuinfo_x86 *c)
90 * Boot time FPU feature detection code: 90 * Boot time FPU feature detection code:
91 */ 91 */
92unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu; 92unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu;
93EXPORT_SYMBOL_GPL(mxcsr_feature_mask);
93 94
94static void __init fpu__init_system_mxcsr(void) 95static void __init fpu__init_system_mxcsr(void)
95{ 96{
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 0651e974dcb3..9bef1bbeba63 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -689,8 +689,12 @@ static inline void *alloc_tramp(unsigned long size)
689{ 689{
690 return module_alloc(size); 690 return module_alloc(size);
691} 691}
692static inline void tramp_free(void *tramp) 692static inline void tramp_free(void *tramp, int size)
693{ 693{
694 int npages = PAGE_ALIGN(size) >> PAGE_SHIFT;
695
696 set_memory_nx((unsigned long)tramp, npages);
697 set_memory_rw((unsigned long)tramp, npages);
694 module_memfree(tramp); 698 module_memfree(tramp);
695} 699}
696#else 700#else
@@ -699,7 +703,7 @@ static inline void *alloc_tramp(unsigned long size)
699{ 703{
700 return NULL; 704 return NULL;
701} 705}
702static inline void tramp_free(void *tramp) { } 706static inline void tramp_free(void *tramp, int size) { }
703#endif 707#endif
704 708
705/* Defined as markers to the end of the ftrace default trampolines */ 709/* Defined as markers to the end of the ftrace default trampolines */
@@ -771,7 +775,7 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
771 /* Copy ftrace_caller onto the trampoline memory */ 775 /* Copy ftrace_caller onto the trampoline memory */
772 ret = probe_kernel_read(trampoline, (void *)start_offset, size); 776 ret = probe_kernel_read(trampoline, (void *)start_offset, size);
773 if (WARN_ON(ret < 0)) { 777 if (WARN_ON(ret < 0)) {
774 tramp_free(trampoline); 778 tramp_free(trampoline, *tramp_size);
775 return 0; 779 return 0;
776 } 780 }
777 781
@@ -797,7 +801,7 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
797 801
798 /* Are we pointing to the reference? */ 802 /* Are we pointing to the reference? */
799 if (WARN_ON(memcmp(op_ptr.op, op_ref, 3) != 0)) { 803 if (WARN_ON(memcmp(op_ptr.op, op_ref, 3) != 0)) {
800 tramp_free(trampoline); 804 tramp_free(trampoline, *tramp_size);
801 return 0; 805 return 0;
802 } 806 }
803 807
@@ -839,7 +843,7 @@ void arch_ftrace_update_trampoline(struct ftrace_ops *ops)
839 unsigned long offset; 843 unsigned long offset;
840 unsigned long ip; 844 unsigned long ip;
841 unsigned int size; 845 unsigned int size;
842 int ret; 846 int ret, npages;
843 847
844 if (ops->trampoline) { 848 if (ops->trampoline) {
845 /* 849 /*
@@ -848,11 +852,14 @@ void arch_ftrace_update_trampoline(struct ftrace_ops *ops)
848 */ 852 */
849 if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP)) 853 if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
850 return; 854 return;
855 npages = PAGE_ALIGN(ops->trampoline_size) >> PAGE_SHIFT;
856 set_memory_rw(ops->trampoline, npages);
851 } else { 857 } else {
852 ops->trampoline = create_trampoline(ops, &size); 858 ops->trampoline = create_trampoline(ops, &size);
853 if (!ops->trampoline) 859 if (!ops->trampoline)
854 return; 860 return;
855 ops->trampoline_size = size; 861 ops->trampoline_size = size;
862 npages = PAGE_ALIGN(size) >> PAGE_SHIFT;
856 } 863 }
857 864
858 offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS); 865 offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS);
@@ -863,6 +870,7 @@ void arch_ftrace_update_trampoline(struct ftrace_ops *ops)
863 /* Do a safe modify in case the trampoline is executing */ 870 /* Do a safe modify in case the trampoline is executing */
864 new = ftrace_call_replace(ip, (unsigned long)func); 871 new = ftrace_call_replace(ip, (unsigned long)func);
865 ret = update_ftrace_func(ip, new); 872 ret = update_ftrace_func(ip, new);
873 set_memory_ro(ops->trampoline, npages);
866 874
867 /* The update should never fail */ 875 /* The update should never fail */
868 WARN_ON(ret); 876 WARN_ON(ret);
@@ -939,7 +947,7 @@ void arch_ftrace_trampoline_free(struct ftrace_ops *ops)
939 if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP)) 947 if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
940 return; 948 return;
941 949
942 tramp_free((void *)ops->trampoline); 950 tramp_free((void *)ops->trampoline, ops->trampoline_size);
943 ops->trampoline = 0; 951 ops->trampoline = 0;
944} 952}
945 953
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 5b2bbfbb3712..6b877807598b 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -52,6 +52,7 @@
52#include <linux/ftrace.h> 52#include <linux/ftrace.h>
53#include <linux/frame.h> 53#include <linux/frame.h>
54#include <linux/kasan.h> 54#include <linux/kasan.h>
55#include <linux/moduleloader.h>
55 56
56#include <asm/text-patching.h> 57#include <asm/text-patching.h>
57#include <asm/cacheflush.h> 58#include <asm/cacheflush.h>
@@ -417,6 +418,14 @@ static void prepare_boost(struct kprobe *p, struct insn *insn)
417 } 418 }
418} 419}
419 420
421/* Recover page to RW mode before releasing it */
422void free_insn_page(void *page)
423{
424 set_memory_nx((unsigned long)page & PAGE_MASK, 1);
425 set_memory_rw((unsigned long)page & PAGE_MASK, 1);
426 module_memfree(page);
427}
428
420static int arch_copy_kprobe(struct kprobe *p) 429static int arch_copy_kprobe(struct kprobe *p)
421{ 430{
422 struct insn insn; 431 struct insn insn;
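
The ftrace hunks above and this kprobes hunk enforce the same rule: once
trampolines and instruction slots are mapped read-only and executable, they
must be flipped back to non-executable and writable before module_memfree(),
and made RW again while being re-patched (see the set_memory_rw()/
set_memory_ro() pair around update_ftrace_func() above). The common shape,
condensed into one sketch (the set_memory_* header location varies by tree):

#include <linux/mm.h>
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>	/* set_memory_* in this era */

static void free_exec_text(void *text, unsigned int size)
{
	int npages = PAGE_ALIGN(size) >> PAGE_SHIFT;

	set_memory_nx((unsigned long)text, npages);
	set_memory_rw((unsigned long)text, npages);
	module_memfree(text);
}
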
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index da5c09789984..43e10d6fdbed 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -161,8 +161,8 @@ void kvm_async_pf_task_wait(u32 token)
161 */ 161 */
162 rcu_irq_exit(); 162 rcu_irq_exit();
163 native_safe_halt(); 163 native_safe_halt();
164 rcu_irq_enter();
165 local_irq_disable(); 164 local_irq_disable();
165 rcu_irq_enter();
166 } 166 }
167 } 167 }
168 if (!n.halted) 168 if (!n.halted)
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 0bb88428cbf2..3ca198080ea9 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -545,17 +545,6 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
545} 545}
546 546
547/* 547/*
548 * Return saved PC of a blocked thread.
549 * What is this good for? it will be always the scheduler or ret_from_fork.
550 */
551unsigned long thread_saved_pc(struct task_struct *tsk)
552{
553 struct inactive_task_frame *frame =
554 (struct inactive_task_frame *) READ_ONCE(tsk->thread.sp);
555 return READ_ONCE_NOCHECK(frame->ret_addr);
556}
557
558/*
559 * Called from fs/proc with a reference on @p to find the function 548 * Called from fs/proc with a reference on @p to find the function
560 * which called into schedule(). This needs to be done carefully 549 * which called into schedule(). This needs to be done carefully
561 * because the task might wake up and we might look at a stack 550 * because the task might wake up and we might look at a stack
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index ff40e74c9181..ffeae818aa7a 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -78,7 +78,7 @@ void __show_regs(struct pt_regs *regs, int all)
78 78
79 printk(KERN_DEFAULT "EIP: %pS\n", (void *)regs->ip); 79 printk(KERN_DEFAULT "EIP: %pS\n", (void *)regs->ip);
80 printk(KERN_DEFAULT "EFLAGS: %08lx CPU: %d\n", regs->flags, 80 printk(KERN_DEFAULT "EFLAGS: %08lx CPU: %d\n", regs->flags,
81 smp_processor_id()); 81 raw_smp_processor_id());
82 82
83 printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n", 83 printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
84 regs->ax, regs->bx, regs->cx, regs->dx); 84 regs->ax, regs->bx, regs->cx, regs->dx);
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 0b4d3c686b1e..f81823695014 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -980,8 +980,6 @@ void __init setup_arch(char **cmdline_p)
980 */ 980 */
981 x86_configure_nx(); 981 x86_configure_nx();
982 982
983 simple_udelay_calibration();
984
985 parse_early_param(); 983 parse_early_param();
986 984
987#ifdef CONFIG_MEMORY_HOTPLUG 985#ifdef CONFIG_MEMORY_HOTPLUG
@@ -1041,6 +1039,8 @@ void __init setup_arch(char **cmdline_p)
1041 */ 1039 */
1042 init_hypervisor_platform(); 1040 init_hypervisor_platform();
1043 1041
1042 simple_udelay_calibration();
1043
1044 x86_init.resources.probe_roms(); 1044 x86_init.resources.probe_roms();
1045 1045
1046 /* after parse_early_param, so could debug it */ 1046 /* after parse_early_param, so could debug it */
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
index 207b8f2582c7..213ddf3e937d 100644
--- a/arch/x86/kernel/sys_x86_64.c
+++ b/arch/x86/kernel/sys_x86_64.c
@@ -144,7 +144,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
144 addr = PAGE_ALIGN(addr); 144 addr = PAGE_ALIGN(addr);
145 vma = find_vma(mm, addr); 145 vma = find_vma(mm, addr);
146 if (end - len >= addr && 146 if (end - len >= addr &&
147 (!vma || addr + len <= vma->vm_start)) 147 (!vma || addr + len <= vm_start_gap(vma)))
148 return addr; 148 return addr;
149 } 149 }
150 150
@@ -187,7 +187,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
187 addr = PAGE_ALIGN(addr); 187 addr = PAGE_ALIGN(addr);
188 vma = find_vma(mm, addr); 188 vma = find_vma(mm, addr);
189 if (TASK_SIZE - len >= addr && 189 if (TASK_SIZE - len >= addr &&
190 (!vma || addr + len <= vma->vm_start)) 190 (!vma || addr + len <= vm_start_gap(vma)))
191 return addr; 191 return addr;
192 } 192 }
193 193
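Both checks above (and the hugetlb one later in this series) now compare against vm_start_gap() instead of vma->vm_start. A sketch of that helper as the stack-guard-gap series defines it (assumed from the include/linux/mm.h of this era; stack_guard_gap is the tunable number of bytes kept free below a VM_GROWSDOWN stack):

	static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
	{
		unsigned long vm_start = vma->vm_start;

		if (vma->vm_flags & VM_GROWSDOWN) {
			vm_start -= stack_guard_gap;
			if (vm_start > vma->vm_start)	/* guard against underflow */
				vm_start = 0;
		}
		return vm_start;
	}

With that, a fixed-address request that would land inside the guard gap below a stack no longer passes the "addr + len <= ..." test.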
diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
index 4b1724059909..a4eb27918ceb 100644
--- a/arch/x86/kernel/tboot.c
+++ b/arch/x86/kernel/tboot.c
@@ -514,7 +514,7 @@ int tboot_force_iommu(void)
514 if (!tboot_enabled()) 514 if (!tboot_enabled())
515 return 0; 515 return 0;
516 516
517 if (!intel_iommu_tboot_noforce) 517 if (intel_iommu_tboot_noforce)
518 return 1; 518 return 1;
519 519
520 if (no_iommu || swiotlb || dmar_disabled) 520 if (no_iommu || swiotlb || dmar_disabled)
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 3995d3a777d4..bf54309b85da 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -182,7 +182,7 @@ int is_valid_bugaddr(unsigned long addr)
182 return ud == INSN_UD0 || ud == INSN_UD2; 182 return ud == INSN_UD0 || ud == INSN_UD2;
183} 183}
184 184
185static int fixup_bug(struct pt_regs *regs, int trapnr) 185int fixup_bug(struct pt_regs *regs, int trapnr)
186{ 186{
187 if (trapnr != X86_TRAP_UD) 187 if (trapnr != X86_TRAP_UD)
188 return 0; 188 return 0;
diff --git a/arch/x86/kernel/unwind_frame.c b/arch/x86/kernel/unwind_frame.c
index 82c6d7f1fd73..b9389d72b2f7 100644
--- a/arch/x86/kernel/unwind_frame.c
+++ b/arch/x86/kernel/unwind_frame.c
@@ -104,6 +104,11 @@ static inline unsigned long *last_frame(struct unwind_state *state)
104 return (unsigned long *)task_pt_regs(state->task) - 2; 104 return (unsigned long *)task_pt_regs(state->task) - 2;
105} 105}
106 106
107static bool is_last_frame(struct unwind_state *state)
108{
109 return state->bp == last_frame(state);
110}
111
107#ifdef CONFIG_X86_32 112#ifdef CONFIG_X86_32
108#define GCC_REALIGN_WORDS 3 113#define GCC_REALIGN_WORDS 3
109#else 114#else
@@ -115,16 +120,15 @@ static inline unsigned long *last_aligned_frame(struct unwind_state *state)
115 return last_frame(state) - GCC_REALIGN_WORDS; 120 return last_frame(state) - GCC_REALIGN_WORDS;
116} 121}
117 122
118static bool is_last_task_frame(struct unwind_state *state) 123static bool is_last_aligned_frame(struct unwind_state *state)
119{ 124{
120 unsigned long *last_bp = last_frame(state); 125 unsigned long *last_bp = last_frame(state);
121 unsigned long *aligned_bp = last_aligned_frame(state); 126 unsigned long *aligned_bp = last_aligned_frame(state);
122 127
123 /* 128 /*
124 * We have to check for the last task frame at two different locations 129 * GCC can occasionally decide to realign the stack pointer and change
125 * because gcc can occasionally decide to realign the stack pointer and 130 * the offset of the stack frame in the prologue of a function called
126 * change the offset of the stack frame in the prologue of a function 131 * by head/entry code. Examples:
127 * called by head/entry code. Examples:
128 * 132 *
129 * <start_secondary>: 133 * <start_secondary>:
130 * push %edi 134 * push %edi
@@ -141,11 +145,38 @@ static bool is_last_task_frame(struct unwind_state *state)
141 * push %rbp 145 * push %rbp
142 * mov %rsp,%rbp 146 * mov %rsp,%rbp
143 * 147 *
144 * Note that after aligning the stack, it pushes a duplicate copy of 148 * After aligning the stack, it pushes a duplicate copy of the return
145 * the return address before pushing the frame pointer. 149 * address before pushing the frame pointer.
150 */
151 return (state->bp == aligned_bp && *(aligned_bp + 1) == *(last_bp + 1));
152}
153
154static bool is_last_ftrace_frame(struct unwind_state *state)
155{
156 unsigned long *last_bp = last_frame(state);
157 unsigned long *last_ftrace_bp = last_bp - 3;
158
159 /*
160 * When unwinding from an ftrace handler of a function called by entry
161 * code, the stack layout of the last frame is:
162 *
163 * bp
164 * parent ret addr
165 * bp
166 * function ret addr
167 * parent ret addr
168 * pt_regs
169 * -----------------
146 */ 170 */
147 return (state->bp == last_bp || 171 return (state->bp == last_ftrace_bp &&
148 (state->bp == aligned_bp && *(aligned_bp+1) == *(last_bp+1))); 172 *state->bp == *(state->bp + 2) &&
173 *(state->bp + 1) == *(state->bp + 4));
174}
175
176static bool is_last_task_frame(struct unwind_state *state)
177{
178 return is_last_frame(state) || is_last_aligned_frame(state) ||
179 is_last_ftrace_frame(state);
149} 180}
150 181
151/* 182/*
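Index view of the new ftrace check, with bp == last_ftrace_bp and addresses increasing with the index (pt_regs sits just above bp[4]); this restates the layout comment in the hunk:

	/*
	 *   bp[4]  parent ret addr    duplicate of bp[1]
	 *   bp[3]  function ret addr
	 *   bp[2]  bp                 duplicate of bp[0]
	 *   bp[1]  parent ret addr
	 *   bp[0]  bp                 <-- state->bp
	 */

which is exactly what the *bp == *(bp + 2) and *(bp + 1) == *(bp + 4) comparisons verify.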
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index a181ae76c71c..59ca2eea522c 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -780,18 +780,20 @@ out:
780static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i) 780static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
781{ 781{
782 struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i]; 782 struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
783 int j, nent = vcpu->arch.cpuid_nent; 783 struct kvm_cpuid_entry2 *ej;
784 int j = i;
785 int nent = vcpu->arch.cpuid_nent;
784 786
785 e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT; 787 e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
786 /* when no next entry is found, the current entry[i] is reselected */ 788 /* when no next entry is found, the current entry[i] is reselected */
787 for (j = i + 1; ; j = (j + 1) % nent) { 789 do {
788 struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j]; 790 j = (j + 1) % nent;
789 if (ej->function == e->function) { 791 ej = &vcpu->arch.cpuid_entries[j];
790 ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT; 792 } while (ej->function != e->function);
791 return j; 793
792 } 794 ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
793 } 795
794 return 0; /* silence gcc, even though control never reaches here */ 796 return j;
795} 797}
796 798
797/* find an entry with matching function, matching index (if needed), and that 799/* find an entry with matching function, matching index (if needed), and that
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index c25cfaf584e7..80890dee66ce 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -2742,6 +2742,7 @@ static int em_syscall(struct x86_emulate_ctxt *ctxt)
2742 ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF); 2742 ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
2743 } 2743 }
2744 2744
2745 ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
2745 return X86EMUL_CONTINUE; 2746 return X86EMUL_CONTINUE;
2746} 2747}
2747 2748
@@ -4173,7 +4174,7 @@ static int check_dr_write(struct x86_emulate_ctxt *ctxt)
4173 4174
4174static int check_svme(struct x86_emulate_ctxt *ctxt) 4175static int check_svme(struct x86_emulate_ctxt *ctxt)
4175{ 4176{
4176 u64 efer; 4177 u64 efer = 0;
4177 4178
4178 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); 4179 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4179 4180
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index c329d2894905..d24c8742d9b0 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1495,8 +1495,10 @@ EXPORT_SYMBOL_GPL(kvm_lapic_hv_timer_in_use);
1495 1495
1496static void cancel_hv_timer(struct kvm_lapic *apic) 1496static void cancel_hv_timer(struct kvm_lapic *apic)
1497{ 1497{
1498 preempt_disable();
1498 kvm_x86_ops->cancel_hv_timer(apic->vcpu); 1499 kvm_x86_ops->cancel_hv_timer(apic->vcpu);
1499 apic->lapic_timer.hv_timer_in_use = false; 1500 apic->lapic_timer.hv_timer_in_use = false;
1501 preempt_enable();
1500} 1502}
1501 1503
1502static bool start_hv_timer(struct kvm_lapic *apic) 1504static bool start_hv_timer(struct kvm_lapic *apic)
@@ -1934,7 +1936,8 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
1934 for (i = 0; i < KVM_APIC_LVT_NUM; i++) 1936 for (i = 0; i < KVM_APIC_LVT_NUM; i++)
1935 kvm_lapic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED); 1937 kvm_lapic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED);
1936 apic_update_lvtt(apic); 1938 apic_update_lvtt(apic);
1937 if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED)) 1939 if (kvm_vcpu_is_reset_bsp(vcpu) &&
1940 kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED))
1938 kvm_lapic_set_reg(apic, APIC_LVT0, 1941 kvm_lapic_set_reg(apic, APIC_LVT0,
1939 SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT)); 1942 SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT));
1940 apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0)); 1943 apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 5d3376f67794..cb8225969255 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3698,12 +3698,15 @@ static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn)
3698 return kvm_setup_async_pf(vcpu, gva, kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch); 3698 return kvm_setup_async_pf(vcpu, gva, kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
3699} 3699}
3700 3700
3701static bool can_do_async_pf(struct kvm_vcpu *vcpu) 3701bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu)
3702{ 3702{
3703 if (unlikely(!lapic_in_kernel(vcpu) || 3703 if (unlikely(!lapic_in_kernel(vcpu) ||
3704 kvm_event_needs_reinjection(vcpu))) 3704 kvm_event_needs_reinjection(vcpu)))
3705 return false; 3705 return false;
3706 3706
3707 if (is_guest_mode(vcpu))
3708 return false;
3709
3707 return kvm_x86_ops->interrupt_allowed(vcpu); 3710 return kvm_x86_ops->interrupt_allowed(vcpu);
3708} 3711}
3709 3712
@@ -3719,7 +3722,7 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
3719 if (!async) 3722 if (!async)
3720 return false; /* *pfn has correct page already */ 3723 return false; /* *pfn has correct page already */
3721 3724
3722 if (!prefault && can_do_async_pf(vcpu)) { 3725 if (!prefault && kvm_can_do_async_pf(vcpu)) {
3723 trace_kvm_try_async_get_page(gva, gfn); 3726 trace_kvm_try_async_get_page(gva, gfn);
3724 if (kvm_find_async_pf_gfn(vcpu, gfn)) { 3727 if (kvm_find_async_pf_gfn(vcpu, gfn)) {
3725 trace_kvm_async_pf_doublefault(gva, gfn); 3728 trace_kvm_async_pf_doublefault(gva, gfn);
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 27975807cc64..330bf3a811fb 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -76,6 +76,7 @@ int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct);
76void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu); 76void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu);
77void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly, 77void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
78 bool accessed_dirty); 78 bool accessed_dirty);
79bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
79 80
80static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm) 81static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
81{ 82{
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 56241746abbd..b0454c7e4cff 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -283,11 +283,13 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
283 pt_element_t pte; 283 pt_element_t pte;
284 pt_element_t __user *uninitialized_var(ptep_user); 284 pt_element_t __user *uninitialized_var(ptep_user);
285 gfn_t table_gfn; 285 gfn_t table_gfn;
286 unsigned index, pt_access, pte_access, accessed_dirty, pte_pkey; 286 u64 pt_access, pte_access;
287 unsigned index, accessed_dirty, pte_pkey;
287 unsigned nested_access; 288 unsigned nested_access;
288 gpa_t pte_gpa; 289 gpa_t pte_gpa;
289 bool have_ad; 290 bool have_ad;
290 int offset; 291 int offset;
292 u64 walk_nx_mask = 0;
291 const int write_fault = access & PFERR_WRITE_MASK; 293 const int write_fault = access & PFERR_WRITE_MASK;
292 const int user_fault = access & PFERR_USER_MASK; 294 const int user_fault = access & PFERR_USER_MASK;
293 const int fetch_fault = access & PFERR_FETCH_MASK; 295 const int fetch_fault = access & PFERR_FETCH_MASK;
@@ -302,6 +304,7 @@ retry_walk:
302 have_ad = PT_HAVE_ACCESSED_DIRTY(mmu); 304 have_ad = PT_HAVE_ACCESSED_DIRTY(mmu);
303 305
304#if PTTYPE == 64 306#if PTTYPE == 64
307 walk_nx_mask = 1ULL << PT64_NX_SHIFT;
305 if (walker->level == PT32E_ROOT_LEVEL) { 308 if (walker->level == PT32E_ROOT_LEVEL) {
306 pte = mmu->get_pdptr(vcpu, (addr >> 30) & 3); 309 pte = mmu->get_pdptr(vcpu, (addr >> 30) & 3);
307 trace_kvm_mmu_paging_element(pte, walker->level); 310 trace_kvm_mmu_paging_element(pte, walker->level);
@@ -313,8 +316,6 @@ retry_walk:
313 walker->max_level = walker->level; 316 walker->max_level = walker->level;
314 ASSERT(!(is_long_mode(vcpu) && !is_pae(vcpu))); 317 ASSERT(!(is_long_mode(vcpu) && !is_pae(vcpu)));
315 318
316 accessed_dirty = have_ad ? PT_GUEST_ACCESSED_MASK : 0;
317
318 /* 319 /*
319 * FIXME: on Intel processors, loads of the PDPTE registers for PAE paging 320 * FIXME: on Intel processors, loads of the PDPTE registers for PAE paging
320 * by the MOV to CR instruction are treated as reads and do not cause the 321 * by the MOV to CR instruction are treated as reads and do not cause the
@@ -322,14 +323,14 @@ retry_walk:
322 */ 323 */
323 nested_access = (have_ad ? PFERR_WRITE_MASK : 0) | PFERR_USER_MASK; 324 nested_access = (have_ad ? PFERR_WRITE_MASK : 0) | PFERR_USER_MASK;
324 325
325 pt_access = pte_access = ACC_ALL; 326 pte_access = ~0;
326 ++walker->level; 327 ++walker->level;
327 328
328 do { 329 do {
329 gfn_t real_gfn; 330 gfn_t real_gfn;
330 unsigned long host_addr; 331 unsigned long host_addr;
331 332
332 pt_access &= pte_access; 333 pt_access = pte_access;
333 --walker->level; 334 --walker->level;
334 335
335 index = PT_INDEX(addr, walker->level); 336 index = PT_INDEX(addr, walker->level);
@@ -371,6 +372,12 @@ retry_walk:
371 372
372 trace_kvm_mmu_paging_element(pte, walker->level); 373 trace_kvm_mmu_paging_element(pte, walker->level);
373 374
375 /*
376 * Inverting the NX bit lets us AND it like the other
377 * permission bits.
378 */
379 pte_access = pt_access & (pte ^ walk_nx_mask);
380
374 if (unlikely(!FNAME(is_present_gpte)(pte))) 381 if (unlikely(!FNAME(is_present_gpte)(pte)))
375 goto error; 382 goto error;
376 383
@@ -379,14 +386,16 @@ retry_walk:
379 goto error; 386 goto error;
380 } 387 }
381 388
382 accessed_dirty &= pte;
383 pte_access = pt_access & FNAME(gpte_access)(vcpu, pte);
384
385 walker->ptes[walker->level - 1] = pte; 389 walker->ptes[walker->level - 1] = pte;
386 } while (!is_last_gpte(mmu, walker->level, pte)); 390 } while (!is_last_gpte(mmu, walker->level, pte));
387 391
388 pte_pkey = FNAME(gpte_pkeys)(vcpu, pte); 392 pte_pkey = FNAME(gpte_pkeys)(vcpu, pte);
389 errcode = permission_fault(vcpu, mmu, pte_access, pte_pkey, access); 393 accessed_dirty = have_ad ? pte_access & PT_GUEST_ACCESSED_MASK : 0;
394
395 /* Convert to ACC_*_MASK flags for struct guest_walker. */
396 walker->pt_access = FNAME(gpte_access)(vcpu, pt_access ^ walk_nx_mask);
397 walker->pte_access = FNAME(gpte_access)(vcpu, pte_access ^ walk_nx_mask);
398 errcode = permission_fault(vcpu, mmu, walker->pte_access, pte_pkey, access);
390 if (unlikely(errcode)) 399 if (unlikely(errcode))
391 goto error; 400 goto error;
392 401
@@ -403,7 +412,7 @@ retry_walk:
403 walker->gfn = real_gpa >> PAGE_SHIFT; 412 walker->gfn = real_gpa >> PAGE_SHIFT;
404 413
405 if (!write_fault) 414 if (!write_fault)
406 FNAME(protect_clean_gpte)(mmu, &pte_access, pte); 415 FNAME(protect_clean_gpte)(mmu, &walker->pte_access, pte);
407 else 416 else
408 /* 417 /*
409 * On a write fault, fold the dirty bit into accessed_dirty. 418 * On a write fault, fold the dirty bit into accessed_dirty.
@@ -421,10 +430,8 @@ retry_walk:
421 goto retry_walk; 430 goto retry_walk;
422 } 431 }
423 432
424 walker->pt_access = pt_access;
425 walker->pte_access = pte_access;
426 pgprintk("%s: pte %llx pte_access %x pt_access %x\n", 433 pgprintk("%s: pte %llx pte_access %x pt_access %x\n",
427 __func__, (u64)pte, pte_access, pt_access); 434 __func__, (u64)pte, walker->pte_access, walker->pt_access);
428 return 1; 435 return 1;
429 436
430error: 437error:
@@ -452,7 +459,7 @@ error:
452 */ 459 */
453 if (!(errcode & PFERR_RSVD_MASK)) { 460 if (!(errcode & PFERR_RSVD_MASK)) {
454 vcpu->arch.exit_qualification &= 0x187; 461 vcpu->arch.exit_qualification &= 0x187;
455 vcpu->arch.exit_qualification |= ((pt_access & pte) & 0x7) << 3; 462 vcpu->arch.exit_qualification |= (pte_access & 0x7) << 3;
456 } 463 }
457#endif 464#endif
458 walker->fault.address = addr; 465 walker->fault.address = addr;
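A standalone sketch of the inverted-NX trick used above (hypothetical code, not a kernel API): allow-bits such as R/W/U accumulate across page-table levels with a plain AND, so NX - a deny bit - is XOR-flipped into an "exec allowed" bit on the way in and flipped back on the way out:

	#include <stdint.h>

	#define NX_BIT (1ULL << 63)

	uint64_t combined_access(const uint64_t *ptes, int levels)
	{
		uint64_t access = ~0ULL;	/* fully permissive, like pte_access = ~0 */

		for (int i = 0; i < levels; i++)
			access &= ptes[i] ^ NX_BIT;	/* NX=0 becomes 1: "may execute" survives the AND */

		return access ^ NX_BIT;		/* restore architectural polarity */
	}

If any level sets NX, the flipped bit is 0 there, the AND clears it, and the final XOR reports NX=1 - execution denied - just as walker->pte_access ends up above.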
diff --git a/arch/x86/kvm/pmu_intel.c b/arch/x86/kvm/pmu_intel.c
index 9d4a8504a95a..5ab4a364348e 100644
--- a/arch/x86/kvm/pmu_intel.c
+++ b/arch/x86/kvm/pmu_intel.c
@@ -294,7 +294,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
294 ((u64)1 << edx.split.bit_width_fixed) - 1; 294 ((u64)1 << edx.split.bit_width_fixed) - 1;
295 } 295 }
296 296
297 pmu->global_ctrl = ((1 << pmu->nr_arch_gp_counters) - 1) | 297 pmu->global_ctrl = ((1ull << pmu->nr_arch_gp_counters) - 1) |
298 (((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED); 298 (((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED);
299 pmu->global_ctrl_mask = ~pmu->global_ctrl; 299 pmu->global_ctrl_mask = ~pmu->global_ctrl;
300 300
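The 1 -> 1ull change is an undefined-behaviour fix that becomes visible once nr_arch_gp_counters can reach 32:

	(1    << 32) - 1	/* undefined: shift count >= width of int */
	(1ull << 32) - 1	/* 0xffffffff: all 32 GP-counter bits set */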
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index c27ac6923a18..ba9891ac5c56 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1272,7 +1272,8 @@ static void init_vmcb(struct vcpu_svm *svm)
1272 1272
1273} 1273}
1274 1274
1275static u64 *avic_get_physical_id_entry(struct kvm_vcpu *vcpu, int index) 1275static u64 *avic_get_physical_id_entry(struct kvm_vcpu *vcpu,
1276 unsigned int index)
1276{ 1277{
1277 u64 *avic_physical_id_table; 1278 u64 *avic_physical_id_table;
1278 struct kvm_arch *vm_data = &vcpu->kvm->arch; 1279 struct kvm_arch *vm_data = &vcpu->kvm->arch;
@@ -1806,7 +1807,7 @@ static void svm_get_segment(struct kvm_vcpu *vcpu,
1806 * AMD's VMCB does not have an explicit unusable field, so emulate it 1807 * AMD's VMCB does not have an explicit unusable field, so emulate it
1807 * for cross vendor migration purposes by "not present" 1808 * for cross vendor migration purposes by "not present"
1808 */ 1809 */
1809 var->unusable = !var->present || (var->type == 0); 1810 var->unusable = !var->present;
1810 1811
1811 switch (seg) { 1812 switch (seg) {
1812 case VCPU_SREG_TR: 1813 case VCPU_SREG_TR:
@@ -1839,6 +1840,7 @@ static void svm_get_segment(struct kvm_vcpu *vcpu,
1839 */ 1840 */
1840 if (var->unusable) 1841 if (var->unusable)
1841 var->db = 0; 1842 var->db = 0;
1843 /* This is symmetric with svm_set_segment() */
1842 var->dpl = to_svm(vcpu)->vmcb->save.cpl; 1844 var->dpl = to_svm(vcpu)->vmcb->save.cpl;
1843 break; 1845 break;
1844 } 1846 }
@@ -1979,18 +1981,14 @@ static void svm_set_segment(struct kvm_vcpu *vcpu,
1979 s->base = var->base; 1981 s->base = var->base;
1980 s->limit = var->limit; 1982 s->limit = var->limit;
1981 s->selector = var->selector; 1983 s->selector = var->selector;
1982 if (var->unusable) 1984 s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
1983 s->attrib = 0; 1985 s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
1984 else { 1986 s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
1985 s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK); 1987 s->attrib |= ((var->present & 1) && !var->unusable) << SVM_SELECTOR_P_SHIFT;
1986 s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT; 1988 s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
1987 s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT; 1989 s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
1988 s->attrib |= (var->present & 1) << SVM_SELECTOR_P_SHIFT; 1990 s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
1989 s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT; 1991 s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
1990 s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
1991 s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
1992 s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
1993 }
1994 1992
1995 /* 1993 /*
1996 * This is always accurate, except if SYSRET returned to a segment 1994 * This is always accurate, except if SYSRET returned to a segment
@@ -1999,7 +1997,8 @@ static void svm_set_segment(struct kvm_vcpu *vcpu,
1999 * would entail passing the CPL to userspace and back. 1997 * would entail passing the CPL to userspace and back.
2000 */ 1998 */
2001 if (seg == VCPU_SREG_SS) 1999 if (seg == VCPU_SREG_SS)
2002 svm->vmcb->save.cpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3; 2000 /* This is symmetric with svm_get_segment() */
2001 svm->vmcb->save.cpl = (var->dpl & 3);
2003 2002
2004 mark_dirty(svm->vmcb, VMCB_SEG); 2003 mark_dirty(svm->vmcb, VMCB_SEG);
2005} 2004}
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index c6f4ad44aa95..ca5d2b93385c 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2425,7 +2425,7 @@ static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned nr)
2425 if (!(vmcs12->exception_bitmap & (1u << nr))) 2425 if (!(vmcs12->exception_bitmap & (1u << nr)))
2426 return 0; 2426 return 0;
2427 2427
2428 nested_vmx_vmexit(vcpu, to_vmx(vcpu)->exit_reason, 2428 nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
2429 vmcs_read32(VM_EXIT_INTR_INFO), 2429 vmcs_read32(VM_EXIT_INTR_INFO),
2430 vmcs_readl(EXIT_QUALIFICATION)); 2430 vmcs_readl(EXIT_QUALIFICATION));
2431 return 1; 2431 return 1;
@@ -6504,7 +6504,7 @@ static __init int hardware_setup(void)
6504 enable_ept_ad_bits = 0; 6504 enable_ept_ad_bits = 0;
6505 } 6505 }
6506 6506
6507 if (!cpu_has_vmx_ept_ad_bits()) 6507 if (!cpu_has_vmx_ept_ad_bits() || !enable_ept)
6508 enable_ept_ad_bits = 0; 6508 enable_ept_ad_bits = 0;
6509 6509
6510 if (!cpu_has_vmx_unrestricted_guest()) 6510 if (!cpu_has_vmx_unrestricted_guest())
@@ -6914,97 +6914,21 @@ static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
6914 return 0; 6914 return 0;
6915} 6915}
6916 6916
6917/* 6917static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer)
6918 * This function performs the various checks including
6919 * - if it's 4KB aligned
6920 * - No bits beyond the physical address width are set
6921 * - Returns 0 on success or else 1
6922 * (Intel SDM Section 30.3)
6923 */
6924static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason,
6925 gpa_t *vmpointer)
6926{ 6918{
6927 gva_t gva; 6919 gva_t gva;
6928 gpa_t vmptr;
6929 struct x86_exception e; 6920 struct x86_exception e;
6930 struct page *page;
6931 struct vcpu_vmx *vmx = to_vmx(vcpu);
6932 int maxphyaddr = cpuid_maxphyaddr(vcpu);
6933 6921
6934 if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION), 6922 if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
6935 vmcs_read32(VMX_INSTRUCTION_INFO), false, &gva)) 6923 vmcs_read32(VMX_INSTRUCTION_INFO), false, &gva))
6936 return 1; 6924 return 1;
6937 6925
6938 if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vmptr, 6926 if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, vmpointer,
6939 sizeof(vmptr), &e)) { 6927 sizeof(*vmpointer), &e)) {
6940 kvm_inject_page_fault(vcpu, &e); 6928 kvm_inject_page_fault(vcpu, &e);
6941 return 1; 6929 return 1;
6942 } 6930 }
6943 6931
6944 switch (exit_reason) {
6945 case EXIT_REASON_VMON:
6946 /*
6947 * SDM 3: 24.11.5
6948 * The first 4 bytes of VMXON region contain the supported
6949 * VMCS revision identifier
6950 *
6951 * Note - IA32_VMX_BASIC[48] will never be 1
6952 * for the nested case;
6953 * which replaces physical address width with 32
6954 *
6955 */
6956 if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
6957 nested_vmx_failInvalid(vcpu);
6958 return kvm_skip_emulated_instruction(vcpu);
6959 }
6960
6961 page = nested_get_page(vcpu, vmptr);
6962 if (page == NULL) {
6963 nested_vmx_failInvalid(vcpu);
6964 return kvm_skip_emulated_instruction(vcpu);
6965 }
6966 if (*(u32 *)kmap(page) != VMCS12_REVISION) {
6967 kunmap(page);
6968 nested_release_page_clean(page);
6969 nested_vmx_failInvalid(vcpu);
6970 return kvm_skip_emulated_instruction(vcpu);
6971 }
6972 kunmap(page);
6973 nested_release_page_clean(page);
6974 vmx->nested.vmxon_ptr = vmptr;
6975 break;
6976 case EXIT_REASON_VMCLEAR:
6977 if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
6978 nested_vmx_failValid(vcpu,
6979 VMXERR_VMCLEAR_INVALID_ADDRESS);
6980 return kvm_skip_emulated_instruction(vcpu);
6981 }
6982
6983 if (vmptr == vmx->nested.vmxon_ptr) {
6984 nested_vmx_failValid(vcpu,
6985 VMXERR_VMCLEAR_VMXON_POINTER);
6986 return kvm_skip_emulated_instruction(vcpu);
6987 }
6988 break;
6989 case EXIT_REASON_VMPTRLD:
6990 if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
6991 nested_vmx_failValid(vcpu,
6992 VMXERR_VMPTRLD_INVALID_ADDRESS);
6993 return kvm_skip_emulated_instruction(vcpu);
6994 }
6995
6996 if (vmptr == vmx->nested.vmxon_ptr) {
6997 nested_vmx_failValid(vcpu,
6998 VMXERR_VMPTRLD_VMXON_POINTER);
6999 return kvm_skip_emulated_instruction(vcpu);
7000 }
7001 break;
7002 default:
7003 return 1; /* shouldn't happen */
7004 }
7005
7006 if (vmpointer)
7007 *vmpointer = vmptr;
7008 return 0; 6932 return 0;
7009} 6933}
7010 6934
@@ -7066,6 +6990,8 @@ out_msr_bitmap:
7066static int handle_vmon(struct kvm_vcpu *vcpu) 6990static int handle_vmon(struct kvm_vcpu *vcpu)
7067{ 6991{
7068 int ret; 6992 int ret;
6993 gpa_t vmptr;
6994 struct page *page;
7069 struct vcpu_vmx *vmx = to_vmx(vcpu); 6995 struct vcpu_vmx *vmx = to_vmx(vcpu);
7070 const u64 VMXON_NEEDED_FEATURES = FEATURE_CONTROL_LOCKED 6996 const u64 VMXON_NEEDED_FEATURES = FEATURE_CONTROL_LOCKED
7071 | FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX; 6997 | FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
@@ -7095,9 +7021,37 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
7095 return 1; 7021 return 1;
7096 } 7022 }
7097 7023
7098 if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMON, NULL)) 7024 if (nested_vmx_get_vmptr(vcpu, &vmptr))
7099 return 1; 7025 return 1;
7100 7026
7027 /*
7028 * SDM 3: 24.11.5
7029 * The first 4 bytes of the VMXON region contain the supported
7030 * VMCS revision identifier
7031 *
7032 * Note - IA32_VMX_BASIC[48] will never be 1 for the nested case,
7033 * which would replace the physical address width with 32
7034 */
7035 if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) {
7036 nested_vmx_failInvalid(vcpu);
7037 return kvm_skip_emulated_instruction(vcpu);
7038 }
7039
7040 page = nested_get_page(vcpu, vmptr);
7041 if (page == NULL) {
7042 nested_vmx_failInvalid(vcpu);
7043 return kvm_skip_emulated_instruction(vcpu);
7044 }
7045 if (*(u32 *)kmap(page) != VMCS12_REVISION) {
7046 kunmap(page);
7047 nested_release_page_clean(page);
7048 nested_vmx_failInvalid(vcpu);
7049 return kvm_skip_emulated_instruction(vcpu);
7050 }
7051 kunmap(page);
7052 nested_release_page_clean(page);
7053
7054 vmx->nested.vmxon_ptr = vmptr;
7101 ret = enter_vmx_operation(vcpu); 7055 ret = enter_vmx_operation(vcpu);
7102 if (ret) 7056 if (ret)
7103 return ret; 7057 return ret;
@@ -7213,9 +7167,19 @@ static int handle_vmclear(struct kvm_vcpu *vcpu)
7213 if (!nested_vmx_check_permission(vcpu)) 7167 if (!nested_vmx_check_permission(vcpu))
7214 return 1; 7168 return 1;
7215 7169
7216 if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMCLEAR, &vmptr)) 7170 if (nested_vmx_get_vmptr(vcpu, &vmptr))
7217 return 1; 7171 return 1;
7218 7172
7173 if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) {
7174 nested_vmx_failValid(vcpu, VMXERR_VMCLEAR_INVALID_ADDRESS);
7175 return kvm_skip_emulated_instruction(vcpu);
7176 }
7177
7178 if (vmptr == vmx->nested.vmxon_ptr) {
7179 nested_vmx_failValid(vcpu, VMXERR_VMCLEAR_VMXON_POINTER);
7180 return kvm_skip_emulated_instruction(vcpu);
7181 }
7182
7219 if (vmptr == vmx->nested.current_vmptr) 7183 if (vmptr == vmx->nested.current_vmptr)
7220 nested_release_vmcs12(vmx); 7184 nested_release_vmcs12(vmx);
7221 7185
@@ -7545,9 +7509,19 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
7545 if (!nested_vmx_check_permission(vcpu)) 7509 if (!nested_vmx_check_permission(vcpu))
7546 return 1; 7510 return 1;
7547 7511
7548 if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMPTRLD, &vmptr)) 7512 if (nested_vmx_get_vmptr(vcpu, &vmptr))
7549 return 1; 7513 return 1;
7550 7514
7515 if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) {
7516 nested_vmx_failValid(vcpu, VMXERR_VMPTRLD_INVALID_ADDRESS);
7517 return kvm_skip_emulated_instruction(vcpu);
7518 }
7519
7520 if (vmptr == vmx->nested.vmxon_ptr) {
7521 nested_vmx_failValid(vcpu, VMXERR_VMPTRLD_VMXON_POINTER);
7522 return kvm_skip_emulated_instruction(vcpu);
7523 }
7524
7551 if (vmx->nested.current_vmptr != vmptr) { 7525 if (vmx->nested.current_vmptr != vmptr) {
7552 struct vmcs12 *new_vmcs12; 7526 struct vmcs12 *new_vmcs12;
7553 struct page *page; 7527 struct page *page;
@@ -7913,11 +7887,13 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
7913{ 7887{
7914 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); 7888 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
7915 int cr = exit_qualification & 15; 7889 int cr = exit_qualification & 15;
7916 int reg = (exit_qualification >> 8) & 15; 7890 int reg;
7917 unsigned long val = kvm_register_readl(vcpu, reg); 7891 unsigned long val;
7918 7892
7919 switch ((exit_qualification >> 4) & 3) { 7893 switch ((exit_qualification >> 4) & 3) {
7920 case 0: /* mov to cr */ 7894 case 0: /* mov to cr */
7895 reg = (exit_qualification >> 8) & 15;
7896 val = kvm_register_readl(vcpu, reg);
7921 switch (cr) { 7897 switch (cr) {
7922 case 0: 7898 case 0:
7923 if (vmcs12->cr0_guest_host_mask & 7899 if (vmcs12->cr0_guest_host_mask &
@@ -7972,6 +7948,7 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
7972 * lmsw can change bits 1..3 of cr0, and only set bit 0 of 7948 * lmsw can change bits 1..3 of cr0, and only set bit 0 of
7973 * cr0. Other attempted changes are ignored, with no exit. 7949 * cr0. Other attempted changes are ignored, with no exit.
7974 */ 7950 */
7951 val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f;
7975 if (vmcs12->cr0_guest_host_mask & 0xe & 7952 if (vmcs12->cr0_guest_host_mask & 0xe &
7976 (val ^ vmcs12->cr0_read_shadow)) 7953 (val ^ vmcs12->cr0_read_shadow))
7977 return true; 7954 return true;
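The hoisted reg/val reads were wrong because the exit qualification encodes different fields depending on the access type, so decoding them up front read meaningless bits for CLTS and LMSW. In sketch form (field positions taken from the SDM's CR-access exit-qualification table, not from this hunk):

	cr   = eq & 15;			/* bits 3:0   - CR number */
	type = (eq >> 4) & 3;		/* bits 5:4   - 0 mov-to, 1 mov-from, 2 clts, 3 lmsw */
	reg  = (eq >> 8) & 15;		/* bits 11:8  - GP register, MOV forms only */
	data = (eq >> 16) & 0xffff;	/* bits 31:16 - LMSW source data */

The hunk masks the LMSW data with 0x0f because lmsw can only affect CR0 bits 0-3.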
@@ -11213,7 +11190,7 @@ static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu)
11213 if (!nested_cpu_has_pml(vmcs12)) 11190 if (!nested_cpu_has_pml(vmcs12))
11214 return 0; 11191 return 0;
11215 11192
11216 if (vmcs12->guest_pml_index > PML_ENTITY_NUM) { 11193 if (vmcs12->guest_pml_index >= PML_ENTITY_NUM) {
11217 vmx->nested.pml_full = true; 11194 vmx->nested.pml_full = true;
11218 return 1; 11195 return 1;
11219 } 11196 }
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 464da936c53d..0e846f0cb83b 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1763,6 +1763,7 @@ u64 get_kvmclock_ns(struct kvm *kvm)
1763{ 1763{
1764 struct kvm_arch *ka = &kvm->arch; 1764 struct kvm_arch *ka = &kvm->arch;
1765 struct pvclock_vcpu_time_info hv_clock; 1765 struct pvclock_vcpu_time_info hv_clock;
1766 u64 ret;
1766 1767
1767 spin_lock(&ka->pvclock_gtod_sync_lock); 1768 spin_lock(&ka->pvclock_gtod_sync_lock);
1768 if (!ka->use_master_clock) { 1769 if (!ka->use_master_clock) {
@@ -1774,10 +1775,17 @@ u64 get_kvmclock_ns(struct kvm *kvm)
1774 hv_clock.system_time = ka->master_kernel_ns + ka->kvmclock_offset; 1775 hv_clock.system_time = ka->master_kernel_ns + ka->kvmclock_offset;
1775 spin_unlock(&ka->pvclock_gtod_sync_lock); 1776 spin_unlock(&ka->pvclock_gtod_sync_lock);
1776 1777
1778 /* both __this_cpu_read() and rdtsc() should be on the same cpu */
1779 get_cpu();
1780
1777 kvm_get_time_scale(NSEC_PER_SEC, __this_cpu_read(cpu_tsc_khz) * 1000LL, 1781 kvm_get_time_scale(NSEC_PER_SEC, __this_cpu_read(cpu_tsc_khz) * 1000LL,
1778 &hv_clock.tsc_shift, 1782 &hv_clock.tsc_shift,
1779 &hv_clock.tsc_to_system_mul); 1783 &hv_clock.tsc_to_system_mul);
1780 return __pvclock_read_cycles(&hv_clock, rdtsc()); 1784 ret = __pvclock_read_cycles(&hv_clock, rdtsc());
1785
1786 put_cpu();
1787
1788 return ret;
1781} 1789}
1782 1790
1783static void kvm_setup_pvclock_page(struct kvm_vcpu *v) 1791static void kvm_setup_pvclock_page(struct kvm_vcpu *v)
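get_cpu()/put_cpu() is the standard pattern for keeping a short sequence on one CPU: get_cpu() disables preemption and returns the current CPU id, put_cpu() re-enables it. Minimal sketch of what the hunk applies:

	int cpu = get_cpu();	/* no migration from here ... */

	/* __this_cpu_read(cpu_tsc_khz) and rdtsc() both observe 'cpu' */

	put_cpu();		/* ... to here */

Without it, a preemption between the two reads could pair one CPU's TSC frequency with another CPU's TSC value.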
@@ -3288,11 +3296,14 @@ static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
3288 } 3296 }
3289} 3297}
3290 3298
3299#define XSAVE_MXCSR_OFFSET 24
3300
3291static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu, 3301static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
3292 struct kvm_xsave *guest_xsave) 3302 struct kvm_xsave *guest_xsave)
3293{ 3303{
3294 u64 xstate_bv = 3304 u64 xstate_bv =
3295 *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)]; 3305 *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)];
3306 u32 mxcsr = *(u32 *)&guest_xsave->region[XSAVE_MXCSR_OFFSET / sizeof(u32)];
3296 3307
3297 if (boot_cpu_has(X86_FEATURE_XSAVE)) { 3308 if (boot_cpu_has(X86_FEATURE_XSAVE)) {
3298 /* 3309 /*
@@ -3300,11 +3311,13 @@ static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
3300 * CPUID leaf 0xD, index 0, EDX:EAX. This is for compatibility 3311 * CPUID leaf 0xD, index 0, EDX:EAX. This is for compatibility
3301 * with old userspace. 3312 * with old userspace.
3302 */ 3313 */
3303 if (xstate_bv & ~kvm_supported_xcr0()) 3314 if (xstate_bv & ~kvm_supported_xcr0() ||
3315 mxcsr & ~mxcsr_feature_mask)
3304 return -EINVAL; 3316 return -EINVAL;
3305 load_xsave(vcpu, (u8 *)guest_xsave->region); 3317 load_xsave(vcpu, (u8 *)guest_xsave->region);
3306 } else { 3318 } else {
3307 if (xstate_bv & ~XFEATURE_MASK_FPSSE) 3319 if (xstate_bv & ~XFEATURE_MASK_FPSSE ||
3320 mxcsr & ~mxcsr_feature_mask)
3308 return -EINVAL; 3321 return -EINVAL;
3309 memcpy(&vcpu->arch.guest_fpu.state.fxsave, 3322 memcpy(&vcpu->arch.guest_fpu.state.fxsave,
3310 guest_xsave->region, sizeof(struct fxregs_state)); 3323 guest_xsave->region, sizeof(struct fxregs_state));
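XSAVE_MXCSR_OFFSET is 24 because MXCSR occupies bytes 24..27 of the legacy FXSAVE image embedded at the start of the XSAVE area, so with the u32-indexed region[] array:

	guest_xsave->region[XSAVE_MXCSR_OFFSET / sizeof(u32)]	/* == region[6] */

Rejecting reserved MXCSR bits here matters because the value is later loaded into hardware, and loading reserved bits faults (assumption: #GP from FXRSTOR/XRSTOR/LDMXCSR), which a malicious KVM_SET_XSAVE payload could otherwise trigger in the host.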
@@ -4818,16 +4831,20 @@ emul_write:
4818 4831
4819static int kernel_pio(struct kvm_vcpu *vcpu, void *pd) 4832static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
4820{ 4833{
4821 /* TODO: String I/O for in kernel device */ 4834 int r = 0, i;
4822 int r;
4823 4835
4824 if (vcpu->arch.pio.in) 4836 for (i = 0; i < vcpu->arch.pio.count; i++) {
4825 r = kvm_io_bus_read(vcpu, KVM_PIO_BUS, vcpu->arch.pio.port, 4837 if (vcpu->arch.pio.in)
4826 vcpu->arch.pio.size, pd); 4838 r = kvm_io_bus_read(vcpu, KVM_PIO_BUS, vcpu->arch.pio.port,
4827 else 4839 vcpu->arch.pio.size, pd);
4828 r = kvm_io_bus_write(vcpu, KVM_PIO_BUS, 4840 else
4829 vcpu->arch.pio.port, vcpu->arch.pio.size, 4841 r = kvm_io_bus_write(vcpu, KVM_PIO_BUS,
4830 pd); 4842 vcpu->arch.pio.port, vcpu->arch.pio.size,
4843 pd);
4844 if (r)
4845 break;
4846 pd += vcpu->arch.pio.size;
4847 }
4831 return r; 4848 return r;
4832} 4849}
4833 4850
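The new loop is what makes REP INS/OUTS work against in-kernel devices: previously only the first element ever reached the bus. Each of pio.count elements of pio.size bytes is now forwarded individually, with the data pointer advanced between iterations. Unrolled, a 4-byte OUTS with count == 3 issues:

	kvm_io_bus_write(vcpu, KVM_PIO_BUS, port, 4, pd);	/* element 0 */
	kvm_io_bus_write(vcpu, KVM_PIO_BUS, port, 4, pd + 4);	/* element 1 */
	kvm_io_bus_write(vcpu, KVM_PIO_BUS, port, 4, pd + 8);	/* element 2 */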
@@ -4865,6 +4882,8 @@ static int emulator_pio_in_emulated(struct x86_emulate_ctxt *ctxt,
4865 if (vcpu->arch.pio.count) 4882 if (vcpu->arch.pio.count)
4866 goto data_avail; 4883 goto data_avail;
4867 4884
4885 memset(vcpu->arch.pio_data, 0, size * count);
4886
4868 ret = emulator_pio_in_out(vcpu, size, port, val, count, true); 4887 ret = emulator_pio_in_out(vcpu, size, port, val, count, true);
4869 if (ret) { 4888 if (ret) {
4870data_avail: 4889data_avail:
@@ -5048,6 +5067,8 @@ static bool emulator_get_segment(struct x86_emulate_ctxt *ctxt, u16 *selector,
5048 5067
5049 if (var.unusable) { 5068 if (var.unusable) {
5050 memset(desc, 0, sizeof(*desc)); 5069 memset(desc, 0, sizeof(*desc));
5070 if (base3)
5071 *base3 = 0;
5051 return false; 5072 return false;
5052 } 5073 }
5053 5074
@@ -5292,6 +5313,8 @@ static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
5292 kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l); 5313 kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
5293 5314
5294 ctxt->eflags = kvm_get_rflags(vcpu); 5315 ctxt->eflags = kvm_get_rflags(vcpu);
5316 ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
5317
5295 ctxt->eip = kvm_rip_read(vcpu); 5318 ctxt->eip = kvm_rip_read(vcpu);
5296 ctxt->mode = (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL : 5319 ctxt->mode = (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL :
5297 (ctxt->eflags & X86_EFLAGS_VM) ? X86EMUL_MODE_VM86 : 5320 (ctxt->eflags & X86_EFLAGS_VM) ? X86EMUL_MODE_VM86 :
@@ -5507,36 +5530,25 @@ static int kvm_vcpu_check_hw_bp(unsigned long addr, u32 type, u32 dr7,
5507 return dr6; 5530 return dr6;
5508} 5531}
5509 5532
5510static void kvm_vcpu_check_singlestep(struct kvm_vcpu *vcpu, unsigned long rflags, int *r) 5533static void kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu, int *r)
5511{ 5534{
5512 struct kvm_run *kvm_run = vcpu->run; 5535 struct kvm_run *kvm_run = vcpu->run;
5513 5536
5514 /* 5537 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
5515 * rflags is the old, "raw" value of the flags. The new value has 5538 kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1 | DR6_RTM;
5516 * not been saved yet. 5539 kvm_run->debug.arch.pc = vcpu->arch.singlestep_rip;
5517 * 5540 kvm_run->debug.arch.exception = DB_VECTOR;
5518 * This is correct even for TF set by the guest, because "the 5541 kvm_run->exit_reason = KVM_EXIT_DEBUG;
5519 * processor will not generate this exception after the instruction 5542 *r = EMULATE_USER_EXIT;
5520 * that sets the TF flag". 5543 } else {
5521 */ 5544 /*
5522 if (unlikely(rflags & X86_EFLAGS_TF)) { 5545 * "Certain debug exceptions may clear bit 0-3. The
5523 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) { 5546 * remaining contents of the DR6 register are never
5524 kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1 | 5547 * cleared by the processor".
5525 DR6_RTM; 5548 */
5526 kvm_run->debug.arch.pc = vcpu->arch.singlestep_rip; 5549 vcpu->arch.dr6 &= ~15;
5527 kvm_run->debug.arch.exception = DB_VECTOR; 5550 vcpu->arch.dr6 |= DR6_BS | DR6_RTM;
5528 kvm_run->exit_reason = KVM_EXIT_DEBUG; 5551 kvm_queue_exception(vcpu, DB_VECTOR);
5529 *r = EMULATE_USER_EXIT;
5530 } else {
5531 /*
5532 * "Certain debug exceptions may clear bit 0-3. The
5533 * remaining contents of the DR6 register are never
5534 * cleared by the processor".
5535 */
5536 vcpu->arch.dr6 &= ~15;
5537 vcpu->arch.dr6 |= DR6_BS | DR6_RTM;
5538 kvm_queue_exception(vcpu, DB_VECTOR);
5539 }
5540 } 5552 }
5541} 5553}
5542 5554
@@ -5546,7 +5558,17 @@ int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu)
5546 int r = EMULATE_DONE; 5558 int r = EMULATE_DONE;
5547 5559
5548 kvm_x86_ops->skip_emulated_instruction(vcpu); 5560 kvm_x86_ops->skip_emulated_instruction(vcpu);
5549 kvm_vcpu_check_singlestep(vcpu, rflags, &r); 5561
5562 /*
5563 * rflags is the old, "raw" value of the flags. The new value has
5564 * not been saved yet.
5565 *
5566 * This is correct even for TF set by the guest, because "the
5567 * processor will not generate this exception after the instruction
5568 * that sets the TF flag".
5569 */
5570 if (unlikely(rflags & X86_EFLAGS_TF))
5571 kvm_vcpu_do_singlestep(vcpu, &r);
5550 return r == EMULATE_DONE; 5572 return r == EMULATE_DONE;
5551} 5573}
5552EXPORT_SYMBOL_GPL(kvm_skip_emulated_instruction); 5574EXPORT_SYMBOL_GPL(kvm_skip_emulated_instruction);
@@ -5705,8 +5727,9 @@ restart:
5705 toggle_interruptibility(vcpu, ctxt->interruptibility); 5727 toggle_interruptibility(vcpu, ctxt->interruptibility);
5706 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; 5728 vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
5707 kvm_rip_write(vcpu, ctxt->eip); 5729 kvm_rip_write(vcpu, ctxt->eip);
5708 if (r == EMULATE_DONE) 5730 if (r == EMULATE_DONE &&
5709 kvm_vcpu_check_singlestep(vcpu, rflags, &r); 5731 (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)))
5732 kvm_vcpu_do_singlestep(vcpu, &r);
5710 if (!ctxt->have_exception || 5733 if (!ctxt->have_exception ||
5711 exception_type(ctxt->exception.vector) == EXCPT_TRAP) 5734 exception_type(ctxt->exception.vector) == EXCPT_TRAP)
5712 __kvm_set_rflags(vcpu, ctxt->eflags); 5735 __kvm_set_rflags(vcpu, ctxt->eflags);
@@ -8373,10 +8396,13 @@ static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
8373 if (vcpu->arch.pv.pv_unhalted) 8396 if (vcpu->arch.pv.pv_unhalted)
8374 return true; 8397 return true;
8375 8398
8376 if (atomic_read(&vcpu->arch.nmi_queued)) 8399 if (kvm_test_request(KVM_REQ_NMI, vcpu) ||
8400 (vcpu->arch.nmi_pending &&
8401 kvm_x86_ops->nmi_allowed(vcpu)))
8377 return true; 8402 return true;
8378 8403
8379 if (kvm_test_request(KVM_REQ_SMI, vcpu)) 8404 if (kvm_test_request(KVM_REQ_SMI, vcpu) ||
8405 (vcpu->arch.smi_pending && !is_smm(vcpu)))
8380 return true; 8406 return true;
8381 8407
8382 if (kvm_arch_interrupt_allowed(vcpu) && 8408 if (kvm_arch_interrupt_allowed(vcpu) &&
@@ -8583,8 +8609,7 @@ bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
8583 if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED)) 8609 if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED))
8584 return true; 8610 return true;
8585 else 8611 else
8586 return !kvm_event_needs_reinjection(vcpu) && 8612 return kvm_can_do_async_pf(vcpu);
8587 kvm_x86_ops->interrupt_allowed(vcpu);
8588} 8613}
8589 8614
8590void kvm_arch_start_assignment(struct kvm *kvm) 8615void kvm_arch_start_assignment(struct kvm *kvm)
diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
index 35ea061010a1..0ea8afcb929c 100644
--- a/arch/x86/mm/extable.c
+++ b/arch/x86/mm/extable.c
@@ -162,6 +162,9 @@ void __init early_fixup_exception(struct pt_regs *regs, int trapnr)
162 if (fixup_exception(regs, trapnr)) 162 if (fixup_exception(regs, trapnr))
163 return; 163 return;
164 164
165 if (fixup_bug(regs, trapnr))
166 return;
167
165fail: 168fail:
166 early_printk("PANIC: early exception 0x%02x IP %lx:%lx error %lx cr2 0x%lx\n", 169 early_printk("PANIC: early exception 0x%02x IP %lx:%lx error %lx cr2 0x%lx\n",
167 (unsigned)trapnr, (unsigned long)regs->cs, regs->ip, 170 (unsigned)trapnr, (unsigned long)regs->cs, regs->ip,
diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
index 302f43fd9c28..adad702b39cd 100644
--- a/arch/x86/mm/hugetlbpage.c
+++ b/arch/x86/mm/hugetlbpage.c
@@ -148,7 +148,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
148 addr = ALIGN(addr, huge_page_size(h)); 148 addr = ALIGN(addr, huge_page_size(h));
149 vma = find_vma(mm, addr); 149 vma = find_vma(mm, addr);
150 if (TASK_SIZE - len >= addr && 150 if (TASK_SIZE - len >= addr &&
151 (!vma || addr + len <= vma->vm_start)) 151 (!vma || addr + len <= vm_start_gap(vma)))
152 return addr; 152 return addr;
153 } 153 }
154 if (mm->get_unmapped_area == arch_get_unmapped_area) 154 if (mm->get_unmapped_area == arch_get_unmapped_area)
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index cbc87ea98751..9b3f9fa5b283 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -161,16 +161,16 @@ static int page_size_mask;
161 161
162static void __init probe_page_size_mask(void) 162static void __init probe_page_size_mask(void)
163{ 163{
164#if !defined(CONFIG_KMEMCHECK)
165 /* 164 /*
166 * For CONFIG_KMEMCHECK or pagealloc debugging, identity mapping will 165 * For CONFIG_KMEMCHECK or pagealloc debugging, identity mapping will
167 * use small pages. 166 * use small pages.
168 * This will simplify cpa(), which otherwise needs to support splitting 167 * This will simplify cpa(), which otherwise needs to support splitting
169 * large pages into small in interrupt context, etc. 168 * large pages into small in interrupt context, etc.
170 */ 169 */
171 if (boot_cpu_has(X86_FEATURE_PSE) && !debug_pagealloc_enabled()) 170 if (boot_cpu_has(X86_FEATURE_PSE) && !debug_pagealloc_enabled() && !IS_ENABLED(CONFIG_KMEMCHECK))
172 page_size_mask |= 1 << PG_LEVEL_2M; 171 page_size_mask |= 1 << PG_LEVEL_2M;
173#endif 172 else
173 direct_gbpages = 0;
174 174
175 /* Enable PSE if available */ 175 /* Enable PSE if available */
176 if (boot_cpu_has(X86_FEATURE_PSE)) 176 if (boot_cpu_has(X86_FEATURE_PSE))
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 95651dc58e09..0a59daf799f8 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -990,7 +990,13 @@ remove_p4d_table(p4d_t *p4d_start, unsigned long addr, unsigned long end,
990 990
991 pud_base = pud_offset(p4d, 0); 991 pud_base = pud_offset(p4d, 0);
992 remove_pud_table(pud_base, addr, next, direct); 992 remove_pud_table(pud_base, addr, next, direct);
993 free_pud_table(pud_base, p4d); 993 /*
994 * For 4-level page tables we do not want to free PUDs, but in the
995 * 5-level case we should free them. This code will have to change
996 * to adapt for boot-time switching between 4 and 5 level page tables.
997 */
998 if (CONFIG_PGTABLE_LEVELS == 5)
999 free_pud_table(pud_base, p4d);
994 } 1000 }
995 1001
996 if (direct) 1002 if (direct)
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 1dcd2be4cce4..c8520b2c62d2 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -186,7 +186,7 @@ static void cpa_flush_range(unsigned long start, int numpages, int cache)
186 unsigned int i, level; 186 unsigned int i, level;
187 unsigned long addr; 187 unsigned long addr;
188 188
189 BUG_ON(irqs_disabled()); 189 BUG_ON(irqs_disabled() && !early_boot_irqs_disabled);
190 WARN_ON(PAGE_ALIGN(start) != start); 190 WARN_ON(PAGE_ALIGN(start) != start);
191 191
192 on_each_cpu(__cpa_flush_range, NULL, 1); 192 on_each_cpu(__cpa_flush_range, NULL, 1);
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 7e76a4d8304b..43b96f5f78ba 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -828,9 +828,11 @@ static void __init kexec_enter_virtual_mode(void)
828 828
829 /* 829 /*
830 * We don't do virtual mode, since we don't do runtime services, on 830 * We don't do virtual mode, since we don't do runtime services, on
831 * non-native EFI 831 * non-native EFI. With efi=old_map, we don't do runtime services in
832 * the kexec kernel because in the initial boot something else might
833 * have been mapped at these virtual addresses.
832 */ 834 */
833 if (!efi_is_native()) { 835 if (!efi_is_native() || efi_enabled(EFI_OLD_MEMMAP)) {
834 efi_memmap_unmap(); 836 efi_memmap_unmap();
835 clear_bit(EFI_RUNTIME_SERVICES, &efi.flags); 837 clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
836 return; 838 return;
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index c488625c9712..eb8dff15a7f6 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -71,11 +71,13 @@ static void __init early_code_mapping_set_exec(int executable)
71 71
72pgd_t * __init efi_call_phys_prolog(void) 72pgd_t * __init efi_call_phys_prolog(void)
73{ 73{
74 unsigned long vaddress; 74 unsigned long vaddr, addr_pgd, addr_p4d, addr_pud;
75 pgd_t *save_pgd; 75 pgd_t *save_pgd, *pgd_k, *pgd_efi;
76 p4d_t *p4d, *p4d_k, *p4d_efi;
77 pud_t *pud;
76 78
77 int pgd; 79 int pgd;
78 int n_pgds; 80 int n_pgds, i, j;
79 81
80 if (!efi_enabled(EFI_OLD_MEMMAP)) { 82 if (!efi_enabled(EFI_OLD_MEMMAP)) {
81 save_pgd = (pgd_t *)read_cr3(); 83 save_pgd = (pgd_t *)read_cr3();
@@ -88,10 +90,49 @@ pgd_t * __init efi_call_phys_prolog(void)
88 n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE); 90 n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE);
89 save_pgd = kmalloc_array(n_pgds, sizeof(*save_pgd), GFP_KERNEL); 91 save_pgd = kmalloc_array(n_pgds, sizeof(*save_pgd), GFP_KERNEL);
90 92
93 /*
94 * Build 1:1 identity mapping for efi=old_map usage. Note that
95 * PAGE_OFFSET is PGDIR_SIZE aligned when KASLR is disabled, while
96 * it is PUD_SIZE aligned with KASLR enabled. So for a given physical
97 * address X, pud_index(X) != pud_index(__va(X)); we can only copy the
98 * PUD entry of __va(X) to fill in the PUD entry of X to build the 1:1 mapping.
99 * This means here we can only reuse the PMD tables of the direct mapping.
100 */
91 for (pgd = 0; pgd < n_pgds; pgd++) { 101 for (pgd = 0; pgd < n_pgds; pgd++) {
92 save_pgd[pgd] = *pgd_offset_k(pgd * PGDIR_SIZE); 102 addr_pgd = (unsigned long)(pgd * PGDIR_SIZE);
93 vaddress = (unsigned long)__va(pgd * PGDIR_SIZE); 103 vaddr = (unsigned long)__va(pgd * PGDIR_SIZE);
94 set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress)); 104 pgd_efi = pgd_offset_k(addr_pgd);
105 save_pgd[pgd] = *pgd_efi;
106
107 p4d = p4d_alloc(&init_mm, pgd_efi, addr_pgd);
108 if (!p4d) {
109 pr_err("Failed to allocate p4d table!\n");
110 goto out;
111 }
112
113 for (i = 0; i < PTRS_PER_P4D; i++) {
114 addr_p4d = addr_pgd + i * P4D_SIZE;
115 p4d_efi = p4d + p4d_index(addr_p4d);
116
117 pud = pud_alloc(&init_mm, p4d_efi, addr_p4d);
118 if (!pud) {
119 pr_err("Failed to allocate pud table!\n");
120 goto out;
121 }
122
123 for (j = 0; j < PTRS_PER_PUD; j++) {
124 addr_pud = addr_p4d + j * PUD_SIZE;
125
126 if (addr_pud > (max_pfn << PAGE_SHIFT))
127 break;
128
129 vaddr = (unsigned long)__va(addr_pud);
130
131 pgd_k = pgd_offset_k(vaddr);
132 p4d_k = p4d_offset(pgd_k, vaddr);
133 pud[j] = *pud_offset(p4d_k, vaddr);
134 }
135 }
95 } 136 }
96out: 137out:
97 __flush_tlb_all(); 138 __flush_tlb_all();
@@ -104,8 +145,11 @@ void __init efi_call_phys_epilog(pgd_t *save_pgd)
104 /* 145 /*
105 * After the lock is released, the original page table is restored. 146 * After the lock is released, the original page table is restored.
106 */ 147 */
107 int pgd_idx; 148 int pgd_idx, i;
108 int nr_pgds; 149 int nr_pgds;
150 pgd_t *pgd;
151 p4d_t *p4d;
152 pud_t *pud;
109 153
110 if (!efi_enabled(EFI_OLD_MEMMAP)) { 154 if (!efi_enabled(EFI_OLD_MEMMAP)) {
111 write_cr3((unsigned long)save_pgd); 155 write_cr3((unsigned long)save_pgd);
@@ -115,9 +159,28 @@ void __init efi_call_phys_epilog(pgd_t *save_pgd)
115 159
116 nr_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT) , PGDIR_SIZE); 160 nr_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT) , PGDIR_SIZE);
117 161
118 for (pgd_idx = 0; pgd_idx < nr_pgds; pgd_idx++) 162 for (pgd_idx = 0; pgd_idx < nr_pgds; pgd_idx++) {
163 pgd = pgd_offset_k(pgd_idx * PGDIR_SIZE);
119 set_pgd(pgd_offset_k(pgd_idx * PGDIR_SIZE), save_pgd[pgd_idx]); 164 set_pgd(pgd_offset_k(pgd_idx * PGDIR_SIZE), save_pgd[pgd_idx]);
120 165
166 if (!(pgd_val(*pgd) & _PAGE_PRESENT))
167 continue;
168
169 for (i = 0; i < PTRS_PER_P4D; i++) {
170 p4d = p4d_offset(pgd,
171 pgd_idx * PGDIR_SIZE + i * P4D_SIZE);
172
173 if (!(p4d_val(*p4d) & _PAGE_PRESENT))
174 continue;
175
176 pud = (pud_t *)p4d_page_vaddr(*p4d);
177 pud_free(&init_mm, pud);
178 }
179
180 p4d = (p4d_t *)pgd_page_vaddr(*pgd);
181 p4d_free(&init_mm, p4d);
182 }
183
121 kfree(save_pgd); 184 kfree(save_pgd);
122 185
123 __flush_tlb_all(); 186 __flush_tlb_all();
diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c
index 26615991d69c..e0cf95a83f3f 100644
--- a/arch/x86/platform/efi/quirks.c
+++ b/arch/x86/platform/efi/quirks.c
@@ -360,6 +360,9 @@ void __init efi_free_boot_services(void)
360 free_bootmem_late(start, size); 360 free_bootmem_late(start, size);
361 } 361 }
362 362
363 if (!num_entries)
364 return;
365
363 new_size = efi.memmap.desc_size * num_entries; 366 new_size = efi.memmap.desc_size * num_entries;
364 new_phys = efi_memmap_alloc(num_entries); 367 new_phys = efi_memmap_alloc(num_entries);
365 if (!new_phys) { 368 if (!new_phys) {
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index 7cd442690f9d..f33eef4ebd12 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -142,9 +142,7 @@ static void __init xen_banner(void)
142 struct xen_extraversion extra; 142 struct xen_extraversion extra;
143 HYPERVISOR_xen_version(XENVER_extraversion, &extra); 143 HYPERVISOR_xen_version(XENVER_extraversion, &extra);
144 144
145 pr_info("Booting paravirtualized kernel %son %s\n", 145 pr_info("Booting paravirtualized kernel on %s\n", pv_info.name);
146 xen_feature(XENFEAT_auto_translated_physmap) ?
147 "with PVH extensions " : "", pv_info.name);
148 printk(KERN_INFO "Xen version: %d.%d%s%s\n", 146 printk(KERN_INFO "Xen version: %d.%d%s%s\n",
149 version >> 16, version & 0xffff, extra.extraversion, 147 version >> 16, version & 0xffff, extra.extraversion,
150 xen_feature(XENFEAT_mmu_pt_update_preserve_ad) ? " (preserve-AD)" : ""); 148 xen_feature(XENFEAT_mmu_pt_update_preserve_ad) ? " (preserve-AD)" : "");
@@ -957,15 +955,10 @@ static void xen_write_msr(unsigned int msr, unsigned low, unsigned high)
957 955
958void xen_setup_shared_info(void) 956void xen_setup_shared_info(void)
959{ 957{
960 if (!xen_feature(XENFEAT_auto_translated_physmap)) { 958 set_fixmap(FIX_PARAVIRT_BOOTMAP, xen_start_info->shared_info);
961 set_fixmap(FIX_PARAVIRT_BOOTMAP,
962 xen_start_info->shared_info);
963 959
964 HYPERVISOR_shared_info = 960 HYPERVISOR_shared_info =
965 (struct shared_info *)fix_to_virt(FIX_PARAVIRT_BOOTMAP); 961 (struct shared_info *)fix_to_virt(FIX_PARAVIRT_BOOTMAP);
966 } else
967 HYPERVISOR_shared_info =
968 (struct shared_info *)__va(xen_start_info->shared_info);
969 962
970#ifndef CONFIG_SMP 963#ifndef CONFIG_SMP
971 /* In UP this is as good a place as any to set up shared info */ 964 /* In UP this is as good a place as any to set up shared info */
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 5e375a5e815f..3be06f3caf3c 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -42,7 +42,7 @@ xmaddr_t arbitrary_virt_to_machine(void *vaddr)
42} 42}
43EXPORT_SYMBOL_GPL(arbitrary_virt_to_machine); 43EXPORT_SYMBOL_GPL(arbitrary_virt_to_machine);
44 44
45void xen_flush_tlb_all(void) 45static void xen_flush_tlb_all(void)
46{ 46{
47 struct mmuext_op *op; 47 struct mmuext_op *op;
48 struct multicall_space mcs; 48 struct multicall_space mcs;
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index 7397d8b8459d..1f386d7fdf70 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -355,10 +355,8 @@ static pteval_t pte_pfn_to_mfn(pteval_t val)
355 pteval_t flags = val & PTE_FLAGS_MASK; 355 pteval_t flags = val & PTE_FLAGS_MASK;
356 unsigned long mfn; 356 unsigned long mfn;
357 357
358 if (!xen_feature(XENFEAT_auto_translated_physmap)) 358 mfn = __pfn_to_mfn(pfn);
359 mfn = __pfn_to_mfn(pfn); 359
360 else
361 mfn = pfn;
362 /* 360 /*
363 * If there's no mfn for the pfn, then just create an 361 * If there's no mfn for the pfn, then just create an
364 * empty non-present pte. Unfortunately this loses 362 * empty non-present pte. Unfortunately this loses
@@ -647,9 +645,6 @@ static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
647 limit--; 645 limit--;
648 BUG_ON(limit >= FIXADDR_TOP); 646 BUG_ON(limit >= FIXADDR_TOP);
649 647
650 if (xen_feature(XENFEAT_auto_translated_physmap))
651 return 0;
652
653 /* 648 /*
654 * 64-bit has a great big hole in the middle of the address 649 * 64-bit has a great big hole in the middle of the address
655 * space, which contains the Xen mappings. On 32-bit these 650 * space, which contains the Xen mappings. On 32-bit these
@@ -1289,9 +1284,6 @@ static void __init xen_pagetable_cleanhighmap(void)
1289 1284
1290static void __init xen_pagetable_p2m_setup(void) 1285static void __init xen_pagetable_p2m_setup(void)
1291{ 1286{
1292 if (xen_feature(XENFEAT_auto_translated_physmap))
1293 return;
1294
1295 xen_vmalloc_p2m_tree(); 1287 xen_vmalloc_p2m_tree();
1296 1288
1297#ifdef CONFIG_X86_64 1289#ifdef CONFIG_X86_64
@@ -1314,8 +1306,7 @@ static void __init xen_pagetable_init(void)
1314 xen_build_mfn_list_list(); 1306 xen_build_mfn_list_list();
1315 1307
1316 /* Remap memory freed due to conflicts with E820 map */ 1308 /* Remap memory freed due to conflicts with E820 map */
1317 if (!xen_feature(XENFEAT_auto_translated_physmap)) 1309 xen_remap_memory();
1318 xen_remap_memory();
1319 1310
1320 xen_setup_shared_info(); 1311 xen_setup_shared_info();
1321} 1312}
@@ -1925,21 +1916,20 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
 	/* Zap identity mapping */
 	init_level4_pgt[0] = __pgd(0);
 
-	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-		/* Pre-constructed entries are in pfn, so convert to mfn */
-		/* L4[272] -> level3_ident_pgt
-		 * L4[511] -> level3_kernel_pgt */
-		convert_pfn_mfn(init_level4_pgt);
+	/* Pre-constructed entries are in pfn, so convert to mfn */
+	/* L4[272] -> level3_ident_pgt */
+	/* L4[511] -> level3_kernel_pgt */
+	convert_pfn_mfn(init_level4_pgt);
 
-		/* L3_i[0] -> level2_ident_pgt */
-		convert_pfn_mfn(level3_ident_pgt);
-		/* L3_k[510] -> level2_kernel_pgt
-		 * L3_k[511] -> level2_fixmap_pgt */
-		convert_pfn_mfn(level3_kernel_pgt);
+	/* L3_i[0] -> level2_ident_pgt */
+	convert_pfn_mfn(level3_ident_pgt);
+	/* L3_k[510] -> level2_kernel_pgt */
+	/* L3_k[511] -> level2_fixmap_pgt */
+	convert_pfn_mfn(level3_kernel_pgt);
+
+	/* L3_k[511][506] -> level1_fixmap_pgt */
+	convert_pfn_mfn(level2_fixmap_pgt);
 
-	/* L3_k[511][506] -> level1_fixmap_pgt */
-	convert_pfn_mfn(level2_fixmap_pgt);
-	}
 	/* We get [511][511] and have Xen's version of level2_kernel_pgt */
 	l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
 	l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
@@ -1962,34 +1952,30 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
 		if (i && i < pgd_index(__START_KERNEL_map))
 			init_level4_pgt[i] = ((pgd_t *)xen_start_info->pt_base)[i];
 
-	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-		/* Make pagetable pieces RO */
-		set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
-		set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
-		set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
-		set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
-		set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
-		set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
-		set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
-		set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO);
-
-		/* Pin down new L4 */
-		pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
-				  PFN_DOWN(__pa_symbol(init_level4_pgt)));
-
-		/* Unpin Xen-provided one */
-		pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
-
-		/*
-		 * At this stage there can be no user pgd, and no page
-		 * structure to attach it to, so make sure we just set kernel
-		 * pgd.
-		 */
-		xen_mc_batch();
-		__xen_write_cr3(true, __pa(init_level4_pgt));
-		xen_mc_issue(PARAVIRT_LAZY_CPU);
-	} else
-		native_write_cr3(__pa(init_level4_pgt));
+	/* Make pagetable pieces RO */
+	set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
+	set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
+	set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
+	set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
+	set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
+	set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
+	set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
+	set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO);
+
+	/* Pin down new L4 */
+	pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
+			  PFN_DOWN(__pa_symbol(init_level4_pgt)));
+
+	/* Unpin Xen-provided one */
+	pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
+
+	/*
+	 * At this stage there can be no user pgd, and no page structure to
+	 * attach it to, so make sure we just set kernel pgd.
+	 */
+	xen_mc_batch();
+	__xen_write_cr3(true, __pa(init_level4_pgt));
+	xen_mc_issue(PARAVIRT_LAZY_CPU);
 
 	/* We can't that easily rip out L3 and L2, as the Xen pagetables are
 	 * set out this way: [L4], [L1], [L2], [L3], [L1], [L1] ... for
@@ -2403,9 +2389,6 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
 
 static void __init xen_post_allocator_init(void)
 {
-	if (xen_feature(XENFEAT_auto_translated_physmap))
-		return;
-
 	pv_mmu_ops.set_pte = xen_set_pte;
 	pv_mmu_ops.set_pmd = xen_set_pmd;
 	pv_mmu_ops.set_pud = xen_set_pud;
@@ -2511,9 +2494,6 @@ void __init xen_init_mmu_ops(void)
 {
 	x86_init.paging.pagetable_init = xen_pagetable_init;
 
-	if (xen_feature(XENFEAT_auto_translated_physmap))
-		return;
-
 	pv_mmu_ops = xen_mmu_ops;
 
 	memset(dummy_mapping, 0xff, PAGE_SIZE);
@@ -2650,9 +2630,6 @@ int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
 	 * this function are redundant and can be ignored.
 	 */
 
-	if (xen_feature(XENFEAT_auto_translated_physmap))
-		return 0;
-
 	if (unlikely(order > MAX_CONTIG_ORDER))
 		return -ENOMEM;
 
@@ -2689,9 +2666,6 @@ void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
 	int success;
 	unsigned long vstart;
 
-	if (xen_feature(XENFEAT_auto_translated_physmap))
-		return;
-
 	if (unlikely(order > MAX_CONTIG_ORDER))
 		return;
 
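
With XENFEAT_auto_translated_physmap support removed, every path above can assume a PV guest, so the pfn-to-mfn translation is now unconditional. A toy stand-in for that lookup step, not the kernel's __pfn_to_mfn() (table size and contents here are hypothetical):

	#include <stdint.h>

	#define INVALID_P2M_ENTRY (~0UL)
	#define P2M_SIZE 1024			/* toy table size */

	static unsigned long p2m[P2M_SIZE];	/* pfn -> mfn map, 0 = unset */

	/* Mirror of the idea in pte_pfn_to_mfn(): a missing entry makes the
	 * caller emit an empty, non-present pte instead of a bogus mapping. */
	static unsigned long pfn_to_mfn(unsigned long pfn)
	{
		if (pfn >= P2M_SIZE || p2m[pfn] == 0)
			return INVALID_P2M_ENTRY;
		return p2m[pfn];
	}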
diff --git a/arch/xtensa/include/asm/irq.h b/arch/xtensa/include/asm/irq.h
index f71f88ea7646..19707db966f1 100644
--- a/arch/xtensa/include/asm/irq.h
+++ b/arch/xtensa/include/asm/irq.h
@@ -29,7 +29,8 @@ static inline void variant_irq_disable(unsigned int irq) { }
 # define PLATFORM_NR_IRQS 0
 #endif
 #define XTENSA_NR_IRQS XCHAL_NUM_INTERRUPTS
-#define NR_IRQS (XTENSA_NR_IRQS + VARIANT_NR_IRQS + PLATFORM_NR_IRQS)
+#define NR_IRQS (XTENSA_NR_IRQS + VARIANT_NR_IRQS + PLATFORM_NR_IRQS + 1)
+#define XTENSA_PIC_LINUX_IRQ(hwirq) ((hwirq) + 1)
 
 #if VARIANT_NR_IRQS == 0
 static inline void variant_init_irq(void) { }
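
The hunk above grows NR_IRQS by one and introduces XTENSA_PIC_LINUX_IRQ so that Linux IRQ 0 can stay reserved as "no interrupt" while each hardware line maps to hwirq + 1. A minimal standalone illustration of that mapping (the hwirq value is made up):

	#include <stdio.h>

	#define XTENSA_PIC_LINUX_IRQ(hwirq) ((hwirq) + 1)

	int main(void)
	{
		int oeth_hwirq = 1;	/* hypothetical hardware interrupt line */

		/* Platform resources must carry the Linux IRQ, not the raw
		 * line, which is what the xtfpga changes below switch to. */
		printf("hwirq %d -> Linux IRQ %d\n",
		       oeth_hwirq, XTENSA_PIC_LINUX_IRQ(oeth_hwirq));
		return 0;
	}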
diff --git a/arch/xtensa/include/asm/processor.h b/arch/xtensa/include/asm/processor.h
index 003eeee3fbc6..30ee8c608853 100644
--- a/arch/xtensa/include/asm/processor.h
+++ b/arch/xtensa/include/asm/processor.h
@@ -213,8 +213,6 @@ struct mm_struct;
 #define release_segments(mm)	do { } while(0)
 #define forget_segments()	do { } while (0)
 
-#define thread_saved_pc(tsk)	(task_pt_regs(tsk)->pc)
-
 extern unsigned long get_wchan(struct task_struct *p);
 
 #define KSTK_EIP(tsk)		(task_pt_regs(tsk)->pc)
diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c
index a265edd6ac37..99341028cc77 100644
--- a/arch/xtensa/kernel/irq.c
+++ b/arch/xtensa/kernel/irq.c
@@ -34,11 +34,6 @@ asmlinkage void do_IRQ(int hwirq, struct pt_regs *regs)
 {
 	int irq = irq_find_mapping(NULL, hwirq);
 
-	if (hwirq >= NR_IRQS) {
-		printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
-				__func__, hwirq);
-	}
-
 #ifdef CONFIG_DEBUG_STACKOVERFLOW
 	/* Debugging check for stack overflow: is there less than 1KB free? */
 	{
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
index 394ef08300b6..33bfa5270d95 100644
--- a/arch/xtensa/kernel/setup.c
+++ b/arch/xtensa/kernel/setup.c
@@ -593,8 +593,7 @@ c_show(struct seq_file *f, void *slot)
 		    (ccount_freq/10000) % 100,
 		    loops_per_jiffy/(500000/HZ),
 		    (loops_per_jiffy/(5000/HZ)) % 100);
-
-	seq_printf(f,"flags\t\t: "
+	seq_puts(f, "flags\t\t: "
 #if XCHAL_HAVE_NMI
 		   "nmi "
 #endif
diff --git a/arch/xtensa/kernel/syscall.c b/arch/xtensa/kernel/syscall.c
index 06937928cb72..74afbf02d07e 100644
--- a/arch/xtensa/kernel/syscall.c
+++ b/arch/xtensa/kernel/syscall.c
@@ -88,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 	/* At this point:  (!vmm || addr < vmm->vm_end). */
 	if (TASK_SIZE - len < addr)
 		return -ENOMEM;
-	if (!vmm || addr + len <= vmm->vm_start)
+	if (!vmm || addr + len <= vm_start_gap(vmm))
 		return addr;
 	addr = vmm->vm_end;
 	if (flags & MAP_SHARED)
diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S
index 30d9fc21e076..162c77e53ca8 100644
--- a/arch/xtensa/kernel/vmlinux.lds.S
+++ b/arch/xtensa/kernel/vmlinux.lds.S
@@ -118,7 +118,7 @@ SECTIONS
 	SECTION_VECTOR (.KernelExceptionVector.text, KERNEL_VECTOR_VADDR)
 	SECTION_VECTOR (.UserExceptionVector.literal, USER_VECTOR_VADDR - 4)
 	SECTION_VECTOR (.UserExceptionVector.text, USER_VECTOR_VADDR)
-	SECTION_VECTOR (.DoubleExceptionVector.literal, DOUBLEEXC_VECTOR_VADDR - 48)
+	SECTION_VECTOR (.DoubleExceptionVector.literal, DOUBLEEXC_VECTOR_VADDR - 20)
 	SECTION_VECTOR (.DoubleExceptionVector.text, DOUBLEEXC_VECTOR_VADDR)
 #endif
 
@@ -306,13 +306,13 @@ SECTIONS
 		  .UserExceptionVector.literal)
   SECTION_VECTOR (_DoubleExceptionVector_literal,
 		  .DoubleExceptionVector.literal,
-		  DOUBLEEXC_VECTOR_VADDR - 48,
+		  DOUBLEEXC_VECTOR_VADDR - 20,
 		  SIZEOF(.UserExceptionVector.text),
 		  .UserExceptionVector.text)
   SECTION_VECTOR (_DoubleExceptionVector_text,
 		  .DoubleExceptionVector.text,
 		  DOUBLEEXC_VECTOR_VADDR,
-		  48,
+		  20,
 		  .DoubleExceptionVector.literal)
 
   . = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3;
diff --git a/arch/xtensa/platforms/iss/simdisk.c b/arch/xtensa/platforms/iss/simdisk.c
index 02e94bb3ad3e..c45b90bb9339 100644
--- a/arch/xtensa/platforms/iss/simdisk.c
+++ b/arch/xtensa/platforms/iss/simdisk.c
@@ -317,8 +317,7 @@ static int __init simdisk_init(void)
 	if (simdisk_count > MAX_SIMDISK_COUNT)
 		simdisk_count = MAX_SIMDISK_COUNT;
 
-	sddev = kmalloc(simdisk_count * sizeof(struct simdisk),
-			GFP_KERNEL);
+	sddev = kmalloc_array(simdisk_count, sizeof(*sddev), GFP_KERNEL);
 	if (sddev == NULL)
 		goto out_unregister;
 
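
kmalloc_array() is preferred here because it checks the count * size multiplication for overflow before allocating. A userspace sketch of the same guard (malloc stands in for kmalloc):

	#include <stdlib.h>
	#include <stdint.h>

	/* Refuse allocations whose byte count would wrap around, the way
	 * kmalloc_array() does before falling through to kmalloc(). */
	static void *alloc_array(size_t n, size_t size)
	{
		if (size != 0 && n > SIZE_MAX / size)
			return NULL;		/* n * size would overflow */
		return malloc(n * size);
	}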
diff --git a/arch/xtensa/platforms/xtfpga/include/platform/hardware.h b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
index dbeea2b440a1..1fda7e20dfcb 100644
--- a/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
+++ b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
@@ -24,16 +24,18 @@
 
 /* Interrupt configuration. */
 
-#define PLATFORM_NR_IRQS	10
+#define PLATFORM_NR_IRQS	0
 
 /* Default assignment of LX60 devices to external interrupts. */
 
 #ifdef CONFIG_XTENSA_MX
 #define DUART16552_INTNUM	XCHAL_EXTINT3_NUM
 #define OETH_IRQ		XCHAL_EXTINT4_NUM
+#define C67X00_IRQ		XCHAL_EXTINT8_NUM
 #else
 #define DUART16552_INTNUM	XCHAL_EXTINT0_NUM
 #define OETH_IRQ		XCHAL_EXTINT1_NUM
+#define C67X00_IRQ		XCHAL_EXTINT5_NUM
 #endif
 
 /*
@@ -63,5 +65,5 @@
 
 #define C67X00_PADDR	(XCHAL_KIO_PADDR + 0x0D0D0000)
 #define C67X00_SIZE	0x10
-#define C67X00_IRQ	5
+
 #endif /* __XTENSA_XTAVNET_HARDWARE_H */
diff --git a/arch/xtensa/platforms/xtfpga/setup.c b/arch/xtensa/platforms/xtfpga/setup.c
index 779be723eb2b..42285f35d313 100644
--- a/arch/xtensa/platforms/xtfpga/setup.c
+++ b/arch/xtensa/platforms/xtfpga/setup.c
@@ -175,8 +175,8 @@ static struct resource ethoc_res[] = {
 		.flags = IORESOURCE_MEM,
 	},
 	[2] = { /* IRQ number */
-		.start = OETH_IRQ,
-		.end = OETH_IRQ,
+		.start = XTENSA_PIC_LINUX_IRQ(OETH_IRQ),
+		.end = XTENSA_PIC_LINUX_IRQ(OETH_IRQ),
 		.flags = IORESOURCE_IRQ,
 	},
 };
@@ -213,8 +213,8 @@ static struct resource c67x00_res[] = {
 		.flags = IORESOURCE_MEM,
 	},
 	[1] = { /* IRQ number */
-		.start = C67X00_IRQ,
-		.end = C67X00_IRQ,
+		.start = XTENSA_PIC_LINUX_IRQ(C67X00_IRQ),
+		.end = XTENSA_PIC_LINUX_IRQ(C67X00_IRQ),
 		.flags = IORESOURCE_IRQ,
 	},
 };
@@ -247,7 +247,7 @@ static struct resource serial_resource = {
 static struct plat_serial8250_port serial_platform_data[] = {
 	[0] = {
 		.mapbase = DUART16552_PADDR,
-		.irq = DUART16552_INTNUM,
+		.irq = XTENSA_PIC_LINUX_IRQ(DUART16552_INTNUM),
 		.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST |
 			 UPF_IOREMAP,
 		.iotype = XCHAL_HAVE_BE ? UPIO_MEM32BE : UPIO_MEM32,
diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
index c8a32fb345cf..78b2e0db4fb2 100644
--- a/block/bfq-cgroup.c
+++ b/block/bfq-cgroup.c
@@ -52,7 +52,7 @@ BFQG_FLAG_FNS(idling)
 BFQG_FLAG_FNS(empty)
 #undef BFQG_FLAG_FNS
 
-/* This should be called with the queue_lock held. */
+/* This should be called with the scheduler lock held. */
 static void bfqg_stats_update_group_wait_time(struct bfqg_stats *stats)
 {
 	unsigned long long now;
@@ -67,7 +67,7 @@ static void bfqg_stats_update_group_wait_time(struct bfqg_stats *stats)
 	bfqg_stats_clear_waiting(stats);
 }
 
-/* This should be called with the queue_lock held. */
+/* This should be called with the scheduler lock held. */
 static void bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg,
 						 struct bfq_group *curr_bfqg)
 {
@@ -81,7 +81,7 @@ static void bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg,
 	bfqg_stats_mark_waiting(stats);
 }
 
-/* This should be called with the queue_lock held. */
+/* This should be called with the scheduler lock held. */
 static void bfqg_stats_end_empty_time(struct bfqg_stats *stats)
 {
 	unsigned long long now;
@@ -203,12 +203,30 @@ struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
 
 static void bfqg_get(struct bfq_group *bfqg)
 {
-	return blkg_get(bfqg_to_blkg(bfqg));
+	bfqg->ref++;
 }
 
 void bfqg_put(struct bfq_group *bfqg)
 {
-	return blkg_put(bfqg_to_blkg(bfqg));
+	bfqg->ref--;
+
+	if (bfqg->ref == 0)
+		kfree(bfqg);
+}
+
+static void bfqg_and_blkg_get(struct bfq_group *bfqg)
+{
+	/* see comments in bfq_bic_update_cgroup for why refcounting bfqg */
+	bfqg_get(bfqg);
+
+	blkg_get(bfqg_to_blkg(bfqg));
+}
+
+void bfqg_and_blkg_put(struct bfq_group *bfqg)
+{
+	bfqg_put(bfqg);
+
+	blkg_put(bfqg_to_blkg(bfqg));
 }
 
 void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
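
The hunk above decouples the bfq_group's lifetime from its blkg by giving bfqg a private reference count; bfqg_and_blkg_get()/bfqg_and_blkg_put() then pin or release both objects together. A simplified, single-threaded sketch of the split-refcount pattern (the real code serializes on bfqd->lock):

	#include <stdlib.h>

	struct group {
		int ref;		/* private counter, like bfqg->ref */
		/* ... scheduler payload ... */
	};

	static void group_get(struct group *g)
	{
		g->ref++;
	}

	static void group_put(struct group *g)
	{
		if (--g->ref == 0)
			free(g);	/* gone only when nothing refers to it */
	}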
@@ -312,7 +330,11 @@ void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg)
 	if (bfqq) {
 		bfqq->ioprio = bfqq->new_ioprio;
 		bfqq->ioprio_class = bfqq->new_ioprio_class;
-		bfqg_get(bfqg);
+		/*
+		 * Make sure that bfqg and its associated blkg do not
+		 * disappear before entity.
+		 */
+		bfqg_and_blkg_get(bfqg);
 	}
 	entity->parent = bfqg->my_entity; /* NULL for root group */
 	entity->sched_data = &bfqg->sched_data;
@@ -399,6 +421,8 @@ struct blkg_policy_data *bfq_pd_alloc(gfp_t gfp, int node)
 		return NULL;
 	}
 
+	/* see comments in bfq_bic_update_cgroup for why refcounting */
+	bfqg_get(bfqg);
 	return &bfqg->pd;
 }
 
@@ -426,7 +450,7 @@ void bfq_pd_free(struct blkg_policy_data *pd)
 	struct bfq_group *bfqg = pd_to_bfqg(pd);
 
 	bfqg_stats_exit(&bfqg->stats);
-	return kfree(bfqg);
+	bfqg_put(bfqg);
 }
 
 void bfq_pd_reset_stats(struct blkg_policy_data *pd)
@@ -496,9 +520,10 @@ struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd,
  * Move @bfqq to @bfqg, deactivating it from its old group and reactivating
  * it on the new one. Avoid putting the entity on the old group idle tree.
  *
- * Must be called under the queue lock; the cgroup owning @bfqg must
- * not disappear (by now this just means that we are called under
- * rcu_read_lock()).
+ * Must be called under the scheduler lock, to make sure that the blkg
+ * owning @bfqg does not disappear (see comments in
+ * bfq_bic_update_cgroup on guaranteeing the consistency of blkg
+ * objects).
  */
 void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
 		   struct bfq_group *bfqg)
@@ -519,16 +544,12 @@ void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
 		bfq_deactivate_bfqq(bfqd, bfqq, false, false);
 	else if (entity->on_st)
 		bfq_put_idle_entity(bfq_entity_service_tree(entity), entity);
-	bfqg_put(bfqq_group(bfqq));
+	bfqg_and_blkg_put(bfqq_group(bfqq));
 
-	/*
-	 * Here we use a reference to bfqg.  We don't need a refcounter
-	 * as the cgroup reference will not be dropped, so that its
-	 * destroy() callback will not be invoked.
-	 */
 	entity->parent = bfqg->my_entity;
 	entity->sched_data = &bfqg->sched_data;
-	bfqg_get(bfqg);
+	/* pin down bfqg and its associated blkg */
+	bfqg_and_blkg_get(bfqg);
 
 	if (bfq_bfqq_busy(bfqq)) {
 		bfq_pos_tree_add_move(bfqd, bfqq);
@@ -545,8 +566,9 @@ void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
  * @bic: the bic to move.
  * @blkcg: the blk-cgroup to move to.
  *
- * Move bic to blkcg, assuming that bfqd->queue is locked; the caller
- * has to make sure that the reference to cgroup is valid across the call.
+ * Move bic to blkcg, assuming that bfqd->lock is held, which makes
+ * sure that the reference to cgroup is valid across the call (see
+ * comments in bfq_bic_update_cgroup on this issue).
  *
  * NOTE: an alternative approach might have been to store the current
  * cgroup in bfqq and getting a reference to it, reducing the lookup
@@ -604,6 +626,57 @@ void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)
 		goto out;
 
 	bfqg = __bfq_bic_change_cgroup(bfqd, bic, bio_blkcg(bio));
+	/*
+	 * Update blkg_path for bfq_log_* functions. We cache this
+	 * path, and update it here, for the following
+	 * reasons. Operations on blkg objects in blk-cgroup are
+	 * protected with the request_queue lock, and not with the
+	 * lock that protects the instances of this scheduler
+	 * (bfqd->lock). This exposes BFQ to the following sort of
+	 * race.
+	 *
+	 * The blkg_lookup performed in bfq_get_queue, protected
+	 * through rcu, may happen to return the address of a copy of
+	 * the original blkg. If this is the case, then the
+	 * bfqg_and_blkg_get performed in bfq_get_queue, to pin down
+	 * the blkg, is useless: it does not prevent blk-cgroup code
+	 * from destroying both the original blkg and all objects
+	 * directly or indirectly referred by the copy of the
+	 * blkg.
+	 *
+	 * On the bright side, destroy operations on a blkg invoke, as
+	 * a first step, hooks of the scheduler associated with the
+	 * blkg. And these hooks are executed with bfqd->lock held for
+	 * BFQ. As a consequence, for any blkg associated with the
+	 * request queue this instance of the scheduler is attached
+	 * to, we are guaranteed that such a blkg is not destroyed, and
+	 * that all the pointers it contains are consistent, while we
+	 * are holding bfqd->lock. A blkg_lookup performed with
+	 * bfqd->lock held then returns a fully consistent blkg, which
+	 * remains consistent as long as this lock is held.
+	 *
+	 * Thanks to the last fact, and to the fact that: (1) bfqg has
+	 * been obtained through a blkg_lookup in the above
+	 * assignment, and (2) bfqd->lock is being held, here we can
+	 * safely use the policy data for the involved blkg (i.e., the
+	 * field bfqg->pd) to get to the blkg associated with bfqg,
+	 * and then we can safely use any field of blkg. After we
+	 * release bfqd->lock, even just getting blkg through this
+	 * bfqg may cause dangling references to be traversed, as
+	 * bfqg->pd may not exist any more.
+	 *
+	 * In view of the above facts, here we cache, in the bfqg, any
+	 * blkg data we may need for this bic, and for its associated
+	 * bfq_queue. As of now, we need to cache only the path of the
+	 * blkg, which is used in the bfq_log_* functions.
+	 *
+	 * Finally, note that bfqg itself needs to be protected from
+	 * destruction on the blkg_free of the original blkg (which
+	 * invokes bfq_pd_free). We use an additional private
+	 * refcounter for bfqg, to let it disappear only after no
+	 * bfq_queue refers to it any longer.
+	 */
+	blkg_path(bfqg_to_blkg(bfqg), bfqg->blkg_path, sizeof(bfqg->blkg_path));
 	bic->blkcg_serial_nr = serial_nr;
 out:
 	rcu_read_unlock();
@@ -640,8 +713,6 @@ static void bfq_reparent_leaf_entity(struct bfq_data *bfqd,
  * @bfqd: the device data structure with the root group.
  * @bfqg: the group to move from.
  * @st: the service tree with the entities.
- *
- * Needs queue_lock to be taken and reference to be valid over the call.
  */
 static void bfq_reparent_active_entities(struct bfq_data *bfqd,
 					 struct bfq_group *bfqg,
@@ -692,8 +763,7 @@ void bfq_pd_offline(struct blkg_policy_data *pd)
 	/*
 	 * The idle tree may still contain bfq_queues belonging
 	 * to exited task because they never migrated to a different
-	 * cgroup from the one being destroyed now. No one else
-	 * can access them so it's safe to act without any lock.
+	 * cgroup from the one being destroyed now.
 	 */
 	bfq_flush_idle_tree(st);
 
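
The long comment added in bfq_bic_update_cgroup boils down to one defensive move: while bfqd->lock guarantees the blkg is consistent, copy the only datum BFQ needs later (the cgroup path) into the bfqg itself, so the logging macros never chase a possibly-dangling blkg pointer. A sketch of that cache-under-lock idea (names are illustrative):

	#include <stdio.h>
	#include <string.h>

	struct group {
		char blkg_path[128];	/* mirrors the new bfq_group field */
	};

	/* Called only while the lock that keeps live_path valid is held;
	 * afterwards readers use g->blkg_path and never touch the source. */
	static void cache_path(struct group *g, const char *live_path)
	{
		snprintf(g->blkg_path, sizeof(g->blkg_path), "%s", live_path);
	}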
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index 08ce45096350..ed93da2462ab 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -3665,7 +3665,7 @@ void bfq_put_queue(struct bfq_queue *bfqq)
 
 	kmem_cache_free(bfq_pool, bfqq);
 #ifdef CONFIG_BFQ_GROUP_IOSCHED
-	bfqg_put(bfqg);
+	bfqg_and_blkg_put(bfqg);
 #endif
 }
 
diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h
index ae783c06dfd9..5c3bf9861492 100644
--- a/block/bfq-iosched.h
+++ b/block/bfq-iosched.h
@@ -759,6 +759,12 @@ struct bfq_group {
 	/* must be the first member */
 	struct blkg_policy_data pd;
 
+	/* cached path for this blkg (see comments in bfq_bic_update_cgroup) */
+	char blkg_path[128];
+
+	/* reference counter (see comments in bfq_bic_update_cgroup) */
+	int ref;
+
 	struct bfq_entity entity;
 	struct bfq_sched_data sched_data;
 
@@ -838,7 +844,7 @@ struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd,
 struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
 struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
 struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node);
-void bfqg_put(struct bfq_group *bfqg);
+void bfqg_and_blkg_put(struct bfq_group *bfqg);
 
 #ifdef CONFIG_BFQ_GROUP_IOSCHED
 extern struct cftype bfq_blkcg_legacy_files[];
@@ -910,20 +916,13 @@ void bfq_add_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq);
 struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
 
 #define bfq_log_bfqq(bfqd, bfqq, fmt, args...)	do { \
-	char __pbuf[128]; \
-	\
-	blkg_path(bfqg_to_blkg(bfqq_group(bfqq)), __pbuf, sizeof(__pbuf)); \
-	blk_add_trace_msg((bfqd)->queue, "bfq%d%c %s " fmt, (bfqq)->pid, \
+	blk_add_trace_msg((bfqd)->queue, "bfq%d%c %s " fmt, (bfqq)->pid, \
 			bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \
-			__pbuf, ##args); \
+			bfqq_group(bfqq)->blkg_path, ##args); \
 } while (0)
 
-#define bfq_log_bfqg(bfqd, bfqg, fmt, args...)	do { \
-	char __pbuf[128]; \
-	\
-	blkg_path(bfqg_to_blkg(bfqg), __pbuf, sizeof(__pbuf)); \
-	blk_add_trace_msg((bfqd)->queue, "%s " fmt, __pbuf, ##args); \
-} while (0)
+#define bfq_log_bfqg(bfqd, bfqg, fmt, args...) \
+	blk_add_trace_msg((bfqd)->queue, "%s " fmt, (bfqg)->blkg_path, ##args)
 
 #else /* CONFIG_BFQ_GROUP_IOSCHED */
 
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index 5384713d48bc..b5009a896a7f 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -175,6 +175,9 @@ bool bio_integrity_enabled(struct bio *bio)
 	if (bio_op(bio) != REQ_OP_READ && bio_op(bio) != REQ_OP_WRITE)
 		return false;
 
+	if (!bio_sectors(bio))
+		return false;
+
 	/* Already protected? */
 	if (bio_integrity(bio))
 		return false;
diff --git a/block/bio.c b/block/bio.c
index 888e7801c638..26b0810fb8ea 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -240,20 +240,21 @@ fallback:
 	return bvl;
 }
 
-static void __bio_free(struct bio *bio)
+void bio_uninit(struct bio *bio)
 {
 	bio_disassociate_task(bio);
 
 	if (bio_integrity(bio))
 		bio_integrity_free(bio);
 }
+EXPORT_SYMBOL(bio_uninit);
 
 static void bio_free(struct bio *bio)
 {
 	struct bio_set *bs = bio->bi_pool;
 	void *p;
 
-	__bio_free(bio);
+	bio_uninit(bio);
 
 	if (bs) {
 		bvec_free(bs->bvec_pool, bio->bi_io_vec, BVEC_POOL_IDX(bio));
@@ -271,6 +272,11 @@ static void bio_free(struct bio *bio)
 	}
 }
 
+/*
+ * Users of this function have their own bio allocation. Subsequently,
+ * they must remember to pair any call to bio_init() with bio_uninit()
+ * when IO has completed, or when the bio is released.
+ */
 void bio_init(struct bio *bio, struct bio_vec *table,
 	      unsigned short max_vecs)
 {
@@ -297,7 +303,7 @@ void bio_reset(struct bio *bio)
 {
 	unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS);
 
-	__bio_free(bio);
+	bio_uninit(bio);
 
 	memset(bio, 0, BIO_RESET_BYTES);
 	bio->bi_flags = flags;
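
Renaming __bio_free() to an exported bio_uninit() lets code that embeds a struct bio in its own allocation release the bio's resources without going through a bio_set. The pairing rule from the new comment, sketched on a generic embedded object (buf/buf_init/buf_uninit are illustrative names):

	#include <stdlib.h>
	#include <stddef.h>

	struct buf {
		void *data;
	};

	static void buf_init(struct buf *b, size_t len)
	{
		b->data = malloc(len);	/* caller owns the struct itself */
	}

	/* Must be paired with buf_init(); releases resources, not the
	 * struct, as bio_uninit() does for a caller-allocated bio. */
	static void buf_uninit(struct buf *b)
	{
		free(b->data);
		b->data = NULL;
	}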
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 7c2947128f58..0480892e97e5 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -74,7 +74,7 @@ static void blkg_free(struct blkcg_gq *blkg)
 			blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
 
 	if (blkg->blkcg != &blkcg_root)
-		blk_exit_rl(&blkg->rl);
+		blk_exit_rl(blkg->q, &blkg->rl);
 
 	blkg_rwstat_exit(&blkg->stat_ios);
 	blkg_rwstat_exit(&blkg->stat_bytes);
diff --git a/block/blk-core.c b/block/blk-core.c
index c7068520794b..a7421b772d0e 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -648,13 +648,19 @@ int blk_init_rl(struct request_list *rl, struct request_queue *q,
 	if (!rl->rq_pool)
 		return -ENOMEM;
 
+	if (rl != &q->root_rl)
+		WARN_ON_ONCE(!blk_get_queue(q));
+
 	return 0;
 }
 
-void blk_exit_rl(struct request_list *rl)
+void blk_exit_rl(struct request_queue *q, struct request_list *rl)
 {
-	if (rl->rq_pool)
+	if (rl->rq_pool) {
 		mempool_destroy(rl->rq_pool);
+		if (rl != &q->root_rl)
+			blk_put_queue(q);
+	}
 }
 
 struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
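
The blk-core change makes each non-root request_list take a reference on its queue at init and drop it at exit, so the queue cannot be freed while the list's mempool still points into it. The parent-pinning pattern, in a reduced form (parent/child are stand-in names):

	#include <stdlib.h>

	struct parent {
		int ref;
	};

	struct child {
		struct parent *p;
	};

	static void child_init(struct child *c, struct parent *p)
	{
		p->ref++;		/* blk_get_queue() in the patch */
		c->p = p;
	}

	static void child_exit(struct child *c)
	{
		if (c->p && --c->p->ref == 0)
			free(c->p);	/* blk_put_queue() may free the queue */
		c->p = NULL;
	}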
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 1f5b692526ae..0ded5e846335 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -68,6 +68,45 @@ static void blk_mq_sched_assign_ioc(struct request_queue *q,
 	__blk_mq_sched_assign_ioc(q, rq, bio, ioc);
 }
 
+/*
+ * Mark a hardware queue as needing a restart. For shared queues, maintain
+ * a count of how many hardware queues are marked for restart.
+ */
+static void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
+{
+	if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
+		return;
+
+	if (hctx->flags & BLK_MQ_F_TAG_SHARED) {
+		struct request_queue *q = hctx->queue;
+
+		if (!test_and_set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
+			atomic_inc(&q->shared_hctx_restart);
+	} else
+		set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
+}
+
+static bool blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx)
+{
+	if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
+		return false;
+
+	if (hctx->flags & BLK_MQ_F_TAG_SHARED) {
+		struct request_queue *q = hctx->queue;
+
+		if (test_and_clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
+			atomic_dec(&q->shared_hctx_restart);
+	} else
+		clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
+
+	if (blk_mq_hctx_has_pending(hctx)) {
+		blk_mq_run_hw_queue(hctx, true);
+		return true;
+	}
+
+	return false;
+}
+
 struct request *blk_mq_sched_get_request(struct request_queue *q,
 					 struct bio *bio,
 					 unsigned int op,
@@ -266,18 +305,6 @@ static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
 		return true;
 }
 
-static bool blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx)
-{
-	if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) {
-		clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
-		if (blk_mq_hctx_has_pending(hctx)) {
-			blk_mq_run_hw_queue(hctx, true);
-			return true;
-		}
-	}
-	return false;
-}
-
 /**
  * list_for_each_entry_rcu_rr - iterate in a round-robin fashion over rcu list
  * @pos: loop cursor.
@@ -309,6 +336,13 @@ void blk_mq_sched_restart(struct blk_mq_hw_ctx *const hctx)
 	unsigned int i, j;
 
 	if (set->flags & BLK_MQ_F_TAG_SHARED) {
+		/*
+		 * If this is 0, then we know that no hardware queues
+		 * have RESTART marked. We're done.
+		 */
+		if (!atomic_read(&queue->shared_hctx_restart))
+			return;
+
 		rcu_read_lock();
 		list_for_each_entry_rcu_rr(q, queue, &set->tag_list,
 					   tag_set_list) {
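
For shared tag sets, blk_mq_sched_restart() used to walk every queue in the set even when no hardware queue was marked; the new atomic counter mirrors the per-hctx RESTART bits so the common case is a single load. A compact userspace sketch of the counted-flag idea (not the kernel bitops; the per-queue flag here is not itself atomic):

	#include <stdatomic.h>
	#include <stdbool.h>

	static atomic_int shared_restart;	/* like q->shared_hctx_restart */

	static void mark_restart(bool *flag)
	{
		if (!*flag) {			/* kernel uses test_and_set_bit */
			*flag = true;
			atomic_fetch_add(&shared_restart, 1);
		}
	}

	static bool any_restart_pending(void)
	{
		/* the early-out added to blk_mq_sched_restart() */
		return atomic_load(&shared_restart) != 0;
	}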
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index edafb5383b7b..5007edece51a 100644
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -115,15 +115,6 @@ static inline bool blk_mq_sched_has_work(struct blk_mq_hw_ctx *hctx)
 	return false;
 }
 
-/*
- * Mark a hardware queue as needing a restart.
- */
-static inline void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
-{
-	if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
-		set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
-}
-
 static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx)
 {
 	return test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
diff --git a/block/blk-mq.c b/block/blk-mq.c
index a69ad122ed66..958cedaff8b8 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -628,25 +628,6 @@ void blk_mq_delay_kick_requeue_list(struct request_queue *q,
 }
 EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);
 
-void blk_mq_abort_requeue_list(struct request_queue *q)
-{
-	unsigned long flags;
-	LIST_HEAD(rq_list);
-
-	spin_lock_irqsave(&q->requeue_lock, flags);
-	list_splice_init(&q->requeue_list, &rq_list);
-	spin_unlock_irqrestore(&q->requeue_lock, flags);
-
-	while (!list_empty(&rq_list)) {
-		struct request *rq;
-
-		rq = list_first_entry(&rq_list, struct request, queuelist);
-		list_del_init(&rq->queuelist);
-		blk_mq_end_request(rq, -EIO);
-	}
-}
-EXPORT_SYMBOL(blk_mq_abort_requeue_list);
-
 struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
 {
 	if (tag < tags->nr_tags) {
@@ -1480,22 +1461,28 @@ static blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, struct request *rq)
 	return blk_tag_to_qc_t(rq->internal_tag, hctx->queue_num, true);
 }
 
-static void __blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie,
-					bool may_sleep)
+static void __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
+					struct request *rq,
+					blk_qc_t *cookie, bool may_sleep)
 {
 	struct request_queue *q = rq->q;
 	struct blk_mq_queue_data bd = {
 		.rq = rq,
 		.last = true,
 	};
-	struct blk_mq_hw_ctx *hctx;
 	blk_qc_t new_cookie;
 	int ret;
+	bool run_queue = true;
+
+	if (blk_mq_hctx_stopped(hctx)) {
+		run_queue = false;
+		goto insert;
+	}
 
 	if (q->elevator)
 		goto insert;
 
-	if (!blk_mq_get_driver_tag(rq, &hctx, false))
+	if (!blk_mq_get_driver_tag(rq, NULL, false))
 		goto insert;
 
 	new_cookie = request_to_qc_t(hctx, rq);
@@ -1519,7 +1506,7 @@ static void __blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie,
 
 	__blk_mq_requeue_request(rq);
 insert:
-	blk_mq_sched_insert_request(rq, false, true, false, may_sleep);
+	blk_mq_sched_insert_request(rq, false, run_queue, false, may_sleep);
 }
 
 static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
@@ -1527,7 +1514,7 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
 {
 	if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
 		rcu_read_lock();
-		__blk_mq_try_issue_directly(rq, cookie, false);
+		__blk_mq_try_issue_directly(hctx, rq, cookie, false);
 		rcu_read_unlock();
 	} else {
 		unsigned int srcu_idx;
@@ -1535,7 +1522,7 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
 		might_sleep();
 
 		srcu_idx = srcu_read_lock(&hctx->queue_rq_srcu);
-		__blk_mq_try_issue_directly(rq, cookie, true);
+		__blk_mq_try_issue_directly(hctx, rq, cookie, true);
 		srcu_read_unlock(&hctx->queue_rq_srcu, srcu_idx);
 	}
 }
@@ -1638,9 +1625,12 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 
 		blk_mq_put_ctx(data.ctx);
 
-		if (same_queue_rq)
+		if (same_queue_rq) {
+			data.hctx = blk_mq_map_queue(q,
+					same_queue_rq->mq_ctx->cpu);
 			blk_mq_try_issue_directly(data.hctx, same_queue_rq,
 					&cookie);
+		}
 	} else if (q->nr_hw_queues > 1 && is_sync) {
 		blk_mq_put_ctx(data.ctx);
 		blk_mq_bio_to_request(rq, bio);
@@ -2113,20 +2103,30 @@ static void blk_mq_map_swqueue(struct request_queue *q,
 	}
 }
 
+/*
+ * Caller needs to ensure that we're either frozen/quiesced, or that
+ * the queue isn't live yet.
+ */
 static void queue_set_hctx_shared(struct request_queue *q, bool shared)
 {
 	struct blk_mq_hw_ctx *hctx;
 	int i;
 
 	queue_for_each_hw_ctx(q, hctx, i) {
-		if (shared)
+		if (shared) {
+			if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
+				atomic_inc(&q->shared_hctx_restart);
 			hctx->flags |= BLK_MQ_F_TAG_SHARED;
-		else
+		} else {
+			if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
+				atomic_dec(&q->shared_hctx_restart);
 			hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
+		}
 	}
 }
 
-static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set, bool shared)
+static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set,
+					bool shared)
 {
 	struct request_queue *q;
 
@@ -2660,7 +2660,8 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
 	return ret;
 }
 
-void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
+static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
+					 int nr_hw_queues)
 {
 	struct request_queue *q;
 
@@ -2684,6 +2685,13 @@ void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
 	list_for_each_entry(q, &set->tag_list, tag_set_list)
 		blk_mq_unfreeze_queue(q);
 }
+
+void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
+{
+	mutex_lock(&set->tag_list_lock);
+	__blk_mq_update_nr_hw_queues(set, nr_hw_queues);
+	mutex_unlock(&set->tag_list_lock);
+}
 EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
 
 /* Enable polling stats and return whether they were already enabled. */
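
Wrapping the exported blk_mq_update_nr_hw_queues() around a __-prefixed worker is the usual kernel locked-wrapper idiom: the public entry point takes set->tag_list_lock, while internal callers that already hold it use the bare version. Reduced to its shape with pthreads:

	#include <pthread.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

	static void __update(int nr)	/* caller must hold lock */
	{
		(void)nr;		/* ... the actual remapping work ... */
	}

	static void update(int nr)	/* public, takes the lock itself */
	{
		pthread_mutex_lock(&lock);
		__update(nr);
		pthread_mutex_unlock(&lock);
	}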
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 504fee940052..27aceab1cc31 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -777,24 +777,25 @@ static void blk_free_queue_rcu(struct rcu_head *rcu_head)
 }
 
 /**
- * blk_release_queue: - release a &struct request_queue when it is no longer needed
- * @kobj: the kobj belonging to the request queue to be released
+ * __blk_release_queue - release a request queue when it is no longer needed
+ * @work: pointer to the release_work member of the request queue to be released
  *
  * Description:
- *     blk_release_queue is the pair to blk_init_queue() or
- *     blk_queue_make_request().  It should be called when a request queue is
- *     being released; typically when a block device is being de-registered.
- *     Currently, its primary task it to free all the &struct request
- *     structures that were allocated to the queue and the queue itself.
+ *     blk_release_queue is the counterpart of blk_init_queue(). It should be
+ *     called when a request queue is being released; typically when a block
+ *     device is being de-registered. Its primary task is to free the queue
+ *     itself.
  *
- * Note:
+ * Notes:
  *     The low level driver must have finished any outstanding requests first
  *     via blk_cleanup_queue().
- **/
-static void blk_release_queue(struct kobject *kobj)
+ *
+ *     Although blk_release_queue() may be called with preemption disabled,
+ *     __blk_release_queue() may sleep.
+ */
+static void __blk_release_queue(struct work_struct *work)
 {
-	struct request_queue *q =
-		container_of(kobj, struct request_queue, kobj);
+	struct request_queue *q = container_of(work, typeof(*q), release_work);
 
 	if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags))
 		blk_stat_remove_callback(q, q->poll_cb);
@@ -809,7 +810,7 @@ static void blk_release_queue(struct kobject *kobj)
 
 	blk_free_queue_stats(q->stats);
 
-	blk_exit_rl(&q->root_rl);
+	blk_exit_rl(q, &q->root_rl);
 
 	if (q->queue_tags)
 		__blk_queue_free_tags(q);
@@ -834,6 +835,15 @@ static void blk_release_queue(struct kobject *kobj)
 	call_rcu(&q->rcu_head, blk_free_queue_rcu);
 }
 
+static void blk_release_queue(struct kobject *kobj)
+{
+	struct request_queue *q =
+		container_of(kobj, struct request_queue, kobj);
+
+	INIT_WORK(&q->release_work, __blk_release_queue);
+	schedule_work(&q->release_work);
+}
+
 static const struct sysfs_ops queue_sysfs_ops = {
 	.show = queue_attr_show,
 	.store = queue_attr_store,
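
The sysfs release callback may run with preemption disabled, but tearing a queue down can sleep, so the real work moves into a work item. The defer-to-another-context shape, with a detached thread standing in for the kernel workqueue:

	#include <pthread.h>
	#include <stdlib.h>

	struct queue {
		/* ... resources whose teardown may block ... */
		int dummy;
	};

	static void *do_release(void *arg)	/* may sleep/block freely */
	{
		free(arg);
		return NULL;
	}

	static void release(struct queue *q)	/* must not block */
	{
		pthread_t t;

		if (pthread_create(&t, NULL, do_release, q) == 0)
			pthread_detach(t);
	}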
@@ -887,10 +897,10 @@ int blk_register_queue(struct gendisk *disk)
 		goto unlock;
 	}
 
-	if (q->mq_ops)
+	if (q->mq_ops) {
 		__blk_mq_register_dev(dev, q);
-
-	blk_mq_debugfs_register(q);
+		blk_mq_debugfs_register(q);
+	}
 
 	kobject_uevent(&q->kobj, KOBJ_ADD);
 
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index b78db2e5fdff..a7285bf2831c 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -22,11 +22,18 @@ static int throtl_quantum = 32;
 #define DFL_THROTL_SLICE_HD (HZ / 10)
 #define DFL_THROTL_SLICE_SSD (HZ / 50)
 #define MAX_THROTL_SLICE (HZ)
-#define DFL_IDLE_THRESHOLD_SSD (1000L) /* 1 ms */
-#define DFL_IDLE_THRESHOLD_HD (100L * 1000) /* 100 ms */
 #define MAX_IDLE_TIME (5L * 1000 * 1000) /* 5 s */
-/* default latency target is 0, eg, guarantee IO latency by default */
-#define DFL_LATENCY_TARGET (0)
+#define MIN_THROTL_BPS (320 * 1024)
+#define MIN_THROTL_IOPS (10)
+#define DFL_LATENCY_TARGET (-1L)
+#define DFL_IDLE_THRESHOLD (0)
+#define DFL_HD_BASELINE_LATENCY (4000L) /* 4ms */
+#define LATENCY_FILTERED_SSD (0)
+/*
+ * For HD, very small latencies come from sequential IO. Such IO tells us
+ * little about whether a cgroup's IO is being impacted by others, hence
+ * we ignore it.
+ */
+#define LATENCY_FILTERED_HD (1000L) /* 1ms */
 
 #define SKIP_LATENCY (((u64)1) << BLK_STAT_RES_SHIFT)
 
@@ -157,6 +164,7 @@ struct throtl_grp {
 	unsigned long last_check_time;
 
 	unsigned long latency_target; /* us */
+	unsigned long latency_target_conf; /* us */
 	/* When did we start a new slice */
 	unsigned long slice_start[2];
 	unsigned long slice_end[2];
@@ -165,6 +173,7 @@ struct throtl_grp {
 	unsigned long checked_last_finish_time; /* ns / 1024 */
 	unsigned long avg_idletime; /* ns / 1024 */
 	unsigned long idletime_threshold; /* us */
+	unsigned long idletime_threshold_conf; /* us */
 
 	unsigned int bio_cnt; /* total bios */
 	unsigned int bad_bio_cnt; /* bios exceeding latency threshold */
@@ -201,8 +210,6 @@ struct throtl_data
 	unsigned int limit_index;
 	bool limit_valid[LIMIT_CNT];
 
-	unsigned long dft_idletime_threshold; /* us */
-
 	unsigned long low_upgrade_time;
 	unsigned long low_downgrade_time;
 
@@ -212,6 +219,7 @@ struct throtl_data
 	struct avg_latency_bucket avg_buckets[LATENCY_BUCKET_SIZE];
 	struct latency_bucket __percpu *latency_buckets;
 	unsigned long last_calculate_time;
+	unsigned long filtered_latency;
 
 	bool track_bio_latency;
 };
@@ -294,8 +302,14 @@ static uint64_t tg_bps_limit(struct throtl_grp *tg, int rw)
 
 	td = tg->td;
 	ret = tg->bps[rw][td->limit_index];
-	if (ret == 0 && td->limit_index == LIMIT_LOW)
-		return tg->bps[rw][LIMIT_MAX];
+	if (ret == 0 && td->limit_index == LIMIT_LOW) {
+		/* intermediate node or iops isn't 0 */
+		if (!list_empty(&blkg->blkcg->css.children) ||
+		    tg->iops[rw][td->limit_index])
+			return U64_MAX;
+		else
+			return MIN_THROTL_BPS;
+	}
 
 	if (td->limit_index == LIMIT_MAX && tg->bps[rw][LIMIT_LOW] &&
 	    tg->bps[rw][LIMIT_LOW] != tg->bps[rw][LIMIT_MAX]) {
@@ -315,10 +329,17 @@ static unsigned int tg_iops_limit(struct throtl_grp *tg, int rw)
 
 	if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent)
 		return UINT_MAX;
+
 	td = tg->td;
 	ret = tg->iops[rw][td->limit_index];
-	if (ret == 0 && tg->td->limit_index == LIMIT_LOW)
-		return tg->iops[rw][LIMIT_MAX];
+	if (ret == 0 && tg->td->limit_index == LIMIT_LOW) {
+		/* intermediate node or bps isn't 0 */
+		if (!list_empty(&blkg->blkcg->css.children) ||
+		    tg->bps[rw][td->limit_index])
+			return UINT_MAX;
+		else
+			return MIN_THROTL_IOPS;
+	}
 
 	if (td->limit_index == LIMIT_MAX && tg->iops[rw][LIMIT_LOW] &&
 	    tg->iops[rw][LIMIT_LOW] != tg->iops[rw][LIMIT_MAX]) {
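
Both limit getters now make the same three-way decision for a group with no low limit in one dimension: intermediate cgroup nodes, and groups that configured the other dimension, stay effectively unlimited, while a bare leaf gets a small floor (MIN_THROTL_BPS / MIN_THROTL_IOPS) so it cannot be starved outright. The bps side of that decision, extracted into a standalone function:

	#include <stdint.h>
	#include <stdbool.h>

	#define MIN_THROTL_BPS (320 * 1024)

	static uint64_t low_bps_limit(uint64_t bps_low, bool has_children,
				      unsigned int iops_low)
	{
		if (bps_low != 0)
			return bps_low;
		if (has_children || iops_low != 0)
			return UINT64_MAX;	/* effectively unlimited */
		return MIN_THROTL_BPS;		/* floor for a bare leaf */
	}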
@@ -482,6 +503,9 @@ static struct blkg_policy_data *throtl_pd_alloc(gfp_t gfp, int node)
 	/* LIMIT_LOW will have default value 0 */
 
 	tg->latency_target = DFL_LATENCY_TARGET;
+	tg->latency_target_conf = DFL_LATENCY_TARGET;
+	tg->idletime_threshold = DFL_IDLE_THRESHOLD;
+	tg->idletime_threshold_conf = DFL_IDLE_THRESHOLD;
 
 	return &tg->pd;
 }
@@ -510,8 +534,6 @@ static void throtl_pd_init(struct blkg_policy_data *pd)
 	if (cgroup_subsys_on_dfl(io_cgrp_subsys) && blkg->parent)
 		sq->parent_sq = &blkg_to_tg(blkg->parent)->service_queue;
 	tg->td = td;
-
-	tg->idletime_threshold = td->dft_idletime_threshold;
 }
 
 /*
@@ -684,7 +706,7 @@ static void throtl_dequeue_tg(struct throtl_grp *tg)
 static void throtl_schedule_pending_timer(struct throtl_service_queue *sq,
 					  unsigned long expires)
 {
-	unsigned long max_expire = jiffies + 8 * sq_to_tg(sq)->td->throtl_slice;
+	unsigned long max_expire = jiffies + 8 * sq_to_td(sq)->throtl_slice;
 
 	/*
 	 * Since we are adjusting the throttle limit dynamically, the sleep
@@ -1349,7 +1371,7 @@ static int tg_print_conf_uint(struct seq_file *sf, void *v)
 	return 0;
 }
 
-static void tg_conf_updated(struct throtl_grp *tg)
+static void tg_conf_updated(struct throtl_grp *tg, bool global)
 {
 	struct throtl_service_queue *sq = &tg->service_queue;
 	struct cgroup_subsys_state *pos_css;
@@ -1367,8 +1389,26 @@ static void tg_conf_updated(struct throtl_grp *tg)
 	 * restrictions in the whole hierarchy and allows them to bypass
 	 * blk-throttle.
 	 */
-	blkg_for_each_descendant_pre(blkg, pos_css, tg_to_blkg(tg))
-		tg_update_has_rules(blkg_to_tg(blkg));
+	blkg_for_each_descendant_pre(blkg, pos_css,
+			global ? tg->td->queue->root_blkg : tg_to_blkg(tg)) {
+		struct throtl_grp *this_tg = blkg_to_tg(blkg);
+		struct throtl_grp *parent_tg;
+
+		tg_update_has_rules(this_tg);
+		/* ignore root/second level */
+		if (!cgroup_subsys_on_dfl(io_cgrp_subsys) || !blkg->parent ||
+		    !blkg->parent->parent)
+			continue;
+		parent_tg = blkg_to_tg(blkg->parent);
+		/*
+		 * make sure all children have a lower idle time threshold
+		 * and a higher latency target
+		 */
+		this_tg->idletime_threshold = min(this_tg->idletime_threshold,
+				parent_tg->idletime_threshold);
+		this_tg->latency_target = max(this_tg->latency_target,
+				parent_tg->latency_target);
+	}
 
 	/*
 	 * We're already holding queue_lock and know @tg is valid.  Let's
@@ -1413,7 +1453,7 @@ static ssize_t tg_set_conf(struct kernfs_open_file *of,
 	else
 		*(unsigned int *)((void *)tg + of_cft(of)->private) = v;
 
-	tg_conf_updated(tg);
+	tg_conf_updated(tg, false);
 	ret = 0;
 out_finish:
 	blkg_conf_finish(&ctx);
@@ -1497,34 +1537,34 @@ static u64 tg_prfill_limit(struct seq_file *sf, struct blkg_policy_data *pd,
1497 tg->iops_conf[READ][off] == iops_dft && 1537 tg->iops_conf[READ][off] == iops_dft &&
1498 tg->iops_conf[WRITE][off] == iops_dft && 1538 tg->iops_conf[WRITE][off] == iops_dft &&
1499 (off != LIMIT_LOW || 1539 (off != LIMIT_LOW ||
1500 (tg->idletime_threshold == tg->td->dft_idletime_threshold && 1540 (tg->idletime_threshold_conf == DFL_IDLE_THRESHOLD &&
1501 tg->latency_target == DFL_LATENCY_TARGET))) 1541 tg->latency_target_conf == DFL_LATENCY_TARGET)))
1502 return 0; 1542 return 0;
1503 1543
1504 if (tg->bps_conf[READ][off] != bps_dft) 1544 if (tg->bps_conf[READ][off] != U64_MAX)
1505 snprintf(bufs[0], sizeof(bufs[0]), "%llu", 1545 snprintf(bufs[0], sizeof(bufs[0]), "%llu",
1506 tg->bps_conf[READ][off]); 1546 tg->bps_conf[READ][off]);
1507 if (tg->bps_conf[WRITE][off] != bps_dft) 1547 if (tg->bps_conf[WRITE][off] != U64_MAX)
1508 snprintf(bufs[1], sizeof(bufs[1]), "%llu", 1548 snprintf(bufs[1], sizeof(bufs[1]), "%llu",
1509 tg->bps_conf[WRITE][off]); 1549 tg->bps_conf[WRITE][off]);
1510 if (tg->iops_conf[READ][off] != iops_dft) 1550 if (tg->iops_conf[READ][off] != UINT_MAX)
1511 snprintf(bufs[2], sizeof(bufs[2]), "%u", 1551 snprintf(bufs[2], sizeof(bufs[2]), "%u",
1512 tg->iops_conf[READ][off]); 1552 tg->iops_conf[READ][off]);
1513 if (tg->iops_conf[WRITE][off] != iops_dft) 1553 if (tg->iops_conf[WRITE][off] != UINT_MAX)
1514 snprintf(bufs[3], sizeof(bufs[3]), "%u", 1554 snprintf(bufs[3], sizeof(bufs[3]), "%u",
1515 tg->iops_conf[WRITE][off]); 1555 tg->iops_conf[WRITE][off]);
1516 if (off == LIMIT_LOW) { 1556 if (off == LIMIT_LOW) {
1517 if (tg->idletime_threshold == ULONG_MAX) 1557 if (tg->idletime_threshold_conf == ULONG_MAX)
1518 strcpy(idle_time, " idle=max"); 1558 strcpy(idle_time, " idle=max");
1519 else 1559 else
1520 snprintf(idle_time, sizeof(idle_time), " idle=%lu", 1560 snprintf(idle_time, sizeof(idle_time), " idle=%lu",
1521 tg->idletime_threshold); 1561 tg->idletime_threshold_conf);
1522 1562
1523 if (tg->latency_target == ULONG_MAX) 1563 if (tg->latency_target_conf == ULONG_MAX)
1524 strcpy(latency_time, " latency=max"); 1564 strcpy(latency_time, " latency=max");
1525 else 1565 else
1526 snprintf(latency_time, sizeof(latency_time), 1566 snprintf(latency_time, sizeof(latency_time),
1527 " latency=%lu", tg->latency_target); 1567 " latency=%lu", tg->latency_target_conf);
1528 } 1568 }
1529 1569
1530 seq_printf(sf, "%s rbps=%s wbps=%s riops=%s wiops=%s%s%s\n", 1570 seq_printf(sf, "%s rbps=%s wbps=%s riops=%s wiops=%s%s%s\n",
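
With the _conf fields now feeding the output, a populated io.low line emitted by this seq_printf() looks like the following; the device number and values are purely illustrative, and fields left at their defaults print as "max":

8:16 rbps=2097152 wbps=max riops=max wiops=max idle=1000 latency=70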
@@ -1563,8 +1603,8 @@ static ssize_t tg_set_limit(struct kernfs_open_file *of,
1563 v[2] = tg->iops_conf[READ][index]; 1603 v[2] = tg->iops_conf[READ][index];
1564 v[3] = tg->iops_conf[WRITE][index]; 1604 v[3] = tg->iops_conf[WRITE][index];
1565 1605
1566 idle_time = tg->idletime_threshold; 1606 idle_time = tg->idletime_threshold_conf;
1567 latency_time = tg->latency_target; 1607 latency_time = tg->latency_target_conf;
1568 while (true) { 1608 while (true) {
1569 char tok[27]; /* wiops=18446744073709551616 */ 1609 char tok[27]; /* wiops=18446744073709551616 */
1570 char *p; 1610 char *p;
@@ -1623,17 +1663,33 @@ static ssize_t tg_set_limit(struct kernfs_open_file *of,
1623 tg->iops_conf[READ][LIMIT_MAX]); 1663 tg->iops_conf[READ][LIMIT_MAX]);
1624 tg->iops[WRITE][LIMIT_LOW] = min(tg->iops_conf[WRITE][LIMIT_LOW], 1664 tg->iops[WRITE][LIMIT_LOW] = min(tg->iops_conf[WRITE][LIMIT_LOW],
1625 tg->iops_conf[WRITE][LIMIT_MAX]); 1665 tg->iops_conf[WRITE][LIMIT_MAX]);
1666 tg->idletime_threshold_conf = idle_time;
1667 tg->latency_target_conf = latency_time;
1668
1669 /* force user to configure all settings for low limit */
1670 if (!(tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW] ||
1671 tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) ||
1672 tg->idletime_threshold_conf == DFL_IDLE_THRESHOLD ||
1673 tg->latency_target_conf == DFL_LATENCY_TARGET) {
1674 tg->bps[READ][LIMIT_LOW] = 0;
1675 tg->bps[WRITE][LIMIT_LOW] = 0;
1676 tg->iops[READ][LIMIT_LOW] = 0;
1677 tg->iops[WRITE][LIMIT_LOW] = 0;
1678 tg->idletime_threshold = DFL_IDLE_THRESHOLD;
1679 tg->latency_target = DFL_LATENCY_TARGET;
1680 } else if (index == LIMIT_LOW) {
1681 tg->idletime_threshold = tg->idletime_threshold_conf;
1682 tg->latency_target = tg->latency_target_conf;
1683 }
1626 1684
1627 if (index == LIMIT_LOW) { 1685 blk_throtl_update_limit_valid(tg->td);
1628 blk_throtl_update_limit_valid(tg->td); 1686 if (tg->td->limit_valid[LIMIT_LOW]) {
1629 if (tg->td->limit_valid[LIMIT_LOW]) 1687 if (index == LIMIT_LOW)
1630 tg->td->limit_index = LIMIT_LOW; 1688 tg->td->limit_index = LIMIT_LOW;
1631 tg->idletime_threshold = (idle_time == ULONG_MAX) ? 1689 } else
1632 ULONG_MAX : idle_time; 1690 tg->td->limit_index = LIMIT_MAX;
1633 tg->latency_target = (latency_time == ULONG_MAX) ? 1691 tg_conf_updated(tg, index == LIMIT_LOW &&
1634 ULONG_MAX : latency_time; 1692 tg->td->limit_valid[LIMIT_LOW]);
1635 }
1636 tg_conf_updated(tg);
1637 ret = 0; 1693 ret = 0;
1638out_finish: 1694out_finish:
1639 blkg_conf_finish(&ctx); 1695 blkg_conf_finish(&ctx);
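
Because the validation above zeroes the whole low limit unless at least one throughput knob and both the idle and latency settings are configured, enabling io.low means writing the complete tuple in one go. An illustrative cgroup v2 write (the cgroup path and the numbers are hypothetical):

echo "8:16 rbps=2097152 idle=1000 latency=70" > /sys/fs/cgroup/mygrp/io.low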
@@ -1722,17 +1778,25 @@ static bool throtl_tg_is_idle(struct throtl_grp *tg)
1722 /* 1778 /*
1723 * cgroup is idle if: 1779 * cgroup is idle if:
 1724 * - single idle is too long, longer than a fixed value (in case the user 1780 * - single idle is too long, longer than a fixed value (in case the user
 1725 * configures a too-big threshold) or 4 times the slice 1781 * configures a too-big threshold) or 4 times the idletime threshold
1726 * - average think time is more than threshold 1782 * - average think time is more than threshold
1727 * - IO latency is largely below threshold 1783 * - IO latency is largely below threshold
1728 */ 1784 */
1729 unsigned long time = jiffies_to_usecs(4 * tg->td->throtl_slice); 1785 unsigned long time;
1730 1786 bool ret;
1731 time = min_t(unsigned long, MAX_IDLE_TIME, time); 1787
1732 return (ktime_get_ns() >> 10) - tg->last_finish_time > time || 1788 time = min_t(unsigned long, MAX_IDLE_TIME, 4 * tg->idletime_threshold);
1733 tg->avg_idletime > tg->idletime_threshold || 1789 ret = tg->latency_target == DFL_LATENCY_TARGET ||
1734 (tg->latency_target && tg->bio_cnt && 1790 tg->idletime_threshold == DFL_IDLE_THRESHOLD ||
1791 (ktime_get_ns() >> 10) - tg->last_finish_time > time ||
1792 tg->avg_idletime > tg->idletime_threshold ||
1793 (tg->latency_target && tg->bio_cnt &&
1735 tg->bad_bio_cnt * 5 < tg->bio_cnt); 1794 tg->bad_bio_cnt * 5 < tg->bio_cnt);
1795 throtl_log(&tg->service_queue,
1796 "avg_idle=%ld, idle_threshold=%ld, bad_bio=%d, total_bio=%d, is_idle=%d, scale=%d",
1797 tg->avg_idletime, tg->idletime_threshold, tg->bad_bio_cnt,
1798 tg->bio_cnt, ret, tg->td->scale);
1799 return ret;
1736} 1800}
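
The rewritten predicate treats a group as idle when either low-limit knob is still at its default, when the group has not completed an IO for longer than min(MAX_IDLE_TIME, four idle thresholds), when its average think time exceeds the threshold, or when fewer than a fifth of its bios missed the latency target. A stand-alone restatement of that OR-chain; the field names mirror the kernel's, but the types and default values are simplified assumptions:

#include <stdbool.h>

#define DFL_IDLE_THRESHOLD      0UL                     /* assumed default */
#define DFL_LATENCY_TARGET      0UL                     /* assumed default */
#define MAX_IDLE_TIME           (5UL * 1000 * 1000)     /* usec, assumed cap */

struct tg_sample {
        unsigned long idletime_threshold, latency_target;       /* usec */
        unsigned long avg_idletime, since_last_finish;          /* usec */
        unsigned int bio_cnt, bad_bio_cnt;
};

static bool tg_is_idle(const struct tg_sample *tg)
{
        unsigned long limit = 4 * tg->idletime_threshold;

        if (limit > MAX_IDLE_TIME)
                limit = MAX_IDLE_TIME;
        return tg->latency_target == DFL_LATENCY_TARGET ||
               tg->idletime_threshold == DFL_IDLE_THRESHOLD ||
               tg->since_last_finish > limit ||
               tg->avg_idletime > tg->idletime_threshold ||
               (tg->latency_target && tg->bio_cnt &&
                tg->bad_bio_cnt * 5 < tg->bio_cnt);
}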
1737 1801
1738static bool throtl_tg_can_upgrade(struct throtl_grp *tg) 1802static bool throtl_tg_can_upgrade(struct throtl_grp *tg)
@@ -1828,6 +1892,7 @@ static void throtl_upgrade_state(struct throtl_data *td)
1828 struct cgroup_subsys_state *pos_css; 1892 struct cgroup_subsys_state *pos_css;
1829 struct blkcg_gq *blkg; 1893 struct blkcg_gq *blkg;
1830 1894
1895 throtl_log(&td->service_queue, "upgrade to max");
1831 td->limit_index = LIMIT_MAX; 1896 td->limit_index = LIMIT_MAX;
1832 td->low_upgrade_time = jiffies; 1897 td->low_upgrade_time = jiffies;
1833 td->scale = 0; 1898 td->scale = 0;
@@ -1850,6 +1915,7 @@ static void throtl_downgrade_state(struct throtl_data *td, int new)
1850{ 1915{
1851 td->scale /= 2; 1916 td->scale /= 2;
1852 1917
1918 throtl_log(&td->service_queue, "downgrade, scale %d", td->scale);
1853 if (td->scale) { 1919 if (td->scale) {
1854 td->low_upgrade_time = jiffies - td->scale * td->throtl_slice; 1920 td->low_upgrade_time = jiffies - td->scale * td->throtl_slice;
1855 return; 1921 return;
@@ -2023,6 +2089,11 @@ static void throtl_update_latency_buckets(struct throtl_data *td)
2023 td->avg_buckets[i].valid = true; 2089 td->avg_buckets[i].valid = true;
2024 last_latency = td->avg_buckets[i].latency; 2090 last_latency = td->avg_buckets[i].latency;
2025 } 2091 }
2092
2093 for (i = 0; i < LATENCY_BUCKET_SIZE; i++)
2094 throtl_log(&td->service_queue,
2095 "Latency bucket %d: latency=%ld, valid=%d", i,
2096 td->avg_buckets[i].latency, td->avg_buckets[i].valid);
2026} 2097}
2027#else 2098#else
2028static inline void throtl_update_latency_buckets(struct throtl_data *td) 2099static inline void throtl_update_latency_buckets(struct throtl_data *td)
@@ -2218,7 +2289,7 @@ void blk_throtl_bio_endio(struct bio *bio)
2218 throtl_track_latency(tg->td, blk_stat_size(&bio->bi_issue_stat), 2289 throtl_track_latency(tg->td, blk_stat_size(&bio->bi_issue_stat),
2219 bio_op(bio), lat); 2290 bio_op(bio), lat);
2220 2291
2221 if (tg->latency_target) { 2292 if (tg->latency_target && lat >= tg->td->filtered_latency) {
2222 int bucket; 2293 int bucket;
2223 unsigned int threshold; 2294 unsigned int threshold;
2224 2295
@@ -2354,18 +2425,19 @@ void blk_throtl_exit(struct request_queue *q)
2354void blk_throtl_register_queue(struct request_queue *q) 2425void blk_throtl_register_queue(struct request_queue *q)
2355{ 2426{
2356 struct throtl_data *td; 2427 struct throtl_data *td;
2357 struct cgroup_subsys_state *pos_css; 2428 int i;
2358 struct blkcg_gq *blkg;
2359 2429
2360 td = q->td; 2430 td = q->td;
2361 BUG_ON(!td); 2431 BUG_ON(!td);
2362 2432
2363 if (blk_queue_nonrot(q)) { 2433 if (blk_queue_nonrot(q)) {
2364 td->throtl_slice = DFL_THROTL_SLICE_SSD; 2434 td->throtl_slice = DFL_THROTL_SLICE_SSD;
2365 td->dft_idletime_threshold = DFL_IDLE_THRESHOLD_SSD; 2435 td->filtered_latency = LATENCY_FILTERED_SSD;
2366 } else { 2436 } else {
2367 td->throtl_slice = DFL_THROTL_SLICE_HD; 2437 td->throtl_slice = DFL_THROTL_SLICE_HD;
2368 td->dft_idletime_threshold = DFL_IDLE_THRESHOLD_HD; 2438 td->filtered_latency = LATENCY_FILTERED_HD;
2439 for (i = 0; i < LATENCY_BUCKET_SIZE; i++)
2440 td->avg_buckets[i].latency = DFL_HD_BASELINE_LATENCY;
2369 } 2441 }
2370#ifndef CONFIG_BLK_DEV_THROTTLING_LOW 2442#ifndef CONFIG_BLK_DEV_THROTTLING_LOW
2371 /* if no low limit, use previous default */ 2443 /* if no low limit, use previous default */
@@ -2375,18 +2447,6 @@ void blk_throtl_register_queue(struct request_queue *q)
2375 td->track_bio_latency = !q->mq_ops && !q->request_fn; 2447 td->track_bio_latency = !q->mq_ops && !q->request_fn;
2376 if (!td->track_bio_latency) 2448 if (!td->track_bio_latency)
2377 blk_stat_enable_accounting(q); 2449 blk_stat_enable_accounting(q);
2378
2379 /*
2380 * some tg are created before queue is fully initialized, eg, nonrot
2381 * isn't initialized yet
2382 */
2383 rcu_read_lock();
2384 blkg_for_each_descendant_post(blkg, pos_css, q->root_blkg) {
2385 struct throtl_grp *tg = blkg_to_tg(blkg);
2386
2387 tg->idletime_threshold = td->dft_idletime_threshold;
2388 }
2389 rcu_read_unlock();
2390} 2450}
2391 2451
2392#ifdef CONFIG_BLK_DEV_THROTTLING_LOW 2452#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
diff --git a/block/blk.h b/block/blk.h
index 2ed70228e44f..83c8e1100525 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -59,7 +59,7 @@ void blk_free_flush_queue(struct blk_flush_queue *q);
59 59
60int blk_init_rl(struct request_list *rl, struct request_queue *q, 60int blk_init_rl(struct request_list *rl, struct request_queue *q,
61 gfp_t gfp_mask); 61 gfp_t gfp_mask);
62void blk_exit_rl(struct request_list *rl); 62void blk_exit_rl(struct request_queue *q, struct request_list *rl);
63void blk_rq_bio_prep(struct request_queue *q, struct request *rq, 63void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
64 struct bio *bio); 64 struct bio *bio);
65void blk_queue_bypass_start(struct request_queue *q); 65void blk_queue_bypass_start(struct request_queue *q);
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index da69b079725f..b7e9c7feeab2 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -38,9 +38,13 @@ static const u64 cfq_target_latency = (u64)NSEC_PER_SEC * 3/10; /* 300 ms */
38static const int cfq_hist_divisor = 4; 38static const int cfq_hist_divisor = 4;
39 39
40/* 40/*
41 * offset from end of service tree 41 * offset from end of queue service tree for idle class
42 */ 42 */
43#define CFQ_IDLE_DELAY (NSEC_PER_SEC / 5) 43#define CFQ_IDLE_DELAY (NSEC_PER_SEC / 5)
44/* offset from end of group service tree under time slice mode */
45#define CFQ_SLICE_MODE_GROUP_DELAY (NSEC_PER_SEC / 5)
46/* offset from end of group service under IOPS mode */
47#define CFQ_IOPS_MODE_GROUP_DELAY (HZ / 5)
44 48
45/* 49/*
46 * below this threshold, we consider thinktime immediate 50 * below this threshold, we consider thinktime immediate
@@ -1362,6 +1366,14 @@ cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
1362 cfqg->vfraction = max_t(unsigned, vfr, 1); 1366 cfqg->vfraction = max_t(unsigned, vfr, 1);
1363} 1367}
1364 1368
1369static inline u64 cfq_get_cfqg_vdisktime_delay(struct cfq_data *cfqd)
1370{
1371 if (!iops_mode(cfqd))
1372 return CFQ_SLICE_MODE_GROUP_DELAY;
1373 else
1374 return CFQ_IOPS_MODE_GROUP_DELAY;
1375}
1376
1365static void 1377static void
1366cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg) 1378cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
1367{ 1379{
@@ -1381,7 +1393,8 @@ cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
1381 n = rb_last(&st->rb); 1393 n = rb_last(&st->rb);
1382 if (n) { 1394 if (n) {
1383 __cfqg = rb_entry_cfqg(n); 1395 __cfqg = rb_entry_cfqg(n);
1384 cfqg->vdisktime = __cfqg->vdisktime + CFQ_IDLE_DELAY; 1396 cfqg->vdisktime = __cfqg->vdisktime +
1397 cfq_get_cfqg_vdisktime_delay(cfqd);
1385 } else 1398 } else
1386 cfqg->vdisktime = st->min_vdisktime; 1399 cfqg->vdisktime = st->min_vdisktime;
1387 cfq_group_service_tree_add(st, cfqg); 1400 cfq_group_service_tree_add(st, cfqg);
diff --git a/block/partition-generic.c b/block/partition-generic.c
index ff07b9143ca4..c5ec8246e25e 100644
--- a/block/partition-generic.c
+++ b/block/partition-generic.c
@@ -320,8 +320,10 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno,
320 320
321 if (info) { 321 if (info) {
322 struct partition_meta_info *pinfo = alloc_part_info(disk); 322 struct partition_meta_info *pinfo = alloc_part_info(disk);
323 if (!pinfo) 323 if (!pinfo) {
324 err = -ENOMEM;
324 goto out_free_stats; 325 goto out_free_stats;
326 }
325 memcpy(pinfo, info, sizeof(*info)); 327 memcpy(pinfo, info, sizeof(*info));
326 p->info = pinfo; 328 p->info = pinfo;
327 } 329 }
diff --git a/block/partitions/msdos.c b/block/partitions/msdos.c
index 93e7c1b32edd..5610cd537da7 100644
--- a/block/partitions/msdos.c
+++ b/block/partitions/msdos.c
@@ -300,6 +300,8 @@ static void parse_bsd(struct parsed_partitions *state,
300 continue; 300 continue;
301 bsd_start = le32_to_cpu(p->p_offset); 301 bsd_start = le32_to_cpu(p->p_offset);
302 bsd_size = le32_to_cpu(p->p_size); 302 bsd_size = le32_to_cpu(p->p_size);
303 if (memcmp(flavour, "bsd\0", 4) == 0)
304 bsd_start += offset;
303 if (offset == bsd_start && size == bsd_size) 305 if (offset == bsd_start && size == bsd_size)
304 /* full parent partition, we have it already */ 306 /* full parent partition, we have it already */
305 continue; 307 continue;
diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c
index d3a989e718f5..3cd6e12cfc46 100644
--- a/crypto/asymmetric_keys/public_key.c
+++ b/crypto/asymmetric_keys/public_key.c
@@ -141,7 +141,7 @@ int public_key_verify_signature(const struct public_key *pkey,
141 * signature and returns that to us. 141 * signature and returns that to us.
142 */ 142 */
143 ret = crypto_akcipher_verify(req); 143 ret = crypto_akcipher_verify(req);
144 if (ret == -EINPROGRESS) { 144 if ((ret == -EINPROGRESS) || (ret == -EBUSY)) {
145 wait_for_completion(&compl.completion); 145 wait_for_completion(&compl.completion);
146 ret = compl.err; 146 ret = compl.err;
147 } 147 }
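
Waiting on -EBUSY as well as -EINPROGRESS matters because a request accepted into the crypto backlog also finishes through the async callback; a caller that bailed out on -EBUSY would report failure for a request that is still going to complete. The usual callback/completion pairing, sketched with hypothetical names:

struct verify_result {
        struct completion completion;
        int err;
};

/* async callback: invoked by the driver when the request finishes */
static void verify_done(struct crypto_async_request *req, int err)
{
        struct verify_result *res = req->data;

        if (err == -EINPROGRESS)        /* backlog entry entered the queue */
                return;
        res->err = err;
        complete(&res->completion);
}

The submit side then treats both return codes identically, exactly as the hunk above does for crypto_akcipher_verify().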
diff --git a/crypto/asymmetric_keys/verify_pefile.c b/crypto/asymmetric_keys/verify_pefile.c
index 672a94c2c3ff..d178650fd524 100644
--- a/crypto/asymmetric_keys/verify_pefile.c
+++ b/crypto/asymmetric_keys/verify_pefile.c
@@ -381,7 +381,7 @@ static int pefile_digest_pe(const void *pebuf, unsigned int pelen,
381 } 381 }
382 382
383error: 383error:
384 kfree(desc); 384 kzfree(desc);
385error_no_desc: 385error_no_desc:
386 crypto_free_shash(tfm); 386 crypto_free_shash(tfm);
387 kleave(" = %d", ret); 387 kleave(" = %d", ret);
@@ -450,6 +450,6 @@ int verify_pefile_signature(const void *pebuf, unsigned pelen,
450 ret = pefile_digest_pe(pebuf, pelen, &ctx); 450 ret = pefile_digest_pe(pebuf, pelen, &ctx);
451 451
452error: 452error:
453 kfree(ctx.digest); 453 kzfree(ctx.digest);
454 return ret; 454 return ret;
455} 455}
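
Switching from kfree() to kzfree() matters here because the shash descriptor and the computed digest are security-sensitive: kzfree() zeroes the allocation before handing it back, so the data cannot be scraped out of freed memory. Roughly equivalent behaviour, as a simplification (the real helper also handles zero-size pointers, and later kernels use memzero_explicit() so the store cannot be optimized away):

static void kzfree_like(void *p)
{
        if (!p)
                return;
        memset(p, 0, ksize(p)); /* wipe the full usable size, then free */
        kfree(p);
}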
diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c
index c80765b211cf..dd03fead1ca3 100644
--- a/crypto/asymmetric_keys/x509_cert_parser.c
+++ b/crypto/asymmetric_keys/x509_cert_parser.c
@@ -102,6 +102,7 @@ struct x509_certificate *x509_cert_parse(const void *data, size_t datalen)
102 } 102 }
103 } 103 }
104 104
105 ret = -ENOMEM;
105 cert->pub->key = kmemdup(ctx->key, ctx->key_size, GFP_KERNEL); 106 cert->pub->key = kmemdup(ctx->key, ctx->key_size, GFP_KERNEL);
106 if (!cert->pub->key) 107 if (!cert->pub->key)
107 goto error_decode; 108 goto error_decode;
diff --git a/crypto/drbg.c b/crypto/drbg.c
index fa749f470135..cdb27ac4b226 100644
--- a/crypto/drbg.c
+++ b/crypto/drbg.c
@@ -1767,9 +1767,8 @@ static int drbg_kcapi_sym_ctr(struct drbg_state *drbg,
1767 break; 1767 break;
1768 case -EINPROGRESS: 1768 case -EINPROGRESS:
1769 case -EBUSY: 1769 case -EBUSY:
1770 ret = wait_for_completion_interruptible( 1770 wait_for_completion(&drbg->ctr_completion);
1771 &drbg->ctr_completion); 1771 if (!drbg->ctr_async_err) {
1772 if (!ret && !drbg->ctr_async_err) {
1773 reinit_completion(&drbg->ctr_completion); 1772 reinit_completion(&drbg->ctr_completion);
1774 break; 1773 break;
1775 } 1774 }
diff --git a/crypto/gcm.c b/crypto/gcm.c
index b7ad808be3d4..3841b5eafa7e 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -152,10 +152,8 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key,
152 152
153 err = crypto_skcipher_encrypt(&data->req); 153 err = crypto_skcipher_encrypt(&data->req);
154 if (err == -EINPROGRESS || err == -EBUSY) { 154 if (err == -EINPROGRESS || err == -EBUSY) {
155 err = wait_for_completion_interruptible( 155 wait_for_completion(&data->result.completion);
156 &data->result.completion); 156 err = data->result.err;
157 if (!err)
158 err = data->result.err;
159 } 157 }
160 158
161 if (err) 159 if (err)
diff --git a/crypto/skcipher.c b/crypto/skcipher.c
index 014af741fc6a..4faa0fd53b0c 100644
--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -764,6 +764,44 @@ static int crypto_init_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
764 return 0; 764 return 0;
765} 765}
766 766
767static int skcipher_setkey_unaligned(struct crypto_skcipher *tfm,
768 const u8 *key, unsigned int keylen)
769{
770 unsigned long alignmask = crypto_skcipher_alignmask(tfm);
771 struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
772 u8 *buffer, *alignbuffer;
773 unsigned long absize;
774 int ret;
775
776 absize = keylen + alignmask;
777 buffer = kmalloc(absize, GFP_ATOMIC);
778 if (!buffer)
779 return -ENOMEM;
780
781 alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
782 memcpy(alignbuffer, key, keylen);
783 ret = cipher->setkey(tfm, alignbuffer, keylen);
784 kzfree(buffer);
785 return ret;
786}
787
788static int skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
789 unsigned int keylen)
790{
791 struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
792 unsigned long alignmask = crypto_skcipher_alignmask(tfm);
793
794 if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
795 crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
796 return -EINVAL;
797 }
798
799 if ((unsigned long)key & alignmask)
800 return skcipher_setkey_unaligned(tfm, key, keylen);
801
802 return cipher->setkey(tfm, key, keylen);
803}
804
767static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm) 805static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm)
768{ 806{
769 struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm); 807 struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
@@ -784,7 +822,7 @@ static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
784 tfm->__crt_alg->cra_type == &crypto_givcipher_type) 822 tfm->__crt_alg->cra_type == &crypto_givcipher_type)
785 return crypto_init_skcipher_ops_ablkcipher(tfm); 823 return crypto_init_skcipher_ops_ablkcipher(tfm);
786 824
787 skcipher->setkey = alg->setkey; 825 skcipher->setkey = skcipher_setkey;
788 skcipher->encrypt = alg->encrypt; 826 skcipher->encrypt = alg->encrypt;
789 skcipher->decrypt = alg->decrypt; 827 skcipher->decrypt = alg->decrypt;
790 skcipher->ivsize = alg->ivsize; 828 skcipher->ivsize = alg->ivsize;
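
skcipher_setkey_unaligned() over-allocates by alignmask bytes and rounds the pointer up, the standard way to satisfy an alignment mask without a dedicated allocator; the temporary copy is then wiped with kzfree() because it held key material. A small runnable demonstration of just the pointer arithmetic (ALIGN is redefined here for user space):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define ALIGN(x, a)     (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

int main(void)
{
        unsigned long alignmask = 15;   /* cipher wants 16-byte alignment */
        size_t keylen = 32;
        unsigned char *buffer, *alignbuffer;

        /* over-allocate so an aligned run of keylen bytes always fits */
        buffer = malloc(keylen + alignmask);
        if (!buffer)
                return 1;
        alignbuffer = (unsigned char *)ALIGN((unsigned long)buffer,
                                             alignmask + 1);
        memcpy(alignbuffer, "0123456789abcdef0123456789abcdef", keylen);
        printf("raw=%p aligned=%p\n", (void *)buffer, (void *)alignbuffer);
        free(buffer);   /* free the original pointer, not the aligned one */
        return 0;
}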
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
index 5a968a78652b..0d2e98920069 100644
--- a/drivers/acpi/acpica/tbutils.c
+++ b/drivers/acpi/acpica/tbutils.c
@@ -416,13 +416,18 @@ acpi_tb_get_table(struct acpi_table_desc *table_desc,
416 } 416 }
417 } 417 }
418 418
419 table_desc->validation_count++; 419 if (table_desc->validation_count < ACPI_MAX_TABLE_VALIDATIONS) {
420 if (table_desc->validation_count == 0) { 420 table_desc->validation_count++;
421 ACPI_ERROR((AE_INFO, 421
422 "Table %p, Validation count is zero after increment\n", 422 /*
423 table_desc)); 423 * Detect validation_count overflows to ensure that the warning
424 table_desc->validation_count--; 424 * message will only be printed once.
425 return_ACPI_STATUS(AE_LIMIT); 425 */
426 if (table_desc->validation_count >= ACPI_MAX_TABLE_VALIDATIONS) {
427 ACPI_WARNING((AE_INFO,
428 "Table %p, Validation count overflows\n",
429 table_desc));
430 }
426 } 431 }
427 432
428 *out_table = table_desc->pointer; 433 *out_table = table_desc->pointer;
@@ -449,13 +454,20 @@ void acpi_tb_put_table(struct acpi_table_desc *table_desc)
449 454
450 ACPI_FUNCTION_TRACE(acpi_tb_put_table); 455 ACPI_FUNCTION_TRACE(acpi_tb_put_table);
451 456
452 if (table_desc->validation_count == 0) { 457 if (table_desc->validation_count < ACPI_MAX_TABLE_VALIDATIONS) {
453 ACPI_WARNING((AE_INFO, 458 table_desc->validation_count--;
454 "Table %p, Validation count is zero before decrement\n", 459
455 table_desc)); 460 /*
456 return_VOID; 461 * Detect validation_count underflows to ensure that the warning
462 * message will only be printed once.
463 */
464 if (table_desc->validation_count >= ACPI_MAX_TABLE_VALIDATIONS) {
465 ACPI_WARNING((AE_INFO,
466 "Table %p, Validation count underflows\n",
467 table_desc));
468 return_VOID;
469 }
457 } 470 }
458 table_desc->validation_count--;
459 471
460 if (table_desc->validation_count == 0) { 472 if (table_desc->validation_count == 0) {
461 473
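
Both hunks turn validation_count into a saturating counter: once it reaches ACPI_MAX_TABLE_VALIDATIONS it stops moving in either direction, and each warning fires exactly once, at the moment of saturation or underflow. The same idea in isolation, runnable, with an illustrative 8-bit counter and limit:

#include <stdio.h>

#define MAX_VALIDATIONS 255     /* illustrative saturation point */

static unsigned char count;

static void get_table_ref(void)
{
        if (count < MAX_VALIDATIONS) {
                count++;
                if (count >= MAX_VALIDATIONS)  /* warn once, then freeze */
                        printf("warning: validation count overflows\n");
        }
}

static void put_table_ref(void)
{
        if (count < MAX_VALIDATIONS) {
                count--;
                if (count >= MAX_VALIDATIONS)  /* wrapped below zero */
                        printf("warning: validation count underflows\n");
        }
}

int main(void)
{
        put_table_ref();  /* unbalanced put: prints the underflow warning */
        return 0;
}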
diff --git a/drivers/acpi/acpica/utresrc.c b/drivers/acpi/acpica/utresrc.c
index e0587c85bafd..ff096d9755b9 100644
--- a/drivers/acpi/acpica/utresrc.c
+++ b/drivers/acpi/acpica/utresrc.c
@@ -474,15 +474,6 @@ acpi_ut_walk_aml_resources(struct acpi_walk_state *walk_state,
474 return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG); 474 return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
475 } 475 }
476 476
477 /*
478 * The end_tag opcode must be followed by a zero byte.
479 * Although this byte is technically defined to be a checksum,
480 * in practice, all ASL compilers set this byte to zero.
481 */
482 if (*(aml + 1) != 0) {
483 return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
484 }
485
486 /* Return the pointer to the end_tag if requested */ 477 /* Return the pointer to the end_tag if requested */
487 478
488 if (!user_function) { 479 if (!user_function) {
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index c5fecf97ee2f..797b28dc7b34 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -666,14 +666,6 @@ static const struct iommu_ops *iort_iommu_xlate(struct device *dev,
666 int ret = -ENODEV; 666 int ret = -ENODEV;
667 struct fwnode_handle *iort_fwnode; 667 struct fwnode_handle *iort_fwnode;
668 668
669 /*
670 * If we already translated the fwspec there
671 * is nothing left to do, return the iommu_ops.
672 */
673 ops = iort_fwspec_iommu_ops(dev->iommu_fwspec);
674 if (ops)
675 return ops;
676
677 if (node) { 669 if (node) {
678 iort_fwnode = iort_get_fwnode(node); 670 iort_fwnode = iort_get_fwnode(node);
679 if (!iort_fwnode) 671 if (!iort_fwnode)
@@ -735,6 +727,14 @@ const struct iommu_ops *iort_iommu_configure(struct device *dev)
735 u32 streamid = 0; 727 u32 streamid = 0;
736 int err; 728 int err;
737 729
730 /*
731 * If we already translated the fwspec there
732 * is nothing left to do, return the iommu_ops.
733 */
734 ops = iort_fwspec_iommu_ops(dev->iommu_fwspec);
735 if (ops)
736 return ops;
737
738 if (dev_is_pci(dev)) { 738 if (dev_is_pci(dev)) {
739 struct pci_bus *bus = to_pci_dev(dev)->bus; 739 struct pci_bus *bus = to_pci_dev(dev)->bus;
740 u32 rid; 740 u32 rid;
@@ -782,6 +782,12 @@ const struct iommu_ops *iort_iommu_configure(struct device *dev)
782 if (err) 782 if (err)
783 ops = ERR_PTR(err); 783 ops = ERR_PTR(err);
784 784
785 /* Ignore all other errors apart from EPROBE_DEFER */
786 if (IS_ERR(ops) && (PTR_ERR(ops) != -EPROBE_DEFER)) {
787 dev_dbg(dev, "Adding to IOMMU failed: %ld\n", PTR_ERR(ops));
788 ops = NULL;
789 }
790
785 return ops; 791 return ops;
786} 792}
787 793
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index a9a9ab3399d4..d42eeef9d928 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -782,7 +782,7 @@ static int acpi_battery_update(struct acpi_battery *battery, bool resume)
782 if ((battery->state & ACPI_BATTERY_STATE_CRITICAL) || 782 if ((battery->state & ACPI_BATTERY_STATE_CRITICAL) ||
783 (test_bit(ACPI_BATTERY_ALARM_PRESENT, &battery->flags) && 783 (test_bit(ACPI_BATTERY_ALARM_PRESENT, &battery->flags) &&
784 (battery->capacity_now <= battery->alarm))) 784 (battery->capacity_now <= battery->alarm)))
785 pm_wakeup_hard_event(&battery->device->dev); 785 pm_wakeup_event(&battery->device->dev, 0);
786 786
787 return result; 787 return result;
788} 788}
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index b7c2a06963d6..e19f530f1083 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -57,6 +57,7 @@
57 57
58#define ACPI_BUTTON_LID_INIT_IGNORE 0x00 58#define ACPI_BUTTON_LID_INIT_IGNORE 0x00
59#define ACPI_BUTTON_LID_INIT_OPEN 0x01 59#define ACPI_BUTTON_LID_INIT_OPEN 0x01
60#define ACPI_BUTTON_LID_INIT_METHOD 0x02
60 61
61#define _COMPONENT ACPI_BUTTON_COMPONENT 62#define _COMPONENT ACPI_BUTTON_COMPONENT
62ACPI_MODULE_NAME("button"); 63ACPI_MODULE_NAME("button");
@@ -112,7 +113,7 @@ struct acpi_button {
112 113
113static BLOCKING_NOTIFIER_HEAD(acpi_lid_notifier); 114static BLOCKING_NOTIFIER_HEAD(acpi_lid_notifier);
114static struct acpi_device *lid_device; 115static struct acpi_device *lid_device;
115static u8 lid_init_state = ACPI_BUTTON_LID_INIT_OPEN; 116static u8 lid_init_state = ACPI_BUTTON_LID_INIT_METHOD;
116 117
117static unsigned long lid_report_interval __read_mostly = 500; 118static unsigned long lid_report_interval __read_mostly = 500;
118module_param(lid_report_interval, ulong, 0644); 119module_param(lid_report_interval, ulong, 0644);
@@ -216,7 +217,7 @@ static int acpi_lid_notify_state(struct acpi_device *device, int state)
216 } 217 }
217 218
218 if (state) 219 if (state)
219 pm_wakeup_hard_event(&device->dev); 220 pm_wakeup_event(&device->dev, 0);
220 221
221 ret = blocking_notifier_call_chain(&acpi_lid_notifier, state, device); 222 ret = blocking_notifier_call_chain(&acpi_lid_notifier, state, device);
222 if (ret == NOTIFY_DONE) 223 if (ret == NOTIFY_DONE)
@@ -376,6 +377,9 @@ static void acpi_lid_initialize_state(struct acpi_device *device)
376 case ACPI_BUTTON_LID_INIT_OPEN: 377 case ACPI_BUTTON_LID_INIT_OPEN:
377 (void)acpi_lid_notify_state(device, 1); 378 (void)acpi_lid_notify_state(device, 1);
378 break; 379 break;
380 case ACPI_BUTTON_LID_INIT_METHOD:
381 (void)acpi_lid_update_state(device);
382 break;
379 case ACPI_BUTTON_LID_INIT_IGNORE: 383 case ACPI_BUTTON_LID_INIT_IGNORE:
380 default: 384 default:
381 break; 385 break;
@@ -398,7 +402,7 @@ static void acpi_button_notify(struct acpi_device *device, u32 event)
398 } else { 402 } else {
399 int keycode; 403 int keycode;
400 404
401 pm_wakeup_hard_event(&device->dev); 405 pm_wakeup_event(&device->dev, 0);
402 if (button->suspended) 406 if (button->suspended)
403 break; 407 break;
404 408
@@ -530,7 +534,6 @@ static int acpi_button_add(struct acpi_device *device)
530 lid_device = device; 534 lid_device = device;
531 } 535 }
532 536
533 device_init_wakeup(&device->dev, true);
534 printk(KERN_INFO PREFIX "%s [%s]\n", name, acpi_device_bid(device)); 537 printk(KERN_INFO PREFIX "%s [%s]\n", name, acpi_device_bid(device));
535 return 0; 538 return 0;
536 539
@@ -560,6 +563,9 @@ static int param_set_lid_init_state(const char *val, struct kernel_param *kp)
560 if (!strncmp(val, "open", sizeof("open") - 1)) { 563 if (!strncmp(val, "open", sizeof("open") - 1)) {
561 lid_init_state = ACPI_BUTTON_LID_INIT_OPEN; 564 lid_init_state = ACPI_BUTTON_LID_INIT_OPEN;
562 pr_info("Notify initial lid state as open\n"); 565 pr_info("Notify initial lid state as open\n");
566 } else if (!strncmp(val, "method", sizeof("method") - 1)) {
567 lid_init_state = ACPI_BUTTON_LID_INIT_METHOD;
568 pr_info("Notify initial lid state with _LID return value\n");
563 } else if (!strncmp(val, "ignore", sizeof("ignore") - 1)) { 569 } else if (!strncmp(val, "ignore", sizeof("ignore") - 1)) {
564 lid_init_state = ACPI_BUTTON_LID_INIT_IGNORE; 570 lid_init_state = ACPI_BUTTON_LID_INIT_IGNORE;
565 pr_info("Do not notify initial lid state\n"); 571 pr_info("Do not notify initial lid state\n");
@@ -573,6 +579,8 @@ static int param_get_lid_init_state(char *buffer, struct kernel_param *kp)
573 switch (lid_init_state) { 579 switch (lid_init_state) {
574 case ACPI_BUTTON_LID_INIT_OPEN: 580 case ACPI_BUTTON_LID_INIT_OPEN:
575 return sprintf(buffer, "open"); 581 return sprintf(buffer, "open");
582 case ACPI_BUTTON_LID_INIT_METHOD:
583 return sprintf(buffer, "method");
576 case ACPI_BUTTON_LID_INIT_IGNORE: 584 case ACPI_BUTTON_LID_INIT_IGNORE:
577 return sprintf(buffer, "ignore"); 585 return sprintf(buffer, "ignore");
578 default: 586 default:
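
With the new mode wired into both the setter and the getter, the default lid behaviour can be chosen at boot or switched at run time. Illustrative invocations (the sysfs path follows the usual module-parameter layout):

button.lid_init_state=method                                  (kernel command line)
echo method > /sys/module/button/parameters/lid_init_state   (at run time)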
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index 798d5003a039..993fd31394c8 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -24,7 +24,6 @@
24#include <linux/pm_qos.h> 24#include <linux/pm_qos.h>
25#include <linux/pm_domain.h> 25#include <linux/pm_domain.h>
26#include <linux/pm_runtime.h> 26#include <linux/pm_runtime.h>
27#include <linux/suspend.h>
28 27
29#include "internal.h" 28#include "internal.h"
30 29
@@ -400,7 +399,7 @@ static void acpi_pm_notify_handler(acpi_handle handle, u32 val, void *not_used)
400 mutex_lock(&acpi_pm_notifier_lock); 399 mutex_lock(&acpi_pm_notifier_lock);
401 400
402 if (adev->wakeup.flags.notifier_present) { 401 if (adev->wakeup.flags.notifier_present) {
403 pm_wakeup_ws_event(adev->wakeup.ws, 0, true); 402 __pm_wakeup_event(adev->wakeup.ws, 0);
404 if (adev->wakeup.context.work.func) 403 if (adev->wakeup.context.work.func)
405 queue_pm_work(&adev->wakeup.context.work); 404 queue_pm_work(&adev->wakeup.context.work);
406 } 405 }
diff --git a/drivers/acpi/nfit/mce.c b/drivers/acpi/nfit/mce.c
index 3ba1c3472cf9..fd86bec98dea 100644
--- a/drivers/acpi/nfit/mce.c
+++ b/drivers/acpi/nfit/mce.c
@@ -26,7 +26,7 @@ static int nfit_handle_mce(struct notifier_block *nb, unsigned long val,
26 struct nfit_spa *nfit_spa; 26 struct nfit_spa *nfit_spa;
27 27
28 /* We only care about memory errors */ 28 /* We only care about memory errors */
29 if (!(mce->status & MCACOD)) 29 if (!mce_is_memory_error(mce))
30 return NOTIFY_DONE; 30 return NOTIFY_DONE;
31 31
32 /* 32 /*
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index e39ec7b7cb67..d53162997f32 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1371,8 +1371,8 @@ int acpi_dma_configure(struct device *dev, enum dev_dma_attr attr)
1371 iort_set_dma_mask(dev); 1371 iort_set_dma_mask(dev);
1372 1372
1373 iommu = iort_iommu_configure(dev); 1373 iommu = iort_iommu_configure(dev);
1374 if (IS_ERR(iommu)) 1374 if (IS_ERR(iommu) && PTR_ERR(iommu) == -EPROBE_DEFER)
1375 return PTR_ERR(iommu); 1375 return -EPROBE_DEFER;
1376 1376
1377 size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1); 1377 size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1);
1378 /* 1378 /*
@@ -1428,6 +1428,37 @@ static void acpi_init_coherency(struct acpi_device *adev)
1428 adev->flags.coherent_dma = cca; 1428 adev->flags.coherent_dma = cca;
1429} 1429}
1430 1430
1431static int acpi_check_spi_i2c_slave(struct acpi_resource *ares, void *data)
1432{
1433 bool *is_spi_i2c_slave_p = data;
1434
1435 if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
1436 return 1;
1437
1438 /*
1439 * devices that are connected to UART still need to be enumerated to
1440 * platform bus
1441 */
1442 if (ares->data.common_serial_bus.type != ACPI_RESOURCE_SERIAL_TYPE_UART)
1443 *is_spi_i2c_slave_p = true;
1444
1445 /* no need to do more checking */
1446 return -1;
1447}
1448
1449static bool acpi_is_spi_i2c_slave(struct acpi_device *device)
1450{
1451 struct list_head resource_list;
1452 bool is_spi_i2c_slave = false;
1453
1454 INIT_LIST_HEAD(&resource_list);
1455 acpi_dev_get_resources(device, &resource_list, acpi_check_spi_i2c_slave,
1456 &is_spi_i2c_slave);
1457 acpi_dev_free_resource_list(&resource_list);
1458
1459 return is_spi_i2c_slave;
1460}
1461
1431void acpi_init_device_object(struct acpi_device *device, acpi_handle handle, 1462void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
1432 int type, unsigned long long sta) 1463 int type, unsigned long long sta)
1433{ 1464{
@@ -1443,6 +1474,7 @@ void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
1443 acpi_bus_get_flags(device); 1474 acpi_bus_get_flags(device);
1444 device->flags.match_driver = false; 1475 device->flags.match_driver = false;
1445 device->flags.initialized = true; 1476 device->flags.initialized = true;
1477 device->flags.spi_i2c_slave = acpi_is_spi_i2c_slave(device);
1446 acpi_device_clear_enumerated(device); 1478 acpi_device_clear_enumerated(device);
1447 device_initialize(&device->dev); 1479 device_initialize(&device->dev);
1448 dev_set_uevent_suppress(&device->dev, true); 1480 dev_set_uevent_suppress(&device->dev, true);
@@ -1727,38 +1759,13 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl_not_used,
1727 return AE_OK; 1759 return AE_OK;
1728} 1760}
1729 1761
1730static int acpi_check_spi_i2c_slave(struct acpi_resource *ares, void *data)
1731{
1732 bool *is_spi_i2c_slave_p = data;
1733
1734 if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
1735 return 1;
1736
1737 /*
1738 * devices that are connected to UART still need to be enumerated to
1739 * platform bus
1740 */
1741 if (ares->data.common_serial_bus.type != ACPI_RESOURCE_SERIAL_TYPE_UART)
1742 *is_spi_i2c_slave_p = true;
1743
1744 /* no need to do more checking */
1745 return -1;
1746}
1747
1748static void acpi_default_enumeration(struct acpi_device *device) 1762static void acpi_default_enumeration(struct acpi_device *device)
1749{ 1763{
1750 struct list_head resource_list;
1751 bool is_spi_i2c_slave = false;
1752
1753 /* 1764 /*
1754 * Do not enumerate SPI/I2C slaves as they will be enumerated by their 1765 * Do not enumerate SPI/I2C slaves as they will be enumerated by their
1755 * respective parents. 1766 * respective parents.
1756 */ 1767 */
1757 INIT_LIST_HEAD(&resource_list); 1768 if (!device->flags.spi_i2c_slave) {
1758 acpi_dev_get_resources(device, &resource_list, acpi_check_spi_i2c_slave,
1759 &is_spi_i2c_slave);
1760 acpi_dev_free_resource_list(&resource_list);
1761 if (!is_spi_i2c_slave) {
1762 acpi_create_platform_device(device, NULL); 1769 acpi_create_platform_device(device, NULL);
1763 acpi_device_set_enumerated(device); 1770 acpi_device_set_enumerated(device);
1764 } else { 1771 } else {
@@ -1854,7 +1861,7 @@ static void acpi_bus_attach(struct acpi_device *device)
1854 return; 1861 return;
1855 1862
1856 device->flags.match_driver = true; 1863 device->flags.match_driver = true;
1857 if (ret > 0) { 1864 if (ret > 0 && !device->flags.spi_i2c_slave) {
1858 acpi_device_set_enumerated(device); 1865 acpi_device_set_enumerated(device);
1859 goto ok; 1866 goto ok;
1860 } 1867 }
@@ -1863,10 +1870,10 @@ static void acpi_bus_attach(struct acpi_device *device)
1863 if (ret < 0) 1870 if (ret < 0)
1864 return; 1871 return;
1865 1872
1866 if (device->pnp.type.platform_id) 1873 if (!device->pnp.type.platform_id && !device->flags.spi_i2c_slave)
1867 acpi_default_enumeration(device);
1868 else
1869 acpi_device_set_enumerated(device); 1874 acpi_device_set_enumerated(device);
1875 else
1876 acpi_default_enumeration(device);
1870 1877
1871 ok: 1878 ok:
1872 list_for_each_entry(child, &device->children, node) 1879 list_for_each_entry(child, &device->children, node)
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index a6574d626340..097d630ab886 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -663,40 +663,14 @@ static int acpi_freeze_prepare(void)
663 acpi_os_wait_events_complete(); 663 acpi_os_wait_events_complete();
664 if (acpi_sci_irq_valid()) 664 if (acpi_sci_irq_valid())
665 enable_irq_wake(acpi_sci_irq); 665 enable_irq_wake(acpi_sci_irq);
666
667 return 0; 666 return 0;
668} 667}
669 668
670static void acpi_freeze_wake(void)
671{
672 /*
673 * If IRQD_WAKEUP_ARMED is not set for the SCI at this point, it means
674 * that the SCI has triggered while suspended, so cancel the wakeup in
675 * case it has not been a wakeup event (the GPEs will be checked later).
676 */
677 if (acpi_sci_irq_valid() &&
678 !irqd_is_wakeup_armed(irq_get_irq_data(acpi_sci_irq)))
679 pm_system_cancel_wakeup();
680}
681
682static void acpi_freeze_sync(void)
683{
684 /*
685 * Process all pending events in case there are any wakeup ones.
686 *
687 * The EC driver uses the system workqueue, so that one needs to be
688 * flushed too.
689 */
690 acpi_os_wait_events_complete();
691 flush_scheduled_work();
692}
693
694static void acpi_freeze_restore(void) 669static void acpi_freeze_restore(void)
695{ 670{
696 acpi_disable_wakeup_devices(ACPI_STATE_S0); 671 acpi_disable_wakeup_devices(ACPI_STATE_S0);
697 if (acpi_sci_irq_valid()) 672 if (acpi_sci_irq_valid())
698 disable_irq_wake(acpi_sci_irq); 673 disable_irq_wake(acpi_sci_irq);
699
700 acpi_enable_all_runtime_gpes(); 674 acpi_enable_all_runtime_gpes();
701} 675}
702 676
@@ -708,8 +682,6 @@ static void acpi_freeze_end(void)
708static const struct platform_freeze_ops acpi_freeze_ops = { 682static const struct platform_freeze_ops acpi_freeze_ops = {
709 .begin = acpi_freeze_begin, 683 .begin = acpi_freeze_begin,
710 .prepare = acpi_freeze_prepare, 684 .prepare = acpi_freeze_prepare,
711 .wake = acpi_freeze_wake,
712 .sync = acpi_freeze_sync,
713 .restore = acpi_freeze_restore, 685 .restore = acpi_freeze_restore,
714 .end = acpi_freeze_end, 686 .end = acpi_freeze_end,
715}; 687};
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index 1b5ee1e0e5a3..e414fabf7315 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -333,14 +333,17 @@ static ssize_t acpi_table_show(struct file *filp, struct kobject *kobj,
333 container_of(bin_attr, struct acpi_table_attr, attr); 333 container_of(bin_attr, struct acpi_table_attr, attr);
334 struct acpi_table_header *table_header = NULL; 334 struct acpi_table_header *table_header = NULL;
335 acpi_status status; 335 acpi_status status;
336 ssize_t rc;
336 337
337 status = acpi_get_table(table_attr->name, table_attr->instance, 338 status = acpi_get_table(table_attr->name, table_attr->instance,
338 &table_header); 339 &table_header);
339 if (ACPI_FAILURE(status)) 340 if (ACPI_FAILURE(status))
340 return -ENODEV; 341 return -ENODEV;
341 342
342 return memory_read_from_buffer(buf, count, &offset, 343 rc = memory_read_from_buffer(buf, count, &offset, table_header,
343 table_header, table_header->length); 344 table_header->length);
345 acpi_put_table(table_header);
346 return rc;
344} 347}
345 348
346static int acpi_table_attr_init(struct kobject *tables_obj, 349static int acpi_table_attr_init(struct kobject *tables_obj,
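
The leak fix pairs every successful acpi_get_table(), which takes a validation reference, with an acpi_put_table() once the buffer has been copied out. The resulting get/use/put shape, sketched (signature and instance are assumed to be in scope, as in the function above):

status = acpi_get_table(signature, instance, &table_header);
if (ACPI_FAILURE(status))
        return -ENODEV;                 /* nothing acquired, nothing to put */
rc = memory_read_from_buffer(buf, count, &offset, table_header,
                             table_header->length);
acpi_put_table(table_header);           /* drop the reference on every path */
return rc;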
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 2fc52407306c..c69954023c2e 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -1364,6 +1364,40 @@ static inline void ahci_gtf_filter_workaround(struct ata_host *host)
1364{} 1364{}
1365#endif 1365#endif
1366 1366
1367/*
1368 * On the Acer Aspire Switch Alpha 12, sometimes all SATA ports are detected
1369 * as DUMMY, or detected but eventually get a "link down" and never get up
1370 * again. When this happens, CAP.NP may hold a value of 0x00 or 0x01, and the
1371 * port_map may hold a value of 0x00.
1372 *
1373 * Overriding CAP.NP to 0x02 and the port_map to 0x7 will reveal all 3 ports
1374 * and can significantly reduce the occurrence of the problem.
1375 *
1376 * https://bugzilla.kernel.org/show_bug.cgi?id=189471
1377 */
1378static void acer_sa5_271_workaround(struct ahci_host_priv *hpriv,
1379 struct pci_dev *pdev)
1380{
1381 static const struct dmi_system_id sysids[] = {
1382 {
1383 .ident = "Acer Switch Alpha 12",
1384 .matches = {
1385 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
1386 DMI_MATCH(DMI_PRODUCT_NAME, "Switch SA5-271")
1387 },
1388 },
1389 { }
1390 };
1391
1392 if (dmi_check_system(sysids)) {
1393 dev_info(&pdev->dev, "enabling Acer Switch Alpha 12 workaround\n");
1394 if ((hpriv->saved_cap & 0xC734FF00) == 0xC734FF00) {
1395 hpriv->port_map = 0x7;
1396 hpriv->cap = 0xC734FF02;
1397 }
1398 }
1399}
1400
1367#ifdef CONFIG_ARM64 1401#ifdef CONFIG_ARM64
1368/* 1402/*
1369 * Due to ERRATA#22536, ThunderX needs to handle HOST_IRQ_STAT differently. 1403 * Due to ERRATA#22536, ThunderX needs to handle HOST_IRQ_STAT differently.
@@ -1636,6 +1670,10 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1636 "online status unreliable, applying workaround\n"); 1670 "online status unreliable, applying workaround\n");
1637 } 1671 }
1638 1672
1673
1674 /* Acer SA5-271 workaround modifies private_data */
1675 acer_sa5_271_workaround(hpriv, pdev);
1676
 1639 /* CAP.NP sometimes indicates the index of the last enabled 1677 /* CAP.NP sometimes indicates the index of the last enabled
1640 * port, at other times, that of the last possible port, so 1678 * port, at other times, that of the last possible port, so
1641 * determining the maximum port number requires looking at 1679 * determining the maximum port number requires looking at
diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
index aaa761b9081c..cd2eab6aa92e 100644
--- a/drivers/ata/libahci_platform.c
+++ b/drivers/ata/libahci_platform.c
@@ -514,8 +514,9 @@ int ahci_platform_init_host(struct platform_device *pdev,
514 514
515 irq = platform_get_irq(pdev, 0); 515 irq = platform_get_irq(pdev, 0);
516 if (irq <= 0) { 516 if (irq <= 0) {
517 dev_err(dev, "no irq\n"); 517 if (irq != -EPROBE_DEFER)
518 return -EINVAL; 518 dev_err(dev, "no irq\n");
519 return irq;
519 } 520 }
520 521
521 hpriv->irq = irq; 522 hpriv->irq = irq;
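
Returning the raw platform_get_irq() value instead of a hard-coded -EINVAL lets -EPROBE_DEFER propagate to the driver core, so the probe is retried once the interrupt controller has appeared; the error print is suppressed for that case because it is not a real failure. The general shape of the pattern, as a sketch (note the defensive mapping of a zero return, which the hunk above passes through as-is):

irq = platform_get_irq(pdev, 0);
if (irq <= 0) {
        if (irq != -EPROBE_DEFER)
                dev_err(dev, "no irq\n");
        return irq ? irq : -ENXIO;      /* never return 0 as an error */
}
hpriv->irq = irq;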
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 2d83b8c75965..e157a0e44419 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -6800,7 +6800,7 @@ static int __init ata_parse_force_one(char **cur,
6800 } 6800 }
6801 6801
6802 force_ent->port = simple_strtoul(id, &endp, 10); 6802 force_ent->port = simple_strtoul(id, &endp, 10);
6803 if (p == endp || *endp != '\0') { 6803 if (id == endp || *endp != '\0') {
6804 *reason = "invalid port/link"; 6804 *reason = "invalid port/link";
6805 return -EINVAL; 6805 return -EINVAL;
6806 } 6806 }
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index b66bcda88320..3b2246dded74 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -4067,7 +4067,6 @@ static int mv_platform_probe(struct platform_device *pdev)
4067 struct ata_host *host; 4067 struct ata_host *host;
4068 struct mv_host_priv *hpriv; 4068 struct mv_host_priv *hpriv;
4069 struct resource *res; 4069 struct resource *res;
4070 void __iomem *mmio;
4071 int n_ports = 0, irq = 0; 4070 int n_ports = 0, irq = 0;
4072 int rc; 4071 int rc;
4073 int port; 4072 int port;
@@ -4086,9 +4085,8 @@ static int mv_platform_probe(struct platform_device *pdev)
4086 * Get the register base first 4085 * Get the register base first
4087 */ 4086 */
4088 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 4087 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4089 mmio = devm_ioremap_resource(&pdev->dev, res); 4088 if (res == NULL)
4090 if (IS_ERR(mmio)) 4089 return -EINVAL;
4091 return PTR_ERR(mmio);
4092 4090
4093 /* allocate host */ 4091 /* allocate host */
4094 if (pdev->dev.of_node) { 4092 if (pdev->dev.of_node) {
@@ -4132,7 +4130,12 @@ static int mv_platform_probe(struct platform_device *pdev)
4132 hpriv->board_idx = chip_soc; 4130 hpriv->board_idx = chip_soc;
4133 4131
4134 host->iomap = NULL; 4132 host->iomap = NULL;
4135 hpriv->base = mmio - SATAHC0_REG_BASE; 4133 hpriv->base = devm_ioremap(&pdev->dev, res->start,
4134 resource_size(res));
4135 if (!hpriv->base)
4136 return -ENOMEM;
4137
4138 hpriv->base -= SATAHC0_REG_BASE;
4136 4139
4137 hpriv->clk = clk_get(&pdev->dev, NULL); 4140 hpriv->clk = clk_get(&pdev->dev, NULL);
4138 if (IS_ERR(hpriv->clk)) 4141 if (IS_ERR(hpriv->clk))
diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c
index 5d38245a7a73..b7939a2c1fab 100644
--- a/drivers/ata/sata_rcar.c
+++ b/drivers/ata/sata_rcar.c
@@ -890,7 +890,10 @@ static int sata_rcar_probe(struct platform_device *pdev)
890 dev_err(&pdev->dev, "failed to get access to sata clock\n"); 890 dev_err(&pdev->dev, "failed to get access to sata clock\n");
891 return PTR_ERR(priv->clk); 891 return PTR_ERR(priv->clk);
892 } 892 }
893 clk_prepare_enable(priv->clk); 893
894 ret = clk_prepare_enable(priv->clk);
895 if (ret)
896 return ret;
894 897
895 host = ata_host_alloc(&pdev->dev, 1); 898 host = ata_host_alloc(&pdev->dev, 1);
896 if (!host) { 899 if (!host) {
@@ -970,8 +973,11 @@ static int sata_rcar_resume(struct device *dev)
970 struct ata_host *host = dev_get_drvdata(dev); 973 struct ata_host *host = dev_get_drvdata(dev);
971 struct sata_rcar_priv *priv = host->private_data; 974 struct sata_rcar_priv *priv = host->private_data;
972 void __iomem *base = priv->base; 975 void __iomem *base = priv->base;
976 int ret;
973 977
974 clk_prepare_enable(priv->clk); 978 ret = clk_prepare_enable(priv->clk);
979 if (ret)
980 return ret;
975 981
976 /* ack and mask */ 982 /* ack and mask */
977 iowrite32(0, base + SATAINTSTAT_REG); 983 iowrite32(0, base + SATAINTSTAT_REG);
@@ -988,8 +994,11 @@ static int sata_rcar_restore(struct device *dev)
988{ 994{
989 struct ata_host *host = dev_get_drvdata(dev); 995 struct ata_host *host = dev_get_drvdata(dev);
990 struct sata_rcar_priv *priv = host->private_data; 996 struct sata_rcar_priv *priv = host->private_data;
997 int ret;
991 998
992 clk_prepare_enable(priv->clk); 999 ret = clk_prepare_enable(priv->clk);
1000 if (ret)
1001 return ret;
993 1002
994 sata_rcar_setup_port(host); 1003 sata_rcar_setup_port(host);
995 1004
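
clk_prepare_enable() can fail, for instance when the clock's parent cannot be enabled, so each call site now checks the return value instead of assuming success. The matching teardown is clk_disable_unprepare(); a typical balanced pair looks like this sketch (do_controller_init() is a hypothetical next step):

ret = clk_prepare_enable(priv->clk);
if (ret)
        return ret;                     /* nothing to undo yet */

ret = do_controller_init(priv);
if (ret) {
        clk_disable_unprepare(priv->clk);       /* undo on the error path */
        return ret;
}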
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index e987a6f55d36..9faee1c893e5 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -1091,6 +1091,11 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a
1091 if (async_error) 1091 if (async_error)
1092 goto Complete; 1092 goto Complete;
1093 1093
1094 if (pm_wakeup_pending()) {
1095 async_error = -EBUSY;
1096 goto Complete;
1097 }
1098
1094 if (dev->power.syscore || dev->power.direct_complete) 1099 if (dev->power.syscore || dev->power.direct_complete)
1095 goto Complete; 1100 goto Complete;
1096 1101
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index f62082fdd670..c313b600d356 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -28,8 +28,8 @@ bool events_check_enabled __read_mostly;
28/* First wakeup IRQ seen by the kernel in the last cycle. */ 28/* First wakeup IRQ seen by the kernel in the last cycle. */
29unsigned int pm_wakeup_irq __read_mostly; 29unsigned int pm_wakeup_irq __read_mostly;
30 30
31/* If greater than 0 and the system is suspending, terminate the suspend. */ 31/* If set and the system is suspending, terminate the suspend. */
32static atomic_t pm_abort_suspend __read_mostly; 32static bool pm_abort_suspend __read_mostly;
33 33
34/* 34/*
35 * Combined counters of registered wakeup events and wakeup events in progress. 35 * Combined counters of registered wakeup events and wakeup events in progress.
@@ -512,13 +512,12 @@ static bool wakeup_source_not_registered(struct wakeup_source *ws)
512/** 512/**
 513 * wakeup_source_activate - Mark given wakeup source as active. 513 * wakeup_source_activate - Mark given wakeup source as active.
514 * @ws: Wakeup source to handle. 514 * @ws: Wakeup source to handle.
515 * @hard: If set, abort suspends in progress and wake up from suspend-to-idle.
516 * 515 *
517 * Update the @ws' statistics and, if @ws has just been activated, notify the PM 516 * Update the @ws' statistics and, if @ws has just been activated, notify the PM
 518 * core of the event by incrementing the counter of wakeup events being 517 * core of the event by incrementing the counter of wakeup events being
519 * processed. 518 * processed.
520 */ 519 */
521static void wakeup_source_activate(struct wakeup_source *ws, bool hard) 520static void wakeup_source_activate(struct wakeup_source *ws)
522{ 521{
523 unsigned int cec; 522 unsigned int cec;
524 523
@@ -526,9 +525,6 @@ static void wakeup_source_activate(struct wakeup_source *ws, bool hard)
526 "unregistered wakeup source\n")) 525 "unregistered wakeup source\n"))
527 return; 526 return;
528 527
529 if (hard)
530 pm_system_wakeup();
531
532 ws->active = true; 528 ws->active = true;
533 ws->active_count++; 529 ws->active_count++;
534 ws->last_time = ktime_get(); 530 ws->last_time = ktime_get();
@@ -554,7 +550,10 @@ static void wakeup_source_report_event(struct wakeup_source *ws, bool hard)
554 ws->wakeup_count++; 550 ws->wakeup_count++;
555 551
556 if (!ws->active) 552 if (!ws->active)
557 wakeup_source_activate(ws, hard); 553 wakeup_source_activate(ws);
554
555 if (hard)
556 pm_system_wakeup();
558} 557}
559 558
560/** 559/**
@@ -856,26 +855,20 @@ bool pm_wakeup_pending(void)
856 pm_print_active_wakeup_sources(); 855 pm_print_active_wakeup_sources();
857 } 856 }
858 857
859 return ret || atomic_read(&pm_abort_suspend) > 0; 858 return ret || pm_abort_suspend;
860} 859}
861 860
862void pm_system_wakeup(void) 861void pm_system_wakeup(void)
863{ 862{
864 atomic_inc(&pm_abort_suspend); 863 pm_abort_suspend = true;
865 freeze_wake(); 864 freeze_wake();
866} 865}
867EXPORT_SYMBOL_GPL(pm_system_wakeup); 866EXPORT_SYMBOL_GPL(pm_system_wakeup);
868 867
869void pm_system_cancel_wakeup(void) 868void pm_wakeup_clear(void)
870{
871 atomic_dec(&pm_abort_suspend);
872}
873
874void pm_wakeup_clear(bool reset)
875{ 869{
870 pm_abort_suspend = false;
876 pm_wakeup_irq = 0; 871 pm_wakeup_irq = 0;
877 if (reset)
878 atomic_set(&pm_abort_suspend, 0);
879} 872}
880 873
881void pm_system_irq_wakeup(unsigned int irq_number) 874void pm_system_irq_wakeup(unsigned int irq_number)
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index b5730e17b455..656624314f0d 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -315,24 +315,32 @@ void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
315} 315}
316 316
317/* still holds resource->req_lock */ 317/* still holds resource->req_lock */
318static int drbd_req_put_completion_ref(struct drbd_request *req, struct bio_and_error *m, int put) 318static void drbd_req_put_completion_ref(struct drbd_request *req, struct bio_and_error *m, int put)
319{ 319{
320 struct drbd_device *device = req->device; 320 struct drbd_device *device = req->device;
321 D_ASSERT(device, m || (req->rq_state & RQ_POSTPONED)); 321 D_ASSERT(device, m || (req->rq_state & RQ_POSTPONED));
322 322
323 if (!put)
324 return;
325
323 if (!atomic_sub_and_test(put, &req->completion_ref)) 326 if (!atomic_sub_and_test(put, &req->completion_ref))
324 return 0; 327 return;
325 328
326 drbd_req_complete(req, m); 329 drbd_req_complete(req, m);
327 330
331 /* local completion may still come in later,
332 * we need to keep the req object around. */
333 if (req->rq_state & RQ_LOCAL_ABORTED)
334 return;
335
328 if (req->rq_state & RQ_POSTPONED) { 336 if (req->rq_state & RQ_POSTPONED) {
329 /* don't destroy the req object just yet, 337 /* don't destroy the req object just yet,
330 * but queue it for retry */ 338 * but queue it for retry */
331 drbd_restart_request(req); 339 drbd_restart_request(req);
332 return 0; 340 return;
333 } 341 }
334 342
335 return 1; 343 kref_put(&req->kref, drbd_req_destroy);
336} 344}
337 345
338static void set_if_null_req_next(struct drbd_peer_device *peer_device, struct drbd_request *req) 346static void set_if_null_req_next(struct drbd_peer_device *peer_device, struct drbd_request *req)
@@ -519,12 +527,8 @@ static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m,
519 if (req->i.waiting) 527 if (req->i.waiting)
520 wake_up(&device->misc_wait); 528 wake_up(&device->misc_wait);
521 529
522 if (c_put) { 530 drbd_req_put_completion_ref(req, m, c_put);
523 if (drbd_req_put_completion_ref(req, m, c_put)) 531 kref_put(&req->kref, drbd_req_destroy);
524 kref_put(&req->kref, drbd_req_destroy);
525 } else {
526 kref_put(&req->kref, drbd_req_destroy);
527 }
528} 532}
529 533
530static void drbd_report_io_error(struct drbd_device *device, struct drbd_request *req) 534static void drbd_report_io_error(struct drbd_device *device, struct drbd_request *req)
@@ -1366,8 +1370,7 @@ nodata:
1366 } 1370 }
1367 1371
1368out: 1372out:
1369 if (drbd_req_put_completion_ref(req, &m, 1)) 1373 drbd_req_put_completion_ref(req, &m, 1);
1370 kref_put(&req->kref, drbd_req_destroy);
1371 spin_unlock_irq(&resource->req_lock); 1374 spin_unlock_irq(&resource->req_lock);
1372 1375
1373 /* Even though above is a kref_put(), this is safe. 1376 /* Even though above is a kref_put(), this is safe.
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 28d932906f24..ebbd0c3fe0ed 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -608,6 +608,9 @@ static int loop_switch(struct loop_device *lo, struct file *file)
608 */ 608 */
609static int loop_flush(struct loop_device *lo) 609static int loop_flush(struct loop_device *lo)
610{ 610{
611 /* loop not yet configured, no running thread, nothing to flush */
612 if (lo->lo_state != Lo_bound)
613 return 0;
611 return loop_switch(lo, NULL); 614 return loop_switch(lo, NULL);
612} 615}
613 616
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 9a7bb2c29447..f3f191ba8ca4 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -937,14 +937,6 @@ static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
937 return -ENOSPC; 937 return -ENOSPC;
938} 938}
939 939
940/* Reset all properties of an NBD device */
941static void nbd_reset(struct nbd_device *nbd)
942{
943 nbd->config = NULL;
944 nbd->tag_set.timeout = 0;
945 queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
946}
947
948static void nbd_bdev_reset(struct block_device *bdev) 940static void nbd_bdev_reset(struct block_device *bdev)
949{ 941{
950 if (bdev->bd_openers > 1) 942 if (bdev->bd_openers > 1)
@@ -1029,7 +1021,11 @@ static void nbd_config_put(struct nbd_device *nbd)
1029 } 1021 }
1030 kfree(config->socks); 1022 kfree(config->socks);
1031 } 1023 }
1032 nbd_reset(nbd); 1024 kfree(nbd->config);
1025 nbd->config = NULL;
1026
1027 nbd->tag_set.timeout = 0;
1028 queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
1033 1029
1034 mutex_unlock(&nbd->config_lock); 1030 mutex_unlock(&nbd->config_lock);
1035 nbd_put(nbd); 1031 nbd_put(nbd);
@@ -1483,7 +1479,6 @@ static int nbd_dev_add(int index)
1483 disk->fops = &nbd_fops; 1479 disk->fops = &nbd_fops;
1484 disk->private_data = nbd; 1480 disk->private_data = nbd;
1485 sprintf(disk->disk_name, "nbd%d", index); 1481 sprintf(disk->disk_name, "nbd%d", index);
1486 nbd_reset(nbd);
1487 add_disk(disk); 1482 add_disk(disk);
1488 nbd_total_devices++; 1483 nbd_total_devices++;
1489 return index; 1484 return index;
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 454bf9c34882..c16f74547804 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -4023,6 +4023,7 @@ static void rbd_queue_workfn(struct work_struct *work)
4023 4023
4024 switch (req_op(rq)) { 4024 switch (req_op(rq)) {
4025 case REQ_OP_DISCARD: 4025 case REQ_OP_DISCARD:
4026 case REQ_OP_WRITE_ZEROES:
4026 op_type = OBJ_OP_DISCARD; 4027 op_type = OBJ_OP_DISCARD;
4027 break; 4028 break;
4028 case REQ_OP_WRITE: 4029 case REQ_OP_WRITE:
@@ -4420,6 +4421,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
4420 q->limits.discard_granularity = segment_size; 4421 q->limits.discard_granularity = segment_size;
4421 q->limits.discard_alignment = segment_size; 4422 q->limits.discard_alignment = segment_size;
4422 blk_queue_max_discard_sectors(q, segment_size / SECTOR_SIZE); 4423 blk_queue_max_discard_sectors(q, segment_size / SECTOR_SIZE);
4424 blk_queue_max_write_zeroes_sectors(q, segment_size / SECTOR_SIZE);
4423 4425
4424 if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC)) 4426 if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
4425 q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES; 4427 q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
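The rbd hunks handle REQ_OP_WRITE_ZEROES by reusing the discard path and advertising the matching queue limit. A small sketch of the underlying technique, a deliberate switch fallthrough mapping two request operations onto one handler (the enum names here are invented for the demo):

    #include <stdio.h>

    enum req_op { OP_READ, OP_WRITE, OP_DISCARD, OP_WRITE_ZEROES };
    enum obj_op { OBJ_READ, OBJ_WRITE, OBJ_DISCARD };

    /* Both DISCARD and WRITE_ZEROES leave the extent reading back as
     * zeroes on this backend, so they can share one object operation. */
    static enum obj_op classify(enum req_op op)
    {
        switch (op) {
        case OP_DISCARD:
        case OP_WRITE_ZEROES:   /* deliberate fallthrough */
            return OBJ_DISCARD;
        case OP_WRITE:
            return OBJ_WRITE;
        default:
            return OBJ_READ;
        }
    }

    int main(void)
    {
        printf("%d %d\n", classify(OP_DISCARD), classify(OP_WRITE_ZEROES));
        return 0;
    }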
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 726c32e35db9..0e824091a12f 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -609,8 +609,6 @@ int xen_blkif_schedule(void *arg)
609 unsigned long timeout; 609 unsigned long timeout;
610 int ret; 610 int ret;
611 611
612 xen_blkif_get(blkif);
613
614 set_freezable(); 612 set_freezable();
615 while (!kthread_should_stop()) { 613 while (!kthread_should_stop()) {
616 if (try_to_freeze()) 614 if (try_to_freeze())
@@ -665,7 +663,6 @@ purge_gnt_list:
665 print_stats(ring); 663 print_stats(ring);
666 664
667 ring->xenblkd = NULL; 665 ring->xenblkd = NULL;
668 xen_blkif_put(blkif);
669 666
670 return 0; 667 return 0;
671} 668}
@@ -1436,34 +1433,35 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
1436static void make_response(struct xen_blkif_ring *ring, u64 id, 1433static void make_response(struct xen_blkif_ring *ring, u64 id,
1437 unsigned short op, int st) 1434 unsigned short op, int st)
1438{ 1435{
1439 struct blkif_response resp; 1436 struct blkif_response *resp;
1440 unsigned long flags; 1437 unsigned long flags;
1441 union blkif_back_rings *blk_rings; 1438 union blkif_back_rings *blk_rings;
1442 int notify; 1439 int notify;
1443 1440
1444 resp.id = id;
1445 resp.operation = op;
1446 resp.status = st;
1447
1448 spin_lock_irqsave(&ring->blk_ring_lock, flags); 1441 spin_lock_irqsave(&ring->blk_ring_lock, flags);
1449 blk_rings = &ring->blk_rings; 1442 blk_rings = &ring->blk_rings;
1450 /* Place on the response ring for the relevant domain. */ 1443 /* Place on the response ring for the relevant domain. */
1451 switch (ring->blkif->blk_protocol) { 1444 switch (ring->blkif->blk_protocol) {
1452 case BLKIF_PROTOCOL_NATIVE: 1445 case BLKIF_PROTOCOL_NATIVE:
1453 memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt), 1446 resp = RING_GET_RESPONSE(&blk_rings->native,
1454 &resp, sizeof(resp)); 1447 blk_rings->native.rsp_prod_pvt);
1455 break; 1448 break;
1456 case BLKIF_PROTOCOL_X86_32: 1449 case BLKIF_PROTOCOL_X86_32:
1457 memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt), 1450 resp = RING_GET_RESPONSE(&blk_rings->x86_32,
1458 &resp, sizeof(resp)); 1451 blk_rings->x86_32.rsp_prod_pvt);
1459 break; 1452 break;
1460 case BLKIF_PROTOCOL_X86_64: 1453 case BLKIF_PROTOCOL_X86_64:
1461 memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt), 1454 resp = RING_GET_RESPONSE(&blk_rings->x86_64,
1462 &resp, sizeof(resp)); 1455 blk_rings->x86_64.rsp_prod_pvt);
1463 break; 1456 break;
1464 default: 1457 default:
1465 BUG(); 1458 BUG();
1466 } 1459 }
1460
1461 resp->id = id;
1462 resp->operation = op;
1463 resp->status = st;
1464
1467 blk_rings->common.rsp_prod_pvt++; 1465 blk_rings->common.rsp_prod_pvt++;
1468 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify); 1466 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
1469 spin_unlock_irqrestore(&ring->blk_ring_lock, flags); 1467 spin_unlock_irqrestore(&ring->blk_ring_lock, flags);
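Instead of assembling a response on the stack and memcpy()ing it into the ring, make_response now takes a pointer to the ring slot and fills it in place; together with the common.h change below this lets all three protocols share one response layout. A rough userspace sketch of in-place slot construction, with a toy ring in place of the Xen shared ring:

    #include <stdint.h>
    #include <stdio.h>

    struct resp { uint64_t id; uint8_t op; int16_t status; };

    static struct resp ring[8];
    static unsigned int prod;

    /* Fill the slot directly: no staging buffer, no memcpy, and the
     * same code works for every protocol sharing this layout. */
    static void make_response(uint64_t id, uint8_t op, int16_t st)
    {
        struct resp *r = &ring[prod++ % 8];

        r->id = id;
        r->op = op;
        r->status = st;
    }

    int main(void)
    {
        make_response(42, 1, 0);
        printf("id=%llu\n", (unsigned long long)ring[0].id);
        return 0;
    }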
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index dea61f6ab8cb..ecb35fe8ca8d 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -75,9 +75,8 @@ extern unsigned int xenblk_max_queues;
75struct blkif_common_request { 75struct blkif_common_request {
76 char dummy; 76 char dummy;
77}; 77};
78struct blkif_common_response { 78
79 char dummy; 79/* i386 protocol version */
80};
81 80
82struct blkif_x86_32_request_rw { 81struct blkif_x86_32_request_rw {
83 uint8_t nr_segments; /* number of segments */ 82 uint8_t nr_segments; /* number of segments */
@@ -129,14 +128,6 @@ struct blkif_x86_32_request {
129 } u; 128 } u;
130} __attribute__((__packed__)); 129} __attribute__((__packed__));
131 130
132/* i386 protocol version */
133#pragma pack(push, 4)
134struct blkif_x86_32_response {
135 uint64_t id; /* copied from request */
136 uint8_t operation; /* copied from request */
137 int16_t status; /* BLKIF_RSP_??? */
138};
139#pragma pack(pop)
140/* x86_64 protocol version */ 131/* x86_64 protocol version */
141 132
142struct blkif_x86_64_request_rw { 133struct blkif_x86_64_request_rw {
@@ -193,18 +184,12 @@ struct blkif_x86_64_request {
193 } u; 184 } u;
194} __attribute__((__packed__)); 185} __attribute__((__packed__));
195 186
196struct blkif_x86_64_response {
197 uint64_t __attribute__((__aligned__(8))) id;
198 uint8_t operation; /* copied from request */
199 int16_t status; /* BLKIF_RSP_??? */
200};
201
202DEFINE_RING_TYPES(blkif_common, struct blkif_common_request, 187DEFINE_RING_TYPES(blkif_common, struct blkif_common_request,
203 struct blkif_common_response); 188 struct blkif_response);
204DEFINE_RING_TYPES(blkif_x86_32, struct blkif_x86_32_request, 189DEFINE_RING_TYPES(blkif_x86_32, struct blkif_x86_32_request,
205 struct blkif_x86_32_response); 190 struct blkif_response __packed);
206DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request, 191DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request,
207 struct blkif_x86_64_response); 192 struct blkif_response);
208 193
209union blkif_back_rings { 194union blkif_back_rings {
210 struct blkif_back_ring native; 195 struct blkif_back_ring native;
@@ -281,6 +266,7 @@ struct xen_blkif_ring {
281 266
282 wait_queue_head_t wq; 267 wait_queue_head_t wq;
283 atomic_t inflight; 268 atomic_t inflight;
269 bool active;
284 /* One thread per blkif ring. */ 270 /* One thread per blkif ring. */
285 struct task_struct *xenblkd; 271 struct task_struct *xenblkd;
286 unsigned int waiting_reqs; 272 unsigned int waiting_reqs;
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 8fe61b5dc5a6..792da683e70d 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -159,7 +159,7 @@ static int xen_blkif_alloc_rings(struct xen_blkif *blkif)
159 init_waitqueue_head(&ring->shutdown_wq); 159 init_waitqueue_head(&ring->shutdown_wq);
160 ring->blkif = blkif; 160 ring->blkif = blkif;
161 ring->st_print = jiffies; 161 ring->st_print = jiffies;
162 xen_blkif_get(blkif); 162 ring->active = true;
163 } 163 }
164 164
165 return 0; 165 return 0;
@@ -249,10 +249,12 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
249 struct xen_blkif_ring *ring = &blkif->rings[r]; 249 struct xen_blkif_ring *ring = &blkif->rings[r];
250 unsigned int i = 0; 250 unsigned int i = 0;
251 251
252 if (!ring->active)
253 continue;
254
252 if (ring->xenblkd) { 255 if (ring->xenblkd) {
253 kthread_stop(ring->xenblkd); 256 kthread_stop(ring->xenblkd);
254 wake_up(&ring->shutdown_wq); 257 wake_up(&ring->shutdown_wq);
255 ring->xenblkd = NULL;
256 } 258 }
257 259
258 /* The above kthread_stop() guarantees that at this point we 260 /* The above kthread_stop() guarantees that at this point we
@@ -296,7 +298,7 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
296 BUG_ON(ring->free_pages_num != 0); 298 BUG_ON(ring->free_pages_num != 0);
297 BUG_ON(ring->persistent_gnt_c != 0); 299 BUG_ON(ring->persistent_gnt_c != 0);
298 WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages)); 300 WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));
299 xen_blkif_put(blkif); 301 ring->active = false;
300 } 302 }
301 blkif->nr_ring_pages = 0; 303 blkif->nr_ring_pages = 0;
302 /* 304 /*
@@ -312,9 +314,10 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
312 314
313static void xen_blkif_free(struct xen_blkif *blkif) 315static void xen_blkif_free(struct xen_blkif *blkif)
314{ 316{
315 317 WARN_ON(xen_blkif_disconnect(blkif));
316 xen_blkif_disconnect(blkif);
317 xen_vbd_free(&blkif->vbd); 318 xen_vbd_free(&blkif->vbd);
319 kfree(blkif->be->mode);
320 kfree(blkif->be);
318 321
319 /* Make sure everything is drained before shutting down */ 322 /* Make sure everything is drained before shutting down */
320 kmem_cache_free(xen_blkif_cachep, blkif); 323 kmem_cache_free(xen_blkif_cachep, blkif);
@@ -504,13 +507,13 @@ static int xen_blkbk_remove(struct xenbus_device *dev)
504 507
505 dev_set_drvdata(&dev->dev, NULL); 508 dev_set_drvdata(&dev->dev, NULL);
506 509
507 if (be->blkif) 510 if (be->blkif) {
508 xen_blkif_disconnect(be->blkif); 511 xen_blkif_disconnect(be->blkif);
509 512
510 /* Put the reference we set in xen_blkif_alloc(). */ 513 /* Put the reference we set in xen_blkif_alloc(). */
511 xen_blkif_put(be->blkif); 514 xen_blkif_put(be->blkif);
512 kfree(be->mode); 515 }
513 kfree(be); 516
514 return 0; 517 return 0;
515} 518}
516 519
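Across blkback.c and xenbus.c the per-ring get/put refcounting is replaced by a per-ring active flag, making xen_blkif_disconnect() safe to run more than once. A minimal sketch of that idempotent-teardown pattern (ring and disconnect are demo names):

    #include <stdbool.h>
    #include <stdio.h>

    struct ring { bool active; };

    /* Safe to call repeatedly: rings already torn down are skipped,
     * so a later disconnect from the final free path is a no-op. */
    static void disconnect(struct ring *rings, int n)
    {
        for (int i = 0; i < n; i++) {
            if (!rings[i].active)
                continue;
            printf("tearing down ring %d\n", i);
            rings[i].active = false;
        }
    }

    int main(void)
    {
        struct ring r[2] = { { true }, { true } };
        disconnect(r, 2);
        disconnect(r, 2);   /* second call does nothing */
        return 0;
    }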
diff --git a/drivers/char/lp.c b/drivers/char/lp.c
index 565e4cf04a02..8249762192d5 100644
--- a/drivers/char/lp.c
+++ b/drivers/char/lp.c
@@ -859,7 +859,11 @@ static int __init lp_setup (char *str)
859 } else if (!strcmp(str, "auto")) { 859 } else if (!strcmp(str, "auto")) {
860 parport_nr[0] = LP_PARPORT_AUTO; 860 parport_nr[0] = LP_PARPORT_AUTO;
861 } else if (!strcmp(str, "none")) { 861 } else if (!strcmp(str, "none")) {
862 parport_nr[parport_ptr++] = LP_PARPORT_NONE; 862 if (parport_ptr < LP_NO)
863 parport_nr[parport_ptr++] = LP_PARPORT_NONE;
864 else
865 printk(KERN_INFO "lp: too many ports, %s ignored.\n",
866 str);
863 } else if (!strcmp(str, "reset")) { 867 } else if (!strcmp(str, "reset")) {
864 reset = 1; 868 reset = 1;
865 } 869 }
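The lp.c fix is a bounds check before a post-increment array store: repeating "lp=none" often enough on the command line used to write past the end of parport_nr[]. A self-contained demo of the corrected pattern:

    #include <stdio.h>

    #define LP_NO 3

    static int parport_nr[LP_NO];
    static int parport_ptr;

    /* The unchecked original was parport_nr[parport_ptr++] = v;
     * one "none" too many wrote past the end of the array. */
    static void record_port(int v)
    {
        if (parport_ptr < LP_NO)
            parport_nr[parport_ptr++] = v;
        else
            fprintf(stderr, "lp: too many ports, ignored\n");
    }

    int main(void)
    {
        for (int i = 0; i < 5; i++) /* the last two are rejected */
            record_port(-1);
        return 0;
    }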
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 7e4a9d1296bb..593a8818aca9 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -340,6 +340,11 @@ static const struct vm_operations_struct mmap_mem_ops = {
340static int mmap_mem(struct file *file, struct vm_area_struct *vma) 340static int mmap_mem(struct file *file, struct vm_area_struct *vma)
341{ 341{
342 size_t size = vma->vm_end - vma->vm_start; 342 size_t size = vma->vm_end - vma->vm_start;
343 phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
344
345 /* It's illegal to wrap around the end of the physical address space. */
346 if (offset + (phys_addr_t)size - 1 < offset)
347 return -EINVAL;
343 348
344 if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size)) 349 if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
345 return -EINVAL; 350 return -EINVAL;
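The mem.c check rejects mappings whose physical range wraps past the top of the address space. The idiom offset + size - 1 < offset detects unsigned overflow with a single comparison and no reference to the type's maximum. A runnable demo (size is assumed non-zero, as in the mmap path where it is page-aligned and positive):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t phys_addr_t;

    /* If offset + size - 1 wraps, the sum is smaller than offset;
     * unsigned arithmetic makes that one comparison. */
    static int range_ok(phys_addr_t offset, phys_addr_t size)
    {
        return offset + size - 1 >= offset;
    }

    int main(void)
    {
        printf("%d\n", range_ok(0x1000, 0x1000));        /* 1: fine */
        printf("%d\n", range_ok(UINT64_MAX - 15, 32));   /* 0: wraps */
        return 0;
    }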
diff --git a/drivers/char/pcmcia/cm4040_cs.c b/drivers/char/pcmcia/cm4040_cs.c
index d4dbd8d8e524..382c864814d9 100644
--- a/drivers/char/pcmcia/cm4040_cs.c
+++ b/drivers/char/pcmcia/cm4040_cs.c
@@ -374,7 +374,7 @@ static ssize_t cm4040_write(struct file *filp, const char __user *buf,
374 374
375 rc = write_sync_reg(SCR_HOST_TO_READER_START, dev); 375 rc = write_sync_reg(SCR_HOST_TO_READER_START, dev);
376 if (rc <= 0) { 376 if (rc <= 0) {
377 DEBUGP(5, dev, "write_sync_reg c=%.2Zx\n", rc); 377 DEBUGP(5, dev, "write_sync_reg c=%.2zx\n", rc);
378 DEBUGP(2, dev, "<- cm4040_write (failed)\n"); 378 DEBUGP(2, dev, "<- cm4040_write (failed)\n");
379 if (rc == -ERESTARTSYS) 379 if (rc == -ERESTARTSYS)
380 return rc; 380 return rc;
@@ -387,7 +387,7 @@ static ssize_t cm4040_write(struct file *filp, const char __user *buf,
387 for (i = 0; i < bytes_to_write; i++) { 387 for (i = 0; i < bytes_to_write; i++) {
388 rc = wait_for_bulk_out_ready(dev); 388 rc = wait_for_bulk_out_ready(dev);
389 if (rc <= 0) { 389 if (rc <= 0) {
390 DEBUGP(5, dev, "wait_for_bulk_out_ready rc=%.2Zx\n", 390 DEBUGP(5, dev, "wait_for_bulk_out_ready rc=%.2zx\n",
391 rc); 391 rc);
392 DEBUGP(2, dev, "<- cm4040_write (failed)\n"); 392 DEBUGP(2, dev, "<- cm4040_write (failed)\n");
393 if (rc == -ERESTARTSYS) 393 if (rc == -ERESTARTSYS)
@@ -403,7 +403,7 @@ static ssize_t cm4040_write(struct file *filp, const char __user *buf,
403 rc = write_sync_reg(SCR_HOST_TO_READER_DONE, dev); 403 rc = write_sync_reg(SCR_HOST_TO_READER_DONE, dev);
404 404
405 if (rc <= 0) { 405 if (rc <= 0) {
406 DEBUGP(5, dev, "write_sync_reg c=%.2Zx\n", rc); 406 DEBUGP(5, dev, "write_sync_reg c=%.2zx\n", rc);
407 DEBUGP(2, dev, "<- cm4040_write (failed)\n"); 407 DEBUGP(2, dev, "<- cm4040_write (failed)\n");
408 if (rc == -ERESTARTSYS) 408 if (rc == -ERESTARTSYS)
409 return rc; 409 return rc;
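The cm4040 change is a format-string fix: the C99 length modifier for size_t-sized integers is lowercase %z, while the capital %Z form was a nonstandard extension that the kernel's printf implementation no longer accepts. A quick userspace illustration:

    #include <stdio.h>
    #include <sys/types.h>  /* ssize_t */

    int main(void)
    {
        ssize_t rc = -1;
        size_t n = 0x2a;

        /* %z is the standard C99 modifier for size_t and its
         * signed counterpart; %Z is not portable C. */
        printf("rc=%zd n=%#zx\n", rc, n);
        return 0;
    }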
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 0ab024918907..01a260f67437 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1,6 +1,9 @@
1/* 1/*
2 * random.c -- A strong random number generator 2 * random.c -- A strong random number generator
3 * 3 *
4 * Copyright (C) 2017 Jason A. Donenfeld <Jason@zx2c4.com>. All
5 * Rights Reserved.
6 *
4 * Copyright Matt Mackall <mpm@selenic.com>, 2003, 2004, 2005 7 * Copyright Matt Mackall <mpm@selenic.com>, 2003, 2004, 2005
5 * 8 *
6 * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999. All 9 * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999. All
@@ -762,6 +765,8 @@ static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);
762static struct crng_state **crng_node_pool __read_mostly; 765static struct crng_state **crng_node_pool __read_mostly;
763#endif 766#endif
764 767
768static void invalidate_batched_entropy(void);
769
765static void crng_initialize(struct crng_state *crng) 770static void crng_initialize(struct crng_state *crng)
766{ 771{
767 int i; 772 int i;
@@ -798,12 +803,13 @@ static int crng_fast_load(const char *cp, size_t len)
798 p[crng_init_cnt % CHACHA20_KEY_SIZE] ^= *cp; 803 p[crng_init_cnt % CHACHA20_KEY_SIZE] ^= *cp;
799 cp++; crng_init_cnt++; len--; 804 cp++; crng_init_cnt++; len--;
800 } 805 }
806 spin_unlock_irqrestore(&primary_crng.lock, flags);
801 if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) { 807 if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
808 invalidate_batched_entropy();
802 crng_init = 1; 809 crng_init = 1;
803 wake_up_interruptible(&crng_init_wait); 810 wake_up_interruptible(&crng_init_wait);
804 pr_notice("random: fast init done\n"); 811 pr_notice("random: fast init done\n");
805 } 812 }
806 spin_unlock_irqrestore(&primary_crng.lock, flags);
807 return 1; 813 return 1;
808} 814}
809 815
@@ -835,13 +841,14 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
835 } 841 }
836 memzero_explicit(&buf, sizeof(buf)); 842 memzero_explicit(&buf, sizeof(buf));
837 crng->init_time = jiffies; 843 crng->init_time = jiffies;
844 spin_unlock_irqrestore(&primary_crng.lock, flags);
838 if (crng == &primary_crng && crng_init < 2) { 845 if (crng == &primary_crng && crng_init < 2) {
846 invalidate_batched_entropy();
839 crng_init = 2; 847 crng_init = 2;
840 process_random_ready_list(); 848 process_random_ready_list();
841 wake_up_interruptible(&crng_init_wait); 849 wake_up_interruptible(&crng_init_wait);
842 pr_notice("random: crng init done\n"); 850 pr_notice("random: crng init done\n");
843 } 851 }
844 spin_unlock_irqrestore(&primary_crng.lock, flags);
845} 852}
846 853
847static inline void crng_wait_ready(void) 854static inline void crng_wait_ready(void)
@@ -1097,12 +1104,16 @@ static void add_interrupt_bench(cycles_t start)
1097static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs) 1104static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs)
1098{ 1105{
1099 __u32 *ptr = (__u32 *) regs; 1106 __u32 *ptr = (__u32 *) regs;
1107 unsigned int idx;
1100 1108
1101 if (regs == NULL) 1109 if (regs == NULL)
1102 return 0; 1110 return 0;
1103 if (f->reg_idx >= sizeof(struct pt_regs) / sizeof(__u32)) 1111 idx = READ_ONCE(f->reg_idx);
1104 f->reg_idx = 0; 1112 if (idx >= sizeof(struct pt_regs) / sizeof(__u32))
1105 return *(ptr + f->reg_idx++); 1113 idx = 0;
1114 ptr += idx++;
1115 WRITE_ONCE(f->reg_idx, idx);
1116 return *ptr;
1106} 1117}
1107 1118
1108void add_interrupt_randomness(int irq, int irq_flags) 1119void add_interrupt_randomness(int irq, int irq_flags)
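The get_reg() rework reads f->reg_idx into a local exactly once, validates and advances the local copy, and publishes it back with a single store, so a concurrent interrupt can no longer observe the index between the range check and the array access. A userspace analogue using C11 relaxed atomics, which play roughly the role READ_ONCE/WRITE_ONCE play in the kernel (this is an analogy, not the kernel code):

    #include <stdatomic.h>
    #include <stdio.h>

    #define NWORDS 16

    static unsigned int words[NWORDS];
    static _Atomic unsigned int reg_idx;

    /* Snapshot the shared index once, fix up and advance the local
     * copy, then publish with one store; the slot read is always
     * in bounds even if another context races on reg_idx. */
    static unsigned int get_word(void)
    {
        unsigned int idx = atomic_load_explicit(&reg_idx, memory_order_relaxed);

        if (idx >= NWORDS)
            idx = 0;
        unsigned int v = words[idx++];
        atomic_store_explicit(&reg_idx, idx, memory_order_relaxed);
        return v;
    }

    int main(void)
    {
        for (int i = 0; i < 20; i++)
            get_word();     /* wraps safely past NWORDS */
        printf("idx=%u\n", atomic_load(&reg_idx));
        return 0;
    }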
@@ -2019,6 +2030,7 @@ struct batched_entropy {
2019 }; 2030 };
2020 unsigned int position; 2031 unsigned int position;
2021}; 2032};
2033static rwlock_t batched_entropy_reset_lock = __RW_LOCK_UNLOCKED(batched_entropy_reset_lock);
2022 2034
2023/* 2035/*
2024 * Get a random word for internal kernel use only. The quality of the random 2036 * Get a random word for internal kernel use only. The quality of the random
@@ -2029,6 +2041,8 @@ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64);
2029u64 get_random_u64(void) 2041u64 get_random_u64(void)
2030{ 2042{
2031 u64 ret; 2043 u64 ret;
2044 bool use_lock = READ_ONCE(crng_init) < 2;
2045 unsigned long flags = 0;
2032 struct batched_entropy *batch; 2046 struct batched_entropy *batch;
2033 2047
2034#if BITS_PER_LONG == 64 2048#if BITS_PER_LONG == 64
@@ -2041,11 +2055,15 @@ u64 get_random_u64(void)
2041#endif 2055#endif
2042 2056
2043 batch = &get_cpu_var(batched_entropy_u64); 2057 batch = &get_cpu_var(batched_entropy_u64);
2058 if (use_lock)
2059 read_lock_irqsave(&batched_entropy_reset_lock, flags);
2044 if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) { 2060 if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
2045 extract_crng((u8 *)batch->entropy_u64); 2061 extract_crng((u8 *)batch->entropy_u64);
2046 batch->position = 0; 2062 batch->position = 0;
2047 } 2063 }
2048 ret = batch->entropy_u64[batch->position++]; 2064 ret = batch->entropy_u64[batch->position++];
2065 if (use_lock)
2066 read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
2049 put_cpu_var(batched_entropy_u64); 2067 put_cpu_var(batched_entropy_u64);
2050 return ret; 2068 return ret;
2051} 2069}
@@ -2055,22 +2073,45 @@ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32);
2055u32 get_random_u32(void) 2073u32 get_random_u32(void)
2056{ 2074{
2057 u32 ret; 2075 u32 ret;
2076 bool use_lock = READ_ONCE(crng_init) < 2;
2077 unsigned long flags = 0;
2058 struct batched_entropy *batch; 2078 struct batched_entropy *batch;
2059 2079
2060 if (arch_get_random_int(&ret)) 2080 if (arch_get_random_int(&ret))
2061 return ret; 2081 return ret;
2062 2082
2063 batch = &get_cpu_var(batched_entropy_u32); 2083 batch = &get_cpu_var(batched_entropy_u32);
2084 if (use_lock)
2085 read_lock_irqsave(&batched_entropy_reset_lock, flags);
2064 if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) { 2086 if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
2065 extract_crng((u8 *)batch->entropy_u32); 2087 extract_crng((u8 *)batch->entropy_u32);
2066 batch->position = 0; 2088 batch->position = 0;
2067 } 2089 }
2068 ret = batch->entropy_u32[batch->position++]; 2090 ret = batch->entropy_u32[batch->position++];
2091 if (use_lock)
2092 read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
2069 put_cpu_var(batched_entropy_u32); 2093 put_cpu_var(batched_entropy_u32);
2070 return ret; 2094 return ret;
2071} 2095}
2072EXPORT_SYMBOL(get_random_u32); 2096EXPORT_SYMBOL(get_random_u32);
2073 2097
2098/* It's important to invalidate all potential batched entropy that might
2099 * be stored before the crng is initialized, which we can do lazily by
2100 * simply resetting the counter to zero so that it's re-extracted on the
2101 * next usage. */
2102static void invalidate_batched_entropy(void)
2103{
2104 int cpu;
2105 unsigned long flags;
2106
2107 write_lock_irqsave(&batched_entropy_reset_lock, flags);
2108 for_each_possible_cpu (cpu) {
2109 per_cpu_ptr(&batched_entropy_u32, cpu)->position = 0;
2110 per_cpu_ptr(&batched_entropy_u64, cpu)->position = 0;
2111 }
2112 write_unlock_irqrestore(&batched_entropy_reset_lock, flags);
2113}
2114
2074/** 2115/**
2075 * randomize_page - Generate a random, page aligned address 2116 * randomize_page - Generate a random, page aligned address
2076 * @start: The smallest acceptable address the caller will take. 2117 * @start: The smallest acceptable address the caller will take.
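The batched-entropy fix hinges on an asymmetric rwlock: get_random_u32/u64 take only a read lock, and only while crng_init < 2, so fully initialized systems pay nothing; invalidate_batched_entropy() takes the write lock and zeroes every CPU's position, forcing stale pre-init batches to be re-extracted. A userspace sketch with a pthread rwlock and a small array standing in for per-CPU state (refill is a stub for extract_crng):

    #include <pthread.h>
    #include <stdio.h>

    #define NCPU  4
    #define BATCH 8

    static struct { unsigned int position; unsigned long entropy[BATCH]; }
        batches[NCPU];
    static pthread_rwlock_t reset_lock = PTHREAD_RWLOCK_INITIALIZER;

    static void refill(unsigned long *buf) { (void)buf; /* stand-in */ }

    /* Consumer: read lock only, so all CPUs can draw concurrently. */
    static unsigned long get_batched(int cpu)
    {
        unsigned long ret;

        pthread_rwlock_rdlock(&reset_lock);
        if (batches[cpu].position % BATCH == 0) {
            refill(batches[cpu].entropy);
            batches[cpu].position = 0;
        }
        ret = batches[cpu].entropy[batches[cpu].position++];
        pthread_rwlock_unlock(&reset_lock);
        return ret;
    }

    /* Invalidation: the write lock excludes every consumer while all
     * positions are reset, forcing a refill on next use. */
    static void invalidate_all(void)
    {
        pthread_rwlock_wrlock(&reset_lock);
        for (int cpu = 0; cpu < NCPU; cpu++)
            batches[cpu].position = 0;
        pthread_rwlock_unlock(&reset_lock);
    }

    int main(void)
    {
        get_batched(0);
        invalidate_all();
        printf("pos=%u\n", batches[0].position);    /* 0 again */
        return 0;
    }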
diff --git a/drivers/clk/meson/Kconfig b/drivers/clk/meson/Kconfig
index 19480bcc7046..2f29ee1a4d00 100644
--- a/drivers/clk/meson/Kconfig
+++ b/drivers/clk/meson/Kconfig
@@ -14,6 +14,7 @@ config COMMON_CLK_MESON8B
14config COMMON_CLK_GXBB 14config COMMON_CLK_GXBB
15 bool 15 bool
16 depends on COMMON_CLK_AMLOGIC 16 depends on COMMON_CLK_AMLOGIC
17 select RESET_CONTROLLER
17 help 18 help
18 Support for the clock controller on AmLogic S905 devices, aka gxbb. 19 Support for the clock controller on AmLogic S905 devices, aka gxbb.
19 Say Y if you want peripherals and CPU frequency scaling to work. 20 Say Y if you want peripherals and CPU frequency scaling to work.
diff --git a/drivers/clk/sunxi-ng/Kconfig b/drivers/clk/sunxi-ng/Kconfig
index b0d551a8efe4..eb89c7801f00 100644
--- a/drivers/clk/sunxi-ng/Kconfig
+++ b/drivers/clk/sunxi-ng/Kconfig
@@ -156,6 +156,7 @@ config SUN8I_R_CCU
156 bool "Support for Allwinner SoCs' PRCM CCUs" 156 bool "Support for Allwinner SoCs' PRCM CCUs"
157 select SUNXI_CCU_DIV 157 select SUNXI_CCU_DIV
158 select SUNXI_CCU_GATE 158 select SUNXI_CCU_GATE
159 select SUNXI_CCU_MP
159 default MACH_SUN8I || (ARCH_SUNXI && ARM64) 160 default MACH_SUN8I || (ARCH_SUNXI && ARM64)
160 161
161endif 162endif
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a64.h b/drivers/clk/sunxi-ng/ccu-sun50i-a64.h
index 9b3cd24b78d2..061b6fbb4f95 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-a64.h
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-a64.h
@@ -31,7 +31,9 @@
31#define CLK_PLL_VIDEO0_2X 8 31#define CLK_PLL_VIDEO0_2X 8
32#define CLK_PLL_VE 9 32#define CLK_PLL_VE 9
33#define CLK_PLL_DDR0 10 33#define CLK_PLL_DDR0 10
34#define CLK_PLL_PERIPH0 11 34
35/* PLL_PERIPH0 exported for PRCM */
36
35#define CLK_PLL_PERIPH0_2X 12 37#define CLK_PLL_PERIPH0_2X 12
36#define CLK_PLL_PERIPH1 13 38#define CLK_PLL_PERIPH1 13
37#define CLK_PLL_PERIPH1_2X 14 39#define CLK_PLL_PERIPH1_2X 14
diff --git a/drivers/clk/sunxi-ng/ccu-sun5i.c b/drivers/clk/sunxi-ng/ccu-sun5i.c
index 5c476f966a72..5372bf8be5e6 100644
--- a/drivers/clk/sunxi-ng/ccu-sun5i.c
+++ b/drivers/clk/sunxi-ng/ccu-sun5i.c
@@ -243,7 +243,7 @@ static SUNXI_CCU_GATE(ahb_ss_clk, "ahb-ss", "ahb",
243static SUNXI_CCU_GATE(ahb_dma_clk, "ahb-dma", "ahb", 243static SUNXI_CCU_GATE(ahb_dma_clk, "ahb-dma", "ahb",
244 0x060, BIT(6), 0); 244 0x060, BIT(6), 0);
245static SUNXI_CCU_GATE(ahb_bist_clk, "ahb-bist", "ahb", 245static SUNXI_CCU_GATE(ahb_bist_clk, "ahb-bist", "ahb",
246 0x060, BIT(6), 0); 246 0x060, BIT(7), 0);
247static SUNXI_CCU_GATE(ahb_mmc0_clk, "ahb-mmc0", "ahb", 247static SUNXI_CCU_GATE(ahb_mmc0_clk, "ahb-mmc0", "ahb",
248 0x060, BIT(8), 0); 248 0x060, BIT(8), 0);
249static SUNXI_CCU_GATE(ahb_mmc1_clk, "ahb-mmc1", "ahb", 249static SUNXI_CCU_GATE(ahb_mmc1_clk, "ahb-mmc1", "ahb",
diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
index 89e68d29bf45..df97e25aec76 100644
--- a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
+++ b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
@@ -556,7 +556,7 @@ static SUNXI_CCU_M_WITH_MUX_GATE(lcd0_ch1_clk, "lcd0-ch1", lcd_ch1_parents,
556 0x12c, 0, 4, 24, 3, BIT(31), 556 0x12c, 0, 4, 24, 3, BIT(31),
557 CLK_SET_RATE_PARENT); 557 CLK_SET_RATE_PARENT);
558static SUNXI_CCU_M_WITH_MUX_GATE(lcd1_ch1_clk, "lcd1-ch1", lcd_ch1_parents, 558static SUNXI_CCU_M_WITH_MUX_GATE(lcd1_ch1_clk, "lcd1-ch1", lcd_ch1_parents,
559 0x12c, 0, 4, 24, 3, BIT(31), 559 0x130, 0, 4, 24, 3, BIT(31),
560 CLK_SET_RATE_PARENT); 560 CLK_SET_RATE_PARENT);
561 561
562static const char * const csi_sclk_parents[] = { "pll-video0", "pll-video1", 562static const char * const csi_sclk_parents[] = { "pll-video0", "pll-video1",
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-h3.h b/drivers/clk/sunxi-ng/ccu-sun8i-h3.h
index 85973d1e8165..1b4baea37d81 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-h3.h
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-h3.h
@@ -29,7 +29,9 @@
29#define CLK_PLL_VIDEO 6 29#define CLK_PLL_VIDEO 6
30#define CLK_PLL_VE 7 30#define CLK_PLL_VE 7
31#define CLK_PLL_DDR 8 31#define CLK_PLL_DDR 8
32#define CLK_PLL_PERIPH0 9 32
33/* PLL_PERIPH0 exported for PRCM */
34
33#define CLK_PLL_PERIPH0_2X 10 35#define CLK_PLL_PERIPH0_2X 10
34#define CLK_PLL_GPU 11 36#define CLK_PLL_GPU 11
35#define CLK_PLL_PERIPH1 12 37#define CLK_PLL_PERIPH1 12
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
index e58706b40ae9..6297add857b5 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
@@ -537,7 +537,7 @@ static struct ccu_reset_map sun8i_v3s_ccu_resets[] = {
537 [RST_BUS_EMAC] = { 0x2c0, BIT(17) }, 537 [RST_BUS_EMAC] = { 0x2c0, BIT(17) },
538 [RST_BUS_HSTIMER] = { 0x2c0, BIT(19) }, 538 [RST_BUS_HSTIMER] = { 0x2c0, BIT(19) },
539 [RST_BUS_SPI0] = { 0x2c0, BIT(20) }, 539 [RST_BUS_SPI0] = { 0x2c0, BIT(20) },
540 [RST_BUS_OTG] = { 0x2c0, BIT(23) }, 540 [RST_BUS_OTG] = { 0x2c0, BIT(24) },
541 [RST_BUS_EHCI0] = { 0x2c0, BIT(26) }, 541 [RST_BUS_EHCI0] = { 0x2c0, BIT(26) },
542 [RST_BUS_OHCI0] = { 0x2c0, BIT(29) }, 542 [RST_BUS_OHCI0] = { 0x2c0, BIT(29) },
543 543
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 4bed671e490e..8b5c30062d99 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -1209,9 +1209,9 @@ arch_timer_mem_frame_get_cntfrq(struct arch_timer_mem_frame *frame)
1209 return 0; 1209 return 0;
1210 } 1210 }
1211 1211
1212 rate = readl_relaxed(frame + CNTFRQ); 1212 rate = readl_relaxed(base + CNTFRQ);
1213 1213
1214 iounmap(frame); 1214 iounmap(base);
1215 1215
1216 return rate; 1216 return rate;
1217} 1217}
diff --git a/drivers/clocksource/cadence_ttc_timer.c b/drivers/clocksource/cadence_ttc_timer.c
index 44e5e951583b..8e64b8460f11 100644
--- a/drivers/clocksource/cadence_ttc_timer.c
+++ b/drivers/clocksource/cadence_ttc_timer.c
@@ -18,6 +18,7 @@
18#include <linux/clk.h> 18#include <linux/clk.h>
19#include <linux/interrupt.h> 19#include <linux/interrupt.h>
20#include <linux/clockchips.h> 20#include <linux/clockchips.h>
21#include <linux/clocksource.h>
21#include <linux/of_address.h> 22#include <linux/of_address.h>
22#include <linux/of_irq.h> 23#include <linux/of_irq.h>
23#include <linux/slab.h> 24#include <linux/slab.h>
diff --git a/drivers/clocksource/timer-sun5i.c b/drivers/clocksource/timer-sun5i.c
index 2e9c830ae1cd..c4656c4d44a6 100644
--- a/drivers/clocksource/timer-sun5i.c
+++ b/drivers/clocksource/timer-sun5i.c
@@ -12,6 +12,7 @@
12 12
13#include <linux/clk.h> 13#include <linux/clk.h>
14#include <linux/clockchips.h> 14#include <linux/clockchips.h>
15#include <linux/clocksource.h>
15#include <linux/delay.h> 16#include <linux/delay.h>
16#include <linux/interrupt.h> 17#include <linux/interrupt.h>
17#include <linux/irq.h> 18#include <linux/irq.h>
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 74ed7e9a7f27..2011fec2d6ad 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -71,6 +71,15 @@ config ARM_HIGHBANK_CPUFREQ
71 71
72 If in doubt, say N. 72 If in doubt, say N.
73 73
74config ARM_DB8500_CPUFREQ
75 tristate "ST-Ericsson DB8500 cpufreq" if COMPILE_TEST && !ARCH_U8500
76 default ARCH_U8500
77 depends on HAS_IOMEM
78 depends on !CPU_THERMAL || THERMAL
79 help
80 This adds the CPUFreq driver for ST-Ericsson Ux500 (DB8500) SoC
81 series.
82
74config ARM_IMX6Q_CPUFREQ 83config ARM_IMX6Q_CPUFREQ
75 tristate "Freescale i.MX6 cpufreq support" 84 tristate "Freescale i.MX6 cpufreq support"
76 depends on ARCH_MXC 85 depends on ARCH_MXC
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index b7e78f063c4f..ab3a42cd29ef 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -53,7 +53,7 @@ obj-$(CONFIG_ARM_DT_BL_CPUFREQ) += arm_big_little_dt.o
53 53
54obj-$(CONFIG_ARM_BRCMSTB_AVS_CPUFREQ) += brcmstb-avs-cpufreq.o 54obj-$(CONFIG_ARM_BRCMSTB_AVS_CPUFREQ) += brcmstb-avs-cpufreq.o
55obj-$(CONFIG_ARCH_DAVINCI) += davinci-cpufreq.o 55obj-$(CONFIG_ARCH_DAVINCI) += davinci-cpufreq.o
56obj-$(CONFIG_UX500_SOC_DB8500) += dbx500-cpufreq.o 56obj-$(CONFIG_ARM_DB8500_CPUFREQ) += dbx500-cpufreq.o
57obj-$(CONFIG_ARM_EXYNOS5440_CPUFREQ) += exynos5440-cpufreq.o 57obj-$(CONFIG_ARM_EXYNOS5440_CPUFREQ) += exynos5440-cpufreq.o
58obj-$(CONFIG_ARM_HIGHBANK_CPUFREQ) += highbank-cpufreq.o 58obj-$(CONFIG_ARM_HIGHBANK_CPUFREQ) += highbank-cpufreq.o
59obj-$(CONFIG_ARM_IMX6Q_CPUFREQ) += imx6q-cpufreq.o 59obj-$(CONFIG_ARM_IMX6Q_CPUFREQ) += imx6q-cpufreq.o
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 0e3f6496524d..26b643d57847 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -2468,6 +2468,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
2468 if (!(cpufreq_driver->flags & CPUFREQ_STICKY) && 2468 if (!(cpufreq_driver->flags & CPUFREQ_STICKY) &&
2469 list_empty(&cpufreq_policy_list)) { 2469 list_empty(&cpufreq_policy_list)) {
2470 /* if all ->init() calls failed, unregister */ 2470 /* if all ->init() calls failed, unregister */
2471 ret = -ENODEV;
2471 pr_debug("%s: No CPU initialized for driver %s\n", __func__, 2472 pr_debug("%s: No CPU initialized for driver %s\n", __func__,
2472 driver_data->name); 2473 driver_data->name);
2473 goto err_if_unreg; 2474 goto err_if_unreg;
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 992f7c20760f..88220ff3e1c2 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -185,8 +185,8 @@ static ssize_t store_down_threshold(struct gov_attr_set *attr_set,
185 int ret; 185 int ret;
186 ret = sscanf(buf, "%u", &input); 186 ret = sscanf(buf, "%u", &input);
187 187
188 /* cannot be lower than 11 otherwise freq will not fall */ 188 /* cannot be lower than 1 otherwise freq will not fall */
189 if (ret != 1 || input < 11 || input > 100 || 189 if (ret != 1 || input < 1 || input > 100 ||
190 input >= dbs_data->up_threshold) 190 input >= dbs_data->up_threshold)
191 return -EINVAL; 191 return -EINVAL;
192 192
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index b7de5bd76a31..eb1158532de3 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -571,9 +571,10 @@ static inline void update_turbo_state(void)
571static int min_perf_pct_min(void) 571static int min_perf_pct_min(void)
572{ 572{
573 struct cpudata *cpu = all_cpu_data[0]; 573 struct cpudata *cpu = all_cpu_data[0];
574 int turbo_pstate = cpu->pstate.turbo_pstate;
574 575
575 return DIV_ROUND_UP(cpu->pstate.min_pstate * 100, 576 return turbo_pstate ?
576 cpu->pstate.turbo_pstate); 577 DIV_ROUND_UP(cpu->pstate.min_pstate * 100, turbo_pstate) : 0;
577} 578}
578 579
579static s16 intel_pstate_get_epb(struct cpudata *cpu_data) 580static s16 intel_pstate_get_epb(struct cpudata *cpu_data)
diff --git a/drivers/cpufreq/kirkwood-cpufreq.c b/drivers/cpufreq/kirkwood-cpufreq.c
index 1b9bcd76c60e..c2dd43f3f5d8 100644
--- a/drivers/cpufreq/kirkwood-cpufreq.c
+++ b/drivers/cpufreq/kirkwood-cpufreq.c
@@ -127,7 +127,12 @@ static int kirkwood_cpufreq_probe(struct platform_device *pdev)
127 return PTR_ERR(priv.cpu_clk); 127 return PTR_ERR(priv.cpu_clk);
128 } 128 }
129 129
130 clk_prepare_enable(priv.cpu_clk); 130 err = clk_prepare_enable(priv.cpu_clk);
131 if (err) {
132 dev_err(priv.dev, "Unable to prepare cpuclk\n");
133 return err;
134 }
135
131 kirkwood_freq_table[0].frequency = clk_get_rate(priv.cpu_clk) / 1000; 136 kirkwood_freq_table[0].frequency = clk_get_rate(priv.cpu_clk) / 1000;
132 137
133 priv.ddr_clk = of_clk_get_by_name(np, "ddrclk"); 138 priv.ddr_clk = of_clk_get_by_name(np, "ddrclk");
@@ -137,7 +142,11 @@ static int kirkwood_cpufreq_probe(struct platform_device *pdev)
137 goto out_cpu; 142 goto out_cpu;
138 } 143 }
139 144
140 clk_prepare_enable(priv.ddr_clk); 145 err = clk_prepare_enable(priv.ddr_clk);
146 if (err) {
147 dev_err(priv.dev, "Unable to prepare ddrclk\n");
148 goto out_cpu;
149 }
141 kirkwood_freq_table[1].frequency = clk_get_rate(priv.ddr_clk) / 1000; 150 kirkwood_freq_table[1].frequency = clk_get_rate(priv.ddr_clk) / 1000;
142 151
143 priv.powersave_clk = of_clk_get_by_name(np, "powersave"); 152 priv.powersave_clk = of_clk_get_by_name(np, "powersave");
@@ -146,7 +155,11 @@ static int kirkwood_cpufreq_probe(struct platform_device *pdev)
146 err = PTR_ERR(priv.powersave_clk); 155 err = PTR_ERR(priv.powersave_clk);
147 goto out_ddr; 156 goto out_ddr;
148 } 157 }
149 clk_prepare_enable(priv.powersave_clk); 158 err = clk_prepare_enable(priv.powersave_clk);
159 if (err) {
160 dev_err(priv.dev, "Unable to prepare powersave clk\n");
161 goto out_ddr;
162 }
150 163
151 of_node_put(np); 164 of_node_put(np);
152 np = NULL; 165 np = NULL;
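The kirkwood changes check every clk_prepare_enable() return and unwind only the clocks already enabled, in reverse order, via goto labels. A compilable sketch of that error-unwinding shape, using hypothetical stub functions in place of the real clk API (which can fail, e.g. when a parent clock cannot be enabled):

    #include <stdio.h>

    static int clk_prepare_enable_stub(const char *name, int fail)
    {
        if (fail)
            return -5;  /* stands in for a real errno */
        printf("enabled %s\n", name);
        return 0;
    }

    static void clk_disable_unprepare_stub(const char *name)
    {
        printf("disabled %s\n", name);
    }

    static int probe(void)
    {
        int err;

        err = clk_prepare_enable_stub("cpuclk", 0);
        if (err)
            return err;     /* nothing to unwind yet */

        err = clk_prepare_enable_stub("ddrclk", 1); /* simulated failure */
        if (err)
            goto out_cpu;

        return 0;

    out_cpu:
        /* unwind in reverse order, only what was enabled */
        clk_disable_unprepare_stub("cpuclk");
        return err;
    }

    int main(void)
    {
        return probe() ? 1 : 0;
    }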
diff --git a/drivers/cpuidle/dt_idle_states.c b/drivers/cpuidle/dt_idle_states.c
index ffca4fc0061d..ae8eb0359889 100644
--- a/drivers/cpuidle/dt_idle_states.c
+++ b/drivers/cpuidle/dt_idle_states.c
@@ -180,8 +180,10 @@ int dt_init_idle_driver(struct cpuidle_driver *drv,
180 if (!state_node) 180 if (!state_node)
181 break; 181 break;
182 182
183 if (!of_device_is_available(state_node)) 183 if (!of_device_is_available(state_node)) {
184 of_node_put(state_node);
184 continue; 185 continue;
186 }
185 187
186 if (!idle_state_valid(state_node, i, cpumask)) { 188 if (!idle_state_valid(state_node, i, cpumask)) {
187 pr_warn("%s idle state not valid, bailing out\n", 189 pr_warn("%s idle state not valid, bailing out\n",
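The dt_idle_states fix plugs a reference leak: the bare continue skipped the of_node_put() that balances the node reference the loop iteration took. A small demo of the rule that every exit path from the loop body must drop the reference (node_get/node_put are toy counterparts of the OF helpers):

    #include <stdio.h>

    struct node { int refcount; int available; };

    static struct node *node_get(struct node *n) { n->refcount++; return n; }
    static void node_put(struct node *n) { n->refcount--; }

    static void scan(struct node *nodes, int count)
    {
        for (int i = 0; i < count; i++) {
            struct node *n = node_get(&nodes[i]);

            if (!n->available) {
                node_put(n);    /* the fix: put before continue */
                continue;
            }
            /* ... use n ... */
            node_put(n);
        }
    }

    int main(void)
    {
        struct node nodes[2] = { { 0, 0 }, { 0, 1 } };
        scan(nodes, 2);
        printf("leaks: %d %d\n", nodes[0].refcount, nodes[1].refcount);
        return 0;
    }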
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index ebf43f531ada..922d0823f8ec 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -44,6 +44,7 @@ void dax_read_unlock(int id)
44} 44}
45EXPORT_SYMBOL_GPL(dax_read_unlock); 45EXPORT_SYMBOL_GPL(dax_read_unlock);
46 46
47#ifdef CONFIG_BLOCK
47int bdev_dax_pgoff(struct block_device *bdev, sector_t sector, size_t size, 48int bdev_dax_pgoff(struct block_device *bdev, sector_t sector, size_t size,
48 pgoff_t *pgoff) 49 pgoff_t *pgoff)
49{ 50{
@@ -112,6 +113,7 @@ int __bdev_dax_supported(struct super_block *sb, int blocksize)
112 return 0; 113 return 0;
113} 114}
114EXPORT_SYMBOL_GPL(__bdev_dax_supported); 115EXPORT_SYMBOL_GPL(__bdev_dax_supported);
116#endif
115 117
116/** 118/**
117 * struct dax_device - anchor object for dax services 119 * struct dax_device - anchor object for dax services
@@ -208,9 +210,12 @@ EXPORT_SYMBOL_GPL(kill_dax);
208static struct inode *dax_alloc_inode(struct super_block *sb) 210static struct inode *dax_alloc_inode(struct super_block *sb)
209{ 211{
210 struct dax_device *dax_dev; 212 struct dax_device *dax_dev;
213 struct inode *inode;
211 214
212 dax_dev = kmem_cache_alloc(dax_cache, GFP_KERNEL); 215 dax_dev = kmem_cache_alloc(dax_cache, GFP_KERNEL);
213 return &dax_dev->inode; 216 inode = &dax_dev->inode;
217 inode->i_rdev = 0;
218 return inode;
214} 219}
215 220
216static struct dax_device *to_dax_dev(struct inode *inode) 221static struct dax_device *to_dax_dev(struct inode *inode)
@@ -225,7 +230,8 @@ static void dax_i_callback(struct rcu_head *head)
225 230
226 kfree(dax_dev->host); 231 kfree(dax_dev->host);
227 dax_dev->host = NULL; 232 dax_dev->host = NULL;
228 ida_simple_remove(&dax_minor_ida, MINOR(inode->i_rdev)); 233 if (inode->i_rdev)
234 ida_simple_remove(&dax_minor_ida, MINOR(inode->i_rdev));
229 kmem_cache_free(dax_cache, dax_dev); 235 kmem_cache_free(dax_cache, dax_dev);
230} 236}
231 237
@@ -421,6 +427,7 @@ static void init_once(void *_dax_dev)
421 struct dax_device *dax_dev = _dax_dev; 427 struct dax_device *dax_dev = _dax_dev;
422 struct inode *inode = &dax_dev->inode; 428 struct inode *inode = &dax_dev->inode;
423 429
430 memset(dax_dev, 0, sizeof(*dax_dev));
424 inode_init_once(inode); 431 inode_init_once(inode);
425} 432}
426 433
diff --git a/drivers/devfreq/event/exynos-nocp.c b/drivers/devfreq/event/exynos-nocp.c
index 5c3e7b11e8a6..f6e7956fc91a 100644
--- a/drivers/devfreq/event/exynos-nocp.c
+++ b/drivers/devfreq/event/exynos-nocp.c
@@ -267,7 +267,11 @@ static int exynos_nocp_probe(struct platform_device *pdev)
267 } 267 }
268 platform_set_drvdata(pdev, nocp); 268 platform_set_drvdata(pdev, nocp);
269 269
270 clk_prepare_enable(nocp->clk); 270 ret = clk_prepare_enable(nocp->clk);
271 if (ret) {
272 dev_err(&pdev->dev, "failed to prepare ppmu clock\n");
273 return ret;
274 }
271 275
272 pr_info("exynos-nocp: new NoC Probe device registered: %s\n", 276 pr_info("exynos-nocp: new NoC Probe device registered: %s\n",
273 dev_name(dev)); 277 dev_name(dev));
diff --git a/drivers/devfreq/event/exynos-ppmu.c b/drivers/devfreq/event/exynos-ppmu.c
index 9b7350935b73..d96e3dc71cf8 100644
--- a/drivers/devfreq/event/exynos-ppmu.c
+++ b/drivers/devfreq/event/exynos-ppmu.c
@@ -44,7 +44,7 @@ struct exynos_ppmu {
44 { "ppmu-event2-"#name, PPMU_PMNCNT2 }, \ 44 { "ppmu-event2-"#name, PPMU_PMNCNT2 }, \
45 { "ppmu-event3-"#name, PPMU_PMNCNT3 } 45 { "ppmu-event3-"#name, PPMU_PMNCNT3 }
46 46
47struct __exynos_ppmu_events { 47static struct __exynos_ppmu_events {
48 char *name; 48 char *name;
49 int id; 49 int id;
50} ppmu_events[] = { 50} ppmu_events[] = {
@@ -648,7 +648,11 @@ static int exynos_ppmu_probe(struct platform_device *pdev)
648 dev_name(&pdev->dev), desc[i].name); 648 dev_name(&pdev->dev), desc[i].name);
649 } 649 }
650 650
651 clk_prepare_enable(info->ppmu.clk); 651 ret = clk_prepare_enable(info->ppmu.clk);
652 if (ret) {
653 dev_err(&pdev->dev, "failed to prepare ppmu clock\n");
654 return ret;
655 }
652 656
653 return 0; 657 return 0;
654} 658}
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index d37e8dda8079..ec240592f5c8 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -201,6 +201,7 @@ struct ep93xx_dma_engine {
201 struct dma_device dma_dev; 201 struct dma_device dma_dev;
202 bool m2m; 202 bool m2m;
203 int (*hw_setup)(struct ep93xx_dma_chan *); 203 int (*hw_setup)(struct ep93xx_dma_chan *);
204 void (*hw_synchronize)(struct ep93xx_dma_chan *);
204 void (*hw_shutdown)(struct ep93xx_dma_chan *); 205 void (*hw_shutdown)(struct ep93xx_dma_chan *);
205 void (*hw_submit)(struct ep93xx_dma_chan *); 206 void (*hw_submit)(struct ep93xx_dma_chan *);
206 int (*hw_interrupt)(struct ep93xx_dma_chan *); 207 int (*hw_interrupt)(struct ep93xx_dma_chan *);
@@ -323,6 +324,8 @@ static int m2p_hw_setup(struct ep93xx_dma_chan *edmac)
323 | M2P_CONTROL_ENABLE; 324 | M2P_CONTROL_ENABLE;
324 m2p_set_control(edmac, control); 325 m2p_set_control(edmac, control);
325 326
327 edmac->buffer = 0;
328
326 return 0; 329 return 0;
327} 330}
328 331
@@ -331,21 +334,27 @@ static inline u32 m2p_channel_state(struct ep93xx_dma_chan *edmac)
331 return (readl(edmac->regs + M2P_STATUS) >> 4) & 0x3; 334 return (readl(edmac->regs + M2P_STATUS) >> 4) & 0x3;
332} 335}
333 336
334static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac) 337static void m2p_hw_synchronize(struct ep93xx_dma_chan *edmac)
335{ 338{
339 unsigned long flags;
336 u32 control; 340 u32 control;
337 341
342 spin_lock_irqsave(&edmac->lock, flags);
338 control = readl(edmac->regs + M2P_CONTROL); 343 control = readl(edmac->regs + M2P_CONTROL);
339 control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT); 344 control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
340 m2p_set_control(edmac, control); 345 m2p_set_control(edmac, control);
346 spin_unlock_irqrestore(&edmac->lock, flags);
341 347
342 while (m2p_channel_state(edmac) >= M2P_STATE_ON) 348 while (m2p_channel_state(edmac) >= M2P_STATE_ON)
343 cpu_relax(); 349 schedule();
350}
344 351
352static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
353{
345 m2p_set_control(edmac, 0); 354 m2p_set_control(edmac, 0);
346 355
347 while (m2p_channel_state(edmac) == M2P_STATE_STALL) 356 while (m2p_channel_state(edmac) != M2P_STATE_IDLE)
348 cpu_relax(); 357 dev_warn(chan2dev(edmac), "M2P: Not yet IDLE\n");
349} 358}
350 359
351static void m2p_fill_desc(struct ep93xx_dma_chan *edmac) 360static void m2p_fill_desc(struct ep93xx_dma_chan *edmac)
@@ -1161,6 +1170,26 @@ fail:
1161} 1170}
1162 1171
1163/** 1172/**
1173 * ep93xx_dma_synchronize - Synchronizes the termination of transfers to the
1174 * current context.
1175 * @chan: channel
1176 *
1177 * Synchronizes the DMA channel termination to the current context. When this
1178 * function returns it is guaranteed that all transfers for previously issued
1179 * descriptors have stopped and and it is safe to free the memory associated
1180 * with them. Furthermore it is guaranteed that all complete callback functions
1181 * for a previously submitted descriptor have finished running and it is safe to
1182 * free resources accessed from within the complete callbacks.
1183 */
1184static void ep93xx_dma_synchronize(struct dma_chan *chan)
1185{
1186 struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
1187
1188 if (edmac->edma->hw_synchronize)
1189 edmac->edma->hw_synchronize(edmac);
1190}
1191
1192/**
1164 * ep93xx_dma_terminate_all - terminate all transactions 1193 * ep93xx_dma_terminate_all - terminate all transactions
1165 * @chan: channel 1194 * @chan: channel
1166 * 1195 *
@@ -1323,6 +1352,7 @@ static int __init ep93xx_dma_probe(struct platform_device *pdev)
1323 dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg; 1352 dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg;
1324 dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic; 1353 dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic;
1325 dma_dev->device_config = ep93xx_dma_slave_config; 1354 dma_dev->device_config = ep93xx_dma_slave_config;
1355 dma_dev->device_synchronize = ep93xx_dma_synchronize;
1326 dma_dev->device_terminate_all = ep93xx_dma_terminate_all; 1356 dma_dev->device_terminate_all = ep93xx_dma_terminate_all;
1327 dma_dev->device_issue_pending = ep93xx_dma_issue_pending; 1357 dma_dev->device_issue_pending = ep93xx_dma_issue_pending;
1328 dma_dev->device_tx_status = ep93xx_dma_tx_status; 1358 dma_dev->device_tx_status = ep93xx_dma_tx_status;
@@ -1340,6 +1370,7 @@ static int __init ep93xx_dma_probe(struct platform_device *pdev)
1340 } else { 1370 } else {
1341 dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask); 1371 dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);
1342 1372
1373 edma->hw_synchronize = m2p_hw_synchronize;
1343 edma->hw_setup = m2p_hw_setup; 1374 edma->hw_setup = m2p_hw_setup;
1344 edma->hw_shutdown = m2p_hw_shutdown; 1375 edma->hw_shutdown = m2p_hw_shutdown;
1345 edma->hw_submit = m2p_hw_submit; 1376 edma->hw_submit = m2p_hw_submit;
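The ep93xx patch splits channel teardown into a non-blocking terminate and a separate, sleepable synchronize step, wired up through an optional per-controller hook (hw_synchronize may be absent on engines that stop synchronously). A structural sketch of that optional-callback pattern, with demo names throughout:

    #include <stdio.h>

    struct chan;

    struct engine_ops {
        void (*hw_terminate)(struct chan *c);
        void (*hw_synchronize)(struct chan *c); /* may be NULL */
    };

    struct chan { const struct engine_ops *ops; };

    static void terminate_all(struct chan *c)
    {
        c->ops->hw_terminate(c);    /* must not sleep */
    }

    static void synchronize(struct chan *c)
    {
        if (c->ops->hw_synchronize)     /* sleepable wait, if provided */
            c->ops->hw_synchronize(c);
    }

    static void m2p_terminate(struct chan *c) { (void)c; printf("stop requested\n"); }
    static void m2p_sync(struct chan *c) { (void)c; printf("waited for idle\n"); }

    static const struct engine_ops m2p_ops = { m2p_terminate, m2p_sync };

    int main(void)
    {
        struct chan c = { &m2p_ops };
        terminate_all(&c);  /* fast, safe in atomic context */
        synchronize(&c);    /* afterwards, from sleepable context */
        return 0;
    }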
diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c
index a28a01fcba67..f3e211f8f6c5 100644
--- a/drivers/dma/mv_xor_v2.c
+++ b/drivers/dma/mv_xor_v2.c
@@ -161,6 +161,7 @@ struct mv_xor_v2_device {
161 struct mv_xor_v2_sw_desc *sw_desq; 161 struct mv_xor_v2_sw_desc *sw_desq;
162 int desc_size; 162 int desc_size;
163 unsigned int npendings; 163 unsigned int npendings;
164 unsigned int hw_queue_idx;
164}; 165};
165 166
166/** 167/**
@@ -214,18 +215,6 @@ static void mv_xor_v2_set_data_buffers(struct mv_xor_v2_device *xor_dev,
214} 215}
215 216
216/* 217/*
217 * Return the next available index in the DESQ.
218 */
219static int mv_xor_v2_get_desq_write_ptr(struct mv_xor_v2_device *xor_dev)
220{
221 /* read the index for the next available descriptor in the DESQ */
222 u32 reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_ALLOC_OFF);
223
224 return ((reg >> MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_SHIFT)
225 & MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_MASK);
226}
227
228/*
229 * notify the engine of new descriptors, and update the available index. 218 * notify the engine of new descriptors, and update the available index.
230 */ 219 */
231static void mv_xor_v2_add_desc_to_desq(struct mv_xor_v2_device *xor_dev, 220static void mv_xor_v2_add_desc_to_desq(struct mv_xor_v2_device *xor_dev,
@@ -257,22 +246,6 @@ static int mv_xor_v2_set_desc_size(struct mv_xor_v2_device *xor_dev)
257 return MV_XOR_V2_EXT_DESC_SIZE; 246 return MV_XOR_V2_EXT_DESC_SIZE;
258} 247}
259 248
260/*
261 * Set the IMSG threshold
262 */
263static inline
264void mv_xor_v2_set_imsg_thrd(struct mv_xor_v2_device *xor_dev, int thrd_val)
265{
266 u32 reg;
267
268 reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF);
269
270 reg &= (~MV_XOR_V2_DMA_IMSG_THRD_MASK << MV_XOR_V2_DMA_IMSG_THRD_SHIFT);
271 reg |= (thrd_val << MV_XOR_V2_DMA_IMSG_THRD_SHIFT);
272
273 writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF);
274}
275
276static irqreturn_t mv_xor_v2_interrupt_handler(int irq, void *data) 249static irqreturn_t mv_xor_v2_interrupt_handler(int irq, void *data)
277{ 250{
278 struct mv_xor_v2_device *xor_dev = data; 251 struct mv_xor_v2_device *xor_dev = data;
@@ -288,12 +261,6 @@ static irqreturn_t mv_xor_v2_interrupt_handler(int irq, void *data)
288 if (!ndescs) 261 if (!ndescs)
289 return IRQ_NONE; 262 return IRQ_NONE;
290 263
291 /*
292 * Update IMSG threshold, to disable new IMSG interrupts until
293 * end of the tasklet
294 */
295 mv_xor_v2_set_imsg_thrd(xor_dev, MV_XOR_V2_DESC_NUM);
296
297 /* schedule a tasklet to handle descriptors callbacks */ 264 /* schedule a tasklet to handle descriptors callbacks */
298 tasklet_schedule(&xor_dev->irq_tasklet); 265 tasklet_schedule(&xor_dev->irq_tasklet);
299 266
@@ -306,7 +273,6 @@ static irqreturn_t mv_xor_v2_interrupt_handler(int irq, void *data)
306static dma_cookie_t 273static dma_cookie_t
307mv_xor_v2_tx_submit(struct dma_async_tx_descriptor *tx) 274mv_xor_v2_tx_submit(struct dma_async_tx_descriptor *tx)
308{ 275{
309 int desq_ptr;
310 void *dest_hw_desc; 276 void *dest_hw_desc;
311 dma_cookie_t cookie; 277 dma_cookie_t cookie;
312 struct mv_xor_v2_sw_desc *sw_desc = 278 struct mv_xor_v2_sw_desc *sw_desc =
@@ -322,15 +288,15 @@ mv_xor_v2_tx_submit(struct dma_async_tx_descriptor *tx)
322 spin_lock_bh(&xor_dev->lock); 288 spin_lock_bh(&xor_dev->lock);
323 cookie = dma_cookie_assign(tx); 289 cookie = dma_cookie_assign(tx);
324 290
325 /* get the next available slot in the DESQ */
326 desq_ptr = mv_xor_v2_get_desq_write_ptr(xor_dev);
327
328 /* copy the HW descriptor from the SW descriptor to the DESQ */ 291 /* copy the HW descriptor from the SW descriptor to the DESQ */
329 dest_hw_desc = xor_dev->hw_desq_virt + desq_ptr; 292 dest_hw_desc = xor_dev->hw_desq_virt + xor_dev->hw_queue_idx;
330 293
331 memcpy(dest_hw_desc, &sw_desc->hw_desc, xor_dev->desc_size); 294 memcpy(dest_hw_desc, &sw_desc->hw_desc, xor_dev->desc_size);
332 295
333 xor_dev->npendings++; 296 xor_dev->npendings++;
297 xor_dev->hw_queue_idx++;
298 if (xor_dev->hw_queue_idx >= MV_XOR_V2_DESC_NUM)
299 xor_dev->hw_queue_idx = 0;
334 300
335 spin_unlock_bh(&xor_dev->lock); 301 spin_unlock_bh(&xor_dev->lock);
336 302
@@ -344,6 +310,7 @@ static struct mv_xor_v2_sw_desc *
344mv_xor_v2_prep_sw_desc(struct mv_xor_v2_device *xor_dev) 310mv_xor_v2_prep_sw_desc(struct mv_xor_v2_device *xor_dev)
345{ 311{
346 struct mv_xor_v2_sw_desc *sw_desc; 312 struct mv_xor_v2_sw_desc *sw_desc;
313 bool found = false;
347 314
348 /* Lock the channel */ 315 /* Lock the channel */
349 spin_lock_bh(&xor_dev->lock); 316 spin_lock_bh(&xor_dev->lock);
@@ -355,19 +322,23 @@ mv_xor_v2_prep_sw_desc(struct mv_xor_v2_device *xor_dev)
355 return NULL; 322 return NULL;
356 } 323 }
357 324
358 /* get a free SW descriptor from the SW DESQ */ 325 list_for_each_entry(sw_desc, &xor_dev->free_sw_desc, free_list) {
359 sw_desc = list_first_entry(&xor_dev->free_sw_desc, 326 if (async_tx_test_ack(&sw_desc->async_tx)) {
360 struct mv_xor_v2_sw_desc, free_list); 327 found = true;
328 break;
329 }
330 }
331
332 if (!found) {
333 spin_unlock_bh(&xor_dev->lock);
334 return NULL;
335 }
336
361 list_del(&sw_desc->free_list); 337 list_del(&sw_desc->free_list);
362 338
363 /* Release the channel */ 339 /* Release the channel */
364 spin_unlock_bh(&xor_dev->lock); 340 spin_unlock_bh(&xor_dev->lock);
365 341
366 /* set the async tx descriptor */
367 dma_async_tx_descriptor_init(&sw_desc->async_tx, &xor_dev->dmachan);
368 sw_desc->async_tx.tx_submit = mv_xor_v2_tx_submit;
369 async_tx_ack(&sw_desc->async_tx);
370
371 return sw_desc; 342 return sw_desc;
372} 343}
373 344
@@ -389,6 +360,8 @@ mv_xor_v2_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
389 __func__, len, &src, &dest, flags); 360 __func__, len, &src, &dest, flags);
390 361
391 sw_desc = mv_xor_v2_prep_sw_desc(xor_dev); 362 sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
363 if (!sw_desc)
364 return NULL;
392 365
393 sw_desc->async_tx.flags = flags; 366 sw_desc->async_tx.flags = flags;
394 367
@@ -443,6 +416,8 @@ mv_xor_v2_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
443 __func__, src_cnt, len, &dest, flags); 416 __func__, src_cnt, len, &dest, flags);
444 417
445 sw_desc = mv_xor_v2_prep_sw_desc(xor_dev); 418 sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
419 if (!sw_desc)
420 return NULL;
446 421
447 sw_desc->async_tx.flags = flags; 422 sw_desc->async_tx.flags = flags;
448 423
@@ -491,6 +466,8 @@ mv_xor_v2_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
491 container_of(chan, struct mv_xor_v2_device, dmachan); 466 container_of(chan, struct mv_xor_v2_device, dmachan);
492 467
493 sw_desc = mv_xor_v2_prep_sw_desc(xor_dev); 468 sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
469 if (!sw_desc)
470 return NULL;
494 471
495 /* set the HW descriptor */ 472 /* set the HW descriptor */
496 hw_descriptor = &sw_desc->hw_desc; 473 hw_descriptor = &sw_desc->hw_desc;
@@ -554,7 +531,6 @@ static void mv_xor_v2_tasklet(unsigned long data)
554{ 531{
555 struct mv_xor_v2_device *xor_dev = (struct mv_xor_v2_device *) data; 532 struct mv_xor_v2_device *xor_dev = (struct mv_xor_v2_device *) data;
556 int pending_ptr, num_of_pending, i; 533 int pending_ptr, num_of_pending, i;
557 struct mv_xor_v2_descriptor *next_pending_hw_desc = NULL;
558 struct mv_xor_v2_sw_desc *next_pending_sw_desc = NULL; 534 struct mv_xor_v2_sw_desc *next_pending_sw_desc = NULL;
559 535
560 dev_dbg(xor_dev->dmadev.dev, "%s %d\n", __func__, __LINE__); 536 dev_dbg(xor_dev->dmadev.dev, "%s %d\n", __func__, __LINE__);
@@ -562,17 +538,10 @@ static void mv_xor_v2_tasklet(unsigned long data)
562 /* get the pending descriptors parameters */ 538 /* get the pending descriptors parameters */
563 num_of_pending = mv_xor_v2_get_pending_params(xor_dev, &pending_ptr); 539 num_of_pending = mv_xor_v2_get_pending_params(xor_dev, &pending_ptr);
564 540
565 /* next HW descriptor */
566 next_pending_hw_desc = xor_dev->hw_desq_virt + pending_ptr;
567
568 /* loop over free descriptors */ 541 /* loop over free descriptors */
569 for (i = 0; i < num_of_pending; i++) { 542 for (i = 0; i < num_of_pending; i++) {
570 543 struct mv_xor_v2_descriptor *next_pending_hw_desc =
571 if (pending_ptr > MV_XOR_V2_DESC_NUM) 544 xor_dev->hw_desq_virt + pending_ptr;
572 pending_ptr = 0;
573
574 if (next_pending_sw_desc != NULL)
575 next_pending_hw_desc++;
576 545
577 /* get the SW descriptor related to the HW descriptor */ 546 /* get the SW descriptor related to the HW descriptor */
578 next_pending_sw_desc = 547 next_pending_sw_desc =
@@ -608,15 +577,14 @@ static void mv_xor_v2_tasklet(unsigned long data)
608 577
609 /* increment the next descriptor */ 578 /* increment the next descriptor */
610 pending_ptr++; 579 pending_ptr++;
580 if (pending_ptr >= MV_XOR_V2_DESC_NUM)
581 pending_ptr = 0;
611 } 582 }
612 583
613 if (num_of_pending != 0) { 584 if (num_of_pending != 0) {
 614 /* free the descriptors */ 585 /* free the descriptors */
615 mv_xor_v2_free_desc_from_desq(xor_dev, num_of_pending); 586 mv_xor_v2_free_desc_from_desq(xor_dev, num_of_pending);
616 } 587 }
617
618 /* Update IMSG threshold, to enable new IMSG interrupts */
619 mv_xor_v2_set_imsg_thrd(xor_dev, 0);
620} 588}
621 589
622/* 590/*
@@ -648,9 +616,6 @@ static int mv_xor_v2_descq_init(struct mv_xor_v2_device *xor_dev)
648 writel((xor_dev->hw_desq & 0xFFFF00000000) >> 32, 616 writel((xor_dev->hw_desq & 0xFFFF00000000) >> 32,
649 xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_BAHR_OFF); 617 xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_BAHR_OFF);
650 618
651 /* enable the DMA engine */
652 writel(0, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF);
653
654 /* 619 /*
655 * This is a temporary solution, until we activate the 620 * This is a temporary solution, until we activate the
656 * SMMU. Set the attributes for reading & writing data buffers 621 * SMMU. Set the attributes for reading & writing data buffers
@@ -694,6 +659,9 @@ static int mv_xor_v2_descq_init(struct mv_xor_v2_device *xor_dev)
694 reg |= MV_XOR_V2_GLOB_PAUSE_AXI_TIME_DIS_VAL; 659 reg |= MV_XOR_V2_GLOB_PAUSE_AXI_TIME_DIS_VAL;
695 writel(reg, xor_dev->glob_base + MV_XOR_V2_GLOB_PAUSE); 660 writel(reg, xor_dev->glob_base + MV_XOR_V2_GLOB_PAUSE);
696 661
662 /* enable the DMA engine */
663 writel(0, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF);
664
697 return 0; 665 return 0;
698} 666}
699 667
@@ -725,6 +693,10 @@ static int mv_xor_v2_probe(struct platform_device *pdev)
725 693
726 platform_set_drvdata(pdev, xor_dev); 694 platform_set_drvdata(pdev, xor_dev);
727 695
696 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
697 if (ret)
698 return ret;
699
728 xor_dev->clk = devm_clk_get(&pdev->dev, NULL); 700 xor_dev->clk = devm_clk_get(&pdev->dev, NULL);
729 if (IS_ERR(xor_dev->clk) && PTR_ERR(xor_dev->clk) == -EPROBE_DEFER) 701 if (IS_ERR(xor_dev->clk) && PTR_ERR(xor_dev->clk) == -EPROBE_DEFER)
730 return -EPROBE_DEFER; 702 return -EPROBE_DEFER;
@@ -785,8 +757,15 @@ static int mv_xor_v2_probe(struct platform_device *pdev)
785 757
786 /* add all SW descriptors to the free list */ 758 /* add all SW descriptors to the free list */
787 for (i = 0; i < MV_XOR_V2_DESC_NUM; i++) { 759 for (i = 0; i < MV_XOR_V2_DESC_NUM; i++) {
788 xor_dev->sw_desq[i].idx = i; 760 struct mv_xor_v2_sw_desc *sw_desc =
789 list_add(&xor_dev->sw_desq[i].free_list, 761 xor_dev->sw_desq + i;
762 sw_desc->idx = i;
763 dma_async_tx_descriptor_init(&sw_desc->async_tx,
764 &xor_dev->dmachan);
765 sw_desc->async_tx.tx_submit = mv_xor_v2_tx_submit;
766 async_tx_ack(&sw_desc->async_tx);
767
768 list_add(&sw_desc->free_list,
790 &xor_dev->free_sw_desc); 769 &xor_dev->free_sw_desc);
791 } 770 }
792 771
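The mv_xor_v2 rework stops reading the hardware allocation register for every submission and instead keeps a software write index (hw_queue_idx) that wraps at the ring size, since submission and completion both walk the ring in order. A tiny demo of that round-robin index:

    #include <stdio.h>

    #define DESC_NUM 4

    static int desq[DESC_NUM];
    static unsigned int submit_idx; /* software copy of the write pointer */

    static void submit(int payload)
    {
        desq[submit_idx] = payload;
        if (++submit_idx >= DESC_NUM)
            submit_idx = 0;     /* wrap exactly at the ring size */
    }

    int main(void)
    {
        for (int i = 0; i < 6; i++)
            submit(i);      /* fills slots 0 1 2 3, then 0 1 again */
        printf("next slot: %u\n", submit_idx);  /* 2 */
        return 0;
    }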
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 8b0da7fa520d..e90a7a0d760a 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -3008,7 +3008,8 @@ static int pl330_remove(struct amba_device *adev)
3008 3008
3009 for (i = 0; i < AMBA_NR_IRQS; i++) { 3009 for (i = 0; i < AMBA_NR_IRQS; i++) {
3010 irq = adev->irq[i]; 3010 irq = adev->irq[i];
3011 devm_free_irq(&adev->dev, irq, pl330); 3011 if (irq)
3012 devm_free_irq(&adev->dev, irq, pl330);
3012 } 3013 }
3013 3014
3014 dma_async_device_unregister(&pl330->ddma); 3015 dma_async_device_unregister(&pl330->ddma);
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index db41795fe42a..bd261c9e9664 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -1287,6 +1287,9 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
1287 if (desc->hwdescs.use) { 1287 if (desc->hwdescs.use) {
1288 dptr = (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) & 1288 dptr = (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
1289 RCAR_DMACHCRB_DPTR_MASK) >> RCAR_DMACHCRB_DPTR_SHIFT; 1289 RCAR_DMACHCRB_DPTR_MASK) >> RCAR_DMACHCRB_DPTR_SHIFT;
1290 if (dptr == 0)
1291 dptr = desc->nchunks;
1292 dptr--;
1290 WARN_ON(dptr >= desc->nchunks); 1293 WARN_ON(dptr >= desc->nchunks);
1291 } else { 1294 } else {
1292 running = desc->running; 1295 running = desc->running;
diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c
index 72c649713ace..31a145154e9f 100644
--- a/drivers/dma/sh/usb-dmac.c
+++ b/drivers/dma/sh/usb-dmac.c
@@ -117,7 +117,7 @@ struct usb_dmac {
117#define USB_DMASWR 0x0008 117#define USB_DMASWR 0x0008
118#define USB_DMASWR_SWR (1 << 0) 118#define USB_DMASWR_SWR (1 << 0)
119#define USB_DMAOR 0x0060 119#define USB_DMAOR 0x0060
120#define USB_DMAOR_AE (1 << 2) 120#define USB_DMAOR_AE (1 << 1)
121#define USB_DMAOR_DME (1 << 0) 121#define USB_DMAOR_DME (1 << 0)
122 122
123#define USB_DMASAR 0x0000 123#define USB_DMASAR 0x0000
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 82dab1692264..3aea55698165 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -782,24 +782,26 @@ static void debug_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan)
782 782
783static void debug_display_dimm_sizes_df(struct amd64_pvt *pvt, u8 ctrl) 783static void debug_display_dimm_sizes_df(struct amd64_pvt *pvt, u8 ctrl)
784{ 784{
785 u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases; 785 int dimm, size0, size1, cs0, cs1;
786 int dimm, size0, size1;
787 786
788 edac_printk(KERN_DEBUG, EDAC_MC, "UMC%d chip selects:\n", ctrl); 787 edac_printk(KERN_DEBUG, EDAC_MC, "UMC%d chip selects:\n", ctrl);
789 788
790 for (dimm = 0; dimm < 4; dimm++) { 789 for (dimm = 0; dimm < 4; dimm++) {
791 size0 = 0; 790 size0 = 0;
791 cs0 = dimm * 2;
792 792
793 if (dcsb[dimm*2] & DCSB_CS_ENABLE) 793 if (csrow_enabled(cs0, ctrl, pvt))
794 size0 = pvt->ops->dbam_to_cs(pvt, ctrl, 0, dimm); 794 size0 = pvt->ops->dbam_to_cs(pvt, ctrl, 0, cs0);
795 795
796 size1 = 0; 796 size1 = 0;
797 if (dcsb[dimm*2 + 1] & DCSB_CS_ENABLE) 797 cs1 = dimm * 2 + 1;
798 size1 = pvt->ops->dbam_to_cs(pvt, ctrl, 0, dimm); 798
799 if (csrow_enabled(cs1, ctrl, pvt))
800 size1 = pvt->ops->dbam_to_cs(pvt, ctrl, 0, cs1);
799 801
800 amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n", 802 amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
801 dimm * 2, size0, 803 cs0, size0,
802 dimm * 2 + 1, size1); 804 cs1, size1);
803 } 805 }
804} 806}
805 807
@@ -2756,26 +2758,22 @@ skip:
2756 * encompasses 2758 * encompasses
2757 * 2759 *
2758 */ 2760 */
2759static u32 get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr) 2761static u32 get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr_orig)
2760{ 2762{
2761 u32 cs_mode, nr_pages;
2762 u32 dbam = dct ? pvt->dbam1 : pvt->dbam0; 2763 u32 dbam = dct ? pvt->dbam1 : pvt->dbam0;
2764 int csrow_nr = csrow_nr_orig;
2765 u32 cs_mode, nr_pages;
2763 2766
2767 if (!pvt->umc)
2768 csrow_nr >>= 1;
2764 2769
2765 /* 2770 cs_mode = DBAM_DIMM(csrow_nr, dbam);
2766 * The math on this doesn't look right on the surface because x/2*4 can
2767 * be simplified to x*2 but this expression makes use of the fact that
2768 * it is integral math where 1/2=0. This intermediate value becomes the
2769 * number of bits to shift the DBAM register to extract the proper CSROW
2770 * field.
2771 */
2772 cs_mode = DBAM_DIMM(csrow_nr / 2, dbam);
2773 2771
2774 nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode, (csrow_nr / 2)) 2772 nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode, csrow_nr);
2775 << (20 - PAGE_SHIFT); 2773 nr_pages <<= 20 - PAGE_SHIFT;
2776 2774
2777 edac_dbg(0, "csrow: %d, channel: %d, DBAM idx: %d\n", 2775 edac_dbg(0, "csrow: %d, channel: %d, DBAM idx: %d\n",
2778 csrow_nr, dct, cs_mode); 2776 csrow_nr_orig, dct, cs_mode);
2779 edac_dbg(0, "nr_pages/channel: %u\n", nr_pages); 2777 edac_dbg(0, "nr_pages/channel: %u\n", nr_pages);
2780 2778
2781 return nr_pages; 2779 return nr_pages;
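Both amd64_edac hunks move from per-DIMM to per-chip-select indexing; on pre-UMC parts two chip selects share one DIMM, which is why the csrow number is halved before indexing DBAM. A sketch, assuming the usual 4-bits-per-DIMM DBAM layout that DBAM_DIMM() encodes:

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed layout: DBAM packs one 4-bit chip-select mode per DIMM,
     * mirroring the kernel's DBAM_DIMM() accessor. */
    #define DBAM_DIMM(i, reg) (((reg) >> (4 * (i))) & 0xF)

    static uint32_t cs_mode_for(int csrow_nr, uint32_t dbam, int has_umc)
    {
        /* Pre-UMC: two chip selects per DIMM, so halve before indexing. */
        if (!has_umc)
            csrow_nr >>= 1;
        return DBAM_DIMM(csrow_nr, dbam);
    }

    int main(void)
    {
        uint32_t dbam = 0x3210; /* modes 0..3 for DIMMs 0..3 */

        printf("csrow 5, no UMC -> mode %u\n", (unsigned)cs_mode_for(5, dbam, 0));
        printf("csrow 2, UMC    -> mode %u\n", (unsigned)cs_mode_for(2, dbam, 1));
        return 0;
    }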
diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c
index 44c01390d035..951b6c79f166 100644
--- a/drivers/firmware/dmi-id.c
+++ b/drivers/firmware/dmi-id.c
@@ -47,6 +47,7 @@ DEFINE_DMI_ATTR_WITH_SHOW(product_name, 0444, DMI_PRODUCT_NAME);
47DEFINE_DMI_ATTR_WITH_SHOW(product_version, 0444, DMI_PRODUCT_VERSION); 47DEFINE_DMI_ATTR_WITH_SHOW(product_version, 0444, DMI_PRODUCT_VERSION);
48DEFINE_DMI_ATTR_WITH_SHOW(product_serial, 0400, DMI_PRODUCT_SERIAL); 48DEFINE_DMI_ATTR_WITH_SHOW(product_serial, 0400, DMI_PRODUCT_SERIAL);
49DEFINE_DMI_ATTR_WITH_SHOW(product_uuid, 0400, DMI_PRODUCT_UUID); 49DEFINE_DMI_ATTR_WITH_SHOW(product_uuid, 0400, DMI_PRODUCT_UUID);
50DEFINE_DMI_ATTR_WITH_SHOW(product_family, 0444, DMI_PRODUCT_FAMILY);
50DEFINE_DMI_ATTR_WITH_SHOW(board_vendor, 0444, DMI_BOARD_VENDOR); 51DEFINE_DMI_ATTR_WITH_SHOW(board_vendor, 0444, DMI_BOARD_VENDOR);
51DEFINE_DMI_ATTR_WITH_SHOW(board_name, 0444, DMI_BOARD_NAME); 52DEFINE_DMI_ATTR_WITH_SHOW(board_name, 0444, DMI_BOARD_NAME);
52DEFINE_DMI_ATTR_WITH_SHOW(board_version, 0444, DMI_BOARD_VERSION); 53DEFINE_DMI_ATTR_WITH_SHOW(board_version, 0444, DMI_BOARD_VERSION);
@@ -191,6 +192,7 @@ static void __init dmi_id_init_attr_table(void)
191 ADD_DMI_ATTR(product_version, DMI_PRODUCT_VERSION); 192 ADD_DMI_ATTR(product_version, DMI_PRODUCT_VERSION);
192 ADD_DMI_ATTR(product_serial, DMI_PRODUCT_SERIAL); 193 ADD_DMI_ATTR(product_serial, DMI_PRODUCT_SERIAL);
193 ADD_DMI_ATTR(product_uuid, DMI_PRODUCT_UUID); 194 ADD_DMI_ATTR(product_uuid, DMI_PRODUCT_UUID);
195 ADD_DMI_ATTR(product_family, DMI_PRODUCT_FAMILY);
194 ADD_DMI_ATTR(board_vendor, DMI_BOARD_VENDOR); 196 ADD_DMI_ATTR(board_vendor, DMI_BOARD_VENDOR);
195 ADD_DMI_ATTR(board_name, DMI_BOARD_NAME); 197 ADD_DMI_ATTR(board_name, DMI_BOARD_NAME);
196 ADD_DMI_ATTR(board_version, DMI_BOARD_VERSION); 198 ADD_DMI_ATTR(board_version, DMI_BOARD_VERSION);
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index 54be60ead08f..783041964439 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -144,7 +144,7 @@ static int __init dmi_walk_early(void (*decode)(const struct dmi_header *,
144 144
145 buf = dmi_early_remap(dmi_base, orig_dmi_len); 145 buf = dmi_early_remap(dmi_base, orig_dmi_len);
146 if (buf == NULL) 146 if (buf == NULL)
147 return -1; 147 return -ENOMEM;
148 148
149 dmi_decode_table(buf, decode, NULL); 149 dmi_decode_table(buf, decode, NULL);
150 150
@@ -178,7 +178,7 @@ static void __init dmi_save_ident(const struct dmi_header *dm, int slot,
178 const char *d = (const char *) dm; 178 const char *d = (const char *) dm;
179 const char *p; 179 const char *p;
180 180
181 if (dmi_ident[slot]) 181 if (dmi_ident[slot] || dm->length <= string)
182 return; 182 return;
183 183
184 p = dmi_string(dm, d[string]); 184 p = dmi_string(dm, d[string]);
@@ -191,13 +191,14 @@ static void __init dmi_save_ident(const struct dmi_header *dm, int slot,
191static void __init dmi_save_uuid(const struct dmi_header *dm, int slot, 191static void __init dmi_save_uuid(const struct dmi_header *dm, int slot,
192 int index) 192 int index)
193{ 193{
194 const u8 *d = (u8 *) dm + index; 194 const u8 *d;
195 char *s; 195 char *s;
196 int is_ff = 1, is_00 = 1, i; 196 int is_ff = 1, is_00 = 1, i;
197 197
198 if (dmi_ident[slot]) 198 if (dmi_ident[slot] || dm->length <= index + 16)
199 return; 199 return;
200 200
201 d = (u8 *) dm + index;
201 for (i = 0; i < 16 && (is_ff || is_00); i++) { 202 for (i = 0; i < 16 && (is_ff || is_00); i++) {
202 if (d[i] != 0x00) 203 if (d[i] != 0x00)
203 is_00 = 0; 204 is_00 = 0;
@@ -228,16 +229,17 @@ static void __init dmi_save_uuid(const struct dmi_header *dm, int slot,
228static void __init dmi_save_type(const struct dmi_header *dm, int slot, 229static void __init dmi_save_type(const struct dmi_header *dm, int slot,
229 int index) 230 int index)
230{ 231{
231 const u8 *d = (u8 *) dm + index; 232 const u8 *d;
232 char *s; 233 char *s;
233 234
234 if (dmi_ident[slot]) 235 if (dmi_ident[slot] || dm->length <= index)
235 return; 236 return;
236 237
237 s = dmi_alloc(4); 238 s = dmi_alloc(4);
238 if (!s) 239 if (!s)
239 return; 240 return;
240 241
242 d = (u8 *) dm + index;
241 sprintf(s, "%u", *d & 0x7F); 243 sprintf(s, "%u", *d & 0x7F);
242 dmi_ident[slot] = s; 244 dmi_ident[slot] = s;
243} 245}
@@ -278,9 +280,13 @@ static void __init dmi_save_devices(const struct dmi_header *dm)
278 280
279static void __init dmi_save_oem_strings_devices(const struct dmi_header *dm) 281static void __init dmi_save_oem_strings_devices(const struct dmi_header *dm)
280{ 282{
281 int i, count = *(u8 *)(dm + 1); 283 int i, count;
282 struct dmi_device *dev; 284 struct dmi_device *dev;
283 285
286 if (dm->length < 0x05)
287 return;
288
289 count = *(u8 *)(dm + 1);
284 for (i = 1; i <= count; i++) { 290 for (i = 1; i <= count; i++) {
285 const char *devname = dmi_string(dm, i); 291 const char *devname = dmi_string(dm, i);
286 292
@@ -353,6 +359,9 @@ static void __init dmi_save_extended_devices(const struct dmi_header *dm)
353 const char *name; 359 const char *name;
354 const u8 *d = (u8 *)dm; 360 const u8 *d = (u8 *)dm;
355 361
362 if (dm->length < 0x0B)
363 return;
364
356 /* Skip disabled device */ 365 /* Skip disabled device */
357 if ((d[0x5] & 0x80) == 0) 366 if ((d[0x5] & 0x80) == 0)
358 return; 367 return;
@@ -387,7 +396,7 @@ static void __init save_mem_devices(const struct dmi_header *dm, void *v)
387 const char *d = (const char *)dm; 396 const char *d = (const char *)dm;
388 static int nr; 397 static int nr;
389 398
390 if (dm->type != DMI_ENTRY_MEM_DEVICE) 399 if (dm->type != DMI_ENTRY_MEM_DEVICE || dm->length < 0x12)
391 return; 400 return;
392 if (nr >= dmi_memdev_nr) { 401 if (nr >= dmi_memdev_nr) {
393 pr_warn(FW_BUG "Too many DIMM entries in SMBIOS table\n"); 402 pr_warn(FW_BUG "Too many DIMM entries in SMBIOS table\n");
@@ -430,6 +439,7 @@ static void __init dmi_decode(const struct dmi_header *dm, void *dummy)
430 dmi_save_ident(dm, DMI_PRODUCT_VERSION, 6); 439 dmi_save_ident(dm, DMI_PRODUCT_VERSION, 6);
431 dmi_save_ident(dm, DMI_PRODUCT_SERIAL, 7); 440 dmi_save_ident(dm, DMI_PRODUCT_SERIAL, 7);
432 dmi_save_uuid(dm, DMI_PRODUCT_UUID, 8); 441 dmi_save_uuid(dm, DMI_PRODUCT_UUID, 8);
442 dmi_save_ident(dm, DMI_PRODUCT_FAMILY, 26);
433 break; 443 break;
434 case 2: /* Base Board Information */ 444 case 2: /* Base Board Information */
435 dmi_save_ident(dm, DMI_BOARD_VENDOR, 4); 445 dmi_save_ident(dm, DMI_BOARD_VENDOR, 4);
@@ -649,6 +659,21 @@ void __init dmi_scan_machine(void)
649 goto error; 659 goto error;
650 660
651 /* 661 /*
662 * Same logic as above, look for a 64-bit entry point
663 * first, and if not found, fall back to 32-bit entry point.
664 */
665 memcpy_fromio(buf, p, 16);
666 for (q = p + 16; q < p + 0x10000; q += 16) {
667 memcpy_fromio(buf + 16, q, 16);
668 if (!dmi_smbios3_present(buf)) {
669 dmi_available = 1;
670 dmi_early_unmap(p, 0x10000);
671 goto out;
672 }
673 memcpy(buf, buf + 16, 16);
674 }
675
676 /*
652 * Iterate over all possible DMI header addresses q. 677 * Iterate over all possible DMI header addresses q.
653 * Maintain the 32 bytes around q in buf. On the 678 * Maintain the 32 bytes around q in buf. On the
654 * first iteration, substitute zero for the 679 * first iteration, substitute zero for the
@@ -658,7 +683,7 @@ void __init dmi_scan_machine(void)
658 memset(buf, 0, 16); 683 memset(buf, 0, 16);
659 for (q = p; q < p + 0x10000; q += 16) { 684 for (q = p; q < p + 0x10000; q += 16) {
660 memcpy_fromio(buf + 16, q, 16); 685 memcpy_fromio(buf + 16, q, 16);
661 if (!dmi_smbios3_present(buf) || !dmi_present(buf)) { 686 if (!dmi_present(buf)) {
662 dmi_available = 1; 687 dmi_available = 1;
663 dmi_early_unmap(p, 0x10000); 688 dmi_early_unmap(p, 0x10000);
664 goto out; 689 goto out;
@@ -992,7 +1017,8 @@ EXPORT_SYMBOL(dmi_get_date);
992 * @decode: Callback function 1017 * @decode: Callback function
993 * @private_data: Private data to be passed to the callback function 1018 * @private_data: Private data to be passed to the callback function
994 * 1019 *
995 * Returns -1 when the DMI table can't be reached, 0 on success. 1020 * Returns 0 on success, -ENXIO if DMI is not selected or not present,
1021 * or a different negative error code if DMI walking fails.
996 */ 1022 */
997int dmi_walk(void (*decode)(const struct dmi_header *, void *), 1023int dmi_walk(void (*decode)(const struct dmi_header *, void *),
998 void *private_data) 1024 void *private_data)
@@ -1000,11 +1026,11 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
1000 u8 *buf; 1026 u8 *buf;
1001 1027
1002 if (!dmi_available) 1028 if (!dmi_available)
1003 return -1; 1029 return -ENXIO;
1004 1030
1005 buf = dmi_remap(dmi_base, dmi_len); 1031 buf = dmi_remap(dmi_base, dmi_len);
1006 if (buf == NULL) 1032 if (buf == NULL)
1007 return -1; 1033 return -ENOMEM;
1008 1034
1009 dmi_decode_table(buf, decode, private_data); 1035 dmi_decode_table(buf, decode, private_data);
1010 1036
diff --git a/drivers/firmware/efi/efi-bgrt.c b/drivers/firmware/efi/efi-bgrt.c
index 04ca8764f0c0..b58233e4ed71 100644
--- a/drivers/firmware/efi/efi-bgrt.c
+++ b/drivers/firmware/efi/efi-bgrt.c
@@ -27,6 +27,26 @@ struct bmp_header {
27 u32 size; 27 u32 size;
28} __packed; 28} __packed;
29 29
30static bool efi_bgrt_addr_valid(u64 addr)
31{
32 efi_memory_desc_t *md;
33
34 for_each_efi_memory_desc(md) {
35 u64 size;
36 u64 end;
37
38 if (md->type != EFI_BOOT_SERVICES_DATA)
39 continue;
40
41 size = md->num_pages << EFI_PAGE_SHIFT;
42 end = md->phys_addr + size;
43 if (addr >= md->phys_addr && addr < end)
44 return true;
45 }
46
47 return false;
48}
49
30void __init efi_bgrt_init(struct acpi_table_header *table) 50void __init efi_bgrt_init(struct acpi_table_header *table)
31{ 51{
32 void *image; 52 void *image;
@@ -36,6 +56,9 @@ void __init efi_bgrt_init(struct acpi_table_header *table)
36 if (acpi_disabled) 56 if (acpi_disabled)
37 return; 57 return;
38 58
59 if (!efi_enabled(EFI_MEMMAP))
60 return;
61
39 if (table->length < sizeof(bgrt_tab)) { 62 if (table->length < sizeof(bgrt_tab)) {
40 pr_notice("Ignoring BGRT: invalid length %u (expected %zu)\n", 63 pr_notice("Ignoring BGRT: invalid length %u (expected %zu)\n",
41 table->length, sizeof(bgrt_tab)); 64 table->length, sizeof(bgrt_tab));
@@ -62,6 +85,10 @@ void __init efi_bgrt_init(struct acpi_table_header *table)
62 goto out; 85 goto out;
63 } 86 }
64 87
88 if (!efi_bgrt_addr_valid(bgrt->image_address)) {
89 pr_notice("Ignoring BGRT: invalid image address\n");
90 goto out;
91 }
65 image = early_memremap(bgrt->image_address, sizeof(bmp_header)); 92 image = early_memremap(bgrt->image_address, sizeof(bmp_header));
66 if (!image) { 93 if (!image) {
67 pr_notice("Ignoring BGRT: failed to map image header memory\n"); 94 pr_notice("Ignoring BGRT: failed to map image header memory\n");
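The BGRT image address supplied by firmware is now validated against the EFI memory map (it must fall inside an EFI_BOOT_SERVICES_DATA region) before being mapped. The range test reduces to a simple walk; a standalone sketch with a hypothetical map-entry type:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12

    /* Hypothetical memory-map entry standing in for efi_memory_desc_t. */
    struct region {
        uint64_t phys_addr;
        uint64_t num_pages;
    };

    static bool addr_in_map(uint64_t addr, const struct region *map, int n)
    {
        for (int i = 0; i < n; i++) {
            uint64_t end = map[i].phys_addr + (map[i].num_pages << PAGE_SHIFT);

            if (addr >= map[i].phys_addr && addr < end)
                return true;
        }
        return false;
    }

    int main(void)
    {
        struct region map[] = { { 0x80000000, 16 } }; /* one 64 KiB region */

        printf("inside:  %d\n", addr_in_map(0x80001000, map, 1));
        printf("outside: %d\n", addr_in_map(0x90000000, map, 1));
        return 0;
    }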
diff --git a/drivers/firmware/efi/efi-pstore.c b/drivers/firmware/efi/efi-pstore.c
index ed3137c1ceb0..ef1fafdad400 100644
--- a/drivers/firmware/efi/efi-pstore.c
+++ b/drivers/firmware/efi/efi-pstore.c
@@ -53,6 +53,7 @@ static int efi_pstore_read_func(struct efivar_entry *entry,
53 if (sscanf(name, "dump-type%u-%u-%d-%lu-%c", 53 if (sscanf(name, "dump-type%u-%u-%d-%lu-%c",
54 &record->type, &part, &cnt, &time, &data_type) == 5) { 54 &record->type, &part, &cnt, &time, &data_type) == 5) {
55 record->id = generic_id(time, part, cnt); 55 record->id = generic_id(time, part, cnt);
56 record->part = part;
56 record->count = cnt; 57 record->count = cnt;
57 record->time.tv_sec = time; 58 record->time.tv_sec = time;
58 record->time.tv_nsec = 0; 59 record->time.tv_nsec = 0;
@@ -64,6 +65,7 @@ static int efi_pstore_read_func(struct efivar_entry *entry,
64 } else if (sscanf(name, "dump-type%u-%u-%d-%lu", 65 } else if (sscanf(name, "dump-type%u-%u-%d-%lu",
65 &record->type, &part, &cnt, &time) == 4) { 66 &record->type, &part, &cnt, &time) == 4) {
66 record->id = generic_id(time, part, cnt); 67 record->id = generic_id(time, part, cnt);
68 record->part = part;
67 record->count = cnt; 69 record->count = cnt;
68 record->time.tv_sec = time; 70 record->time.tv_sec = time;
69 record->time.tv_nsec = 0; 71 record->time.tv_nsec = 0;
@@ -77,6 +79,7 @@ static int efi_pstore_read_func(struct efivar_entry *entry,
77 * multiple logs, remains. 79 * multiple logs, remains.
78 */ 80 */
79 record->id = generic_id(time, part, 0); 81 record->id = generic_id(time, part, 0);
82 record->part = part;
80 record->count = 0; 83 record->count = 0;
81 record->time.tv_sec = time; 84 record->time.tv_sec = time;
82 record->time.tv_nsec = 0; 85 record->time.tv_nsec = 0;
@@ -155,19 +158,14 @@ static int efi_pstore_scan_sysfs_exit(struct efivar_entry *pos,
155 * efi_pstore_sysfs_entry_iter 158 * efi_pstore_sysfs_entry_iter
156 * 159 *
157 * @record: pstore record to pass to callback 160 * @record: pstore record to pass to callback
158 * @pos: entry to begin iterating from
159 * 161 *
160 * You MUST call efivar_enter_iter_begin() before this function, and 162 * You MUST call efivar_enter_iter_begin() before this function, and
161 * efivar_entry_iter_end() afterwards. 163 * efivar_entry_iter_end() afterwards.
162 * 164 *
163 * It is possible to begin iteration from an arbitrary entry within
164 * the list by passing @pos. @pos is updated on return to point to
165 * the next entry of the last one passed to efi_pstore_read_func().
166 * To begin iterating from the beginning of the list @pos must be %NULL.
167 */ 165 */
168static int efi_pstore_sysfs_entry_iter(struct pstore_record *record, 166static int efi_pstore_sysfs_entry_iter(struct pstore_record *record)
169 struct efivar_entry **pos)
170{ 167{
168 struct efivar_entry **pos = (struct efivar_entry **)&record->psi->data;
171 struct efivar_entry *entry, *n; 169 struct efivar_entry *entry, *n;
172 struct list_head *head = &efivar_sysfs_list; 170 struct list_head *head = &efivar_sysfs_list;
173 int size = 0; 171 int size = 0;
@@ -218,7 +216,6 @@ static int efi_pstore_sysfs_entry_iter(struct pstore_record *record,
218 */ 216 */
219static ssize_t efi_pstore_read(struct pstore_record *record) 217static ssize_t efi_pstore_read(struct pstore_record *record)
220{ 218{
221 struct efivar_entry *entry = (struct efivar_entry *)record->psi->data;
222 ssize_t size; 219 ssize_t size;
223 220
224 record->buf = kzalloc(EFIVARS_DATA_SIZE_MAX, GFP_KERNEL); 221 record->buf = kzalloc(EFIVARS_DATA_SIZE_MAX, GFP_KERNEL);
@@ -229,7 +226,7 @@ static ssize_t efi_pstore_read(struct pstore_record *record)
229 size = -EINTR; 226 size = -EINTR;
230 goto out; 227 goto out;
231 } 228 }
232 size = efi_pstore_sysfs_entry_iter(record, &entry); 229 size = efi_pstore_sysfs_entry_iter(record);
233 efivar_entry_iter_end(); 230 efivar_entry_iter_end();
234 231
235out: 232out:
@@ -247,9 +244,15 @@ static int efi_pstore_write(struct pstore_record *record)
247 efi_guid_t vendor = LINUX_EFI_CRASH_GUID; 244 efi_guid_t vendor = LINUX_EFI_CRASH_GUID;
248 int i, ret = 0; 245 int i, ret = 0;
249 246
247 record->time.tv_sec = get_seconds();
248 record->time.tv_nsec = 0;
249
250 record->id = generic_id(record->time.tv_sec, record->part,
251 record->count);
252
250 snprintf(name, sizeof(name), "dump-type%u-%u-%d-%lu-%c", 253 snprintf(name, sizeof(name), "dump-type%u-%u-%d-%lu-%c",
251 record->type, record->part, record->count, 254 record->type, record->part, record->count,
252 get_seconds(), record->compressed ? 'C' : 'D'); 255 record->time.tv_sec, record->compressed ? 'C' : 'D');
253 256
254 for (i = 0; i < DUMP_NAME_LEN; i++) 257 for (i = 0; i < DUMP_NAME_LEN; i++)
255 efi_name[i] = name[i]; 258 efi_name[i] = name[i];
@@ -261,7 +264,6 @@ static int efi_pstore_write(struct pstore_record *record)
261 if (record->reason == KMSG_DUMP_OOPS) 264 if (record->reason == KMSG_DUMP_OOPS)
262 efivar_run_worker(); 265 efivar_run_worker();
263 266
264 record->id = record->part;
265 return ret; 267 return ret;
266}; 268};
267 269
@@ -293,7 +295,7 @@ static int efi_pstore_erase_func(struct efivar_entry *entry, void *data)
293 * holding multiple logs, remains. 295 * holding multiple logs, remains.
294 */ 296 */
295 snprintf(name_old, sizeof(name_old), "dump-type%u-%u-%lu", 297 snprintf(name_old, sizeof(name_old), "dump-type%u-%u-%lu",
296 ed->record->type, (unsigned int)ed->record->id, 298 ed->record->type, ed->record->part,
297 ed->record->time.tv_sec); 299 ed->record->time.tv_sec);
298 300
299 for (i = 0; i < DUMP_NAME_LEN; i++) 301 for (i = 0; i < DUMP_NAME_LEN; i++)
@@ -326,10 +328,7 @@ static int efi_pstore_erase(struct pstore_record *record)
326 char name[DUMP_NAME_LEN]; 328 char name[DUMP_NAME_LEN];
327 efi_char16_t efi_name[DUMP_NAME_LEN]; 329 efi_char16_t efi_name[DUMP_NAME_LEN];
328 int found, i; 330 int found, i;
329 unsigned int part;
330 331
331 do_div(record->id, 1000);
332 part = do_div(record->id, 100);
333 snprintf(name, sizeof(name), "dump-type%u-%u-%d-%lu", 332 snprintf(name, sizeof(name), "dump-type%u-%u-%d-%lu",
334 record->type, record->part, record->count, 333 record->type, record->part, record->count,
335 record->time.tv_sec); 334 record->time.tv_sec);
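The efi-pstore hunks stop decoding the part number back out of record->id with do_div() and carry it in record->part instead, leaving the id as a purely derived value. Assuming the (time * 100 + part) * 1000 + count packing this driver uses for generic_id(), the old decode looked like this:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Assumed id packing: (time * 100 + part) * 1000 + count. */
    static uint64_t generic_id(unsigned long time, unsigned int part, int count)
    {
        return ((uint64_t)time * 100 + part) * 1000 + count;
    }

    int main(void)
    {
        uint64_t id = generic_id(1497000000UL, 7, 3);

        /* The decode the erase path used to perform: */
        unsigned int count = id % 1000;
        unsigned int part  = (id / 1000) % 100;

        printf("id=%" PRIu64 " -> part=%u count=%u\n", id, part, count);
        return 0;
    }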
diff --git a/drivers/firmware/efi/libstub/secureboot.c b/drivers/firmware/efi/libstub/secureboot.c
index 8c34d50a4d80..959777ec8a77 100644
--- a/drivers/firmware/efi/libstub/secureboot.c
+++ b/drivers/firmware/efi/libstub/secureboot.c
@@ -16,10 +16,10 @@
16 16
17/* BIOS variables */ 17/* BIOS variables */
18static const efi_guid_t efi_variable_guid = EFI_GLOBAL_VARIABLE_GUID; 18static const efi_guid_t efi_variable_guid = EFI_GLOBAL_VARIABLE_GUID;
19static const efi_char16_t const efi_SecureBoot_name[] = { 19static const efi_char16_t efi_SecureBoot_name[] = {
20 'S', 'e', 'c', 'u', 'r', 'e', 'B', 'o', 'o', 't', 0 20 'S', 'e', 'c', 'u', 'r', 'e', 'B', 'o', 'o', 't', 0
21}; 21};
22static const efi_char16_t const efi_SetupMode_name[] = { 22static const efi_char16_t efi_SetupMode_name[] = {
23 'S', 'e', 't', 'u', 'p', 'M', 'o', 'd', 'e', 0 23 'S', 'e', 't', 'u', 'p', 'M', 'o', 'd', 'e', 0
24}; 24};
25 25
diff --git a/drivers/firmware/google/vpd.c b/drivers/firmware/google/vpd.c
index 3ce813110d5e..31058d400bda 100644
--- a/drivers/firmware/google/vpd.c
+++ b/drivers/firmware/google/vpd.c
@@ -116,9 +116,13 @@ static int vpd_section_attrib_add(const u8 *key, s32 key_len,
116 return VPD_OK; 116 return VPD_OK;
117 117
118 info = kzalloc(sizeof(*info), GFP_KERNEL); 118 info = kzalloc(sizeof(*info), GFP_KERNEL);
119 info->key = kzalloc(key_len + 1, GFP_KERNEL); 119 if (!info)
120 if (!info->key)
121 return -ENOMEM; 120 return -ENOMEM;
121 info->key = kzalloc(key_len + 1, GFP_KERNEL);
122 if (!info->key) {
123 ret = -ENOMEM;
124 goto free_info;
125 }
122 126
123 memcpy(info->key, key, key_len); 127 memcpy(info->key, key, key_len);
124 128
@@ -132,15 +136,20 @@ static int vpd_section_attrib_add(const u8 *key, s32 key_len,
132 info->value = value; 136 info->value = value;
133 137
134 INIT_LIST_HEAD(&info->list); 138 INIT_LIST_HEAD(&info->list);
135 list_add_tail(&info->list, &sec->attribs);
136 139
137 ret = sysfs_create_bin_file(sec->kobj, &info->bin_attr); 140 ret = sysfs_create_bin_file(sec->kobj, &info->bin_attr);
138 if (ret) { 141 if (ret)
139 kfree(info->key); 142 goto free_info_key;
140 return ret;
141 }
142 143
144 list_add_tail(&info->list, &sec->attribs);
143 return 0; 145 return 0;
146
147free_info_key:
148 kfree(info->key);
149free_info:
150 kfree(info);
151
152 return ret;
144} 153}
145 154
146static void vpd_section_attrib_destroy(struct vpd_section *sec) 155static void vpd_section_attrib_destroy(struct vpd_section *sec)
@@ -149,8 +158,8 @@ static void vpd_section_attrib_destroy(struct vpd_section *sec)
149 struct vpd_attrib_info *temp; 158 struct vpd_attrib_info *temp;
150 159
151 list_for_each_entry_safe(info, temp, &sec->attribs, list) { 160 list_for_each_entry_safe(info, temp, &sec->attribs, list) {
152 kfree(info->key);
153 sysfs_remove_bin_file(sec->kobj, &info->bin_attr); 161 sysfs_remove_bin_file(sec->kobj, &info->bin_attr);
162 kfree(info->key);
154 kfree(info); 163 kfree(info);
155 } 164 }
156} 165}
@@ -235,7 +244,7 @@ static int vpd_section_destroy(struct vpd_section *sec)
235{ 244{
236 if (sec->enabled) { 245 if (sec->enabled) {
237 vpd_section_attrib_destroy(sec); 246 vpd_section_attrib_destroy(sec);
238 kobject_del(sec->kobj); 247 kobject_put(sec->kobj);
239 sysfs_remove_bin_file(vpd_kobj, &sec->bin_attr); 248 sysfs_remove_bin_file(vpd_kobj, &sec->bin_attr);
240 kfree(sec->raw_name); 249 kfree(sec->raw_name);
241 iounmap(sec->baseaddr); 250 iounmap(sec->baseaddr);
@@ -322,7 +331,7 @@ static void __exit vpd_platform_exit(void)
322{ 331{
323 vpd_section_destroy(&ro_vpd); 332 vpd_section_destroy(&ro_vpd);
324 vpd_section_destroy(&rw_vpd); 333 vpd_section_destroy(&rw_vpd);
325 kobject_del(vpd_kobj); 334 kobject_put(vpd_kobj);
326} 335}
327 336
328module_init(vpd_platform_init); 337module_init(vpd_platform_init);
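The vpd changes convert ad-hoc error handling into the usual goto-unwind ladder (free in reverse order of acquisition) and defer list_add_tail() until the sysfs file exists, so a partially built attribute is never visible. The shape of the ladder, in a standalone sketch:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct attrib {
        char *key;
        char *value;
    };

    /* Each acquisition gets a label; failures free everything taken
     * so far in reverse order, so callers never see a half-built object. */
    static struct attrib *attrib_create(const char *key, const char *value)
    {
        struct attrib *info = calloc(1, sizeof(*info));
        if (!info)
            return NULL;

        info->key = strdup(key);
        if (!info->key)
            goto free_info;

        info->value = strdup(value);
        if (!info->value)
            goto free_key;

        return info;

    free_key:
        free(info->key);
    free_info:
        free(info);
        return NULL;
    }

    int main(void)
    {
        struct attrib *a = attrib_create("serial", "ABC123");

        if (a) {
            printf("%s=%s\n", a->key, a->value);
            free(a->value);
            free(a->key);
            free(a);
        }
        return 0;
    }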
diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c
index 874ff32db366..00cfed3c3e1a 100644
--- a/drivers/firmware/ti_sci.c
+++ b/drivers/firmware/ti_sci.c
@@ -202,7 +202,8 @@ static int ti_sci_debugfs_create(struct platform_device *pdev,
202 info->debug_buffer[info->debug_region_size] = 0; 202 info->debug_buffer[info->debug_region_size] = 0;
203 203
204 info->d = debugfs_create_file(strncat(debug_name, dev_name(dev), 204 info->d = debugfs_create_file(strncat(debug_name, dev_name(dev),
205 sizeof(debug_name)), 205 sizeof(debug_name) -
206 sizeof("ti_sci_debug@")),
206 0444, NULL, info, &ti_sci_debug_fops); 207 0444, NULL, info, &ti_sci_debug_fops);
207 if (IS_ERR(info->d)) 208 if (IS_ERR(info->d))
208 return PTR_ERR(info->d); 209 return PTR_ERR(info->d);
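The ti_sci fix corrects a common strncat() misuse: the bound is the number of bytes strncat may append, not the size of the destination. Since debug_name already holds the "ti_sci_debug@" prefix, the remaining room is the difference of the two sizeofs, which is what the patch passes. A demonstration (buffer size chosen arbitrarily here):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char debug_name[50] = "ti_sci_debug@";

        /* The prefix consumes sizeof("ti_sci_debug@") - 1 bytes plus
         * the terminator, so the safe append budget is the difference
         * of the two sizeofs. */
        strncat(debug_name, "2b2e0000.sysc-controller",
                sizeof(debug_name) - sizeof("ti_sci_debug@"));

        printf("%s (len %zu, cap %zu)\n", debug_name,
               strlen(debug_name), sizeof(debug_name));
        return 0;
    }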
diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c
index ccea609676ee..4ca436e66bdb 100644
--- a/drivers/gpio/gpio-aspeed.c
+++ b/drivers/gpio/gpio-aspeed.c
@@ -646,6 +646,9 @@ static int enable_debounce(struct gpio_chip *chip, unsigned int offset,
646 int rc; 646 int rc;
647 int i; 647 int i;
648 648
649 if (!gpio->clk)
650 return -EINVAL;
651
649 rc = usecs_to_cycles(gpio, usecs, &requested_cycles); 652 rc = usecs_to_cycles(gpio, usecs, &requested_cycles);
650 if (rc < 0) { 653 if (rc < 0) {
651 dev_warn(chip->parent, "Failed to convert %luus to cycles at %luHz: %d\n", 654 dev_warn(chip->parent, "Failed to convert %luus to cycles at %luHz: %d\n",
diff --git a/drivers/gpio/gpio-crystalcove.c b/drivers/gpio/gpio-crystalcove.c
index 2197368cc899..e60156ec0c18 100644
--- a/drivers/gpio/gpio-crystalcove.c
+++ b/drivers/gpio/gpio-crystalcove.c
@@ -90,8 +90,18 @@ static inline int to_reg(int gpio, enum ctrl_register reg_type)
90{ 90{
91 int reg; 91 int reg;
92 92
93 if (gpio == 94) 93 if (gpio >= CRYSTALCOVE_GPIO_NUM) {
94 return GPIOPANELCTL; 94 /*
95 * Virtual GPIO called from ACPI, for now we only support
96 * the panel ctl.
97 */
98 switch (gpio) {
99 case 0x5e:
100 return GPIOPANELCTL;
101 default:
102 return -EOPNOTSUPP;
103 }
104 }
95 105
96 if (reg_type == CTRL_IN) { 106 if (reg_type == CTRL_IN) {
97 if (gpio < 8) 107 if (gpio < 8)
@@ -130,36 +140,36 @@ static void crystalcove_update_irq_ctrl(struct crystalcove_gpio *cg, int gpio)
130static int crystalcove_gpio_dir_in(struct gpio_chip *chip, unsigned gpio) 140static int crystalcove_gpio_dir_in(struct gpio_chip *chip, unsigned gpio)
131{ 141{
132 struct crystalcove_gpio *cg = gpiochip_get_data(chip); 142 struct crystalcove_gpio *cg = gpiochip_get_data(chip);
143 int reg = to_reg(gpio, CTRL_OUT);
133 144
134 if (gpio > CRYSTALCOVE_VGPIO_NUM) 145 if (reg < 0)
135 return 0; 146 return 0;
136 147
137 return regmap_write(cg->regmap, to_reg(gpio, CTRL_OUT), 148 return regmap_write(cg->regmap, reg, CTLO_INPUT_SET);
138 CTLO_INPUT_SET);
139} 149}
140 150
141static int crystalcove_gpio_dir_out(struct gpio_chip *chip, unsigned gpio, 151static int crystalcove_gpio_dir_out(struct gpio_chip *chip, unsigned gpio,
142 int value) 152 int value)
143{ 153{
144 struct crystalcove_gpio *cg = gpiochip_get_data(chip); 154 struct crystalcove_gpio *cg = gpiochip_get_data(chip);
155 int reg = to_reg(gpio, CTRL_OUT);
145 156
146 if (gpio > CRYSTALCOVE_VGPIO_NUM) 157 if (reg < 0)
147 return 0; 158 return 0;
148 159
149 return regmap_write(cg->regmap, to_reg(gpio, CTRL_OUT), 160 return regmap_write(cg->regmap, reg, CTLO_OUTPUT_SET | value);
150 CTLO_OUTPUT_SET | value);
151} 161}
152 162
153static int crystalcove_gpio_get(struct gpio_chip *chip, unsigned gpio) 163static int crystalcove_gpio_get(struct gpio_chip *chip, unsigned gpio)
154{ 164{
155 struct crystalcove_gpio *cg = gpiochip_get_data(chip); 165 struct crystalcove_gpio *cg = gpiochip_get_data(chip);
156 int ret;
157 unsigned int val; 166 unsigned int val;
167 int ret, reg = to_reg(gpio, CTRL_IN);
158 168
159 if (gpio > CRYSTALCOVE_VGPIO_NUM) 169 if (reg < 0)
160 return 0; 170 return 0;
161 171
162 ret = regmap_read(cg->regmap, to_reg(gpio, CTRL_IN), &val); 172 ret = regmap_read(cg->regmap, reg, &val);
163 if (ret) 173 if (ret)
164 return ret; 174 return ret;
165 175
@@ -170,14 +180,15 @@ static void crystalcove_gpio_set(struct gpio_chip *chip,
170 unsigned gpio, int value) 180 unsigned gpio, int value)
171{ 181{
172 struct crystalcove_gpio *cg = gpiochip_get_data(chip); 182 struct crystalcove_gpio *cg = gpiochip_get_data(chip);
183 int reg = to_reg(gpio, CTRL_OUT);
173 184
174 if (gpio > CRYSTALCOVE_VGPIO_NUM) 185 if (reg < 0)
175 return; 186 return;
176 187
177 if (value) 188 if (value)
178 regmap_update_bits(cg->regmap, to_reg(gpio, CTRL_OUT), 1, 1); 189 regmap_update_bits(cg->regmap, reg, 1, 1);
179 else 190 else
180 regmap_update_bits(cg->regmap, to_reg(gpio, CTRL_OUT), 1, 0); 191 regmap_update_bits(cg->regmap, reg, 1, 0);
181} 192}
182 193
183static int crystalcove_irq_type(struct irq_data *data, unsigned type) 194static int crystalcove_irq_type(struct irq_data *data, unsigned type)
@@ -185,6 +196,9 @@ static int crystalcove_irq_type(struct irq_data *data, unsigned type)
185 struct crystalcove_gpio *cg = 196 struct crystalcove_gpio *cg =
186 gpiochip_get_data(irq_data_get_irq_chip_data(data)); 197 gpiochip_get_data(irq_data_get_irq_chip_data(data));
187 198
199 if (data->hwirq >= CRYSTALCOVE_GPIO_NUM)
200 return 0;
201
188 switch (type) { 202 switch (type) {
189 case IRQ_TYPE_NONE: 203 case IRQ_TYPE_NONE:
190 cg->intcnt_value = CTLI_INTCNT_DIS; 204 cg->intcnt_value = CTLI_INTCNT_DIS;
@@ -235,8 +249,10 @@ static void crystalcove_irq_unmask(struct irq_data *data)
235 struct crystalcove_gpio *cg = 249 struct crystalcove_gpio *cg =
236 gpiochip_get_data(irq_data_get_irq_chip_data(data)); 250 gpiochip_get_data(irq_data_get_irq_chip_data(data));
237 251
238 cg->set_irq_mask = false; 252 if (data->hwirq < CRYSTALCOVE_GPIO_NUM) {
239 cg->update |= UPDATE_IRQ_MASK; 253 cg->set_irq_mask = false;
254 cg->update |= UPDATE_IRQ_MASK;
255 }
240} 256}
241 257
242static void crystalcove_irq_mask(struct irq_data *data) 258static void crystalcove_irq_mask(struct irq_data *data)
@@ -244,8 +260,10 @@ static void crystalcove_irq_mask(struct irq_data *data)
244 struct crystalcove_gpio *cg = 260 struct crystalcove_gpio *cg =
245 gpiochip_get_data(irq_data_get_irq_chip_data(data)); 261 gpiochip_get_data(irq_data_get_irq_chip_data(data));
246 262
247 cg->set_irq_mask = true; 263 if (data->hwirq < CRYSTALCOVE_GPIO_NUM) {
248 cg->update |= UPDATE_IRQ_MASK; 264 cg->set_irq_mask = true;
265 cg->update |= UPDATE_IRQ_MASK;
266 }
249} 267}
250 268
251static struct irq_chip crystalcove_irqchip = { 269static struct irq_chip crystalcove_irqchip = {
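The crystalcove rework centralizes validity checking: to_reg() translates a GPIO number to a register once and returns a negative value for unsupported virtual GPIOs, so every caller just tests the sign instead of open-coding range checks. A sketch of the pattern (register values hypothetical):

    #include <errno.h>
    #include <stdio.h>

    #define GPIO_NUM 16   /* hypothetical count of real lines */
    #define PANELCTL 0x52 /* hypothetical virtual-GPIO register */

    /* Translate once; report "unsupported" via a negative return. */
    static int to_reg(unsigned int gpio)
    {
        if (gpio >= GPIO_NUM) {
            switch (gpio) {
            case 0x5e:
                return PANELCTL;
            default:
                return -EOPNOTSUPP;
            }
        }
        return 0x10 + gpio; /* hypothetical control-register base */
    }

    int main(void)
    {
        printf("gpio 3    -> reg %#x\n", to_reg(3));
        printf("gpio 0x5e -> reg %#x\n", to_reg(0x5e));
        printf("gpio 0x60 -> %d (unsupported)\n", to_reg(0x60));
        return 0;
    }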
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index 19a92efabbef..c83ea68be792 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -721,7 +721,7 @@ static int mvebu_pwm_probe(struct platform_device *pdev,
721 u32 set; 721 u32 set;
722 722
723 if (!of_device_is_compatible(mvchip->chip.of_node, 723 if (!of_device_is_compatible(mvchip->chip.of_node,
724 "marvell,armada-370-xp-gpio")) 724 "marvell,armada-370-gpio"))
725 return 0; 725 return 0;
726 726
727 if (IS_ERR(mvchip->clk)) 727 if (IS_ERR(mvchip->clk))
@@ -747,7 +747,7 @@ static int mvebu_pwm_probe(struct platform_device *pdev,
747 set = U32_MAX; 747 set = U32_MAX;
748 else 748 else
749 return -EINVAL; 749 return -EINVAL;
750 writel_relaxed(0, mvebu_gpioreg_blink_counter_select(mvchip)); 750 writel_relaxed(set, mvebu_gpioreg_blink_counter_select(mvchip));
751 751
752 mvpwm = devm_kzalloc(dev, sizeof(struct mvebu_pwm), GFP_KERNEL); 752 mvpwm = devm_kzalloc(dev, sizeof(struct mvebu_pwm), GFP_KERNEL);
753 if (!mvpwm) 753 if (!mvpwm)
@@ -768,6 +768,13 @@ static int mvebu_pwm_probe(struct platform_device *pdev,
768 mvpwm->chip.dev = dev; 768 mvpwm->chip.dev = dev;
769 mvpwm->chip.ops = &mvebu_pwm_ops; 769 mvpwm->chip.ops = &mvebu_pwm_ops;
770 mvpwm->chip.npwm = mvchip->chip.ngpio; 770 mvpwm->chip.npwm = mvchip->chip.ngpio;
771 /*
772 * There may already be some PWM allocated, so we can't force
773 * mvpwm->chip.base to a fixed point like mvchip->chip.base.
774 * So, we let pwmchip_add() do the numbering and take the next free
775 * region.
776 */
777 mvpwm->chip.base = -1;
771 778
772 spin_lock_init(&mvpwm->lock); 779 spin_lock_init(&mvpwm->lock);
773 780
@@ -845,7 +852,7 @@ static const struct of_device_id mvebu_gpio_of_match[] = {
845 .data = (void *) MVEBU_GPIO_SOC_VARIANT_ARMADAXP, 852 .data = (void *) MVEBU_GPIO_SOC_VARIANT_ARMADAXP,
846 }, 853 },
847 { 854 {
848 .compatible = "marvell,armada-370-xp-gpio", 855 .compatible = "marvell,armada-370-gpio",
849 .data = (void *) MVEBU_GPIO_SOC_VARIANT_ORION, 856 .data = (void *) MVEBU_GPIO_SOC_VARIANT_ORION,
850 }, 857 },
851 { 858 {
@@ -1121,7 +1128,7 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
1121 mvchip); 1128 mvchip);
1122 } 1129 }
1123 1130
1124 /* Armada 370/XP has simple PWM support for GPIO lines */ 1131 /* Some MVEBU SoCs have simple PWM support for GPIO lines */
1125 if (IS_ENABLED(CONFIG_PWM)) 1132 if (IS_ENABLED(CONFIG_PWM))
1126 return mvebu_pwm_probe(pdev, mvchip, id); 1133 return mvebu_pwm_probe(pdev, mvchip, id);
1127 1134
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 2185232da823..8fa5fcd00e9a 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -201,7 +201,7 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
201 handler = acpi_gpio_irq_handler_evt; 201 handler = acpi_gpio_irq_handler_evt;
202 } 202 }
203 if (!handler) 203 if (!handler)
204 return AE_BAD_PARAMETER; 204 return AE_OK;
205 205
206 pin = acpi_gpiochip_pin_to_gpio_offset(chip->gpiodev, pin); 206 pin = acpi_gpiochip_pin_to_gpio_offset(chip->gpiodev, pin);
207 if (pin < 0) 207 if (pin < 0)
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 5db44139cef8..a42a1eea5714 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -708,7 +708,8 @@ static irqreturn_t lineevent_irq_thread(int irq, void *p)
708 708
709 ge.timestamp = ktime_get_real_ns(); 709 ge.timestamp = ktime_get_real_ns();
710 710
711 if (le->eflags & GPIOEVENT_REQUEST_BOTH_EDGES) { 711 if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE
712 && le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) {
712 int level = gpiod_get_value_cansleep(le->desc); 713 int level = gpiod_get_value_cansleep(le->desc);
713 714
714 if (level) 715 if (level)
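The gpiolib fix is about mask semantics: GPIOEVENT_REQUEST_BOTH_EDGES is the OR of the rising and falling bits, so flags & BOTH_EDGES is nonzero whenever either edge was requested. Detecting "both requested" needs each bit tested separately, as the patch does. Demonstrated:

    #include <stdio.h>

    #define RISING  (1u << 0)
    #define FALLING (1u << 1)
    #define BOTH    (RISING | FALLING)

    int main(void)
    {
        unsigned int eflags = RISING; /* only one edge requested */

        /* Buggy test: nonzero whenever either bit is set. */
        printf("flags & BOTH         -> %s\n",
               (eflags & BOTH) ? "true (wrong)" : "false");

        /* Fixed test: require both bits individually. */
        printf("(flags & R) && (& F) -> %s\n",
               ((eflags & RISING) && (eflags & FALLING))
                   ? "true" : "false (correct)");
        return 0;
    }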
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index 1cf78f4dd339..1e8e1123ddf4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -693,6 +693,10 @@ int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev)
693 DRM_INFO("Changing default dispclk from %dMhz to 600Mhz\n", 693 DRM_INFO("Changing default dispclk from %dMhz to 600Mhz\n",
694 adev->clock.default_dispclk / 100); 694 adev->clock.default_dispclk / 100);
695 adev->clock.default_dispclk = 60000; 695 adev->clock.default_dispclk = 60000;
696 } else if (adev->clock.default_dispclk <= 60000) {
697 DRM_INFO("Changing default dispclk from %dMhz to 625Mhz\n",
698 adev->clock.default_dispclk / 100);
699 adev->clock.default_dispclk = 62500;
696 } 700 }
697 adev->clock.dp_extclk = 701 adev->clock.dp_extclk =
698 le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq); 702 le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index f2d705e6a75a..ab6b0d0febab 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -449,6 +449,7 @@ static const struct pci_device_id pciidlist[] = {
449 {0x1002, 0x6986, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, 449 {0x1002, 0x6986, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
450 {0x1002, 0x6987, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, 450 {0x1002, 0x6987, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
451 {0x1002, 0x6995, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, 451 {0x1002, 0x6995, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
452 {0x1002, 0x6997, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
452 {0x1002, 0x699F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, 453 {0x1002, 0x699F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
453 /* Vega 10 */ 454 /* Vega 10 */
454 {0x1002, 0x6860, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT}, 455 {0x1002, 0x6860, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index 236d9950221b..c0d8c6ff6380 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -425,10 +425,15 @@ bool amdgpu_fbdev_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj)
425 425
426void amdgpu_fbdev_restore_mode(struct amdgpu_device *adev) 426void amdgpu_fbdev_restore_mode(struct amdgpu_device *adev)
427{ 427{
428 struct amdgpu_fbdev *afbdev = adev->mode_info.rfbdev; 428 struct amdgpu_fbdev *afbdev;
429 struct drm_fb_helper *fb_helper; 429 struct drm_fb_helper *fb_helper;
430 int ret; 430 int ret;
431 431
432 if (!adev)
433 return;
434
435 afbdev = adev->mode_info.rfbdev;
436
432 if (!afbdev) 437 if (!afbdev)
433 return; 438 return;
434 439
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 07ff3b1514f1..8ecf82c5fe74 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -634,7 +634,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
634 mutex_unlock(&id_mgr->lock); 634 mutex_unlock(&id_mgr->lock);
635 } 635 }
636 636
637 if (gds_switch_needed) { 637 if (ring->funcs->emit_gds_switch && gds_switch_needed) {
638 id->gds_base = job->gds_base; 638 id->gds_base = job->gds_base;
639 id->gds_size = job->gds_size; 639 id->gds_size = job->gds_size;
640 id->gws_base = job->gws_base; 640 id->gws_base = job->gws_base;
@@ -672,6 +672,7 @@ void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vmhub,
672 struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub]; 672 struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
673 struct amdgpu_vm_id *id = &id_mgr->ids[vmid]; 673 struct amdgpu_vm_id *id = &id_mgr->ids[vmid];
674 674
675 atomic64_set(&id->owner, 0);
675 id->gds_base = 0; 676 id->gds_base = 0;
676 id->gds_size = 0; 677 id->gds_size = 0;
677 id->gws_base = 0; 678 id->gws_base = 0;
@@ -681,6 +682,26 @@ void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vmhub,
681} 682}
682 683
683/** 684/**
685 * amdgpu_vm_reset_all_ids - reset all VMIDs to zero
686 *
687 * @adev: amdgpu device structure
688 *
689 * Reset all VMIDs to force a flush on their next use
690 */
691void amdgpu_vm_reset_all_ids(struct amdgpu_device *adev)
692{
693 unsigned i, j;
694
695 for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
696 struct amdgpu_vm_id_manager *id_mgr =
697 &adev->vm_manager.id_mgr[i];
698
699 for (j = 1; j < id_mgr->num_ids; ++j)
700 amdgpu_vm_reset_id(adev, i, j);
701 }
702}
703
704/**
684 * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo 705 * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
685 * 706 *
686 * @vm: requested vm 707 * @vm: requested vm
@@ -2270,7 +2291,6 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
2270 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) 2291 for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
2271 adev->vm_manager.seqno[i] = 0; 2292 adev->vm_manager.seqno[i] = 0;
2272 2293
2273
2274 atomic_set(&adev->vm_manager.vm_pte_next_ring, 0); 2294 atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
2275 atomic64_set(&adev->vm_manager.client_counter, 0); 2295 atomic64_set(&adev->vm_manager.client_counter, 0);
2276 spin_lock_init(&adev->vm_manager.prt_lock); 2296 spin_lock_init(&adev->vm_manager.prt_lock);
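The new amdgpu_vm_reset_all_ids() walks every VM hub and resets each ID, starting the inner loop at 1 because (an assumption worth flagging, implied by the j = 1 start above) VMID 0 is reserved and never handed out. The walk in miniature:

    #include <stdio.h>

    #define MAX_HUBS 2
    #define NUM_IDS  8

    struct vm_id { unsigned long owner; };

    static struct vm_id ids[MAX_HUBS][NUM_IDS];

    /* VMID 0 of each hub is skipped, assumed reserved. */
    static void reset_all_ids(void)
    {
        for (int i = 0; i < MAX_HUBS; i++)
            for (int j = 1; j < NUM_IDS; j++)
                ids[i][j].owner = 0;
    }

    int main(void)
    {
        ids[0][3].owner = 42;
        reset_all_ids();
        printf("owner after reset: %lu\n", ids[0][3].owner);
        return 0;
    }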
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index d97e28b4bdc4..e1d951ece433 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -204,6 +204,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
204int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job); 204int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job);
205void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vmhub, 205void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vmhub,
206 unsigned vmid); 206 unsigned vmid);
207void amdgpu_vm_reset_all_ids(struct amdgpu_device *adev);
207int amdgpu_vm_update_directories(struct amdgpu_device *adev, 208int amdgpu_vm_update_directories(struct amdgpu_device *adev,
208 struct amdgpu_vm *vm); 209 struct amdgpu_vm *vm);
209int amdgpu_vm_clear_freed(struct amdgpu_device *adev, 210int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index a4831fe0223b..a2c59a08b2bd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -220,9 +220,9 @@ static void amdgpu_vram_mgr_debug(struct ttm_mem_type_manager *man,
220} 220}
221 221
222const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func = { 222const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func = {
223 amdgpu_vram_mgr_init, 223 .init = amdgpu_vram_mgr_init,
224 amdgpu_vram_mgr_fini, 224 .takedown = amdgpu_vram_mgr_fini,
225 amdgpu_vram_mgr_new, 225 .get_node = amdgpu_vram_mgr_new,
226 amdgpu_vram_mgr_del, 226 .put_node = amdgpu_vram_mgr_del,
227 amdgpu_vram_mgr_debug 227 .debug = amdgpu_vram_mgr_debug
228}; 228};
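The vram_mgr hunk switches the ops table to designated initializers. Positional initialization of a callback struct silently mismatches members if the struct is ever reordered or grows a field, while .name = binding is order-independent. An illustration:

    #include <stdio.h>

    struct ops {
        int  (*init)(void);
        void (*fini)(void);
        void (*debug)(void);
    };

    static int  my_init(void)  { puts("init");  return 0; }
    static void my_fini(void)  { puts("fini");  }
    static void my_debug(void) { puts("debug"); }

    /* Designated initializers bind each function to its slot by name,
     * so inserting or reordering members in struct ops can no longer
     * silently swap callbacks. */
    static const struct ops my_ops = {
        .init  = my_init,
        .fini  = my_fini,
        .debug = my_debug,
    };

    int main(void)
    {
        my_ops.init();
        my_ops.debug();
        my_ops.fini();
        return 0;
    }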
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
index 8c9bc75a9c2d..8a0818b23ea4 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
@@ -165,7 +165,7 @@ void amdgpu_atombios_crtc_powergate(struct drm_crtc *crtc, int state)
165 struct drm_device *dev = crtc->dev; 165 struct drm_device *dev = crtc->dev;
166 struct amdgpu_device *adev = dev->dev_private; 166 struct amdgpu_device *adev = dev->dev_private;
167 int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating); 167 int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating);
168 ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 args; 168 ENABLE_DISP_POWER_GATING_PS_ALLOCATION args;
169 169
170 memset(&args, 0, sizeof(args)); 170 memset(&args, 0, sizeof(args));
171 171
@@ -178,7 +178,7 @@ void amdgpu_atombios_crtc_powergate(struct drm_crtc *crtc, int state)
178void amdgpu_atombios_crtc_powergate_init(struct amdgpu_device *adev) 178void amdgpu_atombios_crtc_powergate_init(struct amdgpu_device *adev)
179{ 179{
180 int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating); 180 int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating);
181 ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 args; 181 ENABLE_DISP_POWER_GATING_PS_ALLOCATION args;
182 182
183 memset(&args, 0, sizeof(args)); 183 memset(&args, 0, sizeof(args));
184 184
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index 6dc1410b380f..ec93714e4524 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -906,6 +906,12 @@ static bool ci_dpm_vblank_too_short(struct amdgpu_device *adev)
906 u32 vblank_time = amdgpu_dpm_get_vblank_time(adev); 906 u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
907 u32 switch_limit = adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 300; 907 u32 switch_limit = adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 300;
908 908
909 /* disable mclk switching if the refresh is >120Hz, even if the
910 * blanking period would allow it
911 */
912 if (amdgpu_dpm_get_vrefresh(adev) > 120)
913 return true;
914
909 if (vblank_time < switch_limit) 915 if (vblank_time < switch_limit)
910 return true; 916 return true;
911 else 917 else
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 0cdeb6a2e4a0..5dffa27afa45 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -1207,8 +1207,11 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev,
1207 u32 tmp, wm_mask, lb_vblank_lead_lines = 0; 1207 u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
1208 1208
1209 if (amdgpu_crtc->base.enabled && num_heads && mode) { 1209 if (amdgpu_crtc->base.enabled && num_heads && mode) {
1210 active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock; 1210 active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
1211 line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535); 1211 (u32)mode->clock);
1212 line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
1213 (u32)mode->clock);
1214 line_time = min(line_time, (u32)65535);
1212 1215
1213 /* watermark for high clocks */ 1216 /* watermark for high clocks */
1214 if (adev->pm.dpm_enabled) { 1217 if (adev->pm.dpm_enabled) {
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 773654a19749..47bbc87f96d2 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -1176,8 +1176,11 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev,
1176 u32 tmp, wm_mask, lb_vblank_lead_lines = 0; 1176 u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
1177 1177
1178 if (amdgpu_crtc->base.enabled && num_heads && mode) { 1178 if (amdgpu_crtc->base.enabled && num_heads && mode) {
1179 active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock; 1179 active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
1180 line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535); 1180 (u32)mode->clock);
1181 line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
1182 (u32)mode->clock);
1183 line_time = min(line_time, (u32)65535);
1181 1184
1182 /* watermark for high clocks */ 1185 /* watermark for high clocks */
1183 if (adev->pm.dpm_enabled) { 1186 if (adev->pm.dpm_enabled) {
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index 1f3552967ba3..d8c9a959493e 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -983,8 +983,11 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
983 fixed20_12 a, b, c; 983 fixed20_12 a, b, c;
984 984
985 if (amdgpu_crtc->base.enabled && num_heads && mode) { 985 if (amdgpu_crtc->base.enabled && num_heads && mode) {
986 active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock; 986 active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
987 line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535); 987 (u32)mode->clock);
988 line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
989 (u32)mode->clock);
990 line_time = min(line_time, (u32)65535);
988 priority_a_cnt = 0; 991 priority_a_cnt = 0;
989 priority_b_cnt = 0; 992 priority_b_cnt = 0;
990 993
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 3c558c170e5e..db30c6ba563a 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -1091,8 +1091,11 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
1091 u32 tmp, wm_mask, lb_vblank_lead_lines = 0; 1091 u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
1092 1092
1093 if (amdgpu_crtc->base.enabled && num_heads && mode) { 1093 if (amdgpu_crtc->base.enabled && num_heads && mode) {
1094 active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock; 1094 active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
1095 line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535); 1095 (u32)mode->clock);
1096 line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
1097 (u32)mode->clock);
1098 line_time = min(line_time, (u32)65535);
1096 1099
1097 /* watermark for high clocks */ 1100 /* watermark for high clocks */
1098 if (adev->pm.dpm_enabled) { 1101 if (adev->pm.dpm_enabled) {
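All four dce_v*_0 watermark hunks fix the same 32-bit overflow: crtc_hdisplay * 1000000 can exceed U32_MAX for wide display modes, so the multiply is widened to 64 bits (div_u64 in the kernel) before dividing by the pixel clock. The difference, in a standalone sketch:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t hdisplay = 5120;   /* wide mode: 5120 * 1e6 > UINT32_MAX */
        uint32_t clock    = 600000; /* pixel clock in kHz */

        /* 32-bit product wraps before the divide. */
        uint32_t bad = (hdisplay * 1000000u) / clock;

        /* Widening to 64 bits first, as the patch does via div_u64: */
        uint32_t good = (uint32_t)(((uint64_t)hdisplay * 1000000) / clock);

        printf("32-bit math: %" PRIu32 " us (truncated)\n", bad);
        printf("64-bit math: %" PRIu32 " us\n", good);
        return 0;
    }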
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index a572979f186c..d860939152df 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -950,10 +950,6 @@ static int gmc_v6_0_suspend(void *handle)
950{ 950{
951 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 951 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
952 952
953 if (adev->vm_manager.enabled) {
954 gmc_v6_0_vm_fini(adev);
955 adev->vm_manager.enabled = false;
956 }
957 gmc_v6_0_hw_fini(adev); 953 gmc_v6_0_hw_fini(adev);
958 954
959 return 0; 955 return 0;
@@ -968,16 +964,9 @@ static int gmc_v6_0_resume(void *handle)
968 if (r) 964 if (r)
969 return r; 965 return r;
970 966
971 if (!adev->vm_manager.enabled) { 967 amdgpu_vm_reset_all_ids(adev);
972 r = gmc_v6_0_vm_init(adev);
973 if (r) {
974 dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
975 return r;
976 }
977 adev->vm_manager.enabled = true;
978 }
979 968
980 return r; 969 return 0;
981} 970}
982 971
983static bool gmc_v6_0_is_idle(void *handle) 972static bool gmc_v6_0_is_idle(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index a9083a16a250..2750e5c23813 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -1117,10 +1117,6 @@ static int gmc_v7_0_suspend(void *handle)
1117{ 1117{
1118 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1118 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1119 1119
1120 if (adev->vm_manager.enabled) {
1121 gmc_v7_0_vm_fini(adev);
1122 adev->vm_manager.enabled = false;
1123 }
1124 gmc_v7_0_hw_fini(adev); 1120 gmc_v7_0_hw_fini(adev);
1125 1121
1126 return 0; 1122 return 0;
@@ -1135,16 +1131,9 @@ static int gmc_v7_0_resume(void *handle)
1135 if (r) 1131 if (r)
1136 return r; 1132 return r;
1137 1133
1138 if (!adev->vm_manager.enabled) { 1134 amdgpu_vm_reset_all_ids(adev);
1139 r = gmc_v7_0_vm_init(adev);
1140 if (r) {
1141 dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
1142 return r;
1143 }
1144 adev->vm_manager.enabled = true;
1145 }
1146 1135
1147 return r; 1136 return 0;
1148} 1137}
1149 1138
1150static bool gmc_v7_0_is_idle(void *handle) 1139static bool gmc_v7_0_is_idle(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 4ac99784160a..f56b4089ee9f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -1209,10 +1209,6 @@ static int gmc_v8_0_suspend(void *handle)
1209{ 1209{
1210 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1210 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1211 1211
1212 if (adev->vm_manager.enabled) {
1213 gmc_v8_0_vm_fini(adev);
1214 adev->vm_manager.enabled = false;
1215 }
1216 gmc_v8_0_hw_fini(adev); 1212 gmc_v8_0_hw_fini(adev);
1217 1213
1218 return 0; 1214 return 0;
@@ -1227,16 +1223,9 @@ static int gmc_v8_0_resume(void *handle)
1227 if (r) 1223 if (r)
1228 return r; 1224 return r;
1229 1225
1230 if (!adev->vm_manager.enabled) { 1226 amdgpu_vm_reset_all_ids(adev);
1231 r = gmc_v8_0_vm_init(adev);
1232 if (r) {
1233 dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
1234 return r;
1235 }
1236 adev->vm_manager.enabled = true;
1237 }
1238 1227
1239 return r; 1228 return 0;
1240} 1229}
1241 1230
1242static bool gmc_v8_0_is_idle(void *handle) 1231static bool gmc_v8_0_is_idle(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index dc1e1c1d6b24..f936332a069d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -791,10 +791,6 @@ static int gmc_v9_0_suspend(void *handle)
791{ 791{
792 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 792 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
793 793
794 if (adev->vm_manager.enabled) {
795 gmc_v9_0_vm_fini(adev);
796 adev->vm_manager.enabled = false;
797 }
798 gmc_v9_0_hw_fini(adev); 794 gmc_v9_0_hw_fini(adev);
799 795
800 return 0; 796 return 0;
@@ -809,17 +805,9 @@ static int gmc_v9_0_resume(void *handle)
809 if (r) 805 if (r)
810 return r; 806 return r;
811 807
812 if (!adev->vm_manager.enabled) { 808 amdgpu_vm_reset_all_ids(adev);
813 r = gmc_v9_0_vm_init(adev);
814 if (r) {
815 dev_err(adev->dev,
816 "vm manager initialization failed (%d).\n", r);
817 return r;
818 }
819 adev->vm_manager.enabled = true;
820 }
821 809
822 return r; 810 return 0;
823} 811}
824 812
825static bool gmc_v9_0_is_idle(void *handle) 813static bool gmc_v9_0_is_idle(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index fb0819359909..90332f55cfba 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -77,13 +77,26 @@ static int vce_v3_0_set_clockgating_state(void *handle,
77static uint64_t vce_v3_0_ring_get_rptr(struct amdgpu_ring *ring) 77static uint64_t vce_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
78{ 78{
79 struct amdgpu_device *adev = ring->adev; 79 struct amdgpu_device *adev = ring->adev;
80 u32 v;
81
82 mutex_lock(&adev->grbm_idx_mutex);
83 if (adev->vce.harvest_config == 0 ||
84 adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE1)
85 WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
86 else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
87 WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
80 88
81 if (ring == &adev->vce.ring[0]) 89 if (ring == &adev->vce.ring[0])
82 return RREG32(mmVCE_RB_RPTR); 90 v = RREG32(mmVCE_RB_RPTR);
83 else if (ring == &adev->vce.ring[1]) 91 else if (ring == &adev->vce.ring[1])
84 return RREG32(mmVCE_RB_RPTR2); 92 v = RREG32(mmVCE_RB_RPTR2);
85 else 93 else
86 return RREG32(mmVCE_RB_RPTR3); 94 v = RREG32(mmVCE_RB_RPTR3);
95
96 WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
97 mutex_unlock(&adev->grbm_idx_mutex);
98
99 return v;
87} 100}
88 101
89/** 102/**
@@ -96,13 +109,26 @@ static uint64_t vce_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
96static uint64_t vce_v3_0_ring_get_wptr(struct amdgpu_ring *ring) 109static uint64_t vce_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
97{ 110{
98 struct amdgpu_device *adev = ring->adev; 111 struct amdgpu_device *adev = ring->adev;
112 u32 v;
113
114 mutex_lock(&adev->grbm_idx_mutex);
115 if (adev->vce.harvest_config == 0 ||
116 adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE1)
117 WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
118 else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
119 WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
99 120
100 if (ring == &adev->vce.ring[0]) 121 if (ring == &adev->vce.ring[0])
101 return RREG32(mmVCE_RB_WPTR); 122 v = RREG32(mmVCE_RB_WPTR);
102 else if (ring == &adev->vce.ring[1]) 123 else if (ring == &adev->vce.ring[1])
103 return RREG32(mmVCE_RB_WPTR2); 124 v = RREG32(mmVCE_RB_WPTR2);
104 else 125 else
105 return RREG32(mmVCE_RB_WPTR3); 126 v = RREG32(mmVCE_RB_WPTR3);
127
128 WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
129 mutex_unlock(&adev->grbm_idx_mutex);
130
131 return v;
106} 132}
107 133
108/** 134/**
@@ -116,12 +142,22 @@ static void vce_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
116{ 142{
117 struct amdgpu_device *adev = ring->adev; 143 struct amdgpu_device *adev = ring->adev;
118 144
145 mutex_lock(&adev->grbm_idx_mutex);
146 if (adev->vce.harvest_config == 0 ||
147 adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE1)
148 WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
149 else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
150 WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
151
119 if (ring == &adev->vce.ring[0]) 152 if (ring == &adev->vce.ring[0])
120 WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr)); 153 WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
121 else if (ring == &adev->vce.ring[1]) 154 else if (ring == &adev->vce.ring[1])
122 WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr)); 155 WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
123 else 156 else
124 WREG32(mmVCE_RB_WPTR3, lower_32_bits(ring->wptr)); 157 WREG32(mmVCE_RB_WPTR3, lower_32_bits(ring->wptr));
158
159 WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
160 mutex_unlock(&adev->grbm_idx_mutex);
125} 161}
126 162
127static void vce_v3_0_override_vce_clock_gating(struct amdgpu_device *adev, bool override) 163static void vce_v3_0_override_vce_clock_gating(struct amdgpu_device *adev, bool override)
@@ -231,33 +267,38 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
231 struct amdgpu_ring *ring; 267 struct amdgpu_ring *ring;
232 int idx, r; 268 int idx, r;
233 269
234 ring = &adev->vce.ring[0];
235 WREG32(mmVCE_RB_RPTR, lower_32_bits(ring->wptr));
236 WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
237 WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
238 WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
239 WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);
240
241 ring = &adev->vce.ring[1];
242 WREG32(mmVCE_RB_RPTR2, lower_32_bits(ring->wptr));
243 WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
244 WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
245 WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
246 WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);
247
248 ring = &adev->vce.ring[2];
249 WREG32(mmVCE_RB_RPTR3, lower_32_bits(ring->wptr));
250 WREG32(mmVCE_RB_WPTR3, lower_32_bits(ring->wptr));
251 WREG32(mmVCE_RB_BASE_LO3, ring->gpu_addr);
252 WREG32(mmVCE_RB_BASE_HI3, upper_32_bits(ring->gpu_addr));
253 WREG32(mmVCE_RB_SIZE3, ring->ring_size / 4);
254
255 mutex_lock(&adev->grbm_idx_mutex); 270 mutex_lock(&adev->grbm_idx_mutex);
256 for (idx = 0; idx < 2; ++idx) { 271 for (idx = 0; idx < 2; ++idx) {
257 if (adev->vce.harvest_config & (1 << idx)) 272 if (adev->vce.harvest_config & (1 << idx))
258 continue; 273 continue;
259 274
260 WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx)); 275 WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx));
276
277 /* Program instance 0's reg space when both instances (or only
278 instance 0) are present; instance 1's when only it is available */
279 if (idx != 1 || adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0) {
280 ring = &adev->vce.ring[0];
281 WREG32(mmVCE_RB_RPTR, lower_32_bits(ring->wptr));
282 WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
283 WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
284 WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
285 WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);
286
287 ring = &adev->vce.ring[1];
288 WREG32(mmVCE_RB_RPTR2, lower_32_bits(ring->wptr));
289 WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
290 WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
291 WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
292 WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);
293
294 ring = &adev->vce.ring[2];
295 WREG32(mmVCE_RB_RPTR3, lower_32_bits(ring->wptr));
296 WREG32(mmVCE_RB_WPTR3, lower_32_bits(ring->wptr));
297 WREG32(mmVCE_RB_BASE_LO3, ring->gpu_addr);
298 WREG32(mmVCE_RB_BASE_HI3, upper_32_bits(ring->gpu_addr));
299 WREG32(mmVCE_RB_SIZE3, ring->ring_size / 4);
300 }
301
261 vce_v3_0_mc_resume(adev, idx); 302 vce_v3_0_mc_resume(adev, idx);
262 WREG32_FIELD(VCE_STATUS, JOB_BUSY, 1); 303 WREG32_FIELD(VCE_STATUS, JOB_BUSY, 1);
263 304
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index a74a3db3056c..102eb6d029fa 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -2655,6 +2655,28 @@ static int smu7_get_power_state_size(struct pp_hwmgr *hwmgr)
2655 return sizeof(struct smu7_power_state); 2655 return sizeof(struct smu7_power_state);
2656} 2656}
2657 2657
2658static int smu7_vblank_too_short(struct pp_hwmgr *hwmgr,
2659 uint32_t vblank_time_us)
2660{
2661 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2662 uint32_t switch_limit_us;
2663
2664 switch (hwmgr->chip_id) {
2665 case CHIP_POLARIS10:
2666 case CHIP_POLARIS11:
2667 case CHIP_POLARIS12:
2668 switch_limit_us = data->is_memory_gddr5 ? 190 : 150;
2669 break;
2670 default:
2671 switch_limit_us = data->is_memory_gddr5 ? 450 : 150;
2672 break;
2673 }
2674
2675 if (vblank_time_us < switch_limit_us)
2676 return true;
2677 else
2678 return false;
2679}
2658 2680
2659static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, 2681static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
2660 struct pp_power_state *request_ps, 2682 struct pp_power_state *request_ps,
@@ -2669,6 +2691,7 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
2669 bool disable_mclk_switching; 2691 bool disable_mclk_switching;
2670 bool disable_mclk_switching_for_frame_lock; 2692 bool disable_mclk_switching_for_frame_lock;
2671 struct cgs_display_info info = {0}; 2693 struct cgs_display_info info = {0};
2694 struct cgs_mode_info mode_info = {0};
2672 const struct phm_clock_and_voltage_limits *max_limits; 2695 const struct phm_clock_and_voltage_limits *max_limits;
2673 uint32_t i; 2696 uint32_t i;
2674 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 2697 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
@@ -2677,6 +2700,7 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
2677 int32_t count; 2700 int32_t count;
2678 int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0; 2701 int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;
2679 2702
2703 info.mode_info = &mode_info;
2680 data->battery_state = (PP_StateUILabel_Battery == 2704 data->battery_state = (PP_StateUILabel_Battery ==
2681 request_ps->classification.ui_label); 2705 request_ps->classification.ui_label);
2682 2706
@@ -2703,8 +2727,6 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
2703 2727
2704 cgs_get_active_displays_info(hwmgr->device, &info); 2728 cgs_get_active_displays_info(hwmgr->device, &info);
2705 2729
2706 /*TO DO result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort);*/
2707
2708 minimum_clocks.engineClock = hwmgr->display_config.min_core_set_clock; 2730 minimum_clocks.engineClock = hwmgr->display_config.min_core_set_clock;
2709 minimum_clocks.memoryClock = hwmgr->display_config.min_mem_set_clock; 2731 minimum_clocks.memoryClock = hwmgr->display_config.min_mem_set_clock;
2710 2732
@@ -2769,8 +2791,10 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
2769 PHM_PlatformCaps_DisableMclkSwitchingForFrameLock); 2791 PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
2770 2792
2771 2793
2772 disable_mclk_switching = (1 < info.display_count) || 2794 disable_mclk_switching = ((1 < info.display_count) ||
2773 disable_mclk_switching_for_frame_lock; 2795 disable_mclk_switching_for_frame_lock ||
2796 smu7_vblank_too_short(hwmgr, mode_info.vblank_time_us) ||
2797 (mode_info.refresh_rate > 120));
2774 2798
2775 sclk = smu7_ps->performance_levels[0].engine_clock; 2799 sclk = smu7_ps->performance_levels[0].engine_clock;
2776 mclk = smu7_ps->performance_levels[0].memory_clock; 2800 mclk = smu7_ps->performance_levels[0].memory_clock;
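
The smu7_vblank_too_short() helper added above gates memory-clock switching on how long the display sits in vertical blank: reclocking memory mid-scanout corrupts the picture, so the switch is only allowed when the vblank window exceeds a per-ASIC threshold. The standalone C model below sketches that decision; only the 190/450/150 us threshold table comes from the hunk, and the example refresh-rate figures are invented.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative model of the vblank gating decision above. */
static bool vblank_too_short(bool is_polaris, bool is_gddr5,
			     uint32_t vblank_time_us)
{
	/* Thresholds mirror smu7_vblank_too_short(). */
	uint32_t switch_limit_us;

	if (is_polaris)
		switch_limit_us = is_gddr5 ? 190 : 150;
	else
		switch_limit_us = is_gddr5 ? 450 : 150;

	return vblank_time_us < switch_limit_us;
}

int main(void)
{
	/* A 60 Hz mode leaves roughly 500 us of vblank; 144 Hz far less. */
	printf("60 Hz:  %s\n", vblank_too_short(true, true, 500) ?
	       "disable mclk switching" : "allow mclk switching");
	printf("144 Hz: %s\n", vblank_too_short(true, true, 120) ?
	       "disable mclk switching" : "allow mclk switching");
	return 0;
}
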
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index ad30f5d3a10d..2614af2f553f 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -4186,7 +4186,7 @@ static int vega10_force_clock_level(struct pp_hwmgr *hwmgr,
4186 enum pp_clock_type type, uint32_t mask) 4186 enum pp_clock_type type, uint32_t mask)
4187{ 4187{
4188 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend); 4188 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4189 uint32_t i; 4189 int i;
4190 4190
4191 if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) 4191 if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
4192 return -EINVAL; 4192 return -EINVAL;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
index d5f53d04fa08..83e40fe51b62 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
@@ -709,17 +709,17 @@ static int tf_vega10_thermal_disable_alert(struct pp_hwmgr *hwmgr,
709 709
710static struct phm_master_table_item 710static struct phm_master_table_item
711vega10_thermal_start_thermal_controller_master_list[] = { 711vega10_thermal_start_thermal_controller_master_list[] = {
712 {NULL, tf_vega10_thermal_initialize}, 712 { .tableFunction = tf_vega10_thermal_initialize },
713 {NULL, tf_vega10_thermal_set_temperature_range}, 713 { .tableFunction = tf_vega10_thermal_set_temperature_range },
714 {NULL, tf_vega10_thermal_enable_alert}, 714 { .tableFunction = tf_vega10_thermal_enable_alert },
715/* We should restrict performance levels to low before we halt the SMC. 715/* We should restrict performance levels to low before we halt the SMC.
716 * On the other hand we are still in boot state when we do this 716 * On the other hand we are still in boot state when we do this
717 * so it would be pointless. 717 * so it would be pointless.
718 * If this assumption changes we have to revisit this table. 718 * If this assumption changes we have to revisit this table.
719 */ 719 */
720 {NULL, tf_vega10_thermal_setup_fan_table}, 720 { .tableFunction = tf_vega10_thermal_setup_fan_table },
721 {NULL, tf_vega10_thermal_start_smc_fan_control}, 721 { .tableFunction = tf_vega10_thermal_start_smc_fan_control },
722 {NULL, NULL} 722 { }
723}; 723};
724 724
725static struct phm_master_table_header 725static struct phm_master_table_header
@@ -731,10 +731,10 @@ vega10_thermal_start_thermal_controller_master = {
731 731
732static struct phm_master_table_item 732static struct phm_master_table_item
733vega10_thermal_set_temperature_range_master_list[] = { 733vega10_thermal_set_temperature_range_master_list[] = {
734 {NULL, tf_vega10_thermal_disable_alert}, 734 { .tableFunction = tf_vega10_thermal_disable_alert },
735 {NULL, tf_vega10_thermal_set_temperature_range}, 735 { .tableFunction = tf_vega10_thermal_set_temperature_range },
736 {NULL, tf_vega10_thermal_enable_alert}, 736 { .tableFunction = tf_vega10_thermal_enable_alert },
737 {NULL, NULL} 737 { }
738}; 738};
739 739
740struct phm_master_table_header 740struct phm_master_table_header
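
The thermal-table rewrite above swaps positional {NULL, fn} initializers for designated { .tableFunction = fn } entries and ends each list with an empty { } sentinel: designated initializers stay correct if fields are ever reordered or added to phm_master_table_item, and { } zero-fills every member. A minimal standalone sketch of the same sentinel-terminated table pattern follows; the item type and step functions are invented for illustration.

#include <stdio.h>

struct table_item {
	const char *name;	/* optional metadata, may stay zeroed */
	int (*fn)(void);	/* NULL function marks the end of the table */
};

static int step_a(void) { puts("step a"); return 0; }
static int step_b(void) { puts("step b"); return 0; }

/* Designated initializers: unnamed fields are zeroed, order-independent. */
static const struct table_item table[] = {
	{ .fn = step_a },
	{ .fn = step_b },
	{ }			/* empty-brace sentinel, all members zero */
};

int main(void)
{
	const struct table_item *it;

	for (it = table; it->fn; it++)
		it->fn();
	return 0;
}
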
diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c
index 798a3cc480a2..1a3359c0f6cd 100644
--- a/drivers/gpu/drm/arm/hdlcd_crtc.c
+++ b/drivers/gpu/drm/arm/hdlcd_crtc.c
@@ -10,6 +10,7 @@
10 */ 10 */
11 11
12#include <drm/drmP.h> 12#include <drm/drmP.h>
13#include <drm/drm_atomic.h>
13#include <drm/drm_atomic_helper.h> 14#include <drm/drm_atomic_helper.h>
14#include <drm/drm_crtc.h> 15#include <drm/drm_crtc.h>
15#include <drm/drm_crtc_helper.h> 16#include <drm/drm_crtc_helper.h>
@@ -226,16 +227,33 @@ static const struct drm_crtc_helper_funcs hdlcd_crtc_helper_funcs = {
226static int hdlcd_plane_atomic_check(struct drm_plane *plane, 227static int hdlcd_plane_atomic_check(struct drm_plane *plane,
227 struct drm_plane_state *state) 228 struct drm_plane_state *state)
228{ 229{
229 u32 src_w, src_h; 230 struct drm_rect clip = { 0 };
231 struct drm_crtc_state *crtc_state;
232 u32 src_h = state->src_h >> 16;
230 233
231 src_w = state->src_w >> 16; 234 /* only the HDLCD_REG_FB_LINE_COUNT register has a limit */
232 src_h = state->src_h >> 16; 235 if (src_h >= HDLCD_MAX_YRES) {
 236 		DRM_DEBUG_KMS("Invalid source height: %d\n", src_h);
237 return -EINVAL;
238 }
239
240 if (!state->fb || !state->crtc)
241 return 0;
233 242
234 /* we can't do any scaling of the plane source */ 243 crtc_state = drm_atomic_get_existing_crtc_state(state->state,
235 if ((src_w != state->crtc_w) || (src_h != state->crtc_h)) 244 state->crtc);
245 if (!crtc_state) {
246 DRM_DEBUG_KMS("Invalid crtc state\n");
236 return -EINVAL; 247 return -EINVAL;
248 }
237 249
238 return 0; 250 clip.x2 = crtc_state->adjusted_mode.hdisplay;
251 clip.y2 = crtc_state->adjusted_mode.vdisplay;
252
253 return drm_plane_helper_check_state(state, &clip,
254 DRM_PLANE_HELPER_NO_SCALING,
255 DRM_PLANE_HELPER_NO_SCALING,
256 false, true);
239} 257}
240 258
241static void hdlcd_plane_atomic_update(struct drm_plane *plane, 259static void hdlcd_plane_atomic_update(struct drm_plane *plane,
@@ -244,21 +262,20 @@ static void hdlcd_plane_atomic_update(struct drm_plane *plane,
244 struct drm_framebuffer *fb = plane->state->fb; 262 struct drm_framebuffer *fb = plane->state->fb;
245 struct hdlcd_drm_private *hdlcd; 263 struct hdlcd_drm_private *hdlcd;
246 struct drm_gem_cma_object *gem; 264 struct drm_gem_cma_object *gem;
247 u32 src_w, src_h, dest_w, dest_h; 265 u32 src_x, src_y, dest_h;
248 dma_addr_t scanout_start; 266 dma_addr_t scanout_start;
249 267
250 if (!fb) 268 if (!fb)
251 return; 269 return;
252 270
253 src_w = plane->state->src_w >> 16; 271 src_x = plane->state->src.x1 >> 16;
254 src_h = plane->state->src_h >> 16; 272 src_y = plane->state->src.y1 >> 16;
255 dest_w = plane->state->crtc_w; 273 dest_h = drm_rect_height(&plane->state->dst);
256 dest_h = plane->state->crtc_h;
257 gem = drm_fb_cma_get_gem_obj(fb, 0); 274 gem = drm_fb_cma_get_gem_obj(fb, 0);
275
258 scanout_start = gem->paddr + fb->offsets[0] + 276 scanout_start = gem->paddr + fb->offsets[0] +
259 plane->state->crtc_y * fb->pitches[0] + 277 src_y * fb->pitches[0] +
260 plane->state->crtc_x * 278 src_x * fb->format->cpp[0];
261 fb->format->cpp[0];
262 279
263 hdlcd = plane->dev->dev_private; 280 hdlcd = plane->dev->dev_private;
264 hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_LENGTH, fb->pitches[0]); 281 hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_LENGTH, fb->pitches[0]);
@@ -305,7 +322,6 @@ static struct drm_plane *hdlcd_plane_init(struct drm_device *drm)
305 formats, ARRAY_SIZE(formats), 322 formats, ARRAY_SIZE(formats),
306 DRM_PLANE_TYPE_PRIMARY, NULL); 323 DRM_PLANE_TYPE_PRIMARY, NULL);
307 if (ret) { 324 if (ret) {
308 devm_kfree(drm->dev, plane);
309 return ERR_PTR(ret); 325 return ERR_PTR(ret);
310 } 326 }
311 327
@@ -329,7 +345,6 @@ int hdlcd_setup_crtc(struct drm_device *drm)
329 &hdlcd_crtc_funcs, NULL); 345 &hdlcd_crtc_funcs, NULL);
330 if (ret) { 346 if (ret) {
331 hdlcd_plane_destroy(primary); 347 hdlcd_plane_destroy(primary);
332 devm_kfree(drm->dev, primary);
333 return ret; 348 return ret;
334 } 349 }
335 350
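
The hdlcd update hook previously derived the scanout address from the CRTC destination coordinates (crtc_x/crtc_y); the fix switches to the framebuffer source rectangle, whose coordinates are 16.16 fixed point, since that is what selects the first visible pixel in memory. Below is a hedged standalone model of that address arithmetic; the function and its example values are illustrative, not the DRM API.

#include <stdint.h>
#include <stdio.h>

/* Model of: scanout = paddr + offset + src_y * pitch + src_x * cpp. */
static uint64_t scanout_start(uint64_t paddr, uint32_t offset,
			      uint32_t pitch, uint32_t cpp,
			      uint32_t src_x_fixed, uint32_t src_y_fixed)
{
	uint32_t src_x = src_x_fixed >> 16;	/* drop the 16.16 fraction */
	uint32_t src_y = src_y_fixed >> 16;

	return paddr + offset +
	       (uint64_t)src_y * pitch + (uint64_t)src_x * cpp;
}

int main(void)
{
	/* XRGB8888 (4 bytes/px), 1920-wide framebuffer, panned to (8, 2). */
	uint64_t start = scanout_start(0x80000000ull, 0, 1920 * 4, 4,
				       8 << 16, 2 << 16);

	printf("scanout starts at 0x%llx\n", (unsigned long long)start);
	return 0;
}
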
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
index 65a3bd7a0c00..423dda2785d4 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
@@ -152,8 +152,7 @@ static const struct drm_connector_funcs atmel_hlcdc_panel_connector_funcs = {
152 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 152 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
153}; 153};
154 154
155static int atmel_hlcdc_attach_endpoint(struct drm_device *dev, 155static int atmel_hlcdc_attach_endpoint(struct drm_device *dev, int endpoint)
156 const struct device_node *np)
157{ 156{
158 struct atmel_hlcdc_dc *dc = dev->dev_private; 157 struct atmel_hlcdc_dc *dc = dev->dev_private;
159 struct atmel_hlcdc_rgb_output *output; 158 struct atmel_hlcdc_rgb_output *output;
@@ -161,6 +160,11 @@ static int atmel_hlcdc_attach_endpoint(struct drm_device *dev,
161 struct drm_bridge *bridge; 160 struct drm_bridge *bridge;
162 int ret; 161 int ret;
163 162
163 ret = drm_of_find_panel_or_bridge(dev->dev->of_node, 0, endpoint,
164 &panel, &bridge);
165 if (ret)
166 return ret;
167
164 output = devm_kzalloc(dev->dev, sizeof(*output), GFP_KERNEL); 168 output = devm_kzalloc(dev->dev, sizeof(*output), GFP_KERNEL);
165 if (!output) 169 if (!output)
166 return -EINVAL; 170 return -EINVAL;
@@ -177,10 +181,6 @@ static int atmel_hlcdc_attach_endpoint(struct drm_device *dev,
177 181
178 output->encoder.possible_crtcs = 0x1; 182 output->encoder.possible_crtcs = 0x1;
179 183
180 ret = drm_of_find_panel_or_bridge(np, 0, 0, &panel, &bridge);
181 if (ret)
182 return ret;
183
184 if (panel) { 184 if (panel) {
185 output->connector.dpms = DRM_MODE_DPMS_OFF; 185 output->connector.dpms = DRM_MODE_DPMS_OFF;
186 output->connector.polled = DRM_CONNECTOR_POLL_CONNECT; 186 output->connector.polled = DRM_CONNECTOR_POLL_CONNECT;
@@ -220,22 +220,14 @@ err_encoder_cleanup:
220 220
221int atmel_hlcdc_create_outputs(struct drm_device *dev) 221int atmel_hlcdc_create_outputs(struct drm_device *dev)
222{ 222{
223 struct device_node *remote; 223 int endpoint, ret = 0;
224 int ret = -ENODEV; 224
225 int endpoint = 0; 225 for (endpoint = 0; !ret; endpoint++)
226 226 ret = atmel_hlcdc_attach_endpoint(dev, endpoint);
227 while (true) { 227
 228 /* Loop thru possible multiple connections to the output */ 228 /* At least one device was successfully attached. */
229 remote = of_graph_get_remote_node(dev->dev->of_node, 0, 229 if (ret == -ENODEV && endpoint)
230 endpoint++); 230 return 0;
231 if (!remote)
232 break;
233
234 ret = atmel_hlcdc_attach_endpoint(dev, remote);
235 of_node_put(remote);
236 if (ret)
237 return ret;
238 }
239 231
240 return ret; 232 return ret;
241} 233}
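
The rewritten atmel_hlcdc_create_outputs() probes endpoint indices until the attach helper reports -ENODEV, treating that as end-of-list, and succeeds as long as at least one endpoint attached. The sketch below models the termination logic with an explicit success counter for clarity (a slight variation on the hunk's post-increment check); the stub attach function and its two fake endpoints are invented.

#include <errno.h>
#include <stdio.h>

/* Stub: pretend endpoints 0 and 1 exist, everything beyond is absent. */
static int attach_endpoint(int endpoint)
{
	return endpoint < 2 ? 0 : -ENODEV;
}

static int create_outputs(void)
{
	int endpoint, attached = 0, ret = 0;

	for (endpoint = 0; !ret; endpoint++) {
		ret = attach_endpoint(endpoint);
		if (!ret)
			attached++;
	}

	/* -ENODEV only means "no more endpoints"; succeed if any attached. */
	if (ret == -ENODEV && attached)
		return 0;

	return ret;
}

int main(void)
{
	printf("create_outputs() = %d\n", create_outputs());
	return 0;
}
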
diff --git a/drivers/gpu/drm/bridge/synopsys/Kconfig b/drivers/gpu/drm/bridge/synopsys/Kconfig
index 40d2827a6d19..53e78d092d18 100644
--- a/drivers/gpu/drm/bridge/synopsys/Kconfig
+++ b/drivers/gpu/drm/bridge/synopsys/Kconfig
@@ -1,6 +1,7 @@
1config DRM_DW_HDMI 1config DRM_DW_HDMI
2 tristate 2 tristate
3 select DRM_KMS_HELPER 3 select DRM_KMS_HELPER
4 select REGMAP_MMIO
4 5
5config DRM_DW_HDMI_AHB_AUDIO 6config DRM_DW_HDMI_AHB_AUDIO
6 tristate "Synopsys Designware AHB Audio interface" 7 tristate "Synopsys Designware AHB Audio interface"
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 8be9719284b0..aa885a614e27 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -508,6 +508,8 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
508 bool has_connectors = 508 bool has_connectors =
509 !!new_crtc_state->connector_mask; 509 !!new_crtc_state->connector_mask;
510 510
511 WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
512
511 if (!drm_mode_equal(&old_crtc_state->mode, &new_crtc_state->mode)) { 513 if (!drm_mode_equal(&old_crtc_state->mode, &new_crtc_state->mode)) {
512 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] mode changed\n", 514 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] mode changed\n",
513 crtc->base.id, crtc->name); 515 crtc->base.id, crtc->name);
@@ -551,6 +553,8 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
551 for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) { 553 for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
552 const struct drm_connector_helper_funcs *funcs = connector->helper_private; 554 const struct drm_connector_helper_funcs *funcs = connector->helper_private;
553 555
556 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
557
554 /* 558 /*
555 * This only sets crtc->connectors_changed for routing changes, 559 * This only sets crtc->connectors_changed for routing changes,
556 * drivers must set crtc->connectors_changed themselves when 560 * drivers must set crtc->connectors_changed themselves when
@@ -650,6 +654,8 @@ drm_atomic_helper_check_planes(struct drm_device *dev,
650 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { 654 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
651 const struct drm_plane_helper_funcs *funcs; 655 const struct drm_plane_helper_funcs *funcs;
652 656
657 WARN_ON(!drm_modeset_is_locked(&plane->mutex));
658
653 funcs = plane->helper_private; 659 funcs = plane->helper_private;
654 660
655 drm_atomic_helper_plane_changed(state, old_plane_state, new_plane_state, plane); 661 drm_atomic_helper_plane_changed(state, old_plane_state, new_plane_state, plane);
@@ -2663,7 +2669,12 @@ int drm_atomic_helper_resume(struct drm_device *dev,
2663 2669
2664 drm_modeset_acquire_init(&ctx, 0); 2670 drm_modeset_acquire_init(&ctx, 0);
2665 while (1) { 2671 while (1) {
2672 err = drm_modeset_lock_all_ctx(dev, &ctx);
2673 if (err)
2674 goto out;
2675
2666 err = drm_atomic_helper_commit_duplicated_state(state, &ctx); 2676 err = drm_atomic_helper_commit_duplicated_state(state, &ctx);
2677out:
2667 if (err != -EDEADLK) 2678 if (err != -EDEADLK)
2668 break; 2679 break;
2669 2680
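
The resume fix moves the locking step inside the retry loop: with ww-mutex acquire contexts, any step can fail with -EDEADLK, after which the caller must back off (drop every lock already taken) and redo the whole sequence, including drm_modeset_lock_all_ctx() itself. A standalone sketch of that backoff-and-retry shape follows; the lock/commit/backoff stubs stand in for the DRM calls and are invented.

#include <errno.h>
#include <stdio.h>

static int attempts;

/* Stubs: pretend the first locking attempt hits ww-mutex contention. */
static int lock_all(void)  { return ++attempts == 1 ? -EDEADLK : 0; }
static int commit(void)    { puts("committed state"); return 0; }
static void backoff(void)  { puts("dropping locks, retrying"); }

static int resume(void)
{
	int err;

	while (1) {
		err = lock_all();	/* must be retried too, hence in-loop */
		if (!err)
			err = commit();

		if (err != -EDEADLK)
			break;

		backoff();		/* release everything, then loop */
	}

	return err;
}

int main(void)
{
	printf("resume() = %d\n", resume());
	return 0;
}
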
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 9f847615ac74..48ca2457df8c 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -1229,21 +1229,6 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
1229 if (!connector) 1229 if (!connector)
1230 return -ENOENT; 1230 return -ENOENT;
1231 1231
1232 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
1233 encoder = drm_connector_get_encoder(connector);
1234 if (encoder)
1235 out_resp->encoder_id = encoder->base.id;
1236 else
1237 out_resp->encoder_id = 0;
1238
1239 ret = drm_mode_object_get_properties(&connector->base, file_priv->atomic,
1240 (uint32_t __user *)(unsigned long)(out_resp->props_ptr),
1241 (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr),
1242 &out_resp->count_props);
1243 drm_modeset_unlock(&dev->mode_config.connection_mutex);
1244 if (ret)
1245 goto out_unref;
1246
1247 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) 1232 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++)
1248 if (connector->encoder_ids[i] != 0) 1233 if (connector->encoder_ids[i] != 0)
1249 encoders_count++; 1234 encoders_count++;
@@ -1256,7 +1241,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
1256 if (put_user(connector->encoder_ids[i], 1241 if (put_user(connector->encoder_ids[i],
1257 encoder_ptr + copied)) { 1242 encoder_ptr + copied)) {
1258 ret = -EFAULT; 1243 ret = -EFAULT;
1259 goto out_unref; 1244 goto out;
1260 } 1245 }
1261 copied++; 1246 copied++;
1262 } 1247 }
@@ -1300,15 +1285,32 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
1300 if (copy_to_user(mode_ptr + copied, 1285 if (copy_to_user(mode_ptr + copied,
1301 &u_mode, sizeof(u_mode))) { 1286 &u_mode, sizeof(u_mode))) {
1302 ret = -EFAULT; 1287 ret = -EFAULT;
1288 mutex_unlock(&dev->mode_config.mutex);
1289
1303 goto out; 1290 goto out;
1304 } 1291 }
1305 copied++; 1292 copied++;
1306 } 1293 }
1307 } 1294 }
1308 out_resp->count_modes = mode_count; 1295 out_resp->count_modes = mode_count;
1309out:
1310 mutex_unlock(&dev->mode_config.mutex); 1296 mutex_unlock(&dev->mode_config.mutex);
1311out_unref: 1297
1298 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
1299 encoder = drm_connector_get_encoder(connector);
1300 if (encoder)
1301 out_resp->encoder_id = encoder->base.id;
1302 else
1303 out_resp->encoder_id = 0;
1304
1305 /* Only grab properties after probing, to make sure EDID and other
1306 * properties reflect the latest status. */
1307 ret = drm_mode_object_get_properties(&connector->base, file_priv->atomic,
1308 (uint32_t __user *)(unsigned long)(out_resp->props_ptr),
1309 (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr),
1310 &out_resp->count_props);
1311 drm_modeset_unlock(&dev->mode_config.connection_mutex);
1312
1313out:
1312 drm_connector_put(connector); 1314 drm_connector_put(connector);
1313 1315
1314 return ret; 1316 return ret;
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 3e5f52110ea1..213fb837e1c4 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -1208,3 +1208,86 @@ int drm_dp_stop_crc(struct drm_dp_aux *aux)
1208 return 0; 1208 return 0;
1209} 1209}
1210EXPORT_SYMBOL(drm_dp_stop_crc); 1210EXPORT_SYMBOL(drm_dp_stop_crc);
1211
1212struct dpcd_quirk {
1213 u8 oui[3];
1214 bool is_branch;
1215 u32 quirks;
1216};
1217
1218#define OUI(first, second, third) { (first), (second), (third) }
1219
1220static const struct dpcd_quirk dpcd_quirk_list[] = {
1221 /* Analogix 7737 needs reduced M and N at HBR2 link rates */
1222 { OUI(0x00, 0x22, 0xb9), true, BIT(DP_DPCD_QUIRK_LIMITED_M_N) },
1223};
1224
1225#undef OUI
1226
1227/*
1228 * Get a bit mask of DPCD quirks for the sink/branch device identified by
1229 * ident. The quirk data is shared but it's up to the drivers to act on the
1230 * data.
1231 *
1232 * For now, only the OUI (first three bytes) is used, but this may be extended
1233 * to device identification string and hardware/firmware revisions later.
1234 */
1235static u32
1236drm_dp_get_quirks(const struct drm_dp_dpcd_ident *ident, bool is_branch)
1237{
1238 const struct dpcd_quirk *quirk;
1239 u32 quirks = 0;
1240 int i;
1241
1242 for (i = 0; i < ARRAY_SIZE(dpcd_quirk_list); i++) {
1243 quirk = &dpcd_quirk_list[i];
1244
1245 if (quirk->is_branch != is_branch)
1246 continue;
1247
1248 if (memcmp(quirk->oui, ident->oui, sizeof(ident->oui)) != 0)
1249 continue;
1250
1251 quirks |= quirk->quirks;
1252 }
1253
1254 return quirks;
1255}
1256
1257/**
1258 * drm_dp_read_desc - read sink/branch descriptor from DPCD
1259 * @aux: DisplayPort AUX channel
 1260 * @desc: Device descriptor to fill from DPCD
1261 * @is_branch: true for branch devices, false for sink devices
1262 *
1263 * Read DPCD 0x400 (sink) or 0x500 (branch) into @desc. Also debug log the
1264 * identification.
1265 *
1266 * Returns 0 on success or a negative error code on failure.
1267 */
1268int drm_dp_read_desc(struct drm_dp_aux *aux, struct drm_dp_desc *desc,
1269 bool is_branch)
1270{
1271 struct drm_dp_dpcd_ident *ident = &desc->ident;
1272 unsigned int offset = is_branch ? DP_BRANCH_OUI : DP_SINK_OUI;
1273 int ret, dev_id_len;
1274
1275 ret = drm_dp_dpcd_read(aux, offset, ident, sizeof(*ident));
1276 if (ret < 0)
1277 return ret;
1278
1279 desc->quirks = drm_dp_get_quirks(ident, is_branch);
1280
1281 dev_id_len = strnlen(ident->device_id, sizeof(ident->device_id));
1282
1283 DRM_DEBUG_KMS("DP %s: OUI %*phD dev-ID %*pE HW-rev %d.%d SW-rev %d.%d quirks 0x%04x\n",
1284 is_branch ? "branch" : "sink",
1285 (int)sizeof(ident->oui), ident->oui,
1286 dev_id_len, ident->device_id,
1287 ident->hw_rev >> 4, ident->hw_rev & 0xf,
1288 ident->sw_major_rev, ident->sw_minor_rev,
1289 desc->quirks);
1290
1291 return 0;
1292}
1293EXPORT_SYMBOL(drm_dp_read_desc);
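
A driver typically calls drm_dp_read_desc() once after probing the DPCD and then tests quirk bits while computing link parameters; the i915_drv.h hunk further down threads a reduce_m_n flag into intel_link_compute_m_n() for exactly this quirk. The standalone sketch below models only the bit test: the enum constant and quirks field mirror the hunk, while the reduced struct and values are invented.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum { DP_DPCD_QUIRK_LIMITED_M_N };	/* bit index, as in the hunk */

struct dp_desc {
	uint32_t quirks;	/* reduced stand-in for drm_dp_desc */
};

/* True when the sink needs reduced M/N values at high link rates. */
static bool needs_limited_m_n(const struct dp_desc *desc)
{
	return desc->quirks & (1u << DP_DPCD_QUIRK_LIMITED_M_N);
}

int main(void)
{
	struct dp_desc desc = { .quirks = 1u << DP_DPCD_QUIRK_LIMITED_M_N };

	printf("reduce M/N: %s\n", needs_limited_m_n(&desc) ? "yes" : "no");
	return 0;
}
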
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index b5c6bb46a425..37b8ad3e30d8 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -358,7 +358,12 @@ EXPORT_SYMBOL(drm_put_dev);
358void drm_unplug_dev(struct drm_device *dev) 358void drm_unplug_dev(struct drm_device *dev)
359{ 359{
360 /* for a USB device */ 360 /* for a USB device */
361 drm_dev_unregister(dev); 361 if (drm_core_check_feature(dev, DRIVER_MODESET))
362 drm_modeset_unregister_all(dev);
363
364 drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
365 drm_minor_unregister(dev, DRM_MINOR_RENDER);
366 drm_minor_unregister(dev, DRM_MINOR_CONTROL);
362 367
363 mutex_lock(&drm_global_mutex); 368 mutex_lock(&drm_global_mutex);
364 369
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index fedd4d60d9cd..5dc8c4350602 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -948,8 +948,6 @@ retry:
948 } 948 }
949 949
950out: 950out:
951 if (ret && crtc->funcs->page_flip_target)
952 drm_crtc_vblank_put(crtc);
953 if (fb) 951 if (fb)
954 drm_framebuffer_put(fb); 952 drm_framebuffer_put(fb);
955 if (crtc->primary->old_fb) 953 if (crtc->primary->old_fb)
@@ -964,5 +962,8 @@ out:
964 drm_modeset_drop_locks(&ctx); 962 drm_modeset_drop_locks(&ctx);
965 drm_modeset_acquire_fini(&ctx); 963 drm_modeset_acquire_fini(&ctx);
966 964
965 if (ret && crtc->funcs->page_flip_target)
966 drm_crtc_vblank_put(crtc);
967
967 return ret; 968 return ret;
968} 969}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
index c4a091e87426..e437fba1209d 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
@@ -106,9 +106,10 @@ struct etnaviv_gem_submit {
106 struct etnaviv_gpu *gpu; 106 struct etnaviv_gpu *gpu;
107 struct ww_acquire_ctx ticket; 107 struct ww_acquire_ctx ticket;
108 struct dma_fence *fence; 108 struct dma_fence *fence;
109 u32 flags;
109 unsigned int nr_bos; 110 unsigned int nr_bos;
110 struct etnaviv_gem_submit_bo bos[0]; 111 struct etnaviv_gem_submit_bo bos[0];
111 u32 flags; 112 /* No new members here, the previous one is variable-length! */
112}; 113};
113 114
114int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj, 115int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
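
The struct fix above matters because bos[0] is a variable-length trailing array: any member declared after it occupies the same storage as the BO array, which is how the old placement of u32 flags got overwritten. Fixed members must precede the flexible array, and a single allocation covers the header plus all elements. A standalone sketch of the idiom using the C99 bos[] spelling; the types and names are illustrative.

#include <stdio.h>
#include <stdlib.h>

struct submit_bo { unsigned int flags; };

struct submit {
	unsigned int flags;	/* fixed members first... */
	unsigned int nr_bos;
	struct submit_bo bos[];	/* ...flexible array member strictly last */
};

static struct submit *submit_create(unsigned int nr_bos)
{
	/* One allocation holds the header and all trailing elements. */
	struct submit *s = calloc(1, sizeof(*s) + nr_bos * sizeof(s->bos[0]));

	if (s)
		s->nr_bos = nr_bos;
	return s;
}

int main(void)
{
	struct submit *s = submit_create(4);

	if (!s)
		return 1;
	printf("submit with %u bos\n", s->nr_bos);
	free(s);
	return 0;
}
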
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index e1909429837e..1013765274da 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -44,6 +44,7 @@ static struct etnaviv_gem_submit *submit_create(struct drm_device *dev,
44 44
45 /* initially, until copy_from_user() and bo lookup succeeds: */ 45 /* initially, until copy_from_user() and bo lookup succeeds: */
46 submit->nr_bos = 0; 46 submit->nr_bos = 0;
47 submit->fence = NULL;
47 48
48 ww_acquire_init(&submit->ticket, &reservation_ww_class); 49 ww_acquire_init(&submit->ticket, &reservation_ww_class);
49 } 50 }
@@ -171,7 +172,7 @@ static int submit_fence_sync(const struct etnaviv_gem_submit *submit)
171 for (i = 0; i < submit->nr_bos; i++) { 172 for (i = 0; i < submit->nr_bos; i++) {
172 struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj; 173 struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
173 bool write = submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE; 174 bool write = submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE;
174 bool explicit = !(submit->flags & ETNA_SUBMIT_NO_IMPLICIT); 175 bool explicit = !!(submit->flags & ETNA_SUBMIT_NO_IMPLICIT);
175 176
176 ret = etnaviv_gpu_fence_sync_obj(etnaviv_obj, context, write, 177 ret = etnaviv_gpu_fence_sync_obj(etnaviv_obj, context, write,
177 explicit); 178 explicit);
@@ -294,7 +295,8 @@ static void submit_cleanup(struct etnaviv_gem_submit *submit)
294 } 295 }
295 296
296 ww_acquire_fini(&submit->ticket); 297 ww_acquire_fini(&submit->ticket);
297 dma_fence_put(submit->fence); 298 if (submit->fence)
299 dma_fence_put(submit->fence);
298 kfree(submit); 300 kfree(submit);
299} 301}
300 302
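
The submit_fence_sync() change turns !(flags & ETNA_SUBMIT_NO_IMPLICIT) into !!(flags & ...): the single negation made 'explicit' false precisely when userspace had set the no-implicit-fencing flag, while !! merely collapses the masked bit into a clean 0/1. A tiny standalone demonstration of the difference; the flag name is a stand-in.

#include <stdbool.h>
#include <stdio.h>

#define NO_IMPLICIT (1u << 0)	/* stand-in for ETNA_SUBMIT_NO_IMPLICIT */

int main(void)
{
	unsigned int flags = NO_IMPLICIT;

	bool inverted   = !(flags & NO_IMPLICIT);	/* old: false when set */
	bool normalized = !!(flags & NO_IMPLICIT);	/* new: true when set */

	printf("old explicit=%d, new explicit=%d\n", inverted, normalized);
	return 0;
}
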
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 09d3c4c3c858..50294a7bd29d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -82,14 +82,9 @@ err_file_priv_free:
82 return ret; 82 return ret;
83} 83}
84 84
85static void exynos_drm_preclose(struct drm_device *dev,
86 struct drm_file *file)
87{
88 exynos_drm_subdrv_close(dev, file);
89}
90
91static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file) 85static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
92{ 86{
87 exynos_drm_subdrv_close(dev, file);
93 kfree(file->driver_priv); 88 kfree(file->driver_priv);
94 file->driver_priv = NULL; 89 file->driver_priv = NULL;
95} 90}
@@ -145,7 +140,6 @@ static struct drm_driver exynos_drm_driver = {
145 .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME 140 .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME
146 | DRIVER_ATOMIC | DRIVER_RENDER, 141 | DRIVER_ATOMIC | DRIVER_RENDER,
147 .open = exynos_drm_open, 142 .open = exynos_drm_open,
148 .preclose = exynos_drm_preclose,
149 .lastclose = exynos_drm_lastclose, 143 .lastclose = exynos_drm_lastclose,
150 .postclose = exynos_drm_postclose, 144 .postclose = exynos_drm_postclose,
151 .gem_free_object_unlocked = exynos_drm_gem_free_object, 145 .gem_free_object_unlocked = exynos_drm_gem_free_object,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index cb3176930596..39c740572034 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -160,12 +160,9 @@ struct exynos_drm_clk {
160 * drm framework doesn't support multiple irq yet. 160 * drm framework doesn't support multiple irq yet.
161 * we can refer to the crtc to current hardware interrupt occurred through 161 * we can refer to the crtc to current hardware interrupt occurred through
162 * this pipe value. 162 * this pipe value.
163 * @enabled: if the crtc is enabled or not
164 * @event: vblank event that is currently queued for flip
165 * @wait_update: wait all pending planes updates to finish
166 * @pending_update: number of pending plane updates in this crtc
167 * @ops: pointer to callbacks for exynos drm specific functionality 163 * @ops: pointer to callbacks for exynos drm specific functionality
168 * @ctx: A pointer to the crtc's implementation specific context 164 * @ctx: A pointer to the crtc's implementation specific context
165 * @pipe_clk: A pointer to the crtc's pipeline clock.
169 */ 166 */
170struct exynos_drm_crtc { 167struct exynos_drm_crtc {
171 struct drm_crtc base; 168 struct drm_crtc base;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index fc4fda738906..d404de86d5f9 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -1633,7 +1633,6 @@ static int exynos_dsi_parse_dt(struct exynos_dsi *dsi)
1633{ 1633{
1634 struct device *dev = dsi->dev; 1634 struct device *dev = dsi->dev;
1635 struct device_node *node = dev->of_node; 1635 struct device_node *node = dev->of_node;
1636 struct device_node *ep;
1637 int ret; 1636 int ret;
1638 1637
1639 ret = exynos_dsi_of_read_u32(node, "samsung,pll-clock-frequency", 1638 ret = exynos_dsi_of_read_u32(node, "samsung,pll-clock-frequency",
@@ -1641,32 +1640,21 @@ static int exynos_dsi_parse_dt(struct exynos_dsi *dsi)
1641 if (ret < 0) 1640 if (ret < 0)
1642 return ret; 1641 return ret;
1643 1642
1644 ep = of_graph_get_endpoint_by_regs(node, DSI_PORT_OUT, 0); 1643 ret = exynos_dsi_of_read_u32(node, "samsung,burst-clock-frequency",
1645 if (!ep) {
1646 dev_err(dev, "no output port with endpoint specified\n");
1647 return -EINVAL;
1648 }
1649
1650 ret = exynos_dsi_of_read_u32(ep, "samsung,burst-clock-frequency",
1651 &dsi->burst_clk_rate); 1644 &dsi->burst_clk_rate);
1652 if (ret < 0) 1645 if (ret < 0)
1653 goto end; 1646 return ret;
1654 1647
1655 ret = exynos_dsi_of_read_u32(ep, "samsung,esc-clock-frequency", 1648 ret = exynos_dsi_of_read_u32(node, "samsung,esc-clock-frequency",
1656 &dsi->esc_clk_rate); 1649 &dsi->esc_clk_rate);
1657 if (ret < 0) 1650 if (ret < 0)
1658 goto end; 1651 return ret;
1659
1660 of_node_put(ep);
1661 1652
1662 dsi->bridge_node = of_graph_get_remote_node(node, DSI_PORT_OUT, 0); 1653 dsi->bridge_node = of_graph_get_remote_node(node, DSI_PORT_OUT, 0);
1663 if (!dsi->bridge_node) 1654 if (!dsi->bridge_node)
1664 return -EINVAL; 1655 return -EINVAL;
1665 1656
1666end: 1657 return 0;
1667 of_node_put(ep);
1668
1669 return ret;
1670} 1658}
1671 1659
1672static int exynos_dsi_bind(struct device *dev, struct device *master, 1660static int exynos_dsi_bind(struct device *dev, struct device *master,
@@ -1817,6 +1805,10 @@ static int exynos_dsi_probe(struct platform_device *pdev)
1817 1805
1818static int exynos_dsi_remove(struct platform_device *pdev) 1806static int exynos_dsi_remove(struct platform_device *pdev)
1819{ 1807{
1808 struct exynos_dsi *dsi = platform_get_drvdata(pdev);
1809
1810 of_node_put(dsi->bridge_node);
1811
1820 pm_runtime_disable(&pdev->dev); 1812 pm_runtime_disable(&pdev->dev);
1821 1813
1822 component_del(&pdev->dev, &exynos_dsi_component_ops); 1814 component_del(&pdev->dev, &exynos_dsi_component_ops);
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
index 0066fe7e622e..be3eefec5152 100644
--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
@@ -759,20 +759,23 @@ void psb_intel_lvds_init(struct drm_device *dev,
759 if (scan->type & DRM_MODE_TYPE_PREFERRED) { 759 if (scan->type & DRM_MODE_TYPE_PREFERRED) {
760 mode_dev->panel_fixed_mode = 760 mode_dev->panel_fixed_mode =
761 drm_mode_duplicate(dev, scan); 761 drm_mode_duplicate(dev, scan);
762 DRM_DEBUG_KMS("Using mode from DDC\n");
762 goto out; /* FIXME: check for quirks */ 763 goto out; /* FIXME: check for quirks */
763 } 764 }
764 } 765 }
765 766
766 /* Failed to get EDID, what about VBT? do we need this? */ 767 /* Failed to get EDID, what about VBT? do we need this? */
767 if (mode_dev->vbt_mode) 768 if (dev_priv->lfp_lvds_vbt_mode) {
768 mode_dev->panel_fixed_mode = 769 mode_dev->panel_fixed_mode =
769 drm_mode_duplicate(dev, mode_dev->vbt_mode); 770 drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
770 771
771 if (!mode_dev->panel_fixed_mode) 772 if (mode_dev->panel_fixed_mode) {
772 if (dev_priv->lfp_lvds_vbt_mode) 773 mode_dev->panel_fixed_mode->type |=
773 mode_dev->panel_fixed_mode = 774 DRM_MODE_TYPE_PREFERRED;
774 drm_mode_duplicate(dev, 775 DRM_DEBUG_KMS("Using mode from VBT\n");
775 dev_priv->lfp_lvds_vbt_mode); 776 goto out;
777 }
778 }
776 779
777 /* 780 /*
778 * If we didn't get EDID, try checking if the panel is already turned 781 * If we didn't get EDID, try checking if the panel is already turned
@@ -789,6 +792,7 @@ void psb_intel_lvds_init(struct drm_device *dev,
789 if (mode_dev->panel_fixed_mode) { 792 if (mode_dev->panel_fixed_mode) {
790 mode_dev->panel_fixed_mode->type |= 793 mode_dev->panel_fixed_mode->type |=
791 DRM_MODE_TYPE_PREFERRED; 794 DRM_MODE_TYPE_PREFERRED;
795 DRM_DEBUG_KMS("Using pre-programmed mode\n");
792 goto out; /* FIXME: check for quirks */ 796 goto out; /* FIXME: check for quirks */
793 } 797 }
794 } 798 }
diff --git a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
index 5abc69c9630f..f77dcfaade6c 100644
--- a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
+++ b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
@@ -760,7 +760,7 @@ static int dsi_parse_dt(struct platform_device *pdev, struct dw_dsi *dsi)
760 * Get the endpoint node. In our case, dsi has one output port1 760 * Get the endpoint node. In our case, dsi has one output port1
761 * to which the external HDMI bridge is connected. 761 * to which the external HDMI bridge is connected.
762 */ 762 */
763 ret = drm_of_find_panel_or_bridge(np, 0, 0, NULL, &dsi->bridge); 763 ret = drm_of_find_panel_or_bridge(np, 1, 0, NULL, &dsi->bridge);
764 if (ret) 764 if (ret)
765 return ret; 765 return ret;
766 766
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index dca989eb2d42..24fe04d6307b 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -779,8 +779,26 @@ static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
779 vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw; 779 vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
780} 780}
781 781
782static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask)
783{
784 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
785 struct intel_engine_cs *engine;
786 struct intel_vgpu_workload *pos, *n;
787 unsigned int tmp;
788
 789 /* free the unsubmitted workloads in the queues. */
790 for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
791 list_for_each_entry_safe(pos, n,
792 &vgpu->workload_q_head[engine->id], list) {
793 list_del_init(&pos->list);
794 free_workload(pos);
795 }
796 }
797}
798
782void intel_vgpu_clean_execlist(struct intel_vgpu *vgpu) 799void intel_vgpu_clean_execlist(struct intel_vgpu *vgpu)
783{ 800{
801 clean_workloads(vgpu, ALL_ENGINES);
784 kmem_cache_destroy(vgpu->workloads); 802 kmem_cache_destroy(vgpu->workloads);
785} 803}
786 804
@@ -811,17 +829,9 @@ void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu,
811{ 829{
812 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; 830 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
813 struct intel_engine_cs *engine; 831 struct intel_engine_cs *engine;
814 struct intel_vgpu_workload *pos, *n;
815 unsigned int tmp; 832 unsigned int tmp;
816 833
817 for_each_engine_masked(engine, dev_priv, engine_mask, tmp) { 834 clean_workloads(vgpu, engine_mask);
818 /* free the unsubmited workload in the queue */ 835 for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
819 list_for_each_entry_safe(pos, n,
820 &vgpu->workload_q_head[engine->id], list) {
821 list_del_init(&pos->list);
822 free_workload(pos);
823 }
824
825 init_vgpu_execlist(vgpu, engine->id); 836 init_vgpu_execlist(vgpu, engine->id);
826 }
827} 837}
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 0ad1a508e2af..0ffd69654592 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -1244,7 +1244,7 @@ static int dma_ctrl_write(struct intel_vgpu *vgpu, unsigned int offset,
1244 mode = vgpu_vreg(vgpu, offset); 1244 mode = vgpu_vreg(vgpu, offset);
1245 1245
1246 if (GFX_MODE_BIT_SET_IN_MASK(mode, START_DMA)) { 1246 if (GFX_MODE_BIT_SET_IN_MASK(mode, START_DMA)) {
1247 WARN_ONCE(1, "VM(%d): iGVT-g doesn't supporte GuC\n", 1247 WARN_ONCE(1, "VM(%d): iGVT-g doesn't support GuC\n",
1248 vgpu->id); 1248 vgpu->id);
1249 return 0; 1249 return 0;
1250 } 1250 }
@@ -1366,18 +1366,28 @@ static int skl_misc_ctl_write(struct intel_vgpu *vgpu, unsigned int offset,
1366 void *p_data, unsigned int bytes) 1366 void *p_data, unsigned int bytes)
1367{ 1367{
1368 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; 1368 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
1369 i915_reg_t reg = {.reg = offset}; 1369 u32 v = *(u32 *)p_data;
1370
1371 if (!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv))
1372 return intel_vgpu_default_mmio_write(vgpu,
1373 offset, p_data, bytes);
1370 1374
1371 switch (offset) { 1375 switch (offset) {
1372 case 0x4ddc: 1376 case 0x4ddc:
1373 vgpu_vreg(vgpu, offset) = 0x8000003c; 1377 /* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */
1374 /* WaCompressedResourceSamplerPbeMediaNewHashMode:skl */ 1378 vgpu_vreg(vgpu, offset) = v & ~(1 << 31);
1375 I915_WRITE(reg, vgpu_vreg(vgpu, offset));
1376 break; 1379 break;
1377 case 0x42080: 1380 case 0x42080:
1378 vgpu_vreg(vgpu, offset) = 0x8000; 1381 /* bypass WaCompressedResourceDisplayNewHashMode */
1379 /* WaCompressedResourceDisplayNewHashMode:skl */ 1382 vgpu_vreg(vgpu, offset) = v & ~(1 << 15);
1380 I915_WRITE(reg, vgpu_vreg(vgpu, offset)); 1383 break;
1384 case 0xe194:
1385 /* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */
1386 vgpu_vreg(vgpu, offset) = v & ~(1 << 8);
1387 break;
1388 case 0x7014:
1389 /* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */
1390 vgpu_vreg(vgpu, offset) = v & ~(1 << 13);
1381 break; 1391 break;
1382 default: 1392 default:
1383 return -EINVAL; 1393 return -EINVAL;
@@ -1634,7 +1644,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
1634 MMIO_DFH(GAM_ECOCHK, D_ALL, F_CMD_ACCESS, NULL, NULL); 1644 MMIO_DFH(GAM_ECOCHK, D_ALL, F_CMD_ACCESS, NULL, NULL);
1635 MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, 1645 MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
1636 NULL, NULL); 1646 NULL, NULL);
1637 MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); 1647 MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL,
1648 skl_misc_ctl_write);
1638 MMIO_DFH(0x9030, D_ALL, F_CMD_ACCESS, NULL, NULL); 1649 MMIO_DFH(0x9030, D_ALL, F_CMD_ACCESS, NULL, NULL);
1639 MMIO_DFH(0x20a0, D_ALL, F_CMD_ACCESS, NULL, NULL); 1650 MMIO_DFH(0x20a0, D_ALL, F_CMD_ACCESS, NULL, NULL);
1640 MMIO_DFH(0x2420, D_ALL, F_CMD_ACCESS, NULL, NULL); 1651 MMIO_DFH(0x2420, D_ALL, F_CMD_ACCESS, NULL, NULL);
@@ -2568,7 +2579,8 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
2568 MMIO_D(0x6e570, D_BDW_PLUS); 2579 MMIO_D(0x6e570, D_BDW_PLUS);
2569 MMIO_D(0x65f10, D_BDW_PLUS); 2580 MMIO_D(0x65f10, D_BDW_PLUS);
2570 2581
2571 MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); 2582 MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL,
2583 skl_misc_ctl_write);
2572 MMIO_DFH(0xe188, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); 2584 MMIO_DFH(0xe188, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2573 MMIO_DFH(HALF_SLICE_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); 2585 MMIO_DFH(HALF_SLICE_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2574 MMIO_DFH(0x2580, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); 2586 MMIO_DFH(0x2580, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
diff --git a/drivers/gpu/drm/i915/gvt/render.c b/drivers/gpu/drm/i915/gvt/render.c
index c6e7972ac21d..a5e11d89df2f 100644
--- a/drivers/gpu/drm/i915/gvt/render.c
+++ b/drivers/gpu/drm/i915/gvt/render.c
@@ -340,6 +340,9 @@ void intel_gvt_restore_render_mmio(struct intel_vgpu *vgpu, int ring_id)
340 } else 340 } else
341 v = mmio->value; 341 v = mmio->value;
342 342
343 if (mmio->in_context)
344 continue;
345
343 I915_WRITE(mmio->reg, v); 346 I915_WRITE(mmio->reg, v);
344 POSTING_READ(mmio->reg); 347 POSTING_READ(mmio->reg);
345 348
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index 79ba4b3440aa..f25ff133865f 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -129,9 +129,13 @@ static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
129 struct vgpu_sched_data *vgpu_data; 129 struct vgpu_sched_data *vgpu_data;
130 ktime_t cur_time; 130 ktime_t cur_time;
131 131
 132 /* no target to schedule */ 132 /* no need to schedule if next_vgpu is the same as current_vgpu;
 133 if (!scheduler->next_vgpu) 133 * let the scheduler choose next_vgpu again by setting it to NULL.
134 */
135 if (scheduler->next_vgpu == scheduler->current_vgpu) {
136 scheduler->next_vgpu = NULL;
134 return; 137 return;
138 }
135 139
136 /* 140 /*
137 * after the flag is set, workload dispatch thread will 141 * after the flag is set, workload dispatch thread will
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index d689e511744e..4bd1467c17b1 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -292,6 +292,8 @@ static int per_file_stats(int id, void *ptr, void *data)
292 struct file_stats *stats = data; 292 struct file_stats *stats = data;
293 struct i915_vma *vma; 293 struct i915_vma *vma;
294 294
295 lockdep_assert_held(&obj->base.dev->struct_mutex);
296
295 stats->count++; 297 stats->count++;
296 stats->total += obj->base.size; 298 stats->total += obj->base.size;
297 if (!obj->bind_count) 299 if (!obj->bind_count)
@@ -476,6 +478,8 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
476 struct drm_i915_gem_request *request; 478 struct drm_i915_gem_request *request;
477 struct task_struct *task; 479 struct task_struct *task;
478 480
481 mutex_lock(&dev->struct_mutex);
482
479 memset(&stats, 0, sizeof(stats)); 483 memset(&stats, 0, sizeof(stats));
480 stats.file_priv = file->driver_priv; 484 stats.file_priv = file->driver_priv;
481 spin_lock(&file->table_lock); 485 spin_lock(&file->table_lock);
@@ -487,7 +491,6 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
487 * still alive (e.g. get_pid(current) => fork() => exit()). 491 * still alive (e.g. get_pid(current) => fork() => exit()).
488 * Therefore, we need to protect this ->comm access using RCU. 492 * Therefore, we need to protect this ->comm access using RCU.
489 */ 493 */
490 mutex_lock(&dev->struct_mutex);
491 request = list_first_entry_or_null(&file_priv->mm.request_list, 494 request = list_first_entry_or_null(&file_priv->mm.request_list,
492 struct drm_i915_gem_request, 495 struct drm_i915_gem_request,
493 client_link); 496 client_link);
@@ -497,6 +500,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
497 PIDTYPE_PID); 500 PIDTYPE_PID);
498 print_file_stats(m, task ? task->comm : "<unknown>", stats); 501 print_file_stats(m, task ? task->comm : "<unknown>", stats);
499 rcu_read_unlock(); 502 rcu_read_unlock();
503
500 mutex_unlock(&dev->struct_mutex); 504 mutex_unlock(&dev->struct_mutex);
501 } 505 }
502 mutex_unlock(&dev->filelist_mutex); 506 mutex_unlock(&dev->filelist_mutex);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 3036d4835b0f..48428672fc6e 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1235,6 +1235,15 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
1235 goto out_fini; 1235 goto out_fini;
1236 1236
1237 pci_set_drvdata(pdev, &dev_priv->drm); 1237 pci_set_drvdata(pdev, &dev_priv->drm);
1238 /*
1239 * Disable the system suspend direct complete optimization, which can
1240 * leave the device suspended skipping the driver's suspend handlers
1241 * if the device was already runtime suspended. This is needed due to
1242 * the difference in our runtime and system suspend sequence and
 1243 * because the HDA driver may require us to enable the audio power
1244 * domain during system suspend.
1245 */
1246 pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME;
1238 1247
1239 ret = i915_driver_init_early(dev_priv, ent); 1248 ret = i915_driver_init_early(dev_priv, ent);
1240 if (ret < 0) 1249 if (ret < 0)
@@ -1272,10 +1281,6 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
1272 1281
1273 dev_priv->ipc_enabled = false; 1282 dev_priv->ipc_enabled = false;
1274 1283
1275 /* Everything is in place, we can now relax! */
1276 DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
1277 driver.name, driver.major, driver.minor, driver.patchlevel,
1278 driver.date, pci_name(pdev), dev_priv->drm.primary->index);
1279 if (IS_ENABLED(CONFIG_DRM_I915_DEBUG)) 1284 if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
1280 DRM_INFO("DRM_I915_DEBUG enabled\n"); 1285 DRM_INFO("DRM_I915_DEBUG enabled\n");
1281 if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) 1286 if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index c9b0949f6c1a..2c453a4e97d5 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -562,7 +562,8 @@ struct intel_link_m_n {
562 562
563void intel_link_compute_m_n(int bpp, int nlanes, 563void intel_link_compute_m_n(int bpp, int nlanes,
564 int pixel_clock, int link_clock, 564 int pixel_clock, int link_clock,
565 struct intel_link_m_n *m_n); 565 struct intel_link_m_n *m_n,
566 bool reduce_m_n);
566 567
567/* Interface history: 568/* Interface history:
568 * 569 *
@@ -2990,6 +2991,16 @@ static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv)
2990 return false; 2991 return false;
2991} 2992}
2992 2993
2994static inline bool
2995intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *dev_priv)
2996{
2997#ifdef CONFIG_INTEL_IOMMU
2998 if (IS_BROXTON(dev_priv) && intel_iommu_gfx_mapped)
2999 return true;
3000#endif
3001 return false;
3002}
3003
2993int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv, 3004int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
2994 int enable_ppgtt); 3005 int enable_ppgtt);
2995 3006
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index b6ac3df18b58..615f0a855222 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2285,8 +2285,8 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
2285 struct page *page; 2285 struct page *page;
2286 unsigned long last_pfn = 0; /* suppress gcc warning */ 2286 unsigned long last_pfn = 0; /* suppress gcc warning */
2287 unsigned int max_segment; 2287 unsigned int max_segment;
2288 gfp_t noreclaim;
2288 int ret; 2289 int ret;
2289 gfp_t gfp;
2290 2290
2291 /* Assert that the object is not currently in any GPU domain. As it 2291 /* Assert that the object is not currently in any GPU domain. As it
2292 * wasn't in the GTT, there shouldn't be any way it could have been in 2292 * wasn't in the GTT, there shouldn't be any way it could have been in
@@ -2315,22 +2315,31 @@ rebuild_st:
2315 * Fail silently without starting the shrinker 2315 * Fail silently without starting the shrinker
2316 */ 2316 */
2317 mapping = obj->base.filp->f_mapping; 2317 mapping = obj->base.filp->f_mapping;
2318 gfp = mapping_gfp_constraint(mapping, ~(__GFP_IO | __GFP_RECLAIM)); 2318 noreclaim = mapping_gfp_constraint(mapping,
2319 gfp |= __GFP_NORETRY | __GFP_NOWARN; 2319 ~(__GFP_IO | __GFP_RECLAIM));
2320 noreclaim |= __GFP_NORETRY | __GFP_NOWARN;
2321
2320 sg = st->sgl; 2322 sg = st->sgl;
2321 st->nents = 0; 2323 st->nents = 0;
2322 for (i = 0; i < page_count; i++) { 2324 for (i = 0; i < page_count; i++) {
2323 page = shmem_read_mapping_page_gfp(mapping, i, gfp); 2325 const unsigned int shrink[] = {
2324 if (unlikely(IS_ERR(page))) { 2326 I915_SHRINK_BOUND | I915_SHRINK_UNBOUND | I915_SHRINK_PURGEABLE,
2325 i915_gem_shrink(dev_priv, 2327 0,
2326 page_count, 2328 }, *s = shrink;
2327 I915_SHRINK_BOUND | 2329 gfp_t gfp = noreclaim;
2328 I915_SHRINK_UNBOUND | 2330
2329 I915_SHRINK_PURGEABLE); 2331 do {
2330 page = shmem_read_mapping_page_gfp(mapping, i, gfp); 2332 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
2331 } 2333 if (likely(!IS_ERR(page)))
2332 if (unlikely(IS_ERR(page))) { 2334 break;
2333 gfp_t reclaim; 2335
2336 if (!*s) {
2337 ret = PTR_ERR(page);
2338 goto err_sg;
2339 }
2340
2341 i915_gem_shrink(dev_priv, 2 * page_count, *s++);
2342 cond_resched();
2334 2343
2335 /* We've tried hard to allocate the memory by reaping 2344 /* We've tried hard to allocate the memory by reaping
2336 * our own buffer, now let the real VM do its job and 2345 * our own buffer, now let the real VM do its job and
@@ -2340,15 +2349,26 @@ rebuild_st:
2340 * defer the oom here by reporting the ENOMEM back 2349 * defer the oom here by reporting the ENOMEM back
2341 * to userspace. 2350 * to userspace.
2342 */ 2351 */
2343 reclaim = mapping_gfp_mask(mapping); 2352 if (!*s) {
2344 reclaim |= __GFP_NORETRY; /* reclaim, but no oom */ 2353 /* reclaim and warn, but no oom */
2345 2354 gfp = mapping_gfp_mask(mapping);
2346 page = shmem_read_mapping_page_gfp(mapping, i, reclaim); 2355
2347 if (IS_ERR(page)) { 2356 /* Our bo are always dirty and so we require
2348 ret = PTR_ERR(page); 2357 * kswapd to reclaim our pages (direct reclaim
2349 goto err_sg; 2358 * does not effectively begin pageout of our
2359 * buffers on its own). However, direct reclaim
2360 * only waits for kswapd when under allocation
2361 * congestion. So as a result __GFP_RECLAIM is
2362 * unreliable and fails to actually reclaim our
2363 * dirty pages -- unless you try over and over
2364 * again with !__GFP_NORETRY. However, we still
2365 * want to fail this allocation rather than
2366 * trigger the out-of-memory killer and for
2367 * this we want the future __GFP_MAYFAIL.
2368 */
2350 } 2369 }
2351 } 2370 } while (1);
2371
2352 if (!i || 2372 if (!i ||
2353 sg->length >= max_segment || 2373 sg->length >= max_segment ||
2354 page_to_pfn(page) != last_pfn + 1) { 2374 page_to_pfn(page) != last_pfn + 1) {
@@ -3298,6 +3318,10 @@ int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags)
3298{ 3318{
3299 int ret; 3319 int ret;
3300 3320
3321 /* If the device is asleep, we have no requests outstanding */
3322 if (!READ_ONCE(i915->gt.awake))
3323 return 0;
3324
3301 if (flags & I915_WAIT_LOCKED) { 3325 if (flags & I915_WAIT_LOCKED) {
3302 struct i915_gem_timeline *tl; 3326 struct i915_gem_timeline *tl;
3303 3327
@@ -4218,6 +4242,7 @@ i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size)
4218 4242
4219 mapping = obj->base.filp->f_mapping; 4243 mapping = obj->base.filp->f_mapping;
4220 mapping_set_gfp_mask(mapping, mask); 4244 mapping_set_gfp_mask(mapping, mask);
4245 GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));
4221 4246
4222 i915_gem_object_init(obj, &i915_gem_object_ops); 4247 i915_gem_object_init(obj, &i915_gem_object_ops);
4223 4248
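
The page-population rewrite above replaces a single shrink-and-retry with a staged fallback: start with a cheap no-reclaim allocation, run progressively harsher shrinker passes (the 0-terminated shrink[] array) between failed attempts, and only switch to the full-reclaim gfp mask as a last resort before reporting -ENOMEM. The standalone sketch below models that escalation loop; the stage masks, stubs, and fake memory counter are all invented.

#include <stdbool.h>
#include <stdio.h>

static int free_pages_avail;	/* stub "memory" the shrinker refills */

/* Stub allocator: cheap mode needs free pages, aggressive always works. */
static int try_alloc(bool aggressive)
{
	if (free_pages_avail > 0 || aggressive) {
		free_pages_avail--;
		return 0;
	}
	return -1;		/* would be -ENOMEM */
}

static void shrink(unsigned int stage)
{
	printf("shrink pass, stage mask 0x%x\n", stage);
	free_pages_avail++;	/* pretend a page was reclaimed */
}

int main(void)
{
	/* Escalating stages, 0-terminated like the shrink[] array above. */
	const unsigned int stages[] = { 0x1, 0x3, 0 };
	const unsigned int *s = stages;
	bool aggressive = false;	/* begin with the no-reclaim gfp */

	for (;;) {
		if (!try_alloc(aggressive)) {
			puts("allocated");
			return 0;
		}
		if (!*s) {
			puts("out of fallbacks: report -ENOMEM");
			return 1;
		}
		shrink(*s++);
		if (!*s)
			aggressive = true;	/* last resort: full reclaim */
	}
}
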
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index a3e59c8ef27b..9ad13eeed904 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -546,11 +546,12 @@ repeat:
546} 546}
547 547
548static int 548static int
549i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, 549i915_gem_execbuffer_relocate_entry(struct i915_vma *vma,
550 struct eb_vmas *eb, 550 struct eb_vmas *eb,
551 struct drm_i915_gem_relocation_entry *reloc, 551 struct drm_i915_gem_relocation_entry *reloc,
552 struct reloc_cache *cache) 552 struct reloc_cache *cache)
553{ 553{
554 struct drm_i915_gem_object *obj = vma->obj;
554 struct drm_i915_private *dev_priv = to_i915(obj->base.dev); 555 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
555 struct drm_gem_object *target_obj; 556 struct drm_gem_object *target_obj;
556 struct drm_i915_gem_object *target_i915_obj; 557 struct drm_i915_gem_object *target_i915_obj;
@@ -628,6 +629,16 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
628 return -EINVAL; 629 return -EINVAL;
629 } 630 }
630 631
632 /*
633 * If we write into the object, we need to force the synchronisation
634 * barrier, either with an asynchronous clflush or if we executed the
635 * patching using the GPU (though that should be serialised by the
636 * timeline). To be completely sure, and since we are required to
637 * do relocations we are already stalling, disable the user's opt
638 * of our synchronisation.
639 */
640 vma->exec_entry->flags &= ~EXEC_OBJECT_ASYNC;
641
631 ret = relocate_entry(obj, reloc, cache, target_offset); 642 ret = relocate_entry(obj, reloc, cache, target_offset);
632 if (ret) 643 if (ret)
633 return ret; 644 return ret;
@@ -678,7 +689,7 @@ i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
678 do { 689 do {
679 u64 offset = r->presumed_offset; 690 u64 offset = r->presumed_offset;
680 691
681 ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r, &cache); 692 ret = i915_gem_execbuffer_relocate_entry(vma, eb, r, &cache);
682 if (ret) 693 if (ret)
683 goto out; 694 goto out;
684 695
@@ -726,7 +737,7 @@ i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
726 737
727 reloc_cache_init(&cache, eb->i915); 738 reloc_cache_init(&cache, eb->i915);
728 for (i = 0; i < entry->relocation_count; i++) { 739 for (i = 0; i < entry->relocation_count; i++) {
729 ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i], &cache); 740 ret = i915_gem_execbuffer_relocate_entry(vma, eb, &relocs[i], &cache);
730 if (ret) 741 if (ret)
731 break; 742 break;
732 } 743 }
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 2aa6b97fd22f..f1989b8792dd 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -195,9 +195,12 @@ static int ppgtt_bind_vma(struct i915_vma *vma,
195 u32 pte_flags; 195 u32 pte_flags;
196 int ret; 196 int ret;
197 197
198 ret = vma->vm->allocate_va_range(vma->vm, vma->node.start, vma->size); 198 if (!(vma->flags & I915_VMA_LOCAL_BIND)) {
199 if (ret) 199 ret = vma->vm->allocate_va_range(vma->vm, vma->node.start,
200 return ret; 200 vma->size);
201 if (ret)
202 return ret;
203 }
201 204
202 vma->pages = vma->obj->mm.pages; 205 vma->pages = vma->obj->mm.pages;
203 206
@@ -2188,6 +2191,101 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
2188 gen8_set_pte(&gtt_base[i], scratch_pte); 2191 gen8_set_pte(&gtt_base[i], scratch_pte);
2189} 2192}
2190 2193
2194static void bxt_vtd_ggtt_wa(struct i915_address_space *vm)
2195{
2196 struct drm_i915_private *dev_priv = vm->i915;
2197
2198 /*
2199 * Make sure the internal GAM fifo has been cleared of all GTT
2200 * writes before exiting stop_machine(). This guarantees that
2201 * any aperture accesses waiting to start in another process
2202 * cannot back up behind the GTT writes causing a hang.
2203 * The register can be any arbitrary GAM register.
2204 */
2205 POSTING_READ(GFX_FLSH_CNTL_GEN6);
2206}
2207
2208struct insert_page {
2209 struct i915_address_space *vm;
2210 dma_addr_t addr;
2211 u64 offset;
2212 enum i915_cache_level level;
2213};
2214
2215static int bxt_vtd_ggtt_insert_page__cb(void *_arg)
2216{
2217 struct insert_page *arg = _arg;
2218
2219 gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0);
2220 bxt_vtd_ggtt_wa(arg->vm);
2221
2222 return 0;
2223}
2224
2225static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
2226 dma_addr_t addr,
2227 u64 offset,
2228 enum i915_cache_level level,
2229 u32 unused)
2230{
2231 struct insert_page arg = { vm, addr, offset, level };
2232
2233 stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL);
2234}
2235
2236struct insert_entries {
2237 struct i915_address_space *vm;
2238 struct sg_table *st;
2239 u64 start;
2240 enum i915_cache_level level;
2241};
2242
2243static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
2244{
2245 struct insert_entries *arg = _arg;
2246
2247 gen8_ggtt_insert_entries(arg->vm, arg->st, arg->start, arg->level, 0);
2248 bxt_vtd_ggtt_wa(arg->vm);
2249
2250 return 0;
2251}
2252
2253static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
2254 struct sg_table *st,
2255 u64 start,
2256 enum i915_cache_level level,
2257 u32 unused)
2258{
2259 struct insert_entries arg = { vm, st, start, level };
2260
2261 stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
2262}
2263
2264struct clear_range {
2265 struct i915_address_space *vm;
2266 u64 start;
2267 u64 length;
2268};
2269
2270static int bxt_vtd_ggtt_clear_range__cb(void *_arg)
2271{
2272 struct clear_range *arg = _arg;
2273
2274 gen8_ggtt_clear_range(arg->vm, arg->start, arg->length);
2275 bxt_vtd_ggtt_wa(arg->vm);
2276
2277 return 0;
2278}
2279
2280static void bxt_vtd_ggtt_clear_range__BKL(struct i915_address_space *vm,
2281 u64 start,
2282 u64 length)
2283{
2284 struct clear_range arg = { vm, start, length };
2285
2286 stop_machine(bxt_vtd_ggtt_clear_range__cb, &arg, NULL);
2287}
2288
2191static void gen6_ggtt_clear_range(struct i915_address_space *vm, 2289static void gen6_ggtt_clear_range(struct i915_address_space *vm,
2192 u64 start, u64 length) 2290 u64 start, u64 length)
2193{ 2291{
@@ -2306,10 +2404,11 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
2306 if (flags & I915_VMA_LOCAL_BIND) { 2404 if (flags & I915_VMA_LOCAL_BIND) {
2307 struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt; 2405 struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt;
2308 2406
2309 if (appgtt->base.allocate_va_range) { 2407 if (!(vma->flags & I915_VMA_LOCAL_BIND) &&
2408 appgtt->base.allocate_va_range) {
2310 ret = appgtt->base.allocate_va_range(&appgtt->base, 2409 ret = appgtt->base.allocate_va_range(&appgtt->base,
2311 vma->node.start, 2410 vma->node.start,
2312 vma->node.size); 2411 vma->size);
2313 if (ret) 2412 if (ret)
2314 goto err_pages; 2413 goto err_pages;
2315 } 2414 }
@@ -2781,6 +2880,14 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
2781 2880
2782 ggtt->base.insert_entries = gen8_ggtt_insert_entries; 2881 ggtt->base.insert_entries = gen8_ggtt_insert_entries;
2783 2882
2883 /* Serialize GTT updates with aperture access on BXT if VT-d is on. */
2884 if (intel_ggtt_update_needs_vtd_wa(dev_priv)) {
2885 ggtt->base.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
2886 ggtt->base.insert_page = bxt_vtd_ggtt_insert_page__BKL;
2887 if (ggtt->base.clear_range != nop_clear_range)
2888 ggtt->base.clear_range = bxt_vtd_ggtt_clear_range__BKL;
2889 }
2890
2784 ggtt->invalidate = gen6_ggtt_invalidate; 2891 ggtt->invalidate = gen6_ggtt_invalidate;
2785 2892
2786 return ggtt_probe_common(ggtt, size); 2893 return ggtt_probe_common(ggtt, size);
@@ -2993,7 +3100,8 @@ void i915_ggtt_enable_guc(struct drm_i915_private *i915)
2993 3100
2994void i915_ggtt_disable_guc(struct drm_i915_private *i915) 3101void i915_ggtt_disable_guc(struct drm_i915_private *i915)
2995{ 3102{
2996 i915->ggtt.invalidate = gen6_ggtt_invalidate; 3103 if (i915->ggtt.invalidate == guc_ggtt_invalidate)
3104 i915->ggtt.invalidate = gen6_ggtt_invalidate;
2997} 3105}
2998 3106
2999void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv) 3107void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
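
All of the bxt_vtd_ggtt_*__BKL wrappers added above follow one pattern: pack the arguments into a small struct, run the real GGTT update inside stop_machine() so nothing else executes concurrently, then flush the GAM fifo with a posting read before returning. A standalone model of that argument-packing shape (plain C, with a direct call standing in for stop_machine(); all names hypothetical):

#include <stdio.h>

struct insert_args {
        unsigned long long addr;
        unsigned long long offset;
};

/* Callback with the stop_machine()-style void * signature. */
static int insert_page_cb(void *data)
{
        struct insert_args *arg = data;

        /* Real code: gen8_ggtt_insert_page(...), then a posting
         * read of a GAM register to drain the write fifo. */
        printf("map %#llx at GGTT offset %#llx\n", arg->addr, arg->offset);
        return 0;
}

static void insert_page_serialized(unsigned long long addr,
                                   unsigned long long offset)
{
        struct insert_args arg = { addr, offset };

        /* Real code: stop_machine(insert_page_cb, &arg, NULL); */
        insert_page_cb(&arg);
}

int main(void)
{
        insert_page_serialized(0x100000, 0x2000);
        return 0;
}
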
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 5ddbc9499775..a74d0ac737cb 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -623,7 +623,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
623 * GPU processing the request, we never over-estimate the 623 * GPU processing the request, we never over-estimate the
624 * position of the head. 624 * position of the head.
625 */ 625 */
626 req->head = req->ring->tail; 626 req->head = req->ring->emit;
627 627
628 /* Check that we didn't interrupt ourselves with a new request */ 628 /* Check that we didn't interrupt ourselves with a new request */
629 GEM_BUG_ON(req->timeline->seqno != req->fence.seqno); 629 GEM_BUG_ON(req->timeline->seqno != req->fence.seqno);
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 129ed303a6c4..57d9f7f4ef15 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -59,9 +59,6 @@ static void i915_gem_shrinker_unlock(struct drm_device *dev, bool unlock)
59 return; 59 return;
60 60
61 mutex_unlock(&dev->struct_mutex); 61 mutex_unlock(&dev->struct_mutex);
62
63 /* expedite the RCU grace period to free some request slabs */
64 synchronize_rcu_expedited();
65} 62}
66 63
67static bool any_vma_pinned(struct drm_i915_gem_object *obj) 64static bool any_vma_pinned(struct drm_i915_gem_object *obj)
@@ -274,8 +271,6 @@ unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
274 I915_SHRINK_ACTIVE); 271 I915_SHRINK_ACTIVE);
275 intel_runtime_pm_put(dev_priv); 272 intel_runtime_pm_put(dev_priv);
276 273
277 synchronize_rcu(); /* wait for our earlier RCU delayed slab frees */
278
279 return freed; 274 return freed;
280} 275}
281 276
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index a0d6d4317a49..fb5231f98c0d 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -278,7 +278,7 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
278 obj->mm.quirked = false; 278 obj->mm.quirked = false;
279 } 279 }
280 if (!i915_gem_object_is_tiled(obj)) { 280 if (!i915_gem_object_is_tiled(obj)) {
281 GEM_BUG_ON(!obj->mm.quirked); 281 GEM_BUG_ON(obj->mm.quirked);
282 __i915_gem_object_pin_pages(obj); 282 __i915_gem_object_pin_pages(obj);
283 obj->mm.quirked = true; 283 obj->mm.quirked = true;
284 } 284 }
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index 1642fff9cf13..ab5140ba108d 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -480,9 +480,7 @@ static void guc_wq_item_append(struct i915_guc_client *client,
480 GEM_BUG_ON(freespace < wqi_size); 480 GEM_BUG_ON(freespace < wqi_size);
481 481
482 /* The GuC firmware wants the tail index in QWords, not bytes */ 482 /* The GuC firmware wants the tail index in QWords, not bytes */
483 tail = rq->tail; 483 tail = intel_ring_set_tail(rq->ring, rq->tail) >> 3;
484 assert_ring_tail_valid(rq->ring, rq->tail);
485 tail >>= 3;
486 GEM_BUG_ON(tail > WQ_RING_TAIL_MAX); 484 GEM_BUG_ON(tail > WQ_RING_TAIL_MAX);
487 485
488 /* For now workqueue item is 4 DWs; workqueue buffer is 2 pages. So we 486 /* For now workqueue item is 4 DWs; workqueue buffer is 2 pages. So we
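
The GuC work-queue item above carries the ring tail in quadwords, so the byte offset returned by intel_ring_set_tail() is shifted right by 3 (8 bytes per qword). The conversion, stated as arithmetic:

#include <stdio.h>

int main(void)
{
        unsigned int tail_bytes = 0x1a8;        /* qword-aligned byte tail */
        unsigned int tail_qwords = tail_bytes >> 3;

        /* 0x1a8 bytes == 424 bytes == 53 qwords (0x35) */
        printf("%#x bytes -> %#x qwords\n", tail_bytes, tail_qwords);
        return 0;
}
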
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index fd97fe00cd0d..190f6aa5d15e 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -2953,7 +2953,6 @@ static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
2953 u32 pipestat_mask; 2953 u32 pipestat_mask;
2954 u32 enable_mask; 2954 u32 enable_mask;
2955 enum pipe pipe; 2955 enum pipe pipe;
2956 u32 val;
2957 2956
2958 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV | 2957 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
2959 PIPE_CRC_DONE_INTERRUPT_STATUS; 2958 PIPE_CRC_DONE_INTERRUPT_STATUS;
@@ -2964,18 +2963,16 @@ static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
2964 2963
2965 enable_mask = I915_DISPLAY_PORT_INTERRUPT | 2964 enable_mask = I915_DISPLAY_PORT_INTERRUPT |
2966 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 2965 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2967 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; 2966 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2967 I915_LPE_PIPE_A_INTERRUPT |
2968 I915_LPE_PIPE_B_INTERRUPT;
2969
2968 if (IS_CHERRYVIEW(dev_priv)) 2970 if (IS_CHERRYVIEW(dev_priv))
2969 enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; 2971 enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
2972 I915_LPE_PIPE_C_INTERRUPT;
2970 2973
2971 WARN_ON(dev_priv->irq_mask != ~0); 2974 WARN_ON(dev_priv->irq_mask != ~0);
2972 2975
2973 val = (I915_LPE_PIPE_A_INTERRUPT |
2974 I915_LPE_PIPE_B_INTERRUPT |
2975 I915_LPE_PIPE_C_INTERRUPT);
2976
2977 enable_mask |= val;
2978
2979 dev_priv->irq_mask = ~enable_mask; 2976 dev_priv->irq_mask = ~enable_mask;
2980 2977
2981 GEN5_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask); 2978 GEN5_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask);
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index f87b0c4e564d..1a78363c7f4a 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -208,7 +208,7 @@ static const struct intel_device_info intel_ironlake_d_info = {
208static const struct intel_device_info intel_ironlake_m_info = { 208static const struct intel_device_info intel_ironlake_m_info = {
209 GEN5_FEATURES, 209 GEN5_FEATURES,
210 .platform = INTEL_IRONLAKE, 210 .platform = INTEL_IRONLAKE,
211 .is_mobile = 1, 211 .is_mobile = 1, .has_fbc = 1,
212}; 212};
213 213
214#define GEN6_FEATURES \ 214#define GEN6_FEATURES \
@@ -390,7 +390,6 @@ static const struct intel_device_info intel_skylake_gt3_info = {
390 .has_hw_contexts = 1, \ 390 .has_hw_contexts = 1, \
391 .has_logical_ring_contexts = 1, \ 391 .has_logical_ring_contexts = 1, \
392 .has_guc = 1, \ 392 .has_guc = 1, \
393 .has_decoupled_mmio = 1, \
394 .has_aliasing_ppgtt = 1, \ 393 .has_aliasing_ppgtt = 1, \
395 .has_full_ppgtt = 1, \ 394 .has_full_ppgtt = 1, \
396 .has_full_48bit_ppgtt = 1, \ 395 .has_full_48bit_ppgtt = 1, \
diff --git a/drivers/gpu/drm/i915/i915_pvinfo.h b/drivers/gpu/drm/i915/i915_pvinfo.h
index c0cb2974caac..2cfe96d3e5d1 100644
--- a/drivers/gpu/drm/i915/i915_pvinfo.h
+++ b/drivers/gpu/drm/i915/i915_pvinfo.h
@@ -36,10 +36,6 @@
36#define VGT_VERSION_MAJOR 1 36#define VGT_VERSION_MAJOR 1
37#define VGT_VERSION_MINOR 0 37#define VGT_VERSION_MINOR 0
38 38
39#define INTEL_VGT_IF_VERSION_ENCODE(major, minor) ((major) << 16 | (minor))
40#define INTEL_VGT_IF_VERSION \
41 INTEL_VGT_IF_VERSION_ENCODE(VGT_VERSION_MAJOR, VGT_VERSION_MINOR)
42
43/* 39/*
44 * notifications from guest to vgpu device model 40 * notifications from guest to vgpu device model
45 */ 41 */
@@ -55,8 +51,8 @@ enum vgt_g2v_type {
55 51
56struct vgt_if { 52struct vgt_if {
57 u64 magic; /* VGT_MAGIC */ 53 u64 magic; /* VGT_MAGIC */
58 uint16_t version_major; 54 u16 version_major;
59 uint16_t version_minor; 55 u16 version_minor;
60 u32 vgt_id; /* ID of vGT instance */ 56 u32 vgt_id; /* ID of vGT instance */
61 u32 rsv1[12]; /* pad to offset 0x40 */ 57 u32 rsv1[12]; /* pad to offset 0x40 */
62 /* 58 /*
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 11b12f412492..65b837e96fe6 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -3051,10 +3051,14 @@ enum skl_disp_power_wells {
3051#define CLKCFG_FSB_667 (3 << 0) /* hrawclk 166 */ 3051#define CLKCFG_FSB_667 (3 << 0) /* hrawclk 166 */
3052#define CLKCFG_FSB_800 (2 << 0) /* hrawclk 200 */ 3052#define CLKCFG_FSB_800 (2 << 0) /* hrawclk 200 */
3053#define CLKCFG_FSB_1067 (6 << 0) /* hrawclk 266 */ 3053#define CLKCFG_FSB_1067 (6 << 0) /* hrawclk 266 */
3054#define CLKCFG_FSB_1067_ALT (0 << 0) /* hrawclk 266 */
3054#define CLKCFG_FSB_1333 (7 << 0) /* hrawclk 333 */ 3055#define CLKCFG_FSB_1333 (7 << 0) /* hrawclk 333 */
3055/* Note, below two are guess */ 3056/*
3056#define CLKCFG_FSB_1600 (4 << 0) /* hrawclk 400 */ 3057 * Note that on at least ELK the below value is reported for both
3057#define CLKCFG_FSB_1600_ALT (0 << 0) /* hrawclk 400 */ 3058 * 333 and 400 MHz BIOS FSB setting, but given that the gmch datasheet
 3059 * lists only 200/266/333 MHz FSB as supported, let's decode it as 333 MHz.
3060 */
3061#define CLKCFG_FSB_1333_ALT (4 << 0) /* hrawclk 333 */
3058#define CLKCFG_FSB_MASK (7 << 0) 3062#define CLKCFG_FSB_MASK (7 << 0)
3059#define CLKCFG_MEM_533 (1 << 4) 3063#define CLKCFG_MEM_533 (1 << 4)
3060#define CLKCFG_MEM_667 (2 << 4) 3064#define CLKCFG_MEM_667 (2 << 4)
@@ -8276,7 +8280,7 @@ enum {
8276 8280
8277/* MIPI DSI registers */ 8281/* MIPI DSI registers */
8278 8282
8279#define _MIPI_PORT(port, a, c) ((port) ? c : a) /* ports A and C only */ 8283#define _MIPI_PORT(port, a, c) (((port) == PORT_A) ? a : c) /* ports A and C only */
8280#define _MMIO_MIPI(port, a, c) _MMIO(_MIPI_PORT(port, a, c)) 8284#define _MMIO_MIPI(port, a, c) _MMIO(_MIPI_PORT(port, a, c))
8281 8285
8282#define MIPIO_TXESC_CLK_DIV1 _MMIO(0x160004) 8286#define MIPIO_TXESC_CLK_DIV1 _MMIO(0x160004)
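
The _MIPI_PORT change above replaces the truthiness test on the port number with an explicit comparison against PORT_A, so the macro no longer silently depends on PORT_A being zero. A standalone demo with made-up register offsets:

#include <stdio.h>

enum port { PORT_A = 0, PORT_B, PORT_C };

/* Offsets are invented for the demo; only the selection logic matters. */
#define MIPI_REG(port, a, c) (((port) == PORT_A) ? (a) : (c))

int main(void)
{
        printf("port A -> %#x\n", MIPI_REG(PORT_A, 0xb000, 0xb800));
        printf("port C -> %#x\n", MIPI_REG(PORT_C, 0xb000, 0xb800));
        return 0;
}
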
diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
index 4ab8a973b61f..2e739018fb4c 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.c
+++ b/drivers/gpu/drm/i915/i915_vgpu.c
@@ -60,8 +60,8 @@
60 */ 60 */
61void i915_check_vgpu(struct drm_i915_private *dev_priv) 61void i915_check_vgpu(struct drm_i915_private *dev_priv)
62{ 62{
63 uint64_t magic; 63 u64 magic;
64 uint32_t version; 64 u16 version_major;
65 65
66 BUILD_BUG_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE); 66 BUILD_BUG_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
67 67
@@ -69,10 +69,8 @@ void i915_check_vgpu(struct drm_i915_private *dev_priv)
69 if (magic != VGT_MAGIC) 69 if (magic != VGT_MAGIC)
70 return; 70 return;
71 71
72 version = INTEL_VGT_IF_VERSION_ENCODE( 72 version_major = __raw_i915_read16(dev_priv, vgtif_reg(version_major));
73 __raw_i915_read16(dev_priv, vgtif_reg(version_major)), 73 if (version_major < VGT_VERSION_MAJOR) {
74 __raw_i915_read16(dev_priv, vgtif_reg(version_minor)));
75 if (version != INTEL_VGT_IF_VERSION) {
76 DRM_INFO("VGT interface version mismatch!\n"); 74 DRM_INFO("VGT interface version mismatch!\n");
77 return; 75 return;
78 } 76 }
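
The i915_check_vgpu() change above stops demanding an exact encoded major/minor match and instead rejects only an interface whose major version is older than VGT_VERSION_MAJOR, which is the usual shape of a forward-compatible version gate. A hedged standalone sketch:

#include <stdbool.h>
#include <stdio.h>

#define VGT_VERSION_MAJOR 1

/* Minor-version differences are tolerated; only an older major
 * version is rejected. */
static bool vgpu_if_compatible(unsigned short version_major)
{
        return version_major >= VGT_VERSION_MAJOR;
}

int main(void)
{
        printf("v0.x: %d\n", vgpu_if_compatible(0)); /* 0: mismatch */
        printf("v1.x: %d\n", vgpu_if_compatible(1)); /* 1: ok */
        printf("v2.x: %d\n", vgpu_if_compatible(2)); /* 1: ok */
        return 0;
}
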
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 1aba47024656..f066e2d785f5 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -650,6 +650,11 @@ int i915_vma_unbind(struct i915_vma *vma)
650 break; 650 break;
651 } 651 }
652 652
653 if (!ret) {
654 ret = i915_gem_active_retire(&vma->last_fence,
655 &vma->vm->i915->drm.struct_mutex);
656 }
657
653 __i915_vma_unpin(vma); 658 __i915_vma_unpin(vma);
654 if (ret) 659 if (ret)
655 return ret; 660 return ret;
diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c
index dd3ad52b7dfe..f29a226e24d8 100644
--- a/drivers/gpu/drm/i915/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/intel_cdclk.c
@@ -1798,13 +1798,11 @@ static int g4x_hrawclk(struct drm_i915_private *dev_priv)
1798 case CLKCFG_FSB_800: 1798 case CLKCFG_FSB_800:
1799 return 200000; 1799 return 200000;
1800 case CLKCFG_FSB_1067: 1800 case CLKCFG_FSB_1067:
1801 case CLKCFG_FSB_1067_ALT:
1801 return 266667; 1802 return 266667;
1802 case CLKCFG_FSB_1333: 1803 case CLKCFG_FSB_1333:
1804 case CLKCFG_FSB_1333_ALT:
1803 return 333333; 1805 return 333333;
1804 /* these two are just a guess; one of them might be right */
1805 case CLKCFG_FSB_1600:
1806 case CLKCFG_FSB_1600_ALT:
1807 return 400000;
1808 default: 1806 default:
1809 return 133333; 1807 return 133333;
1810 } 1808 }
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 3617927af269..9106ea32b048 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -120,7 +120,8 @@ static void intel_crtc_init_scalers(struct intel_crtc *crtc,
120static void skylake_pfit_enable(struct intel_crtc *crtc); 120static void skylake_pfit_enable(struct intel_crtc *crtc);
121static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force); 121static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
122static void ironlake_pfit_enable(struct intel_crtc *crtc); 122static void ironlake_pfit_enable(struct intel_crtc *crtc);
123static void intel_modeset_setup_hw_state(struct drm_device *dev); 123static void intel_modeset_setup_hw_state(struct drm_device *dev,
124 struct drm_modeset_acquire_ctx *ctx);
124static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc); 125static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
125 126
126struct intel_limit { 127struct intel_limit {
@@ -3449,7 +3450,7 @@ __intel_display_resume(struct drm_device *dev,
3449 struct drm_crtc *crtc; 3450 struct drm_crtc *crtc;
3450 int i, ret; 3451 int i, ret;
3451 3452
3452 intel_modeset_setup_hw_state(dev); 3453 intel_modeset_setup_hw_state(dev, ctx);
3453 i915_redisable_vga(to_i915(dev)); 3454 i915_redisable_vga(to_i915(dev));
3454 3455
3455 if (!state) 3456 if (!state)
@@ -4598,7 +4599,7 @@ static void cpt_verify_modeset(struct drm_device *dev, int pipe)
4598 4599
4599static int 4600static int
4600skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach, 4601skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
4601 unsigned scaler_user, int *scaler_id, unsigned int rotation, 4602 unsigned int scaler_user, int *scaler_id,
4602 int src_w, int src_h, int dst_w, int dst_h) 4603 int src_w, int src_h, int dst_w, int dst_h)
4603{ 4604{
4604 struct intel_crtc_scaler_state *scaler_state = 4605 struct intel_crtc_scaler_state *scaler_state =
@@ -4607,9 +4608,12 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
4607 to_intel_crtc(crtc_state->base.crtc); 4608 to_intel_crtc(crtc_state->base.crtc);
4608 int need_scaling; 4609 int need_scaling;
4609 4610
4610 need_scaling = drm_rotation_90_or_270(rotation) ? 4611 /*
4611 (src_h != dst_w || src_w != dst_h): 4612 * Src coordinates are already rotated by 270 degrees for
4612 (src_w != dst_w || src_h != dst_h); 4613 * the 90/270 degree plane rotation cases (to match the
4614 * GTT mapping), hence no need to account for rotation here.
4615 */
4616 need_scaling = src_w != dst_w || src_h != dst_h;
4613 4617
4614 /* 4618 /*
4615 * if plane is being disabled or scaler is no more required or force detach 4619 * if plane is being disabled or scaler is no more required or force detach
@@ -4671,7 +4675,7 @@ int skl_update_scaler_crtc(struct intel_crtc_state *state)
4671 const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode; 4675 const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;
4672 4676
4673 return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX, 4677 return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
4674 &state->scaler_state.scaler_id, DRM_ROTATE_0, 4678 &state->scaler_state.scaler_id,
4675 state->pipe_src_w, state->pipe_src_h, 4679 state->pipe_src_w, state->pipe_src_h,
4676 adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay); 4680 adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay);
4677} 4681}
@@ -4700,7 +4704,6 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
4700 ret = skl_update_scaler(crtc_state, force_detach, 4704 ret = skl_update_scaler(crtc_state, force_detach,
4701 drm_plane_index(&intel_plane->base), 4705 drm_plane_index(&intel_plane->base),
4702 &plane_state->scaler_id, 4706 &plane_state->scaler_id,
4703 plane_state->base.rotation,
4704 drm_rect_width(&plane_state->base.src) >> 16, 4707 drm_rect_width(&plane_state->base.src) >> 16,
4705 drm_rect_height(&plane_state->base.src) >> 16, 4708 drm_rect_height(&plane_state->base.src) >> 16,
4706 drm_rect_width(&plane_state->base.dst), 4709 drm_rect_width(&plane_state->base.dst),
@@ -5823,7 +5826,8 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
5823 intel_update_watermarks(intel_crtc); 5826 intel_update_watermarks(intel_crtc);
5824} 5827}
5825 5828
5826static void intel_crtc_disable_noatomic(struct drm_crtc *crtc) 5829static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
5830 struct drm_modeset_acquire_ctx *ctx)
5827{ 5831{
5828 struct intel_encoder *encoder; 5832 struct intel_encoder *encoder;
5829 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5833 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -5853,7 +5857,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
5853 return; 5857 return;
5854 } 5858 }
5855 5859
5856 state->acquire_ctx = crtc->dev->mode_config.acquire_ctx; 5860 state->acquire_ctx = ctx;
5857 5861
5858 /* Everything's already locked, -EDEADLK can't happen. */ 5862 /* Everything's already locked, -EDEADLK can't happen. */
5859 crtc_state = intel_atomic_get_crtc_state(state, intel_crtc); 5863 crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
@@ -6101,7 +6105,7 @@ retry:
6101 pipe_config->fdi_lanes = lane; 6105 pipe_config->fdi_lanes = lane;
6102 6106
6103 intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock, 6107 intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
6104 link_bw, &pipe_config->fdi_m_n); 6108 link_bw, &pipe_config->fdi_m_n, false);
6105 6109
6106 ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config); 6110 ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
6107 if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) { 6111 if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
@@ -6277,7 +6281,8 @@ intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
6277} 6281}
6278 6282
6279static void compute_m_n(unsigned int m, unsigned int n, 6283static void compute_m_n(unsigned int m, unsigned int n,
6280 uint32_t *ret_m, uint32_t *ret_n) 6284 uint32_t *ret_m, uint32_t *ret_n,
6285 bool reduce_m_n)
6281{ 6286{
6282 /* 6287 /*
6283 * Reduce M/N as much as possible without loss in precision. Several DP 6288 * Reduce M/N as much as possible without loss in precision. Several DP
@@ -6285,9 +6290,11 @@ static void compute_m_n(unsigned int m, unsigned int n,
6285 * values. The passed in values are more likely to have the least 6290 * values. The passed in values are more likely to have the least
6286 * significant bits zero than M after rounding below, so do this first. 6291 * significant bits zero than M after rounding below, so do this first.
6287 */ 6292 */
6288 while ((m & 1) == 0 && (n & 1) == 0) { 6293 if (reduce_m_n) {
6289 m >>= 1; 6294 while ((m & 1) == 0 && (n & 1) == 0) {
6290 n >>= 1; 6295 m >>= 1;
6296 n >>= 1;
6297 }
6291 } 6298 }
6292 6299
6293 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX); 6300 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
@@ -6298,16 +6305,19 @@ static void compute_m_n(unsigned int m, unsigned int n,
6298void 6305void
6299intel_link_compute_m_n(int bits_per_pixel, int nlanes, 6306intel_link_compute_m_n(int bits_per_pixel, int nlanes,
6300 int pixel_clock, int link_clock, 6307 int pixel_clock, int link_clock,
6301 struct intel_link_m_n *m_n) 6308 struct intel_link_m_n *m_n,
6309 bool reduce_m_n)
6302{ 6310{
6303 m_n->tu = 64; 6311 m_n->tu = 64;
6304 6312
6305 compute_m_n(bits_per_pixel * pixel_clock, 6313 compute_m_n(bits_per_pixel * pixel_clock,
6306 link_clock * nlanes * 8, 6314 link_clock * nlanes * 8,
6307 &m_n->gmch_m, &m_n->gmch_n); 6315 &m_n->gmch_m, &m_n->gmch_n,
6316 reduce_m_n);
6308 6317
6309 compute_m_n(pixel_clock, link_clock, 6318 compute_m_n(pixel_clock, link_clock,
6310 &m_n->link_m, &m_n->link_n); 6319 &m_n->link_m, &m_n->link_n,
6320 reduce_m_n);
6311} 6321}
6312 6322
6313static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv) 6323static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
@@ -12197,6 +12207,15 @@ static void update_scanline_offset(struct intel_crtc *crtc)
12197 * type. For DP ports it behaves like most other platforms, but on HDMI 12207 * type. For DP ports it behaves like most other platforms, but on HDMI
12198 * there's an extra 1 line difference. So we need to add two instead of 12208 * there's an extra 1 line difference. So we need to add two instead of
12199 * one to the value. 12209 * one to the value.
12210 *
12211 * On VLV/CHV DSI the scanline counter would appear to increment
12212 * approx. 1/3 of a scanline before start of vblank. Unfortunately
12213 * that means we can't tell whether we're in vblank or not while
12214 * we're on that particular line. We must still set scanline_offset
12215 * to 1 so that the vblank timestamps come out correct when we query
12216 * the scanline counter from within the vblank interrupt handler.
12217 * However if queried just before the start of vblank we'll get an
12218 * answer that's slightly in the future.
12200 */ 12219 */
12201 if (IS_GEN2(dev_priv)) { 12220 if (IS_GEN2(dev_priv)) {
12202 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode; 12221 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
@@ -15013,7 +15032,7 @@ int intel_modeset_init(struct drm_device *dev)
15013 intel_setup_outputs(dev_priv); 15032 intel_setup_outputs(dev_priv);
15014 15033
15015 drm_modeset_lock_all(dev); 15034 drm_modeset_lock_all(dev);
15016 intel_modeset_setup_hw_state(dev); 15035 intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
15017 drm_modeset_unlock_all(dev); 15036 drm_modeset_unlock_all(dev);
15018 15037
15019 for_each_intel_crtc(dev, crtc) { 15038 for_each_intel_crtc(dev, crtc) {
@@ -15050,13 +15069,13 @@ int intel_modeset_init(struct drm_device *dev)
15050 return 0; 15069 return 0;
15051} 15070}
15052 15071
15053static void intel_enable_pipe_a(struct drm_device *dev) 15072static void intel_enable_pipe_a(struct drm_device *dev,
15073 struct drm_modeset_acquire_ctx *ctx)
15054{ 15074{
15055 struct intel_connector *connector; 15075 struct intel_connector *connector;
15056 struct drm_connector_list_iter conn_iter; 15076 struct drm_connector_list_iter conn_iter;
15057 struct drm_connector *crt = NULL; 15077 struct drm_connector *crt = NULL;
15058 struct intel_load_detect_pipe load_detect_temp; 15078 struct intel_load_detect_pipe load_detect_temp;
15059 struct drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx;
15060 int ret; 15079 int ret;
15061 15080
15062 /* We can't just switch on the pipe A, we need to set things up with a 15081 /* We can't just switch on the pipe A, we need to set things up with a
@@ -15128,7 +15147,8 @@ static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
15128 (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == TRANSCODER_A); 15147 (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == TRANSCODER_A);
15129} 15148}
15130 15149
15131static void intel_sanitize_crtc(struct intel_crtc *crtc) 15150static void intel_sanitize_crtc(struct intel_crtc *crtc,
15151 struct drm_modeset_acquire_ctx *ctx)
15132{ 15152{
15133 struct drm_device *dev = crtc->base.dev; 15153 struct drm_device *dev = crtc->base.dev;
15134 struct drm_i915_private *dev_priv = to_i915(dev); 15154 struct drm_i915_private *dev_priv = to_i915(dev);
@@ -15174,7 +15194,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
15174 plane = crtc->plane; 15194 plane = crtc->plane;
15175 crtc->base.primary->state->visible = true; 15195 crtc->base.primary->state->visible = true;
15176 crtc->plane = !plane; 15196 crtc->plane = !plane;
15177 intel_crtc_disable_noatomic(&crtc->base); 15197 intel_crtc_disable_noatomic(&crtc->base, ctx);
15178 crtc->plane = plane; 15198 crtc->plane = plane;
15179 } 15199 }
15180 15200
@@ -15184,13 +15204,13 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
15184 * resume. Force-enable the pipe to fix this, the update_dpms 15204 * resume. Force-enable the pipe to fix this, the update_dpms
15185 * call below we restore the pipe to the right state, but leave 15205 * call below we restore the pipe to the right state, but leave
15186 * the required bits on. */ 15206 * the required bits on. */
15187 intel_enable_pipe_a(dev); 15207 intel_enable_pipe_a(dev, ctx);
15188 } 15208 }
15189 15209
15190 /* Adjust the state of the output pipe according to whether we 15210 /* Adjust the state of the output pipe according to whether we
15191 * have active connectors/encoders. */ 15211 * have active connectors/encoders. */
15192 if (crtc->active && !intel_crtc_has_encoders(crtc)) 15212 if (crtc->active && !intel_crtc_has_encoders(crtc))
15193 intel_crtc_disable_noatomic(&crtc->base); 15213 intel_crtc_disable_noatomic(&crtc->base, ctx);
15194 15214
15195 if (crtc->active || HAS_GMCH_DISPLAY(dev_priv)) { 15215 if (crtc->active || HAS_GMCH_DISPLAY(dev_priv)) {
15196 /* 15216 /*
@@ -15488,7 +15508,8 @@ get_encoder_power_domains(struct drm_i915_private *dev_priv)
15488 * and sanitizes it to the current state 15508 * and sanitizes it to the current state
15489 */ 15509 */
15490static void 15510static void
15491intel_modeset_setup_hw_state(struct drm_device *dev) 15511intel_modeset_setup_hw_state(struct drm_device *dev,
15512 struct drm_modeset_acquire_ctx *ctx)
15492{ 15513{
15493 struct drm_i915_private *dev_priv = to_i915(dev); 15514 struct drm_i915_private *dev_priv = to_i915(dev);
15494 enum pipe pipe; 15515 enum pipe pipe;
@@ -15508,7 +15529,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev)
15508 for_each_pipe(dev_priv, pipe) { 15529 for_each_pipe(dev_priv, pipe) {
15509 crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 15530 crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
15510 15531
15511 intel_sanitize_crtc(crtc); 15532 intel_sanitize_crtc(crtc, ctx);
15512 intel_dump_pipe_config(crtc, crtc->config, 15533 intel_dump_pipe_config(crtc, crtc->config,
15513 "[setup_hw_state]"); 15534 "[setup_hw_state]");
15514 } 15535 }
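
The compute_m_n() change above makes the common-factor-of-two reduction conditional: most sinks benefit from smaller M/N values before N is rounded up to a power of two, but sinks with the DP_DPCD_QUIRK_LIMITED_M_N quirk need the unreduced values. The reduction step itself, as a standalone function with example numbers (a sketch, not the kernel function):

#include <stdio.h>

/* Strip common factors of two from m/n, as in the reduce_m_n
 * branch above. */
static void reduce_m_n(unsigned int *m, unsigned int *n, int reduce)
{
        if (!reduce)
                return;

        while ((*m & 1) == 0 && (*n & 1) == 0) {
                *m >>= 1;
                *n >>= 1;
        }
}

int main(void)
{
        /* e.g. 24 bpp * 148.5 MHz pixel clock over 4 lanes at HBR2 */
        unsigned int m = 24 * 148500;
        unsigned int n = 540000 * 4 * 8;

        reduce_m_n(&m, &n, 1);
        printf("m=%u n=%u\n", m, n);
        return 0;
}
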
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index ee77b519835c..fc691b8b317c 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1507,37 +1507,6 @@ static void intel_dp_print_rates(struct intel_dp *intel_dp)
1507 DRM_DEBUG_KMS("common rates: %s\n", str); 1507 DRM_DEBUG_KMS("common rates: %s\n", str);
1508} 1508}
1509 1509
1510bool
1511__intel_dp_read_desc(struct intel_dp *intel_dp, struct intel_dp_desc *desc)
1512{
1513 u32 base = drm_dp_is_branch(intel_dp->dpcd) ? DP_BRANCH_OUI :
1514 DP_SINK_OUI;
1515
1516 return drm_dp_dpcd_read(&intel_dp->aux, base, desc, sizeof(*desc)) ==
1517 sizeof(*desc);
1518}
1519
1520bool intel_dp_read_desc(struct intel_dp *intel_dp)
1521{
1522 struct intel_dp_desc *desc = &intel_dp->desc;
1523 bool oui_sup = intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] &
1524 DP_OUI_SUPPORT;
1525 int dev_id_len;
1526
1527 if (!__intel_dp_read_desc(intel_dp, desc))
1528 return false;
1529
1530 dev_id_len = strnlen(desc->device_id, sizeof(desc->device_id));
1531 DRM_DEBUG_KMS("DP %s: OUI %*phD%s dev-ID %*pE HW-rev %d.%d SW-rev %d.%d\n",
1532 drm_dp_is_branch(intel_dp->dpcd) ? "branch" : "sink",
1533 (int)sizeof(desc->oui), desc->oui, oui_sup ? "" : "(NS)",
1534 dev_id_len, desc->device_id,
1535 desc->hw_rev >> 4, desc->hw_rev & 0xf,
1536 desc->sw_major_rev, desc->sw_minor_rev);
1537
1538 return true;
1539}
1540
1541static int rate_to_index(int find, const int *rates) 1510static int rate_to_index(int find, const int *rates)
1542{ 1511{
1543 int i = 0; 1512 int i = 0;
@@ -1624,6 +1593,8 @@ intel_dp_compute_config(struct intel_encoder *encoder,
1624 int common_rates[DP_MAX_SUPPORTED_RATES] = {}; 1593 int common_rates[DP_MAX_SUPPORTED_RATES] = {};
1625 int common_len; 1594 int common_len;
1626 uint8_t link_bw, rate_select; 1595 uint8_t link_bw, rate_select;
1596 bool reduce_m_n = drm_dp_has_quirk(&intel_dp->desc,
1597 DP_DPCD_QUIRK_LIMITED_M_N);
1627 1598
1628 common_len = intel_dp_common_rates(intel_dp, common_rates); 1599 common_len = intel_dp_common_rates(intel_dp, common_rates);
1629 1600
@@ -1753,7 +1724,8 @@ found:
1753 intel_link_compute_m_n(bpp, lane_count, 1724 intel_link_compute_m_n(bpp, lane_count,
1754 adjusted_mode->crtc_clock, 1725 adjusted_mode->crtc_clock,
1755 pipe_config->port_clock, 1726 pipe_config->port_clock,
1756 &pipe_config->dp_m_n); 1727 &pipe_config->dp_m_n,
1728 reduce_m_n);
1757 1729
1758 if (intel_connector->panel.downclock_mode != NULL && 1730 if (intel_connector->panel.downclock_mode != NULL &&
1759 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) { 1731 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
@@ -1761,7 +1733,8 @@ found:
1761 intel_link_compute_m_n(bpp, lane_count, 1733 intel_link_compute_m_n(bpp, lane_count,
1762 intel_connector->panel.downclock_mode->clock, 1734 intel_connector->panel.downclock_mode->clock,
1763 pipe_config->port_clock, 1735 pipe_config->port_clock,
1764 &pipe_config->dp_m2_n2); 1736 &pipe_config->dp_m2_n2,
1737 reduce_m_n);
1765 } 1738 }
1766 1739
1767 /* 1740 /*
@@ -3622,7 +3595,8 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
3622 if (!intel_dp_read_dpcd(intel_dp)) 3595 if (!intel_dp_read_dpcd(intel_dp))
3623 return false; 3596 return false;
3624 3597
3625 intel_dp_read_desc(intel_dp); 3598 drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
3599 drm_dp_is_branch(intel_dp->dpcd));
3626 3600
3627 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) 3601 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
3628 dev_priv->no_aux_handshake = intel_dp->dpcd[DP_MAX_DOWNSPREAD] & 3602 dev_priv->no_aux_handshake = intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
@@ -4624,7 +4598,8 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
4624 4598
4625 intel_dp_print_rates(intel_dp); 4599 intel_dp_print_rates(intel_dp);
4626 4600
4627 intel_dp_read_desc(intel_dp); 4601 drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
4602 drm_dp_is_branch(intel_dp->dpcd));
4628 4603
4629 intel_dp_configure_mst(intel_dp); 4604 intel_dp_configure_mst(intel_dp);
4630 4605
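
drm_dp_read_desc(), which replaces the driver-local descriptor reader above, also matches the descriptor's OUI against a small quirk database, so callers can later test drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_LIMITED_M_N). A standalone model of that OUI-keyed lookup (the table entry is hypothetical; the real table lives in the drm core):

#include <stdio.h>
#include <string.h>

#define DP_DPCD_QUIRK_LIMITED_M_N (1u << 0)

struct dpcd_quirk {
        unsigned char oui[3];
        unsigned int quirks;
};

/* Hypothetical entry for the demo. */
static const struct dpcd_quirk quirk_list[] = {
        { { 0x00, 0x22, 0xb9 }, DP_DPCD_QUIRK_LIMITED_M_N },
};

static unsigned int lookup_quirks(const unsigned char oui[3])
{
        size_t i;

        for (i = 0; i < sizeof(quirk_list) / sizeof(quirk_list[0]); i++)
                if (!memcmp(quirk_list[i].oui, oui, 3))
                        return quirk_list[i].quirks;
        return 0;
}

int main(void)
{
        const unsigned char oui[3] = { 0x00, 0x22, 0xb9 };

        printf("limited M/N: %d\n",
               !!(lookup_quirks(oui) & DP_DPCD_QUIRK_LIMITED_M_N));
        return 0;
}
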
diff --git a/drivers/gpu/drm/i915/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/intel_dp_aux_backlight.c
index 6532e226db29..40ba3134545e 100644
--- a/drivers/gpu/drm/i915/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/intel_dp_aux_backlight.c
@@ -119,8 +119,6 @@ static int intel_dp_aux_setup_backlight(struct intel_connector *connector,
119 struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base); 119 struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
120 struct intel_panel *panel = &connector->panel; 120 struct intel_panel *panel = &connector->panel;
121 121
122 intel_dp_aux_enable_backlight(connector);
123
124 if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT) 122 if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT)
125 panel->backlight.max = 0xFFFF; 123 panel->backlight.max = 0xFFFF;
126 else 124 else
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index c1f62eb07c07..989e25577ac0 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -44,6 +44,8 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
44 int lane_count, slots; 44 int lane_count, slots;
45 const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; 45 const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
46 int mst_pbn; 46 int mst_pbn;
47 bool reduce_m_n = drm_dp_has_quirk(&intel_dp->desc,
48 DP_DPCD_QUIRK_LIMITED_M_N);
47 49
48 pipe_config->has_pch_encoder = false; 50 pipe_config->has_pch_encoder = false;
49 bpp = 24; 51 bpp = 24;
@@ -75,7 +77,8 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
75 intel_link_compute_m_n(bpp, lane_count, 77 intel_link_compute_m_n(bpp, lane_count,
76 adjusted_mode->crtc_clock, 78 adjusted_mode->crtc_clock,
77 pipe_config->port_clock, 79 pipe_config->port_clock,
78 &pipe_config->dp_m_n); 80 &pipe_config->dp_m_n,
81 reduce_m_n);
79 82
80 pipe_config->dp_m_n.tu = slots; 83 pipe_config->dp_m_n.tu = slots;
81 84
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index aaee3949a422..f630c7af5020 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -906,14 +906,6 @@ enum link_m_n_set {
906 M2_N2 906 M2_N2
907}; 907};
908 908
909struct intel_dp_desc {
910 u8 oui[3];
911 u8 device_id[6];
912 u8 hw_rev;
913 u8 sw_major_rev;
914 u8 sw_minor_rev;
915} __packed;
916
917struct intel_dp_compliance_data { 909struct intel_dp_compliance_data {
918 unsigned long edid; 910 unsigned long edid;
919 uint8_t video_pattern; 911 uint8_t video_pattern;
@@ -957,7 +949,7 @@ struct intel_dp {
957 /* Max link BW for the sink as per DPCD registers */ 949 /* Max link BW for the sink as per DPCD registers */
958 int max_sink_link_bw; 950 int max_sink_link_bw;
959 /* sink or branch descriptor */ 951 /* sink or branch descriptor */
960 struct intel_dp_desc desc; 952 struct drm_dp_desc desc;
961 struct drm_dp_aux aux; 953 struct drm_dp_aux aux;
962 enum intel_display_power_domain aux_power_domain; 954 enum intel_display_power_domain aux_power_domain;
963 uint8_t train_set[4]; 955 uint8_t train_set[4];
@@ -1532,9 +1524,6 @@ static inline unsigned int intel_dp_unused_lane_mask(int lane_count)
1532} 1524}
1533 1525
1534bool intel_dp_read_dpcd(struct intel_dp *intel_dp); 1526bool intel_dp_read_dpcd(struct intel_dp *intel_dp);
1535bool __intel_dp_read_desc(struct intel_dp *intel_dp,
1536 struct intel_dp_desc *desc);
1537bool intel_dp_read_desc(struct intel_dp *intel_dp);
1538int intel_dp_link_required(int pixel_clock, int bpp); 1527int intel_dp_link_required(int pixel_clock, int bpp);
1539int intel_dp_max_data_rate(int max_link_clock, int max_lanes); 1528int intel_dp_max_data_rate(int max_link_clock, int max_lanes);
1540bool intel_digital_port_connected(struct drm_i915_private *dev_priv, 1529bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index 3ffe8b1f1d48..fc0ef492252a 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -410,11 +410,10 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder)
410 val |= (ULPS_STATE_ENTER | DEVICE_READY); 410 val |= (ULPS_STATE_ENTER | DEVICE_READY);
411 I915_WRITE(MIPI_DEVICE_READY(port), val); 411 I915_WRITE(MIPI_DEVICE_READY(port), val);
412 412
413 /* Wait for ULPS Not active */ 413 /* Wait for ULPS active */
414 if (intel_wait_for_register(dev_priv, 414 if (intel_wait_for_register(dev_priv,
415 MIPI_CTRL(port), GLK_ULPS_NOT_ACTIVE, 415 MIPI_CTRL(port), GLK_ULPS_NOT_ACTIVE, 0, 20))
416 GLK_ULPS_NOT_ACTIVE, 20)) 416 DRM_ERROR("ULPS not active\n");
417 DRM_ERROR("ULPS is still active\n");
418 417
419 /* Exit ULPS */ 418 /* Exit ULPS */
420 val = I915_READ(MIPI_DEVICE_READY(port)); 419 val = I915_READ(MIPI_DEVICE_READY(port));
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 854e8e0c836b..f94eacff196c 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -1075,6 +1075,22 @@ int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
1075 return 0; 1075 return 0;
1076} 1076}
1077 1077
1078static bool ring_is_idle(struct intel_engine_cs *engine)
1079{
1080 struct drm_i915_private *dev_priv = engine->i915;
1081 bool idle = true;
1082
1083 intel_runtime_pm_get(dev_priv);
1084
1085 /* No bit for gen2, so assume the CS parser is idle */
1086 if (INTEL_GEN(dev_priv) > 2 && !(I915_READ_MODE(engine) & MODE_IDLE))
1087 idle = false;
1088
1089 intel_runtime_pm_put(dev_priv);
1090
1091 return idle;
1092}
1093
1078/** 1094/**
 1079 * intel_engine_is_idle() - Report if the engine has finished processing all work 1095 * intel_engine_is_idle() - Report if the engine has finished processing all work
1080 * @engine: the intel_engine_cs 1096 * @engine: the intel_engine_cs
@@ -1084,8 +1100,6 @@ int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
1084 */ 1100 */
1085bool intel_engine_is_idle(struct intel_engine_cs *engine) 1101bool intel_engine_is_idle(struct intel_engine_cs *engine)
1086{ 1102{
1087 struct drm_i915_private *dev_priv = engine->i915;
1088
1089 /* Any inflight/incomplete requests? */ 1103 /* Any inflight/incomplete requests? */
1090 if (!i915_seqno_passed(intel_engine_get_seqno(engine), 1104 if (!i915_seqno_passed(intel_engine_get_seqno(engine),
1091 intel_engine_last_submit(engine))) 1105 intel_engine_last_submit(engine)))
@@ -1100,7 +1114,7 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine)
1100 return false; 1114 return false;
1101 1115
1102 /* Ring stopped? */ 1116 /* Ring stopped? */
1103 if (INTEL_GEN(dev_priv) > 2 && !(I915_READ_MODE(engine) & MODE_IDLE)) 1117 if (!ring_is_idle(engine))
1104 return false; 1118 return false;
1105 1119
1106 return true; 1120 return true;
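
ring_is_idle() above brackets its MODE_IDLE register read with intel_runtime_pm_get()/intel_runtime_pm_put() so the MMIO access never hits a powered-down device. The acquire/read/release shape, as a standalone sketch with stand-in functions (the bit position is illustrative):

#include <stdbool.h>
#include <stdio.h>

#define MODE_IDLE (1u << 9)     /* illustrative bit position */

static void runtime_pm_get(void) { puts("device awake"); }
static void runtime_pm_put(void) { puts("device may sleep"); }
static unsigned int read_mode_reg(void) { return MODE_IDLE; }

static bool ring_is_idle(void)
{
        bool idle = true;

        runtime_pm_get();       /* keep hardware reachable for the read */
        if (!(read_mode_reg() & MODE_IDLE))
                idle = false;   /* CS parser still busy */
        runtime_pm_put();

        return idle;
}

int main(void)
{
        printf("idle=%d\n", ring_is_idle());
        return 0;
}
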
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index ded2add18b26..d93c58410bff 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -82,20 +82,10 @@ static unsigned int get_crtc_fence_y_offset(struct intel_crtc *crtc)
82static void intel_fbc_get_plane_source_size(struct intel_fbc_state_cache *cache, 82static void intel_fbc_get_plane_source_size(struct intel_fbc_state_cache *cache,
83 int *width, int *height) 83 int *width, int *height)
84{ 84{
85 int w, h;
86
87 if (drm_rotation_90_or_270(cache->plane.rotation)) {
88 w = cache->plane.src_h;
89 h = cache->plane.src_w;
90 } else {
91 w = cache->plane.src_w;
92 h = cache->plane.src_h;
93 }
94
95 if (width) 85 if (width)
96 *width = w; 86 *width = cache->plane.src_w;
97 if (height) 87 if (height)
98 *height = h; 88 *height = cache->plane.src_h;
99} 89}
100 90
101static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv, 91static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv,
@@ -746,6 +736,11 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
746 cache->crtc.hsw_bdw_pixel_rate = crtc_state->pixel_rate; 736 cache->crtc.hsw_bdw_pixel_rate = crtc_state->pixel_rate;
747 737
748 cache->plane.rotation = plane_state->base.rotation; 738 cache->plane.rotation = plane_state->base.rotation;
739 /*
740 * Src coordinates are already rotated by 270 degrees for
741 * the 90/270 degree plane rotation cases (to match the
742 * GTT mapping), hence no need to account for rotation here.
743 */
749 cache->plane.src_w = drm_rect_width(&plane_state->base.src) >> 16; 744 cache->plane.src_w = drm_rect_width(&plane_state->base.src) >> 16;
750 cache->plane.src_h = drm_rect_height(&plane_state->base.src) >> 16; 745 cache->plane.src_h = drm_rect_height(&plane_state->base.src) >> 16;
751 cache->plane.visible = plane_state->base.visible; 746 cache->plane.visible = plane_state->base.visible;
diff --git a/drivers/gpu/drm/i915/intel_lpe_audio.c b/drivers/gpu/drm/i915/intel_lpe_audio.c
index 25d8e76489e4..292fedf30b00 100644
--- a/drivers/gpu/drm/i915/intel_lpe_audio.c
+++ b/drivers/gpu/drm/i915/intel_lpe_audio.c
@@ -63,6 +63,7 @@
63#include <linux/acpi.h> 63#include <linux/acpi.h>
64#include <linux/device.h> 64#include <linux/device.h>
65#include <linux/pci.h> 65#include <linux/pci.h>
66#include <linux/pm_runtime.h>
66 67
67#include "i915_drv.h" 68#include "i915_drv.h"
68#include <linux/delay.h> 69#include <linux/delay.h>
@@ -121,6 +122,10 @@ lpe_audio_platdev_create(struct drm_i915_private *dev_priv)
121 122
122 kfree(rsc); 123 kfree(rsc);
123 124
125 pm_runtime_forbid(&platdev->dev);
126 pm_runtime_set_active(&platdev->dev);
127 pm_runtime_enable(&platdev->dev);
128
124 return platdev; 129 return platdev;
125 130
126err: 131err:
@@ -144,44 +149,10 @@ static void lpe_audio_platdev_destroy(struct drm_i915_private *dev_priv)
144 149
145static void lpe_audio_irq_unmask(struct irq_data *d) 150static void lpe_audio_irq_unmask(struct irq_data *d)
146{ 151{
147 struct drm_i915_private *dev_priv = d->chip_data;
148 unsigned long irqflags;
149 u32 val = (I915_LPE_PIPE_A_INTERRUPT |
150 I915_LPE_PIPE_B_INTERRUPT);
151
152 if (IS_CHERRYVIEW(dev_priv))
153 val |= I915_LPE_PIPE_C_INTERRUPT;
154
155 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
156
157 dev_priv->irq_mask &= ~val;
158 I915_WRITE(VLV_IIR, val);
159 I915_WRITE(VLV_IIR, val);
160 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
161 POSTING_READ(VLV_IMR);
162
163 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
164} 152}
165 153
166static void lpe_audio_irq_mask(struct irq_data *d) 154static void lpe_audio_irq_mask(struct irq_data *d)
167{ 155{
168 struct drm_i915_private *dev_priv = d->chip_data;
169 unsigned long irqflags;
170 u32 val = (I915_LPE_PIPE_A_INTERRUPT |
171 I915_LPE_PIPE_B_INTERRUPT);
172
173 if (IS_CHERRYVIEW(dev_priv))
174 val |= I915_LPE_PIPE_C_INTERRUPT;
175
176 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
177
178 dev_priv->irq_mask |= val;
179 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
180 I915_WRITE(VLV_IIR, val);
181 I915_WRITE(VLV_IIR, val);
182 POSTING_READ(VLV_IIR);
183
184 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
185} 156}
186 157
187static struct irq_chip lpe_audio_irqchip = { 158static struct irq_chip lpe_audio_irqchip = {
@@ -325,8 +296,6 @@ void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv)
325 296
326 desc = irq_to_desc(dev_priv->lpe_audio.irq); 297 desc = irq_to_desc(dev_priv->lpe_audio.irq);
327 298
328 lpe_audio_irq_mask(&desc->irq_data);
329
330 lpe_audio_platdev_destroy(dev_priv); 299 lpe_audio_platdev_destroy(dev_priv);
331 300
332 irq_free_desc(dev_priv->lpe_audio.irq); 301 irq_free_desc(dev_priv->lpe_audio.irq);
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index c8f7c631fc1f..62f44d3e7c43 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -326,8 +326,7 @@ static u64 execlists_update_context(struct drm_i915_gem_request *rq)
326 rq->ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt; 326 rq->ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt;
327 u32 *reg_state = ce->lrc_reg_state; 327 u32 *reg_state = ce->lrc_reg_state;
328 328
329 assert_ring_tail_valid(rq->ring, rq->tail); 329 reg_state[CTX_RING_TAIL+1] = intel_ring_set_tail(rq->ring, rq->tail);
330 reg_state[CTX_RING_TAIL+1] = rq->tail;
331 330
332 /* True 32b PPGTT with dynamic page allocation: update PDP 331 /* True 32b PPGTT with dynamic page allocation: update PDP
333 * registers and point the unallocated PDPs to scratch page. 332 * registers and point the unallocated PDPs to scratch page.
@@ -1989,7 +1988,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
1989 1988
1990 ce->ring = ring; 1989 ce->ring = ring;
1991 ce->state = vma; 1990 ce->state = vma;
1992 ce->initialised = engine->init_context == NULL; 1991 ce->initialised |= engine->init_context == NULL;
1993 1992
1994 return 0; 1993 return 0;
1995 1994
@@ -2036,8 +2035,7 @@ void intel_lr_context_resume(struct drm_i915_private *dev_priv)
2036 ce->state->obj->mm.dirty = true; 2035 ce->state->obj->mm.dirty = true;
2037 i915_gem_object_unpin_map(ce->state->obj); 2036 i915_gem_object_unpin_map(ce->state->obj);
2038 2037
2039 ce->ring->head = ce->ring->tail = 0; 2038 intel_ring_reset(ce->ring, 0);
2040 intel_ring_update_space(ce->ring);
2041 } 2039 }
2042 } 2040 }
2043} 2041}
diff --git a/drivers/gpu/drm/i915/intel_lspcon.c b/drivers/gpu/drm/i915/intel_lspcon.c
index 71cbe9c08932..5abef482eacf 100644
--- a/drivers/gpu/drm/i915/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/intel_lspcon.c
@@ -240,7 +240,7 @@ bool lspcon_init(struct intel_digital_port *intel_dig_port)
240 return false; 240 return false;
241 } 241 }
242 242
243 intel_dp_read_desc(dp); 243 drm_dp_read_desc(&dp->aux, &dp->desc, drm_dp_is_branch(dp->dpcd));
244 244
245 DRM_DEBUG_KMS("Success: LSPCON init\n"); 245 DRM_DEBUG_KMS("Success: LSPCON init\n");
246 return true; 246 return true;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 570bd603f401..078fd1bfa5ea 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -3373,20 +3373,26 @@ skl_plane_downscale_amount(const struct intel_crtc_state *cstate,
3373 3373
3374 /* n.b., src is 16.16 fixed point, dst is whole integer */ 3374 /* n.b., src is 16.16 fixed point, dst is whole integer */
3375 if (plane->id == PLANE_CURSOR) { 3375 if (plane->id == PLANE_CURSOR) {
3376 /*
3377 * Cursors only support 0/180 degree rotation,
3378 * hence no need to account for rotation here.
3379 */
3376 src_w = pstate->base.src_w; 3380 src_w = pstate->base.src_w;
3377 src_h = pstate->base.src_h; 3381 src_h = pstate->base.src_h;
3378 dst_w = pstate->base.crtc_w; 3382 dst_w = pstate->base.crtc_w;
3379 dst_h = pstate->base.crtc_h; 3383 dst_h = pstate->base.crtc_h;
3380 } else { 3384 } else {
3385 /*
3386 * Src coordinates are already rotated by 270 degrees for
3387 * the 90/270 degree plane rotation cases (to match the
3388 * GTT mapping), hence no need to account for rotation here.
3389 */
3381 src_w = drm_rect_width(&pstate->base.src); 3390 src_w = drm_rect_width(&pstate->base.src);
3382 src_h = drm_rect_height(&pstate->base.src); 3391 src_h = drm_rect_height(&pstate->base.src);
3383 dst_w = drm_rect_width(&pstate->base.dst); 3392 dst_w = drm_rect_width(&pstate->base.dst);
3384 dst_h = drm_rect_height(&pstate->base.dst); 3393 dst_h = drm_rect_height(&pstate->base.dst);
3385 } 3394 }
3386 3395
3387 if (drm_rotation_90_or_270(pstate->base.rotation))
3388 swap(dst_w, dst_h);
3389
3390 downscale_h = max(src_h / dst_h, (uint32_t)DRM_PLANE_HELPER_NO_SCALING); 3396 downscale_h = max(src_h / dst_h, (uint32_t)DRM_PLANE_HELPER_NO_SCALING);
3391 downscale_w = max(src_w / dst_w, (uint32_t)DRM_PLANE_HELPER_NO_SCALING); 3397 downscale_w = max(src_w / dst_w, (uint32_t)DRM_PLANE_HELPER_NO_SCALING);
3392 3398
@@ -3417,12 +3423,14 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
3417 if (y && format != DRM_FORMAT_NV12) 3423 if (y && format != DRM_FORMAT_NV12)
3418 return 0; 3424 return 0;
3419 3425
3426 /*
3427 * Src coordinates are already rotated by 270 degrees for
3428 * the 90/270 degree plane rotation cases (to match the
3429 * GTT mapping), hence no need to account for rotation here.
3430 */
3420 width = drm_rect_width(&intel_pstate->base.src) >> 16; 3431 width = drm_rect_width(&intel_pstate->base.src) >> 16;
3421 height = drm_rect_height(&intel_pstate->base.src) >> 16; 3432 height = drm_rect_height(&intel_pstate->base.src) >> 16;
3422 3433
3423 if (drm_rotation_90_or_270(pstate->rotation))
3424 swap(width, height);
3425
3426 /* for planar format */ 3434 /* for planar format */
3427 if (format == DRM_FORMAT_NV12) { 3435 if (format == DRM_FORMAT_NV12) {
3428 if (y) /* y-plane data rate */ 3436 if (y) /* y-plane data rate */
@@ -3505,12 +3513,14 @@ skl_ddb_min_alloc(const struct drm_plane_state *pstate,
3505 fb->modifier != I915_FORMAT_MOD_Yf_TILED) 3513 fb->modifier != I915_FORMAT_MOD_Yf_TILED)
3506 return 8; 3514 return 8;
3507 3515
3516 /*
3517 * Src coordinates are already rotated by 270 degrees for
3518 * the 90/270 degree plane rotation cases (to match the
3519 * GTT mapping), hence no need to account for rotation here.
3520 */
3508 src_w = drm_rect_width(&intel_pstate->base.src) >> 16; 3521 src_w = drm_rect_width(&intel_pstate->base.src) >> 16;
3509 src_h = drm_rect_height(&intel_pstate->base.src) >> 16; 3522 src_h = drm_rect_height(&intel_pstate->base.src) >> 16;
3510 3523
3511 if (drm_rotation_90_or_270(pstate->rotation))
3512 swap(src_w, src_h);
3513
3514 /* Halve UV plane width and height for NV12 */ 3524 /* Halve UV plane width and height for NV12 */
3515 if (fb->format->format == DRM_FORMAT_NV12 && !y) { 3525 if (fb->format->format == DRM_FORMAT_NV12 && !y) {
3516 src_w /= 2; 3526 src_w /= 2;
@@ -3794,13 +3804,15 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
3794 width = intel_pstate->base.crtc_w; 3804 width = intel_pstate->base.crtc_w;
3795 height = intel_pstate->base.crtc_h; 3805 height = intel_pstate->base.crtc_h;
3796 } else { 3806 } else {
3807 /*
3808 * Src coordinates are already rotated by 270 degrees for
3809 * the 90/270 degree plane rotation cases (to match the
3810 * GTT mapping), hence no need to account for rotation here.
3811 */
3797 width = drm_rect_width(&intel_pstate->base.src) >> 16; 3812 width = drm_rect_width(&intel_pstate->base.src) >> 16;
3798 height = drm_rect_height(&intel_pstate->base.src) >> 16; 3813 height = drm_rect_height(&intel_pstate->base.src) >> 16;
3799 } 3814 }
3800 3815
3801 if (drm_rotation_90_or_270(pstate->rotation))
3802 swap(width, height);
3803
3804 cpp = fb->format->cpp[0]; 3816 cpp = fb->format->cpp[0];
3805 plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate, intel_pstate); 3817 plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate, intel_pstate);
3806 3818
@@ -4335,11 +4347,19 @@ skl_compute_wm(struct drm_atomic_state *state)
4335 struct drm_crtc_state *cstate; 4347 struct drm_crtc_state *cstate;
4336 struct intel_atomic_state *intel_state = to_intel_atomic_state(state); 4348 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
4337 struct skl_wm_values *results = &intel_state->wm_results; 4349 struct skl_wm_values *results = &intel_state->wm_results;
4350 struct drm_device *dev = state->dev;
4338 struct skl_pipe_wm *pipe_wm; 4351 struct skl_pipe_wm *pipe_wm;
4339 bool changed = false; 4352 bool changed = false;
4340 int ret, i; 4353 int ret, i;
4341 4354
4342 /* 4355 /*
4356 * When we distrust bios wm we always need to recompute to set the
4357 * expected DDB allocations for each CRTC.
4358 */
4359 if (to_i915(dev)->wm.distrust_bios_wm)
4360 changed = true;
4361
4362 /*
4343 * If this transaction isn't actually touching any CRTC's, don't 4363 * If this transaction isn't actually touching any CRTC's, don't
4344 * bother with watermark calculation. Note that if we pass this 4364 * bother with watermark calculation. Note that if we pass this
4345 * test, we're guaranteed to hold at least one CRTC state mutex, 4365 * test, we're guaranteed to hold at least one CRTC state mutex,
@@ -4349,6 +4369,7 @@ skl_compute_wm(struct drm_atomic_state *state)
4349 */ 4369 */
4350 for_each_new_crtc_in_state(state, crtc, cstate, i) 4370 for_each_new_crtc_in_state(state, crtc, cstate, i)
4351 changed = true; 4371 changed = true;
4372
4352 if (!changed) 4373 if (!changed)
4353 return 0; 4374 return 0;
4354 4375
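
The skl_compute_wm() hunk above changes the early-out: when BIOS watermarks are distrusted, the recompute must run even if the transaction touches no CRTC, since every pipe's DDB allocation needs to be re-established. The resulting decision logic, reduced to a standalone predicate:

#include <stdbool.h>
#include <stdio.h>

/* Models the early-out above: recompute when the BIOS-programmed
 * watermarks are distrusted, or when any CRTC is in the state. */
static bool needs_wm_recompute(bool distrust_bios_wm, int crtcs_in_state)
{
        bool changed = false;

        if (distrust_bios_wm)
                changed = true;
        if (crtcs_in_state > 0)
                changed = true;

        return changed;
}

int main(void)
{
        printf("%d %d %d\n",
               needs_wm_recompute(false, 0),   /* 0: skip */
               needs_wm_recompute(true, 0),    /* 1: recompute */
               needs_wm_recompute(false, 2));  /* 1: recompute */
        return 0;
}
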
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index c3780d0d2baf..559f1ab42bfc 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -435,8 +435,9 @@ static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
435 } 435 }
436 436
 437 /* PSR2 is restricted to work with panel resolutions up to 3200x2000 */ 437 /* PSR2 is restricted to work with panel resolutions up to 3200x2000 */
438 if (intel_crtc->config->pipe_src_w > 3200 || 438 if (dev_priv->psr.psr2_support &&
439 intel_crtc->config->pipe_src_h > 2000) { 439 (intel_crtc->config->pipe_src_w > 3200 ||
440 intel_crtc->config->pipe_src_h > 2000)) {
440 dev_priv->psr.psr2_support = false; 441 dev_priv->psr.psr2_support = false;
441 return false; 442 return false;
442 } 443 }
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 66a2b8b83972..513a0f4b469b 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -49,7 +49,7 @@ static int __intel_ring_space(int head, int tail, int size)
49 49
50void intel_ring_update_space(struct intel_ring *ring) 50void intel_ring_update_space(struct intel_ring *ring)
51{ 51{
52 ring->space = __intel_ring_space(ring->head, ring->tail, ring->size); 52 ring->space = __intel_ring_space(ring->head, ring->emit, ring->size);
53} 53}
54 54
55static int 55static int
@@ -774,8 +774,8 @@ static void i9xx_submit_request(struct drm_i915_gem_request *request)
774 774
775 i915_gem_request_submit(request); 775 i915_gem_request_submit(request);
776 776
777 assert_ring_tail_valid(request->ring, request->tail); 777 I915_WRITE_TAIL(request->engine,
778 I915_WRITE_TAIL(request->engine, request->tail); 778 intel_ring_set_tail(request->ring, request->tail));
779} 779}
780 780
781static void i9xx_emit_breadcrumb(struct drm_i915_gem_request *req, u32 *cs) 781static void i9xx_emit_breadcrumb(struct drm_i915_gem_request *req, u32 *cs)
@@ -1316,11 +1316,23 @@ err:
1316 return PTR_ERR(addr); 1316 return PTR_ERR(addr);
1317} 1317}
1318 1318
1319void intel_ring_reset(struct intel_ring *ring, u32 tail)
1320{
1321 GEM_BUG_ON(!list_empty(&ring->request_list));
1322 ring->tail = tail;
1323 ring->head = tail;
1324 ring->emit = tail;
1325 intel_ring_update_space(ring);
1326}
1327
1319void intel_ring_unpin(struct intel_ring *ring) 1328void intel_ring_unpin(struct intel_ring *ring)
1320{ 1329{
1321 GEM_BUG_ON(!ring->vma); 1330 GEM_BUG_ON(!ring->vma);
1322 GEM_BUG_ON(!ring->vaddr); 1331 GEM_BUG_ON(!ring->vaddr);
1323 1332
1333 /* Discard any unused bytes beyond that submitted to hw. */
1334 intel_ring_reset(ring, ring->tail);
1335
1324 if (i915_vma_is_map_and_fenceable(ring->vma)) 1336 if (i915_vma_is_map_and_fenceable(ring->vma))
1325 i915_vma_unpin_iomap(ring->vma); 1337 i915_vma_unpin_iomap(ring->vma);
1326 else 1338 else
@@ -1562,8 +1574,9 @@ void intel_legacy_submission_resume(struct drm_i915_private *dev_priv)
1562 struct intel_engine_cs *engine; 1574 struct intel_engine_cs *engine;
1563 enum intel_engine_id id; 1575 enum intel_engine_id id;
1564 1576
1577 /* Restart from the beginning of the rings for convenience */
1565 for_each_engine(engine, dev_priv, id) 1578 for_each_engine(engine, dev_priv, id)
1566 engine->buffer->head = engine->buffer->tail; 1579 intel_ring_reset(engine->buffer, 0);
1567} 1580}
1568 1581
1569static int ring_request_alloc(struct drm_i915_gem_request *request) 1582static int ring_request_alloc(struct drm_i915_gem_request *request)
@@ -1616,7 +1629,7 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
1616 unsigned space; 1629 unsigned space;
1617 1630
1618 /* Would completion of this request free enough space? */ 1631 /* Would completion of this request free enough space? */
1619 space = __intel_ring_space(target->postfix, ring->tail, 1632 space = __intel_ring_space(target->postfix, ring->emit,
1620 ring->size); 1633 ring->size);
1621 if (space >= bytes) 1634 if (space >= bytes)
1622 break; 1635 break;
@@ -1641,8 +1654,8 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
1641u32 *intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords) 1654u32 *intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
1642{ 1655{
1643 struct intel_ring *ring = req->ring; 1656 struct intel_ring *ring = req->ring;
1644 int remain_actual = ring->size - ring->tail; 1657 int remain_actual = ring->size - ring->emit;
1645 int remain_usable = ring->effective_size - ring->tail; 1658 int remain_usable = ring->effective_size - ring->emit;
1646 int bytes = num_dwords * sizeof(u32); 1659 int bytes = num_dwords * sizeof(u32);
1647 int total_bytes, wait_bytes; 1660 int total_bytes, wait_bytes;
1648 bool need_wrap = false; 1661 bool need_wrap = false;
@@ -1678,17 +1691,17 @@ u32 *intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
1678 1691
1679 if (unlikely(need_wrap)) { 1692 if (unlikely(need_wrap)) {
1680 GEM_BUG_ON(remain_actual > ring->space); 1693 GEM_BUG_ON(remain_actual > ring->space);
1681 GEM_BUG_ON(ring->tail + remain_actual > ring->size); 1694 GEM_BUG_ON(ring->emit + remain_actual > ring->size);
1682 1695
1683 /* Fill the tail with MI_NOOP */ 1696 /* Fill the tail with MI_NOOP */
1684 memset(ring->vaddr + ring->tail, 0, remain_actual); 1697 memset(ring->vaddr + ring->emit, 0, remain_actual);
1685 ring->tail = 0; 1698 ring->emit = 0;
1686 ring->space -= remain_actual; 1699 ring->space -= remain_actual;
1687 } 1700 }
1688 1701
1689 GEM_BUG_ON(ring->tail > ring->size - bytes); 1702 GEM_BUG_ON(ring->emit > ring->size - bytes);
1690 cs = ring->vaddr + ring->tail; 1703 cs = ring->vaddr + ring->emit;
1691 ring->tail += bytes; 1704 ring->emit += bytes;
1692 ring->space -= bytes; 1705 ring->space -= bytes;
1693 GEM_BUG_ON(ring->space < 0); 1706 GEM_BUG_ON(ring->space < 0);
1694 1707
@@ -1699,7 +1712,7 @@ u32 *intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
1699int intel_ring_cacheline_align(struct drm_i915_gem_request *req) 1712int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
1700{ 1713{
1701 int num_dwords = 1714 int num_dwords =
1702 (req->ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t); 1715 (req->ring->emit & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
1703 u32 *cs; 1716 u32 *cs;
1704 1717
1705 if (num_dwords == 0) 1718 if (num_dwords == 0)
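The intel_ringbuffer.c changes above split the software write cursor, ring->emit, from ring->tail, which now only records what has been published to the hardware. A minimal sketch of that split, using simplified types assumed from the diff rather than the real i915 structures:

/* Sketch: ring buffer with a private write cursor ("emit") that is
 * published to the hardware tail only at submission time. Simplified;
 * not the actual i915 layout. */
struct ring {
	void *vaddr;		/* CPU mapping of the ring */
	unsigned int size;	/* power of two */
	unsigned int head;	/* hardware consumption point */
	unsigned int tail;	/* last offset handed to the hardware */
	unsigned int emit;	/* next free byte for the CPU writer */
};

/* Free bytes between the consumer and the software writer. */
static int ring_space(const struct ring *ring)
{
	int space = ring->head - ring->emit;

	if (space <= 0)
		space += ring->size;
	return space - 1;	/* keep one slot so full != empty */
}

/* Reserve room for a packet; a real implementation would wrap and
 * pad the remainder with MI_NOOP as intel_ring_begin() does. */
static void *ring_reserve(struct ring *ring, unsigned int bytes)
{
	void *cs = ring->vaddr + ring->emit;

	ring->emit = (ring->emit + bytes) & (ring->size - 1);
	return cs;
}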
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index a82a0807f64d..f7144fe09613 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -145,6 +145,7 @@ struct intel_ring {
145 145
146 u32 head; 146 u32 head;
147 u32 tail; 147 u32 tail;
148 u32 emit;
148 149
149 int space; 150 int space;
150 int size; 151 int size;
@@ -488,6 +489,8 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
488struct intel_ring * 489struct intel_ring *
489intel_engine_create_ring(struct intel_engine_cs *engine, int size); 490intel_engine_create_ring(struct intel_engine_cs *engine, int size);
490int intel_ring_pin(struct intel_ring *ring, unsigned int offset_bias); 491int intel_ring_pin(struct intel_ring *ring, unsigned int offset_bias);
492void intel_ring_reset(struct intel_ring *ring, u32 tail);
493void intel_ring_update_space(struct intel_ring *ring);
491void intel_ring_unpin(struct intel_ring *ring); 494void intel_ring_unpin(struct intel_ring *ring);
492void intel_ring_free(struct intel_ring *ring); 495void intel_ring_free(struct intel_ring *ring);
493 496
@@ -511,7 +514,7 @@ intel_ring_advance(struct drm_i915_gem_request *req, u32 *cs)
511 * reserved for the command packet (i.e. the value passed to 514 * reserved for the command packet (i.e. the value passed to
512 * intel_ring_begin()). 515 * intel_ring_begin()).
513 */ 516 */
514 GEM_BUG_ON((req->ring->vaddr + req->ring->tail) != cs); 517 GEM_BUG_ON((req->ring->vaddr + req->ring->emit) != cs);
515} 518}
516 519
517static inline u32 520static inline u32
@@ -540,7 +543,19 @@ assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail)
540 GEM_BUG_ON(tail >= ring->size); 543 GEM_BUG_ON(tail >= ring->size);
541} 544}
542 545
543void intel_ring_update_space(struct intel_ring *ring); 546static inline unsigned int
547intel_ring_set_tail(struct intel_ring *ring, unsigned int tail)
548{
549 /* Whilst writes to the tail are strictly ordered, there is no
550 * serialisation between readers and the writers. The tail may be
551 * read by i915_gem_request_retire() just as it is being updated
552 * by execlists, as although the breadcrumb is complete, the context
553 * switch hasn't been seen.
554 */
555 assert_ring_tail_valid(ring, tail);
556 ring->tail = tail;
557 return tail;
558}
544 559
545void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno); 560void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno);
546 561
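intel_ring_set_tail() above documents that readers may sample the tail while it is being updated. A hedged illustration of that publish/sample pattern using the kernel's ONCE accessors; this is illustrative only, not how the driver itself spells it, and it reuses the simplified struct ring from the earlier sketch:

/* Sketch: publishing a cursor that unsynchronised readers may sample.
 * WRITE_ONCE()/READ_ONCE() avoid torn or re-fetched accesses; they add
 * no ordering beyond the single variable. */
static inline void publish_tail(struct ring *ring, unsigned int tail)
{
	WARN_ON(tail & 7);		/* mirrors assert_ring_tail_valid() */
	WARN_ON(tail >= ring->size);
	WRITE_ONCE(ring->tail, tail);
}

static inline unsigned int sample_tail(const struct ring *ring)
{
	return READ_ONCE(ring->tail);	/* may race with publish_tail() */
}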
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 8c87c717c7cd..e6517edcd16b 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -83,10 +83,13 @@ int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
83 */ 83 */
84void intel_pipe_update_start(struct intel_crtc *crtc) 84void intel_pipe_update_start(struct intel_crtc *crtc)
85{ 85{
86 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
86 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode; 87 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
87 long timeout = msecs_to_jiffies_timeout(1); 88 long timeout = msecs_to_jiffies_timeout(1);
88 int scanline, min, max, vblank_start; 89 int scanline, min, max, vblank_start;
89 wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base); 90 wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base);
91 bool need_vlv_dsi_wa = (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
92 intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DSI);
90 DEFINE_WAIT(wait); 93 DEFINE_WAIT(wait);
91 94
92 vblank_start = adjusted_mode->crtc_vblank_start; 95 vblank_start = adjusted_mode->crtc_vblank_start;
@@ -139,6 +142,24 @@ void intel_pipe_update_start(struct intel_crtc *crtc)
139 142
140 drm_crtc_vblank_put(&crtc->base); 143 drm_crtc_vblank_put(&crtc->base);
141 144
145 /*
146 * On VLV/CHV DSI the scanline counter would appear to
147 * increment approx. 1/3 of a scanline before start of vblank.
148 * The registers still get latched at start of vblank however.
149 * This means we must not write any registers on the first
150 * line of vblank (since not the whole line is actually in
151 * vblank). And unfortunately we can't use the interrupt to
152 * wait here since it will fire too soon. We could use the
153 * frame start interrupt instead since it will fire after the
154 * critical scanline, but that would require more changes
155 * in the interrupt code. So for now we'll just do the nasty
156 * thing and poll for the bad scanline to pass us by.
157 *
158 * FIXME figure out if BXT+ DSI suffers from this as well
159 */
160 while (need_vlv_dsi_wa && scanline == vblank_start)
161 scanline = intel_get_crtc_scanline(crtc);
162
142 crtc->debug.scanline_start = scanline; 163 crtc->debug.scanline_start = scanline;
143 crtc->debug.start_vbl_time = ktime_get(); 164 crtc->debug.start_vbl_time = ktime_get();
144 crtc->debug.start_vbl_count = intel_crtc_get_vblank_counter(crtc); 165 crtc->debug.start_vbl_count = intel_crtc_get_vblank_counter(crtc);
diff --git a/drivers/gpu/drm/i915/intel_uc.h b/drivers/gpu/drm/i915/intel_uc.h
index 4b7f73aeddac..f84115261ae7 100644
--- a/drivers/gpu/drm/i915/intel_uc.h
+++ b/drivers/gpu/drm/i915/intel_uc.h
@@ -59,8 +59,6 @@ struct drm_i915_gem_request;
59 * available in the work queue (note, the queue is shared, 59 * available in the work queue (note, the queue is shared,
60 * not per-engine). It is OK for this to be nonzero, but 60 * not per-engine). It is OK for this to be nonzero, but
61 * it should not be huge! 61 * it should not be huge!
62 * q_fail: failed to enqueue a work item. This should never happen,
63 * because we check for space beforehand.
64 * b_fail: failed to ring the doorbell. This should never happen, unless 62 * b_fail: failed to ring the doorbell. This should never happen, unless
65 * somehow the hardware misbehaves, or maybe if the GuC firmware 63 * somehow the hardware misbehaves, or maybe if the GuC firmware
66 * crashes? We probably need to reset the GPU to recover. 64 * crashes? We probably need to reset the GPU to recover.
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
index 1afb8b06e3e1..12b85b3278cd 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
@@ -320,7 +320,7 @@ static unsigned long max_dwords(struct drm_i915_gem_object *obj)
320static int igt_ctx_exec(void *arg) 320static int igt_ctx_exec(void *arg)
321{ 321{
322 struct drm_i915_private *i915 = arg; 322 struct drm_i915_private *i915 = arg;
323 struct drm_i915_gem_object *obj; 323 struct drm_i915_gem_object *obj = NULL;
324 struct drm_file *file; 324 struct drm_file *file;
325 IGT_TIMEOUT(end_time); 325 IGT_TIMEOUT(end_time);
326 LIST_HEAD(objects); 326 LIST_HEAD(objects);
@@ -359,7 +359,7 @@ static int igt_ctx_exec(void *arg)
359 } 359 }
360 360
361 for_each_engine(engine, i915, id) { 361 for_each_engine(engine, i915, id) {
362 if (dw == 0) { 362 if (!obj) {
363 obj = create_test_object(ctx, file, &objects); 363 obj = create_test_object(ctx, file, &objects);
364 if (IS_ERR(obj)) { 364 if (IS_ERR(obj)) {
365 err = PTR_ERR(obj); 365 err = PTR_ERR(obj);
@@ -376,8 +376,10 @@ static int igt_ctx_exec(void *arg)
376 goto out_unlock; 376 goto out_unlock;
377 } 377 }
378 378
379 if (++dw == max_dwords(obj)) 379 if (++dw == max_dwords(obj)) {
380 obj = NULL;
380 dw = 0; 381 dw = 0;
382 }
381 ndwords++; 383 ndwords++;
382 } 384 }
383 ncontexts++; 385 ncontexts++;
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index 8fb801fab039..8b05ecb8fdef 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -673,7 +673,7 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
673 ret = drm_of_find_panel_or_bridge(child, 673 ret = drm_of_find_panel_or_bridge(child,
674 imx_ldb->lvds_mux ? 4 : 2, 0, 674 imx_ldb->lvds_mux ? 4 : 2, 0,
675 &channel->panel, &channel->bridge); 675 &channel->panel, &channel->bridge);
676 if (ret) 676 if (ret && ret != -ENODEV)
677 return ret; 677 return ret;
678 678
679 /* panel ddc only if there is no bridge */ 679 /* panel ddc only if there is no bridge */
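In the imx-ldb hunk above, drm_of_find_panel_or_bridge() returns -ENODEV when the device tree simply describes no panel or bridge for the endpoint, so the fix stops treating that as a fatal bind error. A minimal sketch of the pattern, with the wrapper function and its arguments assumed for illustration:

#include <drm/drm_of.h>

/* Sketch: optional panel/bridge lookup. -ENODEV means "nothing
 * connected" and is not a failure; anything else (e.g. -EPROBE_DEFER)
 * is propagated. */
static int ldb_get_output(struct device_node *child, int port,
			  struct drm_panel **panel,
			  struct drm_bridge **bridge)
{
	int ret = drm_of_find_panel_or_bridge(child, port, 0,
					      panel, bridge);

	if (ret == -ENODEV)
		return 0;	/* optional output absent */
	return ret;		/* 0 on success, or a real error */
}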
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index 808b995a990f..b5cc6e12334c 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -19,6 +19,7 @@
19#include <drm/drm_of.h> 19#include <drm/drm_of.h>
20#include <linux/clk.h> 20#include <linux/clk.h>
21#include <linux/component.h> 21#include <linux/component.h>
22#include <linux/iopoll.h>
22#include <linux/irq.h> 23#include <linux/irq.h>
23#include <linux/of.h> 24#include <linux/of.h>
24#include <linux/of_platform.h> 25#include <linux/of_platform.h>
@@ -900,16 +901,12 @@ static int mtk_dsi_host_detach(struct mipi_dsi_host *host,
900 901
901static void mtk_dsi_wait_for_idle(struct mtk_dsi *dsi) 902static void mtk_dsi_wait_for_idle(struct mtk_dsi *dsi)
902{ 903{
903 u32 timeout_ms = 500000; /* total 1s ~ 2s timeout */ 904 int ret;
904 905 u32 val;
905 while (timeout_ms--) {
906 if (!(readl(dsi->regs + DSI_INTSTA) & DSI_BUSY))
907 break;
908
909 usleep_range(2, 4);
910 }
911 906
912 if (timeout_ms == 0) { 907 ret = readl_poll_timeout(dsi->regs + DSI_INTSTA, val, !(val & DSI_BUSY),
908 4, 2000000);
909 if (ret) {
913 DRM_WARN("polling dsi wait not busy timeout!\n"); 910 DRM_WARN("polling dsi wait not busy timeout!\n");
914 911
915 mtk_dsi_enable(dsi); 912 mtk_dsi_enable(dsi);
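The loop removed above also had a counter bug: once while (timeout_ms--) expires, timeout_ms has wrapped to UINT_MAX, so the if (timeout_ms == 0) branch could never report the timeout. readl_poll_timeout() from <linux/iopoll.h> encapsulates the poll, the sleep and the timeout check. A minimal usage sketch; DSI_INTSTA and DSI_BUSY are taken from the diff, the wrapper is assumed:

#include <linux/iopoll.h>

/* Sketch: poll DSI_INTSTA until the BUSY bit clears, sleeping ~4us
 * between reads and giving up after 2s. Returns 0 or -ETIMEDOUT. */
static int dsi_wait_idle(void __iomem *regs)
{
	u32 val;

	return readl_poll_timeout(regs + DSI_INTSTA, val,
				  !(val & DSI_BUSY),
				  4 /* sleep us */, 2000000 /* timeout us */);
}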
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index 41a1c03b0347..0a4ffd724146 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -1062,7 +1062,7 @@ static int mtk_hdmi_setup_vendor_specific_infoframe(struct mtk_hdmi *hdmi,
1062 } 1062 }
1063 1063
1064 err = hdmi_vendor_infoframe_pack(&frame, buffer, sizeof(buffer)); 1064 err = hdmi_vendor_infoframe_pack(&frame, buffer, sizeof(buffer));
1065 if (err) { 1065 if (err < 0) {
1066 dev_err(hdmi->dev, "Failed to pack vendor infoframe: %zd\n", 1066 dev_err(hdmi->dev, "Failed to pack vendor infoframe: %zd\n",
1067 err); 1067 err);
1068 return err; 1068 return err;
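The one-character mtk_hdmi fix reflects the return convention of the pack helpers: hdmi_vendor_infoframe_pack() returns the number of bytes written on success, so if (err) treated every successful pack as an error. A short sketch of the convention (frame and buffer as in the diff):

ssize_t len;

len = hdmi_vendor_infoframe_pack(&frame, buffer, sizeof(buffer));
if (len < 0)
	return len;		/* a real error such as -EINVAL */
/* success: len bytes of packed infoframe data are now in buffer */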
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index 75382f5f0fce..10b227d83e9a 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -152,7 +152,7 @@ static struct regmap_config meson_regmap_config = {
152 .max_register = 0x1000, 152 .max_register = 0x1000,
153}; 153};
154 154
155static int meson_drv_bind(struct device *dev) 155static int meson_drv_bind_master(struct device *dev, bool has_components)
156{ 156{
157 struct platform_device *pdev = to_platform_device(dev); 157 struct platform_device *pdev = to_platform_device(dev);
158 struct meson_drm *priv; 158 struct meson_drm *priv;
@@ -233,10 +233,12 @@ static int meson_drv_bind(struct device *dev)
233 if (ret) 233 if (ret)
234 goto free_drm; 234 goto free_drm;
235 235
236 ret = component_bind_all(drm->dev, drm); 236 if (has_components) {
237 if (ret) { 237 ret = component_bind_all(drm->dev, drm);
238 dev_err(drm->dev, "Couldn't bind all components\n"); 238 if (ret) {
239 goto free_drm; 239 dev_err(drm->dev, "Couldn't bind all components\n");
240 goto free_drm;
241 }
240 } 242 }
241 243
242 ret = meson_plane_create(priv); 244 ret = meson_plane_create(priv);
@@ -276,6 +278,11 @@ free_drm:
276 return ret; 278 return ret;
277} 279}
278 280
281static int meson_drv_bind(struct device *dev)
282{
283 return meson_drv_bind_master(dev, true);
284}
285
279static void meson_drv_unbind(struct device *dev) 286static void meson_drv_unbind(struct device *dev)
280{ 287{
281 struct drm_device *drm = dev_get_drvdata(dev); 288 struct drm_device *drm = dev_get_drvdata(dev);
@@ -357,6 +364,9 @@ static int meson_drv_probe(struct platform_device *pdev)
357 count += meson_probe_remote(pdev, &match, np, remote); 364 count += meson_probe_remote(pdev, &match, np, remote);
358 } 365 }
359 366
367 if (count && !match)
368 return meson_drv_bind_master(&pdev->dev, false);
369
360 /* If some endpoints were found, initialize the nodes */ 370 /* If some endpoints were found, initialize the nodes */
361 if (count) { 371 if (count) {
362 dev_info(&pdev->dev, "Queued %d outputs on vpu\n", count); 372 dev_info(&pdev->dev, "Queued %d outputs on vpu\n", count);
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index adb411a078e8..f4b53588e071 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -1173,7 +1173,10 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
1173 1173
1174 1174
1175 if (IS_G200_SE(mdev)) { 1175 if (IS_G200_SE(mdev)) {
1176 if (mdev->unique_rev_id >= 0x02) { 1176 if (mdev->unique_rev_id >= 0x04) {
1177 WREG8(MGAREG_CRTCEXT_INDEX, 0x06);
1178 WREG8(MGAREG_CRTCEXT_DATA, 0);
1179 } else if (mdev->unique_rev_id >= 0x02) {
1177 u8 hi_pri_lvl; 1180 u8 hi_pri_lvl;
1178 u32 bpp; 1181 u32 bpp;
1179 u32 mb; 1182 u32 mb;
@@ -1639,6 +1642,10 @@ static int mga_vga_mode_valid(struct drm_connector *connector,
1639 if (mga_vga_calculate_mode_bandwidth(mode, bpp) 1642 if (mga_vga_calculate_mode_bandwidth(mode, bpp)
1640 > (30100 * 1024)) 1643 > (30100 * 1024))
1641 return MODE_BANDWIDTH; 1644 return MODE_BANDWIDTH;
1645 } else {
1646 if (mga_vga_calculate_mode_bandwidth(mode, bpp)
1647 > (55000 * 1024))
1648 return MODE_BANDWIDTH;
1642 } 1649 }
1643 } else if (mdev->type == G200_WB) { 1650 } else if (mdev->type == G200_WB) {
1644 if (mode->hdisplay > 1280) 1651 if (mode->hdisplay > 1280)
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 5b8e23d051f2..0a31cd6d01ce 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -13,6 +13,7 @@ config DRM_MSM
13 select QCOM_SCM 13 select QCOM_SCM
14 select SND_SOC_HDMI_CODEC if SND_SOC 14 select SND_SOC_HDMI_CODEC if SND_SOC
15 select SYNC_FILE 15 select SYNC_FILE
16 select PM_OPP
16 default y 17 default y
17 help 18 help
18 DRM/KMS driver for MSM/snapdragon. 19 DRM/KMS driver for MSM/snapdragon.
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c
index f8f48d014978..9c34d7824988 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c
@@ -116,7 +116,7 @@ static int mdss_hw_irqdomain_map(struct irq_domain *d, unsigned int irq,
116 return 0; 116 return 0;
117} 117}
118 118
119static struct irq_domain_ops mdss_hw_irqdomain_ops = { 119static const struct irq_domain_ops mdss_hw_irqdomain_ops = {
120 .map = mdss_hw_irqdomain_map, 120 .map = mdss_hw_irqdomain_map,
121 .xlate = irq_domain_xlate_onecell, 121 .xlate = irq_domain_xlate_onecell,
122}; 122};
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index a38c5fe6cc19..7d3741215387 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -225,9 +225,10 @@ mdp5_plane_duplicate_state(struct drm_plane *plane)
225 225
226 mdp5_state = kmemdup(to_mdp5_plane_state(plane->state), 226 mdp5_state = kmemdup(to_mdp5_plane_state(plane->state),
227 sizeof(*mdp5_state), GFP_KERNEL); 227 sizeof(*mdp5_state), GFP_KERNEL);
228 if (!mdp5_state)
229 return NULL;
228 230
229 if (mdp5_state && mdp5_state->base.fb) 231 __drm_atomic_helper_plane_duplicate_state(plane, &mdp5_state->base);
230 drm_framebuffer_reference(mdp5_state->base.fb);
231 232
232 return &mdp5_state->base; 233 return &mdp5_state->base;
233} 234}
@@ -444,6 +445,10 @@ static int mdp5_plane_atomic_check_with_state(struct drm_crtc_state *crtc_state,
444 mdp5_pipe_release(state->state, old_hwpipe); 445 mdp5_pipe_release(state->state, old_hwpipe);
445 mdp5_pipe_release(state->state, old_right_hwpipe); 446 mdp5_pipe_release(state->state, old_right_hwpipe);
446 } 447 }
448 } else {
449 mdp5_pipe_release(state->state, mdp5_state->hwpipe);
450 mdp5_pipe_release(state->state, mdp5_state->r_hwpipe);
451 mdp5_state->hwpipe = mdp5_state->r_hwpipe = NULL;
447 } 452 }
448 453
449 return 0; 454 return 0;
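The mdp5 duplicate_state fix adds the missing NULL check after kmemdup() and delegates reference bookkeeping to __drm_atomic_helper_plane_duplicate_state(), which handles everything the base state owns rather than just the framebuffer. A hedged sketch of the resulting shape, with the mdp5 types assumed from the diff:

#include <drm/drm_atomic_helper.h>

/* Sketch: duplicating driver-private plane state. kmemdup() copies the
 * subclass fields; the helper then fixes up the embedded base state
 * and takes the references it needs. */
static struct drm_plane_state *
plane_duplicate_state(struct drm_plane *plane)
{
	struct mdp5_plane_state *state;

	if (WARN_ON(!plane->state))
		return NULL;

	state = kmemdup(to_mdp5_plane_state(plane->state),
			sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_plane_duplicate_state(plane, &state->base);

	return &state->base;
}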
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 87b5695d4034..9d498eb81906 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -830,6 +830,7 @@ static struct drm_driver msm_driver = {
830 .prime_fd_to_handle = drm_gem_prime_fd_to_handle, 830 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
831 .gem_prime_export = drm_gem_prime_export, 831 .gem_prime_export = drm_gem_prime_export,
832 .gem_prime_import = drm_gem_prime_import, 832 .gem_prime_import = drm_gem_prime_import,
833 .gem_prime_res_obj = msm_gem_prime_res_obj,
833 .gem_prime_pin = msm_gem_prime_pin, 834 .gem_prime_pin = msm_gem_prime_pin,
834 .gem_prime_unpin = msm_gem_prime_unpin, 835 .gem_prime_unpin = msm_gem_prime_unpin,
835 .gem_prime_get_sg_table = msm_gem_prime_get_sg_table, 836 .gem_prime_get_sg_table = msm_gem_prime_get_sg_table,
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 28b6f9ba5066..1b26ca626528 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -224,6 +224,7 @@ struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj);
224void *msm_gem_prime_vmap(struct drm_gem_object *obj); 224void *msm_gem_prime_vmap(struct drm_gem_object *obj);
225void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); 225void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
226int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); 226int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
227struct reservation_object *msm_gem_prime_res_obj(struct drm_gem_object *obj);
227struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev, 228struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
228 struct dma_buf_attachment *attach, struct sg_table *sg); 229 struct dma_buf_attachment *attach, struct sg_table *sg);
229int msm_gem_prime_pin(struct drm_gem_object *obj); 230int msm_gem_prime_pin(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/msm/msm_fence.c b/drivers/gpu/drm/msm/msm_fence.c
index 3f299c537b77..a2f89bac9c16 100644
--- a/drivers/gpu/drm/msm/msm_fence.c
+++ b/drivers/gpu/drm/msm/msm_fence.c
@@ -99,8 +99,8 @@ void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence)
99} 99}
100 100
101struct msm_fence { 101struct msm_fence {
102 struct msm_fence_context *fctx;
103 struct dma_fence base; 102 struct dma_fence base;
103 struct msm_fence_context *fctx;
104}; 104};
105 105
106static inline struct msm_fence *to_msm_fence(struct dma_fence *fence) 106static inline struct msm_fence *to_msm_fence(struct dma_fence *fence)
@@ -130,19 +130,13 @@ static bool msm_fence_signaled(struct dma_fence *fence)
130 return fence_completed(f->fctx, f->base.seqno); 130 return fence_completed(f->fctx, f->base.seqno);
131} 131}
132 132
133static void msm_fence_release(struct dma_fence *fence)
134{
135 struct msm_fence *f = to_msm_fence(fence);
136 kfree_rcu(f, base.rcu);
137}
138
139static const struct dma_fence_ops msm_fence_ops = { 133static const struct dma_fence_ops msm_fence_ops = {
140 .get_driver_name = msm_fence_get_driver_name, 134 .get_driver_name = msm_fence_get_driver_name,
141 .get_timeline_name = msm_fence_get_timeline_name, 135 .get_timeline_name = msm_fence_get_timeline_name,
142 .enable_signaling = msm_fence_enable_signaling, 136 .enable_signaling = msm_fence_enable_signaling,
143 .signaled = msm_fence_signaled, 137 .signaled = msm_fence_signaled,
144 .wait = dma_fence_default_wait, 138 .wait = dma_fence_default_wait,
145 .release = msm_fence_release, 139 .release = dma_fence_free,
146}; 140};
147 141
148struct dma_fence * 142struct dma_fence *
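Reordering struct msm_fence so base comes first is what makes the plain dma_fence_free() release hook safe: dma_fence_free() frees the struct dma_fence pointer via kfree_rcu(), and kfree() must receive the start of the allocation, so the embedded fence has to be the first member. A small illustration of the constraint:

/* Sketch: 'base' must be first once dma_fence_free() is the release
 * hook. With base first, (void *)&f->base == (void *)f, so freeing
 * the dma_fence frees the whole msm_fence allocation. */
struct msm_fence {
	struct dma_fence base;		/* MUST stay first */
	struct msm_fence_context *fctx;
};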
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 68e509b3b9e4..50289a23baf8 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -758,6 +758,8 @@ static int msm_gem_new_impl(struct drm_device *dev,
758 struct msm_gem_object *msm_obj; 758 struct msm_gem_object *msm_obj;
759 bool use_vram = false; 759 bool use_vram = false;
760 760
761 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
762
761 switch (flags & MSM_BO_CACHE_MASK) { 763 switch (flags & MSM_BO_CACHE_MASK) {
762 case MSM_BO_UNCACHED: 764 case MSM_BO_UNCACHED:
763 case MSM_BO_CACHED: 765 case MSM_BO_CACHED:
@@ -853,7 +855,11 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
853 855
854 size = PAGE_ALIGN(dmabuf->size); 856 size = PAGE_ALIGN(dmabuf->size);
855 857
858 /* Take mutex so we can modify the inactive list in msm_gem_new_impl */
859 mutex_lock(&dev->struct_mutex);
856 ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj); 860 ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj);
861 mutex_unlock(&dev->struct_mutex);
862
857 if (ret) 863 if (ret)
858 goto fail; 864 goto fail;
859 865
diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
index 60bb290700ce..13403c6da6c7 100644
--- a/drivers/gpu/drm/msm/msm_gem_prime.c
+++ b/drivers/gpu/drm/msm/msm_gem_prime.c
@@ -70,3 +70,10 @@ void msm_gem_prime_unpin(struct drm_gem_object *obj)
70 if (!obj->import_attach) 70 if (!obj->import_attach)
71 msm_gem_put_pages(obj); 71 msm_gem_put_pages(obj);
72} 72}
73
74struct reservation_object *msm_gem_prime_res_obj(struct drm_gem_object *obj)
75{
76 struct msm_gem_object *msm_obj = to_msm_bo(obj);
77
78 return msm_obj->resv;
79}
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 1c545ebe6a5a..7832e6421d25 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -410,12 +410,11 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
410 if (!in_fence) 410 if (!in_fence)
411 return -EINVAL; 411 return -EINVAL;
412 412
413 /* TODO if we get an array-fence due to userspace merging multiple 413 /*
414 * fences, we need a way to determine if all the backing fences 414 * Wait if the fence is from a foreign context, or if the fence
415 * are from our own context.. 415 * array contains any fence from a foreign context.
416 */ 416 */
417 417 if (!dma_fence_match_context(in_fence, gpu->fctx->context)) {
418 if (in_fence->context != gpu->fctx->context) {
419 ret = dma_fence_wait(in_fence, true); 418 ret = dma_fence_wait(in_fence, true);
420 if (ret) 419 if (ret)
421 return ret; 420 return ret;
@@ -496,8 +495,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
496 goto out; 495 goto out;
497 } 496 }
498 497
499 if ((submit_cmd.size + submit_cmd.submit_offset) >= 498 if (!submit_cmd.size ||
500 msm_obj->base.size) { 499 ((submit_cmd.size + submit_cmd.submit_offset) >
500 msm_obj->base.size)) {
501 DRM_ERROR("invalid cmdstream size: %u\n", submit_cmd.size); 501 DRM_ERROR("invalid cmdstream size: %u\n", submit_cmd.size);
502 ret = -EINVAL; 502 ret = -EINVAL;
503 goto out; 503 goto out;
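The submit-validation fix above rejects empty command buffers and corrects the boundary: a command ending exactly at the object's last byte is legal, so the test must use > rather than >=. A hedged sketch of an equivalent, overflow-safe form of the same check (names illustrative):

/* Sketch: validate a [offset, offset + size) command window inside a
 * BO of obj_size bytes. The subtraction form cannot wrap, unlike
 * offset + size. */
static bool cmd_in_bounds(u32 offset, u32 size, u32 obj_size)
{
	if (size == 0)
		return false;	/* empty cmdstream is invalid */
	if (offset > obj_size)
		return false;
	return size <= obj_size - offset;
}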
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 97b9c38c6b3f..0fdc88d79ca8 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -549,9 +549,9 @@ static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
549 gpu->grp_clks[i] = get_clock(dev, name); 549 gpu->grp_clks[i] = get_clock(dev, name);
550 550
551 /* Remember the key clocks that we need to control later */ 551 /* Remember the key clocks that we need to control later */
552 if (!strcmp(name, "core")) 552 if (!strcmp(name, "core") || !strcmp(name, "core_clk"))
553 gpu->core_clk = gpu->grp_clks[i]; 553 gpu->core_clk = gpu->grp_clks[i];
554 else if (!strcmp(name, "rbbmtimer")) 554 else if (!strcmp(name, "rbbmtimer") || !strcmp(name, "rbbmtimer_clk"))
555 gpu->rbbmtimer_clk = gpu->grp_clks[i]; 555 gpu->rbbmtimer_clk = gpu->grp_clks[i];
556 556
557 ++i; 557 ++i;
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_crtc.c b/drivers/gpu/drm/mxsfb/mxsfb_crtc.c
index 1144e0c9e894..0abe77675b76 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_crtc.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_crtc.c
@@ -35,6 +35,13 @@
35#include "mxsfb_drv.h" 35#include "mxsfb_drv.h"
36#include "mxsfb_regs.h" 36#include "mxsfb_regs.h"
37 37
38#define MXS_SET_ADDR 0x4
39#define MXS_CLR_ADDR 0x8
40#define MODULE_CLKGATE BIT(30)
41#define MODULE_SFTRST BIT(31)
42/* 1 second delay should be plenty of time for block reset */
43#define RESET_TIMEOUT 1000000
44
38static u32 set_hsync_pulse_width(struct mxsfb_drm_private *mxsfb, u32 val) 45static u32 set_hsync_pulse_width(struct mxsfb_drm_private *mxsfb, u32 val)
39{ 46{
40 return (val & mxsfb->devdata->hs_wdth_mask) << 47 return (val & mxsfb->devdata->hs_wdth_mask) <<
@@ -159,6 +166,36 @@ static void mxsfb_disable_controller(struct mxsfb_drm_private *mxsfb)
159 clk_disable_unprepare(mxsfb->clk_disp_axi); 166 clk_disable_unprepare(mxsfb->clk_disp_axi);
160} 167}
161 168
169/*
170 * Clear the bit and poll until it reads back as cleared. This is
171 * usually called with a reset address and a mask of either SFTRST
172 * (bit 31) or CLKGATE (bit 30).
173 */
174static int clear_poll_bit(void __iomem *addr, u32 mask)
175{
176 u32 reg;
177
178 writel(mask, addr + MXS_CLR_ADDR);
179 return readl_poll_timeout(addr, reg, !(reg & mask), 0, RESET_TIMEOUT);
180}
181
182static int mxsfb_reset_block(void __iomem *reset_addr)
183{
184 int ret;
185
186 ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
187 if (ret)
188 return ret;
189
190 writel(MODULE_CLKGATE, reset_addr + MXS_CLR_ADDR);
191
192 ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
193 if (ret)
194 return ret;
195
196 return clear_poll_bit(reset_addr, MODULE_CLKGATE);
197}
198
162static void mxsfb_crtc_mode_set_nofb(struct mxsfb_drm_private *mxsfb) 199static void mxsfb_crtc_mode_set_nofb(struct mxsfb_drm_private *mxsfb)
163{ 200{
164 struct drm_display_mode *m = &mxsfb->pipe.crtc.state->adjusted_mode; 201 struct drm_display_mode *m = &mxsfb->pipe.crtc.state->adjusted_mode;
@@ -173,6 +210,11 @@ static void mxsfb_crtc_mode_set_nofb(struct mxsfb_drm_private *mxsfb)
173 */ 210 */
174 mxsfb_enable_axi_clk(mxsfb); 211 mxsfb_enable_axi_clk(mxsfb);
175 212
213 /* Mandatory eLCDIF reset as per the Reference Manual */
214 err = mxsfb_reset_block(mxsfb->base);
215 if (err)
216 return;
217
176 /* Clear the FIFOs */ 218 /* Clear the FIFOs */
177 writel(CTRL1_FIFO_CLEAR, mxsfb->base + LCDC_CTRL1 + REG_SET); 219 writel(CTRL1_FIFO_CLEAR, mxsfb->base + LCDC_CTRL1 + REG_SET);
178 220
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
index 6a567fe347b3..820a4805916f 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
@@ -4,6 +4,7 @@
4 4
5struct nvkm_alarm { 5struct nvkm_alarm {
6 struct list_head head; 6 struct list_head head;
7 struct list_head exec;
7 u64 timestamp; 8 u64 timestamp;
8 void (*func)(struct nvkm_alarm *); 9 void (*func)(struct nvkm_alarm *);
9}; 10};
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 21b10f9840c9..549763f5e17d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -360,6 +360,8 @@ nouveau_display_hpd_work(struct work_struct *work)
360 pm_runtime_get_sync(drm->dev->dev); 360 pm_runtime_get_sync(drm->dev->dev);
361 361
362 drm_helper_hpd_irq_event(drm->dev); 362 drm_helper_hpd_irq_event(drm->dev);
363 /* enable polling for external displays */
364 drm_kms_helper_poll_enable(drm->dev);
363 365
364 pm_runtime_mark_last_busy(drm->dev->dev); 366 pm_runtime_mark_last_busy(drm->dev->dev);
365 pm_runtime_put_sync(drm->dev->dev); 367 pm_runtime_put_sync(drm->dev->dev);
@@ -413,10 +415,6 @@ nouveau_display_init(struct drm_device *dev)
413 if (ret) 415 if (ret)
414 return ret; 416 return ret;
415 417
416 /* enable polling for external displays */
417 if (!dev->mode_config.poll_enabled)
418 drm_kms_helper_poll_enable(dev);
419
420 /* enable hotplug interrupts */ 418 /* enable hotplug interrupts */
421 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 419 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
422 struct nouveau_connector *conn = nouveau_connector(connector); 420 struct nouveau_connector *conn = nouveau_connector(connector);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 2b6ac24ce690..15a13d09d431 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -80,7 +80,7 @@ int nouveau_modeset = -1;
80module_param_named(modeset, nouveau_modeset, int, 0400); 80module_param_named(modeset, nouveau_modeset, int, 0400);
81 81
82MODULE_PARM_DESC(runpm, "disable (0), force enable (1), optimus only default (-1)"); 82MODULE_PARM_DESC(runpm, "disable (0), force enable (1), optimus only default (-1)");
83int nouveau_runtime_pm = -1; 83static int nouveau_runtime_pm = -1;
84module_param_named(runpm, nouveau_runtime_pm, int, 0400); 84module_param_named(runpm, nouveau_runtime_pm, int, 0400);
85 85
86static struct drm_driver driver_stub; 86static struct drm_driver driver_stub;
@@ -495,13 +495,16 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
495 nouveau_fbcon_init(dev); 495 nouveau_fbcon_init(dev);
496 nouveau_led_init(dev); 496 nouveau_led_init(dev);
497 497
498 if (nouveau_runtime_pm != 0) { 498 if (nouveau_pmops_runtime()) {
499 pm_runtime_use_autosuspend(dev->dev); 499 pm_runtime_use_autosuspend(dev->dev);
500 pm_runtime_set_autosuspend_delay(dev->dev, 5000); 500 pm_runtime_set_autosuspend_delay(dev->dev, 5000);
501 pm_runtime_set_active(dev->dev); 501 pm_runtime_set_active(dev->dev);
502 pm_runtime_allow(dev->dev); 502 pm_runtime_allow(dev->dev);
503 pm_runtime_mark_last_busy(dev->dev); 503 pm_runtime_mark_last_busy(dev->dev);
504 pm_runtime_put(dev->dev); 504 pm_runtime_put(dev->dev);
505 } else {
506 /* enable polling for external displays */
507 drm_kms_helper_poll_enable(dev);
505 } 508 }
506 return 0; 509 return 0;
507 510
@@ -524,7 +527,7 @@ nouveau_drm_unload(struct drm_device *dev)
524{ 527{
525 struct nouveau_drm *drm = nouveau_drm(dev); 528 struct nouveau_drm *drm = nouveau_drm(dev);
526 529
527 if (nouveau_runtime_pm != 0) { 530 if (nouveau_pmops_runtime()) {
528 pm_runtime_get_sync(dev->dev); 531 pm_runtime_get_sync(dev->dev);
529 pm_runtime_forbid(dev->dev); 532 pm_runtime_forbid(dev->dev);
530 } 533 }
@@ -723,6 +726,14 @@ nouveau_pmops_thaw(struct device *dev)
723 return nouveau_do_resume(drm_dev, false); 726 return nouveau_do_resume(drm_dev, false);
724} 727}
725 728
729bool
730nouveau_pmops_runtime()
731{
732 if (nouveau_runtime_pm == -1)
733 return nouveau_is_optimus() || nouveau_is_v1_dsm();
734 return nouveau_runtime_pm == 1;
735}
736
726static int 737static int
727nouveau_pmops_runtime_suspend(struct device *dev) 738nouveau_pmops_runtime_suspend(struct device *dev)
728{ 739{
@@ -730,14 +741,7 @@ nouveau_pmops_runtime_suspend(struct device *dev)
730 struct drm_device *drm_dev = pci_get_drvdata(pdev); 741 struct drm_device *drm_dev = pci_get_drvdata(pdev);
731 int ret; 742 int ret;
732 743
733 if (nouveau_runtime_pm == 0) { 744 if (!nouveau_pmops_runtime()) {
734 pm_runtime_forbid(dev);
735 return -EBUSY;
736 }
737
738 /* are we optimus enabled? */
739 if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) {
740 DRM_DEBUG_DRIVER("failing to power off - not optimus\n");
741 pm_runtime_forbid(dev); 745 pm_runtime_forbid(dev);
742 return -EBUSY; 746 return -EBUSY;
743 } 747 }
@@ -762,8 +766,10 @@ nouveau_pmops_runtime_resume(struct device *dev)
762 struct nvif_device *device = &nouveau_drm(drm_dev)->client.device; 766 struct nvif_device *device = &nouveau_drm(drm_dev)->client.device;
763 int ret; 767 int ret;
764 768
765 if (nouveau_runtime_pm == 0) 769 if (!nouveau_pmops_runtime()) {
766 return -EINVAL; 770 pm_runtime_forbid(dev);
771 return -EBUSY;
772 }
767 773
768 pci_set_power_state(pdev, PCI_D0); 774 pci_set_power_state(pdev, PCI_D0);
769 pci_restore_state(pdev); 775 pci_restore_state(pdev);
@@ -774,9 +780,6 @@ nouveau_pmops_runtime_resume(struct device *dev)
774 780
775 ret = nouveau_do_resume(drm_dev, true); 781 ret = nouveau_do_resume(drm_dev, true);
776 782
777 if (!drm_dev->mode_config.poll_enabled)
778 drm_kms_helper_poll_enable(drm_dev);
779
780 /* do magic */ 783 /* do magic */
781 nvif_mask(&device->object, 0x088488, (1 << 25), (1 << 25)); 784 nvif_mask(&device->object, 0x088488, (1 << 25), (1 << 25));
782 vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON); 785 vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON);
@@ -796,14 +799,7 @@ nouveau_pmops_runtime_idle(struct device *dev)
796 struct nouveau_drm *drm = nouveau_drm(drm_dev); 799 struct nouveau_drm *drm = nouveau_drm(drm_dev);
797 struct drm_crtc *crtc; 800 struct drm_crtc *crtc;
798 801
799 if (nouveau_runtime_pm == 0) { 802 if (!nouveau_pmops_runtime()) {
800 pm_runtime_forbid(dev);
801 return -EBUSY;
802 }
803
804 /* are we optimus enabled? */
805 if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) {
806 DRM_DEBUG_DRIVER("failing to power off - not optimus\n");
807 pm_runtime_forbid(dev); 803 pm_runtime_forbid(dev);
808 return -EBUSY; 804 return -EBUSY;
809 } 805 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index eadec2f49ad3..a11b6aaed325 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -108,8 +108,6 @@ nouveau_cli(struct drm_file *fpriv)
108#include <nvif/object.h> 108#include <nvif/object.h>
109#include <nvif/device.h> 109#include <nvif/device.h>
110 110
111extern int nouveau_runtime_pm;
112
113struct nouveau_drm { 111struct nouveau_drm {
114 struct nouveau_cli client; 112 struct nouveau_cli client;
115 struct drm_device *dev; 113 struct drm_device *dev;
@@ -195,6 +193,7 @@ nouveau_drm(struct drm_device *dev)
195 193
196int nouveau_pmops_suspend(struct device *); 194int nouveau_pmops_suspend(struct device *);
197int nouveau_pmops_resume(struct device *); 195int nouveau_pmops_resume(struct device *);
196bool nouveau_pmops_runtime(void);
198 197
199#include <nvkm/core/tegra.h> 198#include <nvkm/core/tegra.h>
200 199
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
index a4aacbc0cec8..02fe0efb9e16 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
@@ -87,7 +87,7 @@ void
87nouveau_vga_init(struct nouveau_drm *drm) 87nouveau_vga_init(struct nouveau_drm *drm)
88{ 88{
89 struct drm_device *dev = drm->dev; 89 struct drm_device *dev = drm->dev;
90 bool runtime = false; 90 bool runtime = nouveau_pmops_runtime();
91 91
92 /* only relevant for PCI devices */ 92 /* only relevant for PCI devices */
93 if (!dev->pdev) 93 if (!dev->pdev)
@@ -99,10 +99,6 @@ nouveau_vga_init(struct nouveau_drm *drm)
99 if (pci_is_thunderbolt_attached(dev->pdev)) 99 if (pci_is_thunderbolt_attached(dev->pdev))
100 return; 100 return;
101 101
102 if (nouveau_runtime_pm == 1)
103 runtime = true;
104 if ((nouveau_runtime_pm == -1) && (nouveau_is_optimus() || nouveau_is_v1_dsm()))
105 runtime = true;
106 vga_switcheroo_register_client(dev->pdev, &nouveau_switcheroo_ops, runtime); 102 vga_switcheroo_register_client(dev->pdev, &nouveau_switcheroo_ops, runtime);
107 103
108 if (runtime && nouveau_is_v1_dsm() && !nouveau_is_optimus()) 104 if (runtime && nouveau_is_v1_dsm() && !nouveau_is_optimus())
@@ -113,18 +109,13 @@ void
113nouveau_vga_fini(struct nouveau_drm *drm) 109nouveau_vga_fini(struct nouveau_drm *drm)
114{ 110{
115 struct drm_device *dev = drm->dev; 111 struct drm_device *dev = drm->dev;
116 bool runtime = false; 112 bool runtime = nouveau_pmops_runtime();
117 113
118 vga_client_register(dev->pdev, NULL, NULL, NULL); 114 vga_client_register(dev->pdev, NULL, NULL, NULL);
119 115
120 if (pci_is_thunderbolt_attached(dev->pdev)) 116 if (pci_is_thunderbolt_attached(dev->pdev))
121 return; 117 return;
122 118
123 if (nouveau_runtime_pm == 1)
124 runtime = true;
125 if ((nouveau_runtime_pm == -1) && (nouveau_is_optimus() || nouveau_is_v1_dsm()))
126 runtime = true;
127
128 vga_switcheroo_unregister_client(dev->pdev); 119 vga_switcheroo_unregister_client(dev->pdev);
129 if (runtime && nouveau_is_v1_dsm() && !nouveau_is_optimus()) 120 if (runtime && nouveau_is_v1_dsm() && !nouveau_is_optimus())
130 vga_switcheroo_fini_domain_pm_ops(drm->dev->dev); 121 vga_switcheroo_fini_domain_pm_ops(drm->dev->dev);
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index a7663249b3ba..06e564a9ccb2 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -2107,7 +2107,8 @@ nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state)
2107 asyc->set.dither = true; 2107 asyc->set.dither = true;
2108 } 2108 }
2109 } else { 2109 } else {
2110 asyc->set.mask = ~0; 2110 if (asyc)
2111 asyc->set.mask = ~0;
2111 asyh->set.mask = ~0; 2112 asyh->set.mask = ~0;
2112 } 2113 }
2113 2114
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
index 3a24788c3185..a7e55c422501 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
@@ -148,7 +148,7 @@ gk104_fifo_runlist_commit(struct gk104_fifo *fifo, int runl)
148 case NVKM_MEM_TARGET_NCOH: target = 3; break; 148 case NVKM_MEM_TARGET_NCOH: target = 3; break;
149 default: 149 default:
150 WARN_ON(1); 150 WARN_ON(1);
151 return; 151 goto unlock;
152 } 152 }
153 153
154 nvkm_wr32(device, 0x002270, (nvkm_memory_addr(mem) >> 12) | 154 nvkm_wr32(device, 0x002270, (nvkm_memory_addr(mem) >> 12) |
@@ -160,6 +160,7 @@ gk104_fifo_runlist_commit(struct gk104_fifo *fifo, int runl)
160 & 0x00100000), 160 & 0x00100000),
161 msecs_to_jiffies(2000)) == 0) 161 msecs_to_jiffies(2000)) == 0)
162 nvkm_error(subdev, "runlist %d update timeout\n", runl); 162 nvkm_error(subdev, "runlist %d update timeout\n", runl);
163unlock:
163 mutex_unlock(&subdev->mutex); 164 mutex_unlock(&subdev->mutex);
164} 165}
165 166
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c
index d1cf02d22db1..1b0c793c0192 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c
@@ -116,6 +116,7 @@ ls_ucode_img_load_gr(const struct nvkm_subdev *subdev, struct ls_ucode_img *img,
116 ret = nvkm_firmware_get(subdev->device, f, &sig); 116 ret = nvkm_firmware_get(subdev->device, f, &sig);
117 if (ret) 117 if (ret)
118 goto free_data; 118 goto free_data;
119
119 img->sig = kmemdup(sig->data, sig->size, GFP_KERNEL); 120 img->sig = kmemdup(sig->data, sig->size, GFP_KERNEL);
120 if (!img->sig) { 121 if (!img->sig) {
121 ret = -ENOMEM; 122 ret = -ENOMEM;
@@ -126,8 +127,9 @@ ls_ucode_img_load_gr(const struct nvkm_subdev *subdev, struct ls_ucode_img *img,
126 img->ucode_data = ls_ucode_img_build(bl, code, data, 127 img->ucode_data = ls_ucode_img_build(bl, code, data,
127 &img->ucode_desc); 128 &img->ucode_desc);
128 if (IS_ERR(img->ucode_data)) { 129 if (IS_ERR(img->ucode_data)) {
130 kfree(img->sig);
129 ret = PTR_ERR(img->ucode_data); 131 ret = PTR_ERR(img->ucode_data);
130 goto free_data; 132 goto free_sig;
131 } 133 }
132 img->ucode_size = img->ucode_desc.image_size; 134 img->ucode_size = img->ucode_desc.image_size;
133 135
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
index f2a86eae0a0d..2437f7d41ca2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
@@ -50,7 +50,8 @@ nvkm_timer_alarm_trigger(struct nvkm_timer *tmr)
50 /* Move to completed list. We'll drop the lock before 50 /* Move to completed list. We'll drop the lock before
51 * executing the callback so it can reschedule itself. 51 * executing the callback so it can reschedule itself.
52 */ 52 */
53 list_move_tail(&alarm->head, &exec); 53 list_del_init(&alarm->head);
54 list_add(&alarm->exec, &exec);
54 } 55 }
55 56
56 /* Shut down interrupt if no more pending alarms. */ 57 /* Shut down interrupt if no more pending alarms. */
@@ -59,8 +60,8 @@ nvkm_timer_alarm_trigger(struct nvkm_timer *tmr)
59 spin_unlock_irqrestore(&tmr->lock, flags); 60 spin_unlock_irqrestore(&tmr->lock, flags);
60 61
61 /* Execute completed callbacks. */ 62 /* Execute completed callbacks. */
62 list_for_each_entry_safe(alarm, atemp, &exec, head) { 63 list_for_each_entry_safe(alarm, atemp, &exec, exec) {
63 list_del_init(&alarm->head); 64 list_del(&alarm->exec);
64 alarm->func(alarm); 65 alarm->func(alarm);
65 } 66 }
66} 67}
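The nvkm timer fix gives each alarm a second list head so that a callback which immediately re-arms itself (re-linking alarm->head onto the pending list) cannot corrupt the walk over the completed list; the trigger path owns exec, the pending list owns head. A condensed sketch of the two-list drain, with tmr->alarms and now assumed for illustration:

/* Sketch: drain completed alarms onto a private list so alarm->func()
 * may freely re-add alarm->head to the pending list. */
LIST_HEAD(exec);

spin_lock_irqsave(&tmr->lock, flags);
list_for_each_entry_safe(alarm, atemp, &tmr->alarms, head) {
	if (alarm->timestamp > now)
		break;
	list_del_init(&alarm->head);	/* off the pending list */
	list_add(&alarm->exec, &exec);	/* onto the private run list */
}
spin_unlock_irqrestore(&tmr->lock, flags);

list_for_each_entry_safe(alarm, atemp, &exec, exec) {
	list_del(&alarm->exec);
	alarm->func(alarm);		/* may re-arm via alarm->head */
}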
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 058340a002c2..4a340efd8ba6 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -575,8 +575,6 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
575 if (ret) 575 if (ret)
576 return; 576 return;
577 577
578 cmd = (struct qxl_cursor_cmd *) qxl_release_map(qdev, release);
579
580 if (fb != old_state->fb) { 578 if (fb != old_state->fb) {
581 obj = to_qxl_framebuffer(fb)->obj; 579 obj = to_qxl_framebuffer(fb)->obj;
582 user_bo = gem_to_qxl_bo(obj); 580 user_bo = gem_to_qxl_bo(obj);
@@ -614,6 +612,7 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
614 qxl_bo_kunmap(cursor_bo); 612 qxl_bo_kunmap(cursor_bo);
615 qxl_bo_kunmap(user_bo); 613 qxl_bo_kunmap(user_bo);
616 614
615 cmd = (struct qxl_cursor_cmd *) qxl_release_map(qdev, release);
617 cmd->u.set.visible = 1; 616 cmd->u.set.visible = 1;
618 cmd->u.set.shape = qxl_bo_physical_address(qdev, 617 cmd->u.set.shape = qxl_bo_physical_address(qdev,
619 cursor_bo, 0); 618 cursor_bo, 0);
@@ -624,6 +623,7 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
624 if (ret) 623 if (ret)
625 goto out_free_release; 624 goto out_free_release;
626 625
626 cmd = (struct qxl_cursor_cmd *) qxl_release_map(qdev, release);
627 cmd->type = QXL_CURSOR_MOVE; 627 cmd->type = QXL_CURSOR_MOVE;
628 } 628 }
629 629
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index 7ba450832e6b..ea36dc4dd5d2 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -776,6 +776,12 @@ bool ci_dpm_vblank_too_short(struct radeon_device *rdev)
776 u32 vblank_time = r600_dpm_get_vblank_time(rdev); 776 u32 vblank_time = r600_dpm_get_vblank_time(rdev);
777 u32 switch_limit = pi->mem_gddr5 ? 450 : 300; 777 u32 switch_limit = pi->mem_gddr5 ? 450 : 300;
778 778
779 /* disable mclk switching if the refresh is >120Hz, even if the
780 * blanking period would allow it
781 */
782 if (r600_dpm_get_vrefresh(rdev) > 120)
783 return true;
784
779 if (vblank_time < switch_limit) 785 if (vblank_time < switch_limit)
780 return true; 786 return true;
781 else 787 else
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index ccebe0f8d2e1..ca44233ceacc 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -7401,7 +7401,7 @@ static inline void cik_irq_ack(struct radeon_device *rdev)
7401 WREG32(DC_HPD5_INT_CONTROL, tmp); 7401 WREG32(DC_HPD5_INT_CONTROL, tmp);
7402 } 7402 }
7403 if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_INTERRUPT) { 7403 if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_INTERRUPT) {
7404 tmp = RREG32(DC_HPD5_INT_CONTROL); 7404 tmp = RREG32(DC_HPD6_INT_CONTROL);
7405 tmp |= DC_HPDx_INT_ACK; 7405 tmp |= DC_HPDx_INT_ACK;
7406 WREG32(DC_HPD6_INT_CONTROL, tmp); 7406 WREG32(DC_HPD6_INT_CONTROL, tmp);
7407 } 7407 }
@@ -7431,7 +7431,7 @@ static inline void cik_irq_ack(struct radeon_device *rdev)
7431 WREG32(DC_HPD5_INT_CONTROL, tmp); 7431 WREG32(DC_HPD5_INT_CONTROL, tmp);
7432 } 7432 }
7433 if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) { 7433 if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
7434 tmp = RREG32(DC_HPD5_INT_CONTROL); 7434 tmp = RREG32(DC_HPD6_INT_CONTROL);
7435 tmp |= DC_HPDx_RX_INT_ACK; 7435 tmp |= DC_HPDx_RX_INT_ACK;
7436 WREG32(DC_HPD6_INT_CONTROL, tmp); 7436 WREG32(DC_HPD6_INT_CONTROL, tmp);
7437 } 7437 }
@@ -9267,8 +9267,11 @@ static void dce8_program_watermarks(struct radeon_device *rdev,
9267 u32 tmp, wm_mask; 9267 u32 tmp, wm_mask;
9268 9268
9269 if (radeon_crtc->base.enabled && num_heads && mode) { 9269 if (radeon_crtc->base.enabled && num_heads && mode) {
9270 active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock; 9270 active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
9271 line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535); 9271 (u32)mode->clock);
9272 line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
9273 (u32)mode->clock);
9274 line_time = min(line_time, (u32)65535);
9272 9275
9273 /* watermark for high clocks */ 9276 /* watermark for high clocks */
9274 if ((rdev->pm.pm_method == PM_METHOD_DPM) && 9277 if ((rdev->pm.pm_method == PM_METHOD_DPM) &&
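The dce8 watermark change is a 32-bit overflow fix: crtc_htotal * 1000000 exceeds U32_MAX once htotal passes 4294, which large modes can reach, so the multiply is widened to 64 bits and divided with div_u64(). A minimal sketch of the widened arithmetic (helper name assumed; units follow the driver's existing convention):

#include <linux/kernel.h>
#include <linux/math64.h>

/* Sketch: scaled line time without 32-bit multiply overflow, clamped
 * to the 16-bit register field as the driver does. */
static u32 line_time_scaled(u32 htotal, u32 mode_clock)
{
	u32 t = (u32)div_u64((u64)htotal * 1000000, mode_clock);

	return min_t(u32, t, 65535);
}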
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index f130ec41ee4b..534637203e70 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -2266,8 +2266,11 @@ static void evergreen_program_watermarks(struct radeon_device *rdev,
2266 fixed20_12 a, b, c; 2266 fixed20_12 a, b, c;
2267 2267
2268 if (radeon_crtc->base.enabled && num_heads && mode) { 2268 if (radeon_crtc->base.enabled && num_heads && mode) {
2269 active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock; 2269 active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
2270 line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535); 2270 (u32)mode->clock);
2271 line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
2272 (u32)mode->clock);
2273 line_time = min(line_time, (u32)65535);
2271 priority_a_cnt = 0; 2274 priority_a_cnt = 0;
2272 priority_b_cnt = 0; 2275 priority_b_cnt = 0;
2273 dram_channels = evergreen_get_number_of_dram_channels(rdev); 2276 dram_channels = evergreen_get_number_of_dram_channels(rdev);
@@ -4927,7 +4930,7 @@ static void evergreen_irq_ack(struct radeon_device *rdev)
4927 WREG32(DC_HPD5_INT_CONTROL, tmp); 4930 WREG32(DC_HPD5_INT_CONTROL, tmp);
4928 } 4931 }
4929 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) { 4932 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
4930 tmp = RREG32(DC_HPD5_INT_CONTROL); 4933 tmp = RREG32(DC_HPD6_INT_CONTROL);
4931 tmp |= DC_HPDx_INT_ACK; 4934 tmp |= DC_HPDx_INT_ACK;
4932 WREG32(DC_HPD6_INT_CONTROL, tmp); 4935 WREG32(DC_HPD6_INT_CONTROL, tmp);
4933 } 4936 }
@@ -4958,7 +4961,7 @@ static void evergreen_irq_ack(struct radeon_device *rdev)
4958 WREG32(DC_HPD5_INT_CONTROL, tmp); 4961 WREG32(DC_HPD5_INT_CONTROL, tmp);
4959 } 4962 }
4960 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) { 4963 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
4961 tmp = RREG32(DC_HPD5_INT_CONTROL); 4964 tmp = RREG32(DC_HPD6_INT_CONTROL);
4962 tmp |= DC_HPDx_RX_INT_ACK; 4965 tmp |= DC_HPDx_RX_INT_ACK;
4963 WREG32(DC_HPD6_INT_CONTROL, tmp); 4966 WREG32(DC_HPD6_INT_CONTROL, tmp);
4964 } 4967 }
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 0a085176e79b..e06e2d8feab3 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -3988,7 +3988,7 @@ static void r600_irq_ack(struct radeon_device *rdev)
3988 WREG32(DC_HPD5_INT_CONTROL, tmp); 3988 WREG32(DC_HPD5_INT_CONTROL, tmp);
3989 } 3989 }
3990 if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) { 3990 if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
3991 tmp = RREG32(DC_HPD5_INT_CONTROL); 3991 tmp = RREG32(DC_HPD6_INT_CONTROL);
3992 tmp |= DC_HPDx_INT_ACK; 3992 tmp |= DC_HPDx_INT_ACK;
3993 WREG32(DC_HPD6_INT_CONTROL, tmp); 3993 WREG32(DC_HPD6_INT_CONTROL, tmp);
3994 } 3994 }
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 432480ff9d22..3178ba0c537c 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -3393,6 +3393,13 @@ void radeon_combios_asic_init(struct drm_device *dev)
3393 rdev->pdev->subsystem_vendor == 0x103c && 3393 rdev->pdev->subsystem_vendor == 0x103c &&
3394 rdev->pdev->subsystem_device == 0x280a) 3394 rdev->pdev->subsystem_device == 0x280a)
3395 return; 3395 return;
3396 /* quirk for rs4xx Toshiba Satellite L20-183 laptop to make it resume
3397 * - it hangs on resume inside the dynclk 1 table.
3398 */
3399 if (rdev->family == CHIP_RS400 &&
3400 rdev->pdev->subsystem_vendor == 0x1179 &&
3401 rdev->pdev->subsystem_device == 0xff31)
3402 return;
3396 3403
3397 /* DYN CLK 1 */ 3404 /* DYN CLK 1 */
3398 table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE); 3405 table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE);
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 6ecf42783d4b..0a6444d72000 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -136,6 +136,10 @@ static struct radeon_px_quirk radeon_px_quirk_list[] = {
136 * https://bugzilla.kernel.org/show_bug.cgi?id=51381 136 * https://bugzilla.kernel.org/show_bug.cgi?id=51381
137 */ 137 */
138 { PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX }, 138 { PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
139 /* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
140 * https://bugs.freedesktop.org/show_bug.cgi?id=101491
141 */
142 { PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
139 /* macbook pro 8.2 */ 143 /* macbook pro 8.2 */
140 { PCI_VENDOR_ID_ATI, 0x6741, PCI_VENDOR_ID_APPLE, 0x00e2, RADEON_PX_QUIRK_LONG_WAKEUP }, 144 { PCI_VENDOR_ID_ATI, 0x6741, PCI_VENDOR_ID_APPLE, 0x00e2, RADEON_PX_QUIRK_LONG_WAKEUP },
141 { 0, 0, 0, 0, 0 }, 145 { 0, 0, 0, 0, 0 },
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index e3e7cb1d10a2..4761f27f2ca2 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -116,7 +116,7 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
116 if ((radeon_runtime_pm != 0) && 116 if ((radeon_runtime_pm != 0) &&
117 radeon_has_atpx() && 117 radeon_has_atpx() &&
118 ((flags & RADEON_IS_IGP) == 0) && 118 ((flags & RADEON_IS_IGP) == 0) &&
119 !pci_is_thunderbolt_attached(rdev->pdev)) 119 !pci_is_thunderbolt_attached(dev->pdev))
120 flags |= RADEON_IS_PX; 120 flags |= RADEON_IS_PX;
121 121
122 /* radeon_device_init should report only fatal error 122 /* radeon_device_init should report only fatal error
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 7431eb4a11b7..d34d1cf33895 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -621,7 +621,7 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
 	}
 
 	/* TODO: is this still necessary on NI+ ? */
-	if ((cmd == 0 || cmd == 1 || cmd == 0x3) &&
+	if ((cmd == 0 || cmd == 0x3) &&
 	    (start >> 28) != (p->rdev->uvd.gpu_addr >> 28)) {
 		DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n",
 			  start, end);
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index ceee87f029d9..5303f25d5280 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -2284,8 +2284,11 @@ static void dce6_program_watermarks(struct radeon_device *rdev,
 	fixed20_12 a, b, c;
 
 	if (radeon_crtc->base.enabled && num_heads && mode) {
-		active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
-		line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
+		active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
+					    (u32)mode->clock);
+		line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
+					  (u32)mode->clock);
+		line_time = min(line_time, (u32)65535);
 		priority_a_cnt = 0;
 		priority_b_cnt = 0;
 
@@ -6317,7 +6320,7 @@ static inline void si_irq_ack(struct radeon_device *rdev)
 		WREG32(DC_HPD5_INT_CONTROL, tmp);
 	}
 	if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
-		tmp = RREG32(DC_HPD5_INT_CONTROL);
+		tmp = RREG32(DC_HPD6_INT_CONTROL);
 		tmp |= DC_HPDx_INT_ACK;
 		WREG32(DC_HPD6_INT_CONTROL, tmp);
 	}
@@ -6348,7 +6351,7 @@ static inline void si_irq_ack(struct radeon_device *rdev)
 		WREG32(DC_HPD5_INT_CONTROL, tmp);
 	}
 	if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
-		tmp = RREG32(DC_HPD5_INT_CONTROL);
+		tmp = RREG32(DC_HPD6_INT_CONTROL);
 		tmp |= DC_HPDx_RX_INT_ACK;
 		WREG32(DC_HPD6_INT_CONTROL, tmp);
 	}
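
The dce6_program_watermarks() hunk swaps a plain multiply-then-divide for a widened 64-bit divide in the style of the kernel's div_u64() helper. When the intermediate product is computed in 32 bits, 1000000 * crtc_htotal wraps once htotal exceeds 4294. A standalone demonstration; the cast forces the 32-bit wrap so the effect is visible on any host:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t htotal = 4500, clock = 297000;  /* hypothetical large mode */

        /* 32-bit product wraps: 4.5e9 mod 2^32 = 205032704 */
        uint32_t wrong = (uint32_t)(1000000UL * htotal) / clock;
        /* widen first, divide second, as div_u64() does */
        uint32_t right = (uint32_t)(((uint64_t)htotal * 1000000) / clock);

        printf("32-bit math: %u, 64-bit math: %u\n", wrong, right);
        return 0;
    }
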
diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
index d8fa7a9c9240..ce5f2d1f9994 100644
--- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
+++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
@@ -245,8 +245,6 @@ rockchip_dp_drm_encoder_atomic_check(struct drm_encoder *encoder,
 			struct drm_connector_state *conn_state)
 {
 	struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);
-	struct rockchip_dp_device *dp = to_dp(encoder);
-	int ret;
 
 	/*
 	 * The hardware IC designed that VOP must output the RGB10 video
@@ -258,16 +256,6 @@ rockchip_dp_drm_encoder_atomic_check(struct drm_encoder *encoder,
 
 	s->output_mode = ROCKCHIP_OUT_MODE_AAAA;
 	s->output_type = DRM_MODE_CONNECTOR_eDP;
-	if (dp->data->chip_type == RK3399_EDP) {
-		/*
-		 * For RK3399, VOP Lit must code the out mode to RGB888,
-		 * VOP Big must code the out mode to RGB10.
-		 */
-		ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node,
-							encoder);
-		if (ret > 0)
-			s->output_mode = ROCKCHIP_OUT_MODE_P888;
-	}
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c
index a2169dd3d26b..14fa1f8351e8 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-core.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c
@@ -615,7 +615,6 @@ static void cdn_dp_encoder_enable(struct drm_encoder *encoder)
 {
 	struct cdn_dp_device *dp = encoder_to_dp(encoder);
 	int ret, val;
-	struct rockchip_crtc_state *state;
 
 	ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node, encoder);
 	if (ret < 0) {
@@ -625,14 +624,10 @@ static void cdn_dp_encoder_enable(struct drm_encoder *encoder)
 
 	DRM_DEV_DEBUG_KMS(dp->dev, "vop %s output to cdn-dp\n",
 			  (ret) ? "LIT" : "BIG");
-	state = to_rockchip_crtc_state(encoder->crtc->state);
-	if (ret) {
+	if (ret)
 		val = DP_SEL_VOP_LIT | (DP_SEL_VOP_LIT << 16);
-		state->output_mode = ROCKCHIP_OUT_MODE_P888;
-	} else {
+	else
 		val = DP_SEL_VOP_LIT << 16;
-		state->output_mode = ROCKCHIP_OUT_MODE_AAAA;
-	}
 
 	ret = cdn_dp_grf_write(dp, GRF_SOC_CON9, val);
 	if (ret)
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index 3f7a82d1e095..45589d6ce65e 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -875,6 +875,7 @@ static bool vop_crtc_mode_fixup(struct drm_crtc *crtc,
 static void vop_crtc_enable(struct drm_crtc *crtc)
 {
 	struct vop *vop = to_vop(crtc);
+	const struct vop_data *vop_data = vop->data;
 	struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc->state);
 	struct drm_display_mode *adjusted_mode = &crtc->state->adjusted_mode;
 	u16 hsync_len = adjusted_mode->hsync_end - adjusted_mode->hsync_start;
@@ -967,6 +968,13 @@ static void vop_crtc_enable(struct drm_crtc *crtc)
 		DRM_DEV_ERROR(vop->dev, "unsupported connector_type [%d]\n",
 			      s->output_type);
 	}
+
+	/*
+	 * If the VOP does not support RGB10 output, force RGB10 down to RGB888.
+	 */
+	if (s->output_mode == ROCKCHIP_OUT_MODE_AAAA &&
+	    !(vop_data->feature & VOP_FEATURE_OUTPUT_RGB10))
+		s->output_mode = ROCKCHIP_OUT_MODE_P888;
 	VOP_CTRL_SET(vop, out_mode, s->output_mode);
 
 	VOP_CTRL_SET(vop, htotal_pw, (htotal << 16) | hsync_len);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
index 5a4faa85dbd2..9979fd0c2282 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
@@ -142,6 +142,9 @@ struct vop_data {
 	const struct vop_intr *intr;
 	const struct vop_win_data *win;
 	unsigned int win_size;
+
+#define VOP_FEATURE_OUTPUT_RGB10	BIT(0)
+	u64 feature;
 };
 
 /* interrupt define */
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
index 0da44442aab0..bafd698a28b1 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
@@ -275,6 +275,7 @@ static const struct vop_intr rk3288_vop_intr = {
 static const struct vop_data rk3288_vop = {
 	.init_table = rk3288_init_reg_table,
 	.table_size = ARRAY_SIZE(rk3288_init_reg_table),
+	.feature = VOP_FEATURE_OUTPUT_RGB10,
 	.intr = &rk3288_vop_intr,
 	.ctrl = &rk3288_ctrl_data,
 	.win = rk3288_vop_win_data,
@@ -343,6 +344,7 @@ static const struct vop_reg_data rk3399_init_reg_table[] = {
 static const struct vop_data rk3399_vop_big = {
 	.init_table = rk3399_init_reg_table,
 	.table_size = ARRAY_SIZE(rk3399_init_reg_table),
+	.feature = VOP_FEATURE_OUTPUT_RGB10,
 	.intr = &rk3399_vop_intr,
 	.ctrl = &rk3399_ctrl_data,
 	/*
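
Taken together, the rockchip hunks replace a chip_type special case in the eDP encoder with a capability bit on the VOP's hardware description and let the CRTC downgrade RGB10 itself. A standalone sketch of that feature-bit pattern; the names mirror the diff, everything else is illustrative:

    #include <stdint.h>
    #include <stdio.h>

    #define FEATURE_OUTPUT_RGB10 (1ull << 0)

    enum out_mode { OUT_MODE_P888, OUT_MODE_AAAA /* RGB10 */ };

    struct vop_desc { const char *name; uint64_t feature; };

    static const struct vop_desc rk3399_big = { "rk3399-big", FEATURE_OUTPUT_RGB10 };
    static const struct vop_desc rk3399_lit = { "rk3399-lit", 0 };

    /* The CRTC, which knows the hardware, downgrades RGB10 itself
     * instead of each encoder guessing from the SoC type. */
    static enum out_mode pick_mode(const struct vop_desc *d, enum out_mode want)
    {
        if (want == OUT_MODE_AAAA && !(d->feature & FEATURE_OUTPUT_RGB10))
            return OUT_MODE_P888;
        return want;
    }

    int main(void)
    {
        printf("%s -> %d\n", rk3399_big.name, pick_mode(&rk3399_big, OUT_MODE_AAAA));
        printf("%s -> %d\n", rk3399_lit.name, pick_mode(&rk3399_lit, OUT_MODE_AAAA));
        return 0;
    }
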
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 9a1e34e48f64..81f86a67c10d 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -451,18 +451,6 @@ fail:
 
 
 #ifdef CONFIG_DRM_TEGRA_STAGING
-static struct tegra_drm_context *
-tegra_drm_file_get_context(struct tegra_drm_file *file, u32 id)
-{
-	struct tegra_drm_context *context;
-
-	mutex_lock(&file->lock);
-	context = idr_find(&file->contexts, id);
-	mutex_unlock(&file->lock);
-
-	return context;
-}
-
 static int tegra_gem_create(struct drm_device *drm, void *data,
 			    struct drm_file *file)
 {
@@ -551,7 +539,7 @@ static int tegra_client_open(struct tegra_drm_file *fpriv,
 	if (err < 0)
 		return err;
 
-	err = idr_alloc(&fpriv->contexts, context, 0, 0, GFP_KERNEL);
+	err = idr_alloc(&fpriv->contexts, context, 1, 0, GFP_KERNEL);
 	if (err < 0) {
 		client->ops->close_channel(context);
 		return err;
@@ -606,7 +594,7 @@ static int tegra_close_channel(struct drm_device *drm, void *data,
 
 	mutex_lock(&fpriv->lock);
 
-	context = tegra_drm_file_get_context(fpriv, args->context);
+	context = idr_find(&fpriv->contexts, args->context);
 	if (!context) {
 		err = -EINVAL;
 		goto unlock;
@@ -631,7 +619,7 @@ static int tegra_get_syncpt(struct drm_device *drm, void *data,
 
 	mutex_lock(&fpriv->lock);
 
-	context = tegra_drm_file_get_context(fpriv, args->context);
+	context = idr_find(&fpriv->contexts, args->context);
 	if (!context) {
 		err = -ENODEV;
 		goto unlock;
@@ -660,7 +648,7 @@ static int tegra_submit(struct drm_device *drm, void *data,
 
 	mutex_lock(&fpriv->lock);
 
-	context = tegra_drm_file_get_context(fpriv, args->context);
+	context = idr_find(&fpriv->contexts, args->context);
 	if (!context) {
 		err = -ENODEV;
 		goto unlock;
@@ -685,7 +673,7 @@ static int tegra_get_syncpt_base(struct drm_device *drm, void *data,
 
 	mutex_lock(&fpriv->lock);
 
-	context = tegra_drm_file_get_context(fpriv, args->context);
+	context = idr_find(&fpriv->contexts, args->context);
 	if (!context) {
 		err = -ENODEV;
 		goto unlock;
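
The tegra hunks cure a self-deadlock: the removed tegra_drm_file_get_context() took fpriv->lock internally, yet every ioctl shown already holds that non-recursive lock when it calls the helper, so the fix inlines idr_find() under the held lock. The idr_alloc() change also starts IDs at 1, presumably so a valid context is never handed the ambiguous ID 0. A standalone sketch of the locking rule, with a pthread mutex and a plain array standing in for the kernel mutex and IDR:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static void *contexts[16];

    /* Caller must already hold `lock`; the buggy helper re-locked it. */
    static void *get_context_locked(unsigned int id)
    {
        return id < 16 ? contexts[id] : NULL;
    }

    int main(void)
    {
        contexts[1] = "ctx";
        pthread_mutex_lock(&lock);
        /* A helper that called pthread_mutex_lock(&lock) here would hang:
         * the default mutex type is not recursive. */
        printf("%s\n", (char *)get_context_locked(1));
        pthread_mutex_unlock(&lock);
        return 0;
    }
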
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
index 13db8a2851ed..1f013d45c9e9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
@@ -321,6 +321,7 @@ void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man)
 	list_for_each_entry_safe(entry, next, &man->list, head)
 		vmw_cmdbuf_res_free(man, entry);
 
+	drm_ht_remove(&man->resources);
 	kfree(man);
 }
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 130d51c5ec6a..4b948fba9eec 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -41,9 +41,9 @@
 #include <drm/ttm/ttm_module.h>
 #include "vmwgfx_fence.h"
 
-#define VMWGFX_DRIVER_DATE "20170221"
+#define VMWGFX_DRIVER_DATE "20170607"
 #define VMWGFX_DRIVER_MAJOR 2
-#define VMWGFX_DRIVER_MINOR 12
+#define VMWGFX_DRIVER_MINOR 13
 #define VMWGFX_DRIVER_PATCHLEVEL 0
 #define VMWGFX_FILE_PAGE_OFFSET 0x00100000
 #define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index b6a0806b06bf..a1c68e6a689e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -368,6 +368,8 @@ static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
 			return fifo_state->static_buffer;
 		else {
 			fifo_state->dynamic_buffer = vmalloc(bytes);
+			if (!fifo_state->dynamic_buffer)
+				goto out_err;
 			return fifo_state->dynamic_buffer;
 		}
 	}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index ef9f3a2a4030..1d2db5d912b0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -274,108 +274,6 @@ void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
 }
 
 
-
-/**
- * vmw_du_cursor_plane_update() - Update cursor image and location
- *
- * @plane: plane object to update
- * @crtc: owning CRTC of @plane
- * @fb: framebuffer to flip onto plane
- * @crtc_x: x offset of plane on crtc
- * @crtc_y: y offset of plane on crtc
- * @crtc_w: width of plane rectangle on crtc
- * @crtc_h: height of plane rectangle on crtc
- * @src_x: Not used
- * @src_y: Not used
- * @src_w: Not used
- * @src_h: Not used
- *
- *
- * RETURNS:
- * Zero on success, error code on failure
- */
-int vmw_du_cursor_plane_update(struct drm_plane *plane,
-			       struct drm_crtc *crtc,
-			       struct drm_framebuffer *fb,
-			       int crtc_x, int crtc_y,
-			       unsigned int crtc_w,
-			       unsigned int crtc_h,
-			       uint32_t src_x, uint32_t src_y,
-			       uint32_t src_w, uint32_t src_h)
-{
-	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
-	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
-	struct vmw_surface *surface = NULL;
-	struct vmw_dma_buffer *dmabuf = NULL;
-	s32 hotspot_x, hotspot_y;
-	int ret;
-
-	hotspot_x = du->hotspot_x + fb->hot_x;
-	hotspot_y = du->hotspot_y + fb->hot_y;
-
-	/* A lot of the code assumes this */
-	if (crtc_w != 64 || crtc_h != 64) {
-		ret = -EINVAL;
-		goto out;
-	}
-
-	if (vmw_framebuffer_to_vfb(fb)->dmabuf)
-		dmabuf = vmw_framebuffer_to_vfbd(fb)->buffer;
-	else
-		surface = vmw_framebuffer_to_vfbs(fb)->surface;
-
-	if (surface && !surface->snooper.image) {
-		DRM_ERROR("surface not suitable for cursor\n");
-		ret = -EINVAL;
-		goto out;
-	}
-
-	/* setup new image */
-	ret = 0;
-	if (surface) {
-		/* vmw_user_surface_lookup takes one reference */
-		du->cursor_surface = surface;
-
-		du->cursor_age = du->cursor_surface->snooper.age;
-
-		ret = vmw_cursor_update_image(dev_priv, surface->snooper.image,
-					      64, 64, hotspot_x, hotspot_y);
-	} else if (dmabuf) {
-		/* vmw_user_surface_lookup takes one reference */
-		du->cursor_dmabuf = dmabuf;
-
-		ret = vmw_cursor_update_dmabuf(dev_priv, dmabuf, crtc_w, crtc_h,
-					       hotspot_x, hotspot_y);
-	} else {
-		vmw_cursor_update_position(dev_priv, false, 0, 0);
-		goto out;
-	}
-
-	if (!ret) {
-		du->cursor_x = crtc_x + du->set_gui_x;
-		du->cursor_y = crtc_y + du->set_gui_y;
-
-		vmw_cursor_update_position(dev_priv, true,
-					   du->cursor_x + hotspot_x,
-					   du->cursor_y + hotspot_y);
-	}
-
-out:
-	return ret;
-}
-
-
-int vmw_du_cursor_plane_disable(struct drm_plane *plane)
-{
-	if (plane->fb) {
-		drm_framebuffer_unreference(plane->fb);
-		plane->fb = NULL;
-	}
-
-	return -EINVAL;
-}
-
-
 void vmw_du_cursor_plane_destroy(struct drm_plane *plane)
 {
 	vmw_cursor_update_position(plane->dev->dev_private, false, 0, 0);
@@ -473,18 +371,6 @@ vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
 
 
 void
-vmw_du_cursor_plane_atomic_disable(struct drm_plane *plane,
-				   struct drm_plane_state *old_state)
-{
-	struct drm_crtc *crtc = plane->state->crtc ?: old_state->crtc;
-	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
-
-	drm_atomic_set_fb_for_plane(plane->state, NULL);
-	vmw_cursor_update_position(dev_priv, false, 0, 0);
-}
-
-
-void
 vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
 				  struct drm_plane_state *old_state)
 {
@@ -1498,6 +1384,7 @@ vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
 	 */
 	if (vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height) &&
 	    dmabuf && only_2d &&
+	    mode_cmd->width > 64 &&  /* Don't create a proxy for cursor */
 	    dev_priv->active_display_unit == vmw_du_screen_target) {
 		ret = vmw_create_dmabuf_proxy(dev_priv->dev, mode_cmd,
 					      dmabuf, &surface);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index 13f2f1d2818a..5f8d678ae675 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -256,10 +256,6 @@ int vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
 			  u16 *r, u16 *g, u16 *b,
 			  uint32_t size,
 			  struct drm_modeset_acquire_ctx *ctx);
-int vmw_du_crtc_cursor_set2(struct drm_crtc *crtc, struct drm_file *file_priv,
-			    uint32_t handle, uint32_t width, uint32_t height,
-			    int32_t hot_x, int32_t hot_y);
-int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y);
 int vmw_du_connector_set_property(struct drm_connector *connector,
 				  struct drm_property *property,
 				  uint64_t val);
@@ -339,15 +335,6 @@ void vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv,
 /* Universal Plane Helpers */
 void vmw_du_primary_plane_destroy(struct drm_plane *plane);
 void vmw_du_cursor_plane_destroy(struct drm_plane *plane);
-int vmw_du_cursor_plane_disable(struct drm_plane *plane);
-int vmw_du_cursor_plane_update(struct drm_plane *plane,
-			       struct drm_crtc *crtc,
-			       struct drm_framebuffer *fb,
-			       int crtc_x, int crtc_y,
-			       unsigned int crtc_w,
-			       unsigned int crtc_h,
-			       uint32_t src_x, uint32_t src_y,
-			       uint32_t src_w, uint32_t src_h);
 
 /* Atomic Helpers */
 int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
@@ -356,8 +343,6 @@ int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
 				     struct drm_plane_state *state);
 void vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
 				       struct drm_plane_state *old_state);
-void vmw_du_cursor_plane_atomic_disable(struct drm_plane *plane,
-					struct drm_plane_state *old_state);
 int vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
 				   struct drm_plane_state *new_state);
 void vmw_du_plane_cleanup_fb(struct drm_plane *plane,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index bad31bdf09b6..50be1f034f9e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -56,6 +56,8 @@ enum stdu_content_type {
  * @right: Right side of bounding box.
  * @top: Top side of bounding box.
  * @bottom: Bottom side of bounding box.
+ * @fb_left: Left side of the framebuffer/content bounding box
+ * @fb_top: Top of the framebuffer/content bounding box
  * @buf: DMA buffer when DMA-ing between buffer and screen targets.
  * @sid: Surface ID when copying between surface and screen targets.
  */
@@ -63,6 +65,7 @@ struct vmw_stdu_dirty {
 	struct vmw_kms_dirty base;
 	SVGA3dTransferType  transfer;
 	s32 left, right, top, bottom;
+	s32 fb_left, fb_top;
 	u32 pitch;
 	union {
 		struct vmw_dma_buffer *buf;
@@ -647,7 +650,7 @@ static void vmw_stdu_dmabuf_fifo_commit(struct vmw_kms_dirty *dirty)
  *
  * @dirty: The closure structure.
  *
- * This function calculates the bounding box for all the incoming clips
+ * This function calculates the bounding box for all the incoming clips.
  */
 static void vmw_stdu_dmabuf_cpu_clip(struct vmw_kms_dirty *dirty)
 {
@@ -656,11 +659,19 @@ static void vmw_stdu_dmabuf_cpu_clip(struct vmw_kms_dirty *dirty)
 
 	dirty->num_hits = 1;
 
-	/* Calculate bounding box */
+	/* Calculate destination bounding box */
 	ddirty->left = min_t(s32, ddirty->left, dirty->unit_x1);
 	ddirty->top = min_t(s32, ddirty->top, dirty->unit_y1);
 	ddirty->right = max_t(s32, ddirty->right, dirty->unit_x2);
 	ddirty->bottom = max_t(s32, ddirty->bottom, dirty->unit_y2);
+
+	/*
+	 * Calculate content bounding box.  We only need the top-left
+	 * coordinate because width and height will be the same as the
+	 * destination bounding box above
+	 */
+	ddirty->fb_left = min_t(s32, ddirty->fb_left, dirty->fb_x);
+	ddirty->fb_top = min_t(s32, ddirty->fb_top, dirty->fb_y);
 }
 
 
@@ -697,11 +708,11 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty)
 	/* Assume we are blitting from Host (display_srf) to Guest (dmabuf) */
 	src_pitch = stdu->display_srf->base_size.width * stdu->cpp;
 	src = ttm_kmap_obj_virtual(&stdu->host_map, &not_used);
-	src += dirty->unit_y1 * src_pitch + dirty->unit_x1 * stdu->cpp;
+	src += ddirty->top * src_pitch + ddirty->left * stdu->cpp;
 
 	dst_pitch = ddirty->pitch;
 	dst = ttm_kmap_obj_virtual(&stdu->guest_map, &not_used);
-	dst += dirty->fb_y * dst_pitch + dirty->fb_x * stdu->cpp;
+	dst += ddirty->fb_top * dst_pitch + ddirty->fb_left * stdu->cpp;
 
 
 	/* Figure out the real direction */
@@ -760,7 +771,7 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty)
 	}
 
 out_cleanup:
-	ddirty->left = ddirty->top = S32_MAX;
+	ddirty->left = ddirty->top = ddirty->fb_left = ddirty->fb_top = S32_MAX;
 	ddirty->right = ddirty->bottom = S32_MIN;
 }
 
@@ -812,6 +823,7 @@ int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
 		SVGA3D_READ_HOST_VRAM;
 	ddirty.left = ddirty.top = S32_MAX;
 	ddirty.right = ddirty.bottom = S32_MIN;
+	ddirty.fb_left = ddirty.fb_top = S32_MAX;
 	ddirty.pitch = vfb->base.pitches[0];
 	ddirty.buf = buf;
 	ddirty.base.fifo_commit = vmw_stdu_dmabuf_fifo_commit;
@@ -1355,6 +1367,11 @@ vmw_stdu_primary_plane_atomic_update(struct drm_plane *plane,
 		DRM_ERROR("Failed to bind surface to STDU.\n");
 	else
 		crtc->primary->fb = plane->state->fb;
+
+	ret = vmw_stdu_update_st(dev_priv, stdu);
+
+	if (ret)
+		DRM_ERROR("Failed to update STDU.\n");
 }
 
 
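
The stdu hunks keep two bounding boxes, one in destination (screen) space and one in content (framebuffer) space, and offset each side of the CPU blit with its own top-left corner; reusing the destination coordinates for both, as the old code did, mis-addresses the copy whenever the boxes differ. A standalone sketch of the pointer arithmetic with toy buffer sizes; the names mirror the diff:

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    int main(void)
    {
        uint8_t src[16 * 16 * 4], dst[16 * 16 * 4];
        unsigned int cpp = 4, src_pitch = 16 * cpp, dst_pitch = 16 * cpp;
        int top = 2, left = 3;        /* destination box, screen space */
        int fb_top = 5, fb_left = 1;  /* content box, framebuffer space */
        int width = 8, height = 4;

        memset(src, 0xab, sizeof(src));
        /* each side of the copy uses its own coordinate space */
        for (int y = 0; y < height; y++)
            memcpy(dst + (fb_top + y) * dst_pitch + fb_left * cpp,
                   src + (top + y) * src_pitch + left * cpp,
                   width * cpp);
        printf("copied %d rows\n", height);
        return 0;
    }
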
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 7681341fe32b..6b70bd259953 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -1274,11 +1274,14 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
 	int ret;
 	uint32_t size;
-	uint32_t backup_handle;
+	uint32_t backup_handle = 0;
 
 	if (req->multisample_count != 0)
 		return -EINVAL;
 
+	if (req->mip_levels > DRM_VMW_MAX_MIP_LEVELS)
+		return -EINVAL;
+
 	if (unlikely(vmw_user_surface_size == 0))
 		vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
 			128;
@@ -1314,12 +1317,16 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
 		ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle,
 					     &res->backup,
 					     &user_srf->backup_base);
-		if (ret == 0 && res->backup->base.num_pages * PAGE_SIZE <
-		    res->backup_size) {
-			DRM_ERROR("Surface backup buffer is too small.\n");
-			vmw_dmabuf_unreference(&res->backup);
-			ret = -EINVAL;
-			goto out_unlock;
+		if (ret == 0) {
+			if (res->backup->base.num_pages * PAGE_SIZE <
+			    res->backup_size) {
+				DRM_ERROR("Surface backup buffer is too small.\n");
+				vmw_dmabuf_unreference(&res->backup);
+				ret = -EINVAL;
+				goto out_unlock;
+			} else {
+				backup_handle = req->buffer_handle;
+			}
 		}
 	} else if (req->drm_surface_flags & drm_vmw_surface_flag_create_buffer)
 		ret = vmw_user_dmabuf_alloc(dev_priv, tfile,
@@ -1491,7 +1498,7 @@ int vmw_surface_gb_priv_define(struct drm_device *dev,
 			   dev_priv->stdu_max_height);
 
 	if (size.width > max_width || size.height > max_height) {
-		DRM_ERROR("%ux%u\n, exeeds max surface size %ux%u",
+		DRM_ERROR("%ux%u\n, exceeds max surface size %ux%u",
 			  size.width, size.height,
 			  max_width, max_height);
 		return -EINVAL;
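
Two defensive moves appear in the surface ioctl hunks: reject an untrusted field (mip_levels) before anything is allocated, and give backup_handle a defined value so no path can copy stack garbage back to userspace. A hedged standalone sketch; the constant and the tiny request struct are illustrative stand-ins, and the real ioctl does much more:

    #include <stdint.h>
    #include <errno.h>
    #include <stdio.h>

    #define MAX_MIP_LEVELS 15  /* illustrative stand-in for DRM_VMW_MAX_MIP_LEVELS */

    struct define_req { uint32_t mip_levels; uint32_t buffer_handle; };

    static int surface_define(const struct define_req *req, uint32_t *out_handle)
    {
        uint32_t backup_handle = 0;  /* defined even on early-return paths */

        if (req->mip_levels > MAX_MIP_LEVELS)  /* validate before allocating */
            return -EINVAL;
        if (req->buffer_handle)                /* caller supplied a backing buffer */
            backup_handle = req->buffer_handle;
        *out_handle = backup_handle;
        return 0;
    }

    int main(void)
    {
        struct define_req req = { .mip_levels = 64, .buffer_handle = 7 };
        uint32_t h = 0;
        printf("ret=%d handle=%u\n", surface_define(&req, &h), h);
        return 0;
    }
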
diff --git a/drivers/gpu/host1x/Kconfig b/drivers/gpu/host1x/Kconfig
index b2fd029d67b3..91916326957f 100644
--- a/drivers/gpu/host1x/Kconfig
+++ b/drivers/gpu/host1x/Kconfig
@@ -1,6 +1,7 @@
 config TEGRA_HOST1X
 	tristate "NVIDIA Tegra host1x driver"
 	depends on ARCH_TEGRA || (ARM && COMPILE_TEST)
+	select IOMMU_IOVA if IOMMU_SUPPORT
 	help
 	  Driver for the NVIDIA Tegra host1x hardware.
 
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index f05ebb14fa63..ac65f52850a6 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -172,7 +172,7 @@ static int host1x_probe(struct platform_device *pdev)
 
 	host->rst = devm_reset_control_get(&pdev->dev, "host1x");
 	if (IS_ERR(host->rst)) {
-		err = PTR_ERR(host->clk);
+		err = PTR_ERR(host->rst);
 		dev_err(&pdev->dev, "failed to get reset: %d\n", err);
 		return err;
 	}
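
The host1x fix is another copy-paste bug, this time around the kernel's ERR_PTR convention: the error code was decoded from host->clk, a valid pointer obtained earlier, so the reported value was garbage instead of the reset controller's errno. A standalone re-implementation of the helpers, mimicking include/linux/err.h, to show the failure mode:

    #include <stdio.h>

    #define MAX_ERRNO 4095
    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline int IS_ERR(const void *ptr)
    {
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    int main(void)
    {
        void *clk = (void *)0x55aa0;  /* a valid pointer from an earlier lookup */
        void *rst = ERR_PTR(-19);     /* -ENODEV from the reset lookup */

        if (IS_ERR(rst))  /* decoding the wrong pointer yields nonsense */
            printf("wrong: %ld, right: %ld\n", PTR_ERR(clk), PTR_ERR(rst));
        return 0;
    }
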
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index 16d556816b5f..2fb5f432a54c 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -725,15 +725,16 @@ void ipu_set_ic_src_mux(struct ipu_soc *ipu, int csi_id, bool vdi)
 	spin_lock_irqsave(&ipu->lock, flags);
 
 	val = ipu_cm_read(ipu, IPU_CONF);
-	if (vdi) {
+	if (vdi)
 		val |= IPU_CONF_IC_INPUT;
-	} else {
+	else
 		val &= ~IPU_CONF_IC_INPUT;
-		if (csi_id == 1)
-			val |= IPU_CONF_CSI_SEL;
-		else
-			val &= ~IPU_CONF_CSI_SEL;
-	}
+
+	if (csi_id == 1)
+		val |= IPU_CONF_CSI_SEL;
+	else
+		val &= ~IPU_CONF_CSI_SEL;
+
 	ipu_cm_write(ipu, val, IPU_CONF);
 
 	spin_unlock_irqrestore(&ipu->lock, flags);
diff --git a/drivers/gpu/ipu-v3/ipu-pre.c b/drivers/gpu/ipu-v3/ipu-pre.c
index c55563379e2e..c35f74c83065 100644
--- a/drivers/gpu/ipu-v3/ipu-pre.c
+++ b/drivers/gpu/ipu-v3/ipu-pre.c
@@ -131,8 +131,6 @@ int ipu_pre_get(struct ipu_pre *pre)
 	if (pre->in_use)
 		return -EBUSY;
 
-	clk_prepare_enable(pre->clk_axi);
-
 	/* first get the engine out of reset and remove clock gating */
 	writel(0, pre->regs + IPU_PRE_CTRL);
 
@@ -149,12 +147,7 @@
 
 void ipu_pre_put(struct ipu_pre *pre)
 {
-	u32 val;
-
-	val = IPU_PRE_CTRL_SFTRST | IPU_PRE_CTRL_CLKGATE;
-	writel(val, pre->regs + IPU_PRE_CTRL);
-
-	clk_disable_unprepare(pre->clk_axi);
+	writel(IPU_PRE_CTRL_SFTRST, pre->regs + IPU_PRE_CTRL);
 
 	pre->in_use = false;
 }
@@ -249,6 +242,8 @@ static int ipu_pre_probe(struct platform_device *pdev)
 	if (!pre->buffer_virt)
 		return -ENOMEM;
 
+	clk_prepare_enable(pre->clk_axi);
+
 	pre->dev = dev;
 	platform_set_drvdata(pdev, pre);
 	mutex_lock(&ipu_pre_list_mutex);
@@ -268,6 +263,8 @@ static int ipu_pre_remove(struct platform_device *pdev)
 	available_pres--;
 	mutex_unlock(&ipu_pre_list_mutex);
 
+	clk_disable_unprepare(pre->clk_axi);
+
 	if (pre->buffer_virt)
 		gen_pool_free(pre->iram, (unsigned long)pre->buffer_virt,
 			      IPU_PRE_MAX_WIDTH * IPU_PRE_NUM_SCANLINES * 4);
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index fe40e5e499dd..687705c50794 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -275,10 +275,12 @@ config HID_EMS_FF
 	  - Trio Linker Plus II
 
 config HID_ELECOM
-	tristate "ELECOM BM084 bluetooth mouse"
+	tristate "ELECOM HID devices"
 	depends on HID
 	---help---
-	Support for the ELECOM BM084 (bluetooth mouse).
+	Support for ELECOM devices:
+	  - BM084 Bluetooth Mouse
+	  - DEFT Trackball (Wired and wireless)
 
 config HID_ELO
 	tristate "ELO USB 4000/4500 touchscreen"
diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
index 16df6cc90235..a6268f2f7408 100644
--- a/drivers/hid/hid-asus.c
+++ b/drivers/hid/hid-asus.c
@@ -69,6 +69,7 @@ MODULE_DESCRIPTION("Asus HID Keyboard and TouchPad");
 #define QUIRK_IS_MULTITOUCH		BIT(3)
 #define QUIRK_NO_CONSUMER_USAGES	BIT(4)
 #define QUIRK_USE_KBD_BACKLIGHT	BIT(5)
+#define QUIRK_T100_KEYBOARD		BIT(6)
 
 #define I2C_KEYBOARD_QUIRKS			(QUIRK_FIX_NOTEBOOK_REPORT | \
 						 QUIRK_NO_INIT_REPORTS | \
@@ -536,6 +537,8 @@ static void asus_remove(struct hid_device *hdev)
 		drvdata->kbd_backlight->removed = true;
 		cancel_work_sync(&drvdata->kbd_backlight->work);
 	}
+
+	hid_hw_stop(hdev);
 }
 
 static __u8 *asus_report_fixup(struct hid_device *hdev, __u8 *rdesc,
@@ -548,6 +551,12 @@ static __u8 *asus_report_fixup(struct hid_device *hdev, __u8 *rdesc,
 		hid_info(hdev, "Fixing up Asus notebook report descriptor\n");
 		rdesc[55] = 0xdd;
 	}
+	if (drvdata->quirks & QUIRK_T100_KEYBOARD &&
+	    *rsize == 76 && rdesc[73] == 0x81 && rdesc[74] == 0x01) {
+		hid_info(hdev, "Fixing up Asus T100 keyb report descriptor\n");
+		rdesc[74] &= ~HID_MAIN_ITEM_CONSTANT;
+	}
+
 	return rdesc;
 }
 
@@ -560,6 +569,9 @@ static const struct hid_device_id asus_devices[] = {
 		USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD1) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
 		USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD2), QUIRK_USE_KBD_BACKLIGHT },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
+		USB_DEVICE_ID_ASUSTEK_T100_KEYBOARD),
+	  QUIRK_T100_KEYBOARD | QUIRK_NO_CONSUMER_USAGES },
 	{ }
 };
 MODULE_DEVICE_TABLE(hid, asus_devices);
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 37084b645785..6e040692f1d8 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -826,11 +826,35 @@ static int hid_scan_report(struct hid_device *hid)
 		 * hid-rmi should take care of them,
 		 * not hid-generic
 		 */
-		if (IS_ENABLED(CONFIG_HID_RMI))
-			hid->group = HID_GROUP_RMI;
+		hid->group = HID_GROUP_RMI;
 		break;
 	}
 
+	/* fall back to generic driver in case specific driver doesn't exist */
+	switch (hid->group) {
+	case HID_GROUP_MULTITOUCH_WIN_8:
+		/* fall-through */
+	case HID_GROUP_MULTITOUCH:
+		if (!IS_ENABLED(CONFIG_HID_MULTITOUCH))
+			hid->group = HID_GROUP_GENERIC;
+		break;
+	case HID_GROUP_SENSOR_HUB:
+		if (!IS_ENABLED(CONFIG_HID_SENSOR_HUB))
+			hid->group = HID_GROUP_GENERIC;
+		break;
+	case HID_GROUP_RMI:
+		if (!IS_ENABLED(CONFIG_HID_RMI))
+			hid->group = HID_GROUP_GENERIC;
+		break;
+	case HID_GROUP_WACOM:
+		if (!IS_ENABLED(CONFIG_HID_WACOM))
+			hid->group = HID_GROUP_GENERIC;
+		break;
+	case HID_GROUP_LOGITECH_DJ_DEVICE:
+		if (!IS_ENABLED(CONFIG_HID_LOGITECH_DJ))
+			hid->group = HID_GROUP_GENERIC;
+		break;
+	}
 	vfree(parser);
 	return 0;
 }
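
The new fallback switch leans on IS_ENABLED(CONFIG_...) evaluating to a compile-time 0 or 1 (true for symbols built in or as modules), so a device whose specialized driver is compiled out is re-grouped as generic rather than left without a driver. A standalone sketch with a plain macro standing in for the Kconfig test:

    #include <stdio.h>

    #define CONFIG_HID_MULTITOUCH_ENABLED 0  /* pretend the driver is compiled out */

    enum hid_group { HID_GROUP_GENERIC, HID_GROUP_MULTITOUCH };

    int main(void)
    {
        enum hid_group group = HID_GROUP_MULTITOUCH;

        switch (group) {
        case HID_GROUP_MULTITOUCH:
            if (!CONFIG_HID_MULTITOUCH_ENABLED)  /* no driver built: fall back */
                group = HID_GROUP_GENERIC;
            break;
        default:
            break;
        }
        printf("group=%d\n", group);
        return 0;
    }
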
@@ -1763,15 +1787,23 @@ EXPORT_SYMBOL_GPL(hid_disconnect);
  * used as a driver. See hid_scan_report().
  */
 static const struct hid_device_id hid_have_special_driver[] = {
+#if IS_ENABLED(CONFIG_HID_A4TECH)
 	{ HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_WCP32PU) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_X5_005D) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_RP_649) },
+#endif
+#if IS_ENABLED(CONFIG_HID_ACCUTOUCH)
+	{ HID_USB_DEVICE(USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_ACCUTOUCH_2216) },
+#endif
+#if IS_ENABLED(CONFIG_HID_ACRUX)
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ACRUX, 0x0802) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ACRUX, 0xf705) },
+#endif
+#if IS_ENABLED(CONFIG_HID_ALPS)
 	{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY, USB_VENDOR_ID_ALPS_JP, HID_DEVICE_ID_ALPS_U1_DUAL) },
+#endif
+#if IS_ENABLED(CONFIG_HID_APPLE)
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MIGHTYMOUSE) },
-	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICMOUSE) },
-	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICTRACKPAD) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ISO) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ANSI) },
@@ -1792,11 +1824,6 @@ static const struct hid_device_id hid_have_special_driver[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ANSI) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ISO) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_JIS) },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL) },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL2) },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL3) },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL4) },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL5) },
 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI) },
 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ISO) },
 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS) },
@@ -1851,59 +1878,100 @@ static const struct hid_device_id hid_have_special_driver[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
+#endif
+#if IS_ENABLED(CONFIG_HID_APPLEIR)
+	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL2) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL3) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL4) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL5) },
+#endif
+#if IS_ENABLED(CONFIG_HID_ASUS)
 	{ HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_I2C_KEYBOARD) },
 	{ HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_I2C_TOUCHPAD) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD1) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD2) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_T100_KEYBOARD) },
+#endif
+#if IS_ENABLED(CONFIG_HID_AUREAL)
 	{ HID_USB_DEVICE(USB_VENDOR_ID_AUREAL, USB_DEVICE_ID_AUREAL_W01RN) },
+#endif
+#if IS_ENABLED(CONFIG_HID_BELKIN)
 	{ HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) },
+#endif
+#if IS_ENABLED(CONFIG_HID_BETOP_FF)
 	{ HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185BFM, 0x2208) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185PC, 0x5506) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185V2PC, 0x1850) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185V2BFM, 0x5500) },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE) },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE_2) },
+#endif
+#if IS_ENABLED(CONFIG_HID_CHERRY)
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR) },
+#endif
+#if IS_ENABLED(CONFIG_HID_CHICONY)
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS2) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_AK1D) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_ACER_SWITCH12) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_JESS_ZEN_AIO_KBD) },
+#endif
+#if IS_ENABLED(CONFIG_HID_CMEDIA)
+	{ HID_USB_DEVICE(USB_VENDOR_ID_CMEDIA, USB_DEVICE_ID_CM6533) },
+#endif
+#if IS_ENABLED(CONFIG_HID_CORSAIR)
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K90) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB) },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_PRODIKEYS_PCMIDI) },
+#endif
+#if IS_ENABLED(CONFIG_HID_CP2112)
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_CP2112) },
+#endif
+#if IS_ENABLED(CONFIG_HID_CYPRESS)
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_2) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_3) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_4) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE) },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_DELCOM, USB_DEVICE_ID_DELCOM_VISUAL_IND) },
+#endif
+#if IS_ENABLED(CONFIG_HID_DRAGONRISE)
 	{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0006) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0011) },
-#if IS_ENABLED(CONFIG_HID_MAYFLASH)
-	{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3) },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR) },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE1) },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE2) },
 #endif
-	{ HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_WN) },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_FA) },
+#if IS_ENABLED(CONFIG_HID_ELECOM)
 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) },
+#endif
+#if IS_ENABLED(CONFIG_HID_ELO)
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0009) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0030) },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_ACCUTOUCH_2216) },
+#endif
+#if IS_ENABLED(CONFIG_HID_EMS_FF)
 	{ HID_USB_DEVICE(USB_VENDOR_ID_EMS, USB_DEVICE_ID_EMS_TRIO_LINKER_PLUS_II) },
+#endif
+#if IS_ENABLED(CONFIG_HID_EZKEY)
 	{ HID_USB_DEVICE(USB_VENDOR_ID_EZKEY, USB_DEVICE_ID_BTC_8193) },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PSX_ADAPTOR) },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR) },
+#endif
+#if IS_ENABLED(CONFIG_HID_GEMBIRD)
 	{ HID_USB_DEVICE(USB_VENDOR_ID_GEMBIRD, USB_DEVICE_ID_GEMBIRD_JPD_DUALFORCE2) },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0003) },
+#endif
+#if IS_ENABLED(CONFIG_HID_GFRM)
+	{ HID_BLUETOOTH_DEVICE(0x58, 0x2000) },
+	{ HID_BLUETOOTH_DEVICE(0x471, 0x2210) },
+#endif
+#if IS_ENABLED(CONFIG_HID_GREENASIA)
 	{ HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0012) },
+#endif
+#if IS_ENABLED(CONFIG_HID_GT683R)
+	{ HID_USB_DEVICE(USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL) },
+#endif
+#if IS_ENABLED(CONFIG_HID_GYRATION)
 	{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) },
+#endif
+#if IS_ENABLED(CONFIG_HID_HOLTEK)
 	{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK, USB_DEVICE_ID_HOLTEK_ON_LINE_GRIP) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A) },
@@ -1912,12 +1980,17 @@ static const struct hid_device_id hid_have_special_driver[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A072) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A0C2) },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_HUION, USB_DEVICE_ID_HUION_TABLET) },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_JESS_ZEN_AIO_KBD) },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_JESS2, USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD) },
+#endif
+#if IS_ENABLED(CONFIG_HID_ICADE)
 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ION, USB_DEVICE_ID_ICADE) },
+#endif
+#if IS_ENABLED(CONFIG_HID_KENSINGTON)
 	{ HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) },
+#endif
+#if IS_ENABLED(CONFIG_HID_KEYTOUCH)
 	{ HID_USB_DEVICE(USB_VENDOR_ID_KEYTOUCH, USB_DEVICE_ID_KEYTOUCH_IEC) },
+#endif
+#if IS_ENABLED(CONFIG_HID_KYE)
 	{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_MANTICORE) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_GX_IMPERATOR) },
@@ -1927,21 +2000,29 @@ static const struct hid_device_id hid_have_special_driver[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_V2) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_PENSKETCH_M912) },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) },
+#endif
+#if IS_ENABLED(CONFIG_HID_LCPOWER)
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LCPOWER, USB_DEVICE_ID_LCPOWER_LC1000 ) },
+#endif
+#if IS_ENABLED(CONFIG_HID_LED)
+	{ HID_USB_DEVICE(USB_VENDOR_ID_DELCOM, USB_DEVICE_ID_DELCOM_VISUAL_IND) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_WN) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_FA) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_LUXAFOR) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_RISO_KAGAKU, USB_DEVICE_ID_RI_KA_WEBMAIL) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_THINGM, USB_DEVICE_ID_BLINK1) },
+#endif
 #if IS_ENABLED(CONFIG_HID_LENOVO)
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPKBD) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_CUSBKBD) },
 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_CBTKBD) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPPRODOCK) },
 #endif
-	{ HID_USB_DEVICE(USB_VENDOR_ID_LG, USB_DEVICE_ID_LG_MELFAS_MT) },
+#if IS_ENABLED(CONFIG_HID_LOGITECH)
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER_2) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RECEIVER) },
-	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_PS3) },
-	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_T651) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_DESKTOP) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_EDGE) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_MINI) },
@@ -1954,7 +2035,6 @@ static const struct hid_device_id hid_have_special_driver[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2_2) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G29_WHEEL) },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G920_WHEEL) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_F3D) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_FFG ) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FORCE3D_PRO) },
@@ -1966,17 +2046,30 @@ static const struct hid_device_id hid_have_special_driver[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFGT_WHEEL) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G25_WHEEL) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G27_WHEEL) },
-#if IS_ENABLED(CONFIG_HID_LOGITECH_DJ)
-	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER) },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER_2) },
-#endif
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WII_WHEEL) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACETRAVELLER) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACENAVIGATOR) },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD) },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD_BOOTLOADER) },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_LUXAFOR) },
+#endif
+#if IS_ENABLED(CONFIG_HID_LOGITECH_HIDPP)
+	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_T651) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G920_WHEEL) },
+#endif
+#if IS_ENABLED(CONFIG_HID_LOGITECH_DJ)
+	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER) },
2060 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER_2) },
2061#endif
2062#if IS_ENABLED(CONFIG_HID_MAGICMOUSE)
2063 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICMOUSE) },
2064 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICTRACKPAD) },
2065#endif
2066#if IS_ENABLED(CONFIG_HID_MAYFLASH)
2067 { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3) },
2068 { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR) },
2069 { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE1) },
2070 { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE2) },
2071#endif
2072#if IS_ENABLED(CONFIG_HID_MICROSOFT)
1980 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500) }, 2073 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500) },
1981 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_KEYBOARD) }, 2074 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_KEYBOARD) },
1982 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_SIDEWINDER_GV) }, 2075 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_SIDEWINDER_GV) },
@@ -1992,9 +2085,22 @@ static const struct hid_device_id hid_have_special_driver[] = {
1992 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_600) }, 2085 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_600) },
1993 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3KV1) }, 2086 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3KV1) },
1994 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER) }, 2087 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER) },
2088 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT) },
2089#endif
2090#if IS_ENABLED(CONFIG_HID_MONTEREY)
1995 { HID_USB_DEVICE(USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E) }, 2091 { HID_USB_DEVICE(USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E) },
1996 { HID_USB_DEVICE(USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL) }, 2092#endif
2093#if IS_ENABLED(CONFIG_HID_MULTITOUCH)
2094 { HID_USB_DEVICE(USB_VENDOR_ID_LG, USB_DEVICE_ID_LG_MELFAS_MT) },
2095#endif
2096#if IS_ENABLED(CONFIG_HID_WIIMOTE)
2097 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE) },
2098 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE2) },
2099#endif
2100#if IS_ENABLED(CONFIG_HID_NTI)
1997 { HID_USB_DEVICE(USB_VENDOR_ID_NTI, USB_DEVICE_ID_USB_SUN) }, 2101 { HID_USB_DEVICE(USB_VENDOR_ID_NTI, USB_DEVICE_ID_USB_SUN) },
2102#endif
2103#if IS_ENABLED(CONFIG_HID_NTRIG)
1998 { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN) }, 2104 { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN) },
1999 { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_1) }, 2105 { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_1) },
2000 { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_2) }, 2106 { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_2) },
@@ -2014,13 +2120,41 @@ static const struct hid_device_id hid_have_special_driver[] = {
2014 { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_16) }, 2120 { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_16) },
2015 { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_17) }, 2121 { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_17) },
2016 { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_18) }, 2122 { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_18) },
2123#endif
2124#if IS_ENABLED(CONFIG_HID_ORTEK)
2017 { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_PKB1700) }, 2125 { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_PKB1700) },
2018 { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) }, 2126 { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) },
2127 { HID_USB_DEVICE(USB_VENDOR_ID_SKYCABLE, USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER) },
2128#endif
2129#if IS_ENABLED(CONFIG_HID_PANTHERLORD)
2130 { HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PSX_ADAPTOR) },
2131 { HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR) },
2132 { HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0003) },
2133 { HID_USB_DEVICE(USB_VENDOR_ID_JESS2, USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD) },
2134#endif
2135#if IS_ENABLED(CONFIG_HID_PENMOUNT)
2019 { HID_USB_DEVICE(USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_6000) }, 2136 { HID_USB_DEVICE(USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_6000) },
2137#endif
2138#if IS_ENABLED(CONFIG_HID_PETALYNX)
2020 { HID_USB_DEVICE(USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE) }, 2139 { HID_USB_DEVICE(USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE) },
2140#endif
2141#if IS_ENABLED(CONFIG_HID_PICOLCD)
2142 { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD) },
2143 { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD_BOOTLOADER) },
2144#endif
2145#if IS_ENABLED(CONFIG_HID_PLANTRONICS)
2021 { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, HID_ANY_ID) }, 2146 { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, HID_ANY_ID) },
2147#endif
2148#if IS_ENABLED(CONFIG_HID_PRIMAX)
2022 { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_KEYBOARD) }, 2149 { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_KEYBOARD) },
2023 { HID_USB_DEVICE(USB_VENDOR_ID_RISO_KAGAKU, USB_DEVICE_ID_RI_KA_WEBMAIL) }, 2150#endif
2151#if IS_ENABLED(CONFIG_HID_PRODIKEYS)
2152 { HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_PRODIKEYS_PCMIDI) },
2153#endif
2154#if IS_ENABLED(CONFIG_HID_RMI)
2155 { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X1_COVER) },
2156 { HID_USB_DEVICE(USB_VENDOR_ID_RAZER, USB_DEVICE_ID_RAZER_BLADE_14) },
2157#endif
2024#if IS_ENABLED(CONFIG_HID_ROCCAT) 2158#if IS_ENABLED(CONFIG_HID_ROCCAT)
2025 { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ARVO) }, 2159 { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ARVO) },
2026 { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ISKU) }, 2160 { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ISKU) },
@@ -2048,9 +2182,21 @@ static const struct hid_device_id hid_have_special_driver[] = {
2048 { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT5) }, 2182 { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT5) },
2049 { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9) }, 2183 { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9) },
2050#endif 2184#endif
2185#if IS_ENABLED(CONFIG_HID_SAMSUNG)
2051 { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) }, 2186 { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) },
2052 { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE) }, 2187 { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE) },
2053 { HID_USB_DEVICE(USB_VENDOR_ID_SKYCABLE, USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER) }, 2188#endif
2189#if IS_ENABLED(CONFIG_HID_SMARTJOYPLUS)
2190 { HID_USB_DEVICE(USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII) },
2191 { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SMARTJOY_PLUS) },
2192 { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SUPER_JOY_BOX_3) },
2193 { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD) },
2194 { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_3_PRO) },
2195 { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_DUAL_BOX_PRO) },
2196 { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_5_PRO) },
2197#endif
2198#if IS_ENABLED(CONFIG_HID_SONY)
2199 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_PS3) },
2054 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SMK, USB_DEVICE_ID_SMK_PS3_BDREMOTE) }, 2200 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SMK, USB_DEVICE_ID_SMK_PS3_BDREMOTE) },
2055 { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_BUZZ_CONTROLLER) }, 2201 { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_BUZZ_CONTROLLER) },
2056 { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_WIRELESS_BUZZ_CONTROLLER) }, 2202 { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_WIRELESS_BUZZ_CONTROLLER) },
@@ -2069,9 +2215,17 @@ static const struct hid_device_id hid_have_special_driver[] = {
2069 { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) }, 2215 { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) },
2070 { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE) }, 2216 { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE) },
2071 { HID_USB_DEVICE(USB_VENDOR_ID_SINO_LITE, USB_DEVICE_ID_SINO_LITE_CONTROLLER) }, 2217 { HID_USB_DEVICE(USB_VENDOR_ID_SINO_LITE, USB_DEVICE_ID_SINO_LITE_CONTROLLER) },
2218#endif
2219#if IS_ENABLED(CONFIG_HID_SPEEDLINK)
2220 { HID_USB_DEVICE(USB_VENDOR_ID_X_TENSIONS, USB_DEVICE_ID_SPEEDLINK_VAD_CEZANNE) },
2221#endif
2222#if IS_ENABLED(CONFIG_HID_STEELSERIES)
2072 { HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, USB_DEVICE_ID_STEELSERIES_SRWS1) }, 2223 { HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, USB_DEVICE_ID_STEELSERIES_SRWS1) },
2224#endif
2225#if IS_ENABLED(CONFIG_HID_SUNPLUS)
2073 { HID_USB_DEVICE(USB_VENDOR_ID_SUNPLUS, USB_DEVICE_ID_SUNPLUS_WDESKTOP) }, 2226 { HID_USB_DEVICE(USB_VENDOR_ID_SUNPLUS, USB_DEVICE_ID_SUNPLUS_WDESKTOP) },
2074 { HID_USB_DEVICE(USB_VENDOR_ID_THINGM, USB_DEVICE_ID_BLINK1) }, 2227#endif
2228#if IS_ENABLED(CONFIG_HID_THRUSTMASTER)
2075 { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb300) }, 2229 { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb300) },
2076 { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb304) }, 2230 { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb304) },
2077 { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb323) }, 2231 { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb323) },
@@ -2080,12 +2234,25 @@ static const struct hid_device_id hid_have_special_driver[] = {
2080 { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb653) }, 2234 { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb653) },
2081 { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb654) }, 2235 { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb654) },
2082 { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb65a) }, 2236 { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb65a) },
2237#endif
2238#if IS_ENABLED(CONFIG_HID_TIVO)
2083 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_BT) }, 2239 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_BT) },
2084 { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE) }, 2240 { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE) },
2085 { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_PRO) }, 2241 { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_PRO) },
2242#endif
2243#if IS_ENABLED(CONFIG_HID_TOPSEED)
2244 { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE) },
2245 { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE_2) },
2246 { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS) },
2086 { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) }, 2247 { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) },
2087 { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED2, USB_DEVICE_ID_TOPSEED2_RF_COMBO) }, 2248 { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED2, USB_DEVICE_ID_TOPSEED2_RF_COMBO) },
2249#endif
2250#if IS_ENABLED(CONFIG_HID_TWINHAN)
2088 { HID_USB_DEVICE(USB_VENDOR_ID_TWINHAN, USB_DEVICE_ID_TWINHAN_IR_REMOTE) }, 2251 { HID_USB_DEVICE(USB_VENDOR_ID_TWINHAN, USB_DEVICE_ID_TWINHAN_IR_REMOTE) },
2252#endif
2253#if IS_ENABLED(CONFIG_HID_UCLOGIC)
2254 { HID_USB_DEVICE(USB_VENDOR_ID_HUION, USB_DEVICE_ID_HUION_TABLET) },
2255 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_HUION_TABLET) },
2089 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_PF1209) }, 2256 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_PF1209) },
2090 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U) }, 2257 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U) },
2091 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP5540U) }, 2258 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP5540U) },
@@ -2093,20 +2260,17 @@ static const struct hid_device_id hid_have_special_driver[] = {
2093 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP1062) }, 2260 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP1062) },
2094 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_WIRELESS_TABLET_TWHL850) }, 2261 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_WIRELESS_TABLET_TWHL850) },
2095 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_TWHA60) }, 2262 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_TWHA60) },
2096 { HID_USB_DEVICE(USB_VENDOR_ID_THQ, USB_DEVICE_ID_THQ_PS3_UDRAW) },
2097 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_YIYNOVA_TABLET) }, 2263 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_YIYNOVA_TABLET) },
2098 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_81) }, 2264 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_81) },
2099 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_45) }, 2265 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_45) },
2100 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_DRAWIMAGE_G3) }, 2266 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_DRAWIMAGE_G3) },
2101 { HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_GP0610) },
2102 { HID_USB_DEVICE(USB_VENDOR_ID_UGEE, USB_DEVICE_ID_UGEE_TABLET_EX07S) }, 2267 { HID_USB_DEVICE(USB_VENDOR_ID_UGEE, USB_DEVICE_ID_UGEE_TABLET_EX07S) },
2103 { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SMARTJOY_PLUS) }, 2268 { HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_GP0610) },
2104 { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SUPER_JOY_BOX_3) }, 2269#endif
2105 { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD) }, 2270#if IS_ENABLED(CONFIG_HID_UDRAW_PS3)
2106 { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_3_PRO) }, 2271 { HID_USB_DEVICE(USB_VENDOR_ID_THQ, USB_DEVICE_ID_THQ_PS3_UDRAW) },
2107 { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_DUAL_BOX_PRO) }, 2272#endif
2108 { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_5_PRO) }, 2273#if IS_ENABLED(CONFIG_HID_WALTOP)
2109 { HID_USB_DEVICE(USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII) },
2110 { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SLIM_TABLET_5_8_INCH) }, 2274 { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SLIM_TABLET_5_8_INCH) },
2111 { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SLIM_TABLET_12_1_INCH) }, 2275 { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SLIM_TABLET_12_1_INCH) },
2112 { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_Q_PAD) }, 2276 { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_Q_PAD) },
@@ -2114,19 +2278,18 @@ static const struct hid_device_id hid_have_special_driver[] = {
2114 { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH) }, 2278 { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH) },
2115 { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH) }, 2279 { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH) },
2116 { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET) }, 2280 { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET) },
2117 { HID_USB_DEVICE(USB_VENDOR_ID_X_TENSIONS, USB_DEVICE_ID_SPEEDLINK_VAD_CEZANNE) }, 2281#endif
2282#if IS_ENABLED(CONFIG_HID_XINMO)
2118 { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE) }, 2283 { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE) },
2119 { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_THT_2P_ARCADE) }, 2284 { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_THT_2P_ARCADE) },
2285#endif
2286#if IS_ENABLED(CONFIG_HID_ZEROPLUS)
2120 { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0005) }, 2287 { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0005) },
2121 { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0030) }, 2288 { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0030) },
2289#endif
2290#if IS_ENABLED(CONFIG_HID_ZYDACRON)
2122 { HID_USB_DEVICE(USB_VENDOR_ID_ZYDACRON, USB_DEVICE_ID_ZYDACRON_REMOTE_CONTROL) }, 2291 { HID_USB_DEVICE(USB_VENDOR_ID_ZYDACRON, USB_DEVICE_ID_ZYDACRON_REMOTE_CONTROL) },
2123 2292#endif
2124 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT) },
2125 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE) },
2126 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE2) },
2127 { HID_USB_DEVICE(USB_VENDOR_ID_RAZER, USB_DEVICE_ID_RAZER_BLADE_14) },
2128 { HID_USB_DEVICE(USB_VENDOR_ID_CMEDIA, USB_DEVICE_ID_CM6533) },
2129 { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X1_COVER) },
2130 { } 2293 { }
2131}; 2294};
2132 2295
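The guards introduced throughout the table above rely on IS_ENABLED() from <linux/kconfig.h>, which evaluates to 1 when a Kconfig option is built in (=y) or built as a module (=m), and to 0 otherwise, so entries for disabled special drivers are compiled out and those devices fall back to the generic HID handling. A minimal sketch of the pattern; CONFIG_HID_EXAMPLE and the IDs here are illustrative, not taken from this patch:

#include <linux/hid.h>
#include <linux/kconfig.h>

static const struct hid_device_id example_devices[] = {
#if IS_ENABLED(CONFIG_HID_EXAMPLE)	/* kept for =y and =m, dropped for =n */
	{ HID_USB_DEVICE(0x1234, 0x5678) },
#endif
	{ }	/* empty entry terminates the table */
};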
diff --git a/drivers/hid/hid-elecom.c b/drivers/hid/hid-elecom.c
index 6e3848a8d8dd..e2c7465df69f 100644
--- a/drivers/hid/hid-elecom.c
+++ b/drivers/hid/hid-elecom.c
@@ -1,10 +1,8 @@
 /*
- * HID driver for Elecom BM084 (bluetooth mouse).
- * Removes a non-existing horizontal wheel from
- * the HID descriptor.
- * (This module is based on "hid-ortek".)
- *
+ * HID driver for ELECOM devices.
  * Copyright (c) 2010 Richard Nauber <Richard.Nauber@gmail.com>
+ * Copyright (c) 2016 Yuxuan Shui <yshuiv7@gmail.com>
+ * Copyright (c) 2017 Diego Elio Pettenò <flameeyes@flameeyes.eu>
  */
 
 /*
@@ -23,15 +21,61 @@
 static __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc,
 				 unsigned int *rsize)
 {
-	if (*rsize >= 48 && rdesc[46] == 0x05 && rdesc[47] == 0x0c) {
-		hid_info(hdev, "Fixing up Elecom BM084 report descriptor\n");
-		rdesc[47] = 0x00;
+	switch (hdev->product) {
+	case USB_DEVICE_ID_ELECOM_BM084:
+		/* The BM084 Bluetooth mouse includes a non-existing horizontal
+		 * wheel in the HID descriptor. */
+		if (*rsize >= 48 && rdesc[46] == 0x05 && rdesc[47] == 0x0c) {
+			hid_info(hdev, "Fixing up Elecom BM084 report descriptor\n");
+			rdesc[47] = 0x00;
+		}
+		break;
+	case USB_DEVICE_ID_ELECOM_DEFT_WIRED:
+	case USB_DEVICE_ID_ELECOM_DEFT_WIRELESS:
+		/* The DEFT trackball has eight buttons, but its descriptor only
+		 * reports five, disabling the three Fn buttons on the top of
+		 * the mouse.
+		 *
+		 * Apply the following diff to the descriptor:
+		 *
+		 * Collection (Physical),             Collection (Physical),
+		 *     Report ID (1),                     Report ID (1),
+		 *     Report Count (5),           ->     Report Count (8),
+		 *     Report Size (1),                   Report Size (1),
+		 *     Usage Page (Button),               Usage Page (Button),
+		 *     Usage Minimum (01h),               Usage Minimum (01h),
+		 *     Usage Maximum (05h),        ->     Usage Maximum (08h),
+		 *     Logical Minimum (0),               Logical Minimum (0),
+		 *     Logical Maximum (1),               Logical Maximum (1),
+		 *     Input (Variable),                  Input (Variable),
+		 *     Report Count (1),           ->     Report Count (0),
+		 *     Report Size (3),                   Report Size (3),
+		 *     Input (Constant),                  Input (Constant),
+		 *     Report Size (16),                  Report Size (16),
+		 *     Report Count (2),                  Report Count (2),
+		 *     Usage Page (Desktop),              Usage Page (Desktop),
+		 *     Usage (X),                         Usage (X),
+		 *     Usage (Y),                         Usage (Y),
+		 *     Logical Minimum (-32768),          Logical Minimum (-32768),
+		 *     Logical Maximum (32767),           Logical Maximum (32767),
+		 *     Input (Variable, Relative),        Input (Variable, Relative),
+		 * End Collection,                    End Collection,
+		 */
+		if (*rsize == 213 && rdesc[13] == 5 && rdesc[21] == 5) {
+			hid_info(hdev, "Fixing up Elecom DEFT Fn buttons\n");
+			rdesc[13] = 8; /* Button/Variable Report Count */
+			rdesc[21] = 8; /* Button/Variable Usage Maximum */
+			rdesc[29] = 0; /* Button/Constant Report Count */
+		}
+		break;
 	}
 	return rdesc;
 }
 
 static const struct hid_device_id elecom_devices[] = {
-	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084)},
+	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) },
 	{ }
 };
 MODULE_DEVICE_TABLE(hid, elecom_devices);
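The report_fixup() hook used above runs before hid-core parses the report descriptor, so a driver can patch individual descriptor bytes in place; both the BM084 wheel removal and the DEFT button-count fix work this way. A reduced sketch of the pattern, with a hypothetical driver name and hypothetical descriptor offsets:

static __u8 *example_report_fixup(struct hid_device *hdev, __u8 *rdesc,
				  unsigned int *rsize)
{
	/* Verify the size and the exact bytes first, so a firmware
	 * revision with a different descriptor passes through intact. */
	if (*rsize >= 12 && rdesc[10] == 0x05 && rdesc[11] == 0x0c) {
		hid_info(hdev, "fixing up example report descriptor\n");
		rdesc[11] = 0x00;
	}
	return rdesc;
}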
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 643390ba749d..4f9a3938189a 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -173,6 +173,7 @@
 #define USB_VENDOR_ID_ASUSTEK		0x0b05
 #define USB_DEVICE_ID_ASUSTEK_LCM	0x1726
 #define USB_DEVICE_ID_ASUSTEK_LCM2	0x175b
+#define USB_DEVICE_ID_ASUSTEK_T100_KEYBOARD	0x17e0
 #define USB_DEVICE_ID_ASUSTEK_I2C_KEYBOARD	0x8585
 #define USB_DEVICE_ID_ASUSTEK_I2C_TOUCHPAD	0x0101
 #define USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD1	0x1854
@@ -318,6 +319,9 @@
 #define USB_VENDOR_ID_DELCOM		0x0fc5
 #define USB_DEVICE_ID_DELCOM_VISUAL_IND	0xb080
 
+#define USB_VENDOR_ID_DELL				0x413c
+#define USB_DEVICE_ID_DELL_PIXART_USB_OPTICAL_MOUSE	0x301a
+
 #define USB_VENDOR_ID_DELORME		0x1163
 #define USB_DEVICE_ID_DELORME_EARTHMATE	0x0100
 #define USB_DEVICE_ID_DELORME_EM_LT20	0x0200
@@ -358,6 +362,8 @@
 
 #define USB_VENDOR_ID_ELECOM		0x056e
 #define USB_DEVICE_ID_ELECOM_BM084	0x0061
+#define USB_DEVICE_ID_ELECOM_DEFT_WIRED		0x00fe
+#define USB_DEVICE_ID_ELECOM_DEFT_WIRELESS	0x00ff
 
 #define USB_VENDOR_ID_DREAM_CHEEKY	0x1d34
 #define USB_DEVICE_ID_DREAM_CHEEKY_WN	0x0004
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index 8daa8ce64ebb..fb55fb4c39fc 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -897,6 +897,15 @@ static int i2c_hid_acpi_pdata(struct i2c_client *client,
 	return 0;
 }
 
+static void i2c_hid_acpi_fix_up_power(struct device *dev)
+{
+	acpi_handle handle = ACPI_HANDLE(dev);
+	struct acpi_device *adev;
+
+	if (handle && acpi_bus_get_device(handle, &adev) == 0)
+		acpi_device_fix_up_power(adev);
+}
+
 static const struct acpi_device_id i2c_hid_acpi_match[] = {
 	{"ACPI0C50", 0 },
 	{"PNP0C50", 0 },
@@ -909,6 +918,8 @@ static inline int i2c_hid_acpi_pdata(struct i2c_client *client,
 {
 	return -ENODEV;
 }
+
+static inline void i2c_hid_acpi_fix_up_power(struct device *dev) {}
 #endif
 
 #ifdef CONFIG_OF
@@ -1030,6 +1041,8 @@ static int i2c_hid_probe(struct i2c_client *client,
 	if (ret < 0)
 		goto err_regulator;
 
+	i2c_hid_acpi_fix_up_power(&client->dev);
+
 	pm_runtime_get_noresume(&client->dev);
 	pm_runtime_set_active(&client->dev);
 	pm_runtime_enable(&client->dev);
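acpi_device_fix_up_power() turns on a device's ACPI power resources and transitions it to D0, which covers panels the firmware leaves powered down and which would otherwise fail their first I2C transfer during probe. A sketch of the same guard-plus-stub shape with an illustrative driver prefix; this variant looks up the companion via ACPI_COMPANION() rather than the acpi_bus_get_device() call used in the patch:

#ifdef CONFIG_ACPI
static void example_acpi_fix_up_power(struct device *dev)
{
	struct acpi_device *adev = ACPI_COMPANION(dev);

	if (adev)
		acpi_device_fix_up_power(adev);
}
#else
/* the empty stub keeps the probe-path call site unconditional */
static inline void example_acpi_fix_up_power(struct device *dev) {}
#endif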
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 6316498b7812..a88e7c7bea0a 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -85,6 +85,7 @@ static const struct hid_blacklist {
 	{ USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB_RAPIDFIRE, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
 	{ USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
 	{ USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51, HID_QUIRK_NOGET },
+	{ USB_VENDOR_ID_DELL, USB_DEVICE_ID_DELL_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
 	{ USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
 	{ USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT },
 	{ USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3, HID_QUIRK_MULTI_INPUT },
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 4b225fb19a16..e274c9dc32f3 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -1571,37 +1571,38 @@ static int wacom_tpc_irq(struct wacom_wac *wacom, size_t len)
 {
 	unsigned char *data = wacom->data;
 
-	if (wacom->pen_input)
+	if (wacom->pen_input) {
 		dev_dbg(wacom->pen_input->dev.parent,
 			"%s: received report #%d\n", __func__, data[0]);
-	else if (wacom->touch_input)
+
+		if (len == WACOM_PKGLEN_PENABLED ||
+		    data[0] == WACOM_REPORT_PENABLED)
+			return wacom_tpc_pen(wacom);
+	}
+	else if (wacom->touch_input) {
 		dev_dbg(wacom->touch_input->dev.parent,
 			"%s: received report #%d\n", __func__, data[0]);
 
 		switch (len) {
 		case WACOM_PKGLEN_TPC1FG:
 			return wacom_tpc_single_touch(wacom, len);
 
 		case WACOM_PKGLEN_TPC2FG:
 			return wacom_tpc_mt_touch(wacom);
 
-		case WACOM_PKGLEN_PENABLED:
-			return wacom_tpc_pen(wacom);
+		default:
+			switch (data[0]) {
+			case WACOM_REPORT_TPC1FG:
+			case WACOM_REPORT_TPCHID:
+			case WACOM_REPORT_TPCST:
+			case WACOM_REPORT_TPC1FGE:
+				return wacom_tpc_single_touch(wacom, len);
 
-		default:
-			switch (data[0]) {
-			case WACOM_REPORT_TPC1FG:
-			case WACOM_REPORT_TPCHID:
-			case WACOM_REPORT_TPCST:
-			case WACOM_REPORT_TPC1FGE:
-				return wacom_tpc_single_touch(wacom, len);
-
-			case WACOM_REPORT_TPCMT:
-			case WACOM_REPORT_TPCMT2:
-				return wacom_mt_touch(wacom);
+			case WACOM_REPORT_TPCMT:
+			case WACOM_REPORT_TPCMT2:
+				return wacom_mt_touch(wacom);
 
-			case WACOM_REPORT_PENABLED:
-				return wacom_tpc_pen(wacom);
-			}
+			}
 		}
 	}
 
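The restructure keys the dispatch on which input device was actually registered: pen packets are claimed inside the pen_input branch, and the touch switch only runs when touch_input exists, so neither path can dereference the other's possibly-NULL input device. The control flow reduced to a sketch; the helper names here are hypothetical:

if (wacom->pen_input) {
	if (is_pen_packet(data, len))	/* hypothetical predicate */
		return handle_pen(wacom);
} else if (wacom->touch_input) {
	return handle_touch(wacom, len);
}
/* neither input registered: the report is dropped */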
diff --git a/drivers/hsi/clients/ssi_protocol.c b/drivers/hsi/clients/ssi_protocol.c
index 26b05106f0d3..93d28c0ec8bf 100644
--- a/drivers/hsi/clients/ssi_protocol.c
+++ b/drivers/hsi/clients/ssi_protocol.c
@@ -1066,7 +1066,7 @@ static void ssip_pn_setup(struct net_device *dev)
 	dev->addr_len		= 1;
 	dev->tx_queue_len	= SSIP_TXQUEUE_LEN;
 
-	dev->destructor		= free_netdev;
+	dev->needs_free_netdev	= true;
 	dev->header_ops		= &phonet_header_ops;
 }
 
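dev->destructor was removed from struct net_device in this cycle; drivers that only need free_netdev() now set needs_free_netdev instead and let the core free the device after unregistration. A minimal sketch of a setup callback using the flag (the driver name is illustrative):

#include <linux/netdevice.h>

static void example_netdev_setup(struct net_device *dev)
{
	/* the core calls free_netdev() for us after unregister_netdev() */
	dev->needs_free_netdev = true;
}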
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 22d5eafd6815..5ef2814345ef 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -343,6 +343,7 @@ config SENSORS_ASB100
 
 config SENSORS_ASPEED
 	tristate "ASPEED AST2400/AST2500 PWM and Fan tach driver"
+	select REGMAP
 	help
 	  This driver provides support for ASPEED AST2400/AST2500 PWM
 	  and Fan Tacho controllers.
diff --git a/drivers/hwmon/aspeed-pwm-tacho.c b/drivers/hwmon/aspeed-pwm-tacho.c
index 48403a2115be..9de13d626c68 100644
--- a/drivers/hwmon/aspeed-pwm-tacho.c
+++ b/drivers/hwmon/aspeed-pwm-tacho.c
@@ -7,6 +7,7 @@
  */
 
 #include <linux/clk.h>
+#include <linux/errno.h>
 #include <linux/gpio/consumer.h>
 #include <linux/delay.h>
 #include <linux/hwmon.h>
@@ -494,7 +495,7 @@ static u32 aspeed_get_fan_tach_ch_measure_period(struct aspeed_pwm_tacho_data
 	return clk / (clk_unit * div_h * div_l * tacho_div * tacho_unit);
 }
 
-static u32 aspeed_get_fan_tach_ch_rpm(struct aspeed_pwm_tacho_data *priv,
+static int aspeed_get_fan_tach_ch_rpm(struct aspeed_pwm_tacho_data *priv,
 				      u8 fan_tach_ch)
 {
 	u32 raw_data, tach_div, clk_source, sec, val;
@@ -510,6 +511,9 @@ static u32 aspeed_get_fan_tach_ch_rpm(struct aspeed_pwm_tacho_data *priv,
 	msleep(sec);
 
 	regmap_read(priv->regmap, ASPEED_PTCR_RESULT, &val);
+	if (!(val & RESULT_STATUS_MASK))
+		return -ETIMEDOUT;
+
 	raw_data = val & RESULT_VALUE_MASK;
 	tach_div = priv->type_fan_tach_clock_division[type];
 	tach_div = 0x4 << (tach_div * 2);
@@ -561,12 +565,14 @@ static ssize_t show_rpm(struct device *dev, struct device_attribute *attr,
 {
 	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
 	int index = sensor_attr->index;
-	u32 rpm;
+	int rpm;
 	struct aspeed_pwm_tacho_data *priv = dev_get_drvdata(dev);
 
 	rpm = aspeed_get_fan_tach_ch_rpm(priv, index);
+	if (rpm < 0)
+		return rpm;
 
-	return sprintf(buf, "%u\n", rpm);
+	return sprintf(buf, "%d\n", rpm);
 }
 
 static umode_t pwm_is_visible(struct kobject *kobj,
@@ -591,24 +597,23 @@ static umode_t fan_dev_is_visible(struct kobject *kobj,
 	return a->mode;
 }
 
-static SENSOR_DEVICE_ATTR(pwm0, 0644,
-			show_pwm, set_pwm, 0);
 static SENSOR_DEVICE_ATTR(pwm1, 0644,
-			show_pwm, set_pwm, 1);
+			show_pwm, set_pwm, 0);
 static SENSOR_DEVICE_ATTR(pwm2, 0644,
-			show_pwm, set_pwm, 2);
+			show_pwm, set_pwm, 1);
 static SENSOR_DEVICE_ATTR(pwm3, 0644,
-			show_pwm, set_pwm, 3);
+			show_pwm, set_pwm, 2);
 static SENSOR_DEVICE_ATTR(pwm4, 0644,
-			show_pwm, set_pwm, 4);
+			show_pwm, set_pwm, 3);
 static SENSOR_DEVICE_ATTR(pwm5, 0644,
-			show_pwm, set_pwm, 5);
+			show_pwm, set_pwm, 4);
 static SENSOR_DEVICE_ATTR(pwm6, 0644,
-			show_pwm, set_pwm, 6);
+			show_pwm, set_pwm, 5);
 static SENSOR_DEVICE_ATTR(pwm7, 0644,
+			show_pwm, set_pwm, 6);
+static SENSOR_DEVICE_ATTR(pwm8, 0644,
 			show_pwm, set_pwm, 7);
 static struct attribute *pwm_dev_attrs[] = {
-	&sensor_dev_attr_pwm0.dev_attr.attr,
 	&sensor_dev_attr_pwm1.dev_attr.attr,
 	&sensor_dev_attr_pwm2.dev_attr.attr,
 	&sensor_dev_attr_pwm3.dev_attr.attr,
@@ -616,6 +621,7 @@ static struct attribute *pwm_dev_attrs[] = {
 	&sensor_dev_attr_pwm5.dev_attr.attr,
 	&sensor_dev_attr_pwm6.dev_attr.attr,
 	&sensor_dev_attr_pwm7.dev_attr.attr,
+	&sensor_dev_attr_pwm8.dev_attr.attr,
 	NULL,
 };
 
@@ -624,40 +630,39 @@ static const struct attribute_group pwm_dev_group = {
 	.is_visible = pwm_is_visible,
 };
 
-static SENSOR_DEVICE_ATTR(fan0_input, 0444,
-		show_rpm, NULL, 0);
 static SENSOR_DEVICE_ATTR(fan1_input, 0444,
-		show_rpm, NULL, 1);
+		show_rpm, NULL, 0);
 static SENSOR_DEVICE_ATTR(fan2_input, 0444,
-		show_rpm, NULL, 2);
+		show_rpm, NULL, 1);
 static SENSOR_DEVICE_ATTR(fan3_input, 0444,
-		show_rpm, NULL, 3);
+		show_rpm, NULL, 2);
 static SENSOR_DEVICE_ATTR(fan4_input, 0444,
-		show_rpm, NULL, 4);
+		show_rpm, NULL, 3);
 static SENSOR_DEVICE_ATTR(fan5_input, 0444,
-		show_rpm, NULL, 5);
+		show_rpm, NULL, 4);
 static SENSOR_DEVICE_ATTR(fan6_input, 0444,
-		show_rpm, NULL, 6);
+		show_rpm, NULL, 5);
 static SENSOR_DEVICE_ATTR(fan7_input, 0444,
-		show_rpm, NULL, 7);
+		show_rpm, NULL, 6);
 static SENSOR_DEVICE_ATTR(fan8_input, 0444,
-		show_rpm, NULL, 8);
+		show_rpm, NULL, 7);
 static SENSOR_DEVICE_ATTR(fan9_input, 0444,
-		show_rpm, NULL, 9);
+		show_rpm, NULL, 8);
 static SENSOR_DEVICE_ATTR(fan10_input, 0444,
-		show_rpm, NULL, 10);
+		show_rpm, NULL, 9);
 static SENSOR_DEVICE_ATTR(fan11_input, 0444,
-		show_rpm, NULL, 11);
+		show_rpm, NULL, 10);
 static SENSOR_DEVICE_ATTR(fan12_input, 0444,
-		show_rpm, NULL, 12);
+		show_rpm, NULL, 11);
 static SENSOR_DEVICE_ATTR(fan13_input, 0444,
-		show_rpm, NULL, 13);
+		show_rpm, NULL, 12);
 static SENSOR_DEVICE_ATTR(fan14_input, 0444,
-		show_rpm, NULL, 14);
+		show_rpm, NULL, 13);
 static SENSOR_DEVICE_ATTR(fan15_input, 0444,
+		show_rpm, NULL, 14);
+static SENSOR_DEVICE_ATTR(fan16_input, 0444,
 		show_rpm, NULL, 15);
 static struct attribute *fan_dev_attrs[] = {
-	&sensor_dev_attr_fan0_input.dev_attr.attr,
 	&sensor_dev_attr_fan1_input.dev_attr.attr,
 	&sensor_dev_attr_fan2_input.dev_attr.attr,
 	&sensor_dev_attr_fan3_input.dev_attr.attr,
@@ -673,6 +678,7 @@ static struct attribute *fan_dev_attrs[] = {
 	&sensor_dev_attr_fan13_input.dev_attr.attr,
 	&sensor_dev_attr_fan14_input.dev_attr.attr,
 	&sensor_dev_attr_fan15_input.dev_attr.attr,
+	&sensor_dev_attr_fan16_input.dev_attr.attr,
 	NULL
 };
 
@@ -802,7 +808,6 @@ static int aspeed_pwm_tacho_probe(struct platform_device *pdev)
 		if (ret)
 			return ret;
 	}
-	of_node_put(np);
 
 	priv->groups[0] = &pwm_dev_group;
 	priv->groups[1] = &fan_dev_group;
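The renames above follow the hwmon sysfs ABI, which numbers channels from 1 (fan1_input, pwm1, ...), while driver-internal channel indices stay 0-based; only the attribute name is offset by one. The mapping in isolation, using show_rpm as defined above (SENSOR_DEVICE_ATTR comes from <linux/hwmon-sysfs.h>):

/* driver channel 0 is fan1 in sysfs ... */
static SENSOR_DEVICE_ATTR(fan1_input, 0444, show_rpm, NULL, 0);
/* ... and driver channel 15 is fan16 */
static SENSOR_DEVICE_ATTR(fan16_input, 0444, show_rpm, NULL, 15);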
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index 3ac4c03ba77b..c13a4fd86b3c 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -605,6 +605,13 @@ static int coretemp_cpu_online(unsigned int cpu)
 	struct platform_data *pdata;
 
 	/*
+	 * Don't execute this on resume as the offline callback did
+	 * not get executed on suspend.
+	 */
+	if (cpuhp_tasks_frozen)
+		return 0;
+
+	/*
 	 * CPUID.06H.EAX[0] indicates whether the CPU has thermal
 	 * sensors. We check this bit only, all the early CPUs
 	 * without thermal sensors will be filtered out.
@@ -654,6 +661,13 @@ static int coretemp_cpu_offline(unsigned int cpu)
 	struct temp_data *tdata;
 	int indx, target;
 
+	/*
+	 * Don't execute this on suspend as the device remove locks
+	 * up the machine.
+	 */
+	if (cpuhp_tasks_frozen)
+		return 0;
+
 	/* If the physical CPU device does not exist, just return */
 	if (!pdev)
 		return 0;
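cpuhp_tasks_frozen is set by the CPU hotplug core while CPUs are torn down or brought back as part of suspend/resume, so hotplug callbacks can distinguish a physical hotplug event from a PM transition, which is exactly what both guards above do. The pattern as a sketch (the callback name is illustrative):

#include <linux/cpu.h>
#include <linux/cpuhotplug.h>

static int example_cpu_online(unsigned int cpu)
{
	/* resume path: the offline callback was skipped on suspend,
	 * so skip the matching re-initialization here as well */
	if (cpuhp_tasks_frozen)
		return 0;

	/* ... real hotplug handling ... */
	return 0;
}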
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index f2acd4b6bf01..d1263b82d646 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -94,6 +94,7 @@ static void dw_i2c_acpi_params(struct platform_device *pdev, char method[],
 static int dw_i2c_acpi_configure(struct platform_device *pdev)
 {
 	struct dw_i2c_dev *dev = platform_get_drvdata(pdev);
+	u32 ss_ht = 0, fp_ht = 0, hs_ht = 0, fs_ht = 0;
 	acpi_handle handle = ACPI_HANDLE(&pdev->dev);
 	const struct acpi_device_id *id;
 	struct acpi_device *adev;
@@ -107,23 +108,24 @@ static int dw_i2c_acpi_configure(struct platform_device *pdev)
 	 * Try to get SDA hold time and *CNT values from an ACPI method for
 	 * selected speed modes.
 	 */
+	dw_i2c_acpi_params(pdev, "SSCN", &dev->ss_hcnt, &dev->ss_lcnt, &ss_ht);
+	dw_i2c_acpi_params(pdev, "FPCN", &dev->fp_hcnt, &dev->fp_lcnt, &fp_ht);
+	dw_i2c_acpi_params(pdev, "HSCN", &dev->hs_hcnt, &dev->hs_lcnt, &hs_ht);
+	dw_i2c_acpi_params(pdev, "FMCN", &dev->fs_hcnt, &dev->fs_lcnt, &fs_ht);
+
 	switch (dev->clk_freq) {
 	case 100000:
-		dw_i2c_acpi_params(pdev, "SSCN", &dev->ss_hcnt, &dev->ss_lcnt,
-				   &dev->sda_hold_time);
+		dev->sda_hold_time = ss_ht;
 		break;
 	case 1000000:
-		dw_i2c_acpi_params(pdev, "FPCN", &dev->fp_hcnt, &dev->fp_lcnt,
-				   &dev->sda_hold_time);
+		dev->sda_hold_time = fp_ht;
 		break;
 	case 3400000:
-		dw_i2c_acpi_params(pdev, "HSCN", &dev->hs_hcnt, &dev->hs_lcnt,
-				   &dev->sda_hold_time);
+		dev->sda_hold_time = hs_ht;
 		break;
 	case 400000:
 	default:
-		dw_i2c_acpi_params(pdev, "FMCN", &dev->fs_hcnt, &dev->fs_lcnt,
-				   &dev->sda_hold_time);
+		dev->sda_hold_time = fs_ht;
 		break;
 	}
 
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 95ed17183e73..54a47b40546f 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -734,9 +734,9 @@ static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx,
 		 * the first read operation, otherwise the first read cost
 		 * one extra clock cycle.
 		 */
-		temp = readb(i2c_imx->base + IMX_I2C_I2CR);
+		temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
 		temp |= I2CR_MTX;
-		writeb(temp, i2c_imx->base + IMX_I2C_I2CR);
+		imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
 	}
 	msgs->buf[msgs->len-1] = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR);
 
@@ -857,9 +857,9 @@ static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs, bo
 			 * the first read operation, otherwise the first read cost
 			 * one extra clock cycle.
 			 */
-			temp = readb(i2c_imx->base + IMX_I2C_I2CR);
+			temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
 			temp |= I2CR_MTX;
-			writeb(temp, i2c_imx->base + IMX_I2C_I2CR);
+			imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
 		}
 	} else if (i == (msgs->len - 2)) {
 		dev_dbg(&i2c_imx->adapter.dev,
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
index f573448d2132..e98e44e584a4 100644
--- a/drivers/i2c/busses/i2c-ismt.c
+++ b/drivers/i2c/busses/i2c-ismt.c
@@ -584,7 +584,7 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr,
 
 	/* unmap the data buffer */
 	if (dma_size != 0)
-		dma_unmap_single(&adap->dev, dma_addr, dma_size, dma_direction);
+		dma_unmap_single(dev, dma_addr, dma_size, dma_direction);
 
 	if (unlikely(!time_left)) {
 		dev_err(dev, "completion wait timed out\n");
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
index cf737ec8563b..5c4db65c5019 100644
--- a/drivers/i2c/busses/i2c-mv64xxx.c
+++ b/drivers/i2c/busses/i2c-mv64xxx.c
@@ -819,7 +819,6 @@ mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data,
 		rc = -EINVAL;
 		goto out;
 	}
-	drv_data->irq = irq_of_parse_and_map(np, 0);
 
 	drv_data->rstc = devm_reset_control_get_optional(dev, NULL);
 	if (IS_ERR(drv_data->rstc)) {
@@ -902,10 +901,11 @@ mv64xxx_i2c_probe(struct platform_device *pd)
 	if (!IS_ERR(drv_data->clk))
 		clk_prepare_enable(drv_data->clk);
 
+	drv_data->irq = platform_get_irq(pd, 0);
+
 	if (pdata) {
 		drv_data->freq_m = pdata->freq_m;
 		drv_data->freq_n = pdata->freq_n;
-		drv_data->irq = platform_get_irq(pd, 0);
 		drv_data->adapter.timeout = msecs_to_jiffies(pdata->timeout);
 		drv_data->offload_enabled = false;
 		memcpy(&drv_data->reg_offsets, &mv64xxx_i2c_regs_mv64xxx, sizeof(drv_data->reg_offsets));
@@ -915,7 +915,7 @@ mv64xxx_i2c_probe(struct platform_device *pd)
 		goto exit_clk;
 	}
 	if (drv_data->irq < 0) {
-		rc = -ENXIO;
+		rc = drv_data->irq;
 		goto exit_reset;
 	}
 
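platform_get_irq() returns a negative errno on failure, and the fix above propagates that value instead of flattening it to -ENXIO; this matters because -EPROBE_DEFER must reach the driver core unchanged so the probe is retried once the interrupt controller has come up. The usual idiom, as a sketch:

int irq = platform_get_irq(pdev, 0);

if (irq < 0)
	return irq;	/* may be -EPROBE_DEFER; do not rewrite it */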
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index 214bf2835d1f..8be3e6cb8fe6 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -319,7 +319,7 @@ static void rcar_i2c_dma_unmap(struct rcar_i2c_priv *priv)
 	rcar_i2c_write(priv, ICFBSCR, TCYC06);
 
 	dma_unmap_single(chan->device->dev, sg_dma_address(&priv->sg),
-			 priv->msg->len, priv->dma_direction);
+			 sg_dma_len(&priv->sg), priv->dma_direction);
 
 	priv->dma_direction = DMA_NONE;
 }
diff --git a/drivers/i2c/busses/i2c-tiny-usb.c b/drivers/i2c/busses/i2c-tiny-usb.c
index 0ed77eeff31e..a2e3dd715380 100644
--- a/drivers/i2c/busses/i2c-tiny-usb.c
+++ b/drivers/i2c/busses/i2c-tiny-usb.c
@@ -178,22 +178,39 @@ static int usb_read(struct i2c_adapter *adapter, int cmd,
 		    int value, int index, void *data, int len)
 {
 	struct i2c_tiny_usb *dev = (struct i2c_tiny_usb *)adapter->algo_data;
+	void *dmadata = kmalloc(len, GFP_KERNEL);
+	int ret;
+
+	if (!dmadata)
+		return -ENOMEM;
 
 	/* do control transfer */
-	return usb_control_msg(dev->usb_dev, usb_rcvctrlpipe(dev->usb_dev, 0),
+	ret = usb_control_msg(dev->usb_dev, usb_rcvctrlpipe(dev->usb_dev, 0),
 			       cmd, USB_TYPE_VENDOR | USB_RECIP_INTERFACE |
-			       USB_DIR_IN, value, index, data, len, 2000);
+			       USB_DIR_IN, value, index, dmadata, len, 2000);
+
+	memcpy(data, dmadata, len);
+	kfree(dmadata);
+	return ret;
 }
 
 static int usb_write(struct i2c_adapter *adapter, int cmd,
 		     int value, int index, void *data, int len)
 {
 	struct i2c_tiny_usb *dev = (struct i2c_tiny_usb *)adapter->algo_data;
+	void *dmadata = kmemdup(data, len, GFP_KERNEL);
+	int ret;
+
+	if (!dmadata)
+		return -ENOMEM;
 
 	/* do control transfer */
-	return usb_control_msg(dev->usb_dev, usb_sndctrlpipe(dev->usb_dev, 0),
+	ret = usb_control_msg(dev->usb_dev, usb_sndctrlpipe(dev->usb_dev, 0),
 			       cmd, USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
-			       value, index, data, len, 2000);
+			       value, index, dmadata, len, 2000);
+
+	kfree(dmadata);
+	return ret;
 }
 
 static void i2c_tiny_usb_free(struct i2c_tiny_usb *dev)
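usb_control_msg() hands its buffer to the host controller for DMA, so the buffer must come from the heap; with VMAP_STACK a caller's on-stack buffer cannot be DMA-mapped at all. The bounce-buffer shape used in both paths above, reduced to a sketch (udev, req, data and len are assumed from the surrounding code; unlike the patch, this sketch only copies out on success):

void *dmadata = kmalloc(len, GFP_KERNEL);	/* DMA-able bounce buffer */
int ret;

if (!dmadata)
	return -ENOMEM;

ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), req,
		      USB_TYPE_VENDOR | USB_DIR_IN, 0, 0,
		      dmadata, len, 2000);
if (ret >= 0)
	memcpy(data, dmadata, len);	/* copy out to the caller */
kfree(dmadata);
return ret;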
diff --git a/drivers/i2c/busses/i2c-xgene-slimpro.c b/drivers/i2c/busses/i2c-xgene-slimpro.c
index dbe7e44c9321..6ba6c83ca8f1 100644
--- a/drivers/i2c/busses/i2c-xgene-slimpro.c
+++ b/drivers/i2c/busses/i2c-xgene-slimpro.c
@@ -416,6 +416,7 @@ static int xgene_slimpro_i2c_probe(struct platform_device *pdev)
 	adapter->class = I2C_CLASS_HWMON;
 	adapter->dev.parent = &pdev->dev;
 	adapter->dev.of_node = pdev->dev.of_node;
+	ACPI_COMPANION_SET(&adapter->dev, ACPI_COMPANION(&pdev->dev));
 	i2c_set_adapdata(adapter, ctx);
 	rc = i2c_add_adapter(adapter);
 	if (rc) {
diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
index 26f7237558ba..9669ca4937b8 100644
--- a/drivers/i2c/i2c-mux.c
+++ b/drivers/i2c/i2c-mux.c
@@ -395,18 +395,20 @@ int i2c_mux_add_adapter(struct i2c_mux_core *muxc,
 	if (force_nr) {
 		priv->adap.nr = force_nr;
 		ret = i2c_add_numbered_adapter(&priv->adap);
-		dev_err(&parent->dev,
-			"failed to add mux-adapter %u as bus %u (error=%d)\n",
-			chan_id, force_nr, ret);
+		if (ret < 0) {
+			dev_err(&parent->dev,
+				"failed to add mux-adapter %u as bus %u (error=%d)\n",
+				chan_id, force_nr, ret);
+			goto err_free_priv;
+		}
 	} else {
 		ret = i2c_add_adapter(&priv->adap);
-		dev_err(&parent->dev,
-			"failed to add mux-adapter %u (error=%d)\n",
-			chan_id, ret);
-	}
-	if (ret < 0) {
-		kfree(priv);
-		return ret;
+		if (ret < 0) {
+			dev_err(&parent->dev,
+				"failed to add mux-adapter %u (error=%d)\n",
+				chan_id, ret);
+			goto err_free_priv;
+		}
 	}
 
 	WARN(sysfs_create_link(&priv->adap.dev.kobj, &muxc->dev->kobj,
@@ -422,6 +424,10 @@ int i2c_mux_add_adapter(struct i2c_mux_core *muxc,
 
 	muxc->adapter[muxc->num_adapters++] = &priv->adap;
 	return 0;
+
+err_free_priv:
+	kfree(priv);
+	return ret;
 }
 EXPORT_SYMBOL_GPL(i2c_mux_add_adapter);
 
diff --git a/drivers/i2c/muxes/i2c-mux-reg.c b/drivers/i2c/muxes/i2c-mux-reg.c
index 406d5059072c..d97031804de8 100644
--- a/drivers/i2c/muxes/i2c-mux-reg.c
+++ b/drivers/i2c/muxes/i2c-mux-reg.c
@@ -196,20 +196,25 @@ static int i2c_mux_reg_probe(struct platform_device *pdev)
 		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 		mux->data.reg_size = resource_size(res);
 		mux->data.reg = devm_ioremap_resource(&pdev->dev, res);
-		if (IS_ERR(mux->data.reg))
-			return PTR_ERR(mux->data.reg);
+		if (IS_ERR(mux->data.reg)) {
+			ret = PTR_ERR(mux->data.reg);
+			goto err_put_parent;
+		}
 	}
 
 	if (mux->data.reg_size != 4 && mux->data.reg_size != 2 &&
 	    mux->data.reg_size != 1) {
 		dev_err(&pdev->dev, "Invalid register size\n");
-		return -EINVAL;
+		ret = -EINVAL;
+		goto err_put_parent;
 	}
 
 	muxc = i2c_mux_alloc(parent, &pdev->dev, mux->data.n_values, 0, 0,
 			     i2c_mux_reg_select, NULL);
-	if (!muxc)
-		return -ENOMEM;
+	if (!muxc) {
+		ret = -ENOMEM;
+		goto err_put_parent;
+	}
 	muxc->priv = mux;
 
 	platform_set_drvdata(pdev, muxc);
@@ -223,7 +228,7 @@ static int i2c_mux_reg_probe(struct platform_device *pdev)
 
 		ret = i2c_mux_add_adapter(muxc, nr, mux->data.values[i], class);
 		if (ret)
-			goto add_adapter_failed;
+			goto err_del_mux_adapters;
 	}
 
 	dev_dbg(&pdev->dev, "%d port mux on %s adapter\n",
@@ -231,8 +236,10 @@ static int i2c_mux_reg_probe(struct platform_device *pdev)
 
 	return 0;
 
-add_adapter_failed:
+err_del_mux_adapters:
 	i2c_mux_del_adapters(muxc);
+err_put_parent:
+	i2c_put_adapter(parent);
 
 	return ret;
 }
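Both mux fixes use the kernel's goto-based unwind: every failure jumps to a label that releases exactly what has been acquired so far, with the labels ordered inversely to the acquisitions, so no error path leaks the parent adapter reference. The skeleton, with hypothetical acquire/release helpers:

static int example_probe(struct platform_device *pdev)
{
	int ret;

	ret = acquire_a();	/* hypothetical */
	if (ret)
		return ret;	/* nothing to undo yet */

	ret = acquire_b();	/* hypothetical */
	if (ret)
		goto err_release_a;

	return 0;

err_release_a:
	release_a();
	return ret;
}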
diff --git a/drivers/iio/adc/bcm_iproc_adc.c b/drivers/iio/adc/bcm_iproc_adc.c
index 21d38c8af21e..7f4f9c4150e3 100644
--- a/drivers/iio/adc/bcm_iproc_adc.c
+++ b/drivers/iio/adc/bcm_iproc_adc.c
@@ -143,7 +143,7 @@ static void iproc_adc_reg_dump(struct iio_dev *indio_dev)
143 iproc_adc_dbg_reg(dev, adc_priv, IPROC_SOFT_BYPASS_DATA); 143 iproc_adc_dbg_reg(dev, adc_priv, IPROC_SOFT_BYPASS_DATA);
144} 144}
145 145
146static irqreturn_t iproc_adc_interrupt_handler(int irq, void *data) 146static irqreturn_t iproc_adc_interrupt_thread(int irq, void *data)
147{ 147{
148 u32 channel_intr_status; 148 u32 channel_intr_status;
149 u32 intr_status; 149 u32 intr_status;
@@ -167,7 +167,7 @@ static irqreturn_t iproc_adc_interrupt_handler(int irq, void *data)
167 return IRQ_NONE; 167 return IRQ_NONE;
168} 168}
169 169
170static irqreturn_t iproc_adc_interrupt_thread(int irq, void *data) 170static irqreturn_t iproc_adc_interrupt_handler(int irq, void *data)
171{ 171{
172 irqreturn_t retval = IRQ_NONE; 172 irqreturn_t retval = IRQ_NONE;
173 struct iproc_adc_priv *adc_priv; 173 struct iproc_adc_priv *adc_priv;
@@ -181,7 +181,7 @@ static irqreturn_t iproc_adc_interrupt_thread(int irq, void *data)
181 adc_priv = iio_priv(indio_dev); 181 adc_priv = iio_priv(indio_dev);
182 182
183 regmap_read(adc_priv->regmap, IPROC_INTERRUPT_STATUS, &intr_status); 183 regmap_read(adc_priv->regmap, IPROC_INTERRUPT_STATUS, &intr_status);
184 dev_dbg(&indio_dev->dev, "iproc_adc_interrupt_thread(),INTRPT_STS:%x\n", 184 dev_dbg(&indio_dev->dev, "iproc_adc_interrupt_handler(),INTRPT_STS:%x\n",
185 intr_status); 185 intr_status);
186 186
187 intr_channels = (intr_status & IPROC_ADC_INTR_MASK) >> IPROC_ADC_INTR; 187 intr_channels = (intr_status & IPROC_ADC_INTR_MASK) >> IPROC_ADC_INTR;
@@ -566,8 +566,8 @@ static int iproc_adc_probe(struct platform_device *pdev)
566 } 566 }
567 567
568 ret = devm_request_threaded_irq(&pdev->dev, adc_priv->irqno, 568 ret = devm_request_threaded_irq(&pdev->dev, adc_priv->irqno,
569 iproc_adc_interrupt_thread,
570 iproc_adc_interrupt_handler, 569 iproc_adc_interrupt_handler,
570 iproc_adc_interrupt_thread,
571 IRQF_SHARED, "iproc-adc", indio_dev); 571 IRQF_SHARED, "iproc-adc", indio_dev);
572 if (ret) { 572 if (ret) {
573 dev_err(&pdev->dev, "request_irq error %d\n", ret); 573 dev_err(&pdev->dev, "request_irq error %d\n", ret);
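
The bcm_iproc_adc rename-and-swap above exists because devm_request_threaded_irq() takes the hard-IRQ (top-half) handler as its third argument and the threaded (bottom-half) handler as its fourth; the driver had the two functions wired into the wrong slots. A sketch of the intended split, with hypothetical demo_* names:

#include <linux/interrupt.h>

/* Hard-IRQ context: only check status and decide whether to wake the thread. */
static irqreturn_t demo_hardirq(int irq, void *data)
{
	return IRQ_WAKE_THREAD;		/* defer the slow work */
}

/* Process context: may sleep, so regmap/bus I/O belongs here. */
static irqreturn_t demo_thread(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int demo_request(struct device *dev, int irq, void *priv)
{
	return devm_request_threaded_irq(dev, irq,
					 demo_hardirq,	/* 3rd arg: top half */
					 demo_thread,	/* 4th arg: bottom half */
					 IRQF_SHARED, "demo", priv);
}
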
diff --git a/drivers/iio/adc/max9611.c b/drivers/iio/adc/max9611.c
index ec82106480e1..b0526e4b9530 100644
--- a/drivers/iio/adc/max9611.c
+++ b/drivers/iio/adc/max9611.c
@@ -438,10 +438,10 @@ static ssize_t max9611_shunt_resistor_show(struct device *dev,
438 struct max9611_dev *max9611 = iio_priv(dev_to_iio_dev(dev)); 438 struct max9611_dev *max9611 = iio_priv(dev_to_iio_dev(dev));
439 unsigned int i, r; 439 unsigned int i, r;
440 440
441 i = max9611->shunt_resistor_uohm / 1000; 441 i = max9611->shunt_resistor_uohm / 1000000;
442 r = max9611->shunt_resistor_uohm % 1000; 442 r = max9611->shunt_resistor_uohm % 1000000;
443 443
444 return sprintf(buf, "%u.%03u\n", i, r); 444 return sprintf(buf, "%u.%06u\n", i, r);
445} 445}
446 446
447static IIO_DEVICE_ATTR(in_power_shunt_resistor, 0444, 447static IIO_DEVICE_ATTR(in_power_shunt_resistor, 0444,
@@ -536,8 +536,8 @@ static int max9611_probe(struct i2c_client *client,
536 int ret; 536 int ret;
537 537
538 indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*max9611)); 538 indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*max9611));
539 if (IS_ERR(indio_dev)) 539 if (!indio_dev)
540 return PTR_ERR(indio_dev); 540 return -ENOMEM;
541 541
542 i2c_set_clientdata(client, indio_dev); 542 i2c_set_clientdata(client, indio_dev);
543 543
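
Two distinct bugs are fixed in max9611.c: the shunt value is stored in micro-ohms, so splitting it into integer and fractional ohms must divide and take the remainder by 1000000 (printing six fractional digits), and devm_iio_device_alloc() reports failure with NULL rather than an ERR_PTR. A quick userspace check of the corrected arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned int uohm = 5000;		/* a 5 milliohm shunt, in micro-ohms */
	unsigned int i = uohm / 1000000;	/* whole ohms */
	unsigned int r = uohm % 1000000;	/* leftover micro-ohms */

	printf("%u.%06u\n", i, r);	/* 0.005000 -- dividing by 1000 would print 5.000 */
	return 0;
}
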
diff --git a/drivers/iio/adc/meson_saradc.c b/drivers/iio/adc/meson_saradc.c
index dd4190b50df6..6066bbfc42fe 100644
--- a/drivers/iio/adc/meson_saradc.c
+++ b/drivers/iio/adc/meson_saradc.c
@@ -468,13 +468,13 @@ static void meson_sar_adc_unlock(struct iio_dev *indio_dev)
468static void meson_sar_adc_clear_fifo(struct iio_dev *indio_dev) 468static void meson_sar_adc_clear_fifo(struct iio_dev *indio_dev)
469{ 469{
470 struct meson_sar_adc_priv *priv = iio_priv(indio_dev); 470 struct meson_sar_adc_priv *priv = iio_priv(indio_dev);
471 int count; 471 unsigned int count, tmp;
472 472
473 for (count = 0; count < MESON_SAR_ADC_MAX_FIFO_SIZE; count++) { 473 for (count = 0; count < MESON_SAR_ADC_MAX_FIFO_SIZE; count++) {
474 if (!meson_sar_adc_get_fifo_count(indio_dev)) 474 if (!meson_sar_adc_get_fifo_count(indio_dev))
475 break; 475 break;
476 476
477 regmap_read(priv->regmap, MESON_SAR_ADC_FIFO_RD, 0); 477 regmap_read(priv->regmap, MESON_SAR_ADC_FIFO_RD, &tmp);
478 } 478 }
479} 479}
480 480
diff --git a/drivers/iio/adc/mxs-lradc-adc.c b/drivers/iio/adc/mxs-lradc-adc.c
index b0c7d8ee5cb8..6888167ca1e6 100644
--- a/drivers/iio/adc/mxs-lradc-adc.c
+++ b/drivers/iio/adc/mxs-lradc-adc.c
@@ -718,9 +718,12 @@ static int mxs_lradc_adc_probe(struct platform_device *pdev)
718 adc->dev = dev; 718 adc->dev = dev;
719 719
720 iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); 720 iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
721 if (!iores)
722 return -EINVAL;
723
721 adc->base = devm_ioremap(dev, iores->start, resource_size(iores)); 724 adc->base = devm_ioremap(dev, iores->start, resource_size(iores));
722 if (IS_ERR(adc->base)) 725 if (!adc->base)
723 return PTR_ERR(adc->base); 726 return -ENOMEM;
724 727
725 init_completion(&adc->completion); 728 init_completion(&adc->completion);
726 spin_lock_init(&adc->lock); 729 spin_lock_init(&adc->lock);
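
The mxs-lradc fix is about matching each allocator to its error convention: platform_get_resource() and devm_ioremap() both return NULL on failure, while devm_ioremap_resource() encodes the error in the pointer, so checking devm_ioremap()'s result with IS_ERR() lets a NULL slip through. A sketch of both patterns (demo_map() is illustrative, and the back-to-back calls exist only to contrast the checks):

#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/err.h>

static int demo_map(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)			/* NULL on failure, never an ERR_PTR */
		return -EINVAL;

	base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!base)			/* devm_ioremap() also returns NULL */
		return -ENOMEM;

	/* devm_ioremap_resource(), by contrast, must be checked with IS_ERR() */
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	return 0;
}
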
diff --git a/drivers/iio/adc/sun4i-gpadc-iio.c b/drivers/iio/adc/sun4i-gpadc-iio.c
index b23527309088..81d4c39e414a 100644
--- a/drivers/iio/adc/sun4i-gpadc-iio.c
+++ b/drivers/iio/adc/sun4i-gpadc-iio.c
@@ -105,6 +105,8 @@ struct sun4i_gpadc_iio {
105 bool no_irq; 105 bool no_irq;
106 /* prevents concurrent reads of temperature and ADC */ 106 /* prevents concurrent reads of temperature and ADC */
107 struct mutex mutex; 107 struct mutex mutex;
108 struct thermal_zone_device *tzd;
109 struct device *sensor_device;
108}; 110};
109 111
110#define SUN4I_GPADC_ADC_CHANNEL(_channel, _name) { \ 112#define SUN4I_GPADC_ADC_CHANNEL(_channel, _name) { \
@@ -502,7 +504,6 @@ static int sun4i_gpadc_probe_dt(struct platform_device *pdev,
502{ 504{
503 struct sun4i_gpadc_iio *info = iio_priv(indio_dev); 505 struct sun4i_gpadc_iio *info = iio_priv(indio_dev);
504 const struct of_device_id *of_dev; 506 const struct of_device_id *of_dev;
505 struct thermal_zone_device *tzd;
506 struct resource *mem; 507 struct resource *mem;
507 void __iomem *base; 508 void __iomem *base;
508 int ret; 509 int ret;
@@ -532,13 +533,14 @@ static int sun4i_gpadc_probe_dt(struct platform_device *pdev,
532 if (!IS_ENABLED(CONFIG_THERMAL_OF)) 533 if (!IS_ENABLED(CONFIG_THERMAL_OF))
533 return 0; 534 return 0;
534 535
535 tzd = devm_thermal_zone_of_sensor_register(&pdev->dev, 0, info, 536 info->sensor_device = &pdev->dev;
536 &sun4i_ts_tz_ops); 537 info->tzd = thermal_zone_of_sensor_register(info->sensor_device, 0,
537 if (IS_ERR(tzd)) 538 info, &sun4i_ts_tz_ops);
539 if (IS_ERR(info->tzd))
538 dev_err(&pdev->dev, "could not register thermal sensor: %ld\n", 540 dev_err(&pdev->dev, "could not register thermal sensor: %ld\n",
539 PTR_ERR(tzd)); 541 PTR_ERR(info->tzd));
540 542
541 return PTR_ERR_OR_ZERO(tzd); 543 return PTR_ERR_OR_ZERO(info->tzd);
542} 544}
543 545
544static int sun4i_gpadc_probe_mfd(struct platform_device *pdev, 546static int sun4i_gpadc_probe_mfd(struct platform_device *pdev,
@@ -584,15 +586,15 @@ static int sun4i_gpadc_probe_mfd(struct platform_device *pdev,
584 * of_node, and the device from this driver as third argument to 586 * of_node, and the device from this driver as third argument to
585 * return the temperature. 587 * return the temperature.
586 */ 588 */
587 struct thermal_zone_device *tzd; 589 info->sensor_device = pdev->dev.parent;
588 tzd = devm_thermal_zone_of_sensor_register(pdev->dev.parent, 0, 590 info->tzd = thermal_zone_of_sensor_register(info->sensor_device,
589 info, 591 0, info,
590 &sun4i_ts_tz_ops); 592 &sun4i_ts_tz_ops);
591 if (IS_ERR(tzd)) { 593 if (IS_ERR(info->tzd)) {
592 dev_err(&pdev->dev, 594 dev_err(&pdev->dev,
593 "could not register thermal sensor: %ld\n", 595 "could not register thermal sensor: %ld\n",
594 PTR_ERR(tzd)); 596 PTR_ERR(info->tzd));
595 return PTR_ERR(tzd); 597 return PTR_ERR(info->tzd);
596 } 598 }
597 } else { 599 } else {
598 indio_dev->num_channels = 600 indio_dev->num_channels =
@@ -688,7 +690,13 @@ static int sun4i_gpadc_remove(struct platform_device *pdev)
688 690
689 pm_runtime_put(&pdev->dev); 691 pm_runtime_put(&pdev->dev);
690 pm_runtime_disable(&pdev->dev); 692 pm_runtime_disable(&pdev->dev);
691 if (!info->no_irq && IS_ENABLED(CONFIG_THERMAL_OF)) 693
694 if (!IS_ENABLED(CONFIG_THERMAL_OF))
695 return 0;
696
697 thermal_zone_of_sensor_unregister(info->sensor_device, info->tzd);
698
699 if (!info->no_irq)
692 iio_map_array_unregister(indio_dev); 700 iio_map_array_unregister(indio_dev);
693 701
694 return 0; 702 return 0;
@@ -700,6 +708,7 @@ static const struct platform_device_id sun4i_gpadc_id[] = {
700 { "sun6i-a31-gpadc-iio", (kernel_ulong_t)&sun6i_gpadc_data }, 708 { "sun6i-a31-gpadc-iio", (kernel_ulong_t)&sun6i_gpadc_data },
701 { /* sentinel */ }, 709 { /* sentinel */ },
702}; 710};
711MODULE_DEVICE_TABLE(platform, sun4i_gpadc_id);
703 712
704static struct platform_driver sun4i_gpadc_driver = { 713static struct platform_driver sun4i_gpadc_driver = {
705 .driver = { 714 .driver = {
@@ -711,6 +720,7 @@ static struct platform_driver sun4i_gpadc_driver = {
711 .probe = sun4i_gpadc_probe, 720 .probe = sun4i_gpadc_probe,
712 .remove = sun4i_gpadc_remove, 721 .remove = sun4i_gpadc_remove,
713}; 722};
723MODULE_DEVICE_TABLE(of, sun4i_gpadc_of_id);
714 724
715module_platform_driver(sun4i_gpadc_driver); 725module_platform_driver(sun4i_gpadc_driver);
716 726
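
Switching sun4i-gpadc from devm_thermal_zone_of_sensor_register() to the manual variant shifts lifetime management onto the driver: it must remember both the device used at registration (&pdev->dev in the DT path, pdev->dev.parent in the MFD path) and the returned zone, and pass the same pair to thermal_zone_of_sensor_unregister() in remove(). A minimal sketch of that pairing, using a hypothetical demo_priv:

#include <linux/thermal.h>
#include <linux/err.h>

struct demo_priv {
	struct thermal_zone_device *tzd;
	struct device *sensor_device;	/* remembered for unregister */
};

static int demo_register(struct device *dev, struct demo_priv *priv,
			 const struct thermal_zone_of_device_ops *ops)
{
	priv->sensor_device = dev;
	priv->tzd = thermal_zone_of_sensor_register(dev, 0, priv, ops);
	return PTR_ERR_OR_ZERO(priv->tzd);
}

static void demo_unregister(struct demo_priv *priv)
{
	/* must use the exact device passed at registration time */
	thermal_zone_of_sensor_unregister(priv->sensor_device, priv->tzd);
}
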
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
index 4282ceca3d8f..6cbed7eb118a 100644
--- a/drivers/iio/adc/ti_am335x_adc.c
+++ b/drivers/iio/adc/ti_am335x_adc.c
@@ -614,7 +614,7 @@ static int tiadc_probe(struct platform_device *pdev)
614 return -EINVAL; 614 return -EINVAL;
615 } 615 }
616 616
617 indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*indio_dev)); 617 indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*adc_dev));
618 if (indio_dev == NULL) { 618 if (indio_dev == NULL) {
619 dev_err(&pdev->dev, "failed to allocate iio device\n"); 619 dev_err(&pdev->dev, "failed to allocate iio device\n");
620 return -ENOMEM; 620 return -ENOMEM;
diff --git a/drivers/iio/buffer/industrialio-buffer-dma.c b/drivers/iio/buffer/industrialio-buffer-dma.c
index dd99d273bae9..ff03324dee13 100644
--- a/drivers/iio/buffer/industrialio-buffer-dma.c
+++ b/drivers/iio/buffer/industrialio-buffer-dma.c
@@ -14,6 +14,7 @@
14#include <linux/sched.h> 14#include <linux/sched.h>
15#include <linux/poll.h> 15#include <linux/poll.h>
16#include <linux/iio/buffer.h> 16#include <linux/iio/buffer.h>
17#include <linux/iio/buffer_impl.h>
17#include <linux/iio/buffer-dma.h> 18#include <linux/iio/buffer-dma.h>
18#include <linux/dma-mapping.h> 19#include <linux/dma-mapping.h>
19#include <linux/sizes.h> 20#include <linux/sizes.h>
diff --git a/drivers/iio/buffer/industrialio-buffer-dmaengine.c b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
index 9fabed47053d..2b5a320f42c5 100644
--- a/drivers/iio/buffer/industrialio-buffer-dmaengine.c
+++ b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
@@ -14,6 +14,7 @@
14 14
15#include <linux/iio/iio.h> 15#include <linux/iio/iio.h>
16#include <linux/iio/buffer.h> 16#include <linux/iio/buffer.h>
17#include <linux/iio/buffer_impl.h>
17#include <linux/iio/buffer-dma.h> 18#include <linux/iio/buffer-dma.h>
18#include <linux/iio/buffer-dmaengine.h> 19#include <linux/iio/buffer-dmaengine.h>
19 20
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
index 96dabbd2f004..88a7c5d4e4d2 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
@@ -41,6 +41,7 @@ static const int accel_scale[] = {598, 1196, 2392, 4785};
41static const struct inv_mpu6050_reg_map reg_set_6500 = { 41static const struct inv_mpu6050_reg_map reg_set_6500 = {
42 .sample_rate_div = INV_MPU6050_REG_SAMPLE_RATE_DIV, 42 .sample_rate_div = INV_MPU6050_REG_SAMPLE_RATE_DIV,
43 .lpf = INV_MPU6050_REG_CONFIG, 43 .lpf = INV_MPU6050_REG_CONFIG,
44 .accel_lpf = INV_MPU6500_REG_ACCEL_CONFIG_2,
44 .user_ctrl = INV_MPU6050_REG_USER_CTRL, 45 .user_ctrl = INV_MPU6050_REG_USER_CTRL,
45 .fifo_en = INV_MPU6050_REG_FIFO_EN, 46 .fifo_en = INV_MPU6050_REG_FIFO_EN,
46 .gyro_config = INV_MPU6050_REG_GYRO_CONFIG, 47 .gyro_config = INV_MPU6050_REG_GYRO_CONFIG,
@@ -211,6 +212,37 @@ int inv_mpu6050_set_power_itg(struct inv_mpu6050_state *st, bool power_on)
211EXPORT_SYMBOL_GPL(inv_mpu6050_set_power_itg); 212EXPORT_SYMBOL_GPL(inv_mpu6050_set_power_itg);
212 213
213/** 214/**
215 * inv_mpu6050_set_lpf_regs() - set low pass filter registers, chip dependent
216 *
217 * MPU60xx/MPU9150 use only 1 register for accelerometer + gyroscope
218 * MPU6500 and above have a dedicated register for accelerometer
219 */
220static int inv_mpu6050_set_lpf_regs(struct inv_mpu6050_state *st,
221 enum inv_mpu6050_filter_e val)
222{
223 int result;
224
225 result = regmap_write(st->map, st->reg->lpf, val);
226 if (result)
227 return result;
228
229 switch (st->chip_type) {
230 case INV_MPU6050:
231 case INV_MPU6000:
232 case INV_MPU9150:
233 /* old chips, nothing to do */
234 result = 0;
235 break;
236 default:
237 /* set accel lpf */
238 result = regmap_write(st->map, st->reg->accel_lpf, val);
239 break;
240 }
241
242 return result;
243}
244
245/**
214 * inv_mpu6050_init_config() - Initialize hardware, disable FIFO. 246 * inv_mpu6050_init_config() - Initialize hardware, disable FIFO.
215 * 247 *
216 * Initial configuration: 248 * Initial configuration:
@@ -233,8 +265,7 @@ static int inv_mpu6050_init_config(struct iio_dev *indio_dev)
233 if (result) 265 if (result)
234 return result; 266 return result;
235 267
236 d = INV_MPU6050_FILTER_20HZ; 268 result = inv_mpu6050_set_lpf_regs(st, INV_MPU6050_FILTER_20HZ);
237 result = regmap_write(st->map, st->reg->lpf, d);
238 if (result) 269 if (result)
239 return result; 270 return result;
240 271
@@ -537,6 +568,8 @@ error_write_raw:
537 * would be aliasing. This function searches for the 568 * would be aliasing. This function searches for the
538 * correct low pass parameters based on the fifo rate, e.g., 569 * correct low pass parameters based on the fifo rate, e.g.,
539 * sampling frequency. 570 * sampling frequency.
571 *
 572 * lpf is set automatically when setting the sampling rate to avoid aliasing.
540 */ 573 */
541static int inv_mpu6050_set_lpf(struct inv_mpu6050_state *st, int rate) 574static int inv_mpu6050_set_lpf(struct inv_mpu6050_state *st, int rate)
542{ 575{
@@ -552,7 +585,7 @@ static int inv_mpu6050_set_lpf(struct inv_mpu6050_state *st, int rate)
552 while ((h < hz[i]) && (i < ARRAY_SIZE(d) - 1)) 585 while ((h < hz[i]) && (i < ARRAY_SIZE(d) - 1))
553 i++; 586 i++;
554 data = d[i]; 587 data = d[i];
555 result = regmap_write(st->map, st->reg->lpf, data); 588 result = inv_mpu6050_set_lpf_regs(st, data);
556 if (result) 589 if (result)
557 return result; 590 return result;
558 st->chip_config.lpf = data; 591 st->chip_config.lpf = data;
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
index ef13de7a2c20..953a0c09d568 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
@@ -28,6 +28,7 @@
28 * struct inv_mpu6050_reg_map - Notable registers. 28 * struct inv_mpu6050_reg_map - Notable registers.
29 * @sample_rate_div: Divider applied to gyro output rate. 29 * @sample_rate_div: Divider applied to gyro output rate.
30 * @lpf: Configures internal low pass filter. 30 * @lpf: Configures internal low pass filter.
31 * @accel_lpf: Configures accelerometer low pass filter.
31 * @user_ctrl: Enables/resets the FIFO. 32 * @user_ctrl: Enables/resets the FIFO.
32 * @fifo_en: Determines which data will appear in FIFO. 33 * @fifo_en: Determines which data will appear in FIFO.
33 * @gyro_config: gyro config register. 34 * @gyro_config: gyro config register.
@@ -47,6 +48,7 @@
47struct inv_mpu6050_reg_map { 48struct inv_mpu6050_reg_map {
48 u8 sample_rate_div; 49 u8 sample_rate_div;
49 u8 lpf; 50 u8 lpf;
51 u8 accel_lpf;
50 u8 user_ctrl; 52 u8 user_ctrl;
51 u8 fifo_en; 53 u8 fifo_en;
52 u8 gyro_config; 54 u8 gyro_config;
@@ -188,6 +190,7 @@ struct inv_mpu6050_state {
188#define INV_MPU6050_FIFO_THRESHOLD 500 190#define INV_MPU6050_FIFO_THRESHOLD 500
189 191
190/* mpu6500 registers */ 192/* mpu6500 registers */
193#define INV_MPU6500_REG_ACCEL_CONFIG_2 0x1D
191#define INV_MPU6500_REG_ACCEL_OFFSET 0x77 194#define INV_MPU6500_REG_ACCEL_OFFSET 0x77
192 195
193/* delay time in milliseconds */ 196/* delay time in milliseconds */
diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c
index 978e1592c2a3..4061fed93f1f 100644
--- a/drivers/iio/industrialio-trigger.c
+++ b/drivers/iio/industrialio-trigger.c
@@ -451,7 +451,8 @@ static ssize_t iio_trigger_write_current(struct device *dev,
451 return len; 451 return len;
452 452
453out_trigger_put: 453out_trigger_put:
454 iio_trigger_put(trig); 454 if (trig)
455 iio_trigger_put(trig);
455 return ret; 456 return ret;
456} 457}
457 458
diff --git a/drivers/iio/light/ltr501.c b/drivers/iio/light/ltr501.c
index b30e0c1c6cc4..67838edd8b37 100644
--- a/drivers/iio/light/ltr501.c
+++ b/drivers/iio/light/ltr501.c
@@ -74,9 +74,9 @@ static const int int_time_mapping[] = {100000, 50000, 200000, 400000};
74static const struct reg_field reg_field_it = 74static const struct reg_field reg_field_it =
75 REG_FIELD(LTR501_ALS_MEAS_RATE, 3, 4); 75 REG_FIELD(LTR501_ALS_MEAS_RATE, 3, 4);
76static const struct reg_field reg_field_als_intr = 76static const struct reg_field reg_field_als_intr =
77 REG_FIELD(LTR501_INTR, 0, 0);
78static const struct reg_field reg_field_ps_intr =
79 REG_FIELD(LTR501_INTR, 1, 1); 77 REG_FIELD(LTR501_INTR, 1, 1);
78static const struct reg_field reg_field_ps_intr =
79 REG_FIELD(LTR501_INTR, 0, 0);
80static const struct reg_field reg_field_als_rate = 80static const struct reg_field reg_field_als_rate =
81 REG_FIELD(LTR501_ALS_MEAS_RATE, 0, 2); 81 REG_FIELD(LTR501_ALS_MEAS_RATE, 0, 2);
82static const struct reg_field reg_field_ps_rate = 82static const struct reg_field reg_field_ps_rate =
diff --git a/drivers/iio/proximity/as3935.c b/drivers/iio/proximity/as3935.c
index ddf9bee89f77..aa4df0dcc8c9 100644
--- a/drivers/iio/proximity/as3935.c
+++ b/drivers/iio/proximity/as3935.c
@@ -40,9 +40,9 @@
40#define AS3935_AFE_PWR_BIT BIT(0) 40#define AS3935_AFE_PWR_BIT BIT(0)
41 41
42#define AS3935_INT 0x03 42#define AS3935_INT 0x03
43#define AS3935_INT_MASK 0x07 43#define AS3935_INT_MASK 0x0f
44#define AS3935_EVENT_INT BIT(3) 44#define AS3935_EVENT_INT BIT(3)
45#define AS3935_NOISE_INT BIT(1) 45#define AS3935_NOISE_INT BIT(0)
46 46
47#define AS3935_DATA 0x07 47#define AS3935_DATA 0x07
48#define AS3935_DATA_MASK 0x3F 48#define AS3935_DATA_MASK 0x3F
@@ -215,7 +215,7 @@ static irqreturn_t as3935_trigger_handler(int irq, void *private)
215 215
216 st->buffer[0] = val & AS3935_DATA_MASK; 216 st->buffer[0] = val & AS3935_DATA_MASK;
217 iio_push_to_buffers_with_timestamp(indio_dev, &st->buffer, 217 iio_push_to_buffers_with_timestamp(indio_dev, &st->buffer,
218 pf->timestamp); 218 iio_get_time_ns(indio_dev));
219err_read: 219err_read:
220 iio_trigger_notify_done(indio_dev->trig); 220 iio_trigger_notify_done(indio_dev->trig);
221 221
@@ -244,7 +244,7 @@ static void as3935_event_work(struct work_struct *work)
244 244
245 switch (val) { 245 switch (val) {
246 case AS3935_EVENT_INT: 246 case AS3935_EVENT_INT:
247 iio_trigger_poll(st->trig); 247 iio_trigger_poll_chained(st->trig);
248 break; 248 break;
249 case AS3935_NOISE_INT: 249 case AS3935_NOISE_INT:
250 dev_warn(&st->spi->dev, "noise level is too high\n"); 250 dev_warn(&st->spi->dev, "noise level is too high\n");
@@ -269,8 +269,6 @@ static irqreturn_t as3935_interrupt_handler(int irq, void *private)
269 269
270static void calibrate_as3935(struct as3935_state *st) 270static void calibrate_as3935(struct as3935_state *st)
271{ 271{
272 mutex_lock(&st->lock);
273
274 /* mask disturber interrupt bit */ 272 /* mask disturber interrupt bit */
275 as3935_write(st, AS3935_INT, BIT(5)); 273 as3935_write(st, AS3935_INT, BIT(5));
276 274
@@ -280,8 +278,6 @@ static void calibrate_as3935(struct as3935_state *st)
280 278
281 mdelay(2); 279 mdelay(2);
282 as3935_write(st, AS3935_TUNE_CAP, (st->tune_cap / TUNE_CAP_DIV)); 280 as3935_write(st, AS3935_TUNE_CAP, (st->tune_cap / TUNE_CAP_DIV));
283
284 mutex_unlock(&st->lock);
285} 281}
286 282
287#ifdef CONFIG_PM_SLEEP 283#ifdef CONFIG_PM_SLEEP
@@ -318,6 +314,8 @@ static int as3935_resume(struct device *dev)
318 val &= ~AS3935_AFE_PWR_BIT; 314 val &= ~AS3935_AFE_PWR_BIT;
319 ret = as3935_write(st, AS3935_AFE_GAIN, val); 315 ret = as3935_write(st, AS3935_AFE_GAIN, val);
320 316
317 calibrate_as3935(st);
318
321err_resume: 319err_resume:
322 mutex_unlock(&st->lock); 320 mutex_unlock(&st->lock);
323 321
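
Several of the as3935 changes follow from the code running outside hard-IRQ context: the event work kicks its trigger with iio_trigger_poll_chained(), the variant meant for threaded or workqueue callers (iio_trigger_poll() is for hard-IRQ use), and the trigger handler stamps samples with iio_get_time_ns() because pf->timestamp is not populated on this path. A sketch under those assumptions, with a hypothetical demo_state:

#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
#include <linux/iio/trigger.h>
#include <linux/iio/trigger_consumer.h>

struct demo_state {
	struct iio_trigger *trig;
	struct work_struct work;
	u32 buffer[4] __aligned(8);	/* sample plus room for the s64 timestamp */
};

static void demo_event_work(struct work_struct *work)
{
	struct demo_state *st = container_of(work, struct demo_state, work);

	/* process context, so use the chained variant */
	iio_trigger_poll_chained(st->trig);
}

static irqreturn_t demo_trigger_handler(int irq, void *private)
{
	struct iio_poll_func *pf = private;
	struct iio_dev *indio_dev = pf->indio_dev;
	struct demo_state *st = iio_priv(indio_dev);

	/* timestamp taken here; pf->timestamp is not filled on this path */
	iio_push_to_buffers_with_timestamp(indio_dev, st->buffer,
					   iio_get_time_ns(indio_dev));
	iio_trigger_notify_done(indio_dev->trig);
	return IRQ_HANDLED;
}
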
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 02971e239a18..ece6926fa2e6 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -449,12 +449,7 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
449 return ret; 449 return ret;
450 450
451 rt = (struct rt6_info *)dst; 451 rt = (struct rt6_info *)dst;
452 if (ipv6_addr_any(&fl6.saddr)) { 452 if (ipv6_addr_any(&src_in->sin6_addr)) {
453 ret = ipv6_dev_get_saddr(addr->net, ip6_dst_idev(dst)->dev,
454 &fl6.daddr, 0, &fl6.saddr);
455 if (ret)
456 goto put;
457
458 src_in->sin6_family = AF_INET6; 453 src_in->sin6_family = AF_INET6;
459 src_in->sin6_addr = fl6.saddr; 454 src_in->sin6_addr = fl6.saddr;
460 } 455 }
@@ -471,9 +466,6 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
471 466
472 *pdst = dst; 467 *pdst = dst;
473 return 0; 468 return 0;
474put:
475 dst_release(dst);
476 return ret;
477} 469}
478#else 470#else
479static int addr6_resolve(struct sockaddr_in6 *src_in, 471static int addr6_resolve(struct sockaddr_in6 *src_in,
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 1844770f3ae8..2b4d613a3474 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -1429,7 +1429,7 @@ static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
1429 primary_path->packet_life_time = 1429 primary_path->packet_life_time =
1430 cm_req_get_primary_local_ack_timeout(req_msg); 1430 cm_req_get_primary_local_ack_timeout(req_msg);
1431 primary_path->packet_life_time -= (primary_path->packet_life_time > 0); 1431 primary_path->packet_life_time -= (primary_path->packet_life_time > 0);
1432 sa_path_set_service_id(primary_path, req_msg->service_id); 1432 primary_path->service_id = req_msg->service_id;
1433 1433
1434 if (req_msg->alt_local_lid) { 1434 if (req_msg->alt_local_lid) {
1435 alt_path->dgid = req_msg->alt_local_gid; 1435 alt_path->dgid = req_msg->alt_local_gid;
@@ -1452,7 +1452,7 @@ static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
1452 alt_path->packet_life_time = 1452 alt_path->packet_life_time =
1453 cm_req_get_alt_local_ack_timeout(req_msg); 1453 cm_req_get_alt_local_ack_timeout(req_msg);
1454 alt_path->packet_life_time -= (alt_path->packet_life_time > 0); 1454 alt_path->packet_life_time -= (alt_path->packet_life_time > 0);
1455 sa_path_set_service_id(alt_path, req_msg->service_id); 1455 alt_path->service_id = req_msg->service_id;
1456 } 1456 }
1457} 1457}
1458 1458
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 91b7a2fe5a55..31bb82d8ecd7 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1140,7 +1140,7 @@ static void cma_save_ib_info(struct sockaddr *src_addr,
1140 ib->sib_pkey = path->pkey; 1140 ib->sib_pkey = path->pkey;
1141 ib->sib_flowinfo = path->flow_label; 1141 ib->sib_flowinfo = path->flow_label;
1142 memcpy(&ib->sib_addr, &path->sgid, 16); 1142 memcpy(&ib->sib_addr, &path->sgid, 16);
1143 ib->sib_sid = sa_path_get_service_id(path); 1143 ib->sib_sid = path->service_id;
1144 ib->sib_scope_id = 0; 1144 ib->sib_scope_id = 0;
1145 } else { 1145 } else {
1146 ib->sib_pkey = listen_ib->sib_pkey; 1146 ib->sib_pkey = listen_ib->sib_pkey;
@@ -1274,8 +1274,7 @@ static int cma_save_req_info(const struct ib_cm_event *ib_event,
1274 memcpy(&req->local_gid, &req_param->primary_path->sgid, 1274 memcpy(&req->local_gid, &req_param->primary_path->sgid,
1275 sizeof(req->local_gid)); 1275 sizeof(req->local_gid));
1276 req->has_gid = true; 1276 req->has_gid = true;
1277 req->service_id = 1277 req->service_id = req_param->primary_path->service_id;
1278 sa_path_get_service_id(req_param->primary_path);
1279 req->pkey = be16_to_cpu(req_param->primary_path->pkey); 1278 req->pkey = be16_to_cpu(req_param->primary_path->pkey);
1280 if (req->pkey != req_param->bth_pkey) 1279 if (req->pkey != req_param->bth_pkey)
1281 pr_warn_ratelimited("RDMA CMA: got different BTH P_Key (0x%x) and primary path P_Key (0x%x)\n" 1280 pr_warn_ratelimited("RDMA CMA: got different BTH P_Key (0x%x) and primary path P_Key (0x%x)\n"
@@ -1827,7 +1826,8 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
1827 struct rdma_route *rt; 1826 struct rdma_route *rt;
1828 const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family; 1827 const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family;
1829 struct sa_path_rec *path = ib_event->param.req_rcvd.primary_path; 1828 struct sa_path_rec *path = ib_event->param.req_rcvd.primary_path;
1830 const __be64 service_id = sa_path_get_service_id(path); 1829 const __be64 service_id =
1830 ib_event->param.req_rcvd.primary_path->service_id;
1831 int ret; 1831 int ret;
1832 1832
1833 id = rdma_create_id(listen_id->route.addr.dev_addr.net, 1833 id = rdma_create_id(listen_id->route.addr.dev_addr.net,
@@ -2345,9 +2345,8 @@ static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms,
2345 path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr)); 2345 path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
2346 path_rec.numb_path = 1; 2346 path_rec.numb_path = 1;
2347 path_rec.reversible = 1; 2347 path_rec.reversible = 1;
2348 sa_path_set_service_id(&path_rec, 2348 path_rec.service_id = rdma_get_service_id(&id_priv->id,
2349 rdma_get_service_id(&id_priv->id, 2349 cma_dst_addr(id_priv));
2350 cma_dst_addr(id_priv)));
2351 2350
2352 comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID | 2351 comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
2353 IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH | 2352 IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH |
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index cb7d372e4bdf..d92ab4eaa8f3 100644
--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -169,6 +169,16 @@ void ib_mad_cleanup(void);
169int ib_sa_init(void); 169int ib_sa_init(void);
170void ib_sa_cleanup(void); 170void ib_sa_cleanup(void);
171 171
172int ibnl_init(void);
173void ibnl_cleanup(void);
174
175/**
176 * Check if there are any listeners to the netlink group
177 * @group: the netlink group ID
178 * Returns 0 if listeners are present, or a negative value if there are none.
179 */
180int ibnl_chk_listeners(unsigned int group);
181
172int ib_nl_handle_resolve_resp(struct sk_buff *skb, 182int ib_nl_handle_resolve_resp(struct sk_buff *skb,
173 struct netlink_callback *cb); 183 struct netlink_callback *cb);
174int ib_nl_handle_set_timeout(struct sk_buff *skb, 184int ib_nl_handle_set_timeout(struct sk_buff *skb,
diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c
index b784055423c8..94931c474d41 100644
--- a/drivers/infiniband/core/netlink.c
+++ b/drivers/infiniband/core/netlink.c
@@ -37,6 +37,7 @@
37#include <net/net_namespace.h> 37#include <net/net_namespace.h>
38#include <net/sock.h> 38#include <net/sock.h>
39#include <rdma/rdma_netlink.h> 39#include <rdma/rdma_netlink.h>
40#include "core_priv.h"
40 41
41struct ibnl_client { 42struct ibnl_client {
42 struct list_head list; 43 struct list_head list;
@@ -55,7 +56,6 @@ int ibnl_chk_listeners(unsigned int group)
55 return -1; 56 return -1;
56 return 0; 57 return 0;
57} 58}
58EXPORT_SYMBOL(ibnl_chk_listeners);
59 59
60int ibnl_add_client(int index, int nops, 60int ibnl_add_client(int index, int nops,
61 const struct ibnl_client_cbs cb_table[]) 61 const struct ibnl_client_cbs cb_table[])
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index e335b09c022e..fb7aec4047c8 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -194,7 +194,7 @@ static u32 tid;
194 .field_name = "sa_path_rec:" #field 194 .field_name = "sa_path_rec:" #field
195 195
196static const struct ib_field path_rec_table[] = { 196static const struct ib_field path_rec_table[] = {
197 { PATH_REC_FIELD(ib.service_id), 197 { PATH_REC_FIELD(service_id),
198 .offset_words = 0, 198 .offset_words = 0,
199 .offset_bits = 0, 199 .offset_bits = 0,
200 .size_bits = 64 }, 200 .size_bits = 64 },
@@ -296,7 +296,7 @@ static const struct ib_field path_rec_table[] = {
296 .field_name = "sa_path_rec:" #field 296 .field_name = "sa_path_rec:" #field
297 297
298static const struct ib_field opa_path_rec_table[] = { 298static const struct ib_field opa_path_rec_table[] = {
299 { OPA_PATH_REC_FIELD(opa.service_id), 299 { OPA_PATH_REC_FIELD(service_id),
300 .offset_words = 0, 300 .offset_words = 0,
301 .offset_bits = 0, 301 .offset_bits = 0,
302 .size_bits = 64 }, 302 .size_bits = 64 },
@@ -774,7 +774,7 @@ static void ib_nl_set_path_rec_attrs(struct sk_buff *skb,
774 774
775 /* Now build the attributes */ 775 /* Now build the attributes */
776 if (comp_mask & IB_SA_PATH_REC_SERVICE_ID) { 776 if (comp_mask & IB_SA_PATH_REC_SERVICE_ID) {
777 val64 = be64_to_cpu(sa_path_get_service_id(sa_rec)); 777 val64 = be64_to_cpu(sa_rec->service_id);
778 nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SERVICE_ID, 778 nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SERVICE_ID,
779 sizeof(val64), &val64); 779 sizeof(val64), &val64);
780 } 780 }
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 3dbf811d3c51..21e60b1e2ff4 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -58,7 +58,7 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d
58 for_each_sg(umem->sg_head.sgl, sg, umem->npages, i) { 58 for_each_sg(umem->sg_head.sgl, sg, umem->npages, i) {
59 59
60 page = sg_page(sg); 60 page = sg_page(sg);
61 if (umem->writable && dirty) 61 if (!PageDirty(page) && umem->writable && dirty)
62 set_page_dirty_lock(page); 62 set_page_dirty_lock(page);
63 put_page(page); 63 put_page(page);
64 } 64 }
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index 0780b1afefa9..8c4ec564e495 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -321,11 +321,15 @@ int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem,
321 struct vm_area_struct *vma; 321 struct vm_area_struct *vma;
322 struct hstate *h; 322 struct hstate *h;
323 323
324 down_read(&mm->mmap_sem);
324 vma = find_vma(mm, ib_umem_start(umem)); 325 vma = find_vma(mm, ib_umem_start(umem));
325 if (!vma || !is_vm_hugetlb_page(vma)) 326 if (!vma || !is_vm_hugetlb_page(vma)) {
327 up_read(&mm->mmap_sem);
326 return -EINVAL; 328 return -EINVAL;
329 }
327 h = hstate_vma(vma); 330 h = hstate_vma(vma);
328 umem->page_shift = huge_page_shift(h); 331 umem->page_shift = huge_page_shift(h);
332 up_read(&mm->mmap_sem);
329 umem->hugetlb = 1; 333 umem->hugetlb = 1;
330 } else { 334 } else {
331 umem->hugetlb = 0; 335 umem->hugetlb = 0;
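
The umem_odp fix enforces a classic mm rule: find_vma() and any use of the returned vma are only safe with the task's mmap_sem held for read, and every exit path must drop it. The locking shape, as a minimal sketch:

#include <linux/mm.h>
#include <linux/hugetlb.h>

static int demo_hugetlb_shift(struct mm_struct *mm, unsigned long addr,
			      unsigned int *shift)
{
	struct vm_area_struct *vma;

	down_read(&mm->mmap_sem);		/* required before find_vma() */
	vma = find_vma(mm, addr);
	if (!vma || !is_vm_hugetlb_page(vma)) {
		up_read(&mm->mmap_sem);		/* unlock on the error path too */
		return -EINVAL;
	}
	*shift = huge_page_shift(hstate_vma(vma));	/* vma used under the lock */
	up_read(&mm->mmap_sem);
	return 0;
}
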
diff --git a/drivers/infiniband/core/uverbs_marshall.c b/drivers/infiniband/core/uverbs_marshall.c
index 8b9587fe2303..94fd989c9060 100644
--- a/drivers/infiniband/core/uverbs_marshall.c
+++ b/drivers/infiniband/core/uverbs_marshall.c
@@ -96,11 +96,11 @@ void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
96} 96}
97EXPORT_SYMBOL(ib_copy_qp_attr_to_user); 97EXPORT_SYMBOL(ib_copy_qp_attr_to_user);
98 98
99void __ib_copy_path_rec_to_user(struct ib_user_path_rec *dst, 99static void __ib_copy_path_rec_to_user(struct ib_user_path_rec *dst,
100 struct sa_path_rec *src) 100 struct sa_path_rec *src)
101{ 101{
102 memcpy(dst->dgid, src->dgid.raw, sizeof src->dgid); 102 memcpy(dst->dgid, src->dgid.raw, sizeof(src->dgid));
103 memcpy(dst->sgid, src->sgid.raw, sizeof src->sgid); 103 memcpy(dst->sgid, src->sgid.raw, sizeof(src->sgid));
104 104
105 dst->dlid = htons(ntohl(sa_path_get_dlid(src))); 105 dst->dlid = htons(ntohl(sa_path_get_dlid(src)));
106 dst->slid = htons(ntohl(sa_path_get_slid(src))); 106 dst->slid = htons(ntohl(sa_path_get_slid(src)));
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
index ebf7be8d4139..08772836fded 100644
--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -56,6 +56,10 @@
56#define BNXT_RE_MAX_SRQC_COUNT (64 * 1024) 56#define BNXT_RE_MAX_SRQC_COUNT (64 * 1024)
57#define BNXT_RE_MAX_CQ_COUNT (64 * 1024) 57#define BNXT_RE_MAX_CQ_COUNT (64 * 1024)
58 58
59#define BNXT_RE_UD_QP_HW_STALL 0x400000
60
61#define BNXT_RE_RQ_WQE_THRESHOLD 32
62
59struct bnxt_re_work { 63struct bnxt_re_work {
60 struct work_struct work; 64 struct work_struct work;
61 unsigned long event; 65 unsigned long event;
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 7ba9e699d7ab..c7bd68311d0c 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -61,6 +61,48 @@
61#include "ib_verbs.h" 61#include "ib_verbs.h"
62#include <rdma/bnxt_re-abi.h> 62#include <rdma/bnxt_re-abi.h>
63 63
64static int __from_ib_access_flags(int iflags)
65{
66 int qflags = 0;
67
68 if (iflags & IB_ACCESS_LOCAL_WRITE)
69 qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
70 if (iflags & IB_ACCESS_REMOTE_READ)
71 qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ;
72 if (iflags & IB_ACCESS_REMOTE_WRITE)
73 qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE;
74 if (iflags & IB_ACCESS_REMOTE_ATOMIC)
75 qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC;
76 if (iflags & IB_ACCESS_MW_BIND)
77 qflags |= BNXT_QPLIB_ACCESS_MW_BIND;
78 if (iflags & IB_ZERO_BASED)
79 qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED;
80 if (iflags & IB_ACCESS_ON_DEMAND)
81 qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND;
82 return qflags;
83};
84
85static enum ib_access_flags __to_ib_access_flags(int qflags)
86{
87 enum ib_access_flags iflags = 0;
88
89 if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE)
90 iflags |= IB_ACCESS_LOCAL_WRITE;
91 if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE)
92 iflags |= IB_ACCESS_REMOTE_WRITE;
93 if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ)
94 iflags |= IB_ACCESS_REMOTE_READ;
95 if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC)
96 iflags |= IB_ACCESS_REMOTE_ATOMIC;
97 if (qflags & BNXT_QPLIB_ACCESS_MW_BIND)
98 iflags |= IB_ACCESS_MW_BIND;
99 if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED)
100 iflags |= IB_ZERO_BASED;
101 if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND)
102 iflags |= IB_ACCESS_ON_DEMAND;
103 return iflags;
104};
105
64static int bnxt_re_build_sgl(struct ib_sge *ib_sg_list, 106static int bnxt_re_build_sgl(struct ib_sge *ib_sg_list,
65 struct bnxt_qplib_sge *sg_list, int num) 107 struct bnxt_qplib_sge *sg_list, int num)
66{ 108{
@@ -149,8 +191,8 @@ int bnxt_re_query_device(struct ib_device *ibdev,
149 ib_attr->max_total_mcast_qp_attach = 0; 191 ib_attr->max_total_mcast_qp_attach = 0;
150 ib_attr->max_ah = dev_attr->max_ah; 192 ib_attr->max_ah = dev_attr->max_ah;
151 193
152 ib_attr->max_fmr = dev_attr->max_fmr; 194 ib_attr->max_fmr = 0;
153 ib_attr->max_map_per_fmr = 1; /* ? */ 195 ib_attr->max_map_per_fmr = 0;
154 196
155 ib_attr->max_srq = dev_attr->max_srq; 197 ib_attr->max_srq = dev_attr->max_srq;
156 ib_attr->max_srq_wr = dev_attr->max_srq_wqes; 198 ib_attr->max_srq_wr = dev_attr->max_srq_wqes;
@@ -410,6 +452,158 @@ enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
410 return IB_LINK_LAYER_ETHERNET; 452 return IB_LINK_LAYER_ETHERNET;
411} 453}
412 454
455#define BNXT_RE_FENCE_PBL_SIZE DIV_ROUND_UP(BNXT_RE_FENCE_BYTES, PAGE_SIZE)
456
457static void bnxt_re_create_fence_wqe(struct bnxt_re_pd *pd)
458{
459 struct bnxt_re_fence_data *fence = &pd->fence;
460 struct ib_mr *ib_mr = &fence->mr->ib_mr;
461 struct bnxt_qplib_swqe *wqe = &fence->bind_wqe;
462
463 memset(wqe, 0, sizeof(*wqe));
464 wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW;
465 wqe->wr_id = BNXT_QPLIB_FENCE_WRID;
466 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
467 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
468 wqe->bind.zero_based = false;
469 wqe->bind.parent_l_key = ib_mr->lkey;
470 wqe->bind.va = (u64)(unsigned long)fence->va;
471 wqe->bind.length = fence->size;
472 wqe->bind.access_cntl = __from_ib_access_flags(IB_ACCESS_REMOTE_READ);
473 wqe->bind.mw_type = SQ_BIND_MW_TYPE_TYPE1;
474
475 /* Save the initial rkey in fence structure for now;
476 * wqe->bind.r_key will be set at (re)bind time.
477 */
478 fence->bind_rkey = ib_inc_rkey(fence->mw->rkey);
479}
480
481static int bnxt_re_bind_fence_mw(struct bnxt_qplib_qp *qplib_qp)
482{
483 struct bnxt_re_qp *qp = container_of(qplib_qp, struct bnxt_re_qp,
484 qplib_qp);
485 struct ib_pd *ib_pd = qp->ib_qp.pd;
486 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
487 struct bnxt_re_fence_data *fence = &pd->fence;
488 struct bnxt_qplib_swqe *fence_wqe = &fence->bind_wqe;
489 struct bnxt_qplib_swqe wqe;
490 int rc;
491
492 memcpy(&wqe, fence_wqe, sizeof(wqe));
493 wqe.bind.r_key = fence->bind_rkey;
494 fence->bind_rkey = ib_inc_rkey(fence->bind_rkey);
495
496 dev_dbg(rdev_to_dev(qp->rdev),
497 "Posting bind fence-WQE: rkey: %#x QP: %d PD: %p\n",
498 wqe.bind.r_key, qp->qplib_qp.id, pd);
499 rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
500 if (rc) {
501 dev_err(rdev_to_dev(qp->rdev), "Failed to bind fence-WQE\n");
502 return rc;
503 }
504 bnxt_qplib_post_send_db(&qp->qplib_qp);
505
506 return rc;
507}
508
509static void bnxt_re_destroy_fence_mr(struct bnxt_re_pd *pd)
510{
511 struct bnxt_re_fence_data *fence = &pd->fence;
512 struct bnxt_re_dev *rdev = pd->rdev;
513 struct device *dev = &rdev->en_dev->pdev->dev;
514 struct bnxt_re_mr *mr = fence->mr;
515
516 if (fence->mw) {
517 bnxt_re_dealloc_mw(fence->mw);
518 fence->mw = NULL;
519 }
520 if (mr) {
521 if (mr->ib_mr.rkey)
522 bnxt_qplib_dereg_mrw(&rdev->qplib_res, &mr->qplib_mr,
523 true);
524 if (mr->ib_mr.lkey)
525 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
526 kfree(mr);
527 fence->mr = NULL;
528 }
529 if (fence->dma_addr) {
530 dma_unmap_single(dev, fence->dma_addr, BNXT_RE_FENCE_BYTES,
531 DMA_BIDIRECTIONAL);
532 fence->dma_addr = 0;
533 }
534}
535
536static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
537{
538 int mr_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_MW_BIND;
539 struct bnxt_re_fence_data *fence = &pd->fence;
540 struct bnxt_re_dev *rdev = pd->rdev;
541 struct device *dev = &rdev->en_dev->pdev->dev;
542 struct bnxt_re_mr *mr = NULL;
543 dma_addr_t dma_addr = 0;
544 struct ib_mw *mw;
545 u64 pbl_tbl;
546 int rc;
547
548 dma_addr = dma_map_single(dev, fence->va, BNXT_RE_FENCE_BYTES,
549 DMA_BIDIRECTIONAL);
550 rc = dma_mapping_error(dev, dma_addr);
551 if (rc) {
552 dev_err(rdev_to_dev(rdev), "Failed to dma-map fence-MR-mem\n");
553 rc = -EIO;
554 fence->dma_addr = 0;
555 goto fail;
556 }
557 fence->dma_addr = dma_addr;
558
559 /* Allocate a MR */
560 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
561 if (!mr) {
562 rc = -ENOMEM;
563 goto fail;
564 }
565 fence->mr = mr;
566 mr->rdev = rdev;
567 mr->qplib_mr.pd = &pd->qplib_pd;
568 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
569 mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
570 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
571 if (rc) {
572 dev_err(rdev_to_dev(rdev), "Failed to alloc fence-HW-MR\n");
573 goto fail;
574 }
575
576 /* Register MR */
577 mr->ib_mr.lkey = mr->qplib_mr.lkey;
578 mr->qplib_mr.va = (u64)(unsigned long)fence->va;
579 mr->qplib_mr.total_size = BNXT_RE_FENCE_BYTES;
580 pbl_tbl = dma_addr;
581 rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl_tbl,
582 BNXT_RE_FENCE_PBL_SIZE, false);
583 if (rc) {
584 dev_err(rdev_to_dev(rdev), "Failed to register fence-MR\n");
585 goto fail;
586 }
587 mr->ib_mr.rkey = mr->qplib_mr.rkey;
588
589 /* Create a fence MW only for kernel consumers */
590 mw = bnxt_re_alloc_mw(&pd->ib_pd, IB_MW_TYPE_1, NULL);
591 if (!mw) {
592 dev_err(rdev_to_dev(rdev),
593 "Failed to create fence-MW for PD: %p\n", pd);
594 rc = -EINVAL;
595 goto fail;
596 }
597 fence->mw = mw;
598
599 bnxt_re_create_fence_wqe(pd);
600 return 0;
601
602fail:
603 bnxt_re_destroy_fence_mr(pd);
604 return rc;
605}
606
413/* Protection Domains */ 607/* Protection Domains */
414int bnxt_re_dealloc_pd(struct ib_pd *ib_pd) 608int bnxt_re_dealloc_pd(struct ib_pd *ib_pd)
415{ 609{
@@ -417,6 +611,7 @@ int bnxt_re_dealloc_pd(struct ib_pd *ib_pd)
417 struct bnxt_re_dev *rdev = pd->rdev; 611 struct bnxt_re_dev *rdev = pd->rdev;
418 int rc; 612 int rc;
419 613
614 bnxt_re_destroy_fence_mr(pd);
420 if (ib_pd->uobject && pd->dpi.dbr) { 615 if (ib_pd->uobject && pd->dpi.dbr) {
421 struct ib_ucontext *ib_uctx = ib_pd->uobject->context; 616 struct ib_ucontext *ib_uctx = ib_pd->uobject->context;
422 struct bnxt_re_ucontext *ucntx; 617 struct bnxt_re_ucontext *ucntx;
@@ -498,6 +693,10 @@ struct ib_pd *bnxt_re_alloc_pd(struct ib_device *ibdev,
498 } 693 }
499 } 694 }
500 695
696 if (!udata)
697 if (bnxt_re_create_fence_mr(pd))
698 dev_warn(rdev_to_dev(rdev),
699 "Failed to create Fence-MR\n");
501 return &pd->ib_pd; 700 return &pd->ib_pd;
502dbfail: 701dbfail:
503 (void)bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl, 702 (void)bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
@@ -849,12 +1048,16 @@ static struct bnxt_re_qp *bnxt_re_create_shadow_qp
849 /* Shadow QP SQ depth should be same as QP1 RQ depth */ 1048 /* Shadow QP SQ depth should be same as QP1 RQ depth */
850 qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe; 1049 qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe;
851 qp->qplib_qp.sq.max_sge = 2; 1050 qp->qplib_qp.sq.max_sge = 2;
1051 /* Q full delta can be 1 since it is internal QP */
1052 qp->qplib_qp.sq.q_full_delta = 1;
852 1053
853 qp->qplib_qp.scq = qp1_qp->scq; 1054 qp->qplib_qp.scq = qp1_qp->scq;
854 qp->qplib_qp.rcq = qp1_qp->rcq; 1055 qp->qplib_qp.rcq = qp1_qp->rcq;
855 1056
856 qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe; 1057 qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe;
857 qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge; 1058 qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge;
1059 /* Q full delta can be 1 since it is internal QP */
1060 qp->qplib_qp.rq.q_full_delta = 1;
858 1061
859 qp->qplib_qp.mtu = qp1_qp->mtu; 1062 qp->qplib_qp.mtu = qp1_qp->mtu;
860 1063
@@ -917,10 +1120,6 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
917 qp->qplib_qp.sig_type = ((qp_init_attr->sq_sig_type == 1120 qp->qplib_qp.sig_type = ((qp_init_attr->sq_sig_type ==
918 IB_SIGNAL_ALL_WR) ? true : false); 1121 IB_SIGNAL_ALL_WR) ? true : false);
919 1122
920 entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr + 1);
921 qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
922 dev_attr->max_qp_wqes + 1);
923
924 qp->qplib_qp.sq.max_sge = qp_init_attr->cap.max_send_sge; 1123 qp->qplib_qp.sq.max_sge = qp_init_attr->cap.max_send_sge;
925 if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges) 1124 if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges)
926 qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges; 1125 qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges;
@@ -959,6 +1158,9 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
959 qp->qplib_qp.rq.max_wqe = min_t(u32, entries, 1158 qp->qplib_qp.rq.max_wqe = min_t(u32, entries,
960 dev_attr->max_qp_wqes + 1); 1159 dev_attr->max_qp_wqes + 1);
961 1160
1161 qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
1162 qp_init_attr->cap.max_recv_wr;
1163
962 qp->qplib_qp.rq.max_sge = qp_init_attr->cap.max_recv_sge; 1164 qp->qplib_qp.rq.max_sge = qp_init_attr->cap.max_recv_sge;
963 if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges) 1165 if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges)
964 qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges; 1166 qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
@@ -967,6 +1169,12 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
967 qp->qplib_qp.mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu)); 1169 qp->qplib_qp.mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
968 1170
969 if (qp_init_attr->qp_type == IB_QPT_GSI) { 1171 if (qp_init_attr->qp_type == IB_QPT_GSI) {
1172 /* Allocate 1 more than what's provided */
1173 entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr + 1);
1174 qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
1175 dev_attr->max_qp_wqes + 1);
1176 qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
1177 qp_init_attr->cap.max_send_wr;
970 qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges; 1178 qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
971 if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges) 1179 if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges)
972 qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges; 1180 qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
@@ -1006,6 +1214,22 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
1006 } 1214 }
1007 1215
1008 } else { 1216 } else {
1217 /* Allocate 128 + 1 more than what's provided */
1218 entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr +
1219 BNXT_QPLIB_RESERVED_QP_WRS + 1);
1220 qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
1221 dev_attr->max_qp_wqes +
1222 BNXT_QPLIB_RESERVED_QP_WRS + 1);
1223 qp->qplib_qp.sq.q_full_delta = BNXT_QPLIB_RESERVED_QP_WRS + 1;
1224
1225 /*
 1226 * Reserve one slot for the phantom WQE. The application may
 1227 * post one extra entry in this case, but allowing it avoids
 1228 * an unexpected queue-full condition.
1229 */
1230
1231 qp->qplib_qp.sq.q_full_delta -= 1;
1232
1009 qp->qplib_qp.max_rd_atomic = dev_attr->max_qp_rd_atom; 1233 qp->qplib_qp.max_rd_atomic = dev_attr->max_qp_rd_atom;
1010 qp->qplib_qp.max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom; 1234 qp->qplib_qp.max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom;
1011 if (udata) { 1235 if (udata) {
@@ -1025,6 +1249,7 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
1025 1249
1026 qp->ib_qp.qp_num = qp->qplib_qp.id; 1250 qp->ib_qp.qp_num = qp->qplib_qp.id;
1027 spin_lock_init(&qp->sq_lock); 1251 spin_lock_init(&qp->sq_lock);
1252 spin_lock_init(&qp->rq_lock);
1028 1253
1029 if (udata) { 1254 if (udata) {
1030 struct bnxt_re_qp_resp resp; 1255 struct bnxt_re_qp_resp resp;
@@ -1129,48 +1354,6 @@ static enum ib_mtu __to_ib_mtu(u32 mtu)
1129 } 1354 }
1130} 1355}
1131 1356
1132static int __from_ib_access_flags(int iflags)
1133{
1134 int qflags = 0;
1135
1136 if (iflags & IB_ACCESS_LOCAL_WRITE)
1137 qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
1138 if (iflags & IB_ACCESS_REMOTE_READ)
1139 qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ;
1140 if (iflags & IB_ACCESS_REMOTE_WRITE)
1141 qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE;
1142 if (iflags & IB_ACCESS_REMOTE_ATOMIC)
1143 qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC;
1144 if (iflags & IB_ACCESS_MW_BIND)
1145 qflags |= BNXT_QPLIB_ACCESS_MW_BIND;
1146 if (iflags & IB_ZERO_BASED)
1147 qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED;
1148 if (iflags & IB_ACCESS_ON_DEMAND)
1149 qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND;
1150 return qflags;
1151};
1152
1153static enum ib_access_flags __to_ib_access_flags(int qflags)
1154{
1155 enum ib_access_flags iflags = 0;
1156
1157 if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE)
1158 iflags |= IB_ACCESS_LOCAL_WRITE;
1159 if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE)
1160 iflags |= IB_ACCESS_REMOTE_WRITE;
1161 if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ)
1162 iflags |= IB_ACCESS_REMOTE_READ;
1163 if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC)
1164 iflags |= IB_ACCESS_REMOTE_ATOMIC;
1165 if (qflags & BNXT_QPLIB_ACCESS_MW_BIND)
1166 iflags |= IB_ACCESS_MW_BIND;
1167 if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED)
1168 iflags |= IB_ZERO_BASED;
1169 if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND)
1170 iflags |= IB_ACCESS_ON_DEMAND;
1171 return iflags;
1172};
1173
1174static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev, 1357static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev,
1175 struct bnxt_re_qp *qp1_qp, 1358 struct bnxt_re_qp *qp1_qp,
1176 int qp_attr_mask) 1359 int qp_attr_mask)
@@ -1378,11 +1561,21 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
1378 entries = roundup_pow_of_two(qp_attr->cap.max_send_wr); 1561 entries = roundup_pow_of_two(qp_attr->cap.max_send_wr);
1379 qp->qplib_qp.sq.max_wqe = min_t(u32, entries, 1562 qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
1380 dev_attr->max_qp_wqes + 1); 1563 dev_attr->max_qp_wqes + 1);
1564 qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
1565 qp_attr->cap.max_send_wr;
1566 /*
 1567 * Reserve one slot for the phantom WQE. Some applications may
 1568 * post one extra entry in this case; allowing it avoids
 1569 * an unexpected queue-full condition.
1570 */
1571 qp->qplib_qp.sq.q_full_delta -= 1;
1381 qp->qplib_qp.sq.max_sge = qp_attr->cap.max_send_sge; 1572 qp->qplib_qp.sq.max_sge = qp_attr->cap.max_send_sge;
1382 if (qp->qplib_qp.rq.max_wqe) { 1573 if (qp->qplib_qp.rq.max_wqe) {
1383 entries = roundup_pow_of_two(qp_attr->cap.max_recv_wr); 1574 entries = roundup_pow_of_two(qp_attr->cap.max_recv_wr);
1384 qp->qplib_qp.rq.max_wqe = 1575 qp->qplib_qp.rq.max_wqe =
1385 min_t(u32, entries, dev_attr->max_qp_wqes + 1); 1576 min_t(u32, entries, dev_attr->max_qp_wqes + 1);
1577 qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
1578 qp_attr->cap.max_recv_wr;
1386 qp->qplib_qp.rq.max_sge = qp_attr->cap.max_recv_sge; 1579 qp->qplib_qp.rq.max_sge = qp_attr->cap.max_recv_sge;
1387 } else { 1580 } else {
1388 /* SRQ was used prior, just ignore the RQ caps */ 1581 /* SRQ was used prior, just ignore the RQ caps */
@@ -1883,6 +2076,22 @@ static int bnxt_re_copy_wr_payload(struct bnxt_re_dev *rdev,
1883 return payload_sz; 2076 return payload_sz;
1884} 2077}
1885 2078
2079static void bnxt_ud_qp_hw_stall_workaround(struct bnxt_re_qp *qp)
2080{
2081 if ((qp->ib_qp.qp_type == IB_QPT_UD ||
2082 qp->ib_qp.qp_type == IB_QPT_GSI ||
2083 qp->ib_qp.qp_type == IB_QPT_RAW_ETHERTYPE) &&
2084 qp->qplib_qp.wqe_cnt == BNXT_RE_UD_QP_HW_STALL) {
2085 int qp_attr_mask;
2086 struct ib_qp_attr qp_attr;
2087
2088 qp_attr_mask = IB_QP_STATE;
2089 qp_attr.qp_state = IB_QPS_RTS;
2090 bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, qp_attr_mask, NULL);
2091 qp->qplib_qp.wqe_cnt = 0;
2092 }
2093}
2094
1886static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev, 2095static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev,
1887 struct bnxt_re_qp *qp, 2096 struct bnxt_re_qp *qp,
1888 struct ib_send_wr *wr) 2097 struct ib_send_wr *wr)
@@ -1928,6 +2137,7 @@ bad:
1928 wr = wr->next; 2137 wr = wr->next;
1929 } 2138 }
1930 bnxt_qplib_post_send_db(&qp->qplib_qp); 2139 bnxt_qplib_post_send_db(&qp->qplib_qp);
2140 bnxt_ud_qp_hw_stall_workaround(qp);
1931 spin_unlock_irqrestore(&qp->sq_lock, flags); 2141 spin_unlock_irqrestore(&qp->sq_lock, flags);
1932 return rc; 2142 return rc;
1933} 2143}
@@ -2024,6 +2234,7 @@ bad:
2024 wr = wr->next; 2234 wr = wr->next;
2025 } 2235 }
2026 bnxt_qplib_post_send_db(&qp->qplib_qp); 2236 bnxt_qplib_post_send_db(&qp->qplib_qp);
2237 bnxt_ud_qp_hw_stall_workaround(qp);
2027 spin_unlock_irqrestore(&qp->sq_lock, flags); 2238 spin_unlock_irqrestore(&qp->sq_lock, flags);
2028 2239
2029 return rc; 2240 return rc;
@@ -2071,7 +2282,10 @@ int bnxt_re_post_recv(struct ib_qp *ib_qp, struct ib_recv_wr *wr,
2071 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp); 2282 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2072 struct bnxt_qplib_swqe wqe; 2283 struct bnxt_qplib_swqe wqe;
2073 int rc = 0, payload_sz = 0; 2284 int rc = 0, payload_sz = 0;
2285 unsigned long flags;
2286 u32 count = 0;
2074 2287
2288 spin_lock_irqsave(&qp->rq_lock, flags);
2075 while (wr) { 2289 while (wr) {
2076 /* House keeping */ 2290 /* House keeping */
2077 memset(&wqe, 0, sizeof(wqe)); 2291 memset(&wqe, 0, sizeof(wqe));
@@ -2100,9 +2314,21 @@ int bnxt_re_post_recv(struct ib_qp *ib_qp, struct ib_recv_wr *wr,
2100 *bad_wr = wr; 2314 *bad_wr = wr;
2101 break; 2315 break;
2102 } 2316 }
2317
 2318 /* Ring the DB once the number of posted RQEs reaches the threshold */
2319 if (++count >= BNXT_RE_RQ_WQE_THRESHOLD) {
2320 bnxt_qplib_post_recv_db(&qp->qplib_qp);
2321 count = 0;
2322 }
2323
2103 wr = wr->next; 2324 wr = wr->next;
2104 } 2325 }
2105 bnxt_qplib_post_recv_db(&qp->qplib_qp); 2326
2327 if (count)
2328 bnxt_qplib_post_recv_db(&qp->qplib_qp);
2329
2330 spin_unlock_irqrestore(&qp->rq_lock, flags);
2331
2106 return rc; 2332 return rc;
2107} 2333}
2108 2334
@@ -2643,12 +2869,36 @@ static void bnxt_re_process_res_ud_wc(struct ib_wc *wc,
2643 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM; 2869 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
2644} 2870}
2645 2871
2872static int send_phantom_wqe(struct bnxt_re_qp *qp)
2873{
2874 struct bnxt_qplib_qp *lib_qp = &qp->qplib_qp;
2875 unsigned long flags;
2876 int rc = 0;
2877
2878 spin_lock_irqsave(&qp->sq_lock, flags);
2879
2880 rc = bnxt_re_bind_fence_mw(lib_qp);
2881 if (!rc) {
2882 lib_qp->sq.phantom_wqe_cnt++;
2883 dev_dbg(&lib_qp->sq.hwq.pdev->dev,
2884 "qp %#x sq->prod %#x sw_prod %#x phantom_wqe_cnt %d\n",
2885 lib_qp->id, lib_qp->sq.hwq.prod,
2886 HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq),
2887 lib_qp->sq.phantom_wqe_cnt);
2888 }
2889
2890 spin_unlock_irqrestore(&qp->sq_lock, flags);
2891 return rc;
2892}
2893
2646int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc) 2894int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
2647{ 2895{
2648 struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq); 2896 struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
2649 struct bnxt_re_qp *qp; 2897 struct bnxt_re_qp *qp;
2650 struct bnxt_qplib_cqe *cqe; 2898 struct bnxt_qplib_cqe *cqe;
2651 int i, ncqe, budget; 2899 int i, ncqe, budget;
2900 struct bnxt_qplib_q *sq;
2901 struct bnxt_qplib_qp *lib_qp;
2652 u32 tbl_idx; 2902 u32 tbl_idx;
2653 struct bnxt_re_sqp_entries *sqp_entry = NULL; 2903 struct bnxt_re_sqp_entries *sqp_entry = NULL;
2654 unsigned long flags; 2904 unsigned long flags;
@@ -2661,7 +2911,21 @@ int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
2661 } 2911 }
2662 cqe = &cq->cql[0]; 2912 cqe = &cq->cql[0];
2663 while (budget) { 2913 while (budget) {
2664 ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget); 2914 lib_qp = NULL;
2915 ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget, &lib_qp);
2916 if (lib_qp) {
2917 sq = &lib_qp->sq;
2918 if (sq->send_phantom) {
2919 qp = container_of(lib_qp,
2920 struct bnxt_re_qp, qplib_qp);
2921 if (send_phantom_wqe(qp) == -ENOMEM)
2922 dev_err(rdev_to_dev(cq->rdev),
2923 "Phantom failed! Scheduled to send again\n");
2924 else
2925 sq->send_phantom = false;
2926 }
2927 }
2928
2665 if (!ncqe) 2929 if (!ncqe)
2666 break; 2930 break;
2667 2931
@@ -2822,6 +3086,12 @@ int bnxt_re_dereg_mr(struct ib_mr *ib_mr)
2822 struct bnxt_re_dev *rdev = mr->rdev; 3086 struct bnxt_re_dev *rdev = mr->rdev;
2823 int rc; 3087 int rc;
2824 3088
3089 rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3090 if (rc) {
3091 dev_err(rdev_to_dev(rdev), "Dereg MR failed: %#x\n", rc);
3092 return rc;
3093 }
3094
2825 if (mr->npages && mr->pages) { 3095 if (mr->npages && mr->pages) {
2826 rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res, 3096 rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res,
2827 &mr->qplib_frpl); 3097 &mr->qplib_frpl);
@@ -2829,8 +3099,6 @@ int bnxt_re_dereg_mr(struct ib_mr *ib_mr)
2829 mr->npages = 0; 3099 mr->npages = 0;
2830 mr->pages = NULL; 3100 mr->pages = NULL;
2831 } 3101 }
2832 rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
2833
2834 if (!IS_ERR_OR_NULL(mr->ib_umem)) 3102 if (!IS_ERR_OR_NULL(mr->ib_umem))
2835 ib_umem_release(mr->ib_umem); 3103 ib_umem_release(mr->ib_umem);
2836 3104
@@ -2914,97 +3182,52 @@ fail:
2914 return ERR_PTR(rc); 3182 return ERR_PTR(rc);
2915} 3183}
2916 3184
2917/* Fast Memory Regions */ 3185struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
2918struct ib_fmr *bnxt_re_alloc_fmr(struct ib_pd *ib_pd, int mr_access_flags, 3186 struct ib_udata *udata)
2919 struct ib_fmr_attr *fmr_attr)
2920{ 3187{
2921 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd); 3188 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
2922 struct bnxt_re_dev *rdev = pd->rdev; 3189 struct bnxt_re_dev *rdev = pd->rdev;
2923 struct bnxt_re_fmr *fmr; 3190 struct bnxt_re_mw *mw;
2924 int rc; 3191 int rc;
2925 3192
2926 if (fmr_attr->max_pages > MAX_PBL_LVL_2_PGS || 3193 mw = kzalloc(sizeof(*mw), GFP_KERNEL);
2927 fmr_attr->max_maps > rdev->dev_attr.max_map_per_fmr) { 3194 if (!mw)
2928 dev_err(rdev_to_dev(rdev), "Allocate FMR exceeded Max limit");
2929 return ERR_PTR(-ENOMEM); 3195 return ERR_PTR(-ENOMEM);
2930 } 3196 mw->rdev = rdev;
2931 fmr = kzalloc(sizeof(*fmr), GFP_KERNEL); 3197 mw->qplib_mw.pd = &pd->qplib_pd;
2932 if (!fmr)
2933 return ERR_PTR(-ENOMEM);
2934
2935 fmr->rdev = rdev;
2936 fmr->qplib_fmr.pd = &pd->qplib_pd;
2937 fmr->qplib_fmr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
2938 3198
2939 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &fmr->qplib_fmr); 3199 mw->qplib_mw.type = (type == IB_MW_TYPE_1 ?
2940 if (rc) 3200 CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1 :
3201 CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B);
3202 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mw->qplib_mw);
3203 if (rc) {
3204 dev_err(rdev_to_dev(rdev), "Allocate MW failed!");
2941 goto fail; 3205 goto fail;
3206 }
3207 mw->ib_mw.rkey = mw->qplib_mw.rkey;
2942 3208
2943 fmr->qplib_fmr.flags = __from_ib_access_flags(mr_access_flags); 3209 atomic_inc(&rdev->mw_count);
2944 fmr->ib_fmr.lkey = fmr->qplib_fmr.lkey; 3210 return &mw->ib_mw;
2945 fmr->ib_fmr.rkey = fmr->ib_fmr.lkey;
2946 3211
2947 atomic_inc(&rdev->mr_count);
2948 return &fmr->ib_fmr;
2949fail: 3212fail:
2950 kfree(fmr); 3213 kfree(mw);
2951 return ERR_PTR(rc); 3214 return ERR_PTR(rc);
2952} 3215}
2953 3216
2954int bnxt_re_map_phys_fmr(struct ib_fmr *ib_fmr, u64 *page_list, int list_len, 3217int bnxt_re_dealloc_mw(struct ib_mw *ib_mw)
2955 u64 iova)
2956{ 3218{
2957 struct bnxt_re_fmr *fmr = container_of(ib_fmr, struct bnxt_re_fmr, 3219 struct bnxt_re_mw *mw = container_of(ib_mw, struct bnxt_re_mw, ib_mw);
2958 ib_fmr); 3220 struct bnxt_re_dev *rdev = mw->rdev;
2959 struct bnxt_re_dev *rdev = fmr->rdev;
2960 int rc; 3221 int rc;
2961 3222
2962 fmr->qplib_fmr.va = iova; 3223 rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mw->qplib_mw);
2963 fmr->qplib_fmr.total_size = list_len * PAGE_SIZE; 3224 if (rc) {
2964 3225 dev_err(rdev_to_dev(rdev), "Free MW failed: %#x\n", rc);
2965 rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &fmr->qplib_fmr, page_list, 3226 return rc;
2966 list_len, true);
2967 if (rc)
2968 dev_err(rdev_to_dev(rdev), "Failed to map FMR for lkey = 0x%x!",
2969 fmr->ib_fmr.lkey);
2970 return rc;
2971}
2972
2973int bnxt_re_unmap_fmr(struct list_head *fmr_list)
2974{
2975 struct bnxt_re_dev *rdev;
2976 struct bnxt_re_fmr *fmr;
2977 struct ib_fmr *ib_fmr;
2978 int rc = 0;
2979
2980 /* Validate each FMRs inside the fmr_list */
2981 list_for_each_entry(ib_fmr, fmr_list, list) {
2982 fmr = container_of(ib_fmr, struct bnxt_re_fmr, ib_fmr);
2983 rdev = fmr->rdev;
2984
2985 if (rdev) {
2986 rc = bnxt_qplib_dereg_mrw(&rdev->qplib_res,
2987 &fmr->qplib_fmr, true);
2988 if (rc)
2989 break;
2990 }
2991 } 3227 }
2992 return rc;
2993}
2994
2995int bnxt_re_dealloc_fmr(struct ib_fmr *ib_fmr)
2996{
2997 struct bnxt_re_fmr *fmr = container_of(ib_fmr, struct bnxt_re_fmr,
2998 ib_fmr);
2999 struct bnxt_re_dev *rdev = fmr->rdev;
3000 int rc;
3001 3228
3002 rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &fmr->qplib_fmr); 3229 kfree(mw);
3003 if (rc) 3230 atomic_dec(&rdev->mw_count);
3004 dev_err(rdev_to_dev(rdev), "Failed to free FMR");
3005
3006 kfree(fmr);
3007 atomic_dec(&rdev->mr_count);
3008 return rc; 3231 return rc;
3009} 3232}
3010 3233
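
The FMR verbs are replaced wholesale by memory-window alloc/dealloc; the only branching left is the type-1 versus type-2B flag selection. A tiny sketch of that mapping, using illustrative constants rather than the real CMDQ values:

#include <assert.h>

/* Illustrative stand-ins for the IB and CMDQ constants. */
enum mw_type   { MW_TYPE_1 = 1, MW_TYPE_2 = 2 };
enum cmdq_flag { CMDQ_MW_TYPE1 = 0x2, CMDQ_MW_TYPE2B = 0x3 };

static enum cmdq_flag mw_flags(enum mw_type type)
{
    /* Type-2B is the variant bound to a QP number, hence the 'B'. */
    return type == MW_TYPE_1 ? CMDQ_MW_TYPE1 : CMDQ_MW_TYPE2B;
}

int main(void)
{
    assert(mw_flags(MW_TYPE_1) == CMDQ_MW_TYPE1);
    assert(mw_flags(MW_TYPE_2) == CMDQ_MW_TYPE2B);
    return 0;
}
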
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
index 5c3d71765454..6c160f6a5398 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
@@ -44,11 +44,23 @@ struct bnxt_re_gid_ctx {
44 u32 refcnt; 44 u32 refcnt;
45}; 45};
46 46
47#define BNXT_RE_FENCE_BYTES 64
48struct bnxt_re_fence_data {
49 u32 size;
50 u8 va[BNXT_RE_FENCE_BYTES];
51 dma_addr_t dma_addr;
52 struct bnxt_re_mr *mr;
53 struct ib_mw *mw;
54 struct bnxt_qplib_swqe bind_wqe;
55 u32 bind_rkey;
56};
57
47struct bnxt_re_pd { 58struct bnxt_re_pd {
48 struct bnxt_re_dev *rdev; 59 struct bnxt_re_dev *rdev;
49 struct ib_pd ib_pd; 60 struct ib_pd ib_pd;
50 struct bnxt_qplib_pd qplib_pd; 61 struct bnxt_qplib_pd qplib_pd;
51 struct bnxt_qplib_dpi dpi; 62 struct bnxt_qplib_dpi dpi;
63 struct bnxt_re_fence_data fence;
52}; 64};
53 65
54struct bnxt_re_ah { 66struct bnxt_re_ah {
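
Each PD now carries the fence state needed for the workaround sequence: a small DMA buffer, an MR/MW pair over it, and a bind WQE prebuilt once so the hot path only has to copy it into the SQ. A sketch of the layout with the kernel types reduced to opaque integers (the field names mirror the hunk above; everything else is illustrative):

#include <stdint.h>

#define FENCE_BYTES 64

/* Sketch of the per-PD fence bookkeeping added above; dma_addr_t and
 * the MR/MW handles are reduced to plain integers and pointers here. */
struct fence_sketch {
    uint32_t size;
    uint8_t  va[FENCE_BYTES];  /* tiny buffer the fence MR covers   */
    uint64_t dma_addr;         /* bus address of va[]               */
    void    *mr, *mw;          /* memory region + window over va[]  */
    uint8_t  bind_wqe[64];     /* bind WQE prebuilt at PD creation  */
    uint32_t bind_rkey;        /* rkey the bind produces            */
};

/* The bind WQE is built once, so arming (or re-arming) a fence on the
 * hot path is a plain memcpy into the SQ rather than a rebuild. */
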
@@ -62,6 +74,7 @@ struct bnxt_re_qp {
62 struct bnxt_re_dev *rdev; 74 struct bnxt_re_dev *rdev;
63 struct ib_qp ib_qp; 75 struct ib_qp ib_qp;
64 spinlock_t sq_lock; /* protect sq */ 76 spinlock_t sq_lock; /* protect sq */
77 spinlock_t rq_lock; /* protect rq */
65 struct bnxt_qplib_qp qplib_qp; 78 struct bnxt_qplib_qp qplib_qp;
66 struct ib_umem *sumem; 79 struct ib_umem *sumem;
67 struct ib_umem *rumem; 80 struct ib_umem *rumem;
@@ -181,12 +194,9 @@ int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents,
181struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type mr_type, 194struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type mr_type,
182 u32 max_num_sg); 195 u32 max_num_sg);
183int bnxt_re_dereg_mr(struct ib_mr *mr); 196int bnxt_re_dereg_mr(struct ib_mr *mr);
184struct ib_fmr *bnxt_re_alloc_fmr(struct ib_pd *pd, int mr_access_flags, 197struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
185 struct ib_fmr_attr *fmr_attr); 198 struct ib_udata *udata);
186int bnxt_re_map_phys_fmr(struct ib_fmr *fmr, u64 *page_list, int list_len, 199int bnxt_re_dealloc_mw(struct ib_mw *mw);
187 u64 iova);
188int bnxt_re_unmap_fmr(struct list_head *fmr_list);
189int bnxt_re_dealloc_fmr(struct ib_fmr *fmr);
190struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, 200struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
191 u64 virt_addr, int mr_access_flags, 201 u64 virt_addr, int mr_access_flags,
192 struct ib_udata *udata); 202 struct ib_udata *udata);
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index 5d355401179b..1fce5e73216b 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -507,10 +507,6 @@ static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
507 ibdev->dereg_mr = bnxt_re_dereg_mr; 507 ibdev->dereg_mr = bnxt_re_dereg_mr;
508 ibdev->alloc_mr = bnxt_re_alloc_mr; 508 ibdev->alloc_mr = bnxt_re_alloc_mr;
509 ibdev->map_mr_sg = bnxt_re_map_mr_sg; 509 ibdev->map_mr_sg = bnxt_re_map_mr_sg;
510 ibdev->alloc_fmr = bnxt_re_alloc_fmr;
511 ibdev->map_phys_fmr = bnxt_re_map_phys_fmr;
512 ibdev->unmap_fmr = bnxt_re_unmap_fmr;
513 ibdev->dealloc_fmr = bnxt_re_dealloc_fmr;
514 510
515 ibdev->reg_user_mr = bnxt_re_reg_user_mr; 511 ibdev->reg_user_mr = bnxt_re_reg_user_mr;
516 ibdev->alloc_ucontext = bnxt_re_alloc_ucontext; 512 ibdev->alloc_ucontext = bnxt_re_alloc_ucontext;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index 43d08b5e9085..f05500bcdcf1 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -284,7 +284,7 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
284{ 284{
285 struct bnxt_qplib_rcfw *rcfw = res->rcfw; 285 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
286 struct cmdq_create_qp1 req; 286 struct cmdq_create_qp1 req;
287 struct creq_create_qp1_resp *resp; 287 struct creq_create_qp1_resp resp;
288 struct bnxt_qplib_pbl *pbl; 288 struct bnxt_qplib_pbl *pbl;
289 struct bnxt_qplib_q *sq = &qp->sq; 289 struct bnxt_qplib_q *sq = &qp->sq;
290 struct bnxt_qplib_q *rq = &qp->rq; 290 struct bnxt_qplib_q *rq = &qp->rq;
@@ -394,31 +394,12 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
394 394
395 req.pd_id = cpu_to_le32(qp->pd->id); 395 req.pd_id = cpu_to_le32(qp->pd->id);
396 396
397 resp = (struct creq_create_qp1_resp *) 397 rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
398 bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 398 (void *)&resp, NULL, 0);
399 NULL, 0); 399 if (rc)
400 if (!resp) {
401 dev_err(&res->pdev->dev, "QPLIB: FP: CREATE_QP1 send failed");
402 rc = -EINVAL;
403 goto fail;
404 }
405 if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
406 /* Cmd timed out */
407 dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_QP1 timed out");
408 rc = -ETIMEDOUT;
409 goto fail;
410 }
411 if (resp->status ||
412 le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
413 dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_QP1 failed ");
414 dev_err(&rcfw->pdev->dev,
415 "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
416 resp->status, le16_to_cpu(req.cookie),
417 le16_to_cpu(resp->cookie));
418 rc = -EINVAL;
419 goto fail; 400 goto fail;
420 } 401
421 qp->id = le32_to_cpu(resp->xid); 402 qp->id = le32_to_cpu(resp.xid);
422 qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET; 403 qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
423 sq->flush_in_progress = false; 404 sq->flush_in_progress = false;
424 rq->flush_in_progress = false; 405 rq->flush_in_progress = false;
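
Every firmware command in this patch is converted to the same shape: a stack-allocated response, one call into bnxt_qplib_rcfw_send_message(), and a single rc check replacing the old send/wait/cookie/status boilerplate. A compact sketch of the calling convention, with a hypothetical send_cmd() standing in for the rcfw helper:

#include <errno.h>
#include <stdint.h>
#include <string.h>

struct req  { uint16_t cookie; };
struct resp { uint32_t xid; };

/* Hypothetical transport: fills *resp or returns a negative errno. */
static int send_cmd(const struct req *req, struct resp *resp)
{
    (void)req;                 /* a real transport would serialize this */
    memset(resp, 0, sizeof(*resp));
    resp->xid = 42;
    return 0;
}

static int create_obj_sketch(uint32_t *id)
{
    struct req  req  = { .cookie = 1 };
    struct resp resp;
    int rc;

    /* One call, one rc: the send/wait/cookie/status dance now lives
     * inside the helper instead of being open-coded per command. */
    rc = send_cmd(&req, &resp);
    if (rc)
        return rc;

    *id = resp.xid;
    return 0;
}
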
@@ -442,7 +423,7 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
442 struct bnxt_qplib_rcfw *rcfw = res->rcfw; 423 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
443 struct sq_send *hw_sq_send_hdr, **hw_sq_send_ptr; 424 struct sq_send *hw_sq_send_hdr, **hw_sq_send_ptr;
444 struct cmdq_create_qp req; 425 struct cmdq_create_qp req;
445 struct creq_create_qp_resp *resp; 426 struct creq_create_qp_resp resp;
446 struct bnxt_qplib_pbl *pbl; 427 struct bnxt_qplib_pbl *pbl;
447 struct sq_psn_search **psn_search_ptr; 428 struct sq_psn_search **psn_search_ptr;
448 unsigned long int psn_search, poff = 0; 429 unsigned long int psn_search, poff = 0;
@@ -627,31 +608,12 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
627 } 608 }
628 req.pd_id = cpu_to_le32(qp->pd->id); 609 req.pd_id = cpu_to_le32(qp->pd->id);
629 610
630 resp = (struct creq_create_qp_resp *) 611 rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
631 bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 612 (void *)&resp, NULL, 0);
632 NULL, 0); 613 if (rc)
633 if (!resp) {
634 dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_QP send failed");
635 rc = -EINVAL;
636 goto fail;
637 }
638 if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
639 /* Cmd timed out */
640 dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_QP timed out");
641 rc = -ETIMEDOUT;
642 goto fail;
643 }
644 if (resp->status ||
645 le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
646 dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_QP failed ");
647 dev_err(&rcfw->pdev->dev,
648 "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
649 resp->status, le16_to_cpu(req.cookie),
650 le16_to_cpu(resp->cookie));
651 rc = -EINVAL;
652 goto fail; 614 goto fail;
653 } 615
654 qp->id = le32_to_cpu(resp->xid); 616 qp->id = le32_to_cpu(resp.xid);
655 qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET; 617 qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
656 sq->flush_in_progress = false; 618 sq->flush_in_progress = false;
657 rq->flush_in_progress = false; 619 rq->flush_in_progress = false;
@@ -769,10 +731,11 @@ int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
769{ 731{
770 struct bnxt_qplib_rcfw *rcfw = res->rcfw; 732 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
771 struct cmdq_modify_qp req; 733 struct cmdq_modify_qp req;
772 struct creq_modify_qp_resp *resp; 734 struct creq_modify_qp_resp resp;
773 u16 cmd_flags = 0, pkey; 735 u16 cmd_flags = 0, pkey;
774 u32 temp32[4]; 736 u32 temp32[4];
775 u32 bmask; 737 u32 bmask;
738 int rc;
776 739
777 RCFW_CMD_PREP(req, MODIFY_QP, cmd_flags); 740 RCFW_CMD_PREP(req, MODIFY_QP, cmd_flags);
778 741
@@ -862,27 +825,10 @@ int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
862 825
863 req.vlan_pcp_vlan_dei_vlan_id = cpu_to_le16(qp->vlan_id); 826 req.vlan_pcp_vlan_dei_vlan_id = cpu_to_le16(qp->vlan_id);
864 827
865 resp = (struct creq_modify_qp_resp *) 828 rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
866 bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 829 (void *)&resp, NULL, 0);
867 NULL, 0); 830 if (rc)
868 if (!resp) { 831 return rc;
869 dev_err(&rcfw->pdev->dev, "QPLIB: FP: MODIFY_QP send failed");
870 return -EINVAL;
871 }
872 if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
873 /* Cmd timed out */
874 dev_err(&rcfw->pdev->dev, "QPLIB: FP: MODIFY_QP timed out");
875 return -ETIMEDOUT;
876 }
877 if (resp->status ||
878 le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
879 dev_err(&rcfw->pdev->dev, "QPLIB: FP: MODIFY_QP failed ");
880 dev_err(&rcfw->pdev->dev,
881 "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
882 resp->status, le16_to_cpu(req.cookie),
883 le16_to_cpu(resp->cookie));
884 return -EINVAL;
885 }
886 qp->cur_qp_state = qp->state; 832 qp->cur_qp_state = qp->state;
887 return 0; 833 return 0;
888} 834}
@@ -891,37 +837,26 @@ int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
891{ 837{
892 struct bnxt_qplib_rcfw *rcfw = res->rcfw; 838 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
893 struct cmdq_query_qp req; 839 struct cmdq_query_qp req;
894 struct creq_query_qp_resp *resp; 840 struct creq_query_qp_resp resp;
841 struct bnxt_qplib_rcfw_sbuf *sbuf;
895 struct creq_query_qp_resp_sb *sb; 842 struct creq_query_qp_resp_sb *sb;
896 u16 cmd_flags = 0; 843 u16 cmd_flags = 0;
897 u32 temp32[4]; 844 u32 temp32[4];
898 int i; 845 int i, rc = 0;
899 846
900 RCFW_CMD_PREP(req, QUERY_QP, cmd_flags); 847 RCFW_CMD_PREP(req, QUERY_QP, cmd_flags);
901 848
849 sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
850 if (!sbuf)
851 return -ENOMEM;
852 sb = sbuf->sb;
853
902 req.qp_cid = cpu_to_le32(qp->id); 854 req.qp_cid = cpu_to_le32(qp->id);
903 req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS; 855 req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
904 resp = (struct creq_query_qp_resp *) 856 rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
905 bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 857 (void *)sbuf, 0);
906 (void **)&sb, 0); 858 if (rc)
907 if (!resp) { 859 goto bail;
908 dev_err(&rcfw->pdev->dev, "QPLIB: FP: QUERY_QP send failed");
909 return -EINVAL;
910 }
911 if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
912 /* Cmd timed out */
913 dev_err(&rcfw->pdev->dev, "QPLIB: FP: QUERY_QP timed out");
914 return -ETIMEDOUT;
915 }
916 if (resp->status ||
917 le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
918 dev_err(&rcfw->pdev->dev, "QPLIB: FP: QUERY_QP failed ");
919 dev_err(&rcfw->pdev->dev,
920 "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
921 resp->status, le16_to_cpu(req.cookie),
922 le16_to_cpu(resp->cookie));
923 return -EINVAL;
924 }
925 /* Extract the context from the side buffer */ 860 /* Extract the context from the side buffer */
926 qp->state = sb->en_sqd_async_notify_state & 861 qp->state = sb->en_sqd_async_notify_state &
927 CREQ_QUERY_QP_RESP_SB_STATE_MASK; 862 CREQ_QUERY_QP_RESP_SB_STATE_MASK;
@@ -976,7 +911,9 @@ int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
976 qp->dest_qpn = le32_to_cpu(sb->dest_qp_id); 911 qp->dest_qpn = le32_to_cpu(sb->dest_qp_id);
977 memcpy(qp->smac, sb->src_mac, 6); 912 memcpy(qp->smac, sb->src_mac, 6);
978 qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id); 913 qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id);
979 return 0; 914bail:
915 bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
916 return rc;
980} 917}
981 918
982static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp) 919static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp)
@@ -1021,34 +958,18 @@ int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
1021{ 958{
1022 struct bnxt_qplib_rcfw *rcfw = res->rcfw; 959 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
1023 struct cmdq_destroy_qp req; 960 struct cmdq_destroy_qp req;
1024 struct creq_destroy_qp_resp *resp; 961 struct creq_destroy_qp_resp resp;
1025 unsigned long flags; 962 unsigned long flags;
1026 u16 cmd_flags = 0; 963 u16 cmd_flags = 0;
964 int rc;
1027 965
1028 RCFW_CMD_PREP(req, DESTROY_QP, cmd_flags); 966 RCFW_CMD_PREP(req, DESTROY_QP, cmd_flags);
1029 967
1030 req.qp_cid = cpu_to_le32(qp->id); 968 req.qp_cid = cpu_to_le32(qp->id);
1031 resp = (struct creq_destroy_qp_resp *) 969 rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
1032 bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 970 (void *)&resp, NULL, 0);
1033 NULL, 0); 971 if (rc)
1034 if (!resp) { 972 return rc;
1035 dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_QP send failed");
1036 return -EINVAL;
1037 }
1038 if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
1039 /* Cmd timed out */
1040 dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_QP timed out");
1041 return -ETIMEDOUT;
1042 }
1043 if (resp->status ||
1044 le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
1045 dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_QP failed ");
1046 dev_err(&rcfw->pdev->dev,
1047 "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
1048 resp->status, le16_to_cpu(req.cookie),
1049 le16_to_cpu(resp->cookie));
1050 return -EINVAL;
1051 }
1052 973
1053 /* Must walk the associated CQs to nullify the QP ptr */ 974
1054 spin_lock_irqsave(&qp->scq->hwq.lock, flags); 975 spin_lock_irqsave(&qp->scq->hwq.lock, flags);
@@ -1162,8 +1083,12 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
1162 rc = -EINVAL; 1083 rc = -EINVAL;
1163 goto done; 1084 goto done;
1164 } 1085 }
1165 if (HWQ_CMP((sq->hwq.prod + 1), &sq->hwq) == 1086
1166 HWQ_CMP(sq->hwq.cons, &sq->hwq)) { 1087 if (bnxt_qplib_queue_full(sq)) {
1088 dev_err(&sq->hwq.pdev->dev,
1089 "QPLIB: prod = %#x cons = %#x qdepth = %#x delta = %#x",
1090 sq->hwq.prod, sq->hwq.cons, sq->hwq.max_elements,
1091 sq->q_full_delta);
1167 rc = -ENOMEM; 1092 rc = -ENOMEM;
1168 goto done; 1093 goto done;
1169 } 1094 }
@@ -1373,6 +1298,9 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
1373 } 1298 }
1374 1299
1375 sq->hwq.prod++; 1300 sq->hwq.prod++;
1301
1302 qp->wqe_cnt++;
1303
1376done: 1304done:
1377 return rc; 1305 return rc;
1378} 1306}
@@ -1411,8 +1339,7 @@ int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
1411 rc = -EINVAL; 1339 rc = -EINVAL;
1412 goto done; 1340 goto done;
1413 } 1341 }
1414 if (HWQ_CMP((rq->hwq.prod + 1), &rq->hwq) == 1342 if (bnxt_qplib_queue_full(rq)) {
1415 HWQ_CMP(rq->hwq.cons, &rq->hwq)) {
1416 dev_err(&rq->hwq.pdev->dev, 1343 dev_err(&rq->hwq.pdev->dev,
1417 "QPLIB: FP: QP (0x%x) RQ is full!", qp->id); 1344 "QPLIB: FP: QP (0x%x) RQ is full!", qp->id);
1418 rc = -EINVAL; 1345 rc = -EINVAL;
@@ -1483,7 +1410,7 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
1483{ 1410{
1484 struct bnxt_qplib_rcfw *rcfw = res->rcfw; 1411 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
1485 struct cmdq_create_cq req; 1412 struct cmdq_create_cq req;
1486 struct creq_create_cq_resp *resp; 1413 struct creq_create_cq_resp resp;
1487 struct bnxt_qplib_pbl *pbl; 1414 struct bnxt_qplib_pbl *pbl;
1488 u16 cmd_flags = 0; 1415 u16 cmd_flags = 0;
1489 int rc; 1416 int rc;
@@ -1525,30 +1452,12 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
1525 (cq->cnq_hw_ring_id & CMDQ_CREATE_CQ_CNQ_ID_MASK) << 1452 (cq->cnq_hw_ring_id & CMDQ_CREATE_CQ_CNQ_ID_MASK) <<
1526 CMDQ_CREATE_CQ_CNQ_ID_SFT); 1453 CMDQ_CREATE_CQ_CNQ_ID_SFT);
1527 1454
1528 resp = (struct creq_create_cq_resp *) 1455 rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
1529 bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 1456 (void *)&resp, NULL, 0);
1530 NULL, 0); 1457 if (rc)
1531 if (!resp) {
1532 dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_CQ send failed");
1533 return -EINVAL;
1534 }
1535 if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
1536 /* Cmd timed out */
1537 dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_CQ timed out");
1538 rc = -ETIMEDOUT;
1539 goto fail;
1540 }
1541 if (resp->status ||
1542 le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
1543 dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_CQ failed ");
1544 dev_err(&rcfw->pdev->dev,
1545 "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
1546 resp->status, le16_to_cpu(req.cookie),
1547 le16_to_cpu(resp->cookie));
1548 rc = -EINVAL;
1549 goto fail; 1458 goto fail;
1550 } 1459
1551 cq->id = le32_to_cpu(resp->xid); 1460 cq->id = le32_to_cpu(resp.xid);
1552 cq->dbr_base = res->dpi_tbl.dbr_bar_reg_iomem; 1461 cq->dbr_base = res->dpi_tbl.dbr_bar_reg_iomem;
1553 cq->period = BNXT_QPLIB_QUEUE_START_PERIOD; 1462 cq->period = BNXT_QPLIB_QUEUE_START_PERIOD;
1554 init_waitqueue_head(&cq->waitq); 1463 init_waitqueue_head(&cq->waitq);
@@ -1566,33 +1475,17 @@ int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
1566{ 1475{
1567 struct bnxt_qplib_rcfw *rcfw = res->rcfw; 1476 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
1568 struct cmdq_destroy_cq req; 1477 struct cmdq_destroy_cq req;
1569 struct creq_destroy_cq_resp *resp; 1478 struct creq_destroy_cq_resp resp;
1570 u16 cmd_flags = 0; 1479 u16 cmd_flags = 0;
1480 int rc;
1571 1481
1572 RCFW_CMD_PREP(req, DESTROY_CQ, cmd_flags); 1482 RCFW_CMD_PREP(req, DESTROY_CQ, cmd_flags);
1573 1483
1574 req.cq_cid = cpu_to_le32(cq->id); 1484 req.cq_cid = cpu_to_le32(cq->id);
1575 resp = (struct creq_destroy_cq_resp *) 1485 rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
1576 bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 1486 (void *)&resp, NULL, 0);
1577 NULL, 0); 1487 if (rc)
1578 if (!resp) { 1488 return rc;
1579 dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_CQ send failed");
1580 return -EINVAL;
1581 }
1582 if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
1583 /* Cmd timed out */
1584 dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_CQ timed out");
1585 return -ETIMEDOUT;
1586 }
1587 if (resp->status ||
1588 le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
1589 dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_CQ failed ");
1590 dev_err(&rcfw->pdev->dev,
1591 "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
1592 resp->status, le16_to_cpu(req.cookie),
1593 le16_to_cpu(resp->cookie));
1594 return -EINVAL;
1595 }
1596 bnxt_qplib_free_hwq(res->pdev, &cq->hwq); 1489 bnxt_qplib_free_hwq(res->pdev, &cq->hwq);
1597 return 0; 1490 return 0;
1598} 1491}
@@ -1664,14 +1557,113 @@ static int __flush_rq(struct bnxt_qplib_q *rq, struct bnxt_qplib_qp *qp,
1664 return rc; 1557 return rc;
1665} 1558}
1666 1559
1560/* Note: SQE is valid from sw_sq_cons up to cqe_sq_cons (exclusive)
 1561 * CQE is tracked from sw_cq_cons to max_elements but valid only if VALID=1
1562 */
1563static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
1564 u32 cq_cons, u32 sw_sq_cons, u32 cqe_sq_cons)
1565{
1566 struct bnxt_qplib_q *sq = &qp->sq;
1567 struct bnxt_qplib_swq *swq;
1568 u32 peek_sw_cq_cons, peek_raw_cq_cons, peek_sq_cons_idx;
1569 struct cq_base *peek_hwcqe, **peek_hw_cqe_ptr;
1570 struct cq_req *peek_req_hwcqe;
1571 struct bnxt_qplib_qp *peek_qp;
1572 struct bnxt_qplib_q *peek_sq;
1573 int i, rc = 0;
1574
1575 /* Normal mode */
1576 /* Check for the psn_search marking before completing */
1577 swq = &sq->swq[sw_sq_cons];
1578 if (swq->psn_search &&
1579 le32_to_cpu(swq->psn_search->flags_next_psn) & 0x80000000) {
1580 /* Unmark */
1581 swq->psn_search->flags_next_psn = cpu_to_le32
1582 (le32_to_cpu(swq->psn_search->flags_next_psn)
1583 & ~0x80000000);
1584 dev_dbg(&cq->hwq.pdev->dev,
1585 "FP: Process Req cq_cons=0x%x qp=0x%x sq cons sw=0x%x cqe=0x%x marked!\n",
1586 cq_cons, qp->id, sw_sq_cons, cqe_sq_cons);
1587 sq->condition = true;
1588 sq->send_phantom = true;
1589
1590 /* TODO: Only ARM if the previous SQE is ARMALL */
1591 bnxt_qplib_arm_cq(cq, DBR_DBR_TYPE_CQ_ARMALL);
1592
1593 rc = -EAGAIN;
1594 goto out;
1595 }
1596 if (sq->condition) {
1597 /* Peek at the completions */
1598 peek_raw_cq_cons = cq->hwq.cons;
1599 peek_sw_cq_cons = cq_cons;
1600 i = cq->hwq.max_elements;
1601 while (i--) {
1602 peek_sw_cq_cons = HWQ_CMP((peek_sw_cq_cons), &cq->hwq);
1603 peek_hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
1604 peek_hwcqe = &peek_hw_cqe_ptr[CQE_PG(peek_sw_cq_cons)]
1605 [CQE_IDX(peek_sw_cq_cons)];
1606 /* If the next hwcqe is VALID */
1607 if (CQE_CMP_VALID(peek_hwcqe, peek_raw_cq_cons,
1608 cq->hwq.max_elements)) {
1609 /* If the next hwcqe is a REQ */
1610 if ((peek_hwcqe->cqe_type_toggle &
1611 CQ_BASE_CQE_TYPE_MASK) ==
1612 CQ_BASE_CQE_TYPE_REQ) {
1613 peek_req_hwcqe = (struct cq_req *)
1614 peek_hwcqe;
1615 peek_qp = (struct bnxt_qplib_qp *)
1616 ((unsigned long)
1617 le64_to_cpu
1618 (peek_req_hwcqe->qp_handle));
1619 peek_sq = &peek_qp->sq;
1620 peek_sq_cons_idx = HWQ_CMP(le16_to_cpu(
1621 peek_req_hwcqe->sq_cons_idx) - 1
1622 , &sq->hwq);
1623 /* If the hwcqe's sq's wr_id matches */
1624 if (peek_sq == sq &&
1625 sq->swq[peek_sq_cons_idx].wr_id ==
1626 BNXT_QPLIB_FENCE_WRID) {
1627 /*
1628 * Unbreak only if the phantom
1629 * comes back
1630 */
1631 dev_dbg(&cq->hwq.pdev->dev,
1632 "FP:Got Phantom CQE");
1633 sq->condition = false;
1634 sq->single = true;
1635 rc = 0;
1636 goto out;
1637 }
1638 }
1639 /* Valid but not the phantom, so keep looping */
1640 } else {
1641 /* Not valid yet, just exit and wait */
1642 rc = -EINVAL;
1643 goto out;
1644 }
1645 peek_sw_cq_cons++;
1646 peek_raw_cq_cons++;
1647 }
1648 dev_err(&cq->hwq.pdev->dev,
1649 "Should not have come here! cq_cons=0x%x qp=0x%x sq cons sw=0x%x hw=0x%x",
1650 cq_cons, qp->id, sw_sq_cons, cqe_sq_cons);
1651 rc = -EINVAL;
1652 }
1653out:
1654 return rc;
1655}
1656
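
do_wa9060() above peeks forward through the CQ without consuming entries: a not-yet-valid CQE means wait and retry later, while a valid REQ CQE whose SWQE carries the fence sentinel wr_id means the phantom completed and the SQ can be unblocked. A simplified sketch of that peek over a valid-bit ring (power-of-two sized; the sentinel value is the one defined in the patch):

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>

#define SENTINEL_WRID 0x46454E43u  /* "FENC" */

struct peek_cqe { bool valid; uint64_t wr_id; };

static int peek_for_phantom(const struct peek_cqe *ring, uint32_t cons,
                            uint32_t nelem /* power of two */)
{
    for (uint32_t i = 0; i < nelem; i++) {
        const struct peek_cqe *e = &ring[(cons + i) & (nelem - 1)];

        if (!e->valid)
            return -EAGAIN;    /* HW not done yet: come back later  */
        if (e->wr_id == SENTINEL_WRID)
            return 0;          /* phantom came back: unbreak the SQ */
        /* valid but not the phantom: keep scanning */
    }
    return -EINVAL;            /* scanned the whole ring: give up   */
}
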
1667static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq, 1657static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
1668 struct cq_req *hwcqe, 1658 struct cq_req *hwcqe,
1669 struct bnxt_qplib_cqe **pcqe, int *budget) 1659 struct bnxt_qplib_cqe **pcqe, int *budget,
1660 u32 cq_cons, struct bnxt_qplib_qp **lib_qp)
1670{ 1661{
1671 struct bnxt_qplib_qp *qp; 1662 struct bnxt_qplib_qp *qp;
1672 struct bnxt_qplib_q *sq; 1663 struct bnxt_qplib_q *sq;
1673 struct bnxt_qplib_cqe *cqe; 1664 struct bnxt_qplib_cqe *cqe;
1674 u32 sw_cons, cqe_cons; 1665 u32 sw_sq_cons, cqe_sq_cons;
1666 struct bnxt_qplib_swq *swq;
1675 int rc = 0; 1667 int rc = 0;
1676 1668
1677 qp = (struct bnxt_qplib_qp *)((unsigned long) 1669 qp = (struct bnxt_qplib_qp *)((unsigned long)
@@ -1683,13 +1675,13 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
1683 } 1675 }
1684 sq = &qp->sq; 1676 sq = &qp->sq;
1685 1677
1686 cqe_cons = HWQ_CMP(le16_to_cpu(hwcqe->sq_cons_idx), &sq->hwq); 1678 cqe_sq_cons = HWQ_CMP(le16_to_cpu(hwcqe->sq_cons_idx), &sq->hwq);
1687 if (cqe_cons > sq->hwq.max_elements) { 1679 if (cqe_sq_cons > sq->hwq.max_elements) {
1688 dev_err(&cq->hwq.pdev->dev, 1680 dev_err(&cq->hwq.pdev->dev,
1689 "QPLIB: FP: CQ Process req reported "); 1681 "QPLIB: FP: CQ Process req reported ");
1690 dev_err(&cq->hwq.pdev->dev, 1682 dev_err(&cq->hwq.pdev->dev,
1691 "QPLIB: sq_cons_idx 0x%x which exceeded max 0x%x", 1683 "QPLIB: sq_cons_idx 0x%x which exceeded max 0x%x",
1692 cqe_cons, sq->hwq.max_elements); 1684 cqe_sq_cons, sq->hwq.max_elements);
1693 return -EINVAL; 1685 return -EINVAL;
1694 } 1686 }
1695 /* If we were in the middle of flushing the SQ, continue */ 1687 /* If we were in the middle of flushing the SQ, continue */
@@ -1698,53 +1690,74 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
1698 1690
 1699 /* We must walk the sq's swq to fabricate CQEs for all previously 1691 /* We must walk the sq's swq to fabricate CQEs for all previously
1700 * signaled SWQEs due to CQE aggregation from the current sq cons 1692 * signaled SWQEs due to CQE aggregation from the current sq cons
1701 * to the cqe_cons 1693 * to the cqe_sq_cons
1702 */ 1694 */
1703 cqe = *pcqe; 1695 cqe = *pcqe;
1704 while (*budget) { 1696 while (*budget) {
1705 sw_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq); 1697 sw_sq_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq);
1706 if (sw_cons == cqe_cons) 1698 if (sw_sq_cons == cqe_sq_cons)
1699 /* Done */
1707 break; 1700 break;
1701
1702 swq = &sq->swq[sw_sq_cons];
1708 memset(cqe, 0, sizeof(*cqe)); 1703 memset(cqe, 0, sizeof(*cqe));
1709 cqe->opcode = CQ_BASE_CQE_TYPE_REQ; 1704 cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
1710 cqe->qp_handle = (u64)(unsigned long)qp; 1705 cqe->qp_handle = (u64)(unsigned long)qp;
1711 cqe->src_qp = qp->id; 1706 cqe->src_qp = qp->id;
1712 cqe->wr_id = sq->swq[sw_cons].wr_id; 1707 cqe->wr_id = swq->wr_id;
1713 cqe->type = sq->swq[sw_cons].type; 1708 if (cqe->wr_id == BNXT_QPLIB_FENCE_WRID)
1709 goto skip;
1710 cqe->type = swq->type;
1714 1711
1715 /* For the last CQE, check for status. For errors, regardless 1712 /* For the last CQE, check for status. For errors, regardless
1716 * of the request being signaled or not, it must complete with 1713 * of the request being signaled or not, it must complete with
1717 * the hwcqe error status 1714 * the hwcqe error status
1718 */ 1715 */
1719 if (HWQ_CMP((sw_cons + 1), &sq->hwq) == cqe_cons && 1716 if (HWQ_CMP((sw_sq_cons + 1), &sq->hwq) == cqe_sq_cons &&
1720 hwcqe->status != CQ_REQ_STATUS_OK) { 1717 hwcqe->status != CQ_REQ_STATUS_OK) {
1721 cqe->status = hwcqe->status; 1718 cqe->status = hwcqe->status;
1722 dev_err(&cq->hwq.pdev->dev, 1719 dev_err(&cq->hwq.pdev->dev,
1723 "QPLIB: FP: CQ Processed Req "); 1720 "QPLIB: FP: CQ Processed Req ");
1724 dev_err(&cq->hwq.pdev->dev, 1721 dev_err(&cq->hwq.pdev->dev,
1725 "QPLIB: wr_id[%d] = 0x%llx with status 0x%x", 1722 "QPLIB: wr_id[%d] = 0x%llx with status 0x%x",
1726 sw_cons, cqe->wr_id, cqe->status); 1723 sw_sq_cons, cqe->wr_id, cqe->status);
1727 cqe++; 1724 cqe++;
1728 (*budget)--; 1725 (*budget)--;
1729 sq->flush_in_progress = true; 1726 sq->flush_in_progress = true;
1730 /* Must block new posting of SQ and RQ */ 1727 /* Must block new posting of SQ and RQ */
1731 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR; 1728 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
1729 sq->condition = false;
1730 sq->single = false;
1732 } else { 1731 } else {
1733 if (sq->swq[sw_cons].flags & 1732 if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
1734 SQ_SEND_FLAGS_SIGNAL_COMP) { 1733 /* Before we complete, do WA 9060 */
1734 if (do_wa9060(qp, cq, cq_cons, sw_sq_cons,
1735 cqe_sq_cons)) {
1736 *lib_qp = qp;
1737 goto out;
1738 }
1735 cqe->status = CQ_REQ_STATUS_OK; 1739 cqe->status = CQ_REQ_STATUS_OK;
1736 cqe++; 1740 cqe++;
1737 (*budget)--; 1741 (*budget)--;
1738 } 1742 }
1739 } 1743 }
1744skip:
1740 sq->hwq.cons++; 1745 sq->hwq.cons++;
1746 if (sq->single)
1747 break;
1741 } 1748 }
1749out:
1742 *pcqe = cqe; 1750 *pcqe = cqe;
1743 if (!*budget && HWQ_CMP(sq->hwq.cons, &sq->hwq) != cqe_cons) { 1751 if (HWQ_CMP(sq->hwq.cons, &sq->hwq) != cqe_sq_cons) {
1744 /* Out of budget */ 1752 /* Out of budget */
1745 rc = -EAGAIN; 1753 rc = -EAGAIN;
1746 goto done; 1754 goto done;
1747 } 1755 }
1756 /*
1757 * Back to normal completion mode only after it has completed all of
1758 * the WC for this CQE
1759 */
1760 sq->single = false;
1748 if (!sq->flush_in_progress) 1761 if (!sq->flush_in_progress)
1749 goto done; 1762 goto done;
1750flush: 1763flush:
@@ -2074,7 +2087,7 @@ static int bnxt_qplib_cq_process_cutoff(struct bnxt_qplib_cq *cq,
2074} 2087}
2075 2088
2076int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe, 2089int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
2077 int num_cqes) 2090 int num_cqes, struct bnxt_qplib_qp **lib_qp)
2078{ 2091{
2079 struct cq_base *hw_cqe, **hw_cqe_ptr; 2092 struct cq_base *hw_cqe, **hw_cqe_ptr;
2080 unsigned long flags; 2093 unsigned long flags;
@@ -2099,7 +2112,8 @@ int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
2099 case CQ_BASE_CQE_TYPE_REQ: 2112 case CQ_BASE_CQE_TYPE_REQ:
2100 rc = bnxt_qplib_cq_process_req(cq, 2113 rc = bnxt_qplib_cq_process_req(cq,
2101 (struct cq_req *)hw_cqe, 2114 (struct cq_req *)hw_cqe,
2102 &cqe, &budget); 2115 &cqe, &budget,
2116 sw_cons, lib_qp);
2103 break; 2117 break;
2104 case CQ_BASE_CQE_TYPE_RES_RC: 2118 case CQ_BASE_CQE_TYPE_RES_RC:
2105 rc = bnxt_qplib_cq_process_res_rc(cq, 2119 rc = bnxt_qplib_cq_process_res_rc(cq,
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
index f0150f8da1e3..36b7b7db0e3f 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
@@ -88,6 +88,7 @@ struct bnxt_qplib_swq {
88 88
89struct bnxt_qplib_swqe { 89struct bnxt_qplib_swqe {
90 /* General */ 90 /* General */
91#define BNXT_QPLIB_FENCE_WRID 0x46454E43 /* "FENC" */
91 u64 wr_id; 92 u64 wr_id;
92 u8 reqs_type; 93 u8 reqs_type;
93 u8 type; 94 u8 type;
@@ -216,9 +217,16 @@ struct bnxt_qplib_q {
216 struct scatterlist *sglist; 217 struct scatterlist *sglist;
217 u32 nmap; 218 u32 nmap;
218 u32 max_wqe; 219 u32 max_wqe;
220 u16 q_full_delta;
219 u16 max_sge; 221 u16 max_sge;
220 u32 psn; 222 u32 psn;
221 bool flush_in_progress; 223 bool flush_in_progress;
224 bool condition;
225 bool single;
226 bool send_phantom;
227 u32 phantom_wqe_cnt;
228 u32 phantom_cqe_cnt;
229 u32 next_cq_cons;
222}; 230};
223 231
224struct bnxt_qplib_qp { 232struct bnxt_qplib_qp {
@@ -242,6 +250,7 @@ struct bnxt_qplib_qp {
242 u8 timeout; 250 u8 timeout;
243 u8 retry_cnt; 251 u8 retry_cnt;
244 u8 rnr_retry; 252 u8 rnr_retry;
253 u64 wqe_cnt;
245 u32 min_rnr_timer; 254 u32 min_rnr_timer;
246 u32 max_rd_atomic; 255 u32 max_rd_atomic;
247 u32 max_dest_rd_atomic; 256 u32 max_dest_rd_atomic;
@@ -301,6 +310,13 @@ struct bnxt_qplib_qp {
301 (!!((hdr)->cqe_type_toggle & CQ_BASE_TOGGLE) == \ 310 (!!((hdr)->cqe_type_toggle & CQ_BASE_TOGGLE) == \
302 !((raw_cons) & (cp_bit))) 311 !((raw_cons) & (cp_bit)))
303 312
313static inline bool bnxt_qplib_queue_full(struct bnxt_qplib_q *qplib_q)
314{
315 return HWQ_CMP((qplib_q->hwq.prod + qplib_q->q_full_delta),
316 &qplib_q->hwq) == HWQ_CMP(qplib_q->hwq.cons,
317 &qplib_q->hwq);
318}
319
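
bnxt_qplib_queue_full() reports the queue full q_full_delta slots early, reserving headroom so a phantom WQE can always be posted even at the reported limit. The same arithmetic in a standalone sketch (ring size assumed to be a power of two, as HWQ_CMP() requires):

#include <stdbool.h>
#include <stdint.h>

struct ring { uint32_t prod, cons, max_elements /* power of two */; };

/* Index arithmetic modulo the ring size, as HWQ_CMP() does. */
static inline uint32_t ring_cmp(const struct ring *r, uint32_t idx)
{
    return idx & (r->max_elements - 1);
}

/* Full q_full_delta slots early: the reserved headroom guarantees
 * there is always room to post the phantom WQE for the workaround
 * even when the producer is at the advertised depth. */
static inline bool ring_full(const struct ring *r, uint32_t q_full_delta)
{
    return ring_cmp(r, r->prod + q_full_delta) == ring_cmp(r, r->cons);
}
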
304struct bnxt_qplib_cqe { 320struct bnxt_qplib_cqe {
305 u8 status; 321 u8 status;
306 u8 type; 322 u8 type;
@@ -432,7 +448,7 @@ int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
432int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq); 448int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq);
433int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq); 449int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq);
434int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe, 450int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
435 int num); 451 int num, struct bnxt_qplib_qp **qp);
436void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type); 452void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type);
437void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq); 453void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq);
438int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq); 454int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index 23fb7260662b..16e42754dbec 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -39,72 +39,55 @@
39#include <linux/spinlock.h> 39#include <linux/spinlock.h>
40#include <linux/pci.h> 40#include <linux/pci.h>
41#include <linux/prefetch.h> 41#include <linux/prefetch.h>
42#include <linux/delay.h>
43
42#include "roce_hsi.h" 44#include "roce_hsi.h"
43#include "qplib_res.h" 45#include "qplib_res.h"
44#include "qplib_rcfw.h" 46#include "qplib_rcfw.h"
45static void bnxt_qplib_service_creq(unsigned long data); 47static void bnxt_qplib_service_creq(unsigned long data);
46 48
47/* Hardware communication channel */ 49/* Hardware communication channel */
48int bnxt_qplib_rcfw_wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie) 50static int __wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
49{ 51{
50 u16 cbit; 52 u16 cbit;
51 int rc; 53 int rc;
52 54
53 cookie &= RCFW_MAX_COOKIE_VALUE;
54 cbit = cookie % RCFW_MAX_OUTSTANDING_CMD; 55 cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
55 if (!test_bit(cbit, rcfw->cmdq_bitmap))
56 dev_warn(&rcfw->pdev->dev,
57 "QPLIB: CMD bit %d for cookie 0x%x is not set?",
58 cbit, cookie);
59
60 rc = wait_event_timeout(rcfw->waitq, 56 rc = wait_event_timeout(rcfw->waitq,
61 !test_bit(cbit, rcfw->cmdq_bitmap), 57 !test_bit(cbit, rcfw->cmdq_bitmap),
62 msecs_to_jiffies(RCFW_CMD_WAIT_TIME_MS)); 58 msecs_to_jiffies(RCFW_CMD_WAIT_TIME_MS));
63 if (!rc) { 59 return rc ? 0 : -ETIMEDOUT;
64 dev_warn(&rcfw->pdev->dev,
65 "QPLIB: Bono Error: timeout %d msec, msg {0x%x}\n",
66 RCFW_CMD_WAIT_TIME_MS, cookie);
67 }
68
69 return rc;
70}; 60};
71 61
72int bnxt_qplib_rcfw_block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie) 62static int __block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
73{ 63{
74 u32 count = -1; 64 u32 count = RCFW_BLOCKED_CMD_WAIT_COUNT;
75 u16 cbit; 65 u16 cbit;
76 66
77 cookie &= RCFW_MAX_COOKIE_VALUE;
78 cbit = cookie % RCFW_MAX_OUTSTANDING_CMD; 67 cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
79 if (!test_bit(cbit, rcfw->cmdq_bitmap)) 68 if (!test_bit(cbit, rcfw->cmdq_bitmap))
80 goto done; 69 goto done;
81 do { 70 do {
 71 mdelay(1); /* 1 msec */
82 bnxt_qplib_service_creq((unsigned long)rcfw); 72 bnxt_qplib_service_creq((unsigned long)rcfw);
83 } while (test_bit(cbit, rcfw->cmdq_bitmap) && --count); 73 } while (test_bit(cbit, rcfw->cmdq_bitmap) && --count);
84done: 74done:
85 return count; 75 return count ? 0 : -ETIMEDOUT;
86}; 76};
87 77
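
Both wait paths key off the same bookkeeping: the command cookie maps onto one bit per outstanding command, and the completion path clears the bit. The blocking variant services the CREQ itself with a bounded loop count instead of sleeping on a waitqueue. A user-space sketch with a plain bool array in place of the kernel bitmap:

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>

#define MAX_OUTSTANDING  256
#define BLOCK_WAIT_LOOPS 0x4E20  /* ~20000 x 1 msec, as in the patch */

static bool cmd_pending[MAX_OUTSTANDING];

static unsigned int cookie_bit(uint16_t cookie)
{
    return cookie % MAX_OUTSTANDING;  /* one slot per in-flight cmd */
}

/* Blocking variant: service the event queue ourselves, bounded. */
static int block_for_resp(uint16_t cookie, void (*service_creq)(void))
{
    uint32_t count = BLOCK_WAIT_LOOPS;
    unsigned int bit = cookie_bit(cookie);

    if (!cmd_pending[bit])
        return 0;
    do {
        /* mdelay(1) in the driver; omitted here */
        service_creq();               /* may clear cmd_pending[bit] */
    } while (cmd_pending[bit] && --count);

    return count ? 0 : -ETIMEDOUT;
}
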
88void *bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw, 78static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
89 struct cmdq_base *req, void **crsbe, 79 struct creq_base *resp, void *sb, u8 is_block)
90 u8 is_block)
91{ 80{
92 struct bnxt_qplib_crsq *crsq = &rcfw->crsq;
93 struct bnxt_qplib_cmdqe *cmdqe, **cmdq_ptr; 81 struct bnxt_qplib_cmdqe *cmdqe, **cmdq_ptr;
94 struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq; 82 struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq;
95 struct bnxt_qplib_hwq *crsb = &rcfw->crsb; 83 struct bnxt_qplib_crsq *crsqe;
96 struct bnxt_qplib_crsqe *crsqe = NULL;
97 struct bnxt_qplib_crsbe **crsb_ptr;
98 u32 sw_prod, cmdq_prod; 84 u32 sw_prod, cmdq_prod;
99 u8 retry_cnt = 0xFF;
100 dma_addr_t dma_addr;
101 unsigned long flags; 85 unsigned long flags;
102 u32 size, opcode; 86 u32 size, opcode;
103 u16 cookie, cbit; 87 u16 cookie, cbit;
104 int pg, idx; 88 int pg, idx;
105 u8 *preq; 89 u8 *preq;
106 90
107retry:
108 opcode = req->opcode; 91 opcode = req->opcode;
109 if (!test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) && 92 if (!test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) &&
110 (opcode != CMDQ_BASE_OPCODE_QUERY_FUNC && 93 (opcode != CMDQ_BASE_OPCODE_QUERY_FUNC &&
@@ -112,63 +95,50 @@ retry:
112 dev_err(&rcfw->pdev->dev, 95 dev_err(&rcfw->pdev->dev,
113 "QPLIB: RCFW not initialized, reject opcode 0x%x", 96 "QPLIB: RCFW not initialized, reject opcode 0x%x",
114 opcode); 97 opcode);
115 return NULL; 98 return -EINVAL;
116 } 99 }
117 100
118 if (test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) && 101 if (test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) &&
119 opcode == CMDQ_BASE_OPCODE_INITIALIZE_FW) { 102 opcode == CMDQ_BASE_OPCODE_INITIALIZE_FW) {
120 dev_err(&rcfw->pdev->dev, "QPLIB: RCFW already initialized!"); 103 dev_err(&rcfw->pdev->dev, "QPLIB: RCFW already initialized!");
121 return NULL; 104 return -EINVAL;
122 } 105 }
123 106
124 /* Cmdq are in 16-byte units, each request can consume 1 or more 107 /* Cmdq are in 16-byte units, each request can consume 1 or more
125 * cmdqe 108 * cmdqe
126 */ 109 */
127 spin_lock_irqsave(&cmdq->lock, flags); 110 spin_lock_irqsave(&cmdq->lock, flags);
128 if (req->cmd_size > cmdq->max_elements - 111 if (req->cmd_size >= HWQ_FREE_SLOTS(cmdq)) {
129 ((HWQ_CMP(cmdq->prod, cmdq) - HWQ_CMP(cmdq->cons, cmdq)) &
130 (cmdq->max_elements - 1))) {
131 dev_err(&rcfw->pdev->dev, "QPLIB: RCFW: CMDQ is full!"); 112 dev_err(&rcfw->pdev->dev, "QPLIB: RCFW: CMDQ is full!");
132 spin_unlock_irqrestore(&cmdq->lock, flags); 113 spin_unlock_irqrestore(&cmdq->lock, flags);
133 114 return -EAGAIN;
134 if (!retry_cnt--)
135 return NULL;
136 goto retry;
137 } 115 }
138 116
139 retry_cnt = 0xFF;
140 117
141 cookie = atomic_inc_return(&rcfw->seq_num) & RCFW_MAX_COOKIE_VALUE; 118 cookie = rcfw->seq_num & RCFW_MAX_COOKIE_VALUE;
142 cbit = cookie % RCFW_MAX_OUTSTANDING_CMD; 119 cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
143 if (is_block) 120 if (is_block)
144 cookie |= RCFW_CMD_IS_BLOCKING; 121 cookie |= RCFW_CMD_IS_BLOCKING;
122
123 set_bit(cbit, rcfw->cmdq_bitmap);
145 req->cookie = cpu_to_le16(cookie); 124 req->cookie = cpu_to_le16(cookie);
146 if (test_and_set_bit(cbit, rcfw->cmdq_bitmap)) { 125 crsqe = &rcfw->crsqe_tbl[cbit];
147 dev_err(&rcfw->pdev->dev, 126 if (crsqe->resp) {
148 "QPLIB: RCFW MAX outstanding cmd reached!");
149 atomic_dec(&rcfw->seq_num);
150 spin_unlock_irqrestore(&cmdq->lock, flags); 127 spin_unlock_irqrestore(&cmdq->lock, flags);
151 128 return -EBUSY;
152 if (!retry_cnt--)
153 return NULL;
154 goto retry;
155 } 129 }
156 /* Reserve a resp buffer slot if requested */ 130 memset(resp, 0, sizeof(*resp));
157 if (req->resp_size && crsbe) { 131 crsqe->resp = (struct creq_qp_event *)resp;
158 spin_lock(&crsb->lock); 132 crsqe->resp->cookie = req->cookie;
159 sw_prod = HWQ_CMP(crsb->prod, crsb); 133 crsqe->req_size = req->cmd_size;
160 crsb_ptr = (struct bnxt_qplib_crsbe **)crsb->pbl_ptr; 134 if (req->resp_size && sb) {
161 *crsbe = (void *)&crsb_ptr[get_crsb_pg(sw_prod)] 135 struct bnxt_qplib_rcfw_sbuf *sbuf = sb;
162 [get_crsb_idx(sw_prod)]; 136
163 bnxt_qplib_crsb_dma_next(crsb->pbl_dma_ptr, sw_prod, &dma_addr); 137 req->resp_addr = cpu_to_le64(sbuf->dma_addr);
164 req->resp_addr = cpu_to_le64(dma_addr); 138 req->resp_size = (sbuf->size + BNXT_QPLIB_CMDQE_UNITS - 1) /
165 crsb->prod++; 139 BNXT_QPLIB_CMDQE_UNITS;
166 spin_unlock(&crsb->lock);
167
168 req->resp_size = (sizeof(struct bnxt_qplib_crsbe) +
169 BNXT_QPLIB_CMDQE_UNITS - 1) /
170 BNXT_QPLIB_CMDQE_UNITS;
171 } 140 }
141
172 cmdq_ptr = (struct bnxt_qplib_cmdqe **)cmdq->pbl_ptr; 142 cmdq_ptr = (struct bnxt_qplib_cmdqe **)cmdq->pbl_ptr;
173 preq = (u8 *)req; 143 preq = (u8 *)req;
174 size = req->cmd_size * BNXT_QPLIB_CMDQE_UNITS; 144 size = req->cmd_size * BNXT_QPLIB_CMDQE_UNITS;
@@ -190,23 +160,24 @@ retry:
190 preq += min_t(u32, size, sizeof(*cmdqe)); 160 preq += min_t(u32, size, sizeof(*cmdqe));
191 size -= min_t(u32, size, sizeof(*cmdqe)); 161 size -= min_t(u32, size, sizeof(*cmdqe));
192 cmdq->prod++; 162 cmdq->prod++;
163 rcfw->seq_num++;
193 } while (size > 0); 164 } while (size > 0);
194 165
166 rcfw->seq_num++;
167
195 cmdq_prod = cmdq->prod; 168 cmdq_prod = cmdq->prod;
196 if (rcfw->flags & FIRMWARE_FIRST_FLAG) { 169 if (rcfw->flags & FIRMWARE_FIRST_FLAG) {
197 /* The very first doorbell write is required to set this flag 170 /* The very first doorbell write
198 * which prompts the FW to reset its internal pointers 171 * is required to set this flag
172 * which prompts the FW to reset
173 * its internal pointers
199 */ 174 */
200 cmdq_prod |= FIRMWARE_FIRST_FLAG; 175 cmdq_prod |= FIRMWARE_FIRST_FLAG;
201 rcfw->flags &= ~FIRMWARE_FIRST_FLAG; 176 rcfw->flags &= ~FIRMWARE_FIRST_FLAG;
202 } 177 }
203 sw_prod = HWQ_CMP(crsq->prod, crsq);
204 crsqe = &crsq->crsq[sw_prod];
205 memset(crsqe, 0, sizeof(*crsqe));
206 crsq->prod++;
207 crsqe->req_size = req->cmd_size;
208 178
209 /* ring CMDQ DB */ 179 /* ring CMDQ DB */
180 wmb();
210 writel(cmdq_prod, rcfw->cmdq_bar_reg_iomem + 181 writel(cmdq_prod, rcfw->cmdq_bar_reg_iomem +
211 rcfw->cmdq_bar_reg_prod_off); 182 rcfw->cmdq_bar_reg_prod_off);
212 writel(RCFW_CMDQ_TRIG_VAL, rcfw->cmdq_bar_reg_iomem + 183 writel(RCFW_CMDQ_TRIG_VAL, rcfw->cmdq_bar_reg_iomem +
@@ -214,9 +185,56 @@ retry:
214done: 185done:
215 spin_unlock_irqrestore(&cmdq->lock, flags); 186 spin_unlock_irqrestore(&cmdq->lock, flags);
216 /* Return the CREQ response pointer */ 187 /* Return the CREQ response pointer */
217 return crsqe ? &crsqe->qp_event : NULL; 188 return 0;
218} 189}
219 190
191int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
192 struct cmdq_base *req,
193 struct creq_base *resp,
194 void *sb, u8 is_block)
195{
196 struct creq_qp_event *evnt = (struct creq_qp_event *)resp;
197 u16 cookie;
198 u8 opcode, retry_cnt = 0xFF;
199 int rc = 0;
200
201 do {
202 opcode = req->opcode;
203 rc = __send_message(rcfw, req, resp, sb, is_block);
204 cookie = le16_to_cpu(req->cookie) & RCFW_MAX_COOKIE_VALUE;
205 if (!rc)
206 break;
207
208 if (!retry_cnt || (rc != -EAGAIN && rc != -EBUSY)) {
209 /* send failed */
210 dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x send failed",
211 cookie, opcode);
212 return rc;
213 }
214 is_block ? mdelay(1) : usleep_range(500, 1000);
215
216 } while (retry_cnt--);
217
218 if (is_block)
219 rc = __block_for_resp(rcfw, cookie);
220 else
221 rc = __wait_for_resp(rcfw, cookie);
222 if (rc) {
223 /* timed out */
 224 dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x timed out (%d) msec",
225 cookie, opcode, RCFW_CMD_WAIT_TIME_MS);
226 return rc;
227 }
228
229 if (evnt->status) {
230 /* failed with status */
231 dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x status %#x",
232 cookie, opcode, evnt->status);
233 rc = -EFAULT;
234 }
235
236 return rc;
237}
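
The new wrapper retries -EAGAIN (CMDQ full) and -EBUSY (cookie slot still in use) a bounded number of times with a short delay, then waits on the cookie and maps a nonzero firmware status to -EFAULT. A generic sketch of that policy, with submit/wait supplied as hypothetical callbacks:

#include <errno.h>
#include <stdint.h>

/* Hypothetical hooks mirroring __send_message()/__wait_for_resp(). */
typedef int (*submit_fn)(void *req);
typedef int (*wait_fn)(uint16_t cookie);

static int send_with_retry(void *req, uint16_t cookie,
                           submit_fn submit, wait_fn wait,
                           const int *fw_status)
{
    int retry = 0xFF, rc;

    do {
        rc = submit(req);
        if (!rc)
            break;
        if (rc != -EAGAIN && rc != -EBUSY)
            return rc;                /* hard failure: no retry  */
        /* driver sleeps 0.5-1 msec (mdelay(1) when blocking)    */
    } while (retry--);
    if (rc)
        return rc;                    /* retries exhausted       */

    rc = wait(cookie);                /* block until bit clears  */
    if (rc)
        return rc;                    /* timed out               */

    return *fw_status ? -EFAULT : 0;  /* FW status -> errno      */
}
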
220/* Completions */ 238/* Completions */
221static int bnxt_qplib_process_func_event(struct bnxt_qplib_rcfw *rcfw, 239static int bnxt_qplib_process_func_event(struct bnxt_qplib_rcfw *rcfw,
222 struct creq_func_event *func_event) 240 struct creq_func_event *func_event)
@@ -260,12 +278,12 @@ static int bnxt_qplib_process_func_event(struct bnxt_qplib_rcfw *rcfw,
260static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw, 278static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
261 struct creq_qp_event *qp_event) 279 struct creq_qp_event *qp_event)
262{ 280{
263 struct bnxt_qplib_crsq *crsq = &rcfw->crsq;
264 struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq; 281 struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq;
265 struct bnxt_qplib_crsqe *crsqe; 282 struct bnxt_qplib_crsq *crsqe;
266 u16 cbit, cookie, blocked = 0;
267 unsigned long flags; 283 unsigned long flags;
268 u32 sw_cons; 284 u16 cbit, blocked = 0;
285 u16 cookie;
286 __le16 mcookie;
269 287
270 switch (qp_event->event) { 288 switch (qp_event->event) {
271 case CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION: 289 case CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION:
@@ -275,24 +293,31 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
275 default: 293 default:
276 /* Command Response */ 294 /* Command Response */
277 spin_lock_irqsave(&cmdq->lock, flags); 295 spin_lock_irqsave(&cmdq->lock, flags);
278 sw_cons = HWQ_CMP(crsq->cons, crsq); 296 cookie = le16_to_cpu(qp_event->cookie);
279 crsqe = &crsq->crsq[sw_cons]; 297 mcookie = qp_event->cookie;
280 crsq->cons++;
281 memcpy(&crsqe->qp_event, qp_event, sizeof(crsqe->qp_event));
282
283 cookie = le16_to_cpu(crsqe->qp_event.cookie);
284 blocked = cookie & RCFW_CMD_IS_BLOCKING; 298 blocked = cookie & RCFW_CMD_IS_BLOCKING;
285 cookie &= RCFW_MAX_COOKIE_VALUE; 299 cookie &= RCFW_MAX_COOKIE_VALUE;
286 cbit = cookie % RCFW_MAX_OUTSTANDING_CMD; 300 cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
301 crsqe = &rcfw->crsqe_tbl[cbit];
302 if (crsqe->resp &&
303 crsqe->resp->cookie == mcookie) {
304 memcpy(crsqe->resp, qp_event, sizeof(*qp_event));
305 crsqe->resp = NULL;
306 } else {
307 dev_err(&rcfw->pdev->dev,
308 "QPLIB: CMD %s resp->cookie = %#x, evnt->cookie = %#x",
309 crsqe->resp ? "mismatch" : "collision",
310 crsqe->resp ? crsqe->resp->cookie : 0, mcookie);
311 }
287 if (!test_and_clear_bit(cbit, rcfw->cmdq_bitmap)) 312 if (!test_and_clear_bit(cbit, rcfw->cmdq_bitmap))
288 dev_warn(&rcfw->pdev->dev, 313 dev_warn(&rcfw->pdev->dev,
289 "QPLIB: CMD bit %d was not requested", cbit); 314 "QPLIB: CMD bit %d was not requested", cbit);
290
291 cmdq->cons += crsqe->req_size; 315 cmdq->cons += crsqe->req_size;
292 spin_unlock_irqrestore(&cmdq->lock, flags); 316 crsqe->req_size = 0;
317
293 if (!blocked) 318 if (!blocked)
294 wake_up(&rcfw->waitq); 319 wake_up(&rcfw->waitq);
295 break; 320 spin_unlock_irqrestore(&cmdq->lock, flags);
296 } 321 }
297 return 0; 322 return 0;
298} 323}
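
Command completion is now routed by cookie into a per-slot table rather than by arrival order, so out-of-order firmware responses no longer corrupt a FIFO. A sketch of the matching logic, including the mismatch-versus-collision diagnostic from the hunk above:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SLOTS 256

struct event { uint16_t cookie; uint8_t status; };
struct slot  { struct event *resp; };  /* NULL = slot free */

static struct slot tbl[SLOTS];

/* Completion side: route an event to its waiter by cookie, never by
 * arrival order, so out-of-order firmware completions are harmless. */
static void complete_cmd(const struct event *ev)
{
    struct slot *s = &tbl[ev->cookie % SLOTS];

    if (s->resp && s->resp->cookie == ev->cookie) {
        memcpy(s->resp, ev, sizeof(*ev));
        s->resp = NULL;               /* hand back to submitter */
    } else {
        fprintf(stderr, "cmd %s for cookie %#x\n",
                s->resp ? "mismatch" : "collision", ev->cookie);
    }
}
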
@@ -305,12 +330,12 @@ static void bnxt_qplib_service_creq(unsigned long data)
305 struct creq_base *creqe, **creq_ptr; 330 struct creq_base *creqe, **creq_ptr;
306 u32 sw_cons, raw_cons; 331 u32 sw_cons, raw_cons;
307 unsigned long flags; 332 unsigned long flags;
308 u32 type; 333 u32 type, budget = CREQ_ENTRY_POLL_BUDGET;
309 334
310 /* Service the CREQ until empty */ 335 /* Service the CREQ until budget is over */
311 spin_lock_irqsave(&creq->lock, flags); 336 spin_lock_irqsave(&creq->lock, flags);
312 raw_cons = creq->cons; 337 raw_cons = creq->cons;
313 while (1) { 338 while (budget > 0) {
314 sw_cons = HWQ_CMP(raw_cons, creq); 339 sw_cons = HWQ_CMP(raw_cons, creq);
315 creq_ptr = (struct creq_base **)creq->pbl_ptr; 340 creq_ptr = (struct creq_base **)creq->pbl_ptr;
316 creqe = &creq_ptr[get_creq_pg(sw_cons)][get_creq_idx(sw_cons)]; 341 creqe = &creq_ptr[get_creq_pg(sw_cons)][get_creq_idx(sw_cons)];
@@ -320,15 +345,9 @@ static void bnxt_qplib_service_creq(unsigned long data)
320 type = creqe->type & CREQ_BASE_TYPE_MASK; 345 type = creqe->type & CREQ_BASE_TYPE_MASK;
321 switch (type) { 346 switch (type) {
322 case CREQ_BASE_TYPE_QP_EVENT: 347 case CREQ_BASE_TYPE_QP_EVENT:
323 if (!bnxt_qplib_process_qp_event 348 bnxt_qplib_process_qp_event
324 (rcfw, (struct creq_qp_event *)creqe)) 349 (rcfw, (struct creq_qp_event *)creqe);
325 rcfw->creq_qp_event_processed++; 350 rcfw->creq_qp_event_processed++;
326 else {
327 dev_warn(&rcfw->pdev->dev, "QPLIB: crsqe with");
328 dev_warn(&rcfw->pdev->dev,
329 "QPLIB: type = 0x%x not handled",
330 type);
331 }
332 break; 351 break;
333 case CREQ_BASE_TYPE_FUNC_EVENT: 352 case CREQ_BASE_TYPE_FUNC_EVENT:
334 if (!bnxt_qplib_process_func_event 353 if (!bnxt_qplib_process_func_event
@@ -346,7 +365,9 @@ static void bnxt_qplib_service_creq(unsigned long data)
346 break; 365 break;
347 } 366 }
348 raw_cons++; 367 raw_cons++;
368 budget--;
349 } 369 }
370
350 if (creq->cons != raw_cons) { 371 if (creq->cons != raw_cons) {
351 creq->cons = raw_cons; 372 creq->cons = raw_cons;
352 CREQ_DB_REARM(rcfw->creq_bar_reg_iomem, raw_cons, 373 CREQ_DB_REARM(rcfw->creq_bar_reg_iomem, raw_cons,
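
The CREQ tasklet is now bounded by CREQ_ENTRY_POLL_BUDGET, so a burst of events cannot monopolize it; whatever remains is handled on the next run after the doorbell rearm. A sketch of the budgeted drain (valid-bit ring, handler as a callback):

#include <stdbool.h>
#include <stdint.h>

#define POLL_BUDGET 0x100

struct evt { bool valid; };

/* Drain at most POLL_BUDGET valid entries, then stop; anything left
 * is picked up on the next tasklet run instead of looping forever. */
static uint32_t service_creq_sketch(struct evt *ring, uint32_t cons,
                                    uint32_t mask,
                                    void (*handle)(struct evt *))
{
    uint32_t budget = POLL_BUDGET;

    while (budget > 0) {
        struct evt *e = &ring[cons & mask];

        if (!e->valid)
            break;             /* queue empty: stop early */
        handle(e);
        e->valid = false;
        cons++;
        budget--;
    }
    return cons;               /* caller writes the doorbell */
}
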
@@ -375,23 +396,16 @@ static irqreturn_t bnxt_qplib_creq_irq(int irq, void *dev_instance)
375/* RCFW */ 396/* RCFW */
376int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw) 397int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw)
377{ 398{
378 struct creq_deinitialize_fw_resp *resp;
379 struct cmdq_deinitialize_fw req; 399 struct cmdq_deinitialize_fw req;
400 struct creq_deinitialize_fw_resp resp;
380 u16 cmd_flags = 0; 401 u16 cmd_flags = 0;
402 int rc;
381 403
382 RCFW_CMD_PREP(req, DEINITIALIZE_FW, cmd_flags); 404 RCFW_CMD_PREP(req, DEINITIALIZE_FW, cmd_flags);
383 resp = (struct creq_deinitialize_fw_resp *) 405 rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
384 bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 406 NULL, 0);
385 NULL, 0); 407 if (rc)
386 if (!resp) 408 return rc;
387 return -EINVAL;
388
389 if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie)))
390 return -ETIMEDOUT;
391
392 if (resp->status ||
393 le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie))
394 return -EFAULT;
395 409
396 clear_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags); 410 clear_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags);
397 return 0; 411 return 0;
@@ -417,9 +431,10 @@ static int __get_pbl_pg_idx(struct bnxt_qplib_pbl *pbl)
417int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw, 431int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
418 struct bnxt_qplib_ctx *ctx, int is_virtfn) 432 struct bnxt_qplib_ctx *ctx, int is_virtfn)
419{ 433{
420 struct creq_initialize_fw_resp *resp;
421 struct cmdq_initialize_fw req; 434 struct cmdq_initialize_fw req;
435 struct creq_initialize_fw_resp resp;
422 u16 cmd_flags = 0, level; 436 u16 cmd_flags = 0, level;
437 int rc;
423 438
424 RCFW_CMD_PREP(req, INITIALIZE_FW, cmd_flags); 439 RCFW_CMD_PREP(req, INITIALIZE_FW, cmd_flags);
425 440
@@ -482,37 +497,19 @@ int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
482 497
483skip_ctx_setup: 498skip_ctx_setup:
484 req.stat_ctx_id = cpu_to_le32(ctx->stats.fw_id); 499 req.stat_ctx_id = cpu_to_le32(ctx->stats.fw_id);
485 resp = (struct creq_initialize_fw_resp *) 500 rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
486 bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 501 NULL, 0);
487 NULL, 0); 502 if (rc)
488 if (!resp) { 503 return rc;
489 dev_err(&rcfw->pdev->dev,
490 "QPLIB: RCFW: INITIALIZE_FW send failed");
491 return -EINVAL;
492 }
493 if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
494 /* Cmd timed out */
495 dev_err(&rcfw->pdev->dev,
496 "QPLIB: RCFW: INITIALIZE_FW timed out");
497 return -ETIMEDOUT;
498 }
499 if (resp->status ||
500 le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
501 dev_err(&rcfw->pdev->dev,
502 "QPLIB: RCFW: INITIALIZE_FW failed");
503 return -EINVAL;
504 }
505 set_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags); 504 set_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags);
506 return 0; 505 return 0;
507} 506}
508 507
509void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw) 508void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
510{ 509{
511 bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->crsb); 510 kfree(rcfw->crsqe_tbl);
512 kfree(rcfw->crsq.crsq);
513 bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->cmdq); 511 bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->cmdq);
514 bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->creq); 512 bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->creq);
515
516 rcfw->pdev = NULL; 513 rcfw->pdev = NULL;
517} 514}
518 515
@@ -539,21 +536,11 @@ int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev,
539 goto fail; 536 goto fail;
540 } 537 }
541 538
542 rcfw->crsq.max_elements = rcfw->cmdq.max_elements; 539 rcfw->crsqe_tbl = kcalloc(rcfw->cmdq.max_elements,
543 rcfw->crsq.crsq = kcalloc(rcfw->crsq.max_elements, 540 sizeof(*rcfw->crsqe_tbl), GFP_KERNEL);
544 sizeof(*rcfw->crsq.crsq), GFP_KERNEL); 541 if (!rcfw->crsqe_tbl)
545 if (!rcfw->crsq.crsq)
546 goto fail; 542 goto fail;
547 543
548 rcfw->crsb.max_elements = BNXT_QPLIB_CRSBE_MAX_CNT;
549 if (bnxt_qplib_alloc_init_hwq(rcfw->pdev, &rcfw->crsb, NULL, 0,
550 &rcfw->crsb.max_elements,
551 BNXT_QPLIB_CRSBE_UNITS, 0, PAGE_SIZE,
552 HWQ_TYPE_CTX)) {
553 dev_err(&rcfw->pdev->dev,
554 "QPLIB: HW channel CRSB allocation failed");
555 goto fail;
556 }
557 return 0; 544 return 0;
558 545
559fail: 546fail:
@@ -606,7 +593,7 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
606 int rc; 593 int rc;
607 594
608 /* General */ 595 /* General */
609 atomic_set(&rcfw->seq_num, 0); 596 rcfw->seq_num = 0;
610 rcfw->flags = FIRMWARE_FIRST_FLAG; 597 rcfw->flags = FIRMWARE_FIRST_FLAG;
611 bmap_size = BITS_TO_LONGS(RCFW_MAX_OUTSTANDING_CMD * 598 bmap_size = BITS_TO_LONGS(RCFW_MAX_OUTSTANDING_CMD *
612 sizeof(unsigned long)); 599 sizeof(unsigned long));
@@ -636,10 +623,6 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
636 623
637 rcfw->cmdq_bar_reg_trig_off = RCFW_COMM_TRIG_OFFSET; 624 rcfw->cmdq_bar_reg_trig_off = RCFW_COMM_TRIG_OFFSET;
638 625
639 /* CRSQ */
640 rcfw->crsq.prod = 0;
641 rcfw->crsq.cons = 0;
642
643 /* CREQ */ 626 /* CREQ */
644 rcfw->creq_bar_reg = RCFW_COMM_CONS_PCI_BAR_REGION; 627 rcfw->creq_bar_reg = RCFW_COMM_CONS_PCI_BAR_REGION;
645 res_base = pci_resource_start(pdev, rcfw->creq_bar_reg); 628 res_base = pci_resource_start(pdev, rcfw->creq_bar_reg);
@@ -692,3 +675,34 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
692 __iowrite32_copy(rcfw->cmdq_bar_reg_iomem, &init, sizeof(init) / 4); 675 __iowrite32_copy(rcfw->cmdq_bar_reg_iomem, &init, sizeof(init) / 4);
693 return 0; 676 return 0;
694} 677}
678
679struct bnxt_qplib_rcfw_sbuf *bnxt_qplib_rcfw_alloc_sbuf(
680 struct bnxt_qplib_rcfw *rcfw,
681 u32 size)
682{
683 struct bnxt_qplib_rcfw_sbuf *sbuf;
684
685 sbuf = kzalloc(sizeof(*sbuf), GFP_ATOMIC);
686 if (!sbuf)
687 return NULL;
688
689 sbuf->size = size;
690 sbuf->sb = dma_zalloc_coherent(&rcfw->pdev->dev, sbuf->size,
691 &sbuf->dma_addr, GFP_ATOMIC);
692 if (!sbuf->sb)
693 goto bail;
694
695 return sbuf;
696bail:
697 kfree(sbuf);
698 return NULL;
699}
700
701void bnxt_qplib_rcfw_free_sbuf(struct bnxt_qplib_rcfw *rcfw,
702 struct bnxt_qplib_rcfw_sbuf *sbuf)
703{
704 if (sbuf->sb)
705 dma_free_coherent(&rcfw->pdev->dev, sbuf->size,
706 sbuf->sb, sbuf->dma_addr);
707 kfree(sbuf);
708}
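
The two helpers above replace the fixed CRSB ring with a per-command DMA side buffer that the caller owns. A minimal sketch of the resulting call pattern, modeled on the QUERY_FUNC conversion later in this patch (the function name and control flow here are illustrative):

	/* Sketch only: mirrors bnxt_qplib_get_dev_attr() below. */
	static int query_via_sbuf(struct bnxt_qplib_rcfw *rcfw)
	{
		struct creq_query_func_resp resp;
		struct creq_query_func_resp_sb *sb;
		struct bnxt_qplib_rcfw_sbuf *sbuf;
		struct cmdq_query_func req;
		u16 cmd_flags = 0;
		int rc;

		RCFW_CMD_PREP(req, QUERY_FUNC, cmd_flags);

		sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
		if (!sbuf)
			return -ENOMEM;

		sb = sbuf->sb;
		req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
		rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
						  (void *)&resp,
						  (void *)sbuf, 0);
		/* On success the firmware's output is in *sb; the side
		 * buffer must be freed on every exit path.
		 */
		bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
		return rc;
	}

Note that completion status now comes back as a plain errno from bnxt_qplib_rcfw_send_message(), which folds in the send, wait and cookie/status checks that each caller used to open-code.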
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
index d3567d75bf58..09ce121770cd 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
@@ -73,6 +73,7 @@
73#define RCFW_MAX_OUTSTANDING_CMD BNXT_QPLIB_CMDQE_MAX_CNT 73#define RCFW_MAX_OUTSTANDING_CMD BNXT_QPLIB_CMDQE_MAX_CNT
74#define RCFW_MAX_COOKIE_VALUE 0x7FFF 74#define RCFW_MAX_COOKIE_VALUE 0x7FFF
75#define RCFW_CMD_IS_BLOCKING 0x8000 75#define RCFW_CMD_IS_BLOCKING 0x8000
76#define RCFW_BLOCKED_CMD_WAIT_COUNT 0x4E20
76 77
 77/* Cmdq contains a fixed number of 16-byte slots */ 78/* Cmdq contains a fixed number of 16-byte slots */
78struct bnxt_qplib_cmdqe { 79struct bnxt_qplib_cmdqe {
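
For reference, 0x4E20 is decimal 20000; judging by the name, this is the number of poll iterations granted to a blocking command before it is treated as timed out (the consumer of the define is not part of the hunks shown here). A trivial standalone check:

	#include <assert.h>

	int main(void)
	{
		assert(0x4E20 == 20000);	/* RCFW_BLOCKED_CMD_WAIT_COUNT */
		return 0;
	}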
@@ -94,32 +95,6 @@ struct bnxt_qplib_crsbe {
94 u8 data[1024]; 95 u8 data[1024];
95}; 96};
96 97
97/* CRSQ SB */
98#define BNXT_QPLIB_CRSBE_MAX_CNT 4
99#define BNXT_QPLIB_CRSBE_UNITS sizeof(struct bnxt_qplib_crsbe)
100#define BNXT_QPLIB_CRSBE_CNT_PER_PG (PAGE_SIZE / BNXT_QPLIB_CRSBE_UNITS)
101
102#define MAX_CRSB_IDX (BNXT_QPLIB_CRSBE_MAX_CNT - 1)
103#define MAX_CRSB_IDX_PER_PG (BNXT_QPLIB_CRSBE_CNT_PER_PG - 1)
104
105static inline u32 get_crsb_pg(u32 val)
106{
107 return (val & ~MAX_CRSB_IDX_PER_PG) / BNXT_QPLIB_CRSBE_CNT_PER_PG;
108}
109
110static inline u32 get_crsb_idx(u32 val)
111{
112 return val & MAX_CRSB_IDX_PER_PG;
113}
114
115static inline void bnxt_qplib_crsb_dma_next(dma_addr_t *pg_map_arr,
116 u32 prod, dma_addr_t *dma_addr)
117{
118 *dma_addr = pg_map_arr[(prod) / BNXT_QPLIB_CRSBE_CNT_PER_PG];
119 *dma_addr += ((prod) % BNXT_QPLIB_CRSBE_CNT_PER_PG) *
120 BNXT_QPLIB_CRSBE_UNITS;
121}
122
123/* CREQ */ 98/* CREQ */
124/* Allocate 1 per QP for async error notification for now */ 99/* Allocate 1 per QP for async error notification for now */
125#define BNXT_QPLIB_CREQE_MAX_CNT (64 * 1024) 100#define BNXT_QPLIB_CREQE_MAX_CNT (64 * 1024)
@@ -158,17 +133,19 @@ static inline u32 get_creq_idx(u32 val)
158#define CREQ_DB(db, raw_cons, cp_bit) \ 133#define CREQ_DB(db, raw_cons, cp_bit) \
159 writel(CREQ_DB_CP_FLAGS | ((raw_cons) & ((cp_bit) - 1)), db) 134 writel(CREQ_DB_CP_FLAGS | ((raw_cons) & ((cp_bit) - 1)), db)
160 135
136#define CREQ_ENTRY_POLL_BUDGET 0x100
137
161/* HWQ */ 138/* HWQ */
162struct bnxt_qplib_crsqe { 139
163 struct creq_qp_event qp_event; 140struct bnxt_qplib_crsq {
141 struct creq_qp_event *resp;
164 u32 req_size; 142 u32 req_size;
165}; 143};
166 144
167struct bnxt_qplib_crsq { 145struct bnxt_qplib_rcfw_sbuf {
168 struct bnxt_qplib_crsqe *crsq; 146 void *sb;
169 u32 prod; 147 dma_addr_t dma_addr;
170 u32 cons; 148 u32 size;
171 u32 max_elements;
172}; 149};
173 150
174/* RCFW Communication Channels */ 151/* RCFW Communication Channels */
@@ -185,7 +162,7 @@ struct bnxt_qplib_rcfw {
185 wait_queue_head_t waitq; 162 wait_queue_head_t waitq;
186 int (*aeq_handler)(struct bnxt_qplib_rcfw *, 163 int (*aeq_handler)(struct bnxt_qplib_rcfw *,
187 struct creq_func_event *); 164 struct creq_func_event *);
188 atomic_t seq_num; 165 u32 seq_num;
189 166
190 /* Bar region info */ 167 /* Bar region info */
191 void __iomem *cmdq_bar_reg_iomem; 168 void __iomem *cmdq_bar_reg_iomem;
@@ -203,8 +180,7 @@ struct bnxt_qplib_rcfw {
203 180
204 /* Actual Cmd and Resp Queues */ 181 /* Actual Cmd and Resp Queues */
205 struct bnxt_qplib_hwq cmdq; 182 struct bnxt_qplib_hwq cmdq;
206 struct bnxt_qplib_crsq crsq; 183 struct bnxt_qplib_crsq *crsqe_tbl;
207 struct bnxt_qplib_hwq crsb;
208}; 184};
209 185
210void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw); 186void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw);
@@ -219,11 +195,14 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
219 (struct bnxt_qplib_rcfw *, 195 (struct bnxt_qplib_rcfw *,
220 struct creq_func_event *)); 196 struct creq_func_event *));
221 197
222int bnxt_qplib_rcfw_block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie); 198struct bnxt_qplib_rcfw_sbuf *bnxt_qplib_rcfw_alloc_sbuf(
223int bnxt_qplib_rcfw_wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie); 199 struct bnxt_qplib_rcfw *rcfw,
224void *bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw, 200 u32 size);
225 struct cmdq_base *req, void **crsbe, 201void bnxt_qplib_rcfw_free_sbuf(struct bnxt_qplib_rcfw *rcfw,
226 u8 is_block); 202 struct bnxt_qplib_rcfw_sbuf *sbuf);
203int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
204 struct cmdq_base *req, struct creq_base *resp,
205 void *sbuf, u8 is_block);
227 206
228int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw); 207int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw);
229int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw, 208int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h
index 6277d802ca4b..2e4855509719 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h
@@ -48,6 +48,10 @@ extern const struct bnxt_qplib_gid bnxt_qplib_gid_zero;
48 48
49#define HWQ_CMP(idx, hwq) ((idx) & ((hwq)->max_elements - 1)) 49#define HWQ_CMP(idx, hwq) ((idx) & ((hwq)->max_elements - 1))
50 50
51#define HWQ_FREE_SLOTS(hwq) (hwq->max_elements - \
52 ((HWQ_CMP(hwq->prod, hwq)\
53 - HWQ_CMP(hwq->cons, hwq))\
54 & (hwq->max_elements - 1)))
51enum bnxt_qplib_hwq_type { 55enum bnxt_qplib_hwq_type {
52 HWQ_TYPE_CTX, 56 HWQ_TYPE_CTX,
53 HWQ_TYPE_QUEUE, 57 HWQ_TYPE_QUEUE,
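
HWQ_FREE_SLOTS is standard power-of-two ring arithmetic: occupancy is (prod - cons) masked to the ring size, and the free count is the remainder. A standalone illustration with assumed values (max_elements must be a power of two for the mask to be valid):

	#include <stdio.h>

	int main(void)
	{
		unsigned int max = 8, prod = 5, cons = 2;	/* assumed */
		unsigned int used = (prod - cons) & (max - 1);

		/* used = 3, free = 5; masking also handles prod wrapping */
		printf("used=%u free=%u\n", used, max - used);
		return 0;
	}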
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index 7b31eccedf11..fde18cf0e406 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -55,37 +55,30 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
55 struct bnxt_qplib_dev_attr *attr) 55 struct bnxt_qplib_dev_attr *attr)
56{ 56{
57 struct cmdq_query_func req; 57 struct cmdq_query_func req;
58 struct creq_query_func_resp *resp; 58 struct creq_query_func_resp resp;
59 struct bnxt_qplib_rcfw_sbuf *sbuf;
59 struct creq_query_func_resp_sb *sb; 60 struct creq_query_func_resp_sb *sb;
60 u16 cmd_flags = 0; 61 u16 cmd_flags = 0;
61 u32 temp; 62 u32 temp;
62 u8 *tqm_alloc; 63 u8 *tqm_alloc;
63 int i; 64 int i, rc = 0;
64 65
65 RCFW_CMD_PREP(req, QUERY_FUNC, cmd_flags); 66 RCFW_CMD_PREP(req, QUERY_FUNC, cmd_flags);
66 67
67 req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS; 68 sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
68 resp = (struct creq_query_func_resp *) 69 if (!sbuf) {
69 bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void **)&sb,
70 0);
71 if (!resp) {
72 dev_err(&rcfw->pdev->dev, "QPLIB: SP: QUERY_FUNC send failed");
73 return -EINVAL;
74 }
75 if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
76 /* Cmd timed out */
77 dev_err(&rcfw->pdev->dev, "QPLIB: SP: QUERY_FUNC timed out");
78 return -ETIMEDOUT;
79 }
80 if (resp->status ||
81 le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
82 dev_err(&rcfw->pdev->dev, "QPLIB: SP: QUERY_FUNC failed ");
83 dev_err(&rcfw->pdev->dev, 70 dev_err(&rcfw->pdev->dev,
84 "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", 71 "QPLIB: SP: QUERY_FUNC alloc side buffer failed");
85 resp->status, le16_to_cpu(req.cookie), 72 return -ENOMEM;
86 le16_to_cpu(resp->cookie));
87 return -EINVAL;
88 } 73 }
74
75 sb = sbuf->sb;
76 req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
77 rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
78 (void *)sbuf, 0);
79 if (rc)
80 goto bail;
81
89 /* Extract the context from the side buffer */ 82 /* Extract the context from the side buffer */
90 attr->max_qp = le32_to_cpu(sb->max_qp); 83 attr->max_qp = le32_to_cpu(sb->max_qp);
91 attr->max_qp_rd_atom = 84 attr->max_qp_rd_atom =
@@ -95,6 +88,11 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
95 sb->max_qp_init_rd_atom > BNXT_QPLIB_MAX_OUT_RD_ATOM ? 88 sb->max_qp_init_rd_atom > BNXT_QPLIB_MAX_OUT_RD_ATOM ?
96 BNXT_QPLIB_MAX_OUT_RD_ATOM : sb->max_qp_init_rd_atom; 89 BNXT_QPLIB_MAX_OUT_RD_ATOM : sb->max_qp_init_rd_atom;
97 attr->max_qp_wqes = le16_to_cpu(sb->max_qp_wr); 90 attr->max_qp_wqes = le16_to_cpu(sb->max_qp_wr);
91 /*
 92 * 128 WQEs need to be reserved for the HW (8916), so avoid
 93 * reporting the raw hardware maximum
94 */
95 attr->max_qp_wqes -= BNXT_QPLIB_RESERVED_QP_WRS;
98 attr->max_qp_sges = sb->max_sge; 96 attr->max_qp_sges = sb->max_sge;
99 attr->max_cq = le32_to_cpu(sb->max_cq); 97 attr->max_cq = le32_to_cpu(sb->max_cq);
100 attr->max_cq_wqes = le32_to_cpu(sb->max_cqe); 98 attr->max_cq_wqes = le32_to_cpu(sb->max_cqe);
@@ -130,7 +128,10 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
130 attr->tqm_alloc_reqs[i * 4 + 2] = *(++tqm_alloc); 128 attr->tqm_alloc_reqs[i * 4 + 2] = *(++tqm_alloc);
131 attr->tqm_alloc_reqs[i * 4 + 3] = *(++tqm_alloc); 129 attr->tqm_alloc_reqs[i * 4 + 3] = *(++tqm_alloc);
132 } 130 }
133 return 0; 131
132bail:
133 bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
134 return rc;
134} 135}
135 136
136/* SGID */ 137/* SGID */
@@ -178,8 +179,9 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
178 /* Remove GID from the SGID table */ 179 /* Remove GID from the SGID table */
179 if (update) { 180 if (update) {
180 struct cmdq_delete_gid req; 181 struct cmdq_delete_gid req;
181 struct creq_delete_gid_resp *resp; 182 struct creq_delete_gid_resp resp;
182 u16 cmd_flags = 0; 183 u16 cmd_flags = 0;
184 int rc;
183 185
184 RCFW_CMD_PREP(req, DELETE_GID, cmd_flags); 186 RCFW_CMD_PREP(req, DELETE_GID, cmd_flags);
185 if (sgid_tbl->hw_id[index] == 0xFFFF) { 187 if (sgid_tbl->hw_id[index] == 0xFFFF) {
@@ -188,31 +190,10 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
188 return -EINVAL; 190 return -EINVAL;
189 } 191 }
190 req.gid_index = cpu_to_le16(sgid_tbl->hw_id[index]); 192 req.gid_index = cpu_to_le16(sgid_tbl->hw_id[index]);
191 resp = (struct creq_delete_gid_resp *) 193 rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
192 bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, NULL, 194 (void *)&resp, NULL, 0);
193 0); 195 if (rc)
194 if (!resp) { 196 return rc;
195 dev_err(&res->pdev->dev,
196 "QPLIB: SP: DELETE_GID send failed");
197 return -EINVAL;
198 }
199 if (!bnxt_qplib_rcfw_wait_for_resp(rcfw,
200 le16_to_cpu(req.cookie))) {
201 /* Cmd timed out */
202 dev_err(&res->pdev->dev,
203 "QPLIB: SP: DELETE_GID timed out");
204 return -ETIMEDOUT;
205 }
206 if (resp->status ||
207 le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
208 dev_err(&res->pdev->dev,
209 "QPLIB: SP: DELETE_GID failed ");
210 dev_err(&res->pdev->dev,
211 "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
212 resp->status, le16_to_cpu(req.cookie),
213 le16_to_cpu(resp->cookie));
214 return -EINVAL;
215 }
216 } 197 }
217 memcpy(&sgid_tbl->tbl[index], &bnxt_qplib_gid_zero, 198 memcpy(&sgid_tbl->tbl[index], &bnxt_qplib_gid_zero,
218 sizeof(bnxt_qplib_gid_zero)); 199 sizeof(bnxt_qplib_gid_zero));
@@ -234,7 +215,7 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
234 struct bnxt_qplib_res, 215 struct bnxt_qplib_res,
235 sgid_tbl); 216 sgid_tbl);
236 struct bnxt_qplib_rcfw *rcfw = res->rcfw; 217 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
237 int i, free_idx, rc = 0; 218 int i, free_idx;
238 219
239 if (!sgid_tbl) { 220 if (!sgid_tbl) {
240 dev_err(&res->pdev->dev, "QPLIB: SGID table not allocated"); 221 dev_err(&res->pdev->dev, "QPLIB: SGID table not allocated");
@@ -266,10 +247,11 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
266 } 247 }
267 if (update) { 248 if (update) {
268 struct cmdq_add_gid req; 249 struct cmdq_add_gid req;
269 struct creq_add_gid_resp *resp; 250 struct creq_add_gid_resp resp;
270 u16 cmd_flags = 0; 251 u16 cmd_flags = 0;
271 u32 temp32[4]; 252 u32 temp32[4];
272 u16 temp16[3]; 253 u16 temp16[3];
254 int rc;
273 255
274 RCFW_CMD_PREP(req, ADD_GID, cmd_flags); 256 RCFW_CMD_PREP(req, ADD_GID, cmd_flags);
275 257
@@ -290,31 +272,11 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
290 req.src_mac[1] = cpu_to_be16(temp16[1]); 272 req.src_mac[1] = cpu_to_be16(temp16[1]);
291 req.src_mac[2] = cpu_to_be16(temp16[2]); 273 req.src_mac[2] = cpu_to_be16(temp16[2]);
292 274
293 resp = (struct creq_add_gid_resp *) 275 rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
294 bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 276 (void *)&resp, NULL, 0);
295 NULL, 0); 277 if (rc)
296 if (!resp) { 278 return rc;
297 dev_err(&res->pdev->dev, 279 sgid_tbl->hw_id[free_idx] = le32_to_cpu(resp.xid);
298 "QPLIB: SP: ADD_GID send failed");
299 return -EINVAL;
300 }
301 if (!bnxt_qplib_rcfw_wait_for_resp(rcfw,
302 le16_to_cpu(req.cookie))) {
303 /* Cmd timed out */
304 dev_err(&res->pdev->dev,
305 "QPIB: SP: ADD_GID timed out");
306 return -ETIMEDOUT;
307 }
308 if (resp->status ||
309 le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
310 dev_err(&res->pdev->dev, "QPLIB: SP: ADD_GID failed ");
311 dev_err(&res->pdev->dev,
312 "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
313 resp->status, le16_to_cpu(req.cookie),
314 le16_to_cpu(resp->cookie));
315 return -EINVAL;
316 }
317 sgid_tbl->hw_id[free_idx] = le32_to_cpu(resp->xid);
318 } 280 }
319 /* Add GID to the sgid_tbl */ 281 /* Add GID to the sgid_tbl */
320 memcpy(&sgid_tbl->tbl[free_idx], gid, sizeof(*gid)); 282 memcpy(&sgid_tbl->tbl[free_idx], gid, sizeof(*gid));
@@ -325,7 +287,7 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
325 287
326 *index = free_idx; 288 *index = free_idx;
327 /* unlock */ 289 /* unlock */
328 return rc; 290 return 0;
329} 291}
330 292
331/* pkeys */ 293/* pkeys */
@@ -422,10 +384,11 @@ int bnxt_qplib_create_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah)
422{ 384{
423 struct bnxt_qplib_rcfw *rcfw = res->rcfw; 385 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
424 struct cmdq_create_ah req; 386 struct cmdq_create_ah req;
425 struct creq_create_ah_resp *resp; 387 struct creq_create_ah_resp resp;
426 u16 cmd_flags = 0; 388 u16 cmd_flags = 0;
427 u32 temp32[4]; 389 u32 temp32[4];
428 u16 temp16[3]; 390 u16 temp16[3];
391 int rc;
429 392
430 RCFW_CMD_PREP(req, CREATE_AH, cmd_flags); 393 RCFW_CMD_PREP(req, CREATE_AH, cmd_flags);
431 394
@@ -450,28 +413,12 @@ int bnxt_qplib_create_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah)
450 req.dest_mac[1] = cpu_to_le16(temp16[1]); 413 req.dest_mac[1] = cpu_to_le16(temp16[1]);
451 req.dest_mac[2] = cpu_to_le16(temp16[2]); 414 req.dest_mac[2] = cpu_to_le16(temp16[2]);
452 415
453 resp = (struct creq_create_ah_resp *) 416 rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
454 bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 417 NULL, 1);
455 NULL, 1); 418 if (rc)
456 if (!resp) { 419 return rc;
457 dev_err(&rcfw->pdev->dev, "QPLIB: SP: CREATE_AH send failed"); 420
458 return -EINVAL; 421 ah->id = le32_to_cpu(resp.xid);
459 }
460 if (!bnxt_qplib_rcfw_block_for_resp(rcfw, le16_to_cpu(req.cookie))) {
461 /* Cmd timed out */
462 dev_err(&rcfw->pdev->dev, "QPLIB: SP: CREATE_AH timed out");
463 return -ETIMEDOUT;
464 }
465 if (resp->status ||
466 le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
467 dev_err(&rcfw->pdev->dev, "QPLIB: SP: CREATE_AH failed ");
468 dev_err(&rcfw->pdev->dev,
469 "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
470 resp->status, le16_to_cpu(req.cookie),
471 le16_to_cpu(resp->cookie));
472 return -EINVAL;
473 }
474 ah->id = le32_to_cpu(resp->xid);
475 return 0; 422 return 0;
476} 423}
477 424
@@ -479,35 +426,19 @@ int bnxt_qplib_destroy_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah)
479{ 426{
480 struct bnxt_qplib_rcfw *rcfw = res->rcfw; 427 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
481 struct cmdq_destroy_ah req; 428 struct cmdq_destroy_ah req;
482 struct creq_destroy_ah_resp *resp; 429 struct creq_destroy_ah_resp resp;
483 u16 cmd_flags = 0; 430 u16 cmd_flags = 0;
431 int rc;
484 432
485 /* Clean up the AH table in the device */ 433 /* Clean up the AH table in the device */
486 RCFW_CMD_PREP(req, DESTROY_AH, cmd_flags); 434 RCFW_CMD_PREP(req, DESTROY_AH, cmd_flags);
487 435
488 req.ah_cid = cpu_to_le32(ah->id); 436 req.ah_cid = cpu_to_le32(ah->id);
489 437
490 resp = (struct creq_destroy_ah_resp *) 438 rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
491 bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 439 NULL, 1);
492 NULL, 1); 440 if (rc)
493 if (!resp) { 441 return rc;
494 dev_err(&rcfw->pdev->dev, "QPLIB: SP: DESTROY_AH send failed");
495 return -EINVAL;
496 }
497 if (!bnxt_qplib_rcfw_block_for_resp(rcfw, le16_to_cpu(req.cookie))) {
498 /* Cmd timed out */
499 dev_err(&rcfw->pdev->dev, "QPLIB: SP: DESTROY_AH timed out");
500 return -ETIMEDOUT;
501 }
502 if (resp->status ||
503 le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
504 dev_err(&rcfw->pdev->dev, "QPLIB: SP: DESTROY_AH failed ");
505 dev_err(&rcfw->pdev->dev,
506 "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
507 resp->status, le16_to_cpu(req.cookie),
508 le16_to_cpu(resp->cookie));
509 return -EINVAL;
510 }
511 return 0; 442 return 0;
512} 443}
513 444
@@ -516,8 +447,9 @@ int bnxt_qplib_free_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw)
516{ 447{
517 struct bnxt_qplib_rcfw *rcfw = res->rcfw; 448 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
518 struct cmdq_deallocate_key req; 449 struct cmdq_deallocate_key req;
519 struct creq_deallocate_key_resp *resp; 450 struct creq_deallocate_key_resp resp;
520 u16 cmd_flags = 0; 451 u16 cmd_flags = 0;
452 int rc;
521 453
522 if (mrw->lkey == 0xFFFFFFFF) { 454 if (mrw->lkey == 0xFFFFFFFF) {
523 dev_info(&res->pdev->dev, 455 dev_info(&res->pdev->dev,
@@ -536,27 +468,11 @@ int bnxt_qplib_free_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw)
536 else 468 else
537 req.key = cpu_to_le32(mrw->lkey); 469 req.key = cpu_to_le32(mrw->lkey);
538 470
539 resp = (struct creq_deallocate_key_resp *) 471 rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
540 bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 472 NULL, 0);
541 NULL, 0); 473 if (rc)
542 if (!resp) { 474 return rc;
543 dev_err(&res->pdev->dev, "QPLIB: SP: FREE_MR send failed"); 475
544 return -EINVAL;
545 }
546 if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
547 /* Cmd timed out */
548 dev_err(&res->pdev->dev, "QPLIB: SP: FREE_MR timed out");
549 return -ETIMEDOUT;
550 }
551 if (resp->status ||
552 le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
553 dev_err(&res->pdev->dev, "QPLIB: SP: FREE_MR failed ");
554 dev_err(&res->pdev->dev,
555 "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
556 resp->status, le16_to_cpu(req.cookie),
557 le16_to_cpu(resp->cookie));
558 return -EINVAL;
559 }
560 /* Free the qplib's MRW memory */ 476 /* Free the qplib's MRW memory */
561 if (mrw->hwq.max_elements) 477 if (mrw->hwq.max_elements)
562 bnxt_qplib_free_hwq(res->pdev, &mrw->hwq); 478 bnxt_qplib_free_hwq(res->pdev, &mrw->hwq);
@@ -568,9 +484,10 @@ int bnxt_qplib_alloc_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw)
568{ 484{
569 struct bnxt_qplib_rcfw *rcfw = res->rcfw; 485 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
570 struct cmdq_allocate_mrw req; 486 struct cmdq_allocate_mrw req;
571 struct creq_allocate_mrw_resp *resp; 487 struct creq_allocate_mrw_resp resp;
572 u16 cmd_flags = 0; 488 u16 cmd_flags = 0;
573 unsigned long tmp; 489 unsigned long tmp;
490 int rc;
574 491
575 RCFW_CMD_PREP(req, ALLOCATE_MRW, cmd_flags); 492 RCFW_CMD_PREP(req, ALLOCATE_MRW, cmd_flags);
576 493
@@ -584,33 +501,17 @@ int bnxt_qplib_alloc_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw)
584 tmp = (unsigned long)mrw; 501 tmp = (unsigned long)mrw;
585 req.mrw_handle = cpu_to_le64(tmp); 502 req.mrw_handle = cpu_to_le64(tmp);
586 503
587 resp = (struct creq_allocate_mrw_resp *) 504 rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
588 bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 505 (void *)&resp, NULL, 0);
589 NULL, 0); 506 if (rc)
590 if (!resp) { 507 return rc;
591 dev_err(&rcfw->pdev->dev, "QPLIB: SP: ALLOC_MRW send failed"); 508
592 return -EINVAL;
593 }
594 if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
595 /* Cmd timed out */
596 dev_err(&rcfw->pdev->dev, "QPLIB: SP: ALLOC_MRW timed out");
597 return -ETIMEDOUT;
598 }
599 if (resp->status ||
600 le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
601 dev_err(&rcfw->pdev->dev, "QPLIB: SP: ALLOC_MRW failed ");
602 dev_err(&rcfw->pdev->dev,
603 "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
604 resp->status, le16_to_cpu(req.cookie),
605 le16_to_cpu(resp->cookie));
606 return -EINVAL;
607 }
608 if ((mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1) || 509 if ((mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1) ||
609 (mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2A) || 510 (mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2A) ||
610 (mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B)) 511 (mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B))
611 mrw->rkey = le32_to_cpu(resp->xid); 512 mrw->rkey = le32_to_cpu(resp.xid);
612 else 513 else
613 mrw->lkey = le32_to_cpu(resp->xid); 514 mrw->lkey = le32_to_cpu(resp.xid);
614 return 0; 515 return 0;
615} 516}
616 517
@@ -619,40 +520,17 @@ int bnxt_qplib_dereg_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw,
619{ 520{
620 struct bnxt_qplib_rcfw *rcfw = res->rcfw; 521 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
621 struct cmdq_deregister_mr req; 522 struct cmdq_deregister_mr req;
622 struct creq_deregister_mr_resp *resp; 523 struct creq_deregister_mr_resp resp;
623 u16 cmd_flags = 0; 524 u16 cmd_flags = 0;
624 int rc; 525 int rc;
625 526
626 RCFW_CMD_PREP(req, DEREGISTER_MR, cmd_flags); 527 RCFW_CMD_PREP(req, DEREGISTER_MR, cmd_flags);
627 528
628 req.lkey = cpu_to_le32(mrw->lkey); 529 req.lkey = cpu_to_le32(mrw->lkey);
629 resp = (struct creq_deregister_mr_resp *) 530 rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
630 bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 531 (void *)&resp, NULL, block);
631 NULL, block); 532 if (rc)
632 if (!resp) { 533 return rc;
633 dev_err(&rcfw->pdev->dev, "QPLIB: SP: DEREG_MR send failed");
634 return -EINVAL;
635 }
636 if (block)
637 rc = bnxt_qplib_rcfw_block_for_resp(rcfw,
638 le16_to_cpu(req.cookie));
639 else
640 rc = bnxt_qplib_rcfw_wait_for_resp(rcfw,
641 le16_to_cpu(req.cookie));
642 if (!rc) {
643 /* Cmd timed out */
644 dev_err(&res->pdev->dev, "QPLIB: SP: DEREG_MR timed out");
645 return -ETIMEDOUT;
646 }
647 if (resp->status ||
648 le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
649 dev_err(&rcfw->pdev->dev, "QPLIB: SP: DEREG_MR failed ");
650 dev_err(&rcfw->pdev->dev,
651 "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
652 resp->status, le16_to_cpu(req.cookie),
653 le16_to_cpu(resp->cookie));
654 return -EINVAL;
655 }
656 534
657 /* Free the qplib's MR memory */ 535 /* Free the qplib's MR memory */
658 if (mrw->hwq.max_elements) { 536 if (mrw->hwq.max_elements) {
@@ -669,7 +547,7 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
669{ 547{
670 struct bnxt_qplib_rcfw *rcfw = res->rcfw; 548 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
671 struct cmdq_register_mr req; 549 struct cmdq_register_mr req;
672 struct creq_register_mr_resp *resp; 550 struct creq_register_mr_resp resp;
673 u16 cmd_flags = 0, level; 551 u16 cmd_flags = 0, level;
674 int pg_ptrs, pages, i, rc; 552 int pg_ptrs, pages, i, rc;
675 dma_addr_t **pbl_ptr; 553 dma_addr_t **pbl_ptr;
@@ -730,36 +608,11 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
730 req.key = cpu_to_le32(mr->lkey); 608 req.key = cpu_to_le32(mr->lkey);
731 req.mr_size = cpu_to_le64(mr->total_size); 609 req.mr_size = cpu_to_le64(mr->total_size);
732 610
733 resp = (struct creq_register_mr_resp *) 611 rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
734 bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 612 (void *)&resp, NULL, block);
735 NULL, block); 613 if (rc)
736 if (!resp) {
737 dev_err(&res->pdev->dev, "SP: REG_MR send failed");
738 rc = -EINVAL;
739 goto fail;
740 }
741 if (block)
742 rc = bnxt_qplib_rcfw_block_for_resp(rcfw,
743 le16_to_cpu(req.cookie));
744 else
745 rc = bnxt_qplib_rcfw_wait_for_resp(rcfw,
746 le16_to_cpu(req.cookie));
747 if (!rc) {
748 /* Cmd timed out */
749 dev_err(&res->pdev->dev, "SP: REG_MR timed out");
750 rc = -ETIMEDOUT;
751 goto fail;
752 }
753 if (resp->status ||
754 le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
755 dev_err(&res->pdev->dev, "QPLIB: SP: REG_MR failed ");
756 dev_err(&res->pdev->dev,
757 "QPLIB: SP: with status 0x%x cmdq 0x%x resp 0x%x",
758 resp->status, le16_to_cpu(req.cookie),
759 le16_to_cpu(resp->cookie));
760 rc = -EINVAL;
761 goto fail; 614 goto fail;
762 } 615
763 return 0; 616 return 0;
764 617
765fail: 618fail:
@@ -804,35 +657,15 @@ int bnxt_qplib_map_tc2cos(struct bnxt_qplib_res *res, u16 *cids)
804{ 657{
805 struct bnxt_qplib_rcfw *rcfw = res->rcfw; 658 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
806 struct cmdq_map_tc_to_cos req; 659 struct cmdq_map_tc_to_cos req;
807 struct creq_map_tc_to_cos_resp *resp; 660 struct creq_map_tc_to_cos_resp resp;
808 u16 cmd_flags = 0; 661 u16 cmd_flags = 0;
809 int tleft; 662 int rc = 0;
810 663
811 RCFW_CMD_PREP(req, MAP_TC_TO_COS, cmd_flags); 664 RCFW_CMD_PREP(req, MAP_TC_TO_COS, cmd_flags);
812 req.cos0 = cpu_to_le16(cids[0]); 665 req.cos0 = cpu_to_le16(cids[0]);
813 req.cos1 = cpu_to_le16(cids[1]); 666 req.cos1 = cpu_to_le16(cids[1]);
814 667
815 resp = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, NULL, 0); 668 rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
816 if (!resp) { 669 (void *)&resp, NULL, 0);
817 dev_err(&res->pdev->dev, "QPLIB: SP: MAP_TC2COS send failed");
818 return -EINVAL;
819 }
820
821 tleft = bnxt_qplib_rcfw_block_for_resp(rcfw, le16_to_cpu(req.cookie));
822 if (!tleft) {
823 dev_err(&res->pdev->dev, "QPLIB: SP: MAP_TC2COS timed out");
824 return -ETIMEDOUT;
825 }
826
827 if (resp->status ||
828 le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
829 dev_err(&res->pdev->dev, "QPLIB: SP: MAP_TC2COS failed ");
830 dev_err(&res->pdev->dev,
831 "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
832 resp->status, le16_to_cpu(req.cookie),
833 le16_to_cpu(resp->cookie));
834 return -EINVAL;
835 }
836
837 return 0; 670 return 0;
838} 671}
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
index 1442a617e968..a543f959098b 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
@@ -40,6 +40,8 @@
40#ifndef __BNXT_QPLIB_SP_H__ 40#ifndef __BNXT_QPLIB_SP_H__
41#define __BNXT_QPLIB_SP_H__ 41#define __BNXT_QPLIB_SP_H__
42 42
43#define BNXT_QPLIB_RESERVED_QP_WRS 128
44
43struct bnxt_qplib_dev_attr { 45struct bnxt_qplib_dev_attr {
44 char fw_ver[32]; 46 char fw_ver[32];
45 u16 max_sgid; 47 u16 max_sgid;
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index b6fe45924c6e..0910faf3587b 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -488,6 +488,7 @@ static int _put_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb)
488 488
489 ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *))); 489 ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *)));
490 release_ep_resources(ep); 490 release_ep_resources(ep);
491 kfree_skb(skb);
491 return 0; 492 return 0;
492} 493}
493 494
@@ -498,6 +499,7 @@ static int _put_pass_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb)
498 ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *))); 499 ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *)));
499 c4iw_put_ep(&ep->parent_ep->com); 500 c4iw_put_ep(&ep->parent_ep->com);
500 release_ep_resources(ep); 501 release_ep_resources(ep);
502 kfree_skb(skb);
501 return 0; 503 return 0;
502} 504}
503 505
@@ -569,11 +571,13 @@ static void abort_arp_failure(void *handle, struct sk_buff *skb)
569 571
570 pr_debug("%s rdev %p\n", __func__, rdev); 572 pr_debug("%s rdev %p\n", __func__, rdev);
571 req->cmd = CPL_ABORT_NO_RST; 573 req->cmd = CPL_ABORT_NO_RST;
574 skb_get(skb);
572 ret = c4iw_ofld_send(rdev, skb); 575 ret = c4iw_ofld_send(rdev, skb);
573 if (ret) { 576 if (ret) {
574 __state_set(&ep->com, DEAD); 577 __state_set(&ep->com, DEAD);
575 queue_arp_failure_cpl(ep, skb, FAKE_CPL_PUT_EP_SAFE); 578 queue_arp_failure_cpl(ep, skb, FAKE_CPL_PUT_EP_SAFE);
576 } 579 } else
580 kfree_skb(skb);
577} 581}
578 582
579static int send_flowc(struct c4iw_ep *ep) 583static int send_flowc(struct c4iw_ep *ep)
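
The fix works because c4iw_ofld_send() consumes a reference to the skb. Taking an extra reference with skb_get() before the send keeps the buffer alive for the failure path, where it is reused for the deferred-release CPL; on success the function drops its own reference. A hedged sketch of the pattern (queue_for_retry() stands in for queue_arp_failure_cpl()):

	/* Illustrative only: hold a second reference across a consuming send. */
	static void send_holding_ref(struct c4iw_rdev *rdev, struct sk_buff *skb)
	{
		skb_get(skb);			/* two refs: ours + the send's */
		if (c4iw_ofld_send(rdev, skb))	/* consumes one reference */
			queue_for_retry(skb);	/* hypothetical: reuse ours */
		else
			kfree_skb(skb);		/* success: drop our extra ref */
	}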
@@ -2517,7 +2521,8 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
2517 goto reject; 2521 goto reject;
2518 } 2522 }
2519 2523
2520 hdrs = sizeof(struct iphdr) + sizeof(struct tcphdr) + 2524 hdrs = ((iptype == 4) ? sizeof(struct iphdr) : sizeof(struct ipv6hdr)) +
2525 sizeof(struct tcphdr) +
2521 ((enable_tcp_timestamps && req->tcpopt.tstamp) ? 12 : 0); 2526 ((enable_tcp_timestamps && req->tcpopt.tstamp) ? 12 : 0);
2522 if (peer_mss && child_ep->mtu > (peer_mss + hdrs)) 2527 if (peer_mss && child_ep->mtu > (peer_mss + hdrs))
2523 child_ep->mtu = peer_mss + hdrs; 2528 child_ep->mtu = peer_mss + hdrs;
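
The corrected header budget is easy to sanity-check: the fixed IPv4 header is 20 bytes, IPv6 is 40, TCP is 20, and the timestamp option adds 12 when negotiated. A runnable check using those sizes (the kernel uses sizeof() on the real structs, which match for the fixed headers):

	#include <stdio.h>

	static int hdrs(int iptype, int tstamp)
	{
		return ((iptype == 4) ? 20 : 40) + 20 + (tstamp ? 12 : 0);
	}

	int main(void)
	{
		/* v4+ts = 52, v6+ts = 72: the old code undercounted IPv6 by 20 */
		printf("v4=%d v6=%d\n", hdrs(4, 1), hdrs(6, 1));
		return 0;
	}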
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index 329fb65e8fb0..ae0b79aeea2e 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -767,7 +767,7 @@ void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
767 kfree(entry); 767 kfree(entry);
768 } 768 }
769 769
770 list_for_each_safe(pos, nxt, &uctx->qpids) { 770 list_for_each_safe(pos, nxt, &uctx->cqids) {
771 entry = list_entry(pos, struct c4iw_qid_list, entry); 771 entry = list_entry(pos, struct c4iw_qid_list, entry);
772 list_del_init(&entry->entry); 772 list_del_init(&entry->entry);
773 kfree(entry); 773 kfree(entry);
@@ -880,13 +880,15 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
880 rdev->free_workq = create_singlethread_workqueue("iw_cxgb4_free"); 880 rdev->free_workq = create_singlethread_workqueue("iw_cxgb4_free");
881 if (!rdev->free_workq) { 881 if (!rdev->free_workq) {
882 err = -ENOMEM; 882 err = -ENOMEM;
883 goto err_free_status_page; 883 goto err_free_status_page_and_wr_log;
884 } 884 }
885 885
886 rdev->status_page->db_off = 0; 886 rdev->status_page->db_off = 0;
887 887
888 return 0; 888 return 0;
889err_free_status_page: 889err_free_status_page_and_wr_log:
890 if (c4iw_wr_log && rdev->wr_log)
891 kfree(rdev->wr_log);
890 free_page((unsigned long)rdev->status_page); 892 free_page((unsigned long)rdev->status_page);
891destroy_ocqp_pool: 893destroy_ocqp_pool:
892 c4iw_ocqp_pool_destroy(rdev); 894 c4iw_ocqp_pool_destroy(rdev);
@@ -903,9 +905,11 @@ static void c4iw_rdev_close(struct c4iw_rdev *rdev)
903{ 905{
904 destroy_workqueue(rdev->free_workq); 906 destroy_workqueue(rdev->free_workq);
905 kfree(rdev->wr_log); 907 kfree(rdev->wr_log);
908 c4iw_release_dev_ucontext(rdev, &rdev->uctx);
906 free_page((unsigned long)rdev->status_page); 909 free_page((unsigned long)rdev->status_page);
907 c4iw_pblpool_destroy(rdev); 910 c4iw_pblpool_destroy(rdev);
908 c4iw_rqtpool_destroy(rdev); 911 c4iw_rqtpool_destroy(rdev);
912 c4iw_ocqp_pool_destroy(rdev);
909 c4iw_destroy_resource(&rdev->resource); 913 c4iw_destroy_resource(&rdev->resource);
910} 914}
911 915
@@ -971,7 +975,7 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
971 devp->rdev.lldi.sge_egrstatuspagesize); 975 devp->rdev.lldi.sge_egrstatuspagesize);
972 976
973 devp->rdev.hw_queue.t4_eq_status_entries = 977 devp->rdev.hw_queue.t4_eq_status_entries =
974 devp->rdev.lldi.sge_ingpadboundary > 64 ? 2 : 1; 978 devp->rdev.lldi.sge_egrstatuspagesize / 64;
975 devp->rdev.hw_queue.t4_max_eq_size = 65520; 979 devp->rdev.hw_queue.t4_max_eq_size = 65520;
976 devp->rdev.hw_queue.t4_max_iq_size = 65520; 980 devp->rdev.hw_queue.t4_max_iq_size = 65520;
977 devp->rdev.hw_queue.t4_max_rq_size = 8192 - 981 devp->rdev.hw_queue.t4_max_rq_size = 8192 -
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index 5d6b1eeaa9a0..2ba00b89df6a 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -6312,25 +6312,38 @@ static void handle_8051_request(struct hfi1_pportdata *ppd)
6312 } 6312 }
6313} 6313}
6314 6314
6315static void write_global_credit(struct hfi1_devdata *dd, 6315/*
 6316 u8 vau, u16 total, u16 shared) 6316 * Set up allocation unit value.
6317 */
6318void set_up_vau(struct hfi1_devdata *dd, u8 vau)
6317{ 6319{
6318 write_csr(dd, SEND_CM_GLOBAL_CREDIT, 6320 u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
6319 ((u64)total << 6321
6320 SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT) | 6322 /* do not modify other values in the register */
6321 ((u64)shared << 6323 reg &= ~SEND_CM_GLOBAL_CREDIT_AU_SMASK;
6322 SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT) | 6324 reg |= (u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT;
6323 ((u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT)); 6325 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
6324} 6326}
6325 6327
6326/* 6328/*
6327 * Set up initial VL15 credits of the remote. Assumes the rest of 6329 * Set up initial VL15 credits of the remote. Assumes the rest of
6328 * the CM credit registers are zero from a previous global or credit reset . 6330 * the CM credit registers are zero from a previous global or credit reset.
6331 * Shared limit for VL15 will always be 0.
6329 */ 6332 */
6330void set_up_vl15(struct hfi1_devdata *dd, u8 vau, u16 vl15buf) 6333void set_up_vl15(struct hfi1_devdata *dd, u16 vl15buf)
6331{ 6334{
6332 /* leave shared count at zero for both global and VL15 */ 6335 u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
6333 write_global_credit(dd, vau, vl15buf, 0); 6336
6337 /* set initial values for total and shared credit limit */
6338 reg &= ~(SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK |
6339 SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK);
6340
6341 /*
6342 * Set total limit to be equal to VL15 credits.
6343 * Leave shared limit at 0.
6344 */
6345 reg |= (u64)vl15buf << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
6346 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
6334 6347
6335 write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf 6348 write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
6336 << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT); 6349 << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
@@ -6348,9 +6361,11 @@ void reset_link_credits(struct hfi1_devdata *dd)
6348 for (i = 0; i < TXE_NUM_DATA_VL; i++) 6361 for (i = 0; i < TXE_NUM_DATA_VL; i++)
6349 write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0); 6362 write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
6350 write_csr(dd, SEND_CM_CREDIT_VL15, 0); 6363 write_csr(dd, SEND_CM_CREDIT_VL15, 0);
6351 write_global_credit(dd, 0, 0, 0); 6364 write_csr(dd, SEND_CM_GLOBAL_CREDIT, 0);
6352 /* reset the CM block */ 6365 /* reset the CM block */
6353 pio_send_control(dd, PSC_CM_RESET); 6366 pio_send_control(dd, PSC_CM_RESET);
6367 /* reset cached value */
6368 dd->vl15buf_cached = 0;
6354} 6369}
6355 6370
6356/* convert a vCU to a CU */ 6371/* convert a vCU to a CU */
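
Both set_up_vau() and the new set_up_vl15() follow the same read-modify-write discipline on SEND_CM_GLOBAL_CREDIT: read the CSR, clear only the field being changed via its SMASK, OR in the new value at the field's SHIFT, and write back, so unrelated fields survive. The field update in isolation (register value assumed):

	#include <stdio.h>
	#include <stdint.h>

	#define AU_SHIFT 16
	#define AU_SMASK 0x70000ull		/* 3-bit field at bit 16 */

	int main(void)
	{
		uint64_t reg = 0x94000030000ull;	/* assumed CSR contents */

		reg &= ~AU_SMASK;		/* clear only the AU field */
		reg |= (uint64_t)5 << AU_SHIFT;	/* insert new allocation unit */
		printf("reg=0x%llx\n", (unsigned long long)reg);
		return 0;
	}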
@@ -6839,24 +6854,35 @@ void handle_link_up(struct work_struct *work)
6839{ 6854{
6840 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata, 6855 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6841 link_up_work); 6856 link_up_work);
6857 struct hfi1_devdata *dd = ppd->dd;
6858
6842 set_link_state(ppd, HLS_UP_INIT); 6859 set_link_state(ppd, HLS_UP_INIT);
6843 6860
6844 /* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */ 6861 /* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
6845 read_ltp_rtt(ppd->dd); 6862 read_ltp_rtt(dd);
6846 /* 6863 /*
6847 * OPA specifies that certain counters are cleared on a transition 6864 * OPA specifies that certain counters are cleared on a transition
6848 * to link up, so do that. 6865 * to link up, so do that.
6849 */ 6866 */
6850 clear_linkup_counters(ppd->dd); 6867 clear_linkup_counters(dd);
6851 /* 6868 /*
6852 * And (re)set link up default values. 6869 * And (re)set link up default values.
6853 */ 6870 */
6854 set_linkup_defaults(ppd); 6871 set_linkup_defaults(ppd);
6855 6872
6873 /*
6874 * Set VL15 credits. Use cached value from verify cap interrupt.
6875 * In case of quick linkup or simulator, vl15 value will be set by
6876 * handle_linkup_change. VerifyCap interrupt handler will not be
6877 * called in those scenarios.
6878 */
6879 if (!(quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR))
6880 set_up_vl15(dd, dd->vl15buf_cached);
6881
6856 /* enforce link speed enabled */ 6882 /* enforce link speed enabled */
6857 if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) { 6883 if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) {
6858 /* oops - current speed is not enabled, bounce */ 6884 /* oops - current speed is not enabled, bounce */
6859 dd_dev_err(ppd->dd, 6885 dd_dev_err(dd,
6860 "Link speed active 0x%x is outside enabled 0x%x, downing link\n", 6886 "Link speed active 0x%x is outside enabled 0x%x, downing link\n",
6861 ppd->link_speed_active, ppd->link_speed_enabled); 6887 ppd->link_speed_active, ppd->link_speed_enabled);
6862 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0, 6888 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0,
@@ -7357,7 +7383,14 @@ void handle_verify_cap(struct work_struct *work)
7357 */ 7383 */
7358 if (vau == 0) 7384 if (vau == 0)
7359 vau = 1; 7385 vau = 1;
7360 set_up_vl15(dd, vau, vl15buf); 7386 set_up_vau(dd, vau);
7387
7388 /*
7389 * Set VL15 credits to 0 in global credit register. Cache remote VL15
 7390 * credits value and wait for link-up interrupt to set it.
7391 */
7392 set_up_vl15(dd, 0);
7393 dd->vl15buf_cached = vl15buf;
7361 7394
7362 /* set up the LCB CRC mode */ 7395 /* set up the LCB CRC mode */
7363 crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc; 7396 crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc;
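
Taken together, the chip.c changes split VL15 credit programming into a two-phase handshake: at VerifyCap time only the allocation unit is programmed and the remote's credit count is cached while zero credits are extended; the cached value is released from handle_link_up() once this HFI can actually absorb VL15 MAD packets. Condensed from the hunks above:

	/* Phase 1: 8051 VerifyCap interrupt; do not extend credits yet. */
	set_up_vau(dd, vau);
	set_up_vl15(dd, 0);
	dd->vl15buf_cached = vl15buf;

	/* Phase 2: link-up worker; now the peer may send VL15 packets. */
	if (!(quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR))
		set_up_vl15(dd, dd->vl15buf_cached);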
diff --git a/drivers/infiniband/hw/hfi1/chip_registers.h b/drivers/infiniband/hw/hfi1/chip_registers.h
index 5bfa839d1c48..793514f1d15f 100644
--- a/drivers/infiniband/hw/hfi1/chip_registers.h
+++ b/drivers/infiniband/hw/hfi1/chip_registers.h
@@ -839,7 +839,9 @@
839#define SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK 0x8ull 839#define SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK 0x8ull
840#define SEND_CM_CTRL_RESETCSR 0x0000000000000020ull 840#define SEND_CM_CTRL_RESETCSR 0x0000000000000020ull
841#define SEND_CM_GLOBAL_CREDIT (TXE + 0x000000000508) 841#define SEND_CM_GLOBAL_CREDIT (TXE + 0x000000000508)
842#define SEND_CM_GLOBAL_CREDIT_AU_MASK 0x7ull
842#define SEND_CM_GLOBAL_CREDIT_AU_SHIFT 16 843#define SEND_CM_GLOBAL_CREDIT_AU_SHIFT 16
844#define SEND_CM_GLOBAL_CREDIT_AU_SMASK 0x70000ull
843#define SEND_CM_GLOBAL_CREDIT_RESETCSR 0x0000094000030000ull 845#define SEND_CM_GLOBAL_CREDIT_RESETCSR 0x0000094000030000ull
844#define SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK 0xFFFFull 846#define SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK 0xFFFFull
845#define SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT 0 847#define SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT 0
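
As elsewhere in this register header, an SMASK is just the field MASK pre-shifted into position, so the two new AU defines are mutually consistent:

	#include <assert.h>

	int main(void)
	{
		/* SEND_CM_GLOBAL_CREDIT_AU: 3-bit field at bit 16 */
		assert((0x7ull << 16) == 0x70000ull);
		return 0;
	}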
diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
index da322e6668cc..414a04a481c2 100644
--- a/drivers/infiniband/hw/hfi1/hfi.h
+++ b/drivers/infiniband/hw/hfi1/hfi.h
@@ -1045,6 +1045,14 @@ struct hfi1_devdata {
1045 /* initial vl15 credits to use */ 1045 /* initial vl15 credits to use */
1046 u16 vl15_init; 1046 u16 vl15_init;
1047 1047
1048 /*
1049 * Cached value for vl15buf, read during verify cap interrupt. VL15
1050 * credits are to be kept at 0 and set when handling the link-up
1051 * interrupt. This removes the possibility of receiving VL15 MAD
1052 * packets before this HFI is ready.
1053 */
1054 u16 vl15buf_cached;
1055
1048 /* Misc small ints */ 1056 /* Misc small ints */
1049 u8 n_krcv_queues; 1057 u8 n_krcv_queues;
1050 u8 qos_shift; 1058 u8 qos_shift;
@@ -1598,7 +1606,8 @@ int hfi1_rcvbuf_validate(u32 size, u8 type, u16 *encode);
1598int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t); 1606int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t);
1599int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t); 1607int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t);
1600 1608
1601void set_up_vl15(struct hfi1_devdata *dd, u8 vau, u16 vl15buf); 1609void set_up_vau(struct hfi1_devdata *dd, u8 vau);
1610void set_up_vl15(struct hfi1_devdata *dd, u16 vl15buf);
1602void reset_link_credits(struct hfi1_devdata *dd); 1611void reset_link_credits(struct hfi1_devdata *dd);
1603void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu); 1612void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu);
1604 1613
diff --git a/drivers/infiniband/hw/hfi1/intr.c b/drivers/infiniband/hw/hfi1/intr.c
index ba265d0ae93b..04a5082d5ac5 100644
--- a/drivers/infiniband/hw/hfi1/intr.c
+++ b/drivers/infiniband/hw/hfi1/intr.c
@@ -130,7 +130,8 @@ void handle_linkup_change(struct hfi1_devdata *dd, u32 linkup)
130 * the remote values. Both sides must be using the values. 130 * the remote values. Both sides must be using the values.
131 */ 131 */
132 if (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR) { 132 if (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
133 set_up_vl15(dd, dd->vau, dd->vl15_init); 133 set_up_vau(dd, dd->vau);
134 set_up_vl15(dd, dd->vl15_init);
134 assign_remote_cm_au_table(dd, dd->vcu); 135 assign_remote_cm_au_table(dd, dd->vcu);
135 } 136 }
136 137
diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c
index 93faf86d54b6..6a9f6f9819e1 100644
--- a/drivers/infiniband/hw/hfi1/pcie.c
+++ b/drivers/infiniband/hw/hfi1/pcie.c
@@ -207,8 +207,8 @@ int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev)
207 /* 207 /*
208 * Save BARs and command to rewrite after device reset. 208 * Save BARs and command to rewrite after device reset.
209 */ 209 */
210 dd->pcibar0 = addr; 210 pci_read_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0, &dd->pcibar0);
211 dd->pcibar1 = addr >> 32; 211 pci_read_config_dword(dd->pcidev, PCI_BASE_ADDRESS_1, &dd->pcibar1);
212 pci_read_config_dword(dd->pcidev, PCI_ROM_ADDRESS, &dd->pci_rom); 212 pci_read_config_dword(dd->pcidev, PCI_ROM_ADDRESS, &dd->pci_rom);
213 pci_read_config_word(dd->pcidev, PCI_COMMAND, &dd->pci_command); 213 pci_read_config_word(dd->pcidev, PCI_COMMAND, &dd->pci_command);
214 pcie_capability_read_word(dd->pcidev, PCI_EXP_DEVCTL, &dd->pcie_devctl); 214 pcie_capability_read_word(dd->pcidev, PCI_EXP_DEVCTL, &dd->pcie_devctl);
diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
index 069bdaf061ab..1080778a1f7c 100644
--- a/drivers/infiniband/hw/hfi1/rc.c
+++ b/drivers/infiniband/hw/hfi1/rc.c
@@ -2159,8 +2159,11 @@ send_last:
2159 ret = hfi1_rvt_get_rwqe(qp, 1); 2159 ret = hfi1_rvt_get_rwqe(qp, 1);
2160 if (ret < 0) 2160 if (ret < 0)
2161 goto nack_op_err; 2161 goto nack_op_err;
2162 if (!ret) 2162 if (!ret) {
2163 /* peer will send again */
2164 rvt_put_ss(&qp->r_sge);
2163 goto rnr_nak; 2165 goto rnr_nak;
2166 }
2164 wc.ex.imm_data = ohdr->u.rc.imm_data; 2167 wc.ex.imm_data = ohdr->u.rc.imm_data;
2165 wc.wc_flags = IB_WC_WITH_IMM; 2168 wc.wc_flags = IB_WC_WITH_IMM;
2166 goto send_last; 2169 goto send_last;
diff --git a/drivers/infiniband/hw/hfi1/sysfs.c b/drivers/infiniband/hw/hfi1/sysfs.c
index 50d140d25e38..2f3bbcac1e34 100644
--- a/drivers/infiniband/hw/hfi1/sysfs.c
+++ b/drivers/infiniband/hw/hfi1/sysfs.c
@@ -196,7 +196,8 @@ static const struct sysfs_ops port_cc_sysfs_ops = {
196}; 196};
197 197
198static struct attribute *port_cc_default_attributes[] = { 198static struct attribute *port_cc_default_attributes[] = {
199 &cc_prescan_attr.attr 199 &cc_prescan_attr.attr,
200 NULL
200}; 201};
201 202
202static struct kobj_type port_cc_ktype = { 203static struct kobj_type port_cc_ktype = {
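
The sysfs fix matters because default-attribute arrays are walked until a NULL entry; without the sentinel the walker reads past the end of the array. The iteration contract in miniature (attribute type reduced to a string for illustration):

	#include <stdio.h>

	int main(void)
	{
		/* Stand-in for struct attribute *[]: must end with NULL. */
		const char *attrs[] = { "cc_prescan", NULL };

		for (int i = 0; attrs[i]; i++)	/* stops at the sentinel */
			printf("%s\n", attrs[i]);
		return 0;
	}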
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
index f3bc01bce483..6ae98aa7f74e 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
@@ -784,7 +784,6 @@ static void i40iw_build_mpa_v2(struct i40iw_cm_node *cm_node,
784 } 784 }
785 785
786 ctrl_ird |= IETF_PEER_TO_PEER; 786 ctrl_ird |= IETF_PEER_TO_PEER;
787 ctrl_ird |= IETF_FLPDU_ZERO_LEN;
788 787
789 switch (mpa_key) { 788 switch (mpa_key) {
790 case MPA_KEY_REQUEST: 789 case MPA_KEY_REQUEST:
@@ -2446,8 +2445,8 @@ static void i40iw_handle_rcv_mpa(struct i40iw_cm_node *cm_node,
2446 } else { 2445 } else {
2447 type = I40IW_CM_EVENT_CONNECTED; 2446 type = I40IW_CM_EVENT_CONNECTED;
2448 cm_node->state = I40IW_CM_STATE_OFFLOADED; 2447 cm_node->state = I40IW_CM_STATE_OFFLOADED;
2449 i40iw_send_ack(cm_node);
2450 } 2448 }
2449 i40iw_send_ack(cm_node);
2451 break; 2450 break;
2452 default: 2451 default:
2453 pr_err("%s wrong cm_node state =%d\n", __func__, cm_node->state); 2452 pr_err("%s wrong cm_node state =%d\n", __func__, cm_node->state);
diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
index f82483b3d1e7..a027e2072477 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
@@ -285,28 +285,20 @@ void i40iw_change_l2params(struct i40iw_sc_vsi *vsi, struct i40iw_l2params *l2pa
285 struct i40iw_sc_dev *dev = vsi->dev; 285 struct i40iw_sc_dev *dev = vsi->dev;
286 struct i40iw_sc_qp *qp = NULL; 286 struct i40iw_sc_qp *qp = NULL;
287 bool qs_handle_change = false; 287 bool qs_handle_change = false;
288 bool mss_change = false;
289 unsigned long flags; 288 unsigned long flags;
290 u16 qs_handle; 289 u16 qs_handle;
291 int i; 290 int i;
292 291
293 if (vsi->mss != l2params->mss) { 292 vsi->mss = l2params->mss;
294 mss_change = true;
295 vsi->mss = l2params->mss;
296 }
297 293
298 i40iw_fill_qos_list(l2params->qs_handle_list); 294 i40iw_fill_qos_list(l2params->qs_handle_list);
299 for (i = 0; i < I40IW_MAX_USER_PRIORITY; i++) { 295 for (i = 0; i < I40IW_MAX_USER_PRIORITY; i++) {
300 qs_handle = l2params->qs_handle_list[i]; 296 qs_handle = l2params->qs_handle_list[i];
301 if (vsi->qos[i].qs_handle != qs_handle) 297 if (vsi->qos[i].qs_handle != qs_handle)
302 qs_handle_change = true; 298 qs_handle_change = true;
303 else if (!mss_change)
304 continue; /* no MSS nor qs handle change */
305 spin_lock_irqsave(&vsi->qos[i].lock, flags); 299 spin_lock_irqsave(&vsi->qos[i].lock, flags);
306 qp = i40iw_get_qp(&vsi->qos[i].qplist, qp); 300 qp = i40iw_get_qp(&vsi->qos[i].qplist, qp);
307 while (qp) { 301 while (qp) {
308 if (mss_change)
309 i40iw_qp_mss_modify(dev, qp);
310 if (qs_handle_change) { 302 if (qs_handle_change) {
311 qp->qs_handle = qs_handle; 303 qp->qs_handle = qs_handle;
312 /* issue cqp suspend command */ 304 /* issue cqp suspend command */
@@ -2395,7 +2387,6 @@ static enum i40iw_status_code i40iw_sc_qp_modify(
2395 2387
2396 set_64bit_val(wqe, 2388 set_64bit_val(wqe,
2397 8, 2389 8,
2398 LS_64(info->new_mss, I40IW_CQPSQ_QP_NEWMSS) |
2399 LS_64(term_len, I40IW_CQPSQ_QP_TERMLEN)); 2390 LS_64(term_len, I40IW_CQPSQ_QP_TERMLEN));
2400 2391
2401 set_64bit_val(wqe, 16, qp->hw_host_ctx_pa); 2392 set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
@@ -2410,7 +2401,6 @@ static enum i40iw_status_code i40iw_sc_qp_modify(
2410 LS_64(info->cq_num_valid, I40IW_CQPSQ_QP_CQNUMVALID) | 2401 LS_64(info->cq_num_valid, I40IW_CQPSQ_QP_CQNUMVALID) |
2411 LS_64(info->force_loopback, I40IW_CQPSQ_QP_FORCELOOPBACK) | 2402 LS_64(info->force_loopback, I40IW_CQPSQ_QP_FORCELOOPBACK) |
2412 LS_64(qp->qp_type, I40IW_CQPSQ_QP_QPTYPE) | 2403 LS_64(qp->qp_type, I40IW_CQPSQ_QP_QPTYPE) |
2413 LS_64(info->mss_change, I40IW_CQPSQ_QP_MSSCHANGE) |
2414 LS_64(info->static_rsrc, I40IW_CQPSQ_QP_STATRSRC) | 2404 LS_64(info->static_rsrc, I40IW_CQPSQ_QP_STATRSRC) |
2415 LS_64(info->remove_hash_idx, I40IW_CQPSQ_QP_REMOVEHASHENTRY) | 2405 LS_64(info->remove_hash_idx, I40IW_CQPSQ_QP_REMOVEHASHENTRY) |
2416 LS_64(term_actions, I40IW_CQPSQ_QP_TERMACT) | 2406 LS_64(term_actions, I40IW_CQPSQ_QP_TERMACT) |
diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
index 2728af3103ce..a3f18a22f5ed 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_main.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
@@ -1319,13 +1319,13 @@ static enum i40iw_status_code i40iw_initialize_dev(struct i40iw_device *iwdev,
1319 status = i40iw_obj_aligned_mem(iwdev, &mem, I40IW_QUERY_FPM_BUF_SIZE, 1319 status = i40iw_obj_aligned_mem(iwdev, &mem, I40IW_QUERY_FPM_BUF_SIZE,
1320 I40IW_FPM_QUERY_BUF_ALIGNMENT_MASK); 1320 I40IW_FPM_QUERY_BUF_ALIGNMENT_MASK);
1321 if (status) 1321 if (status)
1322 goto exit; 1322 goto error;
1323 info.fpm_query_buf_pa = mem.pa; 1323 info.fpm_query_buf_pa = mem.pa;
1324 info.fpm_query_buf = mem.va; 1324 info.fpm_query_buf = mem.va;
1325 status = i40iw_obj_aligned_mem(iwdev, &mem, I40IW_COMMIT_FPM_BUF_SIZE, 1325 status = i40iw_obj_aligned_mem(iwdev, &mem, I40IW_COMMIT_FPM_BUF_SIZE,
1326 I40IW_FPM_COMMIT_BUF_ALIGNMENT_MASK); 1326 I40IW_FPM_COMMIT_BUF_ALIGNMENT_MASK);
1327 if (status) 1327 if (status)
1328 goto exit; 1328 goto error;
1329 info.fpm_commit_buf_pa = mem.pa; 1329 info.fpm_commit_buf_pa = mem.pa;
1330 info.fpm_commit_buf = mem.va; 1330 info.fpm_commit_buf = mem.va;
1331 info.hmc_fn_id = ldev->fid; 1331 info.hmc_fn_id = ldev->fid;
@@ -1347,11 +1347,9 @@ static enum i40iw_status_code i40iw_initialize_dev(struct i40iw_device *iwdev,
1347 info.exception_lan_queue = 1; 1347 info.exception_lan_queue = 1;
1348 info.vchnl_send = i40iw_virtchnl_send; 1348 info.vchnl_send = i40iw_virtchnl_send;
1349 status = i40iw_device_init(&iwdev->sc_dev, &info); 1349 status = i40iw_device_init(&iwdev->sc_dev, &info);
1350exit: 1350
1351 if (status) { 1351 if (status)
1352 kfree(iwdev->hmc_info_mem); 1352 goto error;
1353 iwdev->hmc_info_mem = NULL;
1354 }
1355 memset(&vsi_info, 0, sizeof(vsi_info)); 1353 memset(&vsi_info, 0, sizeof(vsi_info));
1356 vsi_info.dev = &iwdev->sc_dev; 1354 vsi_info.dev = &iwdev->sc_dev;
1357 vsi_info.back_vsi = (void *)iwdev; 1355 vsi_info.back_vsi = (void *)iwdev;
@@ -1362,11 +1360,19 @@ exit:
1362 memset(&stats_info, 0, sizeof(stats_info)); 1360 memset(&stats_info, 0, sizeof(stats_info));
1363 stats_info.fcn_id = ldev->fid; 1361 stats_info.fcn_id = ldev->fid;
1364 stats_info.pestat = kzalloc(sizeof(*stats_info.pestat), GFP_KERNEL); 1362 stats_info.pestat = kzalloc(sizeof(*stats_info.pestat), GFP_KERNEL);
1363 if (!stats_info.pestat) {
1364 status = I40IW_ERR_NO_MEMORY;
1365 goto error;
1366 }
1365 stats_info.stats_initialize = true; 1367 stats_info.stats_initialize = true;
1366 if (stats_info.pestat) 1368 if (stats_info.pestat)
1367 i40iw_vsi_stats_init(&iwdev->vsi, &stats_info); 1369 i40iw_vsi_stats_init(&iwdev->vsi, &stats_info);
1368 } 1370 }
1369 return status; 1371 return status;
1372error:
1373 kfree(iwdev->hmc_info_mem);
1374 iwdev->hmc_info_mem = NULL;
1375 return status;
1370} 1376}
1371 1377
1372/** 1378/**
diff --git a/drivers/infiniband/hw/i40iw/i40iw_osdep.h b/drivers/infiniband/hw/i40iw/i40iw_osdep.h
index aa66c1c63dfa..f27be3e7830b 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_osdep.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_osdep.h
@@ -199,7 +199,6 @@ void i40iw_cqp_spawn_worker(struct i40iw_sc_dev *dev,
199 struct i40iw_virtchnl_work_info *work_info, u32 iw_vf_idx); 199 struct i40iw_virtchnl_work_info *work_info, u32 iw_vf_idx);
200void *i40iw_remove_head(struct list_head *list); 200void *i40iw_remove_head(struct list_head *list);
201void i40iw_qp_suspend_resume(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp, bool suspend); 201void i40iw_qp_suspend_resume(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp, bool suspend);
202void i40iw_qp_mss_modify(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp);
203 202
204void i40iw_term_modify_qp(struct i40iw_sc_qp *qp, u8 next_state, u8 term, u8 term_len); 203void i40iw_term_modify_qp(struct i40iw_sc_qp *qp, u8 next_state, u8 term, u8 term_len);
205void i40iw_terminate_done(struct i40iw_sc_qp *qp, int timeout_occurred); 204void i40iw_terminate_done(struct i40iw_sc_qp *qp, int timeout_occurred);
diff --git a/drivers/infiniband/hw/i40iw/i40iw_type.h b/drivers/infiniband/hw/i40iw/i40iw_type.h
index 7b76259752b0..959ec81fba99 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_type.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_type.h
@@ -541,7 +541,6 @@ struct i40iw_create_qp_info {
541struct i40iw_modify_qp_info { 541struct i40iw_modify_qp_info {
542 u64 rx_win0; 542 u64 rx_win0;
543 u64 rx_win1; 543 u64 rx_win1;
544 u16 new_mss;
545 u8 next_iwarp_state; 544 u8 next_iwarp_state;
546 u8 termlen; 545 u8 termlen;
547 bool ord_valid; 546 bool ord_valid;
@@ -554,7 +553,6 @@ struct i40iw_modify_qp_info {
554 bool dont_send_term; 553 bool dont_send_term;
555 bool dont_send_fin; 554 bool dont_send_fin;
556 bool cached_var_valid; 555 bool cached_var_valid;
557 bool mss_change;
558 bool force_loopback; 556 bool force_loopback;
559}; 557};
560 558
diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c
index 409a3781e735..56d986924a4c 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_utils.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c
@@ -757,23 +757,6 @@ void i40iw_qp_suspend_resume(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp, b
757} 757}
758 758
759/** 759/**
760 * i40iw_qp_mss_modify - modify mss for qp
761 * @dev: hardware control device structure
762 * @qp: hardware control qp
763 */
764void i40iw_qp_mss_modify(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp)
765{
766 struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
767 struct i40iw_qp *iwqp = (struct i40iw_qp *)qp->back_qp;
768 struct i40iw_modify_qp_info info;
769
770 memset(&info, 0, sizeof(info));
771 info.mss_change = true;
772 info.new_mss = qp->vsi->mss;
773 i40iw_hw_modify_qp(iwdev, iwqp, &info, false);
774}
775
776/**
777 * i40iw_term_modify_qp - modify qp for term message 760 * i40iw_term_modify_qp - modify qp for term message
778 * @qp: hardware control qp 761 * @qp: hardware control qp
779 * @next_state: qp's next state 762 * @next_state: qp's next state
diff --git a/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c b/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c
index f4d13683a403..48fd327f876b 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c
@@ -443,10 +443,7 @@ enum i40iw_status_code i40iw_vchnl_recv_pf(struct i40iw_sc_dev *dev,
443 if (!dev->vchnl_up) 443 if (!dev->vchnl_up)
444 return I40IW_ERR_NOT_READY; 444 return I40IW_ERR_NOT_READY;
445 if (vchnl_msg->iw_op_code == I40IW_VCHNL_OP_GET_VER) { 445 if (vchnl_msg->iw_op_code == I40IW_VCHNL_OP_GET_VER) {
446 if (vchnl_msg->iw_op_ver != I40IW_VCHNL_OP_GET_VER_V0) 446 vchnl_pf_send_get_ver_resp(dev, vf_id, vchnl_msg);
447 vchnl_pf_send_get_ver_resp(dev, vf_id, vchnl_msg);
448 else
449 vchnl_pf_send_get_ver_resp(dev, vf_id, vchnl_msg);
450 return I40IW_SUCCESS; 447 return I40IW_SUCCESS;
451 } 448 }
452 for (iw_vf_idx = 0; iw_vf_idx < I40IW_MAX_PE_ENABLED_VF_COUNT; iw_vf_idx++) { 449 for (iw_vf_idx = 0; iw_vf_idx < I40IW_MAX_PE_ENABLED_VF_COUNT; iw_vf_idx++) {
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index b4694717f6f3..21d31cb1325f 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -1578,6 +1578,7 @@ static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc
1578 if (port < 0) 1578 if (port < 0)
1579 return; 1579 return;
1580 ah.av.ib.port_pd = cpu_to_be32(port << 24 | (be32_to_cpu(ah.av.ib.port_pd) & 0xffffff)); 1580 ah.av.ib.port_pd = cpu_to_be32(port << 24 | (be32_to_cpu(ah.av.ib.port_pd) & 0xffffff));
1581 ah.ibah.type = rdma_ah_find_type(&dev->ib_dev, port);
1581 1582
1582 mlx4_ib_query_ah(&ah.ibah, &ah_attr); 1583 mlx4_ib_query_ah(&ah.ibah, &ah_attr);
1583 if (rdma_ah_get_ah_flags(&ah_attr) & IB_AH_GRH) 1584 if (rdma_ah_get_ah_flags(&ah_attr) & IB_AH_GRH)
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index d45772da0963..9ecc089d4529 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -2979,6 +2979,18 @@ error_0:
2979 return ret; 2979 return ret;
2980} 2980}
2981 2981
2982static u8 mlx5_get_umr_fence(u8 umr_fence_cap)
2983{
2984 switch (umr_fence_cap) {
2985 case MLX5_CAP_UMR_FENCE_NONE:
2986 return MLX5_FENCE_MODE_NONE;
2987 case MLX5_CAP_UMR_FENCE_SMALL:
2988 return MLX5_FENCE_MODE_INITIATOR_SMALL;
2989 default:
2990 return MLX5_FENCE_MODE_STRONG_ORDERING;
2991 }
2992}
2993
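
mlx5_get_umr_fence() maps the device's reported UMR fence capability onto the weakest fence mode it still requires, with anything unrecognized falling back to strong ordering as the safe default. The translation happens once at probe time, as wired up later in this patch; presumably the cached value then feeds the per-WQE fence selection that replaces the removed get_fence() in qp.c:

	/* Probe time (from this patch): translate the capability once. */
	dev->umr_fence = mlx5_get_umr_fence(MLX5_CAP_GEN(mdev, umr_fence));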
2982static int create_dev_resources(struct mlx5_ib_resources *devr) 2994static int create_dev_resources(struct mlx5_ib_resources *devr)
2983{ 2995{
2984 struct ib_srq_init_attr attr; 2996 struct ib_srq_init_attr attr;
@@ -3680,8 +3692,10 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
3680 dev->ib_dev.check_mr_status = mlx5_ib_check_mr_status; 3692 dev->ib_dev.check_mr_status = mlx5_ib_check_mr_status;
3681 dev->ib_dev.get_port_immutable = mlx5_port_immutable; 3693 dev->ib_dev.get_port_immutable = mlx5_port_immutable;
3682 dev->ib_dev.get_dev_fw_str = get_dev_fw_str; 3694 dev->ib_dev.get_dev_fw_str = get_dev_fw_str;
3683 dev->ib_dev.alloc_rdma_netdev = mlx5_ib_alloc_rdma_netdev; 3695 if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads)) {
3684 dev->ib_dev.free_rdma_netdev = mlx5_ib_free_rdma_netdev; 3696 dev->ib_dev.alloc_rdma_netdev = mlx5_ib_alloc_rdma_netdev;
3697 dev->ib_dev.free_rdma_netdev = mlx5_ib_free_rdma_netdev;
3698 }
3685 if (mlx5_core_is_pf(mdev)) { 3699 if (mlx5_core_is_pf(mdev)) {
3686 dev->ib_dev.get_vf_config = mlx5_ib_get_vf_config; 3700 dev->ib_dev.get_vf_config = mlx5_ib_get_vf_config;
3687 dev->ib_dev.set_vf_link_state = mlx5_ib_set_vf_link_state; 3701 dev->ib_dev.set_vf_link_state = mlx5_ib_set_vf_link_state;
@@ -3693,6 +3707,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
3693 3707
3694 mlx5_ib_internal_fill_odp_caps(dev); 3708 mlx5_ib_internal_fill_odp_caps(dev);
3695 3709
3710 dev->umr_fence = mlx5_get_umr_fence(MLX5_CAP_GEN(mdev, umr_fence));
3711
3696 if (MLX5_CAP_GEN(mdev, imaicl)) { 3712 if (MLX5_CAP_GEN(mdev, imaicl)) {
3697 dev->ib_dev.alloc_mw = mlx5_ib_alloc_mw; 3713 dev->ib_dev.alloc_mw = mlx5_ib_alloc_mw;
3698 dev->ib_dev.dealloc_mw = mlx5_ib_dealloc_mw; 3714 dev->ib_dev.dealloc_mw = mlx5_ib_dealloc_mw;
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 38c877bc45e5..bdcf25410c99 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -349,7 +349,7 @@ struct mlx5_ib_qp {
349 struct mlx5_ib_wq rq; 349 struct mlx5_ib_wq rq;
350 350
351 u8 sq_signal_bits; 351 u8 sq_signal_bits;
352 u8 fm_cache; 352 u8 next_fence;
353 struct mlx5_ib_wq sq; 353 struct mlx5_ib_wq sq;
354 354
355 /* serialize qp state modifications 355 /* serialize qp state modifications
@@ -654,6 +654,7 @@ struct mlx5_ib_dev {
654 struct mlx5_ib_port *port; 654 struct mlx5_ib_port *port;
655 struct mlx5_sq_bfreg bfreg; 655 struct mlx5_sq_bfreg bfreg;
656 struct mlx5_sq_bfreg fp_bfreg; 656 struct mlx5_sq_bfreg fp_bfreg;
657 u8 umr_fence;
657}; 658};
658 659
659static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq) 660static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 93959e1e43a3..ebb6768684de 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -3738,24 +3738,6 @@ static void dump_wqe(struct mlx5_ib_qp *qp, int idx, int size_16)
3738 } 3738 }
3739} 3739}
3740 3740
3741static u8 get_fence(u8 fence, struct ib_send_wr *wr)
3742{
3743 if (unlikely(wr->opcode == IB_WR_LOCAL_INV &&
3744 wr->send_flags & IB_SEND_FENCE))
3745 return MLX5_FENCE_MODE_STRONG_ORDERING;
3746
3747 if (unlikely(fence)) {
3748 if (wr->send_flags & IB_SEND_FENCE)
3749 return MLX5_FENCE_MODE_SMALL_AND_FENCE;
3750 else
3751 return fence;
3752 } else if (unlikely(wr->send_flags & IB_SEND_FENCE)) {
3753 return MLX5_FENCE_MODE_FENCE;
3754 }
3755
3756 return 0;
3757}
3758
3759static int begin_wqe(struct mlx5_ib_qp *qp, void **seg, 3741static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
3760 struct mlx5_wqe_ctrl_seg **ctrl, 3742 struct mlx5_wqe_ctrl_seg **ctrl,
3761 struct ib_send_wr *wr, unsigned *idx, 3743 struct ib_send_wr *wr, unsigned *idx,
@@ -3784,8 +3766,7 @@ static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
3784static void finish_wqe(struct mlx5_ib_qp *qp, 3766static void finish_wqe(struct mlx5_ib_qp *qp,
3785 struct mlx5_wqe_ctrl_seg *ctrl, 3767 struct mlx5_wqe_ctrl_seg *ctrl,
3786 u8 size, unsigned idx, u64 wr_id, 3768 u8 size, unsigned idx, u64 wr_id,
3787 int nreq, u8 fence, u8 next_fence, 3769 int nreq, u8 fence, u32 mlx5_opcode)
3788 u32 mlx5_opcode)
3789{ 3770{
3790 u8 opmod = 0; 3771 u8 opmod = 0;
3791 3772
@@ -3793,7 +3774,6 @@ static void finish_wqe(struct mlx5_ib_qp *qp,
3793 mlx5_opcode | ((u32)opmod << 24)); 3774 mlx5_opcode | ((u32)opmod << 24));
3794 ctrl->qpn_ds = cpu_to_be32(size | (qp->trans_qp.base.mqp.qpn << 8)); 3775 ctrl->qpn_ds = cpu_to_be32(size | (qp->trans_qp.base.mqp.qpn << 8));
3795 ctrl->fm_ce_se |= fence; 3776 ctrl->fm_ce_se |= fence;
3796 qp->fm_cache = next_fence;
3797 if (unlikely(qp->wq_sig)) 3777 if (unlikely(qp->wq_sig))
3798 ctrl->signature = wq_sig(ctrl); 3778 ctrl->signature = wq_sig(ctrl);
3799 3779
@@ -3853,7 +3833,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
3853 goto out; 3833 goto out;
3854 } 3834 }
3855 3835
3856 fence = qp->fm_cache;
3857 num_sge = wr->num_sge; 3836 num_sge = wr->num_sge;
3858 if (unlikely(num_sge > qp->sq.max_gs)) { 3837 if (unlikely(num_sge > qp->sq.max_gs)) {
3859 mlx5_ib_warn(dev, "\n"); 3838 mlx5_ib_warn(dev, "\n");
@@ -3870,6 +3849,19 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
3870 goto out; 3849 goto out;
3871 } 3850 }
3872 3851
3852 if (wr->opcode == IB_WR_LOCAL_INV ||
3853 wr->opcode == IB_WR_REG_MR) {
3854 fence = dev->umr_fence;
3855 next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
3856 } else if (wr->send_flags & IB_SEND_FENCE) {
3857 if (qp->next_fence)
3858 fence = MLX5_FENCE_MODE_SMALL_AND_FENCE;
3859 else
3860 fence = MLX5_FENCE_MODE_FENCE;
3861 } else {
3862 fence = qp->next_fence;
3863 }
3864
3873 switch (ibqp->qp_type) { 3865 switch (ibqp->qp_type) {
3874 case IB_QPT_XRC_INI: 3866 case IB_QPT_XRC_INI:
3875 xrc = seg; 3867 xrc = seg;
@@ -3896,7 +3888,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
3896 goto out; 3888 goto out;
3897 3889
3898 case IB_WR_LOCAL_INV: 3890 case IB_WR_LOCAL_INV:
3899 next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
3900 qp->sq.wr_data[idx] = IB_WR_LOCAL_INV; 3891 qp->sq.wr_data[idx] = IB_WR_LOCAL_INV;
3901 ctrl->imm = cpu_to_be32(wr->ex.invalidate_rkey); 3892 ctrl->imm = cpu_to_be32(wr->ex.invalidate_rkey);
3902 set_linv_wr(qp, &seg, &size); 3893 set_linv_wr(qp, &seg, &size);
@@ -3904,7 +3895,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
3904 break; 3895 break;
3905 3896
3906 case IB_WR_REG_MR: 3897 case IB_WR_REG_MR:
3907 next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
3908 qp->sq.wr_data[idx] = IB_WR_REG_MR; 3898 qp->sq.wr_data[idx] = IB_WR_REG_MR;
3909 ctrl->imm = cpu_to_be32(reg_wr(wr)->key); 3899 ctrl->imm = cpu_to_be32(reg_wr(wr)->key);
3910 err = set_reg_wr(qp, reg_wr(wr), &seg, &size); 3900 err = set_reg_wr(qp, reg_wr(wr), &seg, &size);
@@ -3927,9 +3917,8 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
3927 goto out; 3917 goto out;
3928 } 3918 }
3929 3919
3930 finish_wqe(qp, ctrl, size, idx, wr->wr_id, 3920 finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq,
3931 nreq, get_fence(fence, wr), 3921 fence, MLX5_OPCODE_UMR);
3932 next_fence, MLX5_OPCODE_UMR);
3933 /* 3922 /*
3934 * SET_PSV WQEs are not signaled and solicited 3923 * SET_PSV WQEs are not signaled and solicited
3935 * on error 3924 * on error
@@ -3954,9 +3943,8 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
3954 goto out; 3943 goto out;
3955 } 3944 }
3956 3945
3957 finish_wqe(qp, ctrl, size, idx, wr->wr_id, 3946 finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq,
3958 nreq, get_fence(fence, wr), 3947 fence, MLX5_OPCODE_SET_PSV);
3959 next_fence, MLX5_OPCODE_SET_PSV);
3960 err = begin_wqe(qp, &seg, &ctrl, wr, 3948 err = begin_wqe(qp, &seg, &ctrl, wr,
3961 &idx, &size, nreq); 3949 &idx, &size, nreq);
3962 if (err) { 3950 if (err) {
@@ -3966,7 +3954,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
3966 goto out; 3954 goto out;
3967 } 3955 }
3968 3956
3969 next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
3970 err = set_psv_wr(&sig_handover_wr(wr)->sig_attrs->wire, 3957 err = set_psv_wr(&sig_handover_wr(wr)->sig_attrs->wire,
3971 mr->sig->psv_wire.psv_idx, &seg, 3958 mr->sig->psv_wire.psv_idx, &seg,
3972 &size); 3959 &size);
@@ -3976,9 +3963,9 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
3976 goto out; 3963 goto out;
3977 } 3964 }
3978 3965
3979 finish_wqe(qp, ctrl, size, idx, wr->wr_id, 3966 finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq,
3980 nreq, get_fence(fence, wr), 3967 fence, MLX5_OPCODE_SET_PSV);
3981 next_fence, MLX5_OPCODE_SET_PSV); 3968 qp->next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
3982 num_sge = 0; 3969 num_sge = 0;
3983 goto skip_psv; 3970 goto skip_psv;
3984 3971
@@ -4089,8 +4076,8 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
4089 } 4076 }
4090 } 4077 }
4091 4078
4092 finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq, 4079 qp->next_fence = next_fence;
4093 get_fence(fence, wr), next_fence, 4080 finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq, fence,
4094 mlx5_ib_opcode[wr->opcode]); 4081 mlx5_ib_opcode[wr->opcode]);
4095skip_psv: 4082skip_psv:
4096 if (0) 4083 if (0)
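
The mlx5 hunks above drop the per-WR get_fence()/fm_cache pairing and instead compute both the current WQE's fence and the cached fence for the following WQE up front, keyed off the device's UMR fence capability. A minimal userspace sketch of that selection logic, with stub MLX5_FENCE_MODE_* values and a reduced work-request struct standing in for the driver's types (the names here are illustrative, not the driver's API):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum {
	MLX5_FENCE_MODE_NONE            = 0,
	MLX5_FENCE_MODE_INITIATOR_SMALL = 1,
	MLX5_FENCE_MODE_FENCE           = 2,
	MLX5_FENCE_MODE_STRONG_ORDERING = 3,
	MLX5_FENCE_MODE_SMALL_AND_FENCE = 4,
};

struct wr {                     /* reduced stand-in for ib_send_wr */
	bool is_umr;            /* IB_WR_LOCAL_INV or IB_WR_REG_MR */
	bool send_fence;        /* IB_SEND_FENCE was requested */
};

/*
 * Mirror of the reworked selection: registration work always takes the
 * device's UMR fence and leaves a small fence pending for the next WQE;
 * an explicit IB_SEND_FENCE combines with whatever fence is pending;
 * everything else just inherits the pending fence.
 */
static uint8_t pick_fence(uint8_t umr_fence, uint8_t *next_fence,
			  const struct wr *wr)
{
	if (wr->is_umr) {
		*next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
		return umr_fence;
	}
	if (wr->send_fence)
		return *next_fence ? MLX5_FENCE_MODE_SMALL_AND_FENCE
				   : MLX5_FENCE_MODE_FENCE;
	return *next_fence;
}

int main(void)
{
	uint8_t next = MLX5_FENCE_MODE_NONE;
	struct wr reg = { .is_umr = true };
	struct wr send = { .send_fence = true };

	printf("reg:  fence=%u\n",
	       (unsigned)pick_fence(MLX5_FENCE_MODE_STRONG_ORDERING, &next, &reg));
	printf("send: fence=%u\n",
	       (unsigned)pick_fence(MLX5_FENCE_MODE_STRONG_ORDERING, &next, &send));
	return 0;
}

With these inputs the second call returns MLX5_FENCE_MODE_SMALL_AND_FENCE, since the registration left a small fence pending for the WQE that follows it.
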
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index fb983df7c157..30b256a2c54e 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -610,7 +610,6 @@ static void build_mpa_v2(struct nes_cm_node *cm_node,
610 ctrl_ord = cm_node->ord_size & IETF_NO_IRD_ORD; 610 ctrl_ord = cm_node->ord_size & IETF_NO_IRD_ORD;
611 } 611 }
612 ctrl_ird |= IETF_PEER_TO_PEER; 612 ctrl_ird |= IETF_PEER_TO_PEER;
613 ctrl_ird |= IETF_FLPDU_ZERO_LEN;
614 613
615 switch (mpa_key) { 614 switch (mpa_key) {
616 case MPA_KEY_REQUEST: 615 case MPA_KEY_REQUEST:
@@ -1826,7 +1825,7 @@ static void handle_rcv_mpa(struct nes_cm_node *cm_node, struct sk_buff *skb)
1826 type = NES_CM_EVENT_CONNECTED; 1825 type = NES_CM_EVENT_CONNECTED;
1827 cm_node->state = NES_CM_STATE_TSA; 1826 cm_node->state = NES_CM_STATE_TSA;
1828 } 1827 }
1829 1828 send_ack(cm_node, NULL);
1830 break; 1829 break;
1831 default: 1830 default:
1832 WARN_ON(1); 1831 WARN_ON(1);
diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h
index aa08c76a4245..d961f79b317c 100644
--- a/drivers/infiniband/hw/qedr/qedr.h
+++ b/drivers/infiniband/hw/qedr/qedr.h
@@ -58,7 +58,10 @@
58#define QEDR_MSG_QP " QP" 58#define QEDR_MSG_QP " QP"
59#define QEDR_MSG_GSI " GSI" 59#define QEDR_MSG_GSI " GSI"
60 60
61#define QEDR_CQ_MAGIC_NUMBER (0x11223344) 61#define QEDR_CQ_MAGIC_NUMBER (0x11223344)
62
63#define FW_PAGE_SIZE (RDMA_RING_PAGE_SIZE)
64#define FW_PAGE_SHIFT (12)
62 65
63struct qedr_dev; 66struct qedr_dev;
64 67
diff --git a/drivers/infiniband/hw/qedr/qedr_cm.c b/drivers/infiniband/hw/qedr/qedr_cm.c
index 3d7705cec770..d86dbe814d98 100644
--- a/drivers/infiniband/hw/qedr/qedr_cm.c
+++ b/drivers/infiniband/hw/qedr/qedr_cm.c
@@ -270,11 +270,13 @@ static inline int qedr_gsi_build_header(struct qedr_dev *dev,
270 return rc; 270 return rc;
271 } 271 }
272 272
273 vlan_id = rdma_vlan_dev_vlan_id(sgid_attr.ndev); 273 if (sgid_attr.ndev) {
274 if (vlan_id < VLAN_CFI_MASK) 274 vlan_id = rdma_vlan_dev_vlan_id(sgid_attr.ndev);
275 has_vlan = true; 275 if (vlan_id < VLAN_CFI_MASK)
276 if (sgid_attr.ndev) 276 has_vlan = true;
277
277 dev_put(sgid_attr.ndev); 278 dev_put(sgid_attr.ndev);
279 }
278 280
279 if (!memcmp(&sgid, &zgid, sizeof(sgid))) { 281 if (!memcmp(&sgid, &zgid, sizeof(sgid))) {
280 DP_ERR(dev, "gsi post send: GID not found GID index %d\n", 282 DP_ERR(dev, "gsi post send: GID not found GID index %d\n",
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 17685cfea6a2..d6723c365c7f 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -653,14 +653,15 @@ static int qedr_prepare_pbl_tbl(struct qedr_dev *dev,
653 653
654static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem, 654static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem,
655 struct qedr_pbl *pbl, 655 struct qedr_pbl *pbl,
656 struct qedr_pbl_info *pbl_info) 656 struct qedr_pbl_info *pbl_info, u32 pg_shift)
657{ 657{
658 int shift, pg_cnt, pages, pbe_cnt, total_num_pbes = 0; 658 int shift, pg_cnt, pages, pbe_cnt, total_num_pbes = 0;
659 u32 fw_pg_cnt, fw_pg_per_umem_pg;
659 struct qedr_pbl *pbl_tbl; 660 struct qedr_pbl *pbl_tbl;
660 struct scatterlist *sg; 661 struct scatterlist *sg;
661 struct regpair *pbe; 662 struct regpair *pbe;
663 u64 pg_addr;
662 int entry; 664 int entry;
663 u32 addr;
664 665
665 if (!pbl_info->num_pbes) 666 if (!pbl_info->num_pbes)
666 return; 667 return;
@@ -683,29 +684,35 @@ static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem,
683 684
684 shift = umem->page_shift; 685 shift = umem->page_shift;
685 686
687 fw_pg_per_umem_pg = BIT(umem->page_shift - pg_shift);
688
686 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) { 689 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
687 pages = sg_dma_len(sg) >> shift; 690 pages = sg_dma_len(sg) >> shift;
691 pg_addr = sg_dma_address(sg);
688 for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) { 692 for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
689 /* store the page address in pbe */ 693 for (fw_pg_cnt = 0; fw_pg_cnt < fw_pg_per_umem_pg;) {
690 pbe->lo = cpu_to_le32(sg_dma_address(sg) + 694 pbe->lo = cpu_to_le32(pg_addr);
691 (pg_cnt << shift)); 695 pbe->hi = cpu_to_le32(upper_32_bits(pg_addr));
692 addr = upper_32_bits(sg_dma_address(sg) + 696
693 (pg_cnt << shift)); 697 pg_addr += BIT(pg_shift);
694 pbe->hi = cpu_to_le32(addr); 698 pbe_cnt++;
695 pbe_cnt++; 699 total_num_pbes++;
696 total_num_pbes++; 700 pbe++;
697 pbe++; 701
698 702 if (total_num_pbes == pbl_info->num_pbes)
699 if (total_num_pbes == pbl_info->num_pbes) 703 return;
700 return; 704
701 705 /* If the given pbl is full of stored pbes,
702 /* If the given pbl is full of stored pbes, 706 * move to the next pbl.
703 * move to the next pbl. 707 */
704 */ 708 if (pbe_cnt ==
705 if (pbe_cnt == (pbl_info->pbl_size / sizeof(u64))) { 709 (pbl_info->pbl_size / sizeof(u64))) {
706 pbl_tbl++; 710 pbl_tbl++;
707 pbe = (struct regpair *)pbl_tbl->va; 711 pbe = (struct regpair *)pbl_tbl->va;
708 pbe_cnt = 0; 712 pbe_cnt = 0;
713 }
714
715 fw_pg_cnt++;
709 } 716 }
710 } 717 }
711 } 718 }
@@ -754,7 +761,7 @@ static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx,
754 u64 buf_addr, size_t buf_len, 761 u64 buf_addr, size_t buf_len,
755 int access, int dmasync) 762 int access, int dmasync)
756{ 763{
757 int page_cnt; 764 u32 fw_pages;
758 int rc; 765 int rc;
759 766
760 q->buf_addr = buf_addr; 767 q->buf_addr = buf_addr;
@@ -766,8 +773,10 @@ static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx,
766 return PTR_ERR(q->umem); 773 return PTR_ERR(q->umem);
767 } 774 }
768 775
769 page_cnt = ib_umem_page_count(q->umem); 776 fw_pages = ib_umem_page_count(q->umem) <<
770 rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, page_cnt, 0); 777 (q->umem->page_shift - FW_PAGE_SHIFT);
778
779 rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, fw_pages, 0);
771 if (rc) 780 if (rc)
772 goto err0; 781 goto err0;
773 782
@@ -777,7 +786,8 @@ static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx,
777 goto err0; 786 goto err0;
778 } 787 }
779 788
780 qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info); 789 qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info,
790 FW_PAGE_SHIFT);
781 791
782 return 0; 792 return 0;
783 793
@@ -2226,7 +2236,7 @@ struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
2226 goto err1; 2236 goto err1;
2227 2237
2228 qedr_populate_pbls(dev, mr->umem, mr->info.pbl_table, 2238 qedr_populate_pbls(dev, mr->umem, mr->info.pbl_table,
2229 &mr->info.pbl_info); 2239 &mr->info.pbl_info, mr->umem->page_shift);
2230 2240
2231 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid); 2241 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
2232 if (rc) { 2242 if (rc) {
@@ -3209,6 +3219,10 @@ static int process_req(struct qedr_dev *dev, struct qedr_qp *qp,
3209 case IB_WC_REG_MR: 3219 case IB_WC_REG_MR:
3210 qp->wqe_wr_id[qp->sq.cons].mr->info.completed++; 3220 qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
3211 break; 3221 break;
3222 case IB_WC_RDMA_READ:
3223 case IB_WC_SEND:
3224 wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
3225 break;
3212 default: 3226 default:
3213 break; 3227 break;
3214 } 3228 }
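
The qedr changes above stop assuming that one umem page maps to one firmware PBE: when the host page (2^page_shift bytes) is larger than the firmware page (FW_PAGE_SHIFT, 12 here, i.e. 4 KiB), each host page is emitted as a run of consecutive firmware-page addresses. A small standalone sketch of that expansion, with printf standing in for writing the lo/hi halves of a PBE (types and names are simplified for illustration):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define FW_PAGE_SHIFT 12        /* 4 KiB firmware page, as in the patch */

static void emit_fw_pages(uint64_t pg_addr, unsigned int page_shift)
{
	/* how many firmware pages fit in one host page */
	unsigned int fw_pg_per_umem_pg = 1u << (page_shift - FW_PAGE_SHIFT);
	unsigned int i;

	for (i = 0; i < fw_pg_per_umem_pg; i++) {
		printf("pbe: lo=0x%08" PRIx32 " hi=0x%08" PRIx32 "\n",
		       (uint32_t)pg_addr, (uint32_t)(pg_addr >> 32));
		pg_addr += UINT64_C(1) << FW_PAGE_SHIFT;
	}
}

int main(void)
{
	/* a single 64 KiB host page expands to sixteen 4 KiB entries */
	emit_fw_pages(UINT64_C(0x100000000), 16);
	return 0;
}

The same scaling shows up in qedr_init_user_queue(), where the umem page count is shifted left by (page_shift - FW_PAGE_SHIFT) before sizing the PBL table.
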
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index fc8b88514da5..4ddbcac5eabe 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -1956,8 +1956,10 @@ send_last:
1956 ret = qib_get_rwqe(qp, 1); 1956 ret = qib_get_rwqe(qp, 1);
1957 if (ret < 0) 1957 if (ret < 0)
1958 goto nack_op_err; 1958 goto nack_op_err;
1959 if (!ret) 1959 if (!ret) {
1960 rvt_put_ss(&qp->r_sge);
1960 goto rnr_nak; 1961 goto rnr_nak;
1962 }
1961 wc.ex.imm_data = ohdr->u.rc.imm_data; 1963 wc.ex.imm_data = ohdr->u.rc.imm_data;
1962 hdrsize += 4; 1964 hdrsize += 4;
1963 wc.wc_flags = IB_WC_WITH_IMM; 1965 wc.wc_flags = IB_WC_WITH_IMM;
diff --git a/drivers/infiniband/sw/rxe/rxe.h b/drivers/infiniband/sw/rxe/rxe.h
index ecdba2fce083..1ac5b8551a4d 100644
--- a/drivers/infiniband/sw/rxe/rxe.h
+++ b/drivers/infiniband/sw/rxe/rxe.h
@@ -68,6 +68,7 @@
68static inline u32 rxe_crc32(struct rxe_dev *rxe, 68static inline u32 rxe_crc32(struct rxe_dev *rxe,
69 u32 crc, void *next, size_t len) 69 u32 crc, void *next, size_t len)
70{ 70{
71 u32 retval;
71 int err; 72 int err;
72 73
73 SHASH_DESC_ON_STACK(shash, rxe->tfm); 74 SHASH_DESC_ON_STACK(shash, rxe->tfm);
@@ -81,7 +82,9 @@ static inline u32 rxe_crc32(struct rxe_dev *rxe,
81 return crc32_le(crc, next, len); 82 return crc32_le(crc, next, len);
82 } 83 }
83 84
84 return *(u32 *)shash_desc_ctx(shash); 85 retval = *(u32 *)shash_desc_ctx(shash);
86 barrier_data(shash_desc_ctx(shash));
87 return retval;
85} 88}
86 89
87int rxe_set_mtu(struct rxe_dev *rxe, unsigned int dev_mtu); 90int rxe_set_mtu(struct rxe_dev *rxe, unsigned int dev_mtu);
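
The rxe_crc32() change copies the digest out of the on-stack shash context and then issues barrier_data() before returning, so the compiler cannot treat the descriptor's stack slot as dead (and reuse or discard it) until after the value has been read out. A self-contained sketch of the copy-then-barrier pattern, with barrier_data() spelled out roughly as the kernel defines it and a plain byte array standing in for the shash context:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* approximately the kernel's definition from compiler-gcc.h */
#define barrier_data(ptr) __asm__ __volatile__("" : : "r"(ptr) : "memory")

static uint32_t read_digest(const unsigned char *src)
{
	unsigned char ctx[4];   /* stand-in for shash_desc_ctx(shash) */
	uint32_t retval;

	memcpy(ctx, src, sizeof(ctx));          /* "crypto" result lands here */
	memcpy(&retval, ctx, sizeof(retval));   /* read it out first ... */
	barrier_data(ctx);      /* ... then pin ctx live up to this point */
	return retval;
}

int main(void)
{
	static const unsigned char digest[4] = { 0x78, 0x56, 0x34, 0x12 };

	printf("crc=0x%08x\n", (unsigned)read_digest(digest));
	return 0;
}
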
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 83d709e74dfb..073e66783f1d 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -740,13 +740,8 @@ static int init_send_wqe(struct rxe_qp *qp, struct ib_send_wr *ibwr,
740 740
741 sge = ibwr->sg_list; 741 sge = ibwr->sg_list;
742 for (i = 0; i < num_sge; i++, sge++) { 742 for (i = 0; i < num_sge; i++, sge++) {
743 if (qp->is_user && copy_from_user(p, (__user void *) 743 memcpy(p, (void *)(uintptr_t)sge->addr,
744 (uintptr_t)sge->addr, sge->length)) 744 sge->length);
745 return -EFAULT;
746
747 else if (!qp->is_user)
748 memcpy(p, (void *)(uintptr_t)sge->addr,
749 sge->length);
750 745
751 p += sge->length; 746 p += sge->length;
752 } 747 }
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
index 874b24366e4d..7871379342f4 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
@@ -178,7 +178,7 @@ static inline int ib_speed_enum_to_int(int speed)
178static int ipoib_get_link_ksettings(struct net_device *netdev, 178static int ipoib_get_link_ksettings(struct net_device *netdev,
179 struct ethtool_link_ksettings *cmd) 179 struct ethtool_link_ksettings *cmd)
180{ 180{
181 struct ipoib_dev_priv *priv = netdev_priv(netdev); 181 struct ipoib_dev_priv *priv = ipoib_priv(netdev);
182 struct ib_port_attr attr; 182 struct ib_port_attr attr;
183 int ret, speed, width; 183 int ret, speed, width;
184 184
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 0060b2f9f659..efe7402f4885 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -863,7 +863,6 @@ dev_stop:
863 set_bit(IPOIB_STOP_REAPER, &priv->flags); 863 set_bit(IPOIB_STOP_REAPER, &priv->flags);
864 cancel_delayed_work(&priv->ah_reap_task); 864 cancel_delayed_work(&priv->ah_reap_task);
865 set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags); 865 set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
866 napi_enable(&priv->napi);
867 ipoib_ib_dev_stop(dev); 866 ipoib_ib_dev_stop(dev);
868 return -1; 867 return -1;
869} 868}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 2869d1adb1de..1015a63de6ae 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -1590,12 +1590,14 @@ static void ipoib_neigh_hash_uninit(struct net_device *dev)
1590 wait_for_completion(&priv->ntbl.deleted); 1590 wait_for_completion(&priv->ntbl.deleted);
1591} 1591}
1592 1592
1593void ipoib_dev_uninit_default(struct net_device *dev) 1593static void ipoib_dev_uninit_default(struct net_device *dev)
1594{ 1594{
1595 struct ipoib_dev_priv *priv = ipoib_priv(dev); 1595 struct ipoib_dev_priv *priv = ipoib_priv(dev);
1596 1596
1597 ipoib_transport_dev_cleanup(dev); 1597 ipoib_transport_dev_cleanup(dev);
1598 1598
1599 netif_napi_del(&priv->napi);
1600
1599 ipoib_cm_dev_cleanup(dev); 1601 ipoib_cm_dev_cleanup(dev);
1600 1602
1601 kfree(priv->rx_ring); 1603 kfree(priv->rx_ring);
@@ -1649,6 +1651,7 @@ out_rx_ring_cleanup:
1649 kfree(priv->rx_ring); 1651 kfree(priv->rx_ring);
1650 1652
1651out: 1653out:
1654 netif_napi_del(&priv->napi);
1652 return -ENOMEM; 1655 return -ENOMEM;
1653} 1656}
1654 1657
@@ -2237,6 +2240,7 @@ event_failed:
2237 2240
2238device_init_failed: 2241device_init_failed:
2239 free_netdev(priv->dev); 2242 free_netdev(priv->dev);
2243 kfree(priv);
2240 2244
2241alloc_mem_failed: 2245alloc_mem_failed:
2242 return ERR_PTR(result); 2246 return ERR_PTR(result);
@@ -2277,7 +2281,7 @@ static void ipoib_add_one(struct ib_device *device)
2277 2281
2278static void ipoib_remove_one(struct ib_device *device, void *client_data) 2282static void ipoib_remove_one(struct ib_device *device, void *client_data)
2279{ 2283{
2280 struct ipoib_dev_priv *priv, *tmp; 2284 struct ipoib_dev_priv *priv, *tmp, *cpriv, *tcpriv;
2281 struct list_head *dev_list = client_data; 2285 struct list_head *dev_list = client_data;
2282 2286
2283 if (!dev_list) 2287 if (!dev_list)
@@ -2300,7 +2304,14 @@ static void ipoib_remove_one(struct ib_device *device, void *client_data)
2300 flush_workqueue(priv->wq); 2304 flush_workqueue(priv->wq);
2301 2305
2302 unregister_netdev(priv->dev); 2306 unregister_netdev(priv->dev);
2303 free_netdev(priv->dev); 2307 if (device->free_rdma_netdev)
2308 device->free_rdma_netdev(priv->dev);
2309 else
2310 free_netdev(priv->dev);
2311
2312 list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list)
2313 kfree(cpriv);
2314
2304 kfree(priv); 2315 kfree(priv);
2305 } 2316 }
2306 2317
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index 36dc4fcaa3cd..081b33deff1b 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -133,13 +133,13 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
133 snprintf(intf_name, sizeof intf_name, "%s.%04x", 133 snprintf(intf_name, sizeof intf_name, "%s.%04x",
134 ppriv->dev->name, pkey); 134 ppriv->dev->name, pkey);
135 135
136 if (!rtnl_trylock())
137 return restart_syscall();
138
136 priv = ipoib_intf_alloc(ppriv->ca, ppriv->port, intf_name); 139 priv = ipoib_intf_alloc(ppriv->ca, ppriv->port, intf_name);
137 if (!priv) 140 if (!priv)
138 return -ENOMEM; 141 return -ENOMEM;
139 142
140 if (!rtnl_trylock())
141 return restart_syscall();
142
143 down_write(&ppriv->vlan_rwsem); 143 down_write(&ppriv->vlan_rwsem);
144 144
145 /* 145 /*
@@ -167,8 +167,10 @@ out:
167 167
168 rtnl_unlock(); 168 rtnl_unlock();
169 169
170 if (result) 170 if (result) {
171 free_netdev(priv->dev); 171 free_netdev(priv->dev);
172 kfree(priv);
173 }
172 174
173 return result; 175 return result;
174} 176}
@@ -209,6 +211,7 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
209 211
210 if (dev) { 212 if (dev) {
211 free_netdev(dev); 213 free_netdev(dev);
214 kfree(priv);
212 return 0; 215 return 0;
213 } 216 }
214 217
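
The ipoib_vlan_add() hunk moves rtnl_trylock() in front of the interface allocation: if the lock is contended and the syscall has to be restarted, nothing has been allocated yet, so nothing can leak, whereas the old order dropped the freshly allocated priv when it backed off. A toy sketch of the lock-before-allocate ordering, with a boolean standing in for the rtnl lock and EAGAIN standing in for restart_syscall():

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static bool lock_held;

static bool trylock(void)           /* stand-in for rtnl_trylock() */
{
	if (lock_held)
		return false;
	lock_held = true;
	return true;
}

static void unlock(void) { lock_held = false; }

static int vlan_add(void)
{
	void *priv;

	/* take the lock before allocating: backing off leaks nothing */
	if (!trylock())
		return -EAGAIN;     /* stand-in for restart_syscall() */

	priv = malloc(128);
	if (!priv) {
		unlock();
		return -ENOMEM;
	}

	/* ... interface setup would happen here ... */

	unlock();
	free(priv);
	return 0;
}

int main(void)
{
	printf("first:  %d\n", vlan_add());
	lock_held = true;               /* simulate contention */
	printf("locked: %d\n", vlan_add());
	return 0;
}
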
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index def723a5df29..2354c742caa1 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -320,7 +320,7 @@ static int srp_new_cm_id(struct srp_rdma_ch *ch)
320 ch->path.sgid = target->sgid; 320 ch->path.sgid = target->sgid;
321 ch->path.dgid = target->orig_dgid; 321 ch->path.dgid = target->orig_dgid;
322 ch->path.pkey = target->pkey; 322 ch->path.pkey = target->pkey;
323 sa_path_set_service_id(&ch->path, target->service_id); 323 ch->path.service_id = target->service_id;
324 324
325 return 0; 325 return 0;
326} 326}
@@ -575,7 +575,7 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch)
575 return 0; 575 return 0;
576 576
577err_qp: 577err_qp:
578 srp_destroy_qp(ch, qp); 578 ib_destroy_qp(qp);
579 579
580err_send_cq: 580err_send_cq:
581 ib_free_cq(send_cq); 581 ib_free_cq(send_cq);
diff --git a/drivers/input/keyboard/tm2-touchkey.c b/drivers/input/keyboard/tm2-touchkey.c
index 485900f953e0..abc266e40e17 100644
--- a/drivers/input/keyboard/tm2-touchkey.c
+++ b/drivers/input/keyboard/tm2-touchkey.c
@@ -213,7 +213,7 @@ static int tm2_touchkey_probe(struct i2c_client *client,
213 /* led device */ 213 /* led device */
214 touchkey->led_dev.name = TM2_TOUCHKEY_DEV_NAME; 214 touchkey->led_dev.name = TM2_TOUCHKEY_DEV_NAME;
215 touchkey->led_dev.brightness = LED_FULL; 215 touchkey->led_dev.brightness = LED_FULL;
216 touchkey->led_dev.max_brightness = LED_FULL; 216 touchkey->led_dev.max_brightness = LED_ON;
217 touchkey->led_dev.brightness_set = tm2_touchkey_led_brightness_set; 217 touchkey->led_dev.brightness_set = tm2_touchkey_led_brightness_set;
218 218
219 error = devm_led_classdev_register(&client->dev, &touchkey->led_dev); 219 error = devm_led_classdev_register(&client->dev, &touchkey->led_dev);
diff --git a/drivers/input/misc/axp20x-pek.c b/drivers/input/misc/axp20x-pek.c
index f11807db6979..400869e61a06 100644
--- a/drivers/input/misc/axp20x-pek.c
+++ b/drivers/input/misc/axp20x-pek.c
@@ -256,6 +256,42 @@ static int axp20x_pek_probe_input_device(struct axp20x_pek *axp20x_pek,
256 return 0; 256 return 0;
257} 257}
258 258
259#ifdef CONFIG_ACPI
260static bool axp20x_pek_should_register_input(struct axp20x_pek *axp20x_pek,
261 struct platform_device *pdev)
262{
263 unsigned long long hrv = 0;
264 acpi_status status;
265
266 if (IS_ENABLED(CONFIG_INPUT_SOC_BUTTON_ARRAY) &&
267 axp20x_pek->axp20x->variant == AXP288_ID) {
268 status = acpi_evaluate_integer(ACPI_HANDLE(pdev->dev.parent),
269 "_HRV", NULL, &hrv);
270 if (ACPI_FAILURE(status))
271 dev_err(&pdev->dev, "Failed to get PMIC hardware revision\n");
272
273 /*
274 * On Cherry Trail platforms (hrv == 3), do not register the
275 * input device if there is an "INTCFD9" or "ACPI0011" gpio
276 * button ACPI device, as that handles the power button too,
277 * and otherwise we end up reporting all presses twice.
278 */
279 if (hrv == 3 && (acpi_dev_present("INTCFD9", NULL, -1) ||
280 acpi_dev_present("ACPI0011", NULL, -1)))
281 return false;
282
283 }
284
285 return true;
286}
287#else
288static bool axp20x_pek_should_register_input(struct axp20x_pek *axp20x_pek,
289 struct platform_device *pdev)
290{
291 return true;
292}
293#endif
294
259static int axp20x_pek_probe(struct platform_device *pdev) 295static int axp20x_pek_probe(struct platform_device *pdev)
260{ 296{
261 struct axp20x_pek *axp20x_pek; 297 struct axp20x_pek *axp20x_pek;
@@ -268,13 +304,7 @@ static int axp20x_pek_probe(struct platform_device *pdev)
268 304
269 axp20x_pek->axp20x = dev_get_drvdata(pdev->dev.parent); 305 axp20x_pek->axp20x = dev_get_drvdata(pdev->dev.parent);
270 306
271 /* 307 if (axp20x_pek_should_register_input(axp20x_pek, pdev)) {
272 * Do not register the input device if there is an "INTCFD9"
273 * gpio button ACPI device, that handles the power button too,
274 * and otherwise we end up reporting all presses twice.
275 */
276 if (!acpi_dev_found("INTCFD9") ||
277 !IS_ENABLED(CONFIG_INPUT_SOC_BUTTON_ARRAY)) {
278 error = axp20x_pek_probe_input_device(axp20x_pek, pdev); 308 error = axp20x_pek_probe_input_device(axp20x_pek, pdev);
279 if (error) 309 if (error)
280 return error; 310 return error;
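
The axp20x-pek change folds the registration policy into a helper with two builds: a real check under CONFIG_ACPI and a trivial stub that always registers the input device otherwise, which keeps the #ifdef out of the probe path. A compile-time sketch of that stub pattern (the HAVE_ACPI macro and helper name below are invented for illustration):

#include <stdbool.h>
#include <stdio.h>

/* #define HAVE_ACPI 1 */  /* flip on to take the "real" branch */

#ifdef HAVE_ACPI
static bool should_register_input(void)
{
	/* the driver queries the PMIC hardware revision and ACPI
	 * tables here; report false when a firmware button device
	 * already covers the power key */
	return false;
}
#else
static bool should_register_input(void)
{
	return true;    /* no ACPI support built in: always register */
}
#endif

int main(void)
{
	printf("register input device: %s\n",
	       should_register_input() ? "yes" : "no");
	return 0;
}
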
diff --git a/drivers/input/misc/soc_button_array.c b/drivers/input/misc/soc_button_array.c
index e37d37273182..f600f3a7a3c6 100644
--- a/drivers/input/misc/soc_button_array.c
+++ b/drivers/input/misc/soc_button_array.c
@@ -248,7 +248,8 @@ static struct soc_button_info *soc_button_get_button_info(struct device *dev)
248 248
249 if (!btns_desc) { 249 if (!btns_desc) {
250 dev_err(dev, "ACPI Button Descriptors not found\n"); 250 dev_err(dev, "ACPI Button Descriptors not found\n");
251 return ERR_PTR(-ENODEV); 251 button_info = ERR_PTR(-ENODEV);
252 goto out;
252 } 253 }
253 254
254 /* The first package describes the collection */ 255 /* The first package describes the collection */
@@ -264,24 +265,31 @@ static struct soc_button_info *soc_button_get_button_info(struct device *dev)
264 } 265 }
265 if (collection_uid == -1) { 266 if (collection_uid == -1) {
266 dev_err(dev, "Invalid Button Collection Descriptor\n"); 267 dev_err(dev, "Invalid Button Collection Descriptor\n");
267 return ERR_PTR(-ENODEV); 268 button_info = ERR_PTR(-ENODEV);
269 goto out;
268 } 270 }
269 271
270 /* There are package.count - 1 buttons + 1 terminating empty entry */ 272 /* There are package.count - 1 buttons + 1 terminating empty entry */
271 button_info = devm_kcalloc(dev, btns_desc->package.count, 273 button_info = devm_kcalloc(dev, btns_desc->package.count,
272 sizeof(*button_info), GFP_KERNEL); 274 sizeof(*button_info), GFP_KERNEL);
273 if (!button_info) 275 if (!button_info) {
274 return ERR_PTR(-ENOMEM); 276 button_info = ERR_PTR(-ENOMEM);
277 goto out;
278 }
275 279
276 /* Parse the button descriptors */ 280 /* Parse the button descriptors */
277 for (i = 1, btn = 0; i < btns_desc->package.count; i++, btn++) { 281 for (i = 1, btn = 0; i < btns_desc->package.count; i++, btn++) {
278 if (soc_button_parse_btn_desc(dev, 282 if (soc_button_parse_btn_desc(dev,
279 &btns_desc->package.elements[i], 283 &btns_desc->package.elements[i],
280 collection_uid, 284 collection_uid,
281 &button_info[btn])) 285 &button_info[btn])) {
282 return ERR_PTR(-ENODEV); 286 button_info = ERR_PTR(-ENODEV);
287 goto out;
288 }
283 } 289 }
284 290
291out:
292 kfree(buf.pointer);
285 return button_info; 293 return button_info;
286} 294}
287 295
diff --git a/drivers/input/mouse/elan_i2c_i2c.c b/drivers/input/mouse/elan_i2c_i2c.c
index a679e56c44cd..f431da07f861 100644
--- a/drivers/input/mouse/elan_i2c_i2c.c
+++ b/drivers/input/mouse/elan_i2c_i2c.c
@@ -554,32 +554,34 @@ static int elan_i2c_finish_fw_update(struct i2c_client *client,
554 struct completion *completion) 554 struct completion *completion)
555{ 555{
556 struct device *dev = &client->dev; 556 struct device *dev = &client->dev;
557 long ret;
558 int error; 557 int error;
559 int len; 558 int len;
560 u8 buffer[ETP_I2C_INF_LENGTH]; 559 u8 buffer[ETP_I2C_REPORT_LEN];
560
561 len = i2c_master_recv(client, buffer, ETP_I2C_REPORT_LEN);
562 if (len != ETP_I2C_REPORT_LEN) {
563 error = len < 0 ? len : -EIO;
564 dev_warn(dev, "failed to read I2C data after FW WDT reset: %d (%d)\n",
565 error, len);
566 }
561 567
562 reinit_completion(completion); 568 reinit_completion(completion);
563 enable_irq(client->irq); 569 enable_irq(client->irq);
564 570
565 error = elan_i2c_write_cmd(client, ETP_I2C_STAND_CMD, ETP_I2C_RESET); 571 error = elan_i2c_write_cmd(client, ETP_I2C_STAND_CMD, ETP_I2C_RESET);
566 if (!error)
567 ret = wait_for_completion_interruptible_timeout(completion,
568 msecs_to_jiffies(300));
569 disable_irq(client->irq);
570
571 if (error) { 572 if (error) {
572 dev_err(dev, "device reset failed: %d\n", error); 573 dev_err(dev, "device reset failed: %d\n", error);
573 return error; 574 } else if (!wait_for_completion_timeout(completion,
574 } else if (ret == 0) { 575 msecs_to_jiffies(300))) {
575 dev_err(dev, "timeout waiting for device reset\n"); 576 dev_err(dev, "timeout waiting for device reset\n");
576 return -ETIMEDOUT; 577 error = -ETIMEDOUT;
577 } else if (ret < 0) {
578 error = ret;
579 dev_err(dev, "error waiting for device reset: %d\n", error);
580 return error;
581 } 578 }
582 579
580 disable_irq(client->irq);
581
582 if (error)
583 return error;
584
583 len = i2c_master_recv(client, buffer, ETP_I2C_INF_LENGTH); 585 len = i2c_master_recv(client, buffer, ETP_I2C_INF_LENGTH);
584 if (len != ETP_I2C_INF_LENGTH) { 586 if (len != ETP_I2C_INF_LENGTH) {
585 error = len < 0 ? len : -EIO; 587 error = len < 0 ? len : -EIO;
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index e73d968023f7..f1fa1f172107 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1118,8 +1118,10 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
1118 * Asus UX32VD 0x361f02 00, 15, 0e clickpad 1118 * Asus UX32VD 0x361f02 00, 15, 0e clickpad
1119 * Avatar AVIU-145A2 0x361f00 ? clickpad 1119 * Avatar AVIU-145A2 0x361f00 ? clickpad
1120 * Fujitsu LIFEBOOK E544 0x470f00 d0, 12, 09 2 hw buttons 1120 * Fujitsu LIFEBOOK E544 0x470f00 d0, 12, 09 2 hw buttons
1121 * Fujitsu LIFEBOOK E546 0x470f00 50, 12, 09 2 hw buttons
1121 * Fujitsu LIFEBOOK E547 0x470f00 50, 12, 09 2 hw buttons 1122 * Fujitsu LIFEBOOK E547 0x470f00 50, 12, 09 2 hw buttons
1122 * Fujitsu LIFEBOOK E554 0x570f01 40, 14, 0c 2 hw buttons 1123 * Fujitsu LIFEBOOK E554 0x570f01 40, 14, 0c 2 hw buttons
1124 * Fujitsu LIFEBOOK E557 0x570f01 40, 14, 0c 2 hw buttons
1123 * Fujitsu T725 0x470f01 05, 12, 09 2 hw buttons 1125 * Fujitsu T725 0x470f01 05, 12, 09 2 hw buttons
1124 * Fujitsu H730 0x570f00 c0, 14, 0c 3 hw buttons (**) 1126 * Fujitsu H730 0x570f00 c0, 14, 0c 3 hw buttons (**)
1125 * Gigabyte U2442 0x450f01 58, 17, 0c 2 hw buttons 1127 * Gigabyte U2442 0x450f01 58, 17, 0c 2 hw buttons
@@ -1525,6 +1527,13 @@ static const struct dmi_system_id elantech_dmi_force_crc_enabled[] = {
1525 }, 1527 },
1526 }, 1528 },
1527 { 1529 {
1530 /* Fujitsu LIFEBOOK E546 does not work with crc_enabled == 0 */
1531 .matches = {
1532 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
1533 DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E546"),
1534 },
1535 },
1536 {
1528 /* Fujitsu LIFEBOOK E547 does not work with crc_enabled == 0 */ 1537 /* Fujitsu LIFEBOOK E547 does not work with crc_enabled == 0 */
1529 .matches = { 1538 .matches = {
1530 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), 1539 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
@@ -1546,6 +1555,13 @@ static const struct dmi_system_id elantech_dmi_force_crc_enabled[] = {
1546 }, 1555 },
1547 }, 1556 },
1548 { 1557 {
1558 /* Fujitsu LIFEBOOK E557 does not work with crc_enabled == 0 */
1559 .matches = {
1560 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
1561 DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E557"),
1562 },
1563 },
1564 {
1549 /* Fujitsu LIFEBOOK U745 does not work with crc_enabled == 0 */ 1565 /* Fujitsu LIFEBOOK U745 does not work with crc_enabled == 0 */
1550 .matches = { 1566 .matches = {
1551 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), 1567 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 131df9d3660f..16c30460ef04 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -176,6 +176,12 @@ static const char * const smbus_pnp_ids[] = {
176 NULL 176 NULL
177}; 177};
178 178
179static const char * const forcepad_pnp_ids[] = {
180 "SYN300D",
181 "SYN3014",
182 NULL
183};
184
179/* 185/*
180 * Send a command to the synaptics touchpad by special commands 186 * Send a command to the synaptics touchpad by special commands
181 */ 187 */
@@ -397,6 +403,8 @@ static int synaptics_query_hardware(struct psmouse *psmouse,
397{ 403{
398 int error; 404 int error;
399 405
406 memset(info, 0, sizeof(*info));
407
400 error = synaptics_identify(psmouse, info); 408 error = synaptics_identify(psmouse, info);
401 if (error) 409 if (error)
402 return error; 410 return error;
@@ -480,13 +488,6 @@ static const struct min_max_quirk min_max_pnpid_table[] = {
480 { } 488 { }
481}; 489};
482 490
483/* This list has been kindly provided by Synaptics. */
484static const char * const forcepad_pnp_ids[] = {
485 "SYN300D",
486 "SYN3014",
487 NULL
488};
489
490/***************************************************************************** 491/*****************************************************************************
491 * Synaptics communications functions 492 * Synaptics communications functions
492 ****************************************************************************/ 493 ****************************************************************************/
@@ -1687,7 +1688,8 @@ enum {
1687 SYNAPTICS_INTERTOUCH_ON, 1688 SYNAPTICS_INTERTOUCH_ON,
1688}; 1689};
1689 1690
1690static int synaptics_intertouch = SYNAPTICS_INTERTOUCH_NOT_SET; 1691static int synaptics_intertouch = IS_ENABLED(CONFIG_RMI4_SMB) ?
1692 SYNAPTICS_INTERTOUCH_NOT_SET : SYNAPTICS_INTERTOUCH_OFF;
1691module_param_named(synaptics_intertouch, synaptics_intertouch, int, 0644); 1693module_param_named(synaptics_intertouch, synaptics_intertouch, int, 0644);
1692MODULE_PARM_DESC(synaptics_intertouch, "Use a secondary bus for the Synaptics device."); 1694MODULE_PARM_DESC(synaptics_intertouch, "Use a secondary bus for the Synaptics device.");
1693 1695
@@ -1737,8 +1739,16 @@ static int synaptics_setup_intertouch(struct psmouse *psmouse,
1737 1739
1738 if (synaptics_intertouch == SYNAPTICS_INTERTOUCH_NOT_SET) { 1740 if (synaptics_intertouch == SYNAPTICS_INTERTOUCH_NOT_SET) {
1739 if (!psmouse_matches_pnp_id(psmouse, topbuttonpad_pnp_ids) && 1741 if (!psmouse_matches_pnp_id(psmouse, topbuttonpad_pnp_ids) &&
1740 !psmouse_matches_pnp_id(psmouse, smbus_pnp_ids)) 1742 !psmouse_matches_pnp_id(psmouse, smbus_pnp_ids)) {
1743
1744 if (!psmouse_matches_pnp_id(psmouse, forcepad_pnp_ids))
1745 psmouse_info(psmouse,
1746 "Your touchpad (%s) says it can support a different bus. "
1747 "If i2c-hid and hid-rmi are not used, you might want to try setting psmouse.synaptics_intertouch to 1 and report this to linux-input@vger.kernel.org.\n",
1748 psmouse->ps2dev.serio->firmware_id);
1749
1741 return -ENXIO; 1750 return -ENXIO;
1751 }
1742 } 1752 }
1743 1753
1744 psmouse_info(psmouse, "Trying to set up SMBus access\n"); 1754 psmouse_info(psmouse, "Trying to set up SMBus access\n");
@@ -1810,6 +1820,15 @@ int synaptics_init(struct psmouse *psmouse)
1810 } 1820 }
1811 1821
1812 if (SYN_CAP_INTERTOUCH(info.ext_cap_0c)) { 1822 if (SYN_CAP_INTERTOUCH(info.ext_cap_0c)) {
1823 if ((!IS_ENABLED(CONFIG_RMI4_SMB) ||
1824 !IS_ENABLED(CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS)) &&
1825 /* Forcepads need F21, which is not ready */
1826 !psmouse_matches_pnp_id(psmouse, forcepad_pnp_ids)) {
1827 psmouse_warn(psmouse,
1828 "The touchpad can support a better bus than the too old PS/2 protocol. "
1829 "Make sure MOUSE_PS2_SYNAPTICS_SMBUS and RMI4_SMB are enabled to get a better touchpad experience.\n");
1830 }
1831
1813 error = synaptics_setup_intertouch(psmouse, &info, true); 1832 error = synaptics_setup_intertouch(psmouse, &info, true);
1814 if (!error) 1833 if (!error)
1815 return PSMOUSE_SYNAPTICS_SMBUS; 1834 return PSMOUSE_SYNAPTICS_SMBUS;
diff --git a/drivers/input/rmi4/rmi_f03.c b/drivers/input/rmi4/rmi_f03.c
index 77dad045a468..ad71a5e768dc 100644
--- a/drivers/input/rmi4/rmi_f03.c
+++ b/drivers/input/rmi4/rmi_f03.c
@@ -146,7 +146,7 @@ static int rmi_f03_register_pt(struct f03_data *f03)
146 if (!serio) 146 if (!serio)
147 return -ENOMEM; 147 return -ENOMEM;
148 148
149 serio->id.type = SERIO_8042; 149 serio->id.type = SERIO_PS_PSTHRU;
150 serio->write = rmi_f03_pt_write; 150 serio->write = rmi_f03_pt_write;
151 serio->port_data = f03; 151 serio->port_data = f03;
152 152
diff --git a/drivers/input/rmi4/rmi_f54.c b/drivers/input/rmi4/rmi_f54.c
index dea63e2db3e6..f5206e2c767e 100644
--- a/drivers/input/rmi4/rmi_f54.c
+++ b/drivers/input/rmi4/rmi_f54.c
@@ -31,9 +31,6 @@
31#define F54_GET_REPORT 1 31#define F54_GET_REPORT 1
32#define F54_FORCE_CAL 2 32#define F54_FORCE_CAL 2
33 33
34/* Fixed sizes of reports */
35#define F54_QUERY_LEN 27
36
37/* F54 capabilities */ 34/* F54 capabilities */
38#define F54_CAP_BASELINE (1 << 2) 35#define F54_CAP_BASELINE (1 << 2)
39#define F54_CAP_IMAGE8 (1 << 3) 36#define F54_CAP_IMAGE8 (1 << 3)
@@ -95,7 +92,6 @@ struct rmi_f54_reports {
95struct f54_data { 92struct f54_data {
96 struct rmi_function *fn; 93 struct rmi_function *fn;
97 94
98 u8 qry[F54_QUERY_LEN];
99 u8 num_rx_electrodes; 95 u8 num_rx_electrodes;
100 u8 num_tx_electrodes; 96 u8 num_tx_electrodes;
101 u8 capabilities; 97 u8 capabilities;
@@ -632,22 +628,23 @@ static int rmi_f54_detect(struct rmi_function *fn)
632{ 628{
633 int error; 629 int error;
634 struct f54_data *f54; 630 struct f54_data *f54;
631 u8 buf[6];
635 632
636 f54 = dev_get_drvdata(&fn->dev); 633 f54 = dev_get_drvdata(&fn->dev);
637 634
638 error = rmi_read_block(fn->rmi_dev, fn->fd.query_base_addr, 635 error = rmi_read_block(fn->rmi_dev, fn->fd.query_base_addr,
639 &f54->qry, sizeof(f54->qry)); 636 buf, sizeof(buf));
640 if (error) { 637 if (error) {
641 dev_err(&fn->dev, "%s: Failed to query F54 properties\n", 638 dev_err(&fn->dev, "%s: Failed to query F54 properties\n",
642 __func__); 639 __func__);
643 return error; 640 return error;
644 } 641 }
645 642
646 f54->num_rx_electrodes = f54->qry[0]; 643 f54->num_rx_electrodes = buf[0];
647 f54->num_tx_electrodes = f54->qry[1]; 644 f54->num_tx_electrodes = buf[1];
648 f54->capabilities = f54->qry[2]; 645 f54->capabilities = buf[2];
649 f54->clock_rate = f54->qry[3] | (f54->qry[4] << 8); 646 f54->clock_rate = buf[3] | (buf[4] << 8);
650 f54->family = f54->qry[5]; 647 f54->family = buf[5];
651 648
652 rmi_dbg(RMI_DEBUG_FN, &fn->dev, "F54 num_rx_electrodes: %d\n", 649 rmi_dbg(RMI_DEBUG_FN, &fn->dev, "F54 num_rx_electrodes: %d\n",
653 f54->num_rx_electrodes); 650 f54->num_rx_electrodes);
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 09720d950686..f932a83b4990 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -723,6 +723,13 @@ static const struct dmi_system_id __initconst i8042_dmi_notimeout_table[] = {
723 DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U574"), 723 DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U574"),
724 }, 724 },
725 }, 725 },
726 {
727 /* Fujitsu UH554 laptop */
728 .matches = {
729 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
730 DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK UH544"),
731 },
732 },
726 { } 733 { }
727}; 734};
728 735
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index 2302aef2b2d4..dd042a9b0aaa 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -350,6 +350,7 @@ static bool mxt_object_readable(unsigned int type)
350 case MXT_TOUCH_KEYARRAY_T15: 350 case MXT_TOUCH_KEYARRAY_T15:
351 case MXT_TOUCH_PROXIMITY_T23: 351 case MXT_TOUCH_PROXIMITY_T23:
352 case MXT_TOUCH_PROXKEY_T52: 352 case MXT_TOUCH_PROXKEY_T52:
353 case MXT_TOUCH_MULTITOUCHSCREEN_T100:
353 case MXT_PROCI_GRIPFACE_T20: 354 case MXT_PROCI_GRIPFACE_T20:
354 case MXT_PROCG_NOISE_T22: 355 case MXT_PROCG_NOISE_T22:
355 case MXT_PROCI_ONETOUCH_T24: 356 case MXT_PROCI_ONETOUCH_T24:
diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c
index 8cf8d8d5d4ef..f872817e81e4 100644
--- a/drivers/input/touchscreen/edt-ft5x06.c
+++ b/drivers/input/touchscreen/edt-ft5x06.c
@@ -471,7 +471,7 @@ static EDT_ATTR(gain, S_IWUSR | S_IRUGO, WORK_REGISTER_GAIN,
471static EDT_ATTR(offset, S_IWUSR | S_IRUGO, WORK_REGISTER_OFFSET, 471static EDT_ATTR(offset, S_IWUSR | S_IRUGO, WORK_REGISTER_OFFSET,
472 M09_REGISTER_OFFSET, 0, 31); 472 M09_REGISTER_OFFSET, 0, 31);
473static EDT_ATTR(threshold, S_IWUSR | S_IRUGO, WORK_REGISTER_THRESHOLD, 473static EDT_ATTR(threshold, S_IWUSR | S_IRUGO, WORK_REGISTER_THRESHOLD,
474 M09_REGISTER_THRESHOLD, 20, 80); 474 M09_REGISTER_THRESHOLD, 0, 80);
475static EDT_ATTR(report_rate, S_IWUSR | S_IRUGO, WORK_REGISTER_REPORT_RATE, 475static EDT_ATTR(report_rate, S_IWUSR | S_IRUGO, WORK_REGISTER_REPORT_RATE,
476 NO_REGISTER, 3, 14); 476 NO_REGISTER, 3, 14);
477 477
diff --git a/drivers/input/touchscreen/silead.c b/drivers/input/touchscreen/silead.c
index 813dd68a5c82..0dbcf105f7db 100644
--- a/drivers/input/touchscreen/silead.c
+++ b/drivers/input/touchscreen/silead.c
@@ -526,6 +526,7 @@ static int __maybe_unused silead_ts_suspend(struct device *dev)
526{ 526{
527 struct i2c_client *client = to_i2c_client(dev); 527 struct i2c_client *client = to_i2c_client(dev);
528 528
529 disable_irq(client->irq);
529 silead_ts_set_power(client, SILEAD_POWER_OFF); 530 silead_ts_set_power(client, SILEAD_POWER_OFF);
530 return 0; 531 return 0;
531} 532}
@@ -551,6 +552,8 @@ static int __maybe_unused silead_ts_resume(struct device *dev)
551 return -ENODEV; 552 return -ENODEV;
552 } 553 }
553 554
555 enable_irq(client->irq);
556
554 return 0; 557 return 0;
555} 558}
556 559
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 63cacf5d6cf2..0f1219fa8561 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -3879,11 +3879,9 @@ static void irte_ga_prepare(void *entry,
3879 u8 vector, u32 dest_apicid, int devid) 3879 u8 vector, u32 dest_apicid, int devid)
3880{ 3880{
3881 struct irte_ga *irte = (struct irte_ga *) entry; 3881 struct irte_ga *irte = (struct irte_ga *) entry;
3882 struct iommu_dev_data *dev_data = search_dev_data(devid);
3883 3882
3884 irte->lo.val = 0; 3883 irte->lo.val = 0;
3885 irte->hi.val = 0; 3884 irte->hi.val = 0;
3886 irte->lo.fields_remap.guest_mode = dev_data ? dev_data->use_vapic : 0;
3887 irte->lo.fields_remap.int_type = delivery_mode; 3885 irte->lo.fields_remap.int_type = delivery_mode;
3888 irte->lo.fields_remap.dm = dest_mode; 3886 irte->lo.fields_remap.dm = dest_mode;
3889 irte->hi.fields.vector = vector; 3887 irte->hi.fields.vector = vector;
@@ -3939,10 +3937,10 @@ static void irte_ga_set_affinity(void *entry, u16 devid, u16 index,
3939 struct irte_ga *irte = (struct irte_ga *) entry; 3937 struct irte_ga *irte = (struct irte_ga *) entry;
3940 struct iommu_dev_data *dev_data = search_dev_data(devid); 3938 struct iommu_dev_data *dev_data = search_dev_data(devid);
3941 3939
3942 if (!dev_data || !dev_data->use_vapic) { 3940 if (!dev_data || !dev_data->use_vapic ||
3941 !irte->lo.fields_remap.guest_mode) {
3943 irte->hi.fields.vector = vector; 3942 irte->hi.fields.vector = vector;
3944 irte->lo.fields_remap.destination = dest_apicid; 3943 irte->lo.fields_remap.destination = dest_apicid;
3945 irte->lo.fields_remap.guest_mode = 0;
3946 modify_irte_ga(devid, index, irte, NULL); 3944 modify_irte_ga(devid, index, irte, NULL);
3947 } 3945 }
3948} 3946}
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 8348f366ddd1..62618e77bedc 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -396,13 +396,13 @@ static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
396 dma_addr_t iova, size_t size) 396 dma_addr_t iova, size_t size)
397{ 397{
398 struct iova_domain *iovad = &cookie->iovad; 398 struct iova_domain *iovad = &cookie->iovad;
399 unsigned long shift = iova_shift(iovad);
400 399
401 /* The MSI case is only ever cleaning up its most recent allocation */ 400 /* The MSI case is only ever cleaning up its most recent allocation */
402 if (cookie->type == IOMMU_DMA_MSI_COOKIE) 401 if (cookie->type == IOMMU_DMA_MSI_COOKIE)
403 cookie->msi_iova -= size; 402 cookie->msi_iova -= size;
404 else 403 else
405 free_iova_fast(iovad, iova >> shift, size >> shift); 404 free_iova_fast(iovad, iova_pfn(iovad, iova),
405 size >> iova_shift(iovad));
406} 406}
407 407
408static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr, 408static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr,
@@ -617,11 +617,14 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
617{ 617{
618 struct iommu_domain *domain = iommu_get_domain_for_dev(dev); 618 struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
619 struct iommu_dma_cookie *cookie = domain->iova_cookie; 619 struct iommu_dma_cookie *cookie = domain->iova_cookie;
620 struct iova_domain *iovad = &cookie->iovad; 620 size_t iova_off = 0;
621 size_t iova_off = iova_offset(iovad, phys);
622 dma_addr_t iova; 621 dma_addr_t iova;
623 622
624 size = iova_align(iovad, size + iova_off); 623 if (cookie->type == IOMMU_DMA_IOVA_COOKIE) {
624 iova_off = iova_offset(&cookie->iovad, phys);
625 size = iova_align(&cookie->iovad, size + iova_off);
626 }
627
625 iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev); 628 iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
626 if (!iova) 629 if (!iova)
627 return DMA_ERROR_CODE; 630 return DMA_ERROR_CODE;
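
The __iommu_dma_map() hunk makes the offset/alignment step conditional on the cookie type: only a full IOVA cookie carries an initialized iova_domain, so MSI cookies now skip iova_offset()/iova_align() instead of touching granule state they never set up. A sketch of the conditional rounding (struct and field names are simplified stand-ins):

#include <stdint.h>
#include <stdio.h>

enum cookie_type { IOVA_COOKIE, MSI_COOKIE };

struct cookie {
	enum cookie_type type;
	uint64_t granule;       /* only meaningful for IOVA_COOKIE */
};

/* size to allocate, as in __iommu_dma_map(): align only when the
 * cookie actually owns an iova_domain */
static uint64_t map_size(const struct cookie *c, uint64_t phys,
			 uint64_t size)
{
	uint64_t off = 0;

	if (c->type == IOVA_COOKIE) {
		off = phys & (c->granule - 1);
		size = (size + off + c->granule - 1) & ~(c->granule - 1);
	}
	return size;
}

int main(void)
{
	struct cookie iova = { IOVA_COOKIE, 4096 };
	struct cookie msi = { MSI_COOKIE, 0 };

	printf("iova cookie: %llu\n",
	       (unsigned long long)map_size(&iova, 0x1234, 8192));
	printf("msi cookie:  %llu\n",
	       (unsigned long long)map_size(&msi, 0x1234, 8192));
	return 0;
}
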
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 90ab0115d78e..fc2765ccdb57 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -2055,11 +2055,14 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
2055 if (context_copied(context)) { 2055 if (context_copied(context)) {
2056 u16 did_old = context_domain_id(context); 2056 u16 did_old = context_domain_id(context);
2057 2057
2058 if (did_old >= 0 && did_old < cap_ndoms(iommu->cap)) 2058 if (did_old >= 0 && did_old < cap_ndoms(iommu->cap)) {
2059 iommu->flush.flush_context(iommu, did_old, 2059 iommu->flush.flush_context(iommu, did_old,
2060 (((u16)bus) << 8) | devfn, 2060 (((u16)bus) << 8) | devfn,
2061 DMA_CCMD_MASK_NOBIT, 2061 DMA_CCMD_MASK_NOBIT,
2062 DMA_CCMD_DEVICE_INVL); 2062 DMA_CCMD_DEVICE_INVL);
2063 iommu->flush.flush_iotlb(iommu, did_old, 0, 0,
2064 DMA_TLB_DSI_FLUSH);
2065 }
2063 } 2066 }
2064 2067
2065 pgd = domain->pgd; 2068 pgd = domain->pgd;
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
index a27ef570c328..bc1efbfb9ddf 100644
--- a/drivers/iommu/mtk_iommu_v1.c
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -18,6 +18,7 @@
18#include <linux/clk.h> 18#include <linux/clk.h>
19#include <linux/component.h> 19#include <linux/component.h>
20#include <linux/device.h> 20#include <linux/device.h>
21#include <linux/dma-mapping.h>
21#include <linux/dma-iommu.h> 22#include <linux/dma-iommu.h>
22#include <linux/err.h> 23#include <linux/err.h>
23#include <linux/interrupt.h> 24#include <linux/interrupt.h>
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
index 9f44ee8ea1bc..19779b88a479 100644
--- a/drivers/iommu/of_iommu.c
+++ b/drivers/iommu/of_iommu.c
@@ -118,6 +118,7 @@ static const struct iommu_ops
118 118
119 ops = iommu_ops_from_fwnode(fwnode); 119 ops = iommu_ops_from_fwnode(fwnode);
120 if ((ops && !ops->of_xlate) || 120 if ((ops && !ops->of_xlate) ||
121 !of_device_is_available(iommu_spec->np) ||
121 (!ops && !of_iommu_driver_present(iommu_spec->np))) 122 (!ops && !of_iommu_driver_present(iommu_spec->np)))
122 return NULL; 123 return NULL;
123 124
@@ -236,6 +237,12 @@ const struct iommu_ops *of_iommu_configure(struct device *dev,
236 ops = ERR_PTR(err); 237 ops = ERR_PTR(err);
237 } 238 }
238 239
240 /* Ignore all other errors apart from EPROBE_DEFER */
241 if (IS_ERR(ops) && (PTR_ERR(ops) != -EPROBE_DEFER)) {
242 dev_dbg(dev, "Adding to IOMMU failed: %ld\n", PTR_ERR(ops));
243 ops = NULL;
244 }
245
239 return ops; 246 return ops;
240} 247}
241 248
diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c
index d2306c821ebb..31d6b5a582d2 100644
--- a/drivers/irqchip/irq-mbigen.c
+++ b/drivers/irqchip/irq-mbigen.c
@@ -106,10 +106,7 @@ static inline void get_mbigen_type_reg(irq_hw_number_t hwirq,
106static inline void get_mbigen_clear_reg(irq_hw_number_t hwirq, 106static inline void get_mbigen_clear_reg(irq_hw_number_t hwirq,
107 u32 *mask, u32 *addr) 107 u32 *mask, u32 *addr)
108{ 108{
109 unsigned int ofst; 109 unsigned int ofst = (hwirq / 32) * 4;
110
111 hwirq -= RESERVED_IRQ_PER_MBIGEN_CHIP;
112 ofst = hwirq / 32 * 4;
113 110
114 *mask = 1 << (hwirq % 32); 111 *mask = 1 << (hwirq % 32);
115 *addr = ofst + REG_MBIGEN_CLEAR_OFFSET; 112 *addr = ofst + REG_MBIGEN_CLEAR_OFFSET;
@@ -337,9 +334,15 @@ static int mbigen_device_probe(struct platform_device *pdev)
337 mgn_chip->pdev = pdev; 334 mgn_chip->pdev = pdev;
338 335
339 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 336 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
340 mgn_chip->base = devm_ioremap_resource(&pdev->dev, res); 337 if (!res)
341 if (IS_ERR(mgn_chip->base)) 338 return -EINVAL;
342 return PTR_ERR(mgn_chip->base); 339
340 mgn_chip->base = devm_ioremap(&pdev->dev, res->start,
341 resource_size(res));
342 if (!mgn_chip->base) {
343 dev_err(&pdev->dev, "failed to ioremap %pR\n", res);
344 return -ENOMEM;
345 }
343 346
344 if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) 347 if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node)
345 err = mbigen_of_create_domain(pdev, mgn_chip); 348 err = mbigen_of_create_domain(pdev, mgn_chip);
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index eb7fbe159963..929f8558bf1c 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -140,7 +140,7 @@ static inline void gic_map_to_vpe(unsigned int intr, unsigned int vpe)
140} 140}
141 141
142#ifdef CONFIG_CLKSRC_MIPS_GIC 142#ifdef CONFIG_CLKSRC_MIPS_GIC
143u64 gic_read_count(void) 143u64 notrace gic_read_count(void)
144{ 144{
145 unsigned int hi, hi2, lo; 145 unsigned int hi, hi2, lo;
146 146
@@ -167,7 +167,7 @@ unsigned int gic_get_count_width(void)
167 return bits; 167 return bits;
168} 168}
169 169
170void gic_write_compare(u64 cnt) 170void notrace gic_write_compare(u64 cnt)
171{ 171{
172 if (mips_cm_is64) { 172 if (mips_cm_is64) {
173 gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE), cnt); 173 gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE), cnt);
@@ -179,7 +179,7 @@ void gic_write_compare(u64 cnt)
179 } 179 }
180} 180}
181 181
182void gic_write_cpu_compare(u64 cnt, int cpu) 182void notrace gic_write_cpu_compare(u64 cnt, int cpu)
183{ 183{
184 unsigned long flags; 184 unsigned long flags;
185 185
diff --git a/drivers/irqchip/irq-xtensa-mx.c b/drivers/irqchip/irq-xtensa-mx.c
index bb3ac5fe5846..72a391e01011 100644
--- a/drivers/irqchip/irq-xtensa-mx.c
+++ b/drivers/irqchip/irq-xtensa-mx.c
@@ -142,7 +142,7 @@ static struct irq_chip xtensa_mx_irq_chip = {
142int __init xtensa_mx_init_legacy(struct device_node *interrupt_parent) 142int __init xtensa_mx_init_legacy(struct device_node *interrupt_parent)
143{ 143{
144 struct irq_domain *root_domain = 144 struct irq_domain *root_domain =
145 irq_domain_add_legacy(NULL, NR_IRQS, 0, 0, 145 irq_domain_add_legacy(NULL, NR_IRQS - 1, 1, 0,
146 &xtensa_mx_irq_domain_ops, 146 &xtensa_mx_irq_domain_ops,
147 &xtensa_mx_irq_chip); 147 &xtensa_mx_irq_chip);
148 irq_set_default_host(root_domain); 148 irq_set_default_host(root_domain);
diff --git a/drivers/irqchip/irq-xtensa-pic.c b/drivers/irqchip/irq-xtensa-pic.c
index 472ae1770964..f728755fa292 100644
--- a/drivers/irqchip/irq-xtensa-pic.c
+++ b/drivers/irqchip/irq-xtensa-pic.c
@@ -89,7 +89,7 @@ static struct irq_chip xtensa_irq_chip = {
89int __init xtensa_pic_init_legacy(struct device_node *interrupt_parent) 89int __init xtensa_pic_init_legacy(struct device_node *interrupt_parent)
90{ 90{
91 struct irq_domain *root_domain = 91 struct irq_domain *root_domain =
92 irq_domain_add_legacy(NULL, NR_IRQS, 0, 0, 92 irq_domain_add_legacy(NULL, NR_IRQS - 1, 1, 0,
93 &xtensa_irq_domain_ops, &xtensa_irq_chip); 93 &xtensa_irq_domain_ops, &xtensa_irq_chip);
94 irq_set_default_host(root_domain); 94 irq_set_default_host(root_domain);
95 return 0; 95 return 0;
diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c
index d07dd5196ffc..8aa158a09180 100644
--- a/drivers/isdn/i4l/isdn_ppp.c
+++ b/drivers/isdn/i4l/isdn_ppp.c
@@ -2364,7 +2364,7 @@ static struct ippp_ccp_reset_state *isdn_ppp_ccp_reset_alloc_state(struct ippp_s
2364 id); 2364 id);
2365 return NULL; 2365 return NULL;
2366 } else { 2366 } else {
2367 rs = kzalloc(sizeof(struct ippp_ccp_reset_state), GFP_KERNEL); 2367 rs = kzalloc(sizeof(struct ippp_ccp_reset_state), GFP_ATOMIC);
2368 if (!rs) 2368 if (!rs)
2369 return NULL; 2369 return NULL;
2370 rs->state = CCPResetIdle; 2370 rs->state = CCPResetIdle;
diff --git a/drivers/isdn/mISDN/stack.c b/drivers/isdn/mISDN/stack.c
index 8b7faea2ddf8..422dced7c90a 100644
--- a/drivers/isdn/mISDN/stack.c
+++ b/drivers/isdn/mISDN/stack.c
@@ -75,7 +75,7 @@ send_socklist(struct mISDN_sock_list *sl, struct sk_buff *skb)
75 if (sk->sk_state != MISDN_BOUND) 75 if (sk->sk_state != MISDN_BOUND)
76 continue; 76 continue;
77 if (!cskb) 77 if (!cskb)
78 cskb = skb_copy(skb, GFP_KERNEL); 78 cskb = skb_copy(skb, GFP_ATOMIC);
79 if (!cskb) { 79 if (!cskb) {
80 printk(KERN_WARNING "%s no skb\n", __func__); 80 printk(KERN_WARNING "%s no skb\n", __func__);
81 break; 81 break;
diff --git a/drivers/leds/leds-bcm6328.c b/drivers/leds/leds-bcm6328.c
index 1548259297c1..2cfd9389ee96 100644
--- a/drivers/leds/leds-bcm6328.c
+++ b/drivers/leds/leds-bcm6328.c
@@ -242,7 +242,7 @@ static int bcm6328_hwled(struct device *dev, struct device_node *nc, u32 reg,
 
                 spin_lock_irqsave(lock, flags);
                 val = bcm6328_led_read(addr);
-                val |= (BIT(reg) << (((sel % 4) * 4) + 16));
+                val |= (BIT(reg % 4) << (((sel % 4) * 4) + 16));
                 bcm6328_led_write(addr, val);
                 spin_unlock_irqrestore(lock, flags);
         }
@@ -269,7 +269,7 @@ static int bcm6328_hwled(struct device *dev, struct device_node *nc, u32 reg,
 
                 spin_lock_irqsave(lock, flags);
                 val = bcm6328_led_read(addr);
-                val |= (BIT(reg) << ((sel % 4) * 4));
+                val |= (BIT(reg % 4) << ((sel % 4) * 4));
                 bcm6328_led_write(addr, val);
                 spin_unlock_irqrestore(lock, flags);
         }
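[Annotation] The two bcm6328 hunks fix the same masking bug: each LED's mode occupies a 4-bit field, so the bit index must be reduced modulo 4 before the shift, otherwise reg >= 4 spills into a neighbouring LED's field. A hedged userspace sketch of that arithmetic; the example reg/sel values are assumptions, not the driver's real register layout:

/* Sketch of the nibble arithmetic only. */
#include <stdio.h>
#include <stdint.h>

#define BIT(n) (1UL << (n))

int main(void)
{
        unsigned reg = 5, sel = 5;      /* assumed example LED */
        uint32_t buggy = BIT(reg) << (((sel % 4) * 4) + 16);
        uint32_t fixed = BIT(reg % 4) << (((sel % 4) * 4) + 16);

        /* buggy lands outside the 4-bit field selected by sel % 4 */
        printf("buggy: 0x%08x\nfixed: 0x%08x\n", buggy, fixed);
        return 0;
}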
diff --git a/drivers/leds/leds-pca955x.c b/drivers/leds/leds-pca955x.c
index 78a7ce816a47..9a873118ea5f 100644
--- a/drivers/leds/leds-pca955x.c
+++ b/drivers/leds/leds-pca955x.c
@@ -285,7 +285,7 @@ static int pca955x_probe(struct i2c_client *client,
285 "slave address 0x%02x\n", 285 "slave address 0x%02x\n",
286 client->name, chip->bits, client->addr); 286 client->name, chip->bits, client->addr);
287 287
288 if (!i2c_check_functionality(adapter, I2C_FUNC_I2C)) 288 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
289 return -EIO; 289 return -EIO;
290 290
291 if (pdata) { 291 if (pdata) {
diff --git a/drivers/leds/trigger/ledtrig-heartbeat.c b/drivers/leds/trigger/ledtrig-heartbeat.c
index afa3b4099214..e95ea65380c8 100644
--- a/drivers/leds/trigger/ledtrig-heartbeat.c
+++ b/drivers/leds/trigger/ledtrig-heartbeat.c
@@ -20,7 +20,6 @@
 #include <linux/sched/loadavg.h>
 #include <linux/leds.h>
 #include <linux/reboot.h>
-#include <linux/suspend.h>
 #include "../leds.h"
 
 static int panic_heartbeats;
@@ -163,30 +162,6 @@ static struct led_trigger heartbeat_led_trigger = {
         .deactivate = heartbeat_trig_deactivate,
 };
 
-static int heartbeat_pm_notifier(struct notifier_block *nb,
-                                 unsigned long pm_event, void *unused)
-{
-        int rc;
-
-        switch (pm_event) {
-        case PM_SUSPEND_PREPARE:
-        case PM_HIBERNATION_PREPARE:
-        case PM_RESTORE_PREPARE:
-                led_trigger_unregister(&heartbeat_led_trigger);
-                break;
-        case PM_POST_SUSPEND:
-        case PM_POST_HIBERNATION:
-        case PM_POST_RESTORE:
-                rc = led_trigger_register(&heartbeat_led_trigger);
-                if (rc)
-                        pr_err("could not re-register heartbeat trigger\n");
-                break;
-        default:
-                break;
-        }
-        return NOTIFY_DONE;
-}
-
 static int heartbeat_reboot_notifier(struct notifier_block *nb,
                                      unsigned long code, void *unused)
 {
@@ -201,10 +176,6 @@ static int heartbeat_panic_notifier(struct notifier_block *nb,
         return NOTIFY_DONE;
 }
 
-static struct notifier_block heartbeat_pm_nb = {
-        .notifier_call = heartbeat_pm_notifier,
-};
-
 static struct notifier_block heartbeat_reboot_nb = {
         .notifier_call = heartbeat_reboot_notifier,
 };
@@ -221,14 +192,12 @@ static int __init heartbeat_trig_init(void)
                 atomic_notifier_chain_register(&panic_notifier_list,
                                                &heartbeat_panic_nb);
                 register_reboot_notifier(&heartbeat_reboot_nb);
-                register_pm_notifier(&heartbeat_pm_nb);
         }
         return rc;
 }
 
 static void __exit heartbeat_trig_exit(void)
 {
-        unregister_pm_notifier(&heartbeat_pm_nb);
         unregister_reboot_notifier(&heartbeat_reboot_nb);
         atomic_notifier_chain_unregister(&panic_notifier_list,
                                          &heartbeat_panic_nb);
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index bf7419a56454..f4eace5ea184 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -485,10 +485,10 @@ void bitmap_print_sb(struct bitmap *bitmap)
         pr_debug(" magic: %08x\n", le32_to_cpu(sb->magic));
         pr_debug(" version: %d\n", le32_to_cpu(sb->version));
         pr_debug(" uuid: %08x.%08x.%08x.%08x\n",
-                 *(__u32 *)(sb->uuid+0),
-                 *(__u32 *)(sb->uuid+4),
-                 *(__u32 *)(sb->uuid+8),
-                 *(__u32 *)(sb->uuid+12));
+                 le32_to_cpu(*(__u32 *)(sb->uuid+0)),
+                 le32_to_cpu(*(__u32 *)(sb->uuid+4)),
+                 le32_to_cpu(*(__u32 *)(sb->uuid+8)),
+                 le32_to_cpu(*(__u32 *)(sb->uuid+12)));
         pr_debug(" events: %llu\n",
                  (unsigned long long) le64_to_cpu(sb->events));
         pr_debug("events cleared: %llu\n",
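[Annotation] The bitmap hunk wraps the raw superblock words in le32_to_cpu() so the printed uuid is byte-order stable across hosts. A minimal userspace sketch of the same idea, assuming a glibc host where <endian.h> provides le32toh() as the stand-in for the kernel's le32_to_cpu():

/* Decode a little-endian on-disk word portably. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <endian.h>

int main(void)
{
        /* assumed on-disk bytes of one 32-bit uuid word, little-endian */
        unsigned char uuid[4] = { 0x78, 0x56, 0x34, 0x12 };
        uint32_t raw;

        memcpy(&raw, uuid, sizeof(raw));          /* host-order dependent */
        printf("decoded: %08x\n", le32toh(raw));  /* 12345678 everywhere */
        return 0;
}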
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 5db11a405129..840c1496b2b1 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -218,7 +218,7 @@ static DEFINE_SPINLOCK(param_spinlock);
  * Buffers are freed after this timeout
  */
 static unsigned dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;
-static unsigned dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;
+static unsigned long dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;
 
 static unsigned long dm_bufio_peak_allocated;
 static unsigned long dm_bufio_allocated_kmem_cache;
@@ -1334,7 +1334,7 @@ int dm_bufio_issue_flush(struct dm_bufio_client *c)
 {
         struct dm_io_request io_req = {
                 .bi_op = REQ_OP_WRITE,
-                .bi_op_flags = REQ_PREFLUSH,
+                .bi_op_flags = REQ_PREFLUSH | REQ_SYNC,
                 .mem.type = DM_IO_KMEM,
                 .mem.ptr.addr = NULL,
                 .client = c->dm_io,
@@ -1558,10 +1558,10 @@ static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp)
         return true;
 }
 
-static unsigned get_retain_buffers(struct dm_bufio_client *c)
+static unsigned long get_retain_buffers(struct dm_bufio_client *c)
 {
-        unsigned retain_bytes = ACCESS_ONCE(dm_bufio_retain_bytes);
-        return retain_bytes / c->block_size;
+        unsigned long retain_bytes = ACCESS_ONCE(dm_bufio_retain_bytes);
+        return retain_bytes >> (c->sectors_per_block_bits + SECTOR_SHIFT);
 }
 
 static unsigned long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
@@ -1571,7 +1571,7 @@ static unsigned long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
         struct dm_buffer *b, *tmp;
         unsigned long freed = 0;
         unsigned long count = nr_to_scan;
-        unsigned retain_target = get_retain_buffers(c);
+        unsigned long retain_target = get_retain_buffers(c);
 
         for (l = 0; l < LIST_SIZE; l++) {
                 list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) {
@@ -1794,8 +1794,8 @@ static bool older_than(struct dm_buffer *b, unsigned long age_hz)
 static void __evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz)
 {
         struct dm_buffer *b, *tmp;
-        unsigned retain_target = get_retain_buffers(c);
-        unsigned count;
+        unsigned long retain_target = get_retain_buffers(c);
+        unsigned long count;
         LIST_HEAD(write_list);
 
         dm_bufio_lock(c);
@@ -1955,7 +1955,7 @@ MODULE_PARM_DESC(max_cache_size_bytes, "Size of metadata cache");
 module_param_named(max_age_seconds, dm_bufio_max_age, uint, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds");
 
-module_param_named(retain_bytes, dm_bufio_retain_bytes, uint, S_IRUGO | S_IWUSR);
+module_param_named(retain_bytes, dm_bufio_retain_bytes, ulong, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(retain_bytes, "Try to keep at least this many bytes cached in memory");
 
 module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, S_IRUGO | S_IWUSR);
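[Annotation] The dm-bufio hunks widen retain_bytes to unsigned long and replace the division by block size with a shift; for power-of-two block sizes the two forms are equivalent, and the wider type keeps values above 4GiB from truncating. A small sketch, with sector and block sizes picked purely for illustration:

/* retain_bytes -> buffer-count conversion: shift equals divide when the
 * block size is a power of two. Values are illustrative. */
#include <stdio.h>

#define SECTOR_SHIFT 9          /* 512-byte sectors */

int main(void)
{
        unsigned sectors_per_block_bits = 4;    /* assumed: 16 sectors = 8KiB blocks */
        unsigned long block_size = 1UL << (sectors_per_block_bits + SECTOR_SHIFT);
        unsigned long retain_bytes = 6UL * 1024 * 1024 * 1024; /* 6GiB */

        printf("divide: %lu buffers\n", retain_bytes / block_size);
        printf("shift:  %lu buffers\n",
               retain_bytes >> (sectors_per_block_bits + SECTOR_SHIFT));
        /* the old 32-bit module parameter could not even hold 6GiB: */
        printf("truncated 32-bit value: %u\n", (unsigned)retain_bytes);
        return 0;
}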
diff --git a/drivers/md/dm-cache-background-tracker.c b/drivers/md/dm-cache-background-tracker.c
index 9b1afdfb13f0..707233891291 100644
--- a/drivers/md/dm-cache-background-tracker.c
+++ b/drivers/md/dm-cache-background-tracker.c
@@ -33,6 +33,11 @@ struct background_tracker *btracker_create(unsigned max_work)
 {
         struct background_tracker *b = kmalloc(sizeof(*b), GFP_KERNEL);
 
+        if (!b) {
+                DMERR("couldn't create background_tracker");
+                return NULL;
+        }
+
         b->max_work = max_work;
         atomic_set(&b->pending_promotes, 0);
         atomic_set(&b->pending_writebacks, 0);
diff --git a/drivers/md/dm-cache-policy-smq.c b/drivers/md/dm-cache-policy-smq.c
index 72479bd61e11..e5eb9c9b4bc8 100644
--- a/drivers/md/dm-cache-policy-smq.c
+++ b/drivers/md/dm-cache-policy-smq.c
@@ -1120,8 +1120,6 @@ static bool clean_target_met(struct smq_policy *mq, bool idle)
          * Cache entries may not be populated. So we cannot rely on the
          * size of the clean queue.
          */
-        unsigned nr_clean;
-
         if (idle) {
                 /*
                  * We'd like to clean everything.
@@ -1129,18 +1127,16 @@ static bool clean_target_met(struct smq_policy *mq, bool idle)
                 return q_size(&mq->dirty) == 0u;
         }
 
-        nr_clean = from_cblock(mq->cache_size) - q_size(&mq->dirty);
-        return (nr_clean + btracker_nr_writebacks_queued(mq->bg_work)) >=
-                percent_to_target(mq, CLEAN_TARGET);
+        /*
+         * If we're busy we don't worry about cleaning at all.
+         */
+        return true;
 }
 
-static bool free_target_met(struct smq_policy *mq, bool idle)
+static bool free_target_met(struct smq_policy *mq)
 {
         unsigned nr_free;
 
-        if (!idle)
-                return true;
-
         nr_free = from_cblock(mq->cache_size) - mq->cache_alloc.nr_allocated;
         return (nr_free + btracker_nr_demotions_queued(mq->bg_work)) >=
                 percent_to_target(mq, FREE_TARGET);
@@ -1190,9 +1186,9 @@ static void queue_demotion(struct smq_policy *mq)
         if (unlikely(WARN_ON_ONCE(!mq->migrations_allowed)))
                 return;
 
-        e = q_peek(&mq->clean, mq->clean.nr_levels, true);
+        e = q_peek(&mq->clean, mq->clean.nr_levels / 2, true);
         if (!e) {
-                if (!clean_target_met(mq, false))
+                if (!clean_target_met(mq, true))
                         queue_writeback(mq);
                 return;
         }
@@ -1220,7 +1216,7 @@ static void queue_promotion(struct smq_policy *mq, dm_oblock_t oblock,
          * We always claim to be 'idle' to ensure some demotions happen
          * with continuous loads.
          */
-        if (!free_target_met(mq, true))
+        if (!free_target_met(mq))
                 queue_demotion(mq);
         return;
 }
@@ -1421,14 +1417,10 @@ static int smq_get_background_work(struct dm_cache_policy *p, bool idle,
         spin_lock_irqsave(&mq->lock, flags);
         r = btracker_issue(mq->bg_work, result);
         if (r == -ENODATA) {
-                /* find some writeback work to do */
-                if (mq->migrations_allowed && !free_target_met(mq, idle))
-                        queue_demotion(mq);
-
-                else if (!clean_target_met(mq, idle))
-                        queue_writeback(mq);
-
-                r = btracker_issue(mq->bg_work, result);
+                if (!clean_target_met(mq, idle)) {
+                        queue_writeback(mq);
+                        r = btracker_issue(mq->bg_work, result);
+                }
         }
         spin_unlock_irqrestore(&mq->lock, flags);
 
@@ -1452,6 +1444,7 @@ static void __complete_background_work(struct smq_policy *mq,
         clear_pending(mq, e);
         if (success) {
                 e->oblock = work->oblock;
+                e->level = NR_CACHE_LEVELS - 1;
                 push(mq, e);
                 // h, q, a
         } else {
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 1db375f50a13..d682a0511381 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -94,6 +94,9 @@ static void iot_io_begin(struct io_tracker *iot, sector_t len)
 
 static void __iot_io_end(struct io_tracker *iot, sector_t len)
 {
+        if (!len)
+                return;
+
         iot->in_flight -= len;
         if (!iot->in_flight)
                 iot->idle_time = jiffies;
@@ -474,7 +477,7 @@ struct cache {
         spinlock_t invalidation_lock;
         struct list_head invalidation_requests;
 
-        struct io_tracker origin_tracker;
+        struct io_tracker tracker;
 
         struct work_struct commit_ws;
         struct batcher committer;
@@ -901,8 +904,7 @@ static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio)
 
 static bool accountable_bio(struct cache *cache, struct bio *bio)
 {
-        return ((bio->bi_bdev == cache->origin_dev->bdev) &&
-                bio_op(bio) != REQ_OP_DISCARD);
+        return bio_op(bio) != REQ_OP_DISCARD;
 }
 
 static void accounted_begin(struct cache *cache, struct bio *bio)
@@ -912,7 +914,7 @@ static void accounted_begin(struct cache *cache, struct bio *bio)
 
         if (accountable_bio(cache, bio)) {
                 pb->len = bio_sectors(bio);
-                iot_io_begin(&cache->origin_tracker, pb->len);
+                iot_io_begin(&cache->tracker, pb->len);
         }
 }
 
@@ -921,7 +923,7 @@ static void accounted_complete(struct cache *cache, struct bio *bio)
         size_t pb_data_size = get_per_bio_data_size(cache);
         struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
 
-        iot_io_end(&cache->origin_tracker, pb->len);
+        iot_io_end(&cache->tracker, pb->len);
 }
 
 static void accounted_request(struct cache *cache, struct bio *bio)
@@ -1716,20 +1718,19 @@ static int invalidate_start(struct cache *cache, dm_cblock_t cblock,
 
 enum busy {
         IDLE,
-        MODERATE,
         BUSY
 };
 
 static enum busy spare_migration_bandwidth(struct cache *cache)
 {
-        bool idle = iot_idle_for(&cache->origin_tracker, HZ);
+        bool idle = iot_idle_for(&cache->tracker, HZ);
         sector_t current_volume = (atomic_read(&cache->nr_io_migrations) + 1) *
                 cache->sectors_per_block;
 
-        if (current_volume <= cache->migration_threshold)
-                return idle ? IDLE : MODERATE;
+        if (idle && current_volume <= cache->migration_threshold)
+                return IDLE;
         else
-                return idle ? MODERATE : BUSY;
+                return BUSY;
 }
 
 static void inc_hit_counter(struct cache *cache, struct bio *bio)
@@ -2045,8 +2046,6 @@ static void check_migrations(struct work_struct *ws)
 
         for (;;) {
                 b = spare_migration_bandwidth(cache);
-                if (b == BUSY)
-                        break;
 
                 r = policy_get_background_work(cache->policy, b == IDLE, &op);
                 if (r == -ENODATA)
@@ -2717,7 +2716,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
 
         batcher_init(&cache->committer, commit_op, cache,
                      issue_op, cache, cache->wq);
-        iot_init(&cache->origin_tracker);
+        iot_init(&cache->tracker);
 
         init_rwsem(&cache->background_work_lock);
         prevent_background_work(cache);
@@ -2941,7 +2940,7 @@ static void cache_postsuspend(struct dm_target *ti)
 
         cancel_delayed_work(&cache->waker);
         flush_workqueue(cache->wq);
-        WARN_ON(cache->origin_tracker.in_flight);
+        WARN_ON(cache->tracker.in_flight);
 
         /*
          * If it's a flush suspend there won't be any deferred bios, so this
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index c7f7c8d76576..93b181088168 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -783,7 +783,8 @@ static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsi
                 for (i = 0; i < commit_sections; i++)
                         rw_section_mac(ic, commit_start + i, true);
         }
-        rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, commit_sections, &io_comp);
+        rw_journal(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, commit_start,
+                   commit_sections, &io_comp);
         } else {
                 unsigned to_end;
                 io_comp.in_flight = (atomic_t)ATOMIC_INIT(2);
@@ -1104,10 +1105,13 @@ static void schedule_autocommit(struct dm_integrity_c *ic)
 static void submit_flush_bio(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
 {
         struct bio *bio;
-        spin_lock_irq(&ic->endio_wait.lock);
+        unsigned long flags;
+
+        spin_lock_irqsave(&ic->endio_wait.lock, flags);
         bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
         bio_list_add(&ic->flush_bio_list, bio);
-        spin_unlock_irq(&ic->endio_wait.lock);
+        spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
+
         queue_work(ic->commit_wq, &ic->commit_work);
 }
 
@@ -2374,21 +2378,6 @@ static void dm_integrity_set(struct dm_target *ti, struct dm_integrity_c *ic)
         blk_queue_max_integrity_segments(disk->queue, UINT_MAX);
 }
 
-/* FIXME: use new kvmalloc */
-static void *dm_integrity_kvmalloc(size_t size, gfp_t gfp)
-{
-        void *ptr = NULL;
-
-        if (size <= PAGE_SIZE)
-                ptr = kmalloc(size, GFP_KERNEL | gfp);
-        if (!ptr && size <= KMALLOC_MAX_SIZE)
-                ptr = kmalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY | gfp);
-        if (!ptr)
-                ptr = __vmalloc(size, GFP_KERNEL | gfp, PAGE_KERNEL);
-
-        return ptr;
-}
-
 static void dm_integrity_free_page_list(struct dm_integrity_c *ic, struct page_list *pl)
 {
         unsigned i;
@@ -2407,7 +2396,7 @@ static struct page_list *dm_integrity_alloc_page_list(struct dm_integrity_c *ic)
         struct page_list *pl;
         unsigned i;
 
-        pl = dm_integrity_kvmalloc(page_list_desc_size, __GFP_ZERO);
+        pl = kvmalloc(page_list_desc_size, GFP_KERNEL | __GFP_ZERO);
         if (!pl)
                 return NULL;
 
@@ -2437,7 +2426,7 @@ static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_int
         struct scatterlist **sl;
         unsigned i;
 
-        sl = dm_integrity_kvmalloc(ic->journal_sections * sizeof(struct scatterlist *), __GFP_ZERO);
+        sl = kvmalloc(ic->journal_sections * sizeof(struct scatterlist *), GFP_KERNEL | __GFP_ZERO);
         if (!sl)
                 return NULL;
 
@@ -2453,7 +2442,7 @@ static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_int
 
                 n_pages = (end_index - start_index + 1);
 
-                s = dm_integrity_kvmalloc(n_pages * sizeof(struct scatterlist), 0);
+                s = kvmalloc(n_pages * sizeof(struct scatterlist), GFP_KERNEL);
                 if (!s) {
                         dm_integrity_free_journal_scatterlist(ic, sl);
                         return NULL;
@@ -2617,7 +2606,7 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
                         goto bad;
                 }
 
-                sg = dm_integrity_kvmalloc((ic->journal_pages + 1) * sizeof(struct scatterlist), 0);
+                sg = kvmalloc((ic->journal_pages + 1) * sizeof(struct scatterlist), GFP_KERNEL);
                 if (!sg) {
                         *error = "Unable to allocate sg list";
                         r = -ENOMEM;
@@ -2673,7 +2662,7 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
                         r = -ENOMEM;
                         goto bad;
                 }
-                ic->sk_requests = dm_integrity_kvmalloc(ic->journal_sections * sizeof(struct skcipher_request *), __GFP_ZERO);
+                ic->sk_requests = kvmalloc(ic->journal_sections * sizeof(struct skcipher_request *), GFP_KERNEL | __GFP_ZERO);
                 if (!ic->sk_requests) {
                         *error = "Unable to allocate sk requests";
                         r = -ENOMEM;
@@ -2740,7 +2729,7 @@ retest_commit_id:
                 r = -ENOMEM;
                 goto bad;
         }
-        ic->journal_tree = dm_integrity_kvmalloc(journal_tree_size, 0);
+        ic->journal_tree = kvmalloc(journal_tree_size, GFP_KERNEL);
         if (!ic->journal_tree) {
                 *error = "Could not allocate memory for journal tree";
                 r = -ENOMEM;
@@ -3054,6 +3043,11 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
                 ti->error = "The device is too small";
                 goto bad;
         }
+        if (ti->len > ic->provided_data_sectors) {
+                r = -EINVAL;
+                ti->error = "Not enough provided sectors for requested mapping size";
+                goto bad;
+        }
 
         if (!buffer_sectors)
                 buffer_sectors = 1;
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 3702e502466d..8d5ca30f6551 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -317,8 +317,8 @@ static void do_region(int op, int op_flags, unsigned region,
         else if (op == REQ_OP_WRITE_SAME)
                 special_cmd_max_sectors = q->limits.max_write_same_sectors;
         if ((op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES ||
-             op == REQ_OP_WRITE_SAME) &&
-            special_cmd_max_sectors == 0) {
+             op == REQ_OP_WRITE_SAME) && special_cmd_max_sectors == 0) {
+                atomic_inc(&io->count);
                 dec_count(io, region, -EOPNOTSUPP);
                 return;
         }
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 0555b4410e05..41852ae287a5 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1710,12 +1710,13 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kern
         }
 
         /*
-         * Try to avoid low memory issues when a device is suspended.
+         * Use __GFP_HIGH to avoid low memory issues when a device is
+         * suspended and the ioctl is needed to resume it.
          * Use kmalloc() rather than vmalloc() when we can.
          */
         dmi = NULL;
         noio_flag = memalloc_noio_save();
-        dmi = kvmalloc(param_kernel->data_size, GFP_KERNEL);
+        dmi = kvmalloc(param_kernel->data_size, GFP_KERNEL | __GFP_HIGH);
         memalloc_noio_restore(noio_flag);
 
         if (!dmi) {
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 926a6bcb32c8..3df056b73b66 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -447,7 +447,7 @@ failed:
  * it has been invoked.
  */
 #define dm_report_EIO(m) \
-({ \
+do { \
         struct mapped_device *md = dm_table_get_md((m)->ti->table); \
  \
         pr_debug("%s: returning EIO; QIFNP = %d; SQIFNP = %d; DNFS = %d\n", \
@@ -455,8 +455,7 @@ failed:
                  test_bit(MPATHF_QUEUE_IF_NO_PATH, &(m)->flags), \
                  test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &(m)->flags), \
                  dm_noflush_suspending((m)->ti)); \
-        -EIO; \
-})
+} while (0)
 
 /*
  * Map cloned requests (request-based multipath)
@@ -481,7 +480,8 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
         if (!pgpath) {
                 if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
                         return DM_MAPIO_DELAY_REQUEUE;
-                return dm_report_EIO(m); /* Failed */
+                dm_report_EIO(m); /* Failed */
+                return DM_MAPIO_KILL;
         } else if (test_bit(MPATHF_QUEUE_IO, &m->flags) ||
                    test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) {
                 if (pg_init_all_paths(m))
@@ -558,7 +558,8 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_m
         if (!pgpath) {
                 if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
                         return DM_MAPIO_REQUEUE;
-                return dm_report_EIO(m);
+                dm_report_EIO(m);
+                return -EIO;
         }
 
         mpio->pgpath = pgpath;
@@ -1493,7 +1494,7 @@ static int multipath_end_io(struct dm_target *ti, struct request *clone,
         if (atomic_read(&m->nr_valid_paths) == 0 &&
             !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
                 if (error == -EIO)
-                        error = dm_report_EIO(m);
+                        dm_report_EIO(m);
                 /* complete with the original error */
                 r = DM_ENDIO_DONE;
         }
@@ -1524,8 +1525,10 @@ static int do_end_io_bio(struct multipath *m, struct bio *clone,
                 fail_path(mpio->pgpath);
 
         if (atomic_read(&m->nr_valid_paths) == 0 &&
-            !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
-                return dm_report_EIO(m);
+            !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
+                dm_report_EIO(m);
+                return -EIO;
+        }
 
         /* Queue for the daemon to resubmit */
         dm_bio_restore(get_bio_details_from_bio(clone), clone);
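[Annotation] The dm_report_EIO() rework also changes the macro's shape: as a GNU statement expression, ({ ... -EIO; }) yielded a value; once callers return their own codes, the conventional do { } while (0) wrapper keeps the multi-statement macro safe under an unbraced if/else. A small userspace sketch of why that wrapper matters; the macro names here are illustrative:

/* do { } while (0) macro hygiene. LOG_TWICE_BAD would not parse inside
 * an unbraced if/else (the second statement escapes the if-body);
 * LOG_TWICE_OK expands to exactly one statement. */
#include <stdio.h>

#define LOG_TWICE_BAD(msg)  puts(msg); puts(msg)
#define LOG_TWICE_OK(msg)   do { puts(msg); puts(msg); } while (0)

int main(void)
{
        int failed = 0;

        if (failed)
                LOG_TWICE_OK("failed");  /* fine: one statement */
        else
                puts("ok");
        /* Substituting LOG_TWICE_BAD above would break the else branch. */
        return 0;
}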
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 7d893228c40f..b4b75dad816a 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -1927,7 +1927,7 @@ struct dm_raid_superblock {
 /********************************************************************
  * BELOW FOLLOW V1.9.0 EXTENSIONS TO THE PRISTINE SUPERBLOCK FORMAT!!!
  *
- * FEATURE_FLAG_SUPPORTS_V190 in the features member indicates that those exist
+ * FEATURE_FLAG_SUPPORTS_V190 in the compat_features member indicates that those exist
  */
 
         __le32 flags; /* Flags defining array states for reshaping */
@@ -2092,6 +2092,11 @@ static void super_sync(struct mddev *mddev, struct md_rdev *rdev)
         sb->layout = cpu_to_le32(mddev->layout);
         sb->stripe_sectors = cpu_to_le32(mddev->chunk_sectors);
 
+        /********************************************************************
+         * BELOW FOLLOW V1.9.0 EXTENSIONS TO THE PRISTINE SUPERBLOCK FORMAT!!!
+         *
+         * FEATURE_FLAG_SUPPORTS_V190 in the compat_features member indicates that those exist
+         */
         sb->new_level = cpu_to_le32(mddev->new_level);
         sb->new_layout = cpu_to_le32(mddev->new_layout);
         sb->new_stripe_sectors = cpu_to_le32(mddev->new_chunk_sectors);
@@ -2438,8 +2443,14 @@ static int super_validate(struct raid_set *rs, struct md_rdev *rdev)
         mddev->bitmap_info.default_offset = mddev->bitmap_info.offset;
 
         if (!test_and_clear_bit(FirstUse, &rdev->flags)) {
-                /* Retrieve device size stored in superblock to be prepared for shrink */
-                rdev->sectors = le64_to_cpu(sb->sectors);
+                /*
+                 * Retrieve rdev size stored in superblock to be prepared for shrink.
+                 * Check extended superblock members are present otherwise the size
+                 * will not be set!
+                 */
+                if (le32_to_cpu(sb->compat_features) & FEATURE_FLAG_SUPPORTS_V190)
+                        rdev->sectors = le64_to_cpu(sb->sectors);
+
                 rdev->recovery_offset = le64_to_cpu(sb->disk_recovery_offset);
                 if (rdev->recovery_offset == MaxSector)
                         set_bit(In_sync, &rdev->flags);
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index a95cbb80fb34..4da8858856fb 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -145,6 +145,7 @@ static void dispatch_bios(void *context, struct bio_list *bio_list)
 
 struct dm_raid1_bio_record {
         struct mirror *m;
+        /* if details->bi_bdev == NULL, details were not saved */
         struct dm_bio_details details;
         region_t write_region;
 };
@@ -260,7 +261,7 @@ static int mirror_flush(struct dm_target *ti)
         struct mirror *m;
         struct dm_io_request io_req = {
                 .bi_op = REQ_OP_WRITE,
-                .bi_op_flags = REQ_PREFLUSH,
+                .bi_op_flags = REQ_PREFLUSH | REQ_SYNC,
                 .mem.type = DM_IO_KMEM,
                 .mem.ptr.addr = NULL,
                 .client = ms->io_client,
@@ -1198,6 +1199,8 @@ static int mirror_map(struct dm_target *ti, struct bio *bio)
         struct dm_raid1_bio_record *bio_record =
                 dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record));
 
+        bio_record->details.bi_bdev = NULL;
+
         if (rw == WRITE) {
                 /* Save region for mirror_end_io() handler */
                 bio_record->write_region = dm_rh_bio_to_region(ms->rh, bio);
@@ -1256,12 +1259,22 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
         }
 
         if (error == -EOPNOTSUPP)
-                return error;
+                goto out;
 
         if ((error == -EWOULDBLOCK) && (bio->bi_opf & REQ_RAHEAD))
-                return error;
+                goto out;
 
         if (unlikely(error)) {
+                if (!bio_record->details.bi_bdev) {
+                        /*
+                         * There wasn't enough memory to record necessary
+                         * information for a retry or there was no other
+                         * mirror in-sync.
+                         */
+                        DMERR_LIMIT("Mirror read failed.");
+                        return -EIO;
+                }
+
                 m = bio_record->m;
 
                 DMERR("Mirror read failed from %s. Trying alternative device.",
@@ -1277,6 +1290,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
                 bd = &bio_record->details;
 
                 dm_bio_restore(bd, bio);
+                bio_record->details.bi_bdev = NULL;
                 bio->bi_error = 0;
 
                 queue_bio(ms, bio, rw);
@@ -1285,6 +1299,9 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
                 DMERR("All replicated volumes dead, failing I/O");
         }
 
+out:
+        bio_record->details.bi_bdev = NULL;
+
         return error;
 }
 
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 2af27026aa2e..b639fa7246ee 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -507,6 +507,7 @@ static int map_request(struct dm_rq_target_io *tio)
         case DM_MAPIO_KILL:
                 /* The target wants to complete the I/O */
                 dm_kill_unmapped_request(rq, -EIO);
+                break;
         default:
                 DMWARN("unimplemented target map return value: %d", r);
                 BUG();
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index b93476c3ba3f..c5534d294773 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -741,7 +741,8 @@ static void persistent_commit_exception(struct dm_exception_store *store,
         /*
          * Commit exceptions to disk.
          */
-        if (ps->valid && area_io(ps, REQ_OP_WRITE, REQ_PREFLUSH | REQ_FUA))
+        if (ps->valid && area_io(ps, REQ_OP_WRITE,
+                                 REQ_PREFLUSH | REQ_FUA | REQ_SYNC))
                 ps->valid = 0;
 
         /*
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 0f0251d0d337..d31d18d9727c 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -484,11 +484,11 @@ static int __write_initial_superblock(struct dm_pool_metadata *pmd)
         if (r < 0)
                 return r;
 
-        r = save_sm_roots(pmd);
+        r = dm_tm_pre_commit(pmd->tm);
         if (r < 0)
                 return r;
 
-        r = dm_tm_pre_commit(pmd->tm);
+        r = save_sm_roots(pmd);
         if (r < 0)
                 return r;
 
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 17ad50daed08..28808e5ec0fd 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -1094,6 +1094,19 @@ static void process_prepared_discard_passdown_pt1(struct dm_thin_new_mapping *m)
                 return;
         }
 
+        /*
+         * Increment the unmapped blocks. This prevents a race between the
+         * passdown io and reallocation of freed blocks.
+         */
+        r = dm_pool_inc_data_range(pool->pmd, m->data_block, data_end);
+        if (r) {
+                metadata_operation_failed(pool, "dm_pool_inc_data_range", r);
+                bio_io_error(m->bio);
+                cell_defer_no_holder(tc, m->cell);
+                mempool_free(m, pool->mapping_pool);
+                return;
+        }
+
         discard_parent = bio_alloc(GFP_NOIO, 1);
         if (!discard_parent) {
                 DMWARN("%s: unable to allocate top level discard bio for passdown. Skipping passdown.",
@@ -1114,19 +1127,6 @@ static void process_prepared_discard_passdown_pt1(struct dm_thin_new_mapping *m)
                         end_discard(&op, r);
                 }
         }
-
-        /*
-         * Increment the unmapped blocks. This prevents a race between the
-         * passdown io and reallocation of freed blocks.
-         */
-        r = dm_pool_inc_data_range(pool->pmd, m->data_block, data_end);
-        if (r) {
-                metadata_operation_failed(pool, "dm_pool_inc_data_range", r);
-                bio_io_error(m->bio);
-                cell_defer_no_holder(tc, m->cell);
-                mempool_free(m, pool->mapping_pool);
-                return;
-        }
 }
 
 static void process_prepared_discard_passdown_pt2(struct dm_thin_new_mapping *m)
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index 97de961a3bfc..1ec9b2c51c07 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -166,7 +166,7 @@ static int verity_hash_init(struct dm_verity *v, struct ahash_request *req,
                 return r;
         }
 
-        if (likely(v->version >= 1))
+        if (likely(v->salt_size && (v->version >= 1)))
                 r = verity_hash_update(v, req, v->salt, v->salt_size, res);
 
         return r;
@@ -177,7 +177,7 @@ static int verity_hash_final(struct dm_verity *v, struct ahash_request *req,
 {
         int r;
 
-        if (unlikely(!v->version)) {
+        if (unlikely(v->salt_size && (!v->version))) {
                 r = verity_hash_update(v, req, v->salt, v->salt_size, res);
 
                 if (r < 0) {
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 6ef9500226c0..37ccd73c79ec 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1657,7 +1657,7 @@ static struct mapped_device *alloc_dev(int minor)
 
         bio_init(&md->flush_bio, NULL, 0);
         md->flush_bio.bi_bdev = md->bdev;
-        md->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
+        md->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
 
         dm_stats_init(&md->stats);
 
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index 7299ce2f08a8..03082e17c65c 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -1311,8 +1311,10 @@ static int add_new_disk(struct mddev *mddev, struct md_rdev *rdev)
         cmsg.raid_slot = cpu_to_le32(rdev->desc_nr);
         lock_comm(cinfo, 1);
         ret = __sendmsg(cinfo, &cmsg);
-        if (ret)
+        if (ret) {
+                unlock_comm(cinfo);
                 return ret;
+        }
         cinfo->no_new_dev_lockres->flags |= DLM_LKF_NOQUEUE;
         ret = dlm_lock_sync(cinfo->no_new_dev_lockres, DLM_LOCK_EX);
         cinfo->no_new_dev_lockres->flags &= ~DLM_LKF_NOQUEUE;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 82f798be964f..87edc342ccb3 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -765,7 +765,7 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
                     test_bit(FailFast, &rdev->flags) &&
                     !test_bit(LastDev, &rdev->flags))
                         ff = MD_FAILFAST;
-                bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_FUA | ff;
+                bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH | REQ_FUA | ff;
 
                 atomic_inc(&mddev->pending_writes);
                 submit_bio(bio);
@@ -5174,6 +5174,18 @@ static void mddev_delayed_delete(struct work_struct *ws)
 
 static void no_op(struct percpu_ref *r) {}
 
+int mddev_init_writes_pending(struct mddev *mddev)
+{
+        if (mddev->writes_pending.percpu_count_ptr)
+                return 0;
+        if (percpu_ref_init(&mddev->writes_pending, no_op, 0, GFP_KERNEL) < 0)
+                return -ENOMEM;
+        /* We want to start with the refcount at zero */
+        percpu_ref_put(&mddev->writes_pending);
+        return 0;
+}
+EXPORT_SYMBOL_GPL(mddev_init_writes_pending);
+
 static int md_alloc(dev_t dev, char *name)
 {
         /*
@@ -5239,10 +5251,6 @@ static int md_alloc(dev_t dev, char *name)
         blk_queue_make_request(mddev->queue, md_make_request);
         blk_set_stacking_limits(&mddev->queue->limits);
 
-        if (percpu_ref_init(&mddev->writes_pending, no_op, 0, GFP_KERNEL) < 0)
-                goto abort;
-        /* We want to start with the refcount at zero */
-        percpu_ref_put(&mddev->writes_pending);
         disk = alloc_disk(1 << shift);
         if (!disk) {
                 blk_cleanup_queue(mddev->queue);
@@ -8022,18 +8030,15 @@ EXPORT_SYMBOL(md_write_end);
  * may proceed without blocking. It is important to call this before
  * attempting a GFP_KERNEL allocation while holding the mddev lock.
  * Must be called with mddev_lock held.
- *
- * In the ->external case MD_SB_CHANGE_PENDING can not be cleared until mddev->lock
- * is dropped, so return -EAGAIN after notifying userspace.
  */
-int md_allow_write(struct mddev *mddev)
+void md_allow_write(struct mddev *mddev)
 {
         if (!mddev->pers)
-                return 0;
+                return;
         if (mddev->ro)
-                return 0;
+                return;
         if (!mddev->pers->sync_request)
-                return 0;
+                return;
 
         spin_lock(&mddev->lock);
         if (mddev->in_sync) {
@@ -8046,13 +8051,12 @@ int md_allow_write(struct mddev *mddev)
                 spin_unlock(&mddev->lock);
                 md_update_sb(mddev, 0);
                 sysfs_notify_dirent_safe(mddev->sysfs_state);
+                /* wait for the dirty state to be recorded in the metadata */
+                wait_event(mddev->sb_wait,
+                           !test_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags) &&
+                           !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
         } else
                 spin_unlock(&mddev->lock);
-
-        if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
-                return -EAGAIN;
-        else
-                return 0;
 }
 EXPORT_SYMBOL_GPL(md_allow_write);
 
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 4e75d121bfcc..0fa1de42c42b 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -648,6 +648,7 @@ extern void md_unregister_thread(struct md_thread **threadp);
 extern void md_wakeup_thread(struct md_thread *thread);
 extern void md_check_recovery(struct mddev *mddev);
 extern void md_reap_sync_thread(struct mddev *mddev);
+extern int mddev_init_writes_pending(struct mddev *mddev);
 extern void md_write_start(struct mddev *mddev, struct bio *bi);
 extern void md_write_inc(struct mddev *mddev, struct bio *bi);
 extern void md_write_end(struct mddev *mddev);
@@ -665,7 +666,7 @@ extern int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
                         bool metadata_op);
 extern void md_do_sync(struct md_thread *thread);
 extern void md_new_event(struct mddev *mddev);
-extern int md_allow_write(struct mddev *mddev);
+extern void md_allow_write(struct mddev *mddev);
 extern void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev);
 extern void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors);
 extern int md_check_no_bitmap(struct mddev *mddev);
diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c
index ebb280a14325..32adf6b4a9c7 100644
--- a/drivers/md/persistent-data/dm-space-map-disk.c
+++ b/drivers/md/persistent-data/dm-space-map-disk.c
@@ -142,10 +142,23 @@ static int sm_disk_inc_block(struct dm_space_map *sm, dm_block_t b)
 
 static int sm_disk_dec_block(struct dm_space_map *sm, dm_block_t b)
 {
+        int r;
+        uint32_t old_count;
         enum allocation_event ev;
         struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
 
-        return sm_ll_dec(&smd->ll, b, &ev);
+        r = sm_ll_dec(&smd->ll, b, &ev);
+        if (!r && (ev == SM_FREE)) {
+                /*
+                 * It's only free if it's also free in the last
+                 * transaction.
+                 */
+                r = sm_ll_lookup(&smd->old_ll, b, &old_count);
+                if (!r && !old_count)
+                        smd->nr_allocated_this_transaction--;
+        }
+
+        return r;
 }
 
 static int sm_disk_new_block(struct dm_space_map *sm, dm_block_t *b)
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 84e58596594d..d6c0bc76e837 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -385,7 +385,7 @@ static int raid0_run(struct mddev *mddev)
         blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
         blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
         blk_queue_max_write_zeroes_sectors(mddev->queue, mddev->chunk_sectors);
-        blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors);
+        blk_queue_max_discard_sectors(mddev->queue, UINT_MAX);
 
         blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
         blk_queue_io_opt(mddev->queue,
@@ -459,6 +459,95 @@ static inline int is_io_in_chunk_boundary(struct mddev *mddev,
         }
 }
 
+static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
+{
+        struct r0conf *conf = mddev->private;
+        struct strip_zone *zone;
+        sector_t start = bio->bi_iter.bi_sector;
+        sector_t end;
+        unsigned int stripe_size;
+        sector_t first_stripe_index, last_stripe_index;
+        sector_t start_disk_offset;
+        unsigned int start_disk_index;
+        sector_t end_disk_offset;
+        unsigned int end_disk_index;
+        unsigned int disk;
+
+        zone = find_zone(conf, &start);
+
+        if (bio_end_sector(bio) > zone->zone_end) {
+                struct bio *split = bio_split(bio,
+                        zone->zone_end - bio->bi_iter.bi_sector, GFP_NOIO,
+                        mddev->bio_set);
+                bio_chain(split, bio);
+                generic_make_request(bio);
+                bio = split;
+                end = zone->zone_end;
+        } else
+                end = bio_end_sector(bio);
+
+        if (zone != conf->strip_zone)
+                end = end - zone[-1].zone_end;
+
+        /* Now start and end is the offset in zone */
+        stripe_size = zone->nb_dev * mddev->chunk_sectors;
+
+        first_stripe_index = start;
+        sector_div(first_stripe_index, stripe_size);
+        last_stripe_index = end;
+        sector_div(last_stripe_index, stripe_size);
+
+        start_disk_index = (int)(start - first_stripe_index * stripe_size) /
+                mddev->chunk_sectors;
+        start_disk_offset = ((int)(start - first_stripe_index * stripe_size) %
+                mddev->chunk_sectors) +
+                first_stripe_index * mddev->chunk_sectors;
+        end_disk_index = (int)(end - last_stripe_index * stripe_size) /
+                mddev->chunk_sectors;
+        end_disk_offset = ((int)(end - last_stripe_index * stripe_size) %
+                mddev->chunk_sectors) +
+                last_stripe_index * mddev->chunk_sectors;
+
+        for (disk = 0; disk < zone->nb_dev; disk++) {
+                sector_t dev_start, dev_end;
+                struct bio *discard_bio = NULL;
+                struct md_rdev *rdev;
+
+                if (disk < start_disk_index)
+                        dev_start = (first_stripe_index + 1) *
+                                mddev->chunk_sectors;
+                else if (disk > start_disk_index)
+                        dev_start = first_stripe_index * mddev->chunk_sectors;
+                else
+                        dev_start = start_disk_offset;
+
+                if (disk < end_disk_index)
+                        dev_end = (last_stripe_index + 1) * mddev->chunk_sectors;
+                else if (disk > end_disk_index)
+                        dev_end = last_stripe_index * mddev->chunk_sectors;
+                else
+                        dev_end = end_disk_offset;
+
+                if (dev_end <= dev_start)
+                        continue;
+
+                rdev = conf->devlist[(zone - conf->strip_zone) *
+                        conf->strip_zone[0].nb_dev + disk];
+                if (__blkdev_issue_discard(rdev->bdev,
+                        dev_start + zone->dev_start + rdev->data_offset,
+                        dev_end - dev_start, GFP_NOIO, 0, &discard_bio) ||
+                    !discard_bio)
+                        continue;
+                bio_chain(discard_bio, bio);
+                if (mddev->gendisk)
+                        trace_block_bio_remap(bdev_get_queue(rdev->bdev),
+                                discard_bio, disk_devt(mddev->gendisk),
+                                bio->bi_iter.bi_sector);
+                generic_make_request(discard_bio);
+        }
+        bio_endio(bio);
+}
+
 static void raid0_make_request(struct mddev *mddev, struct bio *bio)
 {
         struct strip_zone *zone;
@@ -473,6 +562,11 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
                 return;
         }
 
+        if (unlikely((bio_op(bio) == REQ_OP_DISCARD))) {
+                raid0_handle_discard(mddev, bio);
+                return;
+        }
+
         bio_sector = bio->bi_iter.bi_sector;
         sector = bio_sector;
         chunk_sects = mddev->chunk_sectors;
@@ -498,19 +592,13 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
         bio->bi_iter.bi_sector = sector + zone->dev_start +
                 tmp_dev->data_offset;
 
-        if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
-                     !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) {
-                /* Just ignore it */
-                bio_endio(bio);
-        } else {
-                if (mddev->gendisk)
-                        trace_block_bio_remap(bdev_get_queue(bio->bi_bdev),
-                                              bio, disk_devt(mddev->gendisk),
-                                              bio_sector);
-                mddev_check_writesame(mddev, bio);
-                mddev_check_write_zeroes(mddev, bio);
-                generic_make_request(bio);
-        }
+        if (mddev->gendisk)
+                trace_block_bio_remap(bdev_get_queue(bio->bi_bdev),
+                                      bio, disk_devt(mddev->gendisk),
+                                      bio_sector);
+        mddev_check_writesame(mddev, bio);
+        mddev_check_write_zeroes(mddev, bio);
+        generic_make_request(bio);
 }
 
 static void raid0_status(struct seq_file *seq, struct mddev *mddev)
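[Annotation] raid0_handle_discard() splits one array-relative discard into per-disk ranges using stripe arithmetic. A userspace sketch of that geometry, assuming the zone starts at disk offset 0 and using a tiny chunk size and disk count chosen purely for illustration:

/* Map an array range [start, end) onto per-disk [dev_start, dev_end)
 * ranges, mirroring raid0's discard split. Geometry is illustrative. */
#include <stdio.h>

int main(void)
{
        unsigned long chunk = 8, nb_dev = 3;    /* assumed geometry */
        unsigned long start = 10, end = 50;     /* array sectors */
        unsigned long stripe = chunk * nb_dev;
        unsigned long first = start / stripe, last = end / stripe;
        unsigned long start_disk = (start - first * stripe) / chunk;
        unsigned long start_off = (start - first * stripe) % chunk + first * chunk;
        unsigned long end_disk = (end - last * stripe) / chunk;
        unsigned long end_off = (end - last * stripe) % chunk + last * chunk;

        for (unsigned long disk = 0; disk < nb_dev; disk++) {
                unsigned long ds, de;

                ds = disk < start_disk ? (first + 1) * chunk :
                     disk > start_disk ? first * chunk : start_off;
                de = disk < end_disk ? (last + 1) * chunk :
                     disk > end_disk ? last * chunk : end_off;
                if (de > ds)
                        printf("disk %lu: discard sectors [%lu, %lu)\n",
                               disk, ds, de);
        }
        /* the per-disk ranges sum to end - start = 40 sectors */
        return 0;
}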
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 7ed59351fe97..e1a7e3d4c5e4 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -666,8 +666,11 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
                                 break;
                         }
                         continue;
-                } else
+                } else {
+                        if ((sectors > best_good_sectors) && (best_disk >= 0))
+                                best_disk = -1;
                         best_good_sectors = sectors;
+                }
 
                 if (best_disk >= 0)
                         /* At least two disks to choose from so failfast is OK */
@@ -1529,17 +1532,16 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
                 plug = container_of(cb, struct raid1_plug_cb, cb);
         else
                 plug = NULL;
-        spin_lock_irqsave(&conf->device_lock, flags);
         if (plug) {
                 bio_list_add(&plug->pending, mbio);
                 plug->pending_cnt++;
         } else {
+                spin_lock_irqsave(&conf->device_lock, flags);
                 bio_list_add(&conf->pending_bio_list, mbio);
                 conf->pending_count++;
-        }
-        spin_unlock_irqrestore(&conf->device_lock, flags);
-        if (!plug)
+                spin_unlock_irqrestore(&conf->device_lock, flags);
                 md_wakeup_thread(mddev->thread);
+        }
         }
 
         r1_bio_write_done(r1_bio);
@@ -3061,6 +3063,8 @@ static int raid1_run(struct mddev *mddev)
3061 mdname(mddev)); 3063 mdname(mddev));
3062 return -EIO; 3064 return -EIO;
3063 } 3065 }
3066 if (mddev_init_writes_pending(mddev) < 0)
3067 return -ENOMEM;
3064 /* 3068 /*
3065 * copy the already verified devices into our private RAID1 3069 * copy the already verified devices into our private RAID1
3066 * bookkeeping area. [whatever we allocate in run(), 3070 * bookkeeping area. [whatever we allocate in run(),
@@ -3197,7 +3201,7 @@ static int raid1_reshape(struct mddev *mddev)
3197 struct r1conf *conf = mddev->private; 3201 struct r1conf *conf = mddev->private;
3198 int cnt, raid_disks; 3202 int cnt, raid_disks;
3199 unsigned long flags; 3203 unsigned long flags;
3200 int d, d2, err; 3204 int d, d2;
3201 3205
3202 /* Cannot change chunk_size, layout, or level */ 3206 /* Cannot change chunk_size, layout, or level */
3203 if (mddev->chunk_sectors != mddev->new_chunk_sectors || 3207 if (mddev->chunk_sectors != mddev->new_chunk_sectors ||
@@ -3209,11 +3213,8 @@ static int raid1_reshape(struct mddev *mddev)
3209 return -EINVAL; 3213 return -EINVAL;
3210 } 3214 }
3211 3215
3212 if (!mddev_is_clustered(mddev)) { 3216 if (!mddev_is_clustered(mddev))
3213 err = md_allow_write(mddev); 3217 md_allow_write(mddev);
3214 if (err)
3215 return err;
3216 }
3217 3218
3218 raid_disks = mddev->raid_disks + mddev->delta_disks; 3219 raid_disks = mddev->raid_disks + mddev->delta_disks;
3219 3220
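The raid1_write_request() hunk above (and its twin in raid10.c below) narrows conf->device_lock to the only list that is actually shared: the per-task plug list needs no locking at all. A hedged pthread sketch of the resulting shape; struct bio, bio_list, plug_cb and conf_like are invented stand-ins, not the md structures:

#include <pthread.h>

struct bio { struct bio *next; };
struct bio_list { struct bio *head; };

static void bio_list_add(struct bio_list *l, struct bio *b)
{
        b->next = l->head;              /* LIFO is enough for the sketch */
        l->head = b;
}

struct plug_cb {
        struct bio_list pending;        /* task-local: no lock required */
        int pending_cnt;
};

struct conf_like {
        pthread_mutex_t device_lock;    /* init with PTHREAD_MUTEX_INITIALIZER */
        struct bio_list pending_bio_list;
        int pending_count;
};

/* Lock only around the shared list, and wake the md thread only when
 * queueing directly to it, as in the edited raid1/raid10 code. */
static void queue_write(struct conf_like *conf, struct plug_cb *plug,
                        struct bio *mbio)
{
        if (plug) {
                bio_list_add(&plug->pending, mbio);
                plug->pending_cnt++;
        } else {
                pthread_mutex_lock(&conf->device_lock);
                bio_list_add(&conf->pending_bio_list, mbio);
                conf->pending_count++;
                pthread_mutex_unlock(&conf->device_lock);
                /* md_wakeup_thread(mddev->thread) happens here */
        }
}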
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 6b86a0032cf8..797ed60abd5e 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1282,17 +1282,16 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
1282 plug = container_of(cb, struct raid10_plug_cb, cb); 1282 plug = container_of(cb, struct raid10_plug_cb, cb);
1283 else 1283 else
1284 plug = NULL; 1284 plug = NULL;
1285 spin_lock_irqsave(&conf->device_lock, flags);
1286 if (plug) { 1285 if (plug) {
1287 bio_list_add(&plug->pending, mbio); 1286 bio_list_add(&plug->pending, mbio);
1288 plug->pending_cnt++; 1287 plug->pending_cnt++;
1289 } else { 1288 } else {
1289 spin_lock_irqsave(&conf->device_lock, flags);
1290 bio_list_add(&conf->pending_bio_list, mbio); 1290 bio_list_add(&conf->pending_bio_list, mbio);
1291 conf->pending_count++; 1291 conf->pending_count++;
1292 } 1292 spin_unlock_irqrestore(&conf->device_lock, flags);
1293 spin_unlock_irqrestore(&conf->device_lock, flags);
1294 if (!plug)
1295 md_wakeup_thread(mddev->thread); 1293 md_wakeup_thread(mddev->thread);
1294 }
1296} 1295}
1297 1296
1298static void raid10_write_request(struct mddev *mddev, struct bio *bio, 1297static void raid10_write_request(struct mddev *mddev, struct bio *bio,
@@ -3612,6 +3611,9 @@ static int raid10_run(struct mddev *mddev)
3612 int first = 1; 3611 int first = 1;
3613 bool discard_supported = false; 3612 bool discard_supported = false;
3614 3613
3614 if (mddev_init_writes_pending(mddev) < 0)
3615 return -ENOMEM;
3616
3615 if (mddev->private == NULL) { 3617 if (mddev->private == NULL) {
3616 conf = setup_conf(mddev); 3618 conf = setup_conf(mddev);
3617 if (IS_ERR(conf)) 3619 if (IS_ERR(conf))
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index 26ba09282e7c..0a7af8b0a80a 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -24,6 +24,7 @@
24#include "md.h" 24#include "md.h"
25#include "raid5.h" 25#include "raid5.h"
26#include "bitmap.h" 26#include "bitmap.h"
27#include "raid5-log.h"
27 28
28/* 29/*
29 * metadata/data stored in disk with 4k size unit (a block) regardless 30 * metadata/data stored in disk with 4k size unit (a block) regardless
@@ -622,20 +623,30 @@ static void r5l_do_submit_io(struct r5l_log *log, struct r5l_io_unit *io)
622 __r5l_set_io_unit_state(io, IO_UNIT_IO_START); 623 __r5l_set_io_unit_state(io, IO_UNIT_IO_START);
623 spin_unlock_irqrestore(&log->io_list_lock, flags); 624 spin_unlock_irqrestore(&log->io_list_lock, flags);
624 625
626 /*
 627 * In case of journal device failures, submit_bio will get an error
 628 * and call endio, and active stripes will continue the write
 629 * process. Therefore, it is not necessary to check the Faulty bit
 630 * of the journal device here.
631 *
 632 * We can't check io->split_bio after current_bio is submitted:
 633 * once current_bio is submitted, it might already be completed
 634 * and the io_unit freed, making io->split_bio a use after free.
 635 * We submit split_bio first to avoid the issue.
636 */
637 if (io->split_bio) {
638 if (io->has_flush)
639 io->split_bio->bi_opf |= REQ_PREFLUSH;
640 if (io->has_fua)
641 io->split_bio->bi_opf |= REQ_FUA;
642 submit_bio(io->split_bio);
643 }
644
625 if (io->has_flush) 645 if (io->has_flush)
626 io->current_bio->bi_opf |= REQ_PREFLUSH; 646 io->current_bio->bi_opf |= REQ_PREFLUSH;
627 if (io->has_fua) 647 if (io->has_fua)
628 io->current_bio->bi_opf |= REQ_FUA; 648 io->current_bio->bi_opf |= REQ_FUA;
629 submit_bio(io->current_bio); 649 submit_bio(io->current_bio);
630
631 if (!io->split_bio)
632 return;
633
634 if (io->has_flush)
635 io->split_bio->bi_opf |= REQ_PREFLUSH;
636 if (io->has_fua)
637 io->split_bio->bi_opf |= REQ_FUA;
638 submit_bio(io->split_bio);
639} 650}
640 651
641/* deferred io_unit will be dispatched here */ 652/* deferred io_unit will be dispatched here */
@@ -670,6 +681,11 @@ static void r5c_disable_writeback_async(struct work_struct *work)
670 return; 681 return;
671 pr_info("md/raid:%s: Disabling writeback cache for degraded array.\n", 682 pr_info("md/raid:%s: Disabling writeback cache for degraded array.\n",
672 mdname(mddev)); 683 mdname(mddev));
684
 685 /* wait for the superblock change before suspending */
686 wait_event(mddev->sb_wait,
687 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
688
673 mddev_suspend(mddev); 689 mddev_suspend(mddev);
674 log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH; 690 log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
675 mddev_resume(mddev); 691 mddev_resume(mddev);
@@ -1766,7 +1782,7 @@ static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos,
1766 mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum, 1782 mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum,
1767 mb, PAGE_SIZE)); 1783 mb, PAGE_SIZE));
1768 if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, REQ_OP_WRITE, 1784 if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, REQ_OP_WRITE,
1769 REQ_FUA, false)) { 1785 REQ_SYNC | REQ_FUA, false)) {
1770 __free_page(page); 1786 __free_page(page);
1771 return -EIO; 1787 return -EIO;
1772 } 1788 }
@@ -2372,7 +2388,7 @@ r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
2372 mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum, 2388 mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum,
2373 mb, PAGE_SIZE)); 2389 mb, PAGE_SIZE));
2374 sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page, 2390 sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page,
2375 REQ_OP_WRITE, REQ_FUA, false); 2391 REQ_OP_WRITE, REQ_SYNC | REQ_FUA, false);
2376 sh->log_start = ctx->pos; 2392 sh->log_start = ctx->pos;
2377 list_add_tail(&sh->r5c, &log->stripe_in_journal_list); 2393 list_add_tail(&sh->r5c, &log->stripe_in_journal_list);
2378 atomic_inc(&log->stripe_in_journal_count); 2394 atomic_inc(&log->stripe_in_journal_count);
@@ -2621,8 +2637,11 @@ int r5c_try_caching_write(struct r5conf *conf,
2621 * When run in degraded mode, array is set to write-through mode. 2637 * When run in degraded mode, array is set to write-through mode.
2622 * This check helps drain pending write safely in the transition to 2638 * This check helps drain pending write safely in the transition to
2623 * write-through mode. 2639 * write-through mode.
2640 *
2641 * When a stripe is syncing, the write is also handled in write
2642 * through mode.
2624 */ 2643 */
2625 if (s->failed) { 2644 if (s->failed || test_bit(STRIPE_SYNCING, &sh->state)) {
2626 r5c_make_stripe_write_out(sh); 2645 r5c_make_stripe_write_out(sh);
2627 return -EAGAIN; 2646 return -EAGAIN;
2628 } 2647 }
@@ -2825,6 +2844,9 @@ void r5c_finish_stripe_write_out(struct r5conf *conf,
2825 } 2844 }
2826 2845
2827 r5l_append_flush_payload(log, sh->sector); 2846 r5l_append_flush_payload(log, sh->sector);
 2847 /* stripe is flushed to raid disks, we can do resync now */
2848 if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state))
2849 set_bit(STRIPE_HANDLE, &sh->state);
2828} 2850}
2829 2851
2830int r5c_cache_data(struct r5l_log *log, struct stripe_head *sh) 2852int r5c_cache_data(struct r5l_log *log, struct stripe_head *sh)
@@ -2973,7 +2995,7 @@ ioerr:
2973 return ret; 2995 return ret;
2974} 2996}
2975 2997
2976void r5c_update_on_rdev_error(struct mddev *mddev) 2998void r5c_update_on_rdev_error(struct mddev *mddev, struct md_rdev *rdev)
2977{ 2999{
2978 struct r5conf *conf = mddev->private; 3000 struct r5conf *conf = mddev->private;
2979 struct r5l_log *log = conf->log; 3001 struct r5l_log *log = conf->log;
@@ -2981,7 +3003,8 @@ void r5c_update_on_rdev_error(struct mddev *mddev)
2981 if (!log) 3003 if (!log)
2982 return; 3004 return;
2983 3005
2984 if (raid5_calc_degraded(conf) > 0 && 3006 if ((raid5_calc_degraded(conf) > 0 ||
3007 test_bit(Journal, &rdev->flags)) &&
2985 conf->log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK) 3008 conf->log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK)
2986 schedule_work(&log->disable_writeback_work); 3009 schedule_work(&log->disable_writeback_work);
2987} 3010}
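A hedged sketch of the ordering rule the r5l_do_submit_io() hunk above enforces: after submit_bio(io->current_bio), the endio path may free the io_unit, so io->split_bio must be consumed first. The types and flag values below are stand-ins, not the kernel definitions:

#define SK_PREFLUSH (1u << 0)           /* stand-in for REQ_PREFLUSH */
#define SK_FUA      (1u << 1)           /* stand-in for REQ_FUA */

struct sk_bio { unsigned int bi_opf; };

struct sk_io_unit {
        struct sk_bio *current_bio;
        struct sk_bio *split_bio;       /* may be NULL */
        int has_flush, has_fua;
};

static void sk_submit_bio(struct sk_bio *bio) { (void)bio; /* queue I/O */ }

static void sk_do_submit_io(struct sk_io_unit *io)
{
        /* split_bio first: io may be freed once current_bio completes */
        if (io->split_bio) {
                if (io->has_flush)
                        io->split_bio->bi_opf |= SK_PREFLUSH;
                if (io->has_fua)
                        io->split_bio->bi_opf |= SK_FUA;
                sk_submit_bio(io->split_bio);
        }

        if (io->has_flush)
                io->current_bio->bi_opf |= SK_PREFLUSH;
        if (io->has_fua)
                io->current_bio->bi_opf |= SK_FUA;
        sk_submit_bio(io->current_bio);
        /* no access to *io past this point */
}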
diff --git a/drivers/md/raid5-log.h b/drivers/md/raid5-log.h
index 27097101ccca..328d67aedda4 100644
--- a/drivers/md/raid5-log.h
+++ b/drivers/md/raid5-log.h
@@ -28,7 +28,8 @@ extern void r5c_flush_cache(struct r5conf *conf, int num);
28extern void r5c_check_stripe_cache_usage(struct r5conf *conf); 28extern void r5c_check_stripe_cache_usage(struct r5conf *conf);
29extern void r5c_check_cached_full_stripe(struct r5conf *conf); 29extern void r5c_check_cached_full_stripe(struct r5conf *conf);
30extern struct md_sysfs_entry r5c_journal_mode; 30extern struct md_sysfs_entry r5c_journal_mode;
31extern void r5c_update_on_rdev_error(struct mddev *mddev); 31extern void r5c_update_on_rdev_error(struct mddev *mddev,
32 struct md_rdev *rdev);
32extern bool r5c_big_stripe_cached(struct r5conf *conf, sector_t sect); 33extern bool r5c_big_stripe_cached(struct r5conf *conf, sector_t sect);
33 34
34extern struct dma_async_tx_descriptor * 35extern struct dma_async_tx_descriptor *
diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c
index 5d25bebf3328..ccce92e68d7f 100644
--- a/drivers/md/raid5-ppl.c
+++ b/drivers/md/raid5-ppl.c
@@ -907,8 +907,8 @@ static int ppl_write_empty_header(struct ppl_log *log)
907 pplhdr->checksum = cpu_to_le32(~crc32c_le(~0, pplhdr, PAGE_SIZE)); 907 pplhdr->checksum = cpu_to_le32(~crc32c_le(~0, pplhdr, PAGE_SIZE));
908 908
909 if (!sync_page_io(rdev, rdev->ppl.sector - rdev->data_offset, 909 if (!sync_page_io(rdev, rdev->ppl.sector - rdev->data_offset,
910 PPL_HEADER_SIZE, page, REQ_OP_WRITE | REQ_FUA, 0, 910 PPL_HEADER_SIZE, page, REQ_OP_WRITE | REQ_SYNC |
911 false)) { 911 REQ_FUA, 0, false)) {
912 md_error(rdev->mddev, rdev); 912 md_error(rdev->mddev, rdev);
913 ret = -EIO; 913 ret = -EIO;
914 } 914 }
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 2e38cfac5b1d..ec0f951ae19f 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -103,8 +103,7 @@ static inline void unlock_device_hash_lock(struct r5conf *conf, int hash)
103static inline void lock_all_device_hash_locks_irq(struct r5conf *conf) 103static inline void lock_all_device_hash_locks_irq(struct r5conf *conf)
104{ 104{
105 int i; 105 int i;
106 local_irq_disable(); 106 spin_lock_irq(conf->hash_locks);
107 spin_lock(conf->hash_locks);
108 for (i = 1; i < NR_STRIPE_HASH_LOCKS; i++) 107 for (i = 1; i < NR_STRIPE_HASH_LOCKS; i++)
109 spin_lock_nest_lock(conf->hash_locks + i, conf->hash_locks); 108 spin_lock_nest_lock(conf->hash_locks + i, conf->hash_locks);
110 spin_lock(&conf->device_lock); 109 spin_lock(&conf->device_lock);
@@ -114,9 +113,9 @@ static inline void unlock_all_device_hash_locks_irq(struct r5conf *conf)
114{ 113{
115 int i; 114 int i;
116 spin_unlock(&conf->device_lock); 115 spin_unlock(&conf->device_lock);
117 for (i = NR_STRIPE_HASH_LOCKS; i; i--) 116 for (i = NR_STRIPE_HASH_LOCKS - 1; i; i--)
118 spin_unlock(conf->hash_locks + i - 1); 117 spin_unlock(conf->hash_locks + i);
119 local_irq_enable(); 118 spin_unlock_irq(conf->hash_locks);
120} 119}
121 120
122/* Find first data disk in a raid6 stripe */ 121/* Find first data disk in a raid6 stripe */
@@ -234,11 +233,15 @@ static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh,
234 if (test_bit(R5_InJournal, &sh->dev[i].flags)) 233 if (test_bit(R5_InJournal, &sh->dev[i].flags))
235 injournal++; 234 injournal++;
236 /* 235 /*
237 * When quiesce in r5c write back, set STRIPE_HANDLE for stripes with 236 * In the following cases, the stripe cannot be released to cached
238 * data in journal, so they are not released to cached lists 237 * lists. Therefore, we make the stripe write out and set
238 * STRIPE_HANDLE:
 239 * 1. when quiescing in r5c write-back mode;
 240 * 2. when resync is requested for the stripe.
239 */ 241 */
240 if (conf->quiesce && r5c_is_writeback(conf->log) && 242 if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state) ||
241 !test_bit(STRIPE_HANDLE, &sh->state) && injournal != 0) { 243 (conf->quiesce && r5c_is_writeback(conf->log) &&
244 !test_bit(STRIPE_HANDLE, &sh->state) && injournal != 0)) {
242 if (test_bit(STRIPE_R5C_CACHING, &sh->state)) 245 if (test_bit(STRIPE_R5C_CACHING, &sh->state))
243 r5c_make_stripe_write_out(sh); 246 r5c_make_stripe_write_out(sh);
244 set_bit(STRIPE_HANDLE, &sh->state); 247 set_bit(STRIPE_HANDLE, &sh->state);
@@ -714,12 +717,11 @@ static bool is_full_stripe_write(struct stripe_head *sh)
714 717
715static void lock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2) 718static void lock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2)
716{ 719{
717 local_irq_disable();
718 if (sh1 > sh2) { 720 if (sh1 > sh2) {
719 spin_lock(&sh2->stripe_lock); 721 spin_lock_irq(&sh2->stripe_lock);
720 spin_lock_nested(&sh1->stripe_lock, 1); 722 spin_lock_nested(&sh1->stripe_lock, 1);
721 } else { 723 } else {
722 spin_lock(&sh1->stripe_lock); 724 spin_lock_irq(&sh1->stripe_lock);
723 spin_lock_nested(&sh2->stripe_lock, 1); 725 spin_lock_nested(&sh2->stripe_lock, 1);
724 } 726 }
725} 727}
@@ -727,8 +729,7 @@ static void lock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2)
727static void unlock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2) 729static void unlock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2)
728{ 730{
729 spin_unlock(&sh1->stripe_lock); 731 spin_unlock(&sh1->stripe_lock);
730 spin_unlock(&sh2->stripe_lock); 732 spin_unlock_irq(&sh2->stripe_lock);
731 local_irq_enable();
732} 733}
733 734
734/* Only freshly new full stripe normal write stripe can be added to a batch list */ 735/* Only freshly new full stripe normal write stripe can be added to a batch list */
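The lock_two_stripes()/unlock_two_stripes() hunks above fold the bare local_irq_disable() into the first lock of the pair (spin_lock_irq) while keeping the rule that prevents an AB/BA deadlock: two stripe locks are always taken in a fixed address order. A pthread sketch of the ordering rule alone; irq disabling has no user-space analogue, and comparing unrelated pointers is tolerated here as it is in the kernel:

#include <pthread.h>

struct stripe { pthread_mutex_t lock; };

/* Always lock the lower address first, regardless of argument order. */
static void lock_two(struct stripe *sh1, struct stripe *sh2)
{
        if (sh1 > sh2) {
                pthread_mutex_lock(&sh2->lock);
                pthread_mutex_lock(&sh1->lock);
        } else {
                pthread_mutex_lock(&sh1->lock);
                pthread_mutex_lock(&sh2->lock);
        }
}

static void unlock_two(struct stripe *sh1, struct stripe *sh2)
{
        pthread_mutex_unlock(&sh1->lock);
        pthread_mutex_unlock(&sh2->lock);
}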
@@ -2312,14 +2313,12 @@ static int resize_stripes(struct r5conf *conf, int newsize)
2312 struct stripe_head *osh, *nsh; 2313 struct stripe_head *osh, *nsh;
2313 LIST_HEAD(newstripes); 2314 LIST_HEAD(newstripes);
2314 struct disk_info *ndisks; 2315 struct disk_info *ndisks;
2315 int err; 2316 int err = 0;
2316 struct kmem_cache *sc; 2317 struct kmem_cache *sc;
2317 int i; 2318 int i;
2318 int hash, cnt; 2319 int hash, cnt;
2319 2320
2320 err = md_allow_write(conf->mddev); 2321 md_allow_write(conf->mddev);
2321 if (err)
2322 return err;
2323 2322
2324 /* Step 1 */ 2323 /* Step 1 */
2325 sc = kmem_cache_create(conf->cache_name[1-conf->active_name], 2324 sc = kmem_cache_create(conf->cache_name[1-conf->active_name],
@@ -2694,7 +2693,7 @@ static void raid5_error(struct mddev *mddev, struct md_rdev *rdev)
2694 bdevname(rdev->bdev, b), 2693 bdevname(rdev->bdev, b),
2695 mdname(mddev), 2694 mdname(mddev),
2696 conf->raid_disks - mddev->degraded); 2695 conf->raid_disks - mddev->degraded);
2697 r5c_update_on_rdev_error(mddev); 2696 r5c_update_on_rdev_error(mddev, rdev);
2698} 2697}
2699 2698
2700/* 2699/*
@@ -3055,6 +3054,11 @@ sector_t raid5_compute_blocknr(struct stripe_head *sh, int i, int previous)
3055 * When LOG_CRITICAL, stripes with injournal == 0 will be sent to 3054 * When LOG_CRITICAL, stripes with injournal == 0 will be sent to
3056 * no_space_stripes list. 3055 * no_space_stripes list.
3057 * 3056 *
3057 * 3. during journal failure
 3058 * On journal failure, we try to flush all cached data to the raid
 3059 * disks based on the data in the stripe cache. The array is
 3060 * read-only to upper layers, so we skip all pending writes.
3061 *
3058 */ 3062 */
3059static inline bool delay_towrite(struct r5conf *conf, 3063static inline bool delay_towrite(struct r5conf *conf,
3060 struct r5dev *dev, 3064 struct r5dev *dev,
@@ -3068,6 +3072,9 @@ static inline bool delay_towrite(struct r5conf *conf,
3068 if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state) && 3072 if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state) &&
3069 s->injournal > 0) 3073 s->injournal > 0)
3070 return true; 3074 return true;
3075 /* case 3 above */
3076 if (s->log_failed && s->injournal)
3077 return true;
3071 return false; 3078 return false;
3072} 3079}
3073 3080
@@ -4078,10 +4085,15 @@ static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh,
4078 set_bit(STRIPE_INSYNC, &sh->state); 4085 set_bit(STRIPE_INSYNC, &sh->state);
4079 else { 4086 else {
4080 atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches); 4087 atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches);
4081 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) 4088 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) {
4082 /* don't try to repair!! */ 4089 /* don't try to repair!! */
4083 set_bit(STRIPE_INSYNC, &sh->state); 4090 set_bit(STRIPE_INSYNC, &sh->state);
4084 else { 4091 pr_warn_ratelimited("%s: mismatch sector in range "
4092 "%llu-%llu\n", mdname(conf->mddev),
4093 (unsigned long long) sh->sector,
4094 (unsigned long long) sh->sector +
4095 STRIPE_SECTORS);
4096 } else {
4085 sh->check_state = check_state_compute_run; 4097 sh->check_state = check_state_compute_run;
4086 set_bit(STRIPE_COMPUTE_RUN, &sh->state); 4098 set_bit(STRIPE_COMPUTE_RUN, &sh->state);
4087 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request); 4099 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
@@ -4230,10 +4242,15 @@ static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh,
4230 } 4242 }
4231 } else { 4243 } else {
4232 atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches); 4244 atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches);
4233 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) 4245 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) {
4234 /* don't try to repair!! */ 4246 /* don't try to repair!! */
4235 set_bit(STRIPE_INSYNC, &sh->state); 4247 set_bit(STRIPE_INSYNC, &sh->state);
4236 else { 4248 pr_warn_ratelimited("%s: mismatch sector in range "
4249 "%llu-%llu\n", mdname(conf->mddev),
4250 (unsigned long long) sh->sector,
4251 (unsigned long long) sh->sector +
4252 STRIPE_SECTORS);
4253 } else {
4237 int *target = &sh->ops.target; 4254 int *target = &sh->ops.target;
4238 4255
4239 sh->ops.target = -1; 4256 sh->ops.target = -1;
@@ -4653,8 +4670,13 @@ static void handle_stripe(struct stripe_head *sh)
4653 4670
4654 if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state) && !sh->batch_head) { 4671 if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state) && !sh->batch_head) {
4655 spin_lock(&sh->stripe_lock); 4672 spin_lock(&sh->stripe_lock);
4656 /* Cannot process 'sync' concurrently with 'discard' */ 4673 /*
4657 if (!test_bit(STRIPE_DISCARD, &sh->state) && 4674 * Cannot process 'sync' concurrently with 'discard'.
4675 * Flush data in r5cache before 'sync'.
4676 */
4677 if (!test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state) &&
4678 !test_bit(STRIPE_R5C_FULL_STRIPE, &sh->state) &&
4679 !test_bit(STRIPE_DISCARD, &sh->state) &&
4658 test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) { 4680 test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) {
4659 set_bit(STRIPE_SYNCING, &sh->state); 4681 set_bit(STRIPE_SYNCING, &sh->state);
4660 clear_bit(STRIPE_INSYNC, &sh->state); 4682 clear_bit(STRIPE_INSYNC, &sh->state);
@@ -4701,10 +4723,15 @@ static void handle_stripe(struct stripe_head *sh)
4701 " to_write=%d failed=%d failed_num=%d,%d\n", 4723 " to_write=%d failed=%d failed_num=%d,%d\n",
4702 s.locked, s.uptodate, s.to_read, s.to_write, s.failed, 4724 s.locked, s.uptodate, s.to_read, s.to_write, s.failed,
4703 s.failed_num[0], s.failed_num[1]); 4725 s.failed_num[0], s.failed_num[1]);
4704 /* check if the array has lost more than max_degraded devices and, 4726 /*
4727 * check if the array has lost more than max_degraded devices and,
4705 * if so, some requests might need to be failed. 4728 * if so, some requests might need to be failed.
4729 *
4730 * When journal device failed (log_failed), we will only process
4731 * the stripe if there is data need write to raid disks
4706 */ 4732 */
4707 if (s.failed > conf->max_degraded || s.log_failed) { 4733 if (s.failed > conf->max_degraded ||
4734 (s.log_failed && s.injournal == 0)) {
4708 sh->check_state = 0; 4735 sh->check_state = 0;
4709 sh->reconstruct_state = 0; 4736 sh->reconstruct_state = 0;
4710 break_stripe_batch_list(sh, 0); 4737 break_stripe_batch_list(sh, 0);
@@ -5277,8 +5304,10 @@ static struct stripe_head *__get_priority_stripe(struct r5conf *conf, int group)
5277 struct stripe_head *sh, *tmp; 5304 struct stripe_head *sh, *tmp;
5278 struct list_head *handle_list = NULL; 5305 struct list_head *handle_list = NULL;
5279 struct r5worker_group *wg; 5306 struct r5worker_group *wg;
5280 bool second_try = !r5c_is_writeback(conf->log); 5307 bool second_try = !r5c_is_writeback(conf->log) &&
5281 bool try_loprio = test_bit(R5C_LOG_TIGHT, &conf->cache_state); 5308 !r5l_log_disk_error(conf);
5309 bool try_loprio = test_bit(R5C_LOG_TIGHT, &conf->cache_state) ||
5310 r5l_log_disk_error(conf);
5282 5311
5283again: 5312again:
5284 wg = NULL; 5313 wg = NULL;
@@ -6313,7 +6342,6 @@ int
6313raid5_set_cache_size(struct mddev *mddev, int size) 6342raid5_set_cache_size(struct mddev *mddev, int size)
6314{ 6343{
6315 struct r5conf *conf = mddev->private; 6344 struct r5conf *conf = mddev->private;
6316 int err;
6317 6345
6318 if (size <= 16 || size > 32768) 6346 if (size <= 16 || size > 32768)
6319 return -EINVAL; 6347 return -EINVAL;
@@ -6325,10 +6353,7 @@ raid5_set_cache_size(struct mddev *mddev, int size)
6325 ; 6353 ;
6326 mutex_unlock(&conf->cache_size_mutex); 6354 mutex_unlock(&conf->cache_size_mutex);
6327 6355
6328 6356 md_allow_write(mddev);
6329 err = md_allow_write(mddev);
6330 if (err)
6331 return err;
6332 6357
6333 mutex_lock(&conf->cache_size_mutex); 6358 mutex_lock(&conf->cache_size_mutex);
6334 while (size > conf->max_nr_stripes) 6359 while (size > conf->max_nr_stripes)
@@ -7093,6 +7118,9 @@ static int raid5_run(struct mddev *mddev)
7093 long long min_offset_diff = 0; 7118 long long min_offset_diff = 0;
7094 int first = 1; 7119 int first = 1;
7095 7120
7121 if (mddev_init_writes_pending(mddev) < 0)
7122 return -ENOMEM;
7123
7096 if (mddev->recovery_cp != MaxSector) 7124 if (mddev->recovery_cp != MaxSector)
7097 pr_notice("md/raid:%s: not clean -- starting background reconstruction\n", 7125 pr_notice("md/raid:%s: not clean -- starting background reconstruction\n",
7098 mdname(mddev)); 7126 mdname(mddev));
@@ -7530,7 +7558,9 @@ static int raid5_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
7530 * neilb: there is no locking about new writes here, 7558 * neilb: there is no locking about new writes here,
7531 * so this cannot be safe. 7559 * so this cannot be safe.
7532 */ 7560 */
7533 if (atomic_read(&conf->active_stripes)) { 7561 if (atomic_read(&conf->active_stripes) ||
7562 atomic_read(&conf->r5c_cached_full_stripes) ||
7563 atomic_read(&conf->r5c_cached_partial_stripes)) {
7534 return -EBUSY; 7564 return -EBUSY;
7535 } 7565 }
7536 log_exit(conf); 7566 log_exit(conf);
diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig
index b72edd27f880..55d9c2b82b7e 100644
--- a/drivers/media/Kconfig
+++ b/drivers/media/Kconfig
@@ -2,6 +2,12 @@
2# Multimedia device configuration 2# Multimedia device configuration
3# 3#
4 4
5config CEC_CORE
6 tristate
7
8config CEC_NOTIFIER
9 bool
10
5menuconfig MEDIA_SUPPORT 11menuconfig MEDIA_SUPPORT
6 tristate "Multimedia support" 12 tristate "Multimedia support"
7 depends on HAS_IOMEM 13 depends on HAS_IOMEM
diff --git a/drivers/media/Makefile b/drivers/media/Makefile
index 523fea3648ad..044503aa8801 100644
--- a/drivers/media/Makefile
+++ b/drivers/media/Makefile
@@ -4,8 +4,6 @@
4 4
5media-objs := media-device.o media-devnode.o media-entity.o 5media-objs := media-device.o media-devnode.o media-entity.o
6 6
7obj-$(CONFIG_CEC_CORE) += cec/
8
9# 7#
10# I2C drivers should come before other drivers, otherwise they'll fail 8# I2C drivers should come before other drivers, otherwise they'll fail
11# when compiled as builtin drivers 9# when compiled as builtin drivers
@@ -26,6 +24,8 @@ obj-$(CONFIG_DVB_CORE) += dvb-core/
26# There are both core and drivers at RC subtree - merge before drivers 24# There are both core and drivers at RC subtree - merge before drivers
27obj-y += rc/ 25obj-y += rc/
28 26
27obj-$(CONFIG_CEC_CORE) += cec/
28
29# 29#
30# Finally, merge the drivers that require the core 30# Finally, merge the drivers that require the core
31# 31#
diff --git a/drivers/media/cec/Kconfig b/drivers/media/cec/Kconfig
index f944d93e3167..43428cec3a01 100644
--- a/drivers/media/cec/Kconfig
+++ b/drivers/media/cec/Kconfig
@@ -1,19 +1,6 @@
1config CEC_CORE
2 tristate
3 depends on MEDIA_CEC_SUPPORT
4 default y
5
6config MEDIA_CEC_NOTIFIER
7 bool
8
9config MEDIA_CEC_RC 1config MEDIA_CEC_RC
10 bool "HDMI CEC RC integration" 2 bool "HDMI CEC RC integration"
11 depends on CEC_CORE && RC_CORE 3 depends on CEC_CORE && RC_CORE
4 depends on CEC_CORE=m || RC_CORE=y
12 ---help--- 5 ---help---
13 Pass on CEC remote control messages to the RC framework. 6 Pass on CEC remote control messages to the RC framework.
14
15config MEDIA_CEC_DEBUG
16 bool "HDMI CEC debugfs interface"
17 depends on CEC_CORE && DEBUG_FS
18 ---help---
19 Turns on the DebugFS interface for CEC devices.
diff --git a/drivers/media/cec/Makefile b/drivers/media/cec/Makefile
index 402a6c62a3e8..eaf408e64669 100644
--- a/drivers/media/cec/Makefile
+++ b/drivers/media/cec/Makefile
@@ -1,6 +1,6 @@
1cec-objs := cec-core.o cec-adap.o cec-api.o cec-edid.o 1cec-objs := cec-core.o cec-adap.o cec-api.o cec-edid.o
2 2
3ifeq ($(CONFIG_MEDIA_CEC_NOTIFIER),y) 3ifeq ($(CONFIG_CEC_NOTIFIER),y)
4 cec-objs += cec-notifier.o 4 cec-objs += cec-notifier.o
5endif 5endif
6 6
diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c
index f5fe01c9da8a..9dfc79800c71 100644
--- a/drivers/media/cec/cec-adap.c
+++ b/drivers/media/cec/cec-adap.c
@@ -1864,7 +1864,7 @@ void cec_monitor_all_cnt_dec(struct cec_adapter *adap)
1864 WARN_ON(call_op(adap, adap_monitor_all_enable, 0)); 1864 WARN_ON(call_op(adap, adap_monitor_all_enable, 0));
1865} 1865}
1866 1866
1867#ifdef CONFIG_MEDIA_CEC_DEBUG 1867#ifdef CONFIG_DEBUG_FS
1868/* 1868/*
1869 * Log the current state of the CEC adapter. 1869 * Log the current state of the CEC adapter.
1870 * Very useful for debugging. 1870 * Very useful for debugging.
diff --git a/drivers/media/cec/cec-api.c b/drivers/media/cec/cec-api.c
index 0860fb458757..999926f731c8 100644
--- a/drivers/media/cec/cec-api.c
+++ b/drivers/media/cec/cec-api.c
@@ -271,16 +271,10 @@ static long cec_receive(struct cec_adapter *adap, struct cec_fh *fh,
271 bool block, struct cec_msg __user *parg) 271 bool block, struct cec_msg __user *parg)
272{ 272{
273 struct cec_msg msg = {}; 273 struct cec_msg msg = {};
274 long err = 0; 274 long err;
275 275
276 if (copy_from_user(&msg, parg, sizeof(msg))) 276 if (copy_from_user(&msg, parg, sizeof(msg)))
277 return -EFAULT; 277 return -EFAULT;
278 mutex_lock(&adap->lock);
279 if (!adap->is_configured && fh->mode_follower < CEC_MODE_MONITOR)
280 err = -ENONET;
281 mutex_unlock(&adap->lock);
282 if (err)
283 return err;
284 278
285 err = cec_receive_msg(fh, &msg, block); 279 err = cec_receive_msg(fh, &msg, block);
286 if (err) 280 if (err)
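The cec_receive() hunk above deletes a classic check-then-act race: testing adap->is_configured under adap->lock, dropping the lock, and only then calling cec_receive_msg() proves nothing, because the state can change in the window. The fix is to let the operation validate state itself. A hedged stand-alone illustration of the anti-pattern, with invented names:

#include <errno.h>
#include <pthread.h>

struct sk_adap { pthread_mutex_t lock; int configured; };

static int sk_do_receive(struct sk_adap *a) { (void)a; return 0; }

/* Racy shape (what was removed): 'configured' may flip to 0 right
 * after the unlock, before sk_do_receive() runs. */
static int sk_receive_racy(struct sk_adap *a)
{
        int err = 0;

        pthread_mutex_lock(&a->lock);
        if (!a->configured)
                err = -ENONET;
        pthread_mutex_unlock(&a->lock);
        if (err)
                return err;
        /* <-- window: state can change here */
        return sk_do_receive(a);
}

/* Fixed shape (what remains): the operation checks state atomically. */
static int sk_receive_fixed(struct sk_adap *a)
{
        return sk_do_receive(a);        /* validates state internally */
}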
diff --git a/drivers/media/cec/cec-core.c b/drivers/media/cec/cec-core.c
index f9ebff90f8eb..2f87748ba4fc 100644
--- a/drivers/media/cec/cec-core.c
+++ b/drivers/media/cec/cec-core.c
@@ -187,7 +187,7 @@ static void cec_devnode_unregister(struct cec_devnode *devnode)
187 put_device(&devnode->dev); 187 put_device(&devnode->dev);
188} 188}
189 189
190#ifdef CONFIG_MEDIA_CEC_NOTIFIER 190#ifdef CONFIG_CEC_NOTIFIER
191static void cec_cec_notify(struct cec_adapter *adap, u16 pa) 191static void cec_cec_notify(struct cec_adapter *adap, u16 pa)
192{ 192{
193 cec_s_phys_addr(adap, pa, false); 193 cec_s_phys_addr(adap, pa, false);
@@ -323,7 +323,7 @@ int cec_register_adapter(struct cec_adapter *adap,
323 } 323 }
324 324
325 dev_set_drvdata(&adap->devnode.dev, adap); 325 dev_set_drvdata(&adap->devnode.dev, adap);
326#ifdef CONFIG_MEDIA_CEC_DEBUG 326#ifdef CONFIG_DEBUG_FS
327 if (!top_cec_dir) 327 if (!top_cec_dir)
328 return 0; 328 return 0;
329 329
@@ -355,7 +355,7 @@ void cec_unregister_adapter(struct cec_adapter *adap)
355 adap->rc = NULL; 355 adap->rc = NULL;
356#endif 356#endif
357 debugfs_remove_recursive(adap->cec_dir); 357 debugfs_remove_recursive(adap->cec_dir);
358#ifdef CONFIG_MEDIA_CEC_NOTIFIER 358#ifdef CONFIG_CEC_NOTIFIER
359 if (adap->notifier) 359 if (adap->notifier)
360 cec_notifier_unregister(adap->notifier); 360 cec_notifier_unregister(adap->notifier);
361#endif 361#endif
@@ -395,7 +395,7 @@ static int __init cec_devnode_init(void)
395 return ret; 395 return ret;
396 } 396 }
397 397
398#ifdef CONFIG_MEDIA_CEC_DEBUG 398#ifdef CONFIG_DEBUG_FS
399 top_cec_dir = debugfs_create_dir("cec", NULL); 399 top_cec_dir = debugfs_create_dir("cec", NULL);
400 if (IS_ERR_OR_NULL(top_cec_dir)) { 400 if (IS_ERR_OR_NULL(top_cec_dir)) {
401 pr_warn("cec: Failed to create debugfs cec dir\n"); 401 pr_warn("cec: Failed to create debugfs cec dir\n");
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index fd181c99ce11..aaa9471c7d11 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -220,7 +220,8 @@ config VIDEO_ADV7604
220 220
221config VIDEO_ADV7604_CEC 221config VIDEO_ADV7604_CEC
222 bool "Enable Analog Devices ADV7604 CEC support" 222 bool "Enable Analog Devices ADV7604 CEC support"
223 depends on VIDEO_ADV7604 && CEC_CORE 223 depends on VIDEO_ADV7604
224 select CEC_CORE
224 ---help--- 225 ---help---
225 When selected the adv7604 will support the optional 226 When selected the adv7604 will support the optional
226 HDMI CEC feature. 227 HDMI CEC feature.
@@ -240,7 +241,8 @@ config VIDEO_ADV7842
240 241
241config VIDEO_ADV7842_CEC 242config VIDEO_ADV7842_CEC
242 bool "Enable Analog Devices ADV7842 CEC support" 243 bool "Enable Analog Devices ADV7842 CEC support"
243 depends on VIDEO_ADV7842 && CEC_CORE 244 depends on VIDEO_ADV7842
245 select CEC_CORE
244 ---help--- 246 ---help---
245 When selected the adv7842 will support the optional 247 When selected the adv7842 will support the optional
246 HDMI CEC feature. 248 HDMI CEC feature.
@@ -478,7 +480,8 @@ config VIDEO_ADV7511
478 480
479config VIDEO_ADV7511_CEC 481config VIDEO_ADV7511_CEC
480 bool "Enable Analog Devices ADV7511 CEC support" 482 bool "Enable Analog Devices ADV7511 CEC support"
481 depends on VIDEO_ADV7511 && CEC_CORE 483 depends on VIDEO_ADV7511
484 select CEC_CORE
482 ---help--- 485 ---help---
483 When selected the adv7511 will support the optional 486 When selected the adv7511 will support the optional
484 HDMI CEC feature. 487 HDMI CEC feature.
diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c
index acef4eca269f..3251cba89e8f 100644
--- a/drivers/media/i2c/tc358743.c
+++ b/drivers/media/i2c/tc358743.c
@@ -223,7 +223,7 @@ static void i2c_wr8(struct v4l2_subdev *sd, u16 reg, u8 val)
223static void i2c_wr8_and_or(struct v4l2_subdev *sd, u16 reg, 223static void i2c_wr8_and_or(struct v4l2_subdev *sd, u16 reg,
224 u8 mask, u8 val) 224 u8 mask, u8 val)
225{ 225{
226 i2c_wrreg(sd, reg, (i2c_rdreg(sd, reg, 2) & mask) | val, 2); 226 i2c_wrreg(sd, reg, (i2c_rdreg(sd, reg, 1) & mask) | val, 1);
227} 227}
228 228
229static u16 i2c_rd16(struct v4l2_subdev *sd, u16 reg) 229static u16 i2c_rd16(struct v4l2_subdev *sd, u16 reg)
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index ac026ee1ca07..041cb80a26b1 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -501,8 +501,9 @@ if CEC_PLATFORM_DRIVERS
501 501
502config VIDEO_SAMSUNG_S5P_CEC 502config VIDEO_SAMSUNG_S5P_CEC
503 tristate "Samsung S5P CEC driver" 503 tristate "Samsung S5P CEC driver"
504 depends on CEC_CORE && (PLAT_S5P || ARCH_EXYNOS || COMPILE_TEST) 504 depends on PLAT_S5P || ARCH_EXYNOS || COMPILE_TEST
505 select MEDIA_CEC_NOTIFIER 505 select CEC_CORE
506 select CEC_NOTIFIER
506 ---help--- 507 ---help---
507 This is a driver for Samsung S5P HDMI CEC interface. It uses the 508 This is a driver for Samsung S5P HDMI CEC interface. It uses the
508 generic CEC framework interface. 509 generic CEC framework interface.
@@ -511,8 +512,9 @@ config VIDEO_SAMSUNG_S5P_CEC
511 512
512config VIDEO_STI_HDMI_CEC 513config VIDEO_STI_HDMI_CEC
513 tristate "STMicroelectronics STiH4xx HDMI CEC driver" 514 tristate "STMicroelectronics STiH4xx HDMI CEC driver"
514 depends on CEC_CORE && (ARCH_STI || COMPILE_TEST) 515 depends on ARCH_STI || COMPILE_TEST
515 select MEDIA_CEC_NOTIFIER 516 select CEC_CORE
517 select CEC_NOTIFIER
516 ---help--- 518 ---help---
517 This is a driver for STIH4xx HDMI CEC interface. It uses the 519 This is a driver for STIH4xx HDMI CEC interface. It uses the
518 generic CEC framework interface. 520 generic CEC framework interface.
diff --git a/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c b/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c
index 57a842ff3097..b7731b18ecae 100644
--- a/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c
+++ b/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c
@@ -493,10 +493,10 @@ static int vdec_h264_get_param(unsigned long h_vdec,
493} 493}
494 494
495static struct vdec_common_if vdec_h264_if = { 495static struct vdec_common_if vdec_h264_if = {
496 vdec_h264_init, 496 .init = vdec_h264_init,
497 vdec_h264_decode, 497 .decode = vdec_h264_decode,
498 vdec_h264_get_param, 498 .get_param = vdec_h264_get_param,
499 vdec_h264_deinit, 499 .deinit = vdec_h264_deinit,
500}; 500};
501 501
502struct vdec_common_if *get_h264_dec_comm_if(void); 502struct vdec_common_if *get_h264_dec_comm_if(void);
diff --git a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c
index 6e7a62ae0842..b9fad6a48879 100644
--- a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c
+++ b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c
@@ -620,10 +620,10 @@ static void vdec_vp8_deinit(unsigned long h_vdec)
620} 620}
621 621
622static struct vdec_common_if vdec_vp8_if = { 622static struct vdec_common_if vdec_vp8_if = {
623 vdec_vp8_init, 623 .init = vdec_vp8_init,
624 vdec_vp8_decode, 624 .decode = vdec_vp8_decode,
625 vdec_vp8_get_param, 625 .get_param = vdec_vp8_get_param,
626 vdec_vp8_deinit, 626 .deinit = vdec_vp8_deinit,
627}; 627};
628 628
629struct vdec_common_if *get_vp8_dec_comm_if(void); 629struct vdec_common_if *get_vp8_dec_comm_if(void);
diff --git a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c
index 5539b1853f16..1daee1207469 100644
--- a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c
+++ b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c
@@ -979,10 +979,10 @@ static int vdec_vp9_get_param(unsigned long h_vdec,
979} 979}
980 980
981static struct vdec_common_if vdec_vp9_if = { 981static struct vdec_common_if vdec_vp9_if = {
982 vdec_vp9_init, 982 .init = vdec_vp9_init,
983 vdec_vp9_decode, 983 .decode = vdec_vp9_decode,
984 vdec_vp9_get_param, 984 .get_param = vdec_vp9_get_param,
985 vdec_vp9_deinit, 985 .deinit = vdec_vp9_deinit,
986}; 986};
987 987
988struct vdec_common_if *get_vp9_dec_comm_if(void); 988struct vdec_common_if *get_vp9_dec_comm_if(void);
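The three mtk-vcodec hunks above replace positional struct initializers with designated ones. A small sketch of why that matters: if a field is ever added or reordered in the ops structure, the positional form silently binds callbacks to the wrong slots, while the designated form keeps working. The types below are invented for the illustration:

#include <stdio.h>

struct dec_ops {
        int (*init)(void);
        int (*decode)(void);
        int (*deinit)(void);
};

static int my_init(void)   { puts("init");   return 0; }
static int my_decode(void) { puts("decode"); return 0; }
static int my_deinit(void) { puts("deinit"); return 0; }

/* Designated form: robust against member reordering and insertion. */
static struct dec_ops ops = {
        .init   = my_init,
        .decode = my_decode,
        .deinit = my_deinit,
};

int main(void)
{
        return ops.init() || ops.decode() || ops.deinit();
}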
diff --git a/drivers/media/platform/vivid/Kconfig b/drivers/media/platform/vivid/Kconfig
index b36ac19dc6e4..154de92dd809 100644
--- a/drivers/media/platform/vivid/Kconfig
+++ b/drivers/media/platform/vivid/Kconfig
@@ -26,7 +26,8 @@ config VIDEO_VIVID
26 26
27config VIDEO_VIVID_CEC 27config VIDEO_VIVID_CEC
28 bool "Enable CEC emulation support" 28 bool "Enable CEC emulation support"
29 depends on VIDEO_VIVID && CEC_CORE 29 depends on VIDEO_VIVID
30 select CEC_CORE
30 ---help--- 31 ---help---
31 When selected the vivid module will emulate the optional 32 When selected the vivid module will emulate the optional
32 HDMI CEC feature. 33 HDMI CEC feature.
diff --git a/drivers/media/rc/rc-ir-raw.c b/drivers/media/rc/rc-ir-raw.c
index 90f66dc7c0d7..a2fc1a1d58b0 100644
--- a/drivers/media/rc/rc-ir-raw.c
+++ b/drivers/media/rc/rc-ir-raw.c
@@ -211,7 +211,7 @@ EXPORT_SYMBOL_GPL(ir_raw_event_set_idle);
211 */ 211 */
212void ir_raw_event_handle(struct rc_dev *dev) 212void ir_raw_event_handle(struct rc_dev *dev)
213{ 213{
214 if (!dev->raw) 214 if (!dev->raw || !dev->raw->thread)
215 return; 215 return;
216 216
217 wake_up_process(dev->raw->thread); 217 wake_up_process(dev->raw->thread);
@@ -490,6 +490,7 @@ int ir_raw_event_register(struct rc_dev *dev)
490{ 490{
491 int rc; 491 int rc;
492 struct ir_raw_handler *handler; 492 struct ir_raw_handler *handler;
493 struct task_struct *thread;
493 494
494 if (!dev) 495 if (!dev)
495 return -EINVAL; 496 return -EINVAL;
@@ -507,13 +508,15 @@ int ir_raw_event_register(struct rc_dev *dev)
507 * because the event is coming from userspace 508 * because the event is coming from userspace
508 */ 509 */
509 if (dev->driver_type != RC_DRIVER_IR_RAW_TX) { 510 if (dev->driver_type != RC_DRIVER_IR_RAW_TX) {
510 dev->raw->thread = kthread_run(ir_raw_event_thread, dev->raw, 511 thread = kthread_run(ir_raw_event_thread, dev->raw, "rc%u",
511 "rc%u", dev->minor); 512 dev->minor);
512 513
513 if (IS_ERR(dev->raw->thread)) { 514 if (IS_ERR(thread)) {
514 rc = PTR_ERR(dev->raw->thread); 515 rc = PTR_ERR(thread);
515 goto out; 516 goto out;
516 } 517 }
518
519 dev->raw->thread = thread;
517 } 520 }
518 521
519 mutex_lock(&ir_raw_handler_lock); 522 mutex_lock(&ir_raw_handler_lock);
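The rc-ir-raw.c hunks above fix a visibility bug: kthread_run() can return an ERR_PTR, and assigning that straight into dev->raw->thread briefly publishes a garbage pointer that ir_raw_event_handle() (now also guarded by !dev->raw->thread) could dereference. The pattern is to build in a local and publish only a known-good value. Hedged sketch with invented helpers:

#include <stddef.h>

struct sk_task { int id; };

/* stand-in for kthread_run(); may fail and return NULL */
static struct sk_task *sk_spawn(void)
{
        static struct sk_task t = { 1 };
        return &t;
}

struct sk_raw { struct sk_task *thread; };

static int sk_register(struct sk_raw *raw)
{
        struct sk_task *thread = sk_spawn();

        if (!thread)
                return -1;      /* raw->thread never held a bad value */

        raw->thread = thread;   /* publish only the validated pointer */
        return 0;
}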
diff --git a/drivers/media/rc/sir_ir.c b/drivers/media/rc/sir_ir.c
index e12ec50bf0bf..90a5f8fd5eea 100644
--- a/drivers/media/rc/sir_ir.c
+++ b/drivers/media/rc/sir_ir.c
@@ -183,9 +183,15 @@ static irqreturn_t sir_interrupt(int irq, void *dev_id)
183 static unsigned long delt; 183 static unsigned long delt;
184 unsigned long deltintr; 184 unsigned long deltintr;
185 unsigned long flags; 185 unsigned long flags;
186 int counter = 0;
186 int iir, lsr; 187 int iir, lsr;
187 188
188 while ((iir = inb(io + UART_IIR) & UART_IIR_ID)) { 189 while ((iir = inb(io + UART_IIR) & UART_IIR_ID)) {
190 if (++counter > 256) {
191 dev_err(&sir_ir_dev->dev, "Trapped in interrupt");
192 break;
193 }
194
189 switch (iir & UART_IIR_ID) { /* FIXME toto treba preriedit */ 195 switch (iir & UART_IIR_ID) { /* FIXME toto treba preriedit */
190 case UART_IIR_MSI: 196 case UART_IIR_MSI:
191 (void)inb(io + UART_MSR); 197 (void)inb(io + UART_MSR);
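The sir_interrupt() hunk above puts a hard bound on a loop that is driven entirely by a hardware status register: if the UART ever reports a stuck interrupt condition, the handler would otherwise spin forever in interrupt context. Sketch, with a fake register read standing in for inb():

static unsigned int fake_iir;           /* pretend UART_IIR register */
static unsigned int sk_read_iir(void) { return fake_iir; }

static int sk_drain(void)
{
        int counter = 0;

        while (sk_read_iir()) {
                if (++counter > 256)
                        return -1;      /* trapped: report and bail out */
                /* service one interrupt cause here */
        }
        return 0;
}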
diff --git a/drivers/media/usb/pulse8-cec/Kconfig b/drivers/media/usb/pulse8-cec/Kconfig
index 8937f3986a01..18ead44824ba 100644
--- a/drivers/media/usb/pulse8-cec/Kconfig
+++ b/drivers/media/usb/pulse8-cec/Kconfig
@@ -1,6 +1,7 @@
1config USB_PULSE8_CEC 1config USB_PULSE8_CEC
2 tristate "Pulse Eight HDMI CEC" 2 tristate "Pulse Eight HDMI CEC"
3 depends on USB_ACM && CEC_CORE 3 depends on USB_ACM
4 select CEC_CORE
4 select SERIO 5 select SERIO
5 select SERIO_SERPORT 6 select SERIO_SERPORT
6 ---help--- 7 ---help---
diff --git a/drivers/media/usb/rainshadow-cec/Kconfig b/drivers/media/usb/rainshadow-cec/Kconfig
index 3eb86607efb8..030ef01b1ff0 100644
--- a/drivers/media/usb/rainshadow-cec/Kconfig
+++ b/drivers/media/usb/rainshadow-cec/Kconfig
@@ -1,6 +1,7 @@
1config USB_RAINSHADOW_CEC 1config USB_RAINSHADOW_CEC
2 tristate "RainShadow Tech HDMI CEC" 2 tristate "RainShadow Tech HDMI CEC"
3 depends on USB_ACM && CEC_CORE 3 depends on USB_ACM
4 select CEC_CORE
4 select SERIO 5 select SERIO
5 select SERIO_SERPORT 6 select SERIO_SERPORT
6 ---help--- 7 ---help---
diff --git a/drivers/media/usb/rainshadow-cec/rainshadow-cec.c b/drivers/media/usb/rainshadow-cec/rainshadow-cec.c
index 541ca543f71f..4126552c9055 100644
--- a/drivers/media/usb/rainshadow-cec/rainshadow-cec.c
+++ b/drivers/media/usb/rainshadow-cec/rainshadow-cec.c
@@ -119,7 +119,7 @@ static void rain_irq_work_handler(struct work_struct *work)
119 119
120 while (true) { 120 while (true) {
121 unsigned long flags; 121 unsigned long flags;
122 bool exit_loop; 122 bool exit_loop = false;
123 char data; 123 char data;
124 124
125 spin_lock_irqsave(&rain->buf_lock, flags); 125 spin_lock_irqsave(&rain->buf_lock, flags);
@@ -336,6 +336,7 @@ static int rain_connect(struct serio *serio, struct serio_driver *drv)
336 serio_set_drvdata(serio, rain); 336 serio_set_drvdata(serio, rain);
337 INIT_WORK(&rain->work, rain_irq_work_handler); 337 INIT_WORK(&rain->work, rain_irq_work_handler);
338 mutex_init(&rain->write_lock); 338 mutex_init(&rain->write_lock);
339 spin_lock_init(&rain->buf_lock);
339 340
340 err = serio_open(serio, drv); 341 err = serio_open(serio, drv);
341 if (err) 342 if (err)
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index 94afbbf92807..c0175ea7e7ad 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -868,7 +868,7 @@ EXPORT_SYMBOL_GPL(vb2_core_create_bufs);
868 868
869void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no) 869void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no)
870{ 870{
871 if (plane_no > vb->num_planes || !vb->planes[plane_no].mem_priv) 871 if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv)
872 return NULL; 872 return NULL;
873 873
874 return call_ptr_memop(vb, vaddr, vb->planes[plane_no].mem_priv); 874 return call_ptr_memop(vb, vaddr, vb->planes[plane_no].mem_priv);
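The vb2_plane_vaddr() hunk above is a one-character bounds fix: for an array of num_planes entries the valid indices are 0..num_planes-1, so the guard must be '>=' rather than '>', which let plane_no == num_planes read one element past the end. Minimal illustration over a generic pointer array, not the vb2 types:

#include <stddef.h>

static void *plane_vaddr(void **planes, unsigned int num_planes,
                         unsigned int plane_no)
{
        if (plane_no >= num_planes)     /* '>' would allow planes[num_planes] */
                return NULL;
        return planes[plane_no];
}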
diff --git a/drivers/memory/atmel-ebi.c b/drivers/memory/atmel-ebi.c
index 35910f945bfa..99e644cda4d1 100644
--- a/drivers/memory/atmel-ebi.c
+++ b/drivers/memory/atmel-ebi.c
@@ -581,7 +581,7 @@ static int atmel_ebi_probe(struct platform_device *pdev)
581 return of_platform_populate(np, NULL, NULL, dev); 581 return of_platform_populate(np, NULL, NULL, dev);
582} 582}
583 583
584static int atmel_ebi_resume(struct device *dev) 584static __maybe_unused int atmel_ebi_resume(struct device *dev)
585{ 585{
586 struct atmel_ebi *ebi = dev_get_drvdata(dev); 586 struct atmel_ebi *ebi = dev_get_drvdata(dev);
587 struct atmel_ebi_dev *ebid; 587 struct atmel_ebi_dev *ebid;
diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
index bf0fe0137dfe..6d1b4b707cc2 100644
--- a/drivers/memory/omap-gpmc.c
+++ b/drivers/memory/omap-gpmc.c
@@ -512,7 +512,7 @@ static void gpmc_cs_show_timings(int cs, const char *desc)
512 pr_info("gpmc cs%i access configuration:\n", cs); 512 pr_info("gpmc cs%i access configuration:\n", cs);
513 GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 4, 4, "time-para-granularity"); 513 GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 4, 4, "time-para-granularity");
514 GPMC_GET_RAW(GPMC_CS_CONFIG1, 8, 9, "mux-add-data"); 514 GPMC_GET_RAW(GPMC_CS_CONFIG1, 8, 9, "mux-add-data");
515 GPMC_GET_RAW_MAX(GPMC_CS_CONFIG1, 12, 13, 515 GPMC_GET_RAW_SHIFT_MAX(GPMC_CS_CONFIG1, 12, 13, 1,
516 GPMC_CONFIG1_DEVICESIZE_MAX, "device-width"); 516 GPMC_CONFIG1_DEVICESIZE_MAX, "device-width");
517 GPMC_GET_RAW(GPMC_CS_CONFIG1, 16, 17, "wait-pin"); 517 GPMC_GET_RAW(GPMC_CS_CONFIG1, 16, 17, "wait-pin");
518 GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 21, 21, "wait-on-write"); 518 GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 21, 21, "wait-on-write");
diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c
index 75488e65cd96..8d46e3ad9529 100644
--- a/drivers/mfd/arizona-core.c
+++ b/drivers/mfd/arizona-core.c
@@ -245,8 +245,7 @@ static int arizona_poll_reg(struct arizona *arizona,
245 int ret; 245 int ret;
246 246
247 ret = regmap_read_poll_timeout(arizona->regmap, 247 ret = regmap_read_poll_timeout(arizona->regmap,
248 ARIZONA_INTERRUPT_RAW_STATUS_5, val, 248 reg, val, ((val & mask) == target),
249 ((val & mask) == target),
250 ARIZONA_REG_POLL_DELAY_US, 249 ARIZONA_REG_POLL_DELAY_US,
251 timeout_ms * 1000); 250 timeout_ms * 1000);
252 if (ret) 251 if (ret)
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 2cba76e6fa3c..07bbd4cc1852 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -492,6 +492,7 @@ config ASPEED_LPC_CTRL
492 492
493config PCI_ENDPOINT_TEST 493config PCI_ENDPOINT_TEST
494 depends on PCI 494 depends on PCI
495 select CRC32
495 tristate "PCI Endpoint Test driver" 496 tristate "PCI Endpoint Test driver"
496 ---help--- 497 ---help---
497 Enable this configuration option to enable the host side test driver 498 Enable this configuration option to enable the host side test driver
diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c
index 4472ce11f98d..8c32040b9c09 100644
--- a/drivers/misc/cxl/context.c
+++ b/drivers/misc/cxl/context.c
@@ -45,7 +45,7 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master)
45 mutex_init(&ctx->mapping_lock); 45 mutex_init(&ctx->mapping_lock);
46 ctx->mapping = NULL; 46 ctx->mapping = NULL;
47 47
48 if (cxl_is_psl8(afu)) { 48 if (cxl_is_power8()) {
49 spin_lock_init(&ctx->sste_lock); 49 spin_lock_init(&ctx->sste_lock);
50 50
51 /* 51 /*
@@ -189,7 +189,7 @@ int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma)
189 if (start + len > ctx->afu->adapter->ps_size) 189 if (start + len > ctx->afu->adapter->ps_size)
190 return -EINVAL; 190 return -EINVAL;
191 191
192 if (cxl_is_psl9(ctx->afu)) { 192 if (cxl_is_power9()) {
193 /* 193 /*
194 * Make sure there is a valid problem state 194 * Make sure there is a valid problem state
195 * area space for this AFU. 195 * area space for this AFU.
@@ -324,7 +324,7 @@ static void reclaim_ctx(struct rcu_head *rcu)
324{ 324{
325 struct cxl_context *ctx = container_of(rcu, struct cxl_context, rcu); 325 struct cxl_context *ctx = container_of(rcu, struct cxl_context, rcu);
326 326
327 if (cxl_is_psl8(ctx->afu)) 327 if (cxl_is_power8())
328 free_page((u64)ctx->sstp); 328 free_page((u64)ctx->sstp);
329 if (ctx->ff_page) 329 if (ctx->ff_page)
330 __free_page(ctx->ff_page); 330 __free_page(ctx->ff_page);
diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h
index c8568ea7c518..a03f8e7535e5 100644
--- a/drivers/misc/cxl/cxl.h
+++ b/drivers/misc/cxl/cxl.h
@@ -357,6 +357,7 @@ static const cxl_p2n_reg_t CXL_PSL_WED_An = {0x0A0};
357#define CXL_PSL9_DSISR_An_PF_RGP 0x0000000000000090ULL /* PTE not found (Radix Guest (parent)) 0b10010000 */ 357#define CXL_PSL9_DSISR_An_PF_RGP 0x0000000000000090ULL /* PTE not found (Radix Guest (parent)) 0b10010000 */
358#define CXL_PSL9_DSISR_An_PF_HRH 0x0000000000000094ULL /* PTE not found (HPT/Radix Host) 0b10010100 */ 358#define CXL_PSL9_DSISR_An_PF_HRH 0x0000000000000094ULL /* PTE not found (HPT/Radix Host) 0b10010100 */
359#define CXL_PSL9_DSISR_An_PF_STEG 0x000000000000009CULL /* PTE not found (STEG VA) 0b10011100 */ 359#define CXL_PSL9_DSISR_An_PF_STEG 0x000000000000009CULL /* PTE not found (STEG VA) 0b10011100 */
360#define CXL_PSL9_DSISR_An_URTCH 0x00000000000000B4ULL /* Unsupported Radix Tree Configuration 0b10110100 */
360 361
361/****** CXL_PSL_TFC_An ******************************************************/ 362/****** CXL_PSL_TFC_An ******************************************************/
362#define CXL_PSL_TFC_An_A (1ull << (63-28)) /* Acknowledge non-translation fault */ 363#define CXL_PSL_TFC_An_A (1ull << (63-28)) /* Acknowledge non-translation fault */
@@ -844,24 +845,15 @@ static inline bool cxl_is_power8(void)
844 845
845static inline bool cxl_is_power9(void) 846static inline bool cxl_is_power9(void)
846{ 847{
847 /* intermediate solution */ 848 if (pvr_version_is(PVR_POWER9))
848 if (!cxl_is_power8() &&
849 (cpu_has_feature(CPU_FTRS_POWER9) ||
850 cpu_has_feature(CPU_FTR_POWER9_DD1)))
851 return true; 849 return true;
852 return false; 850 return false;
853} 851}
854 852
855static inline bool cxl_is_psl8(struct cxl_afu *afu) 853static inline bool cxl_is_power9_dd1(void)
856{ 854{
857 if (afu->adapter->caia_major == 1) 855 if ((pvr_version_is(PVR_POWER9)) &&
858 return true; 856 cpu_has_feature(CPU_FTR_POWER9_DD1))
859 return false;
860}
861
862static inline bool cxl_is_psl9(struct cxl_afu *afu)
863{
864 if (afu->adapter->caia_major == 2)
865 return true; 857 return true;
866 return false; 858 return false;
867} 859}
diff --git a/drivers/misc/cxl/fault.c b/drivers/misc/cxl/fault.c
index 5344448f514e..c79e39bad7a4 100644
--- a/drivers/misc/cxl/fault.c
+++ b/drivers/misc/cxl/fault.c
@@ -187,7 +187,7 @@ static struct mm_struct *get_mem_context(struct cxl_context *ctx)
187 187
188static bool cxl_is_segment_miss(struct cxl_context *ctx, u64 dsisr) 188static bool cxl_is_segment_miss(struct cxl_context *ctx, u64 dsisr)
189{ 189{
190 if ((cxl_is_psl8(ctx->afu)) && (dsisr & CXL_PSL_DSISR_An_DS)) 190 if ((cxl_is_power8() && (dsisr & CXL_PSL_DSISR_An_DS)))
191 return true; 191 return true;
192 192
193 return false; 193 return false;
@@ -195,16 +195,23 @@ static bool cxl_is_segment_miss(struct cxl_context *ctx, u64 dsisr)
195 195
196static bool cxl_is_page_fault(struct cxl_context *ctx, u64 dsisr) 196static bool cxl_is_page_fault(struct cxl_context *ctx, u64 dsisr)
197{ 197{
198 if ((cxl_is_psl8(ctx->afu)) && (dsisr & CXL_PSL_DSISR_An_DM)) 198 u64 crs; /* Translation Checkout Response Status */
199 return true;
200 199
201 if ((cxl_is_psl9(ctx->afu)) && 200 if ((cxl_is_power8()) && (dsisr & CXL_PSL_DSISR_An_DM))
202 ((dsisr & CXL_PSL9_DSISR_An_CO_MASK) &
203 (CXL_PSL9_DSISR_An_PF_SLR | CXL_PSL9_DSISR_An_PF_RGC |
204 CXL_PSL9_DSISR_An_PF_RGP | CXL_PSL9_DSISR_An_PF_HRH |
205 CXL_PSL9_DSISR_An_PF_STEG)))
206 return true; 201 return true;
207 202
203 if (cxl_is_power9()) {
204 crs = (dsisr & CXL_PSL9_DSISR_An_CO_MASK);
205 if ((crs == CXL_PSL9_DSISR_An_PF_SLR) ||
206 (crs == CXL_PSL9_DSISR_An_PF_RGC) ||
207 (crs == CXL_PSL9_DSISR_An_PF_RGP) ||
208 (crs == CXL_PSL9_DSISR_An_PF_HRH) ||
209 (crs == CXL_PSL9_DSISR_An_PF_STEG) ||
210 (crs == CXL_PSL9_DSISR_An_URTCH)) {
211 return true;
212 }
213 }
214
208 return false; 215 return false;
209} 216}
210 217
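The cxl_is_page_fault() hunk above changes how the PSL9 translation checkout response status is matched: the values are multi-bit codes, not independent flag bits, so OR-ing several codes together and AND-ing the result against the status matches any unrelated code that happens to share a bit. The rewrite extracts the field and compares for equality. Sketch below; CO_MASK's value is an assumption here, while PF_RGP/PF_HRH are taken from the cxl.h hunk above:

#include <assert.h>

#define SK_CO_MASK 0x00000000000000fcULL   /* assumed field mask */
#define SK_PF_RGP  0x0000000000000090ULL
#define SK_PF_HRH  0x0000000000000094ULL

static int buggy_match(unsigned long long dsisr)
{
        /* bit-overlap test: wrong for encoded status codes */
        return ((dsisr & SK_CO_MASK) & (SK_PF_RGP | SK_PF_HRH)) != 0;
}

static int fixed_match(unsigned long long dsisr)
{
        unsigned long long crs = dsisr & SK_CO_MASK;

        return crs == SK_PF_RGP || crs == SK_PF_HRH;
}

int main(void)
{
        /* 0x80: some other status sharing a bit with 0x90/0x94 */
        assert(buggy_match(0x80));      /* false positive */
        assert(!fixed_match(0x80));
        assert(fixed_match(0x94));
        return 0;
}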
diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c
index 17b433f1ce23..0761271d68c5 100644
--- a/drivers/misc/cxl/file.c
+++ b/drivers/misc/cxl/file.c
@@ -159,11 +159,8 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,
159 159
160 /* Do this outside the status_mutex to avoid a circular dependency with 160 /* Do this outside the status_mutex to avoid a circular dependency with
161 * the locking in cxl_mmap_fault() */ 161 * the locking in cxl_mmap_fault() */
162 if (copy_from_user(&work, uwork, 162 if (copy_from_user(&work, uwork, sizeof(work)))
163 sizeof(struct cxl_ioctl_start_work))) { 163 return -EFAULT;
164 rc = -EFAULT;
165 goto out;
166 }
167 164
168 mutex_lock(&ctx->status_mutex); 165 mutex_lock(&ctx->status_mutex);
169 if (ctx->status != OPENED) { 166 if (ctx->status != OPENED) {
diff --git a/drivers/misc/cxl/main.c b/drivers/misc/cxl/main.c
index 1703655072b1..c1ba0d42cbc8 100644
--- a/drivers/misc/cxl/main.c
+++ b/drivers/misc/cxl/main.c
@@ -329,8 +329,15 @@ static int __init init_cxl(void)
329 329
330 cxl_debugfs_init(); 330 cxl_debugfs_init();
331 331
332 if ((rc = register_cxl_calls(&cxl_calls))) 332 /*
333 goto err; 333 * we don't register the callback on P9. The SLB callback is only
334 * used for the PSL8 MMU and CX4.
335 */
336 if (cxl_is_power8()) {
337 rc = register_cxl_calls(&cxl_calls);
338 if (rc)
339 goto err;
340 }
334 341
335 if (cpu_has_feature(CPU_FTR_HVMODE)) { 342 if (cpu_has_feature(CPU_FTR_HVMODE)) {
336 cxl_ops = &cxl_native_ops; 343 cxl_ops = &cxl_native_ops;
@@ -347,7 +354,8 @@ static int __init init_cxl(void)
347 354
348 return 0; 355 return 0;
349err1: 356err1:
350 unregister_cxl_calls(&cxl_calls); 357 if (cxl_is_power8())
358 unregister_cxl_calls(&cxl_calls);
351err: 359err:
352 cxl_debugfs_exit(); 360 cxl_debugfs_exit();
353 cxl_file_exit(); 361 cxl_file_exit();
@@ -366,7 +374,8 @@ static void exit_cxl(void)
366 374
367 cxl_debugfs_exit(); 375 cxl_debugfs_exit();
368 cxl_file_exit(); 376 cxl_file_exit();
369 unregister_cxl_calls(&cxl_calls); 377 if (cxl_is_power8())
378 unregister_cxl_calls(&cxl_calls);
370 idr_destroy(&cxl_adapter_idr); 379 idr_destroy(&cxl_adapter_idr);
371} 380}
372 381
diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c
index 871a2f09c718..2b2f8894149d 100644
--- a/drivers/misc/cxl/native.c
+++ b/drivers/misc/cxl/native.c
@@ -105,11 +105,16 @@ static int native_afu_reset(struct cxl_afu *afu)
105 CXL_AFU_Cntl_An_RS_MASK | CXL_AFU_Cntl_An_ES_MASK, 105 CXL_AFU_Cntl_An_RS_MASK | CXL_AFU_Cntl_An_ES_MASK,
106 false); 106 false);
107 107
108 /* Re-enable any masked interrupts */ 108 /*
109 serr = cxl_p1n_read(afu, CXL_PSL_SERR_An); 109 * Re-enable any masked interrupts when the AFU is not
110 serr &= ~CXL_PSL_SERR_An_IRQ_MASKS; 110 * activated to avoid side effects after attaching a process
111 cxl_p1n_write(afu, CXL_PSL_SERR_An, serr); 111 * in dedicated mode.
112 112 */
113 if (afu->current_mode == 0) {
114 serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
115 serr &= ~CXL_PSL_SERR_An_IRQ_MASKS;
116 cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);
117 }
113 118
114 return rc; 119 return rc;
115} 120}
@@ -139,9 +144,9 @@ int cxl_psl_purge(struct cxl_afu *afu)
139 144
140 pr_devel("PSL purge request\n"); 145 pr_devel("PSL purge request\n");
141 146
142 if (cxl_is_psl8(afu)) 147 if (cxl_is_power8())
143 trans_fault = CXL_PSL_DSISR_TRANS; 148 trans_fault = CXL_PSL_DSISR_TRANS;
144 if (cxl_is_psl9(afu)) 149 if (cxl_is_power9())
145 trans_fault = CXL_PSL9_DSISR_An_TF; 150 trans_fault = CXL_PSL9_DSISR_An_TF;
146 151
147 if (!cxl_ops->link_ok(afu->adapter, afu)) { 152 if (!cxl_ops->link_ok(afu->adapter, afu)) {
@@ -603,7 +608,7 @@ static u64 calculate_sr(struct cxl_context *ctx)
603 if (!test_tsk_thread_flag(current, TIF_32BIT)) 608 if (!test_tsk_thread_flag(current, TIF_32BIT))
604 sr |= CXL_PSL_SR_An_SF; 609 sr |= CXL_PSL_SR_An_SF;
605 } 610 }
606 if (cxl_is_psl9(ctx->afu)) { 611 if (cxl_is_power9()) {
607 if (radix_enabled()) 612 if (radix_enabled())
608 sr |= CXL_PSL_SR_An_XLAT_ror; 613 sr |= CXL_PSL_SR_An_XLAT_ror;
609 else 614 else
@@ -1117,10 +1122,10 @@ static irqreturn_t native_handle_psl_slice_error(struct cxl_context *ctx,
1117 1122
1118static bool cxl_is_translation_fault(struct cxl_afu *afu, u64 dsisr) 1123static bool cxl_is_translation_fault(struct cxl_afu *afu, u64 dsisr)
1119{ 1124{
1120 if ((cxl_is_psl8(afu)) && (dsisr & CXL_PSL_DSISR_TRANS)) 1125 if ((cxl_is_power8()) && (dsisr & CXL_PSL_DSISR_TRANS))
1121 return true; 1126 return true;
1122 1127
1123 if ((cxl_is_psl9(afu)) && (dsisr & CXL_PSL9_DSISR_An_TF)) 1128 if ((cxl_is_power9()) && (dsisr & CXL_PSL9_DSISR_An_TF))
1124 return true; 1129 return true;
1125 1130
1126 return false; 1131 return false;
@@ -1194,10 +1199,10 @@ static void native_irq_wait(struct cxl_context *ctx)
1194 if (ph != ctx->pe) 1199 if (ph != ctx->pe)
1195 return; 1200 return;
1196 dsisr = cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An); 1201 dsisr = cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An);
1197 if (cxl_is_psl8(ctx->afu) && 1202 if (cxl_is_power8() &&
1198 ((dsisr & CXL_PSL_DSISR_PENDING) == 0)) 1203 ((dsisr & CXL_PSL_DSISR_PENDING) == 0))
1199 return; 1204 return;
1200 if (cxl_is_psl9(ctx->afu) && 1205 if (cxl_is_power9() &&
1201 ((dsisr & CXL_PSL9_DSISR_PENDING) == 0)) 1206 ((dsisr & CXL_PSL9_DSISR_PENDING) == 0))
1202 return; 1207 return;
1203 /* 1208 /*
@@ -1302,13 +1307,16 @@ int cxl_native_register_psl_err_irq(struct cxl *adapter)
1302 1307
1303void cxl_native_release_psl_err_irq(struct cxl *adapter) 1308void cxl_native_release_psl_err_irq(struct cxl *adapter)
1304{ 1309{
1305 if (adapter->native->err_virq != irq_find_mapping(NULL, adapter->native->err_hwirq)) 1310 if (adapter->native->err_virq == 0 ||
1311 adapter->native->err_virq !=
1312 irq_find_mapping(NULL, adapter->native->err_hwirq))
1306 return; 1313 return;
1307 1314
1308 cxl_p1_write(adapter, CXL_PSL_ErrIVTE, 0x0000000000000000); 1315 cxl_p1_write(adapter, CXL_PSL_ErrIVTE, 0x0000000000000000);
1309 cxl_unmap_irq(adapter->native->err_virq, adapter); 1316 cxl_unmap_irq(adapter->native->err_virq, adapter);
1310 cxl_ops->release_one_irq(adapter, adapter->native->err_hwirq); 1317 cxl_ops->release_one_irq(adapter, adapter->native->err_hwirq);
1311 kfree(adapter->irq_name); 1318 kfree(adapter->irq_name);
1319 adapter->native->err_virq = 0;
1312} 1320}
1313 1321
1314int cxl_native_register_serr_irq(struct cxl_afu *afu) 1322int cxl_native_register_serr_irq(struct cxl_afu *afu)
@@ -1346,13 +1354,15 @@ int cxl_native_register_serr_irq(struct cxl_afu *afu)
1346 1354
1347void cxl_native_release_serr_irq(struct cxl_afu *afu) 1355void cxl_native_release_serr_irq(struct cxl_afu *afu)
1348{ 1356{
1349 if (afu->serr_virq != irq_find_mapping(NULL, afu->serr_hwirq)) 1357 if (afu->serr_virq == 0 ||
1358 afu->serr_virq != irq_find_mapping(NULL, afu->serr_hwirq))
1350 return; 1359 return;
1351 1360
1352 cxl_p1n_write(afu, CXL_PSL_SERR_An, 0x0000000000000000); 1361 cxl_p1n_write(afu, CXL_PSL_SERR_An, 0x0000000000000000);
1353 cxl_unmap_irq(afu->serr_virq, afu); 1362 cxl_unmap_irq(afu->serr_virq, afu);
1354 cxl_ops->release_one_irq(afu->adapter, afu->serr_hwirq); 1363 cxl_ops->release_one_irq(afu->adapter, afu->serr_hwirq);
1355 kfree(afu->err_irq_name); 1364 kfree(afu->err_irq_name);
1365 afu->serr_virq = 0;
1356} 1366}
1357 1367
1358int cxl_native_register_psl_irq(struct cxl_afu *afu) 1368int cxl_native_register_psl_irq(struct cxl_afu *afu)
@@ -1375,12 +1385,15 @@ int cxl_native_register_psl_irq(struct cxl_afu *afu)
1375 1385
1376void cxl_native_release_psl_irq(struct cxl_afu *afu) 1386void cxl_native_release_psl_irq(struct cxl_afu *afu)
1377{ 1387{
1378 if (afu->native->psl_virq != irq_find_mapping(NULL, afu->native->psl_hwirq)) 1388 if (afu->native->psl_virq == 0 ||
1389 afu->native->psl_virq !=
1390 irq_find_mapping(NULL, afu->native->psl_hwirq))
1379 return; 1391 return;
1380 1392
1381 cxl_unmap_irq(afu->native->psl_virq, afu); 1393 cxl_unmap_irq(afu->native->psl_virq, afu);
1382 cxl_ops->release_one_irq(afu->adapter, afu->native->psl_hwirq); 1394 cxl_ops->release_one_irq(afu->adapter, afu->native->psl_hwirq);
1383 kfree(afu->psl_irq_name); 1395 kfree(afu->psl_irq_name);
1396 afu->native->psl_virq = 0;
1384} 1397}
1385 1398
1386static void recover_psl_err(struct cxl_afu *afu, u64 errstat) 1399static void recover_psl_err(struct cxl_afu *afu, u64 errstat)
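
The three release paths above all gain the same guard: a virq of 0 now means "not mapped", teardown is skipped in that case, and the handle is cleared afterwards so a second release is a no-op. A minimal userspace sketch of that idempotent-release pattern (hypothetical names, not the cxl API):

    /*
     * Sketch: 0 means "no mapping", release bails out early, and the
     * handle is zeroed after teardown so repeated calls are harmless.
     */
    #include <stdio.h>

    struct irq_ctx {
            unsigned int virq;      /* 0 means "no mapping" */
    };

    static void release_irq(struct irq_ctx *ctx)
    {
            if (ctx->virq == 0)     /* already released or never mapped */
                    return;
            printf("unmapping virq %u\n", ctx->virq);
            ctx->virq = 0;          /* make a second release a no-op */
    }

    int main(void)
    {
            struct irq_ctx ctx = { .virq = 42 };

            release_irq(&ctx);      /* frees the mapping */
            release_irq(&ctx);      /* returns early, nothing to do */
            return 0;
    }
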
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
index 6dc1ee5b92c9..1eb9859809bf 100644
--- a/drivers/misc/cxl/pci.c
+++ b/drivers/misc/cxl/pci.c
@@ -436,7 +436,7 @@ static int init_implementation_adapter_regs_psl9(struct cxl *adapter, struct pci
436 /* nMMU_ID Defaults to: b’000001001’*/ 436 /* nMMU_ID Defaults to: b’000001001’*/
437 xsl_dsnctl |= ((u64)0x09 << (63-28)); 437 xsl_dsnctl |= ((u64)0x09 << (63-28));
438 438
439 if (cxl_is_power9() && !cpu_has_feature(CPU_FTR_POWER9_DD1)) { 439 if (!(cxl_is_power9_dd1())) {
440 /* 440 /*
441 * Used to identify CAPI packets which should be sorted into 441 * Used to identify CAPI packets which should be sorted into
442 * the Non-Blocking queues by the PHB. This field should match 442 * the Non-Blocking queues by the PHB. This field should match
@@ -491,7 +491,7 @@ static int init_implementation_adapter_regs_psl9(struct cxl *adapter, struct pci
491 cxl_p1_write(adapter, CXL_PSL9_APCDEDTYPE, 0x40000003FFFF0000ULL); 491 cxl_p1_write(adapter, CXL_PSL9_APCDEDTYPE, 0x40000003FFFF0000ULL);
492 492
493 /* Disable vc dd1 fix */ 493 /* Disable vc dd1 fix */
494 if ((cxl_is_power9() && cpu_has_feature(CPU_FTR_POWER9_DD1))) 494 if (cxl_is_power9_dd1())
495 cxl_p1_write(adapter, CXL_PSL9_GP_CT, 0x0400000000000001ULL); 495 cxl_p1_write(adapter, CXL_PSL9_GP_CT, 0x0400000000000001ULL);
496 496
497 return 0; 497 return 0;
@@ -1439,8 +1439,7 @@ int cxl_pci_reset(struct cxl *adapter)
1439 * The adapter is about to be reset, so ignore errors. 1439 * The adapter is about to be reset, so ignore errors.
1440 * Not supported on P9 DD1 1440 * Not supported on P9 DD1
1441 */ 1441 */
1442 if ((cxl_is_power8()) || 1442 if ((cxl_is_power8()) || (!(cxl_is_power9_dd1())))
1443 ((cxl_is_power9() && !cpu_has_feature(CPU_FTR_POWER9_DD1))))
1444 cxl_data_cache_flush(adapter); 1443 cxl_data_cache_flush(adapter);
1445 1444
1446 /* pcie_warm_reset requests a fundamental pci reset which includes a 1445 /* pcie_warm_reset requests a fundamental pci reset which includes a
@@ -1750,7 +1749,6 @@ static const struct cxl_service_layer_ops psl9_ops = {
1750 .debugfs_add_adapter_regs = cxl_debugfs_add_adapter_regs_psl9, 1749 .debugfs_add_adapter_regs = cxl_debugfs_add_adapter_regs_psl9,
1751 .debugfs_add_afu_regs = cxl_debugfs_add_afu_regs_psl9, 1750 .debugfs_add_afu_regs = cxl_debugfs_add_afu_regs_psl9,
1752 .psl_irq_dump_registers = cxl_native_irq_dump_regs_psl9, 1751 .psl_irq_dump_registers = cxl_native_irq_dump_regs_psl9,
1753 .err_irq_dump_registers = cxl_native_err_irq_dump_regs,
1754 .debugfs_stop_trace = cxl_stop_trace_psl9, 1752 .debugfs_stop_trace = cxl_stop_trace_psl9,
1755 .write_timebase_ctrl = write_timebase_ctrl_psl9, 1753 .write_timebase_ctrl = write_timebase_ctrl_psl9,
1756 .timebase_read = timebase_read_psl9, 1754 .timebase_read = timebase_read_psl9,
@@ -1889,8 +1887,7 @@ static void cxl_pci_remove_adapter(struct cxl *adapter)
 1889	 * Flush adapter datacache as it's about to be removed.		 1887	 * Flush adapter datacache as it's about to be removed.
1890 * Not supported on P9 DD1. 1888 * Not supported on P9 DD1.
1891 */ 1889 */
1892 if ((cxl_is_power8()) || 1890 if ((cxl_is_power8()) || (!(cxl_is_power9_dd1())))
1893 ((cxl_is_power9() && !cpu_has_feature(CPU_FTR_POWER9_DD1))))
1894 cxl_data_cache_flush(adapter); 1891 cxl_data_cache_flush(adapter);
1895 1892
1896 cxl_deconfigure_adapter(adapter); 1893 cxl_deconfigure_adapter(adapter);
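
Several call sites above collapse "cxl_is_power9() && cpu_has_feature(CPU_FTR_POWER9_DD1)" into the cxl_is_power9_dd1() helper, which keeps the negated form readable. A plain-C sketch of the consolidated predicate (the feature flags here are invented stand-ins):

    #include <stdbool.h>
    #include <stdio.h>

    static bool is_power9 = true;
    static bool has_dd1_errata;         /* pretend this is a CPU feature bit */

    static bool is_power9_dd1(void)
    {
            return is_power9 && has_dd1_errata;
    }

    int main(void)
    {
            /* data-cache flush is supported everywhere except P9 DD1 */
            if (!is_power9_dd1())
                    printf("flush data cache\n");
            return 0;
    }
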
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index d1928fdd0f43..07aad8576334 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -763,8 +763,10 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
763{ 763{
764 struct mei_cl_device *cldev = to_mei_cl_device(dev); 764 struct mei_cl_device *cldev = to_mei_cl_device(dev);
765 const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl); 765 const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl);
766 u8 version = mei_me_cl_ver(cldev->me_cl);
766 767
767 return scnprintf(buf, PAGE_SIZE, "mei:%s:%pUl:", cldev->name, uuid); 768 return scnprintf(buf, PAGE_SIZE, "mei:%s:%pUl:%02X:",
769 cldev->name, uuid, version);
768} 770}
769static DEVICE_ATTR_RO(modalias); 771static DEVICE_ATTR_RO(modalias);
770 772
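
The modalias string gains a two-hex-digit client version field so userspace can match on it. A standalone sketch of the extended format (the UUID and version values are made up):

    #include <stdio.h>

    int main(void)
    {
            char buf[80];
            unsigned char version = 0x02;   /* mei_me_cl_ver() analogue */

            /* name:uuid:version, version printed as two hex digits */
            snprintf(buf, sizeof(buf), "mei:%s:%s:%02X:",
                     "hdcp", "00000000-0000-0000-0000-000000000000", version);
            printf("%s\n", buf);
            return 0;
    }
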
diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
index c862cd4583cc..b8069eec18cb 100644
--- a/drivers/misc/sgi-xp/xp.h
+++ b/drivers/misc/sgi-xp/xp.h
@@ -309,6 +309,9 @@ static inline enum xp_retval
309xpc_send(short partid, int ch_number, u32 flags, void *payload, 309xpc_send(short partid, int ch_number, u32 flags, void *payload,
310 u16 payload_size) 310 u16 payload_size)
311{ 311{
312 if (!xpc_interface.send)
313 return xpNotLoaded;
314
312 return xpc_interface.send(partid, ch_number, flags, payload, 315 return xpc_interface.send(partid, ch_number, flags, payload,
313 payload_size); 316 payload_size);
314} 317}
@@ -317,6 +320,9 @@ static inline enum xp_retval
317xpc_send_notify(short partid, int ch_number, u32 flags, void *payload, 320xpc_send_notify(short partid, int ch_number, u32 flags, void *payload,
318 u16 payload_size, xpc_notify_func func, void *key) 321 u16 payload_size, xpc_notify_func func, void *key)
319{ 322{
323 if (!xpc_interface.send_notify)
324 return xpNotLoaded;
325
320 return xpc_interface.send_notify(partid, ch_number, flags, payload, 326 return xpc_interface.send_notify(partid, ch_number, flags, payload,
321 payload_size, func, key); 327 payload_size, func, key);
322} 328}
@@ -324,12 +330,16 @@ xpc_send_notify(short partid, int ch_number, u32 flags, void *payload,
324static inline void 330static inline void
325xpc_received(short partid, int ch_number, void *payload) 331xpc_received(short partid, int ch_number, void *payload)
326{ 332{
327 return xpc_interface.received(partid, ch_number, payload); 333 if (xpc_interface.received)
334 xpc_interface.received(partid, ch_number, payload);
328} 335}
329 336
330static inline enum xp_retval 337static inline enum xp_retval
331xpc_partid_to_nasids(short partid, void *nasids) 338xpc_partid_to_nasids(short partid, void *nasids)
332{ 339{
340 if (!xpc_interface.partid_to_nasids)
341 return xpNotLoaded;
342
333 return xpc_interface.partid_to_nasids(partid, nasids); 343 return xpc_interface.partid_to_nasids(partid, nasids);
334} 344}
335 345
diff --git a/drivers/misc/sgi-xp/xp_main.c b/drivers/misc/sgi-xp/xp_main.c
index 01be66d02ca8..6d7f557fd1c1 100644
--- a/drivers/misc/sgi-xp/xp_main.c
+++ b/drivers/misc/sgi-xp/xp_main.c
@@ -69,23 +69,9 @@ struct xpc_registration xpc_registrations[XPC_MAX_NCHANNELS];
69EXPORT_SYMBOL_GPL(xpc_registrations); 69EXPORT_SYMBOL_GPL(xpc_registrations);
70 70
71/* 71/*
72 * Initialize the XPC interface to indicate that XPC isn't loaded. 72 * Initialize the XPC interface to NULL to indicate that XPC isn't loaded.
73 */ 73 */
74static enum xp_retval 74struct xpc_interface xpc_interface = { };
75xpc_notloaded(void)
76{
77 return xpNotLoaded;
78}
79
80struct xpc_interface xpc_interface = {
81 (void (*)(int))xpc_notloaded,
82 (void (*)(int))xpc_notloaded,
83 (enum xp_retval(*)(short, int, u32, void *, u16))xpc_notloaded,
84 (enum xp_retval(*)(short, int, u32, void *, u16, xpc_notify_func,
85 void *))xpc_notloaded,
86 (void (*)(short, int, void *))xpc_notloaded,
87 (enum xp_retval(*)(short, void *))xpc_notloaded
88};
89EXPORT_SYMBOL_GPL(xpc_interface); 75EXPORT_SYMBOL_GPL(xpc_interface);
90 76
91/* 77/*
@@ -115,17 +101,7 @@ EXPORT_SYMBOL_GPL(xpc_set_interface);
115void 101void
116xpc_clear_interface(void) 102xpc_clear_interface(void)
117{ 103{
118 xpc_interface.connect = (void (*)(int))xpc_notloaded; 104 memset(&xpc_interface, 0, sizeof(xpc_interface));
119 xpc_interface.disconnect = (void (*)(int))xpc_notloaded;
120 xpc_interface.send = (enum xp_retval(*)(short, int, u32, void *, u16))
121 xpc_notloaded;
122 xpc_interface.send_notify = (enum xp_retval(*)(short, int, u32, void *,
123 u16, xpc_notify_func,
124 void *))xpc_notloaded;
125 xpc_interface.received = (void (*)(short, int, void *))
126 xpc_notloaded;
127 xpc_interface.partid_to_nasids = (enum xp_retval(*)(short, void *))
128 xpc_notloaded;
129} 105}
130EXPORT_SYMBOL_GPL(xpc_clear_interface); 106EXPORT_SYMBOL_GPL(xpc_clear_interface);
131 107
@@ -188,7 +164,8 @@ xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size,
188 164
189 mutex_unlock(&registration->mutex); 165 mutex_unlock(&registration->mutex);
190 166
191 xpc_interface.connect(ch_number); 167 if (xpc_interface.connect)
168 xpc_interface.connect(ch_number);
192 169
193 return xpSuccess; 170 return xpSuccess;
194} 171}
@@ -237,7 +214,8 @@ xpc_disconnect(int ch_number)
237 registration->assigned_limit = 0; 214 registration->assigned_limit = 0;
238 registration->idle_limit = 0; 215 registration->idle_limit = 0;
239 216
240 xpc_interface.disconnect(ch_number); 217 if (xpc_interface.disconnect)
218 xpc_interface.disconnect(ch_number);
241 219
242 mutex_unlock(&registration->mutex); 220 mutex_unlock(&registration->mutex);
243 221
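
Rather than pre-loading every hook with a casted xpc_notloaded() stub, the table is now left zeroed and each inline wrapper checks for NULL before calling, returning xpNotLoaded itself when XPC is absent. A self-contained sketch of that pattern (names are illustrative, not the xp API):

    #include <stdio.h>
    #include <string.h>

    enum retval { SUCCESS, NOT_LOADED };

    struct iface {
            enum retval (*send)(int ch, const void *buf, size_t len);
    };

    static struct iface iface;          /* zero-initialized: no hooks set */

    static enum retval do_send(int ch, const void *buf, size_t len)
    {
            if (!iface.send)
                    return NOT_LOADED;  /* module not registered yet */
            return iface.send(ch, buf, len);
    }

    static void clear_iface(void)
    {
            memset(&iface, 0, sizeof(iface));   /* back to "not loaded" */
    }

    int main(void)
    {
            printf("send -> %d (expect NOT_LOADED)\n", do_send(0, "x", 1));
            clear_iface();
            return 0;
    }
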
diff --git a/drivers/mmc/core/pwrseq_simple.c b/drivers/mmc/core/pwrseq_simple.c
index 1304160de168..13ef162cf066 100644
--- a/drivers/mmc/core/pwrseq_simple.c
+++ b/drivers/mmc/core/pwrseq_simple.c
@@ -27,6 +27,7 @@ struct mmc_pwrseq_simple {
27 struct mmc_pwrseq pwrseq; 27 struct mmc_pwrseq pwrseq;
28 bool clk_enabled; 28 bool clk_enabled;
29 u32 post_power_on_delay_ms; 29 u32 post_power_on_delay_ms;
30 u32 power_off_delay_us;
30 struct clk *ext_clk; 31 struct clk *ext_clk;
31 struct gpio_descs *reset_gpios; 32 struct gpio_descs *reset_gpios;
32}; 33};
@@ -78,6 +79,10 @@ static void mmc_pwrseq_simple_power_off(struct mmc_host *host)
78 79
79 mmc_pwrseq_simple_set_gpios_value(pwrseq, 1); 80 mmc_pwrseq_simple_set_gpios_value(pwrseq, 1);
80 81
82 if (pwrseq->power_off_delay_us)
83 usleep_range(pwrseq->power_off_delay_us,
84 2 * pwrseq->power_off_delay_us);
85
81 if (!IS_ERR(pwrseq->ext_clk) && pwrseq->clk_enabled) { 86 if (!IS_ERR(pwrseq->ext_clk) && pwrseq->clk_enabled) {
82 clk_disable_unprepare(pwrseq->ext_clk); 87 clk_disable_unprepare(pwrseq->ext_clk);
83 pwrseq->clk_enabled = false; 88 pwrseq->clk_enabled = false;
@@ -119,6 +124,8 @@ static int mmc_pwrseq_simple_probe(struct platform_device *pdev)
119 124
120 device_property_read_u32(dev, "post-power-on-delay-ms", 125 device_property_read_u32(dev, "post-power-on-delay-ms",
121 &pwrseq->post_power_on_delay_ms); 126 &pwrseq->post_power_on_delay_ms);
127 device_property_read_u32(dev, "power-off-delay-us",
128 &pwrseq->power_off_delay_us);
122 129
123 pwrseq->pwrseq.dev = dev; 130 pwrseq->pwrseq.dev = dev;
124 pwrseq->pwrseq.ops = &mmc_pwrseq_simple_ops; 131 pwrseq->pwrseq.ops = &mmc_pwrseq_simple_ops;
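
The new optional power-off-delay-us property mirrors the existing post-power-on delay: if set, the power-off path sleeps for at least that long, passing usleep_range() a 2x upper bound so the scheduler can coalesce timers. A userspace stand-in for the same logic (the sleep helper is only a rough analogue of usleep_range()):

    #include <stdio.h>
    #include <time.h>

    static void sleep_range_us(unsigned int min_us, unsigned int max_us)
    {
            /* the kernel may wake anywhere in [min, max]; we just use min */
            struct timespec ts = {
                    .tv_sec  = min_us / 1000000,
                    .tv_nsec = (min_us % 1000000) * 1000L,
            };
            (void)max_us;
            nanosleep(&ts, NULL);
    }

    int main(void)
    {
            unsigned int power_off_delay_us = 1000; /* 0 if property absent */

            if (power_off_delay_us)
                    sleep_range_us(power_off_delay_us,
                                   2 * power_off_delay_us);
            printf("power-off sequence complete\n");
            return 0;
    }
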
diff --git a/drivers/mmc/host/cavium-octeon.c b/drivers/mmc/host/cavium-octeon.c
index 772d0900026d..951d2cdd7888 100644
--- a/drivers/mmc/host/cavium-octeon.c
+++ b/drivers/mmc/host/cavium-octeon.c
@@ -108,7 +108,7 @@ static void octeon_mmc_release_bus(struct cvm_mmc_host *host)
108static void octeon_mmc_int_enable(struct cvm_mmc_host *host, u64 val) 108static void octeon_mmc_int_enable(struct cvm_mmc_host *host, u64 val)
109{ 109{
110 writeq(val, host->base + MIO_EMM_INT(host)); 110 writeq(val, host->base + MIO_EMM_INT(host));
111 if (!host->dma_active || (host->dma_active && !host->has_ciu3)) 111 if (!host->has_ciu3)
112 writeq(val, host->base + MIO_EMM_INT_EN(host)); 112 writeq(val, host->base + MIO_EMM_INT_EN(host));
113} 113}
114 114
@@ -267,7 +267,7 @@ static int octeon_mmc_probe(struct platform_device *pdev)
267 } 267 }
268 268
269 host->global_pwr_gpiod = devm_gpiod_get_optional(&pdev->dev, 269 host->global_pwr_gpiod = devm_gpiod_get_optional(&pdev->dev,
270 "power-gpios", 270 "power",
271 GPIOD_OUT_HIGH); 271 GPIOD_OUT_HIGH);
272 if (IS_ERR(host->global_pwr_gpiod)) { 272 if (IS_ERR(host->global_pwr_gpiod)) {
273 dev_err(&pdev->dev, "Invalid power GPIO\n"); 273 dev_err(&pdev->dev, "Invalid power GPIO\n");
@@ -288,11 +288,20 @@ static int octeon_mmc_probe(struct platform_device *pdev)
288 if (ret) { 288 if (ret) {
289 dev_err(&pdev->dev, "Error populating slots\n"); 289 dev_err(&pdev->dev, "Error populating slots\n");
290 octeon_mmc_set_shared_power(host, 0); 290 octeon_mmc_set_shared_power(host, 0);
291 return ret; 291 goto error;
292 } 292 }
293 i++; 293 i++;
294 } 294 }
295 return 0; 295 return 0;
296
297error:
298 for (i = 0; i < CAVIUM_MAX_MMC; i++) {
299 if (host->slot[i])
300 cvm_mmc_of_slot_remove(host->slot[i]);
301 if (host->slot_pdev[i])
302 of_platform_device_destroy(&host->slot_pdev[i]->dev, NULL);
303 }
304 return ret;
296} 305}
297 306
298static int octeon_mmc_remove(struct platform_device *pdev) 307static int octeon_mmc_remove(struct platform_device *pdev)
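
Instead of returning with earlier slots still registered, the probe error path now walks the whole slot array and tears down only what was actually created (the same loop is added to the ThunderX variant below). A minimal sketch of that partial-failure cleanup, with allocation standing in for slot setup:

    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_SLOTS 4

    int main(void)
    {
            char *slot[MAX_SLOTS] = { NULL };
            int i, ret = 0;

            for (i = 0; i < MAX_SLOTS; i++) {
                    if (i == 2) {           /* simulate a failure on slot 2 */
                            ret = -1;
                            goto error;
                    }
                    slot[i] = malloc(16);
            }
            return 0;

    error:
            for (i = 0; i < MAX_SLOTS; i++) {
                    if (slot[i])            /* skip slots never populated */
                            free(slot[i]);
            }
            return ret;
    }
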
diff --git a/drivers/mmc/host/cavium-thunderx.c b/drivers/mmc/host/cavium-thunderx.c
index fe3d77267cd6..b9cc95998799 100644
--- a/drivers/mmc/host/cavium-thunderx.c
+++ b/drivers/mmc/host/cavium-thunderx.c
@@ -146,6 +146,12 @@ static int thunder_mmc_probe(struct pci_dev *pdev,
146 return 0; 146 return 0;
147 147
148error: 148error:
149 for (i = 0; i < CAVIUM_MAX_MMC; i++) {
150 if (host->slot[i])
151 cvm_mmc_of_slot_remove(host->slot[i]);
152 if (host->slot_pdev[i])
153 of_platform_device_destroy(&host->slot_pdev[i]->dev, NULL);
154 }
149 clk_disable_unprepare(host->clk); 155 clk_disable_unprepare(host->clk);
150 return ret; 156 return ret;
151} 157}
diff --git a/drivers/mmc/host/cavium.c b/drivers/mmc/host/cavium.c
index 58b51ba6aabd..b8aaf0fdb77c 100644
--- a/drivers/mmc/host/cavium.c
+++ b/drivers/mmc/host/cavium.c
@@ -839,14 +839,14 @@ static void cvm_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
839 cvm_mmc_reset_bus(slot); 839 cvm_mmc_reset_bus(slot);
840 if (host->global_pwr_gpiod) 840 if (host->global_pwr_gpiod)
841 host->set_shared_power(host, 0); 841 host->set_shared_power(host, 0);
842 else 842 else if (!IS_ERR(mmc->supply.vmmc))
843 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0); 843 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
844 break; 844 break;
845 845
846 case MMC_POWER_UP: 846 case MMC_POWER_UP:
847 if (host->global_pwr_gpiod) 847 if (host->global_pwr_gpiod)
848 host->set_shared_power(host, 1); 848 host->set_shared_power(host, 1);
849 else 849 else if (!IS_ERR(mmc->supply.vmmc))
850 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd); 850 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
851 break; 851 break;
852 } 852 }
@@ -968,20 +968,15 @@ static int cvm_mmc_of_parse(struct device *dev, struct cvm_mmc_slot *slot)
968 return -EINVAL; 968 return -EINVAL;
969 } 969 }
970 970
971 mmc->supply.vmmc = devm_regulator_get_optional(dev, "vmmc"); 971 ret = mmc_regulator_get_supply(mmc);
972 if (IS_ERR(mmc->supply.vmmc)) { 972 if (ret == -EPROBE_DEFER)
973 if (PTR_ERR(mmc->supply.vmmc) == -EPROBE_DEFER) 973 return ret;
974 return -EPROBE_DEFER; 974 /*
975 /* 975 * Legacy Octeon firmware has no regulator entry, fall-back to
976 * Legacy Octeon firmware has no regulator entry, fall-back to 976 * a hard-coded voltage to get a sane OCR.
977 * a hard-coded voltage to get a sane OCR. 977 */
978 */ 978 if (IS_ERR(mmc->supply.vmmc))
979 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; 979 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
980 } else {
981 ret = mmc_regulator_get_ocrmask(mmc->supply.vmmc);
982 if (ret > 0)
983 mmc->ocr_avail = ret;
984 }
985 980
986 /* Common MMC bindings */ 981 /* Common MMC bindings */
987 ret = mmc_of_parse(mmc); 982 ret = mmc_of_parse(mmc);
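
The open-coded devm_regulator_get_optional()/mmc_regulator_get_ocrmask() pair is replaced by mmc_regulator_get_supply(); only -EPROBE_DEFER is propagated, and a missing regulator falls back to the hard-coded 3.3V OCR window. A sketch of that decision flow (EPROBE_DEFER is defined locally since it is a kernel-private errno; the supply helper is faked):

    #include <stdio.h>
    #include <errno.h>

    #define EPROBE_DEFER 517            /* kernel-private errno value */
    #define OCR_32_33 (1u << 20)
    #define OCR_33_34 (1u << 21)

    static int have_vmmc;               /* pretend the DT has no vmmc entry */

    static int get_supply(unsigned int *ocr)
    {
            if (!have_vmmc)
                    return -ENODEV;     /* no regulator described */
            *ocr = OCR_33_34;
            return 0;
    }

    int main(void)
    {
            unsigned int ocr = 0;
            int ret = get_supply(&ocr);

            if (ret == -EPROBE_DEFER)
                    return 1;           /* regulator exists but isn't ready */
            if (ret)                    /* legacy firmware: hard-coded voltage */
                    ocr = OCR_32_33 | OCR_33_34;

            printf("ocr_avail = 0x%x\n", ocr);
            return 0;
    }
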
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
index 1842ed341af1..de962c2d5e00 100644
--- a/drivers/mmc/host/meson-gx-mmc.c
+++ b/drivers/mmc/host/meson-gx-mmc.c
@@ -210,6 +210,15 @@ static void meson_mmc_get_transfer_mode(struct mmc_host *mmc,
210 int i; 210 int i;
211 bool use_desc_chain_mode = true; 211 bool use_desc_chain_mode = true;
212 212
213 /*
214 * Broken SDIO with AP6255-based WiFi on Khadas VIM Pro has been
215 * reported. For some strange reason this occurs in descriptor
216 * chain mode only. So let's fall back to bounce buffer mode
217 * for command SD_IO_RW_EXTENDED.
218 */
219 if (mrq->cmd->opcode == SD_IO_RW_EXTENDED)
220 return;
221
213 for_each_sg(data->sg, sg, data->sg_len, i) 222 for_each_sg(data->sg, sg, data->sg_len, i)
214 /* check for 8 byte alignment */ 223 /* check for 8 byte alignment */
215 if (sg->offset & 7) { 224 if (sg->offset & 7) {
diff --git a/drivers/mmc/host/sdhci-iproc.c b/drivers/mmc/host/sdhci-iproc.c
index 3275d4995812..61666d269771 100644
--- a/drivers/mmc/host/sdhci-iproc.c
+++ b/drivers/mmc/host/sdhci-iproc.c
@@ -187,7 +187,8 @@ static const struct sdhci_iproc_data iproc_cygnus_data = {
187}; 187};
188 188
189static const struct sdhci_pltfm_data sdhci_iproc_pltfm_data = { 189static const struct sdhci_pltfm_data sdhci_iproc_pltfm_data = {
190 .quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK, 190 .quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
191 SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
191 .quirks2 = SDHCI_QUIRK2_ACMD23_BROKEN, 192 .quirks2 = SDHCI_QUIRK2_ACMD23_BROKEN,
192 .ops = &sdhci_iproc_ops, 193 .ops = &sdhci_iproc_ops,
193}; 194};
diff --git a/drivers/mmc/host/sdhci-xenon-phy.c b/drivers/mmc/host/sdhci-xenon-phy.c
index 6356781f1cca..f7e26b031e76 100644
--- a/drivers/mmc/host/sdhci-xenon-phy.c
+++ b/drivers/mmc/host/sdhci-xenon-phy.c
@@ -787,14 +787,6 @@ int xenon_phy_adj(struct sdhci_host *host, struct mmc_ios *ios)
787 return ret; 787 return ret;
788} 788}
789 789
790void xenon_clean_phy(struct sdhci_host *host)
791{
792 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
793 struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host);
794
795 kfree(priv->phy_params);
796}
797
798static int xenon_add_phy(struct device_node *np, struct sdhci_host *host, 790static int xenon_add_phy(struct device_node *np, struct sdhci_host *host,
799 const char *phy_name) 791 const char *phy_name)
800{ 792{
@@ -819,11 +811,7 @@ static int xenon_add_phy(struct device_node *np, struct sdhci_host *host,
819 if (ret) 811 if (ret)
820 return ret; 812 return ret;
821 813
822 ret = xenon_emmc_phy_parse_param_dt(host, np, priv->phy_params); 814 return xenon_emmc_phy_parse_param_dt(host, np, priv->phy_params);
823 if (ret)
824 xenon_clean_phy(host);
825
826 return ret;
827} 815}
828 816
829int xenon_phy_parse_dt(struct device_node *np, struct sdhci_host *host) 817int xenon_phy_parse_dt(struct device_node *np, struct sdhci_host *host)
diff --git a/drivers/mmc/host/sdhci-xenon.c b/drivers/mmc/host/sdhci-xenon.c
index 67246655315b..bc1781bb070b 100644
--- a/drivers/mmc/host/sdhci-xenon.c
+++ b/drivers/mmc/host/sdhci-xenon.c
@@ -486,7 +486,7 @@ static int xenon_probe(struct platform_device *pdev)
486 486
487 err = xenon_sdhc_prepare(host); 487 err = xenon_sdhc_prepare(host);
488 if (err) 488 if (err)
489 goto clean_phy_param; 489 goto err_clk;
490 490
491 err = sdhci_add_host(host); 491 err = sdhci_add_host(host);
492 if (err) 492 if (err)
@@ -496,8 +496,6 @@ static int xenon_probe(struct platform_device *pdev)
496 496
497remove_sdhc: 497remove_sdhc:
498 xenon_sdhc_unprepare(host); 498 xenon_sdhc_unprepare(host);
499clean_phy_param:
500 xenon_clean_phy(host);
501err_clk: 499err_clk:
502 clk_disable_unprepare(pltfm_host->clk); 500 clk_disable_unprepare(pltfm_host->clk);
503free_pltfm: 501free_pltfm:
@@ -510,8 +508,6 @@ static int xenon_remove(struct platform_device *pdev)
510 struct sdhci_host *host = platform_get_drvdata(pdev); 508 struct sdhci_host *host = platform_get_drvdata(pdev);
511 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 509 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
512 510
513 xenon_clean_phy(host);
514
515 sdhci_remove_host(host, 0); 511 sdhci_remove_host(host, 0);
516 512
517 xenon_sdhc_unprepare(host); 513 xenon_sdhc_unprepare(host);
diff --git a/drivers/mmc/host/sdhci-xenon.h b/drivers/mmc/host/sdhci-xenon.h
index 6e6523ea01ce..73debb42dc2f 100644
--- a/drivers/mmc/host/sdhci-xenon.h
+++ b/drivers/mmc/host/sdhci-xenon.h
@@ -93,7 +93,6 @@ struct xenon_priv {
93}; 93};
94 94
95int xenon_phy_adj(struct sdhci_host *host, struct mmc_ios *ios); 95int xenon_phy_adj(struct sdhci_host *host, struct mmc_ios *ios);
96void xenon_clean_phy(struct sdhci_host *host);
97int xenon_phy_parse_dt(struct device_node *np, 96int xenon_phy_parse_dt(struct device_node *np,
98 struct sdhci_host *host); 97 struct sdhci_host *host);
99void xenon_soc_pad_ctrl(struct sdhci_host *host, 98void xenon_soc_pad_ctrl(struct sdhci_host *host,
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index d474378ed810..b1dd12729f19 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -202,7 +202,7 @@ static int nand_ooblayout_free_lp_hamming(struct mtd_info *mtd, int section,
202 return 0; 202 return 0;
203} 203}
204 204
205const struct mtd_ooblayout_ops nand_ooblayout_lp_hamming_ops = { 205static const struct mtd_ooblayout_ops nand_ooblayout_lp_hamming_ops = {
206 .ecc = nand_ooblayout_ecc_lp_hamming, 206 .ecc = nand_ooblayout_ecc_lp_hamming,
207 .free = nand_ooblayout_free_lp_hamming, 207 .free = nand_ooblayout_free_lp_hamming,
208}; 208};
@@ -4361,7 +4361,7 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
4361 /* Initialize the ->data_interface field. */ 4361 /* Initialize the ->data_interface field. */
4362 ret = nand_init_data_interface(chip); 4362 ret = nand_init_data_interface(chip);
4363 if (ret) 4363 if (ret)
4364 return ret; 4364 goto err_nand_init;
4365 4365
4366 /* 4366 /*
4367 * Setup the data interface correctly on the chip and controller side. 4367 * Setup the data interface correctly on the chip and controller side.
@@ -4373,7 +4373,7 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
4373 */ 4373 */
4374 ret = nand_setup_data_interface(chip); 4374 ret = nand_setup_data_interface(chip);
4375 if (ret) 4375 if (ret)
4376 return ret; 4376 goto err_nand_init;
4377 4377
4378 nand_maf_id = chip->id.data[0]; 4378 nand_maf_id = chip->id.data[0];
4379 nand_dev_id = chip->id.data[1]; 4379 nand_dev_id = chip->id.data[1];
@@ -4404,6 +4404,12 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
4404 mtd->size = i * chip->chipsize; 4404 mtd->size = i * chip->chipsize;
4405 4405
4406 return 0; 4406 return 0;
4407
4408err_nand_init:
4409 /* Free manufacturer priv data. */
4410 nand_manufacturer_cleanup(chip);
4411
4412 return ret;
4407} 4413}
4408EXPORT_SYMBOL(nand_scan_ident); 4414EXPORT_SYMBOL(nand_scan_ident);
4409 4415
@@ -4574,18 +4580,23 @@ int nand_scan_tail(struct mtd_info *mtd)
4574 4580
4575 /* New bad blocks should be marked in OOB, flash-based BBT, or both */ 4581 /* New bad blocks should be marked in OOB, flash-based BBT, or both */
4576 if (WARN_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) && 4582 if (WARN_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) &&
4577 !(chip->bbt_options & NAND_BBT_USE_FLASH))) 4583 !(chip->bbt_options & NAND_BBT_USE_FLASH))) {
4578 return -EINVAL; 4584 ret = -EINVAL;
4585 goto err_ident;
4586 }
4579 4587
4580 if (invalid_ecc_page_accessors(chip)) { 4588 if (invalid_ecc_page_accessors(chip)) {
4581 pr_err("Invalid ECC page accessors setup\n"); 4589 pr_err("Invalid ECC page accessors setup\n");
4582 return -EINVAL; 4590 ret = -EINVAL;
4591 goto err_ident;
4583 } 4592 }
4584 4593
4585 if (!(chip->options & NAND_OWN_BUFFERS)) { 4594 if (!(chip->options & NAND_OWN_BUFFERS)) {
4586 nbuf = kzalloc(sizeof(*nbuf), GFP_KERNEL); 4595 nbuf = kzalloc(sizeof(*nbuf), GFP_KERNEL);
4587 if (!nbuf) 4596 if (!nbuf) {
4588 return -ENOMEM; 4597 ret = -ENOMEM;
4598 goto err_ident;
4599 }
4589 4600
4590 nbuf->ecccalc = kmalloc(mtd->oobsize, GFP_KERNEL); 4601 nbuf->ecccalc = kmalloc(mtd->oobsize, GFP_KERNEL);
4591 if (!nbuf->ecccalc) { 4602 if (!nbuf->ecccalc) {
@@ -4608,8 +4619,10 @@ int nand_scan_tail(struct mtd_info *mtd)
4608 4619
4609 chip->buffers = nbuf; 4620 chip->buffers = nbuf;
4610 } else { 4621 } else {
4611 if (!chip->buffers) 4622 if (!chip->buffers) {
4612 return -ENOMEM; 4623 ret = -ENOMEM;
4624 goto err_ident;
4625 }
4613 } 4626 }
4614 4627
4615 /* Set the internal oob buffer location, just after the page data */ 4628 /* Set the internal oob buffer location, just after the page data */
@@ -4842,7 +4855,11 @@ int nand_scan_tail(struct mtd_info *mtd)
4842 return 0; 4855 return 0;
4843 4856
4844 /* Build bad block table */ 4857 /* Build bad block table */
4845 return chip->scan_bbt(mtd); 4858 ret = chip->scan_bbt(mtd);
4859 if (ret)
4860 goto err_free;
4861 return 0;
4862
4846err_free: 4863err_free:
4847 if (nbuf) { 4864 if (nbuf) {
4848 kfree(nbuf->databuf); 4865 kfree(nbuf->databuf);
@@ -4850,6 +4867,13 @@ err_free:
4850 kfree(nbuf->ecccalc); 4867 kfree(nbuf->ecccalc);
4851 kfree(nbuf); 4868 kfree(nbuf);
4852 } 4869 }
4870
4871err_ident:
4872 /* Clean up nand_scan_ident(). */
4873
4874 /* Free manufacturer priv data. */
4875 nand_manufacturer_cleanup(chip);
4876
4853 return ret; 4877 return ret;
4854} 4878}
4855EXPORT_SYMBOL(nand_scan_tail); 4879EXPORT_SYMBOL(nand_scan_tail);
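
Both nand_scan_ident() and nand_scan_tail() stop returning directly on failure and instead unwind through labels, so the manufacturer private data allocated during ident is always released. A compact sketch of that goto-based unwinding:

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
            char *ident_priv, *buffers;
            int ret;

            ident_priv = malloc(32);    /* what the ident step allocates */
            if (!ident_priv)
                    return -1;

            buffers = malloc(64);
            if (!buffers) {
                    ret = -1;
                    goto err_ident;     /* undo ident, don't just return */
            }

            printf("scan complete\n");
            free(buffers);
            free(ident_priv);
            return 0;

    err_ident:
            free(ident_priv);           /* manufacturer priv data analogue */
            return ret;
    }
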
diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c
index 9d5ca0e540b5..92e2cf8e9ff9 100644
--- a/drivers/mtd/nand/nand_ids.c
+++ b/drivers/mtd/nand/nand_ids.c
@@ -6,7 +6,6 @@
6 * published by the Free Software Foundation. 6 * published by the Free Software Foundation.
7 * 7 *
8 */ 8 */
9#include <linux/module.h>
10#include <linux/mtd/nand.h> 9#include <linux/mtd/nand.h>
11#include <linux/sizes.h> 10#include <linux/sizes.h>
12 11
diff --git a/drivers/mtd/nand/nand_samsung.c b/drivers/mtd/nand/nand_samsung.c
index 9cfc4035a420..1e0755997762 100644
--- a/drivers/mtd/nand/nand_samsung.c
+++ b/drivers/mtd/nand/nand_samsung.c
@@ -84,6 +84,9 @@ static void samsung_nand_decode_id(struct nand_chip *chip)
84 case 7: 84 case 7:
85 chip->ecc_strength_ds = 60; 85 chip->ecc_strength_ds = 60;
86 break; 86 break;
87 default:
88 WARN(1, "Could not decode ECC info");
89 chip->ecc_step_ds = 0;
87 } 90 }
88 } 91 }
89 } else { 92 } else {
diff --git a/drivers/mtd/nand/tango_nand.c b/drivers/mtd/nand/tango_nand.c
index 05b6e1065203..49b286c6c10f 100644
--- a/drivers/mtd/nand/tango_nand.c
+++ b/drivers/mtd/nand/tango_nand.c
@@ -55,10 +55,10 @@
55 * byte 1 for other packets in the page (PKT_N, for N > 0) 55 * byte 1 for other packets in the page (PKT_N, for N > 0)
56 * ERR_COUNT_PKT_N is the max error count over all but the first packet. 56 * ERR_COUNT_PKT_N is the max error count over all but the first packet.
57 */ 57 */
58#define DECODE_OK_PKT_0(v) ((v) & BIT(7))
59#define DECODE_OK_PKT_N(v) ((v) & BIT(15))
60#define ERR_COUNT_PKT_0(v) (((v) >> 0) & 0x3f) 58#define ERR_COUNT_PKT_0(v) (((v) >> 0) & 0x3f)
61#define ERR_COUNT_PKT_N(v) (((v) >> 8) & 0x3f) 59#define ERR_COUNT_PKT_N(v) (((v) >> 8) & 0x3f)
60#define DECODE_FAIL_PKT_0(v) (((v) & BIT(7)) == 0)
61#define DECODE_FAIL_PKT_N(v) (((v) & BIT(15)) == 0)
62 62
63/* Offsets relative to pbus_base */ 63/* Offsets relative to pbus_base */
64#define PBUS_CS_CTRL 0x83c 64#define PBUS_CS_CTRL 0x83c
@@ -193,6 +193,8 @@ static int check_erased_page(struct nand_chip *chip, u8 *buf)
193 chip->ecc.strength); 193 chip->ecc.strength);
194 if (res < 0) 194 if (res < 0)
195 mtd->ecc_stats.failed++; 195 mtd->ecc_stats.failed++;
196 else
197 mtd->ecc_stats.corrected += res;
196 198
197 bitflips = max(res, bitflips); 199 bitflips = max(res, bitflips);
198 buf += pkt_size; 200 buf += pkt_size;
@@ -202,9 +204,11 @@ static int check_erased_page(struct nand_chip *chip, u8 *buf)
202 return bitflips; 204 return bitflips;
203} 205}
204 206
205static int decode_error_report(struct tango_nfc *nfc) 207static int decode_error_report(struct nand_chip *chip)
206{ 208{
207 u32 status, res; 209 u32 status, res;
210 struct mtd_info *mtd = nand_to_mtd(chip);
211 struct tango_nfc *nfc = to_tango_nfc(chip->controller);
208 212
209 status = readl_relaxed(nfc->reg_base + NFC_XFER_STATUS); 213 status = readl_relaxed(nfc->reg_base + NFC_XFER_STATUS);
210 if (status & PAGE_IS_EMPTY) 214 if (status & PAGE_IS_EMPTY)
@@ -212,10 +216,14 @@ static int decode_error_report(struct tango_nfc *nfc)
212 216
213 res = readl_relaxed(nfc->mem_base + ERROR_REPORT); 217 res = readl_relaxed(nfc->mem_base + ERROR_REPORT);
214 218
215 if (DECODE_OK_PKT_0(res) && DECODE_OK_PKT_N(res)) 219 if (DECODE_FAIL_PKT_0(res) || DECODE_FAIL_PKT_N(res))
216 return max(ERR_COUNT_PKT_0(res), ERR_COUNT_PKT_N(res)); 220 return -EBADMSG;
221
222 /* ERR_COUNT_PKT_N is max, not sum, but that's all we have */
223 mtd->ecc_stats.corrected +=
224 ERR_COUNT_PKT_0(res) + ERR_COUNT_PKT_N(res);
217 225
218 return -EBADMSG; 226 return max(ERR_COUNT_PKT_0(res), ERR_COUNT_PKT_N(res));
219} 227}
220 228
221static void tango_dma_callback(void *arg) 229static void tango_dma_callback(void *arg)
@@ -282,7 +290,7 @@ static int tango_read_page(struct mtd_info *mtd, struct nand_chip *chip,
282 if (err) 290 if (err)
283 return err; 291 return err;
284 292
285 res = decode_error_report(nfc); 293 res = decode_error_report(chip);
286 if (res < 0) { 294 if (res < 0) {
287 chip->ecc.read_oob_raw(mtd, chip, page); 295 chip->ecc.read_oob_raw(mtd, chip, page);
288 res = check_erased_page(chip, buf); 296 res = check_erased_page(chip, buf);
@@ -663,6 +671,7 @@ static const struct of_device_id tango_nand_ids[] = {
663 { .compatible = "sigma,smp8758-nand" }, 671 { .compatible = "sigma,smp8758-nand" },
664 { /* sentinel */ } 672 { /* sentinel */ }
665}; 673};
674MODULE_DEVICE_TABLE(of, tango_nand_ids);
666 675
667static struct platform_driver tango_nand_driver = { 676static struct platform_driver tango_nand_driver = {
668 .probe = tango_nand_probe, 677 .probe = tango_nand_probe,
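
decode_error_report() now takes the chip so it can credit corrected bitflips to mtd->ecc_stats, and the inverted DECODE_FAIL_* macros make the uncorrectable case the early return. A standalone sketch using the same bit layout as the macros above:

    #include <stdio.h>

    #define EBADMSG 74                  /* not including errno.h in the sketch */
    #define ERR_COUNT_PKT_0(v)   (((v) >> 0) & 0x3f)
    #define ERR_COUNT_PKT_N(v)   (((v) >> 8) & 0x3f)
    #define DECODE_FAIL_PKT_0(v) (((v) & (1u << 7)) == 0)
    #define DECODE_FAIL_PKT_N(v) (((v) & (1u << 15)) == 0)

    static unsigned long ecc_corrected;

    static int decode_error_report(unsigned int res)
    {
            if (DECODE_FAIL_PKT_0(res) || DECODE_FAIL_PKT_N(res))
                    return -EBADMSG;    /* uncorrectable: caller rereads raw */

            /* per-packet counts: the sum is an upper bound, as noted above */
            ecc_corrected += ERR_COUNT_PKT_0(res) + ERR_COUNT_PKT_N(res);

            return ERR_COUNT_PKT_0(res) > ERR_COUNT_PKT_N(res) ?
                   ERR_COUNT_PKT_0(res) : ERR_COUNT_PKT_N(res);
    }

    int main(void)
    {
            /* both decode-OK bits set, 3 and 5 bitflips per packet class */
            unsigned int res = (1u << 7) | (1u << 15) | 3 | (5 << 8);

            printf("max bitflips = %d, corrected = %lu\n",
                   decode_error_report(res), ecc_corrected);
            return 0;
    }
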
diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c
index 62ee439d5882..53a1cb551def 100644
--- a/drivers/net/arcnet/arcnet.c
+++ b/drivers/net/arcnet/arcnet.c
@@ -756,6 +756,7 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id)
756 struct net_device *dev = dev_id; 756 struct net_device *dev = dev_id;
757 struct arcnet_local *lp; 757 struct arcnet_local *lp;
758 int recbuf, status, diagstatus, didsomething, boguscount; 758 int recbuf, status, diagstatus, didsomething, boguscount;
759 unsigned long flags;
759 int retval = IRQ_NONE; 760 int retval = IRQ_NONE;
760 761
761 arc_printk(D_DURING, dev, "\n"); 762 arc_printk(D_DURING, dev, "\n");
@@ -765,7 +766,7 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id)
765 lp = netdev_priv(dev); 766 lp = netdev_priv(dev);
766 BUG_ON(!lp); 767 BUG_ON(!lp);
767 768
768 spin_lock(&lp->lock); 769 spin_lock_irqsave(&lp->lock, flags);
769 770
770 /* RESET flag was enabled - if device is not running, we must 771 /* RESET flag was enabled - if device is not running, we must
771 * clear it right away (but nothing else). 772 * clear it right away (but nothing else).
@@ -774,7 +775,7 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id)
774 if (lp->hw.status(dev) & RESETflag) 775 if (lp->hw.status(dev) & RESETflag)
775 lp->hw.command(dev, CFLAGScmd | RESETclear); 776 lp->hw.command(dev, CFLAGScmd | RESETclear);
776 lp->hw.intmask(dev, 0); 777 lp->hw.intmask(dev, 0);
777 spin_unlock(&lp->lock); 778 spin_unlock_irqrestore(&lp->lock, flags);
778 return retval; 779 return retval;
779 } 780 }
780 781
@@ -998,7 +999,7 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id)
998 udelay(1); 999 udelay(1);
999 lp->hw.intmask(dev, lp->intmask); 1000 lp->hw.intmask(dev, lp->intmask);
1000 1001
1001 spin_unlock(&lp->lock); 1002 spin_unlock_irqrestore(&lp->lock, flags);
1002 return retval; 1003 return retval;
1003} 1004}
1004EXPORT_SYMBOL(arcnet_interrupt); 1005EXPORT_SYMBOL(arcnet_interrupt);
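
Because the handler can now be reached from contexts where interrupts are enabled, the plain spin_lock() becomes spin_lock_irqsave() so the saved flags are restored on exit. A rough userspace analogue (a pthread mutex stands in for the spinlock; saving and restoring interrupt flags has no direct userspace equivalent, hence only the lock/unlock shape):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int intmask = 0xff;

    static void handler(void)
    {
            pthread_mutex_lock(&lock);   /* spin_lock_irqsave(&lp->lock, flags) */
            intmask = 0;                 /* mask device interrupts under the lock */
            pthread_mutex_unlock(&lock); /* spin_unlock_irqrestore(&lp->lock, flags) */
    }

    int main(void)
    {
            handler();
            printf("intmask = %d\n", intmask);
            return 0;
    }
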
diff --git a/drivers/net/arcnet/capmode.c b/drivers/net/arcnet/capmode.c
index 2056878fb087..4fa2e46b48d3 100644
--- a/drivers/net/arcnet/capmode.c
+++ b/drivers/net/arcnet/capmode.c
@@ -212,7 +212,7 @@ static int ack_tx(struct net_device *dev, int acked)
212 ackpkt->soft.cap.proto = 0; /* using protocol 0 for acknowledge */ 212 ackpkt->soft.cap.proto = 0; /* using protocol 0 for acknowledge */
213 ackpkt->soft.cap.mes.ack = acked; 213 ackpkt->soft.cap.mes.ack = acked;
214 214
215 arc_printk(D_PROTO, dev, "Ackknowledge for cap packet %x.\n", 215 arc_printk(D_PROTO, dev, "Acknowledge for cap packet %x.\n",
216 *((int *)&ackpkt->soft.cap.cookie[0])); 216 *((int *)&ackpkt->soft.cap.cookie[0]));
217 217
218 ackskb->protocol = cpu_to_be16(ETH_P_ARCNET); 218 ackskb->protocol = cpu_to_be16(ETH_P_ARCNET);
diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c
index 239de38fbd6a..47f80b83dcf4 100644
--- a/drivers/net/arcnet/com20020-pci.c
+++ b/drivers/net/arcnet/com20020-pci.c
@@ -135,6 +135,7 @@ static int com20020pci_probe(struct pci_dev *pdev,
135 for (i = 0; i < ci->devcount; i++) { 135 for (i = 0; i < ci->devcount; i++) {
136 struct com20020_pci_channel_map *cm = &ci->chan_map_tbl[i]; 136 struct com20020_pci_channel_map *cm = &ci->chan_map_tbl[i];
137 struct com20020_dev *card; 137 struct com20020_dev *card;
138 int dev_id_mask = 0xf;
138 139
139 dev = alloc_arcdev(device); 140 dev = alloc_arcdev(device);
140 if (!dev) { 141 if (!dev) {
@@ -166,6 +167,7 @@ static int com20020pci_probe(struct pci_dev *pdev,
166 arcnet_outb(0x00, ioaddr, COM20020_REG_W_COMMAND); 167 arcnet_outb(0x00, ioaddr, COM20020_REG_W_COMMAND);
167 arcnet_inb(ioaddr, COM20020_REG_R_DIAGSTAT); 168 arcnet_inb(ioaddr, COM20020_REG_R_DIAGSTAT);
168 169
170 SET_NETDEV_DEV(dev, &pdev->dev);
169 dev->base_addr = ioaddr; 171 dev->base_addr = ioaddr;
170 dev->dev_addr[0] = node; 172 dev->dev_addr[0] = node;
171 dev->irq = pdev->irq; 173 dev->irq = pdev->irq;
@@ -179,8 +181,8 @@ static int com20020pci_probe(struct pci_dev *pdev,
179 181
180 /* Get the dev_id from the PLX rotary coder */ 182 /* Get the dev_id from the PLX rotary coder */
181 if (!strncmp(ci->name, "EAE PLX-PCI MA1", 15)) 183 if (!strncmp(ci->name, "EAE PLX-PCI MA1", 15))
182 dev->dev_id = 0xc; 184 dev_id_mask = 0x3;
183 dev->dev_id ^= inb(priv->misc + ci->rotary) >> 4; 185 dev->dev_id = (inb(priv->misc + ci->rotary) >> 4) & dev_id_mask;
184 186
185 snprintf(dev->name, sizeof(dev->name), "arc%d-%d", dev->dev_id, i); 187 snprintf(dev->name, sizeof(dev->name), "arc%d-%d", dev->dev_id, i);
186 188
diff --git a/drivers/net/arcnet/com20020.c b/drivers/net/arcnet/com20020.c
index 13d9ad4b3f5c..78043a9c5981 100644
--- a/drivers/net/arcnet/com20020.c
+++ b/drivers/net/arcnet/com20020.c
@@ -246,8 +246,6 @@ int com20020_found(struct net_device *dev, int shared)
246 return -ENODEV; 246 return -ENODEV;
247 } 247 }
248 248
249 dev->base_addr = ioaddr;
250
251 arc_printk(D_NORMAL, dev, "%s: station %02Xh found at %03lXh, IRQ %d.\n", 249 arc_printk(D_NORMAL, dev, "%s: station %02Xh found at %03lXh, IRQ %d.\n",
252 lp->card_name, dev->dev_addr[0], dev->base_addr, dev->irq); 250 lp->card_name, dev->dev_addr[0], dev->base_addr, dev->irq);
253 251
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index c5fd4259da33..e5386ab706ec 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -90,10 +90,13 @@ enum ad_link_speed_type {
90 AD_LINK_SPEED_100MBPS, 90 AD_LINK_SPEED_100MBPS,
91 AD_LINK_SPEED_1000MBPS, 91 AD_LINK_SPEED_1000MBPS,
92 AD_LINK_SPEED_2500MBPS, 92 AD_LINK_SPEED_2500MBPS,
93 AD_LINK_SPEED_5000MBPS,
93 AD_LINK_SPEED_10000MBPS, 94 AD_LINK_SPEED_10000MBPS,
95 AD_LINK_SPEED_14000MBPS,
94 AD_LINK_SPEED_20000MBPS, 96 AD_LINK_SPEED_20000MBPS,
95 AD_LINK_SPEED_25000MBPS, 97 AD_LINK_SPEED_25000MBPS,
96 AD_LINK_SPEED_40000MBPS, 98 AD_LINK_SPEED_40000MBPS,
99 AD_LINK_SPEED_50000MBPS,
97 AD_LINK_SPEED_56000MBPS, 100 AD_LINK_SPEED_56000MBPS,
98 AD_LINK_SPEED_100000MBPS, 101 AD_LINK_SPEED_100000MBPS,
99}; 102};
@@ -259,10 +262,13 @@ static inline int __check_agg_selection_timer(struct port *port)
259 * %AD_LINK_SPEED_100MBPS, 262 * %AD_LINK_SPEED_100MBPS,
260 * %AD_LINK_SPEED_1000MBPS, 263 * %AD_LINK_SPEED_1000MBPS,
261 * %AD_LINK_SPEED_2500MBPS, 264 * %AD_LINK_SPEED_2500MBPS,
265 * %AD_LINK_SPEED_5000MBPS,
262 * %AD_LINK_SPEED_10000MBPS 266 * %AD_LINK_SPEED_10000MBPS
267 * %AD_LINK_SPEED_14000MBPS,
263 * %AD_LINK_SPEED_20000MBPS 268 * %AD_LINK_SPEED_20000MBPS
264 * %AD_LINK_SPEED_25000MBPS 269 * %AD_LINK_SPEED_25000MBPS
265 * %AD_LINK_SPEED_40000MBPS 270 * %AD_LINK_SPEED_40000MBPS
271 * %AD_LINK_SPEED_50000MBPS
266 * %AD_LINK_SPEED_56000MBPS 272 * %AD_LINK_SPEED_56000MBPS
267 * %AD_LINK_SPEED_100000MBPS 273 * %AD_LINK_SPEED_100000MBPS
268 */ 274 */
@@ -296,10 +302,18 @@ static u16 __get_link_speed(struct port *port)
296 speed = AD_LINK_SPEED_2500MBPS; 302 speed = AD_LINK_SPEED_2500MBPS;
297 break; 303 break;
298 304
305 case SPEED_5000:
306 speed = AD_LINK_SPEED_5000MBPS;
307 break;
308
299 case SPEED_10000: 309 case SPEED_10000:
300 speed = AD_LINK_SPEED_10000MBPS; 310 speed = AD_LINK_SPEED_10000MBPS;
301 break; 311 break;
302 312
313 case SPEED_14000:
314 speed = AD_LINK_SPEED_14000MBPS;
315 break;
316
303 case SPEED_20000: 317 case SPEED_20000:
304 speed = AD_LINK_SPEED_20000MBPS; 318 speed = AD_LINK_SPEED_20000MBPS;
305 break; 319 break;
@@ -312,6 +326,10 @@ static u16 __get_link_speed(struct port *port)
312 speed = AD_LINK_SPEED_40000MBPS; 326 speed = AD_LINK_SPEED_40000MBPS;
313 break; 327 break;
314 328
329 case SPEED_50000:
330 speed = AD_LINK_SPEED_50000MBPS;
331 break;
332
315 case SPEED_56000: 333 case SPEED_56000:
316 speed = AD_LINK_SPEED_56000MBPS; 334 speed = AD_LINK_SPEED_56000MBPS;
317 break; 335 break;
@@ -707,9 +725,15 @@ static u32 __get_agg_bandwidth(struct aggregator *aggregator)
707 case AD_LINK_SPEED_2500MBPS: 725 case AD_LINK_SPEED_2500MBPS:
708 bandwidth = nports * 2500; 726 bandwidth = nports * 2500;
709 break; 727 break;
728 case AD_LINK_SPEED_5000MBPS:
729 bandwidth = nports * 5000;
730 break;
710 case AD_LINK_SPEED_10000MBPS: 731 case AD_LINK_SPEED_10000MBPS:
711 bandwidth = nports * 10000; 732 bandwidth = nports * 10000;
712 break; 733 break;
734 case AD_LINK_SPEED_14000MBPS:
735 bandwidth = nports * 14000;
736 break;
713 case AD_LINK_SPEED_20000MBPS: 737 case AD_LINK_SPEED_20000MBPS:
714 bandwidth = nports * 20000; 738 bandwidth = nports * 20000;
715 break; 739 break;
@@ -719,6 +743,9 @@ static u32 __get_agg_bandwidth(struct aggregator *aggregator)
719 case AD_LINK_SPEED_40000MBPS: 743 case AD_LINK_SPEED_40000MBPS:
720 bandwidth = nports * 40000; 744 bandwidth = nports * 40000;
721 break; 745 break;
746 case AD_LINK_SPEED_50000MBPS:
747 bandwidth = nports * 50000;
748 break;
722 case AD_LINK_SPEED_56000MBPS: 749 case AD_LINK_SPEED_56000MBPS:
723 bandwidth = nports * 56000; 750 bandwidth = nports * 56000;
724 break; 751 break;
@@ -2577,7 +2604,7 @@ int __bond_3ad_get_active_agg_info(struct bonding *bond,
2577 return -1; 2604 return -1;
2578 2605
2579 ad_info->aggregator_id = aggregator->aggregator_identifier; 2606 ad_info->aggregator_id = aggregator->aggregator_identifier;
2580 ad_info->ports = aggregator->num_of_ports; 2607 ad_info->ports = __agg_active_ports(aggregator);
2581 ad_info->actor_key = aggregator->actor_oper_aggregator_key; 2608 ad_info->actor_key = aggregator->actor_oper_aggregator_key;
2582 ad_info->partner_key = aggregator->partner_oper_aggregator_key; 2609 ad_info->partner_key = aggregator->partner_oper_aggregator_key;
2583 ether_addr_copy(ad_info->partner_system, 2610 ether_addr_copy(ad_info->partner_system,
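
Adding a link speed to the 802.3ad code means touching two tables: the SPEED_* to AD_LINK_SPEED_* mapping and the per-speed bandwidth sum; missing either makes the aggregator under-report. A trimmed sketch covering only the newly added speeds:

    #include <stdio.h>

    enum ad_link_speed { AD_1000, AD_5000, AD_14000, AD_50000 };

    static unsigned int agg_bandwidth(enum ad_link_speed s, int nports)
    {
            switch (s) {
            case AD_1000:   return nports * 1000;
            case AD_5000:   return nports * 5000;  /* new */
            case AD_14000:  return nports * 14000; /* new */
            case AD_50000:  return nports * 50000; /* new */
            }
            return 0;
    }

    int main(void)
    {
            printf("%u Mbps\n", agg_bandwidth(AD_50000, 2));
            return 0;
    }
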
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 2be78807fd6e..8ab6bdbe1682 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2612,11 +2612,13 @@ static void bond_loadbalance_arp_mon(struct bonding *bond)
2612 bond_for_each_slave_rcu(bond, slave, iter) { 2612 bond_for_each_slave_rcu(bond, slave, iter) {
2613 unsigned long trans_start = dev_trans_start(slave->dev); 2613 unsigned long trans_start = dev_trans_start(slave->dev);
2614 2614
2615 slave->new_link = BOND_LINK_NOCHANGE;
2616
2615 if (slave->link != BOND_LINK_UP) { 2617 if (slave->link != BOND_LINK_UP) {
2616 if (bond_time_in_interval(bond, trans_start, 1) && 2618 if (bond_time_in_interval(bond, trans_start, 1) &&
2617 bond_time_in_interval(bond, slave->last_rx, 1)) { 2619 bond_time_in_interval(bond, slave->last_rx, 1)) {
2618 2620
2619 slave->link = BOND_LINK_UP; 2621 slave->new_link = BOND_LINK_UP;
2620 slave_state_changed = 1; 2622 slave_state_changed = 1;
2621 2623
2622 /* primary_slave has no meaning in round-robin 2624 /* primary_slave has no meaning in round-robin
@@ -2643,7 +2645,7 @@ static void bond_loadbalance_arp_mon(struct bonding *bond)
2643 if (!bond_time_in_interval(bond, trans_start, 2) || 2645 if (!bond_time_in_interval(bond, trans_start, 2) ||
2644 !bond_time_in_interval(bond, slave->last_rx, 2)) { 2646 !bond_time_in_interval(bond, slave->last_rx, 2)) {
2645 2647
2646 slave->link = BOND_LINK_DOWN; 2648 slave->new_link = BOND_LINK_DOWN;
2647 slave_state_changed = 1; 2649 slave_state_changed = 1;
2648 2650
2649 if (slave->link_failure_count < UINT_MAX) 2651 if (slave->link_failure_count < UINT_MAX)
@@ -2674,6 +2676,11 @@ static void bond_loadbalance_arp_mon(struct bonding *bond)
2674 if (!rtnl_trylock()) 2676 if (!rtnl_trylock())
2675 goto re_arm; 2677 goto re_arm;
2676 2678
2679 bond_for_each_slave(bond, slave, iter) {
2680 if (slave->new_link != BOND_LINK_NOCHANGE)
2681 slave->link = slave->new_link;
2682 }
2683
2677 if (slave_state_changed) { 2684 if (slave_state_changed) {
2678 bond_slave_state_change(bond); 2685 bond_slave_state_change(bond);
2679 if (BOND_MODE(bond) == BOND_MODE_XOR) 2686 if (BOND_MODE(bond) == BOND_MODE_XOR)
@@ -4185,7 +4192,6 @@ static void bond_destructor(struct net_device *bond_dev)
4185 struct bonding *bond = netdev_priv(bond_dev); 4192 struct bonding *bond = netdev_priv(bond_dev);
4186 if (bond->wq) 4193 if (bond->wq)
4187 destroy_workqueue(bond->wq); 4194 destroy_workqueue(bond->wq);
4188 free_netdev(bond_dev);
4189} 4195}
4190 4196
4191void bond_setup(struct net_device *bond_dev) 4197void bond_setup(struct net_device *bond_dev)
@@ -4205,7 +4211,8 @@ void bond_setup(struct net_device *bond_dev)
4205 bond_dev->netdev_ops = &bond_netdev_ops; 4211 bond_dev->netdev_ops = &bond_netdev_ops;
4206 bond_dev->ethtool_ops = &bond_ethtool_ops; 4212 bond_dev->ethtool_ops = &bond_ethtool_ops;
4207 4213
4208 bond_dev->destructor = bond_destructor; 4214 bond_dev->needs_free_netdev = true;
4215 bond_dev->priv_destructor = bond_destructor;
4209 4216
4210 SET_NETDEV_DEVTYPE(bond_dev, &bond_type); 4217 SET_NETDEV_DEVTYPE(bond_dev, &bond_type);
4211 4218
@@ -4271,10 +4278,10 @@ static int bond_check_params(struct bond_params *params)
4271 int arp_validate_value, fail_over_mac_value, primary_reselect_value, i; 4278 int arp_validate_value, fail_over_mac_value, primary_reselect_value, i;
4272 struct bond_opt_value newval; 4279 struct bond_opt_value newval;
4273 const struct bond_opt_value *valptr; 4280 const struct bond_opt_value *valptr;
4274 int arp_all_targets_value; 4281 int arp_all_targets_value = 0;
4275 u16 ad_actor_sys_prio = 0; 4282 u16 ad_actor_sys_prio = 0;
4276 u16 ad_user_port_key = 0; 4283 u16 ad_user_port_key = 0;
4277 __be32 arp_target[BOND_MAX_ARP_TARGETS]; 4284 __be32 arp_target[BOND_MAX_ARP_TARGETS] = { 0 };
4278 int arp_ip_count; 4285 int arp_ip_count;
4279 int bond_mode = BOND_MODE_ROUNDROBIN; 4286 int bond_mode = BOND_MODE_ROUNDROBIN;
4280 int xmit_hashtype = BOND_XMIT_POLICY_LAYER2; 4287 int xmit_hashtype = BOND_XMIT_POLICY_LAYER2;
@@ -4501,7 +4508,6 @@ static int bond_check_params(struct bond_params *params)
4501 arp_validate_value = 0; 4508 arp_validate_value = 0;
4502 } 4509 }
4503 4510
4504 arp_all_targets_value = 0;
4505 if (arp_all_targets) { 4511 if (arp_all_targets) {
4506 bond_opt_initstr(&newval, arp_all_targets); 4512 bond_opt_initstr(&newval, arp_all_targets);
4507 valptr = bond_opt_parse(bond_opt_get(BOND_OPT_ARP_ALL_TARGETS), 4513 valptr = bond_opt_parse(bond_opt_get(BOND_OPT_ARP_ALL_TARGETS),
@@ -4730,7 +4736,7 @@ int bond_create(struct net *net, const char *name)
4730 4736
4731 rtnl_unlock(); 4737 rtnl_unlock();
4732 if (res < 0) 4738 if (res < 0)
4733 bond_destructor(bond_dev); 4739 free_netdev(bond_dev);
4734 return res; 4740 return res;
4735} 4741}
4736 4742
diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
index ddabce759456..71a7c3b44fdd 100644
--- a/drivers/net/caif/caif_hsi.c
+++ b/drivers/net/caif/caif_hsi.c
@@ -1121,7 +1121,7 @@ static void cfhsi_setup(struct net_device *dev)
1121 dev->flags = IFF_POINTOPOINT | IFF_NOARP; 1121 dev->flags = IFF_POINTOPOINT | IFF_NOARP;
1122 dev->mtu = CFHSI_MAX_CAIF_FRAME_SZ; 1122 dev->mtu = CFHSI_MAX_CAIF_FRAME_SZ;
1123 dev->priv_flags |= IFF_NO_QUEUE; 1123 dev->priv_flags |= IFF_NO_QUEUE;
1124 dev->destructor = free_netdev; 1124 dev->needs_free_netdev = true;
1125 dev->netdev_ops = &cfhsi_netdevops; 1125 dev->netdev_ops = &cfhsi_netdevops;
1126 for (i = 0; i < CFHSI_PRIO_LAST; ++i) 1126 for (i = 0; i < CFHSI_PRIO_LAST; ++i)
1127 skb_queue_head_init(&cfhsi->qhead[i]); 1127 skb_queue_head_init(&cfhsi->qhead[i]);
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
index c2dea4916e5d..76e1d3545105 100644
--- a/drivers/net/caif/caif_serial.c
+++ b/drivers/net/caif/caif_serial.c
@@ -428,7 +428,7 @@ static void caifdev_setup(struct net_device *dev)
428 dev->flags = IFF_POINTOPOINT | IFF_NOARP; 428 dev->flags = IFF_POINTOPOINT | IFF_NOARP;
429 dev->mtu = CAIF_MAX_MTU; 429 dev->mtu = CAIF_MAX_MTU;
430 dev->priv_flags |= IFF_NO_QUEUE; 430 dev->priv_flags |= IFF_NO_QUEUE;
431 dev->destructor = free_netdev; 431 dev->needs_free_netdev = true;
432 skb_queue_head_init(&serdev->head); 432 skb_queue_head_init(&serdev->head);
433 serdev->common.link_select = CAIF_LINK_LOW_LATENCY; 433 serdev->common.link_select = CAIF_LINK_LOW_LATENCY;
434 serdev->common.use_frag = true; 434 serdev->common.use_frag = true;
diff --git a/drivers/net/caif/caif_spi.c b/drivers/net/caif/caif_spi.c
index 3a529fbe539f..fc21afe852b9 100644
--- a/drivers/net/caif/caif_spi.c
+++ b/drivers/net/caif/caif_spi.c
@@ -712,7 +712,7 @@ static void cfspi_setup(struct net_device *dev)
712 dev->flags = IFF_NOARP | IFF_POINTOPOINT; 712 dev->flags = IFF_NOARP | IFF_POINTOPOINT;
713 dev->priv_flags |= IFF_NO_QUEUE; 713 dev->priv_flags |= IFF_NO_QUEUE;
714 dev->mtu = SPI_MAX_PAYLOAD_SIZE; 714 dev->mtu = SPI_MAX_PAYLOAD_SIZE;
715 dev->destructor = free_netdev; 715 dev->needs_free_netdev = true;
716 skb_queue_head_init(&cfspi->qhead); 716 skb_queue_head_init(&cfspi->qhead);
717 skb_queue_head_init(&cfspi->chead); 717 skb_queue_head_init(&cfspi->chead);
718 cfspi->cfdev.link_select = CAIF_LINK_HIGH_BANDW; 718 cfspi->cfdev.link_select = CAIF_LINK_HIGH_BANDW;
diff --git a/drivers/net/caif/caif_virtio.c b/drivers/net/caif/caif_virtio.c
index 6122768c8644..1794ea0420b7 100644
--- a/drivers/net/caif/caif_virtio.c
+++ b/drivers/net/caif/caif_virtio.c
@@ -617,7 +617,7 @@ static void cfv_netdev_setup(struct net_device *netdev)
617 netdev->tx_queue_len = 100; 617 netdev->tx_queue_len = 100;
618 netdev->flags = IFF_POINTOPOINT | IFF_NOARP; 618 netdev->flags = IFF_POINTOPOINT | IFF_NOARP;
619 netdev->mtu = CFV_DEF_MTU_SIZE; 619 netdev->mtu = CFV_DEF_MTU_SIZE;
620 netdev->destructor = free_netdev; 620 netdev->needs_free_netdev = true;
621} 621}
622 622
623/* Create debugfs counters for the device */ 623/* Create debugfs counters for the device */
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 611d16a7061d..ae4ed03dc642 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -391,6 +391,9 @@ void can_change_state(struct net_device *dev, struct can_frame *cf,
391 can_update_state_error_stats(dev, new_state); 391 can_update_state_error_stats(dev, new_state);
392 priv->state = new_state; 392 priv->state = new_state;
393 393
394 if (!cf)
395 return;
396
394 if (unlikely(new_state == CAN_STATE_BUS_OFF)) { 397 if (unlikely(new_state == CAN_STATE_BUS_OFF)) {
395 cf->can_id |= CAN_ERR_BUSOFF; 398 cf->can_id |= CAN_ERR_BUSOFF;
396 return; 399 return;
diff --git a/drivers/net/can/peak_canfd/peak_canfd.c b/drivers/net/can/peak_canfd/peak_canfd.c
index 0d57be5ea97b..85268be0c913 100644
--- a/drivers/net/can/peak_canfd/peak_canfd.c
+++ b/drivers/net/can/peak_canfd/peak_canfd.c
@@ -489,7 +489,7 @@ int peak_canfd_handle_msgs_list(struct peak_canfd_priv *priv,
489 struct pucan_rx_msg *msg_list, int msg_count) 489 struct pucan_rx_msg *msg_list, int msg_count)
490{ 490{
491 void *msg_ptr = msg_list; 491 void *msg_ptr = msg_list;
492 int i, msg_size; 492 int i, msg_size = 0;
493 493
494 for (i = 0; i < msg_count; i++) { 494 for (i = 0; i < msg_count; i++) {
495 msg_size = peak_canfd_handle_msg(priv, msg_ptr); 495 msg_size = peak_canfd_handle_msg(priv, msg_ptr);
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index eb7173713bbc..6a6e896e52fa 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -417,7 +417,7 @@ static int slc_open(struct net_device *dev)
417static void slc_free_netdev(struct net_device *dev) 417static void slc_free_netdev(struct net_device *dev)
418{ 418{
419 int i = dev->base_addr; 419 int i = dev->base_addr;
420 free_netdev(dev); 420
421 slcan_devs[i] = NULL; 421 slcan_devs[i] = NULL;
422} 422}
423 423
@@ -436,7 +436,8 @@ static const struct net_device_ops slc_netdev_ops = {
436static void slc_setup(struct net_device *dev) 436static void slc_setup(struct net_device *dev)
437{ 437{
438 dev->netdev_ops = &slc_netdev_ops; 438 dev->netdev_ops = &slc_netdev_ops;
439 dev->destructor = slc_free_netdev; 439 dev->needs_free_netdev = true;
440 dev->priv_destructor = slc_free_netdev;
440 441
441 dev->hard_header_len = 0; 442 dev->hard_header_len = 0;
442 dev->addr_len = 0; 443 dev->addr_len = 0;
@@ -761,8 +762,6 @@ static void __exit slcan_exit(void)
761 if (sl->tty) { 762 if (sl->tty) {
762 printk(KERN_ERR "%s: tty discipline still running\n", 763 printk(KERN_ERR "%s: tty discipline still running\n",
763 dev->name); 764 dev->name);
764 /* Intentionally leak the control block. */
765 dev->destructor = NULL;
766 } 765 }
767 766
768 unregister_netdev(dev); 767 unregister_netdev(dev);
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index eecee7f8dfb7..afcc1312dbaf 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -265,6 +265,8 @@ static int gs_cmd_reset(struct gs_usb *gsusb, struct gs_can *gsdev)
265 sizeof(*dm), 265 sizeof(*dm),
266 1000); 266 1000);
267 267
268 kfree(dm);
269
268 return rc; 270 return rc;
269} 271}
270 272
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index 57913dbbae0a..1ca76e03e965 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -908,8 +908,6 @@ static int peak_usb_probe(struct usb_interface *intf,
908 const struct peak_usb_adapter *peak_usb_adapter = NULL; 908 const struct peak_usb_adapter *peak_usb_adapter = NULL;
909 int i, err = -ENOMEM; 909 int i, err = -ENOMEM;
910 910
911 usb_dev = interface_to_usbdev(intf);
912
913 /* get corresponding PCAN-USB adapter */ 911 /* get corresponding PCAN-USB adapter */
914 for (i = 0; i < ARRAY_SIZE(peak_usb_adapters_list); i++) 912 for (i = 0; i < ARRAY_SIZE(peak_usb_adapters_list); i++)
915 if (peak_usb_adapters_list[i]->device_id == usb_id_product) { 913 if (peak_usb_adapters_list[i]->device_id == usb_id_product) {
@@ -920,7 +918,7 @@ static int peak_usb_probe(struct usb_interface *intf,
920 if (!peak_usb_adapter) { 918 if (!peak_usb_adapter) {
 921		/* should never happen unless a bad device_id is used in this file */		 919		/* should never happen unless a bad device_id is used in this file */
922 pr_err("%s: didn't find device id. 0x%x in devices list\n", 920 pr_err("%s: didn't find device id. 0x%x in devices list\n",
923 PCAN_USB_DRIVER_NAME, usb_dev->descriptor.idProduct); 921 PCAN_USB_DRIVER_NAME, usb_id_product);
924 return -ENODEV; 922 return -ENODEV;
925 } 923 }
926 924
diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
index facca33d53e9..a8cb33264ff1 100644
--- a/drivers/net/can/vcan.c
+++ b/drivers/net/can/vcan.c
@@ -152,7 +152,7 @@ static const struct net_device_ops vcan_netdev_ops = {
152static void vcan_setup(struct net_device *dev) 152static void vcan_setup(struct net_device *dev)
153{ 153{
154 dev->type = ARPHRD_CAN; 154 dev->type = ARPHRD_CAN;
155 dev->mtu = CAN_MTU; 155 dev->mtu = CANFD_MTU;
156 dev->hard_header_len = 0; 156 dev->hard_header_len = 0;
157 dev->addr_len = 0; 157 dev->addr_len = 0;
158 dev->tx_queue_len = 0; 158 dev->tx_queue_len = 0;
@@ -163,7 +163,7 @@ static void vcan_setup(struct net_device *dev)
163 dev->flags |= IFF_ECHO; 163 dev->flags |= IFF_ECHO;
164 164
165 dev->netdev_ops = &vcan_netdev_ops; 165 dev->netdev_ops = &vcan_netdev_ops;
166 dev->destructor = free_netdev; 166 dev->needs_free_netdev = true;
167} 167}
168 168
169static struct rtnl_link_ops vcan_link_ops __read_mostly = { 169static struct rtnl_link_ops vcan_link_ops __read_mostly = {
diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c
index 7fbb24795681..cfe889e8f172 100644
--- a/drivers/net/can/vxcan.c
+++ b/drivers/net/can/vxcan.c
@@ -150,13 +150,13 @@ static const struct net_device_ops vxcan_netdev_ops = {
150static void vxcan_setup(struct net_device *dev) 150static void vxcan_setup(struct net_device *dev)
151{ 151{
152 dev->type = ARPHRD_CAN; 152 dev->type = ARPHRD_CAN;
153 dev->mtu = CAN_MTU; 153 dev->mtu = CANFD_MTU;
154 dev->hard_header_len = 0; 154 dev->hard_header_len = 0;
155 dev->addr_len = 0; 155 dev->addr_len = 0;
156 dev->tx_queue_len = 0; 156 dev->tx_queue_len = 0;
157 dev->flags = (IFF_NOARP|IFF_ECHO); 157 dev->flags = (IFF_NOARP|IFF_ECHO);
158 dev->netdev_ops = &vxcan_netdev_ops; 158 dev->netdev_ops = &vxcan_netdev_ops;
159 dev->destructor = free_netdev; 159 dev->needs_free_netdev = true;
160} 160}
161 161
162/* forward declaration for rtnl_create_link() */ 162/* forward declaration for rtnl_create_link() */
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 19581d783d8e..d034d8cd7d22 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -849,6 +849,9 @@ static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_chip *chip,
 		mv88e6xxx_g1_stats_read(chip, reg, &low);
 		if (s->sizeof_stat == 8)
 			mv88e6xxx_g1_stats_read(chip, reg + 1, &high);
+		break;
+	default:
+		return UINT64_MAX;
 	}
 	value = (((u64)high) << 16) | low;
 	return value;
diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h
index 96046bb12ca1..14c0be98e0a4 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.h
+++ b/drivers/net/dsa/mv88e6xxx/global2.h
@@ -114,13 +114,13 @@ static inline int mv88e6xxx_g2_set_eeprom16(struct mv88e6xxx_chip *chip,
 	return -EOPNOTSUPP;
 }
 
-int mv88e6xxx_g2_pvt_write(struct mv88e6xxx_chip *chip, int src_dev,
-			   int src_port, u16 data)
+static inline int mv88e6xxx_g2_pvt_write(struct mv88e6xxx_chip *chip,
+					 int src_dev, int src_port, u16 data)
 {
 	return -EOPNOTSUPP;
 }
 
-int mv88e6xxx_g2_misc_4_bit_port(struct mv88e6xxx_chip *chip)
+static inline int mv88e6xxx_g2_misc_4_bit_port(struct mv88e6xxx_chip *chip)
 {
 	return -EOPNOTSUPP;
 }
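
The global2.h fix above works because stub definitions that live in a header must be static inline: a plain function definition there is emitted in every translation unit that includes the header and fails to link with multiple-definition errors. A minimal sketch of the pattern; CONFIG_DEMO_FEATURE and demo_op are illustrative names, not from this driver:

    #ifndef DEMO_FEATURE_H
    #define DEMO_FEATURE_H

    #include <linux/errno.h>

    #ifdef CONFIG_DEMO_FEATURE
    int demo_op(int arg);		/* real implementation lives in a .c file */
    #else
    /* static inline: each includer gets a private copy, nothing is emitted
     * at link scope, so no "multiple definition" link errors.
     */
    static inline int demo_op(int arg)
    {
    	return -EOPNOTSUPP;
    }
    #endif

    #endif /* DEMO_FEATURE_H */
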
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index 149244aac20a..9905b52fe293 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -328,7 +328,6 @@ static void dummy_free_netdev(struct net_device *dev)
 	struct dummy_priv *priv = netdev_priv(dev);
 
 	kfree(priv->vfinfo);
-	free_netdev(dev);
 }
 
 static void dummy_setup(struct net_device *dev)
@@ -338,7 +337,8 @@ static void dummy_setup(struct net_device *dev)
 	/* Initialize the device structure. */
 	dev->netdev_ops = &dummy_netdev_ops;
 	dev->ethtool_ops = &dummy_ethtool_ops;
-	dev->destructor = dummy_free_netdev;
+	dev->needs_free_netdev = true;
+	dev->priv_destructor = dummy_free_netdev;
 
 	/* Fill in device structure with ethernet-generic values. */
 	dev->flags |= IFF_NOARP;
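
The vcan, vxcan, dummy (and later cxgb4) hunks all follow the same net core conversion: dev->destructor is gone, drivers set needs_free_netdev so the core frees the net_device, and optionally install a priv_destructor for their own state. A minimal sketch of the convention; struct demo_priv and its extra member are illustrative, not from any driver here:

    #include <linux/netdevice.h>
    #include <linux/slab.h>

    struct demo_priv {
    	void *extra;		/* hypothetical per-device allocation */
    };

    /* Private cleanup only: the core calls free_netdev() itself because
     * needs_free_netdev is set, so calling it here would be a double free.
     */
    static void demo_priv_destructor(struct net_device *dev)
    {
    	struct demo_priv *priv = netdev_priv(dev);

    	kfree(priv->extra);
    }

    static void demo_setup(struct net_device *dev)
    {
    	dev->needs_free_netdev = true;			/* core frees the netdev  */
    	dev->priv_destructor = demo_priv_destructor;	/* driver frees its state */
    }

Drivers with nothing of their own to free, like vcan above, set only needs_free_netdev.
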
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
index b0a3b85fc6f8..db02bc2fb4b2 100644
--- a/drivers/net/ethernet/8390/ax88796.c
+++ b/drivers/net/ethernet/8390/ax88796.c
@@ -748,13 +748,13 @@ static int ax_init_dev(struct net_device *dev)
 
 	ret = ax_mii_init(dev);
 	if (ret)
-		goto out_irq;
+		goto err_out;
 
 	ax_NS8390_init(dev, 0);
 
 	ret = register_netdev(dev);
 	if (ret)
-		goto out_irq;
+		goto err_out;
 
 	netdev_info(dev, "%dbit, irq %d, %lx, MAC: %pM\n",
 		    ei_local->word16 ? 16 : 8, dev->irq, dev->base_addr,
@@ -762,9 +762,6 @@ static int ax_init_dev(struct net_device *dev)
 
 	return 0;
 
- out_irq:
-	/* cleanup irq */
-	free_irq(dev->irq, dev);
 err_out:
 	return ret;
 }
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index 08d11cede9c9..f5b237e0bd60 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -61,6 +61,8 @@
 
 #define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF
 
+#define ENA_REGS_ADMIN_INTR_MASK 1
+
 /*****************************************************************************/
 /*****************************************************************************/
 /*****************************************************************************/
@@ -232,11 +234,9 @@ static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queu
 	tail_masked = admin_queue->sq.tail & queue_size_mask;
 
 	/* In case of queue FULL */
-	cnt = admin_queue->sq.tail - admin_queue->sq.head;
+	cnt = atomic_read(&admin_queue->outstanding_cmds);
 	if (cnt >= admin_queue->q_depth) {
-		pr_debug("admin queue is FULL (tail %d head %d depth: %d)\n",
-			 admin_queue->sq.tail, admin_queue->sq.head,
-			 admin_queue->q_depth);
+		pr_debug("admin queue is full.\n");
 		admin_queue->stats.out_of_space++;
 		return ERR_PTR(-ENOSPC);
 	}
@@ -508,15 +508,20 @@ static int ena_com_comp_status_to_errno(u8 comp_status)
 static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
						     struct ena_com_admin_queue *admin_queue)
 {
-	unsigned long flags;
-	u32 start_time;
+	unsigned long flags, timeout;
 	int ret;
 
-	start_time = ((u32)jiffies_to_usecs(jiffies));
+	timeout = jiffies + ADMIN_CMD_TIMEOUT_US;
+
+	while (1) {
+		spin_lock_irqsave(&admin_queue->q_lock, flags);
+		ena_com_handle_admin_completion(admin_queue);
+		spin_unlock_irqrestore(&admin_queue->q_lock, flags);
+
+		if (comp_ctx->status != ENA_CMD_SUBMITTED)
+			break;
 
-	while (comp_ctx->status == ENA_CMD_SUBMITTED) {
-		if ((((u32)jiffies_to_usecs(jiffies)) - start_time) >
-		    ADMIN_CMD_TIMEOUT_US) {
+		if (time_is_before_jiffies(timeout)) {
 			pr_err("Wait for completion (polling) timeout\n");
 			/* ENA didn't have any completion */
 			spin_lock_irqsave(&admin_queue->q_lock, flags);
@@ -528,10 +533,6 @@ static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_c
 			goto err;
 		}
 
-		spin_lock_irqsave(&admin_queue->q_lock, flags);
-		ena_com_handle_admin_completion(admin_queue);
-		spin_unlock_irqrestore(&admin_queue->q_lock, flags);
-
 		msleep(100);
 	}
 
@@ -1455,6 +1456,12 @@ void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
 
 void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
 {
+	u32 mask_value = 0;
+
+	if (polling)
+		mask_value = ENA_REGS_ADMIN_INTR_MASK;
+
+	writel(mask_value, ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF);
 	ena_dev->admin_queue.polling = polling;
 }
 
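
The reworked polling loop above is the standard jiffies-deadline idiom: compute the deadline once, check for completion before checking the deadline, and sleep between polls. A self-contained sketch of the same shape; poll_with_timeout and cond are assumed names, not ENA APIs:

    #include <linux/delay.h>
    #include <linux/errno.h>
    #include <linux/jiffies.h>
    #include <linux/types.h>

    /* Poll cond() for up to timeout_ms milliseconds.
     * time_is_before_jiffies(t) is true once t is in the past and is
     * wrap-around safe, unlike hand-rolled microsecond arithmetic.
     */
    static int poll_with_timeout(bool (*cond)(void), unsigned int timeout_ms)
    {
    	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

    	while (1) {
    		if (cond())
    			return 0;
    		if (time_is_before_jiffies(timeout))
    			return -ETIMEDOUT;
    		msleep(100);	/* same poll interval as the driver above */
    	}
    }

Checking the condition before the deadline also avoids the race the old loop had, where a completion arriving just before the timeout was reported as a failure.
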
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
index 67b2338f8fb3..3ee55e2fd694 100644
--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -80,7 +80,6 @@ static const struct ena_stats ena_stats_tx_strings[] = {
 	ENA_STAT_TX_ENTRY(tx_poll),
 	ENA_STAT_TX_ENTRY(doorbells),
 	ENA_STAT_TX_ENTRY(prepare_ctx_err),
-	ENA_STAT_TX_ENTRY(missing_tx_comp),
 	ENA_STAT_TX_ENTRY(bad_req_id),
 };
 
@@ -94,6 +93,7 @@ static const struct ena_stats ena_stats_rx_strings[] = {
 	ENA_STAT_RX_ENTRY(dma_mapping_err),
 	ENA_STAT_RX_ENTRY(bad_desc_num),
 	ENA_STAT_RX_ENTRY(rx_copybreak_pkt),
+	ENA_STAT_RX_ENTRY(empty_rx_ring),
 };
 
 static const struct ena_stats ena_stats_ena_com_strings[] = {
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 7c1214d78855..4f16ed38bcf3 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -190,6 +190,7 @@ static void ena_init_io_rings(struct ena_adapter *adapter)
 		rxr->sgl_size = adapter->max_rx_sgl_size;
 		rxr->smoothed_interval =
 			ena_com_get_nonadaptive_moderation_interval_rx(ena_dev);
+		rxr->empty_rx_queue = 0;
 	}
 }
 
@@ -1078,6 +1079,26 @@ inline void ena_adjust_intr_moderation(struct ena_ring *rx_ring,
 	rx_ring->per_napi_bytes = 0;
 }
 
+static inline void ena_unmask_interrupt(struct ena_ring *tx_ring,
+					struct ena_ring *rx_ring)
+{
+	struct ena_eth_io_intr_reg intr_reg;
+
+	/* Update intr register: rx intr delay,
+	 * tx intr delay and interrupt unmask
+	 */
+	ena_com_update_intr_reg(&intr_reg,
+				rx_ring->smoothed_interval,
+				tx_ring->smoothed_interval,
+				true);
+
+	/* It is a shared MSI-X.
+	 * Tx and Rx CQ have pointer to it.
+	 * So we use one of them to reach the intr reg
+	 */
+	ena_com_unmask_intr(rx_ring->ena_com_io_cq, &intr_reg);
+}
+
 static inline void ena_update_ring_numa_node(struct ena_ring *tx_ring,
 					     struct ena_ring *rx_ring)
 {
@@ -1108,7 +1129,6 @@ static int ena_io_poll(struct napi_struct *napi, int budget)
 {
 	struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
 	struct ena_ring *tx_ring, *rx_ring;
-	struct ena_eth_io_intr_reg intr_reg;
 
 	u32 tx_work_done;
 	u32 rx_work_done;
@@ -1149,22 +1169,9 @@ static int ena_io_poll(struct napi_struct *napi, int budget)
 		if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev))
 			ena_adjust_intr_moderation(rx_ring, tx_ring);
 
-		/* Update intr register: rx intr delay,
-		 * tx intr delay and interrupt unmask
-		 */
-		ena_com_update_intr_reg(&intr_reg,
-					rx_ring->smoothed_interval,
-					tx_ring->smoothed_interval,
-					true);
-
-		/* It is a shared MSI-X.
-		 * Tx and Rx CQ have pointer to it.
-		 * So we use one of them to reach the intr reg
-		 */
-		ena_com_unmask_intr(rx_ring->ena_com_io_cq, &intr_reg);
+		ena_unmask_interrupt(tx_ring, rx_ring);
 	}
 
-
 	ena_update_ring_numa_node(tx_ring, rx_ring);
 
 	ret = rx_work_done;
@@ -1485,6 +1492,11 @@ static int ena_up_complete(struct ena_adapter *adapter)
 
 	ena_napi_enable_all(adapter);
 
+	/* Enable completion queues interrupt */
+	for (i = 0; i < adapter->num_queues; i++)
+		ena_unmask_interrupt(&adapter->tx_ring[i],
+				     &adapter->rx_ring[i]);
+
 	/* schedule napi in case we had pending packets
 	 * from the last time we disable napi
 	 */
@@ -1532,6 +1544,7 @@ static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid)
 			  "Failed to get TX queue handlers. TX queue num %d rc: %d\n",
 			  qid, rc);
 		ena_com_destroy_io_queue(ena_dev, ena_qid);
+		return rc;
 	}
 
 	ena_com_update_numa_node(tx_ring->ena_com_io_cq, ctx.numa_node);
@@ -1596,6 +1609,7 @@ static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid)
 			  "Failed to get RX queue handlers. RX queue num %d rc: %d\n",
 			  qid, rc);
 		ena_com_destroy_io_queue(ena_dev, ena_qid);
+		return rc;
 	}
 
 	ena_com_update_numa_node(rx_ring->ena_com_io_cq, ctx.numa_node);
@@ -1981,6 +1995,7 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	tx_info->tx_descs = nb_hw_desc;
 	tx_info->last_jiffies = jiffies;
+	tx_info->print_once = 0;
 
 	tx_ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use,
 						    tx_ring->ring_size);
@@ -2550,13 +2565,44 @@ err:
 		  "Reset attempt failed. Can not reset the device\n");
 }
 
-static void check_for_missing_tx_completions(struct ena_adapter *adapter)
+static int check_missing_comp_in_queue(struct ena_adapter *adapter,
+				       struct ena_ring *tx_ring)
 {
 	struct ena_tx_buffer *tx_buf;
 	unsigned long last_jiffies;
+	u32 missed_tx = 0;
+	int i;
+
+	for (i = 0; i < tx_ring->ring_size; i++) {
+		tx_buf = &tx_ring->tx_buffer_info[i];
+		last_jiffies = tx_buf->last_jiffies;
+		if (unlikely(last_jiffies &&
+			     time_is_before_jiffies(last_jiffies + TX_TIMEOUT))) {
+			if (!tx_buf->print_once)
+				netif_notice(adapter, tx_err, adapter->netdev,
+					     "Found a Tx that wasn't completed on time, qid %d, index %d.\n",
+					     tx_ring->qid, i);
+
+			tx_buf->print_once = 1;
+			missed_tx++;
+
+			if (unlikely(missed_tx > MAX_NUM_OF_TIMEOUTED_PACKETS)) {
+				netif_err(adapter, tx_err, adapter->netdev,
+					  "The number of lost tx completions is above the threshold (%d > %d). Reset the device\n",
+					  missed_tx, MAX_NUM_OF_TIMEOUTED_PACKETS);
+				set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
+				return -EIO;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static void check_for_missing_tx_completions(struct ena_adapter *adapter)
+{
 	struct ena_ring *tx_ring;
-	int i, j, budget;
-	u32 missed_tx;
+	int i, budget, rc;
 
 	/* Make sure the driver doesn't turn the device in other process */
 	smp_rmb();
@@ -2572,31 +2618,9 @@ static void check_for_missing_tx_completions(struct ena_adapter *adapter)
 	for (i = adapter->last_monitored_tx_qid; i < adapter->num_queues; i++) {
 		tx_ring = &adapter->tx_ring[i];
 
-		for (j = 0; j < tx_ring->ring_size; j++) {
-			tx_buf = &tx_ring->tx_buffer_info[j];
-			last_jiffies = tx_buf->last_jiffies;
-			if (unlikely(last_jiffies && time_is_before_jiffies(last_jiffies + TX_TIMEOUT))) {
-				netif_notice(adapter, tx_err, adapter->netdev,
-					     "Found a Tx that wasn't completed on time, qid %d, index %d.\n",
-					     tx_ring->qid, j);
-
-				u64_stats_update_begin(&tx_ring->syncp);
-				missed_tx = tx_ring->tx_stats.missing_tx_comp++;
-				u64_stats_update_end(&tx_ring->syncp);
-
-				/* Clear last jiffies so the lost buffer won't
-				 * be counted twice.
-				 */
-				tx_buf->last_jiffies = 0;
-
-				if (unlikely(missed_tx > MAX_NUM_OF_TIMEOUTED_PACKETS)) {
-					netif_err(adapter, tx_err, adapter->netdev,
-						  "The number of lost tx completion is above the threshold (%d > %d). Reset the device\n",
-						  missed_tx, MAX_NUM_OF_TIMEOUTED_PACKETS);
-					set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
-				}
-			}
-		}
+		rc = check_missing_comp_in_queue(adapter, tx_ring);
+		if (unlikely(rc))
+			return;
 
 		budget--;
 		if (!budget)
@@ -2606,6 +2630,58 @@ static void check_for_missing_tx_completions(struct ena_adapter *adapter)
 	adapter->last_monitored_tx_qid = i % adapter->num_queues;
 }
 
+/* trigger napi schedule after 2 consecutive detections */
+#define EMPTY_RX_REFILL 2
+/* For the rare case where the device runs out of Rx descriptors and the
+ * napi handler failed to refill new Rx descriptors (due to a lack of memory
+ * for example).
+ * This case will lead to a deadlock:
+ * The device won't send interrupts since all the new Rx packets will be dropped
+ * The napi handler won't allocate new Rx descriptors so the device won't be
+ * able to send new packets.
+ *
+ * This scenario can happen when the kernel's vm.min_free_kbytes is too small.
+ * It is recommended to have at least 512MB, with a minimum of 128MB for
+ * constrained environments.
+ *
+ * When such a situation is detected - Reschedule napi
+ */
+static void check_for_empty_rx_ring(struct ena_adapter *adapter)
+{
+	struct ena_ring *rx_ring;
+	int i, refill_required;
+
+	if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
+		return;
+
+	if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
+		return;
+
+	for (i = 0; i < adapter->num_queues; i++) {
+		rx_ring = &adapter->rx_ring[i];
+
+		refill_required =
+			ena_com_sq_empty_space(rx_ring->ena_com_io_sq);
+		if (unlikely(refill_required == (rx_ring->ring_size - 1))) {
+			rx_ring->empty_rx_queue++;
+
+			if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) {
+				u64_stats_update_begin(&rx_ring->syncp);
+				rx_ring->rx_stats.empty_rx_ring++;
+				u64_stats_update_end(&rx_ring->syncp);
+
+				netif_err(adapter, drv, adapter->netdev,
+					  "trigger refill for ring %d\n", i);
+
+				napi_schedule(rx_ring->napi);
+				rx_ring->empty_rx_queue = 0;
+			}
+		} else {
+			rx_ring->empty_rx_queue = 0;
+		}
+	}
+}
+
 /* Check for keep alive expiration */
 static void check_for_missing_keep_alive(struct ena_adapter *adapter)
 {
@@ -2660,6 +2736,8 @@ static void ena_timer_service(unsigned long data)
 
 	check_for_missing_tx_completions(adapter);
 
+	check_for_empty_rx_ring(adapter);
+
 	if (debug_area)
 		ena_dump_stats_to_buf(adapter, debug_area);
 
@@ -2840,6 +2918,11 @@ static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
 {
 	int release_bars;
 
+	if (ena_dev->mem_bar)
+		devm_iounmap(&pdev->dev, ena_dev->mem_bar);
+
+	devm_iounmap(&pdev->dev, ena_dev->reg_bar);
+
 	release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
 	pci_release_selected_regions(pdev, release_bars);
 }
@@ -2927,8 +3010,9 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		goto err_free_ena_dev;
 	}
 
-	ena_dev->reg_bar = ioremap(pci_resource_start(pdev, ENA_REG_BAR),
-				   pci_resource_len(pdev, ENA_REG_BAR));
+	ena_dev->reg_bar = devm_ioremap(&pdev->dev,
+					pci_resource_start(pdev, ENA_REG_BAR),
+					pci_resource_len(pdev, ENA_REG_BAR));
 	if (!ena_dev->reg_bar) {
 		dev_err(&pdev->dev, "failed to remap regs bar\n");
 		rc = -EFAULT;
@@ -2948,8 +3032,9 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	ena_set_push_mode(pdev, ena_dev, &get_feat_ctx);
 
 	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
-		ena_dev->mem_bar = ioremap_wc(pci_resource_start(pdev, ENA_MEM_BAR),
-					      pci_resource_len(pdev, ENA_MEM_BAR));
+		ena_dev->mem_bar = devm_ioremap_wc(&pdev->dev,
+						   pci_resource_start(pdev, ENA_MEM_BAR),
+						   pci_resource_len(pdev, ENA_MEM_BAR));
 		if (!ena_dev->mem_bar) {
 			rc = -EFAULT;
 			goto err_device_destroy;
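
The ioremap() to devm_ioremap() conversion in ena_probe() ties the BAR mappings to the PCI device's lifetime, which is why the probe error paths no longer need explicit iounmap() calls; ena_release_bars() still unmaps eagerly with devm_iounmap() so the mappings are gone before pci_release_selected_regions() runs. A sketch of the mapping half; demo_map_bar is an assumed helper name, not an ENA function:

    #include <linux/io.h>
    #include <linux/pci.h>

    /* Map a PCI BAR with device-managed lifetime: the mapping is released
     * automatically when the driver unbinds, so probe error paths need no
     * explicit iounmap().
     */
    static void __iomem *demo_map_bar(struct pci_dev *pdev, int bar)
    {
    	return devm_ioremap(&pdev->dev,
    			    pci_resource_start(pdev, bar),
    			    pci_resource_len(pdev, bar));
    }
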
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index 0e22bce6239d..a4d3d5e21068 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -45,7 +45,7 @@
 
 #define DRV_MODULE_VER_MAJOR 1
 #define DRV_MODULE_VER_MINOR 1
-#define DRV_MODULE_VER_SUBMINOR 2
+#define DRV_MODULE_VER_SUBMINOR 7
 
 #define DRV_MODULE_NAME "ena"
 #ifndef DRV_MODULE_VERSION
@@ -146,7 +146,18 @@ struct ena_tx_buffer {
 	u32 tx_descs;
 	/* num of buffers used by this skb */
 	u32 num_of_bufs;
-	/* Save the last jiffies to detect missing tx packets */
+
+	/* Used when detecting missing tx packets, to limit the number of prints */
+	u32 print_once;
+	/* Save the last jiffies to detect missing tx packets
+	 *
+	 * Set to a non-zero value on ena_start_xmit and cleared by napi and
+	 * the timer service routine.
+	 *
+	 * While this value is not protected by a lock, a given packet is not
+	 * expected to be handled by ena_start_xmit and by napi/timer_service
+	 * at the same time.
+	 */
 	unsigned long last_jiffies;
 	struct ena_com_buf bufs[ENA_PKT_MAX_BUFS];
 } ____cacheline_aligned;
@@ -170,7 +181,6 @@ struct ena_stats_tx {
 	u64 napi_comp;
 	u64 tx_poll;
 	u64 doorbells;
-	u64 missing_tx_comp;
 	u64 bad_req_id;
 };
 
@@ -184,6 +194,7 @@ struct ena_stats_rx {
 	u64 dma_mapping_err;
 	u64 bad_desc_num;
 	u64 rx_copybreak_pkt;
+	u64 empty_rx_ring;
 };
 
 struct ena_ring {
@@ -231,6 +242,7 @@ struct ena_ring {
 	struct ena_stats_tx tx_stats;
 	struct ena_stats_rx rx_stats;
 	};
+	int empty_rx_queue;
 } ____cacheline_aligned;
 
 struct ena_stats_dev {
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
index b3bc87fe3764..0a98c369df20 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
@@ -324,7 +324,7 @@ static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
 			      struct xgbe_ring *ring,
 			      struct xgbe_ring_data *rdata)
 {
-	int order, ret;
+	int ret;
 
 	if (!ring->rx_hdr_pa.pages) {
 		ret = xgbe_alloc_pages(pdata, &ring->rx_hdr_pa, GFP_ATOMIC, 0);
@@ -333,9 +333,8 @@ static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
 	}
 
 	if (!ring->rx_buf_pa.pages) {
-		order = max_t(int, PAGE_ALLOC_COSTLY_ORDER - 1, 0);
 		ret = xgbe_alloc_pages(pdata, &ring->rx_buf_pa, GFP_ATOMIC,
-				       order);
+				       PAGE_ALLOC_COSTLY_ORDER);
 		if (ret)
 			return ret;
 	}
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
index 4ee15ff06a44..faeb4935ef3e 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
@@ -200,29 +200,18 @@ err_exit:
 static int hw_atl_a0_hw_offload_set(struct aq_hw_s *self,
				    struct aq_nic_cfg_s *aq_nic_cfg)
 {
-	int err = 0;
-
 	/* TX checksums offloads*/
 	tpo_ipv4header_crc_offload_en_set(self, 1);
 	tpo_tcp_udp_crc_offload_en_set(self, 1);
-	if (err < 0)
-		goto err_exit;
 
 	/* RX checksums offloads*/
 	rpo_ipv4header_crc_offload_en_set(self, 1);
 	rpo_tcp_udp_crc_offload_en_set(self, 1);
-	if (err < 0)
-		goto err_exit;
 
 	/* LSO offloads*/
 	tdm_large_send_offload_en_set(self, 0xFFFFFFFFU);
-	if (err < 0)
-		goto err_exit;
-
-	err = aq_hw_err_from_flags(self);
 
-err_exit:
-	return err;
+	return aq_hw_err_from_flags(self);
 }
 
 static int hw_atl_a0_hw_init_tx_path(struct aq_hw_s *self)
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index 42150708191d..1bceb7358e5c 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -200,25 +200,18 @@ err_exit:
 static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
				    struct aq_nic_cfg_s *aq_nic_cfg)
 {
-	int err = 0;
 	unsigned int i;
 
 	/* TX checksums offloads*/
 	tpo_ipv4header_crc_offload_en_set(self, 1);
 	tpo_tcp_udp_crc_offload_en_set(self, 1);
-	if (err < 0)
-		goto err_exit;
 
 	/* RX checksums offloads*/
 	rpo_ipv4header_crc_offload_en_set(self, 1);
 	rpo_tcp_udp_crc_offload_en_set(self, 1);
-	if (err < 0)
-		goto err_exit;
 
 	/* LSO offloads*/
 	tdm_large_send_offload_en_set(self, 0xFFFFFFFFU);
-	if (err < 0)
-		goto err_exit;
 
 /* LRO offloads */
 	{
@@ -245,10 +238,7 @@ static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
 
 		rpo_lro_en_set(self, aq_nic_cfg->is_lro ? 0xFFFFFFFFU : 0U);
 	}
-	err = aq_hw_err_from_flags(self);
-
-err_exit:
-	return err;
+	return aq_hw_err_from_flags(self);
 }
 
 static int hw_atl_b0_hw_init_tx_path(struct aq_hw_s *self)
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
index b8e3d88f0879..a66aee51ab5b 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
@@ -193,9 +193,6 @@ int hw_atl_utils_hw_get_regs(struct aq_hw_s *self,
			     struct aq_hw_caps_s *aq_hw_caps,
			     u32 *regs_buff);
 
-int hw_atl_utils_hw_get_settings(struct aq_hw_s *self,
-				 struct ethtool_cmd *cmd);
-
 int hw_atl_utils_hw_set_power(struct aq_hw_s *self,
			      unsigned int power_state);
 
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index 63f2deec2a52..77a1c03255de 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -1353,6 +1353,7 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) &&
 	    pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
 		printk(KERN_ERR "atl2: No usable DMA configuration, aborting\n");
+		err = -EIO;
 		goto err_dma;
 	}
 
@@ -1366,10 +1367,11 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	 * pcibios_set_master to do the needed arch specific settings */
 	pci_set_master(pdev);
 
-	err = -ENOMEM;
 	netdev = alloc_etherdev(sizeof(struct atl2_adapter));
-	if (!netdev)
+	if (!netdev) {
+		err = -ENOMEM;
 		goto err_alloc_etherdev;
+	}
 
 	SET_NETDEV_DEV(netdev, &pdev->dev);
 
@@ -1408,8 +1410,6 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (err)
 		goto err_sw_init;
 
-	err = -EIO;
-
 	netdev->hw_features = NETIF_F_HW_VLAN_CTAG_RX;
 	netdev->features |= (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
 
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 099b374c1b17..5274501428e4 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -2026,9 +2026,12 @@ static int bcm_sysport_probe(struct platform_device *pdev)
 	priv->num_rx_desc_words = params->num_rx_desc_words;
 
 	priv->irq0 = platform_get_irq(pdev, 0);
-	if (!priv->is_lite)
+	if (!priv->is_lite) {
 		priv->irq1 = platform_get_irq(pdev, 1);
-	priv->wol_irq = platform_get_irq(pdev, 2);
+		priv->wol_irq = platform_get_irq(pdev, 2);
+	} else {
+		priv->wol_irq = platform_get_irq(pdev, 1);
+	}
 	if (priv->irq0 <= 0 || (priv->irq1 <= 0 && !priv->is_lite)) {
 		dev_err(&pdev->dev, "invalid interrupts\n");
 		ret = -EINVAL;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index eccb3d1b6abb..f619c4cac51f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -1926,7 +1926,7 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
 	}
 
 	/* select a non-FCoE queue */
-	return fallback(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp);
+	return fallback(dev, skb) % (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos);
 }
 
 void bnx2x_set_num_queues(struct bnx2x *bp)
@@ -3883,15 +3883,26 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		/* when transmitting in a vf, start bd must hold the ethertype
 		 * for fw to enforce it
 		 */
+		u16 vlan_tci = 0;
 #ifndef BNX2X_STOP_ON_ERROR
-		if (IS_VF(bp))
+		if (IS_VF(bp)) {
 #endif
-			tx_start_bd->vlan_or_ethertype =
-				cpu_to_le16(ntohs(eth->h_proto));
+			/* Still need to consider inband vlan for enforced */
+			if (__vlan_get_tag(skb, &vlan_tci)) {
+				tx_start_bd->vlan_or_ethertype =
+					cpu_to_le16(ntohs(eth->h_proto));
+			} else {
+				tx_start_bd->bd_flags.as_bitfield |=
+					(X_ETH_INBAND_VLAN <<
+					 ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
+				tx_start_bd->vlan_or_ethertype =
+					cpu_to_le16(vlan_tci);
+			}
 #ifndef BNX2X_STOP_ON_ERROR
-		else
+		} else {
 			/* used by FW for packet accounting */
 			tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
+		}
 #endif
 	}
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index a851f95c307a..349a46593abf 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -12729,7 +12729,7 @@ static int bnx2x_set_mc_list(struct bnx2x *bp)
 	} else {
 		/* If no mc addresses are required, flush the configuration */
 		rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
-		if (rc)
+		if (rc < 0)
 			BNX2X_ERR("Failed to clear multicast configuration %d\n",
				  rc);
 	}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index bdfd53b46bc5..9ca994d0bab6 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -901,6 +901,8 @@ static void bnx2x_vf_flr(struct bnx2x *bp, struct bnx2x_virtf *vf)
 	/* release VF resources */
 	bnx2x_vf_free_resc(bp, vf);
 
+	vf->malicious = false;
+
 	/* re-open the mailbox */
 	bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
 	return;
@@ -1822,9 +1824,11 @@ get_vf:
			  vf->abs_vfid, qidx);
		bnx2x_vf_handle_rss_update_eqe(bp, vf);
	case EVENT_RING_OPCODE_VF_FLR:
-	case EVENT_RING_OPCODE_MALICIOUS_VF:
		/* Do nothing for now */
		return 0;
+	case EVENT_RING_OPCODE_MALICIOUS_VF:
+		vf->malicious = true;
+		return 0;
	}
 
	return 0;
@@ -1905,6 +1909,13 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
			continue;
		}
 
+		if (vf->malicious) {
+			DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
+			       "vf %d malicious so no stats for it\n",
+			       vf->abs_vfid);
+			continue;
+		}
+
		DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
		       "add addresses for vf %d\n", vf->abs_vfid);
		for_each_vfq(vf, j) {
@@ -3042,7 +3053,7 @@ void bnx2x_vf_pci_dealloc(struct bnx2x *bp)
 {
	BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping,
		       sizeof(struct bnx2x_vf_mbx_msg));
-	BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->pf2vf_bulletin_mapping,
+	BNX2X_PCI_FREE(bp->pf2vf_bulletin, bp->pf2vf_bulletin_mapping,
		       sizeof(union pf_vf_bulletin));
 }
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index 888d0b6632e8..53466f6cebab 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -141,6 +141,7 @@ struct bnx2x_virtf {
 #define VF_RESET 3	/* VF FLR'd, pending cleanup */
 
	bool flr_clnup_stage;	/* true during flr cleanup */
+	bool malicious;		/* true if FW indicated so, until FLR */
 
	/* dma */
	dma_addr_t fw_stat_map;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index b56c54d68d5e..74e8e215524d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1301,10 +1301,11 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
 		cp_cons = NEXT_CMP(cp_cons);
 	}
 
-	if (unlikely(agg_bufs > MAX_SKB_FRAGS)) {
+	if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
 		bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
-		netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
-			    agg_bufs, (int)MAX_SKB_FRAGS);
+		if (agg_bufs > MAX_SKB_FRAGS)
+			netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
+				    agg_bufs, (int)MAX_SKB_FRAGS);
 		return NULL;
 	}
 
@@ -1562,6 +1563,45 @@ next_rx_no_prod:
 	return rc;
 }
 
+/* In netpoll mode, if we are using a combined completion ring, we need to
+ * discard the rx packets and recycle the buffers.
+ */
+static int bnxt_force_rx_discard(struct bnxt *bp, struct bnxt_napi *bnapi,
+				 u32 *raw_cons, u8 *event)
+{
+	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+	u32 tmp_raw_cons = *raw_cons;
+	struct rx_cmp_ext *rxcmp1;
+	struct rx_cmp *rxcmp;
+	u16 cp_cons;
+	u8 cmp_type;
+
+	cp_cons = RING_CMP(tmp_raw_cons);
+	rxcmp = (struct rx_cmp *)
+		&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+
+	tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
+	cp_cons = RING_CMP(tmp_raw_cons);
+	rxcmp1 = (struct rx_cmp_ext *)
+		&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+
+	if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
+		return -EBUSY;
+
+	cmp_type = RX_CMP_TYPE(rxcmp);
+	if (cmp_type == CMP_TYPE_RX_L2_CMP) {
+		rxcmp1->rx_cmp_cfa_code_errors_v2 |=
+			cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
+	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
+		struct rx_tpa_end_cmp_ext *tpa_end1;
+
+		tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
+		tpa_end1->rx_tpa_end_cmp_errors_v2 |=
+			cpu_to_le32(RX_TPA_END_CMP_ERRORS);
+	}
+	return bnxt_rx_pkt(bp, bnapi, raw_cons, event);
+}
+
 #define BNXT_GET_EVENT_PORT(data)	\
	((data) &			\
	 ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
@@ -1744,7 +1784,11 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
 			if (unlikely(tx_pkts > bp->tx_wake_thresh))
				rx_pkts = budget;
 		} else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
-			rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event);
+			if (likely(budget))
+				rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event);
+			else
+				rc = bnxt_force_rx_discard(bp, bnapi, &raw_cons,
+							   &event);
 			if (likely(rc >= 0))
				rx_pkts += rc;
			else if (rc == -EBUSY)	/* partial completion */
@@ -6663,12 +6707,11 @@ static void bnxt_poll_controller(struct net_device *dev)
 	struct bnxt *bp = netdev_priv(dev);
 	int i;
 
-	for (i = 0; i < bp->cp_nr_rings; i++) {
-		struct bnxt_irq *irq = &bp->irq_tbl[i];
+	/* Only process tx rings/combined rings in netpoll mode. */
+	for (i = 0; i < bp->tx_nr_rings; i++) {
+		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
 
-		disable_irq(irq->vector);
-		irq->handler(irq->vector, bp->bnapi[i]);
-		enable_irq(irq->vector);
+		napi_schedule(&txr->bnapi->napi);
 	}
 }
 #endif
@@ -7630,8 +7673,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	dev->min_mtu = ETH_ZLEN;
 	dev->max_mtu = BNXT_MAX_MTU;
 
-	bnxt_dcb_init(bp);
-
 #ifdef CONFIG_BNXT_SRIOV
 	init_waitqueue_head(&bp->sriov_cfg_wait);
 #endif
@@ -7669,6 +7710,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	bnxt_hwrm_func_qcfg(bp);
 	bnxt_hwrm_port_led_qcaps(bp);
 	bnxt_ethtool_init(bp);
+	bnxt_dcb_init(bp);
 
 	bnxt_set_rx_skb_mode(bp, false);
 	bnxt_set_tpa_flags(bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 3ef42dbc6327..d46a85041083 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -374,12 +374,16 @@ struct rx_tpa_end_cmp_ext {
 
	__le32 rx_tpa_end_cmp_errors_v2;
	#define RX_TPA_END_CMP_V2		(0x1 << 0)
-	#define RX_TPA_END_CMP_ERRORS		(0x7fff << 1)
+	#define RX_TPA_END_CMP_ERRORS		(0x3 << 1)
	#define RX_TPA_END_CMPL_ERRORS_SHIFT	 1
 
	u32 rx_tpa_end_cmp_start_opaque;
 };
 
+#define TPA_END_ERRORS(rx_tpa_end_ext)				\
+	((rx_tpa_end_ext)->rx_tpa_end_cmp_errors_v2 &		\
+	 cpu_to_le32(RX_TPA_END_CMP_ERRORS))
+
 #define DB_IDX_MASK	0xffffff
 #define DB_IDX_VALID	(0x1 << 26)
 #define DB_IRQ_DIS	(0x1 << 27)
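
TPA_END_ERRORS tests error bits without byte-swapping the descriptor word: the constant mask is converted once with cpu_to_le32(), so the comparison is endian-safe and costs nothing on little-endian hosts. The same idea in isolation; struct demo_cmp and DEMO_ERRORS are illustrative names:

    #include <linux/kernel.h>
    #include <linux/types.h>

    struct demo_cmp {
    	__le32 errors_v2;	/* device writes this in little-endian */
    };

    #define DEMO_ERRORS	(0x3 << 1)

    static inline bool demo_has_errors(const struct demo_cmp *cmp)
    {
    	/* swap the constant, not the descriptor word */
    	return cmp->errors_v2 & cpu_to_le32(DEMO_ERRORS);
    }
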
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
index 46de2f8ff024..5c6dd0ce209f 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
@@ -553,8 +553,10 @@ static u8 bnxt_dcbnl_setdcbx(struct net_device *dev, u8 mode)
 	if ((mode & DCB_CAP_DCBX_VER_CEE) || !(mode & DCB_CAP_DCBX_VER_IEEE))
 		return 1;
 
-	if ((mode & DCB_CAP_DCBX_HOST) && BNXT_VF(bp))
-		return 1;
+	if (mode & DCB_CAP_DCBX_HOST) {
+		if (BNXT_VF(bp) || (bp->flags & BNXT_FLAG_FW_LLDP_AGENT))
+			return 1;
+	}
 
 	if (mode == bp->dcbx_cap)
 		return 0;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 38a5c6764bb5..53309f659951 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -2171,9 +2171,10 @@ static int cxgb_up(struct adapter *adap)
 {
 	int err;
 
+	mutex_lock(&uld_mutex);
 	err = setup_sge_queues(adap);
 	if (err)
-		goto out;
+		goto rel_lock;
 	err = setup_rss(adap);
 	if (err)
 		goto freeq;
@@ -2196,23 +2197,28 @@ static int cxgb_up(struct adapter *adap)
 		if (err)
 			goto irq_err;
 	}
+
 	enable_rx(adap);
 	t4_sge_start(adap);
 	t4_intr_enable(adap);
 	adap->flags |= FULL_INIT_DONE;
+	mutex_unlock(&uld_mutex);
+
 	notify_ulds(adap, CXGB4_STATE_UP);
 #if IS_ENABLED(CONFIG_IPV6)
 	update_clip(adap);
 #endif
 	/* Initialize hash mac addr list*/
 	INIT_LIST_HEAD(&adap->mac_hlist);
- out:
 	return err;
+
 irq_err:
 	dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
 freeq:
 	t4_free_sge_resources(adap);
-	goto out;
+rel_lock:
+	mutex_unlock(&uld_mutex);
+	return err;
 }
 
 static void cxgb_down(struct adapter *adapter)
@@ -2771,6 +2777,9 @@ void t4_fatal_err(struct adapter *adap)
 {
 	int port;
 
+	if (pci_channel_offline(adap->pdev))
+		return;
+
 	/* Disable the SGE since ULDs are going to free resources that
	 * could be exposed to the adapter. RDMA MWs for example...
	 */
@@ -3882,9 +3891,10 @@ static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
 	spin_lock(&adap->stats_lock);
 	for_each_port(adap, i) {
 		struct net_device *dev = adap->port[i];
-
-		netif_device_detach(dev);
-		netif_carrier_off(dev);
+		if (dev) {
+			netif_device_detach(dev);
+			netif_carrier_off(dev);
+		}
 	}
 	spin_unlock(&adap->stats_lock);
 	disable_interrupts(adap);
@@ -3963,12 +3973,13 @@ static void eeh_resume(struct pci_dev *pdev)
 	rtnl_lock();
 	for_each_port(adap, i) {
 		struct net_device *dev = adap->port[i];
-
-		if (netif_running(dev)) {
-			link_start(dev);
-			cxgb_set_rxmode(dev);
+		if (dev) {
+			if (netif_running(dev)) {
+				link_start(dev);
+				cxgb_set_rxmode(dev);
+			}
+			netif_device_attach(dev);
 		}
-		netif_device_attach(dev);
 	}
 	rtnl_unlock();
 }
@@ -4516,7 +4527,7 @@ static void dummy_setup(struct net_device *dev)
 	/* Initialize the device structure. */
 	dev->netdev_ops = &cxgb4_mgmt_netdev_ops;
 	dev->ethtool_ops = &cxgb4_mgmt_ethtool_ops;
-	dev->destructor = free_netdev;
+	dev->needs_free_netdev = true;
 }
 
 static int config_mgmt_dev(struct pci_dev *pdev)
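
With cxgb_up() now running entirely under uld_mutex, every exit path has to drop the lock, which is what the new rel_lock label provides. A minimal sketch of that lock-and-unwind shape; demo_mutex and the step helpers are hypothetical stand-ins for uld_mutex and setup_sge_queues()/setup_rss():

    #include <linux/mutex.h>

    static DEFINE_MUTEX(demo_mutex);

    static int demo_step_a(void) { return 0; }	/* stand-in for setup_sge_queues() */
    static int demo_step_b(void) { return 0; }	/* stand-in for setup_rss() */
    static void demo_undo_a(void) { }

    static int demo_up(void)
    {
    	int err;

    	mutex_lock(&demo_mutex);
    	err = demo_step_a();
    	if (err)
    		goto rel_lock;
    	err = demo_step_b();
    	if (err)
    		goto undo_a;
    	mutex_unlock(&demo_mutex);
    	return 0;

    undo_a:
    	demo_undo_a();
    rel_lock:
    	mutex_unlock(&demo_mutex);	/* never return with the lock held */
    	return err;
    }
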
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index aded42b96f6d..3a34aa629f7d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -4557,8 +4557,13 @@ void t4_intr_enable(struct adapter *adapter)
  */
 void t4_intr_disable(struct adapter *adapter)
 {
-	u32 whoami = t4_read_reg(adapter, PL_WHOAMI_A);
-	u32 pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
+	u32 whoami, pf;
+
+	if (pci_channel_offline(adapter->pdev))
+		return;
+
+	whoami = t4_read_reg(adapter, PL_WHOAMI_A);
+	pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
			SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
 
 	t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), 0);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
index fa376444e57c..f2d623a7aee0 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
@@ -37,7 +37,7 @@
 
 #define T4FW_VERSION_MAJOR 0x01
 #define T4FW_VERSION_MINOR 0x10
-#define T4FW_VERSION_MICRO 0x21
+#define T4FW_VERSION_MICRO 0x2D
 #define T4FW_VERSION_BUILD 0x00
 
 #define T4FW_MIN_VERSION_MAJOR 0x01
@@ -46,7 +46,7 @@
 
 #define T5FW_VERSION_MAJOR 0x01
 #define T5FW_VERSION_MINOR 0x10
-#define T5FW_VERSION_MICRO 0x21
+#define T5FW_VERSION_MICRO 0x2D
 #define T5FW_VERSION_BUILD 0x00
 
 #define T5FW_MIN_VERSION_MAJOR 0x00
@@ -55,7 +55,7 @@
 
 #define T6FW_VERSION_MAJOR 0x01
 #define T6FW_VERSION_MINOR 0x10
-#define T6FW_VERSION_MICRO 0x21
+#define T6FW_VERSION_MICRO 0x2D
 #define T6FW_VERSION_BUILD 0x00
 
 #define T6FW_MIN_VERSION_MAJOR 0x00
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index f3a09ab55900..4eee18ce9be4 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -5078,9 +5078,11 @@ static netdev_features_t be_features_check(struct sk_buff *skb,
 	struct be_adapter *adapter = netdev_priv(dev);
 	u8 l4_hdr = 0;
 
-	/* The code below restricts offload features for some tunneled packets.
+	/* The code below restricts offload features for some tunneled and
+	 * Q-in-Q packets.
 	 * Offload features for normal (non tunnel) packets are unchanged.
 	 */
+	features = vlan_features_check(skb, features);
 	if (!skb->encapsulation ||
 	    !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
 		return features;
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index e863ba74d005..8bb0db990c8f 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -739,6 +739,8 @@ static int ethoc_open(struct net_device *dev)
 	if (ret)
 		return ret;
 
+	napi_enable(&priv->napi);
+
 	ethoc_init_ring(priv, dev->mem_start);
 	ethoc_reset(priv);
 
@@ -754,7 +756,6 @@ static int ethoc_open(struct net_device *dev)
 	priv->old_duplex = -1;
 
 	phy_start(dev->phydev);
-	napi_enable(&priv->napi);
 
 	if (netif_msg_ifup(priv)) {
 		dev_info(&dev->dev, "I/O: %08lx Memory: %08lx-%08lx\n",
diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
index 6ac336b546e6..1536356e2ea8 100644
--- a/drivers/net/ethernet/faraday/ftmac100.c
+++ b/drivers/net/ethernet/faraday/ftmac100.c
@@ -1174,11 +1174,17 @@ static int ftmac100_remove(struct platform_device *pdev)
 	return 0;
 }
 
+static const struct of_device_id ftmac100_of_ids[] = {
+	{ .compatible = "andestech,atmac100" },
+	{ }
+};
+
 static struct platform_driver ftmac100_driver = {
 	.probe		= ftmac100_probe,
 	.remove		= ftmac100_remove,
 	.driver		= {
 		.name	= DRV_NAME,
+		.of_match_table = ftmac100_of_ids
 	},
 };
 
@@ -1202,3 +1208,4 @@ module_exit(ftmac100_exit);
 MODULE_AUTHOR("Po-Yu Chuang <ratbert@faraday-tech.com>");
 MODULE_DESCRIPTION("FTMAC100 driver");
 MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(of, ftmac100_of_ids);
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 9a520e4f0df9..290ad0563320 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -2647,7 +2647,7 @@ static int dpaa_eth_probe(struct platform_device *pdev)
 	priv->buf_layout[TX].priv_data_size = DPAA_TX_PRIV_DATA_SIZE; /* Tx */
 
 	/* device used for DMA mapping */
-	arch_setup_dma_ops(dev, 0, 0, NULL, false);
+	set_dma_ops(dev, get_dma_ops(&pdev->dev));
 	err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(40));
 	if (err) {
 		dev_err(dev, "dma_coerce_mask_and_coherent() failed\n");
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 56a563f90b0b..f7c8649fd28f 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -3192,7 +3192,7 @@ static int fec_reset_phy(struct platform_device *pdev)
3192{ 3192{
3193 int err, phy_reset; 3193 int err, phy_reset;
3194 bool active_high = false; 3194 bool active_high = false;
3195 int msec = 1; 3195 int msec = 1, phy_post_delay = 0;
3196 struct device_node *np = pdev->dev.of_node; 3196 struct device_node *np = pdev->dev.of_node;
3197 3197
3198 if (!np) 3198 if (!np)
@@ -3209,6 +3209,11 @@ static int fec_reset_phy(struct platform_device *pdev)
3209 else if (!gpio_is_valid(phy_reset)) 3209 else if (!gpio_is_valid(phy_reset))
3210 return 0; 3210 return 0;
3211 3211
3212 err = of_property_read_u32(np, "phy-reset-post-delay", &phy_post_delay);
3213 /* valid reset duration should be less than 1s */
3214 if (!err && phy_post_delay > 1000)
3215 return -EINVAL;
3216
3212 active_high = of_property_read_bool(np, "phy-reset-active-high"); 3217 active_high = of_property_read_bool(np, "phy-reset-active-high");
3213 3218
3214 err = devm_gpio_request_one(&pdev->dev, phy_reset, 3219 err = devm_gpio_request_one(&pdev->dev, phy_reset,
@@ -3226,6 +3231,15 @@ static int fec_reset_phy(struct platform_device *pdev)
3226 3231
3227 gpio_set_value_cansleep(phy_reset, !active_high); 3232 gpio_set_value_cansleep(phy_reset, !active_high);
3228 3233
3234 if (!phy_post_delay)
3235 return 0;
3236
3237 if (phy_post_delay > 20)
3238 msleep(phy_post_delay);
3239 else
3240 usleep_range(phy_post_delay * 1000,
3241 phy_post_delay * 1000 + 1000);
3242
3229 return 0; 3243 return 0;
3230} 3244}
3231#else /* CONFIG_OF */ 3245#else /* CONFIG_OF */
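The fec hunk follows the sleep-primitive guidance from Documentation/timers/timers-howto.txt: msleep() is only accurate for longer waits, so sub-20ms delays use usleep_range() with a little slack. A compact sketch of the same choice, with an illustrative helper name:

#include <linux/delay.h>
#include <linux/types.h>

static void demo_post_reset_delay(u32 delay_ms)
{
	if (!delay_ms)
		return;

	if (delay_ms > 20)
		msleep(delay_ms);		/* long enough for msleep */
	else
		usleep_range(delay_ms * 1000,	/* short: hrtimer-backed */
			     delay_ms * 1000 + 1000);
}
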
diff --git a/drivers/net/ethernet/freescale/fman/Kconfig b/drivers/net/ethernet/freescale/fman/Kconfig
index dc0850b3b517..8870a9a798ca 100644
--- a/drivers/net/ethernet/freescale/fman/Kconfig
+++ b/drivers/net/ethernet/freescale/fman/Kconfig
@@ -2,6 +2,7 @@ config FSL_FMAN
2 tristate "FMan support" 2 tristate "FMan support"
3 depends on FSL_SOC || ARCH_LAYERSCAPE || COMPILE_TEST 3 depends on FSL_SOC || ARCH_LAYERSCAPE || COMPILE_TEST
4 select GENERIC_ALLOCATOR 4 select GENERIC_ALLOCATOR
5 depends on HAS_DMA
5 select PHYLIB 6 select PHYLIB
6 default n 7 default n
7 help 8 help
diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
index 0b31f8502ada..6e67d22fd0d5 100644
--- a/drivers/net/ethernet/freescale/fman/mac.c
+++ b/drivers/net/ethernet/freescale/fman/mac.c
@@ -623,6 +623,8 @@ static struct platform_device *dpaa_eth_add_device(int fman_id,
623 goto no_mem; 623 goto no_mem;
624 } 624 }
625 625
626 set_dma_ops(&pdev->dev, get_dma_ops(priv->dev));
627
626 ret = platform_device_add_data(pdev, &data, sizeof(data)); 628 ret = platform_device_add_data(pdev, &data, sizeof(data));
627 if (ret) 629 if (ret)
628 goto err; 630 goto err;
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index 446c7b374ff5..a10de1e9c157 100644
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -381,7 +381,7 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev)
381{ 381{
382 const struct of_device_id *id = 382 const struct of_device_id *id =
383 of_match_device(fsl_pq_mdio_match, &pdev->dev); 383 of_match_device(fsl_pq_mdio_match, &pdev->dev);
384 const struct fsl_pq_mdio_data *data = id->data; 384 const struct fsl_pq_mdio_data *data;
385 struct device_node *np = pdev->dev.of_node; 385 struct device_node *np = pdev->dev.of_node;
386 struct resource res; 386 struct resource res;
387 struct device_node *tbi; 387 struct device_node *tbi;
@@ -389,6 +389,13 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev)
389 struct mii_bus *new_bus; 389 struct mii_bus *new_bus;
390 int err; 390 int err;
391 391
392 if (!id) {
393 dev_err(&pdev->dev, "Failed to match device\n");
394 return -ENODEV;
395 }
396
397 data = id->data;
398
392 dev_dbg(&pdev->dev, "found %s compatible node\n", id->compatible); 399 dev_dbg(&pdev->dev, "found %s compatible node\n", id->compatible);
393 400
394 new_bus = mdiobus_alloc_size(sizeof(*priv)); 401 new_bus = mdiobus_alloc_size(sizeof(*priv));
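The fsl_pq_mdio fix addresses a NULL dereference: of_match_device() can return NULL, so taking id->data in the declaration was unsafe. A sketch of the defensive probe pattern; everything except the OF helpers is illustrative:

#include <linux/of_device.h>
#include <linux/platform_device.h>

struct demo_data { int has_tbi; };

static const struct demo_data demo_v1 = { .has_tbi = 1 };

static const struct of_device_id demo_of_ids[] = {
	{ .compatible = "demo,mdio", .data = &demo_v1 },
	{ /* sentinel */ }
};

static int demo_probe(struct platform_device *pdev)
{
	const struct of_device_id *id =
		of_match_device(demo_of_ids, &pdev->dev);
	const struct demo_data *data;

	if (!id) {	/* probed without a DT match */
		dev_err(&pdev->dev, "failed to match device\n");
		return -ENODEV;
	}
	data = id->data;	/* now safe to dereference */

	return data->has_tbi ? 0 : -ENODEV;	/* illustrative use */
}
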
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index b8fab149690f..e95795b3c841 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -288,9 +288,15 @@ static int hns_nic_config_phy_loopback(struct phy_device *phy_dev, u8 en)
288 288
289 /* Force 1000M Link, Default is 0x0200 */ 289 /* Force 1000M Link, Default is 0x0200 */
290 phy_write(phy_dev, 7, 0x20C); 290 phy_write(phy_dev, 7, 0x20C);
291 phy_write(phy_dev, HNS_PHY_PAGE_REG, 0);
292 291
293 /* Enable PHY loop-back */ 292 /* Powerup Fiber */
293 phy_write(phy_dev, HNS_PHY_PAGE_REG, 1);
294 val = phy_read(phy_dev, COPPER_CONTROL_REG);
295 val &= ~PHY_POWER_DOWN;
296 phy_write(phy_dev, COPPER_CONTROL_REG, val);
297
298 /* Enable Phy Loopback */
299 phy_write(phy_dev, HNS_PHY_PAGE_REG, 0);
294 val = phy_read(phy_dev, COPPER_CONTROL_REG); 300 val = phy_read(phy_dev, COPPER_CONTROL_REG);
295 val |= PHY_LOOP_BACK; 301 val |= PHY_LOOP_BACK;
296 val &= ~PHY_POWER_DOWN; 302 val &= ~PHY_POWER_DOWN;
@@ -299,6 +305,12 @@ static int hns_nic_config_phy_loopback(struct phy_device *phy_dev, u8 en)
299 phy_write(phy_dev, HNS_PHY_PAGE_REG, 0xFA); 305 phy_write(phy_dev, HNS_PHY_PAGE_REG, 0xFA);
300 phy_write(phy_dev, 1, 0x400); 306 phy_write(phy_dev, 1, 0x400);
301 phy_write(phy_dev, 7, 0x200); 307 phy_write(phy_dev, 7, 0x200);
308
309 phy_write(phy_dev, HNS_PHY_PAGE_REG, 1);
310 val = phy_read(phy_dev, COPPER_CONTROL_REG);
311 val |= PHY_POWER_DOWN;
312 phy_write(phy_dev, COPPER_CONTROL_REG, val);
313
302 phy_write(phy_dev, HNS_PHY_PAGE_REG, 0); 314 phy_write(phy_dev, HNS_PHY_PAGE_REG, 0);
303 phy_write(phy_dev, 9, 0xF00); 315 phy_write(phy_dev, 9, 0xF00);
304 316
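The hns fix powers the fiber page up before enabling loopback and powers it back down afterwards, using the page-select register to reach the fiber copy of the control register. A sketch of paged PHY access under the assumption of a Marvell-style page register (commonly register 22); register choices here are illustrative, not the hns driver's:

#include <linux/mii.h>
#include <linux/phy.h>

#define DEMO_PAGE_REG		22	/* page select, Marvell-style */
#define DEMO_FIBER_PAGE		1
#define DEMO_COPPER_PAGE	0

static void demo_phy_fiber_power(struct phy_device *phydev, bool up)
{
	int val;

	phy_write(phydev, DEMO_PAGE_REG, DEMO_FIBER_PAGE);
	val = phy_read(phydev, MII_BMCR);
	if (up)
		val &= ~BMCR_PDOWN;
	else
		val |= BMCR_PDOWN;
	phy_write(phydev, MII_BMCR, val);
	/* always restore the default page so later accesses see the
	 * expected register map
	 */
	phy_write(phydev, DEMO_PAGE_REG, DEMO_COPPER_PAGE);
}
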
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 508923f39ccf..259e69a52ec5 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -343,6 +343,7 @@ static int emac_reset(struct emac_instance *dev)
343{ 343{
344 struct emac_regs __iomem *p = dev->emacp; 344 struct emac_regs __iomem *p = dev->emacp;
345 int n = 20; 345 int n = 20;
346 bool __maybe_unused try_internal_clock = false;
346 347
347 DBG(dev, "reset" NL); 348 DBG(dev, "reset" NL);
348 349
@@ -355,6 +356,7 @@ static int emac_reset(struct emac_instance *dev)
355 } 356 }
356 357
357#ifdef CONFIG_PPC_DCR_NATIVE 358#ifdef CONFIG_PPC_DCR_NATIVE
359do_retry:
358 /* 360 /*
359 * PPC460EX/GT Embedded Processor Advanced User's Manual 361 * PPC460EX/GT Embedded Processor Advanced User's Manual
360 * section 28.10.1 Mode Register 0 (EMACx_MR0) states: 362 * section 28.10.1 Mode Register 0 (EMACx_MR0) states:
@@ -362,10 +364,19 @@ static int emac_reset(struct emac_instance *dev)
362 * of the EMAC. If none is present, select the internal clock 364 * of the EMAC. If none is present, select the internal clock
363 * (SDR0_ETH_CFG[EMACx_PHY_CLK] = 1). 365 * (SDR0_ETH_CFG[EMACx_PHY_CLK] = 1).
364 * After a soft reset, select the external clock. 366 * After a soft reset, select the external clock.
367 *
368 * The AR8035-A PHY on the Meraki MR24 does not provide a TX Clk if the
369 * Ethernet cable is not attached. This causes the reset to time out
370 * and the PHY detection code in emac_init_phy() is unable to
371 * communicate with and detect the AR8035-A PHY. As a result, the emac
372 * driver bails out early and the user has no Ethernet.
373 * In order to stay compatible with existing configurations, the
374 * driver will temporarily switch to the internal clock after
375 * the first reset fails.
365 */ 376 */
366 if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) { 377 if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) {
367 if (dev->phy_address == 0xffffffff && 378 if (try_internal_clock || (dev->phy_address == 0xffffffff &&
368 dev->phy_map == 0xffffffff) { 379 dev->phy_map == 0xffffffff)) {
369 /* No PHY: select internal loop clock before reset */ 380 /* No PHY: select internal loop clock before reset */
370 dcri_clrset(SDR0, SDR0_ETH_CFG, 381 dcri_clrset(SDR0, SDR0_ETH_CFG,
371 0, SDR0_ETH_CFG_ECS << dev->cell_index); 382 0, SDR0_ETH_CFG_ECS << dev->cell_index);
@@ -383,8 +394,15 @@ static int emac_reset(struct emac_instance *dev)
383 394
384#ifdef CONFIG_PPC_DCR_NATIVE 395#ifdef CONFIG_PPC_DCR_NATIVE
385 if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) { 396 if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) {
386 if (dev->phy_address == 0xffffffff && 397 if (!n && !try_internal_clock) {
387 dev->phy_map == 0xffffffff) { 398 /* first attempt has timed out. */
399 n = 20;
400 try_internal_clock = true;
401 goto do_retry;
402 }
403
404 if (try_internal_clock || (dev->phy_address == 0xffffffff &&
405 dev->phy_map == 0xffffffff)) {
388 /* No PHY: restore external clock source after reset */ 406 /* No PHY: restore external clock source after reset */
389 dcri_clrset(SDR0, SDR0_ETH_CFG, 407 dcri_clrset(SDR0, SDR0_ETH_CFG,
390 SDR0_ETH_CFG_ECS << dev->cell_index, 0); 408 SDR0_ETH_CFG_ECS << dev->cell_index, 0);
@@ -2460,20 +2478,24 @@ static int emac_mii_bus_reset(struct mii_bus *bus)
2460 return emac_reset(dev); 2478 return emac_reset(dev);
2461} 2479}
2462 2480
2481static int emac_mdio_phy_start_aneg(struct mii_phy *phy,
2482 struct phy_device *phy_dev)
2483{
2484 phy_dev->autoneg = phy->autoneg;
2485 phy_dev->speed = phy->speed;
2486 phy_dev->duplex = phy->duplex;
2487 phy_dev->advertising = phy->advertising;
2488 return phy_start_aneg(phy_dev);
2489}
2490
2463static int emac_mdio_setup_aneg(struct mii_phy *phy, u32 advertise) 2491static int emac_mdio_setup_aneg(struct mii_phy *phy, u32 advertise)
2464{ 2492{
2465 struct net_device *ndev = phy->dev; 2493 struct net_device *ndev = phy->dev;
2466 struct emac_instance *dev = netdev_priv(ndev); 2494 struct emac_instance *dev = netdev_priv(ndev);
2467 2495
2468 dev->phy.autoneg = AUTONEG_ENABLE;
2469 dev->phy.speed = SPEED_1000;
2470 dev->phy.duplex = DUPLEX_FULL;
2471 dev->phy.advertising = advertise;
2472 phy->autoneg = AUTONEG_ENABLE; 2496 phy->autoneg = AUTONEG_ENABLE;
2473 phy->speed = dev->phy.speed;
2474 phy->duplex = dev->phy.duplex;
2475 phy->advertising = advertise; 2497 phy->advertising = advertise;
2476 return phy_start_aneg(dev->phy_dev); 2498 return emac_mdio_phy_start_aneg(phy, dev->phy_dev);
2477} 2499}
2478 2500
2479static int emac_mdio_setup_forced(struct mii_phy *phy, int speed, int fd) 2501static int emac_mdio_setup_forced(struct mii_phy *phy, int speed, int fd)
@@ -2481,13 +2503,10 @@ static int emac_mdio_setup_forced(struct mii_phy *phy, int speed, int fd)
2481 struct net_device *ndev = phy->dev; 2503 struct net_device *ndev = phy->dev;
2482 struct emac_instance *dev = netdev_priv(ndev); 2504 struct emac_instance *dev = netdev_priv(ndev);
2483 2505
2484 dev->phy.autoneg = AUTONEG_DISABLE;
2485 dev->phy.speed = speed;
2486 dev->phy.duplex = fd;
2487 phy->autoneg = AUTONEG_DISABLE; 2506 phy->autoneg = AUTONEG_DISABLE;
2488 phy->speed = speed; 2507 phy->speed = speed;
2489 phy->duplex = fd; 2508 phy->duplex = fd;
2490 return phy_start_aneg(dev->phy_dev); 2509 return emac_mdio_phy_start_aneg(phy, dev->phy_dev);
2491} 2510}
2492 2511
2493static int emac_mdio_poll_link(struct mii_phy *phy) 2512static int emac_mdio_poll_link(struct mii_phy *phy)
@@ -2509,16 +2528,17 @@ static int emac_mdio_read_link(struct mii_phy *phy)
2509{ 2528{
2510 struct net_device *ndev = phy->dev; 2529 struct net_device *ndev = phy->dev;
2511 struct emac_instance *dev = netdev_priv(ndev); 2530 struct emac_instance *dev = netdev_priv(ndev);
2531 struct phy_device *phy_dev = dev->phy_dev;
2512 int res; 2532 int res;
2513 2533
2514 res = phy_read_status(dev->phy_dev); 2534 res = phy_read_status(phy_dev);
2515 if (res) 2535 if (res)
2516 return res; 2536 return res;
2517 2537
2518 dev->phy.speed = phy->speed; 2538 phy->speed = phy_dev->speed;
2519 dev->phy.duplex = phy->duplex; 2539 phy->duplex = phy_dev->duplex;
2520 dev->phy.pause = phy->pause; 2540 phy->pause = phy_dev->pause;
2521 dev->phy.asym_pause = phy->asym_pause; 2541 phy->asym_pause = phy_dev->asym_pause;
2522 return 0; 2542 return 0;
2523} 2543}
2524 2544
@@ -2528,13 +2548,6 @@ static int emac_mdio_init_phy(struct mii_phy *phy)
2528 struct emac_instance *dev = netdev_priv(ndev); 2548 struct emac_instance *dev = netdev_priv(ndev);
2529 2549
2530 phy_start(dev->phy_dev); 2550 phy_start(dev->phy_dev);
2531 dev->phy.autoneg = phy->autoneg;
2532 dev->phy.speed = phy->speed;
2533 dev->phy.duplex = phy->duplex;
2534 dev->phy.advertising = phy->advertising;
2535 dev->phy.pause = phy->pause;
2536 dev->phy.asym_pause = phy->asym_pause;
2537
2538 return phy_init_hw(dev->phy_dev); 2551 return phy_init_hw(dev->phy_dev);
2539} 2552}
2540 2553
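The emac section makes two related changes: emac_reset() now retries once with the internal clock when the first reset times out (the AR8035 case described in the comment above), and the new emac_mdio_phy_start_aneg() helper pushes the cached settings into phylib instead of mirroring phylib state in dev->phy. A compact sketch of the retry shape; all demo_* helpers are hypothetical stand-ins for the register pokes:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>

struct demo_dev { void __iomem *regs; };

static void demo_select_clock(struct demo_dev *d, bool internal) { /* SDR0 bits */ }
static void demo_assert_reset(struct demo_dev *d) { /* set MR0 soft reset */ }
static bool demo_reset_busy(struct demo_dev *d) { return false; }

static int demo_reset(struct demo_dev *dev)
{
	bool try_internal_clock = false;
	int n;

do_retry:
	demo_select_clock(dev, try_internal_clock);
	demo_assert_reset(dev);
	for (n = 20; n && demo_reset_busy(dev); n--)
		udelay(10);

	if (!n && !try_internal_clock) {
		/* first attempt timed out: fall back once, as emac does */
		try_internal_clock = true;
		goto do_retry;
	}
	return n ? 0 : -ETIMEDOUT;
}
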
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 4f2d329dba99..c0fbeb387db4 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -81,7 +81,7 @@
81static const char ibmvnic_driver_name[] = "ibmvnic"; 81static const char ibmvnic_driver_name[] = "ibmvnic";
82static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver"; 82static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";
83 83
84MODULE_AUTHOR("Santiago Leon <santi_leon@yahoo.com>"); 84MODULE_AUTHOR("Santiago Leon");
85MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver"); 85MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
86MODULE_LICENSE("GPL"); 86MODULE_LICENSE("GPL");
87MODULE_VERSION(IBMVNIC_DRIVER_VERSION); 87MODULE_VERSION(IBMVNIC_DRIVER_VERSION);
@@ -1468,6 +1468,11 @@ static void ibmvnic_netpoll_controller(struct net_device *dev)
1468} 1468}
1469#endif 1469#endif
1470 1470
1471static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
1472{
1473 return -EOPNOTSUPP;
1474}
1475
1471static const struct net_device_ops ibmvnic_netdev_ops = { 1476static const struct net_device_ops ibmvnic_netdev_ops = {
1472 .ndo_open = ibmvnic_open, 1477 .ndo_open = ibmvnic_open,
1473 .ndo_stop = ibmvnic_close, 1478 .ndo_stop = ibmvnic_close,
@@ -1479,6 +1484,7 @@ static const struct net_device_ops ibmvnic_netdev_ops = {
1479#ifdef CONFIG_NET_POLL_CONTROLLER 1484#ifdef CONFIG_NET_POLL_CONTROLLER
1480 .ndo_poll_controller = ibmvnic_netpoll_controller, 1485 .ndo_poll_controller = ibmvnic_netpoll_controller,
1481#endif 1486#endif
1487 .ndo_change_mtu = ibmvnic_change_mtu,
1482}; 1488};
1483 1489
1484/* ethtool functions */ 1490/* ethtool functions */
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index cdde3cc28fb5..44d9610f7a15 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -399,6 +399,7 @@ struct i40e_pf {
399#define I40E_FLAG_RX_CSUM_ENABLED BIT_ULL(1) 399#define I40E_FLAG_RX_CSUM_ENABLED BIT_ULL(1)
400#define I40E_FLAG_MSI_ENABLED BIT_ULL(2) 400#define I40E_FLAG_MSI_ENABLED BIT_ULL(2)
401#define I40E_FLAG_MSIX_ENABLED BIT_ULL(3) 401#define I40E_FLAG_MSIX_ENABLED BIT_ULL(3)
402#define I40E_FLAG_HW_ATR_EVICT_ENABLED BIT_ULL(4)
402#define I40E_FLAG_RSS_ENABLED BIT_ULL(6) 403#define I40E_FLAG_RSS_ENABLED BIT_ULL(6)
403#define I40E_FLAG_VMDQ_ENABLED BIT_ULL(7) 404#define I40E_FLAG_VMDQ_ENABLED BIT_ULL(7)
404#define I40E_FLAG_IWARP_ENABLED BIT_ULL(10) 405#define I40E_FLAG_IWARP_ENABLED BIT_ULL(10)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 7a8eb486b9ea..894c8e57ba00 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -224,7 +224,7 @@ static const struct i40e_priv_flags i40e_gstrings_priv_flags[] = {
224 I40E_PRIV_FLAG("LinkPolling", I40E_FLAG_LINK_POLLING_ENABLED, 0), 224 I40E_PRIV_FLAG("LinkPolling", I40E_FLAG_LINK_POLLING_ENABLED, 0),
225 I40E_PRIV_FLAG("flow-director-atr", I40E_FLAG_FD_ATR_ENABLED, 0), 225 I40E_PRIV_FLAG("flow-director-atr", I40E_FLAG_FD_ATR_ENABLED, 0),
226 I40E_PRIV_FLAG("veb-stats", I40E_FLAG_VEB_STATS_ENABLED, 0), 226 I40E_PRIV_FLAG("veb-stats", I40E_FLAG_VEB_STATS_ENABLED, 0),
227 I40E_PRIV_FLAG("hw-atr-eviction", I40E_FLAG_HW_ATR_EVICT_CAPABLE, 0), 227 I40E_PRIV_FLAG("hw-atr-eviction", I40E_FLAG_HW_ATR_EVICT_ENABLED, 0),
228 I40E_PRIV_FLAG("legacy-rx", I40E_FLAG_LEGACY_RX, 0), 228 I40E_PRIV_FLAG("legacy-rx", I40E_FLAG_LEGACY_RX, 0),
229}; 229};
230 230
@@ -4092,7 +4092,7 @@ flags_complete:
4092 4092
4093 /* Only allow ATR evict on hardware that is capable of handling it */ 4093 /* Only allow ATR evict on hardware that is capable of handling it */
4094 if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) 4094 if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE)
4095 pf->flags &= ~I40E_FLAG_HW_ATR_EVICT_CAPABLE; 4095 pf->flags &= ~I40E_FLAG_HW_ATR_EVICT_ENABLED;
4096 4096
4097 if (changed_flags & I40E_FLAG_TRUE_PROMISC_SUPPORT) { 4097 if (changed_flags & I40E_FLAG_TRUE_PROMISC_SUPPORT) {
4098 u16 sw_flags = 0, valid_flags = 0; 4098 u16 sw_flags = 0, valid_flags = 0;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index d5c9c9e06ff5..a7a4b28b4144 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -295,7 +295,7 @@ struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
295 **/ 295 **/
296void i40e_service_event_schedule(struct i40e_pf *pf) 296void i40e_service_event_schedule(struct i40e_pf *pf)
297{ 297{
298 if (!test_bit(__I40E_VSI_DOWN, pf->state) && 298 if (!test_bit(__I40E_DOWN, pf->state) &&
299 !test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) 299 !test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
300 queue_work(i40e_wq, &pf->service_task); 300 queue_work(i40e_wq, &pf->service_task);
301} 301}
@@ -3611,7 +3611,7 @@ static irqreturn_t i40e_intr(int irq, void *data)
3611 * this is not a performance path and napi_schedule() 3611 * this is not a performance path and napi_schedule()
3612 * can deal with rescheduling. 3612 * can deal with rescheduling.
3613 */ 3613 */
3614 if (!test_bit(__I40E_VSI_DOWN, pf->state)) 3614 if (!test_bit(__I40E_DOWN, pf->state))
3615 napi_schedule_irqoff(&q_vector->napi); 3615 napi_schedule_irqoff(&q_vector->napi);
3616 } 3616 }
3617 3617
@@ -3687,7 +3687,7 @@ static irqreturn_t i40e_intr(int irq, void *data)
3687enable_intr: 3687enable_intr:
3688 /* re-enable interrupt causes */ 3688 /* re-enable interrupt causes */
3689 wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask); 3689 wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
3690 if (!test_bit(__I40E_VSI_DOWN, pf->state)) { 3690 if (!test_bit(__I40E_DOWN, pf->state)) {
3691 i40e_service_event_schedule(pf); 3691 i40e_service_event_schedule(pf);
3692 i40e_irq_dynamic_enable_icr0(pf, false); 3692 i40e_irq_dynamic_enable_icr0(pf, false);
3693 } 3693 }
@@ -6203,7 +6203,7 @@ static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
6203{ 6203{
6204 6204
6205 /* if interface is down do nothing */ 6205 /* if interface is down do nothing */
6206 if (test_bit(__I40E_VSI_DOWN, pf->state)) 6206 if (test_bit(__I40E_DOWN, pf->state))
6207 return; 6207 return;
6208 6208
6209 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state)) 6209 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
@@ -6344,7 +6344,7 @@ static void i40e_watchdog_subtask(struct i40e_pf *pf)
6344 int i; 6344 int i;
6345 6345
6346 /* if interface is down do nothing */ 6346 /* if interface is down do nothing */
6347 if (test_bit(__I40E_VSI_DOWN, pf->state) || 6347 if (test_bit(__I40E_DOWN, pf->state) ||
6348 test_bit(__I40E_CONFIG_BUSY, pf->state)) 6348 test_bit(__I40E_CONFIG_BUSY, pf->state))
6349 return; 6349 return;
6350 6350
@@ -6399,9 +6399,9 @@ static void i40e_reset_subtask(struct i40e_pf *pf)
6399 reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED); 6399 reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED);
6400 clear_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state); 6400 clear_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
6401 } 6401 }
6402 if (test_bit(__I40E_VSI_DOWN_REQUESTED, pf->state)) { 6402 if (test_bit(__I40E_DOWN_REQUESTED, pf->state)) {
6403 reset_flags |= BIT(__I40E_VSI_DOWN_REQUESTED); 6403 reset_flags |= BIT(__I40E_DOWN_REQUESTED);
6404 clear_bit(__I40E_VSI_DOWN_REQUESTED, pf->state); 6404 clear_bit(__I40E_DOWN_REQUESTED, pf->state);
6405 } 6405 }
6406 6406
6407 /* If there's a recovery already waiting, it takes 6407 /* If there's a recovery already waiting, it takes
@@ -6415,7 +6415,7 @@ static void i40e_reset_subtask(struct i40e_pf *pf)
6415 6415
6416 /* If we're already down or resetting, just bail */ 6416 /* If we're already down or resetting, just bail */
6417 if (reset_flags && 6417 if (reset_flags &&
6418 !test_bit(__I40E_VSI_DOWN, pf->state) && 6418 !test_bit(__I40E_DOWN, pf->state) &&
6419 !test_bit(__I40E_CONFIG_BUSY, pf->state)) { 6419 !test_bit(__I40E_CONFIG_BUSY, pf->state)) {
6420 rtnl_lock(); 6420 rtnl_lock();
6421 i40e_do_reset(pf, reset_flags, true); 6421 i40e_do_reset(pf, reset_flags, true);
@@ -7002,7 +7002,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
7002 u32 val; 7002 u32 val;
7003 int v; 7003 int v;
7004 7004
7005 if (test_bit(__I40E_VSI_DOWN, pf->state)) 7005 if (test_bit(__I40E_DOWN, pf->state))
7006 goto clear_recovery; 7006 goto clear_recovery;
7007 dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n"); 7007 dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
7008 7008
@@ -8821,11 +8821,12 @@ static int i40e_sw_init(struct i40e_pf *pf)
8821 (pf->hw.aq.api_min_ver > 4))) { 8821 (pf->hw.aq.api_min_ver > 4))) {
8822 /* Supported in FW API version higher than 1.4 */ 8822 /* Supported in FW API version higher than 1.4 */
8823 pf->flags |= I40E_FLAG_GENEVE_OFFLOAD_CAPABLE; 8823 pf->flags |= I40E_FLAG_GENEVE_OFFLOAD_CAPABLE;
8824 pf->flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE;
8825 } else {
8826 pf->flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE;
8827 } 8824 }
8828 8825
8826 /* Enable HW ATR eviction if possible */
8827 if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE)
8828 pf->flags |= I40E_FLAG_HW_ATR_EVICT_ENABLED;
8829
8829 pf->eeprom_version = 0xDEAD; 8830 pf->eeprom_version = 0xDEAD;
8830 pf->lan_veb = I40E_NO_VEB; 8831 pf->lan_veb = I40E_NO_VEB;
8831 pf->lan_vsi = I40E_NO_VSI; 8832 pf->lan_vsi = I40E_NO_VSI;
@@ -9767,7 +9768,7 @@ int i40e_vsi_release(struct i40e_vsi *vsi)
9767 return -ENODEV; 9768 return -ENODEV;
9768 } 9769 }
9769 if (vsi == pf->vsi[pf->lan_vsi] && 9770 if (vsi == pf->vsi[pf->lan_vsi] &&
9770 !test_bit(__I40E_VSI_DOWN, pf->state)) { 9771 !test_bit(__I40E_DOWN, pf->state)) {
9771 dev_info(&pf->pdev->dev, "Can't remove PF VSI\n"); 9772 dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
9772 return -ENODEV; 9773 return -ENODEV;
9773 } 9774 }
@@ -11003,7 +11004,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
11003 } 11004 }
11004 pf->next_vsi = 0; 11005 pf->next_vsi = 0;
11005 pf->pdev = pdev; 11006 pf->pdev = pdev;
11006 set_bit(__I40E_VSI_DOWN, pf->state); 11007 set_bit(__I40E_DOWN, pf->state);
11007 11008
11008 hw = &pf->hw; 11009 hw = &pf->hw;
11009 hw->back = pf; 11010 hw->back = pf;
@@ -11293,7 +11294,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
11293 * before setting up the misc vector or we get a race and the vector 11294 * before setting up the misc vector or we get a race and the vector
11294 * ends up disabled forever. 11295 * ends up disabled forever.
11295 */ 11296 */
11296 clear_bit(__I40E_VSI_DOWN, pf->state); 11297 clear_bit(__I40E_DOWN, pf->state);
11297 11298
11298 /* In case of MSIX we are going to setup the misc vector right here 11299 /* In case of MSIX we are going to setup the misc vector right here
11299 * to handle admin queue events etc. In case of legacy and MSI 11300 * to handle admin queue events etc. In case of legacy and MSI
@@ -11448,7 +11449,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
11448 11449
11449 /* Unwind what we've done if something failed in the setup */ 11450 /* Unwind what we've done if something failed in the setup */
11450err_vsis: 11451err_vsis:
11451 set_bit(__I40E_VSI_DOWN, pf->state); 11452 set_bit(__I40E_DOWN, pf->state);
11452 i40e_clear_interrupt_scheme(pf); 11453 i40e_clear_interrupt_scheme(pf);
11453 kfree(pf->vsi); 11454 kfree(pf->vsi);
11454err_switch_setup: 11455err_switch_setup:
@@ -11500,7 +11501,7 @@ static void i40e_remove(struct pci_dev *pdev)
11500 11501
11501 /* no more scheduling of any task */ 11502 /* no more scheduling of any task */
11502 set_bit(__I40E_SUSPENDED, pf->state); 11503 set_bit(__I40E_SUSPENDED, pf->state);
11503 set_bit(__I40E_VSI_DOWN, pf->state); 11504 set_bit(__I40E_DOWN, pf->state);
11504 if (pf->service_timer.data) 11505 if (pf->service_timer.data)
11505 del_timer_sync(&pf->service_timer); 11506 del_timer_sync(&pf->service_timer);
11506 if (pf->service_task.func) 11507 if (pf->service_task.func)
@@ -11740,7 +11741,7 @@ static void i40e_shutdown(struct pci_dev *pdev)
11740 struct i40e_hw *hw = &pf->hw; 11741 struct i40e_hw *hw = &pf->hw;
11741 11742
11742 set_bit(__I40E_SUSPENDED, pf->state); 11743 set_bit(__I40E_SUSPENDED, pf->state);
11743 set_bit(__I40E_VSI_DOWN, pf->state); 11744 set_bit(__I40E_DOWN, pf->state);
11744 rtnl_lock(); 11745 rtnl_lock();
11745 i40e_prep_for_reset(pf, true); 11746 i40e_prep_for_reset(pf, true);
11746 rtnl_unlock(); 11747 rtnl_unlock();
@@ -11789,7 +11790,7 @@ static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
11789 int retval = 0; 11790 int retval = 0;
11790 11791
11791 set_bit(__I40E_SUSPENDED, pf->state); 11792 set_bit(__I40E_SUSPENDED, pf->state);
11792 set_bit(__I40E_VSI_DOWN, pf->state); 11793 set_bit(__I40E_DOWN, pf->state);
11793 11794
11794 if (pf->wol_en && (pf->flags & I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE)) 11795 if (pf->wol_en && (pf->flags & I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE))
11795 i40e_enable_mc_magic_wake(pf); 11796 i40e_enable_mc_magic_wake(pf);
@@ -11841,7 +11842,7 @@ static int i40e_resume(struct pci_dev *pdev)
11841 11842
11842 /* handling the reset will rebuild the device state */ 11843 /* handling the reset will rebuild the device state */
11843 if (test_and_clear_bit(__I40E_SUSPENDED, pf->state)) { 11844 if (test_and_clear_bit(__I40E_SUSPENDED, pf->state)) {
11844 clear_bit(__I40E_VSI_DOWN, pf->state); 11845 clear_bit(__I40E_DOWN, pf->state);
11845 rtnl_lock(); 11846 rtnl_lock();
11846 i40e_reset_and_rebuild(pf, false, true); 11847 i40e_reset_and_rebuild(pf, false, true);
11847 rtnl_unlock(); 11848 rtnl_unlock();
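The i40e changes split the old I40E_FLAG_HW_ATR_EVICT_CAPABLE into a capability bit plus a runtime I40E_FLAG_HW_ATR_EVICT_ENABLED bit (and rename __I40E_VSI_DOWN back to __I40E_DOWN): clearing the runtime bit no longer erases what the hardware can do. A sketch of the capability/state split, with illustrative flag names:

#include <linux/bitops.h>
#include <linux/types.h>

#define DEMO_FLAG_ATR_EVICT_CAPABLE	BIT_ULL(4)	/* hw supports it */
#define DEMO_FLAG_ATR_EVICT_ENABLED	BIT_ULL(5)	/* currently on */

static void demo_init_flags(u64 *flags)
{
	/* enable by default only where the hardware supports it */
	if (*flags & DEMO_FLAG_ATR_EVICT_CAPABLE)
		*flags |= DEMO_FLAG_ATR_EVICT_ENABLED;
}

static void demo_user_disable(u64 *flags)
{
	/* runtime toggle touches only the ENABLED bit */
	*flags &= ~DEMO_FLAG_ATR_EVICT_ENABLED;
}

static bool demo_atr_evict_active(u64 flags)
{
	return flags & DEMO_FLAG_ATR_EVICT_ENABLED;
}
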
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 29321a6167a6..77115c25d96f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -1854,7 +1854,8 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
1854#if (PAGE_SIZE < 8192) 1854#if (PAGE_SIZE < 8192)
1855 unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2; 1855 unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
1856#else 1856#else
1857 unsigned int truesize = SKB_DATA_ALIGN(size); 1857 unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
1858 SKB_DATA_ALIGN(I40E_SKB_PAD + size);
1858#endif 1859#endif
1859 struct sk_buff *skb; 1860 struct sk_buff *skb;
1860 1861
@@ -2340,7 +2341,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
2340 /* Due to lack of space, no more new filters can be programmed */ 2341 /* Due to lack of space, no more new filters can be programmed */
2341 if (th->syn && (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED)) 2342 if (th->syn && (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED))
2342 return; 2343 return;
2343 if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) { 2344 if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED) {
2344 /* HW ATR eviction will take care of removing filters on FIN 2345 /* HW ATR eviction will take care of removing filters on FIN
2345 * and RST packets. 2346 * and RST packets.
2346 */ 2347 */
@@ -2402,7 +2403,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
2402 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) & 2403 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
2403 I40E_TXD_FLTR_QW1_CNTINDEX_MASK; 2404 I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
2404 2405
2405 if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) 2406 if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED)
2406 dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK; 2407 dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK;
2407 2408
2408 fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype); 2409 fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
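The truesize hunk in this file (mirrored in i40evf below) fixes memory accounting for build_skb() on systems with pages of 8 KiB or more: the buffer charged to the socket must cover the headroom and the skb_shared_info tail, not just the frame length. A sketch of the corrected arithmetic; DEMO_SKB_PAD stands in for the driver's headroom constant:

#include <linux/skbuff.h>

#define DEMO_SKB_PAD	(NET_SKB_PAD + NET_IP_ALIGN)	/* assumed headroom */

static unsigned int demo_truesize(unsigned int size)
{
	return SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
	       SKB_DATA_ALIGN(DEMO_SKB_PAD + size);
}
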
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 95c23fbaa211..0fb38ca78900 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -3017,10 +3017,12 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
3017 VLAN_VID_MASK)); 3017 VLAN_VID_MASK));
3018 } 3018 }
3019 3019
3020 spin_unlock_bh(&vsi->mac_filter_hash_lock);
3020 if (vlan_id || qos) 3021 if (vlan_id || qos)
3021 ret = i40e_vsi_add_pvid(vsi, vlanprio); 3022 ret = i40e_vsi_add_pvid(vsi, vlanprio);
3022 else 3023 else
3023 i40e_vsi_remove_pvid(vsi); 3024 i40e_vsi_remove_pvid(vsi);
3025 spin_lock_bh(&vsi->mac_filter_hash_lock);
3024 3026
3025 if (vlan_id) { 3027 if (vlan_id) {
3026 dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n", 3028 dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
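The virtchnl hunk releases the BH spinlock around the PVID helpers because they can sleep (they issue admin-queue commands), then retakes it for the hash updates that follow. A sketch of the drop-and-reacquire shape; the demo_* names and lock layout are illustrative:

#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_vsi { spinlock_t filter_lock; };

static int demo_add_pvid(struct demo_vsi *v, u16 vlanprio) { return 0; } /* may sleep */
static void demo_remove_pvid(struct demo_vsi *v) { }			 /* may sleep */

static int demo_set_port_vlan(struct demo_vsi *vsi, u16 vlanprio)
{
	int ret = 0;

	spin_lock_bh(&vsi->filter_lock);
	/* ... update MAC/VLAN filter hash under the lock ... */
	spin_unlock_bh(&vsi->filter_lock);

	/* sleeping operations run unlocked */
	if (vlanprio)
		ret = demo_add_pvid(vsi, vlanprio);
	else
		demo_remove_pvid(vsi);

	spin_lock_bh(&vsi->filter_lock);
	/* ... remaining hash updates ... */
	spin_unlock_bh(&vsi->filter_lock);

	return ret;
}
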
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index dfe241a12ad0..12b02e530503 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -1190,7 +1190,8 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
1190#if (PAGE_SIZE < 8192) 1190#if (PAGE_SIZE < 8192)
1191 unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2; 1191 unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
1192#else 1192#else
1193 unsigned int truesize = SKB_DATA_ALIGN(size); 1193 unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
1194 SKB_DATA_ALIGN(I40E_SKB_PAD + size);
1194#endif 1195#endif
1195 struct sk_buff *skb; 1196 struct sk_buff *skb;
1196 1197
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index 9b875d776b29..33c901622ed5 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -3719,7 +3719,7 @@ static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv,
3719 dma_addr_t *dma_addr, 3719 dma_addr_t *dma_addr,
3720 phys_addr_t *phys_addr) 3720 phys_addr_t *phys_addr)
3721{ 3721{
3722 int cpu = smp_processor_id(); 3722 int cpu = get_cpu();
3723 3723
3724 *dma_addr = mvpp2_percpu_read(priv, cpu, 3724 *dma_addr = mvpp2_percpu_read(priv, cpu,
3725 MVPP2_BM_PHY_ALLOC_REG(bm_pool->id)); 3725 MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
@@ -3740,6 +3740,8 @@ static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv,
3740 if (sizeof(phys_addr_t) == 8) 3740 if (sizeof(phys_addr_t) == 8)
3741 *phys_addr |= (u64)phys_addr_highbits << 32; 3741 *phys_addr |= (u64)phys_addr_highbits << 32;
3742 } 3742 }
3743
3744 put_cpu();
3743} 3745}
3744 3746
3745/* Free all buffers from the pool */ 3747/* Free all buffers from the pool */
@@ -3920,18 +3922,12 @@ static inline u32 mvpp2_bm_cookie_pool_set(u32 cookie, int pool)
3920 return bm; 3922 return bm;
3921} 3923}
3922 3924
3923/* Get pool number from a BM cookie */
3924static inline int mvpp2_bm_cookie_pool_get(unsigned long cookie)
3925{
3926 return (cookie >> MVPP2_BM_COOKIE_POOL_OFFS) & 0xFF;
3927}
3928
3929/* Release buffer to BM */ 3925/* Release buffer to BM */
3930static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool, 3926static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
3931 dma_addr_t buf_dma_addr, 3927 dma_addr_t buf_dma_addr,
3932 phys_addr_t buf_phys_addr) 3928 phys_addr_t buf_phys_addr)
3933{ 3929{
3934 int cpu = smp_processor_id(); 3930 int cpu = get_cpu();
3935 3931
3936 if (port->priv->hw_version == MVPP22) { 3932 if (port->priv->hw_version == MVPP22) {
3937 u32 val = 0; 3933 u32 val = 0;
@@ -3958,15 +3954,15 @@ static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
3958 MVPP2_BM_VIRT_RLS_REG, buf_phys_addr); 3954 MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
3959 mvpp2_percpu_write(port->priv, cpu, 3955 mvpp2_percpu_write(port->priv, cpu,
3960 MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr); 3956 MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);
3957
3958 put_cpu();
3961} 3959}
3962 3960
3963/* Refill BM pool */ 3961/* Refill BM pool */
3964static void mvpp2_pool_refill(struct mvpp2_port *port, u32 bm, 3962static void mvpp2_pool_refill(struct mvpp2_port *port, int pool,
3965 dma_addr_t dma_addr, 3963 dma_addr_t dma_addr,
3966 phys_addr_t phys_addr) 3964 phys_addr_t phys_addr)
3967{ 3965{
3968 int pool = mvpp2_bm_cookie_pool_get(bm);
3969
3970 mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr); 3966 mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
3971} 3967}
3972 3968
@@ -4186,8 +4182,6 @@ static void mvpp22_port_mii_set(struct mvpp2_port *port)
4186{ 4182{
4187 u32 val; 4183 u32 val;
4188 4184
4189 return;
4190
4191 /* Only GOP port 0 has an XLG MAC */ 4185 /* Only GOP port 0 has an XLG MAC */
4192 if (port->gop_id == 0) { 4186 if (port->gop_id == 0) {
4193 val = readl(port->base + MVPP22_XLG_CTRL3_REG); 4187 val = readl(port->base + MVPP22_XLG_CTRL3_REG);
@@ -4515,21 +4509,6 @@ static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
4515 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val); 4509 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
4516} 4510}
4517 4511
4518/* Obtain BM cookie information from descriptor */
4519static u32 mvpp2_bm_cookie_build(struct mvpp2_port *port,
4520 struct mvpp2_rx_desc *rx_desc)
4521{
4522 int cpu = smp_processor_id();
4523 int pool;
4524
4525 pool = (mvpp2_rxdesc_status_get(port, rx_desc) &
4526 MVPP2_RXD_BM_POOL_ID_MASK) >>
4527 MVPP2_RXD_BM_POOL_ID_OFFS;
4528
4529 return ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS) |
4530 ((cpu & 0xFF) << MVPP2_BM_COOKIE_CPU_OFFS);
4531}
4532
4533/* Tx descriptors helper methods */ 4512/* Tx descriptors helper methods */
4534 4513
4535/* Get pointer to next Tx descriptor to be processed (send) by HW */ 4514/* Get pointer to next Tx descriptor to be processed (send) by HW */
@@ -4757,7 +4736,7 @@ static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
4757static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port, 4736static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
4758 struct mvpp2_rx_queue *rxq) 4737 struct mvpp2_rx_queue *rxq)
4759{ 4738{
4760 int cpu = smp_processor_id(); 4739 int cpu = get_cpu();
4761 4740
4762 if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK) 4741 if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK)
4763 rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK; 4742 rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK;
@@ -4765,6 +4744,8 @@ static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
4765 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id); 4744 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
4766 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_THRESH_REG, 4745 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_THRESH_REG,
4767 rxq->pkts_coal); 4746 rxq->pkts_coal);
4747
4748 put_cpu();
4768} 4749}
4769 4750
4770static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz) 4751static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz)
@@ -4945,7 +4926,7 @@ static int mvpp2_rxq_init(struct mvpp2_port *port,
4945 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0); 4926 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
4946 4927
4947 /* Set Rx descriptors queue starting address - indirect access */ 4928 /* Set Rx descriptors queue starting address - indirect access */
4948 cpu = smp_processor_id(); 4929 cpu = get_cpu();
4949 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id); 4930 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
4950 if (port->priv->hw_version == MVPP21) 4931 if (port->priv->hw_version == MVPP21)
4951 rxq_dma = rxq->descs_dma; 4932 rxq_dma = rxq->descs_dma;
@@ -4954,6 +4935,7 @@ static int mvpp2_rxq_init(struct mvpp2_port *port,
4954 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma); 4935 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma);
4955 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, rxq->size); 4936 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
4956 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_INDEX_REG, 0); 4937 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_INDEX_REG, 0);
4938 put_cpu();
4957 4939
4958 /* Set Offset */ 4940 /* Set Offset */
4959 mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD); 4941 mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD);
@@ -4980,9 +4962,13 @@ static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
4980 4962
4981 for (i = 0; i < rx_received; i++) { 4963 for (i = 0; i < rx_received; i++) {
4982 struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq); 4964 struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
4983 u32 bm = mvpp2_bm_cookie_build(port, rx_desc); 4965 u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
4966 int pool;
4967
4968 pool = (status & MVPP2_RXD_BM_POOL_ID_MASK) >>
4969 MVPP2_RXD_BM_POOL_ID_OFFS;
4984 4970
4985 mvpp2_pool_refill(port, bm, 4971 mvpp2_pool_refill(port, pool,
4986 mvpp2_rxdesc_dma_addr_get(port, rx_desc), 4972 mvpp2_rxdesc_dma_addr_get(port, rx_desc),
4987 mvpp2_rxdesc_cookie_get(port, rx_desc)); 4973 mvpp2_rxdesc_cookie_get(port, rx_desc));
4988 } 4974 }
@@ -5012,10 +4998,11 @@ static void mvpp2_rxq_deinit(struct mvpp2_port *port,
5012 * free descriptor number 4998 * free descriptor number
5013 */ 4999 */
5014 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0); 5000 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
5015 cpu = smp_processor_id(); 5001 cpu = get_cpu();
5016 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id); 5002 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
5017 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, 0); 5003 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, 0);
5018 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, 0); 5004 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, 0);
5005 put_cpu();
5019} 5006}
5020 5007
5021/* Create and initialize a Tx queue */ 5008/* Create and initialize a Tx queue */
@@ -5038,7 +5025,7 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
5038 txq->last_desc = txq->size - 1; 5025 txq->last_desc = txq->size - 1;
5039 5026
5040 /* Set Tx descriptors queue starting address - indirect access */ 5027 /* Set Tx descriptors queue starting address - indirect access */
5041 cpu = smp_processor_id(); 5028 cpu = get_cpu();
5042 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); 5029 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
5043 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG, 5030 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG,
5044 txq->descs_dma); 5031 txq->descs_dma);
@@ -5063,6 +5050,7 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
5063 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, 5050 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG,
5064 MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 | 5051 MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
5065 MVPP2_PREF_BUF_THRESH(desc_per_txq / 2)); 5052 MVPP2_PREF_BUF_THRESH(desc_per_txq / 2));
5053 put_cpu();
5066 5054
5067 /* WRR / EJP configuration - indirect access */ 5055 /* WRR / EJP configuration - indirect access */
5068 tx_port_num = mvpp2_egress_port(port); 5056 tx_port_num = mvpp2_egress_port(port);
@@ -5133,10 +5121,11 @@ static void mvpp2_txq_deinit(struct mvpp2_port *port,
5133 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0); 5121 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);
5134 5122
5135 /* Set Tx descriptors queue starting address and size */ 5123 /* Set Tx descriptors queue starting address and size */
5136 cpu = smp_processor_id(); 5124 cpu = get_cpu();
5137 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); 5125 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
5138 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG, 0); 5126 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG, 0);
5139 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG, 0); 5127 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG, 0);
5128 put_cpu();
5140} 5129}
5141 5130
5142/* Cleanup Tx ports */ 5131/* Cleanup Tx ports */
@@ -5146,7 +5135,7 @@ static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
5146 int delay, pending, cpu; 5135 int delay, pending, cpu;
5147 u32 val; 5136 u32 val;
5148 5137
5149 cpu = smp_processor_id(); 5138 cpu = get_cpu();
5150 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); 5139 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
5151 val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG); 5140 val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG);
5152 val |= MVPP2_TXQ_DRAIN_EN_MASK; 5141 val |= MVPP2_TXQ_DRAIN_EN_MASK;
@@ -5173,6 +5162,7 @@ static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
5173 5162
5174 val &= ~MVPP2_TXQ_DRAIN_EN_MASK; 5163 val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
5175 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val); 5164 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val);
5165 put_cpu();
5176 5166
5177 for_each_present_cpu(cpu) { 5167 for_each_present_cpu(cpu) {
5178 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); 5168 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
@@ -5420,7 +5410,7 @@ static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status,
5420 5410
5421/* Reuse skb if possible, or allocate a new skb and add it to BM pool */ 5411/* Reuse skb if possible, or allocate a new skb and add it to BM pool */
5422static int mvpp2_rx_refill(struct mvpp2_port *port, 5412static int mvpp2_rx_refill(struct mvpp2_port *port,
5423 struct mvpp2_bm_pool *bm_pool, u32 bm) 5413 struct mvpp2_bm_pool *bm_pool, int pool)
5424{ 5414{
5425 dma_addr_t dma_addr; 5415 dma_addr_t dma_addr;
5426 phys_addr_t phys_addr; 5416 phys_addr_t phys_addr;
@@ -5432,7 +5422,7 @@ static int mvpp2_rx_refill(struct mvpp2_port *port,
5432 if (!buf) 5422 if (!buf)
5433 return -ENOMEM; 5423 return -ENOMEM;
5434 5424
5435 mvpp2_pool_refill(port, bm, dma_addr, phys_addr); 5425 mvpp2_pool_refill(port, pool, dma_addr, phys_addr);
5436 5426
5437 return 0; 5427 return 0;
5438} 5428}
@@ -5490,7 +5480,7 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
5490 unsigned int frag_size; 5480 unsigned int frag_size;
5491 dma_addr_t dma_addr; 5481 dma_addr_t dma_addr;
5492 phys_addr_t phys_addr; 5482 phys_addr_t phys_addr;
5493 u32 bm, rx_status; 5483 u32 rx_status;
5494 int pool, rx_bytes, err; 5484 int pool, rx_bytes, err;
5495 void *data; 5485 void *data;
5496 5486
@@ -5502,8 +5492,8 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
5502 phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc); 5492 phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc);
5503 data = (void *)phys_to_virt(phys_addr); 5493 data = (void *)phys_to_virt(phys_addr);
5504 5494
5505 bm = mvpp2_bm_cookie_build(port, rx_desc); 5495 pool = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >>
5506 pool = mvpp2_bm_cookie_pool_get(bm); 5496 MVPP2_RXD_BM_POOL_ID_OFFS;
5507 bm_pool = &port->priv->bm_pools[pool]; 5497 bm_pool = &port->priv->bm_pools[pool];
5508 5498
5509 /* In case of an error, release the requested buffer pointer 5499 /* In case of an error, release the requested buffer pointer
@@ -5516,7 +5506,7 @@ err_drop_frame:
5516 dev->stats.rx_errors++; 5506 dev->stats.rx_errors++;
5517 mvpp2_rx_error(port, rx_desc); 5507 mvpp2_rx_error(port, rx_desc);
5518 /* Return the buffer to the pool */ 5508 /* Return the buffer to the pool */
5519 mvpp2_pool_refill(port, bm, dma_addr, phys_addr); 5509 mvpp2_pool_refill(port, pool, dma_addr, phys_addr);
5520 continue; 5510 continue;
5521 } 5511 }
5522 5512
@@ -5531,7 +5521,7 @@ err_drop_frame:
5531 goto err_drop_frame; 5521 goto err_drop_frame;
5532 } 5522 }
5533 5523
5534 err = mvpp2_rx_refill(port, bm_pool, bm); 5524 err = mvpp2_rx_refill(port, bm_pool, pool);
5535 if (err) { 5525 if (err) {
5536 netdev_err(port->dev, "failed to refill BM pools\n"); 5526 netdev_err(port->dev, "failed to refill BM pools\n");
5537 goto err_drop_frame; 5527 goto err_drop_frame;
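The recurring mvpp2 change swaps smp_processor_id() for get_cpu()/put_cpu() around the indirect per-CPU register accesses: the sequence "pick a CPU, issue several writes through its window" is only safe if the task cannot migrate in between, and get_cpu() disables preemption where smp_processor_id() alone did not. A minimal sketch, with an illustrative register layout:

#include <linux/io.h>
#include <linux/smp.h>
#include <linux/types.h>

#define DEMO_CPU_STRIDE	0x10000		/* per-CPU window stride, illustrative */

static void demo_percpu_write(void __iomem *base, u32 reg, u32 val)
{
	int cpu = get_cpu();	/* disables preemption */

	writel(val, base + cpu * DEMO_CPU_STRIDE + reg);
	put_cpu();		/* re-enables preemption */
}
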
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index ae5fdc2df654..ffbcb27c05e5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -1562,11 +1562,6 @@ static int mlx4_en_flow_replace(struct net_device *dev,
1562 qpn = priv->drop_qp.qpn; 1562 qpn = priv->drop_qp.qpn;
1563 else if (cmd->fs.ring_cookie & EN_ETHTOOL_QP_ATTACH) { 1563 else if (cmd->fs.ring_cookie & EN_ETHTOOL_QP_ATTACH) {
1564 qpn = cmd->fs.ring_cookie & (EN_ETHTOOL_QP_ATTACH - 1); 1564 qpn = cmd->fs.ring_cookie & (EN_ETHTOOL_QP_ATTACH - 1);
1565 if (qpn < priv->rss_map.base_qpn ||
1566 qpn >= priv->rss_map.base_qpn + priv->rx_ring_num) {
1567 en_warn(priv, "rxnfc: QP (0x%x) doesn't exist\n", qpn);
1568 return -EINVAL;
1569 }
1570 } else { 1565 } else {
1571 if (cmd->fs.ring_cookie >= priv->rx_ring_num) { 1566 if (cmd->fs.ring_cookie >= priv->rx_ring_num) {
1572 en_warn(priv, "rxnfc: RX ring (%llu) doesn't exist\n", 1567 en_warn(priv, "rxnfc: RX ring (%llu) doesn't exist\n",
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 703205475524..83aab1e4c8c8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -2862,12 +2862,10 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
2862 int port = 0; 2862 int port = 0;
2863 2863
2864 if (msi_x) { 2864 if (msi_x) {
2865 int nreq = dev->caps.num_ports * num_online_cpus() + 1; 2865 int nreq = min3(dev->caps.num_ports *
2866 2866 (int)num_online_cpus() + 1,
2867 nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs, 2867 dev->caps.num_eqs - dev->caps.reserved_eqs,
2868 nreq); 2868 MAX_MSIX);
2869 if (nreq > MAX_MSIX)
2870 nreq = MAX_MSIX;
2871 2869
2872 entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL); 2870 entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);
2873 if (!entries) 2871 if (!entries)
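The mlx4_enable_msi_x() hunk collapses three clamping steps into one min3() expression; all operands must share a type, hence the (int) cast on num_online_cpus(). A sketch of the same computation, with an illustrative helper name:

#include <linux/cpumask.h>
#include <linux/kernel.h>

static int demo_msix_vectors(int ports, int free_eqs, int hw_max)
{
	/* one async vector plus one per port per online CPU, clamped to
	 * the free EQs and the hardware maximum
	 */
	return min3(ports * (int)num_online_cpus() + 1, free_eqs, hw_max);
}
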
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index 1a670b681555..0710b3677464 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -35,6 +35,7 @@
35#include <linux/etherdevice.h> 35#include <linux/etherdevice.h>
36 36
37#include <linux/mlx4/cmd.h> 37#include <linux/mlx4/cmd.h>
38#include <linux/mlx4/qp.h>
38#include <linux/export.h> 39#include <linux/export.h>
39 40
40#include "mlx4.h" 41#include "mlx4.h"
@@ -985,16 +986,21 @@ int mlx4_flow_attach(struct mlx4_dev *dev,
985 if (IS_ERR(mailbox)) 986 if (IS_ERR(mailbox))
986 return PTR_ERR(mailbox); 987 return PTR_ERR(mailbox);
987 988
989 if (!mlx4_qp_lookup(dev, rule->qpn)) {
990 mlx4_err_rule(dev, "QP doesn't exist\n", rule);
991 ret = -EINVAL;
992 goto out;
993 }
994
988 trans_rule_ctrl_to_hw(rule, mailbox->buf); 995 trans_rule_ctrl_to_hw(rule, mailbox->buf);
989 996
990 size += sizeof(struct mlx4_net_trans_rule_hw_ctrl); 997 size += sizeof(struct mlx4_net_trans_rule_hw_ctrl);
991 998
992 list_for_each_entry(cur, &rule->list, list) { 999 list_for_each_entry(cur, &rule->list, list) {
993 ret = parse_trans_rule(dev, cur, mailbox->buf + size); 1000 ret = parse_trans_rule(dev, cur, mailbox->buf + size);
994 if (ret < 0) { 1001 if (ret < 0)
995 mlx4_free_cmd_mailbox(dev, mailbox); 1002 goto out;
996 return ret; 1003
997 }
998 size += ret; 1004 size += ret;
999 } 1005 }
1000 1006
@@ -1021,6 +1027,7 @@ int mlx4_flow_attach(struct mlx4_dev *dev,
1021 } 1027 }
1022 } 1028 }
1023 1029
1030out:
1024 mlx4_free_cmd_mailbox(dev, mailbox); 1031 mlx4_free_cmd_mailbox(dev, mailbox);
1025 1032
1026 return ret; 1033 return ret;
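The mcg.c change adds a QP-existence check to mlx4_flow_attach() and, at the same time, funnels every failure path through one "out:" label that frees the mailbox, so new early exits cannot leak it. A sketch of that unwind pattern; the demo_* helpers are hypothetical stubs:

#include <linux/errno.h>
#include <linux/slab.h>

struct demo_dev;
struct demo_mbox { u8 buf[256]; };

static bool demo_qp_exists(struct demo_dev *d) { return true; }
static int demo_parse_rules(struct demo_dev *d, struct demo_mbox *m) { return 0; }
static int demo_fw_attach(struct demo_dev *d, struct demo_mbox *m) { return 0; }

static int demo_flow_attach(struct demo_dev *dev)
{
	struct demo_mbox *mbox = kzalloc(sizeof(*mbox), GFP_KERNEL);
	int ret;

	if (!mbox)
		return -ENOMEM;

	if (!demo_qp_exists(dev)) {	/* the new validation step */
		ret = -EINVAL;
		goto out;
	}

	ret = demo_parse_rules(dev, mbox);
	if (ret < 0)
		goto out;		/* no duplicated free here */

	ret = demo_fw_attach(dev, mbox);
out:
	kfree(mbox);
	return ret;
}
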
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 2d6abd4662b1..5a310d313e94 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -384,6 +384,19 @@ static void mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
384 __mlx4_qp_free_icm(dev, qpn); 384 __mlx4_qp_free_icm(dev, qpn);
385} 385}
386 386
387struct mlx4_qp *mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn)
388{
389 struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
390 struct mlx4_qp *qp;
391
392 spin_lock(&qp_table->lock);
393
394 qp = __mlx4_qp_lookup(dev, qpn);
395
396 spin_unlock(&qp_table->lock);
397 return qp;
398}
399
387int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp, gfp_t gfp) 400int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp, gfp_t gfp)
388{ 401{
389 struct mlx4_priv *priv = mlx4_priv(dev); 402 struct mlx4_priv *priv = mlx4_priv(dev);
@@ -471,6 +484,12 @@ int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
471 } 484 }
472 485
473 if (attr & MLX4_UPDATE_QP_QOS_VPORT) { 486 if (attr & MLX4_UPDATE_QP_QOS_VPORT) {
487 if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP)) {
488 mlx4_warn(dev, "Granular QoS per VF is not enabled\n");
489 err = -EOPNOTSUPP;
490 goto out;
491 }
492
474 qp_mask |= 1ULL << MLX4_UPD_QP_MASK_QOS_VPP; 493 qp_mask |= 1ULL << MLX4_UPD_QP_MASK_QOS_VPP;
475 cmd->qp_context.qos_vport = params->qos_vport; 494 cmd->qp_context.qos_vport = params->qos_vport;
476 } 495 }
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 07516545474f..812783865205 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -5255,6 +5255,13 @@ void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
5255 mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex); 5255 mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
5256} 5256}
5257 5257
5258static void update_qos_vpp(struct mlx4_update_qp_context *ctx,
5259 struct mlx4_vf_immed_vlan_work *work)
5260{
5261 ctx->qp_mask |= cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_QOS_VPP);
5262 ctx->qp_context.qos_vport = work->qos_vport;
5263}
5264
5258void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work) 5265void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
5259{ 5266{
5260 struct mlx4_vf_immed_vlan_work *work = 5267 struct mlx4_vf_immed_vlan_work *work =
@@ -5369,11 +5376,10 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
5369 qp->sched_queue & 0xC7; 5376 qp->sched_queue & 0xC7;
5370 upd_context->qp_context.pri_path.sched_queue |= 5377 upd_context->qp_context.pri_path.sched_queue |=
5371 ((work->qos & 0x7) << 3); 5378 ((work->qos & 0x7) << 3);
5372 upd_context->qp_mask |= 5379
5373 cpu_to_be64(1ULL << 5380 if (dev->caps.flags2 &
5374 MLX4_UPD_QP_MASK_QOS_VPP); 5381 MLX4_DEV_CAP_FLAG2_QOS_VPP)
5375 upd_context->qp_context.qos_vport = 5382 update_qos_vpp(upd_context, work);
5376 work->qos_vport;
5377 } 5383 }
5378 5384
5379 err = mlx4_cmd(dev, mailbox->dma, 5385 err = mlx4_cmd(dev, mailbox->dma,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index fc52d742b7f7..27251a78075c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -13,7 +13,7 @@ config MLX5_CORE
13 13
14config MLX5_CORE_EN 14config MLX5_CORE_EN
15 bool "Mellanox Technologies ConnectX-4 Ethernet support" 15 bool "Mellanox Technologies ConnectX-4 Ethernet support"
16 depends on NETDEVICES && ETHERNET && PCI && MLX5_CORE 16 depends on NETDEVICES && ETHERNET && INET && PCI && MLX5_CORE
17 depends on IPV6=y || IPV6=n || MLX5_CORE=m 17 depends on IPV6=y || IPV6=n || MLX5_CORE=m
18 imply PTP_1588_CLOCK 18 imply PTP_1588_CLOCK
19 default n 19 default n
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 5bdaf3d545b2..10d282841f5b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -774,7 +774,7 @@ static void cb_timeout_handler(struct work_struct *work)
774 mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n", 774 mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
775 mlx5_command_str(msg_to_opcode(ent->in)), 775 mlx5_command_str(msg_to_opcode(ent->in)),
776 msg_to_opcode(ent->in)); 776 msg_to_opcode(ent->in));
777 mlx5_cmd_comp_handler(dev, 1UL << ent->idx); 777 mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
778} 778}
779 779
780static void cmd_work_handler(struct work_struct *work) 780static void cmd_work_handler(struct work_struct *work)
@@ -804,6 +804,7 @@ static void cmd_work_handler(struct work_struct *work)
804 } 804 }
805 805
806 cmd->ent_arr[ent->idx] = ent; 806 cmd->ent_arr[ent->idx] = ent;
807 set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state);
807 lay = get_inst(cmd, ent->idx); 808 lay = get_inst(cmd, ent->idx);
808 ent->lay = lay; 809 ent->lay = lay;
809 memset(lay, 0, sizeof(*lay)); 810 memset(lay, 0, sizeof(*lay));
@@ -825,6 +826,20 @@ static void cmd_work_handler(struct work_struct *work)
825 if (ent->callback) 826 if (ent->callback)
826 schedule_delayed_work(&ent->cb_timeout_work, cb_timeout); 827 schedule_delayed_work(&ent->cb_timeout_work, cb_timeout);
827 828
829 /* Skip sending command to fw if internal error */
830 if (pci_channel_offline(dev->pdev) ||
831 dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
832 u8 status = 0;
833 u32 drv_synd;
834
835 ent->ret = mlx5_internal_err_ret_value(dev, msg_to_opcode(ent->in), &drv_synd, &status);
836 MLX5_SET(mbox_out, ent->out, status, status);
837 MLX5_SET(mbox_out, ent->out, syndrome, drv_synd);
838
839 mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
840 return;
841 }
842
828 /* ring doorbell after the descriptor is valid */ 843 /* ring doorbell after the descriptor is valid */
829 mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx); 844 mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
830 wmb(); 845 wmb();
@@ -835,7 +850,7 @@ static void cmd_work_handler(struct work_struct *work)
835 poll_timeout(ent); 850 poll_timeout(ent);
836 /* make sure we read the descriptor after ownership is SW */ 851 /* make sure we read the descriptor after ownership is SW */
837 rmb(); 852 rmb();
838 mlx5_cmd_comp_handler(dev, 1UL << ent->idx); 853 mlx5_cmd_comp_handler(dev, 1UL << ent->idx, (ent->ret == -ETIMEDOUT));
839 } 854 }
840} 855}
841 856
@@ -879,7 +894,7 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
879 wait_for_completion(&ent->done); 894 wait_for_completion(&ent->done);
880 } else if (!wait_for_completion_timeout(&ent->done, timeout)) { 895 } else if (!wait_for_completion_timeout(&ent->done, timeout)) {
881 ent->ret = -ETIMEDOUT; 896 ent->ret = -ETIMEDOUT;
882 mlx5_cmd_comp_handler(dev, 1UL << ent->idx); 897 mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
883 } 898 }
884 899
885 err = ent->ret; 900 err = ent->ret;
@@ -1375,7 +1390,7 @@ static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
1375 } 1390 }
1376} 1391}
1377 1392
1378void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec) 1393void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced)
1379{ 1394{
1380 struct mlx5_cmd *cmd = &dev->cmd; 1395 struct mlx5_cmd *cmd = &dev->cmd;
1381 struct mlx5_cmd_work_ent *ent; 1396 struct mlx5_cmd_work_ent *ent;
@@ -1395,6 +1410,19 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec)
1395 struct semaphore *sem; 1410 struct semaphore *sem;
1396 1411
1397 ent = cmd->ent_arr[i]; 1412 ent = cmd->ent_arr[i];
1413
1414 /* if we already completed the command, ignore it */
1415 if (!test_and_clear_bit(MLX5_CMD_ENT_STATE_PENDING_COMP,
1416 &ent->state)) {
1417 /* only real completion can free the cmd slot */
1418 if (!forced) {
1419 mlx5_core_err(dev, "Command completion arrived after timeout (entry idx = %d).\n",
1420 ent->idx);
1421 free_ent(cmd, ent->idx);
1422 }
1423 continue;
1424 }
1425
1398 if (ent->callback) 1426 if (ent->callback)
1399 cancel_delayed_work(&ent->cb_timeout_work); 1427 cancel_delayed_work(&ent->cb_timeout_work);
1400 if (ent->page_queue) 1428 if (ent->page_queue)
@@ -1417,7 +1445,10 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec)
1417 mlx5_core_dbg(dev, "command completed. ret 0x%x, delivery status %s(0x%x)\n", 1445 mlx5_core_dbg(dev, "command completed. ret 0x%x, delivery status %s(0x%x)\n",
1418 ent->ret, deliv_status_to_str(ent->status), ent->status); 1446 ent->ret, deliv_status_to_str(ent->status), ent->status);
1419 } 1447 }
1420 free_ent(cmd, ent->idx); 1448
1449 /* only real completion will free the entry slot */
1450 if (!forced)
1451 free_ent(cmd, ent->idx);
1421 1452
1422 if (ent->callback) { 1453 if (ent->callback) {
1423 ds = ent->ts2 - ent->ts1; 1454 ds = ent->ts2 - ent->ts1;
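Note on the cmd.c hunks above: the new "forced" flag and the MLX5_CMD_ENT_STATE_PENDING_COMP bit make command completion claim-once, so a timeout-driven (forced) completion and a late real EQ completion cannot both process the same entry, and only the real completion frees the command slot. A minimal userspace sketch of that claim-once pattern, with illustrative names standing in for the driver's test_and_clear_bit() usage:

    /* Claim-once completion: whichever path atomically clears the pending
     * bit first handles the command; a late real completion only frees
     * the slot. All names here are illustrative, not the driver's. */
    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_uint state = 1;                /* bit 0: completion pending */

    static void complete(const char *who, int forced)
    {
            unsigned int prev = atomic_fetch_and(&state, ~1u);

            if (!(prev & 1u)) {                  /* already completed */
                    if (!forced)
                            printf("%s: late real completion, freeing slot\n", who);
                    return;
            }
            printf("%s: handles the command%s\n", who,
                   forced ? " (forced, slot kept)" : "");
    }

    int main(void)
    {
            complete("timeout path", 1);         /* forced completion wins */
            complete("eq path", 0);              /* real one arrives late */
            return 0;
    }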
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 0099a3e397bc..944fc1742464 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -458,13 +458,15 @@ struct mlx5e_mpw_info {
458 458
459struct mlx5e_rx_am_stats { 459struct mlx5e_rx_am_stats {
460 int ppms; /* packets per msec */ 460 int ppms; /* packets per msec */
461 int bpms; /* bytes per msec */
461 int epms; /* events per msec */ 462 int epms; /* events per msec */
462}; 463};
463 464
464struct mlx5e_rx_am_sample { 465struct mlx5e_rx_am_sample {
465 ktime_t time; 466 ktime_t time;
466 unsigned int pkt_ctr; 467 u32 pkt_ctr;
467 u16 event_ctr; 468 u32 byte_ctr;
469 u16 event_ctr;
468}; 470};
469 471
470struct mlx5e_rx_am { /* Adaptive Moderation */ 472struct mlx5e_rx_am { /* Adaptive Moderation */
@@ -1003,7 +1005,7 @@ int mlx5e_create_direct_tirs(struct mlx5e_priv *priv);
1003void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv); 1005void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv);
1004void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt); 1006void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt);
1005 1007
1006int mlx5e_create_ttc_table(struct mlx5e_priv *priv, u32 underlay_qpn); 1008int mlx5e_create_ttc_table(struct mlx5e_priv *priv);
1007void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv); 1009void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv);
1008 1010
1009int mlx5e_create_tis(struct mlx5_core_dev *mdev, int tc, 1011int mlx5e_create_tis(struct mlx5_core_dev *mdev, int tc,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index ce7b09d72ff6..16486dff1493 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -794,7 +794,6 @@ static void get_supported(u32 eth_proto_cap,
794 ptys2ethtool_supported_port(link_ksettings, eth_proto_cap); 794 ptys2ethtool_supported_port(link_ksettings, eth_proto_cap);
795 ptys2ethtool_supported_link(supported, eth_proto_cap); 795 ptys2ethtool_supported_link(supported, eth_proto_cap);
796 ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Pause); 796 ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Pause);
797 ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Asym_Pause);
798} 797}
799 798
800static void get_advertising(u32 eth_proto_cap, u8 tx_pause, 799static void get_advertising(u32 eth_proto_cap, u8 tx_pause,
@@ -804,7 +803,7 @@ static void get_advertising(u32 eth_proto_cap, u8 tx_pause,
804 unsigned long *advertising = link_ksettings->link_modes.advertising; 803 unsigned long *advertising = link_ksettings->link_modes.advertising;
805 804
806 ptys2ethtool_adver_link(advertising, eth_proto_cap); 805 ptys2ethtool_adver_link(advertising, eth_proto_cap);
807 if (tx_pause) 806 if (rx_pause)
808 ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Pause); 807 ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Pause);
809 if (tx_pause ^ rx_pause) 808 if (tx_pause ^ rx_pause)
810 ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Asym_Pause); 809 ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Asym_Pause);
@@ -849,6 +848,8 @@ static int mlx5e_get_link_ksettings(struct net_device *netdev,
849 struct mlx5e_priv *priv = netdev_priv(netdev); 848 struct mlx5e_priv *priv = netdev_priv(netdev);
850 struct mlx5_core_dev *mdev = priv->mdev; 849 struct mlx5_core_dev *mdev = priv->mdev;
851 u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {0}; 850 u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {0};
851 u32 rx_pause = 0;
852 u32 tx_pause = 0;
852 u32 eth_proto_cap; 853 u32 eth_proto_cap;
853 u32 eth_proto_admin; 854 u32 eth_proto_admin;
854 u32 eth_proto_lp; 855 u32 eth_proto_lp;
@@ -871,11 +872,13 @@ static int mlx5e_get_link_ksettings(struct net_device *netdev,
871 an_disable_admin = MLX5_GET(ptys_reg, out, an_disable_admin); 872 an_disable_admin = MLX5_GET(ptys_reg, out, an_disable_admin);
872 an_status = MLX5_GET(ptys_reg, out, an_status); 873 an_status = MLX5_GET(ptys_reg, out, an_status);
873 874
875 mlx5_query_port_pause(mdev, &rx_pause, &tx_pause);
876
874 ethtool_link_ksettings_zero_link_mode(link_ksettings, supported); 877 ethtool_link_ksettings_zero_link_mode(link_ksettings, supported);
875 ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising); 878 ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
876 879
877 get_supported(eth_proto_cap, link_ksettings); 880 get_supported(eth_proto_cap, link_ksettings);
878 get_advertising(eth_proto_admin, 0, 0, link_ksettings); 881 get_advertising(eth_proto_admin, tx_pause, rx_pause, link_ksettings);
879 get_speed_duplex(netdev, eth_proto_oper, link_ksettings); 882 get_speed_duplex(netdev, eth_proto_oper, link_ksettings);
880 883
881 eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap; 884 eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
@@ -1239,11 +1242,11 @@ static int mlx5e_get_ts_info(struct net_device *dev,
1239 SOF_TIMESTAMPING_RX_HARDWARE | 1242 SOF_TIMESTAMPING_RX_HARDWARE |
1240 SOF_TIMESTAMPING_RAW_HARDWARE; 1243 SOF_TIMESTAMPING_RAW_HARDWARE;
1241 1244
1242 info->tx_types = (BIT(1) << HWTSTAMP_TX_OFF) | 1245 info->tx_types = BIT(HWTSTAMP_TX_OFF) |
1243 (BIT(1) << HWTSTAMP_TX_ON); 1246 BIT(HWTSTAMP_TX_ON);
1244 1247
1245 info->rx_filters = (BIT(1) << HWTSTAMP_FILTER_NONE) | 1248 info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
1246 (BIT(1) << HWTSTAMP_FILTER_ALL); 1249 BIT(HWTSTAMP_FILTER_ALL);
1247 1250
1248 return 0; 1251 return 0;
1249} 1252}
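The tx_types/rx_filters fix above corrects a shifted-constant bug: BIT(1) << n is 2 << n, i.e. bit n + 1, so the old masks advertised the wrong HWTSTAMP bits. A quick standalone check of the arithmetic (HWTSTAMP_TX_OFF = 0 and HWTSTAMP_TX_ON = 1 are the real enum values from linux/net_tstamp.h):

    #include <stdio.h>
    #define BIT(n) (1u << (n))

    int main(void)
    {
            enum { HWTSTAMP_TX_OFF = 0, HWTSTAMP_TX_ON = 1 };

            unsigned int old_mask = (BIT(1) << HWTSTAMP_TX_OFF) | (BIT(1) << HWTSTAMP_TX_ON);
            unsigned int fixed    = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);

            printf("old 0x%x, fixed 0x%x\n", old_mask, fixed);  /* 0x6 vs 0x3 */
            return 0;
    }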
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index 576d6787b484..53ed58320a24 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -800,7 +800,7 @@ void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv)
800 mlx5e_destroy_flow_table(&ttc->ft); 800 mlx5e_destroy_flow_table(&ttc->ft);
801} 801}
802 802
803int mlx5e_create_ttc_table(struct mlx5e_priv *priv, u32 underlay_qpn) 803int mlx5e_create_ttc_table(struct mlx5e_priv *priv)
804{ 804{
805 struct mlx5e_ttc_table *ttc = &priv->fs.ttc; 805 struct mlx5e_ttc_table *ttc = &priv->fs.ttc;
806 struct mlx5_flow_table_attr ft_attr = {}; 806 struct mlx5_flow_table_attr ft_attr = {};
@@ -810,7 +810,6 @@ int mlx5e_create_ttc_table(struct mlx5e_priv *priv, u32 underlay_qpn)
810 ft_attr.max_fte = MLX5E_TTC_TABLE_SIZE; 810 ft_attr.max_fte = MLX5E_TTC_TABLE_SIZE;
811 ft_attr.level = MLX5E_TTC_FT_LEVEL; 811 ft_attr.level = MLX5E_TTC_FT_LEVEL;
812 ft_attr.prio = MLX5E_NIC_PRIO; 812 ft_attr.prio = MLX5E_NIC_PRIO;
813 ft_attr.underlay_qpn = underlay_qpn;
814 813
815 ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr); 814 ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
816 if (IS_ERR(ft->t)) { 815 if (IS_ERR(ft->t)) {
@@ -1147,7 +1146,7 @@ int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
1147 priv->netdev->hw_features &= ~NETIF_F_NTUPLE; 1146 priv->netdev->hw_features &= ~NETIF_F_NTUPLE;
1148 } 1147 }
1149 1148
1150 err = mlx5e_create_ttc_table(priv, 0); 1149 err = mlx5e_create_ttc_table(priv);
1151 if (err) { 1150 if (err) {
1152 netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n", 1151 netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
1153 err); 1152 err);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index a61b71b6fff3..277f4de30375 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -2976,7 +2976,7 @@ static int mlx5e_setup_tc(struct net_device *netdev, u8 tc)
2976 new_channels.params = priv->channels.params; 2976 new_channels.params = priv->channels.params;
2977 new_channels.params.num_tc = tc ? tc : 1; 2977 new_channels.params.num_tc = tc ? tc : 1;
2978 2978
2979 if (test_bit(MLX5E_STATE_OPENED, &priv->state)) { 2979 if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
2980 priv->channels.params = new_channels.params; 2980 priv->channels.params = new_channels.params;
2981 goto out; 2981 goto out;
2982 } 2982 }
@@ -4241,7 +4241,8 @@ struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
4241 return netdev; 4241 return netdev;
4242 4242
4243err_cleanup_nic: 4243err_cleanup_nic:
4244 profile->cleanup(priv); 4244 if (profile->cleanup)
4245 profile->cleanup(priv);
4245 free_netdev(netdev); 4246 free_netdev(netdev);
4246 4247
4247 return NULL; 4248 return NULL;
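The error path fixed above must treat profile->cleanup as optional: some profiles legitimately leave the hook NULL, so the unwind code has to check before calling through the pointer. A minimal, runnable sketch of that guard (types and names are illustrative only):

    #include <stdio.h>

    struct profile {
            void (*cleanup)(void *priv);       /* optional hook, may be NULL */
    };

    static void unwind(const struct profile *p, void *priv)
    {
            if (p->cleanup)                    /* the guard the patch adds */
                    p->cleanup(priv);
            printf("freed netdev\n");          /* stands in for free_netdev() */
    }

    int main(void)
    {
            struct profile rep = { .cleanup = NULL };  /* hookless profile */
            unwind(&rep, NULL);                /* no crash: hook is skipped */
            return 0;
    }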
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 79462c0368a0..46984a52a94b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -791,6 +791,8 @@ static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev,
791 params->tx_max_inline = mlx5e_get_max_inline_cap(mdev); 791 params->tx_max_inline = mlx5e_get_max_inline_cap(mdev);
792 params->num_tc = 1; 792 params->num_tc = 1;
793 params->lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ; 793 params->lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
794
795 mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
794} 796}
795 797
796static void mlx5e_build_rep_netdev(struct net_device *netdev) 798static void mlx5e_build_rep_netdev(struct net_device *netdev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 7b1566f0ae58..66b5fec15313 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -1041,6 +1041,8 @@ void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq)
1041#define MLX5_IB_GRH_BYTES 40 1041#define MLX5_IB_GRH_BYTES 40
1042#define MLX5_IPOIB_ENCAP_LEN 4 1042#define MLX5_IPOIB_ENCAP_LEN 4
1043#define MLX5_GID_SIZE 16 1043#define MLX5_GID_SIZE 16
1044#define MLX5_IPOIB_PSEUDO_LEN 20
1045#define MLX5_IPOIB_HARD_LEN (MLX5_IPOIB_PSEUDO_LEN + MLX5_IPOIB_ENCAP_LEN)
1044 1046
1045static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq, 1047static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
1046 struct mlx5_cqe64 *cqe, 1048 struct mlx5_cqe64 *cqe,
@@ -1048,6 +1050,7 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
1048 struct sk_buff *skb) 1050 struct sk_buff *skb)
1049{ 1051{
1050 struct net_device *netdev = rq->netdev; 1052 struct net_device *netdev = rq->netdev;
1053 char *pseudo_header;
1051 u8 *dgid; 1054 u8 *dgid;
1052 u8 g; 1055 u8 g;
1053 1056
@@ -1076,8 +1079,11 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
1076 if (likely(netdev->features & NETIF_F_RXHASH)) 1079 if (likely(netdev->features & NETIF_F_RXHASH))
1077 mlx5e_skb_set_hash(cqe, skb); 1080 mlx5e_skb_set_hash(cqe, skb);
1078 1081
1082 /* 20 bytes of ipoib header and 4 for encap existing */
1083 pseudo_header = skb_push(skb, MLX5_IPOIB_PSEUDO_LEN);
1084 memset(pseudo_header, 0, MLX5_IPOIB_PSEUDO_LEN);
1079 skb_reset_mac_header(skb); 1085 skb_reset_mac_header(skb);
1080 skb_pull(skb, MLX5_IPOIB_ENCAP_LEN); 1086 skb_pull(skb, MLX5_IPOIB_HARD_LEN);
1081 1087
1082 skb->dev = netdev; 1088 skb->dev = netdev;
1083 1089
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
index 02dd3a95ed8f..acf32fe952cd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
@@ -183,28 +183,27 @@ static void mlx5e_am_exit_parking(struct mlx5e_rx_am *am)
183 mlx5e_am_step(am); 183 mlx5e_am_step(am);
184} 184}
185 185
186#define IS_SIGNIFICANT_DIFF(val, ref) \
187 (((100 * abs((val) - (ref))) / (ref)) > 10) /* more than 10% difference */
188
186static int mlx5e_am_stats_compare(struct mlx5e_rx_am_stats *curr, 189static int mlx5e_am_stats_compare(struct mlx5e_rx_am_stats *curr,
187 struct mlx5e_rx_am_stats *prev) 190 struct mlx5e_rx_am_stats *prev)
188{ 191{
189 int diff; 192 if (!prev->bpms)
190 193 return curr->bpms ? MLX5E_AM_STATS_BETTER :
191 if (!prev->ppms)
192 return curr->ppms ? MLX5E_AM_STATS_BETTER :
193 MLX5E_AM_STATS_SAME; 194 MLX5E_AM_STATS_SAME;
194 195
195 diff = curr->ppms - prev->ppms; 196 if (IS_SIGNIFICANT_DIFF(curr->bpms, prev->bpms))
196 if (((100 * abs(diff)) / prev->ppms) > 10) /* more than 10% diff */ 197 return (curr->bpms > prev->bpms) ? MLX5E_AM_STATS_BETTER :
197 return (diff > 0) ? MLX5E_AM_STATS_BETTER : 198 MLX5E_AM_STATS_WORSE;
198 MLX5E_AM_STATS_WORSE;
199 199
200 if (!prev->epms) 200 if (IS_SIGNIFICANT_DIFF(curr->ppms, prev->ppms))
201 return curr->epms ? MLX5E_AM_STATS_WORSE : 201 return (curr->ppms > prev->ppms) ? MLX5E_AM_STATS_BETTER :
202 MLX5E_AM_STATS_SAME; 202 MLX5E_AM_STATS_WORSE;
203 203
204 diff = curr->epms - prev->epms; 204 if (IS_SIGNIFICANT_DIFF(curr->epms, prev->epms))
205 if (((100 * abs(diff)) / prev->epms) > 10) /* more than 10% diff */ 205 return (curr->epms < prev->epms) ? MLX5E_AM_STATS_BETTER :
206 return (diff < 0) ? MLX5E_AM_STATS_BETTER : 206 MLX5E_AM_STATS_WORSE;
207 MLX5E_AM_STATS_WORSE;
208 207
209 return MLX5E_AM_STATS_SAME; 208 return MLX5E_AM_STATS_SAME;
210} 209}
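The rewritten comparison above keys everything on IS_SIGNIFICANT_DIFF(): a sample only counts as better or worse when it moves by more than 10% of the reference, in integer arithmetic, which also makes exactly-10% changes insignificant. A small boundary check of the macro as defined in the hunk:

    #include <stdio.h>
    #include <stdlib.h>

    #define IS_SIGNIFICANT_DIFF(val, ref) \
            (((100 * abs((val) - (ref))) / (ref)) > 10)

    int main(void)
    {
            printf("%d\n", IS_SIGNIFICANT_DIFF(110, 100)); /* 0: exactly 10% is not significant */
            printf("%d\n", IS_SIGNIFICANT_DIFF(111, 100)); /* 1: above 10% is */
            printf("%d\n", IS_SIGNIFICANT_DIFF(89, 100));  /* 1: symmetric via abs() */
            return 0;
    }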
@@ -266,10 +265,13 @@ static void mlx5e_am_sample(struct mlx5e_rq *rq,
266{ 265{
267 s->time = ktime_get(); 266 s->time = ktime_get();
268 s->pkt_ctr = rq->stats.packets; 267 s->pkt_ctr = rq->stats.packets;
268 s->byte_ctr = rq->stats.bytes;
269 s->event_ctr = rq->cq.event_ctr; 269 s->event_ctr = rq->cq.event_ctr;
270} 270}
271 271
272#define MLX5E_AM_NEVENTS 64 272#define MLX5E_AM_NEVENTS 64
273#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE)
274#define BIT_GAP(bits, end, start) ((((end) - (start)) + BIT_ULL(bits)) & (BIT_ULL(bits) - 1))
273 275
274static void mlx5e_am_calc_stats(struct mlx5e_rx_am_sample *start, 276static void mlx5e_am_calc_stats(struct mlx5e_rx_am_sample *start,
275 struct mlx5e_rx_am_sample *end, 277 struct mlx5e_rx_am_sample *end,
@@ -277,13 +279,17 @@ static void mlx5e_am_calc_stats(struct mlx5e_rx_am_sample *start,
277{ 279{
278 /* u32 holds up to 71 minutes, should be enough */ 280 /* u32 holds up to 71 minutes, should be enough */
279 u32 delta_us = ktime_us_delta(end->time, start->time); 281 u32 delta_us = ktime_us_delta(end->time, start->time);
280 unsigned int npkts = end->pkt_ctr - start->pkt_ctr; 282 u32 npkts = BIT_GAP(BITS_PER_TYPE(u32), end->pkt_ctr, start->pkt_ctr);
283 u32 nbytes = BIT_GAP(BITS_PER_TYPE(u32), end->byte_ctr,
284 start->byte_ctr);
281 285
282 if (!delta_us) 286 if (!delta_us)
283 return; 287 return;
284 288
285 curr_stats->ppms = (npkts * USEC_PER_MSEC) / delta_us; 289 curr_stats->ppms = DIV_ROUND_UP(npkts * USEC_PER_MSEC, delta_us);
286 curr_stats->epms = (MLX5E_AM_NEVENTS * USEC_PER_MSEC) / delta_us; 290 curr_stats->bpms = DIV_ROUND_UP(nbytes * USEC_PER_MSEC, delta_us);
291 curr_stats->epms = DIV_ROUND_UP(MLX5E_AM_NEVENTS * USEC_PER_MSEC,
292 delta_us);
287} 293}
288 294
289void mlx5e_rx_am_work(struct work_struct *work) 295void mlx5e_rx_am_work(struct work_struct *work)
@@ -308,7 +314,8 @@ void mlx5e_rx_am(struct mlx5e_rq *rq)
308 314
309 switch (am->state) { 315 switch (am->state) {
310 case MLX5E_AM_MEASURE_IN_PROGRESS: 316 case MLX5E_AM_MEASURE_IN_PROGRESS:
311 nevents = rq->cq.event_ctr - am->start_sample.event_ctr; 317 nevents = BIT_GAP(BITS_PER_TYPE(u16), rq->cq.event_ctr,
318 am->start_sample.event_ctr);
312 if (nevents < MLX5E_AM_NEVENTS) 319 if (nevents < MLX5E_AM_NEVENTS)
313 break; 320 break;
314 mlx5e_am_sample(rq, &end_sample); 321 mlx5e_am_sample(rq, &end_sample);
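BIT_GAP() in the hunks above makes the sampled counter deltas safe across wraparound: it computes ((end - start) + 2^bits) mod 2^bits, so a u16 event counter that wraps from 65535 to 4 yields the true gap of 5 instead of an underflowed subtraction. A standalone check of the arithmetic (macro reproduced with stdint types):

    #include <stdio.h>
    #include <stdint.h>

    #define BITS_PER_TYPE(type) (sizeof(type) * 8)
    #define BIT_GAP(bits, end, start) \
            ((((uint64_t)(end) - (start)) + (1ULL << (bits))) & ((1ULL << (bits)) - 1))

    int main(void)
    {
            uint16_t start = 65535, end = 4;

            printf("gap = %llu\n",              /* prints: gap = 5 */
                   (unsigned long long)BIT_GAP(BITS_PER_TYPE(uint16_t), end, start));
            return 0;
    }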
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 53e4992d6511..f81c3aa60b46 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -417,20 +417,13 @@ struct mlx5e_stats {
417}; 417};
418 418
419static const struct counter_desc mlx5e_pme_status_desc[] = { 419static const struct counter_desc mlx5e_pme_status_desc[] = {
420 { "module_plug", 0 },
421 { "module_unplug", 8 }, 420 { "module_unplug", 8 },
422}; 421};
423 422
424static const struct counter_desc mlx5e_pme_error_desc[] = { 423static const struct counter_desc mlx5e_pme_error_desc[] = {
425 { "module_pwr_budget_exd", 0 }, /* power budget exceed */ 424 { "module_bus_stuck", 16 }, /* bus stuck (I2C or data shorted) */
426 { "module_long_range", 8 }, /* long range for non MLNX cable */ 425 { "module_high_temp", 48 }, /* high temperature */
427 { "module_bus_stuck", 16 }, /* bus stuck (I2C or data shorted) */
428 { "module_no_eeprom", 24 }, /* no eeprom/retry time out */
429 { "module_enforce_part", 32 }, /* enforce part number list */
430 { "module_unknown_id", 40 }, /* unknown identifier */
431 { "module_high_temp", 48 }, /* high temperature */
432 { "module_bad_shorted", 56 }, /* bad or shorted cable/module */ 426 { "module_bad_shorted", 56 }, /* bad or shorted cable/module */
433 { "module_unknown_status", 64 },
434}; 427};
435 428
436#endif /* __MLX5_EN_STATS_H__ */ 429#endif /* __MLX5_EN_STATS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 11c27e4fadf6..9df9fc0d26f5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -43,6 +43,7 @@
43#include <net/tc_act/tc_vlan.h> 43#include <net/tc_act/tc_vlan.h>
44#include <net/tc_act/tc_tunnel_key.h> 44#include <net/tc_act/tc_tunnel_key.h>
45#include <net/tc_act/tc_pedit.h> 45#include <net/tc_act/tc_pedit.h>
46#include <net/tc_act/tc_csum.h>
46#include <net/vxlan.h> 47#include <net/vxlan.h>
47#include <net/arp.h> 48#include <net/arp.h>
48#include "en.h" 49#include "en.h"
@@ -384,7 +385,7 @@ static void mlx5e_detach_encap(struct mlx5e_priv *priv,
384 if (e->flags & MLX5_ENCAP_ENTRY_VALID) 385 if (e->flags & MLX5_ENCAP_ENTRY_VALID)
385 mlx5_encap_dealloc(priv->mdev, e->encap_id); 386 mlx5_encap_dealloc(priv->mdev, e->encap_id);
386 387
387 hlist_del_rcu(&e->encap_hlist); 388 hash_del_rcu(&e->encap_hlist);
388 kfree(e->encap_header); 389 kfree(e->encap_header);
389 kfree(e); 390 kfree(e);
390 } 391 }
@@ -894,7 +895,6 @@ static struct mlx5_fields fields[] = {
894 {MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0, 2, offsetof(struct pedit_headers, eth.h_source[4])}, 895 {MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0, 2, offsetof(struct pedit_headers, eth.h_source[4])},
895 {MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE, 2, offsetof(struct pedit_headers, eth.h_proto)}, 896 {MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE, 2, offsetof(struct pedit_headers, eth.h_proto)},
896 897
897 {MLX5_ACTION_IN_FIELD_OUT_IP_DSCP, 1, offsetof(struct pedit_headers, ip4.tos)},
898 {MLX5_ACTION_IN_FIELD_OUT_IP_TTL, 1, offsetof(struct pedit_headers, ip4.ttl)}, 898 {MLX5_ACTION_IN_FIELD_OUT_IP_TTL, 1, offsetof(struct pedit_headers, ip4.ttl)},
899 {MLX5_ACTION_IN_FIELD_OUT_SIPV4, 4, offsetof(struct pedit_headers, ip4.saddr)}, 899 {MLX5_ACTION_IN_FIELD_OUT_SIPV4, 4, offsetof(struct pedit_headers, ip4.saddr)},
900 {MLX5_ACTION_IN_FIELD_OUT_DIPV4, 4, offsetof(struct pedit_headers, ip4.daddr)}, 900 {MLX5_ACTION_IN_FIELD_OUT_DIPV4, 4, offsetof(struct pedit_headers, ip4.daddr)},
@@ -925,11 +925,11 @@ static int offload_pedit_fields(struct pedit_headers *masks,
925 struct mlx5e_tc_flow_parse_attr *parse_attr) 925 struct mlx5e_tc_flow_parse_attr *parse_attr)
926{ 926{
927 struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals; 927 struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
928 int i, action_size, nactions, max_actions, first, last; 928 int i, action_size, nactions, max_actions, first, last, first_z;
929 void *s_masks_p, *a_masks_p, *vals_p; 929 void *s_masks_p, *a_masks_p, *vals_p;
930 u32 s_mask, a_mask, val;
931 struct mlx5_fields *f; 930 struct mlx5_fields *f;
932 u8 cmd, field_bsize; 931 u8 cmd, field_bsize;
932 u32 s_mask, a_mask;
933 unsigned long mask; 933 unsigned long mask;
934 void *action; 934 void *action;
935 935
@@ -946,7 +946,8 @@ static int offload_pedit_fields(struct pedit_headers *masks,
946 for (i = 0; i < ARRAY_SIZE(fields); i++) { 946 for (i = 0; i < ARRAY_SIZE(fields); i++) {
947 f = &fields[i]; 947 f = &fields[i];
948 /* avoid seeing bits set from previous iterations */ 948 /* avoid seeing bits set from previous iterations */
949 s_mask = a_mask = mask = val = 0; 949 s_mask = 0;
950 a_mask = 0;
950 951
951 s_masks_p = (void *)set_masks + f->offset; 952 s_masks_p = (void *)set_masks + f->offset;
952 a_masks_p = (void *)add_masks + f->offset; 953 a_masks_p = (void *)add_masks + f->offset;
@@ -981,12 +982,12 @@ static int offload_pedit_fields(struct pedit_headers *masks,
981 memset(a_masks_p, 0, f->size); 982 memset(a_masks_p, 0, f->size);
982 } 983 }
983 984
984 memcpy(&val, vals_p, f->size);
985
986 field_bsize = f->size * BITS_PER_BYTE; 985 field_bsize = f->size * BITS_PER_BYTE;
986
987 first_z = find_first_zero_bit(&mask, field_bsize);
987 first = find_first_bit(&mask, field_bsize); 988 first = find_first_bit(&mask, field_bsize);
988 last = find_last_bit(&mask, field_bsize); 989 last = find_last_bit(&mask, field_bsize);
989 if (first > 0 || last != (field_bsize - 1)) { 990 if (first > 0 || last != (field_bsize - 1) || first_z < last) {
990 printk(KERN_WARNING "mlx5: partial rewrite (mask %lx) is currently not offloaded\n", 991 printk(KERN_WARNING "mlx5: partial rewrite (mask %lx) is currently not offloaded\n",
991 mask); 992 mask);
992 return -EOPNOTSUPP; 993 return -EOPNOTSUPP;
@@ -1002,11 +1003,11 @@ static int offload_pedit_fields(struct pedit_headers *masks,
1002 } 1003 }
1003 1004
1004 if (field_bsize == 32) 1005 if (field_bsize == 32)
1005 MLX5_SET(set_action_in, action, data, ntohl(val)); 1006 MLX5_SET(set_action_in, action, data, ntohl(*(__be32 *)vals_p));
1006 else if (field_bsize == 16) 1007 else if (field_bsize == 16)
1007 MLX5_SET(set_action_in, action, data, ntohs(val)); 1008 MLX5_SET(set_action_in, action, data, ntohs(*(__be16 *)vals_p));
1008 else if (field_bsize == 8) 1009 else if (field_bsize == 8)
1009 MLX5_SET(set_action_in, action, data, val); 1010 MLX5_SET(set_action_in, action, data, *(u8 *)vals_p);
1010 1011
1011 action += action_size; 1012 action += action_size;
1012 nactions++; 1013 nactions++;
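The first_z test added above closes a gap in the full-field-rewrite check: a mask such as 0x81 spans the whole 8-bit field (first bit 0, last bit 7) yet still has holes, which find_first_zero_bit() exposes whenever the first zero sits below the last set bit. A userspace approximation using compiler builtins as stand-ins for find_first_bit()/find_last_bit()/find_first_zero_bit() (assumes 0 < mask < ~0UL so the builtins are well defined):

    #include <stdio.h>

    static int rewrites_whole_field(unsigned long mask, int bsize)
    {
            int first   = __builtin_ctzl(mask);
            int last    = (int)(8 * sizeof(long)) - 1 - __builtin_clzl(mask);
            int first_z = __builtin_ctzl(~mask);

            return first == 0 && last == bsize - 1 && first_z >= last;
    }

    int main(void)
    {
            printf("%d\n", rewrites_whole_field(0xffUL, 8));  /* 1: full rewrite */
            printf("%d\n", rewrites_whole_field(0x81UL, 8));  /* 0: hole in mask */
            return 0;
    }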
@@ -1109,6 +1110,28 @@ out_err:
1109 return err; 1110 return err;
1110} 1111}
1111 1112
1113static bool csum_offload_supported(struct mlx5e_priv *priv, u32 action, u32 update_flags)
1114{
1115 u32 prot_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR | TCA_CSUM_UPDATE_FLAG_TCP |
1116 TCA_CSUM_UPDATE_FLAG_UDP;
1117
1118 /* The HW recalcs checksums only if re-writing headers */
1119 if (!(action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)) {
1120 netdev_warn(priv->netdev,
1121 "TC csum action is only offloaded with pedit\n");
1122 return false;
1123 }
1124
1125 if (update_flags & ~prot_flags) {
1126 netdev_warn(priv->netdev,
1127 "can't offload TC csum action for some header/s - flags %#x\n",
1128 update_flags);
1129 return false;
1130 }
1131
1132 return true;
1133}
1134
1112static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, 1135static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
1113 struct mlx5e_tc_flow_parse_attr *parse_attr, 1136 struct mlx5e_tc_flow_parse_attr *parse_attr,
1114 struct mlx5e_tc_flow *flow) 1137 struct mlx5e_tc_flow *flow)
@@ -1149,6 +1172,14 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
1149 continue; 1172 continue;
1150 } 1173 }
1151 1174
1175 if (is_tcf_csum(a)) {
1176 if (csum_offload_supported(priv, attr->action,
1177 tcf_csum_update_flags(a)))
1178 continue;
1179
1180 return -EOPNOTSUPP;
1181 }
1182
1152 if (is_tcf_skbedit_mark(a)) { 1183 if (is_tcf_skbedit_mark(a)) {
1153 u32 mark = tcf_skbedit_mark(a); 1184 u32 mark = tcf_skbedit_mark(a);
1154 1185
@@ -1651,6 +1682,14 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
1651 continue; 1682 continue;
1652 } 1683 }
1653 1684
1685 if (is_tcf_csum(a)) {
1686 if (csum_offload_supported(priv, attr->action,
1687 tcf_csum_update_flags(a)))
1688 continue;
1689
1690 return -EOPNOTSUPP;
1691 }
1692
1654 if (is_tcf_mirred_egress_redirect(a)) { 1693 if (is_tcf_mirred_egress_redirect(a)) {
1655 int ifindex = tcf_mirred_ifindex(a); 1694 int ifindex = tcf_mirred_ifindex(a);
1656 struct net_device *out_dev, *encap_dev = NULL; 1695 struct net_device *out_dev, *encap_dev = NULL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index ea5d8d37a75c..33eae5ad2fb0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -422,7 +422,7 @@ static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr)
422 break; 422 break;
423 423
424 case MLX5_EVENT_TYPE_CMD: 424 case MLX5_EVENT_TYPE_CMD:
425 mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector)); 425 mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector), false);
426 break; 426 break;
427 427
428 case MLX5_EVENT_TYPE_PORT_CHANGE: 428 case MLX5_EVENT_TYPE_PORT_CHANGE:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index f991f669047e..a53e982a6863 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -906,21 +906,34 @@ static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
906 return 0; 906 return 0;
907} 907}
908 908
909int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode) 909static int mlx5_devlink_eswitch_check(struct devlink *devlink)
910{ 910{
911 struct mlx5_core_dev *dev; 911 struct mlx5_core_dev *dev = devlink_priv(devlink);
912 u16 cur_mlx5_mode, mlx5_mode = 0;
913 912
914 dev = devlink_priv(devlink); 913 if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
914 return -EOPNOTSUPP;
915 915
916 if (!MLX5_CAP_GEN(dev, vport_group_manager)) 916 if (!MLX5_CAP_GEN(dev, vport_group_manager))
917 return -EOPNOTSUPP; 917 return -EOPNOTSUPP;
918 918
919 cur_mlx5_mode = dev->priv.eswitch->mode; 919 if (dev->priv.eswitch->mode == SRIOV_NONE)
920
921 if (cur_mlx5_mode == SRIOV_NONE)
922 return -EOPNOTSUPP; 920 return -EOPNOTSUPP;
923 921
922 return 0;
923}
924
925int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
926{
927 struct mlx5_core_dev *dev = devlink_priv(devlink);
928 u16 cur_mlx5_mode, mlx5_mode = 0;
929 int err;
930
931 err = mlx5_devlink_eswitch_check(devlink);
932 if (err)
933 return err;
934
935 cur_mlx5_mode = dev->priv.eswitch->mode;
936
924 if (esw_mode_from_devlink(mode, &mlx5_mode)) 937 if (esw_mode_from_devlink(mode, &mlx5_mode))
925 return -EINVAL; 938 return -EINVAL;
926 939
@@ -937,15 +950,12 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
937 950
938int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode) 951int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
939{ 952{
940 struct mlx5_core_dev *dev; 953 struct mlx5_core_dev *dev = devlink_priv(devlink);
941 954 int err;
942 dev = devlink_priv(devlink);
943
944 if (!MLX5_CAP_GEN(dev, vport_group_manager))
945 return -EOPNOTSUPP;
946 955
947 if (dev->priv.eswitch->mode == SRIOV_NONE) 956 err = mlx5_devlink_eswitch_check(devlink);
948 return -EOPNOTSUPP; 957 if (err)
958 return err;
949 959
950 return esw_mode_to_devlink(dev->priv.eswitch->mode, mode); 960 return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
951} 961}
@@ -954,15 +964,12 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
954{ 964{
955 struct mlx5_core_dev *dev = devlink_priv(devlink); 965 struct mlx5_core_dev *dev = devlink_priv(devlink);
956 struct mlx5_eswitch *esw = dev->priv.eswitch; 966 struct mlx5_eswitch *esw = dev->priv.eswitch;
957 int num_vports = esw->enabled_vports;
958 int err, vport; 967 int err, vport;
959 u8 mlx5_mode; 968 u8 mlx5_mode;
960 969
961 if (!MLX5_CAP_GEN(dev, vport_group_manager)) 970 err = mlx5_devlink_eswitch_check(devlink);
962 return -EOPNOTSUPP; 971 if (err)
963 972 return err;
964 if (esw->mode == SRIOV_NONE)
965 return -EOPNOTSUPP;
966 973
967 switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) { 974 switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
968 case MLX5_CAP_INLINE_MODE_NOT_REQUIRED: 975 case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
@@ -985,7 +992,7 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
985 if (err) 992 if (err)
986 goto out; 993 goto out;
987 994
988 for (vport = 1; vport < num_vports; vport++) { 995 for (vport = 1; vport < esw->enabled_vports; vport++) {
989 err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode); 996 err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
990 if (err) { 997 if (err) {
991 esw_warn(dev, "Failed to set min inline on vport %d\n", 998 esw_warn(dev, "Failed to set min inline on vport %d\n",
@@ -1010,12 +1017,11 @@ int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
1010{ 1017{
1011 struct mlx5_core_dev *dev = devlink_priv(devlink); 1018 struct mlx5_core_dev *dev = devlink_priv(devlink);
1012 struct mlx5_eswitch *esw = dev->priv.eswitch; 1019 struct mlx5_eswitch *esw = dev->priv.eswitch;
1020 int err;
1013 1021
1014 if (!MLX5_CAP_GEN(dev, vport_group_manager)) 1022 err = mlx5_devlink_eswitch_check(devlink);
1015 return -EOPNOTSUPP; 1023 if (err)
1016 1024 return err;
1017 if (esw->mode == SRIOV_NONE)
1018 return -EOPNOTSUPP;
1019 1025
1020 return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode); 1026 return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
1021} 1027}
@@ -1062,11 +1068,9 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap)
1062 struct mlx5_eswitch *esw = dev->priv.eswitch; 1068 struct mlx5_eswitch *esw = dev->priv.eswitch;
1063 int err; 1069 int err;
1064 1070
1065 if (!MLX5_CAP_GEN(dev, vport_group_manager)) 1071 err = mlx5_devlink_eswitch_check(devlink);
1066 return -EOPNOTSUPP; 1072 if (err)
1067 1073 return err;
1068 if (esw->mode == SRIOV_NONE)
1069 return -EOPNOTSUPP;
1070 1074
1071 if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE && 1075 if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
1072 (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, encap) || 1076 (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, encap) ||
@@ -1105,12 +1109,11 @@ int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap)
1105{ 1109{
1106 struct mlx5_core_dev *dev = devlink_priv(devlink); 1110 struct mlx5_core_dev *dev = devlink_priv(devlink);
1107 struct mlx5_eswitch *esw = dev->priv.eswitch; 1111 struct mlx5_eswitch *esw = dev->priv.eswitch;
1112 int err;
1108 1113
1109 if (!MLX5_CAP_GEN(dev, vport_group_manager)) 1114 err = mlx5_devlink_eswitch_check(devlink);
1110 return -EOPNOTSUPP; 1115 if (err)
1111 1116 return err;
1112 if (esw->mode == SRIOV_NONE)
1113 return -EOPNOTSUPP;
1114 1117
1115 *encap = esw->offloads.encap; 1118 *encap = esw->offloads.encap;
1116 return 0; 1119 return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index 19e3d2fc2099..fcec7bedd3cd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -40,28 +40,25 @@
40#include "eswitch.h" 40#include "eswitch.h"
41 41
42int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev, 42int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev,
43 struct mlx5_flow_table *ft) 43 struct mlx5_flow_table *ft, u32 underlay_qpn)
44{ 44{
45 u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {0}; 45 u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {0};
46 u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {0}; 46 u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {0};
47 47
48 if ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) && 48 if ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
49 ft->underlay_qpn == 0) 49 underlay_qpn == 0)
50 return 0; 50 return 0;
51 51
52 MLX5_SET(set_flow_table_root_in, in, opcode, 52 MLX5_SET(set_flow_table_root_in, in, opcode,
53 MLX5_CMD_OP_SET_FLOW_TABLE_ROOT); 53 MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
54 MLX5_SET(set_flow_table_root_in, in, table_type, ft->type); 54 MLX5_SET(set_flow_table_root_in, in, table_type, ft->type);
55 MLX5_SET(set_flow_table_root_in, in, table_id, ft->id); 55 MLX5_SET(set_flow_table_root_in, in, table_id, ft->id);
56 MLX5_SET(set_flow_table_root_in, in, underlay_qpn, underlay_qpn);
56 if (ft->vport) { 57 if (ft->vport) {
57 MLX5_SET(set_flow_table_root_in, in, vport_number, ft->vport); 58 MLX5_SET(set_flow_table_root_in, in, vport_number, ft->vport);
58 MLX5_SET(set_flow_table_root_in, in, other_vport, 1); 59 MLX5_SET(set_flow_table_root_in, in, other_vport, 1);
59 } 60 }
60 61
61 if ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
62 ft->underlay_qpn != 0)
63 MLX5_SET(set_flow_table_root_in, in, underlay_qpn, ft->underlay_qpn);
64
65 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); 62 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
66} 63}
67 64
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
index 8fad80688536..0f98a7cf4877 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
@@ -71,7 +71,8 @@ int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev,
71 unsigned int index); 71 unsigned int index);
72 72
73int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev, 73int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev,
74 struct mlx5_flow_table *ft); 74 struct mlx5_flow_table *ft,
75 u32 underlay_qpn);
75 76
76int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u16 *id); 77int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u16 *id);
77int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u16 id); 78int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u16 id);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index b8a176503d38..8f5125ccd8d4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -650,7 +650,7 @@ static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio
650 if (ft->level >= min_level) 650 if (ft->level >= min_level)
651 return 0; 651 return 0;
652 652
653 err = mlx5_cmd_update_root_ft(root->dev, ft); 653 err = mlx5_cmd_update_root_ft(root->dev, ft, root->underlay_qpn);
654 if (err) 654 if (err)
655 mlx5_core_warn(root->dev, "Update root flow table of id=%u failed\n", 655 mlx5_core_warn(root->dev, "Update root flow table of id=%u failed\n",
656 ft->id); 656 ft->id);
@@ -818,8 +818,6 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
818 goto unlock_root; 818 goto unlock_root;
819 } 819 }
820 820
821 ft->underlay_qpn = ft_attr->underlay_qpn;
822
823 tree_init_node(&ft->node, 1, del_flow_table); 821 tree_init_node(&ft->node, 1, del_flow_table);
824 log_table_sz = ft->max_fte ? ilog2(ft->max_fte) : 0; 822 log_table_sz = ft->max_fte ? ilog2(ft->max_fte) : 0;
825 next_ft = find_next_chained_ft(fs_prio); 823 next_ft = find_next_chained_ft(fs_prio);
@@ -864,7 +862,7 @@ struct mlx5_flow_table *mlx5_create_vport_flow_table(struct mlx5_flow_namespace
864 ft_attr.level = level; 862 ft_attr.level = level;
865 ft_attr.prio = prio; 863 ft_attr.prio = prio;
866 864
867 return __mlx5_create_flow_table(ns, &ft_attr, FS_FT_OP_MOD_NORMAL, 0); 865 return __mlx5_create_flow_table(ns, &ft_attr, FS_FT_OP_MOD_NORMAL, vport);
868} 866}
869 867
870struct mlx5_flow_table* 868struct mlx5_flow_table*
@@ -1489,7 +1487,8 @@ static int update_root_ft_destroy(struct mlx5_flow_table *ft)
1489 1487
1490 new_root_ft = find_next_ft(ft); 1488 new_root_ft = find_next_ft(ft);
1491 if (new_root_ft) { 1489 if (new_root_ft) {
1492 int err = mlx5_cmd_update_root_ft(root->dev, new_root_ft); 1490 int err = mlx5_cmd_update_root_ft(root->dev, new_root_ft,
1491 root->underlay_qpn);
1493 1492
1494 if (err) { 1493 if (err) {
1495 mlx5_core_warn(root->dev, "Update root flow table of id=%u failed\n", 1494 mlx5_core_warn(root->dev, "Update root flow table of id=%u failed\n",
@@ -2062,3 +2061,21 @@ err:
2062 mlx5_cleanup_fs(dev); 2061 mlx5_cleanup_fs(dev);
2063 return err; 2062 return err;
2064} 2063}
2064
2065int mlx5_fs_add_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn)
2066{
2067 struct mlx5_flow_root_namespace *root = dev->priv.steering->root_ns;
2068
2069 root->underlay_qpn = underlay_qpn;
2070 return 0;
2071}
2072EXPORT_SYMBOL(mlx5_fs_add_rx_underlay_qpn);
2073
2074int mlx5_fs_remove_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn)
2075{
2076 struct mlx5_flow_root_namespace *root = dev->priv.steering->root_ns;
2077
2078 root->underlay_qpn = 0;
2079 return 0;
2080}
2081EXPORT_SYMBOL(mlx5_fs_remove_rx_underlay_qpn);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index 81eafc7b9dd9..990acee6fb09 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -118,7 +118,6 @@ struct mlx5_flow_table {
118 /* FWD rules that point on this flow table */ 118 /* FWD rules that point on this flow table */
119 struct list_head fwd_rules; 119 struct list_head fwd_rules;
120 u32 flags; 120 u32 flags;
121 u32 underlay_qpn;
122}; 121};
123 122
124struct mlx5_fc_cache { 123struct mlx5_fc_cache {
@@ -195,6 +194,7 @@ struct mlx5_flow_root_namespace {
195 struct mlx5_flow_table *root_ft; 194 struct mlx5_flow_table *root_ft;
196 /* Should be held when chaining flow tables */ 195 /* Should be held when chaining flow tables */
197 struct mutex chain_lock; 196 struct mutex chain_lock;
197 u32 underlay_qpn;
198}; 198};
199 199
200int mlx5_init_fc_stats(struct mlx5_core_dev *dev); 200int mlx5_init_fc_stats(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index d0515391d33b..f27f84ffbc85 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -90,7 +90,7 @@ static void trigger_cmd_completions(struct mlx5_core_dev *dev)
90 spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags); 90 spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
91 91
92 mlx5_core_dbg(dev, "vector 0x%llx\n", vector); 92 mlx5_core_dbg(dev, "vector 0x%llx\n", vector);
93 mlx5_cmd_comp_handler(dev, vector); 93 mlx5_cmd_comp_handler(dev, vector, true);
94 return; 94 return;
95 95
96no_trig: 96no_trig:
@@ -275,10 +275,8 @@ static void poll_health(unsigned long data)
275 struct mlx5_core_health *health = &dev->priv.health; 275 struct mlx5_core_health *health = &dev->priv.health;
276 u32 count; 276 u32 count;
277 277
278 if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) { 278 if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
279 mod_timer(&health->timer, get_next_poll_jiffies()); 279 goto out;
280 return;
281 }
282 280
283 count = ioread32be(health->health_counter); 281 count = ioread32be(health->health_counter);
284 if (count == health->prev) 282 if (count == health->prev)
@@ -290,8 +288,6 @@ static void poll_health(unsigned long data)
290 if (health->miss_counter == MAX_MISSES) { 288 if (health->miss_counter == MAX_MISSES) {
291 dev_err(&dev->pdev->dev, "device's health compromised - reached miss count\n"); 289 dev_err(&dev->pdev->dev, "device's health compromised - reached miss count\n");
292 print_health_info(dev); 290 print_health_info(dev);
293 } else {
294 mod_timer(&health->timer, get_next_poll_jiffies());
295 } 291 }
296 292
297 if (in_fatal(dev) && !health->sick) { 293 if (in_fatal(dev) && !health->sick) {
@@ -305,6 +301,9 @@ static void poll_health(unsigned long data)
305 "new health works are not permitted at this stage\n"); 301 "new health works are not permitted at this stage\n");
306 spin_unlock(&health->wq_lock); 302 spin_unlock(&health->wq_lock);
307 } 303 }
304
305out:
306 mod_timer(&health->timer, get_next_poll_jiffies());
308} 307}
309 308
310void mlx5_start_health_poll(struct mlx5_core_dev *dev) 309void mlx5_start_health_poll(struct mlx5_core_dev *dev)
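The poll_health() restructure above funnels every exit path through a single mod_timer() call at the new "out" label; previously two branches re-armed the timer themselves, so any path that missed both lost the next poll. A runnable sketch of that single-rearm-point shape, with stubs in place of the driver code:

    #include <stdio.h>

    static int device_in_error;

    static void rearm_timer(void) { printf("timer re-armed\n"); }

    static void poll_health(void)
    {
            if (device_in_error)
                    goto out;

            printf("health counter checked\n");
    out:
            rearm_timer();                  /* single re-arm point */
    }

    int main(void)
    {
            poll_health();                  /* normal poll */
            device_in_error = 1;
            poll_health();                  /* error poll still re-arms */
            return 0;
    }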
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib.c
index 019c230da498..cc1858752e70 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib.c
@@ -66,6 +66,10 @@ static void mlx5i_init(struct mlx5_core_dev *mdev,
66 66
67 mlx5e_build_nic_params(mdev, &priv->channels.params, profile->max_nch(mdev)); 67 mlx5e_build_nic_params(mdev, &priv->channels.params, profile->max_nch(mdev));
68 68
69 /* Override RQ params as IPoIB supports only LINKED LIST RQ for now */
70 mlx5e_set_rq_type_params(mdev, &priv->channels.params, MLX5_WQ_TYPE_LINKED_LIST);
71 priv->channels.params.lro_en = false;
72
69 mutex_init(&priv->state_lock); 73 mutex_init(&priv->state_lock);
70 74
71 netdev->hw_features |= NETIF_F_SG; 75 netdev->hw_features |= NETIF_F_SG;
@@ -156,6 +160,8 @@ out:
156 160
157static void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp) 161static void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp)
158{ 162{
163 mlx5_fs_remove_rx_underlay_qpn(mdev, qp->qpn);
164
159 mlx5_core_destroy_qp(mdev, qp); 165 mlx5_core_destroy_qp(mdev, qp);
160} 166}
161 167
@@ -170,6 +176,8 @@ static int mlx5i_init_tx(struct mlx5e_priv *priv)
170 return err; 176 return err;
171 } 177 }
172 178
179 mlx5_fs_add_rx_underlay_qpn(priv->mdev, ipriv->qp.qpn);
180
173 err = mlx5e_create_tis(priv->mdev, 0 /* tc */, ipriv->qp.qpn, &priv->tisn[0]); 181 err = mlx5e_create_tis(priv->mdev, 0 /* tc */, ipriv->qp.qpn, &priv->tisn[0]);
174 if (err) { 182 if (err) {
175 mlx5_core_warn(priv->mdev, "create tis failed, %d\n", err); 183 mlx5_core_warn(priv->mdev, "create tis failed, %d\n", err);
@@ -189,7 +197,6 @@ static void mlx5i_cleanup_tx(struct mlx5e_priv *priv)
189 197
190static int mlx5i_create_flow_steering(struct mlx5e_priv *priv) 198static int mlx5i_create_flow_steering(struct mlx5e_priv *priv)
191{ 199{
192 struct mlx5i_priv *ipriv = priv->ppriv;
193 int err; 200 int err;
194 201
195 priv->fs.ns = mlx5_get_flow_namespace(priv->mdev, 202 priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
@@ -205,7 +212,7 @@ static int mlx5i_create_flow_steering(struct mlx5e_priv *priv)
205 priv->netdev->hw_features &= ~NETIF_F_NTUPLE; 212 priv->netdev->hw_features &= ~NETIF_F_NTUPLE;
206 } 213 }
207 214
208 err = mlx5e_create_ttc_table(priv, ipriv->qp.qpn); 215 err = mlx5e_create_ttc_table(priv);
209 if (err) { 216 if (err) {
210 netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n", 217 netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
211 err); 218 err);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 0c123d571b4c..13be264587f1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -175,8 +175,9 @@ static struct mlx5_profile profile[] = {
175 }, 175 },
176}; 176};
177 177
178#define FW_INIT_TIMEOUT_MILI 2000 178#define FW_INIT_TIMEOUT_MILI 2000
179#define FW_INIT_WAIT_MS 2 179#define FW_INIT_WAIT_MS 2
180#define FW_PRE_INIT_TIMEOUT_MILI 10000
180 181
181static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili) 182static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili)
182{ 183{
@@ -537,8 +538,10 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
537 /* disable cmdif checksum */ 538 /* disable cmdif checksum */
538 MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0); 539 MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0);
539 540
540 /* If the HCA supports 4K UARs use it */ 541 /* Enable 4K UAR only when HCA supports it and page size is bigger
541 if (MLX5_CAP_GEN_MAX(dev, uar_4k)) 542 * than 4K.
543 */
544 if (MLX5_CAP_GEN_MAX(dev, uar_4k) && PAGE_SIZE > 4096)
542 MLX5_SET(cmd_hca_cap, set_hca_cap, uar_4k, 1); 545 MLX5_SET(cmd_hca_cap, set_hca_cap, uar_4k, 1);
543 546
544 MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12); 547 MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12);
@@ -612,7 +615,6 @@ static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
612 struct mlx5_priv *priv = &mdev->priv; 615 struct mlx5_priv *priv = &mdev->priv;
613 struct msix_entry *msix = priv->msix_arr; 616 struct msix_entry *msix = priv->msix_arr;
614 int irq = msix[i + MLX5_EQ_VEC_COMP_BASE].vector; 617 int irq = msix[i + MLX5_EQ_VEC_COMP_BASE].vector;
615 int err;
616 618
617 if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) { 619 if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) {
618 mlx5_core_warn(mdev, "zalloc_cpumask_var failed"); 620 mlx5_core_warn(mdev, "zalloc_cpumask_var failed");
@@ -622,18 +624,11 @@ static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
622 cpumask_set_cpu(cpumask_local_spread(i, priv->numa_node), 624 cpumask_set_cpu(cpumask_local_spread(i, priv->numa_node),
623 priv->irq_info[i].mask); 625 priv->irq_info[i].mask);
624 626
625 err = irq_set_affinity_hint(irq, priv->irq_info[i].mask); 627 if (IS_ENABLED(CONFIG_SMP) &&
626 if (err) { 628 irq_set_affinity_hint(irq, priv->irq_info[i].mask))
627 mlx5_core_warn(mdev, "irq_set_affinity_hint failed,irq 0x%.4x", 629 mlx5_core_warn(mdev, "irq_set_affinity_hint failed, irq 0x%.4x", irq);
628 irq);
629 goto err_clear_mask;
630 }
631 630
632 return 0; 631 return 0;
633
634err_clear_mask:
635 free_cpumask_var(priv->irq_info[i].mask);
636 return err;
637} 632}
638 633
639static void mlx5_irq_clear_affinity_hint(struct mlx5_core_dev *mdev, int i) 634static void mlx5_irq_clear_affinity_hint(struct mlx5_core_dev *mdev, int i)
@@ -1019,6 +1014,15 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
1019 */ 1014 */
1020 dev->state = MLX5_DEVICE_STATE_UP; 1015 dev->state = MLX5_DEVICE_STATE_UP;
1021 1016
1017 /* wait for firmware to accept initialization segments configurations
1018 */
1019 err = wait_fw_init(dev, FW_PRE_INIT_TIMEOUT_MILI);
1020 if (err) {
1021 dev_err(&dev->pdev->dev, "Firmware over %d MS in pre-initializing state, aborting\n",
1022 FW_PRE_INIT_TIMEOUT_MILI);
1023 goto out;
1024 }
1025
1022 err = mlx5_cmd_init(dev); 1026 err = mlx5_cmd_init(dev);
1023 if (err) { 1027 if (err) {
1024 dev_err(&pdev->dev, "Failed initializing command interface, aborting\n"); 1028 dev_err(&pdev->dev, "Failed initializing command interface, aborting\n");
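The new FW_PRE_INIT_TIMEOUT_MILI wait above is a bounded poll on the firmware's initializing flag before the command interface is brought up, so a stuck firmware aborts the load instead of hanging it. The generic shape of such a wait loop, as a runnable userspace sketch (the clock and flag are stand-ins for the device's init segment):

    #include <stdio.h>
    #include <time.h>

    static int fw_initializing(void) { return 0; }  /* pretend FW is ready */

    static int wait_fw_init(unsigned int max_wait_ms, unsigned int poll_ms)
    {
            struct timespec ts = { 0, poll_ms * 1000000L };
            unsigned int waited = 0;

            while (fw_initializing()) {
                    if (waited >= max_wait_ms)
                            return -1;              /* -EBUSY in the driver */
                    nanosleep(&ts, NULL);
                    waited += poll_ms;
            }
            return 0;
    }

    int main(void)
    {
            printf("wait_fw_init: %d\n", wait_fw_init(10000, 2));
            return 0;
    }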
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
index ea56f6ade6b4..5f0a7bc692a4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
@@ -199,10 +199,11 @@ static int mlxsw_sp_erif_entry_get(struct mlxsw_sp *mlxsw_sp,
199 199
200 entry->counter_valid = false; 200 entry->counter_valid = false;
201 entry->counter = 0; 201 entry->counter = 0;
202 entry->index = mlxsw_sp_rif_index(rif);
203
202 if (!counters_enabled) 204 if (!counters_enabled)
203 return 0; 205 return 0;
204 206
205 entry->index = mlxsw_sp_rif_index(rif);
206 err = mlxsw_sp_rif_counter_value_get(mlxsw_sp, rif, 207 err = mlxsw_sp_rif_counter_value_get(mlxsw_sp, rif,
207 MLXSW_SP_RIF_COUNTER_EGRESS, 208 MLXSW_SP_RIF_COUNTER_EGRESS,
208 &cnt); 209 &cnt);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 33cec1cc1642..0744452a0b18 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -206,6 +206,9 @@ void mlxsw_sp_rif_counter_free(struct mlxsw_sp *mlxsw_sp,
206{ 206{
207 unsigned int *p_counter_index; 207 unsigned int *p_counter_index;
208 208
209 if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
210 return;
211
209 p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir); 212 p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
210 if (WARN_ON(!p_counter_index)) 213 if (WARN_ON(!p_counter_index))
211 return; 214 return;
@@ -3331,6 +3334,9 @@ static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
3331 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev); 3334 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
3332 u16 vid = vlan_dev_vlan_id(vlan_dev); 3335 u16 vid = vlan_dev_vlan_id(vlan_dev);
3333 3336
3337 if (netif_is_bridge_port(vlan_dev))
3338 return 0;
3339
3334 if (mlxsw_sp_port_dev_check(real_dev)) 3340 if (mlxsw_sp_port_dev_check(real_dev))
3335 return mlxsw_sp_inetaddr_vport_event(vlan_dev, real_dev, event, 3341 return mlxsw_sp_inetaddr_vport_event(vlan_dev, real_dev, event,
3336 vid); 3342 vid);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 0d8411f1f954..f4bb0c0b7c1d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -1497,8 +1497,7 @@ do_fdb_op:
1497 err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, 1497 err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid,
1498 adding, true); 1498 adding, true);
1499 if (err) { 1499 if (err) {
1500 if (net_ratelimit()) 1500 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
1501 netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n");
1502 return; 1501 return;
1503 } 1502 }
1504 1503
@@ -1558,8 +1557,7 @@ do_fdb_op:
1558 err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid, 1557 err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid,
1559 adding, true); 1558 adding, true);
1560 if (err) { 1559 if (err) {
1561 if (net_ratelimit()) 1560 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
1562 netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n");
1563 return; 1561 return;
1564 } 1562 }
1565 1563
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
index b8d5270359cd..e30676515529 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
@@ -247,7 +247,7 @@ nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, int mtu)
247 cmd.req.arg3 = 0; 247 cmd.req.arg3 = 0;
248 248
249 if (recv_ctx->state == NX_HOST_CTX_STATE_ACTIVE) 249 if (recv_ctx->state == NX_HOST_CTX_STATE_ACTIVE)
250 netxen_issue_cmd(adapter, &cmd); 250 rcode = netxen_issue_cmd(adapter, &cmd);
251 251
252 if (rcode != NX_RCODE_SUCCESS) 252 if (rcode != NX_RCODE_SUCCESS)
253 return -EIO; 253 return -EIO;
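The netxen one-liner above fixes a dropped return value: rcode was initialized to NX_RCODE_SUCCESS and then compared without ever being assigned, so a failing set-MTU command was reported as success. A minimal reproduction of the bug class (names are illustrative):

    #include <stdio.h>

    #define SUCCESS 0

    static int issue_cmd(void) { return 5; /* some failure code */ }

    int main(void)
    {
            int rcode = SUCCESS;

            issue_cmd();                    /* bug: result discarded */
            printf("old: %s\n", rcode != SUCCESS ? "-EIO" : "ok");

            rcode = issue_cmd();            /* fix: capture the code */
            printf("new: %s\n", rcode != SUCCESS ? "-EIO" : "ok");
            return 0;
    }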
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
index 483241b4b05d..a672f6a860dc 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
@@ -2956,7 +2956,7 @@ static u32 qed_grc_dump_ctx_data(struct qed_hwfn *p_hwfn,
2956 qed_wr(p_hwfn, 2956 qed_wr(p_hwfn,
2957 p_ptt, 2957 p_ptt,
2958 s_storm_defs[storm_id].cm_ctx_wr_addr, 2958 s_storm_defs[storm_id].cm_ctx_wr_addr,
2959 BIT(9) | lid); 2959 (i << 9) | lid);
2960 *(dump_buf + offset) = qed_rd(p_hwfn, 2960 *(dump_buf + offset) = qed_rd(p_hwfn,
2961 p_ptt, 2961 p_ptt,
2962 rd_reg_addr); 2962 rd_reg_addr);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
index 67200c5498ab..0a8fde629991 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
@@ -983,7 +983,7 @@ void qed_set_rfs_mode_disable(struct qed_hwfn *p_hwfn,
983 memset(&camline, 0, sizeof(union gft_cam_line_union)); 983 memset(&camline, 0, sizeof(union gft_cam_line_union));
984 qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id, 984 qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id,
985 camline.cam_line_mapped.camline); 985 camline.cam_line_mapped.camline);
986 memset(&ramline, 0, sizeof(union gft_cam_line_union)); 986 memset(&ramline, 0, sizeof(ramline));
987 987
988 for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++) { 988 for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++) {
989 u32 hw_addr = PRS_REG_GFT_PROFILE_MASK_RAM; 989 u32 hw_addr = PRS_REG_GFT_PROFILE_MASK_RAM;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 537d1236a4fe..715b3aaf83ac 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -1730,7 +1730,8 @@ void qed_get_protocol_stats(struct qed_dev *cdev,
1730 qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats); 1730 qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats);
1731 break; 1731 break;
1732 default: 1732 default:
1733 DP_ERR(cdev, "Invalid protocol type = %d\n", type); 1733 DP_VERBOSE(cdev, QED_MSG_SP,
1734 "Invalid protocol type = %d\n", type);
1734 return; 1735 return;
1735 } 1736 }
1736} 1737}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 49bad00a0f8f..81312924df14 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -37,8 +37,8 @@
37 37
38#define _QLCNIC_LINUX_MAJOR 5 38#define _QLCNIC_LINUX_MAJOR 5
39#define _QLCNIC_LINUX_MINOR 3 39#define _QLCNIC_LINUX_MINOR 3
40#define _QLCNIC_LINUX_SUBVERSION 65 40#define _QLCNIC_LINUX_SUBVERSION 66
41#define QLCNIC_LINUX_VERSIONID "5.3.65" 41#define QLCNIC_LINUX_VERSIONID "5.3.66"
42#define QLCNIC_DRV_IDC_VER 0x01 42#define QLCNIC_DRV_IDC_VER 0x01
43#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ 43#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
44 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION)) 44 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -1824,22 +1824,44 @@ struct qlcnic_hardware_ops {
1824 u32 (*get_cap_size)(void *, int); 1824 u32 (*get_cap_size)(void *, int);
1825 void (*set_sys_info)(void *, int, u32); 1825 void (*set_sys_info)(void *, int, u32);
1826 void (*store_cap_mask)(void *, u32); 1826 void (*store_cap_mask)(void *, u32);
1827 bool (*encap_rx_offload) (struct qlcnic_adapter *adapter);
1828 bool (*encap_tx_offload) (struct qlcnic_adapter *adapter);
1827}; 1829};
1828 1830
1829extern struct qlcnic_nic_template qlcnic_vf_ops; 1831extern struct qlcnic_nic_template qlcnic_vf_ops;
1830 1832
1831static inline bool qlcnic_encap_tx_offload(struct qlcnic_adapter *adapter) 1833static inline bool qlcnic_83xx_encap_tx_offload(struct qlcnic_adapter *adapter)
1832{ 1834{
1833 return adapter->ahw->extra_capability[0] & 1835 return adapter->ahw->extra_capability[0] &
1834 QLCNIC_83XX_FW_CAPAB_ENCAP_TX_OFFLOAD; 1836 QLCNIC_83XX_FW_CAPAB_ENCAP_TX_OFFLOAD;
1835} 1837}
1836 1838
1837static inline bool qlcnic_encap_rx_offload(struct qlcnic_adapter *adapter) 1839static inline bool qlcnic_83xx_encap_rx_offload(struct qlcnic_adapter *adapter)
1838{ 1840{
1839 return adapter->ahw->extra_capability[0] & 1841 return adapter->ahw->extra_capability[0] &
1840 QLCNIC_83XX_FW_CAPAB_ENCAP_RX_OFFLOAD; 1842 QLCNIC_83XX_FW_CAPAB_ENCAP_RX_OFFLOAD;
1841} 1843}
1842 1844
1845static inline bool qlcnic_82xx_encap_tx_offload(struct qlcnic_adapter *adapter)
1846{
1847 return false;
1848}
1849
1850static inline bool qlcnic_82xx_encap_rx_offload(struct qlcnic_adapter *adapter)
1851{
1852 return false;
1853}
1854
1855static inline bool qlcnic_encap_rx_offload(struct qlcnic_adapter *adapter)
1856{
1857 return adapter->ahw->hw_ops->encap_rx_offload(adapter);
1858}
1859
1860static inline bool qlcnic_encap_tx_offload(struct qlcnic_adapter *adapter)
1861{
1862 return adapter->ahw->hw_ops->encap_tx_offload(adapter);
1863}
1864
1843static inline int qlcnic_start_firmware(struct qlcnic_adapter *adapter) 1865static inline int qlcnic_start_firmware(struct qlcnic_adapter *adapter)
1844{ 1866{
1845 return adapter->nic_ops->start_firmware(adapter); 1867 return adapter->nic_ops->start_firmware(adapter);
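The qlcnic change above routes the encap-offload query through hw_ops so each adapter generation answers for itself: 83xx consults its firmware capability word, while the 82xx stubs simply return false instead of misreading 83xx-only bits. A minimal, runnable dispatch of that shape (structs, capability bit, and names are illustrative only):

    #include <stdio.h>
    #include <stdbool.h>

    struct adapter;
    struct hw_ops { bool (*encap_rx_offload)(struct adapter *); };
    struct adapter { const struct hw_ops *ops; unsigned int caps; };

    static bool encap_rx_83xx(struct adapter *a) { return a->caps & 0x4; }
    static bool encap_rx_82xx(struct adapter *a) { (void)a; return false; }

    static const struct hw_ops ops_83xx = { .encap_rx_offload = encap_rx_83xx };
    static const struct hw_ops ops_82xx = { .encap_rx_offload = encap_rx_82xx };

    static bool encap_rx_offload(struct adapter *a)
    {
            return a->ops->encap_rx_offload(a);     /* the new indirection */
    }

    int main(void)
    {
            struct adapter a83 = { &ops_83xx, 0x4 }, a82 = { &ops_82xx, 0x4 };

            printf("83xx: %d, 82xx: %d\n",
                   encap_rx_offload(&a83), encap_rx_offload(&a82));
            return 0;
    }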
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index 718bf58a7da6..f7080d0ab874 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -242,6 +242,8 @@ static struct qlcnic_hardware_ops qlcnic_83xx_hw_ops = {
242 .get_cap_size = qlcnic_83xx_get_cap_size, 242 .get_cap_size = qlcnic_83xx_get_cap_size,
243 .set_sys_info = qlcnic_83xx_set_sys_info, 243 .set_sys_info = qlcnic_83xx_set_sys_info,
244 .store_cap_mask = qlcnic_83xx_store_cap_mask, 244 .store_cap_mask = qlcnic_83xx_store_cap_mask,
245 .encap_rx_offload = qlcnic_83xx_encap_rx_offload,
246 .encap_tx_offload = qlcnic_83xx_encap_tx_offload,
245}; 247};
246 248
247static struct qlcnic_nic_template qlcnic_83xx_ops = { 249static struct qlcnic_nic_template qlcnic_83xx_ops = {
@@ -3168,6 +3170,40 @@ int qlcnic_83xx_flash_read32(struct qlcnic_adapter *adapter, u32 flash_addr,
3168 return 0; 3170 return 0;
3169} 3171}
3170 3172
3173void qlcnic_83xx_get_port_type(struct qlcnic_adapter *adapter)
3174{
3175 struct qlcnic_hardware_context *ahw = adapter->ahw;
3176 struct qlcnic_cmd_args cmd;
3177 u32 config;
3178 int err;
3179
3180 err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_LINK_STATUS);
3181 if (err)
3182 return;
3183
3184 err = qlcnic_issue_cmd(adapter, &cmd);
3185 if (err) {
3186 dev_info(&adapter->pdev->dev,
3187 "Get Link Status Command failed: 0x%x\n", err);
3188 goto out;
3189 } else {
3190 config = cmd.rsp.arg[3];
3191
3192 switch (QLC_83XX_SFP_MODULE_TYPE(config)) {
3193 case QLC_83XX_MODULE_FIBRE_1000BASE_SX:
3194 case QLC_83XX_MODULE_FIBRE_1000BASE_LX:
3195 case QLC_83XX_MODULE_FIBRE_1000BASE_CX:
3196 case QLC_83XX_MODULE_TP_1000BASE_T:
3197 ahw->port_type = QLCNIC_GBE;
3198 break;
3199 default:
3200 ahw->port_type = QLCNIC_XGBE;
3201 }
3202 }
3203out:
3204 qlcnic_free_mbx_args(&cmd);
3205}
3206
3171int qlcnic_83xx_test_link(struct qlcnic_adapter *adapter) 3207int qlcnic_83xx_test_link(struct qlcnic_adapter *adapter)
3172{ 3208{
3173 u8 pci_func; 3209 u8 pci_func;
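qlcnic_83xx_get_port_type() above follows the usual mailbox shape: allocate the command arguments, issue the command, decode on success, and release the arguments on every path through a single out: label. A hedged sketch of that single-exit cleanup idiom, with invented stand-ins for the mailbox helpers:

#include <stdio.h>
#include <stdlib.h>

/* Invented stand-ins for qlcnic_alloc_mbx_args()/qlcnic_issue_cmd(). */
static int alloc_cmd(int **buf)
{
	*buf = malloc(sizeof(**buf));
	return *buf ? 0 : -1;
}

static int issue_cmd(int *buf)
{
	*buf = 42;                 /* pretend the firmware answered */
	return 0;
}

static int get_port_type(void)
{
	int *cmd;
	int type = -1;

	if (alloc_cmd(&cmd))
		return -1;         /* nothing held yet: plain return */

	if (issue_cmd(cmd)) {
		fprintf(stderr, "command failed\n");
		goto out;          /* error path still reaches the free below */
	}

	type = *cmd;               /* decode the response */
out:
	free(cmd);                 /* one cleanup site for every exit */
	return type;
}

int main(void)
{
	printf("port type: %d\n", get_port_type());
	return 0;
}

The early return before the goto is deliberate: at that point nothing has been allocated, so there is nothing to unwind.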
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
index 3dfe8e27b51c..b75a81246856 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
@@ -637,6 +637,7 @@ void qlcnic_83xx_get_pauseparam(struct qlcnic_adapter *,
637int qlcnic_83xx_set_pauseparam(struct qlcnic_adapter *, 637int qlcnic_83xx_set_pauseparam(struct qlcnic_adapter *,
638 struct ethtool_pauseparam *); 638 struct ethtool_pauseparam *);
639int qlcnic_83xx_test_link(struct qlcnic_adapter *); 639int qlcnic_83xx_test_link(struct qlcnic_adapter *);
640void qlcnic_83xx_get_port_type(struct qlcnic_adapter *adapter);
640int qlcnic_83xx_reg_test(struct qlcnic_adapter *); 641int qlcnic_83xx_reg_test(struct qlcnic_adapter *);
641int qlcnic_83xx_get_regs_len(struct qlcnic_adapter *); 642int qlcnic_83xx_get_regs_len(struct qlcnic_adapter *);
642int qlcnic_83xx_get_registers(struct qlcnic_adapter *, u32 *); 643int qlcnic_83xx_get_registers(struct qlcnic_adapter *, u32 *);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index 9a869c15d8bf..7f7deeaf1cf0 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -486,6 +486,9 @@ static int qlcnic_set_link_ksettings(struct net_device *dev,
486 u32 ret = 0; 486 u32 ret = 0;
487 struct qlcnic_adapter *adapter = netdev_priv(dev); 487 struct qlcnic_adapter *adapter = netdev_priv(dev);
488 488
489 if (qlcnic_83xx_check(adapter))
490 qlcnic_83xx_get_port_type(adapter);
491
489 if (adapter->ahw->port_type != QLCNIC_GBE) 492 if (adapter->ahw->port_type != QLCNIC_GBE)
490 return -EOPNOTSUPP; 493 return -EOPNOTSUPP;
491 494
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index 838cc0ceafd8..7848cf04b29a 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -341,7 +341,7 @@ qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg)
341 } 341 }
342 return -EIO; 342 return -EIO;
343 } 343 }
344 usleep_range(1000, 1500); 344 udelay(1200);
345 } 345 }
346 346
347 if (id_reg) 347 if (id_reg)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index b6628aaa6e4a..1b5f7d57b6f8 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -632,6 +632,8 @@ static struct qlcnic_hardware_ops qlcnic_hw_ops = {
632 .get_cap_size = qlcnic_82xx_get_cap_size, 632 .get_cap_size = qlcnic_82xx_get_cap_size,
633 .set_sys_info = qlcnic_82xx_set_sys_info, 633 .set_sys_info = qlcnic_82xx_set_sys_info,
634 .store_cap_mask = qlcnic_82xx_store_cap_mask, 634 .store_cap_mask = qlcnic_82xx_store_cap_mask,
635 .encap_rx_offload = qlcnic_82xx_encap_rx_offload,
636 .encap_tx_offload = qlcnic_82xx_encap_tx_offload,
635}; 637};
636 638
637static int qlcnic_check_multi_tx_capability(struct qlcnic_adapter *adapter) 639static int qlcnic_check_multi_tx_capability(struct qlcnic_adapter *adapter)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index 2f656f395f39..c58180f40844 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -77,6 +77,8 @@ static struct qlcnic_hardware_ops qlcnic_sriov_vf_hw_ops = {
77 .free_mac_list = qlcnic_sriov_vf_free_mac_list, 77 .free_mac_list = qlcnic_sriov_vf_free_mac_list,
78 .enable_sds_intr = qlcnic_83xx_enable_sds_intr, 78 .enable_sds_intr = qlcnic_83xx_enable_sds_intr,
79 .disable_sds_intr = qlcnic_83xx_disable_sds_intr, 79 .disable_sds_intr = qlcnic_83xx_disable_sds_intr,
80 .encap_rx_offload = qlcnic_83xx_encap_rx_offload,
81 .encap_tx_offload = qlcnic_83xx_encap_tx_offload,
80}; 82};
81 83
82static struct qlcnic_nic_template qlcnic_sriov_vf_ops = { 84static struct qlcnic_nic_template qlcnic_sriov_vf_ops = {
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
index cc065ffbe4b5..bcd4708b3745 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
@@ -931,7 +931,7 @@ int emac_mac_up(struct emac_adapter *adpt)
931 emac_mac_config(adpt); 931 emac_mac_config(adpt);
932 emac_mac_rx_descs_refill(adpt, &adpt->rx_q); 932 emac_mac_rx_descs_refill(adpt, &adpt->rx_q);
933 933
934 adpt->phydev->irq = PHY_IGNORE_INTERRUPT; 934 adpt->phydev->irq = PHY_POLL;
935 ret = phy_connect_direct(netdev, adpt->phydev, emac_adjust_link, 935 ret = phy_connect_direct(netdev, adpt->phydev, emac_adjust_link,
936 PHY_INTERFACE_MODE_SGMII); 936 PHY_INTERFACE_MODE_SGMII);
937 if (ret) { 937 if (ret) {
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-phy.c b/drivers/net/ethernet/qualcomm/emac/emac-phy.c
index 441c19366489..18461fcb9815 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-phy.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-phy.c
@@ -13,15 +13,11 @@
13/* Qualcomm Technologies, Inc. EMAC PHY Controller driver. 13/* Qualcomm Technologies, Inc. EMAC PHY Controller driver.
14 */ 14 */
15 15
16#include <linux/module.h>
17#include <linux/of.h>
18#include <linux/of_net.h>
19#include <linux/of_mdio.h> 16#include <linux/of_mdio.h>
20#include <linux/phy.h> 17#include <linux/phy.h>
21#include <linux/iopoll.h> 18#include <linux/iopoll.h>
22#include <linux/acpi.h> 19#include <linux/acpi.h>
23#include "emac.h" 20#include "emac.h"
24#include "emac-mac.h"
25 21
26/* EMAC base register offsets */ 22/* EMAC base register offsets */
27#define EMAC_MDIO_CTRL 0x001414 23#define EMAC_MDIO_CTRL 0x001414
@@ -52,62 +48,10 @@
52 48
53#define MDIO_WAIT_TIMES 1000 49#define MDIO_WAIT_TIMES 1000
54 50
55#define EMAC_LINK_SPEED_DEFAULT (\
56 EMAC_LINK_SPEED_10_HALF |\
57 EMAC_LINK_SPEED_10_FULL |\
58 EMAC_LINK_SPEED_100_HALF |\
59 EMAC_LINK_SPEED_100_FULL |\
60 EMAC_LINK_SPEED_1GB_FULL)
61
62/**
63 * emac_phy_mdio_autopoll_disable() - disable mdio autopoll
64 * @adpt: the emac adapter
65 *
66 * The autopoll feature takes over the MDIO bus. In order for
67 * the PHY driver to be able to talk to the PHY over the MDIO
68 * bus, we need to temporarily disable the autopoll feature.
69 */
70static int emac_phy_mdio_autopoll_disable(struct emac_adapter *adpt)
71{
72 u32 val;
73
74 /* disable autopoll */
75 emac_reg_update32(adpt->base + EMAC_MDIO_CTRL, MDIO_AP_EN, 0);
76
77 /* wait for any mdio polling to complete */
78 if (!readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, val,
79 !(val & MDIO_BUSY), 100, MDIO_WAIT_TIMES * 100))
80 return 0;
81
82 /* failed to disable; ensure it is enabled before returning */
83 emac_reg_update32(adpt->base + EMAC_MDIO_CTRL, 0, MDIO_AP_EN);
84
85 return -EBUSY;
86}
87
88/**
89 * emac_phy_mdio_autopoll_disable() - disable mdio autopoll
90 * @adpt: the emac adapter
91 *
92 * The EMAC has the ability to poll the external PHY on the MDIO
93 * bus for link state changes. This eliminates the need for the
94 * driver to poll the phy. If if the link state does change,
95 * the EMAC issues an interrupt on behalf of the PHY.
96 */
97static void emac_phy_mdio_autopoll_enable(struct emac_adapter *adpt)
98{
99 emac_reg_update32(adpt->base + EMAC_MDIO_CTRL, 0, MDIO_AP_EN);
100}
101
102static int emac_mdio_read(struct mii_bus *bus, int addr, int regnum) 51static int emac_mdio_read(struct mii_bus *bus, int addr, int regnum)
103{ 52{
104 struct emac_adapter *adpt = bus->priv; 53 struct emac_adapter *adpt = bus->priv;
105 u32 reg; 54 u32 reg;
106 int ret;
107
108 ret = emac_phy_mdio_autopoll_disable(adpt);
109 if (ret)
110 return ret;
111 55
112 emac_reg_update32(adpt->base + EMAC_PHY_STS, PHY_ADDR_BMSK, 56 emac_reg_update32(adpt->base + EMAC_PHY_STS, PHY_ADDR_BMSK,
113 (addr << PHY_ADDR_SHFT)); 57 (addr << PHY_ADDR_SHFT));
@@ -122,24 +66,15 @@ static int emac_mdio_read(struct mii_bus *bus, int addr, int regnum)
122 if (readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, reg, 66 if (readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, reg,
123 !(reg & (MDIO_START | MDIO_BUSY)), 67 !(reg & (MDIO_START | MDIO_BUSY)),
124 100, MDIO_WAIT_TIMES * 100)) 68 100, MDIO_WAIT_TIMES * 100))
125 ret = -EIO; 69 return -EIO;
126 else
127 ret = (reg >> MDIO_DATA_SHFT) & MDIO_DATA_BMSK;
128 70
129 emac_phy_mdio_autopoll_enable(adpt); 71 return (reg >> MDIO_DATA_SHFT) & MDIO_DATA_BMSK;
130
131 return ret;
132} 72}
133 73
134static int emac_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val) 74static int emac_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val)
135{ 75{
136 struct emac_adapter *adpt = bus->priv; 76 struct emac_adapter *adpt = bus->priv;
137 u32 reg; 77 u32 reg;
138 int ret;
139
140 ret = emac_phy_mdio_autopoll_disable(adpt);
141 if (ret)
142 return ret;
143 78
144 emac_reg_update32(adpt->base + EMAC_PHY_STS, PHY_ADDR_BMSK, 79 emac_reg_update32(adpt->base + EMAC_PHY_STS, PHY_ADDR_BMSK,
145 (addr << PHY_ADDR_SHFT)); 80 (addr << PHY_ADDR_SHFT));
@@ -155,11 +90,9 @@ static int emac_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val)
155 if (readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, reg, 90 if (readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, reg,
156 !(reg & (MDIO_START | MDIO_BUSY)), 100, 91 !(reg & (MDIO_START | MDIO_BUSY)), 100,
157 MDIO_WAIT_TIMES * 100)) 92 MDIO_WAIT_TIMES * 100))
158 ret = -EIO; 93 return -EIO;
159 94
160 emac_phy_mdio_autopoll_enable(adpt); 95 return 0;
161
162 return ret;
163} 96}
164 97
165/* Configure the MDIO bus and connect the external PHY */ 98/* Configure the MDIO bus and connect the external PHY */
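With the autopoll enable/disable dance removed, the emac-phy.c accessors above rely solely on readl_poll_timeout() to wait for MDIO_BUSY to clear before touching the data register. A rough userspace model of such a bounded poll loop (the register and its busy bit are simulated; interval and budget only echo the driver's 100 us step):

#define _POSIX_C_SOURCE 200809L
#include <stdio.h>
#include <time.h>

#define POLL_INTERVAL_NS 100000L     /* 100 us between reads */
#define POLL_TIMEOUT_NS  100000000L  /* overall budget before giving up */

static volatile unsigned int fake_reg = 1;   /* bit 0 plays MDIO_BUSY */

/* Roughly what readl_poll_timeout() does: re-read the register until
 * the condition holds or the time budget is exhausted. */
static int poll_until_idle(volatile unsigned int *reg)
{
	struct timespec start, now, step = { 0, POLL_INTERVAL_NS };
	long elapsed;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		if (!(*reg & 1))
			return 0;                /* bus went idle */
		clock_gettime(CLOCK_MONOTONIC, &now);
		elapsed = (now.tv_sec - start.tv_sec) * 1000000000L +
			  (now.tv_nsec - start.tv_nsec);
		if (elapsed > POLL_TIMEOUT_NS)
			return -1;               /* the driver returns -EIO */
		nanosleep(&step, NULL);
		*reg = 0;                        /* simulate hardware finishing */
	}
}

int main(void)
{
	printf("poll result: %d\n", poll_until_idle(&fake_reg));
	return 0;
}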
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index 28a8cdc36485..98a326faea29 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -50,19 +50,7 @@
50#define DMAR_DLY_CNT_DEF 15 50#define DMAR_DLY_CNT_DEF 15
51#define DMAW_DLY_CNT_DEF 4 51#define DMAW_DLY_CNT_DEF 4
52 52
53#define IMR_NORMAL_MASK (\ 53#define IMR_NORMAL_MASK (ISR_ERROR | ISR_OVER | ISR_TX_PKT)
54 ISR_ERROR |\
55 ISR_GPHY_LINK |\
56 ISR_TX_PKT |\
57 GPHY_WAKEUP_INT)
58
59#define IMR_EXTENDED_MASK (\
60 SW_MAN_INT |\
61 ISR_OVER |\
62 ISR_ERROR |\
63 ISR_GPHY_LINK |\
64 ISR_TX_PKT |\
65 GPHY_WAKEUP_INT)
66 54
67#define ISR_TX_PKT (\ 55#define ISR_TX_PKT (\
68 TX_PKT_INT |\ 56 TX_PKT_INT |\
@@ -70,10 +58,6 @@
70 TX_PKT_INT2 |\ 58 TX_PKT_INT2 |\
71 TX_PKT_INT3) 59 TX_PKT_INT3)
72 60
73#define ISR_GPHY_LINK (\
74 GPHY_LINK_UP_INT |\
75 GPHY_LINK_DOWN_INT)
76
77#define ISR_OVER (\ 61#define ISR_OVER (\
78 RFD0_UR_INT |\ 62 RFD0_UR_INT |\
79 RFD1_UR_INT |\ 63 RFD1_UR_INT |\
@@ -187,10 +171,6 @@ irqreturn_t emac_isr(int _irq, void *data)
187 if (status & ISR_OVER) 171 if (status & ISR_OVER)
188 net_warn_ratelimited("warning: TX/RX overflow\n"); 172 net_warn_ratelimited("warning: TX/RX overflow\n");
189 173
190 /* link event */
191 if (status & ISR_GPHY_LINK)
192 phy_mac_interrupt(adpt->phydev, !!(status & GPHY_LINK_UP_INT));
193
194exit: 174exit:
195 /* enable the interrupt */ 175 /* enable the interrupt */
196 writel(irq->mask, adpt->base + EMAC_INT_MASK); 176 writel(irq->mask, adpt->base + EMAC_INT_MASK);
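With the MAC-driven link interrupts gone, emac.c above collapses IMR_NORMAL_MASK to ISR_ERROR | ISR_OVER | ISR_TX_PKT, where the sub-masks are themselves ORs of per-queue bits. A compilable illustration of composing and testing such grouped masks (bit positions invented):

#include <stdio.h>

/* Per-queue TX-complete bits, then grouped masks, mirroring how
 * ISR_TX_PKT and IMR_NORMAL_MASK are built. Values are illustrative. */
#define TX_PKT_INT0 (1u << 0)
#define TX_PKT_INT1 (1u << 1)
#define TX_PKT_INT2 (1u << 2)
#define TX_PKT_INT3 (1u << 3)
#define ISR_ERROR   (1u << 8)
#define ISR_OVER    (1u << 9)

#define ISR_TX_PKT (TX_PKT_INT0 | TX_PKT_INT1 | TX_PKT_INT2 | TX_PKT_INT3)
#define IMR_NORMAL_MASK (ISR_ERROR | ISR_OVER | ISR_TX_PKT)

int main(void)
{
	unsigned int status = TX_PKT_INT2 | ISR_OVER;

	if (status & IMR_NORMAL_MASK)
		printf("serviceable bits: 0x%x\n", status & IMR_NORMAL_MASK);
	return 0;
}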
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index 513e6c74e199..24ca7df15d07 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -296,8 +296,9 @@ qcaspi_receive(struct qcaspi *qca)
296 296
297 /* Allocate rx SKB if we don't have one available. */ 297 /* Allocate rx SKB if we don't have one available. */
298 if (!qca->rx_skb) { 298 if (!qca->rx_skb) {
299 qca->rx_skb = netdev_alloc_skb(net_dev, 299 qca->rx_skb = netdev_alloc_skb_ip_align(net_dev,
300 net_dev->mtu + VLAN_ETH_HLEN); 300 net_dev->mtu +
301 VLAN_ETH_HLEN);
301 if (!qca->rx_skb) { 302 if (!qca->rx_skb) {
302 netdev_dbg(net_dev, "out of RX resources\n"); 303 netdev_dbg(net_dev, "out of RX resources\n");
303 qca->stats.out_of_mem++; 304 qca->stats.out_of_mem++;
@@ -377,7 +378,7 @@ qcaspi_receive(struct qcaspi *qca)
377 qca->rx_skb, qca->rx_skb->dev); 378 qca->rx_skb, qca->rx_skb->dev);
378 qca->rx_skb->ip_summed = CHECKSUM_UNNECESSARY; 379 qca->rx_skb->ip_summed = CHECKSUM_UNNECESSARY;
379 netif_rx_ni(qca->rx_skb); 380 netif_rx_ni(qca->rx_skb);
380 qca->rx_skb = netdev_alloc_skb(net_dev, 381 qca->rx_skb = netdev_alloc_skb_ip_align(net_dev,
381 net_dev->mtu + VLAN_ETH_HLEN); 382 net_dev->mtu + VLAN_ETH_HLEN);
382 if (!qca->rx_skb) { 383 if (!qca->rx_skb) {
383 netdev_dbg(net_dev, "out of RX resources\n"); 384 netdev_dbg(net_dev, "out of RX resources\n");
@@ -759,7 +760,8 @@ qcaspi_netdev_init(struct net_device *dev)
759 if (!qca->rx_buffer) 760 if (!qca->rx_buffer)
760 return -ENOBUFS; 761 return -ENOBUFS;
761 762
762 qca->rx_skb = netdev_alloc_skb(dev, qca->net_dev->mtu + VLAN_ETH_HLEN); 763 qca->rx_skb = netdev_alloc_skb_ip_align(dev, qca->net_dev->mtu +
764 VLAN_ETH_HLEN);
763 if (!qca->rx_skb) { 765 if (!qca->rx_skb) {
764 kfree(qca->rx_buffer); 766 kfree(qca->rx_buffer);
765 netdev_info(qca->net_dev, "Failed to allocate RX sk_buff.\n"); 767 netdev_info(qca->net_dev, "Failed to allocate RX sk_buff.\n");
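qca_spi now allocates its receive buffers with netdev_alloc_skb_ip_align(), which reserves NET_IP_ALIGN bytes of headroom so the IP header lands on a four-byte boundary behind the 14-byte Ethernet header. The padding arithmetic in isolation, as a plain-malloc sketch:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define ETH_HLEN     14  /* Ethernet header length */
#define NET_IP_ALIGN 2   /* pad so NET_IP_ALIGN + ETH_HLEN is 4-byte aligned */

/* Allocate a receive buffer and return the frame start with
 * NET_IP_ALIGN bytes of headroom reserved, roughly what
 * netdev_alloc_skb_ip_align() arranges inside an skb. */
static uint8_t *alloc_rx_buf(size_t payload, uint8_t **base)
{
	*base = malloc(NET_IP_ALIGN + ETH_HLEN + payload);
	if (!*base)
		return NULL;
	return *base + NET_IP_ALIGN;     /* the frame is written from here */
}

int main(void)
{
	uint8_t *base, *frame = alloc_rx_buf(1500, &base);

	if (!frame)
		return 1;
	/* IP header offset from the buffer start: 2 + 14 = 16, aligned. */
	printf("ip header offset: %td\n", (frame + ETH_HLEN) - base);
	free(base);
	return 0;
}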
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 3cd7989c007d..784782da3a85 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -230,18 +230,6 @@ static void ravb_ring_free(struct net_device *ndev, int q)
230 int ring_size; 230 int ring_size;
231 int i; 231 int i;
232 232
233 /* Free RX skb ringbuffer */
234 if (priv->rx_skb[q]) {
235 for (i = 0; i < priv->num_rx_ring[q]; i++)
236 dev_kfree_skb(priv->rx_skb[q][i]);
237 }
238 kfree(priv->rx_skb[q]);
239 priv->rx_skb[q] = NULL;
240
241 /* Free aligned TX buffers */
242 kfree(priv->tx_align[q]);
243 priv->tx_align[q] = NULL;
244
245 if (priv->rx_ring[q]) { 233 if (priv->rx_ring[q]) {
246 for (i = 0; i < priv->num_rx_ring[q]; i++) { 234 for (i = 0; i < priv->num_rx_ring[q]; i++) {
247 struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i]; 235 struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i];
@@ -270,6 +258,18 @@ static void ravb_ring_free(struct net_device *ndev, int q)
270 priv->tx_ring[q] = NULL; 258 priv->tx_ring[q] = NULL;
271 } 259 }
272 260
261 /* Free RX skb ringbuffer */
262 if (priv->rx_skb[q]) {
263 for (i = 0; i < priv->num_rx_ring[q]; i++)
264 dev_kfree_skb(priv->rx_skb[q][i]);
265 }
266 kfree(priv->rx_skb[q]);
267 priv->rx_skb[q] = NULL;
268
269 /* Free aligned TX buffers */
270 kfree(priv->tx_align[q]);
271 priv->tx_align[q] = NULL;
272
273 /* Free TX skb ringbuffer. 273 /* Free TX skb ringbuffer.
274 * SKBs are freed by ravb_tx_free() call above. 274 * SKBs are freed by ravb_tx_free() call above.
275 */ 275 */
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index f68c4db656ed..2d686ccf971b 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -3220,7 +3220,8 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
3220 /* MDIO bus init */ 3220 /* MDIO bus init */
3221 ret = sh_mdio_init(mdp, pd); 3221 ret = sh_mdio_init(mdp, pd);
3222 if (ret) { 3222 if (ret) {
3223 dev_err(&ndev->dev, "failed to initialise MDIO\n"); 3223 if (ret != -EPROBE_DEFER)
3224 dev_err(&pdev->dev, "MDIO init failed: %d\n", ret);
3224 goto out_release; 3225 goto out_release;
3225 } 3226 }
3226 3227
diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c
index 2ae852454780..a9ce82d3e9cf 100644
--- a/drivers/net/ethernet/rocker/rocker_ofdpa.c
+++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c
@@ -1505,8 +1505,8 @@ static int ofdpa_port_ipv4_nh(struct ofdpa_port *ofdpa_port,
1505 *index = entry->index; 1505 *index = entry->index;
1506 resolved = false; 1506 resolved = false;
1507 } else if (removing) { 1507 } else if (removing) {
1508 ofdpa_neigh_del(trans, found);
1509 *index = found->index; 1508 *index = found->index;
1509 ofdpa_neigh_del(trans, found);
1510 } else if (updating) { 1510 } else if (updating) {
1511 ofdpa_neigh_update(found, trans, NULL, false); 1511 ofdpa_neigh_update(found, trans, NULL, false);
1512 resolved = !is_zero_ether_addr(found->eth_dst); 1512 resolved = !is_zero_ether_addr(found->eth_dst);
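The rocker_ofdpa hunk above is a use-after-free fix: *index must be copied out of found before ofdpa_neigh_del() drops what may be the last reference. The ordering rule, reduced to a minimal example:

#include <stdio.h>
#include <stdlib.h>

struct neigh {
	unsigned int index;
};

static void neigh_del(struct neigh *n)
{
	free(n);                /* may release the last reference */
}

int main(void)
{
	struct neigh *found = malloc(sizeof(*found));
	unsigned int index;

	if (!found)
		return 1;
	found->index = 7;

	/* Correct order: copy out everything still needed, then delete.
	 * Reading found->index after neigh_del() would be exactly the
	 * use-after-free the hunk above removes. */
	index = found->index;
	neigh_del(found);

	printf("saved index: %u\n", index);
	return 0;
}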
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 78efb2822b86..78f9e43420e0 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -4172,7 +4172,7 @@ found:
4172 * recipients 4172 * recipients
4173 */ 4173 */
4174 if (is_mc_recip) { 4174 if (is_mc_recip) {
4175 MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN); 4175 MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN);
4176 unsigned int depth, i; 4176 unsigned int depth, i;
4177 4177
4178 memset(inbuf, 0, sizeof(inbuf)); 4178 memset(inbuf, 0, sizeof(inbuf));
@@ -4320,7 +4320,7 @@ static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
4320 efx_ef10_filter_set_entry(table, filter_idx, NULL, 0); 4320 efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
4321 } else { 4321 } else {
4322 efx_mcdi_display_error(efx, MC_CMD_FILTER_OP, 4322 efx_mcdi_display_error(efx, MC_CMD_FILTER_OP,
4323 MC_CMD_FILTER_OP_IN_LEN, 4323 MC_CMD_FILTER_OP_EXT_IN_LEN,
4324 NULL, 0, rc); 4324 NULL, 0, rc);
4325 } 4325 }
4326 } 4326 }
@@ -4453,7 +4453,7 @@ static s32 efx_ef10_filter_rfs_insert(struct efx_nic *efx,
4453 struct efx_filter_spec *spec) 4453 struct efx_filter_spec *spec)
4454{ 4454{
4455 struct efx_ef10_filter_table *table = efx->filter_state; 4455 struct efx_ef10_filter_table *table = efx->filter_state;
4456 MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN); 4456 MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN);
4457 struct efx_filter_spec *saved_spec; 4457 struct efx_filter_spec *saved_spec;
4458 unsigned int hash, i, depth = 1; 4458 unsigned int hash, i, depth = 1;
4459 bool replacing = false; 4459 bool replacing = false;
@@ -4940,7 +4940,7 @@ not_restored:
4940static void efx_ef10_filter_table_remove(struct efx_nic *efx) 4940static void efx_ef10_filter_table_remove(struct efx_nic *efx)
4941{ 4941{
4942 struct efx_ef10_filter_table *table = efx->filter_state; 4942 struct efx_ef10_filter_table *table = efx->filter_state;
4943 MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN); 4943 MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN);
4944 struct efx_filter_spec *spec; 4944 struct efx_filter_spec *spec;
4945 unsigned int filter_idx; 4945 unsigned int filter_idx;
4946 int rc; 4946 int rc;
@@ -5105,6 +5105,7 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
5105 5105
5106 /* Insert/renew filters */ 5106 /* Insert/renew filters */
5107 for (i = 0; i < addr_count; i++) { 5107 for (i = 0; i < addr_count; i++) {
5108 EFX_WARN_ON_PARANOID(ids[i] != EFX_EF10_FILTER_ID_INVALID);
5108 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0); 5109 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
5109 efx_filter_set_eth_local(&spec, vlan->vid, addr_list[i].addr); 5110 efx_filter_set_eth_local(&spec, vlan->vid, addr_list[i].addr);
5110 rc = efx_ef10_filter_insert(efx, &spec, true); 5111 rc = efx_ef10_filter_insert(efx, &spec, true);
@@ -5122,11 +5123,11 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
5122 } 5123 }
5123 return rc; 5124 return rc;
5124 } else { 5125 } else {
5125 /* mark as not inserted, and carry on */ 5126 /* keep invalid ID, and carry on */
5126 rc = EFX_EF10_FILTER_ID_INVALID;
5127 } 5127 }
5128 } else {
5129 ids[i] = efx_ef10_filter_get_unsafe_id(rc);
5128 } 5130 }
5129 ids[i] = efx_ef10_filter_get_unsafe_id(rc);
5130 } 5131 }
5131 5132
5132 if (multicast && rollback) { 5133 if (multicast && rollback) {
diff --git a/drivers/net/ethernet/sfc/ef10_sriov.c b/drivers/net/ethernet/sfc/ef10_sriov.c
index b7e4345c990d..019cef1d3cf7 100644
--- a/drivers/net/ethernet/sfc/ef10_sriov.c
+++ b/drivers/net/ethernet/sfc/ef10_sriov.c
@@ -661,8 +661,6 @@ restore_filters:
661 up_write(&vf->efx->filter_sem); 661 up_write(&vf->efx->filter_sem);
662 mutex_unlock(&vf->efx->mac_lock); 662 mutex_unlock(&vf->efx->mac_lock);
663 663
664 up_write(&vf->efx->filter_sem);
665
666 rc2 = efx_net_open(vf->efx->net_dev); 664 rc2 = efx_net_open(vf->efx->net_dev);
667 if (rc2) 665 if (rc2)
668 goto reset_nic; 666 goto reset_nic;
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 7b916aa21bde..4d7fb8af880d 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -18,8 +18,12 @@
18#include "mcdi.h" 18#include "mcdi.h"
19 19
20enum { 20enum {
21 EFX_REV_SIENA_A0 = 0, 21 /* Revisions 0-2 were Falcon A0, A1 and B0 respectively.
22 EFX_REV_HUNT_A0 = 1, 22 * They are not supported by this driver but these revision numbers
23 * form part of the ethtool API for register dumping.
24 */
25 EFX_REV_SIENA_A0 = 3,
26 EFX_REV_HUNT_A0 = 4,
23}; 27};
24 28
25static inline int efx_nic_rev(struct efx_nic *efx) 29static inline int efx_nic_rev(struct efx_nic *efx)
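The sfc/nic.h hunk pins EFX_REV_SIENA_A0 and EFX_REV_HUNT_A0 to 3 and 4 because revisions 0-2 belonged to the removed Falcon parts yet remain reserved in the ethtool register-dump ABI. Giving enumerators explicit values is what keeps an externally visible numbering stable when earlier members disappear:

#include <stdio.h>

/* Values exported to userspace must not shift when old members are
 * removed, so each one is pinned explicitly. Numbers 0-2 stay
 * reserved for the retired hardware. */
enum chip_rev {
	CHIP_REV_C = 3,   /* would silently become 0 if left implicit */
	CHIP_REV_D = 4,
};

int main(void)
{
	printf("rev C = %d, rev D = %d\n", CHIP_REV_C, CHIP_REV_D);
	return 0;
}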
diff --git a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c
index 489ef146201e..6a9c954492f2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c
@@ -37,6 +37,7 @@
37#define TSE_PCS_CONTROL_AN_EN_MASK BIT(12) 37#define TSE_PCS_CONTROL_AN_EN_MASK BIT(12)
38#define TSE_PCS_CONTROL_REG 0x00 38#define TSE_PCS_CONTROL_REG 0x00
39#define TSE_PCS_CONTROL_RESTART_AN_MASK BIT(9) 39#define TSE_PCS_CONTROL_RESTART_AN_MASK BIT(9)
40#define TSE_PCS_CTRL_AUTONEG_SGMII 0x1140
40#define TSE_PCS_IF_MODE_REG 0x28 41#define TSE_PCS_IF_MODE_REG 0x28
41#define TSE_PCS_LINK_TIMER_0_REG 0x24 42#define TSE_PCS_LINK_TIMER_0_REG 0x24
42#define TSE_PCS_LINK_TIMER_1_REG 0x26 43#define TSE_PCS_LINK_TIMER_1_REG 0x26
@@ -65,6 +66,7 @@
65#define TSE_PCS_SW_RESET_TIMEOUT 100 66#define TSE_PCS_SW_RESET_TIMEOUT 100
66#define TSE_PCS_USE_SGMII_AN_MASK BIT(1) 67#define TSE_PCS_USE_SGMII_AN_MASK BIT(1)
67#define TSE_PCS_USE_SGMII_ENA BIT(0) 68#define TSE_PCS_USE_SGMII_ENA BIT(0)
69#define TSE_PCS_IF_USE_SGMII 0x03
68 70
69#define SGMII_ADAPTER_CTRL_REG 0x00 71#define SGMII_ADAPTER_CTRL_REG 0x00
70#define SGMII_ADAPTER_DISABLE 0x0001 72#define SGMII_ADAPTER_DISABLE 0x0001
@@ -101,7 +103,9 @@ int tse_pcs_init(void __iomem *base, struct tse_pcs *pcs)
101{ 103{
102 int ret = 0; 104 int ret = 0;
103 105
104 writew(TSE_PCS_USE_SGMII_ENA, base + TSE_PCS_IF_MODE_REG); 106 writew(TSE_PCS_IF_USE_SGMII, base + TSE_PCS_IF_MODE_REG);
107
108 writew(TSE_PCS_CTRL_AUTONEG_SGMII, base + TSE_PCS_CONTROL_REG);
105 109
106 writew(TSE_PCS_SGMII_LINK_TIMER_0, base + TSE_PCS_LINK_TIMER_0_REG); 110 writew(TSE_PCS_SGMII_LINK_TIMER_0, base + TSE_PCS_LINK_TIMER_0_REG);
107 writew(TSE_PCS_SGMII_LINK_TIMER_1, base + TSE_PCS_LINK_TIMER_1_REG); 111 writew(TSE_PCS_SGMII_LINK_TIMER_1, base + TSE_PCS_LINK_TIMER_1_REG);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index aa6476439aee..e0ef02f9503b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -214,13 +214,13 @@ static int dwmac4_wrback_get_tx_timestamp_status(struct dma_desc *p)
214{ 214{
215 /* Context type from W/B descriptor must be zero */ 215 /* Context type from W/B descriptor must be zero */
216 if (le32_to_cpu(p->des3) & TDES3_CONTEXT_TYPE) 216 if (le32_to_cpu(p->des3) & TDES3_CONTEXT_TYPE)
217 return -EINVAL; 217 return 0;
218 218
219 /* Tx Timestamp Status is 1 so des0 and des1'll have valid values */ 219 /* Tx Timestamp Status is 1 so des0 and des1'll have valid values */
220 if (le32_to_cpu(p->des3) & TDES3_TIMESTAMP_STATUS) 220 if (le32_to_cpu(p->des3) & TDES3_TIMESTAMP_STATUS)
221 return 0; 221 return 1;
222 222
223 return 1; 223 return 0;
224} 224}
225 225
226static inline u64 dwmac4_get_timestamp(void *desc, u32 ats) 226static inline u64 dwmac4_get_timestamp(void *desc, u32 ats)
@@ -282,7 +282,10 @@ static int dwmac4_wrback_get_rx_timestamp_status(void *desc, u32 ats)
282 } 282 }
283 } 283 }
284exit: 284exit:
285 return ret; 285 if (likely(ret == 0))
286 return 1;
287
288 return 0;
286} 289}
287 290
288static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic, 291static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
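The dwmac4_descs.c hunk above inverts the return convention of the timestamp-status helpers so that nonzero now means a valid timestamp is present, which lets the stmmac_main.c callers in the next diff test the result directly instead of negating it. The new convention in miniature (descriptor bit positions here are illustrative):

#include <stdio.h>

#define TDES3_CONTEXT_TYPE     (1u << 30)  /* illustrative bit layout */
#define TDES3_TIMESTAMP_STATUS (1u << 17)

/* New convention: return 1 when a valid timestamp is present,
 * 0 otherwise, so callers can write `if (get_status(...))`. */
static int get_tx_timestamp_status(unsigned int des3)
{
	if (des3 & TDES3_CONTEXT_TYPE)
		return 0;                      /* context descriptor: no stamp */
	return !!(des3 & TDES3_TIMESTAMP_STATUS);
}

int main(void)
{
	printf("%d %d\n",
	       get_tx_timestamp_status(TDES3_TIMESTAMP_STATUS),  /* 1 */
	       get_tx_timestamp_status(TDES3_CONTEXT_TYPE));     /* 0 */
	return 0;
}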
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index cd8c60132390..6e4cbc6ce0ef 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -434,14 +434,14 @@ static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
434 return; 434 return;
435 435
436 /* check tx tstamp status */ 436 /* check tx tstamp status */
437 if (!priv->hw->desc->get_tx_timestamp_status(p)) { 437 if (priv->hw->desc->get_tx_timestamp_status(p)) {
438 /* get the valid tstamp */ 438 /* get the valid tstamp */
439 ns = priv->hw->desc->get_timestamp(p, priv->adv_ts); 439 ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);
440 440
441 memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); 441 memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
442 shhwtstamp.hwtstamp = ns_to_ktime(ns); 442 shhwtstamp.hwtstamp = ns_to_ktime(ns);
443 443
444 netdev_info(priv->dev, "get valid TX hw timestamp %llu\n", ns); 444 netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
445 /* pass tstamp to stack */ 445 /* pass tstamp to stack */
446 skb_tstamp_tx(skb, &shhwtstamp); 446 skb_tstamp_tx(skb, &shhwtstamp);
447 } 447 }
@@ -468,19 +468,19 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
468 return; 468 return;
469 469
470 /* Check if timestamp is available */ 470 /* Check if timestamp is available */
471 if (!priv->hw->desc->get_rx_timestamp_status(p, priv->adv_ts)) { 471 if (priv->hw->desc->get_rx_timestamp_status(p, priv->adv_ts)) {
472 /* For GMAC4, the valid timestamp is from CTX next desc. */ 472 /* For GMAC4, the valid timestamp is from CTX next desc. */
473 if (priv->plat->has_gmac4) 473 if (priv->plat->has_gmac4)
474 ns = priv->hw->desc->get_timestamp(np, priv->adv_ts); 474 ns = priv->hw->desc->get_timestamp(np, priv->adv_ts);
475 else 475 else
476 ns = priv->hw->desc->get_timestamp(p, priv->adv_ts); 476 ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);
477 477
478 netdev_info(priv->dev, "get valid RX hw timestamp %llu\n", ns); 478 netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
479 shhwtstamp = skb_hwtstamps(skb); 479 shhwtstamp = skb_hwtstamps(skb);
480 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); 480 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
481 shhwtstamp->hwtstamp = ns_to_ktime(ns); 481 shhwtstamp->hwtstamp = ns_to_ktime(ns);
482 } else { 482 } else {
483 netdev_err(priv->dev, "cannot get RX hw timestamp\n"); 483 netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
484 } 484 }
485} 485}
486 486
@@ -546,7 +546,10 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
546 /* PTP v1, UDP, any kind of event packet */ 546 /* PTP v1, UDP, any kind of event packet */
547 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; 547 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
548 /* take time stamp for all event messages */ 548 /* take time stamp for all event messages */
549 snap_type_sel = PTP_TCR_SNAPTYPSEL_1; 549 if (priv->plat->has_gmac4)
550 snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
551 else
552 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
550 553
551 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; 554 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
552 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; 555 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
@@ -578,7 +581,10 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
578 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; 581 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
579 ptp_v2 = PTP_TCR_TSVER2ENA; 582 ptp_v2 = PTP_TCR_TSVER2ENA;
580 /* take time stamp for all event messages */ 583 /* take time stamp for all event messages */
581 snap_type_sel = PTP_TCR_SNAPTYPSEL_1; 584 if (priv->plat->has_gmac4)
585 snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
586 else
587 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
582 588
583 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; 589 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
584 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; 590 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
@@ -612,7 +618,10 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
612 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; 618 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
613 ptp_v2 = PTP_TCR_TSVER2ENA; 619 ptp_v2 = PTP_TCR_TSVER2ENA;
614 /* take time stamp for all event messages */ 620 /* take time stamp for all event messages */
615 snap_type_sel = PTP_TCR_SNAPTYPSEL_1; 621 if (priv->plat->has_gmac4)
622 snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
623 else
624 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
616 625
617 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; 626 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
618 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; 627 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
@@ -1208,7 +1217,7 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
1208 u32 rx_count = priv->plat->rx_queues_to_use; 1217 u32 rx_count = priv->plat->rx_queues_to_use;
1209 unsigned int bfsize = 0; 1218 unsigned int bfsize = 0;
1210 int ret = -ENOMEM; 1219 int ret = -ENOMEM;
1211 u32 queue; 1220 int queue;
1212 int i; 1221 int i;
1213 1222
1214 if (priv->hw->mode->set_16kib_bfsize) 1223 if (priv->hw->mode->set_16kib_bfsize)
@@ -2724,7 +2733,7 @@ static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
2724 2733
2725 priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size, 2734 priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size,
2726 0, 1, 2735 0, 1,
2727 (last_segment) && (buff_size < TSO_MAX_BUFF_SIZE), 2736 (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
2728 0, 0); 2737 0, 0);
2729 2738
2730 tmp_len -= TSO_MAX_BUFF_SIZE; 2739 tmp_len -= TSO_MAX_BUFF_SIZE;
@@ -2822,7 +2831,6 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2822 2831
2823 tx_q->tx_skbuff_dma[first_entry].buf = des; 2832 tx_q->tx_skbuff_dma[first_entry].buf = des;
2824 tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb); 2833 tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
2825 tx_q->tx_skbuff[first_entry] = skb;
2826 2834
2827 first->des0 = cpu_to_le32(des); 2835 first->des0 = cpu_to_le32(des);
2828 2836
@@ -2856,6 +2864,14 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2856 2864
2857 tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true; 2865 tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
2858 2866
2867 /* Only the last descriptor gets to point to the skb. */
2868 tx_q->tx_skbuff[tx_q->cur_tx] = skb;
2869
2870 /* We've used all descriptors we need for this skb, however,
2871 * advance cur_tx so that it references a fresh descriptor.
2872 * ndo_start_xmit will fill this descriptor the next time it's
2873 * called and stmmac_tx_clean may clean up to this descriptor.
2874 */
2859 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE); 2875 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2860 2876
2861 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) { 2877 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
@@ -2947,7 +2963,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
2947 int i, csum_insertion = 0, is_jumbo = 0; 2963 int i, csum_insertion = 0, is_jumbo = 0;
2948 u32 queue = skb_get_queue_mapping(skb); 2964 u32 queue = skb_get_queue_mapping(skb);
2949 int nfrags = skb_shinfo(skb)->nr_frags; 2965 int nfrags = skb_shinfo(skb)->nr_frags;
2950 unsigned int entry, first_entry; 2966 int entry;
2967 unsigned int first_entry;
2951 struct dma_desc *desc, *first; 2968 struct dma_desc *desc, *first;
2952 struct stmmac_tx_queue *tx_q; 2969 struct stmmac_tx_queue *tx_q;
2953 unsigned int enh_desc; 2970 unsigned int enh_desc;
@@ -2988,8 +3005,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
2988 3005
2989 first = desc; 3006 first = desc;
2990 3007
2991 tx_q->tx_skbuff[first_entry] = skb;
2992
2993 enh_desc = priv->plat->enh_desc; 3008 enh_desc = priv->plat->enh_desc;
2994 /* To program the descriptors according to the size of the frame */ 3009 /* To program the descriptors according to the size of the frame */
2995 if (enh_desc) 3010 if (enh_desc)
@@ -3037,8 +3052,15 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
3037 skb->len); 3052 skb->len);
3038 } 3053 }
3039 3054
3040 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); 3055 /* Only the last descriptor gets to point to the skb. */
3056 tx_q->tx_skbuff[entry] = skb;
3041 3057
3058 /* We've used all descriptors we need for this skb, however,
3059 * advance cur_tx so that it references a fresh descriptor.
3060 * ndo_start_xmit will fill this descriptor the next time it's
3061 * called and stmmac_tx_clean may clean up to this descriptor.
3062 */
3063 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3042 tx_q->cur_tx = entry; 3064 tx_q->cur_tx = entry;
3043 3065
3044 if (netif_msg_pktdata(priv)) { 3066 if (netif_msg_pktdata(priv)) {
@@ -3725,7 +3747,7 @@ static void sysfs_display_ring(void *head, int size, int extend_desc,
3725 ep++; 3747 ep++;
3726 } else { 3748 } else {
3727 seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n", 3749 seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3728 i, (unsigned int)virt_to_phys(ep), 3750 i, (unsigned int)virt_to_phys(p),
3729 le32_to_cpu(p->des0), le32_to_cpu(p->des1), 3751 le32_to_cpu(p->des0), le32_to_cpu(p->des1),
3730 le32_to_cpu(p->des2), le32_to_cpu(p->des3)); 3752 le32_to_cpu(p->des2), le32_to_cpu(p->des3));
3731 p++; 3753 p++;
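Both stmmac transmit paths above now attach the skb only to its final descriptor and then advance cur_tx with STMMAC_GET_ENTRY(), which wraps the index inside the ring. A sketch of that wrap arithmetic, assuming the ring size is a power of two as the mask form requires:

#include <stdio.h>

#define DMA_TX_SIZE 512   /* must be a power of two for the mask trick */

/* Equivalent of STMMAC_GET_ENTRY(): advance and wrap in one step. */
static unsigned int ring_next(unsigned int entry)
{
	return (entry + 1) & (DMA_TX_SIZE - 1);
}

int main(void)
{
	/* Wrap from the last slot back to 0. */
	printf("%u -> %u\n", DMA_TX_SIZE - 1, ring_next(DMA_TX_SIZE - 1));
	return 0;
}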
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
index 48fb72fc423c..f4b31d69f60e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
@@ -59,7 +59,8 @@
59/* Enable Snapshot for Messages Relevant to Master */ 59/* Enable Snapshot for Messages Relevant to Master */
60#define PTP_TCR_TSMSTRENA BIT(15) 60#define PTP_TCR_TSMSTRENA BIT(15)
61/* Select PTP packets for Taking Snapshots */ 61/* Select PTP packets for Taking Snapshots */
62#define PTP_TCR_SNAPTYPSEL_1 GENMASK(17, 16) 62#define PTP_TCR_SNAPTYPSEL_1 BIT(16)
63#define PTP_GMAC4_TCR_SNAPTYPSEL_1 GENMASK(17, 16)
63/* Enable MAC address for PTP Frame Filtering */ 64/* Enable MAC address for PTP Frame Filtering */
64#define PTP_TCR_TSENMACADDR BIT(18) 65#define PTP_TCR_TSENMACADDR BIT(18)
65 66
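stmmac_ptp.h narrows PTP_TCR_SNAPTYPSEL_1 to BIT(16) and introduces a two-bit GMAC4 variant built with GENMASK(17, 16). Userspace equivalents of those helpers make the resulting masks easy to verify:

#include <stdio.h>

/* Userspace equivalents of the kernel's BIT()/GENMASK() helpers. */
#define BIT(n)        (1UL << (n))
#define GENMASK(h, l) (((~0UL) << (l)) & \
		       (~0UL >> (8 * sizeof(long) - 1 - (h))))

int main(void)
{
	printf("BIT(16)        = 0x%lx\n", BIT(16));          /* 0x10000 */
	printf("GENMASK(17,16) = 0x%lx\n", GENMASK(17, 16));  /* 0x30000 */
	return 0;
}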
diff --git a/drivers/net/ethernet/sun/ldmvsw.c b/drivers/net/ethernet/sun/ldmvsw.c
index 5a90fed06260..5b56c24b6ed2 100644
--- a/drivers/net/ethernet/sun/ldmvsw.c
+++ b/drivers/net/ethernet/sun/ldmvsw.c
@@ -411,13 +411,14 @@ static int vsw_port_remove(struct vio_dev *vdev)
411 411
412 if (port) { 412 if (port) {
413 del_timer_sync(&port->vio.timer); 413 del_timer_sync(&port->vio.timer);
414 del_timer_sync(&port->clean_timer);
414 415
415 napi_disable(&port->napi); 416 napi_disable(&port->napi);
417 unregister_netdev(port->dev);
416 418
417 list_del_rcu(&port->list); 419 list_del_rcu(&port->list);
418 420
419 synchronize_rcu(); 421 synchronize_rcu();
420 del_timer_sync(&port->clean_timer);
421 spin_lock_irqsave(&port->vp->lock, flags); 422 spin_lock_irqsave(&port->vp->lock, flags);
422 sunvnet_port_rm_txq_common(port); 423 sunvnet_port_rm_txq_common(port);
423 spin_unlock_irqrestore(&port->vp->lock, flags); 424 spin_unlock_irqrestore(&port->vp->lock, flags);
@@ -427,7 +428,6 @@ static int vsw_port_remove(struct vio_dev *vdev)
427 428
428 dev_set_drvdata(&vdev->dev, NULL); 429 dev_set_drvdata(&vdev->dev, NULL);
429 430
430 unregister_netdev(port->dev);
431 free_netdev(port->dev); 431 free_netdev(port->dev);
432 } 432 }
433 433
diff --git a/drivers/net/ethernet/ti/cpsw-common.c b/drivers/net/ethernet/ti/cpsw-common.c
index 1562ab4151e1..56ba411421f0 100644
--- a/drivers/net/ethernet/ti/cpsw-common.c
+++ b/drivers/net/ethernet/ti/cpsw-common.c
@@ -90,7 +90,7 @@ int ti_cm_get_macid(struct device *dev, int slave, u8 *mac_addr)
90 if (of_device_is_compatible(dev->of_node, "ti,dm816-emac")) 90 if (of_device_is_compatible(dev->of_node, "ti,dm816-emac"))
91 return cpsw_am33xx_cm_get_macid(dev, 0x30, slave, mac_addr); 91 return cpsw_am33xx_cm_get_macid(dev, 0x30, slave, mac_addr);
92 92
93 if (of_machine_is_compatible("ti,am4372")) 93 if (of_machine_is_compatible("ti,am43"))
94 return cpsw_am33xx_cm_get_macid(dev, 0x630, slave, mac_addr); 94 return cpsw_am33xx_cm_get_macid(dev, 0x630, slave, mac_addr);
95 95
96 if (of_machine_is_compatible("ti,dra7")) 96 if (of_machine_is_compatible("ti,dra7"))
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index 729a7da90b5b..e6222e535019 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -1353,9 +1353,10 @@ int netcp_txpipe_open(struct netcp_tx_pipe *tx_pipe)
1353 1353
1354 tx_pipe->dma_channel = knav_dma_open_channel(dev, 1354 tx_pipe->dma_channel = knav_dma_open_channel(dev,
1355 tx_pipe->dma_chan_name, &config); 1355 tx_pipe->dma_chan_name, &config);
1356 if (IS_ERR_OR_NULL(tx_pipe->dma_channel)) { 1356 if (IS_ERR(tx_pipe->dma_channel)) {
1357 dev_err(dev, "failed opening tx chan(%s)\n", 1357 dev_err(dev, "failed opening tx chan(%s)\n",
1358 tx_pipe->dma_chan_name); 1358 tx_pipe->dma_chan_name);
1359 ret = PTR_ERR(tx_pipe->dma_channel);
1359 goto err; 1360 goto err;
1360 } 1361 }
1361 1362
@@ -1673,9 +1674,10 @@ static int netcp_setup_navigator_resources(struct net_device *ndev)
1673 1674
1674 netcp->rx_channel = knav_dma_open_channel(netcp->netcp_device->device, 1675 netcp->rx_channel = knav_dma_open_channel(netcp->netcp_device->device,
1675 netcp->dma_chan_name, &config); 1676 netcp->dma_chan_name, &config);
1676 if (IS_ERR_OR_NULL(netcp->rx_channel)) { 1677 if (IS_ERR(netcp->rx_channel)) {
1677 dev_err(netcp->ndev_dev, "failed opening rx chan(%s\n", 1678 dev_err(netcp->ndev_dev, "failed opening rx chan(%s\n",
1678 netcp->dma_chan_name); 1679 netcp->dma_chan_name);
1680 ret = PTR_ERR(netcp->rx_channel);
1679 goto fail; 1681 goto fail;
1680 } 1682 }
1681 1683
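The netcp hunks above switch from IS_ERR_OR_NULL() to IS_ERR() and propagate PTR_ERR() on failure, treating knav_dma_open_channel() as returning an encoded error pointer rather than NULL. A self-contained re-implementation of that encoding (not the kernel headers) shows how errno values and valid pointers share one return channel:

#include <stdio.h>

#define MAX_ERRNO 4095

/* Minimal re-implementation of ERR_PTR()/IS_ERR()/PTR_ERR(): negative
 * errno values are folded into the top of the address space, which no
 * valid pointer occupies. */
static void *ERR_PTR(long err)      { return (void *)err; }
static long  PTR_ERR(const void *p) { return (long)p; }
static int   IS_ERR(const void *p)
{
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

static void *open_channel(int fail)
{
	static int channel;             /* stand-in resource */
	return fail ? ERR_PTR(-19 /* -ENODEV */) : &channel;
}

int main(void)
{
	void *ch = open_channel(1);

	if (IS_ERR(ch))                 /* NULL is deliberately not an error */
		printf("open failed: %ld\n", PTR_ERR(ch));
	return 0;
}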
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index 897176fc5043..dd92950a4615 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -2651,7 +2651,6 @@ static int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *ifr)
2651 case HWTSTAMP_FILTER_NONE: 2651 case HWTSTAMP_FILTER_NONE:
2652 cpts_rx_enable(cpts, 0); 2652 cpts_rx_enable(cpts, 0);
2653 break; 2653 break;
2654 case HWTSTAMP_FILTER_ALL:
2655 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 2654 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
2656 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 2655 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
2657 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 2656 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index dec5d563ab19..199459bd6961 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -1007,7 +1007,7 @@ static void geneve_setup(struct net_device *dev)
1007 1007
1008 dev->netdev_ops = &geneve_netdev_ops; 1008 dev->netdev_ops = &geneve_netdev_ops;
1009 dev->ethtool_ops = &geneve_ethtool_ops; 1009 dev->ethtool_ops = &geneve_ethtool_ops;
1010 dev->destructor = free_netdev; 1010 dev->needs_free_netdev = true;
1011 1011
1012 SET_NETDEV_DEVTYPE(dev, &geneve_type); 1012 SET_NETDEV_DEVTYPE(dev, &geneve_type);
1013 1013
@@ -1133,7 +1133,7 @@ static int geneve_configure(struct net *net, struct net_device *dev,
1133 1133
1134 /* make enough headroom for basic scenario */ 1134 /* make enough headroom for basic scenario */
1135 encap_len = GENEVE_BASE_HLEN + ETH_HLEN; 1135 encap_len = GENEVE_BASE_HLEN + ETH_HLEN;
1136 if (ip_tunnel_info_af(info) == AF_INET) { 1136 if (!metadata && ip_tunnel_info_af(info) == AF_INET) {
1137 encap_len += sizeof(struct iphdr); 1137 encap_len += sizeof(struct iphdr);
1138 dev->max_mtu -= sizeof(struct iphdr); 1138 dev->max_mtu -= sizeof(struct iphdr);
1139 } else { 1139 } else {
@@ -1293,7 +1293,7 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
1293 if (nla_put_u32(skb, IFLA_GENEVE_ID, vni)) 1293 if (nla_put_u32(skb, IFLA_GENEVE_ID, vni))
1294 goto nla_put_failure; 1294 goto nla_put_failure;
1295 1295
1296 if (ip_tunnel_info_af(info) == AF_INET) { 1296 if (rtnl_dereference(geneve->sock4)) {
1297 if (nla_put_in_addr(skb, IFLA_GENEVE_REMOTE, 1297 if (nla_put_in_addr(skb, IFLA_GENEVE_REMOTE,
1298 info->key.u.ipv4.dst)) 1298 info->key.u.ipv4.dst))
1299 goto nla_put_failure; 1299 goto nla_put_failure;
@@ -1302,8 +1302,10 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
1302 !!(info->key.tun_flags & TUNNEL_CSUM))) 1302 !!(info->key.tun_flags & TUNNEL_CSUM)))
1303 goto nla_put_failure; 1303 goto nla_put_failure;
1304 1304
1305 }
1306
1305#if IS_ENABLED(CONFIG_IPV6) 1307#if IS_ENABLED(CONFIG_IPV6)
1306 } else { 1308 if (rtnl_dereference(geneve->sock6)) {
1307 if (nla_put_in6_addr(skb, IFLA_GENEVE_REMOTE6, 1309 if (nla_put_in6_addr(skb, IFLA_GENEVE_REMOTE6,
1308 &info->key.u.ipv6.dst)) 1310 &info->key.u.ipv6.dst))
1309 goto nla_put_failure; 1311 goto nla_put_failure;
@@ -1315,8 +1317,8 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
1315 if (nla_put_u8(skb, IFLA_GENEVE_UDP_ZERO_CSUM6_RX, 1317 if (nla_put_u8(skb, IFLA_GENEVE_UDP_ZERO_CSUM6_RX,
1316 !geneve->use_udp6_rx_checksums)) 1318 !geneve->use_udp6_rx_checksums))
1317 goto nla_put_failure; 1319 goto nla_put_failure;
1318#endif
1319 } 1320 }
1321#endif
1320 1322
1321 if (nla_put_u8(skb, IFLA_GENEVE_TTL, info->key.ttl) || 1323 if (nla_put_u8(skb, IFLA_GENEVE_TTL, info->key.ttl) ||
1322 nla_put_u8(skb, IFLA_GENEVE_TOS, info->key.tos) || 1324 nla_put_u8(skb, IFLA_GENEVE_TOS, info->key.tos) ||
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index 4fea1b3dfbb4..ca110cd2a4e4 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -611,7 +611,7 @@ static const struct net_device_ops gtp_netdev_ops = {
611static void gtp_link_setup(struct net_device *dev) 611static void gtp_link_setup(struct net_device *dev)
612{ 612{
613 dev->netdev_ops = &gtp_netdev_ops; 613 dev->netdev_ops = &gtp_netdev_ops;
614 dev->destructor = free_netdev; 614 dev->needs_free_netdev = true;
615 615
616 dev->hard_header_len = 0; 616 dev->hard_header_len = 0;
617 dev->addr_len = 0; 617 dev->addr_len = 0;
@@ -873,7 +873,7 @@ static struct gtp_dev *gtp_find_dev(struct net *src_net, struct nlattr *nla[])
873 873
874 /* Check if there's an existing gtpX device to configure */ 874 /* Check if there's an existing gtpX device to configure */
875 dev = dev_get_by_index_rcu(net, nla_get_u32(nla[GTPA_LINK])); 875 dev = dev_get_by_index_rcu(net, nla_get_u32(nla[GTPA_LINK]));
876 if (dev->netdev_ops == &gtp_netdev_ops) 876 if (dev && dev->netdev_ops == &gtp_netdev_ops)
877 gtp = netdev_priv(dev); 877 gtp = netdev_priv(dev);
878 878
879 put_net(net); 879 put_net(net);
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index 922bf440e9f1..021a8ec411ab 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -311,7 +311,7 @@ static void sp_setup(struct net_device *dev)
311{ 311{
312 /* Finish setting up the DEVICE info. */ 312 /* Finish setting up the DEVICE info. */
313 dev->netdev_ops = &sp_netdev_ops; 313 dev->netdev_ops = &sp_netdev_ops;
314 dev->destructor = free_netdev; 314 dev->needs_free_netdev = true;
315 dev->mtu = SIXP_MTU; 315 dev->mtu = SIXP_MTU;
316 dev->hard_header_len = AX25_MAX_HEADER_LEN; 316 dev->hard_header_len = AX25_MAX_HEADER_LEN;
317 dev->header_ops = &ax25_header_ops; 317 dev->header_ops = &ax25_header_ops;
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index f62e7f325cf9..78a6414c5fd9 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -476,7 +476,7 @@ static const struct net_device_ops bpq_netdev_ops = {
476static void bpq_setup(struct net_device *dev) 476static void bpq_setup(struct net_device *dev)
477{ 477{
478 dev->netdev_ops = &bpq_netdev_ops; 478 dev->netdev_ops = &bpq_netdev_ops;
479 dev->destructor = free_netdev; 479 dev->needs_free_netdev = true;
480 480
481 memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN); 481 memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
482 memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN); 482 memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN);
diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c
index 8c3633c1d078..97e3bc60c3e7 100644
--- a/drivers/net/hamradio/hdlcdrv.c
+++ b/drivers/net/hamradio/hdlcdrv.c
@@ -576,6 +576,8 @@ static int hdlcdrv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
576 case HDLCDRVCTL_CALIBRATE: 576 case HDLCDRVCTL_CALIBRATE:
577 if(!capable(CAP_SYS_RAWIO)) 577 if(!capable(CAP_SYS_RAWIO))
578 return -EPERM; 578 return -EPERM;
579 if (s->par.bitrate <= 0)
580 return -EINVAL;
579 if (bi.data.calibrate > INT_MAX / s->par.bitrate) 581 if (bi.data.calibrate > INT_MAX / s->par.bitrate)
580 return -EINVAL; 582 return -EINVAL;
581 s->hdlctx.calibrate = bi.data.calibrate * s->par.bitrate / 16; 583 s->hdlctx.calibrate = bi.data.calibrate * s->par.bitrate / 16;
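The hdlcdrv hunk guards the existing INT_MAX / s->par.bitrate overflow test against a zero or negative bitrate, which would otherwise divide by zero before the multiplication is ever reached. Both checks in isolation:

#include <limits.h>
#include <stdio.h>

/* Scale calibrate by bitrate/16 only when both checks pass: bitrate
 * must be positive (no division by zero in the guard itself), and
 * calibrate * bitrate must not exceed INT_MAX. */
static int scale_calibrate(int calibrate, int bitrate, int *out)
{
	if (bitrate <= 0)
		return -1;                    /* -EINVAL in the driver */
	if (calibrate > INT_MAX / bitrate)
		return -1;                    /* multiplication would overflow */
	*out = calibrate * bitrate / 16;
	return 0;
}

int main(void)
{
	int v;

	printf("ok: %d\n", scale_calibrate(100, 9600, &v) == 0 ? v : -1);
	printf("zero bitrate rejected: %d\n", scale_calibrate(100, 0, &v));
	return 0;
}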
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 262b2ea576a3..6066f1bcaf2d 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -171,6 +171,8 @@ struct rndis_device {
171 spinlock_t request_lock; 171 spinlock_t request_lock;
172 struct list_head req_list; 172 struct list_head req_list;
173 173
174 struct work_struct mcast_work;
175
174 u8 hw_mac_adr[ETH_ALEN]; 176 u8 hw_mac_adr[ETH_ALEN];
175 u8 rss_key[NETVSC_HASH_KEYLEN]; 177 u8 rss_key[NETVSC_HASH_KEYLEN];
176 u16 ind_table[ITAB_NUM]; 178 u16 ind_table[ITAB_NUM];
@@ -201,6 +203,7 @@ int rndis_filter_open(struct netvsc_device *nvdev);
201int rndis_filter_close(struct netvsc_device *nvdev); 203int rndis_filter_close(struct netvsc_device *nvdev);
202int rndis_filter_device_add(struct hv_device *dev, 204int rndis_filter_device_add(struct hv_device *dev,
203 struct netvsc_device_info *info); 205 struct netvsc_device_info *info);
206void rndis_filter_update(struct netvsc_device *nvdev);
204void rndis_filter_device_remove(struct hv_device *dev, 207void rndis_filter_device_remove(struct hv_device *dev,
205 struct netvsc_device *nvdev); 208 struct netvsc_device *nvdev);
206int rndis_filter_set_rss_param(struct rndis_device *rdev, 209int rndis_filter_set_rss_param(struct rndis_device *rdev,
@@ -211,7 +214,6 @@ int rndis_filter_receive(struct net_device *ndev,
211 struct vmbus_channel *channel, 214 struct vmbus_channel *channel,
212 void *data, u32 buflen); 215 void *data, u32 buflen);
213 216
214int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter);
215int rndis_filter_set_device_mac(struct net_device *ndev, char *mac); 217int rndis_filter_set_device_mac(struct net_device *ndev, char *mac);
216 218
217void netvsc_switch_datapath(struct net_device *nv_dev, bool vf); 219void netvsc_switch_datapath(struct net_device *nv_dev, bool vf);
@@ -696,7 +698,6 @@ struct net_device_context {
696 /* list protection */ 698 /* list protection */
697 spinlock_t lock; 699 spinlock_t lock;
698 700
699 struct work_struct work;
700 u32 msg_enable; /* debug level */ 701 u32 msg_enable; /* debug level */
701 702
702 u32 tx_checksum_mask; 703 u32 tx_checksum_mask;
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 4421a6d00375..643c539a08ba 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -56,37 +56,12 @@ static int debug = -1;
56module_param(debug, int, S_IRUGO); 56module_param(debug, int, S_IRUGO);
57MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); 57MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
58 58
59static void do_set_multicast(struct work_struct *w)
60{
61 struct net_device_context *ndevctx =
62 container_of(w, struct net_device_context, work);
63 struct hv_device *device_obj = ndevctx->device_ctx;
64 struct net_device *ndev = hv_get_drvdata(device_obj);
65 struct netvsc_device *nvdev = rcu_dereference(ndevctx->nvdev);
66 struct rndis_device *rdev;
67
68 if (!nvdev)
69 return;
70
71 rdev = nvdev->extension;
72 if (rdev == NULL)
73 return;
74
75 if (ndev->flags & IFF_PROMISC)
76 rndis_filter_set_packet_filter(rdev,
77 NDIS_PACKET_TYPE_PROMISCUOUS);
78 else
79 rndis_filter_set_packet_filter(rdev,
80 NDIS_PACKET_TYPE_BROADCAST |
81 NDIS_PACKET_TYPE_ALL_MULTICAST |
82 NDIS_PACKET_TYPE_DIRECTED);
83}
84
85static void netvsc_set_multicast_list(struct net_device *net) 59static void netvsc_set_multicast_list(struct net_device *net)
86{ 60{
87 struct net_device_context *net_device_ctx = netdev_priv(net); 61 struct net_device_context *net_device_ctx = netdev_priv(net);
62 struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
88 63
89 schedule_work(&net_device_ctx->work); 64 rndis_filter_update(nvdev);
90} 65}
91 66
92static int netvsc_open(struct net_device *net) 67static int netvsc_open(struct net_device *net)
@@ -123,8 +98,6 @@ static int netvsc_close(struct net_device *net)
123 98
124 netif_tx_disable(net); 99 netif_tx_disable(net);
125 100
126 /* Make sure netvsc_set_multicast_list doesn't re-enable filter! */
127 cancel_work_sync(&net_device_ctx->work);
128 ret = rndis_filter_close(nvdev); 101 ret = rndis_filter_close(nvdev);
129 if (ret != 0) { 102 if (ret != 0) {
130 netdev_err(net, "unable to close device (ret %d).\n", ret); 103 netdev_err(net, "unable to close device (ret %d).\n", ret);
@@ -803,7 +776,7 @@ static int netvsc_set_channels(struct net_device *net,
803 channels->rx_count || channels->tx_count || channels->other_count) 776 channels->rx_count || channels->tx_count || channels->other_count)
804 return -EINVAL; 777 return -EINVAL;
805 778
806 if (count > net->num_tx_queues || count > net->num_rx_queues) 779 if (count > net->num_tx_queues || count > VRSS_CHANNEL_MAX)
807 return -EINVAL; 780 return -EINVAL;
808 781
809 if (!nvdev || nvdev->destroy) 782 if (!nvdev || nvdev->destroy)
@@ -1028,7 +1001,7 @@ static const struct {
1028static int netvsc_get_sset_count(struct net_device *dev, int string_set) 1001static int netvsc_get_sset_count(struct net_device *dev, int string_set)
1029{ 1002{
1030 struct net_device_context *ndc = netdev_priv(dev); 1003 struct net_device_context *ndc = netdev_priv(dev);
1031 struct netvsc_device *nvdev = rcu_dereference(ndc->nvdev); 1004 struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
1032 1005
1033 if (!nvdev) 1006 if (!nvdev)
1034 return -ENODEV; 1007 return -ENODEV;
@@ -1158,11 +1131,22 @@ netvsc_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
1158} 1131}
1159 1132
1160#ifdef CONFIG_NET_POLL_CONTROLLER 1133#ifdef CONFIG_NET_POLL_CONTROLLER
1161static void netvsc_poll_controller(struct net_device *net) 1134static void netvsc_poll_controller(struct net_device *dev)
1162{ 1135{
1163 /* As netvsc_start_xmit() works synchronous we don't have to 1136 struct net_device_context *ndc = netdev_priv(dev);
1164 * trigger anything here. 1137 struct netvsc_device *ndev;
1165 */ 1138 int i;
1139
1140 rcu_read_lock();
1141 ndev = rcu_dereference(ndc->nvdev);
1142 if (ndev) {
1143 for (i = 0; i < ndev->num_chn; i++) {
1144 struct netvsc_channel *nvchan = &ndev->chan_table[i];
1145
1146 napi_schedule(&nvchan->napi);
1147 }
1148 }
1149 rcu_read_unlock();
1166} 1150}
1167#endif 1151#endif
1168 1152
@@ -1219,7 +1203,7 @@ static int netvsc_set_rxfh(struct net_device *dev, const u32 *indir,
1219 rndis_dev = ndev->extension; 1203 rndis_dev = ndev->extension;
1220 if (indir) { 1204 if (indir) {
1221 for (i = 0; i < ITAB_NUM; i++) 1205 for (i = 0; i < ITAB_NUM; i++)
1222 if (indir[i] >= dev->num_rx_queues) 1206 if (indir[i] >= VRSS_CHANNEL_MAX)
1223 return -EINVAL; 1207 return -EINVAL;
1224 1208
1225 for (i = 0; i < ITAB_NUM; i++) 1209 for (i = 0; i < ITAB_NUM; i++)
@@ -1552,7 +1536,6 @@ static int netvsc_probe(struct hv_device *dev,
1552 hv_set_drvdata(dev, net); 1536 hv_set_drvdata(dev, net);
1553 1537
1554 INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change); 1538 INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
1555 INIT_WORK(&net_device_ctx->work, do_set_multicast);
1556 1539
1557 spin_lock_init(&net_device_ctx->lock); 1540 spin_lock_init(&net_device_ctx->lock);
1558 INIT_LIST_HEAD(&net_device_ctx->reconfig_events); 1541 INIT_LIST_HEAD(&net_device_ctx->reconfig_events);
@@ -1622,7 +1605,6 @@ static int netvsc_remove(struct hv_device *dev)
1622 netif_device_detach(net); 1605 netif_device_detach(net);
1623 1606
1624 cancel_delayed_work_sync(&ndev_ctx->dwork); 1607 cancel_delayed_work_sync(&ndev_ctx->dwork);
1625 cancel_work_sync(&ndev_ctx->work);
1626 1608
1627 /* 1609 /*
1628 * Call to the vsc driver to let it know that the device is being 1610 * Call to the vsc driver to let it know that the device is being
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index f9d5b0b8209a..cb79cd081f42 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -31,6 +31,7 @@
31 31
32#include "hyperv_net.h" 32#include "hyperv_net.h"
33 33
34static void rndis_set_multicast(struct work_struct *w);
34 35
35#define RNDIS_EXT_LEN PAGE_SIZE 36#define RNDIS_EXT_LEN PAGE_SIZE
36struct rndis_request { 37struct rndis_request {
@@ -76,6 +77,7 @@ static struct rndis_device *get_rndis_device(void)
76 spin_lock_init(&device->request_lock); 77 spin_lock_init(&device->request_lock);
77 78
78 INIT_LIST_HEAD(&device->req_list); 79 INIT_LIST_HEAD(&device->req_list);
80 INIT_WORK(&device->mcast_work, rndis_set_multicast);
79 81
80 device->state = RNDIS_DEV_UNINITIALIZED; 82 device->state = RNDIS_DEV_UNINITIALIZED;
81 83
@@ -815,7 +817,8 @@ static int rndis_filter_query_link_speed(struct rndis_device *dev)
815 return ret; 817 return ret;
816} 818}
817 819
818int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter) 820static int rndis_filter_set_packet_filter(struct rndis_device *dev,
821 u32 new_filter)
819{ 822{
820 struct rndis_request *request; 823 struct rndis_request *request;
821 struct rndis_set_request *set; 824 struct rndis_set_request *set;
@@ -846,6 +849,28 @@ int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter)
846 return ret; 849 return ret;
847} 850}
848 851
852static void rndis_set_multicast(struct work_struct *w)
853{
854 struct rndis_device *rdev
855 = container_of(w, struct rndis_device, mcast_work);
856
857 if (rdev->ndev->flags & IFF_PROMISC)
858 rndis_filter_set_packet_filter(rdev,
859 NDIS_PACKET_TYPE_PROMISCUOUS);
860 else
861 rndis_filter_set_packet_filter(rdev,
862 NDIS_PACKET_TYPE_BROADCAST |
863 NDIS_PACKET_TYPE_ALL_MULTICAST |
864 NDIS_PACKET_TYPE_DIRECTED);
865}
866
867void rndis_filter_update(struct netvsc_device *nvdev)
868{
869 struct rndis_device *rdev = nvdev->extension;
870
871 schedule_work(&rdev->mcast_work);
872}
873
849static int rndis_filter_init_device(struct rndis_device *dev) 874static int rndis_filter_init_device(struct rndis_device *dev)
850{ 875{
851 struct rndis_request *request; 876 struct rndis_request *request;
@@ -973,6 +998,9 @@ static int rndis_filter_close_device(struct rndis_device *dev)
973 if (dev->state != RNDIS_DEV_DATAINITIALIZED) 998 if (dev->state != RNDIS_DEV_DATAINITIALIZED)
974 return 0; 999 return 0;
975 1000
1001 /* Make sure rndis_set_multicast doesn't re-enable filter! */
1002 cancel_work_sync(&dev->mcast_work);
1003
976 ret = rndis_filter_set_packet_filter(dev, 0); 1004 ret = rndis_filter_set_packet_filter(dev, 0);
977 if (ret == -ENODEV) 1005 if (ret == -ENODEV)
978 ret = 0; 1006 ret = 0;
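Net effect of the two files above: the multicast filter update moves out of the netvsc net_device context and into the RNDIS device as a work item, so the filter write can sleep while sending its control message, and close cancels the work before dropping the filter. A sketch of that deferred-update shape, with invented demo_* names:

#include <linux/workqueue.h>

struct demo_dev {
	struct work_struct mcast_work;
	/* ... control-message state ... */
};

static void demo_set_multicast(struct work_struct *w)
{
	struct demo_dev *dev = container_of(w, struct demo_dev, mcast_work);

	/* process context: safe to sleep while sending the filter update */
	(void)dev;
}

static void demo_dev_init(struct demo_dev *dev)
{
	INIT_WORK(&dev->mcast_work, demo_set_multicast);
}

static void demo_rx_mode(struct demo_dev *dev)
{
	schedule_work(&dev->mcast_work);	/* ndo_set_rx_mode runs atomically */
}

static void demo_close(struct demo_dev *dev)
{
	/* keep a late worker from re-enabling the filter after close */
	cancel_work_sync(&dev->mcast_work);
}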
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index 312fce7302d3..144ea5ae8ab4 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -207,7 +207,6 @@ static void ifb_dev_free(struct net_device *dev)
207 __skb_queue_purge(&txp->tq); 207 __skb_queue_purge(&txp->tq);
208 } 208 }
209 kfree(dp->tx_private); 209 kfree(dp->tx_private);
210 free_netdev(dev);
211} 210}
212 211
213static void ifb_setup(struct net_device *dev) 212static void ifb_setup(struct net_device *dev)
@@ -230,7 +229,8 @@ static void ifb_setup(struct net_device *dev)
230 dev->priv_flags &= ~IFF_TX_SKB_SHARING; 229 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
231 netif_keep_dst(dev); 230 netif_keep_dst(dev);
232 eth_hw_addr_random(dev); 231 eth_hw_addr_random(dev);
233 dev->destructor = ifb_dev_free; 232 dev->needs_free_netdev = true;
233 dev->priv_destructor = ifb_dev_free;
234} 234}
235 235
236static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev) 236static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev)
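ifb is one of many drivers in this diff converted from dev->destructor, which had to call free_netdev() itself, to the split needs_free_netdev/priv_destructor scheme, where the core frees the netdev after the driver's private cleanup has run. The generic shape, assuming a hypothetical demo driver:

#include <linux/netdevice.h>

struct demo_priv {
	struct pcpu_sw_netstats __percpu *stats;
};

static void demo_dev_free(struct net_device *dev)
{
	struct demo_priv *p = netdev_priv(dev);

	free_percpu(p->stats);		/* private state only, no free_netdev() */
}

static void demo_setup(struct net_device *dev)
{
	dev->needs_free_netdev = true;	/* the core frees the netdev itself */
	dev->priv_destructor = demo_dev_free;
}

With the single-callback scheme the driver owned the final free_netdev(), which made teardown ordering inconsistent between register-failure and unregister paths; the split appears to let the core use one release path for both.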
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 618ed88fad0f..7c7680c8f0e3 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -632,7 +632,7 @@ void ipvlan_link_setup(struct net_device *dev)
632 dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING); 632 dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
633 dev->priv_flags |= IFF_UNICAST_FLT | IFF_NO_QUEUE; 633 dev->priv_flags |= IFF_UNICAST_FLT | IFF_NO_QUEUE;
634 dev->netdev_ops = &ipvlan_netdev_ops; 634 dev->netdev_ops = &ipvlan_netdev_ops;
635 dev->destructor = free_netdev; 635 dev->needs_free_netdev = true;
636 dev->header_ops = &ipvlan_header_ops; 636 dev->header_ops = &ipvlan_header_ops;
637 dev->ethtool_ops = &ipvlan_ethtool_ops; 637 dev->ethtool_ops = &ipvlan_ethtool_ops;
638} 638}
diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c
index 8716b8c07feb..6f3c805f7211 100644
--- a/drivers/net/irda/irda-usb.c
+++ b/drivers/net/irda/irda-usb.c
@@ -1077,7 +1077,7 @@ static int stir421x_patch_device(struct irda_usb_cb *self)
1077 * are "42101001.sb" or "42101002.sb" 1077 * are "42101001.sb" or "42101002.sb"
1078 */ 1078 */
1079 sprintf(stir421x_fw_name, "4210%4X.sb", 1079 sprintf(stir421x_fw_name, "4210%4X.sb",
1080 self->usbdev->descriptor.bcdDevice); 1080 le16_to_cpu(self->usbdev->descriptor.bcdDevice));
1081 ret = request_firmware(&fw, stir421x_fw_name, &self->usbdev->dev); 1081 ret = request_firmware(&fw, stir421x_fw_name, &self->usbdev->dev);
1082 if (ret < 0) 1082 if (ret < 0)
1083 return ret; 1083 return ret;
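The irda-usb fix above is a plain byte-order bug: bcdDevice, like every multi-byte USB descriptor field, is little-endian on the wire, so it must pass through le16_to_cpu() before being formatted, or big-endian hosts request a firmware file that does not exist. In sketch form (function name and %04X format are illustrative, not the driver's):

#include <linux/usb.h>

static void demo_fw_name(struct usb_device *udev, char *name, size_t len)
{
	/* bcdDevice is annotated __le16; sparse flags unconverted use */
	u16 bcd = le16_to_cpu(udev->descriptor.bcdDevice);

	snprintf(name, len, "4210%04X.sb", bcd);
}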
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 224f65cb576b..30612497643c 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -159,7 +159,6 @@ static void loopback_dev_free(struct net_device *dev)
159{ 159{
160 dev_net(dev)->loopback_dev = NULL; 160 dev_net(dev)->loopback_dev = NULL;
161 free_percpu(dev->lstats); 161 free_percpu(dev->lstats);
162 free_netdev(dev);
163} 162}
164 163
165static const struct net_device_ops loopback_ops = { 164static const struct net_device_ops loopback_ops = {
@@ -196,7 +195,8 @@ static void loopback_setup(struct net_device *dev)
196 dev->ethtool_ops = &loopback_ethtool_ops; 195 dev->ethtool_ops = &loopback_ethtool_ops;
197 dev->header_ops = &eth_header_ops; 196 dev->header_ops = &eth_header_ops;
198 dev->netdev_ops = &loopback_ops; 197 dev->netdev_ops = &loopback_ops;
199 dev->destructor = loopback_dev_free; 198 dev->needs_free_netdev = true;
199 dev->priv_destructor = loopback_dev_free;
200} 200}
201 201
202/* Setup and register the loopback device. */ 202/* Setup and register the loopback device. */
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index cdc347be68f2..79411675f0e6 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -2996,7 +2996,6 @@ static void macsec_free_netdev(struct net_device *dev)
2996 free_percpu(macsec->secy.tx_sc.stats); 2996 free_percpu(macsec->secy.tx_sc.stats);
2997 2997
2998 dev_put(real_dev); 2998 dev_put(real_dev);
2999 free_netdev(dev);
3000} 2999}
3001 3000
3002static void macsec_setup(struct net_device *dev) 3001static void macsec_setup(struct net_device *dev)
@@ -3006,7 +3005,8 @@ static void macsec_setup(struct net_device *dev)
3006 dev->max_mtu = ETH_MAX_MTU; 3005 dev->max_mtu = ETH_MAX_MTU;
3007 dev->priv_flags |= IFF_NO_QUEUE; 3006 dev->priv_flags |= IFF_NO_QUEUE;
3008 dev->netdev_ops = &macsec_netdev_ops; 3007 dev->netdev_ops = &macsec_netdev_ops;
3009 dev->destructor = macsec_free_netdev; 3008 dev->needs_free_netdev = true;
3009 dev->priv_destructor = macsec_free_netdev;
3010 SET_NETDEV_DEVTYPE(dev, &macsec_type); 3010 SET_NETDEV_DEVTYPE(dev, &macsec_type);
3011 3011
3012 eth_zero_addr(dev->broadcast); 3012 eth_zero_addr(dev->broadcast);
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index b34eaaae03fd..72b801803aa4 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -39,16 +39,20 @@
39#define MACVLAN_HASH_SIZE (1<<MACVLAN_HASH_BITS) 39#define MACVLAN_HASH_SIZE (1<<MACVLAN_HASH_BITS)
40#define MACVLAN_BC_QUEUE_LEN 1000 40#define MACVLAN_BC_QUEUE_LEN 1000
41 41
42#define MACVLAN_F_PASSTHRU 1
43#define MACVLAN_F_ADDRCHANGE 2
44
42struct macvlan_port { 45struct macvlan_port {
43 struct net_device *dev; 46 struct net_device *dev;
44 struct hlist_head vlan_hash[MACVLAN_HASH_SIZE]; 47 struct hlist_head vlan_hash[MACVLAN_HASH_SIZE];
45 struct list_head vlans; 48 struct list_head vlans;
46 struct sk_buff_head bc_queue; 49 struct sk_buff_head bc_queue;
47 struct work_struct bc_work; 50 struct work_struct bc_work;
48 bool passthru; 51 u32 flags;
49 int count; 52 int count;
50 struct hlist_head vlan_source_hash[MACVLAN_HASH_SIZE]; 53 struct hlist_head vlan_source_hash[MACVLAN_HASH_SIZE];
51 DECLARE_BITMAP(mc_filter, MACVLAN_MC_FILTER_SZ); 54 DECLARE_BITMAP(mc_filter, MACVLAN_MC_FILTER_SZ);
55 unsigned char perm_addr[ETH_ALEN];
52}; 56};
53 57
54struct macvlan_source_entry { 58struct macvlan_source_entry {
@@ -66,6 +70,31 @@ struct macvlan_skb_cb {
66 70
67static void macvlan_port_destroy(struct net_device *dev); 71static void macvlan_port_destroy(struct net_device *dev);
68 72
73static inline bool macvlan_passthru(const struct macvlan_port *port)
74{
75 return port->flags & MACVLAN_F_PASSTHRU;
76}
77
78static inline void macvlan_set_passthru(struct macvlan_port *port)
79{
80 port->flags |= MACVLAN_F_PASSTHRU;
81}
82
83static inline bool macvlan_addr_change(const struct macvlan_port *port)
84{
85 return port->flags & MACVLAN_F_ADDRCHANGE;
86}
87
88static inline void macvlan_set_addr_change(struct macvlan_port *port)
89{
90 port->flags |= MACVLAN_F_ADDRCHANGE;
91}
92
93static inline void macvlan_clear_addr_change(struct macvlan_port *port)
94{
95 port->flags &= ~MACVLAN_F_ADDRCHANGE;
96}
97
69/* Hash Ethernet address */ 98/* Hash Ethernet address */
70static u32 macvlan_eth_hash(const unsigned char *addr) 99static u32 macvlan_eth_hash(const unsigned char *addr)
71{ 100{
@@ -181,11 +210,12 @@ static void macvlan_hash_change_addr(struct macvlan_dev *vlan,
181static bool macvlan_addr_busy(const struct macvlan_port *port, 210static bool macvlan_addr_busy(const struct macvlan_port *port,
182 const unsigned char *addr) 211 const unsigned char *addr)
183{ 212{
184 /* Test to see if the specified multicast address is 213 /* Test to see if the specified address is
185 * currently in use by the underlying device or 214 * currently in use by the underlying device or
186 * another macvlan. 215 * another macvlan.
187 */ 216 */
188 if (ether_addr_equal_64bits(port->dev->dev_addr, addr)) 217 if (!macvlan_passthru(port) && !macvlan_addr_change(port) &&
218 ether_addr_equal_64bits(port->dev->dev_addr, addr))
189 return true; 219 return true;
190 220
191 if (macvlan_hash_lookup(port, addr)) 221 if (macvlan_hash_lookup(port, addr))
@@ -445,7 +475,7 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
445 } 475 }
446 476
447 macvlan_forward_source(skb, port, eth->h_source); 477 macvlan_forward_source(skb, port, eth->h_source);
448 if (port->passthru) 478 if (macvlan_passthru(port))
449 vlan = list_first_or_null_rcu(&port->vlans, 479 vlan = list_first_or_null_rcu(&port->vlans,
450 struct macvlan_dev, list); 480 struct macvlan_dev, list);
451 else 481 else
@@ -574,7 +604,7 @@ static int macvlan_open(struct net_device *dev)
574 struct net_device *lowerdev = vlan->lowerdev; 604 struct net_device *lowerdev = vlan->lowerdev;
575 int err; 605 int err;
576 606
577 if (vlan->port->passthru) { 607 if (macvlan_passthru(vlan->port)) {
578 if (!(vlan->flags & MACVLAN_FLAG_NOPROMISC)) { 608 if (!(vlan->flags & MACVLAN_FLAG_NOPROMISC)) {
579 err = dev_set_promiscuity(lowerdev, 1); 609 err = dev_set_promiscuity(lowerdev, 1);
580 if (err < 0) 610 if (err < 0)
@@ -649,7 +679,7 @@ static int macvlan_stop(struct net_device *dev)
649 dev_uc_unsync(lowerdev, dev); 679 dev_uc_unsync(lowerdev, dev);
650 dev_mc_unsync(lowerdev, dev); 680 dev_mc_unsync(lowerdev, dev);
651 681
652 if (vlan->port->passthru) { 682 if (macvlan_passthru(vlan->port)) {
653 if (!(vlan->flags & MACVLAN_FLAG_NOPROMISC)) 683 if (!(vlan->flags & MACVLAN_FLAG_NOPROMISC))
654 dev_set_promiscuity(lowerdev, -1); 684 dev_set_promiscuity(lowerdev, -1);
655 goto hash_del; 685 goto hash_del;
@@ -672,6 +702,7 @@ static int macvlan_sync_address(struct net_device *dev, unsigned char *addr)
672{ 702{
673 struct macvlan_dev *vlan = netdev_priv(dev); 703 struct macvlan_dev *vlan = netdev_priv(dev);
674 struct net_device *lowerdev = vlan->lowerdev; 704 struct net_device *lowerdev = vlan->lowerdev;
705 struct macvlan_port *port = vlan->port;
675 int err; 706 int err;
676 707
677 if (!(dev->flags & IFF_UP)) { 708 if (!(dev->flags & IFF_UP)) {
@@ -682,7 +713,7 @@ static int macvlan_sync_address(struct net_device *dev, unsigned char *addr)
682 if (macvlan_addr_busy(vlan->port, addr)) 713 if (macvlan_addr_busy(vlan->port, addr))
683 return -EBUSY; 714 return -EBUSY;
684 715
685 if (!vlan->port->passthru) { 716 if (!macvlan_passthru(port)) {
686 err = dev_uc_add(lowerdev, addr); 717 err = dev_uc_add(lowerdev, addr);
687 if (err) 718 if (err)
688 return err; 719 return err;
@@ -692,6 +723,15 @@ static int macvlan_sync_address(struct net_device *dev, unsigned char *addr)
692 723
693 macvlan_hash_change_addr(vlan, addr); 724 macvlan_hash_change_addr(vlan, addr);
694 } 725 }
726 if (macvlan_passthru(port) && !macvlan_addr_change(port)) {
727 /* Since addr_change isn't set, we are here due to lower
728 * device change. Save the lower-dev address so we can
729 * restore it later.
730 */
731 ether_addr_copy(vlan->port->perm_addr,
732 lowerdev->dev_addr);
733 }
734 macvlan_clear_addr_change(port);
695 return 0; 735 return 0;
696} 736}
697 737
@@ -703,7 +743,12 @@ static int macvlan_set_mac_address(struct net_device *dev, void *p)
703 if (!is_valid_ether_addr(addr->sa_data)) 743 if (!is_valid_ether_addr(addr->sa_data))
704 return -EADDRNOTAVAIL; 744 return -EADDRNOTAVAIL;
705 745
746 /* If the addresses are the same, this is a no-op */
747 if (ether_addr_equal(dev->dev_addr, addr->sa_data))
748 return 0;
749
706 if (vlan->mode == MACVLAN_MODE_PASSTHRU) { 750 if (vlan->mode == MACVLAN_MODE_PASSTHRU) {
751 macvlan_set_addr_change(vlan->port);
707 dev_set_mac_address(vlan->lowerdev, addr); 752 dev_set_mac_address(vlan->lowerdev, addr);
708 return 0; 753 return 0;
709 } 754 }
@@ -789,10 +834,12 @@ static int macvlan_change_mtu(struct net_device *dev, int new_mtu)
789 */ 834 */
790static struct lock_class_key macvlan_netdev_addr_lock_key; 835static struct lock_class_key macvlan_netdev_addr_lock_key;
791 836
792#define ALWAYS_ON_FEATURES \ 837#define ALWAYS_ON_OFFLOADS \
793 (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE | NETIF_F_LLTX | \ 838 (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE | \
794 NETIF_F_GSO_ROBUST) 839 NETIF_F_GSO_ROBUST)
795 840
841#define ALWAYS_ON_FEATURES (ALWAYS_ON_OFFLOADS | NETIF_F_LLTX)
842
796#define MACVLAN_FEATURES \ 843#define MACVLAN_FEATURES \
797 (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \ 844 (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
798 NETIF_F_GSO | NETIF_F_TSO | NETIF_F_UFO | NETIF_F_LRO | \ 845 NETIF_F_GSO | NETIF_F_TSO | NETIF_F_UFO | NETIF_F_LRO | \
@@ -827,6 +874,7 @@ static int macvlan_init(struct net_device *dev)
827 dev->features |= ALWAYS_ON_FEATURES; 874 dev->features |= ALWAYS_ON_FEATURES;
828 dev->hw_features |= NETIF_F_LRO; 875 dev->hw_features |= NETIF_F_LRO;
829 dev->vlan_features = lowerdev->vlan_features & MACVLAN_FEATURES; 876 dev->vlan_features = lowerdev->vlan_features & MACVLAN_FEATURES;
877 dev->vlan_features |= ALWAYS_ON_OFFLOADS;
830 dev->gso_max_size = lowerdev->gso_max_size; 878 dev->gso_max_size = lowerdev->gso_max_size;
831 dev->gso_max_segs = lowerdev->gso_max_segs; 879 dev->gso_max_segs = lowerdev->gso_max_segs;
832 dev->hard_header_len = lowerdev->hard_header_len; 880 dev->hard_header_len = lowerdev->hard_header_len;
@@ -925,7 +973,7 @@ static int macvlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
925 /* Support unicast filter only on passthru devices. 973 /* Support unicast filter only on passthru devices.
926 * Multicast filter should be allowed on all devices. 974 * Multicast filter should be allowed on all devices.
927 */ 975 */
928 if (!vlan->port->passthru && is_unicast_ether_addr(addr)) 976 if (!macvlan_passthru(vlan->port) && is_unicast_ether_addr(addr))
929 return -EOPNOTSUPP; 977 return -EOPNOTSUPP;
930 978
931 if (flags & NLM_F_REPLACE) 979 if (flags & NLM_F_REPLACE)
@@ -949,7 +997,7 @@ static int macvlan_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
949 /* Support unicast filter only on passthru devices. 997 /* Support unicast filter only on passthru devices.
950 * Multicast filter should be allowed on all devices. 998 * Multicast filter should be allowed on all devices.
951 */ 999 */
952 if (!vlan->port->passthru && is_unicast_ether_addr(addr)) 1000 if (!macvlan_passthru(vlan->port) && is_unicast_ether_addr(addr))
953 return -EOPNOTSUPP; 1001 return -EOPNOTSUPP;
954 1002
955 if (is_unicast_ether_addr(addr)) 1003 if (is_unicast_ether_addr(addr))
@@ -1089,7 +1137,7 @@ void macvlan_common_setup(struct net_device *dev)
1089 netif_keep_dst(dev); 1137 netif_keep_dst(dev);
1090 dev->priv_flags |= IFF_UNICAST_FLT; 1138 dev->priv_flags |= IFF_UNICAST_FLT;
1091 dev->netdev_ops = &macvlan_netdev_ops; 1139 dev->netdev_ops = &macvlan_netdev_ops;
1092 dev->destructor = free_netdev; 1140 dev->needs_free_netdev = true;
1093 dev->header_ops = &macvlan_hard_header_ops; 1141 dev->header_ops = &macvlan_hard_header_ops;
1094 dev->ethtool_ops = &macvlan_ethtool_ops; 1142 dev->ethtool_ops = &macvlan_ethtool_ops;
1095} 1143}
@@ -1117,8 +1165,8 @@ static int macvlan_port_create(struct net_device *dev)
1117 if (port == NULL) 1165 if (port == NULL)
1118 return -ENOMEM; 1166 return -ENOMEM;
1119 1167
1120 port->passthru = false;
1121 port->dev = dev; 1168 port->dev = dev;
1169 ether_addr_copy(port->perm_addr, dev->dev_addr);
1122 INIT_LIST_HEAD(&port->vlans); 1170 INIT_LIST_HEAD(&port->vlans);
1123 for (i = 0; i < MACVLAN_HASH_SIZE; i++) 1171 for (i = 0; i < MACVLAN_HASH_SIZE; i++)
1124 INIT_HLIST_HEAD(&port->vlan_hash[i]); 1172 INIT_HLIST_HEAD(&port->vlan_hash[i]);
@@ -1158,6 +1206,18 @@ static void macvlan_port_destroy(struct net_device *dev)
1158 kfree_skb(skb); 1206 kfree_skb(skb);
1159 } 1207 }
1160 1208
1209 /* If the lower device address has been changed by passthru
1210 * macvlan, put it back.
1211 */
1212 if (macvlan_passthru(port) &&
1213 !ether_addr_equal(port->dev->dev_addr, port->perm_addr)) {
1214 struct sockaddr sa;
1215
1216 sa.sa_family = port->dev->type;
1217 memcpy(&sa.sa_data, port->perm_addr, port->dev->addr_len);
1218 dev_set_mac_address(port->dev, &sa);
1219 }
1220
1161 kfree(port); 1221 kfree(port);
1162} 1222}
1163 1223
@@ -1323,7 +1383,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
1323 port = macvlan_port_get_rtnl(lowerdev); 1383 port = macvlan_port_get_rtnl(lowerdev);
1324 1384
1325 /* Only 1 macvlan device can be created in passthru mode */ 1385 /* Only 1 macvlan device can be created in passthru mode */
1326 if (port->passthru) { 1386 if (macvlan_passthru(port)) {
1327 /* The macvlan port must be not created this time, 1387 /* The macvlan port must be not created this time,
1328 * still goto destroy_macvlan_port for readability. 1388 * still goto destroy_macvlan_port for readability.
1329 */ 1389 */
@@ -1349,7 +1409,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
1349 err = -EINVAL; 1409 err = -EINVAL;
1350 goto destroy_macvlan_port; 1410 goto destroy_macvlan_port;
1351 } 1411 }
1352 port->passthru = true; 1412 macvlan_set_passthru(port);
1353 eth_hw_addr_inherit(dev, lowerdev); 1413 eth_hw_addr_inherit(dev, lowerdev);
1354 } 1414 }
1355 1415
@@ -1431,7 +1491,7 @@ static int macvlan_changelink(struct net_device *dev,
1431 if (data && data[IFLA_MACVLAN_FLAGS]) { 1491 if (data && data[IFLA_MACVLAN_FLAGS]) {
1432 __u16 flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]); 1492 __u16 flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]);
1433 bool promisc = (flags ^ vlan->flags) & MACVLAN_FLAG_NOPROMISC; 1493 bool promisc = (flags ^ vlan->flags) & MACVLAN_FLAG_NOPROMISC;
1434 if (vlan->port->passthru && promisc) { 1494 if (macvlan_passthru(vlan->port) && promisc) {
1435 int err; 1495 int err;
1436 1496
1437 if (flags & MACVLAN_FLAG_NOPROMISC) 1497 if (flags & MACVLAN_FLAG_NOPROMISC)
@@ -1594,7 +1654,7 @@ static int macvlan_device_event(struct notifier_block *unused,
1594 } 1654 }
1595 break; 1655 break;
1596 case NETDEV_CHANGEADDR: 1656 case NETDEV_CHANGEADDR:
1597 if (!port->passthru) 1657 if (!macvlan_passthru(port))
1598 return NOTIFY_DONE; 1658 return NOTIFY_DONE;
1599 1659
1600 vlan = list_first_entry_or_null(&port->vlans, 1660 vlan = list_first_entry_or_null(&port->vlans,
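The macvlan refactor above replaces the port's bool passthru with a flags word plus tiny inline accessors, which is what makes room for the new ADDRCHANGE bit (used to tell a deliberate MAC change on a passthru device apart from a lower-device change) without widening the struct for every new boolean. The generic bool-to-flags shape, names invented:

#include <linux/bits.h>
#include <linux/types.h>

#define DEMO_F_PASSTHRU		BIT(0)
#define DEMO_F_ADDRCHANGE	BIT(1)

struct demo_port {
	u32 flags;
};

static inline bool demo_passthru(const struct demo_port *p)
{
	return p->flags & DEMO_F_PASSTHRU;
}

static inline void demo_set_addr_change(struct demo_port *p)
{
	p->flags |= DEMO_F_ADDRCHANGE;
}

static inline void demo_clear_addr_change(struct demo_port *p)
{
	p->flags &= ~DEMO_F_ADDRCHANGE;
}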
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index 06ee6395117f..0e27920c2b6b 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -358,7 +358,7 @@ static ssize_t enabled_store(struct config_item *item,
358 if (err) 358 if (err)
359 goto out_unlock; 359 goto out_unlock;
360 360
361 pr_info("netconsole: network logging started\n"); 361 pr_info("network logging started\n");
362 } else { /* false */ 362 } else { /* false */
363 /* We need to disable the netconsole before cleaning it up 363 /* We need to disable the netconsole before cleaning it up
364 * otherwise we might end up in write_msg() with 364 * otherwise we might end up in write_msg() with
diff --git a/drivers/net/nlmon.c b/drivers/net/nlmon.c
index b91603835d26..c4b3362da4a2 100644
--- a/drivers/net/nlmon.c
+++ b/drivers/net/nlmon.c
@@ -113,7 +113,7 @@ static void nlmon_setup(struct net_device *dev)
113 113
114 dev->netdev_ops = &nlmon_ops; 114 dev->netdev_ops = &nlmon_ops;
115 dev->ethtool_ops = &nlmon_ethtool_ops; 115 dev->ethtool_ops = &nlmon_ethtool_ops;
116 dev->destructor = free_netdev; 116 dev->needs_free_netdev = true;
117 117
118 dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | 118 dev->features = NETIF_F_SG | NETIF_F_FRAGLIST |
119 NETIF_F_HIGHDMA | NETIF_F_LLTX; 119 NETIF_F_HIGHDMA | NETIF_F_LLTX;
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 60ffc9da6a28..3ab6c58d4be6 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -108,7 +108,7 @@ config MDIO_MOXART
108config MDIO_OCTEON 108config MDIO_OCTEON
109 tristate "Octeon and some ThunderX SOCs MDIO buses" 109 tristate "Octeon and some ThunderX SOCs MDIO buses"
110 depends on 64BIT 110 depends on 64BIT
111 depends on HAS_IOMEM 111 depends on HAS_IOMEM && OF_MDIO
112 select MDIO_CAVIUM 112 select MDIO_CAVIUM
113 help 113 help
114 This module provides a driver for the Octeon and ThunderX MDIO 114 This module provides a driver for the Octeon and ThunderX MDIO
@@ -127,6 +127,7 @@ config MDIO_THUNDER
127 tristate "ThunderX SOCs MDIO buses" 127 tristate "ThunderX SOCs MDIO buses"
128 depends on 64BIT 128 depends on 64BIT
129 depends on PCI 129 depends on PCI
130 depends on !(MDIO_DEVICE=y && PHYLIB=m)
130 select MDIO_CAVIUM 131 select MDIO_CAVIUM
131 help 132 help
132 This driver supports the MDIO interfaces found on Cavium 133 This driver supports the MDIO interfaces found on Cavium
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index ed0d10f54f26..c3065236ffcc 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -908,7 +908,7 @@ static void decode_txts(struct dp83640_private *dp83640,
908 if (overflow) { 908 if (overflow) {
909 pr_debug("tx timestamp queue overflow, count %d\n", overflow); 909 pr_debug("tx timestamp queue overflow, count %d\n", overflow);
910 while (skb) { 910 while (skb) {
911 skb_complete_tx_timestamp(skb, NULL); 911 kfree_skb(skb);
912 skb = skb_dequeue(&dp83640->tx_queue); 912 skb = skb_dequeue(&dp83640->tx_queue);
913 } 913 }
914 return; 914 return;
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 272b051a0199..57297ba23987 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -255,34 +255,6 @@ static int marvell_config_aneg(struct phy_device *phydev)
255{ 255{
256 int err; 256 int err;
257 257
258 /* The Marvell PHY has an errata which requires
259 * that certain registers get written in order
260 * to restart autonegotiation */
261 err = phy_write(phydev, MII_BMCR, BMCR_RESET);
262
263 if (err < 0)
264 return err;
265
266 err = phy_write(phydev, 0x1d, 0x1f);
267 if (err < 0)
268 return err;
269
270 err = phy_write(phydev, 0x1e, 0x200c);
271 if (err < 0)
272 return err;
273
274 err = phy_write(phydev, 0x1d, 0x5);
275 if (err < 0)
276 return err;
277
278 err = phy_write(phydev, 0x1e, 0);
279 if (err < 0)
280 return err;
281
282 err = phy_write(phydev, 0x1e, 0x100);
283 if (err < 0)
284 return err;
285
286 err = marvell_set_polarity(phydev, phydev->mdix_ctrl); 258 err = marvell_set_polarity(phydev, phydev->mdix_ctrl);
287 if (err < 0) 259 if (err < 0)
288 return err; 260 return err;
@@ -316,6 +288,42 @@ static int marvell_config_aneg(struct phy_device *phydev)
316 return 0; 288 return 0;
317} 289}
318 290
291static int m88e1101_config_aneg(struct phy_device *phydev)
292{
293 int err;
294
295 /* This Marvell PHY has an errata which requires
296 * that certain registers get written in order
297 * to restart autonegotiation
298 */
299 err = phy_write(phydev, MII_BMCR, BMCR_RESET);
300
301 if (err < 0)
302 return err;
303
304 err = phy_write(phydev, 0x1d, 0x1f);
305 if (err < 0)
306 return err;
307
308 err = phy_write(phydev, 0x1e, 0x200c);
309 if (err < 0)
310 return err;
311
312 err = phy_write(phydev, 0x1d, 0x5);
313 if (err < 0)
314 return err;
315
316 err = phy_write(phydev, 0x1e, 0);
317 if (err < 0)
318 return err;
319
320 err = phy_write(phydev, 0x1e, 0x100);
321 if (err < 0)
322 return err;
323
324 return marvell_config_aneg(phydev);
325}
326
319static int m88e1111_config_aneg(struct phy_device *phydev) 327static int m88e1111_config_aneg(struct phy_device *phydev)
320{ 328{
321 int err; 329 int err;
@@ -1119,8 +1127,6 @@ static int marvell_read_status_page(struct phy_device *phydev, int page)
1119 if (adv < 0) 1127 if (adv < 0)
1120 return adv; 1128 return adv;
1121 1129
1122 lpa &= adv;
1123
1124 if (status & MII_M1011_PHY_STATUS_FULLDUPLEX) 1130 if (status & MII_M1011_PHY_STATUS_FULLDUPLEX)
1125 phydev->duplex = DUPLEX_FULL; 1131 phydev->duplex = DUPLEX_FULL;
1126 else 1132 else
@@ -1892,7 +1898,7 @@ static struct phy_driver marvell_drivers[] = {
1892 .flags = PHY_HAS_INTERRUPT, 1898 .flags = PHY_HAS_INTERRUPT,
1893 .probe = marvell_probe, 1899 .probe = marvell_probe,
1894 .config_init = &marvell_config_init, 1900 .config_init = &marvell_config_init,
1895 .config_aneg = &marvell_config_aneg, 1901 .config_aneg = &m88e1101_config_aneg,
1896 .read_status = &genphy_read_status, 1902 .read_status = &genphy_read_status,
1897 .ack_interrupt = &marvell_ack_interrupt, 1903 .ack_interrupt = &marvell_ack_interrupt,
1898 .config_intr = &marvell_config_intr, 1904 .config_intr = &marvell_config_intr,
diff --git a/drivers/net/phy/mdio-mux.c b/drivers/net/phy/mdio-mux.c
index 963838d4fac1..599ce24c514f 100644
--- a/drivers/net/phy/mdio-mux.c
+++ b/drivers/net/phy/mdio-mux.c
@@ -122,10 +122,9 @@ int mdio_mux_init(struct device *dev,
122 pb = devm_kzalloc(dev, sizeof(*pb), GFP_KERNEL); 122 pb = devm_kzalloc(dev, sizeof(*pb), GFP_KERNEL);
123 if (pb == NULL) { 123 if (pb == NULL) {
124 ret_val = -ENOMEM; 124 ret_val = -ENOMEM;
125 goto err_parent_bus; 125 goto err_pb_kz;
126 } 126 }
127 127
128
129 pb->switch_data = data; 128 pb->switch_data = data;
130 pb->switch_fn = switch_fn; 129 pb->switch_fn = switch_fn;
131 pb->current_child = -1; 130 pb->current_child = -1;
@@ -154,6 +153,7 @@ int mdio_mux_init(struct device *dev,
154 cb->mii_bus = mdiobus_alloc(); 153 cb->mii_bus = mdiobus_alloc();
155 if (!cb->mii_bus) { 154 if (!cb->mii_bus) {
156 ret_val = -ENOMEM; 155 ret_val = -ENOMEM;
156 devm_kfree(dev, cb);
157 of_node_put(child_bus_node); 157 of_node_put(child_bus_node);
158 break; 158 break;
159 } 159 }
@@ -170,7 +170,6 @@ int mdio_mux_init(struct device *dev,
170 mdiobus_free(cb->mii_bus); 170 mdiobus_free(cb->mii_bus);
171 devm_kfree(dev, cb); 171 devm_kfree(dev, cb);
172 } else { 172 } else {
173 of_node_get(child_bus_node);
174 cb->next = pb->children; 173 cb->next = pb->children;
175 pb->children = cb; 174 pb->children = cb;
176 } 175 }
@@ -181,9 +180,11 @@ int mdio_mux_init(struct device *dev,
181 return 0; 180 return 0;
182 } 181 }
183 182
183 devm_kfree(dev, pb);
184err_pb_kz:
184 /* balance the reference of_mdio_find_bus() took */ 185 /* balance the reference of_mdio_find_bus() took */
185 put_device(&pb->mii_bus->dev); 186 if (!mux_bus)
186 187 put_device(&parent_bus->dev);
187err_parent_bus: 188err_parent_bus:
188 of_node_put(parent_bus_node); 189 of_node_put(parent_bus_node);
189 return ret_val; 190 return ret_val;
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index a898e5c4ef1b..f99c21f78b63 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -364,9 +364,6 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner)
364 364
365 mutex_init(&bus->mdio_lock); 365 mutex_init(&bus->mdio_lock);
366 366
367 if (bus->reset)
368 bus->reset(bus);
369
370 /* de-assert bus level PHY GPIO resets */ 367 /* de-assert bus level PHY GPIO resets */
371 if (bus->num_reset_gpios > 0) { 368 if (bus->num_reset_gpios > 0) {
372 bus->reset_gpiod = devm_kcalloc(&bus->dev, 369 bus->reset_gpiod = devm_kcalloc(&bus->dev,
@@ -396,6 +393,9 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner)
396 } 393 }
397 } 394 }
398 395
396 if (bus->reset)
397 bus->reset(bus);
398
399 for (i = 0; i < PHY_MAX_ADDR; i++) { 399 for (i = 0; i < PHY_MAX_ADDR; i++) {
400 if ((bus->phy_mask & (1 << i)) == 0) { 400 if ((bus->phy_mask & (1 << i)) == 0) {
401 struct phy_device *phydev; 401 struct phy_device *phydev;
@@ -658,6 +658,18 @@ static int mdio_bus_match(struct device *dev, struct device_driver *drv)
658 return 0; 658 return 0;
659} 659}
660 660
661static int mdio_uevent(struct device *dev, struct kobj_uevent_env *env)
662{
663 int rc;
664
665 /* Some devices have extra OF data and an OF-style MODALIAS */
666 rc = of_device_uevent_modalias(dev, env);
667 if (rc != -ENODEV)
668 return rc;
669
670 return 0;
671}
672
661#ifdef CONFIG_PM 673#ifdef CONFIG_PM
662static int mdio_bus_suspend(struct device *dev) 674static int mdio_bus_suspend(struct device *dev)
663{ 675{
@@ -708,6 +720,7 @@ static const struct dev_pm_ops mdio_bus_pm_ops = {
708struct bus_type mdio_bus_type = { 720struct bus_type mdio_bus_type = {
709 .name = "mdio_bus", 721 .name = "mdio_bus",
710 .match = mdio_bus_match, 722 .match = mdio_bus_match,
723 .uevent = mdio_uevent,
711 .pm = MDIO_BUS_PM_OPS, 724 .pm = MDIO_BUS_PM_OPS,
712}; 725};
713EXPORT_SYMBOL(mdio_bus_type); 726EXPORT_SYMBOL(mdio_bus_type);
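The new mdio_uevent() hook above follows the usual bus pattern for OF-described devices: emit an OF-style MODALIAS when the device has an OF node, and treat -ENODEV (no OF node) as "nothing to add" rather than as an error, so module autoloading works in both cases. Condensed:

#include <linux/of_device.h>

static int demo_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	int rc = of_device_uevent_modalias(dev, env);

	return rc == -ENODEV ? 0 : rc;	/* no OF node: fall back silently */
}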
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 6a5fd18f062c..8b2038844ba9 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -268,23 +268,12 @@ out:
268 return ret; 268 return ret;
269} 269}
270 270
271static int kszphy_config_init(struct phy_device *phydev) 271/* Some config bits need to be set again on resume, handle them here. */
272static int kszphy_config_reset(struct phy_device *phydev)
272{ 273{
273 struct kszphy_priv *priv = phydev->priv; 274 struct kszphy_priv *priv = phydev->priv;
274 const struct kszphy_type *type;
275 int ret; 275 int ret;
276 276
277 if (!priv)
278 return 0;
279
280 type = priv->type;
281
282 if (type->has_broadcast_disable)
283 kszphy_broadcast_disable(phydev);
284
285 if (type->has_nand_tree_disable)
286 kszphy_nand_tree_disable(phydev);
287
288 if (priv->rmii_ref_clk_sel) { 277 if (priv->rmii_ref_clk_sel) {
289 ret = kszphy_rmii_clk_sel(phydev, priv->rmii_ref_clk_sel_val); 278 ret = kszphy_rmii_clk_sel(phydev, priv->rmii_ref_clk_sel_val);
290 if (ret) { 279 if (ret) {
@@ -295,11 +284,30 @@ static int kszphy_config_init(struct phy_device *phydev)
295 } 284 }
296 285
297 if (priv->led_mode >= 0) 286 if (priv->led_mode >= 0)
298 kszphy_setup_led(phydev, type->led_mode_reg, priv->led_mode); 287 kszphy_setup_led(phydev, priv->type->led_mode_reg, priv->led_mode);
299 288
300 return 0; 289 return 0;
301} 290}
302 291
292static int kszphy_config_init(struct phy_device *phydev)
293{
294 struct kszphy_priv *priv = phydev->priv;
295 const struct kszphy_type *type;
296
297 if (!priv)
298 return 0;
299
300 type = priv->type;
301
302 if (type->has_broadcast_disable)
303 kszphy_broadcast_disable(phydev);
304
305 if (type->has_nand_tree_disable)
306 kszphy_nand_tree_disable(phydev);
307
308 return kszphy_config_reset(phydev);
309}
310
303static int ksz8041_config_init(struct phy_device *phydev) 311static int ksz8041_config_init(struct phy_device *phydev)
304{ 312{
305 struct device_node *of_node = phydev->mdio.dev.of_node; 313 struct device_node *of_node = phydev->mdio.dev.of_node;
@@ -611,6 +619,8 @@ static int ksz9031_read_status(struct phy_device *phydev)
611 if ((regval & 0xFF) == 0xFF) { 619 if ((regval & 0xFF) == 0xFF) {
612 phy_init_hw(phydev); 620 phy_init_hw(phydev);
613 phydev->link = 0; 621 phydev->link = 0;
622 if (phydev->drv->config_intr && phy_interrupt_is_valid(phydev))
623 phydev->drv->config_intr(phydev);
614 } 624 }
615 625
616 return 0; 626 return 0;
@@ -700,8 +710,14 @@ static int kszphy_suspend(struct phy_device *phydev)
700 710
701static int kszphy_resume(struct phy_device *phydev) 711static int kszphy_resume(struct phy_device *phydev)
702{ 712{
713 int ret;
714
703 genphy_resume(phydev); 715 genphy_resume(phydev);
704 716
717 ret = kszphy_config_reset(phydev);
718 if (ret)
719 return ret;
720
705 /* Enable PHY Interrupts */ 721 /* Enable PHY Interrupts */
706 if (phy_interrupt_is_valid(phydev)) { 722 if (phy_interrupt_is_valid(phydev)) {
707 phydev->interrupts = PHY_INTERRUPT_ENABLED; 723 phydev->interrupts = PHY_INTERRUPT_ENABLED;
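The micrel change is a classic suspend/resume split: settings the PHY loses across a suspend are factored into a _config_reset() helper that both config_init() and resume() call, while one-time setup stays in config_init(). Shape of the split, with hypothetical demo_* names:

#include <linux/phy.h>

static int demo_config_reset(struct phy_device *phydev)
{
	/* reapply settings the PHY forgets across suspend, e.g. the
	 * RMII reference-clock select and LED mode in the diff above */
	return 0;
}

static int demo_config_init(struct phy_device *phydev)
{
	/* one-time setup that survives suspend stays here */
	return demo_config_reset(phydev);
}

static int demo_resume(struct phy_device *phydev)
{
	int ret;

	genphy_resume(phydev);

	ret = demo_config_reset(phydev);	/* volatile bits are gone again */
	if (ret)
		return ret;

	/* re-enable interrupts and the like afterwards */
	return 0;
}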
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 82ab8fb82587..eebb0e1c70ff 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -54,6 +54,8 @@ static const char *phy_speed_to_str(int speed)
54 return "5Gbps"; 54 return "5Gbps";
55 case SPEED_10000: 55 case SPEED_10000:
56 return "10Gbps"; 56 return "10Gbps";
57 case SPEED_14000:
58 return "14Gbps";
57 case SPEED_20000: 59 case SPEED_20000:
58 return "20Gbps"; 60 return "20Gbps";
59 case SPEED_25000: 61 case SPEED_25000:
@@ -241,7 +243,7 @@ static const struct phy_setting settings[] = {
241 * phy_lookup_setting - lookup a PHY setting 243 * phy_lookup_setting - lookup a PHY setting
242 * @speed: speed to match 244 * @speed: speed to match
243 * @duplex: duplex to match 245 * @duplex: duplex to match
244 * @feature: allowed link modes 246 * @features: allowed link modes
245 * @exact: an exact match is required 247 * @exact: an exact match is required
246 * 248 *
247 * Search the settings array for a setting that matches the speed and 249 * Search the settings array for a setting that matches the speed and
diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
index 1da31dc47f86..74b907206aa7 100644
--- a/drivers/net/slip/slip.c
+++ b/drivers/net/slip/slip.c
@@ -629,7 +629,7 @@ static void sl_uninit(struct net_device *dev)
629static void sl_free_netdev(struct net_device *dev) 629static void sl_free_netdev(struct net_device *dev)
630{ 630{
631 int i = dev->base_addr; 631 int i = dev->base_addr;
632 free_netdev(dev); 632
633 slip_devs[i] = NULL; 633 slip_devs[i] = NULL;
634} 634}
635 635
@@ -651,7 +651,8 @@ static const struct net_device_ops sl_netdev_ops = {
651static void sl_setup(struct net_device *dev) 651static void sl_setup(struct net_device *dev)
652{ 652{
653 dev->netdev_ops = &sl_netdev_ops; 653 dev->netdev_ops = &sl_netdev_ops;
654 dev->destructor = sl_free_netdev; 654 dev->needs_free_netdev = true;
655 dev->priv_destructor = sl_free_netdev;
655 656
656 dev->hard_header_len = 0; 657 dev->hard_header_len = 0;
657 dev->addr_len = 0; 658 dev->addr_len = 0;
@@ -1369,8 +1370,6 @@ static void __exit slip_exit(void)
1369 if (sl->tty) { 1370 if (sl->tty) {
1370 printk(KERN_ERR "%s: tty discipline still running\n", 1371 printk(KERN_ERR "%s: tty discipline still running\n",
1371 dev->name); 1372 dev->name);
1372 /* Intentionally leak the control block. */
1373 dev->destructor = NULL;
1374 } 1373 }
1375 1374
1376 unregister_netdev(dev); 1375 unregister_netdev(dev);
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 6c5d5ef46f75..fba8c136aa7c 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1643,7 +1643,6 @@ static void team_destructor(struct net_device *dev)
1643 struct team *team = netdev_priv(dev); 1643 struct team *team = netdev_priv(dev);
1644 1644
1645 free_percpu(team->pcpu_stats); 1645 free_percpu(team->pcpu_stats);
1646 free_netdev(dev);
1647} 1646}
1648 1647
1649static int team_open(struct net_device *dev) 1648static int team_open(struct net_device *dev)
@@ -2079,7 +2078,8 @@ static void team_setup(struct net_device *dev)
2079 2078
2080 dev->netdev_ops = &team_netdev_ops; 2079 dev->netdev_ops = &team_netdev_ops;
2081 dev->ethtool_ops = &team_ethtool_ops; 2080 dev->ethtool_ops = &team_ethtool_ops;
2082 dev->destructor = team_destructor; 2081 dev->needs_free_netdev = true;
2082 dev->priv_destructor = team_destructor;
2083 dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING); 2083 dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
2084 dev->priv_flags |= IFF_NO_QUEUE; 2084 dev->priv_flags |= IFF_NO_QUEUE;
2085 dev->priv_flags |= IFF_TEAM; 2085 dev->priv_flags |= IFF_TEAM;
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index bbd707b9ef7a..9ee7d4275640 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1560,7 +1560,6 @@ static void tun_free_netdev(struct net_device *dev)
1560 free_percpu(tun->pcpu_stats); 1560 free_percpu(tun->pcpu_stats);
1561 tun_flow_uninit(tun); 1561 tun_flow_uninit(tun);
1562 security_tun_dev_free_security(tun->security); 1562 security_tun_dev_free_security(tun->security);
1563 free_netdev(dev);
1564} 1563}
1565 1564
1566static void tun_setup(struct net_device *dev) 1565static void tun_setup(struct net_device *dev)
@@ -1571,7 +1570,8 @@ static void tun_setup(struct net_device *dev)
1571 tun->group = INVALID_GID; 1570 tun->group = INVALID_GID;
1572 1571
1573 dev->ethtool_ops = &tun_ethtool_ops; 1572 dev->ethtool_ops = &tun_ethtool_ops;
1574 dev->destructor = tun_free_netdev; 1573 dev->needs_free_netdev = true;
1574 dev->priv_destructor = tun_free_netdev;
1575 /* We prefer our own queue length */ 1575 /* We prefer our own queue length */
1576 dev->tx_queue_len = TUN_READQ_SIZE; 1576 dev->tx_queue_len = TUN_READQ_SIZE;
1577} 1577}
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index 51cf60092a18..4037ab27734a 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -1722,6 +1722,18 @@ static const struct driver_info lenovo_info = {
1722 .tx_fixup = ax88179_tx_fixup, 1722 .tx_fixup = ax88179_tx_fixup,
1723}; 1723};
1724 1724
1725static const struct driver_info belkin_info = {
1726 .description = "Belkin USB Ethernet Adapter",
1727 .bind = ax88179_bind,
1728 .unbind = ax88179_unbind,
1729 .status = ax88179_status,
1730 .link_reset = ax88179_link_reset,
1731 .reset = ax88179_reset,
1732 .flags = FLAG_ETHER | FLAG_FRAMING_AX,
1733 .rx_fixup = ax88179_rx_fixup,
1734 .tx_fixup = ax88179_tx_fixup,
1735};
1736
1725static const struct usb_device_id products[] = { 1737static const struct usb_device_id products[] = {
1726{ 1738{
1727 /* ASIX AX88179 10/100/1000 */ 1739 /* ASIX AX88179 10/100/1000 */
@@ -1751,6 +1763,10 @@ static const struct usb_device_id products[] = {
1751 /* Lenovo OneLinkDock Gigabit LAN */ 1763 /* Lenovo OneLinkDock Gigabit LAN */
1752 USB_DEVICE(0x17ef, 0x304b), 1764 USB_DEVICE(0x17ef, 0x304b),
1753 .driver_info = (unsigned long)&lenovo_info, 1765 .driver_info = (unsigned long)&lenovo_info,
1766}, {
1767 /* Belkin B2B128 USB 3.0 Hub + Gigabit Ethernet Adapter */
1768 USB_DEVICE(0x050d, 0x0128),
1769 .driver_info = (unsigned long)&belkin_info,
1754}, 1770},
1755 { }, 1771 { },
1756}; 1772};
diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c
index eb52de8205f0..c7a350bbaaa7 100644
--- a/drivers/net/usb/cdc-phonet.c
+++ b/drivers/net/usb/cdc-phonet.c
@@ -298,7 +298,7 @@ static void usbpn_setup(struct net_device *dev)
298 dev->addr_len = 1; 298 dev->addr_len = 1;
299 dev->tx_queue_len = 3; 299 dev->tx_queue_len = 3;
300 300
301 dev->destructor = free_netdev; 301 dev->needs_free_netdev = true;
302} 302}
303 303
304/* 304/*
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index f3ae88fdf332..8ab281b478f2 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -310,6 +310,26 @@ skip:
310 return -ENODEV; 310 return -ENODEV;
311 } 311 }
312 312
313 return 0;
314
315bad_desc:
316 dev_info(&dev->udev->dev, "bad CDC descriptors\n");
317 return -ENODEV;
318}
319EXPORT_SYMBOL_GPL(usbnet_generic_cdc_bind);
320
321
322/* like usbnet_generic_cdc_bind() but handles filter initialization
323 * correctly
324 */
325int usbnet_ether_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
326{
327 int rv;
328
329 rv = usbnet_generic_cdc_bind(dev, intf);
330 if (rv < 0)
331 goto bail_out;
332
313 /* Some devices don't initialise properly. In particular 333 /* Some devices don't initialise properly. In particular
314 * the packet filter is not reset. There are devices that 334 * the packet filter is not reset. There are devices that
315 * don't do reset all the way. So the packet filter should 335 * don't do reset all the way. So the packet filter should
@@ -317,13 +337,10 @@ skip:
317 */ 337 */
318 usbnet_cdc_update_filter(dev); 338 usbnet_cdc_update_filter(dev);
319 339
320 return 0; 340bail_out:
321 341 return rv;
322bad_desc:
323 dev_info(&dev->udev->dev, "bad CDC descriptors\n");
324 return -ENODEV;
325} 342}
326EXPORT_SYMBOL_GPL(usbnet_generic_cdc_bind); 343EXPORT_SYMBOL_GPL(usbnet_ether_cdc_bind);
327 344
328void usbnet_cdc_unbind(struct usbnet *dev, struct usb_interface *intf) 345void usbnet_cdc_unbind(struct usbnet *dev, struct usb_interface *intf)
329{ 346{
@@ -417,7 +434,7 @@ int usbnet_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
417 BUILD_BUG_ON((sizeof(((struct usbnet *)0)->data) 434 BUILD_BUG_ON((sizeof(((struct usbnet *)0)->data)
418 < sizeof(struct cdc_state))); 435 < sizeof(struct cdc_state)));
419 436
420 status = usbnet_generic_cdc_bind(dev, intf); 437 status = usbnet_ether_cdc_bind(dev, intf);
421 if (status < 0) 438 if (status < 0)
422 return status; 439 return status;
423 440
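The cdc_ether rework keeps usbnet_generic_cdc_bind() free of Ethernet-only side effects and adds usbnet_ether_cdc_bind() as a thin wrapper that also resets the packet filter, since some devices come up with stale filter state. Assuming the two helpers shown in the diff, the wrapper reduces to roughly:

#include <linux/usb/usbnet.h>

static int demo_ether_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
{
	int rv = usbnet_generic_cdc_bind(dev, intf);	/* descriptor parsing */

	if (rv < 0)
		return rv;

	usbnet_cdc_update_filter(dev);	/* some devices never reset this */
	return 0;
}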
diff --git a/drivers/net/usb/ch9200.c b/drivers/net/usb/ch9200.c
index c4f1c363e24b..9df3c1ffff35 100644
--- a/drivers/net/usb/ch9200.c
+++ b/drivers/net/usb/ch9200.c
@@ -310,8 +310,8 @@ static int get_mac_address(struct usbnet *dev, unsigned char *data)
310 int rd_mac_len = 0; 310 int rd_mac_len = 0;
311 311
312 netdev_dbg(dev->net, "get_mac_address:\n\tusbnet VID:%0x PID:%0x\n", 312 netdev_dbg(dev->net, "get_mac_address:\n\tusbnet VID:%0x PID:%0x\n",
313 dev->udev->descriptor.idVendor, 313 le16_to_cpu(dev->udev->descriptor.idVendor),
314 dev->udev->descriptor.idProduct); 314 le16_to_cpu(dev->udev->descriptor.idProduct));
315 315
316 memset(mac_addr, 0, sizeof(mac_addr)); 316 memset(mac_addr, 0, sizeof(mac_addr));
317 rd_mac_len = control_read(dev, REQUEST_READ, 0, 317 rd_mac_len = control_read(dev, REQUEST_READ, 0,
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index d7165767ca9d..32a22f4e8356 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -123,7 +123,7 @@ static void qmimux_setup(struct net_device *dev)
123 dev->addr_len = 0; 123 dev->addr_len = 0;
124 dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; 124 dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
125 dev->netdev_ops = &qmimux_netdev_ops; 125 dev->netdev_ops = &qmimux_netdev_ops;
126 dev->destructor = free_netdev; 126 dev->needs_free_netdev = true;
127} 127}
128 128
129static struct net_device *qmimux_find_dev(struct usbnet *dev, u8 mux_id) 129static struct net_device *qmimux_find_dev(struct usbnet *dev, u8 mux_id)
@@ -1192,10 +1192,14 @@ static const struct usb_device_id products[] = {
1192 {QMI_FIXED_INTF(0x1199, 0x9056, 8)}, /* Sierra Wireless Modem */ 1192 {QMI_FIXED_INTF(0x1199, 0x9056, 8)}, /* Sierra Wireless Modem */
1193 {QMI_FIXED_INTF(0x1199, 0x9057, 8)}, 1193 {QMI_FIXED_INTF(0x1199, 0x9057, 8)},
1194 {QMI_FIXED_INTF(0x1199, 0x9061, 8)}, /* Sierra Wireless Modem */ 1194 {QMI_FIXED_INTF(0x1199, 0x9061, 8)}, /* Sierra Wireless Modem */
1195 {QMI_FIXED_INTF(0x1199, 0x9063, 8)}, /* Sierra Wireless EM7305 */
1196 {QMI_FIXED_INTF(0x1199, 0x9063, 10)}, /* Sierra Wireless EM7305 */
1195 {QMI_FIXED_INTF(0x1199, 0x9071, 8)}, /* Sierra Wireless MC74xx */ 1197 {QMI_FIXED_INTF(0x1199, 0x9071, 8)}, /* Sierra Wireless MC74xx */
1196 {QMI_FIXED_INTF(0x1199, 0x9071, 10)}, /* Sierra Wireless MC74xx */ 1198 {QMI_FIXED_INTF(0x1199, 0x9071, 10)}, /* Sierra Wireless MC74xx */
1197 {QMI_FIXED_INTF(0x1199, 0x9079, 8)}, /* Sierra Wireless EM74xx */ 1199 {QMI_FIXED_INTF(0x1199, 0x9079, 8)}, /* Sierra Wireless EM74xx */
1198 {QMI_FIXED_INTF(0x1199, 0x9079, 10)}, /* Sierra Wireless EM74xx */ 1200 {QMI_FIXED_INTF(0x1199, 0x9079, 10)}, /* Sierra Wireless EM74xx */
1201 {QMI_FIXED_INTF(0x1199, 0x907b, 8)}, /* Sierra Wireless EM74xx */
1202 {QMI_FIXED_INTF(0x1199, 0x907b, 10)}, /* Sierra Wireless EM74xx */
1199 {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */ 1203 {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
1200 {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */ 1204 {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */
1201 {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ 1205 {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
@@ -1204,6 +1208,8 @@ static const struct usb_device_id products[] = {
1204 {QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */ 1208 {QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */
1205 {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ 1209 {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
1206 {QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)}, /* Telit LE920, LE920A4 */ 1210 {QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)}, /* Telit LE920, LE920A4 */
1211 {QMI_FIXED_INTF(0x1c9e, 0x9801, 3)}, /* Telewell TW-3G HSPA+ */
1212 {QMI_FIXED_INTF(0x1c9e, 0x9803, 4)}, /* Telewell TW-3G HSPA+ */
1207 {QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)}, /* XS Stick W100-2 from 4G Systems */ 1213 {QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)}, /* XS Stick W100-2 from 4G Systems */
1208 {QMI_FIXED_INTF(0x0b3c, 0xc000, 4)}, /* Olivetti Olicard 100 */ 1214 {QMI_FIXED_INTF(0x0b3c, 0xc000, 4)}, /* Olivetti Olicard 100 */
1209 {QMI_FIXED_INTF(0x0b3c, 0xc001, 4)}, /* Olivetti Olicard 120 */ 1215 {QMI_FIXED_INTF(0x0b3c, 0xc001, 4)}, /* Olivetti Olicard 120 */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index ddc62cb69be8..1a419a45e2a2 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -4368,6 +4368,8 @@ static u8 rtl_get_version(struct usb_interface *intf)
4368 break; 4368 break;
4369 } 4369 }
4370 4370
4371 dev_dbg(&intf->dev, "Detected version 0x%04x\n", version);
4372
4371 return version; 4373 return version;
4372} 4374}
4373 4375
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 765400b62168..2dfca96a63b6 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -681,7 +681,7 @@ static int smsc95xx_set_features(struct net_device *netdev,
681 if (ret < 0) 681 if (ret < 0)
682 return ret; 682 return ret;
683 683
684 if (features & NETIF_F_HW_CSUM) 684 if (features & NETIF_F_IP_CSUM)
685 read_buf |= Tx_COE_EN_; 685 read_buf |= Tx_COE_EN_;
686 else 686 else
687 read_buf &= ~Tx_COE_EN_; 687 read_buf &= ~Tx_COE_EN_;
@@ -1279,12 +1279,19 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
1279 1279
1280 spin_lock_init(&pdata->mac_cr_lock); 1280 spin_lock_init(&pdata->mac_cr_lock);
1281 1281
1282 /* LAN95xx devices do not alter the computed checksum of 0 to 0xffff.
1283 * RFC 2460, ipv6 UDP calculated checksum yields a result of zero must
1284 * be changed to 0xffff. RFC 768, ipv4 UDP computed checksum is zero,
1285 * it is transmitted as all ones. The zero transmitted checksum means
1286 * transmitter generated no checksum. Hence, enable csum offload only
1287 * for ipv4 packets.
1288 */
1282 if (DEFAULT_TX_CSUM_ENABLE) 1289 if (DEFAULT_TX_CSUM_ENABLE)
1283 dev->net->features |= NETIF_F_HW_CSUM; 1290 dev->net->features |= NETIF_F_IP_CSUM;
1284 if (DEFAULT_RX_CSUM_ENABLE) 1291 if (DEFAULT_RX_CSUM_ENABLE)
1285 dev->net->features |= NETIF_F_RXCSUM; 1292 dev->net->features |= NETIF_F_RXCSUM;
1286 1293
1287 dev->net->hw_features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM; 1294 dev->net->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
1288 1295
1289 smsc95xx_init_mac_address(dev); 1296 smsc95xx_init_mac_address(dev);
1290 1297
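The smsc95xx comment above hinges on one's-complement arithmetic: a computed UDP checksum of zero must be transmitted as 0xffff, because zero on the wire means "no checksum" for UDP over IPv4 (RFC 768), and the LAN95xx hardware skips that substitution. Hence the switch to IPv4-only NETIF_F_IP_CSUM instead of NETIF_F_HW_CSUM. A small userspace illustration of the folding and the substitution:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

static uint16_t csum16(const uint8_t *p, size_t len)
{
	uint32_t sum = 0;

	while (len > 1) {
		sum += (uint32_t)p[0] << 8 | p[1];
		p += 2;
		len -= 2;
	}
	if (len)
		sum += (uint32_t)p[0] << 8;
	while (sum >> 16)			/* fold carries back in */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

static uint16_t udp_wire_csum(const uint8_t *p, size_t len)
{
	uint16_t c = csum16(p, len);

	return c ? c : 0xffff;	/* 0 on the wire would mean "no checksum" */
}

int main(void)
{
	/* bytes chosen so the one's-complement sum is 0xffff,
	 * i.e. the computed checksum comes out as zero */
	uint8_t buf[2] = { 0xff, 0xff };

	printf("raw 0x%04x, wire 0x%04x\n", csum16(buf, 2), udp_wire_csum(buf, 2));
	return 0;
}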
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 38f0f03a29c8..364fa9d11d1a 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -222,7 +222,6 @@ static int veth_dev_init(struct net_device *dev)
222static void veth_dev_free(struct net_device *dev) 222static void veth_dev_free(struct net_device *dev)
223{ 223{
224 free_percpu(dev->vstats); 224 free_percpu(dev->vstats);
225 free_netdev(dev);
226} 225}
227 226
228#ifdef CONFIG_NET_POLL_CONTROLLER 227#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -317,7 +316,8 @@ static void veth_setup(struct net_device *dev)
317 NETIF_F_HW_VLAN_STAG_TX | 316 NETIF_F_HW_VLAN_STAG_TX |
318 NETIF_F_HW_VLAN_CTAG_RX | 317 NETIF_F_HW_VLAN_CTAG_RX |
319 NETIF_F_HW_VLAN_STAG_RX); 318 NETIF_F_HW_VLAN_STAG_RX);
320 dev->destructor = veth_dev_free; 319 dev->needs_free_netdev = true;
320 dev->priv_destructor = veth_dev_free;
321 dev->max_mtu = ETH_MAX_MTU; 321 dev->max_mtu = ETH_MAX_MTU;
322 322
323 dev->hw_features = VETH_FEATURES; 323 dev->hw_features = VETH_FEATURES;
@@ -383,7 +383,7 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
383 tbp = tb; 383 tbp = tb;
384 } 384 }
385 385
386 if (tbp[IFLA_IFNAME]) { 386 if (ifmp && tbp[IFLA_IFNAME]) {
387 nla_strlcpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ); 387 nla_strlcpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ);
388 name_assign_type = NET_NAME_USER; 388 name_assign_type = NET_NAME_USER;
389 } else { 389 } else {
@@ -402,7 +402,7 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
402 return PTR_ERR(peer); 402 return PTR_ERR(peer);
403 } 403 }
404 404
405 if (tbp[IFLA_ADDRESS] == NULL) 405 if (!ifmp || !tbp[IFLA_ADDRESS])
406 eth_hw_addr_random(peer); 406 eth_hw_addr_random(peer);
407 407
408 if (ifmp && (dev->ifindex != 0)) 408 if (ifmp && (dev->ifindex != 0))
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 9320d96a1632..143d8a95a60d 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -869,7 +869,7 @@ static unsigned int get_mergeable_buf_len(struct receive_queue *rq,
869 unsigned int len; 869 unsigned int len;
870 870
871 len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len), 871 len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len),
872 rq->min_buf_len - hdr_len, PAGE_SIZE - hdr_len); 872 rq->min_buf_len, PAGE_SIZE - hdr_len);
873 return ALIGN(len, L1_CACHE_BYTES); 873 return ALIGN(len, L1_CACHE_BYTES);
874} 874}
875 875
@@ -1797,6 +1797,7 @@ static void virtnet_freeze_down(struct virtio_device *vdev)
1797 flush_work(&vi->config_work); 1797 flush_work(&vi->config_work);
1798 1798
1799 netif_device_detach(vi->dev); 1799 netif_device_detach(vi->dev);
1800 netif_tx_disable(vi->dev);
1800 cancel_delayed_work_sync(&vi->refill); 1801 cancel_delayed_work_sync(&vi->refill);
1801 1802
1802 if (netif_running(vi->dev)) { 1803 if (netif_running(vi->dev)) {
@@ -1989,6 +1990,7 @@ static const struct net_device_ops virtnet_netdev = {
1989 .ndo_poll_controller = virtnet_netpoll, 1990 .ndo_poll_controller = virtnet_netpoll,
1990#endif 1991#endif
1991 .ndo_xdp = virtnet_xdp, 1992 .ndo_xdp = virtnet_xdp,
1993 .ndo_features_check = passthru_features_check,
1992}; 1994};
1993 1995
1994static void virtnet_config_changed_work(struct work_struct *work) 1996static void virtnet_config_changed_work(struct work_struct *work)
@@ -2143,7 +2145,8 @@ static unsigned int mergeable_min_buf_len(struct virtnet_info *vi, struct virtqu
2143 unsigned int buf_len = hdr_len + ETH_HLEN + VLAN_HLEN + packet_len; 2145 unsigned int buf_len = hdr_len + ETH_HLEN + VLAN_HLEN + packet_len;
2144 unsigned int min_buf_len = DIV_ROUND_UP(buf_len, rq_size); 2146 unsigned int min_buf_len = DIV_ROUND_UP(buf_len, rq_size);
2145 2147
2146 return max(min_buf_len, hdr_len); 2148 return max(max(min_buf_len, hdr_len) - hdr_len,
2149 (unsigned int)GOOD_PACKET_LEN);
2147} 2150}
2148 2151
2149static int virtnet_find_vqs(struct virtnet_info *vi) 2152static int virtnet_find_vqs(struct virtnet_info *vi)
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 25bc764ae7dc..d1c7029ded7c 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -2962,6 +2962,11 @@ vmxnet3_force_close(struct vmxnet3_adapter *adapter)
2962 /* we need to enable NAPI, otherwise dev_close will deadlock */ 2962 /* we need to enable NAPI, otherwise dev_close will deadlock */
2963 for (i = 0; i < adapter->num_rx_queues; i++) 2963 for (i = 0; i < adapter->num_rx_queues; i++)
2964 napi_enable(&adapter->rx_queue[i].napi); 2964 napi_enable(&adapter->rx_queue[i].napi);
2965 /*
2966 * Need to clear the quiesce bit to ensure that vmxnet3_close
2967 * can quiesce the device properly
2968 */
2969 clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
2965 dev_close(adapter->netdev); 2970 dev_close(adapter->netdev);
2966} 2971}
2967 2972
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index ceda5861da78..022c0b5f9844 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -36,12 +36,14 @@
36#include <net/addrconf.h> 36#include <net/addrconf.h>
37#include <net/l3mdev.h> 37#include <net/l3mdev.h>
38#include <net/fib_rules.h> 38#include <net/fib_rules.h>
39#include <net/netns/generic.h>
39 40
40#define DRV_NAME "vrf" 41#define DRV_NAME "vrf"
41#define DRV_VERSION "1.0" 42#define DRV_VERSION "1.0"
42 43
43#define FIB_RULE_PREF 1000 /* default preference for FIB rules */ 44#define FIB_RULE_PREF 1000 /* default preference for FIB rules */
44static bool add_fib_rules = true; 45
46static unsigned int vrf_net_id;
45 47
46struct net_vrf { 48struct net_vrf {
47 struct rtable __rcu *rth; 49 struct rtable __rcu *rth;
@@ -989,6 +991,7 @@ static u32 vrf_fib_table(const struct net_device *dev)
989 991
990static int vrf_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb) 992static int vrf_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
991{ 993{
994 kfree_skb(skb);
992 return 0; 995 return 0;
993} 996}
994 997
@@ -998,7 +1001,7 @@ static struct sk_buff *vrf_rcv_nfhook(u8 pf, unsigned int hook,
998{ 1001{
999 struct net *net = dev_net(dev); 1002 struct net *net = dev_net(dev);
1000 1003
1001 if (NF_HOOK(pf, hook, net, NULL, skb, dev, NULL, vrf_rcv_finish) < 0) 1004 if (nf_hook(pf, hook, net, NULL, skb, dev, NULL, vrf_rcv_finish) != 1)
1002 skb = NULL; /* kfree_skb(skb) handled by nf code */ 1005 skb = NULL; /* kfree_skb(skb) handled by nf code */
1003 1006
1004 return skb; 1007 return skb;
@@ -1347,7 +1350,7 @@ static void vrf_setup(struct net_device *dev)
1347 dev->netdev_ops = &vrf_netdev_ops; 1350 dev->netdev_ops = &vrf_netdev_ops;
1348 dev->l3mdev_ops = &vrf_l3mdev_ops; 1351 dev->l3mdev_ops = &vrf_l3mdev_ops;
1349 dev->ethtool_ops = &vrf_ethtool_ops; 1352 dev->ethtool_ops = &vrf_ethtool_ops;
1350 dev->destructor = free_netdev; 1353 dev->needs_free_netdev = true;
1351 1354
1352 /* Fill in device structure with ethernet-generic values. */ 1355 /* Fill in device structure with ethernet-generic values. */
1353 eth_hw_addr_random(dev); 1356 eth_hw_addr_random(dev);
@@ -1393,6 +1396,8 @@ static int vrf_newlink(struct net *src_net, struct net_device *dev,
1393 struct nlattr *tb[], struct nlattr *data[]) 1396 struct nlattr *tb[], struct nlattr *data[])
1394{ 1397{
1395 struct net_vrf *vrf = netdev_priv(dev); 1398 struct net_vrf *vrf = netdev_priv(dev);
1399 bool *add_fib_rules;
1400 struct net *net;
1396 int err; 1401 int err;
1397 1402
1398 if (!data || !data[IFLA_VRF_TABLE]) 1403 if (!data || !data[IFLA_VRF_TABLE])
@@ -1408,13 +1413,15 @@ static int vrf_newlink(struct net *src_net, struct net_device *dev,
1408 if (err) 1413 if (err)
1409 goto out; 1414 goto out;
1410 1415
1411 if (add_fib_rules) { 1416 net = dev_net(dev);
1417 add_fib_rules = net_generic(net, vrf_net_id);
1418 if (*add_fib_rules) {
1412 err = vrf_add_fib_rules(dev); 1419 err = vrf_add_fib_rules(dev);
1413 if (err) { 1420 if (err) {
1414 unregister_netdevice(dev); 1421 unregister_netdevice(dev);
1415 goto out; 1422 goto out;
1416 } 1423 }
1417 add_fib_rules = false; 1424 *add_fib_rules = false;
1418 } 1425 }
1419 1426
1420out: 1427out:
@@ -1497,16 +1504,38 @@ static struct notifier_block vrf_notifier_block __read_mostly = {
1497 .notifier_call = vrf_device_event, 1504 .notifier_call = vrf_device_event,
1498}; 1505};
1499 1506
1507/* Initialize per network namespace state */
1508static int __net_init vrf_netns_init(struct net *net)
1509{
1510 bool *add_fib_rules = net_generic(net, vrf_net_id);
1511
1512 *add_fib_rules = true;
1513
1514 return 0;
1515}
1516
1517static struct pernet_operations vrf_net_ops __net_initdata = {
1518 .init = vrf_netns_init,
1519 .id = &vrf_net_id,
1520 .size = sizeof(bool),
1521};
1522
1500static int __init vrf_init_module(void) 1523static int __init vrf_init_module(void)
1501{ 1524{
1502 int rc; 1525 int rc;
1503 1526
1504 register_netdevice_notifier(&vrf_notifier_block); 1527 register_netdevice_notifier(&vrf_notifier_block);
1505 1528
1506 rc = rtnl_link_register(&vrf_link_ops); 1529 rc = register_pernet_subsys(&vrf_net_ops);
1507 if (rc < 0) 1530 if (rc < 0)
1508 goto error; 1531 goto error;
1509 1532
1533 rc = rtnl_link_register(&vrf_link_ops);
1534 if (rc < 0) {
1535 unregister_pernet_subsys(&vrf_net_ops);
1536 goto error;
1537 }
1538
1510 return 0; 1539 return 0;
1511 1540
1512error: 1541error:
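The vrf change converts a module-global add_fib_rules flag into per-network-namespace state, so the first VRF created in each netns installs the FIB rules for that netns. The boilerplate for this is small; a sketch with invented demo_* names:

#include <net/net_namespace.h>
#include <net/netns/generic.h>

static unsigned int demo_net_id;

struct demo_net {
	bool add_rules;
};

static int __net_init demo_netns_init(struct net *net)
{
	struct demo_net *dn = net_generic(net, demo_net_id);

	dn->add_rules = true;	/* first device in this netns adds rules */
	return 0;
}

static struct pernet_operations demo_net_ops = {
	.init = demo_netns_init,
	.id   = &demo_net_id,
	.size = sizeof(struct demo_net),
};

/* module init: register_pernet_subsys(&demo_net_ops);
 * module exit and error unwind: unregister_pernet_subsys(&demo_net_ops); */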
diff --git a/drivers/net/vsockmon.c b/drivers/net/vsockmon.c
index 7f0136f2dd9d..c28bdce14fd5 100644
--- a/drivers/net/vsockmon.c
+++ b/drivers/net/vsockmon.c
@@ -135,7 +135,7 @@ static void vsockmon_setup(struct net_device *dev)
 
 	dev->netdev_ops = &vsockmon_ops;
 	dev->ethtool_ops = &vsockmon_ethtool_ops;
-	dev->destructor = free_netdev;
+	dev->needs_free_netdev = true;
 
 	dev->features = NETIF_F_SG | NETIF_F_FRAGLIST |
 			NETIF_F_HIGHDMA | NETIF_F_LLTX;
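The dev->destructor conversions repeated throughout this patch all follow one pattern: instead of the driver pointing dev->destructor at free_netdev() and the core invoking it, the driver sets dev->needs_free_netdev and the core calls free_netdev() itself after unregistration; any driver-private teardown moves to dev->priv_destructor, which runs first (the brcmfmac hunk further down uses both). A sketch with hypothetical names:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

static void demo_priv_cleanup(struct net_device *dev)
{
	/* driver-private teardown; runs before the core frees the netdev */
}

static void demo_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->needs_free_netdev = true;		/* core calls free_netdev() */
	dev->priv_destructor = demo_priv_cleanup;	/* optional hook */
}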
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 328b4712683c..5fa798a5c9a6 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -59,6 +59,8 @@ static const u8 all_zeros_mac[ETH_ALEN + 2];
 
 static int vxlan_sock_add(struct vxlan_dev *vxlan);
 
+static void vxlan_vs_del_dev(struct vxlan_dev *vxlan);
+
 /* per-network namespace private data for this module */
 struct vxlan_net {
 	struct list_head vxlan_list;
@@ -740,6 +742,22 @@ static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f)
 	call_rcu(&f->rcu, vxlan_fdb_free);
 }
 
+static void vxlan_dst_free(struct rcu_head *head)
+{
+	struct vxlan_rdst *rd = container_of(head, struct vxlan_rdst, rcu);
+
+	dst_cache_destroy(&rd->dst_cache);
+	kfree(rd);
+}
+
+static void vxlan_fdb_dst_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f,
+				  struct vxlan_rdst *rd)
+{
+	list_del_rcu(&rd->list);
+	vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH);
+	call_rcu(&rd->rcu, vxlan_dst_free);
+}
+
 static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan,
 			   union vxlan_addr *ip, __be16 *port, __be32 *src_vni,
 			   __be32 *vni, u32 *ifindex)
@@ -864,9 +882,7 @@ static int __vxlan_fdb_delete(struct vxlan_dev *vxlan,
 	 * otherwise destroy the fdb entry
 	 */
 	if (rd && !list_is_singular(&f->remotes)) {
-		list_del_rcu(&rd->list);
-		vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH);
-		kfree_rcu(rd, rcu);
+		vxlan_fdb_dst_destroy(vxlan, f, rd);
 		goto out;
 	}
 
@@ -1067,6 +1083,8 @@ static void vxlan_sock_release(struct vxlan_dev *vxlan)
 	rcu_assign_pointer(vxlan->vn4_sock, NULL);
 	synchronize_net();
 
+	vxlan_vs_del_dev(vxlan);
+
 	if (__vxlan_sock_release_prep(sock4)) {
 		udp_tunnel_sock_release(sock4->sock);
 		kfree(sock4);
@@ -2342,6 +2360,15 @@ static void vxlan_cleanup(unsigned long arg)
 		mod_timer(&vxlan->age_timer, next_timer);
 }
 
+static void vxlan_vs_del_dev(struct vxlan_dev *vxlan)
+{
+	struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
+
+	spin_lock(&vn->sock_lock);
+	hlist_del_init_rcu(&vxlan->hlist);
+	spin_unlock(&vn->sock_lock);
+}
+
 static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan)
 {
 	struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
@@ -2584,7 +2611,7 @@ static void vxlan_setup(struct net_device *dev)
 	eth_hw_addr_random(dev);
 	ether_setup(dev);
 
-	dev->destructor = free_netdev;
+	dev->needs_free_netdev = true;
 	SET_NETDEV_DEVTYPE(dev, &vxlan_type);
 
 	dev->features |= NETIF_F_LLTX;
@@ -3286,15 +3313,9 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
 static void vxlan_dellink(struct net_device *dev, struct list_head *head)
 {
 	struct vxlan_dev *vxlan = netdev_priv(dev);
-	struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
 
 	vxlan_flush(vxlan, true);
 
-	spin_lock(&vn->sock_lock);
-	if (!hlist_unhashed(&vxlan->hlist))
-		hlist_del_rcu(&vxlan->hlist);
-	spin_unlock(&vn->sock_lock);
-
 	gro_cells_destroy(&vxlan->gro_cells);
 	list_del(&vxlan->next);
 	unregister_netdevice_queue(dev, head);
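The vxlan_dst_free()/vxlan_fdb_dst_destroy() pair added above replaces a plain kfree_rcu() because the remote-destination object now carries embedded state (rd->dst_cache) that needs its own destructor before the memory is returned. The general shape of that pattern, with illustrative demo_* names:

#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_rdst {
	struct list_head list;
	struct rcu_head rcu;
	/* embedded state that needs explicit teardown, like dst_cache */
};

static void demo_dst_free(struct rcu_head *head)
{
	struct demo_rdst *rd = container_of(head, struct demo_rdst, rcu);

	/* destroy embedded state here, then free the object; kfree_rcu()
	 * alone cannot run this extra step, hence call_rcu() + callback
	 */
	kfree(rd);
}

static void demo_dst_destroy(struct demo_rdst *rd)
{
	list_del_rcu(&rd->list);	/* readers may still walk the list */
	call_rcu(&rd->rcu, demo_dst_free);
}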
diff --git a/drivers/net/wan/dlci.c b/drivers/net/wan/dlci.c
index 65ee2a6f248c..a0d76f70c428 100644
--- a/drivers/net/wan/dlci.c
+++ b/drivers/net/wan/dlci.c
@@ -475,7 +475,7 @@ static void dlci_setup(struct net_device *dev)
 	dev->flags = 0;
 	dev->header_ops = &dlci_header_ops;
 	dev->netdev_ops = &dlci_netdev_ops;
-	dev->destructor = free_netdev;
+	dev->needs_free_netdev = true;
 
 	dlp->receive = dlci_receive;
 
diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c
index eb915281197e..78596e42a3f3 100644
--- a/drivers/net/wan/hdlc_fr.c
+++ b/drivers/net/wan/hdlc_fr.c
@@ -1106,7 +1106,7 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
 		return -EIO;
 	}
 
-	dev->destructor = free_netdev;
+	dev->needs_free_netdev = true;
 	*get_dev_p(pvc, type) = dev;
 	if (!used) {
 		state(hdlc)->dce_changed = 1;
diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
index 9df9ed62beff..63f749078a1f 100644
--- a/drivers/net/wan/lapbether.c
+++ b/drivers/net/wan/lapbether.c
@@ -306,7 +306,7 @@ static const struct net_device_ops lapbeth_netdev_ops = {
 static void lapbeth_setup(struct net_device *dev)
 {
 	dev->netdev_ops = &lapbeth_netdev_ops;
-	dev->destructor = free_netdev;
+	dev->needs_free_netdev = true;
 	dev->type = ARPHRD_X25;
 	dev->hard_header_len = 3;
 	dev->mtu = 1000;
diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c
index 91ee542de3d7..b90c77ef792e 100644
--- a/drivers/net/wireless/ath/ath6kl/main.c
+++ b/drivers/net/wireless/ath/ath6kl/main.c
@@ -1287,7 +1287,7 @@ void init_netdev(struct net_device *dev)
 	struct ath6kl *ar = ath6kl_priv(dev);
 
 	dev->netdev_ops = &ath6kl_netdev_ops;
-	dev->destructor = free_netdev;
+	dev->needs_free_netdev = true;
 	dev->watchdog_timeo = ATH6KL_TX_TIMEOUT;
 
 	dev->needed_headroom = ETH_HLEN;
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index d5e993dc9b23..517a315e259b 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -1271,6 +1271,8 @@ static int wcn36xx_remove(struct platform_device *pdev)
 	qcom_smem_state_put(wcn->tx_enable_state);
 	qcom_smem_state_put(wcn->tx_rings_empty_state);
 
+	rpmsg_destroy_ept(wcn->smd_channel);
+
 	iounmap(wcn->dxe_base);
 	iounmap(wcn->ccu_base);
 
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index cd1d6730eab7..617199c0e5a0 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -5225,7 +5225,6 @@ void brcmf_cfg80211_free_netdev(struct net_device *ndev)
 
 	if (vif)
 		brcmf_free_vif(vif);
-	free_netdev(ndev);
 }
 
 static bool brcmf_is_linkup(const struct brcmf_event_msg *e)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index a3d82368f1a9..511d190c6cca 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -624,7 +624,8 @@ struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bsscfgidx, s32 ifidx,
 	if (!ndev)
 		return ERR_PTR(-ENOMEM);
 
-	ndev->destructor = brcmf_cfg80211_free_netdev;
+	ndev->needs_free_netdev = true;
+	ndev->priv_destructor = brcmf_cfg80211_free_netdev;
 	ifp = netdev_priv(ndev);
 	ifp->ndev = ndev;
 	/* store mapping ifidx to bsscfgidx */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
index c7c1e9906500..d231042f19d6 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
@@ -442,7 +442,7 @@ struct brcmf_fw {
 	const char *nvram_name;
 	u16 domain_nr;
 	u16 bus_nr;
-	void (*done)(struct device *dev, const struct firmware *fw,
+	void (*done)(struct device *dev, int err, const struct firmware *fw,
 		     void *nvram_image, u32 nvram_len);
 };
 
@@ -477,52 +477,51 @@ static void brcmf_fw_request_nvram_done(const struct firmware *fw, void *ctx)
 	if (!nvram && !(fwctx->flags & BRCMF_FW_REQ_NV_OPTIONAL))
 		goto fail;
 
-	fwctx->done(fwctx->dev, fwctx->code, nvram, nvram_length);
+	fwctx->done(fwctx->dev, 0, fwctx->code, nvram, nvram_length);
 	kfree(fwctx);
 	return;
 
 fail:
 	brcmf_dbg(TRACE, "failed: dev=%s\n", dev_name(fwctx->dev));
 	release_firmware(fwctx->code);
-	device_release_driver(fwctx->dev);
+	fwctx->done(fwctx->dev, -ENOENT, NULL, NULL, 0);
 	kfree(fwctx);
 }
 
 static void brcmf_fw_request_code_done(const struct firmware *fw, void *ctx)
 {
 	struct brcmf_fw *fwctx = ctx;
-	int ret;
+	int ret = 0;
 
 	brcmf_dbg(TRACE, "enter: dev=%s\n", dev_name(fwctx->dev));
-	if (!fw)
+	if (!fw) {
+		ret = -ENOENT;
 		goto fail;
-
-	/* only requested code so done here */
-	if (!(fwctx->flags & BRCMF_FW_REQUEST_NVRAM)) {
-		fwctx->done(fwctx->dev, fw, NULL, 0);
-		kfree(fwctx);
-		return;
 	}
+	/* only requested code so done here */
+	if (!(fwctx->flags & BRCMF_FW_REQUEST_NVRAM))
+		goto done;
+
 	fwctx->code = fw;
 	ret = request_firmware_nowait(THIS_MODULE, true, fwctx->nvram_name,
 				      fwctx->dev, GFP_KERNEL, fwctx,
 				      brcmf_fw_request_nvram_done);
 
-	if (!ret)
-		return;
-
-	brcmf_fw_request_nvram_done(NULL, fwctx);
+	/* pass NULL to nvram callback for bcm47xx fallback */
+	if (ret)
+		brcmf_fw_request_nvram_done(NULL, fwctx);
 	return;
 
 fail:
 	brcmf_dbg(TRACE, "failed: dev=%s\n", dev_name(fwctx->dev));
-	device_release_driver(fwctx->dev);
+done:
+	fwctx->done(fwctx->dev, ret, fw, NULL, 0);
 	kfree(fwctx);
 }
 
 int brcmf_fw_get_firmwares_pcie(struct device *dev, u16 flags,
 				const char *code, const char *nvram,
-				void (*fw_cb)(struct device *dev,
+				void (*fw_cb)(struct device *dev, int err,
 					      const struct firmware *fw,
 					      void *nvram_image, u32 nvram_len),
 				u16 domain_nr, u16 bus_nr)
@@ -555,7 +554,7 @@ int brcmf_fw_get_firmwares_pcie(struct device *dev, u16 flags,
 
 int brcmf_fw_get_firmwares(struct device *dev, u16 flags,
 			   const char *code, const char *nvram,
-			   void (*fw_cb)(struct device *dev,
+			   void (*fw_cb)(struct device *dev, int err,
 					 const struct firmware *fw,
 					 void *nvram_image, u32 nvram_len))
 {
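The firmware.c changes thread an explicit error code through the completion callback, so a failed request is reported to the bus layer instead of the common code calling device_release_driver() itself. The resulting contract is that the callback must check the error before touching the firmware pointer; a small userspace sketch of that contract (types and names are stand-ins, not the driver's):

#include <stddef.h>
#include <stdio.h>

struct firmware { const void *data; size_t size; };

static void demo_done(void *dev, int err, const struct firmware *fw)
{
	if (err) {
		/* fw may be NULL on this path; unwind device state here */
		fprintf(stderr, "firmware load failed: %d\n", err);
		return;
	}
	printf("loaded %zu bytes\n", fw->size);
}

int main(void)
{
	struct firmware fw = { "blob", 4 };

	demo_done(NULL, 0, &fw);	/* success: fw is valid */
	demo_done(NULL, -2, NULL);	/* failure: fw must not be used */
	return 0;
}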
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h
index d3c9f0d52ae3..8fa4b7e1ab3d 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h
@@ -73,13 +73,13 @@ void brcmf_fw_nvram_free(void *nvram);
  */
 int brcmf_fw_get_firmwares_pcie(struct device *dev, u16 flags,
 				const char *code, const char *nvram,
-				void (*fw_cb)(struct device *dev,
+				void (*fw_cb)(struct device *dev, int err,
 					      const struct firmware *fw,
 					      void *nvram_image, u32 nvram_len),
 				u16 domain_nr, u16 bus_nr);
 int brcmf_fw_get_firmwares(struct device *dev, u16 flags,
 			   const char *code, const char *nvram,
-			   void (*fw_cb)(struct device *dev,
+			   void (*fw_cb)(struct device *dev, int err,
 					 const struct firmware *fw,
 					 void *nvram_image, u32 nvram_len));
 
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
index 72373e59308e..f59642b2c935 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
@@ -2145,7 +2145,7 @@ void brcmf_fws_add_interface(struct brcmf_if *ifp)
 	struct brcmf_fws_info *fws = drvr_to_fws(ifp->drvr);
 	struct brcmf_fws_mac_descriptor *entry;
 
-	if (!ifp->ndev || fws->fcmode == BRCMF_FWS_FCMODE_NONE)
+	if (!ifp->ndev || !brcmf_fws_queue_skbs(fws))
 		return;
 
 	entry = &fws->desc.iface[ifp->ifidx];
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
index f36b96dc6acd..f878706613e6 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
@@ -1650,16 +1650,23 @@ static const struct brcmf_buscore_ops brcmf_pcie_buscore_ops = {
 	.write32 = brcmf_pcie_buscore_write32,
 };
 
-static void brcmf_pcie_setup(struct device *dev, const struct firmware *fw,
+static void brcmf_pcie_setup(struct device *dev, int ret,
+			     const struct firmware *fw,
 			     void *nvram, u32 nvram_len)
 {
-	struct brcmf_bus *bus = dev_get_drvdata(dev);
-	struct brcmf_pciedev *pcie_bus_dev = bus->bus_priv.pcie;
-	struct brcmf_pciedev_info *devinfo = pcie_bus_dev->devinfo;
+	struct brcmf_bus *bus;
+	struct brcmf_pciedev *pcie_bus_dev;
+	struct brcmf_pciedev_info *devinfo;
 	struct brcmf_commonring **flowrings;
-	int ret;
 	u32 i;
 
+	/* check firmware loading result */
+	if (ret)
+		goto fail;
+
+	bus = dev_get_drvdata(dev);
+	pcie_bus_dev = bus->bus_priv.pcie;
+	devinfo = pcie_bus_dev->devinfo;
 	brcmf_pcie_attach(devinfo);
 
 	/* Some of the firmwares have the size of the memory of the device
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index fc64b8913aa6..5653d6dd38f6 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -3422,7 +3422,7 @@ static int brcmf_sdio_bus_preinit(struct device *dev)
 		/* otherwise, set txglomalign */
 		value = sdiodev->settings->bus.sdio.sd_sgentry_align;
 		/* SDIO ADMA requires at least 32 bit alignment */
-		value = max_t(u32, value, 4);
+		value = max_t(u32, value, ALIGNMENT);
 		err = brcmf_iovar_data_set(dev, "bus:txglomalign", &value,
 					   sizeof(u32));
 	}
@@ -3982,21 +3982,26 @@ static const struct brcmf_bus_ops brcmf_sdio_bus_ops = {
 	.get_memdump = brcmf_sdio_bus_get_memdump,
 };
 
-static void brcmf_sdio_firmware_callback(struct device *dev,
+static void brcmf_sdio_firmware_callback(struct device *dev, int err,
 					 const struct firmware *code,
 					 void *nvram, u32 nvram_len)
 {
-	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
-	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
-	struct brcmf_sdio *bus = sdiodev->bus;
-	int err = 0;
+	struct brcmf_bus *bus_if;
+	struct brcmf_sdio_dev *sdiodev;
+	struct brcmf_sdio *bus;
 	u8 saveclk;
 
-	brcmf_dbg(TRACE, "Enter: dev=%s\n", dev_name(dev));
+	brcmf_dbg(TRACE, "Enter: dev=%s, err=%d\n", dev_name(dev), err);
+	bus_if = dev_get_drvdata(dev);
+	sdiodev = bus_if->bus_priv.sdio;
+	if (err)
+		goto fail;
 
 	if (!bus_if->drvr)
 		return;
 
+	bus = sdiodev->bus;
+
 	/* try to download image and nvram to the dongle */
 	bus->alp_only = true;
 	err = brcmf_sdio_download_firmware(bus, code, nvram, nvram_len);
@@ -4083,6 +4088,7 @@ release:
 fail:
 	brcmf_dbg(TRACE, "failed: dev=%s, err=%d\n", dev_name(dev), err);
 	device_release_driver(dev);
+	device_release_driver(&sdiodev->func[2]->dev);
 }
 
 struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
index e4d545f9edee..0eea48e73331 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
@@ -1159,17 +1159,18 @@ fail:
 	return ret;
 }
 
-static void brcmf_usb_probe_phase2(struct device *dev,
+static void brcmf_usb_probe_phase2(struct device *dev, int ret,
 				   const struct firmware *fw,
 				   void *nvram, u32 nvlen)
 {
 	struct brcmf_bus *bus = dev_get_drvdata(dev);
-	struct brcmf_usbdev_info *devinfo;
-	int ret;
+	struct brcmf_usbdev_info *devinfo = bus->bus_priv.usb->devinfo;
+
+	if (ret)
+		goto error;
 
 	brcmf_dbg(USB, "Start fw downloading\n");
 
-	devinfo = bus->bus_priv.usb->devinfo;
 	ret = check_file(fw->data);
 	if (ret < 0) {
 		brcmf_err("invalid firmware\n");
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-7000.c b/drivers/net/wireless/intel/iwlwifi/iwl-7000.c
index 3b3e076571d6..45e2efc70d19 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-7000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-7000.c
@@ -79,8 +79,8 @@
 /* Lowest firmware API version supported */
 #define IWL7260_UCODE_API_MIN	17
 #define IWL7265_UCODE_API_MIN	17
-#define IWL7265D_UCODE_API_MIN	17
-#define IWL3168_UCODE_API_MIN	20
+#define IWL7265D_UCODE_API_MIN	22
+#define IWL3168_UCODE_API_MIN	22
 
 /* NVM versions */
 #define IWL7260_NVM_VERSION	0x0a1d
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
index b9718c0cf174..89137717c1fc 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
@@ -74,8 +74,8 @@
 #define IWL8265_UCODE_API_MAX	30
 
 /* Lowest firmware API version supported */
-#define IWL8000_UCODE_API_MIN	17
-#define IWL8265_UCODE_API_MIN	20
+#define IWL8000_UCODE_API_MIN	22
+#define IWL8265_UCODE_API_MIN	22
 
 /* NVM versions */
 #define IWL8000_NVM_VERSION	0x0a1d
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
index 306bc967742e..77efbb78e867 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
@@ -370,6 +370,7 @@
 #define MON_DMARB_RD_DATA_ADDR		(0xa03c5c)
 
 #define DBGC_IN_SAMPLE			(0xa03c00)
+#define DBGC_OUT_CTRL			(0xa03c0c)
 
 /* enable the ID buf for read */
 #define WFPM_PS_CTL_CLR			0xA0300C
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rs.h
index 1b7d265ffb0a..a10c6aae9ab9 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rs.h
@@ -307,6 +307,11 @@ enum {
 /* Bit 1-3: LQ command color. Used to match responses to LQ commands */
 #define LQ_FLAG_COLOR_POS 1
 #define LQ_FLAG_COLOR_MSK (7 << LQ_FLAG_COLOR_POS)
+#define LQ_FLAG_COLOR_GET(_f) (((_f) & LQ_FLAG_COLOR_MSK) >>\
+			       LQ_FLAG_COLOR_POS)
+#define LQ_FLAGS_COLOR_INC(_c) ((((_c) + 1) << LQ_FLAG_COLOR_POS) &\
+				LQ_FLAG_COLOR_MSK)
+#define LQ_FLAG_COLOR_SET(_f, _c) ((_c) | ((_f) & ~LQ_FLAG_COLOR_MSK))
 
 /* Bit 4-5: Tx RTS BW Signalling
  * (0) No RTS BW signalling
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h
index 81b98915b1a4..1360ebfdc51b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h
@@ -519,8 +519,11 @@ struct agg_tx_status {
  * bit-7 invalid rate indication
  */
 #define TX_RES_INIT_RATE_INDEX_MSK 0x0f
+#define TX_RES_RATE_TABLE_COLOR_POS 4
 #define TX_RES_RATE_TABLE_COLOR_MSK 0x70
 #define TX_RES_INV_RATE_INDEX_MSK 0x80
+#define TX_RES_RATE_TABLE_COL_GET(_f) (((_f) & TX_RES_RATE_TABLE_COLOR_MSK) >>\
+				       TX_RES_RATE_TABLE_COLOR_POS)
 
 #define IWL_MVM_TX_RES_GET_TID(_ra_tid) ((_ra_tid) & 0x0f)
 #define IWL_MVM_TX_RES_GET_RA(_ra_tid) ((_ra_tid) >> 4)
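The new POS/MSK/GET trio extracts the 3-bit rate-table color from bits 4-6 of tlc_info. A standalone check of the arithmetic (compiles as plain C):

#include <assert.h>
#include <stdint.h>

#define TX_RES_RATE_TABLE_COLOR_POS 4
#define TX_RES_RATE_TABLE_COLOR_MSK 0x70
#define TX_RES_RATE_TABLE_COL_GET(_f) (((_f) & TX_RES_RATE_TABLE_COLOR_MSK) >>\
				       TX_RES_RATE_TABLE_COLOR_POS)

int main(void)
{
	uint8_t tlc_info = 0x5a;	/* rate index 0xa in bits 0-3, color 5 in bits 4-6 */

	assert(TX_RES_RATE_TABLE_COL_GET(tlc_info) == 5);
	return 0;
}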
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
index 7b86a4f1b574..c8712e6eea74 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
@@ -1002,14 +1002,6 @@ int iwl_mvm_fw_dbg_collect_trig(struct iwl_mvm *mvm,
 	return 0;
 }
 
-static inline void iwl_mvm_restart_early_start(struct iwl_mvm *mvm)
-{
-	if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000)
-		iwl_clear_bits_prph(mvm->trans, MON_BUFF_SAMPLE_CTL, 0x100);
-	else
-		iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, 1);
-}
-
 int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 conf_id)
 {
 	u8 *ptr;
@@ -1023,10 +1015,8 @@ int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 conf_id)
 	/* EARLY START - firmware's configuration is hard coded */
 	if ((!mvm->fw->dbg_conf_tlv[conf_id] ||
 	     !mvm->fw->dbg_conf_tlv[conf_id]->num_of_hcmds) &&
-	    conf_id == FW_DBG_START_FROM_ALIVE) {
-		iwl_mvm_restart_early_start(mvm);
+	    conf_id == FW_DBG_START_FROM_ALIVE)
 		return 0;
-	}
 
 	if (!mvm->fw->dbg_conf_tlv[conf_id])
 		return -EINVAL;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index 0f1831b41915..fd2fc46e2fe5 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -1040,7 +1040,7 @@ static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm,
 		struct iwl_mac_beacon_cmd_v6 beacon_cmd_v6;
 		struct iwl_mac_beacon_cmd_v7 beacon_cmd;
 	} u = {};
-	struct iwl_mac_beacon_cmd beacon_cmd;
+	struct iwl_mac_beacon_cmd beacon_cmd = {};
 	struct ieee80211_tx_info *info;
 	u32 beacon_skb_len;
 	u32 rate, tx_flags;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 4e74a6b90e70..52f8d7a6a7dc 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -1730,8 +1730,11 @@ int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, u8 minq, u8 maxq);
  */
 static inline u32 iwl_mvm_flushable_queues(struct iwl_mvm *mvm)
 {
+	u32 cmd_queue = iwl_mvm_is_dqa_supported(mvm) ? IWL_MVM_DQA_CMD_QUEUE :
+		IWL_MVM_CMD_QUEUE;
+
 	return ((BIT(mvm->cfg->base_params->num_of_queues) - 1) &
-		~BIT(IWL_MVM_CMD_QUEUE));
+		~BIT(cmd_queue));
 }
 
 static inline
@@ -1753,6 +1756,7 @@ static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm)
 	if (!iwl_mvm_has_new_tx_api(mvm))
 		iwl_free_fw_paging(mvm);
 	mvm->ucode_loaded = false;
+	mvm->fw_dbg_conf = FW_DBG_INVALID;
 	iwl_trans_stop_device(mvm->trans);
 }
 
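The iwl_mvm_flushable_queues() change above picks the command queue dynamically (DQA vs. legacy) before masking it out of the flushable set. The mask arithmetic itself, checked standalone:

#include <assert.h>
#include <stdint.h>

#define BIT(n) (1u << (n))

int main(void)
{
	unsigned int num_of_queues = 8;
	unsigned int cmd_queue = 2;	/* whichever queue serves commands */
	uint32_t flushable = (BIT(num_of_queues) - 1) & ~BIT(cmd_queue);

	assert(flushable == 0xfb);	/* queues 0-7 except queue 2 */
	return 0;
}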
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 9ffff6ed8133..3da5ec40aaea 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -1149,21 +1149,37 @@ static void iwl_mvm_fw_error_dump_wk(struct work_struct *work)
 
 	mutex_lock(&mvm->mutex);
 
-	/* stop recording */
 	if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
+		/* stop recording */
 		iwl_set_bits_prph(mvm->trans, MON_BUFF_SAMPLE_CTL, 0x100);
+
+		iwl_mvm_fw_error_dump(mvm);
+
+		/* start recording again if the firmware is not crashed */
+		if (!test_bit(STATUS_FW_ERROR, &mvm->trans->status) &&
+		    mvm->fw->dbg_dest_tlv)
+			iwl_clear_bits_prph(mvm->trans,
+					    MON_BUFF_SAMPLE_CTL, 0x100);
 	} else {
+		u32 in_sample = iwl_read_prph(mvm->trans, DBGC_IN_SAMPLE);
+		u32 out_ctrl = iwl_read_prph(mvm->trans, DBGC_OUT_CTRL);
+
+		/* stop recording */
 		iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, 0);
-		/* wait before we collect the data till the DBGC stop */
 		udelay(100);
-	}
+		iwl_write_prph(mvm->trans, DBGC_OUT_CTRL, 0);
+		/* wait before we collect the data till the DBGC stop */
+		udelay(500);
 
-	iwl_mvm_fw_error_dump(mvm);
+		iwl_mvm_fw_error_dump(mvm);
 
-	/* start recording again if the firmware is not crashed */
-	WARN_ON_ONCE((!test_bit(STATUS_FW_ERROR, &mvm->trans->status)) &&
-		     mvm->fw->dbg_dest_tlv &&
-		     iwl_mvm_start_fw_dbg_conf(mvm, mvm->fw_dbg_conf));
+		/* start recording again if the firmware is not crashed */
+		if (!test_bit(STATUS_FW_ERROR, &mvm->trans->status) &&
+		    mvm->fw->dbg_dest_tlv) {
+			iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, in_sample);
+			iwl_write_prph(mvm->trans, DBGC_OUT_CTRL, out_ctrl);
+		}
+	}
 
 	mutex_unlock(&mvm->mutex);
 
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index 7788eefcd2bd..aa785cf3cf68 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -2,7 +2,7 @@
  *
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as
@@ -1083,34 +1083,6 @@ static void rs_get_lower_rate_down_column(struct iwl_lq_sta *lq_sta,
 		rs_get_lower_rate_in_column(lq_sta, rate);
 }
 
-/* Check if both rates are identical
- * allow_ant_mismatch enables matching a SISO rate on ANT_A or ANT_B
- * with a rate indicating STBC/BFER and ANT_AB.
- */
-static inline bool rs_rate_equal(struct rs_rate *a,
-				 struct rs_rate *b,
-				 bool allow_ant_mismatch)
-
-{
-	bool ant_match = (a->ant == b->ant) && (a->stbc == b->stbc) &&
-		(a->bfer == b->bfer);
-
-	if (allow_ant_mismatch) {
-		if (a->stbc || a->bfer) {
-			WARN_ONCE(a->ant != ANT_AB, "stbc %d bfer %d ant %d",
-				  a->stbc, a->bfer, a->ant);
-			ant_match |= (b->ant == ANT_A || b->ant == ANT_B);
-		} else if (b->stbc || b->bfer) {
-			WARN_ONCE(b->ant != ANT_AB, "stbc %d bfer %d ant %d",
-				  b->stbc, b->bfer, b->ant);
-			ant_match |= (a->ant == ANT_A || a->ant == ANT_B);
-		}
-	}
-
-	return (a->type == b->type) && (a->bw == b->bw) && (a->sgi == b->sgi) &&
-		(a->ldpc == b->ldpc) && (a->index == b->index) && ant_match;
-}
-
 /* Check if both rates share the same column */
 static inline bool rs_rate_column_match(struct rs_rate *a,
 					struct rs_rate *b)
@@ -1182,12 +1154,12 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 	u32 lq_hwrate;
 	struct rs_rate lq_rate, tx_resp_rate;
 	struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
-	u8 reduced_txp = (uintptr_t)info->status.status_driver_data[0];
+	u32 tlc_info = (uintptr_t)info->status.status_driver_data[0];
+	u8 reduced_txp = tlc_info & RS_DRV_DATA_TXP_MSK;
+	u8 lq_color = RS_DRV_DATA_LQ_COLOR_GET(tlc_info);
 	u32 tx_resp_hwrate = (uintptr_t)info->status.status_driver_data[1];
 	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
 	struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta;
-	bool allow_ant_mismatch = fw_has_api(&mvm->fw->ucode_capa,
-					     IWL_UCODE_TLV_API_LQ_SS_PARAMS);
 
 	/* Treat uninitialized rate scaling data same as non-existing. */
 	if (!lq_sta) {
@@ -1262,10 +1234,10 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 	rs_rate_from_ucode_rate(lq_hwrate, info->band, &lq_rate);
 
 	/* Here we actually compare this rate to the latest LQ command */
-	if (!rs_rate_equal(&tx_resp_rate, &lq_rate, allow_ant_mismatch)) {
+	if (lq_color != LQ_FLAG_COLOR_GET(table->flags)) {
 		IWL_DEBUG_RATE(mvm,
-			       "initial tx resp rate 0x%x does not match 0x%x\n",
-			       tx_resp_hwrate, lq_hwrate);
+			       "tx resp color 0x%x does not match 0x%x\n",
+			       lq_color, LQ_FLAG_COLOR_GET(table->flags));
 
 		/*
 		 * Since rates mis-match, the last LQ command may have failed.
@@ -3326,6 +3298,7 @@ static void rs_build_rates_table(struct iwl_mvm *mvm,
 	u8 valid_tx_ant = 0;
 	struct iwl_lq_cmd *lq_cmd = &lq_sta->lq;
 	bool toggle_ant = false;
+	u32 color;
 
 	memcpy(&rate, initial_rate, sizeof(rate));
 
@@ -3380,6 +3353,9 @@ static void rs_build_rates_table(struct iwl_mvm *mvm,
 			 num_rates, num_retries, valid_tx_ant,
 			 toggle_ant);
 
+	/* update the color of the LQ command (as a counter at bits 1-3) */
+	color = LQ_FLAGS_COLOR_INC(LQ_FLAG_COLOR_GET(lq_cmd->flags));
+	lq_cmd->flags = LQ_FLAG_COLOR_SET(lq_cmd->flags, color);
 }
 
 struct rs_bfer_active_iter_data {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
index ee207f2c0a90..3abde1cb0303 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
@@ -2,6 +2,7 @@
  *
  * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2017 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as
@@ -357,6 +358,20 @@ struct iwl_lq_sta {
 	} pers;
 };
 
+/* ieee80211_tx_info's status_driver_data[0] is packed with lq color and txp
+ * Note, it's iwlmvm <-> mac80211 interface.
+ * bits 0-7: reduced tx power
+ * bits 8-10: LQ command's color
+ */
+#define RS_DRV_DATA_TXP_MSK 0xff
+#define RS_DRV_DATA_LQ_COLOR_POS 8
+#define RS_DRV_DATA_LQ_COLOR_MSK (7 << RS_DRV_DATA_LQ_COLOR_POS)
+#define RS_DRV_DATA_LQ_COLOR_GET(_f) (((_f) & RS_DRV_DATA_LQ_COLOR_MSK) >>\
+				      RS_DRV_DATA_LQ_COLOR_POS)
+#define RS_DRV_DATA_PACK(_c, _p) ((void *)(uintptr_t)\
+				  (((uintptr_t)_p) |\
+				   ((_c) << RS_DRV_DATA_LQ_COLOR_POS)))
+
 /* Initialize station's rate scaling information after adding station */
 void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 			  enum nl80211_band band, bool init);
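Since status_driver_data[0] is a void pointer, the driver can smuggle both the reduced tx power (bits 0-7) and the LQ color (bits 8-10) through it as integer bits. A standalone round-trip check of the macros added above:

#include <assert.h>
#include <stdint.h>

#define RS_DRV_DATA_TXP_MSK 0xff
#define RS_DRV_DATA_LQ_COLOR_POS 8
#define RS_DRV_DATA_LQ_COLOR_MSK (7 << RS_DRV_DATA_LQ_COLOR_POS)
#define RS_DRV_DATA_LQ_COLOR_GET(_f) (((_f) & RS_DRV_DATA_LQ_COLOR_MSK) >>\
				      RS_DRV_DATA_LQ_COLOR_POS)
#define RS_DRV_DATA_PACK(_c, _p) ((void *)(uintptr_t)\
				  (((uintptr_t)_p) |\
				   ((_c) << RS_DRV_DATA_LQ_COLOR_POS)))

int main(void)
{
	uint8_t reduced_txp = 0x2a;
	uint8_t lq_color = 5;
	uintptr_t packed = (uintptr_t)RS_DRV_DATA_PACK(lq_color, reduced_txp);

	assert((packed & RS_DRV_DATA_TXP_MSK) == reduced_txp);
	assert(RS_DRV_DATA_LQ_COLOR_GET(packed) == lq_color);
	return 0;
}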
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index f5c786ddc526..614d67810d05 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -2120,7 +2120,8 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 	if (!iwl_mvm_is_dqa_supported(mvm))
 		return 0;
 
-	if (WARN_ON(vif->type != NL80211_IFTYPE_AP))
+	if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
+		    vif->type != NL80211_IFTYPE_ADHOC))
 		return -ENOTSUPP;
 
 	/*
@@ -2155,6 +2156,16 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 		mvmvif->cab_queue = queue;
 	} else if (!fw_has_api(&mvm->fw->ucode_capa,
 			       IWL_UCODE_TLV_API_STA_TYPE)) {
+		/*
+		 * In IBSS, ieee80211_check_queues() sets the cab_queue to be
+		 * invalid, so make sure we use the queue we want.
+		 * Note that this is done here as we want to avoid making DQA
+		 * changes in mac80211 layer.
+		 */
+		if (vif->type == NL80211_IFTYPE_ADHOC) {
+			vif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
+			mvmvif->cab_queue = vif->cab_queue;
+		}
 		iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
 				   &cfg, timeout);
 	}
@@ -3321,18 +3332,15 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
 
 	/* Get the station from the mvm local station table */
 	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
-	if (!mvm_sta) {
-		IWL_ERR(mvm, "Failed to find station\n");
-		return -EINVAL;
-	}
-	sta_id = mvm_sta->sta_id;
+	if (mvm_sta)
+		sta_id = mvm_sta->sta_id;
 
 	IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
 		      keyconf->keyidx, sta_id);
 
-	if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
-	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
-	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
+	if (mvm_sta && (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
+			keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
+			keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256))
 		return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);
 
 	if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index 2716cb5483bf..ad62b67dceb2 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -313,6 +313,7 @@ enum iwl_mvm_agg_state {
  *	This is basically (last acked packet++).
  * @rate_n_flags: Rate at which Tx was attempted. Holds the data between the
  *	Tx response (TX_CMD), and the block ack notification (COMPRESSED_BA).
+ * @lq_color: the color of the LQ command as it appears in tx response.
  * @amsdu_in_ampdu_allowed: true if A-MSDU in A-MPDU is allowed.
  * @state: state of the BA agreement establishment / tear down.
  * @txq_id: Tx queue used by the BA session / DQA
@@ -331,6 +332,7 @@ struct iwl_mvm_tid_data {
 	u16 next_reclaimed;
 	/* The rest is Tx AGG related */
 	u32 rate_n_flags;
+	u8 lq_color;
 	bool amsdu_in_ampdu_allowed;
 	enum iwl_mvm_agg_state state;
 	u16 txq_id;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
index f9cbd197246f..506d58104e1c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
@@ -790,11 +790,13 @@ static int iwl_mvm_tcool_set_cur_state(struct thermal_cooling_device *cdev,
 	struct iwl_mvm *mvm = (struct iwl_mvm *)(cdev->devdata);
 	int ret;
 
-	if (!mvm->ucode_loaded || !(mvm->cur_ucode == IWL_UCODE_REGULAR))
-		return -EIO;
-
 	mutex_lock(&mvm->mutex);
 
+	if (!mvm->ucode_loaded || !(mvm->cur_ucode == IWL_UCODE_REGULAR)) {
+		ret = -EIO;
+		goto unlock;
+	}
+
 	if (new_state >= ARRAY_SIZE(iwl_mvm_cdev_budgets)) {
 		ret = -EINVAL;
 		goto unlock;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index bcaceb64a6e8..f21901cd4a4f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -1323,6 +1323,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
 	struct iwl_mvm_sta *mvmsta;
 	struct sk_buff_head skbs;
 	u8 skb_freed = 0;
+	u8 lq_color;
 	u16 next_reclaimed, seq_ctl;
 	bool is_ndp = false;
 
@@ -1405,8 +1406,9 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
 			info->status.tx_time =
 				le16_to_cpu(tx_resp->wireless_media_time);
 			BUILD_BUG_ON(ARRAY_SIZE(info->status.status_driver_data) < 1);
+			lq_color = TX_RES_RATE_TABLE_COL_GET(tx_resp->tlc_info);
 			info->status.status_driver_data[0] =
-				(void *)(uintptr_t)tx_resp->reduced_tpc;
+				RS_DRV_DATA_PACK(lq_color, tx_resp->reduced_tpc);
 
 			ieee80211_tx_status(mvm->hw, skb);
 		}
@@ -1638,6 +1640,9 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
 			le32_to_cpu(tx_resp->initial_rate);
 		mvmsta->tid_data[tid].tx_time =
 			le16_to_cpu(tx_resp->wireless_media_time);
+		mvmsta->tid_data[tid].lq_color =
+			(tx_resp->tlc_info & TX_RES_RATE_TABLE_COLOR_MSK) >>
+			TX_RES_RATE_TABLE_COLOR_POS;
 	}
 
 	rcu_read_unlock();
@@ -1707,6 +1712,11 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
 		iwl_mvm_check_ratid_empty(mvm, sta, tid);
 
 		freed = 0;
+
+		/* pack lq color from tid_data along the reduced txp */
+		ba_info->status.status_driver_data[0] =
+			RS_DRV_DATA_PACK(tid_data->lq_color,
+					 ba_info->status.status_driver_data[0]);
 		ba_info->status.status_driver_data[1] = (void *)(uintptr_t)rate;
 
 		skb_queue_walk(&reclaimed_skbs, skb) {
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index 70acf850a9f1..93cbc7a69bcd 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -2803,7 +2803,8 @@ static struct iwl_trans_dump_data
 #ifdef CONFIG_PM_SLEEP
 static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
 {
-	if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3)
+	if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3 &&
+	    (trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3))
 		return iwl_pci_fw_enter_d0i3(trans);
 
 	return 0;
@@ -2811,7 +2812,8 @@ static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
 
 static void iwl_trans_pcie_resume(struct iwl_trans *trans)
 {
-	if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3)
+	if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3 &&
+	    (trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3))
 		iwl_pci_fw_exit_d0i3(trans);
 }
 #endif /* CONFIG_PM_SLEEP */
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
index 9fb46a6f47cf..9c9bfbbabdf1 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
@@ -906,7 +906,7 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
 
 	if (WARN_ON(iwl_rx_packet_payload_len(hcmd.resp_pkt) != sizeof(*rsp))) {
 		ret = -EINVAL;
-		goto error;
+		goto error_free_resp;
 	}
 
 	rsp = (void *)hcmd.resp_pkt->data;
@@ -915,13 +915,13 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
 	if (qid > ARRAY_SIZE(trans_pcie->txq)) {
 		WARN_ONCE(1, "queue index %d unsupported", qid);
 		ret = -EIO;
-		goto error;
+		goto error_free_resp;
 	}
 
 	if (test_and_set_bit(qid, trans_pcie->queue_used)) {
 		WARN_ONCE(1, "queue %d already used", qid);
 		ret = -EIO;
-		goto error;
+		goto error_free_resp;
 	}
 
 	txq->id = qid;
@@ -934,8 +934,11 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
 			   (txq->write_ptr) | (qid << 16));
 	IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid);
 
+	iwl_free_resp(&hcmd);
 	return qid;
 
+error_free_resp:
+	iwl_free_resp(&hcmd);
 error:
 	iwl_pcie_gen2_txq_free_memory(trans, txq);
 	return ret;
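The error_free_resp label fixes a leak: once the host command response is held, it must be released on every exit path, while the older error label only frees the queue memory. A userspace sketch of the two-label unwind (illustrative names, not the driver's):

#include <stdlib.h>

static int demo_alloc_queue(int fail_validation)
{
	void *txq = malloc(64);		/* resource acquired first */
	void *resp;			/* resource acquired second */
	int ret = 0;

	if (!txq)
		return -1;

	resp = malloc(16);
	if (!resp) {
		ret = -1;
		goto error;		/* nothing extra to release yet */
	}

	if (fail_validation) {
		ret = -1;
		goto error_free_resp;	/* later failures release both */
	}

	free(resp);	/* success also releases it (the real code keeps the queue) */
	free(txq);
	return ret;

error_free_resp:
	free(resp);
error:
	free(txq);
	return ret;
}

int main(void)
{
	demo_alloc_queue(0);	/* success path */
	demo_alloc_queue(1);	/* failure path exercises both labels */
	return 0;
}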
diff --git a/drivers/net/wireless/intersil/hostap/hostap_main.c b/drivers/net/wireless/intersil/hostap/hostap_main.c
index 544fc09dcb62..1372b20f931e 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_main.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_main.c
@@ -73,7 +73,7 @@ struct net_device * hostap_add_interface(struct local_info *local,
 	dev->mem_end = mdev->mem_end;
 
 	hostap_setup_dev(dev, local, type);
-	dev->destructor = free_netdev;
+	dev->needs_free_netdev = true;
 
 	sprintf(dev->name, "%s%s", prefix, name);
 	if (!rtnl_locked)
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 002b25cff5b6..c854a557998b 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -2861,7 +2861,7 @@ static const struct net_device_ops hwsim_netdev_ops = {
 static void hwsim_mon_setup(struct net_device *dev)
 {
 	dev->netdev_ops = &hwsim_netdev_ops;
-	dev->destructor = free_netdev;
+	dev->needs_free_netdev = true;
 	ether_setup(dev);
 	dev->priv_flags |= IFF_NO_QUEUE;
 	dev->type = ARPHRD_IEEE80211_RADIOTAP;
diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c
index dd87b9ff64c3..39b6b5e3f6e0 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.c
+++ b/drivers/net/wireless/marvell/mwifiex/main.c
@@ -1280,7 +1280,7 @@ void mwifiex_init_priv_params(struct mwifiex_private *priv,
 			      struct net_device *dev)
 {
 	dev->netdev_ops = &mwifiex_netdev_ops;
-	dev->destructor = free_netdev;
+	dev->needs_free_netdev = true;
 	/* Initialize private structure */
 	priv->current_key_index = 0;
 	priv->media_connected = false;
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 530586be05b4..5b1d2e8402d9 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -199,6 +199,7 @@ struct xenvif_queue { /* Per-queue data for xenvif */
 	unsigned long remaining_credit;
 	struct timer_list credit_timeout;
 	u64 credit_window_start;
+	bool rate_limited;
 
 	/* Statistics */
 	struct xenvif_stats stats;
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 8397f6c92451..e322a862ddfe 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -106,7 +106,11 @@ static int xenvif_poll(struct napi_struct *napi, int budget)
 
 	if (work_done < budget) {
 		napi_complete_done(napi, work_done);
-		xenvif_napi_schedule_or_enable_events(queue);
+		/* If the queue is rate-limited, it shall be
+		 * rescheduled in the timer callback.
+		 */
+		if (likely(!queue->rate_limited))
+			xenvif_napi_schedule_or_enable_events(queue);
 	}
 
 	return work_done;
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 602d408fa25e..5042ff8d449a 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -180,6 +180,7 @@ static void tx_add_credit(struct xenvif_queue *queue)
 		max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */
 
 	queue->remaining_credit = min(max_credit, max_burst);
+	queue->rate_limited = false;
 }
 
 void xenvif_tx_credit_callback(unsigned long data)
@@ -686,8 +687,10 @@ static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size)
 		msecs_to_jiffies(queue->credit_usec / 1000);
 
 	/* Timer could already be pending in rare cases. */
-	if (timer_pending(&queue->credit_timeout))
+	if (timer_pending(&queue->credit_timeout)) {
+		queue->rate_limited = true;
 		return true;
+	}
 
 	/* Passed the point where we can replenish credit? */
 	if (time_after_eq64(now, next_credit)) {
@@ -702,6 +705,7 @@ static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size)
 		mod_timer(&queue->credit_timeout,
 			  next_credit);
 		queue->credit_window_start = next_credit;
+		queue->rate_limited = true;
 
 		return true;
 	}
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 6ffc482550c1..7b61adb6270c 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1934,8 +1934,7 @@ abort_transaction_no_dev_fatal:
 	xennet_disconnect_backend(info);
 	xennet_destroy_queues(info);
  out:
-	unregister_netdev(info->netdev);
-	xennet_free_netdev(info->netdev);
+	device_unregister(&dev->dev);
 	return err;
 }
 
diff --git a/drivers/ntb/hw/intel/ntb_hw_intel.c b/drivers/ntb/hw/intel/ntb_hw_intel.c
index c00238491673..7b3b6fd63d7d 100644
--- a/drivers/ntb/hw/intel/ntb_hw_intel.c
+++ b/drivers/ntb/hw/intel/ntb_hw_intel.c
@@ -2878,7 +2878,7 @@ static const struct intel_ntb_reg skx_reg = {
 	.link_is_up = xeon_link_is_up,
 	.db_ioread = skx_db_ioread,
 	.db_iowrite = skx_db_iowrite,
-	.db_size = sizeof(u64),
+	.db_size = sizeof(u32),
 	.ntb_ctl = SKX_NTBCNTL_OFFSET,
 	.mw_bar = {2, 4},
 };
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index 02ca45fdd892..10e5bf460139 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -177,14 +177,12 @@ struct ntb_transport_qp {
177 u64 rx_err_ver; 177 u64 rx_err_ver;
178 u64 rx_memcpy; 178 u64 rx_memcpy;
179 u64 rx_async; 179 u64 rx_async;
180 u64 dma_rx_prep_err;
181 u64 tx_bytes; 180 u64 tx_bytes;
182 u64 tx_pkts; 181 u64 tx_pkts;
183 u64 tx_ring_full; 182 u64 tx_ring_full;
184 u64 tx_err_no_buf; 183 u64 tx_err_no_buf;
185 u64 tx_memcpy; 184 u64 tx_memcpy;
186 u64 tx_async; 185 u64 tx_async;
187 u64 dma_tx_prep_err;
188}; 186};
189 187
190struct ntb_transport_mw { 188struct ntb_transport_mw {
@@ -254,8 +252,6 @@ enum {
254#define QP_TO_MW(nt, qp) ((qp) % nt->mw_count) 252#define QP_TO_MW(nt, qp) ((qp) % nt->mw_count)
255#define NTB_QP_DEF_NUM_ENTRIES 100 253#define NTB_QP_DEF_NUM_ENTRIES 100
256#define NTB_LINK_DOWN_TIMEOUT 10 254#define NTB_LINK_DOWN_TIMEOUT 10
257#define DMA_RETRIES 20
258#define DMA_OUT_RESOURCE_TO msecs_to_jiffies(50)
259 255
260static void ntb_transport_rxc_db(unsigned long data); 256static void ntb_transport_rxc_db(unsigned long data);
261static const struct ntb_ctx_ops ntb_transport_ops; 257static const struct ntb_ctx_ops ntb_transport_ops;
@@ -516,12 +512,6 @@ static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
516 out_offset += snprintf(buf + out_offset, out_count - out_offset, 512 out_offset += snprintf(buf + out_offset, out_count - out_offset,
517 "free tx - \t%u\n", 513 "free tx - \t%u\n",
518 ntb_transport_tx_free_entry(qp)); 514 ntb_transport_tx_free_entry(qp));
519 out_offset += snprintf(buf + out_offset, out_count - out_offset,
520 "DMA tx prep err - \t%llu\n",
521 qp->dma_tx_prep_err);
522 out_offset += snprintf(buf + out_offset, out_count - out_offset,
523 "DMA rx prep err - \t%llu\n",
524 qp->dma_rx_prep_err);
525 515
526 out_offset += snprintf(buf + out_offset, out_count - out_offset, 516 out_offset += snprintf(buf + out_offset, out_count - out_offset,
527 "\n"); 517 "\n");
@@ -623,7 +613,7 @@ static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt,
623 if (!mw->virt_addr) 613 if (!mw->virt_addr)
624 return -ENOMEM; 614 return -ENOMEM;
625 615
626 if (qp_count % mw_count && mw_num + 1 < qp_count / mw_count) 616 if (mw_num < qp_count % mw_count)
627 num_qps_mw = qp_count / mw_count + 1; 617 num_qps_mw = qp_count / mw_count + 1;
628 else 618 else
629 num_qps_mw = qp_count / mw_count; 619 num_qps_mw = qp_count / mw_count;
@@ -768,8 +758,6 @@ static void ntb_qp_link_down_reset(struct ntb_transport_qp *qp)
768 qp->tx_err_no_buf = 0; 758 qp->tx_err_no_buf = 0;
769 qp->tx_memcpy = 0; 759 qp->tx_memcpy = 0;
770 qp->tx_async = 0; 760 qp->tx_async = 0;
771 qp->dma_tx_prep_err = 0;
772 qp->dma_rx_prep_err = 0;
773} 761}
774 762
775static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp) 763static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp)
@@ -1000,7 +988,7 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
1000 qp->event_handler = NULL; 988 qp->event_handler = NULL;
1001 ntb_qp_link_down_reset(qp); 989 ntb_qp_link_down_reset(qp);
1002 990
1003 if (qp_count % mw_count && mw_num + 1 < qp_count / mw_count) 991 if (mw_num < qp_count % mw_count)
1004 num_qps_mw = qp_count / mw_count + 1; 992 num_qps_mw = qp_count / mw_count + 1;
1005 else 993 else
1006 num_qps_mw = qp_count / mw_count; 994 num_qps_mw = qp_count / mw_count;
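
The replacement test `mw_num < qp_count % mw_count` hands one extra queue to each of the first qp_count % mw_count memory windows. The old expression could strand queues: with qp_count = 5 and mw_count = 4 it reduces to `mw_num + 1 < 1`, which is never true, so only four of the five queues were placed. A worked example of the corrected distribution:

    #include <stdio.h>

    int main(void)
    {
        unsigned qp_count = 5, mw_count = 4, total = 0;

        for (unsigned mw_num = 0; mw_num < mw_count; mw_num++) {
            unsigned num_qps_mw = qp_count / mw_count +
                                  (mw_num < qp_count % mw_count ? 1 : 0);
            printf("mw %u -> %u qps\n", mw_num, num_qps_mw);
            total += num_qps_mw;
        }
        printf("total %u\n", total);    /* 2+1+1+1 = 5, every queue placed */
        return 0;
    }
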
@@ -1128,8 +1116,8 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
1128 qp_count = ilog2(qp_bitmap); 1116 qp_count = ilog2(qp_bitmap);
1129 if (max_num_clients && max_num_clients < qp_count) 1117 if (max_num_clients && max_num_clients < qp_count)
1130 qp_count = max_num_clients; 1118 qp_count = max_num_clients;
1131 else if (mw_count < qp_count) 1119 else if (nt->mw_count < qp_count)
1132 qp_count = mw_count; 1120 qp_count = nt->mw_count;
1133 1121
1134 qp_bitmap &= BIT_ULL(qp_count) - 1; 1122 qp_bitmap &= BIT_ULL(qp_count) - 1;
1135 1123
@@ -1317,7 +1305,6 @@ static int ntb_async_rx_submit(struct ntb_queue_entry *entry, void *offset)
1317 struct dmaengine_unmap_data *unmap; 1305 struct dmaengine_unmap_data *unmap;
1318 dma_cookie_t cookie; 1306 dma_cookie_t cookie;
1319 void *buf = entry->buf; 1307 void *buf = entry->buf;
1320 int retries = 0;
1321 1308
1322 len = entry->len; 1309 len = entry->len;
1323 device = chan->device; 1310 device = chan->device;
@@ -1346,22 +1333,11 @@ static int ntb_async_rx_submit(struct ntb_queue_entry *entry, void *offset)
1346 1333
1347 unmap->from_cnt = 1; 1334 unmap->from_cnt = 1;
1348 1335
1349 for (retries = 0; retries < DMA_RETRIES; retries++) { 1336 txd = device->device_prep_dma_memcpy(chan, unmap->addr[1],
1350 txd = device->device_prep_dma_memcpy(chan, 1337 unmap->addr[0], len,
1351 unmap->addr[1], 1338 DMA_PREP_INTERRUPT);
1352 unmap->addr[0], len, 1339 if (!txd)
1353 DMA_PREP_INTERRUPT);
1354 if (txd)
1355 break;
1356
1357 set_current_state(TASK_INTERRUPTIBLE);
1358 schedule_timeout(DMA_OUT_RESOURCE_TO);
1359 }
1360
1361 if (!txd) {
1362 qp->dma_rx_prep_err++;
1363 goto err_get_unmap; 1340 goto err_get_unmap;
1364 }
1365 1341
1366 txd->callback_result = ntb_rx_copy_callback; 1342 txd->callback_result = ntb_rx_copy_callback;
1367 txd->callback_param = entry; 1343 txd->callback_param = entry;
@@ -1606,7 +1582,6 @@ static int ntb_async_tx_submit(struct ntb_transport_qp *qp,
1606 struct dmaengine_unmap_data *unmap; 1582 struct dmaengine_unmap_data *unmap;
1607 dma_addr_t dest; 1583 dma_addr_t dest;
1608 dma_cookie_t cookie; 1584 dma_cookie_t cookie;
1609 int retries = 0;
1610 1585
1611 device = chan->device; 1586 device = chan->device;
1612 dest = qp->tx_mw_phys + qp->tx_max_frame * entry->tx_index; 1587 dest = qp->tx_mw_phys + qp->tx_max_frame * entry->tx_index;
@@ -1628,21 +1603,10 @@ static int ntb_async_tx_submit(struct ntb_transport_qp *qp,
1628 1603
1629 unmap->to_cnt = 1; 1604 unmap->to_cnt = 1;
1630 1605
1631 for (retries = 0; retries < DMA_RETRIES; retries++) { 1606 txd = device->device_prep_dma_memcpy(chan, dest, unmap->addr[0], len,
1632 txd = device->device_prep_dma_memcpy(chan, dest, 1607 DMA_PREP_INTERRUPT);
1633 unmap->addr[0], len, 1608 if (!txd)
1634 DMA_PREP_INTERRUPT);
1635 if (txd)
1636 break;
1637
1638 set_current_state(TASK_INTERRUPTIBLE);
1639 schedule_timeout(DMA_OUT_RESOURCE_TO);
1640 }
1641
1642 if (!txd) {
1643 qp->dma_tx_prep_err++;
1644 goto err_get_unmap; 1609 goto err_get_unmap;
1645 }
1646 1610
1647 txd->callback_result = ntb_tx_copy_callback; 1611 txd->callback_result = ntb_tx_copy_callback;
1648 txd->callback_param = entry; 1612 txd->callback_param = entry;
diff --git a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c
index 434e1d474f33..5cab2831ce99 100644
--- a/drivers/ntb/test/ntb_perf.c
+++ b/drivers/ntb/test/ntb_perf.c
@@ -90,11 +90,11 @@ MODULE_PARM_DESC(max_mw_size, "Limit size of large memory windows");
90 90
91static unsigned int seg_order = 19; /* 512K */ 91static unsigned int seg_order = 19; /* 512K */
92module_param(seg_order, uint, 0644); 92module_param(seg_order, uint, 0644);
93MODULE_PARM_DESC(seg_order, "size order [n^2] of buffer segment for testing"); 93MODULE_PARM_DESC(seg_order, "size order [2^n] of buffer segment for testing");
94 94
95static unsigned int run_order = 32; /* 4G */ 95static unsigned int run_order = 32; /* 4G */
96module_param(run_order, uint, 0644); 96module_param(run_order, uint, 0644);
97MODULE_PARM_DESC(run_order, "size order [n^2] of total data to transfer"); 97MODULE_PARM_DESC(run_order, "size order [2^n] of total data to transfer");
98 98
99static bool use_dma; /* default to 0 */ 99static bool use_dma; /* default to 0 */
100module_param(use_dma, bool, 0644); 100module_param(use_dma, bool, 0644);
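
The MODULE_PARM_DESC fix matters because both parameters are orders: the byte size is 1 << order, not n^2 as the old text claimed. For the defaults above:

    #include <stdio.h>

    int main(void)
    {
        unsigned seg_order = 19, run_order = 32;

        printf("segment: %llu bytes\n", 1ULL << seg_order); /* 524288, i.e. 512K */
        printf("run:     %llu bytes\n", 1ULL << run_order); /* 4294967296, i.e. 4G */
        return 0;
    }
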
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index d5e0906262ea..903d5813023a 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -56,7 +56,7 @@ MODULE_PARM_DESC(max_retries, "max number of retries a command may have");
56static int nvme_char_major; 56static int nvme_char_major;
57module_param(nvme_char_major, int, 0); 57module_param(nvme_char_major, int, 0);
58 58
59static unsigned long default_ps_max_latency_us = 25000; 59static unsigned long default_ps_max_latency_us = 100000;
60module_param(default_ps_max_latency_us, ulong, 0644); 60module_param(default_ps_max_latency_us, ulong, 0644);
61MODULE_PARM_DESC(default_ps_max_latency_us, 61MODULE_PARM_DESC(default_ps_max_latency_us,
62 "max power saving latency for new devices; use PM QOS to change per device"); 62 "max power saving latency for new devices; use PM QOS to change per device");
@@ -925,6 +925,29 @@ static int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
925} 925}
926 926
927#ifdef CONFIG_BLK_DEV_INTEGRITY 927#ifdef CONFIG_BLK_DEV_INTEGRITY
928static void nvme_prep_integrity(struct gendisk *disk, struct nvme_id_ns *id,
929 u16 bs)
930{
931 struct nvme_ns *ns = disk->private_data;
932 u16 old_ms = ns->ms;
933 u8 pi_type = 0;
934
935 ns->ms = le16_to_cpu(id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ms);
936 ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT);
937
 938	/* PI implementation requires metadata equal to the T10 PI tuple size */
939 if (ns->ms == sizeof(struct t10_pi_tuple))
940 pi_type = id->dps & NVME_NS_DPS_PI_MASK;
941
942 if (blk_get_integrity(disk) &&
943 (ns->pi_type != pi_type || ns->ms != old_ms ||
944 bs != queue_logical_block_size(disk->queue) ||
945 (ns->ms && ns->ext)))
946 blk_integrity_unregister(disk);
947
948 ns->pi_type = pi_type;
949}
950
928static void nvme_init_integrity(struct nvme_ns *ns) 951static void nvme_init_integrity(struct nvme_ns *ns)
929{ 952{
930 struct blk_integrity integrity; 953 struct blk_integrity integrity;
@@ -951,6 +974,10 @@ static void nvme_init_integrity(struct nvme_ns *ns)
951 blk_queue_max_integrity_segments(ns->queue, 1); 974 blk_queue_max_integrity_segments(ns->queue, 1);
952} 975}
953#else 976#else
977static void nvme_prep_integrity(struct gendisk *disk, struct nvme_id_ns *id,
978 u16 bs)
979{
980}
954static void nvme_init_integrity(struct nvme_ns *ns) 981static void nvme_init_integrity(struct nvme_ns *ns)
955{ 982{
956} 983}
@@ -997,37 +1024,22 @@ static int nvme_revalidate_ns(struct nvme_ns *ns, struct nvme_id_ns **id)
997static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id) 1024static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
998{ 1025{
999 struct nvme_ns *ns = disk->private_data; 1026 struct nvme_ns *ns = disk->private_data;
1000 u8 lbaf, pi_type; 1027 u16 bs;
1001 u16 old_ms;
1002 unsigned short bs;
1003
1004 old_ms = ns->ms;
1005 lbaf = id->flbas & NVME_NS_FLBAS_LBA_MASK;
1006 ns->lba_shift = id->lbaf[lbaf].ds;
1007 ns->ms = le16_to_cpu(id->lbaf[lbaf].ms);
1008 ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT);
1009 1028
1010 /* 1029 /*
1011 * If identify namespace failed, use default 512 byte block size so 1030 * If identify namespace failed, use default 512 byte block size so
1012	 * the block layer can use it before failing read/write for 0 capacity.	1031	 * the block layer can use it before failing read/write for 0 capacity.
1013 */ 1032 */
1033 ns->lba_shift = id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ds;
1014 if (ns->lba_shift == 0) 1034 if (ns->lba_shift == 0)
1015 ns->lba_shift = 9; 1035 ns->lba_shift = 9;
1016 bs = 1 << ns->lba_shift; 1036 bs = 1 << ns->lba_shift;
1017 /* XXX: PI implementation requires metadata equal t10 pi tuple size */
1018 pi_type = ns->ms == sizeof(struct t10_pi_tuple) ?
1019 id->dps & NVME_NS_DPS_PI_MASK : 0;
1020 1037
1021 blk_mq_freeze_queue(disk->queue); 1038 blk_mq_freeze_queue(disk->queue);
1022 if (blk_get_integrity(disk) && (ns->pi_type != pi_type ||
1023 ns->ms != old_ms ||
1024 bs != queue_logical_block_size(disk->queue) ||
1025 (ns->ms && ns->ext)))
1026 blk_integrity_unregister(disk);
1027 1039
1028 ns->pi_type = pi_type; 1040 if (ns->ctrl->ops->flags & NVME_F_METADATA_SUPPORTED)
1041 nvme_prep_integrity(disk, id, bs);
1029 blk_queue_logical_block_size(ns->queue, bs); 1042 blk_queue_logical_block_size(ns->queue, bs);
1030
1031 if (ns->ms && !blk_get_integrity(disk) && !ns->ext) 1043 if (ns->ms && !blk_get_integrity(disk) && !ns->ext)
1032 nvme_init_integrity(ns); 1044 nvme_init_integrity(ns);
1033 if (ns->ms && !(ns->ms == 8 && ns->pi_type) && !blk_get_integrity(disk)) 1045 if (ns->ms && !(ns->ms == 8 && ns->pi_type) && !blk_get_integrity(disk))
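
The revalidate refactor keeps the unregister test intact but moves it into nvme_prep_integrity(), which now runs only when the transport advertises NVME_F_METADATA_SUPPORTED (set solely by the PCIe driver in the pci.c hunk further down), so fabrics controllers no longer touch metadata state they cannot support. A condensed sketch of the decision, with the namespace fields flattened into a plain struct for illustration:

    #include <stdbool.h>
    #include <stdint.h>

    struct ns_fmt {
        uint16_t ms, old_ms;            /* metadata size after/before revalidate */
        uint8_t  pi_type, old_pi_type;  /* T10 PI type after/before */
        bool     ext;                   /* extended (interleaved) metadata */
    };

    /* Drop a stale integrity profile whenever the on-disk format changed. */
    static bool must_unregister(const struct ns_fmt *f, uint16_t bs,
                                uint16_t old_bs, bool registered)
    {
        return registered &&
               (f->pi_type != f->old_pi_type || f->ms != f->old_ms ||
                bs != old_bs || (f->ms && f->ext));
    }
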
@@ -1330,7 +1342,7 @@ static void nvme_configure_apst(struct nvme_ctrl *ctrl)
1330 * transitioning between power states. Therefore, when running 1342 * transitioning between power states. Therefore, when running
1331 * in any given state, we will enter the next lower-power 1343 * in any given state, we will enter the next lower-power
1332 * non-operational state after waiting 50 * (enlat + exlat) 1344 * non-operational state after waiting 50 * (enlat + exlat)
1333 * microseconds, as long as that state's total latency is under 1345 * microseconds, as long as that state's exit latency is under
1334 * the requested maximum latency. 1346 * the requested maximum latency.
1335 * 1347 *
1336 * We will not autonomously enter any non-operational state for 1348 * We will not autonomously enter any non-operational state for
@@ -1375,7 +1387,7 @@ static void nvme_configure_apst(struct nvme_ctrl *ctrl)
1375 * lowest-power state, not the number of states. 1387 * lowest-power state, not the number of states.
1376 */ 1388 */
1377 for (state = (int)ctrl->npss; state >= 0; state--) { 1389 for (state = (int)ctrl->npss; state >= 0; state--) {
1378 u64 total_latency_us, transition_ms; 1390 u64 total_latency_us, exit_latency_us, transition_ms;
1379 1391
1380 if (target) 1392 if (target)
1381 table->entries[state] = target; 1393 table->entries[state] = target;
@@ -1396,12 +1408,15 @@ static void nvme_configure_apst(struct nvme_ctrl *ctrl)
1396 NVME_PS_FLAGS_NON_OP_STATE)) 1408 NVME_PS_FLAGS_NON_OP_STATE))
1397 continue; 1409 continue;
1398 1410
1399 total_latency_us = 1411 exit_latency_us =
1400 (u64)le32_to_cpu(ctrl->psd[state].entry_lat) + 1412 (u64)le32_to_cpu(ctrl->psd[state].exit_lat);
1401 + le32_to_cpu(ctrl->psd[state].exit_lat); 1413 if (exit_latency_us > ctrl->ps_max_latency_us)
1402 if (total_latency_us > ctrl->ps_max_latency_us)
1403 continue; 1414 continue;
1404 1415
1416 total_latency_us =
1417 exit_latency_us +
1418 le32_to_cpu(ctrl->psd[state].entry_lat);
1419
1405 /* 1420 /*
1406 * This state is good. Use it as the APST idle 1421 * This state is good. Use it as the APST idle
1407 * target for higher power states. 1422 * target for higher power states.
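
The APST hunks split one comparison into two roles: only the exit latency must fit under ps_max_latency_us for a state to be eligible, while entry plus exit latency still sizes the 50x idle timeout (and the first core.c hunk raises the default budget from 25000 to 100000 us). For example, a state with 60000 us entry and 20000 us exit latency failed the old combined test (80000 > 25000) but passes the new one:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t entry_lat_us = 60000, exit_lat_us = 20000;  /* example state */
        uint64_t ps_max_latency_us = 25000;

        if (exit_lat_us > ps_max_latency_us) {
            puts("state rejected");
            return 0;
        }
        uint64_t total_latency_us = entry_lat_us + exit_lat_us;
        uint64_t transition_ms = 50 * total_latency_us / 1000;
        printf("eligible; idle timeout %llu ms\n",
               (unsigned long long)transition_ms);           /* 4000 ms */
        return 0;
    }
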
@@ -1605,7 +1620,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
1605 } 1620 }
1606 memcpy(ctrl->psd, id->psd, sizeof(ctrl->psd)); 1621 memcpy(ctrl->psd, id->psd, sizeof(ctrl->psd));
1607 1622
1608 if (ctrl->ops->is_fabrics) { 1623 if (ctrl->ops->flags & NVME_F_FABRICS) {
1609 ctrl->icdoff = le16_to_cpu(id->icdoff); 1624 ctrl->icdoff = le16_to_cpu(id->icdoff);
1610 ctrl->ioccsz = le32_to_cpu(id->ioccsz); 1625 ctrl->ioccsz = le32_to_cpu(id->ioccsz);
1611 ctrl->iorcsz = le32_to_cpu(id->iorcsz); 1626 ctrl->iorcsz = le32_to_cpu(id->iorcsz);
@@ -2098,7 +2113,6 @@ static void nvme_ns_remove(struct nvme_ns *ns)
2098 if (ns->ndev) 2113 if (ns->ndev)
2099 nvme_nvm_unregister_sysfs(ns); 2114 nvme_nvm_unregister_sysfs(ns);
2100 del_gendisk(ns->disk); 2115 del_gendisk(ns->disk);
2101 blk_mq_abort_requeue_list(ns->queue);
2102 blk_cleanup_queue(ns->queue); 2116 blk_cleanup_queue(ns->queue);
2103 } 2117 }
2104 2118
@@ -2427,6 +2441,10 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl)
2427 struct nvme_ns *ns; 2441 struct nvme_ns *ns;
2428 2442
2429 mutex_lock(&ctrl->namespaces_mutex); 2443 mutex_lock(&ctrl->namespaces_mutex);
2444
2445 /* Forcibly start all queues to avoid having stuck requests */
2446 blk_mq_start_hw_queues(ctrl->admin_q);
2447
2430 list_for_each_entry(ns, &ctrl->namespaces, list) { 2448 list_for_each_entry(ns, &ctrl->namespaces, list) {
2431 /* 2449 /*
2432 * Revalidating a dead namespace sets capacity to 0. This will 2450 * Revalidating a dead namespace sets capacity to 0. This will
@@ -2436,8 +2454,16 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl)
2436 continue; 2454 continue;
2437 revalidate_disk(ns->disk); 2455 revalidate_disk(ns->disk);
2438 blk_set_queue_dying(ns->queue); 2456 blk_set_queue_dying(ns->queue);
2439 blk_mq_abort_requeue_list(ns->queue); 2457
2440 blk_mq_start_stopped_hw_queues(ns->queue, true); 2458 /*
2459 * Forcibly start all queues to avoid having stuck requests.
2460 * Note that we must ensure the queues are not stopped
2461 * when the final removal happens.
2462 */
2463 blk_mq_start_hw_queues(ns->queue);
2464
2465 /* draining requests in requeue list */
2466 blk_mq_kick_requeue_list(ns->queue);
2441 } 2467 }
2442 mutex_unlock(&ctrl->namespaces_mutex); 2468 mutex_unlock(&ctrl->namespaces_mutex);
2443} 2469}
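
The ordering inside nvme_kill_queues() is the point: mark the queue dying so new I/O fails, force the hardware queues running so nothing stays parked across the final removal, then kick the requeue list so previously requeued requests are pushed into the now-dying queue instead of hanging. The same three steps distilled (kernel context assumed; these are the block-layer calls from the hunk itself):

    /* Fail-fast teardown order, as in nvme_kill_queues() above. */
    static void kill_one_queue(struct request_queue *q)
    {
        blk_set_queue_dying(q);        /* new requests fail immediately */
        blk_mq_start_hw_queues(q);     /* a stopped queue would strand requests */
        blk_mq_kick_requeue_list(q);   /* drain requeued requests into the dying queue */
    }
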
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 70e689bf1cad..92964cef0f4b 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -45,8 +45,6 @@ enum nvme_fc_queue_flags {
45 45
46#define NVMEFC_QUEUE_DELAY 3 /* ms units */ 46#define NVMEFC_QUEUE_DELAY 3 /* ms units */
47 47
48#define NVME_FC_MAX_CONNECT_ATTEMPTS 1
49
50struct nvme_fc_queue { 48struct nvme_fc_queue {
51 struct nvme_fc_ctrl *ctrl; 49 struct nvme_fc_ctrl *ctrl;
52 struct device *dev; 50 struct device *dev;
@@ -165,8 +163,6 @@ struct nvme_fc_ctrl {
165 struct work_struct delete_work; 163 struct work_struct delete_work;
166 struct work_struct reset_work; 164 struct work_struct reset_work;
167 struct delayed_work connect_work; 165 struct delayed_work connect_work;
168 int reconnect_delay;
169 int connect_attempts;
170 166
171 struct kref ref; 167 struct kref ref;
172 u32 flags; 168 u32 flags;
@@ -1143,6 +1139,7 @@ nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
1143/* *********************** NVME Ctrl Routines **************************** */ 1139/* *********************** NVME Ctrl Routines **************************** */
1144 1140
1145static void __nvme_fc_final_op_cleanup(struct request *rq); 1141static void __nvme_fc_final_op_cleanup(struct request *rq);
1142static void nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg);
1146 1143
1147static int 1144static int
1148nvme_fc_reinit_request(void *data, struct request *rq) 1145nvme_fc_reinit_request(void *data, struct request *rq)
@@ -1269,7 +1266,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
1269 struct nvme_command *sqe = &op->cmd_iu.sqe; 1266 struct nvme_command *sqe = &op->cmd_iu.sqe;
1270 __le16 status = cpu_to_le16(NVME_SC_SUCCESS << 1); 1267 __le16 status = cpu_to_le16(NVME_SC_SUCCESS << 1);
1271 union nvme_result result; 1268 union nvme_result result;
1272 bool complete_rq; 1269 bool complete_rq, terminate_assoc = true;
1273 1270
1274 /* 1271 /*
1275 * WARNING: 1272 * WARNING:
@@ -1298,6 +1295,14 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
1298 * fabricate a CQE, the following fields will not be set as they 1295 * fabricate a CQE, the following fields will not be set as they
1299 * are not referenced: 1296 * are not referenced:
1300 * cqe.sqid, cqe.sqhd, cqe.command_id 1297 * cqe.sqid, cqe.sqhd, cqe.command_id
1298 *
 1299	 * Failure or error of an individual I/O, in a transport-
 1300	 * detected fashion unrelated to the nvme completion status, can
 1301	 * cause the initiator and target sides to get out
1302 * of sync on SQ head/tail (aka outstanding io count allowed).
1303 * Per FC-NVME spec, failure of an individual command requires
1304 * the connection to be terminated, which in turn requires the
1305 * association to be terminated.
1301 */ 1306 */
1302 1307
1303 fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma, 1308 fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma,
@@ -1363,6 +1368,8 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
1363 goto done; 1368 goto done;
1364 } 1369 }
1365 1370
1371 terminate_assoc = false;
1372
1366done: 1373done:
1367 if (op->flags & FCOP_FLAGS_AEN) { 1374 if (op->flags & FCOP_FLAGS_AEN) {
1368 nvme_complete_async_event(&queue->ctrl->ctrl, status, &result); 1375 nvme_complete_async_event(&queue->ctrl->ctrl, status, &result);
@@ -1370,19 +1377,23 @@ done:
1370 atomic_set(&op->state, FCPOP_STATE_IDLE); 1377 atomic_set(&op->state, FCPOP_STATE_IDLE);
1371 op->flags = FCOP_FLAGS_AEN; /* clear other flags */ 1378 op->flags = FCOP_FLAGS_AEN; /* clear other flags */
1372 nvme_fc_ctrl_put(ctrl); 1379 nvme_fc_ctrl_put(ctrl);
1373 return; 1380 goto check_error;
1374 } 1381 }
1375 1382
1376 complete_rq = __nvme_fc_fcpop_chk_teardowns(ctrl, op); 1383 complete_rq = __nvme_fc_fcpop_chk_teardowns(ctrl, op);
1377 if (!complete_rq) { 1384 if (!complete_rq) {
1378 if (unlikely(op->flags & FCOP_FLAGS_TERMIO)) { 1385 if (unlikely(op->flags & FCOP_FLAGS_TERMIO)) {
1379 status = cpu_to_le16(NVME_SC_ABORT_REQ); 1386 status = cpu_to_le16(NVME_SC_ABORT_REQ << 1);
1380 if (blk_queue_dying(rq->q)) 1387 if (blk_queue_dying(rq->q))
1381 status |= cpu_to_le16(NVME_SC_DNR); 1388 status |= cpu_to_le16(NVME_SC_DNR << 1);
1382 } 1389 }
1383 nvme_end_request(rq, status, result); 1390 nvme_end_request(rq, status, result);
1384 } else 1391 } else
1385 __nvme_fc_final_op_cleanup(rq); 1392 __nvme_fc_final_op_cleanup(rq);
1393
1394check_error:
1395 if (terminate_assoc)
1396 nvme_fc_error_recovery(ctrl, "transport detected io error");
1386} 1397}
1387 1398
1388static int 1399static int
@@ -1751,9 +1762,13 @@ nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
1751 dev_warn(ctrl->ctrl.device, 1762 dev_warn(ctrl->ctrl.device,
1752 "NVME-FC{%d}: transport association error detected: %s\n", 1763 "NVME-FC{%d}: transport association error detected: %s\n",
1753 ctrl->cnum, errmsg); 1764 ctrl->cnum, errmsg);
1754 dev_info(ctrl->ctrl.device, 1765 dev_warn(ctrl->ctrl.device,
1755 "NVME-FC{%d}: resetting controller\n", ctrl->cnum); 1766 "NVME-FC{%d}: resetting controller\n", ctrl->cnum);
1756 1767
1768 /* stop the queues on error, cleanup is in reset thread */
1769 if (ctrl->queue_count > 1)
1770 nvme_stop_queues(&ctrl->ctrl);
1771
1757 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) { 1772 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) {
1758 dev_err(ctrl->ctrl.device, 1773 dev_err(ctrl->ctrl.device,
1759 "NVME-FC{%d}: error_recovery: Couldn't change state " 1774 "NVME-FC{%d}: error_recovery: Couldn't change state "
@@ -2191,9 +2206,6 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
2191 if (!opts->nr_io_queues) 2206 if (!opts->nr_io_queues)
2192 return 0; 2207 return 0;
2193 2208
2194 dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n",
2195 opts->nr_io_queues);
2196
2197 nvme_fc_init_io_queues(ctrl); 2209 nvme_fc_init_io_queues(ctrl);
2198 2210
2199 memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set)); 2211 memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
@@ -2264,9 +2276,6 @@ nvme_fc_reinit_io_queues(struct nvme_fc_ctrl *ctrl)
2264 if (ctrl->queue_count == 1) 2276 if (ctrl->queue_count == 1)
2265 return 0; 2277 return 0;
2266 2278
2267 dev_info(ctrl->ctrl.device, "Recreating %d I/O queues.\n",
2268 opts->nr_io_queues);
2269
2270 nvme_fc_init_io_queues(ctrl); 2279 nvme_fc_init_io_queues(ctrl);
2271 2280
2272 ret = blk_mq_reinit_tagset(&ctrl->tag_set); 2281 ret = blk_mq_reinit_tagset(&ctrl->tag_set);
@@ -2302,7 +2311,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
2302 int ret; 2311 int ret;
2303 bool changed; 2312 bool changed;
2304 2313
2305 ctrl->connect_attempts++; 2314 ++ctrl->ctrl.opts->nr_reconnects;
2306 2315
2307 /* 2316 /*
2308 * Create the admin queue 2317 * Create the admin queue
@@ -2399,9 +2408,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
2399 changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE); 2408 changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
2400 WARN_ON_ONCE(!changed); 2409 WARN_ON_ONCE(!changed);
2401 2410
2402 ctrl->connect_attempts = 0; 2411 ctrl->ctrl.opts->nr_reconnects = 0;
2403
2404 kref_get(&ctrl->ctrl.kref);
2405 2412
2406 if (ctrl->queue_count > 1) { 2413 if (ctrl->queue_count > 1) {
2407 nvme_start_queues(&ctrl->ctrl); 2414 nvme_start_queues(&ctrl->ctrl);
@@ -2532,26 +2539,32 @@ nvme_fc_delete_ctrl_work(struct work_struct *work)
2532 2539
2533 /* 2540 /*
2534 * tear down the controller 2541 * tear down the controller
2535 * This will result in the last reference on the nvme ctrl to 2542 * After the last reference on the nvme ctrl is removed,
2536 * expire, calling the transport nvme_fc_nvme_ctrl_freed() callback. 2543 * the transport nvme_fc_nvme_ctrl_freed() callback will be
2537	 * From there, the transport will tear down its logical queues and	2544	 * invoked. From there, the transport will tear down its
2538 * association. 2545 * logical queues and association.
2539 */ 2546 */
2540 nvme_uninit_ctrl(&ctrl->ctrl); 2547 nvme_uninit_ctrl(&ctrl->ctrl);
2541 2548
2542 nvme_put_ctrl(&ctrl->ctrl); 2549 nvme_put_ctrl(&ctrl->ctrl);
2543} 2550}
2544 2551
2545static int 2552static bool
2546__nvme_fc_del_ctrl(struct nvme_fc_ctrl *ctrl) 2553__nvme_fc_schedule_delete_work(struct nvme_fc_ctrl *ctrl)
2547{ 2554{
2548 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING)) 2555 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING))
2549 return -EBUSY; 2556 return true;
2550 2557
2551 if (!queue_work(nvme_fc_wq, &ctrl->delete_work)) 2558 if (!queue_work(nvme_fc_wq, &ctrl->delete_work))
2552 return -EBUSY; 2559 return true;
2553 2560
2554 return 0; 2561 return false;
2562}
2563
2564static int
2565__nvme_fc_del_ctrl(struct nvme_fc_ctrl *ctrl)
2566{
2567 return __nvme_fc_schedule_delete_work(ctrl) ? -EBUSY : 0;
2555} 2568}
2556 2569
2557/* 2570/*
@@ -2577,6 +2590,35 @@ nvme_fc_del_nvme_ctrl(struct nvme_ctrl *nctrl)
2577} 2590}
2578 2591
2579static void 2592static void
2593nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
2594{
2595 /* If we are resetting/deleting then do nothing */
2596 if (ctrl->ctrl.state != NVME_CTRL_RECONNECTING) {
2597 WARN_ON_ONCE(ctrl->ctrl.state == NVME_CTRL_NEW ||
2598 ctrl->ctrl.state == NVME_CTRL_LIVE);
2599 return;
2600 }
2601
2602 dev_info(ctrl->ctrl.device,
2603 "NVME-FC{%d}: reset: Reconnect attempt failed (%d)\n",
2604 ctrl->cnum, status);
2605
2606 if (nvmf_should_reconnect(&ctrl->ctrl)) {
2607 dev_info(ctrl->ctrl.device,
2608 "NVME-FC{%d}: Reconnect attempt in %d seconds.\n",
2609 ctrl->cnum, ctrl->ctrl.opts->reconnect_delay);
2610 queue_delayed_work(nvme_fc_wq, &ctrl->connect_work,
2611 ctrl->ctrl.opts->reconnect_delay * HZ);
2612 } else {
2613 dev_warn(ctrl->ctrl.device,
2614 "NVME-FC{%d}: Max reconnect attempts (%d) "
2615 "reached. Removing controller\n",
2616 ctrl->cnum, ctrl->ctrl.opts->nr_reconnects);
2617 WARN_ON(__nvme_fc_schedule_delete_work(ctrl));
2618 }
2619}
2620
2621static void
2580nvme_fc_reset_ctrl_work(struct work_struct *work) 2622nvme_fc_reset_ctrl_work(struct work_struct *work)
2581{ 2623{
2582 struct nvme_fc_ctrl *ctrl = 2624 struct nvme_fc_ctrl *ctrl =
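
nvme_fc_reconnect_or_delete() retires the driver-private connect_attempts counter in favor of the fabrics-level policy, which is why NVMF_OPT_CTRL_LOSS_TMO becomes an allowed option further down. A sketch of the budget nvmf_should_reconnect() presumably enforces (assumed semantics: retry until the accumulated delay would exceed ctrl_loss_tmo, with a negative ctrl_loss_tmo meaning retry forever):

    #include <stdbool.h>

    struct opts { int nr_reconnects, reconnect_delay, ctrl_loss_tmo; };

    static bool should_reconnect(const struct opts *o)
    {
        if (o->ctrl_loss_tmo < 0)
            return true;    /* retry forever */
        return o->nr_reconnects * o->reconnect_delay < o->ctrl_loss_tmo;
    }
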
@@ -2587,34 +2629,9 @@ nvme_fc_reset_ctrl_work(struct work_struct *work)
2587 nvme_fc_delete_association(ctrl); 2629 nvme_fc_delete_association(ctrl);
2588 2630
2589 ret = nvme_fc_create_association(ctrl); 2631 ret = nvme_fc_create_association(ctrl);
2590 if (ret) { 2632 if (ret)
2591 dev_warn(ctrl->ctrl.device, 2633 nvme_fc_reconnect_or_delete(ctrl, ret);
2592 "NVME-FC{%d}: reset: Reconnect attempt failed (%d)\n", 2634 else
2593 ctrl->cnum, ret);
2594 if (ctrl->connect_attempts >= NVME_FC_MAX_CONNECT_ATTEMPTS) {
2595 dev_warn(ctrl->ctrl.device,
2596 "NVME-FC{%d}: Max reconnect attempts (%d) "
2597 "reached. Removing controller\n",
2598 ctrl->cnum, ctrl->connect_attempts);
2599
2600 if (!nvme_change_ctrl_state(&ctrl->ctrl,
2601 NVME_CTRL_DELETING)) {
2602 dev_err(ctrl->ctrl.device,
2603 "NVME-FC{%d}: failed to change state "
2604 "to DELETING\n", ctrl->cnum);
2605 return;
2606 }
2607
2608 WARN_ON(!queue_work(nvme_fc_wq, &ctrl->delete_work));
2609 return;
2610 }
2611
2612 dev_warn(ctrl->ctrl.device,
2613 "NVME-FC{%d}: Reconnect attempt in %d seconds.\n",
2614 ctrl->cnum, ctrl->reconnect_delay);
2615 queue_delayed_work(nvme_fc_wq, &ctrl->connect_work,
2616 ctrl->reconnect_delay * HZ);
2617 } else
2618 dev_info(ctrl->ctrl.device, 2635 dev_info(ctrl->ctrl.device,
2619 "NVME-FC{%d}: controller reset complete\n", ctrl->cnum); 2636 "NVME-FC{%d}: controller reset complete\n", ctrl->cnum);
2620} 2637}
@@ -2628,7 +2645,7 @@ nvme_fc_reset_nvme_ctrl(struct nvme_ctrl *nctrl)
2628{ 2645{
2629 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl); 2646 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
2630 2647
2631 dev_warn(ctrl->ctrl.device, 2648 dev_info(ctrl->ctrl.device,
2632 "NVME-FC{%d}: admin requested controller reset\n", ctrl->cnum); 2649 "NVME-FC{%d}: admin requested controller reset\n", ctrl->cnum);
2633 2650
2634 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING)) 2651 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING))
@@ -2645,7 +2662,7 @@ nvme_fc_reset_nvme_ctrl(struct nvme_ctrl *nctrl)
2645static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = { 2662static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
2646 .name = "fc", 2663 .name = "fc",
2647 .module = THIS_MODULE, 2664 .module = THIS_MODULE,
2648 .is_fabrics = true, 2665 .flags = NVME_F_FABRICS,
2649 .reg_read32 = nvmf_reg_read32, 2666 .reg_read32 = nvmf_reg_read32,
2650 .reg_read64 = nvmf_reg_read64, 2667 .reg_read64 = nvmf_reg_read64,
2651 .reg_write32 = nvmf_reg_write32, 2668 .reg_write32 = nvmf_reg_write32,
@@ -2667,34 +2684,9 @@ nvme_fc_connect_ctrl_work(struct work_struct *work)
2667 struct nvme_fc_ctrl, connect_work); 2684 struct nvme_fc_ctrl, connect_work);
2668 2685
2669 ret = nvme_fc_create_association(ctrl); 2686 ret = nvme_fc_create_association(ctrl);
2670 if (ret) { 2687 if (ret)
2671 dev_warn(ctrl->ctrl.device, 2688 nvme_fc_reconnect_or_delete(ctrl, ret);
2672 "NVME-FC{%d}: Reconnect attempt failed (%d)\n", 2689 else
2673 ctrl->cnum, ret);
2674 if (ctrl->connect_attempts >= NVME_FC_MAX_CONNECT_ATTEMPTS) {
2675 dev_warn(ctrl->ctrl.device,
2676 "NVME-FC{%d}: Max reconnect attempts (%d) "
2677 "reached. Removing controller\n",
2678 ctrl->cnum, ctrl->connect_attempts);
2679
2680 if (!nvme_change_ctrl_state(&ctrl->ctrl,
2681 NVME_CTRL_DELETING)) {
2682 dev_err(ctrl->ctrl.device,
2683 "NVME-FC{%d}: failed to change state "
2684 "to DELETING\n", ctrl->cnum);
2685 return;
2686 }
2687
2688 WARN_ON(!queue_work(nvme_fc_wq, &ctrl->delete_work));
2689 return;
2690 }
2691
2692 dev_warn(ctrl->ctrl.device,
2693 "NVME-FC{%d}: Reconnect attempt in %d seconds.\n",
2694 ctrl->cnum, ctrl->reconnect_delay);
2695 queue_delayed_work(nvme_fc_wq, &ctrl->connect_work,
2696 ctrl->reconnect_delay * HZ);
2697 } else
2698 dev_info(ctrl->ctrl.device, 2690 dev_info(ctrl->ctrl.device,
2699 "NVME-FC{%d}: controller reconnect complete\n", 2691 "NVME-FC{%d}: controller reconnect complete\n",
2700 ctrl->cnum); 2692 ctrl->cnum);
@@ -2720,6 +2712,12 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
2720 unsigned long flags; 2712 unsigned long flags;
2721 int ret, idx; 2713 int ret, idx;
2722 2714
2715 if (!(rport->remoteport.port_role &
2716 (FC_PORT_ROLE_NVME_DISCOVERY | FC_PORT_ROLE_NVME_TARGET))) {
2717 ret = -EBADR;
2718 goto out_fail;
2719 }
2720
2723 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); 2721 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
2724 if (!ctrl) { 2722 if (!ctrl) {
2725 ret = -ENOMEM; 2723 ret = -ENOMEM;
@@ -2745,7 +2743,6 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
2745 INIT_WORK(&ctrl->delete_work, nvme_fc_delete_ctrl_work); 2743 INIT_WORK(&ctrl->delete_work, nvme_fc_delete_ctrl_work);
2746 INIT_WORK(&ctrl->reset_work, nvme_fc_reset_ctrl_work); 2744 INIT_WORK(&ctrl->reset_work, nvme_fc_reset_ctrl_work);
2747 INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work); 2745 INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work);
2748 ctrl->reconnect_delay = opts->reconnect_delay;
2749 spin_lock_init(&ctrl->lock); 2746 spin_lock_init(&ctrl->lock);
2750 2747
2751 /* io queue count */ 2748 /* io queue count */
@@ -2825,6 +2822,8 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
2825 return ERR_PTR(ret); 2822 return ERR_PTR(ret);
2826 } 2823 }
2827 2824
2825 kref_get(&ctrl->ctrl.kref);
2826
2828 dev_info(ctrl->ctrl.device, 2827 dev_info(ctrl->ctrl.device,
2829 "NVME-FC{%d}: new ctrl: NQN \"%s\"\n", 2828 "NVME-FC{%d}: new ctrl: NQN \"%s\"\n",
2830 ctrl->cnum, ctrl->ctrl.opts->subsysnqn); 2829 ctrl->cnum, ctrl->ctrl.opts->subsysnqn);
@@ -2961,7 +2960,7 @@ nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts)
2961static struct nvmf_transport_ops nvme_fc_transport = { 2960static struct nvmf_transport_ops nvme_fc_transport = {
2962 .name = "fc", 2961 .name = "fc",
2963 .required_opts = NVMF_OPT_TRADDR | NVMF_OPT_HOST_TRADDR, 2962 .required_opts = NVMF_OPT_TRADDR | NVMF_OPT_HOST_TRADDR,
2964 .allowed_opts = NVMF_OPT_RECONNECT_DELAY, 2963 .allowed_opts = NVMF_OPT_RECONNECT_DELAY | NVMF_OPT_CTRL_LOSS_TMO,
2965 .create_ctrl = nvme_fc_create_ctrl, 2964 .create_ctrl = nvme_fc_create_ctrl,
2966}; 2965};
2967 2966
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 29c708ca9621..9d6a070d4391 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -208,7 +208,9 @@ struct nvme_ns {
208struct nvme_ctrl_ops { 208struct nvme_ctrl_ops {
209 const char *name; 209 const char *name;
210 struct module *module; 210 struct module *module;
211 bool is_fabrics; 211 unsigned int flags;
212#define NVME_F_FABRICS (1 << 0)
213#define NVME_F_METADATA_SUPPORTED (1 << 1)
212 int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val); 214 int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
213 int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val); 215 int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
214 int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val); 216 int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
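
Turning the is_fabrics bool into a flags word keeps the call sites as cheap bit tests while making room for the second capability this series needs. The macro values are the ones from the hunk:

    #include <stdbool.h>

    #define NVME_F_FABRICS            (1u << 0)
    #define NVME_F_METADATA_SUPPORTED (1u << 1)

    struct ctrl_ops { unsigned int flags; };

    static bool is_fabrics(const struct ctrl_ops *ops)
    {
        return ops->flags & NVME_F_FABRICS;
    }
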
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index fed803232edc..40c7581caeb0 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -263,7 +263,7 @@ static void nvme_dbbuf_set(struct nvme_dev *dev)
263 c.dbbuf.prp2 = cpu_to_le64(dev->dbbuf_eis_dma_addr); 263 c.dbbuf.prp2 = cpu_to_le64(dev->dbbuf_eis_dma_addr);
264 264
265 if (nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0)) { 265 if (nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0)) {
266 dev_warn(dev->dev, "unable to set dbbuf\n"); 266 dev_warn(dev->ctrl.device, "unable to set dbbuf\n");
267 /* Free memory and continue on */ 267 /* Free memory and continue on */
268 nvme_dbbuf_dma_free(dev); 268 nvme_dbbuf_dma_free(dev);
269 } 269 }
@@ -1367,7 +1367,7 @@ static bool nvme_should_reset(struct nvme_dev *dev, u32 csts)
1367 bool nssro = dev->subsystem && (csts & NVME_CSTS_NSSRO); 1367 bool nssro = dev->subsystem && (csts & NVME_CSTS_NSSRO);
1368 1368
1369 /* If there is a reset ongoing, we shouldn't reset again. */ 1369 /* If there is a reset ongoing, we shouldn't reset again. */
1370 if (work_busy(&dev->reset_work)) 1370 if (dev->ctrl.state == NVME_CTRL_RESETTING)
1371 return false; 1371 return false;
1372 1372
1373 /* We shouldn't reset unless the controller is on fatal error state 1373 /* We shouldn't reset unless the controller is on fatal error state
@@ -1394,11 +1394,11 @@ static void nvme_warn_reset(struct nvme_dev *dev, u32 csts)
1394 result = pci_read_config_word(to_pci_dev(dev->dev), PCI_STATUS, 1394 result = pci_read_config_word(to_pci_dev(dev->dev), PCI_STATUS,
1395 &pci_status); 1395 &pci_status);
1396 if (result == PCIBIOS_SUCCESSFUL) 1396 if (result == PCIBIOS_SUCCESSFUL)
1397 dev_warn(dev->dev, 1397 dev_warn(dev->ctrl.device,
1398 "controller is down; will reset: CSTS=0x%x, PCI_STATUS=0x%hx\n", 1398 "controller is down; will reset: CSTS=0x%x, PCI_STATUS=0x%hx\n",
1399 csts, pci_status); 1399 csts, pci_status);
1400 else 1400 else
1401 dev_warn(dev->dev, 1401 dev_warn(dev->ctrl.device,
1402 "controller is down; will reset: CSTS=0x%x, PCI_STATUS read failed (%d)\n", 1402 "controller is down; will reset: CSTS=0x%x, PCI_STATUS read failed (%d)\n",
1403 csts, result); 1403 csts, result);
1404} 1404}
@@ -1506,6 +1506,11 @@ static inline void nvme_release_cmb(struct nvme_dev *dev)
1506 if (dev->cmb) { 1506 if (dev->cmb) {
1507 iounmap(dev->cmb); 1507 iounmap(dev->cmb);
1508 dev->cmb = NULL; 1508 dev->cmb = NULL;
1509 if (dev->cmbsz) {
1510 sysfs_remove_file_from_group(&dev->ctrl.device->kobj,
1511 &dev_attr_cmb.attr, NULL);
1512 dev->cmbsz = 0;
1513 }
1509 } 1514 }
1510} 1515}
1511 1516
@@ -1735,8 +1740,8 @@ static int nvme_pci_enable(struct nvme_dev *dev)
1735 */ 1740 */
1736 if (pdev->vendor == PCI_VENDOR_ID_APPLE && pdev->device == 0x2001) { 1741 if (pdev->vendor == PCI_VENDOR_ID_APPLE && pdev->device == 0x2001) {
1737 dev->q_depth = 2; 1742 dev->q_depth = 2;
1738 dev_warn(dev->dev, "detected Apple NVMe controller, set " 1743 dev_warn(dev->ctrl.device, "detected Apple NVMe controller, "
1739 "queue depth=%u to work around controller resets\n", 1744 "set queue depth=%u to work around controller resets\n",
1740 dev->q_depth); 1745 dev->q_depth);
1741 } 1746 }
1742 1747
@@ -1754,7 +1759,7 @@ static int nvme_pci_enable(struct nvme_dev *dev)
1754 if (dev->cmbsz) { 1759 if (dev->cmbsz) {
1755 if (sysfs_add_file_to_group(&dev->ctrl.device->kobj, 1760 if (sysfs_add_file_to_group(&dev->ctrl.device->kobj,
1756 &dev_attr_cmb.attr, NULL)) 1761 &dev_attr_cmb.attr, NULL))
1757 dev_warn(dev->dev, 1762 dev_warn(dev->ctrl.device,
1758 "failed to add sysfs attribute for CMB\n"); 1763 "failed to add sysfs attribute for CMB\n");
1759 } 1764 }
1760 } 1765 }
@@ -1779,6 +1784,7 @@ static void nvme_pci_disable(struct nvme_dev *dev)
1779{ 1784{
1780 struct pci_dev *pdev = to_pci_dev(dev->dev); 1785 struct pci_dev *pdev = to_pci_dev(dev->dev);
1781 1786
1787 nvme_release_cmb(dev);
1782 pci_free_irq_vectors(pdev); 1788 pci_free_irq_vectors(pdev);
1783 1789
1784 if (pci_is_enabled(pdev)) { 1790 if (pci_is_enabled(pdev)) {
@@ -1799,7 +1805,8 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
1799 if (pci_is_enabled(pdev)) { 1805 if (pci_is_enabled(pdev)) {
1800 u32 csts = readl(dev->bar + NVME_REG_CSTS); 1806 u32 csts = readl(dev->bar + NVME_REG_CSTS);
1801 1807
1802 if (dev->ctrl.state == NVME_CTRL_LIVE) 1808 if (dev->ctrl.state == NVME_CTRL_LIVE ||
1809 dev->ctrl.state == NVME_CTRL_RESETTING)
1803 nvme_start_freeze(&dev->ctrl); 1810 nvme_start_freeze(&dev->ctrl);
1804 dead = !!((csts & NVME_CSTS_CFS) || !(csts & NVME_CSTS_RDY) || 1811 dead = !!((csts & NVME_CSTS_CFS) || !(csts & NVME_CSTS_RDY) ||
1805 pdev->error_state != pci_channel_io_normal); 1812 pdev->error_state != pci_channel_io_normal);
@@ -1897,7 +1904,7 @@ static void nvme_reset_work(struct work_struct *work)
1897 bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL); 1904 bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL);
1898 int result = -ENODEV; 1905 int result = -ENODEV;
1899 1906
1900 if (WARN_ON(dev->ctrl.state == NVME_CTRL_RESETTING)) 1907 if (WARN_ON(dev->ctrl.state != NVME_CTRL_RESETTING))
1901 goto out; 1908 goto out;
1902 1909
1903 /* 1910 /*
@@ -1907,9 +1914,6 @@ static void nvme_reset_work(struct work_struct *work)
1907 if (dev->ctrl.ctrl_config & NVME_CC_ENABLE) 1914 if (dev->ctrl.ctrl_config & NVME_CC_ENABLE)
1908 nvme_dev_disable(dev, false); 1915 nvme_dev_disable(dev, false);
1909 1916
1910 if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING))
1911 goto out;
1912
1913 result = nvme_pci_enable(dev); 1917 result = nvme_pci_enable(dev);
1914 if (result) 1918 if (result)
1915 goto out; 1919 goto out;
@@ -2003,8 +2007,8 @@ static int nvme_reset(struct nvme_dev *dev)
2003{ 2007{
2004 if (!dev->ctrl.admin_q || blk_queue_dying(dev->ctrl.admin_q)) 2008 if (!dev->ctrl.admin_q || blk_queue_dying(dev->ctrl.admin_q))
2005 return -ENODEV; 2009 return -ENODEV;
2006 if (work_busy(&dev->reset_work)) 2010 if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING))
2007 return -ENODEV; 2011 return -EBUSY;
2008 if (!queue_work(nvme_workq, &dev->reset_work)) 2012 if (!queue_work(nvme_workq, &dev->reset_work))
2009 return -EBUSY; 2013 return -EBUSY;
2010 return 0; 2014 return 0;
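
With this hunk, admission to a reset goes through the controller state machine rather than work_busy(): nvme_change_ctrl_state(..., NVME_CTRL_RESETTING) either claims the reset or the caller gets -EBUSY, which is also why the WARN_ON in nvme_reset_work() flips polarity above and why nvme_probe() below transitions to RESETTING before queueing the work. The shape of the idea, with a C11 compare-and-swap standing in for the state machine:

    #include <stdatomic.h>
    #include <errno.h>

    enum ctrl_state { LIVE, RESETTING };

    static _Atomic enum ctrl_state state = LIVE;

    static int try_reset(void)
    {
        enum ctrl_state expected = LIVE;

        if (!atomic_compare_exchange_strong(&state, &expected, RESETTING))
            return -EBUSY;    /* another reset already owns the transition */
        /* queue the reset work here */
        return 0;
    }
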
@@ -2041,6 +2045,7 @@ static int nvme_pci_reset_ctrl(struct nvme_ctrl *ctrl)
2041static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = { 2045static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
2042 .name = "pcie", 2046 .name = "pcie",
2043 .module = THIS_MODULE, 2047 .module = THIS_MODULE,
2048 .flags = NVME_F_METADATA_SUPPORTED,
2044 .reg_read32 = nvme_pci_reg_read32, 2049 .reg_read32 = nvme_pci_reg_read32,
2045 .reg_write32 = nvme_pci_reg_write32, 2050 .reg_write32 = nvme_pci_reg_write32,
2046 .reg_read64 = nvme_pci_reg_read64, 2051 .reg_read64 = nvme_pci_reg_read64,
@@ -2129,6 +2134,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2129 if (result) 2134 if (result)
2130 goto release_pools; 2135 goto release_pools;
2131 2136
2137 nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING);
2132 dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev)); 2138 dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));
2133 2139
2134 queue_work(nvme_workq, &dev->reset_work); 2140 queue_work(nvme_workq, &dev->reset_work);
@@ -2172,6 +2178,7 @@ static void nvme_remove(struct pci_dev *pdev)
2172 2178
2173 nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING); 2179 nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
2174 2180
2181 cancel_work_sync(&dev->reset_work);
2175 pci_set_drvdata(pdev, NULL); 2182 pci_set_drvdata(pdev, NULL);
2176 2183
2177 if (!pci_device_is_present(pdev)) { 2184 if (!pci_device_is_present(pdev)) {
@@ -2184,7 +2191,6 @@ static void nvme_remove(struct pci_dev *pdev)
2184 nvme_dev_disable(dev, true); 2191 nvme_dev_disable(dev, true);
2185 nvme_dev_remove_admin(dev); 2192 nvme_dev_remove_admin(dev);
2186 nvme_free_queues(dev, 0); 2193 nvme_free_queues(dev, 0);
2187 nvme_release_cmb(dev);
2188 nvme_release_prp_pools(dev); 2194 nvme_release_prp_pools(dev);
2189 nvme_dev_unmap(dev); 2195 nvme_dev_unmap(dev);
2190 nvme_put_ctrl(&dev->ctrl); 2196 nvme_put_ctrl(&dev->ctrl);
@@ -2288,6 +2294,8 @@ static const struct pci_device_id nvme_id_table[] = {
2288 { PCI_VDEVICE(INTEL, 0x0a54), 2294 { PCI_VDEVICE(INTEL, 0x0a54),
2289 .driver_data = NVME_QUIRK_STRIPE_SIZE | 2295 .driver_data = NVME_QUIRK_STRIPE_SIZE |
2290 NVME_QUIRK_DEALLOCATE_ZEROES, }, 2296 NVME_QUIRK_DEALLOCATE_ZEROES, },
2297 { PCI_VDEVICE(INTEL, 0xf1a5), /* Intel 600P/P3100 */
2298 .driver_data = NVME_QUIRK_NO_DEEPEST_PS },
2291 { PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */ 2299 { PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */
2292 .driver_data = NVME_QUIRK_IDENTIFY_CNS, }, 2300 .driver_data = NVME_QUIRK_IDENTIFY_CNS, },
2293 { PCI_DEVICE(0x1c58, 0x0003), /* HGST adapter */ 2301 { PCI_DEVICE(0x1c58, 0x0003), /* HGST adapter */
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index dd1c6deef82f..24397d306d53 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -753,28 +753,26 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
753 if (ret) 753 if (ret)
754 goto requeue; 754 goto requeue;
755 755
756 blk_mq_start_stopped_hw_queues(ctrl->ctrl.admin_q, true);
757
758 ret = nvmf_connect_admin_queue(&ctrl->ctrl); 756 ret = nvmf_connect_admin_queue(&ctrl->ctrl);
759 if (ret) 757 if (ret)
760 goto stop_admin_q; 758 goto requeue;
761 759
762 set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[0].flags); 760 set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[0].flags);
763 761
764 ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap); 762 ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
765 if (ret) 763 if (ret)
766 goto stop_admin_q; 764 goto requeue;
767 765
768 nvme_start_keep_alive(&ctrl->ctrl); 766 nvme_start_keep_alive(&ctrl->ctrl);
769 767
770 if (ctrl->queue_count > 1) { 768 if (ctrl->queue_count > 1) {
771 ret = nvme_rdma_init_io_queues(ctrl); 769 ret = nvme_rdma_init_io_queues(ctrl);
772 if (ret) 770 if (ret)
773 goto stop_admin_q; 771 goto requeue;
774 772
775 ret = nvme_rdma_connect_io_queues(ctrl); 773 ret = nvme_rdma_connect_io_queues(ctrl);
776 if (ret) 774 if (ret)
777 goto stop_admin_q; 775 goto requeue;
778 } 776 }
779 777
780 changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE); 778 changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
@@ -782,7 +780,6 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
782 ctrl->ctrl.opts->nr_reconnects = 0; 780 ctrl->ctrl.opts->nr_reconnects = 0;
783 781
784 if (ctrl->queue_count > 1) { 782 if (ctrl->queue_count > 1) {
785 nvme_start_queues(&ctrl->ctrl);
786 nvme_queue_scan(&ctrl->ctrl); 783 nvme_queue_scan(&ctrl->ctrl);
787 nvme_queue_async_events(&ctrl->ctrl); 784 nvme_queue_async_events(&ctrl->ctrl);
788 } 785 }
@@ -791,8 +788,6 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
791 788
792 return; 789 return;
793 790
794stop_admin_q:
795 blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);
796requeue: 791requeue:
797 dev_info(ctrl->ctrl.device, "Failed reconnect attempt %d\n", 792 dev_info(ctrl->ctrl.device, "Failed reconnect attempt %d\n",
798 ctrl->ctrl.opts->nr_reconnects); 793 ctrl->ctrl.opts->nr_reconnects);
@@ -823,6 +818,13 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
823 blk_mq_tagset_busy_iter(&ctrl->admin_tag_set, 818 blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
824 nvme_cancel_request, &ctrl->ctrl); 819 nvme_cancel_request, &ctrl->ctrl);
825 820
821 /*
 822	 * queues are no longer live, so restart the queues to fail
 823	 * new IO fast
824 */
825 blk_mq_start_stopped_hw_queues(ctrl->ctrl.admin_q, true);
826 nvme_start_queues(&ctrl->ctrl);
827
826 nvme_rdma_reconnect_or_remove(ctrl); 828 nvme_rdma_reconnect_or_remove(ctrl);
827} 829}
828 830
@@ -1038,6 +1040,19 @@ static void nvme_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
1038 nvme_rdma_wr_error(cq, wc, "SEND"); 1040 nvme_rdma_wr_error(cq, wc, "SEND");
1039} 1041}
1040 1042
1043static inline int nvme_rdma_queue_sig_limit(struct nvme_rdma_queue *queue)
1044{
1045 int sig_limit;
1046
1047 /*
1048 * We signal completion every queue depth/2 and also handle the
1049 * degenerated case of a device with queue_depth=1, where we
1050 * would need to signal every message.
1051 */
1052 sig_limit = max(queue->queue_size / 2, 1);
1053 return (++queue->sig_count % sig_limit) == 0;
1054}
1055
1041static int nvme_rdma_post_send(struct nvme_rdma_queue *queue, 1056static int nvme_rdma_post_send(struct nvme_rdma_queue *queue,
1042 struct nvme_rdma_qe *qe, struct ib_sge *sge, u32 num_sge, 1057 struct nvme_rdma_qe *qe, struct ib_sge *sge, u32 num_sge,
1043 struct ib_send_wr *first, bool flush) 1058 struct ib_send_wr *first, bool flush)
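
nvme_rdma_queue_sig_limit() replaces the magic 32 with half the queue depth, clamped to 1 so a queue_depth=1 device signals every send rather than never; for a 64-deep queue the cadence is unchanged:

    #include <stdio.h>

    static int sig_limit(int queue_size)
    {
        int half = queue_size / 2;
        return half > 1 ? half : 1;    /* max(queue_size / 2, 1) */
    }

    int main(void)
    {
        printf("queue_size=1  -> signal every %d send(s)\n", sig_limit(1));  /* 1 */
        printf("queue_size=64 -> signal every %d send(s)\n", sig_limit(64)); /* 32 */
        return 0;
    }
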
@@ -1065,9 +1080,6 @@ static int nvme_rdma_post_send(struct nvme_rdma_queue *queue,
 1065	 * Would have been way too obvious to handle this in hardware or	1080	 * Would have been way too obvious to handle this in hardware or
 1066	 * at least the RDMA stack.	1081	 * at least the RDMA stack.
1067 * 1082 *
1068 * This messy and racy code sniplet is copy and pasted from the iSER
1069 * initiator, and the magic '32' comes from there as well.
1070 *
1071 * Always signal the flushes. The magic request used for the flush 1083 * Always signal the flushes. The magic request used for the flush
1072 * sequencer is not allocated in our driver's tagset and it's 1084 * sequencer is not allocated in our driver's tagset and it's
1073 * triggered to be freed by blk_cleanup_queue(). So we need to 1085 * triggered to be freed by blk_cleanup_queue(). So we need to
@@ -1075,7 +1087,7 @@ static int nvme_rdma_post_send(struct nvme_rdma_queue *queue,
1075 * embedded in request's payload, is not freed when __ib_process_cq() 1087 * embedded in request's payload, is not freed when __ib_process_cq()
1076 * calls wr_cqe->done(). 1088 * calls wr_cqe->done().
1077 */ 1089 */
1078 if ((++queue->sig_count % 32) == 0 || flush) 1090 if (nvme_rdma_queue_sig_limit(queue) || flush)
1079 wr.send_flags |= IB_SEND_SIGNALED; 1091 wr.send_flags |= IB_SEND_SIGNALED;
1080 1092
1081 if (first) 1093 if (first)
@@ -1423,7 +1435,7 @@ nvme_rdma_timeout(struct request *rq, bool reserved)
1423/* 1435/*
1424 * We cannot accept any other command until the Connect command has completed. 1436 * We cannot accept any other command until the Connect command has completed.
1425 */ 1437 */
1426static inline bool nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue, 1438static inline int nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue,
1427 struct request *rq) 1439 struct request *rq)
1428{ 1440{
1429 if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags))) { 1441 if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags))) {
@@ -1431,11 +1443,22 @@ static inline bool nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue,
1431 1443
1432 if (!blk_rq_is_passthrough(rq) || 1444 if (!blk_rq_is_passthrough(rq) ||
1433 cmd->common.opcode != nvme_fabrics_command || 1445 cmd->common.opcode != nvme_fabrics_command ||
1434 cmd->fabrics.fctype != nvme_fabrics_type_connect) 1446 cmd->fabrics.fctype != nvme_fabrics_type_connect) {
1435 return false; 1447 /*
1448 * reconnecting state means transport disruption, which
 1449	 * can take a long time and might even fail permanently,
1450 * so we can't let incoming I/O be requeued forever.
 1451	 * Fail it fast to give upper layers a chance to
 1452	 * fail over.
1453 */
1454 if (queue->ctrl->ctrl.state == NVME_CTRL_RECONNECTING)
1455 return -EIO;
1456 else
1457 return -EAGAIN;
1458 }
1436 } 1459 }
1437 1460
1438 return true; 1461 return 0;
1439} 1462}
1440 1463
1441static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx, 1464static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
@@ -1453,8 +1476,9 @@ static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
1453 1476
1454 WARN_ON_ONCE(rq->tag < 0); 1477 WARN_ON_ONCE(rq->tag < 0);
1455 1478
1456 if (!nvme_rdma_queue_is_ready(queue, rq)) 1479 ret = nvme_rdma_queue_is_ready(queue, rq);
1457 return BLK_MQ_RQ_QUEUE_BUSY; 1480 if (unlikely(ret))
1481 goto err;
1458 1482
1459 dev = queue->device->dev; 1483 dev = queue->device->dev;
1460 ib_dma_sync_single_for_cpu(dev, sqe->dma, 1484 ib_dma_sync_single_for_cpu(dev, sqe->dma,
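
Returning an errno from nvme_rdma_queue_is_ready() instead of a bool lets queue_rq() separate a transient condition from a doomed one: -EAGAIN should requeue, while -EIO during RECONNECTING fails the request so upper layers can fail over. The err label it jumps to lies outside the hunk, so the mapping below is an assumption about how the two errnos are consumed:

    #include <errno.h>

    enum ctrl_state { CTRL_LIVE, CTRL_RECONNECTING };

    static int queue_is_ready(enum ctrl_state s, int queue_live, int is_connect)
    {
        if (queue_live || is_connect)
            return 0;                          /* accept the command */
        return s == CTRL_RECONNECTING ? -EIO   /* fail fast, allow failover */
                                      : -EAGAIN; /* transient: requeue */
    }
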
@@ -1782,7 +1806,7 @@ static int nvme_rdma_reset_ctrl(struct nvme_ctrl *nctrl)
1782static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = { 1806static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = {
1783 .name = "rdma", 1807 .name = "rdma",
1784 .module = THIS_MODULE, 1808 .module = THIS_MODULE,
1785 .is_fabrics = true, 1809 .flags = NVME_F_FABRICS,
1786 .reg_read32 = nvmf_reg_read32, 1810 .reg_read32 = nvmf_reg_read32,
1787 .reg_read64 = nvmf_reg_read64, 1811 .reg_read64 = nvmf_reg_read64,
1788 .reg_write32 = nvmf_reg_write32, 1812 .reg_write32 = nvmf_reg_write32,
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index cf90713043da..eb9399ac97cf 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -529,6 +529,12 @@ fail:
529} 529}
530EXPORT_SYMBOL_GPL(nvmet_req_init); 530EXPORT_SYMBOL_GPL(nvmet_req_init);
531 531
532void nvmet_req_uninit(struct nvmet_req *req)
533{
534 percpu_ref_put(&req->sq->ref);
535}
536EXPORT_SYMBOL_GPL(nvmet_req_uninit);
537
532static inline bool nvmet_cc_en(u32 cc) 538static inline bool nvmet_cc_en(u32 cc)
533{ 539{
534 return cc & 0x1; 540 return cc & 0x1;
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 62eba29c85fb..2006fae61980 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -517,9 +517,7 @@ nvmet_fc_queue_to_cpu(struct nvmet_fc_tgtport *tgtport, int qid)
517{ 517{
518 int cpu, idx, cnt; 518 int cpu, idx, cnt;
519 519
520 if (!(tgtport->ops->target_features & 520 if (tgtport->ops->max_hw_queues == 1)
521 NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED) ||
522 tgtport->ops->max_hw_queues == 1)
523 return WORK_CPU_UNBOUND; 521 return WORK_CPU_UNBOUND;
524 522
525 /* Simple cpu selection based on qid modulo active cpu count */ 523 /* Simple cpu selection based on qid modulo active cpu count */
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index 15551ef79c8c..294a6611fb24 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -698,7 +698,6 @@ static struct nvmet_fc_target_template tgttemplate = {
698 .dma_boundary = FCLOOP_DMABOUND_4G, 698 .dma_boundary = FCLOOP_DMABOUND_4G,
699 /* optional features */ 699 /* optional features */
700 .target_features = NVMET_FCTGTFEAT_CMD_IN_ISR | 700 .target_features = NVMET_FCTGTFEAT_CMD_IN_ISR |
701 NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED |
702 NVMET_FCTGTFEAT_OPDONE_IN_ISR, 701 NVMET_FCTGTFEAT_OPDONE_IN_ISR,
703 /* sizes of additional private data for data structures */ 702 /* sizes of additional private data for data structures */
704 .target_priv_sz = sizeof(struct fcloop_tport), 703 .target_priv_sz = sizeof(struct fcloop_tport),
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index feb497134aee..e503cfff0337 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -558,7 +558,7 @@ static int nvme_loop_reset_ctrl(struct nvme_ctrl *nctrl)
558static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = { 558static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = {
559 .name = "loop", 559 .name = "loop",
560 .module = THIS_MODULE, 560 .module = THIS_MODULE,
561 .is_fabrics = true, 561 .flags = NVME_F_FABRICS,
562 .reg_read32 = nvmf_reg_read32, 562 .reg_read32 = nvmf_reg_read32,
563 .reg_read64 = nvmf_reg_read64, 563 .reg_read64 = nvmf_reg_read64,
564 .reg_write32 = nvmf_reg_write32, 564 .reg_write32 = nvmf_reg_write32,
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 7cb77ba5993b..cfc5c7fb0ab7 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -261,6 +261,7 @@ u16 nvmet_parse_fabrics_cmd(struct nvmet_req *req);
261 261
262bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq, 262bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
263 struct nvmet_sq *sq, struct nvmet_fabrics_ops *ops); 263 struct nvmet_sq *sq, struct nvmet_fabrics_ops *ops);
264void nvmet_req_uninit(struct nvmet_req *req);
264void nvmet_req_complete(struct nvmet_req *req, u16 status); 265void nvmet_req_complete(struct nvmet_req *req, u16 status);
265 266
266void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid, 267void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid,
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 99c69018a35f..9e45cde63376 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -567,6 +567,7 @@ static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc)
567 rsp->n_rdma = 0; 567 rsp->n_rdma = 0;
568 568
569 if (unlikely(wc->status != IB_WC_SUCCESS)) { 569 if (unlikely(wc->status != IB_WC_SUCCESS)) {
570 nvmet_req_uninit(&rsp->req);
570 nvmet_rdma_release_rsp(rsp); 571 nvmet_rdma_release_rsp(rsp);
571 if (wc->status != IB_WC_WR_FLUSH_ERR) { 572 if (wc->status != IB_WC_WR_FLUSH_ERR) {
572 pr_info("RDMA READ for CQE 0x%p failed with status %s (%d).\n", 573 pr_info("RDMA READ for CQE 0x%p failed with status %s (%d).\n",
diff --git a/drivers/of/device.c b/drivers/of/device.c
index 9416d052cb89..28c38c756f92 100644
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -144,8 +144,8 @@ int of_dma_configure(struct device *dev, struct device_node *np)
144 coherent ? " " : " not "); 144 coherent ? " " : " not ");
145 145
146 iommu = of_iommu_configure(dev, np); 146 iommu = of_iommu_configure(dev, np);
147 if (IS_ERR(iommu)) 147 if (IS_ERR(iommu) && PTR_ERR(iommu) == -EPROBE_DEFER)
148 return PTR_ERR(iommu); 148 return -EPROBE_DEFER;
149 149
150 dev_dbg(dev, "device is%sbehind an iommu\n", 150 dev_dbg(dev, "device is%sbehind an iommu\n",
151 iommu ? " " : " not "); 151 iommu ? " " : " not ");
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 3080d9dd031d..43bd69dceabf 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -507,6 +507,9 @@ void *__unflatten_device_tree(const void *blob,
 
 	/* Allocate memory for the expanded device tree */
 	mem = dt_alloc(size + 4, __alignof__(struct device_node));
+	if (!mem)
+		return NULL;
+
 	memset(mem, 0, size);
 
 	*(__be32 *)(mem + size) = cpu_to_be32(0xdeadbeef);
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
index 4dec07ea510f..d507c3569a88 100644
--- a/drivers/of/of_reserved_mem.c
+++ b/drivers/of/of_reserved_mem.c
@@ -197,7 +197,7 @@ static int __init __reserved_mem_init_node(struct reserved_mem *rmem)
 	const struct of_device_id *i;
 
 	for (i = __reservedmem_of_table; i < &__rmem_of_table_sentinel; i++) {
-		int const (*initfn)(struct reserved_mem *rmem) = i->data;
+		reservedmem_of_init_fn initfn = i->data;
 		const char *compat = i->compatible;
 
 		if (!of_flat_dt_is_compatible(rmem->fdt_node, compat))
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index 71fecc2debfc..703a42118ffc 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -523,7 +523,7 @@ static int __init of_platform_default_populate_init(void)
 arch_initcall_sync(of_platform_default_populate_init);
 #endif
 
-static int of_platform_device_destroy(struct device *dev, void *data)
+int of_platform_device_destroy(struct device *dev, void *data)
 {
 	/* Do not touch devices not populated from the device tree */
 	if (!dev->of_node || !of_node_check_flag(dev->of_node, OF_POPULATED))
@@ -544,6 +544,7 @@ static int of_platform_device_destroy(struct device *dev, void *data)
 	of_node_clear_flag(dev->of_node, OF_POPULATED_BUS);
 	return 0;
 }
+EXPORT_SYMBOL_GPL(of_platform_device_destroy);
 
 /**
  * of_platform_depopulate() - Remove devices populated from device tree
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index 74cf5fffb1e1..c80e37a69305 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -896,7 +896,7 @@ int pci_read_config_byte(const struct pci_dev *dev, int where, u8 *val)
 {
 	if (pci_dev_is_disconnected(dev)) {
 		*val = ~0;
-		return -ENODEV;
+		return PCIBIOS_DEVICE_NOT_FOUND;
 	}
 	return pci_bus_read_config_byte(dev->bus, dev->devfn, where, val);
 }
@@ -906,7 +906,7 @@ int pci_read_config_word(const struct pci_dev *dev, int where, u16 *val)
 {
 	if (pci_dev_is_disconnected(dev)) {
 		*val = ~0;
-		return -ENODEV;
+		return PCIBIOS_DEVICE_NOT_FOUND;
 	}
 	return pci_bus_read_config_word(dev->bus, dev->devfn, where, val);
 }
@@ -917,7 +917,7 @@ int pci_read_config_dword(const struct pci_dev *dev, int where,
 {
 	if (pci_dev_is_disconnected(dev)) {
 		*val = ~0;
-		return -ENODEV;
+		return PCIBIOS_DEVICE_NOT_FOUND;
 	}
 	return pci_bus_read_config_dword(dev->bus, dev->devfn, where, val);
 }
@@ -926,7 +926,7 @@ EXPORT_SYMBOL(pci_read_config_dword);
 int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val)
 {
 	if (pci_dev_is_disconnected(dev))
-		return -ENODEV;
+		return PCIBIOS_DEVICE_NOT_FOUND;
 	return pci_bus_write_config_byte(dev->bus, dev->devfn, where, val);
 }
 EXPORT_SYMBOL(pci_write_config_byte);
@@ -934,7 +934,7 @@ EXPORT_SYMBOL(pci_write_config_byte);
 int pci_write_config_word(const struct pci_dev *dev, int where, u16 val)
 {
 	if (pci_dev_is_disconnected(dev))
-		return -ENODEV;
+		return PCIBIOS_DEVICE_NOT_FOUND;
 	return pci_bus_write_config_word(dev->bus, dev->devfn, where, val);
 }
 EXPORT_SYMBOL(pci_write_config_word);
@@ -943,7 +943,7 @@ int pci_write_config_dword(const struct pci_dev *dev, int where,
 			   u32 val)
 {
 	if (pci_dev_is_disconnected(dev))
-		return -ENODEV;
+		return PCIBIOS_DEVICE_NOT_FOUND;
 	return pci_bus_write_config_dword(dev->bus, dev->devfn, where, val);
 }
 EXPORT_SYMBOL(pci_write_config_dword);
diff --git a/drivers/pci/dwc/pci-imx6.c b/drivers/pci/dwc/pci-imx6.c
index a98cba55c7f0..19a289b8cc94 100644
--- a/drivers/pci/dwc/pci-imx6.c
+++ b/drivers/pci/dwc/pci-imx6.c
@@ -252,7 +252,34 @@ static void imx6_pcie_reset_phy(struct imx6_pcie *imx6_pcie)
 static int imx6q_pcie_abort_handler(unsigned long addr,
 		unsigned int fsr, struct pt_regs *regs)
 {
-	return 0;
+	unsigned long pc = instruction_pointer(regs);
+	unsigned long instr = *(unsigned long *)pc;
+	int reg = (instr >> 12) & 15;
+
+	/*
+	 * If the instruction being executed was a read,
+	 * make it look like it read all-ones.
+	 */
+	if ((instr & 0x0c100000) == 0x04100000) {
+		unsigned long val;
+
+		if (instr & 0x00400000)
+			val = 255;
+		else
+			val = -1;
+
+		regs->uregs[reg] = val;
+		regs->ARM_pc += 4;
+		return 0;
+	}
+
+	if ((instr & 0x0e100090) == 0x00100090) {
+		regs->uregs[reg] = -1;
+		regs->ARM_pc += 4;
+		return 0;
+	}
+
+	return 1;
 }
 
 static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
@@ -819,8 +846,8 @@ static int __init imx6_pcie_init(void)
 	 * we can install the handler here without risking it
 	 * accessing some uninitialized driver state.
 	 */
-	hook_fault_code(16 + 6, imx6q_pcie_abort_handler, SIGBUS, 0,
-			"imprecise external abort");
+	hook_fault_code(8, imx6q_pcie_abort_handler, SIGBUS, 0,
+			"external abort on non-linefetch");
 
 	return platform_driver_register(&imx6_pcie_driver);
 }
diff --git a/drivers/pci/endpoint/Kconfig b/drivers/pci/endpoint/Kconfig
index c23f146fb5a6..c09623ca8c3b 100644
--- a/drivers/pci/endpoint/Kconfig
+++ b/drivers/pci/endpoint/Kconfig
@@ -6,6 +6,7 @@ menu "PCI Endpoint"
 
 config PCI_ENDPOINT
 	bool "PCI Endpoint Support"
+	depends on HAS_DMA
 	help
 	   Enable this configuration option to support configurable PCI
 	   endpoint. This should be enabled if the platform has a PCI
diff --git a/drivers/pci/endpoint/functions/Kconfig b/drivers/pci/endpoint/functions/Kconfig
index 175edad42d2f..2942066607e0 100644
--- a/drivers/pci/endpoint/functions/Kconfig
+++ b/drivers/pci/endpoint/functions/Kconfig
@@ -5,6 +5,7 @@
 config PCI_EPF_TEST
 	tristate "PCI Endpoint Test driver"
 	depends on PCI_ENDPOINT
+	select CRC32
 	help
 	   Enable this configuration option to enable the test driver
 	   for PCI Endpoint.
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index b01bd5bba8e6..563901cd9c06 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -2144,7 +2144,8 @@ bool pci_dev_keep_suspended(struct pci_dev *pci_dev)
 
 	if (!pm_runtime_suspended(dev)
 	    || pci_target_state(pci_dev) != pci_dev->current_state
-	    || platform_pci_need_resume(pci_dev))
+	    || platform_pci_need_resume(pci_dev)
+	    || (pci_dev->dev_flags & PCI_DEV_FLAGS_NEEDS_RESUME))
 		return false;
 
 	/*
diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
index cc6e085008fb..f6a63406c76e 100644
--- a/drivers/pci/switch/switchtec.c
+++ b/drivers/pci/switch/switchtec.c
@@ -1291,7 +1291,6 @@ static struct switchtec_dev *stdev_create(struct pci_dev *pdev)
 	cdev = &stdev->cdev;
 	cdev_init(cdev, &switchtec_fops);
 	cdev->owner = THIS_MODULE;
-	cdev->kobj.parent = &dev->kobj;
 
 	return stdev;
 
@@ -1442,12 +1441,15 @@ static int switchtec_init_pci(struct switchtec_dev *stdev,
 	stdev->mmio_sys_info = stdev->mmio + SWITCHTEC_GAS_SYS_INFO_OFFSET;
 	stdev->mmio_flash_info = stdev->mmio + SWITCHTEC_GAS_FLASH_INFO_OFFSET;
 	stdev->mmio_ntb = stdev->mmio + SWITCHTEC_GAS_NTB_OFFSET;
-	stdev->partition = ioread8(&stdev->mmio_ntb->partition_id);
+	stdev->partition = ioread8(&stdev->mmio_sys_info->partition_id);
 	stdev->partition_count = ioread8(&stdev->mmio_ntb->partition_count);
 	stdev->mmio_part_cfg_all = stdev->mmio + SWITCHTEC_GAS_PART_CFG_OFFSET;
 	stdev->mmio_part_cfg = &stdev->mmio_part_cfg_all[stdev->partition];
 	stdev->mmio_pff_csr = stdev->mmio + SWITCHTEC_GAS_PFF_CSR_OFFSET;
 
+	if (stdev->partition_count < 1)
+		stdev->partition_count = 1;
+
 	init_pff(stdev);
 
 	pci_set_drvdata(pdev, stdev);
@@ -1479,11 +1481,7 @@ static int switchtec_pci_probe(struct pci_dev *pdev,
 			    SWITCHTEC_EVENT_EN_IRQ,
 			    &stdev->mmio_part_cfg->mrpc_comp_hdr);
 
-	rc = cdev_add(&stdev->cdev, stdev->dev.devt, 1);
-	if (rc)
-		goto err_put;
-
-	rc = device_add(&stdev->dev);
+	rc = cdev_device_add(&stdev->cdev, &stdev->dev);
 	if (rc)
 		goto err_devadd;
 
@@ -1492,7 +1490,6 @@ static int switchtec_pci_probe(struct pci_dev *pdev,
 	return 0;
 
 err_devadd:
-	cdev_del(&stdev->cdev);
 	stdev_kill(stdev);
 err_put:
 	ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt));
@@ -1506,8 +1503,7 @@ static void switchtec_pci_remove(struct pci_dev *pdev)
 
 	pci_set_drvdata(pdev, NULL);
 
-	device_del(&stdev->dev);
-	cdev_del(&stdev->cdev);
+	cdev_device_del(&stdev->cdev, &stdev->dev);
 	ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt));
 	dev_info(&stdev->dev, "unregistered.\n");
 
diff --git a/drivers/perf/arm_pmu_acpi.c b/drivers/perf/arm_pmu_acpi.c
index 34c862f213c7..0a9b78705ee8 100644
--- a/drivers/perf/arm_pmu_acpi.c
+++ b/drivers/perf/arm_pmu_acpi.c
@@ -29,6 +29,17 @@ static int arm_pmu_acpi_register_irq(int cpu)
 		return -EINVAL;
 
 	gsi = gicc->performance_interrupt;
+
+	/*
+	 * Per the ACPI spec, the MADT cannot describe a PMU that doesn't
+	 * have an interrupt. QEMU advertises this by using a GSI of zero,
+	 * which is not known to be valid on any hardware despite being
+	 * valid per the spec. Take the pragmatic approach and reject a
+	 * GSI of zero for now.
+	 */
+	if (!gsi)
+		return 0;
+
 	if (gicc->flags & ACPI_MADT_PERFORMANCE_IRQ_MODE)
 		trigger = ACPI_EDGE_SENSITIVE;
 	else
diff --git a/drivers/phy/phy-qcom-qmp.c b/drivers/phy/phy-qcom-qmp.c
index 727e23be7cac..78ca62897784 100644
--- a/drivers/phy/phy-qcom-qmp.c
+++ b/drivers/phy/phy-qcom-qmp.c
@@ -844,7 +844,7 @@ static int qcom_qmp_phy_vreg_init(struct device *dev)
 	int num = qmp->cfg->num_vregs;
 	int i;
 
-	qmp->vregs = devm_kcalloc(dev, num, sizeof(qmp->vregs), GFP_KERNEL);
+	qmp->vregs = devm_kcalloc(dev, num, sizeof(*qmp->vregs), GFP_KERNEL);
 	if (!qmp->vregs)
 		return -ENOMEM;
 
@@ -983,16 +983,16 @@ int qcom_qmp_phy_create(struct device *dev, struct device_node *np, int id)
 	 * Resources are indexed as: tx -> 0; rx -> 1; pcs -> 2.
 	 */
 	qphy->tx = of_iomap(np, 0);
-	if (IS_ERR(qphy->tx))
-		return PTR_ERR(qphy->tx);
+	if (!qphy->tx)
+		return -ENOMEM;
 
 	qphy->rx = of_iomap(np, 1);
-	if (IS_ERR(qphy->rx))
-		return PTR_ERR(qphy->rx);
+	if (!qphy->rx)
+		return -ENOMEM;
 
 	qphy->pcs = of_iomap(np, 2);
-	if (IS_ERR(qphy->pcs))
-		return PTR_ERR(qphy->pcs);
+	if (!qphy->pcs)
+		return -ENOMEM;
 
 	/*
 	 * Get PHY's Pipe clock, if any. USB3 and PCIe are PIPE3
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index 1653cbda6a82..bd459a93b0e7 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -680,30 +680,16 @@ EXPORT_SYMBOL_GPL(pinctrl_generic_remove_group);
  * pinctrl_generic_free_groups() - removes all pin groups
  * @pctldev: pin controller device
  *
- * Note that the caller must take care of locking.
+ * Note that the caller must take care of locking. The pinctrl groups
+ * are allocated with devm_kzalloc() so no need to free them here.
  */
 static void pinctrl_generic_free_groups(struct pinctrl_dev *pctldev)
 {
 	struct radix_tree_iter iter;
-	struct group_desc *group;
-	unsigned long *indices;
 	void **slot;
-	int i = 0;
-
-	indices = devm_kzalloc(pctldev->dev, sizeof(*indices) *
-			       pctldev->num_groups, GFP_KERNEL);
-	if (!indices)
-		return;
 
 	radix_tree_for_each_slot(slot, &pctldev->pin_group_tree, &iter, 0)
-		indices[i++] = iter.index;
-
-	for (i = 0; i < pctldev->num_groups; i++) {
-		group = radix_tree_lookup(&pctldev->pin_group_tree,
-					  indices[i]);
-		radix_tree_delete(&pctldev->pin_group_tree, indices[i]);
-		devm_kfree(pctldev->dev, group);
-	}
+		radix_tree_delete(&pctldev->pin_group_tree, iter.index);
 
 	pctldev->num_groups = 0;
 }
diff --git a/drivers/pinctrl/freescale/pinctrl-mxs.c b/drivers/pinctrl/freescale/pinctrl-mxs.c
index 41b5b07d5a2b..6852010a6d70 100644
--- a/drivers/pinctrl/freescale/pinctrl-mxs.c
+++ b/drivers/pinctrl/freescale/pinctrl-mxs.c
@@ -194,6 +194,16 @@ static int mxs_pinctrl_get_func_groups(struct pinctrl_dev *pctldev,
 	return 0;
 }
 
+static void mxs_pinctrl_rmwl(u32 value, u32 mask, u8 shift, void __iomem *reg)
+{
+	u32 tmp;
+
+	tmp = readl(reg);
+	tmp &= ~(mask << shift);
+	tmp |= value << shift;
+	writel(tmp, reg);
+}
+
 static int mxs_pinctrl_set_mux(struct pinctrl_dev *pctldev, unsigned selector,
 			       unsigned group)
 {
@@ -211,8 +221,7 @@ static int mxs_pinctrl_set_mux(struct pinctrl_dev *pctldev, unsigned selector,
 		reg += bank * 0x20 + pin / 16 * 0x10;
 		shift = pin % 16 * 2;
 
-		writel(0x3 << shift, reg + CLR);
-		writel(g->muxsel[i] << shift, reg + SET);
+		mxs_pinctrl_rmwl(g->muxsel[i], 0x3, shift, reg);
 	}
 
 	return 0;
@@ -279,8 +288,7 @@ static int mxs_pinconf_group_set(struct pinctrl_dev *pctldev,
 		/* mA */
 		if (config & MA_PRESENT) {
 			shift = pin % 8 * 4;
-			writel(0x3 << shift, reg + CLR);
-			writel(ma << shift, reg + SET);
+			mxs_pinctrl_rmwl(ma, 0x3, shift, reg);
 		}
 
 		/* vol */
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index 2debba62fac9..20f1b4493994 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -1539,15 +1539,29 @@ static void chv_gpio_irq_handler(struct irq_desc *desc)
  * is not listed below.
  */
 static const struct dmi_system_id chv_no_valid_mask[] = {
+	/* See https://bugzilla.kernel.org/show_bug.cgi?id=194945 */
 	{
-		/* See https://bugzilla.kernel.org/show_bug.cgi?id=194945 */
-		.ident = "Acer Chromebook (CYAN)",
+		.ident = "Intel_Strago based Chromebooks (All models)",
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
-			DMI_MATCH(DMI_PRODUCT_NAME, "Edgar"),
-			DMI_MATCH(DMI_BIOS_DATE, "05/21/2016"),
+			DMI_MATCH(DMI_PRODUCT_FAMILY, "Intel_Strago"),
 		},
-	}
+	},
+	{
+		.ident = "Acer Chromebook R11 (Cyan)",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Cyan"),
+		},
+	},
+	{
+		.ident = "Samsung Chromebook 3 (Celes)",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Celes"),
+		},
+	},
+	{}
 };
 
 static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
diff --git a/drivers/pinctrl/pinconf-generic.c b/drivers/pinctrl/pinconf-generic.c
index 0d6b7f4b82af..720a19fd38d2 100644
--- a/drivers/pinctrl/pinconf-generic.c
+++ b/drivers/pinctrl/pinconf-generic.c
@@ -35,7 +35,6 @@ static const struct pin_config_item conf_items[] = {
 	PCONFDUMP(PIN_CONFIG_BIAS_PULL_PIN_DEFAULT,
 		"input bias pull to pin specific state", NULL, false),
 	PCONFDUMP(PIN_CONFIG_BIAS_PULL_UP, "input bias pull up", NULL, false),
-	PCONFDUMP(PIN_CONFIG_BIDIRECTIONAL, "bi-directional pin operations", NULL, false),
 	PCONFDUMP(PIN_CONFIG_DRIVE_OPEN_DRAIN, "output drive open drain", NULL, false),
 	PCONFDUMP(PIN_CONFIG_DRIVE_OPEN_SOURCE, "output drive open source", NULL, false),
 	PCONFDUMP(PIN_CONFIG_DRIVE_PUSH_PULL, "output drive push pull", NULL, false),
@@ -161,7 +160,6 @@ static const struct pinconf_generic_params dt_params[] = {
161 { "bias-pull-up", PIN_CONFIG_BIAS_PULL_UP, 1 }, 160 { "bias-pull-up", PIN_CONFIG_BIAS_PULL_UP, 1 },
162 { "bias-pull-pin-default", PIN_CONFIG_BIAS_PULL_PIN_DEFAULT, 1 }, 161 { "bias-pull-pin-default", PIN_CONFIG_BIAS_PULL_PIN_DEFAULT, 1 },
163 { "bias-pull-down", PIN_CONFIG_BIAS_PULL_DOWN, 1 }, 162 { "bias-pull-down", PIN_CONFIG_BIAS_PULL_DOWN, 1 },
164 { "bi-directional", PIN_CONFIG_BIDIRECTIONAL, 1 },
165 { "drive-open-drain", PIN_CONFIG_DRIVE_OPEN_DRAIN, 0 }, 163 { "drive-open-drain", PIN_CONFIG_DRIVE_OPEN_DRAIN, 0 },
166 { "drive-open-source", PIN_CONFIG_DRIVE_OPEN_SOURCE, 0 }, 164 { "drive-open-source", PIN_CONFIG_DRIVE_OPEN_SOURCE, 0 },
167 { "drive-push-pull", PIN_CONFIG_DRIVE_PUSH_PULL, 0 }, 165 { "drive-push-pull", PIN_CONFIG_DRIVE_PUSH_PULL, 0 },
@@ -174,7 +172,6 @@ static const struct pinconf_generic_params dt_params[] = {
174 { "input-schmitt-enable", PIN_CONFIG_INPUT_SCHMITT_ENABLE, 1 }, 172 { "input-schmitt-enable", PIN_CONFIG_INPUT_SCHMITT_ENABLE, 1 },
175 { "low-power-disable", PIN_CONFIG_LOW_POWER_MODE, 0 }, 173 { "low-power-disable", PIN_CONFIG_LOW_POWER_MODE, 0 },
176 { "low-power-enable", PIN_CONFIG_LOW_POWER_MODE, 1 }, 174 { "low-power-enable", PIN_CONFIG_LOW_POWER_MODE, 1 },
177 { "output-enable", PIN_CONFIG_OUTPUT, 1, },
178 { "output-high", PIN_CONFIG_OUTPUT, 1, }, 175 { "output-high", PIN_CONFIG_OUTPUT, 1, },
179 { "output-low", PIN_CONFIG_OUTPUT, 0, }, 176 { "output-low", PIN_CONFIG_OUTPUT, 0, },
180 { "power-source", PIN_CONFIG_POWER_SOURCE, 0 }, 177 { "power-source", PIN_CONFIG_POWER_SOURCE, 0 },
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index 1482d132fbb8..e432ec887479 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -495,64 +495,54 @@ static struct irq_chip amd_gpio_irqchip = {
 	.flags        = IRQCHIP_SKIP_SET_WAKE,
 };
 
-static void amd_gpio_irq_handler(struct irq_desc *desc)
+#define PIN_IRQ_PENDING	(BIT(INTERRUPT_STS_OFF) | BIT(WAKE_STS_OFF))
+
+static irqreturn_t amd_gpio_irq_handler(int irq, void *dev_id)
 {
-	u32 i;
-	u32 off;
-	u32 reg;
-	u32 pin_reg;
-	u64 reg64;
-	int handled = 0;
-	unsigned int irq;
+	struct amd_gpio *gpio_dev = dev_id;
+	struct gpio_chip *gc = &gpio_dev->gc;
+	irqreturn_t ret = IRQ_NONE;
+	unsigned int i, irqnr;
 	unsigned long flags;
-	struct irq_chip *chip = irq_desc_get_chip(desc);
-	struct gpio_chip *gc = irq_desc_get_handler_data(desc);
-	struct amd_gpio *gpio_dev = gpiochip_get_data(gc);
+	u32 *regs, regval;
+	u64 status, mask;
 
-	chained_irq_enter(chip, desc);
-	/*enable GPIO interrupt again*/
+	/* Read the wake status */
 	raw_spin_lock_irqsave(&gpio_dev->lock, flags);
-	reg = readl(gpio_dev->base + WAKE_INT_STATUS_REG1);
-	reg64 = reg;
-	reg64 = reg64 << 32;
-
-	reg = readl(gpio_dev->base + WAKE_INT_STATUS_REG0);
-	reg64 |= reg;
+	status = readl(gpio_dev->base + WAKE_INT_STATUS_REG1);
+	status <<= 32;
+	status |= readl(gpio_dev->base + WAKE_INT_STATUS_REG0);
 	raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
 
-	/*
-	 * first 46 bits indicates interrupt status.
-	 * one bit represents four interrupt sources.
-	 */
-	for (off = 0; off < 46 ; off++) {
-		if (reg64 & BIT(off)) {
-			for (i = 0; i < 4; i++) {
-				pin_reg = readl(gpio_dev->base +
-						(off * 4 + i) * 4);
-				if ((pin_reg & BIT(INTERRUPT_STS_OFF)) ||
-					(pin_reg & BIT(WAKE_STS_OFF))) {
-					irq = irq_find_mapping(gc->irqdomain,
-								off * 4 + i);
-					generic_handle_irq(irq);
-					writel(pin_reg,
-						gpio_dev->base
-						+ (off * 4 + i) * 4);
-					handled++;
-				}
-			}
-		}
+	/* Bit 0-45 contain the relevant status bits */
+	status &= (1ULL << 46) - 1;
+	regs = gpio_dev->base;
+	for (mask = 1, irqnr = 0; status; mask <<= 1, regs += 4, irqnr += 4) {
+		if (!(status & mask))
+			continue;
+		status &= ~mask;
+
+		/* Each status bit covers four pins */
+		for (i = 0; i < 4; i++) {
+			regval = readl(regs + i);
+			if (!(regval & PIN_IRQ_PENDING))
+				continue;
+			irq = irq_find_mapping(gc->irqdomain, irqnr + i);
+			generic_handle_irq(irq);
+			/* Clear interrupt */
+			writel(regval, regs + i);
+			ret = IRQ_HANDLED;
+		}
 	}
 
-	if (handled == 0)
-		handle_bad_irq(desc);
-
+	/* Signal EOI to the GPIO unit */
 	raw_spin_lock_irqsave(&gpio_dev->lock, flags);
-	reg = readl(gpio_dev->base + WAKE_INT_MASTER_REG);
-	reg |= EOI_MASK;
-	writel(reg, gpio_dev->base + WAKE_INT_MASTER_REG);
+	regval = readl(gpio_dev->base + WAKE_INT_MASTER_REG);
+	regval |= EOI_MASK;
+	writel(regval, gpio_dev->base + WAKE_INT_MASTER_REG);
 	raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
 
-	chained_irq_exit(chip, desc);
+	return ret;
 }
 
 static int amd_get_groups_count(struct pinctrl_dev *pctldev)
@@ -821,10 +811,11 @@ static int amd_gpio_probe(struct platform_device *pdev)
 		goto out2;
 	}
 
-	gpiochip_set_chained_irqchip(&gpio_dev->gc,
-				     &amd_gpio_irqchip,
-				     irq_base,
-				     amd_gpio_irq_handler);
+	ret = devm_request_irq(&pdev->dev, irq_base, amd_gpio_irq_handler, 0,
+			       KBUILD_MODNAME, gpio_dev);
+	if (ret)
+		goto out2;
+
 	platform_set_drvdata(pdev, gpio_dev);
 
 	dev_dbg(&pdev->dev, "amd gpio driver loaded\n");
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index f141aa0430b1..9dd981ddbb17 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -143,9 +143,6 @@ struct rockchip_drv {
  * @gpio_chip: gpiolib chip
  * @grange: gpio range
  * @slock: spinlock for the gpio bank
- * @irq_lock: bus lock for irq chip
- * @new_irqs: newly configured irqs which must be muxed as GPIOs in
- *	irq_bus_sync_unlock()
  */
 struct rockchip_pin_bank {
 	void __iomem *reg_base;
@@ -168,8 +165,6 @@ struct rockchip_pin_bank {
 	struct pinctrl_gpio_range grange;
 	raw_spinlock_t slock;
 	u32 toggle_edge_mode;
-	struct mutex irq_lock;
-	u32 new_irqs;
 };
 
 #define PIN_BANK(id, pins, label) \
@@ -2134,12 +2129,11 @@ static int rockchip_irq_set_type(struct irq_data *d, unsigned int type)
 	int ret;
 
 	/* make sure the pin is configured as gpio input */
-	ret = rockchip_verify_mux(bank, d->hwirq, RK_FUNC_GPIO);
+	ret = rockchip_set_mux(bank, d->hwirq, RK_FUNC_GPIO);
 	if (ret < 0)
 		return ret;
 
-	bank->new_irqs |= mask;
-
+	clk_enable(bank->clk);
 	raw_spin_lock_irqsave(&bank->slock, flags);
 
 	data = readl_relaxed(bank->reg_base + GPIO_SWPORT_DDR);
@@ -2197,6 +2191,7 @@ static int rockchip_irq_set_type(struct irq_data *d, unsigned int type)
 	default:
 		irq_gc_unlock(gc);
 		raw_spin_unlock_irqrestore(&bank->slock, flags);
+		clk_disable(bank->clk);
 		return -EINVAL;
 	}
 
@@ -2205,6 +2200,7 @@ static int rockchip_irq_set_type(struct irq_data *d, unsigned int type)
 
 	irq_gc_unlock(gc);
 	raw_spin_unlock_irqrestore(&bank->slock, flags);
+	clk_disable(bank->clk);
 
 	return 0;
 }
@@ -2248,34 +2244,6 @@ static void rockchip_irq_disable(struct irq_data *d)
 	clk_disable(bank->clk);
 }
 
-static void rockchip_irq_bus_lock(struct irq_data *d)
-{
-	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
-	struct rockchip_pin_bank *bank = gc->private;
-
-	clk_enable(bank->clk);
-	mutex_lock(&bank->irq_lock);
-}
-
-static void rockchip_irq_bus_sync_unlock(struct irq_data *d)
-{
-	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
-	struct rockchip_pin_bank *bank = gc->private;
-
-	while (bank->new_irqs) {
-		unsigned int irq = __ffs(bank->new_irqs);
-		int ret;
-
-		ret = rockchip_set_mux(bank, irq, RK_FUNC_GPIO);
-		WARN_ON(ret < 0);
-
-		bank->new_irqs &= ~BIT(irq);
-	}
-
-	mutex_unlock(&bank->irq_lock);
-	clk_disable(bank->clk);
-}
-
 static int rockchip_interrupts_register(struct platform_device *pdev,
 					struct rockchip_pinctrl *info)
 {
@@ -2342,9 +2310,6 @@ static int rockchip_interrupts_register(struct platform_device *pdev,
 		gc->chip_types[0].chip.irq_suspend = rockchip_irq_suspend;
 		gc->chip_types[0].chip.irq_resume = rockchip_irq_resume;
 		gc->chip_types[0].chip.irq_set_type = rockchip_irq_set_type;
-		gc->chip_types[0].chip.irq_bus_lock = rockchip_irq_bus_lock;
-		gc->chip_types[0].chip.irq_bus_sync_unlock =
-						rockchip_irq_bus_sync_unlock;
 		gc->wake_enabled = IRQ_MSK(bank->nr_pins);
 
 		irq_set_chained_handler_and_data(bank->irq,
@@ -2518,7 +2483,6 @@ static struct rockchip_pin_ctrl *rockchip_pinctrl_get_soc_data(
 		int bank_pins = 0;
 
 		raw_spin_lock_init(&bank->slock);
-		mutex_init(&bank->irq_lock);
 		bank->drvdata = d;
 		bank->pin_base = ctrl->nr_pins;
 		ctrl->nr_pins += bank->nr_pins;
diff --git a/drivers/pinctrl/pinmux.c b/drivers/pinctrl/pinmux.c
index 9fd6d9087dc5..16b3ae5e4f44 100644
--- a/drivers/pinctrl/pinmux.c
+++ b/drivers/pinctrl/pinmux.c
@@ -826,30 +826,17 @@ EXPORT_SYMBOL_GPL(pinmux_generic_remove_function);
  * pinmux_generic_free_functions() - removes all functions
  * @pctldev: pin controller device
  *
- * Note that the caller must take care of locking.
+ * Note that the caller must take care of locking. The pinctrl
+ * functions are allocated with devm_kzalloc() so no need to free
+ * them here.
  */
 void pinmux_generic_free_functions(struct pinctrl_dev *pctldev)
 {
 	struct radix_tree_iter iter;
-	struct function_desc *function;
-	unsigned long *indices;
 	void **slot;
-	int i = 0;
-
-	indices = devm_kzalloc(pctldev->dev, sizeof(*indices) *
-			       pctldev->num_functions, GFP_KERNEL);
-	if (!indices)
-		return;
 
 	radix_tree_for_each_slot(slot, &pctldev->pin_function_tree, &iter, 0)
-		indices[i++] = iter.index;
-
-	for (i = 0; i < pctldev->num_functions; i++) {
-		function = radix_tree_lookup(&pctldev->pin_function_tree,
-					     indices[i]);
-		radix_tree_delete(&pctldev->pin_function_tree, indices[i]);
-		devm_kfree(pctldev->dev, function);
-	}
+		radix_tree_delete(&pctldev->pin_function_tree, iter.index);
 
 	pctldev->num_functions = 0;
 }
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
index d3c5f5dfbbd7..222b6685b09f 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32.c
+++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
@@ -798,7 +798,7 @@ static int stm32_pconf_parse_conf(struct pinctrl_dev *pctldev,
 		break;
 	case PIN_CONFIG_OUTPUT:
 		__stm32_gpio_set(bank, offset, arg);
-		ret = stm32_pmx_gpio_set_direction(pctldev, NULL, pin, false);
+		ret = stm32_pmx_gpio_set_direction(pctldev, range, pin, false);
 		break;
 	default:
 		ret = -EINVAL;
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c
index 9aec1d2232dd..6624499eae72 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c
@@ -394,7 +394,7 @@ static const struct sunxi_desc_pin sun8i_a83t_pins[] = {
 	SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 18),
 		  SUNXI_FUNCTION(0x0, "gpio_in"),
 		  SUNXI_FUNCTION(0x1, "gpio_out"),
-		  SUNXI_FUNCTION(0x3, "owa")),		/* DOUT */
+		  SUNXI_FUNCTION(0x3, "spdif")),	/* DOUT */
 	SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 19),
 		  SUNXI_FUNCTION(0x0, "gpio_in"),
 		  SUNXI_FUNCTION(0x1, "gpio_out")),
diff --git a/drivers/platform/goldfish/goldfish_pipe.c b/drivers/platform/goldfish/goldfish_pipe.c
index 2de1e603bd2b..5f3672153b12 100644
--- a/drivers/platform/goldfish/goldfish_pipe.c
+++ b/drivers/platform/goldfish/goldfish_pipe.c
@@ -704,7 +704,7 @@ static int get_free_pipe_id_locked(struct goldfish_pipe_dev *dev)
 		/* Reallocate the array */
 		u32 new_capacity = 2 * dev->pipes_capacity;
 		struct goldfish_pipe **pipes =
-			kcalloc(new_capacity, sizeof(*pipes), GFP_KERNEL);
+			kcalloc(new_capacity, sizeof(*pipes), GFP_ATOMIC);
 		if (!pipes)
 			return -ENOMEM;
 		memcpy(pipes, dev->pipes, sizeof(*pipes) * dev->pipes_capacity);
diff --git a/drivers/platform/x86/intel_telemetry_debugfs.c b/drivers/platform/x86/intel_telemetry_debugfs.c
index ef29f18b1951..4cc2f4ea0a25 100644
--- a/drivers/platform/x86/intel_telemetry_debugfs.c
+++ b/drivers/platform/x86/intel_telemetry_debugfs.c
@@ -97,11 +97,9 @@
 	} \
 }
 
-#ifdef CONFIG_PM_SLEEP
 static u8 suspend_prep_ok;
 static u32 suspend_shlw_ctr_temp, suspend_deep_ctr_temp;
 static u64 suspend_shlw_res_temp, suspend_deep_res_temp;
-#endif
 
 struct telemetry_susp_stats {
 	u32 shlw_swake_ctr;
@@ -807,7 +805,6 @@ static const struct file_operations telem_ioss_trc_verb_ops = {
 	.release	= single_release,
 };
 
-#ifdef CONFIG_PM_SLEEP
 static int pm_suspend_prep_cb(void)
 {
 	struct telemetry_evtlog evtlog[TELEM_MAX_OS_ALLOCATED_EVENTS];
@@ -937,7 +934,6 @@ static int pm_notification(struct notifier_block *this,
 static struct notifier_block pm_notifier = {
 	.notifier_call = pm_notification,
 };
-#endif /* CONFIG_PM_SLEEP */
 
 static int __init telemetry_debugfs_init(void)
 {
@@ -960,14 +956,13 @@ static int __init telemetry_debugfs_init(void)
 	if (err < 0)
 		return -EINVAL;
 
-
-#ifdef CONFIG_PM_SLEEP
 	register_pm_notifier(&pm_notifier);
-#endif /* CONFIG_PM_SLEEP */
 
 	debugfs_conf->telemetry_dbg_dir = debugfs_create_dir("telemetry", NULL);
-	if (!debugfs_conf->telemetry_dbg_dir)
-		return -ENOMEM;
+	if (!debugfs_conf->telemetry_dbg_dir) {
+		err = -ENOMEM;
+		goto out_pm;
+	}
 
 	f = debugfs_create_file("pss_info", S_IFREG | S_IRUGO,
 				debugfs_conf->telemetry_dbg_dir, NULL,
@@ -1014,6 +1009,8 @@ static int __init telemetry_debugfs_init(void)
 out:
 	debugfs_remove_recursive(debugfs_conf->telemetry_dbg_dir);
 	debugfs_conf->telemetry_dbg_dir = NULL;
+out_pm:
+	unregister_pm_notifier(&pm_notifier);
 
 	return err;
 }
@@ -1022,6 +1019,7 @@ static void __exit telemetry_debugfs_exit(void)
 {
 	debugfs_remove_recursive(debugfs_conf->telemetry_dbg_dir);
 	debugfs_conf->telemetry_dbg_dir = NULL;
+	unregister_pm_notifier(&pm_notifier);
 }
 
 late_initcall(telemetry_debugfs_init);
diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c
index 14bde0db8c24..5b10b50f8686 100644
--- a/drivers/powercap/powercap_sys.c
+++ b/drivers/powercap/powercap_sys.c
@@ -538,6 +538,7 @@ struct powercap_zone *powercap_register_zone(
 
 	power_zone->id = result;
 	idr_init(&power_zone->idr);
+	result = -ENOMEM;
 	power_zone->name = kstrdup(name, GFP_KERNEL);
 	if (!power_zone->name)
 		goto err_name_alloc;
diff --git a/drivers/reset/hisilicon/hi6220_reset.c b/drivers/reset/hisilicon/hi6220_reset.c
index 35ce53edabf9..d5e5229308f2 100644
--- a/drivers/reset/hisilicon/hi6220_reset.c
+++ b/drivers/reset/hisilicon/hi6220_reset.c
@@ -155,3 +155,5 @@ static int __init hi6220_reset_init(void)
 }
 
 postcore_initcall(hi6220_reset_init);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index b3de973a6260..9dca53df3584 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -1088,7 +1088,7 @@ static u32 rtc_handler(void *context)
 	}
 	spin_unlock_irqrestore(&rtc_lock, flags);
 
-	pm_wakeup_event(dev, 0);
+	pm_wakeup_hard_event(dev);
 	acpi_clear_event(ACPI_EVENT_RTC);
 	acpi_disable_event(ACPI_EVENT_RTC, 0);
 	return ACPI_INTERRUPT_HANDLED;
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index e443b0d0b236..34b9ad6b3143 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -35,7 +35,7 @@ static struct bus_type ccwgroup_bus_type;
 static void __ccwgroup_remove_symlinks(struct ccwgroup_device *gdev)
 {
 	int i;
-	char str[8];
+	char str[16];
 
 	for (i = 0; i < gdev->count; i++) {
 		sprintf(str, "cdev%d", i);
@@ -238,7 +238,7 @@ static void ccwgroup_release(struct device *dev)
 
 static int __ccwgroup_create_symlinks(struct ccwgroup_device *gdev)
 {
-	char str[8];
+	char str[16];
 	int i, rc;
 
 	for (i = 0; i < gdev->count; i++) {
diff --git a/drivers/s390/cio/qdio_debug.h b/drivers/s390/cio/qdio_debug.h
index f33ce8577619..1d595d17bf11 100644
--- a/drivers/s390/cio/qdio_debug.h
+++ b/drivers/s390/cio/qdio_debug.h
@@ -11,7 +11,7 @@
 #include "qdio.h"
 
 /* that gives us 15 characters in the text event views */
-#define QDIO_DBF_LEN	16
+#define QDIO_DBF_LEN	32
 
 extern debug_info_t *qdio_dbf_setup;
 extern debug_info_t *qdio_dbf_error;
diff --git a/drivers/s390/cio/vfio_ccw_ops.c b/drivers/s390/cio/vfio_ccw_ops.c
index e72abbc18ee3..a66a317f3e4f 100644
--- a/drivers/s390/cio/vfio_ccw_ops.c
+++ b/drivers/s390/cio/vfio_ccw_ops.c
@@ -70,14 +70,14 @@ static ssize_t name_show(struct kobject *kobj, struct device *dev, char *buf)
 {
 	return sprintf(buf, "I/O subchannel (Non-QDIO)\n");
 }
-MDEV_TYPE_ATTR_RO(name);
+static MDEV_TYPE_ATTR_RO(name);
 
 static ssize_t device_api_show(struct kobject *kobj, struct device *dev,
 			       char *buf)
 {
 	return sprintf(buf, "%s\n", VFIO_DEVICE_API_CCW_STRING);
 }
-MDEV_TYPE_ATTR_RO(device_api);
+static MDEV_TYPE_ATTR_RO(device_api);
 
 static ssize_t available_instances_show(struct kobject *kobj,
 					struct device *dev, char *buf)
@@ -86,7 +86,7 @@ static ssize_t available_instances_show(struct kobject *kobj,
 
 	return sprintf(buf, "%d\n", atomic_read(&private->avail));
 }
-MDEV_TYPE_ATTR_RO(available_instances);
+static MDEV_TYPE_ATTR_RO(available_instances);
 
 static struct attribute *mdev_types_attrs[] = {
 	&mdev_type_attr_name.attr,
@@ -100,7 +100,7 @@ static struct attribute_group mdev_type_group = {
 	.attrs = mdev_types_attrs,
 };
 
-struct attribute_group *mdev_type_groups[] = {
+static struct attribute_group *mdev_type_groups[] = {
 	&mdev_type_group,
 	NULL,
 };
@@ -152,7 +152,7 @@ static int vfio_ccw_mdev_open(struct mdev_device *mdev)
 					  &events, &private->nb);
 }
 
-void vfio_ccw_mdev_release(struct mdev_device *mdev)
+static void vfio_ccw_mdev_release(struct mdev_device *mdev)
 {
 	struct vfio_ccw_private *private =
 		dev_get_drvdata(mdev_parent_dev(mdev));
@@ -233,7 +233,7 @@ static int vfio_ccw_mdev_get_region_info(struct vfio_region_info *info,
 	}
 }
 
-int vfio_ccw_mdev_get_irq_info(struct vfio_irq_info *info)
+static int vfio_ccw_mdev_get_irq_info(struct vfio_irq_info *info)
 {
 	if (info->index != VFIO_CCW_IO_IRQ_INDEX)
 		return -EINVAL;
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 9be4596d8a08..ea099910b4e9 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -668,10 +668,28 @@ static int ap_device_probe(struct device *dev)
 	struct ap_driver *ap_drv = to_ap_drv(dev->driver);
 	int rc;
 
+	/* Add queue/card to list of active queues/cards */
+	spin_lock_bh(&ap_list_lock);
+	if (is_card_dev(dev))
+		list_add(&to_ap_card(dev)->list, &ap_card_list);
+	else
+		list_add(&to_ap_queue(dev)->list,
+			 &to_ap_queue(dev)->card->queues);
+	spin_unlock_bh(&ap_list_lock);
+
 	ap_dev->drv = ap_drv;
 	rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV;
-	if (rc)
+
+	if (rc) {
+		spin_lock_bh(&ap_list_lock);
+		if (is_card_dev(dev))
+			list_del_init(&to_ap_card(dev)->list);
+		else
+			list_del_init(&to_ap_queue(dev)->list);
+		spin_unlock_bh(&ap_list_lock);
 		ap_dev->drv = NULL;
+	}
+
 	return rc;
 }
 
@@ -680,14 +698,17 @@ static int ap_device_remove(struct device *dev)
 	struct ap_device *ap_dev = to_ap_dev(dev);
 	struct ap_driver *ap_drv = ap_dev->drv;
 
+	if (ap_drv->remove)
+		ap_drv->remove(ap_dev);
+
+	/* Remove queue/card from list of active queues/cards */
 	spin_lock_bh(&ap_list_lock);
 	if (is_card_dev(dev))
 		list_del_init(&to_ap_card(dev)->list);
 	else
 		list_del_init(&to_ap_queue(dev)->list);
 	spin_unlock_bh(&ap_list_lock);
-	if (ap_drv->remove)
-		ap_drv->remove(ap_dev);
+
 	return 0;
 }
 
@@ -1056,10 +1077,6 @@ static void ap_scan_bus(struct work_struct *unused)
 			}
 			/* get it and thus adjust reference counter */
 			get_device(&ac->ap_dev.device);
-			/* Add card device to card list */
-			spin_lock_bh(&ap_list_lock);
-			list_add(&ac->list, &ap_card_list);
-			spin_unlock_bh(&ap_list_lock);
 		}
 		/* now create the new queue device */
 		aq = ap_queue_create(qid, type);
@@ -1070,10 +1087,6 @@ static void ap_scan_bus(struct work_struct *unused)
 		aq->ap_dev.device.parent = &ac->ap_dev.device;
 		dev_set_name(&aq->ap_dev.device,
 			     "%02x.%04x", id, dom);
-		/* Add queue device to card queue list */
-		spin_lock_bh(&ap_list_lock);
-		list_add(&aq->list, &ac->queues);
-		spin_unlock_bh(&ap_list_lock);
 		/* Start with a device reset */
 		spin_lock_bh(&aq->lock);
 		ap_wait(ap_sm_event(aq, AP_EVENT_POLL));
@@ -1081,9 +1094,6 @@ static void ap_scan_bus(struct work_struct *unused)
 		/* Register device */
 		rc = device_register(&aq->ap_dev.device);
 		if (rc) {
-			spin_lock_bh(&ap_list_lock);
-			list_del_init(&aq->list);
-			spin_unlock_bh(&ap_list_lock);
 			put_device(&aq->ap_dev.device);
 			continue;
 		}
diff --git a/drivers/s390/crypto/ap_card.c b/drivers/s390/crypto/ap_card.c
index cfa161ccc74e..836efac96813 100644
--- a/drivers/s390/crypto/ap_card.c
+++ b/drivers/s390/crypto/ap_card.c
@@ -160,7 +160,14 @@ static struct device_type ap_card_type = {
 
 static void ap_card_device_release(struct device *dev)
 {
-	kfree(to_ap_card(dev));
+	struct ap_card *ac = to_ap_card(dev);
+
+	if (!list_empty(&ac->list)) {
+		spin_lock_bh(&ap_list_lock);
+		list_del_init(&ac->list);
+		spin_unlock_bh(&ap_list_lock);
+	}
+	kfree(ac);
 }
 
 struct ap_card *ap_card_create(int id, int queue_depth, int device_type,
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
index 480c58a63769..0f1a5d02acb0 100644
--- a/drivers/s390/crypto/ap_queue.c
+++ b/drivers/s390/crypto/ap_queue.c
@@ -584,7 +584,14 @@ static struct device_type ap_queue_type = {
 
 static void ap_queue_device_release(struct device *dev)
 {
-	kfree(to_ap_queue(dev));
+	struct ap_queue *aq = to_ap_queue(dev);
+
+	if (!list_empty(&aq->list)) {
+		spin_lock_bh(&ap_list_lock);
+		list_del_init(&aq->list);
+		spin_unlock_bh(&ap_list_lock);
+	}
+	kfree(aq);
 }
 
 struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type)
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index dba94b486f05..fa732bd86729 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -1954,7 +1954,6 @@ static void netiucv_free_netdevice(struct net_device *dev)
 		privptr->conn = NULL; privptr->fsm = NULL;
 		/* privptr gets freed by free_netdev() */
 	}
-	free_netdev(dev);
 }
 
 /**
@@ -1972,7 +1971,8 @@ static void netiucv_setup_netdevice(struct net_device *dev)
 	dev->mtu	         = NETIUCV_MTU_DEFAULT;
 	dev->min_mtu	         = 576;
 	dev->max_mtu	         = NETIUCV_MTU_MAX;
-	dev->destructor          = netiucv_free_netdevice;
+	dev->needs_free_netdev   = true;
+	dev->priv_destructor     = netiucv_free_netdevice;
 	dev->hard_header_len = NETIUCV_HDRLEN;
 	dev->addr_len = 0;
 	dev->type = ARPHRD_SLIP;
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index f6aa21176d89..30bc6105aac3 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -701,6 +701,7 @@ enum qeth_discipline_id {
 };
 
 struct qeth_discipline {
+	const struct device_type *devtype;
 	void (*start_poll)(struct ccw_device *, int, unsigned long);
 	qdio_handler_t *input_handler;
 	qdio_handler_t *output_handler;
@@ -875,6 +876,9 @@ extern struct qeth_discipline qeth_l2_discipline;
 extern struct qeth_discipline qeth_l3_discipline;
 extern const struct attribute_group *qeth_generic_attr_groups[];
 extern const struct attribute_group *qeth_osn_attr_groups[];
+extern const struct attribute_group qeth_device_attr_group;
+extern const struct attribute_group qeth_device_blkt_group;
+extern const struct device_type qeth_generic_devtype;
 extern struct workqueue_struct *qeth_wq;
 
 int qeth_card_hw_is_reachable(struct qeth_card *);
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 38114a8d56e0..fc6d85f2b38d 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -5530,10 +5530,12 @@ void qeth_core_free_discipline(struct qeth_card *card)
 	card->discipline = NULL;
 }
 
-static const struct device_type qeth_generic_devtype = {
+const struct device_type qeth_generic_devtype = {
 	.name = "qeth_generic",
 	.groups = qeth_generic_attr_groups,
 };
+EXPORT_SYMBOL_GPL(qeth_generic_devtype);
+
 static const struct device_type qeth_osn_devtype = {
 	.name = "qeth_osn",
 	.groups = qeth_osn_attr_groups,
@@ -5659,23 +5661,22 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
 		goto err_card;
 	}
 
-	if (card->info.type == QETH_CARD_TYPE_OSN)
-		gdev->dev.type = &qeth_osn_devtype;
-	else
-		gdev->dev.type = &qeth_generic_devtype;
-
 	switch (card->info.type) {
 	case QETH_CARD_TYPE_OSN:
 	case QETH_CARD_TYPE_OSM:
 		rc = qeth_core_load_discipline(card, QETH_DISCIPLINE_LAYER2);
 		if (rc)
 			goto err_card;
+
+		gdev->dev.type = (card->info.type != QETH_CARD_TYPE_OSN)
+					? card->discipline->devtype
+					: &qeth_osn_devtype;
 		rc = card->discipline->setup(card->gdev);
 		if (rc)
 			goto err_disc;
-	case QETH_CARD_TYPE_OSD:
-	case QETH_CARD_TYPE_OSX:
+		break;
 	default:
+		gdev->dev.type = &qeth_generic_devtype;
 		break;
 	}
 
@@ -5731,8 +5732,10 @@ static int qeth_core_set_online(struct ccwgroup_device *gdev)
 		if (rc)
 			goto err;
 		rc = card->discipline->setup(card->gdev);
-		if (rc)
+		if (rc) {
+			qeth_core_free_discipline(card);
 			goto err;
+		}
 	}
 	rc = card->discipline->set_online(gdev);
 err:
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index 75b29fd2fcf4..db6a285d41e0 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -413,12 +413,16 @@ static ssize_t qeth_dev_layer2_store(struct device *dev,
 
 	if (card->options.layer2 == newdis)
 		goto out;
-	else {
-		card->info.mac_bits = 0;
-		if (card->discipline) {
-			card->discipline->remove(card->gdev);
-			qeth_core_free_discipline(card);
-		}
+	if (card->info.type == QETH_CARD_TYPE_OSM) {
+		/* fixed layer, can't switch */
+		rc = -EOPNOTSUPP;
+		goto out;
+	}
+
+	card->info.mac_bits = 0;
+	if (card->discipline) {
+		card->discipline->remove(card->gdev);
+		qeth_core_free_discipline(card);
 	}
 
 	rc = qeth_core_load_discipline(card, newdis);
@@ -426,6 +430,8 @@ static ssize_t qeth_dev_layer2_store(struct device *dev,
 		goto out;
 
 	rc = card->discipline->setup(card->gdev);
+	if (rc)
+		qeth_core_free_discipline(card);
 out:
 	mutex_unlock(&card->discipline_mutex);
 	return rc ? rc : count;
@@ -703,10 +709,11 @@ static struct attribute *qeth_blkt_device_attrs[] = {
 	&dev_attr_inter_jumbo.attr,
 	NULL,
 };
-static struct attribute_group qeth_device_blkt_group = {
+const struct attribute_group qeth_device_blkt_group = {
 	.name = "blkt",
 	.attrs = qeth_blkt_device_attrs,
 };
+EXPORT_SYMBOL_GPL(qeth_device_blkt_group);
 
 static struct attribute *qeth_device_attrs[] = {
 	&dev_attr_state.attr,
@@ -726,9 +733,10 @@ static struct attribute *qeth_device_attrs[] = {
726 &dev_attr_switch_attrs.attr, 733 &dev_attr_switch_attrs.attr,
727 NULL, 734 NULL,
728}; 735};
729static struct attribute_group qeth_device_attr_group = { 736const struct attribute_group qeth_device_attr_group = {
730 .attrs = qeth_device_attrs, 737 .attrs = qeth_device_attrs,
731}; 738};
739EXPORT_SYMBOL_GPL(qeth_device_attr_group);
732 740
733const struct attribute_group *qeth_generic_attr_groups[] = { 741const struct attribute_group *qeth_generic_attr_groups[] = {
734 &qeth_device_attr_group, 742 &qeth_device_attr_group,
diff --git a/drivers/s390/net/qeth_l2.h b/drivers/s390/net/qeth_l2.h
index 29d9fb3890ad..0d59f9a45ea9 100644
--- a/drivers/s390/net/qeth_l2.h
+++ b/drivers/s390/net/qeth_l2.h
@@ -8,6 +8,8 @@
8 8
9#include "qeth_core.h" 9#include "qeth_core.h"
10 10
11extern const struct attribute_group *qeth_l2_attr_groups[];
12
11int qeth_l2_create_device_attributes(struct device *); 13int qeth_l2_create_device_attributes(struct device *);
12void qeth_l2_remove_device_attributes(struct device *); 14void qeth_l2_remove_device_attributes(struct device *);
13void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card); 15void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card);
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 1b07f382d74c..bd2df62a5cdf 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -880,11 +880,21 @@ static int qeth_l2_stop(struct net_device *dev)
880 return 0; 880 return 0;
881} 881}
882 882
883static const struct device_type qeth_l2_devtype = {
884 .name = "qeth_layer2",
885 .groups = qeth_l2_attr_groups,
886};
887
883static int qeth_l2_probe_device(struct ccwgroup_device *gdev) 888static int qeth_l2_probe_device(struct ccwgroup_device *gdev)
884{ 889{
885 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 890 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
891 int rc;
886 892
887 qeth_l2_create_device_attributes(&gdev->dev); 893 if (gdev->dev.type == &qeth_generic_devtype) {
894 rc = qeth_l2_create_device_attributes(&gdev->dev);
895 if (rc)
896 return rc;
897 }
888 INIT_LIST_HEAD(&card->vid_list); 898 INIT_LIST_HEAD(&card->vid_list);
889 hash_init(card->mac_htable); 899 hash_init(card->mac_htable);
890 card->options.layer2 = 1; 900 card->options.layer2 = 1;
@@ -896,7 +906,8 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
896{ 906{
897 struct qeth_card *card = dev_get_drvdata(&cgdev->dev); 907 struct qeth_card *card = dev_get_drvdata(&cgdev->dev);
898 908
899 qeth_l2_remove_device_attributes(&cgdev->dev); 909 if (cgdev->dev.type == &qeth_generic_devtype)
910 qeth_l2_remove_device_attributes(&cgdev->dev);
900 qeth_set_allowed_threads(card, 0, 1); 911 qeth_set_allowed_threads(card, 0, 1);
901 wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0); 912 wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
902 913
@@ -954,7 +965,6 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
954 case QETH_CARD_TYPE_OSN: 965 case QETH_CARD_TYPE_OSN:
955 card->dev = alloc_netdev(0, "osn%d", NET_NAME_UNKNOWN, 966 card->dev = alloc_netdev(0, "osn%d", NET_NAME_UNKNOWN,
956 ether_setup); 967 ether_setup);
957 card->dev->flags |= IFF_NOARP;
958 break; 968 break;
959 default: 969 default:
960 card->dev = alloc_etherdev(0); 970 card->dev = alloc_etherdev(0);
@@ -969,9 +979,12 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
969 card->dev->min_mtu = 64; 979 card->dev->min_mtu = 64;
970 card->dev->max_mtu = ETH_MAX_MTU; 980 card->dev->max_mtu = ETH_MAX_MTU;
971 card->dev->netdev_ops = &qeth_l2_netdev_ops; 981 card->dev->netdev_ops = &qeth_l2_netdev_ops;
972 card->dev->ethtool_ops = 982 if (card->info.type == QETH_CARD_TYPE_OSN) {
973 (card->info.type != QETH_CARD_TYPE_OSN) ? 983 card->dev->ethtool_ops = &qeth_l2_osn_ops;
974 &qeth_l2_ethtool_ops : &qeth_l2_osn_ops; 984 card->dev->flags |= IFF_NOARP;
985 } else {
986 card->dev->ethtool_ops = &qeth_l2_ethtool_ops;
987 }
975 card->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; 988 card->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
976 if (card->info.type == QETH_CARD_TYPE_OSD && !card->info.guestlan) { 989 if (card->info.type == QETH_CARD_TYPE_OSD && !card->info.guestlan) {
977 card->dev->hw_features = NETIF_F_SG; 990 card->dev->hw_features = NETIF_F_SG;
@@ -1269,6 +1282,7 @@ static int qeth_l2_control_event(struct qeth_card *card,
1269} 1282}
1270 1283
1271struct qeth_discipline qeth_l2_discipline = { 1284struct qeth_discipline qeth_l2_discipline = {
1285 .devtype = &qeth_l2_devtype,
1272 .start_poll = qeth_qdio_start_poll, 1286 .start_poll = qeth_qdio_start_poll,
1273 .input_handler = (qdio_handler_t *) qeth_qdio_input_handler, 1287 .input_handler = (qdio_handler_t *) qeth_qdio_input_handler,
1274 .output_handler = (qdio_handler_t *) qeth_qdio_output_handler, 1288 .output_handler = (qdio_handler_t *) qeth_qdio_output_handler,
diff --git a/drivers/s390/net/qeth_l2_sys.c b/drivers/s390/net/qeth_l2_sys.c
index 687972356d6b..9696baa49e2d 100644
--- a/drivers/s390/net/qeth_l2_sys.c
+++ b/drivers/s390/net/qeth_l2_sys.c
@@ -269,3 +269,11 @@ void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card)
269 } else 269 } else
270 qeth_bridgeport_an_set(card, 0); 270 qeth_bridgeport_an_set(card, 0);
271} 271}
272
273const struct attribute_group *qeth_l2_attr_groups[] = {
274 &qeth_device_attr_group,
275 &qeth_device_blkt_group,
276 /* l2 specific, see l2_{create,remove}_device_attributes(): */
277 &qeth_l2_bridgeport_attr_group,
278 NULL,
279};
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 6e0354ef4b86..d8df1e635163 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -3039,8 +3039,13 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
3039static int qeth_l3_probe_device(struct ccwgroup_device *gdev) 3039static int qeth_l3_probe_device(struct ccwgroup_device *gdev)
3040{ 3040{
3041 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 3041 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
3042 int rc;
3042 3043
3043 qeth_l3_create_device_attributes(&gdev->dev); 3044 rc = qeth_l3_create_device_attributes(&gdev->dev);
3045 if (rc)
3046 return rc;
3047 hash_init(card->ip_htable);
3048 hash_init(card->ip_mc_htable);
3044 card->options.layer2 = 0; 3049 card->options.layer2 = 0;
3045 card->info.hwtrap = 0; 3050 card->info.hwtrap = 0;
3046 return 0; 3051 return 0;
@@ -3306,6 +3311,7 @@ static int qeth_l3_control_event(struct qeth_card *card,
3306} 3311}
3307 3312
3308struct qeth_discipline qeth_l3_discipline = { 3313struct qeth_discipline qeth_l3_discipline = {
3314 .devtype = &qeth_generic_devtype,
3309 .start_poll = qeth_qdio_start_poll, 3315 .start_poll = qeth_qdio_start_poll,
3310 .input_handler = (qdio_handler_t *) qeth_qdio_input_handler, 3316 .input_handler = (qdio_handler_t *) qeth_qdio_input_handler,
3311 .output_handler = (qdio_handler_t *) qeth_qdio_output_handler, 3317 .output_handler = (qdio_handler_t *) qeth_qdio_output_handler,
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index 2a76ea78a0bf..b18fe2014cf2 100644
--- a/drivers/s390/virtio/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
@@ -87,7 +87,7 @@ struct vq_info_block {
87} __packed; 87} __packed;
88 88
89struct virtio_feature_desc { 89struct virtio_feature_desc {
90 __u32 features; 90 __le32 features;
91 __u8 index; 91 __u8 index;
92} __packed; 92} __packed;
93 93
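
The virtio_ccw change retypes an on-wire field from __u32 to __le32, so sparse can flag any access that skips the byte-order conversion. A userspace analogue of the same discipline, using the glibc <endian.h> helpers (illustrative only):

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

/* wire format: the field is fixed little-endian, as in virtio */
struct feature_desc {
	uint32_t features;	/* little-endian on the wire */
	uint8_t index;
} __attribute__((packed));

int main(void)
{
	struct feature_desc d;
	uint32_t host_features = 0x00010003u;

	d.features = htole32(host_features);	/* host -> wire */
	d.index = 0;
	printf("decoded: 0x%08x\n", le32toh(d.features)); /* wire -> host */
	return 0;
}

On a little-endian host both conversions compile to nothing; the annotation costs nothing at runtime and catches big-endian breakage at review time.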
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index 4fc8ed5fe067..1f424e40afdf 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -191,6 +191,7 @@ struct bnx2fc_hba {
191 struct bnx2fc_cmd_mgr *cmd_mgr; 191 struct bnx2fc_cmd_mgr *cmd_mgr;
192 spinlock_t hba_lock; 192 spinlock_t hba_lock;
193 struct mutex hba_mutex; 193 struct mutex hba_mutex;
194 struct mutex hba_stats_mutex;
194 unsigned long adapter_state; 195 unsigned long adapter_state;
195 #define ADAPTER_STATE_UP 0 196 #define ADAPTER_STATE_UP 0
196 #define ADAPTER_STATE_GOING_DOWN 1 197 #define ADAPTER_STATE_GOING_DOWN 1
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 93b5a0012417..902722dc4ce3 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -663,15 +663,17 @@ static struct fc_host_statistics *bnx2fc_get_host_stats(struct Scsi_Host *shost)
663 if (!fw_stats) 663 if (!fw_stats)
664 return NULL; 664 return NULL;
665 665
666 mutex_lock(&hba->hba_stats_mutex);
667
666 bnx2fc_stats = fc_get_host_stats(shost); 668 bnx2fc_stats = fc_get_host_stats(shost);
667 669
668 init_completion(&hba->stat_req_done); 670 init_completion(&hba->stat_req_done);
669 if (bnx2fc_send_stat_req(hba)) 671 if (bnx2fc_send_stat_req(hba))
670 return bnx2fc_stats; 672 goto unlock_stats_mutex;
671 rc = wait_for_completion_timeout(&hba->stat_req_done, (2 * HZ)); 673 rc = wait_for_completion_timeout(&hba->stat_req_done, (2 * HZ));
672 if (!rc) { 674 if (!rc) {
673 BNX2FC_HBA_DBG(lport, "FW stat req timed out\n"); 675 BNX2FC_HBA_DBG(lport, "FW stat req timed out\n");
674 return bnx2fc_stats; 676 goto unlock_stats_mutex;
675 } 677 }
676 BNX2FC_STATS(hba, rx_stat2, fc_crc_cnt); 678 BNX2FC_STATS(hba, rx_stat2, fc_crc_cnt);
677 bnx2fc_stats->invalid_crc_count += hba->bfw_stats.fc_crc_cnt; 679 bnx2fc_stats->invalid_crc_count += hba->bfw_stats.fc_crc_cnt;
@@ -693,6 +695,9 @@ static struct fc_host_statistics *bnx2fc_get_host_stats(struct Scsi_Host *shost)
693 695
694 memcpy(&hba->prev_stats, hba->stats_buffer, 696 memcpy(&hba->prev_stats, hba->stats_buffer,
695 sizeof(struct fcoe_statistics_params)); 697 sizeof(struct fcoe_statistics_params));
698
699unlock_stats_mutex:
700 mutex_unlock(&hba->hba_stats_mutex);
696 return bnx2fc_stats; 701 return bnx2fc_stats;
697} 702}
698 703
@@ -1340,6 +1345,7 @@ static struct bnx2fc_hba *bnx2fc_hba_create(struct cnic_dev *cnic)
1340 } 1345 }
1341 spin_lock_init(&hba->hba_lock); 1346 spin_lock_init(&hba->hba_lock);
1342 mutex_init(&hba->hba_mutex); 1347 mutex_init(&hba->hba_mutex);
1348 mutex_init(&hba->hba_stats_mutex);
1343 1349
1344 hba->cnic = cnic; 1350 hba->cnic = cnic;
1345 1351
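
The bnx2fc hunks put the whole firmware-stats round trip under the new hba_stats_mutex and route every early return through a single unlock label. The same shape in a standalone sketch with pthreads (names illustrative; build with -pthread):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t stats_mutex = PTHREAD_MUTEX_INITIALIZER;
static int cached_stats;

static int send_stat_req(void) { return 0; /* 0 = request sent */ }
static int wait_for_stats(void) { return 1; /* 1 = completed in time */ }

static int get_host_stats(void)
{
	int stats;

	pthread_mutex_lock(&stats_mutex);
	stats = cached_stats;		/* fallback: last known values */
	if (send_stat_req())
		goto unlock;		/* early exits still unlock */
	if (!wait_for_stats())
		goto unlock;		/* timeout: return cached stats */
	cached_stats += 1;		/* fold in fresh firmware counters */
	stats = cached_stats;
unlock:
	pthread_mutex_unlock(&stats_mutex);
	return stats;
}

int main(void)
{
	printf("stats=%d\n", get_host_stats());
	return 0;
}

Before this change the two early returns in bnx2fc_get_host_stats() would have left the lock held, which is exactly the bug class the goto-unlock idiom prevents.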
diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c
index 622bdabc8894..dab195f04da7 100644
--- a/drivers/scsi/csiostor/csio_hw.c
+++ b/drivers/scsi/csiostor/csio_hw.c
@@ -1769,7 +1769,6 @@ csio_hw_use_fwconfig(struct csio_hw *hw, int reset, u32 *fw_cfg_param)
1769 goto bye; 1769 goto bye;
1770 } 1770 }
1771 1771
1772 mempool_free(mbp, hw->mb_mempool);
1773 if (finicsum != cfcsum) { 1772 if (finicsum != cfcsum) {
1774 csio_warn(hw, 1773 csio_warn(hw,
1775 "Config File checksum mismatch: csum=%#x, computed=%#x\n", 1774 "Config File checksum mismatch: csum=%#x, computed=%#x\n",
@@ -1780,6 +1779,10 @@ csio_hw_use_fwconfig(struct csio_hw *hw, int reset, u32 *fw_cfg_param)
1780 rv = csio_hw_validate_caps(hw, mbp); 1779 rv = csio_hw_validate_caps(hw, mbp);
1781 if (rv != 0) 1780 if (rv != 0)
1782 goto bye; 1781 goto bye;
1782
1783 mempool_free(mbp, hw->mb_mempool);
1784 mbp = NULL;
1785
1783 /* 1786 /*
1784 * Note that we're operating with parameters 1787 * Note that we're operating with parameters
1785 * not supplied by the driver, rather than from hard-wired 1788 * not supplied by the driver, rather than from hard-wired
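
The csio_hw hunk moves mempool_free() past the last read of mbp — csio_hw_validate_caps() still dereferences it — and NULLs the pointer so the shared error path cannot free it twice. The generic shape, sketched with plain malloc/free:

#include <stdlib.h>
#include <string.h>

static int validate(const char *buf) { return buf[0] == 'x' ? 0 : -1; }

static int use_buffer(void)
{
	char *buf = malloc(16);
	int rv;

	if (!buf)
		return -1;
	strcpy(buf, "x-data");

	rv = validate(buf);	/* last use of buf */
	if (rv)
		goto bye;

	free(buf);		/* free only after the last use */
	buf = NULL;		/* guard the shared cleanup below */
bye:
	free(buf);		/* no-op once freed and NULLed */
	return rv;
}

int main(void) { return use_buffer(); }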
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 1076c1578322..0aae094ab91c 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -1595,7 +1595,6 @@ static void release_offload_resources(struct cxgbi_sock *csk)
1595 cxgbi_sock_put(csk); 1595 cxgbi_sock_put(csk);
1596 } 1596 }
1597 csk->dst = NULL; 1597 csk->dst = NULL;
1598 csk->cdev = NULL;
1599} 1598}
1600 1599
1601static int init_act_open(struct cxgbi_sock *csk) 1600static int init_act_open(struct cxgbi_sock *csk)
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index bd7d39ecbd24..e4c83b7c96a8 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -867,7 +867,8 @@ static void need_active_close(struct cxgbi_sock *csk)
867 log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n", 867 log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
868 csk, (csk)->state, (csk)->flags, (csk)->tid); 868 csk, (csk)->state, (csk)->flags, (csk)->tid);
869 spin_lock_bh(&csk->lock); 869 spin_lock_bh(&csk->lock);
870 dst_confirm(csk->dst); 870 if (csk->dst)
871 dst_confirm(csk->dst);
871 data_lost = skb_queue_len(&csk->receive_queue); 872 data_lost = skb_queue_len(&csk->receive_queue);
872 __skb_queue_purge(&csk->receive_queue); 873 __skb_queue_purge(&csk->receive_queue);
873 874
@@ -882,7 +883,8 @@ static void need_active_close(struct cxgbi_sock *csk)
882 } 883 }
883 884
884 if (close_req) { 885 if (close_req) {
885 if (data_lost) 886 if (!cxgbi_sock_flag(csk, CTPF_LOGOUT_RSP_RCVD) ||
887 data_lost)
886 csk->cdev->csk_send_abort_req(csk); 888 csk->cdev->csk_send_abort_req(csk);
887 else 889 else
888 csk->cdev->csk_send_close_req(csk); 890 csk->cdev->csk_send_close_req(csk);
@@ -1186,9 +1188,10 @@ static int cxgbi_sock_send_pdus(struct cxgbi_sock *csk, struct sk_buff *skb)
1186 cxgbi_ulp_extra_len(cxgbi_skcb_ulp_mode(skb)); 1188 cxgbi_ulp_extra_len(cxgbi_skcb_ulp_mode(skb));
1187 skb = next; 1189 skb = next;
1188 } 1190 }
1189done: 1191
1190 if (likely(skb_queue_len(&csk->write_queue))) 1192 if (likely(skb_queue_len(&csk->write_queue)))
1191 cdev->csk_push_tx_frames(csk, 1); 1193 cdev->csk_push_tx_frames(csk, 1);
1194done:
1192 spin_unlock_bh(&csk->lock); 1195 spin_unlock_bh(&csk->lock);
1193 return copied; 1196 return copied;
1194 1197
@@ -1568,9 +1571,12 @@ static inline int read_pdu_skb(struct iscsi_conn *conn,
1568 } 1571 }
1569} 1572}
1570 1573
1571static int skb_read_pdu_bhs(struct iscsi_conn *conn, struct sk_buff *skb) 1574static int
1575skb_read_pdu_bhs(struct cxgbi_sock *csk, struct iscsi_conn *conn,
1576 struct sk_buff *skb)
1572{ 1577{
1573 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 1578 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
1579 int err;
1574 1580
1575 log_debug(1 << CXGBI_DBG_PDU_RX, 1581 log_debug(1 << CXGBI_DBG_PDU_RX,
1576 "conn 0x%p, skb 0x%p, len %u, flag 0x%lx.\n", 1582 "conn 0x%p, skb 0x%p, len %u, flag 0x%lx.\n",
@@ -1608,7 +1614,16 @@ static int skb_read_pdu_bhs(struct iscsi_conn *conn, struct sk_buff *skb)
1608 } 1614 }
1609 } 1615 }
1610 1616
1611 return read_pdu_skb(conn, skb, 0, 0); 1617 err = read_pdu_skb(conn, skb, 0, 0);
1618 if (likely(err >= 0)) {
1619 struct iscsi_hdr *hdr = (struct iscsi_hdr *)skb->data;
1620 u8 opcode = hdr->opcode & ISCSI_OPCODE_MASK;
1621
1622 if (unlikely(opcode == ISCSI_OP_LOGOUT_RSP))
1623 cxgbi_sock_set_flag(csk, CTPF_LOGOUT_RSP_RCVD);
1624 }
1625
1626 return err;
1612} 1627}
1613 1628
1614static int skb_read_pdu_data(struct iscsi_conn *conn, struct sk_buff *lskb, 1629static int skb_read_pdu_data(struct iscsi_conn *conn, struct sk_buff *lskb,
@@ -1713,7 +1728,7 @@ void cxgbi_conn_pdu_ready(struct cxgbi_sock *csk)
1713 cxgbi_skcb_rx_pdulen(skb)); 1728 cxgbi_skcb_rx_pdulen(skb));
1714 1729
1715 if (cxgbi_skcb_test_flag(skb, SKCBF_RX_COALESCED)) { 1730 if (cxgbi_skcb_test_flag(skb, SKCBF_RX_COALESCED)) {
1716 err = skb_read_pdu_bhs(conn, skb); 1731 err = skb_read_pdu_bhs(csk, conn, skb);
1717 if (err < 0) { 1732 if (err < 0) {
1718 pr_err("coalesced bhs, csk 0x%p, skb 0x%p,%u, " 1733 pr_err("coalesced bhs, csk 0x%p, skb 0x%p,%u, "
1719 "f 0x%lx, plen %u.\n", 1734 "f 0x%lx, plen %u.\n",
@@ -1731,7 +1746,7 @@ void cxgbi_conn_pdu_ready(struct cxgbi_sock *csk)
1731 cxgbi_skcb_flags(skb), 1746 cxgbi_skcb_flags(skb),
1732 cxgbi_skcb_rx_pdulen(skb)); 1747 cxgbi_skcb_rx_pdulen(skb));
1733 } else { 1748 } else {
1734 err = skb_read_pdu_bhs(conn, skb); 1749 err = skb_read_pdu_bhs(csk, conn, skb);
1735 if (err < 0) { 1750 if (err < 0) {
1736 pr_err("bhs, csk 0x%p, skb 0x%p,%u, " 1751 pr_err("bhs, csk 0x%p, skb 0x%p,%u, "
1737 "f 0x%lx, plen %u.\n", 1752 "f 0x%lx, plen %u.\n",
@@ -1873,6 +1888,11 @@ int cxgbi_conn_alloc_pdu(struct iscsi_task *task, u8 opcode)
1873 tcp_task->dd_data = tdata; 1888 tcp_task->dd_data = tdata;
1874 task->hdr = NULL; 1889 task->hdr = NULL;
1875 1890
1891 if (tdata->skb) {
1892 kfree_skb(tdata->skb);
1893 tdata->skb = NULL;
1894 }
1895
1876 if (SKB_MAX_HEAD(cdev->skb_tx_rsvd) > (512 * MAX_SKB_FRAGS) && 1896 if (SKB_MAX_HEAD(cdev->skb_tx_rsvd) > (512 * MAX_SKB_FRAGS) &&
1877 (opcode == ISCSI_OP_SCSI_DATA_OUT || 1897 (opcode == ISCSI_OP_SCSI_DATA_OUT ||
1878 (opcode == ISCSI_OP_SCSI_CMD && 1898 (opcode == ISCSI_OP_SCSI_CMD &&
@@ -1890,6 +1910,7 @@ int cxgbi_conn_alloc_pdu(struct iscsi_task *task, u8 opcode)
1890 return -ENOMEM; 1910 return -ENOMEM;
1891 } 1911 }
1892 1912
1913 skb_get(tdata->skb);
1893 skb_reserve(tdata->skb, cdev->skb_tx_rsvd); 1914 skb_reserve(tdata->skb, cdev->skb_tx_rsvd);
1894 task->hdr = (struct iscsi_hdr *)tdata->skb->data; 1915 task->hdr = (struct iscsi_hdr *)tdata->skb->data;
1895 task->hdr_max = SKB_TX_ISCSI_PDU_HEADER_MAX; /* BHS + AHS */ 1916 task->hdr_max = SKB_TX_ISCSI_PDU_HEADER_MAX; /* BHS + AHS */
@@ -2035,9 +2056,9 @@ int cxgbi_conn_xmit_pdu(struct iscsi_task *task)
2035 unsigned int datalen; 2056 unsigned int datalen;
2036 int err; 2057 int err;
2037 2058
2038 if (!skb) { 2059 if (!skb || cxgbi_skcb_test_flag(skb, SKCBF_TX_DONE)) {
2039 log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX, 2060 log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
2040 "task 0x%p, skb NULL.\n", task); 2061 "task 0x%p, skb 0x%p\n", task, skb);
2041 return 0; 2062 return 0;
2042 } 2063 }
2043 2064
@@ -2050,7 +2071,6 @@ int cxgbi_conn_xmit_pdu(struct iscsi_task *task)
2050 } 2071 }
2051 2072
2052 datalen = skb->data_len; 2073 datalen = skb->data_len;
2053 tdata->skb = NULL;
2054 2074
2055 /* write ppod first if using ofldq to write ppod */ 2075 /* write ppod first if using ofldq to write ppod */
2056 if (ttinfo->flags & CXGBI_PPOD_INFO_FLAG_VALID) { 2076 if (ttinfo->flags & CXGBI_PPOD_INFO_FLAG_VALID) {
@@ -2078,6 +2098,7 @@ int cxgbi_conn_xmit_pdu(struct iscsi_task *task)
2078 pdulen += ISCSI_DIGEST_SIZE; 2098 pdulen += ISCSI_DIGEST_SIZE;
2079 2099
2080 task->conn->txdata_octets += pdulen; 2100 task->conn->txdata_octets += pdulen;
2101 cxgbi_skcb_set_flag(skb, SKCBF_TX_DONE);
2081 return 0; 2102 return 0;
2082 } 2103 }
2083 2104
@@ -2086,7 +2107,6 @@ int cxgbi_conn_xmit_pdu(struct iscsi_task *task)
2086 "task 0x%p, skb 0x%p, len %u/%u, %d EAGAIN.\n", 2107 "task 0x%p, skb 0x%p, len %u/%u, %d EAGAIN.\n",
2087 task, skb, skb->len, skb->data_len, err); 2108 task, skb, skb->len, skb->data_len, err);
2088 /* reset skb to send when we are called again */ 2109 /* reset skb to send when we are called again */
2089 tdata->skb = skb;
2090 return err; 2110 return err;
2091 } 2111 }
2092 2112
@@ -2094,7 +2114,8 @@ int cxgbi_conn_xmit_pdu(struct iscsi_task *task)
2094 "itt 0x%x, skb 0x%p, len %u/%u, xmit err %d.\n", 2114 "itt 0x%x, skb 0x%p, len %u/%u, xmit err %d.\n",
2095 task->itt, skb, skb->len, skb->data_len, err); 2115 task->itt, skb, skb->len, skb->data_len, err);
2096 2116
2097 kfree_skb(skb); 2117 __kfree_skb(tdata->skb);
2118 tdata->skb = NULL;
2098 2119
2099 iscsi_conn_printk(KERN_ERR, task->conn, "xmit err %d.\n", err); 2120 iscsi_conn_printk(KERN_ERR, task->conn, "xmit err %d.\n", err);
2100 iscsi_conn_failure(task->conn, ISCSI_ERR_XMIT_FAILED); 2121 iscsi_conn_failure(task->conn, ISCSI_ERR_XMIT_FAILED);
@@ -2113,8 +2134,10 @@ void cxgbi_cleanup_task(struct iscsi_task *task)
2113 2134
2114 tcp_task->dd_data = NULL; 2135 tcp_task->dd_data = NULL;
2115 /* never reached the xmit task callout */ 2136 /* never reached the xmit task callout */
2116 if (tdata->skb) 2137 if (tdata->skb) {
2117 __kfree_skb(tdata->skb); 2138 kfree_skb(tdata->skb);
2139 tdata->skb = NULL;
2140 }
2118 2141
2119 task_release_itt(task, task->hdr_itt); 2142 task_release_itt(task, task->hdr_itt);
2120 memset(tdata, 0, sizeof(*tdata)); 2143 memset(tdata, 0, sizeof(*tdata));
@@ -2714,6 +2737,9 @@ EXPORT_SYMBOL_GPL(cxgbi_attr_is_visible);
2714static int __init libcxgbi_init_module(void) 2737static int __init libcxgbi_init_module(void)
2715{ 2738{
2716 pr_info("%s", version); 2739 pr_info("%s", version);
2740
2741 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, cb) <
2742 sizeof(struct cxgbi_skb_cb));
2717 return 0; 2743 return 0;
2718} 2744}
2719 2745
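
The BUILD_BUG_ON added to libcxgbi_init_module() asserts that the driver's private cb state fits inside sk_buff->cb. A standalone C11 analogue using _Static_assert — FIELD_SIZEOF is the kernel macro, but sizeof on a member expression does the same job here:

#include <stddef.h>
#include <stdio.h>

struct fake_skb {
	char cb[48];		/* scratch area owned by the current layer */
};

struct my_skb_cb {
	unsigned long flags;
	unsigned int seq;
	void *handle;
};

/* fails the build, not the run, if my_skb_cb outgrows cb[] */
_Static_assert(sizeof(((struct fake_skb *)0)->cb) >= sizeof(struct my_skb_cb),
	       "my_skb_cb must fit in fake_skb.cb");

int main(void)
{
	printf("cb=%zu priv=%zu\n",
	       sizeof(((struct fake_skb *)0)->cb), sizeof(struct my_skb_cb));
	return 0;
}

This matters for the hunk above it, which grows cxgbi_skb_tx_cb by a second pointer: the assertion turns a silent cb overrun into a compile error.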
diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h
index 18e0ea83d361..37f07aaab1e4 100644
--- a/drivers/scsi/cxgbi/libcxgbi.h
+++ b/drivers/scsi/cxgbi/libcxgbi.h
@@ -187,6 +187,7 @@ enum cxgbi_sock_flags {
187 CTPF_HAS_ATID, /* reserved atid */ 187 CTPF_HAS_ATID, /* reserved atid */
188 CTPF_HAS_TID, /* reserved hw tid */ 188 CTPF_HAS_TID, /* reserved hw tid */
189 CTPF_OFFLOAD_DOWN, /* offload function off */ 189 CTPF_OFFLOAD_DOWN, /* offload function off */
190 CTPF_LOGOUT_RSP_RCVD, /* received logout response */
190}; 191};
191 192
192struct cxgbi_skb_rx_cb { 193struct cxgbi_skb_rx_cb {
@@ -195,7 +196,8 @@ struct cxgbi_skb_rx_cb {
195}; 196};
196 197
197struct cxgbi_skb_tx_cb { 198struct cxgbi_skb_tx_cb {
198 void *l2t; 199 void *handle;
200 void *arp_err_handler;
199 struct sk_buff *wr_next; 201 struct sk_buff *wr_next;
200}; 202};
201 203
@@ -203,6 +205,7 @@ enum cxgbi_skcb_flags {
203 SKCBF_TX_NEED_HDR, /* packet needs a header */ 205 SKCBF_TX_NEED_HDR, /* packet needs a header */
204 SKCBF_TX_MEM_WRITE, /* memory write */ 206 SKCBF_TX_MEM_WRITE, /* memory write */
205 SKCBF_TX_FLAG_COMPL, /* wr completion flag */ 207 SKCBF_TX_FLAG_COMPL, /* wr completion flag */
208 SKCBF_TX_DONE, /* skb tx done */
206 SKCBF_RX_COALESCED, /* received whole pdu */ 209 SKCBF_RX_COALESCED, /* received whole pdu */
207 SKCBF_RX_HDR, /* received pdu header */ 210 SKCBF_RX_HDR, /* received pdu header */
208 SKCBF_RX_DATA, /* received pdu payload */ 211 SKCBF_RX_DATA, /* received pdu payload */
@@ -215,13 +218,13 @@ enum cxgbi_skcb_flags {
215}; 218};
216 219
217struct cxgbi_skb_cb { 220struct cxgbi_skb_cb {
218 unsigned char ulp_mode;
219 unsigned long flags;
220 unsigned int seq;
221 union { 221 union {
222 struct cxgbi_skb_rx_cb rx; 222 struct cxgbi_skb_rx_cb rx;
223 struct cxgbi_skb_tx_cb tx; 223 struct cxgbi_skb_tx_cb tx;
224 }; 224 };
225 unsigned char ulp_mode;
226 unsigned long flags;
227 unsigned int seq;
225}; 228};
226 229
227#define CXGBI_SKB_CB(skb) ((struct cxgbi_skb_cb *)&((skb)->cb[0])) 230#define CXGBI_SKB_CB(skb) ((struct cxgbi_skb_cb *)&((skb)->cb[0]))
@@ -374,11 +377,9 @@ static inline void cxgbi_sock_enqueue_wr(struct cxgbi_sock *csk,
374 cxgbi_skcb_tx_wr_next(skb) = NULL; 377 cxgbi_skcb_tx_wr_next(skb) = NULL;
375 /* 378 /*
376 * We want to take an extra reference since both us and the driver 379 * We want to take an extra reference since both us and the driver
377 * need to free the packet before it's really freed. We know there's 380 * need to free the packet before it's really freed.
378 * just one user currently so we use atomic_set rather than skb_get
379 * to avoid the atomic op.
380 */ 381 */
381 atomic_set(&skb->users, 2); 382 skb_get(skb);
382 383
383 if (!csk->wr_pending_head) 384 if (!csk->wr_pending_head)
384 csk->wr_pending_head = skb; 385 csk->wr_pending_head = skb;
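
The libcxgbi.h hunk replaces atomic_set(&skb->users, 2) with skb_get(): taking a reference by increment composes with whatever other holders exist, while writing an absolute count silently clobbers them. A tiny single-threaded refcount model of the difference (illustrative, not the skb API — the real counter is atomic):

#include <stdio.h>
#include <stdlib.h>

struct buf {
	int users;		/* stand-in for skb->users */
};

static struct buf *buf_get(struct buf *b)
{
	b->users++;		/* safe no matter who else holds a ref */
	return b;
}

static void buf_put(struct buf *b)
{
	if (--b->users == 0)
		free(b);
}

int main(void)
{
	struct buf *b = malloc(sizeof(*b));

	b->users = 1;		/* creator's reference */
	buf_get(b);		/* queued for transmit: second holder */
	/* writing b->users = 2 here would overwrite a reference any
	 * third party had taken in the meantime; the increment cannot */
	buf_put(b);		/* transmit completion drops one ref */
	buf_put(b);		/* creator drops the last ref, buf freed */
	return 0;
}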
diff --git a/drivers/scsi/cxlflash/Kconfig b/drivers/scsi/cxlflash/Kconfig
index c052104e523e..a011c5dbf214 100644
--- a/drivers/scsi/cxlflash/Kconfig
+++ b/drivers/scsi/cxlflash/Kconfig
@@ -5,6 +5,7 @@
5config CXLFLASH 5config CXLFLASH
6 tristate "Support for IBM CAPI Flash" 6 tristate "Support for IBM CAPI Flash"
7 depends on PCI && SCSI && CXL && EEH 7 depends on PCI && SCSI && CXL && EEH
8 select IRQ_POLL
8 default m 9 default m
9 help 10 help
10 Allows CAPI Accelerated IO to Flash 11 Allows CAPI Accelerated IO to Flash
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 3cbab8710e58..2ceff585f189 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -265,18 +265,16 @@ static unsigned int rdac_failover_get(struct rdac_controller *ctlr,
265 struct list_head *list, 265 struct list_head *list,
266 unsigned char *cdb) 266 unsigned char *cdb)
267{ 267{
268 struct scsi_device *sdev = ctlr->ms_sdev;
269 struct rdac_dh_data *h = sdev->handler_data;
270 struct rdac_mode_common *common; 268 struct rdac_mode_common *common;
271 unsigned data_size; 269 unsigned data_size;
272 struct rdac_queue_data *qdata; 270 struct rdac_queue_data *qdata;
273 u8 *lun_table; 271 u8 *lun_table;
274 272
275 if (h->ctlr->use_ms10) { 273 if (ctlr->use_ms10) {
276 struct rdac_pg_expanded *rdac_pg; 274 struct rdac_pg_expanded *rdac_pg;
277 275
278 data_size = sizeof(struct rdac_pg_expanded); 276 data_size = sizeof(struct rdac_pg_expanded);
279 rdac_pg = &h->ctlr->mode_select.expanded; 277 rdac_pg = &ctlr->mode_select.expanded;
280 memset(rdac_pg, 0, data_size); 278 memset(rdac_pg, 0, data_size);
281 common = &rdac_pg->common; 279 common = &rdac_pg->common;
282 rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER + 0x40; 280 rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER + 0x40;
@@ -288,7 +286,7 @@ static unsigned int rdac_failover_get(struct rdac_controller *ctlr,
288 struct rdac_pg_legacy *rdac_pg; 286 struct rdac_pg_legacy *rdac_pg;
289 287
290 data_size = sizeof(struct rdac_pg_legacy); 288 data_size = sizeof(struct rdac_pg_legacy);
291 rdac_pg = &h->ctlr->mode_select.legacy; 289 rdac_pg = &ctlr->mode_select.legacy;
292 memset(rdac_pg, 0, data_size); 290 memset(rdac_pg, 0, data_size);
293 common = &rdac_pg->common; 291 common = &rdac_pg->common;
294 rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER; 292 rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER;
@@ -304,7 +302,7 @@ static unsigned int rdac_failover_get(struct rdac_controller *ctlr,
304 } 302 }
305 303
306 /* Prepare the command. */ 304 /* Prepare the command. */
307 if (h->ctlr->use_ms10) { 305 if (ctlr->use_ms10) {
308 cdb[0] = MODE_SELECT_10; 306 cdb[0] = MODE_SELECT_10;
309 cdb[7] = data_size >> 8; 307 cdb[7] = data_size >> 8;
310 cdb[8] = data_size & 0xff; 308 cdb[8] = data_size & 0xff;
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index d390325c99ec..abf6026645dd 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -1170,6 +1170,8 @@ static struct ibmvscsis_cmd *ibmvscsis_get_free_cmd(struct scsi_info *vscsi)
1170 cmd = list_first_entry_or_null(&vscsi->free_cmd, 1170 cmd = list_first_entry_or_null(&vscsi->free_cmd,
1171 struct ibmvscsis_cmd, list); 1171 struct ibmvscsis_cmd, list);
1172 if (cmd) { 1172 if (cmd) {
1173 if (cmd->abort_cmd)
1174 cmd->abort_cmd = NULL;
1173 cmd->flags &= ~(DELAY_SEND); 1175 cmd->flags &= ~(DELAY_SEND);
1174 list_del(&cmd->list); 1176 list_del(&cmd->list);
1175 cmd->iue = iue; 1177 cmd->iue = iue;
@@ -1774,6 +1776,7 @@ static void ibmvscsis_send_messages(struct scsi_info *vscsi)
1774 if (cmd->abort_cmd) { 1776 if (cmd->abort_cmd) {
1775 retry = true; 1777 retry = true;
1776 cmd->abort_cmd->flags &= ~(DELAY_SEND); 1778 cmd->abort_cmd->flags &= ~(DELAY_SEND);
1779 cmd->abort_cmd = NULL;
1777 } 1780 }
1778 1781
1779 /* 1782 /*
@@ -1788,6 +1791,25 @@ static void ibmvscsis_send_messages(struct scsi_info *vscsi)
1788 list_del(&cmd->list); 1791 list_del(&cmd->list);
1789 ibmvscsis_free_cmd_resources(vscsi, 1792 ibmvscsis_free_cmd_resources(vscsi,
1790 cmd); 1793 cmd);
1794 /*
1795 * With a successfully aborted op
1796 * through LIO we want to increment the
 1797 * vscsi credit so that when we don't
1798 * send a rsp to the original scsi abort
1799 * op (h_send_crq), but the tm rsp to
1800 * the abort is sent, the credit is
1801 * correctly sent with the abort tm rsp.
1802 * We would need 1 for the abort tm rsp
1803 * and 1 credit for the aborted scsi op.
1804 * Thus we need to increment here.
1805 * Also we want to increment the credit
1806 * here because we want to make sure
 1807 * cmd is actually released first,
 1808 * otherwise the client will think
 1809 * it can send a new cmd, and we could
1810 * find ourselves short of cmd elements.
1811 */
1812 vscsi->credit += 1;
1791 } else { 1813 } else {
1792 iue = cmd->iue; 1814 iue = cmd->iue;
1793 1815
@@ -2962,10 +2984,7 @@ static long srp_build_response(struct scsi_info *vscsi,
2962 2984
2963 rsp->opcode = SRP_RSP; 2985 rsp->opcode = SRP_RSP;
2964 2986
2965 if (vscsi->credit > 0 && vscsi->state == SRP_PROCESSING) 2987 rsp->req_lim_delta = cpu_to_be32(1 + vscsi->credit);
2966 rsp->req_lim_delta = cpu_to_be32(vscsi->credit);
2967 else
2968 rsp->req_lim_delta = cpu_to_be32(1 + vscsi->credit);
2969 rsp->tag = cmd->rsp.tag; 2988 rsp->tag = cmd->rsp.tag;
2970 rsp->flags = 0; 2989 rsp->flags = 0;
2971 2990
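
The credit logic simplified in srp_build_response() above pairs with the new increment in ibmvscsis_send_messages(): an aborted op that never gets its own response banks one credit, and the next response returns it via req_lim_delta. A minimal model of that accounting, with illustrative names:

#include <stdio.h>

static unsigned int credit;	/* responses owed back to the client */

static unsigned int build_response(void)
{
	unsigned int delta = 1 + credit;   /* this rsp + banked credit */
	credit = 0;
	return delta;
}

int main(void)
{
	/* an aborted scsi op is freed without sending its own rsp */
	credit += 1;			/* bank its credit first */
	/* the tm (abort) response then carries both credits */
	printf("req_lim_delta=%u\n", build_response());
	return 0;
}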
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index a808e8ef1d08..234352da5c3c 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -407,11 +407,12 @@ unlock:
407 * can_queue. Eventually we will hit the point where we run 407 * can_queue. Eventually we will hit the point where we run
408 * on all reserved structs. 408 * on all reserved structs.
409 */ 409 */
410static void fc_fcp_can_queue_ramp_down(struct fc_lport *lport) 410static bool fc_fcp_can_queue_ramp_down(struct fc_lport *lport)
411{ 411{
412 struct fc_fcp_internal *si = fc_get_scsi_internal(lport); 412 struct fc_fcp_internal *si = fc_get_scsi_internal(lport);
413 unsigned long flags; 413 unsigned long flags;
414 int can_queue; 414 int can_queue;
415 bool changed = false;
415 416
416 spin_lock_irqsave(lport->host->host_lock, flags); 417 spin_lock_irqsave(lport->host->host_lock, flags);
417 418
@@ -427,9 +428,11 @@ static void fc_fcp_can_queue_ramp_down(struct fc_lport *lport)
427 if (!can_queue) 428 if (!can_queue)
428 can_queue = 1; 429 can_queue = 1;
429 lport->host->can_queue = can_queue; 430 lport->host->can_queue = can_queue;
431 changed = true;
430 432
431unlock: 433unlock:
432 spin_unlock_irqrestore(lport->host->host_lock, flags); 434 spin_unlock_irqrestore(lport->host->host_lock, flags);
435 return changed;
433} 436}
434 437
435/* 438/*
@@ -1896,11 +1899,11 @@ int fc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc_cmd)
1896 1899
1897 if (!fc_fcp_lport_queue_ready(lport)) { 1900 if (!fc_fcp_lport_queue_ready(lport)) {
1898 if (lport->qfull) { 1901 if (lport->qfull) {
1899 fc_fcp_can_queue_ramp_down(lport); 1902 if (fc_fcp_can_queue_ramp_down(lport))
1900 shost_printk(KERN_ERR, lport->host, 1903 shost_printk(KERN_ERR, lport->host,
1901 "libfc: queue full, " 1904 "libfc: queue full, "
1902 "reducing can_queue to %d.\n", 1905 "reducing can_queue to %d.\n",
1903 lport->host->can_queue); 1906 lport->host->can_queue);
1904 } 1907 }
1905 rc = SCSI_MLQUEUE_HOST_BUSY; 1908 rc = SCSI_MLQUEUE_HOST_BUSY;
1906 goto out; 1909 goto out;
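
fc_fcp_can_queue_ramp_down() now returns whether it actually changed can_queue, so the caller logs only on a real transition instead of on every queue-full event. The same gate in a compact sketch (illustrative thresholds; build with -pthread):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t host_lock = PTHREAD_MUTEX_INITIALIZER;
static int can_queue = 32;

/* returns true only when can_queue actually changed */
static bool can_queue_ramp_down(void)
{
	bool changed = false;

	pthread_mutex_lock(&host_lock);
	if (can_queue <= 1)
		goto unlock;	/* already at the floor: nothing to report */
	can_queue /= 2;
	if (!can_queue)
		can_queue = 1;
	changed = true;
unlock:
	pthread_mutex_unlock(&host_lock);
	return changed;
}

int main(void)
{
	if (can_queue_ramp_down())	/* log only on a real change */
		printf("queue full, reducing can_queue to %d\n", can_queue);
	return 0;
}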
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index b44c3136eb51..520325867e2b 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -1422,7 +1422,7 @@ static void fc_rport_recv_rtv_req(struct fc_rport_priv *rdata,
1422 fp = fc_frame_alloc(lport, sizeof(*rtv)); 1422 fp = fc_frame_alloc(lport, sizeof(*rtv));
1423 if (!fp) { 1423 if (!fp) {
1424 rjt_data.reason = ELS_RJT_UNAB; 1424 rjt_data.reason = ELS_RJT_UNAB;
1425 rjt_data.reason = ELS_EXPL_INSUF_RES; 1425 rjt_data.explan = ELS_EXPL_INSUF_RES;
1426 fc_seq_els_rsp_send(in_fp, ELS_LS_RJT, &rjt_data); 1426 fc_seq_els_rsp_send(in_fp, ELS_LS_RJT, &rjt_data);
1427 goto drop; 1427 goto drop;
1428 } 1428 }
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 6d7840b096e6..f2c0ba6ced78 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -141,6 +141,13 @@ struct lpfc_dmabuf {
141 uint32_t buffer_tag; /* used for tagged queue ring */ 141 uint32_t buffer_tag; /* used for tagged queue ring */
142}; 142};
143 143
144struct lpfc_nvmet_ctxbuf {
145 struct list_head list;
146 struct lpfc_nvmet_rcv_ctx *context;
147 struct lpfc_iocbq *iocbq;
148 struct lpfc_sglq *sglq;
149};
150
144struct lpfc_dma_pool { 151struct lpfc_dma_pool {
145 struct lpfc_dmabuf *elements; 152 struct lpfc_dmabuf *elements;
146 uint32_t max_count; 153 uint32_t max_count;
@@ -163,9 +170,7 @@ struct rqb_dmabuf {
163 struct lpfc_dmabuf dbuf; 170 struct lpfc_dmabuf dbuf;
164 uint16_t total_size; 171 uint16_t total_size;
165 uint16_t bytes_recv; 172 uint16_t bytes_recv;
166 void *context; 173 uint16_t idx;
167 struct lpfc_iocbq *iocbq;
168 struct lpfc_sglq *sglq;
169 struct lpfc_queue *hrq; /* ptr to associated Header RQ */ 174 struct lpfc_queue *hrq; /* ptr to associated Header RQ */
170 struct lpfc_queue *drq; /* ptr to associated Data RQ */ 175 struct lpfc_queue *drq; /* ptr to associated Data RQ */
171}; 176};
@@ -670,6 +675,8 @@ struct lpfc_hba {
670 /* INIT_LINK mailbox command */ 675 /* INIT_LINK mailbox command */
671#define LS_NPIV_FAB_SUPPORTED 0x2 /* Fabric supports NPIV */ 676#define LS_NPIV_FAB_SUPPORTED 0x2 /* Fabric supports NPIV */
672#define LS_IGNORE_ERATT 0x4 /* intr handler should ignore ERATT */ 677#define LS_IGNORE_ERATT 0x4 /* intr handler should ignore ERATT */
678#define LS_MDS_LINK_DOWN 0x8 /* MDS Diagnostics Link Down */
679#define LS_MDS_LOOPBACK 0x16 /* MDS Diagnostics Link Up (Loopback) */
673 680
674 uint32_t hba_flag; /* hba generic flags */ 681 uint32_t hba_flag; /* hba generic flags */
675#define HBA_ERATT_HANDLED 0x1 /* This flag is set when eratt handled */ 682#define HBA_ERATT_HANDLED 0x1 /* This flag is set when eratt handled */
@@ -777,7 +784,6 @@ struct lpfc_hba {
777 uint32_t cfg_nvme_oas; 784 uint32_t cfg_nvme_oas;
778 uint32_t cfg_nvme_io_channel; 785 uint32_t cfg_nvme_io_channel;
779 uint32_t cfg_nvmet_mrq; 786 uint32_t cfg_nvmet_mrq;
780 uint32_t cfg_nvmet_mrq_post;
781 uint32_t cfg_enable_nvmet; 787 uint32_t cfg_enable_nvmet;
782 uint32_t cfg_nvme_enable_fb; 788 uint32_t cfg_nvme_enable_fb;
783 uint32_t cfg_nvmet_fb_size; 789 uint32_t cfg_nvmet_fb_size;
@@ -943,6 +949,7 @@ struct lpfc_hba {
943 struct pci_pool *lpfc_mbuf_pool; 949 struct pci_pool *lpfc_mbuf_pool;
944 struct pci_pool *lpfc_hrb_pool; /* header receive buffer pool */ 950 struct pci_pool *lpfc_hrb_pool; /* header receive buffer pool */
945 struct pci_pool *lpfc_drb_pool; /* data receive buffer pool */ 951 struct pci_pool *lpfc_drb_pool; /* data receive buffer pool */
952 struct pci_pool *lpfc_nvmet_drb_pool; /* data receive buffer pool */
946 struct pci_pool *lpfc_hbq_pool; /* SLI3 hbq buffer pool */ 953 struct pci_pool *lpfc_hbq_pool; /* SLI3 hbq buffer pool */
947 struct pci_pool *txrdy_payload_pool; 954 struct pci_pool *txrdy_payload_pool;
948 struct lpfc_dma_pool lpfc_mbuf_safety_pool; 955 struct lpfc_dma_pool lpfc_mbuf_safety_pool;
@@ -1228,7 +1235,11 @@ lpfc_sli_read_hs(struct lpfc_hba *phba)
1228static inline struct lpfc_sli_ring * 1235static inline struct lpfc_sli_ring *
1229lpfc_phba_elsring(struct lpfc_hba *phba) 1236lpfc_phba_elsring(struct lpfc_hba *phba)
1230{ 1237{
1231 if (phba->sli_rev == LPFC_SLI_REV4) 1238 if (phba->sli_rev == LPFC_SLI_REV4) {
1232 return phba->sli4_hba.els_wq->pring; 1239 if (phba->sli4_hba.els_wq)
1240 return phba->sli4_hba.els_wq->pring;
1241 else
1242 return NULL;
1243 }
1233 return &phba->sli.sli3_ring[LPFC_ELS_RING]; 1244 return &phba->sli.sli3_ring[LPFC_ELS_RING];
1234} 1245}
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 4830370bfab1..bb2d9e238225 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -60,9 +60,9 @@
60#define LPFC_MIN_DEVLOSS_TMO 1 60#define LPFC_MIN_DEVLOSS_TMO 1
61#define LPFC_MAX_DEVLOSS_TMO 255 61#define LPFC_MAX_DEVLOSS_TMO 255
62 62
63#define LPFC_DEF_MRQ_POST 256 63#define LPFC_DEF_MRQ_POST 512
64#define LPFC_MIN_MRQ_POST 32 64#define LPFC_MIN_MRQ_POST 512
65#define LPFC_MAX_MRQ_POST 512 65#define LPFC_MAX_MRQ_POST 2048
66 66
67/* 67/*
68 * Write key size should be multiple of 4. If write key is changed 68 * Write key size should be multiple of 4. If write key is changed
@@ -205,8 +205,9 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
205 atomic_read(&tgtp->xmt_ls_rsp_error)); 205 atomic_read(&tgtp->xmt_ls_rsp_error));
206 206
207 len += snprintf(buf+len, PAGE_SIZE-len, 207 len += snprintf(buf+len, PAGE_SIZE-len,
208 "FCP: Rcv %08x Drop %08x\n", 208 "FCP: Rcv %08x Release %08x Drop %08x\n",
209 atomic_read(&tgtp->rcv_fcp_cmd_in), 209 atomic_read(&tgtp->rcv_fcp_cmd_in),
210 atomic_read(&tgtp->xmt_fcp_release),
210 atomic_read(&tgtp->rcv_fcp_cmd_drop)); 211 atomic_read(&tgtp->rcv_fcp_cmd_drop));
211 212
212 if (atomic_read(&tgtp->rcv_fcp_cmd_in) != 213 if (atomic_read(&tgtp->rcv_fcp_cmd_in) !=
@@ -218,15 +219,12 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
218 } 219 }
219 220
220 len += snprintf(buf+len, PAGE_SIZE-len, 221 len += snprintf(buf+len, PAGE_SIZE-len,
221 "FCP Rsp: RD %08x rsp %08x WR %08x rsp %08x\n", 222 "FCP Rsp: RD %08x rsp %08x WR %08x rsp %08x "
223 "drop %08x\n",
222 atomic_read(&tgtp->xmt_fcp_read), 224 atomic_read(&tgtp->xmt_fcp_read),
223 atomic_read(&tgtp->xmt_fcp_read_rsp), 225 atomic_read(&tgtp->xmt_fcp_read_rsp),
224 atomic_read(&tgtp->xmt_fcp_write), 226 atomic_read(&tgtp->xmt_fcp_write),
225 atomic_read(&tgtp->xmt_fcp_rsp)); 227 atomic_read(&tgtp->xmt_fcp_rsp),
226
227 len += snprintf(buf+len, PAGE_SIZE-len,
228 "FCP Rsp: abort %08x drop %08x\n",
229 atomic_read(&tgtp->xmt_fcp_abort),
230 atomic_read(&tgtp->xmt_fcp_drop)); 228 atomic_read(&tgtp->xmt_fcp_drop));
231 229
232 len += snprintf(buf+len, PAGE_SIZE-len, 230 len += snprintf(buf+len, PAGE_SIZE-len,
@@ -236,10 +234,22 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
236 atomic_read(&tgtp->xmt_fcp_rsp_drop)); 234 atomic_read(&tgtp->xmt_fcp_rsp_drop));
237 235
238 len += snprintf(buf+len, PAGE_SIZE-len, 236 len += snprintf(buf+len, PAGE_SIZE-len,
239 "ABORT: Xmt %08x Err %08x Cmpl %08x", 237 "ABORT: Xmt %08x Cmpl %08x\n",
238 atomic_read(&tgtp->xmt_fcp_abort),
239 atomic_read(&tgtp->xmt_fcp_abort_cmpl));
240
241 len += snprintf(buf + len, PAGE_SIZE - len,
242 "ABORT: Sol %08x Usol %08x Err %08x Cmpl %08x",
243 atomic_read(&tgtp->xmt_abort_sol),
244 atomic_read(&tgtp->xmt_abort_unsol),
240 atomic_read(&tgtp->xmt_abort_rsp), 245 atomic_read(&tgtp->xmt_abort_rsp),
241 atomic_read(&tgtp->xmt_abort_rsp_error), 246 atomic_read(&tgtp->xmt_abort_rsp_error));
242 atomic_read(&tgtp->xmt_abort_cmpl)); 247
248 len += snprintf(buf + len, PAGE_SIZE - len,
249 "IO_CTX: %08x outstanding %08x total %x",
250 phba->sli4_hba.nvmet_ctx_cnt,
251 phba->sli4_hba.nvmet_io_wait_cnt,
252 phba->sli4_hba.nvmet_io_wait_total);
243 253
244 len += snprintf(buf+len, PAGE_SIZE-len, "\n"); 254 len += snprintf(buf+len, PAGE_SIZE-len, "\n");
245 return len; 255 return len;
@@ -3312,14 +3322,6 @@ LPFC_ATTR_R(nvmet_mrq,
3312 "Specify number of RQ pairs for processing NVMET cmds"); 3322 "Specify number of RQ pairs for processing NVMET cmds");
3313 3323
3314/* 3324/*
3315 * lpfc_nvmet_mrq_post: Specify number buffers to post on every MRQ
3316 *
3317 */
3318LPFC_ATTR_R(nvmet_mrq_post, LPFC_DEF_MRQ_POST,
3319 LPFC_MIN_MRQ_POST, LPFC_MAX_MRQ_POST,
3320 "Specify number of buffers to post on every MRQ");
3321
3322/*
3323 * lpfc_enable_fc4_type: Defines what FC4 types are supported. 3325 * lpfc_enable_fc4_type: Defines what FC4 types are supported.
3324 * Supported Values: 1 - register just FCP 3326 * Supported Values: 1 - register just FCP
3325 * 3 - register both FCP and NVME 3327 * 3 - register both FCP and NVME
@@ -5154,7 +5156,6 @@ struct device_attribute *lpfc_hba_attrs[] = {
5154 &dev_attr_lpfc_suppress_rsp, 5156 &dev_attr_lpfc_suppress_rsp,
5155 &dev_attr_lpfc_nvme_io_channel, 5157 &dev_attr_lpfc_nvme_io_channel,
5156 &dev_attr_lpfc_nvmet_mrq, 5158 &dev_attr_lpfc_nvmet_mrq,
5157 &dev_attr_lpfc_nvmet_mrq_post,
5158 &dev_attr_lpfc_nvme_enable_fb, 5159 &dev_attr_lpfc_nvme_enable_fb,
5159 &dev_attr_lpfc_nvmet_fb_size, 5160 &dev_attr_lpfc_nvmet_fb_size,
5160 &dev_attr_lpfc_enable_bg, 5161 &dev_attr_lpfc_enable_bg,
@@ -6194,7 +6195,6 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
6194 6195
6195 lpfc_enable_fc4_type_init(phba, lpfc_enable_fc4_type); 6196 lpfc_enable_fc4_type_init(phba, lpfc_enable_fc4_type);
6196 lpfc_nvmet_mrq_init(phba, lpfc_nvmet_mrq); 6197 lpfc_nvmet_mrq_init(phba, lpfc_nvmet_mrq);
6197 lpfc_nvmet_mrq_post_init(phba, lpfc_nvmet_mrq_post);
6198 6198
6199 /* Initialize first burst. Target vs Initiator are different. */ 6199 /* Initialize first burst. Target vs Initiator are different. */
6200 lpfc_nvme_enable_fb_init(phba, lpfc_nvme_enable_fb); 6200 lpfc_nvme_enable_fb_init(phba, lpfc_nvme_enable_fb);
@@ -6291,7 +6291,6 @@ lpfc_nvme_mod_param_dep(struct lpfc_hba *phba)
6291 /* Not NVME Target mode. Turn off Target parameters. */ 6291 /* Not NVME Target mode. Turn off Target parameters. */
6292 phba->nvmet_support = 0; 6292 phba->nvmet_support = 0;
6293 phba->cfg_nvmet_mrq = 0; 6293 phba->cfg_nvmet_mrq = 0;
6294 phba->cfg_nvmet_mrq_post = 0;
6295 phba->cfg_nvmet_fb_size = 0; 6294 phba->cfg_nvmet_fb_size = 0;
6296 } 6295 }
6297 6296
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 944b32ca4931..da669dce12fe 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -75,6 +75,10 @@ void lpfc_init_vpi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
75void lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *); 75void lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *);
76void lpfc_retry_pport_discovery(struct lpfc_hba *); 76void lpfc_retry_pport_discovery(struct lpfc_hba *);
77void lpfc_release_rpi(struct lpfc_hba *, struct lpfc_vport *, uint16_t); 77void lpfc_release_rpi(struct lpfc_hba *, struct lpfc_vport *, uint16_t);
78int lpfc_init_iocb_list(struct lpfc_hba *phba, int cnt);
79void lpfc_free_iocb_list(struct lpfc_hba *phba);
80int lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
81 struct lpfc_queue *drq, int count, int idx);
78 82
79void lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *, LPFC_MBOXQ_t *); 83void lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *, LPFC_MBOXQ_t *);
80void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); 84void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
@@ -123,7 +127,7 @@ int lpfc_disc_state_machine(struct lpfc_vport *, struct lpfc_nodelist *, void *,
123void lpfc_do_scr_ns_plogi(struct lpfc_hba *, struct lpfc_vport *); 127void lpfc_do_scr_ns_plogi(struct lpfc_hba *, struct lpfc_vport *);
124int lpfc_check_sparm(struct lpfc_vport *, struct lpfc_nodelist *, 128int lpfc_check_sparm(struct lpfc_vport *, struct lpfc_nodelist *,
125 struct serv_parm *, uint32_t, int); 129 struct serv_parm *, uint32_t, int);
126int lpfc_els_abort(struct lpfc_hba *, struct lpfc_nodelist *); 130void lpfc_els_abort(struct lpfc_hba *, struct lpfc_nodelist *);
127void lpfc_more_plogi(struct lpfc_vport *); 131void lpfc_more_plogi(struct lpfc_vport *);
128void lpfc_more_adisc(struct lpfc_vport *); 132void lpfc_more_adisc(struct lpfc_vport *);
129void lpfc_end_rscn(struct lpfc_vport *); 133void lpfc_end_rscn(struct lpfc_vport *);
@@ -246,16 +250,14 @@ struct hbq_dmabuf *lpfc_sli4_rb_alloc(struct lpfc_hba *);
246void lpfc_sli4_rb_free(struct lpfc_hba *, struct hbq_dmabuf *); 250void lpfc_sli4_rb_free(struct lpfc_hba *, struct hbq_dmabuf *);
247struct rqb_dmabuf *lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba); 251struct rqb_dmabuf *lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba);
248void lpfc_sli4_nvmet_free(struct lpfc_hba *phba, struct rqb_dmabuf *dmab); 252void lpfc_sli4_nvmet_free(struct lpfc_hba *phba, struct rqb_dmabuf *dmab);
249void lpfc_nvmet_rq_post(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp, 253void lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba,
250 struct lpfc_dmabuf *mp); 254 struct lpfc_nvmet_ctxbuf *ctxp);
251int lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport, 255int lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
252 struct fc_frame_header *fc_hdr); 256 struct fc_frame_header *fc_hdr);
253void lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *, struct fcf_record *, 257void lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *, struct fcf_record *,
254 uint16_t); 258 uint16_t);
255int lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq, 259int lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
256 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe); 260 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe);
257int lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hq,
258 struct lpfc_queue *dq, int count);
259int lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hq); 261int lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hq);
260void lpfc_unregister_fcf(struct lpfc_hba *); 262void lpfc_unregister_fcf(struct lpfc_hba *);
261void lpfc_unregister_fcf_rescan(struct lpfc_hba *); 263void lpfc_unregister_fcf_rescan(struct lpfc_hba *);
@@ -271,6 +273,7 @@ int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *, uint16_t);
271void lpfc_sli4_clear_fcf_rr_bmask(struct lpfc_hba *); 273void lpfc_sli4_clear_fcf_rr_bmask(struct lpfc_hba *);
272 274
273int lpfc_mem_alloc(struct lpfc_hba *, int align); 275int lpfc_mem_alloc(struct lpfc_hba *, int align);
276int lpfc_nvmet_mem_alloc(struct lpfc_hba *phba);
274int lpfc_mem_alloc_active_rrq_pool_s4(struct lpfc_hba *); 277int lpfc_mem_alloc_active_rrq_pool_s4(struct lpfc_hba *);
275void lpfc_mem_free(struct lpfc_hba *); 278void lpfc_mem_free(struct lpfc_hba *);
276void lpfc_mem_free_all(struct lpfc_hba *); 279void lpfc_mem_free_all(struct lpfc_hba *);
@@ -294,6 +297,7 @@ int lpfc_selective_reset(struct lpfc_hba *);
294void lpfc_reset_barrier(struct lpfc_hba *); 297void lpfc_reset_barrier(struct lpfc_hba *);
295int lpfc_sli_brdready(struct lpfc_hba *, uint32_t); 298int lpfc_sli_brdready(struct lpfc_hba *, uint32_t);
296int lpfc_sli_brdkill(struct lpfc_hba *); 299int lpfc_sli_brdkill(struct lpfc_hba *);
300int lpfc_sli_chipset_init(struct lpfc_hba *phba);
297int lpfc_sli_brdreset(struct lpfc_hba *); 301int lpfc_sli_brdreset(struct lpfc_hba *);
298int lpfc_sli_brdrestart(struct lpfc_hba *); 302int lpfc_sli_brdrestart(struct lpfc_hba *);
299int lpfc_sli_hba_setup(struct lpfc_hba *); 303int lpfc_sli_hba_setup(struct lpfc_hba *);
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 1487406aea77..24ce96dcc94d 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -630,7 +630,7 @@ lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint8_t fc4_type,
630 NLP_EVT_DEVICE_RECOVERY); 630 NLP_EVT_DEVICE_RECOVERY);
631 spin_lock_irq(shost->host_lock); 631 spin_lock_irq(shost->host_lock);
632 ndlp->nlp_flag &= ~NLP_NVMET_RECOV; 632 ndlp->nlp_flag &= ~NLP_NVMET_RECOV;
633 spin_lock_irq(shost->host_lock); 633 spin_unlock_irq(shost->host_lock);
634 } 634 }
635 } 635 }
636 636
@@ -978,9 +978,10 @@ lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
978 ndlp, did, ndlp->nlp_fc4_type, 978 ndlp, did, ndlp->nlp_fc4_type,
979 FC_TYPE_FCP, FC_TYPE_NVME); 979 FC_TYPE_FCP, FC_TYPE_NVME);
980 ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE; 980 ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
981
982 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
983 lpfc_issue_els_prli(vport, ndlp, 0);
981 } 984 }
982 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
983 lpfc_issue_els_prli(vport, ndlp, 0);
984 } else 985 } else
985 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 986 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
986 "3065 GFT_ID failed x%08x\n", irsp->ulpStatus); 987 "3065 GFT_ID failed x%08x\n", irsp->ulpStatus);
@@ -2092,6 +2093,7 @@ lpfc_fdmi_port_attr_fc4type(struct lpfc_vport *vport,
2092 2093
2093 ae->un.AttrTypes[3] = 0x02; /* Type 1 - ELS */ 2094 ae->un.AttrTypes[3] = 0x02; /* Type 1 - ELS */
2094 ae->un.AttrTypes[2] = 0x01; /* Type 8 - FCP */ 2095 ae->un.AttrTypes[2] = 0x01; /* Type 8 - FCP */
2096 ae->un.AttrTypes[6] = 0x01; /* Type 40 - NVME */
2095 ae->un.AttrTypes[7] = 0x01; /* Type 32 - CT */ 2097 ae->un.AttrTypes[7] = 0x01; /* Type 32 - CT */
2096 size = FOURBYTES + 32; 2098 size = FOURBYTES + 32;
2097 ad->AttrLen = cpu_to_be16(size); 2099 ad->AttrLen = cpu_to_be16(size);
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index fce549a91911..4bcb92c844ca 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -798,21 +798,22 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
798 atomic_read(&tgtp->xmt_fcp_rsp)); 798 atomic_read(&tgtp->xmt_fcp_rsp));
799 799
800 len += snprintf(buf + len, size - len, 800 len += snprintf(buf + len, size - len,
801 "FCP Rsp: abort %08x drop %08x\n",
802 atomic_read(&tgtp->xmt_fcp_abort),
803 atomic_read(&tgtp->xmt_fcp_drop));
804
805 len += snprintf(buf + len, size - len,
806 "FCP Rsp Cmpl: %08x err %08x drop %08x\n", 801 "FCP Rsp Cmpl: %08x err %08x drop %08x\n",
807 atomic_read(&tgtp->xmt_fcp_rsp_cmpl), 802 atomic_read(&tgtp->xmt_fcp_rsp_cmpl),
808 atomic_read(&tgtp->xmt_fcp_rsp_error), 803 atomic_read(&tgtp->xmt_fcp_rsp_error),
809 atomic_read(&tgtp->xmt_fcp_rsp_drop)); 804 atomic_read(&tgtp->xmt_fcp_rsp_drop));
810 805
811 len += snprintf(buf + len, size - len, 806 len += snprintf(buf + len, size - len,
812 "ABORT: Xmt %08x Err %08x Cmpl %08x", 807 "ABORT: Xmt %08x Cmpl %08x\n",
808 atomic_read(&tgtp->xmt_fcp_abort),
809 atomic_read(&tgtp->xmt_fcp_abort_cmpl));
810
811 len += snprintf(buf + len, size - len,
812 "ABORT: Sol %08x Usol %08x Err %08x Cmpl %08x",
813 atomic_read(&tgtp->xmt_abort_sol),
814 atomic_read(&tgtp->xmt_abort_unsol),
813 atomic_read(&tgtp->xmt_abort_rsp), 815 atomic_read(&tgtp->xmt_abort_rsp),
814 atomic_read(&tgtp->xmt_abort_rsp_error), 816 atomic_read(&tgtp->xmt_abort_rsp_error));
815 atomic_read(&tgtp->xmt_abort_cmpl));
816 817
817 len += snprintf(buf + len, size - len, "\n"); 818 len += snprintf(buf + len, size - len, "\n");
818 819
@@ -841,6 +842,12 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
841 } 842 }
842 spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock); 843 spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
843 } 844 }
845
846 len += snprintf(buf + len, size - len,
847 "IO_CTX: %08x outstanding %08x total %08x\n",
848 phba->sli4_hba.nvmet_ctx_cnt,
849 phba->sli4_hba.nvmet_io_wait_cnt,
850 phba->sli4_hba.nvmet_io_wait_total);
844 } else { 851 } else {
845 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) 852 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
846 return len; 853 return len;
@@ -1959,6 +1966,7 @@ lpfc_debugfs_nvmestat_write(struct file *file, const char __user *buf,
1959 atomic_set(&tgtp->rcv_ls_req_out, 0); 1966 atomic_set(&tgtp->rcv_ls_req_out, 0);
1960 atomic_set(&tgtp->rcv_ls_req_drop, 0); 1967 atomic_set(&tgtp->rcv_ls_req_drop, 0);
1961 atomic_set(&tgtp->xmt_ls_abort, 0); 1968 atomic_set(&tgtp->xmt_ls_abort, 0);
1969 atomic_set(&tgtp->xmt_ls_abort_cmpl, 0);
1962 atomic_set(&tgtp->xmt_ls_rsp, 0); 1970 atomic_set(&tgtp->xmt_ls_rsp, 0);
1963 atomic_set(&tgtp->xmt_ls_drop, 0); 1971 atomic_set(&tgtp->xmt_ls_drop, 0);
1964 atomic_set(&tgtp->xmt_ls_rsp_error, 0); 1972 atomic_set(&tgtp->xmt_ls_rsp_error, 0);
@@ -1967,19 +1975,22 @@ lpfc_debugfs_nvmestat_write(struct file *file, const char __user *buf,
1967 atomic_set(&tgtp->rcv_fcp_cmd_in, 0); 1975 atomic_set(&tgtp->rcv_fcp_cmd_in, 0);
1968 atomic_set(&tgtp->rcv_fcp_cmd_out, 0); 1976 atomic_set(&tgtp->rcv_fcp_cmd_out, 0);
1969 atomic_set(&tgtp->rcv_fcp_cmd_drop, 0); 1977 atomic_set(&tgtp->rcv_fcp_cmd_drop, 0);
1970 atomic_set(&tgtp->xmt_fcp_abort, 0);
1971 atomic_set(&tgtp->xmt_fcp_drop, 0); 1978 atomic_set(&tgtp->xmt_fcp_drop, 0);
1972 atomic_set(&tgtp->xmt_fcp_read_rsp, 0); 1979 atomic_set(&tgtp->xmt_fcp_read_rsp, 0);
1973 atomic_set(&tgtp->xmt_fcp_read, 0); 1980 atomic_set(&tgtp->xmt_fcp_read, 0);
1974 atomic_set(&tgtp->xmt_fcp_write, 0); 1981 atomic_set(&tgtp->xmt_fcp_write, 0);
1975 atomic_set(&tgtp->xmt_fcp_rsp, 0); 1982 atomic_set(&tgtp->xmt_fcp_rsp, 0);
1983 atomic_set(&tgtp->xmt_fcp_release, 0);
1976 atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0); 1984 atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0);
1977 atomic_set(&tgtp->xmt_fcp_rsp_error, 0); 1985 atomic_set(&tgtp->xmt_fcp_rsp_error, 0);
1978 atomic_set(&tgtp->xmt_fcp_rsp_drop, 0); 1986 atomic_set(&tgtp->xmt_fcp_rsp_drop, 0);
1979 1987
1988 atomic_set(&tgtp->xmt_fcp_abort, 0);
1989 atomic_set(&tgtp->xmt_fcp_abort_cmpl, 0);
1990 atomic_set(&tgtp->xmt_abort_sol, 0);
1991 atomic_set(&tgtp->xmt_abort_unsol, 0);
1980 atomic_set(&tgtp->xmt_abort_rsp, 0); 1992 atomic_set(&tgtp->xmt_abort_rsp, 0);
1981 atomic_set(&tgtp->xmt_abort_rsp_error, 0); 1993 atomic_set(&tgtp->xmt_abort_rsp_error, 0);
1982 atomic_set(&tgtp->xmt_abort_cmpl, 0);
1983 } 1994 }
1984 return nbytes; 1995 return nbytes;
1985} 1996}
@@ -3070,11 +3081,11 @@ __lpfc_idiag_print_wq(struct lpfc_queue *qp, char *wqtype,
3070 qp->assoc_qid, qp->q_cnt_1, 3081 qp->assoc_qid, qp->q_cnt_1,
3071 (unsigned long long)qp->q_cnt_4); 3082 (unsigned long long)qp->q_cnt_4);
3072 len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, 3083 len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
3073 "\t\tWQID[%02d], QE-CNT[%04d], QE-SIZE[%04d], " 3084 "\t\tWQID[%02d], QE-CNT[%04d], QE-SZ[%04d], "
3074 "HOST-IDX[%04d], PORT-IDX[%04d]", 3085 "HST-IDX[%04d], PRT-IDX[%04d], PST[%03d]",
3075 qp->queue_id, qp->entry_count, 3086 qp->queue_id, qp->entry_count,
3076 qp->entry_size, qp->host_index, 3087 qp->entry_size, qp->host_index,
3077 qp->hba_index); 3088 qp->hba_index, qp->entry_repost);
3078 len += snprintf(pbuffer + len, 3089 len += snprintf(pbuffer + len,
3079 LPFC_QUE_INFO_GET_BUF_SIZE - len, "\n"); 3090 LPFC_QUE_INFO_GET_BUF_SIZE - len, "\n");
3080 return len; 3091 return len;
@@ -3121,11 +3132,11 @@ __lpfc_idiag_print_cq(struct lpfc_queue *qp, char *cqtype,
3121 qp->assoc_qid, qp->q_cnt_1, qp->q_cnt_2, 3132 qp->assoc_qid, qp->q_cnt_1, qp->q_cnt_2,
3122 qp->q_cnt_3, (unsigned long long)qp->q_cnt_4); 3133 qp->q_cnt_3, (unsigned long long)qp->q_cnt_4);
3123 len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, 3134 len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
3124 "\tCQID[%02d], QE-CNT[%04d], QE-SIZE[%04d], " 3135 "\tCQID[%02d], QE-CNT[%04d], QE-SZ[%04d], "
3125 "HOST-IDX[%04d], PORT-IDX[%04d]", 3136 "HST-IDX[%04d], PRT-IDX[%04d], PST[%03d]",
3126 qp->queue_id, qp->entry_count, 3137 qp->queue_id, qp->entry_count,
3127 qp->entry_size, qp->host_index, 3138 qp->entry_size, qp->host_index,
3128 qp->hba_index); 3139 qp->hba_index, qp->entry_repost);
3129 3140
3130 len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "\n"); 3141 len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "\n");
3131 3142
@@ -3143,20 +3154,20 @@ __lpfc_idiag_print_rqpair(struct lpfc_queue *qp, struct lpfc_queue *datqp,
3143 "\t\t%s RQ info: ", rqtype); 3154 "\t\t%s RQ info: ", rqtype);
3144 len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, 3155 len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
3145 "AssocCQID[%02d]: RQ-STAT[nopost:x%x nobuf:x%x " 3156 "AssocCQID[%02d]: RQ-STAT[nopost:x%x nobuf:x%x "
3146 "trunc:x%x rcv:x%llx]\n", 3157 "posted:x%x rcv:x%llx]\n",
3147 qp->assoc_qid, qp->q_cnt_1, qp->q_cnt_2, 3158 qp->assoc_qid, qp->q_cnt_1, qp->q_cnt_2,
3148 qp->q_cnt_3, (unsigned long long)qp->q_cnt_4); 3159 qp->q_cnt_3, (unsigned long long)qp->q_cnt_4);
3149 len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, 3160 len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
3150 "\t\tHQID[%02d], QE-CNT[%04d], QE-SIZE[%04d], " 3161 "\t\tHQID[%02d], QE-CNT[%04d], QE-SZ[%04d], "
3151 "HOST-IDX[%04d], PORT-IDX[%04d]\n", 3162 "HST-IDX[%04d], PRT-IDX[%04d], PST[%03d]\n",
3152 qp->queue_id, qp->entry_count, qp->entry_size, 3163 qp->queue_id, qp->entry_count, qp->entry_size,
3153 qp->host_index, qp->hba_index); 3164 qp->host_index, qp->hba_index, qp->entry_repost);
3154 len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, 3165 len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
3155 "\t\tDQID[%02d], QE-CNT[%04d], QE-SIZE[%04d], " 3166 "\t\tDQID[%02d], QE-CNT[%04d], QE-SZ[%04d], "
3156 "HOST-IDX[%04d], PORT-IDX[%04d]\n", 3167 "HST-IDX[%04d], PRT-IDX[%04d], PST[%03d]\n",
3157 datqp->queue_id, datqp->entry_count, 3168 datqp->queue_id, datqp->entry_count,
3158 datqp->entry_size, datqp->host_index, 3169 datqp->entry_size, datqp->host_index,
3159 datqp->hba_index); 3170 datqp->hba_index, datqp->entry_repost);
3160 return len; 3171 return len;
3161} 3172}
3162 3173
@@ -3242,10 +3253,10 @@ __lpfc_idiag_print_eq(struct lpfc_queue *qp, char *eqtype,
3242 eqtype, qp->q_cnt_1, qp->q_cnt_2, qp->q_cnt_3, 3253 eqtype, qp->q_cnt_1, qp->q_cnt_2, qp->q_cnt_3,
3243 (unsigned long long)qp->q_cnt_4); 3254 (unsigned long long)qp->q_cnt_4);
3244 len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, 3255 len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
3245 "EQID[%02d], QE-CNT[%04d], QE-SIZE[%04d], " 3256 "EQID[%02d], QE-CNT[%04d], QE-SZ[%04d], "
3246 "HOST-IDX[%04d], PORT-IDX[%04d]", 3257 "HST-IDX[%04d], PRT-IDX[%04d], PST[%03d]",
3247 qp->queue_id, qp->entry_count, qp->entry_size, 3258 qp->queue_id, qp->entry_count, qp->entry_size,
3248 qp->host_index, qp->hba_index); 3259 qp->host_index, qp->hba_index, qp->entry_repost);
3249 len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "\n"); 3260 len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "\n");
3250 3261
3251 return len; 3262 return len;
@@ -5855,8 +5866,10 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
5855 atomic_dec(&lpfc_debugfs_hba_count); 5866 atomic_dec(&lpfc_debugfs_hba_count);
5856 } 5867 }
5857 5868
5858 debugfs_remove(lpfc_debugfs_root); /* lpfc */ 5869 if (atomic_read(&lpfc_debugfs_hba_count) == 0) {
5859 lpfc_debugfs_root = NULL; 5870 debugfs_remove(lpfc_debugfs_root); /* lpfc */
5871 lpfc_debugfs_root = NULL;
5872 }
5860 } 5873 }
5861#endif 5874#endif
5862 return; 5875 return;
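
The debugfs hunk above makes removal of the shared lpfc root conditional on the last HBA going away, instead of removing it unconditionally. A minimal userspace sketch of that last-one-out teardown pattern, using a C11 atomic that folds the decrement and the zero test into one step (the driver performs them separately under its own serialization); all names below are illustrative, not lpfc API:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static atomic_int instance_count;       /* stands in for lpfc_debugfs_hba_count */
static char *shared_root;               /* stands in for lpfc_debugfs_root */

static void instance_create(void)
{
        if (atomic_fetch_add(&instance_count, 1) == 0)
                shared_root = strdup("debugfs:/lpfc");  /* first user creates it */
}

static void instance_destroy(void)
{
        /* Only the last user may tear down the shared root, mirroring
         * the new hba_count == 0 check in lpfc_debugfs_terminate(). */
        if (atomic_fetch_sub(&instance_count, 1) == 1) {
                free(shared_root);
                shared_root = NULL;
        }
}

int main(void)
{
        instance_create();
        instance_create();
        instance_destroy();             /* one user left: root survives */
        printf("root: %s\n", shared_root ? shared_root : "(removed)");
        instance_destroy();             /* last one out: root removed */
        printf("root: %s\n", shared_root ? shared_root : "(removed)");
        return 0;
}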
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index 9d5a379f4b15..094c97b9e5f7 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -90,6 +90,7 @@ struct lpfc_nodelist {
90#define NLP_FCP_INITIATOR 0x10 /* entry is an FCP Initiator */ 90#define NLP_FCP_INITIATOR 0x10 /* entry is an FCP Initiator */
91#define NLP_NVME_TARGET 0x20 /* entry is a NVME Target */ 91#define NLP_NVME_TARGET 0x20 /* entry is a NVME Target */
92#define NLP_NVME_INITIATOR 0x40 /* entry is a NVME Initiator */ 92#define NLP_NVME_INITIATOR 0x40 /* entry is a NVME Initiator */
93#define NLP_NVME_DISCOVERY 0x80 /* entry has NVME disc srvc */
93 94
94 uint16_t nlp_fc4_type; /* FC types node supports. */ 95 uint16_t nlp_fc4_type; /* FC types node supports. */
95 /* Assigned from GID_FF, only 96 /* Assigned from GID_FF, only
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 67827e397431..8e532b39ae93 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1047,6 +1047,13 @@ stop_rr_fcf_flogi:
1047 irsp->ulpStatus, irsp->un.ulpWord[4], 1047 irsp->ulpStatus, irsp->un.ulpWord[4],
1048 irsp->ulpTimeout); 1048 irsp->ulpTimeout);
1049 1049
1050
1051 /* If this is not a loop open failure, bail out */
1052 if (!(irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
1053 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
1054 IOERR_LOOP_OPEN_FAILURE)))
1055 goto flogifail;
1056
1050 /* FLOGI failed, so there is no fabric */ 1057 /* FLOGI failed, so there is no fabric */
1051 spin_lock_irq(shost->host_lock); 1058 spin_lock_irq(shost->host_lock);
1052 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); 1059 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
@@ -2077,16 +2084,19 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2077 2084
2078 if (irsp->ulpStatus) { 2085 if (irsp->ulpStatus) {
2079 /* Check for retry */ 2086 /* Check for retry */
2087 ndlp->fc4_prli_sent--;
2080 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 2088 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
2081 /* ELS command is being retried */ 2089 /* ELS command is being retried */
2082 ndlp->fc4_prli_sent--;
2083 goto out; 2090 goto out;
2084 } 2091 }
2092
2085 /* PRLI failed */ 2093 /* PRLI failed */
2086 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 2094 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
2087 "2754 PRLI failure DID:%06X Status:x%x/x%x\n", 2095 "2754 PRLI failure DID:%06X Status:x%x/x%x, "
2096 "data: x%x\n",
2088 ndlp->nlp_DID, irsp->ulpStatus, 2097 ndlp->nlp_DID, irsp->ulpStatus,
2089 irsp->un.ulpWord[4]); 2098 irsp->un.ulpWord[4], ndlp->fc4_prli_sent);
2099
2090 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 2100 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
2091 if (lpfc_error_lost_link(irsp)) 2101 if (lpfc_error_lost_link(irsp))
2092 goto out; 2102 goto out;
@@ -7441,6 +7451,13 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
7441 */ 7451 */
7442 spin_lock_irq(&phba->hbalock); 7452 spin_lock_irq(&phba->hbalock);
7443 pring = lpfc_phba_elsring(phba); 7453 pring = lpfc_phba_elsring(phba);
7454
7455 /* Bail out if we've no ELS wq, like in PCI error recovery case. */
7456 if (unlikely(!pring)) {
7457 spin_unlock_irq(&phba->hbalock);
7458 return;
7459 }
7460
7444 if (phba->sli_rev == LPFC_SLI_REV4) 7461 if (phba->sli_rev == LPFC_SLI_REV4)
7445 spin_lock(&pring->ring_lock); 7462 spin_lock(&pring->ring_lock);
7446 7463
@@ -8667,7 +8684,8 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
8667 lpfc_do_scr_ns_plogi(phba, vport); 8684 lpfc_do_scr_ns_plogi(phba, vport);
8668 goto out; 8685 goto out;
8669fdisc_failed: 8686fdisc_failed:
8670 if (vport->fc_vport->vport_state != FC_VPORT_NO_FABRIC_RSCS) 8687 if (vport->fc_vport &&
8688 (vport->fc_vport->vport_state != FC_VPORT_NO_FABRIC_RSCS))
8671 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 8689 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
8672 /* Cancel discovery timer */ 8690 /* Cancel discovery timer */
8673 lpfc_can_disctmo(vport); 8691 lpfc_can_disctmo(vport);
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 0482c5580331..3ffcd9215ca8 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -693,15 +693,16 @@ lpfc_work_done(struct lpfc_hba *phba)
693 pring = lpfc_phba_elsring(phba); 693 pring = lpfc_phba_elsring(phba);
694 status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING))); 694 status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
695 status >>= (4*LPFC_ELS_RING); 695 status >>= (4*LPFC_ELS_RING);
696 if ((status & HA_RXMASK) || 696 if (pring && (status & HA_RXMASK ||
697 (pring->flag & LPFC_DEFERRED_RING_EVENT) || 697 pring->flag & LPFC_DEFERRED_RING_EVENT ||
698 (phba->hba_flag & HBA_SP_QUEUE_EVT)) { 698 phba->hba_flag & HBA_SP_QUEUE_EVT)) {
699 if (pring->flag & LPFC_STOP_IOCB_EVENT) { 699 if (pring->flag & LPFC_STOP_IOCB_EVENT) {
700 pring->flag |= LPFC_DEFERRED_RING_EVENT; 700 pring->flag |= LPFC_DEFERRED_RING_EVENT;
701 /* Set the lpfc data pending flag */ 701 /* Set the lpfc data pending flag */
702 set_bit(LPFC_DATA_READY, &phba->data_flags); 702 set_bit(LPFC_DATA_READY, &phba->data_flags);
703 } else { 703 } else {
704 if (phba->link_state >= LPFC_LINK_UP) { 704 if (phba->link_state >= LPFC_LINK_UP ||
705 phba->link_flag & LS_MDS_LOOPBACK) {
705 pring->flag &= ~LPFC_DEFERRED_RING_EVENT; 706 pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
706 lpfc_sli_handle_slow_ring_event(phba, pring, 707 lpfc_sli_handle_slow_ring_event(phba, pring,
707 (status & 708 (status &
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 1d12f2be36bc..e0a5fce416ae 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -1356,6 +1356,7 @@ struct lpfc_mbx_wq_destroy {
1356 1356
1357#define LPFC_HDR_BUF_SIZE 128 1357#define LPFC_HDR_BUF_SIZE 128
1358#define LPFC_DATA_BUF_SIZE 2048 1358#define LPFC_DATA_BUF_SIZE 2048
1359#define LPFC_NVMET_DATA_BUF_SIZE 128
1359struct rq_context { 1360struct rq_context {
1360 uint32_t word0; 1361 uint32_t word0;
1361#define lpfc_rq_context_rqe_count_SHIFT 16 /* Version 0 Only */ 1362#define lpfc_rq_context_rqe_count_SHIFT 16 /* Version 0 Only */
@@ -4420,6 +4421,19 @@ struct fcp_treceive64_wqe {
4420}; 4421};
4421#define TXRDY_PAYLOAD_LEN 12 4422#define TXRDY_PAYLOAD_LEN 12
4422 4423
4424#define CMD_SEND_FRAME 0xE1
4425
4426struct send_frame_wqe {
4427 struct ulp_bde64 bde; /* words 0-2 */
4428 uint32_t frame_len; /* word 3 */
4429 uint32_t fc_hdr_wd0; /* word 4 */
4430 uint32_t fc_hdr_wd1; /* word 5 */
4431 struct wqe_common wqe_com; /* words 6-11 */
4432 uint32_t fc_hdr_wd2; /* word 12 */
4433 uint32_t fc_hdr_wd3; /* word 13 */
4434 uint32_t fc_hdr_wd4; /* word 14 */
4435 uint32_t fc_hdr_wd5; /* word 15 */
4436};
4423 4437
4424union lpfc_wqe { 4438union lpfc_wqe {
4425 uint32_t words[16]; 4439 uint32_t words[16];
@@ -4438,7 +4452,7 @@ union lpfc_wqe {
4438 struct fcp_trsp64_wqe fcp_trsp; 4452 struct fcp_trsp64_wqe fcp_trsp;
4439 struct fcp_tsend64_wqe fcp_tsend; 4453 struct fcp_tsend64_wqe fcp_tsend;
4440 struct fcp_treceive64_wqe fcp_treceive; 4454 struct fcp_treceive64_wqe fcp_treceive;
4441 4455 struct send_frame_wqe send_frame;
4442}; 4456};
4443 4457
4444union lpfc_wqe128 { 4458union lpfc_wqe128 {
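
The new SEND_FRAME WQE added above must share a 16-word (64-byte) union with every other WQE shape in union lpfc_wqe. A compile-time sketch of that size invariant; only the field names mirror the diff, the stand-in types below are illustrative rather than the real lpfc_hw4.h layouts:

#include <stdint.h>

struct ulp_bde64_s { uint32_t w[3]; };          /* words 0-2, stand-in */
struct wqe_common_s { uint32_t w[6]; };         /* words 6-11, stand-in */

struct send_frame_wqe_s {
        struct ulp_bde64_s bde;                 /* words 0-2  */
        uint32_t frame_len;                     /* word 3     */
        uint32_t fc_hdr_wd0;                    /* word 4     */
        uint32_t fc_hdr_wd1;                    /* word 5     */
        struct wqe_common_s wqe_com;            /* words 6-11 */
        uint32_t fc_hdr_wd2;                    /* word 12    */
        uint32_t fc_hdr_wd3;                    /* word 13    */
        uint32_t fc_hdr_wd4;                    /* word 14    */
        uint32_t fc_hdr_wd5;                    /* word 15    */
};

/* union lpfc_wqe starts with uint32_t words[16], so every member,
 * including the new send_frame layout, must be exactly 16 words. */
_Static_assert(sizeof(struct send_frame_wqe_s) == 16 * sizeof(uint32_t),
               "SEND_FRAME WQE must occupy exactly 16 words");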
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 90ae354a9c45..9add9473cae5 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1099,7 +1099,7 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
1099 1099
1100 list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) { 1100 list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) {
1101 ctxp->flag &= ~(LPFC_NVMET_XBUSY | LPFC_NVMET_ABORT_OP); 1101 ctxp->flag &= ~(LPFC_NVMET_XBUSY | LPFC_NVMET_ABORT_OP);
1102 lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf); 1102 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
1103 } 1103 }
1104 } 1104 }
1105 1105
@@ -3381,7 +3381,7 @@ lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba)
3381{ 3381{
3382 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL; 3382 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
3383 uint16_t i, lxri, xri_cnt, els_xri_cnt; 3383 uint16_t i, lxri, xri_cnt, els_xri_cnt;
3384 uint16_t nvmet_xri_cnt, tot_cnt; 3384 uint16_t nvmet_xri_cnt;
3385 LIST_HEAD(nvmet_sgl_list); 3385 LIST_HEAD(nvmet_sgl_list);
3386 int rc; 3386 int rc;
3387 3387
@@ -3389,15 +3389,9 @@ lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba)
3389 * update on pci function's nvmet xri-sgl list 3389 * update on pci function's nvmet xri-sgl list
3390 */ 3390 */
3391 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); 3391 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
3392 nvmet_xri_cnt = phba->cfg_nvmet_mrq * phba->cfg_nvmet_mrq_post; 3392
3393 tot_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt; 3393 /* For NVMET, ALL remaining XRIs are dedicated for IO processing */
3394 if (nvmet_xri_cnt > tot_cnt) { 3394 nvmet_xri_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
3395 phba->cfg_nvmet_mrq_post = tot_cnt / phba->cfg_nvmet_mrq;
3396 nvmet_xri_cnt = phba->cfg_nvmet_mrq * phba->cfg_nvmet_mrq_post;
3397 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3398 "6301 NVMET post-sgl count changed to %d\n",
3399 phba->cfg_nvmet_mrq_post);
3400 }
3401 3395
3402 if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) { 3396 if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) {
3403 /* els xri-sgl expanded */ 3397 /* els xri-sgl expanded */
@@ -3602,6 +3596,13 @@ lpfc_get_wwpn(struct lpfc_hba *phba)
3602 LPFC_MBOXQ_t *mboxq; 3596 LPFC_MBOXQ_t *mboxq;
3603 MAILBOX_t *mb; 3597 MAILBOX_t *mb;
3604 3598
3599 if (phba->sli_rev < LPFC_SLI_REV4) {
3600 /* Reset the port first */
3601 lpfc_sli_brdrestart(phba);
3602 rc = lpfc_sli_chipset_init(phba);
3603 if (rc)
3604 return (uint64_t)-1;
3605 }
3605 3606
3606 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 3607 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
3607 GFP_KERNEL); 3608 GFP_KERNEL);
@@ -4539,6 +4540,19 @@ lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
4539 pmb->vport = phba->pport; 4540 pmb->vport = phba->pport;
4540 4541
4541 if (phba->sli4_hba.link_state.status != LPFC_FC_LA_TYPE_LINK_UP) { 4542 if (phba->sli4_hba.link_state.status != LPFC_FC_LA_TYPE_LINK_UP) {
4543 phba->link_flag &= ~(LS_MDS_LINK_DOWN | LS_MDS_LOOPBACK);
4544
4545 switch (phba->sli4_hba.link_state.status) {
4546 case LPFC_FC_LA_TYPE_MDS_LINK_DOWN:
4547 phba->link_flag |= LS_MDS_LINK_DOWN;
4548 break;
4549 case LPFC_FC_LA_TYPE_MDS_LOOPBACK:
4550 phba->link_flag |= LS_MDS_LOOPBACK;
4551 break;
4552 default:
4553 break;
4554 }
4555
4542 /* Parse and translate status field */ 4556 /* Parse and translate status field */
4543 mb = &pmb->u.mb; 4557 mb = &pmb->u.mb;
4544 mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, 4558 mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba,
@@ -5823,6 +5837,9 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
5823 spin_lock_init(&phba->sli4_hba.abts_nvme_buf_list_lock); 5837 spin_lock_init(&phba->sli4_hba.abts_nvme_buf_list_lock);
5824 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvme_buf_list); 5838 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvme_buf_list);
5825 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); 5839 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
5840 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_ctx_list);
5841 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list);
5842
5826 /* Fast-path XRI aborted CQ Event work queue list */ 5843 /* Fast-path XRI aborted CQ Event work queue list */
5827 INIT_LIST_HEAD(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue); 5844 INIT_LIST_HEAD(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue);
5828 } 5845 }
@@ -5830,6 +5847,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
5830 /* This abort list used by worker thread */ 5847 /* This abort list used by worker thread */
5831 spin_lock_init(&phba->sli4_hba.sgl_list_lock); 5848 spin_lock_init(&phba->sli4_hba.sgl_list_lock);
5832 spin_lock_init(&phba->sli4_hba.nvmet_io_lock); 5849 spin_lock_init(&phba->sli4_hba.nvmet_io_lock);
5850 spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock);
5833 5851
5834 /* 5852 /*
5835 * Initialize driver internal slow-path work queues 5853 * Initialize driver internal slow-path work queues
@@ -5944,16 +5962,21 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
5944 for (i = 0; i < lpfc_enable_nvmet_cnt; i++) { 5962 for (i = 0; i < lpfc_enable_nvmet_cnt; i++) {
5945 if (wwn == lpfc_enable_nvmet[i]) { 5963 if (wwn == lpfc_enable_nvmet[i]) {
5946#if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) 5964#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
5965 if (lpfc_nvmet_mem_alloc(phba))
5966 break;
5967
5968 phba->nvmet_support = 1; /* a match */
5969
5947 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5970 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5948 "6017 NVME Target %016llx\n", 5971 "6017 NVME Target %016llx\n",
5949 wwn); 5972 wwn);
5950 phba->nvmet_support = 1; /* a match */
5951#else 5973#else
5952 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5974 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5953 "6021 Can't enable NVME Target." 5975 "6021 Can't enable NVME Target."
5954 " NVME_TARGET_FC infrastructure" 5976 " NVME_TARGET_FC infrastructure"
5955 " is not in kernel\n"); 5977 " is not in kernel\n");
5956#endif 5978#endif
5979 break;
5957 } 5980 }
5958 } 5981 }
5959 } 5982 }
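
The hunk above also reorders targetport setup so lpfc_nvmet_mem_alloc() runs before nvmet_support is set: a failed pool allocation now leaves the feature cleanly disabled instead of half-enabled. A hedged sketch of that allocate-before-advertise pattern, with illustrative names and plain malloc standing in for the pool:

#include <stdbool.h>
#include <stdlib.h>

struct hba {
        void *nvmet_pool;       /* stands in for lpfc_nvmet_drb_pool */
        bool nvmet_support;
};

static int nvmet_mem_alloc(struct hba *h)
{
        h->nvmet_pool = malloc(4096);
        return h->nvmet_pool ? 0 : -1;
}

static void maybe_enable_nvmet(struct hba *h)
{
        if (nvmet_mem_alloc(h))
                return;                 /* no memory: feature stays off */
        h->nvmet_support = true;        /* only advertised once resources exist */
}

int main(void)
{
        struct hba h = { 0 };

        maybe_enable_nvmet(&h);
        free(h.nvmet_pool);
        return 0;
}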
@@ -6262,7 +6285,7 @@ lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
6262 * 6285 *
6263 * This routine is invoked to free the driver's IOCB list and memory. 6286 * This routine is invoked to free the driver's IOCB list and memory.
6264 **/ 6287 **/
6265static void 6288void
6266lpfc_free_iocb_list(struct lpfc_hba *phba) 6289lpfc_free_iocb_list(struct lpfc_hba *phba)
6267{ 6290{
6268 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL; 6291 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
@@ -6290,7 +6313,7 @@ lpfc_free_iocb_list(struct lpfc_hba *phba)
6290 * 0 - successful 6313 * 0 - successful
6291 * other values - error 6314 * other values - error
6292 **/ 6315 **/
6293static int 6316int
6294lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count) 6317lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
6295{ 6318{
6296 struct lpfc_iocbq *iocbq_entry = NULL; 6319 struct lpfc_iocbq *iocbq_entry = NULL;
@@ -6518,7 +6541,6 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
6518 uint16_t rpi_limit, curr_rpi_range; 6541 uint16_t rpi_limit, curr_rpi_range;
6519 struct lpfc_dmabuf *dmabuf; 6542 struct lpfc_dmabuf *dmabuf;
6520 struct lpfc_rpi_hdr *rpi_hdr; 6543 struct lpfc_rpi_hdr *rpi_hdr;
6521 uint32_t rpi_count;
6522 6544
6523 /* 6545 /*
6524 * If the SLI4 port supports extents, posting the rpi header isn't 6546 * If the SLI4 port supports extents, posting the rpi header isn't
@@ -6531,8 +6553,7 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
6531 return NULL; 6553 return NULL;
6532 6554
6533 /* The limit on the logical index is just the max_rpi count. */ 6555 /* The limit on the logical index is just the max_rpi count. */
6534 rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base + 6556 rpi_limit = phba->sli4_hba.max_cfg_param.max_rpi;
6535 phba->sli4_hba.max_cfg_param.max_rpi - 1;
6536 6557
6537 spin_lock_irq(&phba->hbalock); 6558 spin_lock_irq(&phba->hbalock);
6538 /* 6559 /*
@@ -6543,18 +6564,10 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
6543 curr_rpi_range = phba->sli4_hba.next_rpi; 6564 curr_rpi_range = phba->sli4_hba.next_rpi;
6544 spin_unlock_irq(&phba->hbalock); 6565 spin_unlock_irq(&phba->hbalock);
6545 6566
6546 /* 6567 /* Reached full RPI range */
6547 * The port has a limited number of rpis. The increment here 6568 if (curr_rpi_range == rpi_limit)
6548 * is LPFC_RPI_HDR_COUNT - 1 to account for the starting value
6549 * and to allow the full max_rpi range per port.
6550 */
6551 if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit)
6552 rpi_count = rpi_limit - curr_rpi_range;
6553 else
6554 rpi_count = LPFC_RPI_HDR_COUNT;
6555
6556 if (!rpi_count)
6557 return NULL; 6569 return NULL;
6570
6558 /* 6571 /*
6559 * First allocate the protocol header region for the port. The 6572 * First allocate the protocol header region for the port. The
6560 * port expects a 4KB DMA-mapped memory region that is 4K aligned. 6573 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
@@ -6588,13 +6601,9 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
6588 6601
6589 /* The rpi_hdr stores the logical index only. */ 6602 /* The rpi_hdr stores the logical index only. */
6590 rpi_hdr->start_rpi = curr_rpi_range; 6603 rpi_hdr->start_rpi = curr_rpi_range;
6604 rpi_hdr->next_rpi = phba->sli4_hba.next_rpi + LPFC_RPI_HDR_COUNT;
6591 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list); 6605 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
6592 6606
6593 /*
6594 * The next_rpi stores the next logical module-64 rpi value used
6595 * to post physical rpis in subsequent rpi postings.
6596 */
6597 phba->sli4_hba.next_rpi += rpi_count;
6598 spin_unlock_irq(&phba->hbalock); 6607 spin_unlock_irq(&phba->hbalock);
6599 return rpi_hdr; 6608 return rpi_hdr;
6600 6609
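
The RPI header hunks above replace the variable rpi_count logic with a cursor that always advances by LPFC_RPI_HDR_COUNT and stops when it reaches the limit exactly. A small userspace sketch of that chunked allocator; it assumes, as the equality test in the diff implies, that max_rpi is a multiple of the chunk size, and the constants are illustrative:

#include <stdio.h>

#define RPI_HDR_COUNT 64        /* stands in for LPFC_RPI_HDR_COUNT */

struct rpi_state {
        unsigned int next_rpi;  /* next logical rpi chunk to hand out */
        unsigned int max_rpi;   /* limit reported by the port */
};

/* Returns the start of a fresh chunk, or -1 once the range is full. */
static int rpi_hdr_alloc(struct rpi_state *s)
{
        unsigned int start = s->next_rpi;

        if (start == s->max_rpi)        /* reached full RPI range */
                return -1;
        s->next_rpi = start + RPI_HDR_COUNT;
        return (int)start;
}

int main(void)
{
        struct rpi_state s = { .next_rpi = 0, .max_rpi = 192 };
        int start;

        while ((start = rpi_hdr_alloc(&s)) >= 0)
                printf("posted rpi header for rpis %d-%d\n",
                       start, start + RPI_HDR_COUNT - 1);
        return 0;
}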
@@ -8165,7 +8174,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
8165 /* Create NVMET Receive Queue for header */ 8174 /* Create NVMET Receive Queue for header */
8166 qdesc = lpfc_sli4_queue_alloc(phba, 8175 qdesc = lpfc_sli4_queue_alloc(phba,
8167 phba->sli4_hba.rq_esize, 8176 phba->sli4_hba.rq_esize,
8168 phba->sli4_hba.rq_ecount); 8177 LPFC_NVMET_RQE_DEF_COUNT);
8169 if (!qdesc) { 8178 if (!qdesc) {
8170 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8179 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8171 "3146 Failed allocate " 8180 "3146 Failed allocate "
@@ -8187,7 +8196,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
8187 /* Create NVMET Receive Queue for data */ 8196 /* Create NVMET Receive Queue for data */
8188 qdesc = lpfc_sli4_queue_alloc(phba, 8197 qdesc = lpfc_sli4_queue_alloc(phba,
8189 phba->sli4_hba.rq_esize, 8198 phba->sli4_hba.rq_esize,
8190 phba->sli4_hba.rq_ecount); 8199 LPFC_NVMET_RQE_DEF_COUNT);
8191 if (!qdesc) { 8200 if (!qdesc) {
8192 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8201 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8193 "3156 Failed allocate " 8202 "3156 Failed allocate "
@@ -8319,46 +8328,6 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
8319} 8328}
8320 8329
8321int 8330int
8322lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
8323 struct lpfc_queue *drq, int count)
8324{
8325 int rc, i;
8326 struct lpfc_rqe hrqe;
8327 struct lpfc_rqe drqe;
8328 struct lpfc_rqb *rqbp;
8329 struct rqb_dmabuf *rqb_buffer;
8330 LIST_HEAD(rqb_buf_list);
8331
8332 rqbp = hrq->rqbp;
8333 for (i = 0; i < count; i++) {
8334 rqb_buffer = (rqbp->rqb_alloc_buffer)(phba);
8335 if (!rqb_buffer)
8336 break;
8337 rqb_buffer->hrq = hrq;
8338 rqb_buffer->drq = drq;
8339 list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list);
8340 }
8341 while (!list_empty(&rqb_buf_list)) {
8342 list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf,
8343 hbuf.list);
8344
8345 hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys);
8346 hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys);
8347 drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys);
8348 drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys);
8349 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
8350 if (rc < 0) {
8351 (rqbp->rqb_free_buffer)(phba, rqb_buffer);
8352 } else {
8353 list_add_tail(&rqb_buffer->hbuf.list,
8354 &rqbp->rqb_buffer_list);
8355 rqbp->buffer_count++;
8356 }
8357 }
8358 return 1;
8359}
8360
8361int
8362lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *rq) 8331lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *rq)
8363{ 8332{
8364 struct lpfc_rqb *rqbp; 8333 struct lpfc_rqb *rqbp;
@@ -8777,9 +8746,6 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
8777 goto out_destroy; 8746 goto out_destroy;
8778 } 8747 }
8779 8748
8780 lpfc_rq_adjust_repost(phba, phba->sli4_hba.hdr_rq, LPFC_ELS_HBQ);
8781 lpfc_rq_adjust_repost(phba, phba->sli4_hba.dat_rq, LPFC_ELS_HBQ);
8782
8783 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq, 8749 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
8784 phba->sli4_hba.els_cq, LPFC_USOL); 8750 phba->sli4_hba.els_cq, LPFC_USOL);
8785 if (rc) { 8751 if (rc) {
@@ -8847,7 +8813,7 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba)
8847 lpfc_wq_destroy(phba, phba->sli4_hba.nvmels_wq); 8813 lpfc_wq_destroy(phba, phba->sli4_hba.nvmels_wq);
8848 8814
8849 /* Unset ELS work queue */ 8815 /* Unset ELS work queue */
8850 if (phba->sli4_hba.els_cq) 8816 if (phba->sli4_hba.els_wq)
8851 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); 8817 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
8852 8818
8853 /* Unset unsolicited receive queue */ 8819 /* Unset unsolicited receive queue */
@@ -11103,7 +11069,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
11103 struct lpfc_hba *phba; 11069 struct lpfc_hba *phba;
11104 struct lpfc_vport *vport = NULL; 11070 struct lpfc_vport *vport = NULL;
11105 struct Scsi_Host *shost = NULL; 11071 struct Scsi_Host *shost = NULL;
11106 int error, cnt; 11072 int error;
11107 uint32_t cfg_mode, intr_mode; 11073 uint32_t cfg_mode, intr_mode;
11108 11074
11109 /* Allocate memory for HBA structure */ 11075 /* Allocate memory for HBA structure */
@@ -11137,22 +11103,6 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
11137 goto out_unset_pci_mem_s4; 11103 goto out_unset_pci_mem_s4;
11138 } 11104 }
11139 11105
11140 cnt = phba->cfg_iocb_cnt * 1024;
11141 if (phba->nvmet_support)
11142 cnt += phba->cfg_nvmet_mrq_post * phba->cfg_nvmet_mrq;
11143
11144 /* Initialize and populate the iocb list per host */
11145 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11146 "2821 initialize iocb list %d total %d\n",
11147 phba->cfg_iocb_cnt, cnt);
11148 error = lpfc_init_iocb_list(phba, cnt);
11149
11150 if (error) {
11151 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11152 "1413 Failed to initialize iocb list.\n");
11153 goto out_unset_driver_resource_s4;
11154 }
11155
11156 INIT_LIST_HEAD(&phba->active_rrq_list); 11106 INIT_LIST_HEAD(&phba->active_rrq_list);
11157 INIT_LIST_HEAD(&phba->fcf.fcf_pri_list); 11107 INIT_LIST_HEAD(&phba->fcf.fcf_pri_list);
11158 11108
@@ -11161,7 +11111,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
11161 if (error) { 11111 if (error) {
11162 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11112 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11163 "1414 Failed to set up driver resource.\n"); 11113 "1414 Failed to set up driver resource.\n");
11164 goto out_free_iocb_list; 11114 goto out_unset_driver_resource_s4;
11165 } 11115 }
11166 11116
11167 /* Get the default values for Model Name and Description */ 11117 /* Get the default values for Model Name and Description */
@@ -11261,8 +11211,6 @@ out_destroy_shost:
11261 lpfc_destroy_shost(phba); 11211 lpfc_destroy_shost(phba);
11262out_unset_driver_resource: 11212out_unset_driver_resource:
11263 lpfc_unset_driver_resource_phase2(phba); 11213 lpfc_unset_driver_resource_phase2(phba);
11264out_free_iocb_list:
11265 lpfc_free_iocb_list(phba);
11266out_unset_driver_resource_s4: 11214out_unset_driver_resource_s4:
11267 lpfc_sli4_driver_resource_unset(phba); 11215 lpfc_sli4_driver_resource_unset(phba);
11268out_unset_pci_mem_s4: 11216out_unset_pci_mem_s4:
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index 5986c7957199..fcc05a1517c2 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -214,6 +214,21 @@ fail_free_drb_pool:
214 return -ENOMEM; 214 return -ENOMEM;
215} 215}
216 216
217int
218lpfc_nvmet_mem_alloc(struct lpfc_hba *phba)
219{
220 phba->lpfc_nvmet_drb_pool =
221 pci_pool_create("lpfc_nvmet_drb_pool",
222 phba->pcidev, LPFC_NVMET_DATA_BUF_SIZE,
223 SGL_ALIGN_SZ, 0);
224 if (!phba->lpfc_nvmet_drb_pool) {
225 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
226 "6024 Can't enable NVME Target - no memory\n");
227 return -ENOMEM;
228 }
229 return 0;
230}
231
217/** 232/**
218 * lpfc_mem_free - Frees memory allocated by lpfc_mem_alloc 233 * lpfc_mem_free - Frees memory allocated by lpfc_mem_alloc
219 * @phba: HBA to free memory for 234 * @phba: HBA to free memory for
@@ -232,6 +247,9 @@ lpfc_mem_free(struct lpfc_hba *phba)
232 247
233 /* Free HBQ pools */ 248 /* Free HBQ pools */
234 lpfc_sli_hbqbuf_free_all(phba); 249 lpfc_sli_hbqbuf_free_all(phba);
250 if (phba->lpfc_nvmet_drb_pool)
251 pci_pool_destroy(phba->lpfc_nvmet_drb_pool);
252 phba->lpfc_nvmet_drb_pool = NULL;
235 if (phba->lpfc_drb_pool) 253 if (phba->lpfc_drb_pool)
236 pci_pool_destroy(phba->lpfc_drb_pool); 254 pci_pool_destroy(phba->lpfc_drb_pool);
237 phba->lpfc_drb_pool = NULL; 255 phba->lpfc_drb_pool = NULL;
@@ -611,8 +629,6 @@ struct rqb_dmabuf *
611lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba) 629lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba)
612{ 630{
613 struct rqb_dmabuf *dma_buf; 631 struct rqb_dmabuf *dma_buf;
614 struct lpfc_iocbq *nvmewqe;
615 union lpfc_wqe128 *wqe;
616 632
617 dma_buf = kzalloc(sizeof(struct rqb_dmabuf), GFP_KERNEL); 633 dma_buf = kzalloc(sizeof(struct rqb_dmabuf), GFP_KERNEL);
618 if (!dma_buf) 634 if (!dma_buf)
@@ -624,69 +640,15 @@ lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba)
624 kfree(dma_buf); 640 kfree(dma_buf);
625 return NULL; 641 return NULL;
626 } 642 }
627 dma_buf->dbuf.virt = pci_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL, 643 dma_buf->dbuf.virt = pci_pool_alloc(phba->lpfc_nvmet_drb_pool,
628 &dma_buf->dbuf.phys); 644 GFP_KERNEL, &dma_buf->dbuf.phys);
629 if (!dma_buf->dbuf.virt) { 645 if (!dma_buf->dbuf.virt) {
630 pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt, 646 pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
631 dma_buf->hbuf.phys); 647 dma_buf->hbuf.phys);
632 kfree(dma_buf); 648 kfree(dma_buf);
633 return NULL; 649 return NULL;
634 } 650 }
635 dma_buf->total_size = LPFC_DATA_BUF_SIZE; 651 dma_buf->total_size = LPFC_NVMET_DATA_BUF_SIZE;
636
637 dma_buf->context = kzalloc(sizeof(struct lpfc_nvmet_rcv_ctx),
638 GFP_KERNEL);
639 if (!dma_buf->context) {
640 pci_pool_free(phba->lpfc_drb_pool, dma_buf->dbuf.virt,
641 dma_buf->dbuf.phys);
642 pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
643 dma_buf->hbuf.phys);
644 kfree(dma_buf);
645 return NULL;
646 }
647
648 dma_buf->iocbq = lpfc_sli_get_iocbq(phba);
649 if (!dma_buf->iocbq) {
650 kfree(dma_buf->context);
651 pci_pool_free(phba->lpfc_drb_pool, dma_buf->dbuf.virt,
652 dma_buf->dbuf.phys);
653 pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
654 dma_buf->hbuf.phys);
655 kfree(dma_buf);
656 lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
657 "2621 Ran out of nvmet iocb/WQEs\n");
658 return NULL;
659 }
660 dma_buf->iocbq->iocb_flag = LPFC_IO_NVMET;
661 nvmewqe = dma_buf->iocbq;
662 wqe = (union lpfc_wqe128 *)&nvmewqe->wqe;
663 /* Initialize WQE */
664 memset(wqe, 0, sizeof(union lpfc_wqe));
665 /* Word 7 */
666 bf_set(wqe_ct, &wqe->generic.wqe_com, SLI4_CT_RPI);
667 bf_set(wqe_class, &wqe->generic.wqe_com, CLASS3);
668 bf_set(wqe_pu, &wqe->generic.wqe_com, 1);
669 /* Word 10 */
670 bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
671 bf_set(wqe_ebde_cnt, &wqe->generic.wqe_com, 0);
672 bf_set(wqe_qosd, &wqe->generic.wqe_com, 0);
673
674 dma_buf->iocbq->context1 = NULL;
675 spin_lock(&phba->sli4_hba.sgl_list_lock);
676 dma_buf->sglq = __lpfc_sli_get_nvmet_sglq(phba, dma_buf->iocbq);
677 spin_unlock(&phba->sli4_hba.sgl_list_lock);
678 if (!dma_buf->sglq) {
679 lpfc_sli_release_iocbq(phba, dma_buf->iocbq);
680 kfree(dma_buf->context);
681 pci_pool_free(phba->lpfc_drb_pool, dma_buf->dbuf.virt,
682 dma_buf->dbuf.phys);
683 pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
684 dma_buf->hbuf.phys);
685 kfree(dma_buf);
686 lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
687 "6132 Ran out of nvmet XRIs\n");
688 return NULL;
689 }
690 return dma_buf; 652 return dma_buf;
691} 653}
692 654
@@ -705,20 +667,9 @@ lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba)
705void 667void
706lpfc_sli4_nvmet_free(struct lpfc_hba *phba, struct rqb_dmabuf *dmab) 668lpfc_sli4_nvmet_free(struct lpfc_hba *phba, struct rqb_dmabuf *dmab)
707{ 669{
708 unsigned long flags;
709
710 __lpfc_clear_active_sglq(phba, dmab->sglq->sli4_lxritag);
711 dmab->sglq->state = SGL_FREED;
712 dmab->sglq->ndlp = NULL;
713
714 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, flags);
715 list_add_tail(&dmab->sglq->list, &phba->sli4_hba.lpfc_nvmet_sgl_list);
716 spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, flags);
717
718 lpfc_sli_release_iocbq(phba, dmab->iocbq);
719 kfree(dmab->context);
720 pci_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys); 670 pci_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys);
721 pci_pool_free(phba->lpfc_drb_pool, dmab->dbuf.virt, dmab->dbuf.phys); 671 pci_pool_free(phba->lpfc_nvmet_drb_pool,
672 dmab->dbuf.virt, dmab->dbuf.phys);
722 kfree(dmab); 673 kfree(dmab);
723} 674}
724 675
@@ -803,6 +754,11 @@ lpfc_rq_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
803 rc = lpfc_sli4_rq_put(rqb_entry->hrq, rqb_entry->drq, &hrqe, &drqe); 754 rc = lpfc_sli4_rq_put(rqb_entry->hrq, rqb_entry->drq, &hrqe, &drqe);
804 if (rc < 0) { 755 if (rc < 0) {
805 (rqbp->rqb_free_buffer)(phba, rqb_entry); 756 (rqbp->rqb_free_buffer)(phba, rqb_entry);
757 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
758 "6409 Cannot post to RQ %d: %x %x\n",
759 rqb_entry->hrq->queue_id,
760 rqb_entry->hrq->host_index,
761 rqb_entry->hrq->hba_index);
806 } else { 762 } else {
807 list_add_tail(&rqb_entry->hbuf.list, &rqbp->rqb_buffer_list); 763 list_add_tail(&rqb_entry->hbuf.list, &rqbp->rqb_buffer_list);
808 rqbp->buffer_count++; 764 rqbp->buffer_count++;
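
The lpfc_mem.c changes above give NVMET receive data its own 128-byte pool (LPFC_NVMET_DATA_BUF_SIZE) rather than borrowing 2048-byte buffers from lpfc_drb_pool, and strip the per-buffer iocbq/sglq/context setup that now lives with the context pool. A userspace sketch of such a dedicated fixed-size pool, with plain malloc standing in for pci_pool_create/alloc/free and all names illustrative:

#include <stdlib.h>

struct buf_pool {
        size_t bufsz;           /* fixed element size, e.g. 128 */
        void **stack;           /* simple free stack */
        int nfree, cap;
};

static struct buf_pool *pool_create(size_t bufsz, int cap)
{
        struct buf_pool *p = calloc(1, sizeof(*p));

        if (!p)
                return NULL;
        p->bufsz = bufsz;
        p->cap = cap;
        p->stack = calloc(cap, sizeof(void *));
        if (!p->stack) {
                free(p);
                return NULL;
        }
        return p;
}

static void *pool_alloc(struct buf_pool *p)
{
        if (p->nfree)
                return p->stack[--p->nfree];    /* reuse a freed buffer */
        return malloc(p->bufsz);                /* grow on demand */
}

static void pool_free(struct buf_pool *p, void *buf)
{
        if (p->nfree < p->cap)
                p->stack[p->nfree++] = buf;     /* keep for reuse */
        else
                free(buf);
}

int main(void)
{
        struct buf_pool *p = pool_create(128, 8);
        void *b = p ? pool_alloc(p) : NULL;

        if (b)
                pool_free(p, b);        /* returns to the stack, not the heap */
        return 0;
}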
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 8777c2d5f50d..f74cb0142fd4 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -206,7 +206,7 @@ lpfc_check_elscmpl_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
206 * associated with a LPFC_NODELIST entry. This 206 * associated with a LPFC_NODELIST entry. This
207 * routine effectively results in a "software abort". 207 * routine effectively results in a "software abort".
208 */ 208 */
209int 209void
210lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) 210lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
211{ 211{
212 LIST_HEAD(abort_list); 212 LIST_HEAD(abort_list);
@@ -215,6 +215,10 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
215 215
216 pring = lpfc_phba_elsring(phba); 216 pring = lpfc_phba_elsring(phba);
217 217
218 /* In case of error recovery path, we might have a NULL pring here */
219 if (!pring)
220 return;
221
218 /* Abort outstanding I/O on NPort <nlp_DID> */ 222 /* Abort outstanding I/O on NPort <nlp_DID> */
219 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_DISCOVERY, 223 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_DISCOVERY,
220 "2819 Abort outstanding I/O on NPort x%x " 224 "2819 Abort outstanding I/O on NPort x%x "
@@ -273,7 +277,6 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
273 IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); 277 IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
274 278
275 lpfc_cancel_retry_delay_tmo(phba->pport, ndlp); 279 lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
276 return 0;
277} 280}
278 281
279static int 282static int
@@ -1944,7 +1947,13 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1944 1947
1945 /* Target driver cannot solicit NVME FB. */ 1948 /* Target driver cannot solicit NVME FB. */
1946 if (bf_get_be32(prli_tgt, nvpr)) { 1949 if (bf_get_be32(prli_tgt, nvpr)) {
1950 /* Complete the nvme target roles. The transport
1951 * needs to know if the rport is capable of
1952 * discovery in addition to its role.
1953 */
1947 ndlp->nlp_type |= NLP_NVME_TARGET; 1954 ndlp->nlp_type |= NLP_NVME_TARGET;
1955 if (bf_get_be32(prli_disc, nvpr))
1956 ndlp->nlp_type |= NLP_NVME_DISCOVERY;
1948 if ((bf_get_be32(prli_fba, nvpr) == 1) && 1957 if ((bf_get_be32(prli_fba, nvpr) == 1) &&
1949 (bf_get_be32(prli_fb_sz, nvpr) > 0) && 1958 (bf_get_be32(prli_fb_sz, nvpr) > 0) &&
1950 (phba->cfg_nvme_enable_fb) && 1959 (phba->cfg_nvme_enable_fb) &&
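
The PRLI hunk above records discovery capability alongside the target role so the transport can distinguish a discovery-capable target. A short sketch of that decode; the flag values mirror the diff, but the wire bit positions below are made up for the example:

#include <stdint.h>
#include <stdio.h>

#define NLP_NVME_TARGET_F       0x20    /* mirrors NLP_NVME_TARGET */
#define NLP_NVME_DISCOVERY_F    0x80    /* mirrors NLP_NVME_DISCOVERY */

#define PRLI_TGT_BIT    (1u << 0)       /* hypothetical wire bit */
#define PRLI_DISC_BIT   (1u << 1)       /* hypothetical wire bit */

static uint32_t decode_prli(uint32_t wire)
{
        uint32_t type = 0;

        if (wire & PRLI_TGT_BIT) {
                type |= NLP_NVME_TARGET_F;
                /* Discovery capability is only recorded on targets. */
                if (wire & PRLI_DISC_BIT)
                        type |= NLP_NVME_DISCOVERY_F;
        }
        return type;
}

int main(void)
{
        printf("nlp_type = 0x%x\n", decode_prli(PRLI_TGT_BIT | PRLI_DISC_BIT));
        return 0;
}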
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index 94434e621c33..518b15e6f222 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -142,7 +142,7 @@ out:
142} 142}
143 143
144/** 144/**
145 * lpfc_nvmet_rq_post - Repost a NVMET RQ DMA buffer and clean up context 145 * lpfc_nvmet_ctxbuf_post - Repost a NVMET RQ DMA buffer and clean up context
146 * @phba: HBA buffer is associated with 146 * @phba: HBA buffer is associated with
147 * @ctxp: context to clean up 147 * @ctxp: context to clean up
148 * @mp: Buffer to free 148 * @mp: Buffer to free
@@ -155,24 +155,113 @@ out:
155 * Returns: None 155 * Returns: None
156 **/ 156 **/
157void 157void
158lpfc_nvmet_rq_post(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp, 158lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
159 struct lpfc_dmabuf *mp)
160{ 159{
161 if (ctxp) { 160#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
162 if (ctxp->flag) 161 struct lpfc_nvmet_rcv_ctx *ctxp = ctx_buf->context;
163 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, 162 struct lpfc_nvmet_tgtport *tgtp;
164 "6314 rq_post ctx xri x%x flag x%x\n", 163 struct fc_frame_header *fc_hdr;
165 ctxp->oxid, ctxp->flag); 164 struct rqb_dmabuf *nvmebuf;
166 165 struct lpfc_dmabuf *hbufp;
167 if (ctxp->txrdy) { 166 uint32_t *payload;
168 pci_pool_free(phba->txrdy_payload_pool, ctxp->txrdy, 167 uint32_t size, oxid, sid, rc;
169 ctxp->txrdy_phys); 168 unsigned long iflag;
170 ctxp->txrdy = NULL; 169
171 ctxp->txrdy_phys = 0; 170 if (ctxp->txrdy) {
171 pci_pool_free(phba->txrdy_payload_pool, ctxp->txrdy,
172 ctxp->txrdy_phys);
173 ctxp->txrdy = NULL;
174 ctxp->txrdy_phys = 0;
175 }
176 ctxp->state = LPFC_NVMET_STE_FREE;
177
178 spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
179 if (phba->sli4_hba.nvmet_io_wait_cnt) {
180 hbufp = &nvmebuf->hbuf;
181 list_remove_head(&phba->sli4_hba.lpfc_nvmet_io_wait_list,
182 nvmebuf, struct rqb_dmabuf,
183 hbuf.list);
184 phba->sli4_hba.nvmet_io_wait_cnt--;
185 spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
186 iflag);
187
188 fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
189 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
190 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
191 payload = (uint32_t *)(nvmebuf->dbuf.virt);
192 size = nvmebuf->bytes_recv;
193 sid = sli4_sid_from_fc_hdr(fc_hdr);
194
195 ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
196 memset(ctxp, 0, sizeof(ctxp->ctx));
197 ctxp->wqeq = NULL;
198 ctxp->txrdy = NULL;
199 ctxp->offset = 0;
200 ctxp->phba = phba;
201 ctxp->size = size;
202 ctxp->oxid = oxid;
203 ctxp->sid = sid;
204 ctxp->state = LPFC_NVMET_STE_RCV;
205 ctxp->entry_cnt = 1;
206 ctxp->flag = 0;
207 ctxp->ctxbuf = ctx_buf;
208 spin_lock_init(&ctxp->ctxlock);
209
210#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
211 if (phba->ktime_on) {
212 ctxp->ts_cmd_nvme = ktime_get_ns();
213 ctxp->ts_isr_cmd = ctxp->ts_cmd_nvme;
214 ctxp->ts_nvme_data = 0;
215 ctxp->ts_data_wqput = 0;
216 ctxp->ts_isr_data = 0;
217 ctxp->ts_data_nvme = 0;
218 ctxp->ts_nvme_status = 0;
219 ctxp->ts_status_wqput = 0;
220 ctxp->ts_isr_status = 0;
221 ctxp->ts_status_nvme = 0;
172 } 222 }
173 ctxp->state = LPFC_NVMET_STE_FREE; 223#endif
224 atomic_inc(&tgtp->rcv_fcp_cmd_in);
225 /*
226 * The calling sequence should be:
226 * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done
228 * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
229 * When we return from nvmet_fc_rcv_fcp_req, all relevant info
230 * the NVME command / FC header is stored.
231 * A buffer has already been reposted for this IO, so just free
232 * the nvmebuf.
233 */
234 rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req,
235 payload, size);
236
237 /* Process FCP command */
238 if (rc == 0) {
239 atomic_inc(&tgtp->rcv_fcp_cmd_out);
240 nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
241 return;
242 }
243
244 atomic_inc(&tgtp->rcv_fcp_cmd_drop);
245 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
246 "2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
247 ctxp->oxid, rc,
248 atomic_read(&tgtp->rcv_fcp_cmd_in),
249 atomic_read(&tgtp->rcv_fcp_cmd_out),
250 atomic_read(&tgtp->xmt_fcp_release));
251
252 lpfc_nvmet_defer_release(phba, ctxp);
253 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
254 nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
255 return;
174 } 256 }
175 lpfc_rq_buf_free(phba, mp); 257 spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
258
259 spin_lock_irqsave(&phba->sli4_hba.nvmet_io_lock, iflag);
260 list_add_tail(&ctx_buf->list,
261 &phba->sli4_hba.lpfc_nvmet_ctx_list);
262 phba->sli4_hba.nvmet_ctx_cnt++;
263 spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_lock, iflag);
264#endif
176} 265}
177 266
178#ifdef CONFIG_SCSI_LPFC_DEBUG_FS 267#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
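
The rewritten lpfc_nvmet_ctxbuf_post() above is the core of the new scheme: a context being freed first services a deferred IO from the wait list, and only when no IO is waiting does it return to the free list. A userspace sketch of that handoff, with pthread mutexes standing in for the driver's spinlocks and LIFO stacks standing in for the lpfc list heads (the driver keeps FIFO order via list_add_tail); all names are illustrative:

#include <pthread.h>
#include <stddef.h>

struct io_ctx { struct io_ctx *next; };
struct pending_io { struct pending_io *next; };

static pthread_mutex_t wait_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t free_lock = PTHREAD_MUTEX_INITIALIZER;
static struct pending_io *wait_list;    /* IOs that arrived with no ctx */
static struct io_ctx *free_list;        /* recycled contexts */

static void process_io(struct io_ctx *ctx, struct pending_io *io)
{
        (void)ctx; (void)io;            /* hand the pair to the upper layer */
}

static void ctx_post(struct io_ctx *ctx)
{
        struct pending_io *io = NULL;

        pthread_mutex_lock(&wait_lock);
        if (wait_list) {                /* a deferred IO is waiting */
                io = wait_list;
                wait_list = io->next;
        }
        pthread_mutex_unlock(&wait_lock);

        if (io) {
                process_io(ctx, io);    /* context is reused immediately */
                return;
        }

        pthread_mutex_lock(&free_lock);
        ctx->next = free_list;          /* no waiter: back on the free list */
        free_list = ctx;
        pthread_mutex_unlock(&free_lock);
}

int main(void)
{
        static struct io_ctx c;

        ctx_post(&c);                   /* no waiter yet: context is parked */
        return 0;
}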
@@ -502,6 +591,7 @@ lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
502 "6150 LS Drop IO x%x: Prep\n", 591 "6150 LS Drop IO x%x: Prep\n",
503 ctxp->oxid); 592 ctxp->oxid);
504 lpfc_in_buf_free(phba, &nvmebuf->dbuf); 593 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
594 atomic_inc(&nvmep->xmt_ls_abort);
505 lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, 595 lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp,
506 ctxp->sid, ctxp->oxid); 596 ctxp->sid, ctxp->oxid);
507 return -ENOMEM; 597 return -ENOMEM;
@@ -545,6 +635,7 @@ lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
545 lpfc_nlp_put(nvmewqeq->context1); 635 lpfc_nlp_put(nvmewqeq->context1);
546 636
547 lpfc_in_buf_free(phba, &nvmebuf->dbuf); 637 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
638 atomic_inc(&nvmep->xmt_ls_abort);
548 lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid); 639 lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid);
549 return -ENXIO; 640 return -ENXIO;
550} 641}
@@ -612,9 +703,9 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
612 lpfc_nvmeio_data(phba, "NVMET FCP CMND: xri x%x op x%x len x%x\n", 703 lpfc_nvmeio_data(phba, "NVMET FCP CMND: xri x%x op x%x len x%x\n",
613 ctxp->oxid, rsp->op, rsp->rsplen); 704 ctxp->oxid, rsp->op, rsp->rsplen);
614 705
706 ctxp->flag |= LPFC_NVMET_IO_INP;
615 rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, nvmewqeq); 707 rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, nvmewqeq);
616 if (rc == WQE_SUCCESS) { 708 if (rc == WQE_SUCCESS) {
617 ctxp->flag |= LPFC_NVMET_IO_INP;
618#ifdef CONFIG_SCSI_LPFC_DEBUG_FS 709#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
619 if (!phba->ktime_on) 710 if (!phba->ktime_on)
620 return 0; 711 return 0;
@@ -692,6 +783,7 @@ static void
692lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport, 783lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
693 struct nvmefc_tgt_fcp_req *rsp) 784 struct nvmefc_tgt_fcp_req *rsp)
694{ 785{
786 struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
695 struct lpfc_nvmet_rcv_ctx *ctxp = 787 struct lpfc_nvmet_rcv_ctx *ctxp =
696 container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req); 788 container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
697 struct lpfc_hba *phba = ctxp->phba; 789 struct lpfc_hba *phba = ctxp->phba;
@@ -707,13 +799,15 @@ lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
707 } 799 }
708 spin_unlock_irqrestore(&ctxp->ctxlock, flags); 800 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
709 801
710 lpfc_nvmeio_data(phba, "NVMET FCP FREE: xri x%x ste %d\n", ctxp->oxid, 802 lpfc_nvmeio_data(phba, "NVMET FCP FREE: xri x%x ste %d abt %d\n", ctxp->oxid,
711 ctxp->state, 0); 803 ctxp->state, aborting);
804
805 atomic_inc(&lpfc_nvmep->xmt_fcp_release);
712 806
713 if (aborting) 807 if (aborting)
714 return; 808 return;
715 809
716 lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf); 810 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
717} 811}
718 812
719static struct nvmet_fc_target_template lpfc_tgttemplate = { 813static struct nvmet_fc_target_template lpfc_tgttemplate = {
@@ -734,17 +828,128 @@ static struct nvmet_fc_target_template lpfc_tgttemplate = {
734 .target_priv_sz = sizeof(struct lpfc_nvmet_tgtport), 828 .target_priv_sz = sizeof(struct lpfc_nvmet_tgtport),
735}; 829};
736 830
831void
832lpfc_nvmet_cleanup_io_context(struct lpfc_hba *phba)
833{
834 struct lpfc_nvmet_ctxbuf *ctx_buf, *next_ctx_buf;
835 unsigned long flags;
836
837 list_for_each_entry_safe(
838 ctx_buf, next_ctx_buf,
839 &phba->sli4_hba.lpfc_nvmet_ctx_list, list) {
840 spin_lock_irqsave(
841 &phba->sli4_hba.abts_nvme_buf_list_lock, flags);
842 list_del_init(&ctx_buf->list);
843 spin_unlock_irqrestore(
844 &phba->sli4_hba.abts_nvme_buf_list_lock, flags);
845 __lpfc_clear_active_sglq(phba,
846 ctx_buf->sglq->sli4_lxritag);
847 ctx_buf->sglq->state = SGL_FREED;
848 ctx_buf->sglq->ndlp = NULL;
849
850 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, flags);
851 list_add_tail(&ctx_buf->sglq->list,
852 &phba->sli4_hba.lpfc_nvmet_sgl_list);
853 spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock,
854 flags);
855
856 lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
857 kfree(ctx_buf->context);
858 }
859}
860
861int
862lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
863{
864 struct lpfc_nvmet_ctxbuf *ctx_buf;
865 struct lpfc_iocbq *nvmewqe;
866 union lpfc_wqe128 *wqe;
867 int i;
868
869 lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
870 "6403 Allocate NVMET resources for %d XRIs\n",
871 phba->sli4_hba.nvmet_xri_cnt);
872
873 /* For all nvmet xris, allocate resources needed to process a
874 * received command on a per xri basis.
875 */
876 for (i = 0; i < phba->sli4_hba.nvmet_xri_cnt; i++) {
877 ctx_buf = kzalloc(sizeof(*ctx_buf), GFP_KERNEL);
878 if (!ctx_buf) {
879 lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
880 "6404 Ran out of memory for NVMET\n");
881 return -ENOMEM;
882 }
883
884 ctx_buf->context = kzalloc(sizeof(*ctx_buf->context),
885 GFP_KERNEL);
886 if (!ctx_buf->context) {
887 kfree(ctx_buf);
888 lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
889 "6405 Ran out of NVMET "
890 "context memory\n");
891 return -ENOMEM;
892 }
893 ctx_buf->context->ctxbuf = ctx_buf;
894
895 ctx_buf->iocbq = lpfc_sli_get_iocbq(phba);
896 if (!ctx_buf->iocbq) {
897 kfree(ctx_buf->context);
898 kfree(ctx_buf);
899 lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
900 "6406 Ran out of NVMET iocb/WQEs\n");
901 return -ENOMEM;
902 }
903 ctx_buf->iocbq->iocb_flag = LPFC_IO_NVMET;
904 nvmewqe = ctx_buf->iocbq;
905 wqe = (union lpfc_wqe128 *)&nvmewqe->wqe;
906 /* Initialize WQE */
907 memset(wqe, 0, sizeof(union lpfc_wqe));
908 /* Word 7 */
909 bf_set(wqe_ct, &wqe->generic.wqe_com, SLI4_CT_RPI);
910 bf_set(wqe_class, &wqe->generic.wqe_com, CLASS3);
911 bf_set(wqe_pu, &wqe->generic.wqe_com, 1);
912 /* Word 10 */
913 bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
914 bf_set(wqe_ebde_cnt, &wqe->generic.wqe_com, 0);
915 bf_set(wqe_qosd, &wqe->generic.wqe_com, 0);
916
917 ctx_buf->iocbq->context1 = NULL;
918 spin_lock(&phba->sli4_hba.sgl_list_lock);
919 ctx_buf->sglq = __lpfc_sli_get_nvmet_sglq(phba, ctx_buf->iocbq);
920 spin_unlock(&phba->sli4_hba.sgl_list_lock);
921 if (!ctx_buf->sglq) {
922 lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
923 kfree(ctx_buf->context);
924 kfree(ctx_buf);
925 lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
926 "6407 Ran out of NVMET XRIs\n");
927 return -ENOMEM;
928 }
929 spin_lock(&phba->sli4_hba.nvmet_io_lock);
930 list_add_tail(&ctx_buf->list,
931 &phba->sli4_hba.lpfc_nvmet_ctx_list);
932 spin_unlock(&phba->sli4_hba.nvmet_io_lock);
933 }
934 phba->sli4_hba.nvmet_ctx_cnt = phba->sli4_hba.nvmet_xri_cnt;
935 return 0;
936}
937
737int 938int
738lpfc_nvmet_create_targetport(struct lpfc_hba *phba) 939lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
739{ 940{
740 struct lpfc_vport *vport = phba->pport; 941 struct lpfc_vport *vport = phba->pport;
741 struct lpfc_nvmet_tgtport *tgtp; 942 struct lpfc_nvmet_tgtport *tgtp;
742 struct nvmet_fc_port_info pinfo; 943 struct nvmet_fc_port_info pinfo;
743 int error = 0; 944 int error;
744 945
745 if (phba->targetport) 946 if (phba->targetport)
746 return 0; 947 return 0;
747 948
949 error = lpfc_nvmet_setup_io_context(phba);
950 if (error)
951 return error;
952
748 memset(&pinfo, 0, sizeof(struct nvmet_fc_port_info)); 953 memset(&pinfo, 0, sizeof(struct nvmet_fc_port_info));
749 pinfo.node_name = wwn_to_u64(vport->fc_nodename.u.wwn); 954 pinfo.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
750 pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn); 955 pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
@@ -764,7 +969,6 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
764 lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1; 969 lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
765 lpfc_tgttemplate.max_hw_queues = phba->cfg_nvme_io_channel; 970 lpfc_tgttemplate.max_hw_queues = phba->cfg_nvme_io_channel;
766 lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP | 971 lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP |
767 NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED |
768 NVMET_FCTGTFEAT_CMD_IN_ISR | 972 NVMET_FCTGTFEAT_CMD_IN_ISR |
769 NVMET_FCTGTFEAT_OPDONE_IN_ISR; 973 NVMET_FCTGTFEAT_OPDONE_IN_ISR;
770 974
@@ -773,13 +977,16 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
773 &phba->pcidev->dev, 977 &phba->pcidev->dev,
774 &phba->targetport); 978 &phba->targetport);
775#else 979#else
776 error = -ENOMEM; 980 error = -ENOENT;
777#endif 981#endif
778 if (error) { 982 if (error) {
779 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC, 983 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
780 "6025 Cannot register NVME targetport " 984 "6025 Cannot register NVME targetport "
781 "x%x\n", error); 985 "x%x\n", error);
782 phba->targetport = NULL; 986 phba->targetport = NULL;
987
988 lpfc_nvmet_cleanup_io_context(phba);
989
783 } else { 990 } else {
784 tgtp = (struct lpfc_nvmet_tgtport *) 991 tgtp = (struct lpfc_nvmet_tgtport *)
785 phba->targetport->private; 992 phba->targetport->private;
@@ -796,6 +1003,7 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
796 atomic_set(&tgtp->rcv_ls_req_out, 0); 1003 atomic_set(&tgtp->rcv_ls_req_out, 0);
797 atomic_set(&tgtp->rcv_ls_req_drop, 0); 1004 atomic_set(&tgtp->rcv_ls_req_drop, 0);
798 atomic_set(&tgtp->xmt_ls_abort, 0); 1005 atomic_set(&tgtp->xmt_ls_abort, 0);
1006 atomic_set(&tgtp->xmt_ls_abort_cmpl, 0);
799 atomic_set(&tgtp->xmt_ls_rsp, 0); 1007 atomic_set(&tgtp->xmt_ls_rsp, 0);
800 atomic_set(&tgtp->xmt_ls_drop, 0); 1008 atomic_set(&tgtp->xmt_ls_drop, 0);
801 atomic_set(&tgtp->xmt_ls_rsp_error, 0); 1009 atomic_set(&tgtp->xmt_ls_rsp_error, 0);
@@ -803,18 +1011,21 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
803 atomic_set(&tgtp->rcv_fcp_cmd_in, 0); 1011 atomic_set(&tgtp->rcv_fcp_cmd_in, 0);
804 atomic_set(&tgtp->rcv_fcp_cmd_out, 0); 1012 atomic_set(&tgtp->rcv_fcp_cmd_out, 0);
805 atomic_set(&tgtp->rcv_fcp_cmd_drop, 0); 1013 atomic_set(&tgtp->rcv_fcp_cmd_drop, 0);
806 atomic_set(&tgtp->xmt_fcp_abort, 0);
807 atomic_set(&tgtp->xmt_fcp_drop, 0); 1014 atomic_set(&tgtp->xmt_fcp_drop, 0);
808 atomic_set(&tgtp->xmt_fcp_read_rsp, 0); 1015 atomic_set(&tgtp->xmt_fcp_read_rsp, 0);
809 atomic_set(&tgtp->xmt_fcp_read, 0); 1016 atomic_set(&tgtp->xmt_fcp_read, 0);
810 atomic_set(&tgtp->xmt_fcp_write, 0); 1017 atomic_set(&tgtp->xmt_fcp_write, 0);
811 atomic_set(&tgtp->xmt_fcp_rsp, 0); 1018 atomic_set(&tgtp->xmt_fcp_rsp, 0);
1019 atomic_set(&tgtp->xmt_fcp_release, 0);
812 atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0); 1020 atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0);
813 atomic_set(&tgtp->xmt_fcp_rsp_error, 0); 1021 atomic_set(&tgtp->xmt_fcp_rsp_error, 0);
814 atomic_set(&tgtp->xmt_fcp_rsp_drop, 0); 1022 atomic_set(&tgtp->xmt_fcp_rsp_drop, 0);
1023 atomic_set(&tgtp->xmt_fcp_abort, 0);
1024 atomic_set(&tgtp->xmt_fcp_abort_cmpl, 0);
1025 atomic_set(&tgtp->xmt_abort_unsol, 0);
1026 atomic_set(&tgtp->xmt_abort_sol, 0);
815 atomic_set(&tgtp->xmt_abort_rsp, 0); 1027 atomic_set(&tgtp->xmt_abort_rsp, 0);
816 atomic_set(&tgtp->xmt_abort_rsp_error, 0); 1028 atomic_set(&tgtp->xmt_abort_rsp_error, 0);
817 atomic_set(&tgtp->xmt_abort_cmpl, 0);
818 } 1029 }
819 return error; 1030 return error;
820} 1031}
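
lpfc_nvmet_setup_io_context() above allocates several resources per XRI and, on any failure, undoes only the current iteration before returning; contexts already queued are reclaimed separately by lpfc_nvmet_cleanup_io_context(). A minimal sketch of that per-iteration unwind, with illustrative names:

#include <stdlib.h>

struct ctx { void *mem_a; void *mem_b; };

static int setup_contexts(struct ctx *tbl, int n)
{
        for (int i = 0; i < n; i++) {
                tbl[i].mem_a = malloc(64);
                if (!tbl[i].mem_a)
                        return -1;              /* nothing taken this pass */
                tbl[i].mem_b = malloc(128);
                if (!tbl[i].mem_b) {
                        free(tbl[i].mem_a);     /* undo this iteration only */
                        tbl[i].mem_a = NULL;
                        return -1;
                }
        }
        return 0;       /* earlier iterations stay allocated for the caller */
}

int main(void)
{
        struct ctx table[4] = { { NULL, NULL } };

        /* A real caller pairs this with a full cleanup pass on failure,
         * as the driver does with lpfc_nvmet_cleanup_io_context(). */
        return setup_contexts(table, 4) ? 1 : 0;
}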
@@ -865,7 +1076,7 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
865 list_for_each_entry_safe(ctxp, next_ctxp, 1076 list_for_each_entry_safe(ctxp, next_ctxp,
866 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list, 1077 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
867 list) { 1078 list) {
868 if (ctxp->rqb_buffer->sglq->sli4_xritag != xri) 1079 if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
869 continue; 1080 continue;
870 1081
871 /* Check if we already received a free context call 1082 /* Check if we already received a free context call
@@ -886,7 +1097,7 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
886 (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE || 1097 (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE ||
887 ndlp->nlp_state == NLP_STE_MAPPED_NODE)) { 1098 ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
888 lpfc_set_rrq_active(phba, ndlp, 1099 lpfc_set_rrq_active(phba, ndlp,
889 ctxp->rqb_buffer->sglq->sli4_lxritag, 1100 ctxp->ctxbuf->sglq->sli4_lxritag,
890 rxid, 1); 1101 rxid, 1);
891 lpfc_sli4_abts_err_handler(phba, ndlp, axri); 1102 lpfc_sli4_abts_err_handler(phba, ndlp, axri);
892 } 1103 }
@@ -895,8 +1106,8 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
895 "6318 XB aborted %x flg x%x (%x)\n", 1106 "6318 XB aborted %x flg x%x (%x)\n",
896 ctxp->oxid, ctxp->flag, released); 1107 ctxp->oxid, ctxp->flag, released);
897 if (released) 1108 if (released)
898 lpfc_nvmet_rq_post(phba, ctxp, 1109 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
899 &ctxp->rqb_buffer->hbuf); 1110
900 if (rrq_empty) 1111 if (rrq_empty)
901 lpfc_worker_wake_up(phba); 1112 lpfc_worker_wake_up(phba);
902 return; 1113 return;
@@ -924,7 +1135,7 @@ lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
924 list_for_each_entry_safe(ctxp, next_ctxp, 1135 list_for_each_entry_safe(ctxp, next_ctxp,
925 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list, 1136 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
926 list) { 1137 list) {
927 if (ctxp->rqb_buffer->sglq->sli4_xritag != xri) 1138 if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
928 continue; 1139 continue;
929 1140
930 spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock); 1141 spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
@@ -976,6 +1187,7 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
976 init_completion(&tgtp->tport_unreg_done); 1187 init_completion(&tgtp->tport_unreg_done);
977 nvmet_fc_unregister_targetport(phba->targetport); 1188 nvmet_fc_unregister_targetport(phba->targetport);
978 wait_for_completion_timeout(&tgtp->tport_unreg_done, 5); 1189 wait_for_completion_timeout(&tgtp->tport_unreg_done, 5);
1190 lpfc_nvmet_cleanup_io_context(phba);
979 } 1191 }
980 phba->targetport = NULL; 1192 phba->targetport = NULL;
981#endif 1193#endif
@@ -1011,6 +1223,7 @@ lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1011 oxid = 0; 1223 oxid = 0;
1012 size = 0; 1224 size = 0;
1013 sid = 0; 1225 sid = 0;
1226 ctxp = NULL;
1014 goto dropit; 1227 goto dropit;
1015 } 1228 }
1016 1229
@@ -1105,39 +1318,71 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
1105 struct lpfc_nvmet_rcv_ctx *ctxp; 1318 struct lpfc_nvmet_rcv_ctx *ctxp;
1106 struct lpfc_nvmet_tgtport *tgtp; 1319 struct lpfc_nvmet_tgtport *tgtp;
1107 struct fc_frame_header *fc_hdr; 1320 struct fc_frame_header *fc_hdr;
1321 struct lpfc_nvmet_ctxbuf *ctx_buf;
1108 uint32_t *payload; 1322 uint32_t *payload;
1109 uint32_t size, oxid, sid, rc; 1323 uint32_t size, oxid, sid, rc, qno;
1324 unsigned long iflag;
1110#ifdef CONFIG_SCSI_LPFC_DEBUG_FS 1325#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1111 uint32_t id; 1326 uint32_t id;
1112#endif 1327#endif
1113 1328
1329 ctx_buf = NULL;
1114 if (!nvmebuf || !phba->targetport) { 1330 if (!nvmebuf || !phba->targetport) {
1115 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 1331 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1116 "6157 FCP Drop IO\n"); 1332 "6157 NVMET FCP Drop IO\n");
1117 oxid = 0; 1333 oxid = 0;
1118 size = 0; 1334 size = 0;
1119 sid = 0; 1335 sid = 0;
1336 ctxp = NULL;
1120 goto dropit; 1337 goto dropit;
1121 } 1338 }
1122 1339
1340 spin_lock_irqsave(&phba->sli4_hba.nvmet_io_lock, iflag);
1341 if (phba->sli4_hba.nvmet_ctx_cnt) {
1342 list_remove_head(&phba->sli4_hba.lpfc_nvmet_ctx_list,
1343 ctx_buf, struct lpfc_nvmet_ctxbuf, list);
1344 phba->sli4_hba.nvmet_ctx_cnt--;
1345 }
1346 spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_lock, iflag);
1123 1347
1124 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1125 payload = (uint32_t *)(nvmebuf->dbuf.virt);
1126 fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt); 1348 fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
1127 size = nvmebuf->bytes_recv;
1128 oxid = be16_to_cpu(fc_hdr->fh_ox_id); 1349 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
1129 sid = sli4_sid_from_fc_hdr(fc_hdr); 1350 size = nvmebuf->bytes_recv;
1130 1351
1131 ctxp = (struct lpfc_nvmet_rcv_ctx *)nvmebuf->context; 1352#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1132 if (ctxp == NULL) { 1353 if (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV) {
1133 atomic_inc(&tgtp->rcv_fcp_cmd_drop); 1354 id = smp_processor_id();
1134 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 1355 if (id < LPFC_CHECK_CPU_CNT)
1135 "6158 FCP Drop IO x%x: Alloc\n", 1356 phba->cpucheck_rcv_io[id]++;
1136 oxid); 1357 }
1137 lpfc_nvmet_rq_post(phba, NULL, &nvmebuf->hbuf); 1358#endif
1138 /* Cannot send ABTS without context */ 1359
1360 lpfc_nvmeio_data(phba, "NVMET FCP RCV: xri x%x sz %d CPU %02x\n",
1361 oxid, size, smp_processor_id());
1362
1363 if (!ctx_buf) {
1364 /* Queue this NVME IO to process later */
1365 spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
1366 list_add_tail(&nvmebuf->hbuf.list,
1367 &phba->sli4_hba.lpfc_nvmet_io_wait_list);
1368 phba->sli4_hba.nvmet_io_wait_cnt++;
1369 phba->sli4_hba.nvmet_io_wait_total++;
1370 spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
1371 iflag);
1372
1373 /* Post a brand new DMA buffer to RQ */
1374 qno = nvmebuf->idx;
1375 lpfc_post_rq_buffer(
1376 phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
1377 phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
1139 return; 1378 return;
1140 } 1379 }
1380
1381 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1382 payload = (uint32_t *)(nvmebuf->dbuf.virt);
1383 sid = sli4_sid_from_fc_hdr(fc_hdr);
1384
1385 ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
1141 memset(ctxp, 0, sizeof(ctxp->ctx)); 1386 memset(ctxp, 0, sizeof(ctxp->ctx));
1142 ctxp->wqeq = NULL; 1387 ctxp->wqeq = NULL;
1143 ctxp->txrdy = NULL; 1388 ctxp->txrdy = NULL;
@@ -1147,9 +1392,9 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
1147 ctxp->oxid = oxid; 1392 ctxp->oxid = oxid;
1148 ctxp->sid = sid; 1393 ctxp->sid = sid;
1149 ctxp->state = LPFC_NVMET_STE_RCV; 1394 ctxp->state = LPFC_NVMET_STE_RCV;
1150 ctxp->rqb_buffer = nvmebuf;
1151 ctxp->entry_cnt = 1; 1395 ctxp->entry_cnt = 1;
1152 ctxp->flag = 0; 1396 ctxp->flag = 0;
1397 ctxp->ctxbuf = ctx_buf;
1153 spin_lock_init(&ctxp->ctxlock); 1398 spin_lock_init(&ctxp->ctxlock);
1154 1399
1155#ifdef CONFIG_SCSI_LPFC_DEBUG_FS 1400#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
@@ -1165,22 +1410,16 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
1165 ctxp->ts_isr_status = 0; 1410 ctxp->ts_isr_status = 0;
1166 ctxp->ts_status_nvme = 0; 1411 ctxp->ts_status_nvme = 0;
1167 } 1412 }
1168
1169 if (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV) {
1170 id = smp_processor_id();
1171 if (id < LPFC_CHECK_CPU_CNT)
1172 phba->cpucheck_rcv_io[id]++;
1173 }
1174#endif 1413#endif
1175 1414
1176 lpfc_nvmeio_data(phba, "NVMET FCP RCV: xri x%x sz %d CPU %02x\n",
1177 oxid, size, smp_processor_id());
1178
1179 atomic_inc(&tgtp->rcv_fcp_cmd_in); 1415 atomic_inc(&tgtp->rcv_fcp_cmd_in);
1180 /* 1416 /*
1181 * The calling sequence should be: 1417 * The calling sequence should be:
1182 * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done 1418 * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done
1183 * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp. 1419 * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
1420 * When we return from nvmet_fc_rcv_fcp_req, all relevant info in
1421 * the NVME command / FC header is stored, so we are free to repost
1422 * the buffer.
1184 */ 1423 */
1185 rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req, 1424 rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req,
1186 payload, size); 1425 payload, size);
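The expanded comment pins down buffer ownership: on return from nvmet_fc_rcv_fcp_req() the transport has copied everything it needs from the command and FC header, so the driver may repost the DMA buffer at once instead of pinning it for the life of the exchange. Schematically (hypothetical stub names, not the transport's real signatures):

    /* Hypothetical stubs; only the ordering matters. */
    int  transport_rcv(void *req, void *payload, unsigned int len);
    void repost_buffer(void *dma_buf);      /* lpfc_rq_buf_free() stand-in */
    void abort_exchange(void *req);

    static void handle_fcp_cmd(void *req, void *payload, unsigned int len,
                               void *dma_buf)
    {
            if (transport_rcv(req, payload, len) == 0) {
                    repost_buffer(dma_buf); /* safe: payload already consumed */
                    return;
            }
            abort_exchange(req);            /* error path reposts too */
            repost_buffer(dma_buf);
    }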
@@ -1188,26 +1427,32 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
1188 /* Process FCP command */ 1427 /* Process FCP command */
1189 if (rc == 0) { 1428 if (rc == 0) {
1190 atomic_inc(&tgtp->rcv_fcp_cmd_out); 1429 atomic_inc(&tgtp->rcv_fcp_cmd_out);
1430 lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
1191 return; 1431 return;
1192 } 1432 }
1193 1433
1194 atomic_inc(&tgtp->rcv_fcp_cmd_drop); 1434 atomic_inc(&tgtp->rcv_fcp_cmd_drop);
1195 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 1435 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1196 "6159 FCP Drop IO x%x: err x%x\n", 1436 "6159 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
1197 ctxp->oxid, rc); 1437 ctxp->oxid, rc,
1438 atomic_read(&tgtp->rcv_fcp_cmd_in),
1439 atomic_read(&tgtp->rcv_fcp_cmd_out),
1440 atomic_read(&tgtp->xmt_fcp_release));
1198dropit: 1441dropit:
1199 lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n", 1442 lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n",
1200 oxid, size, sid); 1443 oxid, size, sid);
1201 if (oxid) { 1444 if (oxid) {
1445 lpfc_nvmet_defer_release(phba, ctxp);
1202 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid); 1446 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
1447 lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
1203 return; 1448 return;
1204 } 1449 }
1205 1450
1206 if (nvmebuf) { 1451 if (ctx_buf)
1207 nvmebuf->iocbq->hba_wqidx = 0; 1452 lpfc_nvmet_ctxbuf_post(phba, ctx_buf);
1208 /* We assume a rcv'ed cmd ALWAYS fits into 1 buffer */ 1453
1209 lpfc_nvmet_rq_post(phba, NULL, &nvmebuf->hbuf); 1454 if (nvmebuf)
1210 } 1455 lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
1211#endif 1456#endif
1212} 1457}
1213 1458
@@ -1259,7 +1504,7 @@ lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
1259 uint64_t isr_timestamp) 1504 uint64_t isr_timestamp)
1260{ 1505{
1261 if (phba->nvmet_support == 0) { 1506 if (phba->nvmet_support == 0) {
1262 lpfc_nvmet_rq_post(phba, NULL, &nvmebuf->hbuf); 1507 lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
1263 return; 1508 return;
1264 } 1509 }
1265 lpfc_nvmet_unsol_fcp_buffer(phba, pring, nvmebuf, 1510 lpfc_nvmet_unsol_fcp_buffer(phba, pring, nvmebuf,
@@ -1460,7 +1705,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
1460 nvmewqe = ctxp->wqeq; 1705 nvmewqe = ctxp->wqeq;
1461 if (nvmewqe == NULL) { 1706 if (nvmewqe == NULL) {
1462 /* Allocate buffer for command wqe */ 1707 /* Allocate buffer for command wqe */
1463 nvmewqe = ctxp->rqb_buffer->iocbq; 1708 nvmewqe = ctxp->ctxbuf->iocbq;
1464 if (nvmewqe == NULL) { 1709 if (nvmewqe == NULL) {
1465 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 1710 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1466 "6110 lpfc_nvmet_prep_fcp_wqe: No " 1711 "6110 lpfc_nvmet_prep_fcp_wqe: No "
@@ -1487,7 +1732,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
1487 return NULL; 1732 return NULL;
1488 } 1733 }
1489 1734
1490 sgl = (struct sli4_sge *)ctxp->rqb_buffer->sglq->sgl; 1735 sgl = (struct sli4_sge *)ctxp->ctxbuf->sglq->sgl;
1491 switch (rsp->op) { 1736 switch (rsp->op) {
1492 case NVMET_FCOP_READDATA: 1737 case NVMET_FCOP_READDATA:
1493 case NVMET_FCOP_READDATA_RSP: 1738 case NVMET_FCOP_READDATA_RSP:
@@ -1812,7 +2057,8 @@ lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
1812 result = wcqe->parameter; 2057 result = wcqe->parameter;
1813 2058
1814 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; 2059 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1815 atomic_inc(&tgtp->xmt_abort_cmpl); 2060 if (ctxp->flag & LPFC_NVMET_ABORT_OP)
2061 atomic_inc(&tgtp->xmt_fcp_abort_cmpl);
1816 2062
1817 ctxp->state = LPFC_NVMET_STE_DONE; 2063 ctxp->state = LPFC_NVMET_STE_DONE;
1818 2064
@@ -1827,6 +2073,7 @@ lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
1827 } 2073 }
1828 ctxp->flag &= ~LPFC_NVMET_ABORT_OP; 2074 ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
1829 spin_unlock_irqrestore(&ctxp->ctxlock, flags); 2075 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
2076 atomic_inc(&tgtp->xmt_abort_rsp);
1830 2077
1831 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, 2078 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
1832 "6165 ABORT cmpl: xri x%x flg x%x (%d) " 2079 "6165 ABORT cmpl: xri x%x flg x%x (%d) "
@@ -1835,15 +2082,16 @@ lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
1835 wcqe->word0, wcqe->total_data_placed, 2082 wcqe->word0, wcqe->total_data_placed,
1836 result, wcqe->word3); 2083 result, wcqe->word3);
1837 2084
2085 cmdwqe->context2 = NULL;
2086 cmdwqe->context3 = NULL;
1838 /* 2087 /*
1839 * if transport has released ctx, then can reuse it. Otherwise, 2088 * if transport has released ctx, then can reuse it. Otherwise,
1840 * will be recycled by transport release call. 2089 * will be recycled by transport release call.
1841 */ 2090 */
1842 if (released) 2091 if (released)
1843 lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf); 2092 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
1844 2093
1845 cmdwqe->context2 = NULL; 2094 /* This is the iocbq for the abort, not the command */
1846 cmdwqe->context3 = NULL;
1847 lpfc_sli_release_iocbq(phba, cmdwqe); 2095 lpfc_sli_release_iocbq(phba, cmdwqe);
1848 2096
1849 /* Since iaab/iaar are NOT set, there is no work left. 2097 /* Since iaab/iaar are NOT set, there is no work left.
@@ -1877,7 +2125,8 @@ lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
1877 result = wcqe->parameter; 2125 result = wcqe->parameter;
1878 2126
1879 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; 2127 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1880 atomic_inc(&tgtp->xmt_abort_cmpl); 2128 if (ctxp->flag & LPFC_NVMET_ABORT_OP)
2129 atomic_inc(&tgtp->xmt_fcp_abort_cmpl);
1881 2130
1882 if (!ctxp) { 2131 if (!ctxp) {
1883 /* if context is clear, related IO already completed */ 2132
@@ -1907,6 +2156,7 @@ lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
1907 } 2156 }
1908 ctxp->flag &= ~LPFC_NVMET_ABORT_OP; 2157 ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
1909 spin_unlock_irqrestore(&ctxp->ctxlock, flags); 2158 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
2159 atomic_inc(&tgtp->xmt_abort_rsp);
1910 2160
1911 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, 2161 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1912 "6316 ABTS cmpl xri x%x flg x%x (%x) " 2162 "6316 ABTS cmpl xri x%x flg x%x (%x) "
@@ -1914,15 +2164,15 @@ lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
1914 ctxp->oxid, ctxp->flag, released, 2164 ctxp->oxid, ctxp->flag, released,
1915 wcqe->word0, wcqe->total_data_placed, 2165 wcqe->word0, wcqe->total_data_placed,
1916 result, wcqe->word3); 2166 result, wcqe->word3);
2167
2168 cmdwqe->context2 = NULL;
2169 cmdwqe->context3 = NULL;
1917 /* 2170 /*
1918 * if transport has released ctx, then can reuse it. Otherwise, 2171 * if transport has released ctx, then can reuse it. Otherwise,
1919 * will be recycled by transport release call. 2172 * will be recycled by transport release call.
1920 */ 2173 */
1921 if (released) 2174 if (released)
1922 lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf); 2175 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
1923
1924 cmdwqe->context2 = NULL;
1925 cmdwqe->context3 = NULL;
1926 2176
1927 /* Since iaab/iaar are NOT set, there is no work left. 2177 /* Since iaab/iaar are NOT set, there is no work left.
1928 * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted 2178 * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted
@@ -1953,7 +2203,7 @@ lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
1953 result = wcqe->parameter; 2203 result = wcqe->parameter;
1954 2204
1955 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; 2205 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1956 atomic_inc(&tgtp->xmt_abort_cmpl); 2206 atomic_inc(&tgtp->xmt_ls_abort_cmpl);
1957 2207
1958 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, 2208 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1959 "6083 Abort cmpl: ctx %p WCQE: %08x %08x %08x %08x\n", 2209 "6083 Abort cmpl: ctx %p WCQE: %08x %08x %08x %08x\n",
@@ -1984,10 +2234,6 @@ lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
1984 sid, xri, ctxp->wqeq->sli4_xritag); 2234 sid, xri, ctxp->wqeq->sli4_xritag);
1985 2235
1986 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; 2236 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1987 if (!ctxp->wqeq) {
1988 ctxp->wqeq = ctxp->rqb_buffer->iocbq;
1989 ctxp->wqeq->hba_wqidx = 0;
1990 }
1991 2237
1992 ndlp = lpfc_findnode_did(phba->pport, sid); 2238 ndlp = lpfc_findnode_did(phba->pport, sid);
1993 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || 2239 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
@@ -2083,7 +2329,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
2083 2329
2084 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; 2330 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2085 if (!ctxp->wqeq) { 2331 if (!ctxp->wqeq) {
2086 ctxp->wqeq = ctxp->rqb_buffer->iocbq; 2332 ctxp->wqeq = ctxp->ctxbuf->iocbq;
2087 ctxp->wqeq->hba_wqidx = 0; 2333 ctxp->wqeq->hba_wqidx = 0;
2088 } 2334 }
2089 2335
@@ -2104,6 +2350,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
2104 /* Issue ABTS for this WQE based on iotag */ 2350 /* Issue ABTS for this WQE based on iotag */
2105 ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba); 2351 ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba);
2106 if (!ctxp->abort_wqeq) { 2352 if (!ctxp->abort_wqeq) {
2353 atomic_inc(&tgtp->xmt_abort_rsp_error);
2107 lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS, 2354 lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
2108 "6161 ABORT failed: No wqeqs: " 2355 "6161 ABORT failed: No wqeqs: "
2109 "xri: x%x\n", ctxp->oxid); 2356 "xri: x%x\n", ctxp->oxid);
@@ -2128,6 +2375,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
2128 /* driver-queued commands are in the process of being flushed */ 2375
2129 if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) { 2376 if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) {
2130 spin_unlock_irqrestore(&phba->hbalock, flags); 2377 spin_unlock_irqrestore(&phba->hbalock, flags);
2378 atomic_inc(&tgtp->xmt_abort_rsp_error);
2131 lpfc_printf_log(phba, KERN_ERR, LOG_NVME, 2379 lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
2132 "6163 Driver in reset cleanup - flushing " 2380 "6163 Driver in reset cleanup - flushing "
2133 "NVME Req now. hba_flag x%x oxid x%x\n", 2381 "NVME Req now. hba_flag x%x oxid x%x\n",
@@ -2140,6 +2388,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
2140 /* Outstanding abort is in progress */ 2388 /* Outstanding abort is in progress */
2141 if (abts_wqeq->iocb_flag & LPFC_DRIVER_ABORTED) { 2389 if (abts_wqeq->iocb_flag & LPFC_DRIVER_ABORTED) {
2142 spin_unlock_irqrestore(&phba->hbalock, flags); 2390 spin_unlock_irqrestore(&phba->hbalock, flags);
2391 atomic_inc(&tgtp->xmt_abort_rsp_error);
2143 lpfc_printf_log(phba, KERN_ERR, LOG_NVME, 2392 lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
2144 "6164 Outstanding NVME I/O Abort Request " 2393 "6164 Outstanding NVME I/O Abort Request "
2145 "still pending on oxid x%x\n", 2394 "still pending on oxid x%x\n",
@@ -2190,9 +2439,12 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
2190 abts_wqeq->context2 = ctxp; 2439 abts_wqeq->context2 = ctxp;
2191 rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq); 2440 rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq);
2192 spin_unlock_irqrestore(&phba->hbalock, flags); 2441 spin_unlock_irqrestore(&phba->hbalock, flags);
2193 if (rc == WQE_SUCCESS) 2442 if (rc == WQE_SUCCESS) {
2443 atomic_inc(&tgtp->xmt_abort_sol);
2194 return 0; 2444 return 0;
2445 }
2195 2446
2447 atomic_inc(&tgtp->xmt_abort_rsp_error);
2196 ctxp->flag &= ~LPFC_NVMET_ABORT_OP; 2448 ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
2197 lpfc_sli_release_iocbq(phba, abts_wqeq); 2449 lpfc_sli_release_iocbq(phba, abts_wqeq);
2198 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, 2450 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
@@ -2215,7 +2467,7 @@ lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
2215 2467
2216 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; 2468 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2217 if (!ctxp->wqeq) { 2469 if (!ctxp->wqeq) {
2218 ctxp->wqeq = ctxp->rqb_buffer->iocbq; 2470 ctxp->wqeq = ctxp->ctxbuf->iocbq;
2219 ctxp->wqeq->hba_wqidx = 0; 2471 ctxp->wqeq->hba_wqidx = 0;
2220 } 2472 }
2221 2473
@@ -2231,11 +2483,11 @@ lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
2231 rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq); 2483 rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq);
2232 spin_unlock_irqrestore(&phba->hbalock, flags); 2484 spin_unlock_irqrestore(&phba->hbalock, flags);
2233 if (rc == WQE_SUCCESS) { 2485 if (rc == WQE_SUCCESS) {
2234 atomic_inc(&tgtp->xmt_abort_rsp);
2235 return 0; 2486 return 0;
2236 } 2487 }
2237 2488
2238aerr: 2489aerr:
2239 ctxp->flag &= ~LPFC_NVMET_ABORT_OP; 2491 ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
2240 atomic_inc(&tgtp->xmt_abort_rsp_error); 2492 atomic_inc(&tgtp->xmt_abort_rsp_error);
2241 lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS, 2493 lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
@@ -2270,6 +2522,7 @@ lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba,
2270 } 2522 }
2271 abts_wqeq = ctxp->wqeq; 2523 abts_wqeq = ctxp->wqeq;
2272 wqe_abts = &abts_wqeq->wqe; 2524 wqe_abts = &abts_wqeq->wqe;
2525
2273 lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri); 2526 lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri);
2274 2527
2275 spin_lock_irqsave(&phba->hbalock, flags); 2528 spin_lock_irqsave(&phba->hbalock, flags);
@@ -2279,7 +2532,7 @@ lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba,
2279 rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, abts_wqeq); 2532 rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, abts_wqeq);
2280 spin_unlock_irqrestore(&phba->hbalock, flags); 2533 spin_unlock_irqrestore(&phba->hbalock, flags);
2281 if (rc == WQE_SUCCESS) { 2534 if (rc == WQE_SUCCESS) {
2282 atomic_inc(&tgtp->xmt_abort_rsp); 2535 atomic_inc(&tgtp->xmt_abort_unsol);
2283 return 0; 2536 return 0;
2284 } 2537 }
2285 2538
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.h b/drivers/scsi/lpfc/lpfc_nvmet.h
index 128759fe6650..6eb2f5d8d4ed 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.h
+++ b/drivers/scsi/lpfc/lpfc_nvmet.h
@@ -22,6 +22,7 @@
22 ********************************************************************/ 22 ********************************************************************/
23 23
24#define LPFC_NVMET_DEFAULT_SEGS (64 + 1) /* 256K IOs */ 24#define LPFC_NVMET_DEFAULT_SEGS (64 + 1) /* 256K IOs */
25#define LPFC_NVMET_RQE_DEF_COUNT 512
25#define LPFC_NVMET_SUCCESS_LEN 12 26#define LPFC_NVMET_SUCCESS_LEN 12
26 27
27/* Used for NVME Target */ 28/* Used for NVME Target */
@@ -34,6 +35,7 @@ struct lpfc_nvmet_tgtport {
34 atomic_t rcv_ls_req_out; 35 atomic_t rcv_ls_req_out;
35 atomic_t rcv_ls_req_drop; 36 atomic_t rcv_ls_req_drop;
36 atomic_t xmt_ls_abort; 37 atomic_t xmt_ls_abort;
38 atomic_t xmt_ls_abort_cmpl;
37 39
38 /* Stats counters - lpfc_nvmet_xmt_ls_rsp */ 40 /* Stats counters - lpfc_nvmet_xmt_ls_rsp */
39 atomic_t xmt_ls_rsp; 41 atomic_t xmt_ls_rsp;
@@ -47,9 +49,9 @@ struct lpfc_nvmet_tgtport {
47 atomic_t rcv_fcp_cmd_in; 49 atomic_t rcv_fcp_cmd_in;
48 atomic_t rcv_fcp_cmd_out; 50 atomic_t rcv_fcp_cmd_out;
49 atomic_t rcv_fcp_cmd_drop; 51 atomic_t rcv_fcp_cmd_drop;
52 atomic_t xmt_fcp_release;
50 53
51 /* Stats counters - lpfc_nvmet_xmt_fcp_op */ 54 /* Stats counters - lpfc_nvmet_xmt_fcp_op */
52 atomic_t xmt_fcp_abort;
53 atomic_t xmt_fcp_drop; 55 atomic_t xmt_fcp_drop;
54 atomic_t xmt_fcp_read_rsp; 56 atomic_t xmt_fcp_read_rsp;
55 atomic_t xmt_fcp_read; 57 atomic_t xmt_fcp_read;
@@ -62,12 +64,13 @@ struct lpfc_nvmet_tgtport {
62 atomic_t xmt_fcp_rsp_drop; 64 atomic_t xmt_fcp_rsp_drop;
63 65
64 66
65 /* Stats counters - lpfc_nvmet_unsol_issue_abort */ 67 /* Stats counters - lpfc_nvmet_xmt_fcp_abort */
68 atomic_t xmt_fcp_abort;
69 atomic_t xmt_fcp_abort_cmpl;
70 atomic_t xmt_abort_sol;
71 atomic_t xmt_abort_unsol;
66 atomic_t xmt_abort_rsp; 72 atomic_t xmt_abort_rsp;
67 atomic_t xmt_abort_rsp_error; 73 atomic_t xmt_abort_rsp_error;
68
69 /* Stats counters - lpfc_nvmet_xmt_abort_cmp */
70 atomic_t xmt_abort_cmpl;
71}; 74};
72 75
73struct lpfc_nvmet_rcv_ctx { 76struct lpfc_nvmet_rcv_ctx {
@@ -103,6 +106,7 @@ struct lpfc_nvmet_rcv_ctx {
103#define LPFC_NVMET_CTX_RLS 0x8 /* ctx free requested */ 106#define LPFC_NVMET_CTX_RLS 0x8 /* ctx free requested */
104#define LPFC_NVMET_ABTS_RCV 0x10 /* ABTS received on exchange */ 107#define LPFC_NVMET_ABTS_RCV 0x10 /* ABTS received on exchange */
105 struct rqb_dmabuf *rqb_buffer; 108 struct rqb_dmabuf *rqb_buffer;
109 struct lpfc_nvmet_ctxbuf *ctxbuf;
106 110
107#ifdef CONFIG_SCSI_LPFC_DEBUG_FS 111#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
108 uint64_t ts_isr_cmd; 112 uint64_t ts_isr_cmd;
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index cf19f4976f5f..d6b184839bc2 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -74,6 +74,8 @@ static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
74 struct lpfc_iocbq *); 74 struct lpfc_iocbq *);
75static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *, 75static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
76 struct hbq_dmabuf *); 76 struct hbq_dmabuf *);
77static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
78 struct hbq_dmabuf *dmabuf);
77static int lpfc_sli4_fp_handle_cqe(struct lpfc_hba *, struct lpfc_queue *, 79static int lpfc_sli4_fp_handle_cqe(struct lpfc_hba *, struct lpfc_queue *,
78 struct lpfc_cqe *); 80 struct lpfc_cqe *);
79static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *, 81static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
@@ -479,22 +481,23 @@ lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
479 if (unlikely(!hq) || unlikely(!dq)) 481 if (unlikely(!hq) || unlikely(!dq))
480 return -ENOMEM; 482 return -ENOMEM;
481 put_index = hq->host_index; 483 put_index = hq->host_index;
482 temp_hrqe = hq->qe[hq->host_index].rqe; 484 temp_hrqe = hq->qe[put_index].rqe;
483 temp_drqe = dq->qe[dq->host_index].rqe; 485 temp_drqe = dq->qe[dq->host_index].rqe;
484 486
485 if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ) 487 if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
486 return -EINVAL; 488 return -EINVAL;
487 if (hq->host_index != dq->host_index) 489 if (put_index != dq->host_index)
488 return -EINVAL; 490 return -EINVAL;
489 /* If the host has not yet processed the next entry then we are done */ 491 /* If the host has not yet processed the next entry then we are done */
490 if (((hq->host_index + 1) % hq->entry_count) == hq->hba_index) 492 if (((put_index + 1) % hq->entry_count) == hq->hba_index)
491 return -EBUSY; 493 return -EBUSY;
492 lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size); 494 lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
493 lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size); 495 lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);
494 496
495 /* Update the host index to point to the next slot */ 497 /* Update the host index to point to the next slot */
496 hq->host_index = ((hq->host_index + 1) % hq->entry_count); 498 hq->host_index = ((put_index + 1) % hq->entry_count);
497 dq->host_index = ((dq->host_index + 1) % dq->entry_count); 499 dq->host_index = ((dq->host_index + 1) % dq->entry_count);
500 hq->RQ_buf_posted++;
498 501
499 /* Ring The Header Receive Queue Doorbell */ 502 /* Ring The Header Receive Queue Doorbell */
500 if (!(hq->host_index % hq->entry_repost)) { 503 if (!(hq->host_index % hq->entry_repost)) {
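lpfc_sli4_rq_put() now reads host_index once into put_index and uses that snapshot for the full-check, the collision check, and the advance, removing any chance of mixing two reads of a field the function itself is about to move; the new RQ_buf_posted counter gives the RQE-error logging further down an accurate posted-buffer figure. The ring arithmetic in isolation (entry count and sizes illustrative):

    #include <string.h>

    #define ENTRY_COUNT 512

    struct ring {
            unsigned int host_index;        /* next slot the host fills */
            unsigned int hba_index;         /* next slot the HBA consumes */
            unsigned int entry_repost;      /* doorbell once per N puts */
            char entries[ENTRY_COUNT][64];
    };

    /* Returns 0 on success, -1 if the ring is full. */
    static int ring_put(struct ring *q, const void *entry, size_t len)
    {
            unsigned int put_index = q->host_index;     /* snapshot once */

            /* Full when advancing would collide with the HBA's index. */
            if (((put_index + 1) % ENTRY_COUNT) == q->hba_index)
                    return -1;

            memcpy(q->entries[put_index], entry, len);
            q->host_index = (put_index + 1) % ENTRY_COUNT;

            /* Batch doorbells: notify hardware once per entry_repost puts. */
            if (!(q->host_index % q->entry_repost)) {
                    /* write the batched put count to the doorbell here */
            }
            return 0;
    }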
@@ -4204,13 +4207,16 @@ lpfc_sli_brdreset(struct lpfc_hba *phba)
4204 /* Reset HBA */ 4207 /* Reset HBA */
4205 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4208 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4206 "0325 Reset HBA Data: x%x x%x\n", 4209 "0325 Reset HBA Data: x%x x%x\n",
4207 phba->pport->port_state, psli->sli_flag); 4210 (phba->pport) ? phba->pport->port_state : 0,
4211 psli->sli_flag);
4208 4212
4209 /* perform board reset */ 4213 /* perform board reset */
4210 phba->fc_eventTag = 0; 4214 phba->fc_eventTag = 0;
4211 phba->link_events = 0; 4215 phba->link_events = 0;
4212 phba->pport->fc_myDID = 0; 4216 if (phba->pport) {
4213 phba->pport->fc_prevDID = 0; 4217 phba->pport->fc_myDID = 0;
4218 phba->pport->fc_prevDID = 0;
4219 }
4214 4220
4215 /* Turn off parity checking and serr during the physical reset */ 4221 /* Turn off parity checking and serr during the physical reset */
4216 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value); 4222 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
@@ -4336,7 +4342,8 @@ lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
4336 /* Restart HBA */ 4342 /* Restart HBA */
4337 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4343 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4338 "0337 Restart HBA Data: x%x x%x\n", 4344 "0337 Restart HBA Data: x%x x%x\n",
4339 phba->pport->port_state, psli->sli_flag); 4345 (phba->pport) ? phba->pport->port_state : 0,
4346 psli->sli_flag);
4340 4347
4341 word0 = 0; 4348 word0 = 0;
4342 mb = (MAILBOX_t *) &word0; 4349 mb = (MAILBOX_t *) &word0;
@@ -4350,7 +4357,7 @@ lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
4350 readl(to_slim); /* flush */ 4357 readl(to_slim); /* flush */
4351 4358
4352 /* Only skip post after fc_ffinit is completed */ 4359 /* Only skip post after fc_ffinit is completed */
4353 if (phba->pport->port_state) 4360 if (phba->pport && phba->pport->port_state)
4354 word0 = 1; /* This is really setting up word1 */ 4361 word0 = 1; /* This is really setting up word1 */
4355 else 4362 else
4356 word0 = 0; /* This is really setting up word1 */ 4363 word0 = 0; /* This is really setting up word1 */
@@ -4359,7 +4366,8 @@ lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
4359 readl(to_slim); /* flush */ 4366 readl(to_slim); /* flush */
4360 4367
4361 lpfc_sli_brdreset(phba); 4368 lpfc_sli_brdreset(phba);
4362 phba->pport->stopped = 0; 4369 if (phba->pport)
4370 phba->pport->stopped = 0;
4363 phba->link_state = LPFC_INIT_START; 4371 phba->link_state = LPFC_INIT_START;
4364 phba->hba_flag = 0; 4372 phba->hba_flag = 0;
4365 spin_unlock_irq(&phba->hbalock); 4373 spin_unlock_irq(&phba->hbalock);
@@ -4446,7 +4454,7 @@ lpfc_sli_brdrestart(struct lpfc_hba *phba)
4446 * iteration, the function will restart the HBA again. The function returns 4454 * iteration, the function will restart the HBA again. The function returns
4447 * zero if HBA successfully restarted else returns negative error code. 4455 * zero if HBA successfully restarted else returns negative error code.
4448 **/ 4456 **/
4449static int 4457int
4450lpfc_sli_chipset_init(struct lpfc_hba *phba) 4458lpfc_sli_chipset_init(struct lpfc_hba *phba)
4451{ 4459{
4452 uint32_t status, i = 0; 4460 uint32_t status, i = 0;
@@ -5901,7 +5909,7 @@ lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
5901 bf_set(lpfc_mbx_set_feature_mds, 5909 bf_set(lpfc_mbx_set_feature_mds,
5902 &mbox->u.mqe.un.set_feature, 1); 5910 &mbox->u.mqe.un.set_feature, 1);
5903 bf_set(lpfc_mbx_set_feature_mds_deep_loopbk, 5911 bf_set(lpfc_mbx_set_feature_mds_deep_loopbk,
5904 &mbox->u.mqe.un.set_feature, 0); 5912 &mbox->u.mqe.un.set_feature, 1);
5905 mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS; 5913 mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS;
5906 mbox->u.mqe.un.set_feature.param_len = 8; 5914 mbox->u.mqe.un.set_feature.param_len = 8;
5907 break; 5915 break;
@@ -6507,6 +6515,50 @@ lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
6507 (phba->hba_flag & HBA_FCOE_MODE) ? "FCoE" : "FC"); 6515 (phba->hba_flag & HBA_FCOE_MODE) ? "FCoE" : "FC");
6508} 6516}
6509 6517
6518int
6519lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
6520 struct lpfc_queue *drq, int count, int idx)
6521{
6522 int rc, i;
6523 struct lpfc_rqe hrqe;
6524 struct lpfc_rqe drqe;
6525 struct lpfc_rqb *rqbp;
6526 struct rqb_dmabuf *rqb_buffer;
6527 LIST_HEAD(rqb_buf_list);
6528
6529 rqbp = hrq->rqbp;
6530 for (i = 0; i < count; i++) {
6531 /* If the RQ is already full, don't bother */
6532 if (rqbp->buffer_count + i >= rqbp->entry_count - 1)
6533 break;
6534 rqb_buffer = rqbp->rqb_alloc_buffer(phba);
6535 if (!rqb_buffer)
6536 break;
6537 rqb_buffer->hrq = hrq;
6538 rqb_buffer->drq = drq;
6539 rqb_buffer->idx = idx;
6540 list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list);
6541 }
6542 while (!list_empty(&rqb_buf_list)) {
6543 list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf,
6544 hbuf.list);
6545
6546 hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys);
6547 hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys);
6548 drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys);
6549 drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys);
6550 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
6551 if (rc < 0) {
6552 rqbp->rqb_free_buffer(phba, rqb_buffer);
6553 } else {
6554 list_add_tail(&rqb_buffer->hbuf.list,
6555 &rqbp->rqb_buffer_list);
6556 rqbp->buffer_count++;
6557 }
6558 }
6559 return 1;
6560}
6561
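lpfc_post_rq_buffer() is deliberately two-phase: it first stages up to count freshly allocated buffers on a local list, stopping early if the RQ is nearly full or an allocation fails, then walks the staging list posting header/data RQE pairs and frees anything lpfc_sli4_rq_put() rejects, so an allocation failure mid-batch simply shortens the batch. The same shape in miniature (pool_alloc/pool_free and the 64-entry cap are illustrative):

    /* Hypothetical helpers for the sketch. */
    void *pool_alloc(void);
    void  pool_free(void *buf);
    int   ring_put_buf(void *buf);  /* 0 on success, -1 if the ring is full */

    static int post_batch(int count)
    {
            void *staged[64];
            int i, n = 0, posted = 0;

            /* Phase 1: reserve; stop at the first allocation failure. */
            for (i = 0; i < count && n < 64; i++) {
                    void *buf = pool_alloc();
                    if (!buf)
                            break;
                    staged[n++] = buf;
            }
            /* Phase 2: commit; anything the ring rejects goes back. */
            for (i = 0; i < n; i++) {
                    if (ring_put_buf(staged[i]) < 0)
                            pool_free(staged[i]);
                    else
                            posted++;
            }
            return posted;
    }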
6510/** 6562/**
6511 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function 6563 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
6512 * @phba: Pointer to HBA context object. 6564 * @phba: Pointer to HBA context object.
@@ -6519,7 +6571,7 @@ lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
6519int 6571int
6520lpfc_sli4_hba_setup(struct lpfc_hba *phba) 6572lpfc_sli4_hba_setup(struct lpfc_hba *phba)
6521{ 6573{
6522 int rc, i; 6574 int rc, i, cnt;
6523 LPFC_MBOXQ_t *mboxq; 6575 LPFC_MBOXQ_t *mboxq;
6524 struct lpfc_mqe *mqe; 6576 struct lpfc_mqe *mqe;
6525 uint8_t *vpd; 6577 uint8_t *vpd;
@@ -6870,6 +6922,21 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
6870 goto out_destroy_queue; 6922 goto out_destroy_queue;
6871 } 6923 }
6872 phba->sli4_hba.nvmet_xri_cnt = rc; 6924 phba->sli4_hba.nvmet_xri_cnt = rc;
6925
6926 cnt = phba->cfg_iocb_cnt * 1024;
6927 /* We need 1 iocbq for every SGL, for IO processing */
6928 cnt += phba->sli4_hba.nvmet_xri_cnt;
6929 /* Initialize and populate the iocb list per host */
6930 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6931 "2821 initialize iocb list %d total %d\n",
6932 phba->cfg_iocb_cnt, cnt);
6933 rc = lpfc_init_iocb_list(phba, cnt);
6934 if (rc) {
6935 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6936 "1413 Failed to init iocb list.\n");
6937 goto out_destroy_queue;
6938 }
6939
6873 lpfc_nvmet_create_targetport(phba); 6940 lpfc_nvmet_create_targetport(phba);
6874 } else { 6941 } else {
6875 /* update host scsi xri-sgl sizes and mappings */ 6942 /* update host scsi xri-sgl sizes and mappings */
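The iocb list is now initialized during SLI4 setup, and the NVMET branch sizes it as cfg_iocb_cnt * 1024 plus one extra iocbq per reserved NVMET XRI, since each outstanding target exchange carries its own work-queue entry; the non-NVMET branch below uses the plain cfg_iocb_cnt * 1024. Worked through with illustrative values:

    #include <stdio.h>

    int main(void)
    {
            unsigned int cfg_iocb_cnt = 2;          /* module param, units of 1024 */
            unsigned int nvmet_xri_cnt = 2048;      /* XRIs reserved for the target */
            unsigned int cnt = cfg_iocb_cnt * 1024; /* 2048 base iocbqs */

            cnt += nvmet_xri_cnt;                   /* one iocbq per SGL */
            printf("iocb list size: %u\n", cnt);    /* prints 4096 */
            return 0;
    }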
@@ -6889,28 +6956,34 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
6889 "and mapping: %d\n", rc); 6956 "and mapping: %d\n", rc);
6890 goto out_destroy_queue; 6957 goto out_destroy_queue;
6891 } 6958 }
6959
6960 cnt = phba->cfg_iocb_cnt * 1024;
6961 /* Initialize and populate the iocb list per host */
6962 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6963 "2820 initialize iocb list %d total %d\n",
6964 phba->cfg_iocb_cnt, cnt);
6965 rc = lpfc_init_iocb_list(phba, cnt);
6966 if (rc) {
6967 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6968 "6301 Failed to init iocb list.\n");
6969 goto out_destroy_queue;
6970 }
6892 } 6971 }
6893 6972
6894 if (phba->nvmet_support && phba->cfg_nvmet_mrq) { 6973 if (phba->nvmet_support && phba->cfg_nvmet_mrq) {
6895
6896 /* Post initial buffers to all RQs created */ 6974 /* Post initial buffers to all RQs created */
6897 for (i = 0; i < phba->cfg_nvmet_mrq; i++) { 6975 for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
6898 rqbp = phba->sli4_hba.nvmet_mrq_hdr[i]->rqbp; 6976 rqbp = phba->sli4_hba.nvmet_mrq_hdr[i]->rqbp;
6899 INIT_LIST_HEAD(&rqbp->rqb_buffer_list); 6977 INIT_LIST_HEAD(&rqbp->rqb_buffer_list);
6900 rqbp->rqb_alloc_buffer = lpfc_sli4_nvmet_alloc; 6978 rqbp->rqb_alloc_buffer = lpfc_sli4_nvmet_alloc;
6901 rqbp->rqb_free_buffer = lpfc_sli4_nvmet_free; 6979 rqbp->rqb_free_buffer = lpfc_sli4_nvmet_free;
6902 rqbp->entry_count = 256; 6980 rqbp->entry_count = LPFC_NVMET_RQE_DEF_COUNT;
6903 rqbp->buffer_count = 0; 6981 rqbp->buffer_count = 0;
6904 6982
6905 /* Divide by 4 and round down to multiple of 16 */
6906 rc = (phba->cfg_nvmet_mrq_post >> 2) & 0xfff8;
6907 phba->sli4_hba.nvmet_mrq_hdr[i]->entry_repost = rc;
6908 phba->sli4_hba.nvmet_mrq_data[i]->entry_repost = rc;
6909
6910 lpfc_post_rq_buffer( 6983 lpfc_post_rq_buffer(
6911 phba, phba->sli4_hba.nvmet_mrq_hdr[i], 6984 phba, phba->sli4_hba.nvmet_mrq_hdr[i],
6912 phba->sli4_hba.nvmet_mrq_data[i], 6985 phba->sli4_hba.nvmet_mrq_data[i],
6913 phba->cfg_nvmet_mrq_post); 6986 LPFC_NVMET_RQE_DEF_COUNT, i);
6914 } 6987 }
6915 } 6988 }
6916 6989
@@ -7077,6 +7150,7 @@ out_unset_queue:
7077 /* Unset all the queues set up in this routine when error out */ 7150 /* Unset all the queues set up in this routine when error out */
7078 lpfc_sli4_queue_unset(phba); 7151 lpfc_sli4_queue_unset(phba);
7079out_destroy_queue: 7152out_destroy_queue:
7153 lpfc_free_iocb_list(phba);
7080 lpfc_sli4_queue_destroy(phba); 7154 lpfc_sli4_queue_destroy(phba);
7081out_stop_timers: 7155out_stop_timers:
7082 lpfc_stop_hba_timers(phba); 7156 lpfc_stop_hba_timers(phba);
@@ -8616,8 +8690,11 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
8616 memset(wqe, 0, sizeof(union lpfc_wqe128)); 8690 memset(wqe, 0, sizeof(union lpfc_wqe128));
8617 /* Some of the fields are in the right position already */ 8691 /* Some of the fields are in the right position already */
8618 memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe)); 8692 memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
8619 wqe->generic.wqe_com.word7 = 0; /* The ct field has moved so reset */ 8693 if (iocbq->iocb.ulpCommand != CMD_SEND_FRAME) {
8620 wqe->generic.wqe_com.word10 = 0; 8694 /* The ct field has moved so reset */
8695 wqe->generic.wqe_com.word7 = 0;
8696 wqe->generic.wqe_com.word10 = 0;
8697 }
8621 8698
8622 abort_tag = (uint32_t) iocbq->iotag; 8699 abort_tag = (uint32_t) iocbq->iotag;
8623 xritag = iocbq->sli4_xritag; 8700 xritag = iocbq->sli4_xritag;
@@ -9111,6 +9188,10 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
9111 } 9188 }
9112 9189
9113 break; 9190 break;
9191 case CMD_SEND_FRAME:
9192 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
9193 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
9194 return 0;
9114 case CMD_XRI_ABORTED_CX: 9195 case CMD_XRI_ABORTED_CX:
9115 case CMD_CREATE_XRI_CR: /* Do we expect to use this? */ 9196 case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
9116 case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */ 9197 case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
@@ -12783,6 +12864,7 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
12783 struct fc_frame_header *fc_hdr; 12864 struct fc_frame_header *fc_hdr;
12784 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq; 12865 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
12785 struct lpfc_queue *drq = phba->sli4_hba.dat_rq; 12866 struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
12867 struct lpfc_nvmet_tgtport *tgtp;
12786 struct hbq_dmabuf *dma_buf; 12868 struct hbq_dmabuf *dma_buf;
12787 uint32_t status, rq_id; 12869 uint32_t status, rq_id;
12788 unsigned long iflags; 12870 unsigned long iflags;
@@ -12803,7 +12885,6 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
12803 case FC_STATUS_RQ_BUF_LEN_EXCEEDED: 12885 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
12804 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12886 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12805 "2537 Receive Frame Truncated!!\n"); 12887 "2537 Receive Frame Truncated!!\n");
12806 hrq->RQ_buf_trunc++;
12807 case FC_STATUS_RQ_SUCCESS: 12888 case FC_STATUS_RQ_SUCCESS:
12808 lpfc_sli4_rq_release(hrq, drq); 12889 lpfc_sli4_rq_release(hrq, drq);
12809 spin_lock_irqsave(&phba->hbalock, iflags); 12890 spin_lock_irqsave(&phba->hbalock, iflags);
@@ -12814,6 +12895,7 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
12814 goto out; 12895 goto out;
12815 } 12896 }
12816 hrq->RQ_rcv_buf++; 12897 hrq->RQ_rcv_buf++;
12898 hrq->RQ_buf_posted--;
12817 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe)); 12899 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
12818 12900
12819 /* If a NVME LS event (type 0x28), treat it as Fast path */ 12901 /* If a NVME LS event (type 0x28), treat it as Fast path */
@@ -12827,8 +12909,21 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
12827 spin_unlock_irqrestore(&phba->hbalock, iflags); 12909 spin_unlock_irqrestore(&phba->hbalock, iflags);
12828 workposted = true; 12910 workposted = true;
12829 break; 12911 break;
12830 case FC_STATUS_INSUFF_BUF_NEED_BUF:
12831 case FC_STATUS_INSUFF_BUF_FRM_DISC: 12912 case FC_STATUS_INSUFF_BUF_FRM_DISC:
12913 if (phba->nvmet_support) {
12914 tgtp = phba->targetport->private;
12915 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME,
12916 "6402 RQE Error x%x, posted %d err_cnt "
12917 "%d: %x %x %x\n",
12918 status, hrq->RQ_buf_posted,
12919 hrq->RQ_no_posted_buf,
12920 atomic_read(&tgtp->rcv_fcp_cmd_in),
12921 atomic_read(&tgtp->rcv_fcp_cmd_out),
12922 atomic_read(&tgtp->xmt_fcp_release));
12923 }
12924 /* fallthrough */
12925
12926 case FC_STATUS_INSUFF_BUF_NEED_BUF:
12832 hrq->RQ_no_posted_buf++; 12927 hrq->RQ_no_posted_buf++;
12833 /* Post more buffers if possible */ 12928 /* Post more buffers if possible */
12834 spin_lock_irqsave(&phba->hbalock, iflags); 12929 spin_lock_irqsave(&phba->hbalock, iflags);
@@ -12946,7 +13041,7 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
12946 while ((cqe = lpfc_sli4_cq_get(cq))) { 13041 while ((cqe = lpfc_sli4_cq_get(cq))) {
12947 workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe); 13042 workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe);
12948 if (!(++ecount % cq->entry_repost)) 13043 if (!(++ecount % cq->entry_repost))
12949 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); 13044 break;
12950 cq->CQ_mbox++; 13045 cq->CQ_mbox++;
12951 } 13046 }
12952 break; 13047 break;
@@ -12960,7 +13055,7 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
12960 workposted |= lpfc_sli4_sp_handle_cqe(phba, cq, 13055 workposted |= lpfc_sli4_sp_handle_cqe(phba, cq,
12961 cqe); 13056 cqe);
12962 if (!(++ecount % cq->entry_repost)) 13057 if (!(++ecount % cq->entry_repost))
12963 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); 13058 break;
12964 } 13059 }
12965 13060
12966 /* Track the max number of CQEs processed in 1 EQ */ 13061 /* Track the max number of CQEs processed in 1 EQ */
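Here, and in the fast-path and OAS loops further down, hitting the entry_repost mark now breaks out of the loop instead of releasing-without-arming and continuing: each handler invocation processes a bounded budget of completions and leaves the remainder for the next pass, capping time spent per interrupt. The control shape, schematically (types and helpers are stand-ins):

    struct cq;
    struct cqe;
    struct cqe *cq_get(struct cq *cq);
    void handle_cqe(struct cq *cq, struct cqe *cqe);
    void cq_ack(struct cq *cq, unsigned int consumed);  /* release + re-arm */

    /* Drain at most 'budget' completions per pass (budget ~ entry_repost). */
    static unsigned int poll_cq(struct cq *cq, unsigned int budget)
    {
            unsigned int ecount = 0;
            struct cqe *cqe;

            while ((cqe = cq_get(cq)) != NULL) {
                    handle_cqe(cq, cqe);
                    if (!(++ecount % budget))
                            break;          /* yield; the rest waits */
            }
            cq_ack(cq, ecount);
            return ecount;
    }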
@@ -13130,6 +13225,7 @@ lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13130 struct lpfc_queue *drq; 13225 struct lpfc_queue *drq;
13131 struct rqb_dmabuf *dma_buf; 13226 struct rqb_dmabuf *dma_buf;
13132 struct fc_frame_header *fc_hdr; 13227 struct fc_frame_header *fc_hdr;
13228 struct lpfc_nvmet_tgtport *tgtp;
13133 uint32_t status, rq_id; 13229 uint32_t status, rq_id;
13134 unsigned long iflags; 13230 unsigned long iflags;
13135 uint32_t fctl, idx; 13231 uint32_t fctl, idx;
@@ -13160,8 +13256,6 @@ lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13160 case FC_STATUS_RQ_BUF_LEN_EXCEEDED: 13256 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
13161 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13257 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13162 "6126 Receive Frame Truncated!!\n"); 13258 "6126 Receive Frame Truncated!!\n");
13163 hrq->RQ_buf_trunc++;
13164 break;
13165 case FC_STATUS_RQ_SUCCESS: 13259 case FC_STATUS_RQ_SUCCESS:
13166 lpfc_sli4_rq_release(hrq, drq); 13260 lpfc_sli4_rq_release(hrq, drq);
13167 spin_lock_irqsave(&phba->hbalock, iflags); 13261 spin_lock_irqsave(&phba->hbalock, iflags);
@@ -13173,6 +13267,7 @@ lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13173 } 13267 }
13174 spin_unlock_irqrestore(&phba->hbalock, iflags); 13268 spin_unlock_irqrestore(&phba->hbalock, iflags);
13175 hrq->RQ_rcv_buf++; 13269 hrq->RQ_rcv_buf++;
13270 hrq->RQ_buf_posted--;
13176 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt; 13271 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
13177 13272
13178 /* Just some basic sanity checks on FCP Command frame */ 13273 /* Just some basic sanity checks on FCP Command frame */
@@ -13195,14 +13290,23 @@ lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13195drop: 13290drop:
13196 lpfc_in_buf_free(phba, &dma_buf->dbuf); 13291 lpfc_in_buf_free(phba, &dma_buf->dbuf);
13197 break; 13292 break;
13198 case FC_STATUS_INSUFF_BUF_NEED_BUF:
13199 case FC_STATUS_INSUFF_BUF_FRM_DISC: 13293 case FC_STATUS_INSUFF_BUF_FRM_DISC:
13294 if (phba->nvmet_support) {
13295 tgtp = phba->targetport->private;
13296 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME,
13297 "6401 RQE Error x%x, posted %d err_cnt "
13298 "%d: %x %x %x\n",
13299 status, hrq->RQ_buf_posted,
13300 hrq->RQ_no_posted_buf,
13301 atomic_read(&tgtp->rcv_fcp_cmd_in),
13302 atomic_read(&tgtp->rcv_fcp_cmd_out),
13303 atomic_read(&tgtp->xmt_fcp_release));
13304 }
13305 /* fallthrough */
13306
13307 case FC_STATUS_INSUFF_BUF_NEED_BUF:
13200 hrq->RQ_no_posted_buf++; 13308 hrq->RQ_no_posted_buf++;
13201 /* Post more buffers if possible */ 13309 /* Post more buffers if possible */
13202 spin_lock_irqsave(&phba->hbalock, iflags);
13203 phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
13204 spin_unlock_irqrestore(&phba->hbalock, iflags);
13205 workposted = true;
13206 break; 13310 break;
13207 } 13311 }
13208out: 13312out:
@@ -13356,7 +13460,7 @@ process_cq:
13356 while ((cqe = lpfc_sli4_cq_get(cq))) { 13460 while ((cqe = lpfc_sli4_cq_get(cq))) {
13357 workposted |= lpfc_sli4_fp_handle_cqe(phba, cq, cqe); 13461 workposted |= lpfc_sli4_fp_handle_cqe(phba, cq, cqe);
13358 if (!(++ecount % cq->entry_repost)) 13462 if (!(++ecount % cq->entry_repost))
13359 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); 13463 break;
13360 } 13464 }
13361 13465
13362 /* Track the max number of CQEs processed in 1 EQ */ 13466 /* Track the max number of CQEs processed in 1 EQ */
@@ -13447,7 +13551,7 @@ lpfc_sli4_fof_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
13447 while ((cqe = lpfc_sli4_cq_get(cq))) { 13551 while ((cqe = lpfc_sli4_cq_get(cq))) {
13448 workposted |= lpfc_sli4_fp_handle_cqe(phba, cq, cqe); 13552 workposted |= lpfc_sli4_fp_handle_cqe(phba, cq, cqe);
13449 if (!(++ecount % cq->entry_repost)) 13553 if (!(++ecount % cq->entry_repost))
13450 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); 13554 break;
13451 } 13555 }
13452 13556
13453 /* Track the max number of CQEs processed in 1 EQ */ 13557 /* Track the max number of CQEs processed in 1 EQ */
@@ -13529,7 +13633,7 @@ lpfc_sli4_fof_intr_handler(int irq, void *dev_id)
13529 while ((eqe = lpfc_sli4_eq_get(eq))) { 13633 while ((eqe = lpfc_sli4_eq_get(eq))) {
13530 lpfc_sli4_fof_handle_eqe(phba, eqe); 13634 lpfc_sli4_fof_handle_eqe(phba, eqe);
13531 if (!(++ecount % eq->entry_repost)) 13635 if (!(++ecount % eq->entry_repost))
13532 lpfc_sli4_eq_release(eq, LPFC_QUEUE_NOARM); 13636 break;
13533 eq->EQ_processed++; 13637 eq->EQ_processed++;
13534 } 13638 }
13535 13639
@@ -13646,7 +13750,7 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
13646 13750
13647 lpfc_sli4_hba_handle_eqe(phba, eqe, hba_eqidx); 13751 lpfc_sli4_hba_handle_eqe(phba, eqe, hba_eqidx);
13648 if (!(++ecount % fpeq->entry_repost)) 13752 if (!(++ecount % fpeq->entry_repost))
13649 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM); 13753 break;
13650 fpeq->EQ_processed++; 13754 fpeq->EQ_processed++;
13651 } 13755 }
13652 13756
@@ -13827,17 +13931,10 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
13827 } 13931 }
13828 queue->entry_size = entry_size; 13932 queue->entry_size = entry_size;
13829 queue->entry_count = entry_count; 13933 queue->entry_count = entry_count;
13830
13831 /*
13832 * entry_repost is calculated based on the number of entries in the
13833 * queue. This works out except for RQs. If buffers are NOT initially
13834 * posted for every RQE, entry_repost should be adjusted accordingly.
13835 */
13836 queue->entry_repost = (entry_count >> 3);
13837 if (queue->entry_repost < LPFC_QUEUE_MIN_REPOST)
13838 queue->entry_repost = LPFC_QUEUE_MIN_REPOST;
13839 queue->phba = phba; 13934 queue->phba = phba;
13840 13935
13936 /* entry_repost will be set during q creation */
13937
13841 return queue; 13938 return queue;
13842out_fail: 13939out_fail:
13843 lpfc_sli4_queue_free(queue); 13940 lpfc_sli4_queue_free(queue);
@@ -14068,6 +14165,7 @@ lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
14068 status = -ENXIO; 14165 status = -ENXIO;
14069 eq->host_index = 0; 14166 eq->host_index = 0;
14070 eq->hba_index = 0; 14167 eq->hba_index = 0;
14168 eq->entry_repost = LPFC_EQ_REPOST;
14071 14169
14072 mempool_free(mbox, phba->mbox_mem_pool); 14170 mempool_free(mbox, phba->mbox_mem_pool);
14073 return status; 14171 return status;
@@ -14141,9 +14239,9 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
14141 default: 14239 default:
14142 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14240 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14143 "0361 Unsupported CQ count: " 14241 "0361 Unsupported CQ count: "
14144 "entry cnt %d sz %d pg cnt %d repost %d\n", 14242 "entry cnt %d sz %d pg cnt %d\n",
14145 cq->entry_count, cq->entry_size, 14243 cq->entry_count, cq->entry_size,
14146 cq->page_count, cq->entry_repost); 14244 cq->page_count);
14147 if (cq->entry_count < 256) { 14245 if (cq->entry_count < 256) {
14148 status = -EINVAL; 14246 status = -EINVAL;
14149 goto out; 14247 goto out;
@@ -14196,6 +14294,7 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
14196 cq->assoc_qid = eq->queue_id; 14294 cq->assoc_qid = eq->queue_id;
14197 cq->host_index = 0; 14295 cq->host_index = 0;
14198 cq->hba_index = 0; 14296 cq->hba_index = 0;
14297 cq->entry_repost = LPFC_CQ_REPOST;
14199 14298
14200out: 14299out:
14201 mempool_free(mbox, phba->mbox_mem_pool); 14300 mempool_free(mbox, phba->mbox_mem_pool);
@@ -14387,6 +14486,7 @@ lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
14387 cq->assoc_qid = eq->queue_id; 14486 cq->assoc_qid = eq->queue_id;
14388 cq->host_index = 0; 14487 cq->host_index = 0;
14389 cq->hba_index = 0; 14488 cq->hba_index = 0;
14489 cq->entry_repost = LPFC_CQ_REPOST;
14390 14490
14391 rc = 0; 14491 rc = 0;
14392 list_for_each_entry(dmabuf, &cq->page_list, list) { 14492 list_for_each_entry(dmabuf, &cq->page_list, list) {
@@ -14635,6 +14735,7 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
14635 mq->subtype = subtype; 14735 mq->subtype = subtype;
14636 mq->host_index = 0; 14736 mq->host_index = 0;
14637 mq->hba_index = 0; 14737 mq->hba_index = 0;
14738 mq->entry_repost = LPFC_MQ_REPOST;
14638 14739
14639 /* link the mq onto the parent cq child list */ 14740 /* link the mq onto the parent cq child list */
14640 list_add_tail(&mq->list, &cq->child_list); 14741 list_add_tail(&mq->list, &cq->child_list);
@@ -14860,34 +14961,6 @@ out:
14860} 14961}
14861 14962
14862/** 14963/**
14863 * lpfc_rq_adjust_repost - Adjust entry_repost for an RQ
14864 * @phba: HBA structure that indicates port to create a queue on.
14865 * @rq: The queue structure to use for the receive queue.
14866 * @qno: The associated HBQ number
14867 *
14868 *
14869 * For SLI4 we need to adjust the RQ repost value based on
14870 * the number of buffers that are initially posted to the RQ.
14871 */
14872void
14873lpfc_rq_adjust_repost(struct lpfc_hba *phba, struct lpfc_queue *rq, int qno)
14874{
14875 uint32_t cnt;
14876
14877 /* sanity check on queue memory */
14878 if (!rq)
14879 return;
14880 cnt = lpfc_hbq_defs[qno]->entry_count;
14881
14882 /* Recalc repost for RQs based on buffers initially posted */
14883 cnt = (cnt >> 3);
14884 if (cnt < LPFC_QUEUE_MIN_REPOST)
14885 cnt = LPFC_QUEUE_MIN_REPOST;
14886
14887 rq->entry_repost = cnt;
14888}
14889
14890/**
14891 * lpfc_rq_create - Create a Receive Queue on the HBA 14964 * lpfc_rq_create - Create a Receive Queue on the HBA
14892 * @phba: HBA structure that indicates port to create a queue on. 14965 * @phba: HBA structure that indicates port to create a queue on.
14893 * @hrq: The queue structure to use to create the header receive queue. 14966 * @hrq: The queue structure to use to create the header receive queue.
@@ -15072,6 +15145,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
15072 hrq->subtype = subtype; 15145 hrq->subtype = subtype;
15073 hrq->host_index = 0; 15146 hrq->host_index = 0;
15074 hrq->hba_index = 0; 15147 hrq->hba_index = 0;
15148 hrq->entry_repost = LPFC_RQ_REPOST;
15075 15149
15076 /* now create the data queue */ 15150 /* now create the data queue */
15077 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 15151 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
@@ -15082,7 +15156,12 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
15082 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) { 15156 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
15083 bf_set(lpfc_rq_context_rqe_count_1, 15157 bf_set(lpfc_rq_context_rqe_count_1,
15084 &rq_create->u.request.context, hrq->entry_count); 15158 &rq_create->u.request.context, hrq->entry_count);
15085 rq_create->u.request.context.buffer_size = LPFC_DATA_BUF_SIZE; 15159 if (subtype == LPFC_NVMET)
15160 rq_create->u.request.context.buffer_size =
15161 LPFC_NVMET_DATA_BUF_SIZE;
15162 else
15163 rq_create->u.request.context.buffer_size =
15164 LPFC_DATA_BUF_SIZE;
15086 bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context, 15165 bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context,
15087 LPFC_RQE_SIZE_8); 15166 LPFC_RQE_SIZE_8);
15088 bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context, 15167 bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context,
@@ -15119,8 +15198,14 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
15119 LPFC_RQ_RING_SIZE_4096); 15198 LPFC_RQ_RING_SIZE_4096);
15120 break; 15199 break;
15121 } 15200 }
15122 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context, 15201 if (subtype == LPFC_NVMET)
15123 LPFC_DATA_BUF_SIZE); 15202 bf_set(lpfc_rq_context_buf_size,
15203 &rq_create->u.request.context,
15204 LPFC_NVMET_DATA_BUF_SIZE);
15205 else
15206 bf_set(lpfc_rq_context_buf_size,
15207 &rq_create->u.request.context,
15208 LPFC_DATA_BUF_SIZE);
15124 } 15209 }
15125 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context, 15210 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
15126 cq->queue_id); 15211 cq->queue_id);
@@ -15153,6 +15238,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
15153 drq->subtype = subtype; 15238 drq->subtype = subtype;
15154 drq->host_index = 0; 15239 drq->host_index = 0;
15155 drq->hba_index = 0; 15240 drq->hba_index = 0;
15241 drq->entry_repost = LPFC_RQ_REPOST;
15156 15242
15157 /* link the header and data RQs onto the parent cq child list */ 15243 /* link the header and data RQs onto the parent cq child list */
15158 list_add_tail(&hrq->list, &cq->child_list); 15244 list_add_tail(&hrq->list, &cq->child_list);
@@ -15265,7 +15351,7 @@ lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
15265 cq->queue_id); 15351 cq->queue_id);
15266 bf_set(lpfc_rq_context_data_size, 15352 bf_set(lpfc_rq_context_data_size,
15267 &rq_create->u.request.context, 15353 &rq_create->u.request.context,
15268 LPFC_DATA_BUF_SIZE); 15354 LPFC_NVMET_DATA_BUF_SIZE);
15269 bf_set(lpfc_rq_context_hdr_size, 15355 bf_set(lpfc_rq_context_hdr_size,
15270 &rq_create->u.request.context, 15356 &rq_create->u.request.context,
15271 LPFC_HDR_BUF_SIZE); 15357 LPFC_HDR_BUF_SIZE);
@@ -15310,6 +15396,7 @@ lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
15310 hrq->subtype = subtype; 15396 hrq->subtype = subtype;
15311 hrq->host_index = 0; 15397 hrq->host_index = 0;
15312 hrq->hba_index = 0; 15398 hrq->hba_index = 0;
15399 hrq->entry_repost = LPFC_RQ_REPOST;
15313 15400
15314 drq->db_format = LPFC_DB_RING_FORMAT; 15401 drq->db_format = LPFC_DB_RING_FORMAT;
15315 drq->db_regaddr = phba->sli4_hba.RQDBregaddr; 15402 drq->db_regaddr = phba->sli4_hba.RQDBregaddr;
@@ -15318,6 +15405,7 @@ lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
15318 drq->subtype = subtype; 15405 drq->subtype = subtype;
15319 drq->host_index = 0; 15406 drq->host_index = 0;
15320 drq->hba_index = 0; 15407 drq->hba_index = 0;
15408 drq->entry_repost = LPFC_RQ_REPOST;
15321 15409
15322 list_add_tail(&hrq->list, &cq->child_list); 15410 list_add_tail(&hrq->list, &cq->child_list);
15323 list_add_tail(&drq->list, &cq->child_list); 15411 list_add_tail(&drq->list, &cq->child_list);
@@ -16058,6 +16146,8 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
16058 struct fc_vft_header *fc_vft_hdr; 16146 struct fc_vft_header *fc_vft_hdr;
16059 uint32_t *header = (uint32_t *) fc_hdr; 16147 uint32_t *header = (uint32_t *) fc_hdr;
16060 16148
16149#define FC_RCTL_MDS_DIAGS 0xF4
16150
16061 switch (fc_hdr->fh_r_ctl) { 16151 switch (fc_hdr->fh_r_ctl) {
16062 case FC_RCTL_DD_UNCAT: /* uncategorized information */ 16152 case FC_RCTL_DD_UNCAT: /* uncategorized information */
16063 case FC_RCTL_DD_SOL_DATA: /* solicited data */ 16153 case FC_RCTL_DD_SOL_DATA: /* solicited data */
@@ -16085,6 +16175,7 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
16085 case FC_RCTL_F_BSY: /* fabric busy to data frame */ 16175 case FC_RCTL_F_BSY: /* fabric busy to data frame */
16086 case FC_RCTL_F_BSYL: /* fabric busy to link control frame */ 16176 case FC_RCTL_F_BSYL: /* fabric busy to link control frame */
16087 case FC_RCTL_LCR: /* link credit reset */ 16177 case FC_RCTL_LCR: /* link credit reset */
16178 case FC_RCTL_MDS_DIAGS: /* MDS Diagnostics */
16088 case FC_RCTL_END: /* end */ 16179 case FC_RCTL_END: /* end */
16089 break; 16180 break;
16090 case FC_RCTL_VFTH: /* Virtual Fabric tagging Header */ 16181 case FC_RCTL_VFTH: /* Virtual Fabric tagging Header */
@@ -16094,12 +16185,16 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
16094 default: 16185 default:
16095 goto drop; 16186 goto drop;
16096 } 16187 }
16188
16189#define FC_TYPE_VENDOR_UNIQUE 0xFF
16190
16097 switch (fc_hdr->fh_type) { 16191 switch (fc_hdr->fh_type) {
16098 case FC_TYPE_BLS: 16192 case FC_TYPE_BLS:
16099 case FC_TYPE_ELS: 16193 case FC_TYPE_ELS:
16100 case FC_TYPE_FCP: 16194 case FC_TYPE_FCP:
16101 case FC_TYPE_CT: 16195 case FC_TYPE_CT:
16102 case FC_TYPE_NVME: 16196 case FC_TYPE_NVME:
16197 case FC_TYPE_VENDOR_UNIQUE:
16103 break; 16198 break;
16104 case FC_TYPE_IP: 16199 case FC_TYPE_IP:
16105 case FC_TYPE_ILS: 16200 case FC_TYPE_ILS:
@@ -16110,12 +16205,14 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
16110 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 16205 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
16111 "2538 Received frame rctl:%s (x%x), type:%s (x%x), " 16206 "2538 Received frame rctl:%s (x%x), type:%s (x%x), "
16112 "frame Data:%08x %08x %08x %08x %08x %08x %08x\n", 16207 "frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
16208 (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS) ? "MDS Diags" :
16113 lpfc_rctl_names[fc_hdr->fh_r_ctl], fc_hdr->fh_r_ctl, 16209 lpfc_rctl_names[fc_hdr->fh_r_ctl], fc_hdr->fh_r_ctl,
16114 lpfc_type_names[fc_hdr->fh_type], fc_hdr->fh_type, 16210 (fc_hdr->fh_type == FC_TYPE_VENDOR_UNIQUE) ?
16115 be32_to_cpu(header[0]), be32_to_cpu(header[1]), 16211 "Vendor Unique" : lpfc_type_names[fc_hdr->fh_type],
16116 be32_to_cpu(header[2]), be32_to_cpu(header[3]), 16212 fc_hdr->fh_type, be32_to_cpu(header[0]),
16117 be32_to_cpu(header[4]), be32_to_cpu(header[5]), 16213 be32_to_cpu(header[1]), be32_to_cpu(header[2]),
16118 be32_to_cpu(header[6])); 16214 be32_to_cpu(header[3]), be32_to_cpu(header[4]),
16215 be32_to_cpu(header[5]), be32_to_cpu(header[6]));
16119 return 0; 16216 return 0;
16120drop: 16217drop:
16121 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, 16218 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
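The reworked log statement exists because the two newly accepted wire values lie past the ends of the name tables: r_ctl 0xF4 (MDS diagnostics) and type 0xFF (vendor unique) pass the switches above, but indexing lpfc_rctl_names[] / lpfc_type_names[] with them would read out of bounds, so each lookup is guarded by a ternary. The same idiom in isolation (the explicit bound check is this sketch's addition, not the driver's):

    #define RCTL_MDS_DIAGS 0xF4     /* mirrors FC_RCTL_MDS_DIAGS */

    static const char * const rctl_names[] = {
            [0x00] = "uncategorized", [0x01] = "solicited data", /* ... */
    };

    static const char *rctl_name(unsigned char r_ctl)
    {
            if (r_ctl == RCTL_MDS_DIAGS)
                    return "MDS Diags";
            if (r_ctl < sizeof(rctl_names) / sizeof(rctl_names[0]) &&
                rctl_names[r_ctl])
                    return rctl_names[r_ctl];
            return "Unknown";
    }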
@@ -16921,6 +17018,96 @@ lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
16921 lpfc_sli_release_iocbq(phba, iocbq); 17018 lpfc_sli_release_iocbq(phba, iocbq);
16922} 17019}
16923 17020
17021static void
17022lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
17023 struct lpfc_iocbq *rspiocb)
17024{
17025 struct lpfc_dmabuf *pcmd = cmdiocb->context2;
17026
17027 if (pcmd && pcmd->virt)
17028 pci_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
17029 kfree(pcmd);
17030 lpfc_sli_release_iocbq(phba, cmdiocb);
17031}
17032
17033static void
17034lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
17035 struct hbq_dmabuf *dmabuf)
17036{
17037 struct fc_frame_header *fc_hdr;
17038 struct lpfc_hba *phba = vport->phba;
17039 struct lpfc_iocbq *iocbq = NULL;
17040 union lpfc_wqe *wqe;
17041 struct lpfc_dmabuf *pcmd = NULL;
17042 uint32_t frame_len;
17043 int rc;
17044
17045 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17046 frame_len = bf_get(lpfc_rcqe_length, &dmabuf->cq_event.cqe.rcqe_cmpl);
17047
17048 /* Send the received frame back */
17049 iocbq = lpfc_sli_get_iocbq(phba);
17050 if (!iocbq)
17051 goto exit;
17052
17053 /* Allocate buffer for command payload */
17054 pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
17055 if (pcmd)
17056 pcmd->virt = pci_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
17057 &pcmd->phys);
17058 if (!pcmd || !pcmd->virt)
17059 goto exit;
17060
17061 INIT_LIST_HEAD(&pcmd->list);
17062
17063 /* copy in the payload */
17064 memcpy(pcmd->virt, dmabuf->dbuf.virt, frame_len);
17065
17066 /* fill in BDE's for command */
17067 iocbq->iocb.un.xseq64.bdl.addrHigh = putPaddrHigh(pcmd->phys);
17068 iocbq->iocb.un.xseq64.bdl.addrLow = putPaddrLow(pcmd->phys);
17069 iocbq->iocb.un.xseq64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
17070 iocbq->iocb.un.xseq64.bdl.bdeSize = frame_len;
17071
17072 iocbq->context2 = pcmd;
17073 iocbq->vport = vport;
17074 iocbq->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK;
17075 iocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
17076
17077 /*
17078 * Setup rest of the iocb as though it were a WQE
17079 * Build the SEND_FRAME WQE
17080 */
17081 wqe = (union lpfc_wqe *)&iocbq->iocb;
17082
17083 wqe->send_frame.frame_len = frame_len;
17084 wqe->send_frame.fc_hdr_wd0 = be32_to_cpu(*((uint32_t *)fc_hdr));
17085 wqe->send_frame.fc_hdr_wd1 = be32_to_cpu(*((uint32_t *)fc_hdr + 1));
17086 wqe->send_frame.fc_hdr_wd2 = be32_to_cpu(*((uint32_t *)fc_hdr + 2));
17087 wqe->send_frame.fc_hdr_wd3 = be32_to_cpu(*((uint32_t *)fc_hdr + 3));
17088 wqe->send_frame.fc_hdr_wd4 = be32_to_cpu(*((uint32_t *)fc_hdr + 4));
17089 wqe->send_frame.fc_hdr_wd5 = be32_to_cpu(*((uint32_t *)fc_hdr + 5));
17090
17091 iocbq->iocb.ulpCommand = CMD_SEND_FRAME;
17092 iocbq->iocb.ulpLe = 1;
17093 iocbq->iocb_cmpl = lpfc_sli4_mds_loopback_cmpl;
17094 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocbq, 0);
17095 if (rc == IOCB_ERROR)
17096 goto exit;
17097
17098 lpfc_in_buf_free(phba, &dmabuf->dbuf);
17099 return;
17100
17101exit:
17102 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
17103 "2023 Unable to process MDS loopback frame\n");
17104 if (pcmd && pcmd->virt)
17105 pci_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
17106 kfree(pcmd);
17107 lpfc_sli_release_iocbq(phba, iocbq);
17108 lpfc_in_buf_free(phba, &dmabuf->dbuf);
17109}
17110
16924/** 17111/**
16925 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware 17112 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
16926 * @phba: Pointer to HBA context object. 17113 * @phba: Pointer to HBA context object.
@@ -16959,6 +17146,13 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
16959 fcfi = bf_get(lpfc_rcqe_fcf_id, 17146 fcfi = bf_get(lpfc_rcqe_fcf_id,
16960 &dmabuf->cq_event.cqe.rcqe_cmpl); 17147 &dmabuf->cq_event.cqe.rcqe_cmpl);
16961 17148
17149 if (fc_hdr->fh_r_ctl == 0xF4 && fc_hdr->fh_type == 0xFF) {
17150 vport = phba->pport;
17151 /* Handle MDS Loopback frames */
17152 lpfc_sli4_handle_mds_loopback(vport, dmabuf);
17153 return;
17154 }
17155
16962 /* d_id this frame is directed to */ 17156 /* d_id this frame is directed to */
16963 did = sli4_did_from_fc_hdr(fc_hdr); 17157 did = sli4_did_from_fc_hdr(fc_hdr);
16964 17158
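
The two hunks above work together: lpfc_sli4_handle_received_buffer() now recognizes MDS diagnostic frames (R_CTL 0xF4, TYPE 0xFF) and hands them to the new lpfc_sli4_handle_mds_loopback(), which copies the payload into a DMA buffer and echoes it back via a SEND_FRAME WQE. A minimal userspace sketch of the classification step — field layout and the two magic values come from the hunk, everything else is hypothetical:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Trimmed stand-in for the FC frame header; only the two fields the
 * dispatch test reads. */
struct fc_frame_header {
        uint8_t fh_r_ctl;
        uint8_t fh_type;
};

/* MDS diagnostic loopback frames carry R_CTL 0xF4 and TYPE 0xFF and are
 * echoed back instead of entering the normal unsolicited-frame path. */
static bool is_mds_loopback(const struct fc_frame_header *h)
{
        return h->fh_r_ctl == 0xF4 && h->fh_type == 0xFF;
}

int main(void)
{
        struct fc_frame_header h = { .fh_r_ctl = 0xF4, .fh_type = 0xFF };
        printf("loopback: %d\n", is_mds_loopback(&h));
        return 0;
}

The early return in the receive path keeps diagnostic traffic out of the sequence-reassembly machinery entirely, which is why the check sits before the D_ID lookup.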
@@ -17132,6 +17326,14 @@ lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
17132 "status x%x add_status x%x, mbx status x%x\n", 17326 "status x%x add_status x%x, mbx status x%x\n",
17133 shdr_status, shdr_add_status, rc); 17327 shdr_status, shdr_add_status, rc);
17134 rc = -ENXIO; 17328 rc = -ENXIO;
17329 } else {
17330 /*
17331 * The next_rpi stores the next logical module-64 rpi value used
17332 * to post physical rpis in subsequent rpi postings.
17333 */
17334 spin_lock_irq(&phba->hbalock);
17335 phba->sli4_hba.next_rpi = rpi_page->next_rpi;
17336 spin_unlock_irq(&phba->hbalock);
17135 } 17337 }
17136 return rc; 17338 return rc;
17137} 17339}
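
The else-branch added above records the next module-64 RPI under phba->hbalock so later postings start from the right offset. The shape is the ordinary lock-guarded publish of shared bookkeeping; a hedged userspace analogue using a pthread mutex in place of the HBA spinlock:

#include <pthread.h>

/* Hypothetical analogue of the hbalock-guarded update: next_rpi is read
 * by other posting paths, so the store happens under the same lock. */
static pthread_mutex_t hba_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int next_rpi;

static void publish_next_rpi(unsigned int rpi)
{
        pthread_mutex_lock(&hba_lock);
        next_rpi = rpi;
        pthread_mutex_unlock(&hba_lock);
}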
@@ -18712,7 +18914,7 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t ring_number,
18712 18914
18713 spin_lock_irqsave(&pring->ring_lock, iflags); 18915 spin_lock_irqsave(&pring->ring_lock, iflags);
18714 ctxp = pwqe->context2; 18916 ctxp = pwqe->context2;
18715 sglq = ctxp->rqb_buffer->sglq; 18917 sglq = ctxp->ctxbuf->sglq;
18716 if (pwqe->sli4_xritag == NO_XRI) { 18918 if (pwqe->sli4_xritag == NO_XRI) {
18717 pwqe->sli4_lxritag = sglq->sli4_lxritag; 18919 pwqe->sli4_lxritag = sglq->sli4_lxritag;
18718 pwqe->sli4_xritag = sglq->sli4_xritag; 18920 pwqe->sli4_xritag = sglq->sli4_xritag;
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index da46471337c8..cf863db27700 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -24,7 +24,6 @@
24#define LPFC_XRI_EXCH_BUSY_WAIT_TMO 10000 24#define LPFC_XRI_EXCH_BUSY_WAIT_TMO 10000
25#define LPFC_XRI_EXCH_BUSY_WAIT_T1 10 25#define LPFC_XRI_EXCH_BUSY_WAIT_T1 10
26#define LPFC_XRI_EXCH_BUSY_WAIT_T2 30000 26#define LPFC_XRI_EXCH_BUSY_WAIT_T2 30000
27#define LPFC_RELEASE_NOTIFICATION_INTERVAL 32
28#define LPFC_RPI_LOW_WATER_MARK 10 27#define LPFC_RPI_LOW_WATER_MARK 10
29 28
30#define LPFC_UNREG_FCF 1 29#define LPFC_UNREG_FCF 1
@@ -155,7 +154,11 @@ struct lpfc_queue {
155 uint32_t entry_count; /* Number of entries to support on the queue */ 154 uint32_t entry_count; /* Number of entries to support on the queue */
156 uint32_t entry_size; /* Size of each queue entry. */ 155 uint32_t entry_size; /* Size of each queue entry. */
157 uint32_t entry_repost; /* Count of entries before doorbell is rung */ 156 uint32_t entry_repost; /* Count of entries before doorbell is rung */
158#define LPFC_QUEUE_MIN_REPOST 8 157#define LPFC_EQ_REPOST 8
158#define LPFC_MQ_REPOST 8
159#define LPFC_CQ_REPOST 64
160#define LPFC_RQ_REPOST 64
161#define LPFC_RELEASE_NOTIFICATION_INTERVAL 32 /* For WQs */
159 uint32_t queue_id; /* Queue ID assigned by the hardware */ 162 uint32_t queue_id; /* Queue ID assigned by the hardware */
160 uint32_t assoc_qid; /* Queue ID associated with, for CQ/WQ/MQ */ 163 uint32_t assoc_qid; /* Queue ID associated with, for CQ/WQ/MQ */
161 uint32_t page_count; /* Number of pages allocated for this queue */ 164 uint32_t page_count; /* Number of pages allocated for this queue */
@@ -195,7 +198,7 @@ struct lpfc_queue {
195/* defines for RQ stats */ 198/* defines for RQ stats */
196#define RQ_no_posted_buf q_cnt_1 199#define RQ_no_posted_buf q_cnt_1
197#define RQ_no_buf_found q_cnt_2 200#define RQ_no_buf_found q_cnt_2
198#define RQ_buf_trunc q_cnt_3 201#define RQ_buf_posted q_cnt_3
199#define RQ_rcv_buf q_cnt_4 202#define RQ_rcv_buf q_cnt_4
200 203
201 uint64_t isr_timestamp; 204 uint64_t isr_timestamp;
@@ -617,12 +620,17 @@ struct lpfc_sli4_hba {
617 uint16_t scsi_xri_start; 620 uint16_t scsi_xri_start;
618 uint16_t els_xri_cnt; 621 uint16_t els_xri_cnt;
619 uint16_t nvmet_xri_cnt; 622 uint16_t nvmet_xri_cnt;
623 uint16_t nvmet_ctx_cnt;
624 uint16_t nvmet_io_wait_cnt;
625 uint16_t nvmet_io_wait_total;
620 struct list_head lpfc_els_sgl_list; 626 struct list_head lpfc_els_sgl_list;
621 struct list_head lpfc_abts_els_sgl_list; 627 struct list_head lpfc_abts_els_sgl_list;
622 struct list_head lpfc_nvmet_sgl_list; 628 struct list_head lpfc_nvmet_sgl_list;
623 struct list_head lpfc_abts_nvmet_ctx_list; 629 struct list_head lpfc_abts_nvmet_ctx_list;
624 struct list_head lpfc_abts_scsi_buf_list; 630 struct list_head lpfc_abts_scsi_buf_list;
625 struct list_head lpfc_abts_nvme_buf_list; 631 struct list_head lpfc_abts_nvme_buf_list;
632 struct list_head lpfc_nvmet_ctx_list;
633 struct list_head lpfc_nvmet_io_wait_list;
626 struct lpfc_sglq **lpfc_sglq_active_list; 634 struct lpfc_sglq **lpfc_sglq_active_list;
627 struct list_head lpfc_rpi_hdr_list; 635 struct list_head lpfc_rpi_hdr_list;
628 unsigned long *rpi_bmask; 636 unsigned long *rpi_bmask;
@@ -654,6 +662,7 @@ struct lpfc_sli4_hba {
654 spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */ 662 spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */
655 spinlock_t sgl_list_lock; /* list of aborted els IOs */ 663 spinlock_t sgl_list_lock; /* list of aborted els IOs */
656 spinlock_t nvmet_io_lock; 664 spinlock_t nvmet_io_lock;
665 spinlock_t nvmet_io_wait_lock; /* IOs waiting for ctx resources */
657 uint32_t physical_port; 666 uint32_t physical_port;
658 667
659 /* CPU to vector mapping information */ 668 /* CPU to vector mapping information */
@@ -661,8 +670,6 @@ struct lpfc_sli4_hba {
661 uint16_t num_online_cpu; 670 uint16_t num_online_cpu;
662 uint16_t num_present_cpu; 671 uint16_t num_present_cpu;
663 uint16_t curr_disp_cpu; 672 uint16_t curr_disp_cpu;
664
665 uint16_t nvmet_mrq_post_idx;
666}; 673};
667 674
668enum lpfc_sge_type { 675enum lpfc_sge_type {
@@ -698,6 +705,7 @@ struct lpfc_rpi_hdr {
698 struct lpfc_dmabuf *dmabuf; 705 struct lpfc_dmabuf *dmabuf;
699 uint32_t page_count; 706 uint32_t page_count;
700 uint32_t start_rpi; 707 uint32_t start_rpi;
708 uint16_t next_rpi;
701}; 709};
702 710
703struct lpfc_rsrc_blks { 711struct lpfc_rsrc_blks {
@@ -762,7 +770,6 @@ int lpfc_rq_create(struct lpfc_hba *, struct lpfc_queue *,
762int lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp, 770int lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
763 struct lpfc_queue **drqp, struct lpfc_queue **cqp, 771 struct lpfc_queue **drqp, struct lpfc_queue **cqp,
764 uint32_t subtype); 772 uint32_t subtype);
765void lpfc_rq_adjust_repost(struct lpfc_hba *, struct lpfc_queue *, int);
766int lpfc_eq_destroy(struct lpfc_hba *, struct lpfc_queue *); 773int lpfc_eq_destroy(struct lpfc_hba *, struct lpfc_queue *);
767int lpfc_cq_destroy(struct lpfc_hba *, struct lpfc_queue *); 774int lpfc_cq_destroy(struct lpfc_hba *, struct lpfc_queue *);
768int lpfc_mq_destroy(struct lpfc_hba *, struct lpfc_queue *); 775int lpfc_mq_destroy(struct lpfc_hba *, struct lpfc_queue *);
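
The header change above replaces the single LPFC_QUEUE_MIN_REPOST with per-type doorbell thresholds: EQs and MQs ring after 8 consumed entries, CQs and RQs after 64, and WQs keep the 32-entry release-notification interval. A sketch of the selection — the constants mirror the hunk, while the enum and helper are hypothetical:

/* Hypothetical mapping from queue type to the entry count consumed
 * before the doorbell is rung; values taken from the defines above. */
enum q_type { Q_EQ, Q_MQ, Q_CQ, Q_RQ, Q_WQ };

static unsigned int repost_threshold(enum q_type t)
{
        switch (t) {
        case Q_EQ:
        case Q_MQ:
                return 8;
        case Q_CQ:
        case Q_RQ:
                return 64;
        case Q_WQ:
        default:
                return 32;      /* release-notification interval */
        }
}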
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 1c26dc67151b..c2653244221c 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
20 * included with this package. * 20 * included with this package. *
21 *******************************************************************/ 21 *******************************************************************/
22 22
23#define LPFC_DRIVER_VERSION "11.2.0.12" 23#define LPFC_DRIVER_VERSION "11.2.0.14"
24#define LPFC_DRIVER_NAME "lpfc" 24#define LPFC_DRIVER_NAME "lpfc"
25 25
26/* Used for SLI 2/3 */ 26/* Used for SLI 2/3 */
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index a4aadf5f4dc6..1cc814f1505a 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -3770,9 +3770,6 @@ static long pmcraid_ioctl_passthrough(
3770 pmcraid_err("couldn't build passthrough ioadls\n"); 3770 pmcraid_err("couldn't build passthrough ioadls\n");
3771 goto out_free_cmd; 3771 goto out_free_cmd;
3772 } 3772 }
3773 } else if (request_size < 0) {
3774 rc = -EINVAL;
3775 goto out_free_cmd;
3776 } 3773 }
3777 3774
3778 /* If data is being written into the device, copy the data from user 3775 /* If data is being written into the device, copy the data from user
diff --git a/drivers/scsi/qedf/qedf.h b/drivers/scsi/qedf/qedf.h
index 40aeb6bb96a2..07ee88200e91 100644
--- a/drivers/scsi/qedf/qedf.h
+++ b/drivers/scsi/qedf/qedf.h
@@ -259,7 +259,7 @@ struct qedf_io_log {
259 uint16_t task_id; 259 uint16_t task_id;
260 uint32_t port_id; /* Remote port fabric ID */ 260 uint32_t port_id; /* Remote port fabric ID */
261 int lun; 261 int lun;
262 char op; /* SCSI CDB */ 262 unsigned char op; /* SCSI CDB */
263 uint8_t lba[4]; 263 uint8_t lba[4];
264 unsigned int bufflen; /* SCSI buffer length */ 264 unsigned int bufflen; /* SCSI buffer length */
265 unsigned int sg_count; /* Number of SG elements */ 265 unsigned int sg_count; /* Number of SG elements */
diff --git a/drivers/scsi/qedf/qedf_els.c b/drivers/scsi/qedf/qedf_els.c
index c505d41f6dc8..90627033bde6 100644
--- a/drivers/scsi/qedf/qedf_els.c
+++ b/drivers/scsi/qedf/qedf_els.c
@@ -109,7 +109,7 @@ retry_els:
109 did = fcport->rdata->ids.port_id; 109 did = fcport->rdata->ids.port_id;
110 sid = fcport->sid; 110 sid = fcport->sid;
111 111
112 __fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS_REQ, sid, did, 112 __fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS_REQ, did, sid,
113 FC_TYPE_ELS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ | 113 FC_TYPE_ELS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
114 FC_FC_SEQ_INIT, 0); 114 FC_FC_SEQ_INIT, 0);
115 115
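
The one-line qedf_els.c fix is an argument-transposition bug: libfc's __fc_fill_fc_hdr() takes the destination ID before the source ID, so passing (sid, did) built ELS requests addressed backwards. One defensive option — purely illustrative, not what the driver does — is to route the IDs through named fields so a call site cannot swap them silently:

#include <stdint.h>

/* Hypothetical wrapper: named fields make a transposed call impossible
 * to write by accident, unlike positional (did, sid) arguments. */
struct fc_addr {
        uint32_t did;   /* destination port id */
        uint32_t sid;   /* source port id */
};

static void fill_fc_hdr(uint32_t *words, struct fc_addr a)
{
        /* FC header word 0 carries the destination id in its low 24 bits,
         * word 1 the source id. */
        words[0] = a.did & 0xffffff;
        words[1] = a.sid & 0xffffff;
}

A call then reads fill_fc_hdr(hdr, (struct fc_addr){ .did = did, .sid = sid }); and survives review even when the local variable names drift.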
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index cceddd995a4b..a5c97342fd5d 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -2895,7 +2895,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
2895 slowpath_params.drv_minor = QEDF_DRIVER_MINOR_VER; 2895 slowpath_params.drv_minor = QEDF_DRIVER_MINOR_VER;
2896 slowpath_params.drv_rev = QEDF_DRIVER_REV_VER; 2896 slowpath_params.drv_rev = QEDF_DRIVER_REV_VER;
2897 slowpath_params.drv_eng = QEDF_DRIVER_ENG_VER; 2897 slowpath_params.drv_eng = QEDF_DRIVER_ENG_VER;
2898 memcpy(slowpath_params.name, "qedf", QED_DRV_VER_STR_SIZE); 2898 strncpy(slowpath_params.name, "qedf", QED_DRV_VER_STR_SIZE);
2899 rc = qed_ops->common->slowpath_start(qedf->cdev, &slowpath_params); 2899 rc = qed_ops->common->slowpath_start(qedf->cdev, &slowpath_params);
2900 if (rc) { 2900 if (rc) {
2901 QEDF_ERR(&(qedf->dbg_ctx), "Cannot start slowpath.\n"); 2901 QEDF_ERR(&(qedf->dbg_ctx), "Cannot start slowpath.\n");
diff --git a/drivers/scsi/qedi/qedi.h b/drivers/scsi/qedi/qedi.h
index 5ca3e8c28a3f..32632c9b2276 100644
--- a/drivers/scsi/qedi/qedi.h
+++ b/drivers/scsi/qedi/qedi.h
@@ -38,7 +38,7 @@ struct qedi_endpoint;
38#define QEDI_MAX_ISCSI_TASK 4096 38#define QEDI_MAX_ISCSI_TASK 4096
39#define QEDI_MAX_TASK_NUM 0x0FFF 39#define QEDI_MAX_TASK_NUM 0x0FFF
40#define QEDI_MAX_ISCSI_CONNS_PER_HBA 1024 40#define QEDI_MAX_ISCSI_CONNS_PER_HBA 1024
41#define QEDI_ISCSI_MAX_BDS_PER_CMD 256 /* Firmware max BDs is 256 */ 41#define QEDI_ISCSI_MAX_BDS_PER_CMD 255 /* Firmware max BDs is 255 */
42#define MAX_OUSTANDING_TASKS_PER_CON 1024 42#define MAX_OUSTANDING_TASKS_PER_CON 1024
43 43
44#define QEDI_MAX_BD_LEN 0xffff 44#define QEDI_MAX_BD_LEN 0xffff
@@ -63,6 +63,7 @@ struct qedi_endpoint;
63#define QEDI_PAGE_MASK (~((QEDI_PAGE_SIZE) - 1)) 63#define QEDI_PAGE_MASK (~((QEDI_PAGE_SIZE) - 1))
64 64
65#define QEDI_PAGE_SIZE 4096 65#define QEDI_PAGE_SIZE 4096
66#define QEDI_HW_DMA_BOUNDARY 0xfff
66#define QEDI_PATH_HANDLE 0xFE0000000UL 67#define QEDI_PATH_HANDLE 0xFE0000000UL
67 68
68struct qedi_uio_ctrl { 69struct qedi_uio_ctrl {
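
QEDI_HW_DMA_BOUNDARY of 0xfff, wired into the host template as .dma_boundary in the qedi_iscsi.c hunk below, tells the block layer that no DMA segment may cross a 4 KiB boundary. The mask encodes the check that a segment's first and last bytes fall in the same boundary-aligned window; a self-contained sketch:

#include <stdbool.h>
#include <stdint.h>

/* A segment honours a dma_boundary mask when its first and last bytes
 * share the same boundary-aligned window (mask = window size - 1). */
static bool segment_within_boundary(uint64_t addr, uint64_t len,
                                    uint64_t boundary_mask)
{
        return (addr & ~boundary_mask) ==
               ((addr + len - 1) & ~boundary_mask);
}

/* e.g. segment_within_boundary(0xff0, 0x20, 0xfff) is false: that
 * segment would straddle the 4 KiB line at 0x1000. */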
diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c
index d6978cbc56f0..507512cc478b 100644
--- a/drivers/scsi/qedi/qedi_fw.c
+++ b/drivers/scsi/qedi/qedi_fw.c
@@ -870,7 +870,6 @@ static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi,
870 QEDI_ERR(&qedi->dbg_ctx, 870 QEDI_ERR(&qedi->dbg_ctx,
871 "Delayed or untracked cleanup response, itt=0x%x, tid=0x%x, cid=0x%x, task=%p\n", 871 "Delayed or untracked cleanup response, itt=0x%x, tid=0x%x, cid=0x%x, task=%p\n",
872 protoitt, cqe->itid, qedi_conn->iscsi_conn_id, task); 872 protoitt, cqe->itid, qedi_conn->iscsi_conn_id, task);
873 WARN_ON(1);
874 } 873 }
875} 874}
876 875
@@ -1494,6 +1493,8 @@ static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
1494 tmf_hdr = (struct iscsi_tm *)mtask->hdr; 1493 tmf_hdr = (struct iscsi_tm *)mtask->hdr;
1495 qedi_cmd = (struct qedi_cmd *)mtask->dd_data; 1494 qedi_cmd = (struct qedi_cmd *)mtask->dd_data;
1496 ep = qedi_conn->ep; 1495 ep = qedi_conn->ep;
1496 if (!ep)
1497 return -ENODEV;
1497 1498
1498 tid = qedi_get_task_idx(qedi); 1499 tid = qedi_get_task_idx(qedi);
1499 if (tid == -1) 1500 if (tid == -1)
diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
index 3548d46f9b27..87f0af358b33 100644
--- a/drivers/scsi/qedi/qedi_iscsi.c
+++ b/drivers/scsi/qedi/qedi_iscsi.c
@@ -59,6 +59,7 @@ struct scsi_host_template qedi_host_template = {
59 .this_id = -1, 59 .this_id = -1,
60 .sg_tablesize = QEDI_ISCSI_MAX_BDS_PER_CMD, 60 .sg_tablesize = QEDI_ISCSI_MAX_BDS_PER_CMD,
61 .max_sectors = 0xffff, 61 .max_sectors = 0xffff,
62 .dma_boundary = QEDI_HW_DMA_BOUNDARY,
62 .cmd_per_lun = 128, 63 .cmd_per_lun = 128,
63 .use_clustering = ENABLE_CLUSTERING, 64 .use_clustering = ENABLE_CLUSTERING,
64 .shost_attrs = qedi_shost_attrs, 65 .shost_attrs = qedi_shost_attrs,
@@ -1223,8 +1224,12 @@ static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data)
1223 1224
1224 iscsi_cid = (u32)path_data->handle; 1225 iscsi_cid = (u32)path_data->handle;
1225 qedi_ep = qedi->ep_tbl[iscsi_cid]; 1226 qedi_ep = qedi->ep_tbl[iscsi_cid];
1226 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, 1227 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
1227 "iscsi_cid=0x%x, qedi_ep=%p\n", iscsi_cid, qedi_ep); 1228 "iscsi_cid=0x%x, qedi_ep=%p\n", iscsi_cid, qedi_ep);
1229 if (!qedi_ep) {
1230 ret = -EINVAL;
1231 goto set_path_exit;
1232 }
1228 1233
1229 if (!is_valid_ether_addr(&path_data->mac_addr[0])) { 1234 if (!is_valid_ether_addr(&path_data->mac_addr[0])) {
1230 QEDI_NOTICE(&qedi->dbg_ctx, "dst mac NOT VALID\n"); 1235 QEDI_NOTICE(&qedi->dbg_ctx, "dst mac NOT VALID\n");
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index 92775a8b74b1..879d3b7462f9 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -151,6 +151,11 @@ static int qedi_uio_close(struct uio_info *uinfo, struct inode *inode)
151 151
152static void __qedi_free_uio_rings(struct qedi_uio_dev *udev) 152static void __qedi_free_uio_rings(struct qedi_uio_dev *udev)
153{ 153{
154 if (udev->uctrl) {
155 free_page((unsigned long)udev->uctrl);
156 udev->uctrl = NULL;
157 }
158
154 if (udev->ll2_ring) { 159 if (udev->ll2_ring) {
155 free_page((unsigned long)udev->ll2_ring); 160 free_page((unsigned long)udev->ll2_ring);
156 udev->ll2_ring = NULL; 161 udev->ll2_ring = NULL;
@@ -169,7 +174,6 @@ static void __qedi_free_uio(struct qedi_uio_dev *udev)
169 __qedi_free_uio_rings(udev); 174 __qedi_free_uio_rings(udev);
170 175
171 pci_dev_put(udev->pdev); 176 pci_dev_put(udev->pdev);
172 kfree(udev->uctrl);
173 kfree(udev); 177 kfree(udev);
174} 178}
175 179
@@ -208,6 +212,11 @@ static int __qedi_alloc_uio_rings(struct qedi_uio_dev *udev)
208 if (udev->ll2_ring || udev->ll2_buf) 212 if (udev->ll2_ring || udev->ll2_buf)
209 return rc; 213 return rc;
210 214
215 /* Memory for control area. */
216 udev->uctrl = (void *)get_zeroed_page(GFP_KERNEL);
217 if (!udev->uctrl)
218 return -ENOMEM;
219
211 /* Allocating memory for LL2 ring */ 220 /* Allocating memory for LL2 ring */
212 udev->ll2_ring_size = QEDI_PAGE_SIZE; 221 udev->ll2_ring_size = QEDI_PAGE_SIZE;
213 udev->ll2_ring = (void *)get_zeroed_page(GFP_KERNEL | __GFP_COMP); 222 udev->ll2_ring = (void *)get_zeroed_page(GFP_KERNEL | __GFP_COMP);
@@ -237,7 +246,6 @@ exit_alloc_ring:
237static int qedi_alloc_uio_rings(struct qedi_ctx *qedi) 246static int qedi_alloc_uio_rings(struct qedi_ctx *qedi)
238{ 247{
239 struct qedi_uio_dev *udev = NULL; 248 struct qedi_uio_dev *udev = NULL;
240 struct qedi_uio_ctrl *uctrl = NULL;
241 int rc = 0; 249 int rc = 0;
242 250
243 list_for_each_entry(udev, &qedi_udev_list, list) { 251 list_for_each_entry(udev, &qedi_udev_list, list) {
@@ -258,21 +266,14 @@ static int qedi_alloc_uio_rings(struct qedi_ctx *qedi)
258 goto err_udev; 266 goto err_udev;
259 } 267 }
260 268
261 uctrl = kzalloc(sizeof(*uctrl), GFP_KERNEL);
262 if (!uctrl) {
263 rc = -ENOMEM;
264 goto err_uctrl;
265 }
266
267 udev->uio_dev = -1; 269 udev->uio_dev = -1;
268 270
269 udev->qedi = qedi; 271 udev->qedi = qedi;
270 udev->pdev = qedi->pdev; 272 udev->pdev = qedi->pdev;
271 udev->uctrl = uctrl;
272 273
273 rc = __qedi_alloc_uio_rings(udev); 274 rc = __qedi_alloc_uio_rings(udev);
274 if (rc) 275 if (rc)
275 goto err_uio_rings; 276 goto err_uctrl;
276 277
277 list_add(&udev->list, &qedi_udev_list); 278 list_add(&udev->list, &qedi_udev_list);
278 279
@@ -283,8 +284,6 @@ static int qedi_alloc_uio_rings(struct qedi_ctx *qedi)
283 udev->rx_pkt = udev->ll2_buf + LL2_SINGLE_BUF_SIZE; 284 udev->rx_pkt = udev->ll2_buf + LL2_SINGLE_BUF_SIZE;
284 return 0; 285 return 0;
285 286
286 err_uio_rings:
287 kfree(uctrl);
288 err_uctrl: 287 err_uctrl:
289 kfree(udev); 288 kfree(udev);
290 err_udev: 289 err_udev:
@@ -828,6 +827,8 @@ static int qedi_set_iscsi_pf_param(struct qedi_ctx *qedi)
828 qedi->pf_params.iscsi_pf_params.num_uhq_pages_in_ring = num_sq_pages; 827 qedi->pf_params.iscsi_pf_params.num_uhq_pages_in_ring = num_sq_pages;
829 qedi->pf_params.iscsi_pf_params.num_queues = qedi->num_queues; 828 qedi->pf_params.iscsi_pf_params.num_queues = qedi->num_queues;
830 qedi->pf_params.iscsi_pf_params.debug_mode = qedi_fw_debug; 829 qedi->pf_params.iscsi_pf_params.debug_mode = qedi_fw_debug;
830 qedi->pf_params.iscsi_pf_params.two_msl_timer = 4000;
831 qedi->pf_params.iscsi_pf_params.max_fin_rt = 2;
831 832
832 for (log_page_size = 0 ; log_page_size < 32 ; log_page_size++) { 833 for (log_page_size = 0 ; log_page_size < 32 ; log_page_size++) {
833 if ((1 << log_page_size) == PAGE_SIZE) 834 if ((1 << log_page_size) == PAGE_SIZE)
@@ -1498,11 +1499,9 @@ err_idx:
1498 1499
1499void qedi_clear_task_idx(struct qedi_ctx *qedi, int idx) 1500void qedi_clear_task_idx(struct qedi_ctx *qedi, int idx)
1500{ 1501{
1501 if (!test_and_clear_bit(idx, qedi->task_idx_map)) { 1502 if (!test_and_clear_bit(idx, qedi->task_idx_map))
1502 QEDI_ERR(&qedi->dbg_ctx, 1503 QEDI_ERR(&qedi->dbg_ctx,
1503 "FW task context, already cleared, tid=0x%x\n", idx); 1504 "FW task context, already cleared, tid=0x%x\n", idx);
1504 WARN_ON(1);
1505 }
1506} 1505}
1507 1506
1508void qedi_update_itt_map(struct qedi_ctx *qedi, u32 tid, u32 proto_itt, 1507void qedi_update_itt_map(struct qedi_ctx *qedi, u32 tid, u32 proto_itt,
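
The qedi_main.c hunks move the uctrl control area from kzalloc() to get_zeroed_page() — presumably so the UIO layer can hand it to userspace page-aligned, though the diff does not say — and free it in __qedi_free_uio_rings() with the pointer reset afterwards. Two idioms are at work: an allocation must be released by its matching releaser (free_page() for page allocations, never kfree()), and NULLing after free makes the teardown idempotent. A userspace analogue of the second idiom:

#include <stdlib.h>

/* Free-and-NULL keeps a teardown path safe to run twice: the second
 * call sees NULL and does nothing. */
#define FREE_AND_NULL(p) do { free(p); (p) = NULL; } while (0)

struct rings {
        void *uctrl;
        void *ll2_ring;
};

static void free_rings(struct rings *r)
{
        FREE_AND_NULL(r->uctrl);
        FREE_AND_NULL(r->ll2_ring);
}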
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 16d1cd50feed..ca3420de5a01 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -730,6 +730,8 @@ qla2x00_process_loopback(struct bsg_job *bsg_job)
730 return -EIO; 730 return -EIO;
731 } 731 }
732 732
733 memset(&elreq, 0, sizeof(elreq));
734
733 elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev, 735 elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev,
734 bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, 736 bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt,
735 DMA_TO_DEVICE); 737 DMA_TO_DEVICE);
@@ -795,10 +797,9 @@ qla2x00_process_loopback(struct bsg_job *bsg_job)
795 797
796 if (atomic_read(&vha->loop_state) == LOOP_READY && 798 if (atomic_read(&vha->loop_state) == LOOP_READY &&
797 (ha->current_topology == ISP_CFG_F || 799 (ha->current_topology == ISP_CFG_F ||
798 ((IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) && 800 (le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE &&
799 le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE 801 req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
800 && req_data_len == MAX_ELS_FRAME_PAYLOAD)) && 802 elreq.options == EXTERNAL_LOOPBACK) {
801 elreq.options == EXTERNAL_LOOPBACK) {
802 type = "FC_BSG_HST_VENDOR_ECHO_DIAG"; 803 type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
803 ql_dbg(ql_dbg_user, vha, 0x701e, 804 ql_dbg(ql_dbg_user, vha, 0x701e,
804 "BSG request type: %s.\n", type); 805 "BSG request type: %s.\n", type);
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 51b4179469d1..88748a6ab73f 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -1131,7 +1131,7 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
1131 1131
1132 /* Mailbox registers. */ 1132 /* Mailbox registers. */
1133 mbx_reg = &reg->mailbox0; 1133 mbx_reg = &reg->mailbox0;
1134 for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, dmp_reg++) 1134 for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++)
1135 fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg)); 1135 fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg));
1136 1136
1137 /* Transfer sequence registers. */ 1137 /* Transfer sequence registers. */
@@ -2090,7 +2090,7 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
2090 2090
2091 /* Mailbox registers. */ 2091 /* Mailbox registers. */
2092 mbx_reg = &reg->mailbox0; 2092 mbx_reg = &reg->mailbox0;
2093 for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, dmp_reg++) 2093 for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++)
2094 fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg)); 2094 fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg));
2095 2095
2096 /* Transfer sequence registers. */ 2096 /* Transfer sequence registers. */
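
Both qla_dbg.c hunks fix the same copy-paste bug: the loop read through mbx_reg but incremented dmp_reg, so every iteration captured mailbox register 0 into the firmware dump. The corrected shape, reduced to a standalone sketch:

#include <stdint.h>

/* The pointer that is dereferenced must be the one that advances;
 * bumping an unrelated pointer re-reads the first register forever. */
static void dump_mailbox(const volatile uint16_t *mbx_reg,
                         uint16_t *out, int count)
{
        for (int i = 0; i < count; i++, mbx_reg++)
                out[i] = *mbx_reg;
}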
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index ae119018dfaa..eddbc1218a39 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -3425,6 +3425,7 @@ struct qla_hw_data {
3425 uint8_t max_req_queues; 3425 uint8_t max_req_queues;
3426 uint8_t max_rsp_queues; 3426 uint8_t max_rsp_queues;
3427 uint8_t max_qpairs; 3427 uint8_t max_qpairs;
3428 uint8_t num_qpairs;
3428 struct qla_qpair *base_qpair; 3429 struct qla_qpair *base_qpair;
3429 struct qla_npiv_entry *npiv_info; 3430 struct qla_npiv_entry *npiv_info;
3430 uint16_t nvram_npiv_size; 3431 uint16_t nvram_npiv_size;
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 034743309ada..0391fc317003 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -7543,12 +7543,13 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos, int v
7543 /* Assign available que pair id */ 7543 /* Assign available que pair id */
7544 mutex_lock(&ha->mq_lock); 7544 mutex_lock(&ha->mq_lock);
7545 qpair_id = find_first_zero_bit(ha->qpair_qid_map, ha->max_qpairs); 7545 qpair_id = find_first_zero_bit(ha->qpair_qid_map, ha->max_qpairs);
7546 if (qpair_id >= ha->max_qpairs) { 7546 if (ha->num_qpairs >= ha->max_qpairs) {
7547 mutex_unlock(&ha->mq_lock); 7547 mutex_unlock(&ha->mq_lock);
7548 ql_log(ql_log_warn, vha, 0x0183, 7548 ql_log(ql_log_warn, vha, 0x0183,
7549 "No resources to create additional q pair.\n"); 7549 "No resources to create additional q pair.\n");
7550 goto fail_qid_map; 7550 goto fail_qid_map;
7551 } 7551 }
7552 ha->num_qpairs++;
7552 set_bit(qpair_id, ha->qpair_qid_map); 7553 set_bit(qpair_id, ha->qpair_qid_map);
7553 ha->queue_pair_map[qpair_id] = qpair; 7554 ha->queue_pair_map[qpair_id] = qpair;
7554 qpair->id = qpair_id; 7555 qpair->id = qpair_id;
@@ -7635,6 +7636,7 @@ fail_rsp:
7635fail_msix: 7636fail_msix:
7636 ha->queue_pair_map[qpair_id] = NULL; 7637 ha->queue_pair_map[qpair_id] = NULL;
7637 clear_bit(qpair_id, ha->qpair_qid_map); 7638 clear_bit(qpair_id, ha->qpair_qid_map);
7639 ha->num_qpairs--;
7638 mutex_unlock(&ha->mq_lock); 7640 mutex_unlock(&ha->mq_lock);
7639fail_qid_map: 7641fail_qid_map:
7640 kfree(qpair); 7642 kfree(qpair);
@@ -7660,6 +7662,7 @@ int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair)
7660 mutex_lock(&ha->mq_lock); 7662 mutex_lock(&ha->mq_lock);
7661 ha->queue_pair_map[qpair->id] = NULL; 7663 ha->queue_pair_map[qpair->id] = NULL;
7662 clear_bit(qpair->id, ha->qpair_qid_map); 7664 clear_bit(qpair->id, ha->qpair_qid_map);
7665 ha->num_qpairs--;
7663 list_del(&qpair->qp_list_elem); 7666 list_del(&qpair->qp_list_elem);
7664 if (list_empty(&vha->qp_list)) 7667 if (list_empty(&vha->qp_list))
7665 vha->flags.qpairs_available = 0; 7668 vha->flags.qpairs_available = 0;
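
The qla_init.c change makes ha->num_qpairs the authoritative in-use count: creation increments it under mq_lock, and both the MSI-X failure path and qla2xxx_delete_qpair() decrement it, so pool exhaustion is judged by the count rather than by the bitmap scan alone. A compact sketch of bitmap slot allocation paired with a counter (locking omitted for brevity; the driver holds mq_lock throughout):

#include <stdbool.h>

#define MAX_QPAIRS 16

static unsigned long qid_map;   /* one bit per slot */
static int num_qpairs;          /* authoritative in-use count */

/* Returns the allocated slot id, or -1 when the pool is exhausted. */
static int alloc_qpair_id(void)
{
        if (num_qpairs >= MAX_QPAIRS)
                return -1;
        for (int id = 0; id < MAX_QPAIRS; id++) {
                if (!(qid_map & (1UL << id))) {
                        qid_map |= 1UL << id;
                        num_qpairs++;
                        return id;
                }
        }
        return -1;
}

static void free_qpair_id(int id)
{
        qid_map &= ~(1UL << id);
        num_qpairs--;
}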
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 66df6cec59da..c61a6a871c8e 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -129,28 +129,16 @@ qla2x00_clear_loop_id(fc_port_t *fcport) {
129} 129}
130 130
131static inline void 131static inline void
132qla2x00_clean_dsd_pool(struct qla_hw_data *ha, srb_t *sp, 132qla2x00_clean_dsd_pool(struct qla_hw_data *ha, struct crc_context *ctx)
133 struct qla_tgt_cmd *tc)
134{ 133{
135 struct dsd_dma *dsd_ptr, *tdsd_ptr; 134 struct dsd_dma *dsd, *tdsd;
136 struct crc_context *ctx;
137
138 if (sp)
139 ctx = (struct crc_context *)GET_CMD_CTX_SP(sp);
140 else if (tc)
141 ctx = (struct crc_context *)tc->ctx;
142 else {
143 BUG();
144 return;
145 }
146 135
147 /* clean up allocated prev pool */ 136 /* clean up allocated prev pool */
148 list_for_each_entry_safe(dsd_ptr, tdsd_ptr, 137 list_for_each_entry_safe(dsd, tdsd, &ctx->dsd_list, list) {
149 &ctx->dsd_list, list) { 138 dma_pool_free(ha->dl_dma_pool, dsd->dsd_addr,
150 dma_pool_free(ha->dl_dma_pool, dsd_ptr->dsd_addr, 139 dsd->dsd_list_dma);
151 dsd_ptr->dsd_list_dma); 140 list_del(&dsd->list);
152 list_del(&dsd_ptr->list); 141 kfree(dsd);
153 kfree(dsd_ptr);
154 } 142 }
155 INIT_LIST_HEAD(&ctx->dsd_list); 143 INIT_LIST_HEAD(&ctx->dsd_list);
156} 144}
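
The qla_inline.h rewrite narrows qla2x00_clean_dsd_pool() to take the crc_context directly, deleting the sp/tc disambiguation and its BUG() branch; qla_os.c and qla_target.c below update the callers. The loop itself is the standard safe-iteration teardown, where the next node is cached before the current one is freed — the reason the kernel uses list_for_each_entry_safe() here. A userspace rendering:

#include <stdlib.h>

struct dsd {
        struct dsd *next;
};

/* 'next' is saved before free(), so the walk survives deleting the
 * node it stands on. */
static void clean_dsd_list(struct dsd **head)
{
        struct dsd *d = *head, *next;

        while (d) {
                next = d->next;
                free(d);
                d = next;
        }
        *head = NULL;
}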
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index aac03504d9a3..2572121b765b 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -3282,7 +3282,7 @@ msix_register_fail:
3282 } 3282 }
3283 3283
3284 /* Enable MSI-X vector for response queue update for queue 0 */ 3284 /* Enable MSI-X vector for response queue update for queue 0 */
3285 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) { 3285 if (IS_QLA25XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
3286 if (ha->msixbase && ha->mqiobase && 3286 if (ha->msixbase && ha->mqiobase &&
3287 (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 || 3287 (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 ||
3288 ql2xmqsupport)) 3288 ql2xmqsupport))
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index a113ab3592a7..cba1fc5e8be9 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -3676,15 +3676,6 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
3676 qlt_update_host_map(vha, id); 3676 qlt_update_host_map(vha, id);
3677 } 3677 }
3678 3678
3679 fc_host_port_name(vha->host) =
3680 wwn_to_u64(vha->port_name);
3681
3682 if (qla_ini_mode_enabled(vha))
3683 ql_dbg(ql_dbg_mbx, vha, 0x1018,
3684 "FA-WWN portname %016llx (%x)\n",
3685 fc_host_port_name(vha->host),
3686 rptid_entry->vp_status);
3687
3688 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags); 3679 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
3689 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags); 3680 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
3690 } else { 3681 } else {
@@ -4821,9 +4812,9 @@ qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
4821 4812
4822 memset(mcp->mb, 0 , sizeof(mcp->mb)); 4813 memset(mcp->mb, 0 , sizeof(mcp->mb));
4823 mcp->mb[0] = MBC_DIAGNOSTIC_ECHO; 4814 mcp->mb[0] = MBC_DIAGNOSTIC_ECHO;
4824 mcp->mb[1] = mreq->options | BIT_6; /* BIT_6 specifies 64bit address */ 4815 /* BIT_6 specifies 64bit address */
4816 mcp->mb[1] = mreq->options | BIT_15 | BIT_6;
4825 if (IS_CNA_CAPABLE(ha)) { 4817 if (IS_CNA_CAPABLE(ha)) {
4826 mcp->mb[1] |= BIT_15;
4827 mcp->mb[2] = vha->fcoe_fcf_idx; 4818 mcp->mb[2] = vha->fcoe_fcf_idx;
4828 } 4819 }
4829 mcp->mb[16] = LSW(mreq->rcv_dma); 4820 mcp->mb[16] = LSW(mreq->rcv_dma);
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 1c7957903283..79f050256c55 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -630,29 +630,34 @@ qla2x00_sp_free_dma(void *ptr)
630 sp->flags &= ~SRB_CRC_PROT_DMA_VALID; 630 sp->flags &= ~SRB_CRC_PROT_DMA_VALID;
631 } 631 }
632 632
633 if (!ctx)
634 goto end;
635
633 if (sp->flags & SRB_CRC_CTX_DSD_VALID) { 636 if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
634 /* List assured to be having elements */ 637 /* List assured to be having elements */
635 qla2x00_clean_dsd_pool(ha, sp, NULL); 638 qla2x00_clean_dsd_pool(ha, ctx);
636 sp->flags &= ~SRB_CRC_CTX_DSD_VALID; 639 sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
637 } 640 }
638 641
639 if (sp->flags & SRB_CRC_CTX_DMA_VALID) { 642 if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
640 dma_pool_free(ha->dl_dma_pool, ctx, 643 struct crc_context *ctx0 = ctx;
641 ((struct crc_context *)ctx)->crc_ctx_dma); 644
645 dma_pool_free(ha->dl_dma_pool, ctx0, ctx0->crc_ctx_dma);
642 sp->flags &= ~SRB_CRC_CTX_DMA_VALID; 646 sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
643 } 647 }
644 648
645 if (sp->flags & SRB_FCP_CMND_DMA_VALID) { 649 if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
646 struct ct6_dsd *ctx1 = (struct ct6_dsd *)ctx; 650 struct ct6_dsd *ctx1 = ctx;
647 651
648 dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd, 652 dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
649 ctx1->fcp_cmnd_dma); 653 ctx1->fcp_cmnd_dma);
650 list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list); 654 list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list);
651 ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt; 655 ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt;
652 ha->gbl_dsd_avail += ctx1->dsd_use_cnt; 656 ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
653 mempool_free(ctx1, ha->ctx_mempool); 657 mempool_free(ctx1, ha->ctx_mempool);
654 } 658 }
655 659
660end:
656 CMD_SP(cmd) = NULL; 661 CMD_SP(cmd) = NULL;
657 qla2x00_rel_sp(sp); 662 qla2x00_rel_sp(sp);
658} 663}
@@ -699,21 +704,24 @@ qla2xxx_qpair_sp_free_dma(void *ptr)
699 sp->flags &= ~SRB_CRC_PROT_DMA_VALID; 704 sp->flags &= ~SRB_CRC_PROT_DMA_VALID;
700 } 705 }
701 706
707 if (!ctx)
708 goto end;
709
702 if (sp->flags & SRB_CRC_CTX_DSD_VALID) { 710 if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
703 /* List assured to be having elements */ 711 /* List assured to be having elements */
704 qla2x00_clean_dsd_pool(ha, sp, NULL); 712 qla2x00_clean_dsd_pool(ha, ctx);
705 sp->flags &= ~SRB_CRC_CTX_DSD_VALID; 713 sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
706 } 714 }
707 715
708 if (sp->flags & SRB_CRC_CTX_DMA_VALID) { 716 if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
709 dma_pool_free(ha->dl_dma_pool, ctx, 717 struct crc_context *ctx0 = ctx;
710 ((struct crc_context *)ctx)->crc_ctx_dma); 718
719 dma_pool_free(ha->dl_dma_pool, ctx, ctx0->crc_ctx_dma);
711 sp->flags &= ~SRB_CRC_CTX_DMA_VALID; 720 sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
712 } 721 }
713 722
714 if (sp->flags & SRB_FCP_CMND_DMA_VALID) { 723 if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
715 struct ct6_dsd *ctx1 = (struct ct6_dsd *)ctx; 724 struct ct6_dsd *ctx1 = ctx;
716
717 dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd, 725 dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
718 ctx1->fcp_cmnd_dma); 726 ctx1->fcp_cmnd_dma);
719 list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list); 727 list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list);
@@ -721,7 +729,7 @@ qla2xxx_qpair_sp_free_dma(void *ptr)
721 ha->gbl_dsd_avail += ctx1->dsd_use_cnt; 729 ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
722 mempool_free(ctx1, ha->ctx_mempool); 730 mempool_free(ctx1, ha->ctx_mempool);
723 } 731 }
724 732end:
725 CMD_SP(cmd) = NULL; 733 CMD_SP(cmd) = NULL;
726 qla2xxx_rel_qpair_sp(sp->qpair, sp); 734 qla2xxx_rel_qpair_sp(sp->qpair, sp);
727} 735}
@@ -1632,7 +1640,7 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
1632void 1640void
1633qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res) 1641qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
1634{ 1642{
1635 int que, cnt; 1643 int que, cnt, status;
1636 unsigned long flags; 1644 unsigned long flags;
1637 srb_t *sp; 1645 srb_t *sp;
1638 struct qla_hw_data *ha = vha->hw; 1646 struct qla_hw_data *ha = vha->hw;
@@ -1662,8 +1670,12 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
1662 */ 1670 */
1663 sp_get(sp); 1671 sp_get(sp);
1664 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1672 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1665 qla2xxx_eh_abort(GET_CMD_SP(sp)); 1673 status = qla2xxx_eh_abort(GET_CMD_SP(sp));
1666 spin_lock_irqsave(&ha->hardware_lock, flags); 1674 spin_lock_irqsave(&ha->hardware_lock, flags);
1675 /* Get rid of extra reference if immediate exit
1676	 * from qla2xxx_eh_abort */
1677 if (status == FAILED && (qla2x00_isp_reg_stat(ha)))
1678 atomic_dec(&sp->ref_count);
1667 } 1679 }
1668 req->outstanding_cmds[cnt] = NULL; 1680 req->outstanding_cmds[cnt] = NULL;
1669 sp->done(sp, res); 1681 sp->done(sp, res);
@@ -2623,10 +2635,10 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2623 2635
2624 if (mem_only) { 2636 if (mem_only) {
2625 if (pci_enable_device_mem(pdev)) 2637 if (pci_enable_device_mem(pdev))
2626 goto probe_out; 2638 return ret;
2627 } else { 2639 } else {
2628 if (pci_enable_device(pdev)) 2640 if (pci_enable_device(pdev))
2629 goto probe_out; 2641 return ret;
2630 } 2642 }
2631 2643
2632 /* This may fail but that's ok */ 2644 /* This may fail but that's ok */
@@ -2636,7 +2648,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2636 if (!ha) { 2648 if (!ha) {
2637 ql_log_pci(ql_log_fatal, pdev, 0x0009, 2649 ql_log_pci(ql_log_fatal, pdev, 0x0009,
2638 "Unable to allocate memory for ha.\n"); 2650 "Unable to allocate memory for ha.\n");
2639 goto probe_out; 2651 goto disable_device;
2640 } 2652 }
2641 ql_dbg_pci(ql_dbg_init, pdev, 0x000a, 2653 ql_dbg_pci(ql_dbg_init, pdev, 0x000a,
2642 "Memory allocated for ha=%p.\n", ha); 2654 "Memory allocated for ha=%p.\n", ha);
@@ -3254,7 +3266,7 @@ iospace_config_failed:
3254 pci_release_selected_regions(ha->pdev, ha->bars); 3266 pci_release_selected_regions(ha->pdev, ha->bars);
3255 kfree(ha); 3267 kfree(ha);
3256 3268
3257probe_out: 3269disable_device:
3258 pci_disable_device(pdev); 3270 pci_disable_device(pdev);
3259 return ret; 3271 return ret;
3260} 3272}
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 0e03ca2ab3e5..e766d8412384 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -2245,11 +2245,13 @@ static void qlt_unmap_sg(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
2245 pci_unmap_sg(ha->pdev, cmd->prot_sg, cmd->prot_sg_cnt, 2245 pci_unmap_sg(ha->pdev, cmd->prot_sg, cmd->prot_sg_cnt,
2246 cmd->dma_data_direction); 2246 cmd->dma_data_direction);
2247 2247
2248 if (!cmd->ctx)
2249 return;
2250
2248 if (cmd->ctx_dsd_alloced) 2251 if (cmd->ctx_dsd_alloced)
2249 qla2x00_clean_dsd_pool(ha, NULL, cmd); 2252 qla2x00_clean_dsd_pool(ha, cmd->ctx);
2250 2253
2251 if (cmd->ctx) 2254 dma_pool_free(ha->dl_dma_pool, cmd->ctx, cmd->ctx->crc_ctx_dma);
2252 dma_pool_free(ha->dl_dma_pool, cmd->ctx, cmd->ctx->crc_ctx_dma);
2253} 2255}
2254 2256
2255static int qlt_check_reserve_free_req(struct scsi_qla_host *vha, 2257static int qlt_check_reserve_free_req(struct scsi_qla_host *vha,
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
index 8a58ef3adab4..c197972a3e2d 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.c
+++ b/drivers/scsi/qla2xxx/qla_tmpl.c
@@ -371,7 +371,7 @@ qla27xx_fwdt_entry_t262(struct scsi_qla_host *vha,
371 goto done; 371 goto done;
372 } 372 }
373 373
374 if (end <= start || start == 0 || end == 0) { 374 if (end < start || start == 0 || end == 0) {
375 ql_dbg(ql_dbg_misc, vha, 0xd023, 375 ql_dbg(ql_dbg_misc, vha, 0xd023,
376 "%s: unusable range (start=%x end=%x)\n", __func__, 376 "%s: unusable range (start=%x end=%x)\n", __func__,
377 ent->t262.end_addr, ent->t262.start_addr); 377 ent->t262.end_addr, ent->t262.start_addr);
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 7bfbcfa7af40..61cdd99ae41e 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -763,6 +763,8 @@ struct scsi_device *__scsi_device_lookup(struct Scsi_Host *shost,
763 struct scsi_device *sdev; 763 struct scsi_device *sdev;
764 764
765 list_for_each_entry(sdev, &shost->__devices, siblings) { 765 list_for_each_entry(sdev, &shost->__devices, siblings) {
766 if (sdev->sdev_state == SDEV_DEL)
767 continue;
766 if (sdev->channel == channel && sdev->id == id && 768 if (sdev->channel == channel && sdev->id == id &&
767 sdev->lun ==lun) 769 sdev->lun ==lun)
768 return sdev; 770 return sdev;
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 17249c3650fe..dc095a292c61 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -1404,7 +1404,7 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1404 arr[4] = SDEBUG_LONG_INQ_SZ - 5; 1404 arr[4] = SDEBUG_LONG_INQ_SZ - 5;
1405 arr[5] = (int)have_dif_prot; /* PROTECT bit */ 1405 arr[5] = (int)have_dif_prot; /* PROTECT bit */
1406 if (sdebug_vpd_use_hostno == 0) 1406 if (sdebug_vpd_use_hostno == 0)
1407 arr[5] = 0x10; /* claim: implicit TGPS */ 1407 arr[5] |= 0x10; /* claim: implicit TPGS */
1408 arr[6] = 0x10; /* claim: MultiP */ 1408 arr[6] = 0x10; /* claim: MultiP */
1409 /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */ 1409 /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
1410 arr[7] = 0xa; /* claim: LINKED + CMDQUE */ 1410 arr[7] = 0xa; /* claim: LINKED + CMDQUE */
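
The scsi_debug fix is a read-modify-write error: byte 5 of the INQUIRY response already holds the PROTECT bit, and assigning 0x10 outright clobbered it (the hunk also corrects the comment's TGPS typo to TPGS). ORing composes the claims instead:

#include <stdint.h>

/* Compose INQUIRY byte 5: plain assignment of the TPGS claim would
 * wipe the PROTECT bit written first. */
static uint8_t inquiry_byte5(int have_dif_prot, int claim_implicit_tpgs)
{
        uint8_t b = 0;

        if (have_dif_prot)
                b |= 0x01;      /* PROTECT */
        if (claim_implicit_tpgs)
                b |= 0x10;      /* implicit TPGS */
        return b;
}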
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 814a4bd8405d..99e16ac479e3 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -30,6 +30,7 @@
30#include <scsi/scsi_driver.h> 30#include <scsi/scsi_driver.h>
31#include <scsi/scsi_eh.h> 31#include <scsi/scsi_eh.h>
32#include <scsi/scsi_host.h> 32#include <scsi/scsi_host.h>
33#include <scsi/scsi_transport.h> /* __scsi_init_queue() */
33#include <scsi/scsi_dh.h> 34#include <scsi/scsi_dh.h>
34 35
35#include <trace/events/scsi.h> 36#include <trace/events/scsi.h>
@@ -1850,7 +1851,7 @@ static int scsi_mq_prep_fn(struct request *req)
1850 1851
1851 /* zero out the cmd, except for the embedded scsi_request */ 1852 /* zero out the cmd, except for the embedded scsi_request */
1852 memset((char *)cmd + sizeof(cmd->req), 0, 1853 memset((char *)cmd + sizeof(cmd->req), 0,
1853 sizeof(*cmd) - sizeof(cmd->req)); 1854 sizeof(*cmd) - sizeof(cmd->req) + shost->hostt->cmd_size);
1854 1855
1855 req->special = cmd; 1856 req->special = cmd;
1856 1857
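
The scsi_lib.c hunk widens the per-command reset: a scsi_cmnd is allocated with hostt->cmd_size bytes of driver-private data immediately after it, and zeroing only sizeof(*cmd) left that tail holding stale state from the previous request. A toy model of zeroing a header plus its trailing payload while preserving a leading sub-struct:

#include <string.h>

struct req { int tag; };                /* must survive the reset */
struct cmd {
        struct req req;
        int state;
        /* cmd_size bytes of driver-private data follow the struct,
         * allocated as part of the same block. */
};

static void reset_cmd(struct cmd *cmd, size_t cmd_size)
{
        /* Zero everything after the embedded request, including the
         * driver-private tail past the end of struct cmd. */
        memset((char *)cmd + sizeof(cmd->req), 0,
               sizeof(*cmd) - sizeof(cmd->req) + cmd_size);
}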
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index f9d1432d7cc5..b6bb4e0ce0e3 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -827,21 +827,32 @@ static int sd_setup_write_zeroes_cmnd(struct scsi_cmnd *cmd)
827 struct scsi_disk *sdkp = scsi_disk(rq->rq_disk); 827 struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
828 u64 sector = blk_rq_pos(rq) >> (ilog2(sdp->sector_size) - 9); 828 u64 sector = blk_rq_pos(rq) >> (ilog2(sdp->sector_size) - 9);
829 u32 nr_sectors = blk_rq_sectors(rq) >> (ilog2(sdp->sector_size) - 9); 829 u32 nr_sectors = blk_rq_sectors(rq) >> (ilog2(sdp->sector_size) - 9);
830 int ret;
830 831
831 if (!(rq->cmd_flags & REQ_NOUNMAP)) { 832 if (!(rq->cmd_flags & REQ_NOUNMAP)) {
832 switch (sdkp->zeroing_mode) { 833 switch (sdkp->zeroing_mode) {
833 case SD_ZERO_WS16_UNMAP: 834 case SD_ZERO_WS16_UNMAP:
834 return sd_setup_write_same16_cmnd(cmd, true); 835 ret = sd_setup_write_same16_cmnd(cmd, true);
836 goto out;
835 case SD_ZERO_WS10_UNMAP: 837 case SD_ZERO_WS10_UNMAP:
836 return sd_setup_write_same10_cmnd(cmd, true); 838 ret = sd_setup_write_same10_cmnd(cmd, true);
839 goto out;
837 } 840 }
838 } 841 }
839 842
840 if (sdp->no_write_same) 843 if (sdp->no_write_same)
841 return BLKPREP_INVALID; 844 return BLKPREP_INVALID;
845
842 if (sdkp->ws16 || sector > 0xffffffff || nr_sectors > 0xffff) 846 if (sdkp->ws16 || sector > 0xffffffff || nr_sectors > 0xffff)
843 return sd_setup_write_same16_cmnd(cmd, false); 847 ret = sd_setup_write_same16_cmnd(cmd, false);
844 return sd_setup_write_same10_cmnd(cmd, false); 848 else
849 ret = sd_setup_write_same10_cmnd(cmd, false);
850
851out:
852 if (sd_is_zoned(sdkp) && ret == BLKPREP_OK)
853 return sd_zbc_write_lock_zone(cmd);
854
855 return ret;
845} 856}
846 857
847static void sd_config_write_same(struct scsi_disk *sdkp) 858static void sd_config_write_same(struct scsi_disk *sdkp)
@@ -948,6 +959,10 @@ static int sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
948 rq->__data_len = sdp->sector_size; 959 rq->__data_len = sdp->sector_size;
949 ret = scsi_init_io(cmd); 960 ret = scsi_init_io(cmd);
950 rq->__data_len = nr_bytes; 961 rq->__data_len = nr_bytes;
962
963 if (sd_is_zoned(sdkp) && ret != BLKPREP_OK)
964 sd_zbc_write_unlock_zone(cmd);
965
951 return ret; 966 return ret;
952} 967}
953 968
@@ -1567,17 +1582,21 @@ out:
1567 return retval; 1582 return retval;
1568} 1583}
1569 1584
1570static int sd_sync_cache(struct scsi_disk *sdkp) 1585static int sd_sync_cache(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr)
1571{ 1586{
1572 int retries, res; 1587 int retries, res;
1573 struct scsi_device *sdp = sdkp->device; 1588 struct scsi_device *sdp = sdkp->device;
1574 const int timeout = sdp->request_queue->rq_timeout 1589 const int timeout = sdp->request_queue->rq_timeout
1575 * SD_FLUSH_TIMEOUT_MULTIPLIER; 1590 * SD_FLUSH_TIMEOUT_MULTIPLIER;
1576 struct scsi_sense_hdr sshdr; 1591 struct scsi_sense_hdr my_sshdr;
1577 1592
1578 if (!scsi_device_online(sdp)) 1593 if (!scsi_device_online(sdp))
1579 return -ENODEV; 1594 return -ENODEV;
1580 1595
1596 /* caller might not be interested in sense, but we need it */
1597 if (!sshdr)
1598 sshdr = &my_sshdr;
1599
1581 for (retries = 3; retries > 0; --retries) { 1600 for (retries = 3; retries > 0; --retries) {
1582 unsigned char cmd[10] = { 0 }; 1601 unsigned char cmd[10] = { 0 };
1583 1602
@@ -1586,7 +1605,7 @@ static int sd_sync_cache(struct scsi_disk *sdkp)
1586 * Leave the rest of the command zero to indicate 1605 * Leave the rest of the command zero to indicate
1587 * flush everything. 1606 * flush everything.
1588 */ 1607 */
1589 res = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr, 1608 res = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, sshdr,
1590 timeout, SD_MAX_RETRIES, 0, RQF_PM, NULL); 1609 timeout, SD_MAX_RETRIES, 0, RQF_PM, NULL);
1591 if (res == 0) 1610 if (res == 0)
1592 break; 1611 break;
@@ -1596,11 +1615,12 @@ static int sd_sync_cache(struct scsi_disk *sdkp)
1596 sd_print_result(sdkp, "Synchronize Cache(10) failed", res); 1615 sd_print_result(sdkp, "Synchronize Cache(10) failed", res);
1597 1616
1598 if (driver_byte(res) & DRIVER_SENSE) 1617 if (driver_byte(res) & DRIVER_SENSE)
1599 sd_print_sense_hdr(sdkp, &sshdr); 1618 sd_print_sense_hdr(sdkp, sshdr);
1619
1600 /* we need to evaluate the error return */ 1620 /* we need to evaluate the error return */
1601 if (scsi_sense_valid(&sshdr) && 1621 if (scsi_sense_valid(sshdr) &&
1602 (sshdr.asc == 0x3a || /* medium not present */ 1622 (sshdr->asc == 0x3a || /* medium not present */
1603 sshdr.asc == 0x20)) /* invalid command */ 1623 sshdr->asc == 0x20)) /* invalid command */
1604 /* this is no error here */ 1624 /* this is no error here */
1605 return 0; 1625 return 0;
1606 1626
@@ -3444,7 +3464,7 @@ static void sd_shutdown(struct device *dev)
3444 3464
3445 if (sdkp->WCE && sdkp->media_present) { 3465 if (sdkp->WCE && sdkp->media_present) {
3446 sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n"); 3466 sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
3447 sd_sync_cache(sdkp); 3467 sd_sync_cache(sdkp, NULL);
3448 } 3468 }
3449 3469
3450 if (system_state != SYSTEM_RESTART && sdkp->device->manage_start_stop) { 3470 if (system_state != SYSTEM_RESTART && sdkp->device->manage_start_stop) {
@@ -3456,6 +3476,7 @@ static void sd_shutdown(struct device *dev)
3456static int sd_suspend_common(struct device *dev, bool ignore_stop_errors) 3476static int sd_suspend_common(struct device *dev, bool ignore_stop_errors)
3457{ 3477{
3458 struct scsi_disk *sdkp = dev_get_drvdata(dev); 3478 struct scsi_disk *sdkp = dev_get_drvdata(dev);
3479 struct scsi_sense_hdr sshdr;
3459 int ret = 0; 3480 int ret = 0;
3460 3481
3461 if (!sdkp) /* E.g.: runtime suspend following sd_remove() */ 3482 if (!sdkp) /* E.g.: runtime suspend following sd_remove() */
@@ -3463,12 +3484,23 @@ static int sd_suspend_common(struct device *dev, bool ignore_stop_errors)
3463 3484
3464 if (sdkp->WCE && sdkp->media_present) { 3485 if (sdkp->WCE && sdkp->media_present) {
3465 sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n"); 3486 sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
3466 ret = sd_sync_cache(sdkp); 3487 ret = sd_sync_cache(sdkp, &sshdr);
3488
3467 if (ret) { 3489 if (ret) {
3468 /* ignore OFFLINE device */ 3490 /* ignore OFFLINE device */
3469 if (ret == -ENODEV) 3491 if (ret == -ENODEV)
3470 ret = 0; 3492 return 0;
3471 goto done; 3493
3494 if (!scsi_sense_valid(&sshdr) ||
3495 sshdr.sense_key != ILLEGAL_REQUEST)
3496 return ret;
3497
3498 /*
3499 * sshdr.sense_key == ILLEGAL_REQUEST means this drive
3500 * doesn't support sync. There's not much to do and
3501 * suspend shouldn't fail.
3502 */
3503 ret = 0;
3472 } 3504 }
3473 } 3505 }
3474 3506
@@ -3480,7 +3512,6 @@ static int sd_suspend_common(struct device *dev, bool ignore_stop_errors)
3480 ret = 0; 3512 ret = 0;
3481 } 3513 }
3482 3514
3483done:
3484 return ret; 3515 return ret;
3485} 3516}
3486 3517
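
The sd.c series threads a scsi_sense_hdr out of sd_sync_cache() so sd_suspend_common() can tell "drive does not implement SYNCHRONIZE CACHE" (sense key ILLEGAL_REQUEST, which should not fail suspend) from a genuine flush failure; sd_shutdown() passes NULL because it does not care. The enabling idiom is an optional out-parameter with a local fallback — a hedged sketch with trimmed stand-in types:

/* Callers that want the detail pass storage, callers that do not pass
 * NULL, and the callee always has somewhere to write. */
struct sense_hdr { int sense_key; int asc; };

static int sync_cache(struct sense_hdr *sshdr)
{
        struct sense_hdr my_sshdr;

        if (!sshdr)
                sshdr = &my_sshdr; /* we need the sense even if the caller doesn't */

        /* ... issue the flush, filling *sshdr on failure ... */
        sshdr->sense_key = 0;
        return 0;
}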
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 0a38ba01b7b4..82c33a6edbea 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -2074,11 +2074,12 @@ sg_get_rq_mark(Sg_fd * sfp, int pack_id)
2074 if ((1 == resp->done) && (!resp->sg_io_owned) && 2074 if ((1 == resp->done) && (!resp->sg_io_owned) &&
2075 ((-1 == pack_id) || (resp->header.pack_id == pack_id))) { 2075 ((-1 == pack_id) || (resp->header.pack_id == pack_id))) {
2076 resp->done = 2; /* guard against other readers */ 2076 resp->done = 2; /* guard against other readers */
2077 break; 2077 write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
2078 return resp;
2078 } 2079 }
2079 } 2080 }
2080 write_unlock_irqrestore(&sfp->rq_list_lock, iflags); 2081 write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
2081 return resp; 2082 return NULL;
2082} 2083}
2083 2084
2084/* always adds to end of list */ 2085/* always adds to end of list */
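
The sg.c fix restructures sg_get_rq_mark() so the write lock is dropped exactly once on every exit: the found path unlocks and returns the claimed element, and falling off the loop unlocks and returns NULL, instead of breaking out and returning whatever the iterator last pointed at. The general shape, as a userspace sketch:

#include <pthread.h>
#include <stddef.h>

struct node { struct node *next; int id; int done; };

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

/* Every return path releases the lock exactly once, and the not-found
 * case returns NULL rather than a stale iterator. */
static struct node *find_and_claim(struct node *head, int id)
{
        pthread_mutex_lock(&list_lock);
        for (struct node *n = head; n; n = n->next) {
                if (n->id == id && n->done == 1) {
                        n->done = 2;    /* guard against other readers */
                        pthread_mutex_unlock(&list_lock);
                        return n;
                }
        }
        pthread_mutex_unlock(&list_lock);
        return NULL;
}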
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index abc7e87937cc..ffe8d8608818 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -7698,6 +7698,12 @@ static inline void ufshcd_add_sysfs_nodes(struct ufs_hba *hba)
7698 ufshcd_add_spm_lvl_sysfs_nodes(hba); 7698 ufshcd_add_spm_lvl_sysfs_nodes(hba);
7699} 7699}
7700 7700
7701static inline void ufshcd_remove_sysfs_nodes(struct ufs_hba *hba)
7702{
7703 device_remove_file(hba->dev, &hba->rpm_lvl_attr);
7704 device_remove_file(hba->dev, &hba->spm_lvl_attr);
7705}
7706
7701/** 7707/**
7702 * ufshcd_shutdown - shutdown routine 7708 * ufshcd_shutdown - shutdown routine
7703 * @hba: per adapter instance 7709 * @hba: per adapter instance
@@ -7735,6 +7741,7 @@ EXPORT_SYMBOL(ufshcd_shutdown);
7735 */ 7741 */
7736void ufshcd_remove(struct ufs_hba *hba) 7742void ufshcd_remove(struct ufs_hba *hba)
7737{ 7743{
7744 ufshcd_remove_sysfs_nodes(hba);
7738 scsi_remove_host(hba->host); 7745 scsi_remove_host(hba->host);
7739 /* disable interrupts */ 7746 /* disable interrupts */
7740 ufshcd_disable_intr(hba, hba->intr_mask); 7747 ufshcd_disable_intr(hba, hba->intr_mask);
diff --git a/drivers/soc/bcm/brcmstb/common.c b/drivers/soc/bcm/brcmstb/common.c
index b6195fdf0d00..22e98a90468c 100644
--- a/drivers/soc/bcm/brcmstb/common.c
+++ b/drivers/soc/bcm/brcmstb/common.c
@@ -49,7 +49,7 @@ static const struct of_device_id sun_top_ctrl_match[] = {
49 { .compatible = "brcm,bcm7420-sun-top-ctrl", }, 49 { .compatible = "brcm,bcm7420-sun-top-ctrl", },
50 { .compatible = "brcm,bcm7425-sun-top-ctrl", }, 50 { .compatible = "brcm,bcm7425-sun-top-ctrl", },
51 { .compatible = "brcm,bcm7429-sun-top-ctrl", }, 51 { .compatible = "brcm,bcm7429-sun-top-ctrl", },
52 { .compatible = "brcm,bcm7425-sun-top-ctrl", }, 52 { .compatible = "brcm,bcm7435-sun-top-ctrl", },
53 { .compatible = "brcm,brcmstb-sun-top-ctrl", }, 53 { .compatible = "brcm,brcmstb-sun-top-ctrl", },
54 { } 54 { }
55}; 55};
diff --git a/drivers/soc/imx/Kconfig b/drivers/soc/imx/Kconfig
index 357a5d8f8da0..a5b86a28f343 100644
--- a/drivers/soc/imx/Kconfig
+++ b/drivers/soc/imx/Kconfig
@@ -2,8 +2,9 @@ menu "i.MX SoC drivers"
2 2
3config IMX7_PM_DOMAINS 3config IMX7_PM_DOMAINS
4 bool "i.MX7 PM domains" 4 bool "i.MX7 PM domains"
5 select PM_GENERIC_DOMAINS
6 depends on SOC_IMX7D || (COMPILE_TEST && OF) 5 depends on SOC_IMX7D || (COMPILE_TEST && OF)
6 depends on PM
7 select PM_GENERIC_DOMAINS
7 default y if SOC_IMX7D 8 default y if SOC_IMX7D
8 9
9endmenu 10endmenu
diff --git a/drivers/soc/ti/knav_dma.c b/drivers/soc/ti/knav_dma.c
index ecebe2eecc3a..026182d3b27c 100644
--- a/drivers/soc/ti/knav_dma.c
+++ b/drivers/soc/ti/knav_dma.c
@@ -413,7 +413,7 @@ static int of_channel_match_helper(struct device_node *np, const char *name,
413 * @name: slave channel name 413 * @name: slave channel name
414 * @config: dma configuration parameters 414 * @config: dma configuration parameters
415 * 415 *
416 * Returns pointer to appropriate DMA channel on success or NULL. 416 * Returns pointer to appropriate DMA channel on success or error.
417 */ 417 */
418void *knav_dma_open_channel(struct device *dev, const char *name, 418void *knav_dma_open_channel(struct device *dev, const char *name,
419 struct knav_dma_cfg *config) 419 struct knav_dma_cfg *config)
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 1761c9004fc1..fd1b4fdb72a4 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -393,6 +393,13 @@ config SPI_FSL_ESPI
393 From MPC8536, 85xx platform uses the controller, and all P10xx, 393 From MPC8536, 85xx platform uses the controller, and all P10xx,
394 P20xx, P30xx,P40xx, P50xx uses this controller. 394 P20xx, P30xx,P40xx, P50xx uses this controller.
395 395
396config SPI_MESON_SPICC
397 tristate "Amlogic Meson SPICC controller"
398 depends on ARCH_MESON || COMPILE_TEST
399 help
400 This enables master mode support for the SPICC (SPI communication
401 controller) available in Amlogic Meson SoCs.
402
396config SPI_MESON_SPIFC 403config SPI_MESON_SPIFC
397 tristate "Amlogic Meson SPIFC controller" 404 tristate "Amlogic Meson SPIFC controller"
398 depends on ARCH_MESON || COMPILE_TEST 405 depends on ARCH_MESON || COMPILE_TEST
@@ -457,6 +464,7 @@ config SPI_OMAP24XX
457 464
458config SPI_TI_QSPI 465config SPI_TI_QSPI
459 tristate "DRA7xxx QSPI controller support" 466 tristate "DRA7xxx QSPI controller support"
467 depends on HAS_DMA
460 depends on ARCH_OMAP2PLUS || COMPILE_TEST 468 depends on ARCH_OMAP2PLUS || COMPILE_TEST
461 help 469 help
462 QSPI master controller for DRA7xxx used for flash devices. 470 QSPI master controller for DRA7xxx used for flash devices.
@@ -784,6 +792,30 @@ config SPI_TLE62X0
784 792
785endif # SPI_MASTER 793endif # SPI_MASTER
786 794
787# (slave support would go here) 795#
796# SLAVE side ... listening to other SPI masters
797#
798
799config SPI_SLAVE
800 bool "SPI slave protocol handlers"
801 help
802 If your system has a slave-capable SPI controller, you can enable
803 slave protocol handlers.
804
805if SPI_SLAVE
806
807config SPI_SLAVE_TIME
808 tristate "SPI slave handler reporting boot up time"
809 help
810 SPI slave handler responding with the time of reception of the last
811 SPI message.
812
813config SPI_SLAVE_SYSTEM_CONTROL
814 tristate "SPI slave handler controlling system state"
815 help
816 SPI slave handler to allow remote control of system reboot, power
817 off, halt, and suspend.
818
819endif # SPI_SLAVE
788 820
789endif # SPI 821endif # SPI
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index b375a7a89216..31dccfbb335e 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -53,6 +53,7 @@ obj-$(CONFIG_SPI_LANTIQ_SSC) += spi-lantiq-ssc.o
53obj-$(CONFIG_SPI_JCORE) += spi-jcore.o 53obj-$(CONFIG_SPI_JCORE) += spi-jcore.o
54obj-$(CONFIG_SPI_LM70_LLP) += spi-lm70llp.o 54obj-$(CONFIG_SPI_LM70_LLP) += spi-lm70llp.o
55obj-$(CONFIG_SPI_LP8841_RTC) += spi-lp8841-rtc.o 55obj-$(CONFIG_SPI_LP8841_RTC) += spi-lp8841-rtc.o
56obj-$(CONFIG_SPI_MESON_SPICC) += spi-meson-spicc.o
56obj-$(CONFIG_SPI_MESON_SPIFC) += spi-meson-spifc.o 57obj-$(CONFIG_SPI_MESON_SPIFC) += spi-meson-spifc.o
57obj-$(CONFIG_SPI_MPC512x_PSC) += spi-mpc512x-psc.o 58obj-$(CONFIG_SPI_MPC512x_PSC) += spi-mpc512x-psc.o
58obj-$(CONFIG_SPI_MPC52xx_PSC) += spi-mpc52xx-psc.o 59obj-$(CONFIG_SPI_MPC52xx_PSC) += spi-mpc52xx-psc.o
@@ -105,3 +106,7 @@ obj-$(CONFIG_SPI_XILINX) += spi-xilinx.o
105obj-$(CONFIG_SPI_XLP) += spi-xlp.o 106obj-$(CONFIG_SPI_XLP) += spi-xlp.o
106obj-$(CONFIG_SPI_XTENSA_XTFPGA) += spi-xtensa-xtfpga.o 107obj-$(CONFIG_SPI_XTENSA_XTFPGA) += spi-xtensa-xtfpga.o
107obj-$(CONFIG_SPI_ZYNQMP_GQSPI) += spi-zynqmp-gqspi.o 108obj-$(CONFIG_SPI_ZYNQMP_GQSPI) += spi-zynqmp-gqspi.o
109
110# SPI slave protocol handlers
111obj-$(CONFIG_SPI_SLAVE_TIME) += spi-slave-time.o
112obj-$(CONFIG_SPI_SLAVE_SYSTEM_CONTROL) += spi-slave-system-control.o
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index 1eb83c9613d5..f95da364c283 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -269,6 +269,7 @@ struct atmel_spi_caps {
269 bool is_spi2; 269 bool is_spi2;
270 bool has_wdrbt; 270 bool has_wdrbt;
271 bool has_dma_support; 271 bool has_dma_support;
272 bool has_pdc_support;
272}; 273};
273 274
274/* 275/*
@@ -1422,11 +1423,31 @@ static void atmel_get_caps(struct atmel_spi *as)
1422 unsigned int version; 1423 unsigned int version;
1423 1424
1424 version = atmel_get_version(as); 1425 version = atmel_get_version(as);
1425 dev_info(&as->pdev->dev, "version: 0x%x\n", version);
1426 1426
1427 as->caps.is_spi2 = version > 0x121; 1427 as->caps.is_spi2 = version > 0x121;
1428 as->caps.has_wdrbt = version >= 0x210; 1428 as->caps.has_wdrbt = version >= 0x210;
1429#ifdef CONFIG_SOC_SAM_V4_V5
1430 /*
1431 * Atmel SoCs based on ARM9 (SAM9x) cores should not use spi_map_buf()
1432 * since this later function tries to map buffers with dma_map_sg()
1433 * even if they have not been allocated inside DMA-safe areas.
1434 * On SoCs based on Cortex A5 (SAMA5Dx), it works anyway because for
1435 * those ARM cores, the data cache follows the PIPT model.
1436 * Also the L2 cache controller of SAMA5D2 uses the PIPT model too.
1437 * In case of PIPT caches, there cannot be cache aliases.
1438 * However on ARM9 cores, the data cache follows the VIVT model, hence
1439 * the cache aliases issue can occur when buffers are allocated from
1440 * DMA-unsafe areas, by vmalloc() for instance, where cache coherency is
1441 * not taken into account or at least not handled completely (cache
1442 * lines of aliases are not invalidated).
1443	 * This is not a theoretical issue: it was reproduced when trying to mount
1444 * a UBI file-system on a at91sam9g35ek board.
1445 */
1446 as->caps.has_dma_support = false;
1447#else
1429 as->caps.has_dma_support = version >= 0x212; 1448 as->caps.has_dma_support = version >= 0x212;
1449#endif
1450 as->caps.has_pdc_support = version < 0x212;
1430} 1451}
1431 1452
1432/*-------------------------------------------------------------------------*/ 1453/*-------------------------------------------------------------------------*/
@@ -1567,7 +1588,7 @@ static int atmel_spi_probe(struct platform_device *pdev)
1567 } else if (ret == -EPROBE_DEFER) { 1588 } else if (ret == -EPROBE_DEFER) {
1568 return ret; 1589 return ret;
1569 } 1590 }
1570 } else { 1591 } else if (as->caps.has_pdc_support) {
1571 as->use_pdc = true; 1592 as->use_pdc = true;
1572 } 1593 }
1573 1594
@@ -1609,8 +1630,9 @@ static int atmel_spi_probe(struct platform_device *pdev)
1609 goto out_free_dma; 1630 goto out_free_dma;
1610 1631
1611 /* go! */ 1632 /* go! */
1612 dev_info(&pdev->dev, "Atmel SPI Controller at 0x%08lx (irq %d)\n", 1633 dev_info(&pdev->dev, "Atmel SPI Controller version 0x%x at 0x%08lx (irq %d)\n",
1613 (unsigned long)regs->start, irq); 1634 atmel_get_version(as), (unsigned long)regs->start,
1635 irq);
1614 1636
1615 return 0; 1637 return 0;
1616 1638
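The compile-time fallback above is deliberately coarse: on SAM9x parts DMA is disabled wholesale and the new has_pdc_support flag routes transfers through the PDC instead. A finer-grained alternative (not what this patch does) would be to screen individual buffers, since vmalloc()ed addresses are exactly the ones that fail virt_addr_valid(); a minimal sketch, with the helper name purely illustrative:

static bool example_buf_is_dma_safe(const void *buf)
{
	/*
	 * Addresses outside the linear kernel map (vmalloc, highmem) fail
	 * virt_addr_valid(); on VIVT ARM9 data caches they may alias, so
	 * they must not be handed to dma_map_sg().
	 */
	return buf && virt_addr_valid(buf);
}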
diff --git a/drivers/spi/spi-bcm63xx-hsspi.c b/drivers/spi/spi-bcm63xx-hsspi.c
index 5514cd02e93a..4da2d4a524ca 100644
--- a/drivers/spi/spi-bcm63xx-hsspi.c
+++ b/drivers/spi/spi-bcm63xx-hsspi.c
@@ -484,6 +484,7 @@ static const struct of_device_id bcm63xx_hsspi_of_match[] = {
484 { .compatible = "brcm,bcm6328-hsspi", }, 484 { .compatible = "brcm,bcm6328-hsspi", },
485 { }, 485 { },
486}; 486};
487MODULE_DEVICE_TABLE(of, bcm63xx_hsspi_of_match);
487 488
488static struct platform_driver bcm63xx_hsspi_driver = { 489static struct platform_driver bcm63xx_hsspi_driver = {
489 .driver = { 490 .driver = {
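The single added line is what makes autoloading work: the OF match table by itself only binds a module that is already loaded, while MODULE_DEVICE_TABLE(of, ...) additionally exports the compatibles as module aliases for udev. The full pattern, with a hypothetical compatible string:

static const struct of_device_id example_of_match[] = {
	{ .compatible = "vendor,example-spi", },	/* hypothetical */
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, example_of_match);	/* emits of:N*T*C... aliases */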
diff --git a/drivers/spi/spi-bcm63xx.c b/drivers/spi/spi-bcm63xx.c
index 247f71b02235..84c7356ce5b4 100644
--- a/drivers/spi/spi-bcm63xx.c
+++ b/drivers/spi/spi-bcm63xx.c
@@ -147,7 +147,7 @@ struct bcm63xx_spi {
147 147
148 /* Platform data */ 148 /* Platform data */
149 const unsigned long *reg_offsets; 149 const unsigned long *reg_offsets;
150 unsigned fifo_size; 150 unsigned int fifo_size;
151 unsigned int msg_type_shift; 151 unsigned int msg_type_shift;
152 unsigned int msg_ctl_width; 152 unsigned int msg_ctl_width;
153 153
@@ -191,7 +191,7 @@ static inline void bcm_spi_writew(struct bcm63xx_spi *bs,
191#endif 191#endif
192} 192}
193 193
194static const unsigned bcm63xx_spi_freq_table[SPI_CLK_MASK][2] = { 194static const unsigned int bcm63xx_spi_freq_table[SPI_CLK_MASK][2] = {
195 { 20000000, SPI_CLK_20MHZ }, 195 { 20000000, SPI_CLK_20MHZ },
196 { 12500000, SPI_CLK_12_50MHZ }, 196 { 12500000, SPI_CLK_12_50MHZ },
197 { 6250000, SPI_CLK_6_250MHZ }, 197 { 6250000, SPI_CLK_6_250MHZ },
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
index 595acdcfc7d0..6ddb6ef1fda4 100644
--- a/drivers/spi/spi-davinci.c
+++ b/drivers/spi/spi-davinci.c
@@ -873,9 +873,8 @@ static int spi_davinci_get_pdata(struct platform_device *pdev,
873 return 0; 873 return 0;
874} 874}
875#else 875#else
876static struct davinci_spi_platform_data 876static int spi_davinci_get_pdata(struct platform_device *pdev,
877 *spi_davinci_get_pdata(struct platform_device *pdev, 877 struct davinci_spi *dspi)
878 struct davinci_spi *dspi)
879{ 878{
880 return -ENODEV; 879 return -ENODEV;
881} 880}
@@ -965,7 +964,9 @@ static int davinci_spi_probe(struct platform_device *pdev)
965 ret = -ENODEV; 964 ret = -ENODEV;
966 goto free_master; 965 goto free_master;
967 } 966 }
968 clk_prepare_enable(dspi->clk); 967 ret = clk_prepare_enable(dspi->clk);
968 if (ret)
969 goto free_master;
969 970
970 master->dev.of_node = pdev->dev.of_node; 971 master->dev.of_node = pdev->dev.of_node;
971 master->bus_num = pdev->id; 972 master->bus_num = pdev->id;
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index 15201645bdc4..d89127f4a46d 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -1032,7 +1032,8 @@ static int dspi_probe(struct platform_device *pdev)
1032 goto out_master_put; 1032 goto out_master_put;
1033 1033
1034 if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) { 1034 if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) {
1035 if (dspi_request_dma(dspi, res->start)) { 1035 ret = dspi_request_dma(dspi, res->start);
1036 if (ret < 0) {
1036 dev_err(&pdev->dev, "can't get dma channels\n"); 1037 dev_err(&pdev->dev, "can't get dma channels\n");
1037 goto out_clk_put; 1038 goto out_clk_put;
1038 } 1039 }
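The davinci and fsl-dspi hunks fix the same class of bug: a fallible call (clk_prepare_enable(), dspi_request_dma()) whose return value was previously discarded. The idiomatic shape, sketched with an illustrative probe function:

static int example_probe(struct platform_device *pdev)
{
	struct clk *clk;
	int ret;

	clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare_enable(clk);
	if (ret)
		return ret;	/* propagate instead of assuming success */

	/* any later failure must clk_disable_unprepare(clk) on its way out */
	return 0;
}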
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index b402530a7a9a..f9698b7aeb3b 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -56,10 +56,6 @@
56 56
57/* The maximum bytes that a sdma BD can transfer.*/ 57/* The maximum bytes that a sdma BD can transfer.*/
58#define MAX_SDMA_BD_BYTES (1 << 15) 58#define MAX_SDMA_BD_BYTES (1 << 15)
59struct spi_imx_config {
60 unsigned int speed_hz;
61 unsigned int bpw;
62};
63 59
64enum spi_imx_devtype { 60enum spi_imx_devtype {
65 IMX1_CSPI, 61 IMX1_CSPI,
@@ -74,7 +70,7 @@ struct spi_imx_data;
74 70
75struct spi_imx_devtype_data { 71struct spi_imx_devtype_data {
76 void (*intctrl)(struct spi_imx_data *, int); 72 void (*intctrl)(struct spi_imx_data *, int);
77 int (*config)(struct spi_device *, struct spi_imx_config *); 73 int (*config)(struct spi_device *);
78 void (*trigger)(struct spi_imx_data *); 74 void (*trigger)(struct spi_imx_data *);
79 int (*rx_available)(struct spi_imx_data *); 75 int (*rx_available)(struct spi_imx_data *);
80 void (*reset)(struct spi_imx_data *); 76 void (*reset)(struct spi_imx_data *);
@@ -94,7 +90,8 @@ struct spi_imx_data {
94 unsigned long spi_clk; 90 unsigned long spi_clk;
95 unsigned int spi_bus_clk; 91 unsigned int spi_bus_clk;
96 92
97 unsigned int bytes_per_word; 93 unsigned int speed_hz;
94 unsigned int bits_per_word;
98 unsigned int spi_drctl; 95 unsigned int spi_drctl;
99 96
100 unsigned int count; 97 unsigned int count;
@@ -203,34 +200,27 @@ out:
203 return i; 200 return i;
204} 201}
205 202
206static int spi_imx_bytes_per_word(const int bpw) 203static int spi_imx_bytes_per_word(const int bits_per_word)
207{ 204{
208 return DIV_ROUND_UP(bpw, BITS_PER_BYTE); 205 return DIV_ROUND_UP(bits_per_word, BITS_PER_BYTE);
209} 206}
210 207
211static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi, 208static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
212 struct spi_transfer *transfer) 209 struct spi_transfer *transfer)
213{ 210{
214 struct spi_imx_data *spi_imx = spi_master_get_devdata(master); 211 struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
215 unsigned int bpw, i; 212 unsigned int bytes_per_word, i;
216 213
217 if (!master->dma_rx) 214 if (!master->dma_rx)
218 return false; 215 return false;
219 216
220 if (!transfer) 217 bytes_per_word = spi_imx_bytes_per_word(transfer->bits_per_word);
221 return false;
222
223 bpw = transfer->bits_per_word;
224 if (!bpw)
225 bpw = spi->bits_per_word;
226
227 bpw = spi_imx_bytes_per_word(bpw);
228 218
229 if (bpw != 1 && bpw != 2 && bpw != 4) 219 if (bytes_per_word != 1 && bytes_per_word != 2 && bytes_per_word != 4)
230 return false; 220 return false;
231 221
232 for (i = spi_imx_get_fifosize(spi_imx) / 2; i > 0; i--) { 222 for (i = spi_imx_get_fifosize(spi_imx) / 2; i > 0; i--) {
233 if (!(transfer->len % (i * bpw))) 223 if (!(transfer->len % (i * bytes_per_word)))
234 break; 224 break;
235 } 225 }
236 226
@@ -340,12 +330,11 @@ static void mx51_ecspi_trigger(struct spi_imx_data *spi_imx)
340 writel(reg, spi_imx->base + MX51_ECSPI_CTRL); 330 writel(reg, spi_imx->base + MX51_ECSPI_CTRL);
341} 331}
342 332
343static int mx51_ecspi_config(struct spi_device *spi, 333static int mx51_ecspi_config(struct spi_device *spi)
344 struct spi_imx_config *config)
345{ 334{
346 struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master); 335 struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
347 u32 ctrl = MX51_ECSPI_CTRL_ENABLE; 336 u32 ctrl = MX51_ECSPI_CTRL_ENABLE;
348 u32 clk = config->speed_hz, delay, reg; 337 u32 clk = spi_imx->speed_hz, delay, reg;
349 u32 cfg = readl(spi_imx->base + MX51_ECSPI_CONFIG); 338 u32 cfg = readl(spi_imx->base + MX51_ECSPI_CONFIG);
350 339
351 /* 340 /*
@@ -364,13 +353,13 @@ static int mx51_ecspi_config(struct spi_device *spi,
364 ctrl |= MX51_ECSPI_CTRL_DRCTL(spi_imx->spi_drctl); 353 ctrl |= MX51_ECSPI_CTRL_DRCTL(spi_imx->spi_drctl);
365 354
366 /* set clock speed */ 355 /* set clock speed */
367 ctrl |= mx51_ecspi_clkdiv(spi_imx, config->speed_hz, &clk); 356 ctrl |= mx51_ecspi_clkdiv(spi_imx, spi_imx->speed_hz, &clk);
368 spi_imx->spi_bus_clk = clk; 357 spi_imx->spi_bus_clk = clk;
369 358
370 /* set chip select to use */ 359 /* set chip select to use */
371 ctrl |= MX51_ECSPI_CTRL_CS(spi->chip_select); 360 ctrl |= MX51_ECSPI_CTRL_CS(spi->chip_select);
372 361
373 ctrl |= (config->bpw - 1) << MX51_ECSPI_CTRL_BL_OFFSET; 362 ctrl |= (spi_imx->bits_per_word - 1) << MX51_ECSPI_CTRL_BL_OFFSET;
374 363
375 cfg |= MX51_ECSPI_CONFIG_SBBCTRL(spi->chip_select); 364 cfg |= MX51_ECSPI_CONFIG_SBBCTRL(spi->chip_select);
376 365
@@ -501,21 +490,21 @@ static void mx31_trigger(struct spi_imx_data *spi_imx)
501 writel(reg, spi_imx->base + MXC_CSPICTRL); 490 writel(reg, spi_imx->base + MXC_CSPICTRL);
502} 491}
503 492
504static int mx31_config(struct spi_device *spi, struct spi_imx_config *config) 493static int mx31_config(struct spi_device *spi)
505{ 494{
506 struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master); 495 struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
507 unsigned int reg = MX31_CSPICTRL_ENABLE | MX31_CSPICTRL_MASTER; 496 unsigned int reg = MX31_CSPICTRL_ENABLE | MX31_CSPICTRL_MASTER;
508 unsigned int clk; 497 unsigned int clk;
509 498
510 reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, config->speed_hz, &clk) << 499 reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, spi_imx->speed_hz, &clk) <<
511 MX31_CSPICTRL_DR_SHIFT; 500 MX31_CSPICTRL_DR_SHIFT;
512 spi_imx->spi_bus_clk = clk; 501 spi_imx->spi_bus_clk = clk;
513 502
514 if (is_imx35_cspi(spi_imx)) { 503 if (is_imx35_cspi(spi_imx)) {
515 reg |= (config->bpw - 1) << MX35_CSPICTRL_BL_SHIFT; 504 reg |= (spi_imx->bits_per_word - 1) << MX35_CSPICTRL_BL_SHIFT;
516 reg |= MX31_CSPICTRL_SSCTL; 505 reg |= MX31_CSPICTRL_SSCTL;
517 } else { 506 } else {
518 reg |= (config->bpw - 1) << MX31_CSPICTRL_BC_SHIFT; 507 reg |= (spi_imx->bits_per_word - 1) << MX31_CSPICTRL_BC_SHIFT;
519 } 508 }
520 509
521 if (spi->mode & SPI_CPHA) 510 if (spi->mode & SPI_CPHA)
@@ -597,18 +586,18 @@ static void mx21_trigger(struct spi_imx_data *spi_imx)
597 writel(reg, spi_imx->base + MXC_CSPICTRL); 586 writel(reg, spi_imx->base + MXC_CSPICTRL);
598} 587}
599 588
600static int mx21_config(struct spi_device *spi, struct spi_imx_config *config) 589static int mx21_config(struct spi_device *spi)
601{ 590{
602 struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master); 591 struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
603 unsigned int reg = MX21_CSPICTRL_ENABLE | MX21_CSPICTRL_MASTER; 592 unsigned int reg = MX21_CSPICTRL_ENABLE | MX21_CSPICTRL_MASTER;
604 unsigned int max = is_imx27_cspi(spi_imx) ? 16 : 18; 593 unsigned int max = is_imx27_cspi(spi_imx) ? 16 : 18;
605 unsigned int clk; 594 unsigned int clk;
606 595
607 reg |= spi_imx_clkdiv_1(spi_imx->spi_clk, config->speed_hz, max, &clk) 596 reg |= spi_imx_clkdiv_1(spi_imx->spi_clk, spi_imx->speed_hz, max, &clk)
608 << MX21_CSPICTRL_DR_SHIFT; 597 << MX21_CSPICTRL_DR_SHIFT;
609 spi_imx->spi_bus_clk = clk; 598 spi_imx->spi_bus_clk = clk;
610 599
611 reg |= config->bpw - 1; 600 reg |= spi_imx->bits_per_word - 1;
612 601
613 if (spi->mode & SPI_CPHA) 602 if (spi->mode & SPI_CPHA)
614 reg |= MX21_CSPICTRL_PHA; 603 reg |= MX21_CSPICTRL_PHA;
@@ -666,17 +655,17 @@ static void mx1_trigger(struct spi_imx_data *spi_imx)
666 writel(reg, spi_imx->base + MXC_CSPICTRL); 655 writel(reg, spi_imx->base + MXC_CSPICTRL);
667} 656}
668 657
669static int mx1_config(struct spi_device *spi, struct spi_imx_config *config) 658static int mx1_config(struct spi_device *spi)
670{ 659{
671 struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master); 660 struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
672 unsigned int reg = MX1_CSPICTRL_ENABLE | MX1_CSPICTRL_MASTER; 661 unsigned int reg = MX1_CSPICTRL_ENABLE | MX1_CSPICTRL_MASTER;
673 unsigned int clk; 662 unsigned int clk;
674 663
675 reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, config->speed_hz, &clk) << 664 reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, spi_imx->speed_hz, &clk) <<
676 MX1_CSPICTRL_DR_SHIFT; 665 MX1_CSPICTRL_DR_SHIFT;
677 spi_imx->spi_bus_clk = clk; 666 spi_imx->spi_bus_clk = clk;
678 667
679 reg |= config->bpw - 1; 668 reg |= spi_imx->bits_per_word - 1;
680 669
681 if (spi->mode & SPI_CPHA) 670 if (spi->mode & SPI_CPHA)
682 reg |= MX1_CSPICTRL_PHA; 671 reg |= MX1_CSPICTRL_PHA;
@@ -841,15 +830,14 @@ static irqreturn_t spi_imx_isr(int irq, void *dev_id)
841 return IRQ_HANDLED; 830 return IRQ_HANDLED;
842} 831}
843 832
844static int spi_imx_dma_configure(struct spi_master *master, 833static int spi_imx_dma_configure(struct spi_master *master)
845 int bytes_per_word)
846{ 834{
847 int ret; 835 int ret;
848 enum dma_slave_buswidth buswidth; 836 enum dma_slave_buswidth buswidth;
849 struct dma_slave_config rx = {}, tx = {}; 837 struct dma_slave_config rx = {}, tx = {};
850 struct spi_imx_data *spi_imx = spi_master_get_devdata(master); 838 struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
851 839
852 switch (bytes_per_word) { 840 switch (spi_imx_bytes_per_word(spi_imx->bits_per_word)) {
853 case 4: 841 case 4:
854 buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES; 842 buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
855 break; 843 break;
@@ -883,8 +871,6 @@ static int spi_imx_dma_configure(struct spi_master *master,
883 return ret; 871 return ret;
884 } 872 }
885 873
886 spi_imx->bytes_per_word = bytes_per_word;
887
888 return 0; 874 return 0;
889} 875}
890 876
@@ -892,22 +878,19 @@ static int spi_imx_setupxfer(struct spi_device *spi,
892 struct spi_transfer *t) 878 struct spi_transfer *t)
893{ 879{
894 struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master); 880 struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
895 struct spi_imx_config config;
896 int ret; 881 int ret;
897 882
898 config.bpw = t ? t->bits_per_word : spi->bits_per_word; 883 if (!t)
899 config.speed_hz = t ? t->speed_hz : spi->max_speed_hz; 884 return 0;
900 885
901 if (!config.speed_hz) 886 spi_imx->bits_per_word = t->bits_per_word;
902 config.speed_hz = spi->max_speed_hz; 887 spi_imx->speed_hz = t->speed_hz;
903 if (!config.bpw)
904 config.bpw = spi->bits_per_word;
905 888
906 /* Initialize the functions for transfer */ 889 /* Initialize the functions for transfer */
907 if (config.bpw <= 8) { 890 if (spi_imx->bits_per_word <= 8) {
908 spi_imx->rx = spi_imx_buf_rx_u8; 891 spi_imx->rx = spi_imx_buf_rx_u8;
909 spi_imx->tx = spi_imx_buf_tx_u8; 892 spi_imx->tx = spi_imx_buf_tx_u8;
910 } else if (config.bpw <= 16) { 893 } else if (spi_imx->bits_per_word <= 16) {
911 spi_imx->rx = spi_imx_buf_rx_u16; 894 spi_imx->rx = spi_imx_buf_rx_u16;
912 spi_imx->tx = spi_imx_buf_tx_u16; 895 spi_imx->tx = spi_imx_buf_tx_u16;
913 } else { 896 } else {
@@ -921,13 +904,12 @@ static int spi_imx_setupxfer(struct spi_device *spi,
921 spi_imx->usedma = 0; 904 spi_imx->usedma = 0;
922 905
923 if (spi_imx->usedma) { 906 if (spi_imx->usedma) {
924 ret = spi_imx_dma_configure(spi->master, 907 ret = spi_imx_dma_configure(spi->master);
925 spi_imx_bytes_per_word(config.bpw));
926 if (ret) 908 if (ret)
927 return ret; 909 return ret;
928 } 910 }
929 911
930 spi_imx->devtype_data->config(spi, &config); 912 spi_imx->devtype_data->config(spi);
931 913
932 return 0; 914 return 0;
933} 915}
@@ -976,8 +958,6 @@ static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx,
976 goto err; 958 goto err;
977 } 959 }
978 960
979 spi_imx_dma_configure(master, 1);
980
981 init_completion(&spi_imx->dma_rx_completion); 961 init_completion(&spi_imx->dma_rx_completion);
982 init_completion(&spi_imx->dma_tx_completion); 962 init_completion(&spi_imx->dma_tx_completion);
983 master->can_dma = spi_imx_can_dma; 963 master->can_dma = spi_imx_can_dma;
@@ -1189,15 +1169,15 @@ static int spi_imx_probe(struct platform_device *pdev)
1189 } 1169 }
1190 1170
1191 master = spi_alloc_master(&pdev->dev, sizeof(struct spi_imx_data)); 1171 master = spi_alloc_master(&pdev->dev, sizeof(struct spi_imx_data));
1172 if (!master)
1173 return -ENOMEM;
1174
1192 ret = of_property_read_u32(np, "fsl,spi-rdy-drctl", &spi_drctl); 1175 ret = of_property_read_u32(np, "fsl,spi-rdy-drctl", &spi_drctl);
1193 if ((ret < 0) || (spi_drctl >= 0x3)) { 1176 if ((ret < 0) || (spi_drctl >= 0x3)) {
1194 /* '11' is reserved */ 1177 /* '11' is reserved */
1195 spi_drctl = 0; 1178 spi_drctl = 0;
1196 } 1179 }
1197 1180
1198 if (!master)
1199 return -ENOMEM;
1200
1201 platform_set_drvdata(pdev, master); 1181 platform_set_drvdata(pdev, master);
1202 1182
1203 master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32); 1183 master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
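Dropping the per-field fallbacks (the old !config.speed_hz / !config.bpw checks) is safe because the SPI core normalizes every transfer before handing it to the driver; __spi_validate() in spi.c fills in the defaults roughly as below (a simplification for illustration, not a verbatim copy):

static void sketch_fill_transfer_defaults(struct spi_device *spi,
					  struct spi_transfer *xfer)
{
	if (!xfer->bits_per_word)
		xfer->bits_per_word = spi->bits_per_word;
	if (!xfer->speed_hz)
		xfer->speed_hz = spi->max_speed_hz;
}

The driver keeps only a trivial NULL guard on the transfer itself and can then read t->bits_per_word and t->speed_hz unconditionally.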
diff --git a/drivers/spi/spi-loopback-test.c b/drivers/spi/spi-loopback-test.c
index f4875f177df0..3459965004f8 100644
--- a/drivers/spi/spi-loopback-test.c
+++ b/drivers/spi/spi-loopback-test.c
@@ -894,7 +894,7 @@ int spi_test_execute_msg(struct spi_device *spi, struct spi_test *test,
894 test->elapsed_time = ktime_to_ns(ktime_sub(ktime_get(), start)); 894 test->elapsed_time = ktime_to_ns(ktime_sub(ktime_get(), start));
895 if (ret == -ETIMEDOUT) { 895 if (ret == -ETIMEDOUT) {
896 dev_info(&spi->dev, 896 dev_info(&spi->dev,
897 "spi-message timed out - reruning...\n"); 897 "spi-message timed out - rerunning...\n");
898 /* rerun after a few explicit schedules */ 898 /* rerun after a few explicit schedules */
899 for (i = 0; i < 16; i++) 899 for (i = 0; i < 16; i++)
900 schedule(); 900 schedule();
@@ -1021,10 +1021,9 @@ int spi_test_run_tests(struct spi_device *spi,
1021 rx = vmalloc(SPI_TEST_MAX_SIZE_PLUS); 1021 rx = vmalloc(SPI_TEST_MAX_SIZE_PLUS);
1022 else 1022 else
1023 rx = kzalloc(SPI_TEST_MAX_SIZE_PLUS, GFP_KERNEL); 1023 rx = kzalloc(SPI_TEST_MAX_SIZE_PLUS, GFP_KERNEL);
1024 if (!rx) { 1024 if (!rx)
1025 ret = -ENOMEM; 1025 return -ENOMEM;
1026 goto out; 1026
1027 }
1028 1027
1029 if (use_vmalloc) 1028 if (use_vmalloc)
1030 tx = vmalloc(SPI_TEST_MAX_SIZE_PLUS); 1029 tx = vmalloc(SPI_TEST_MAX_SIZE_PLUS);
@@ -1032,7 +1031,7 @@ int spi_test_run_tests(struct spi_device *spi,
1032 tx = kzalloc(SPI_TEST_MAX_SIZE_PLUS, GFP_KERNEL); 1031 tx = kzalloc(SPI_TEST_MAX_SIZE_PLUS, GFP_KERNEL);
1033 if (!tx) { 1032 if (!tx) {
1034 ret = -ENOMEM; 1033 ret = -ENOMEM;
1035 goto out; 1034 goto err_tx;
1036 } 1035 }
1037 1036
1038 /* now run the individual tests in the table */ 1037 /* now run the individual tests in the table */
@@ -1057,8 +1056,9 @@ int spi_test_run_tests(struct spi_device *spi,
1057 } 1056 }
1058 1057
1059out: 1058out:
1060 kvfree(rx);
1061 kvfree(tx); 1059 kvfree(tx);
1060err_tx:
1061 kvfree(rx);
1062 return ret; 1062 return ret;
1063} 1063}
1064EXPORT_SYMBOL_GPL(spi_test_run_tests); 1064EXPORT_SYMBOL_GPL(spi_test_run_tests);
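The relabelled error path restores the reverse-order unwind idiom: each acquisition gets its own label, and a failure jumps past the cleanup of anything not yet acquired, so nothing leaks and nothing is freed twice. In miniature, with illustrative names:

static int example_alloc_pair(void **rx, void **tx)
{
	*rx = kzalloc(4096, GFP_KERNEL);
	if (!*rx)
		return -ENOMEM;		/* nothing to unwind yet */

	*tx = kzalloc(4096, GFP_KERNEL);
	if (!*tx)
		goto err_free_rx;	/* unwind only what succeeded */

	return 0;

err_free_rx:
	kfree(*rx);
	return -ENOMEM;
}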
diff --git a/drivers/spi/spi-meson-spicc.c b/drivers/spi/spi-meson-spicc.c
new file mode 100644
index 000000000000..7f8429635502
--- /dev/null
+++ b/drivers/spi/spi-meson-spicc.c
@@ -0,0 +1,619 @@
1/*
2 * Driver for Amlogic Meson SPI communication controller (SPICC)
3 *
4 * Copyright (C) BayLibre, SAS
5 * Author: Neil Armstrong <narmstrong@baylibre.com>
6 *
7 * SPDX-License-Identifier: GPL-2.0+
8 */
9
10#include <linux/bitfield.h>
11#include <linux/clk.h>
12#include <linux/device.h>
13#include <linux/io.h>
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/of.h>
17#include <linux/platform_device.h>
18#include <linux/spi/spi.h>
19#include <linux/types.h>
20#include <linux/interrupt.h>
21#include <linux/reset.h>
22#include <linux/gpio.h>
23
24/*
25 * The Meson SPICC controller could support DMA-based transfers, but DMA is
26 * not implemented in the vendor code and, although the registers are
27 * documented, it has never worked on the GXL hardware.
28 * PIO is the only mode implemented, and the poorly designed HW imposes:
29 * - all transfers are cut into 16-word bursts, because the FIFO hangs on
30 * TX underflow and there is no TX "Half-Empty" interrupt, so we can only
31 * go one FIFO-sized chunk at a time
32 * - CS management is dumb and goes up between every burst, so it is really
33 * a "Data Valid" signal rather than a Chip Select; a GPIO should be used
34 * instead to keep CS low over the full transfer
35 */
36
37#define SPICC_MAX_FREQ 30000000
38#define SPICC_MAX_BURST 128
39
40/* Register Map */
41#define SPICC_RXDATA 0x00
42
43#define SPICC_TXDATA 0x04
44
45#define SPICC_CONREG 0x08
46#define SPICC_ENABLE BIT(0)
47#define SPICC_MODE_MASTER BIT(1)
48#define SPICC_XCH BIT(2)
49#define SPICC_SMC BIT(3)
50#define SPICC_POL BIT(4)
51#define SPICC_PHA BIT(5)
52#define SPICC_SSCTL BIT(6)
53#define SPICC_SSPOL BIT(7)
54#define SPICC_DRCTL_MASK GENMASK(9, 8)
55#define SPICC_DRCTL_IGNORE 0
56#define SPICC_DRCTL_FALLING 1
57#define SPICC_DRCTL_LOWLEVEL 2
58#define SPICC_CS_MASK GENMASK(13, 12)
59#define SPICC_DATARATE_MASK GENMASK(18, 16)
60#define SPICC_DATARATE_DIV4 0
61#define SPICC_DATARATE_DIV8 1
62#define SPICC_DATARATE_DIV16 2
63#define SPICC_DATARATE_DIV32 3
64#define SPICC_BITLENGTH_MASK GENMASK(24, 19)
65#define SPICC_BURSTLENGTH_MASK GENMASK(31, 25)
66
67#define SPICC_INTREG 0x0c
68#define SPICC_TE_EN BIT(0) /* TX FIFO Empty Interrupt */
69#define SPICC_TH_EN BIT(1) /* TX FIFO Half-Full Interrupt */
70#define SPICC_TF_EN BIT(2) /* TX FIFO Full Interrupt */
71#define SPICC_RR_EN BIT(3) /* RX FIFO Ready Interrupt */
72#define SPICC_RH_EN BIT(4) /* RX FIFO Half-Full Interrupt */
73#define SPICC_RF_EN BIT(5) /* RX FIFO Full Interrupt */
74#define SPICC_RO_EN BIT(6) /* RX FIFO Overflow Interrupt */
75#define SPICC_TC_EN BIT(7) /* Transfer Complete Interrupt */
76
77#define SPICC_DMAREG 0x10
78#define SPICC_DMA_ENABLE BIT(0)
79#define SPICC_TXFIFO_THRESHOLD_MASK GENMASK(5, 1)
80#define SPICC_RXFIFO_THRESHOLD_MASK GENMASK(10, 6)
81#define SPICC_READ_BURST_MASK GENMASK(14, 11)
82#define SPICC_WRITE_BURST_MASK GENMASK(18, 15)
83#define SPICC_DMA_URGENT BIT(19)
84#define SPICC_DMA_THREADID_MASK GENMASK(25, 20)
85#define SPICC_DMA_BURSTNUM_MASK GENMASK(31, 26)
86
87#define SPICC_STATREG 0x14
88#define SPICC_TE BIT(0) /* TX FIFO Empty Interrupt */
89#define SPICC_TH BIT(1) /* TX FIFO Half-Full Interrupt */
90#define SPICC_TF BIT(2) /* TX FIFO Full Interrupt */
91#define SPICC_RR BIT(3) /* RX FIFO Ready Interrupt */
92#define SPICC_RH BIT(4) /* RX FIFO Half-Full Interrupt */
93#define SPICC_RF BIT(5) /* RX FIFO Full Interrupt */
94#define SPICC_RO BIT(6) /* RX FIFO Overflow Interrupt */
95#define SPICC_TC BIT(7) /* Transfer Complete Interrupt */
96
97#define SPICC_PERIODREG 0x18
98#define SPICC_PERIOD GENMASK(14, 0) /* Wait cycles */
99
100#define SPICC_TESTREG 0x1c
101#define SPICC_TXCNT_MASK GENMASK(4, 0) /* TX FIFO Counter */
102#define SPICC_RXCNT_MASK GENMASK(9, 5) /* RX FIFO Counter */
103#define SPICC_SMSTATUS_MASK GENMASK(12, 10) /* State Machine Status */
104#define SPICC_LBC_RO BIT(13) /* Loop Back Control Read-Only */
105#define SPICC_LBC_W1 BIT(14) /* Loop Back Control Write-Only */
106#define SPICC_SWAP_RO BIT(14) /* RX FIFO Data Swap Read-Only */
107#define SPICC_SWAP_W1 BIT(15) /* RX FIFO Data Swap Write-Only */
108#define SPICC_DLYCTL_RO_MASK GENMASK(20, 15) /* Delay Control Read-Only */
109#define SPICC_DLYCTL_W1_MASK GENMASK(21, 16) /* Delay Control Write-Only */
110#define SPICC_FIFORST_RO_MASK GENMASK(22, 21) /* FIFO Softreset Read-Only */
111#define SPICC_FIFORST_W1_MASK GENMASK(23, 22) /* FIFO Softreset Write-Only */
112
113#define SPICC_DRADDR 0x20 /* Read Address of DMA */
114
115#define SPICC_DWADDR 0x24 /* Write Address of DMA */
116
117#define writel_bits_relaxed(mask, val, addr) \
118 writel_relaxed((readl_relaxed(addr) & ~(mask)) | (val), addr)
119
120#define SPICC_BURST_MAX 16
121#define SPICC_FIFO_HALF 10
122
123struct meson_spicc_device {
124 struct spi_master *master;
125 struct platform_device *pdev;
126 void __iomem *base;
127 struct clk *core;
128 struct spi_message *message;
129 struct spi_transfer *xfer;
130 u8 *tx_buf;
131 u8 *rx_buf;
132 unsigned int bytes_per_word;
133 unsigned long tx_remain;
134 unsigned long txb_remain;
135 unsigned long rx_remain;
136 unsigned long rxb_remain;
137 unsigned long xfer_remain;
138 bool is_burst_end;
139 bool is_last_burst;
140};
141
142static inline bool meson_spicc_txfull(struct meson_spicc_device *spicc)
143{
144 return !!FIELD_GET(SPICC_TF,
145 readl_relaxed(spicc->base + SPICC_STATREG));
146}
147
148static inline bool meson_spicc_rxready(struct meson_spicc_device *spicc)
149{
150 return FIELD_GET(SPICC_RH | SPICC_RR | SPICC_RF,
151 readl_relaxed(spicc->base + SPICC_STATREG));
152}
153
154static inline u32 meson_spicc_pull_data(struct meson_spicc_device *spicc)
155{
156 unsigned int bytes = spicc->bytes_per_word;
157 unsigned int byte_shift = 0;
158 u32 data = 0;
159 u8 byte;
160
161 while (bytes--) {
162 byte = *spicc->tx_buf++;
163 data |= (byte & 0xff) << byte_shift;
164 byte_shift += 8;
165 }
166
167 spicc->tx_remain--;
168 return data;
169}
170
171static inline void meson_spicc_push_data(struct meson_spicc_device *spicc,
172 u32 data)
173{
174 unsigned int bytes = spicc->bytes_per_word;
175 unsigned int byte_shift = 0;
176 u8 byte;
177
178 while (bytes--) {
179 byte = (data >> byte_shift) & 0xff;
180 *spicc->rx_buf++ = byte;
181 byte_shift += 8;
182 }
183
184 spicc->rx_remain--;
185}
186
187static inline void meson_spicc_rx(struct meson_spicc_device *spicc)
188{
189 /* Empty RX FIFO */
190 while (spicc->rx_remain &&
191 meson_spicc_rxready(spicc))
192 meson_spicc_push_data(spicc,
193 readl_relaxed(spicc->base + SPICC_RXDATA));
194}
195
196static inline void meson_spicc_tx(struct meson_spicc_device *spicc)
197{
198 /* Fill Up TX FIFO */
199 while (spicc->tx_remain &&
200 !meson_spicc_txfull(spicc))
201 writel_relaxed(meson_spicc_pull_data(spicc),
202 spicc->base + SPICC_TXDATA);
203}
204
205static inline u32 meson_spicc_setup_rx_irq(struct meson_spicc_device *spicc,
206 u32 irq_ctrl)
207{
208 if (spicc->rx_remain > SPICC_FIFO_HALF)
209 irq_ctrl |= SPICC_RH_EN;
210 else
211 irq_ctrl |= SPICC_RR_EN;
212
213 return irq_ctrl;
214}
215
216static inline void meson_spicc_setup_burst(struct meson_spicc_device *spicc,
217 unsigned int burst_len)
218{
219 /* Setup Xfer variables */
220 spicc->tx_remain = burst_len;
221 spicc->rx_remain = burst_len;
222 spicc->xfer_remain -= burst_len * spicc->bytes_per_word;
223 spicc->is_burst_end = false;
224 if (burst_len < SPICC_BURST_MAX || !spicc->xfer_remain)
225 spicc->is_last_burst = true;
226 else
227 spicc->is_last_burst = false;
228
229 /* Setup burst length */
230 writel_bits_relaxed(SPICC_BURSTLENGTH_MASK,
231 FIELD_PREP(SPICC_BURSTLENGTH_MASK,
232 burst_len),
233 spicc->base + SPICC_CONREG);
234
235 /* Fill TX FIFO */
236 meson_spicc_tx(spicc);
237}
238
239static irqreturn_t meson_spicc_irq(int irq, void *data)
240{
241 struct meson_spicc_device *spicc = (void *) data;
242 u32 ctrl = readl_relaxed(spicc->base + SPICC_INTREG);
243 u32 stat = readl_relaxed(spicc->base + SPICC_STATREG) & ctrl;
244
245 ctrl &= ~(SPICC_RH_EN | SPICC_RR_EN);
246
247 /* Empty RX FIFO */
248 meson_spicc_rx(spicc);
249
250 /* Enable TC interrupt since we transferred everything */
251 if (!spicc->tx_remain && !spicc->rx_remain) {
252 spicc->is_burst_end = true;
253
254 /* Enable TC interrupt */
255 ctrl |= SPICC_TC_EN;
256
257 /* Reload IRQ status */
258 stat = readl_relaxed(spicc->base + SPICC_STATREG) & ctrl;
259 }
260
261 /* Check transfer complete */
262 if ((stat & SPICC_TC) && spicc->is_burst_end) {
263 unsigned int burst_len;
264
265 /* Clear TC bit */
266 writel_relaxed(SPICC_TC, spicc->base + SPICC_STATREG);
267
268 /* Disable TC interrupt */
269 ctrl &= ~SPICC_TC_EN;
270
271 if (spicc->is_last_burst) {
272 /* Disable all IRQs */
273 writel(0, spicc->base + SPICC_INTREG);
274
275 spi_finalize_current_transfer(spicc->master);
276
277 return IRQ_HANDLED;
278 }
279
280 burst_len = min_t(unsigned int,
281 spicc->xfer_remain / spicc->bytes_per_word,
282 SPICC_BURST_MAX);
283
284 /* Setup burst */
285 meson_spicc_setup_burst(spicc, burst_len);
286
287 /* Restart burst */
288 writel_bits_relaxed(SPICC_XCH, SPICC_XCH,
289 spicc->base + SPICC_CONREG);
290 }
291
292 /* Setup RX interrupt trigger */
293 ctrl = meson_spicc_setup_rx_irq(spicc, ctrl);
294
295 /* Reconfigure interrupts */
296 writel(ctrl, spicc->base + SPICC_INTREG);
297
298 return IRQ_HANDLED;
299}
300
301static u32 meson_spicc_setup_speed(struct meson_spicc_device *spicc, u32 conf,
302 u32 speed)
303{
304 unsigned long parent, value;
305 unsigned int i, div;
306
307 parent = clk_get_rate(spicc->core);
308
309 /* Find closest inferior/equal possible speed */
310 for (i = 0 ; i < 7 ; ++i) {
311 /* 2^(data_rate+2) */
312 value = parent >> (i + 2);
313
314 if (value <= speed)
315 break;
316 }
317
318 /* If the requested speed is below the slowest achievable rate, clamp to the largest divider */
319 if (i >= 7) {
320 div = 7;
321 dev_warn_once(&spicc->pdev->dev, "unable to get close to speed %u\n",
322 speed);
323 } else
324 div = i;
325
326 dev_dbg(&spicc->pdev->dev, "parent %lu, speed %u -> %lu (%u)\n",
327 parent, speed, value, div);
328
329 conf &= ~SPICC_DATARATE_MASK;
330 conf |= FIELD_PREP(SPICC_DATARATE_MASK, div);
331
332 return conf;
333}
334
335static void meson_spicc_setup_xfer(struct meson_spicc_device *spicc,
336 struct spi_transfer *xfer)
337{
338 u32 conf, conf_orig;
339
340 /* Read original configuration */
341 conf = conf_orig = readl_relaxed(spicc->base + SPICC_CONREG);
342
343 /* Select closest divider */
344 conf = meson_spicc_setup_speed(spicc, conf, xfer->speed_hz);
345
346 /* Setup word width */
347 conf &= ~SPICC_BITLENGTH_MASK;
348 conf |= FIELD_PREP(SPICC_BITLENGTH_MASK,
349 (spicc->bytes_per_word << 3) - 1);
350
351 /* Ignore if unchanged */
352 if (conf != conf_orig)
353 writel_relaxed(conf, spicc->base + SPICC_CONREG);
354}
355
356static int meson_spicc_transfer_one(struct spi_master *master,
357 struct spi_device *spi,
358 struct spi_transfer *xfer)
359{
360 struct meson_spicc_device *spicc = spi_master_get_devdata(master);
361 unsigned int burst_len;
362 u32 irq = 0;
363
364 /* Store current transfer */
365 spicc->xfer = xfer;
366
367 /* Setup transfer parameters */
368 spicc->tx_buf = (u8 *)xfer->tx_buf;
369 spicc->rx_buf = (u8 *)xfer->rx_buf;
370 spicc->xfer_remain = xfer->len;
371
372 /* Pre-calculate word size */
373 spicc->bytes_per_word =
374 DIV_ROUND_UP(spicc->xfer->bits_per_word, 8);
375
376 /* Setup transfer parameters */
377 meson_spicc_setup_xfer(spicc, xfer);
378
379 burst_len = min_t(unsigned int,
380 spicc->xfer_remain / spicc->bytes_per_word,
381 SPICC_BURST_MAX);
382
383 meson_spicc_setup_burst(spicc, burst_len);
384
385 irq = meson_spicc_setup_rx_irq(spicc, irq);
386
387 /* Start burst */
388 writel_bits_relaxed(SPICC_XCH, SPICC_XCH, spicc->base + SPICC_CONREG);
389
390 /* Enable interrupts */
391 writel_relaxed(irq, spicc->base + SPICC_INTREG);
392
393 return 1;
394}
395
396static int meson_spicc_prepare_message(struct spi_master *master,
397 struct spi_message *message)
398{
399 struct meson_spicc_device *spicc = spi_master_get_devdata(master);
400 struct spi_device *spi = message->spi;
401 u32 conf = 0;
402
403 /* Store current message */
404 spicc->message = message;
405
406 /* Enable Master */
407 conf |= SPICC_ENABLE;
408 conf |= SPICC_MODE_MASTER;
409
410 /* SMC = 0 */
411
412 /* Setup transfer mode */
413 if (spi->mode & SPI_CPOL)
414 conf |= SPICC_POL;
415 else
416 conf &= ~SPICC_POL;
417
418 if (spi->mode & SPI_CPHA)
419 conf |= SPICC_PHA;
420 else
421 conf &= ~SPICC_PHA;
422
423 /* SSCTL = 0 */
424
425 if (spi->mode & SPI_CS_HIGH)
426 conf |= SPICC_SSPOL;
427 else
428 conf &= ~SPICC_SSPOL;
429
430 if (spi->mode & SPI_READY)
431 conf |= FIELD_PREP(SPICC_DRCTL_MASK, SPICC_DRCTL_LOWLEVEL);
432 else
433 conf |= FIELD_PREP(SPICC_DRCTL_MASK, SPICC_DRCTL_IGNORE);
434
435 /* Select CS */
436 conf |= FIELD_PREP(SPICC_CS_MASK, spi->chip_select);
437
438 /* Default Clock rate core/4 */
439
440 /* Default 8bit word */
441 conf |= FIELD_PREP(SPICC_BITLENGTH_MASK, 8 - 1);
442
443 writel_relaxed(conf, spicc->base + SPICC_CONREG);
444
445 /* Setup no wait cycles by default */
446 writel_relaxed(0, spicc->base + SPICC_PERIODREG);
447
448 writel_bits_relaxed(BIT(24), BIT(24), spicc->base + SPICC_TESTREG);
449
450 return 0;
451}
452
453static int meson_spicc_unprepare_transfer(struct spi_master *master)
454{
455 struct meson_spicc_device *spicc = spi_master_get_devdata(master);
456
457 /* Disable all IRQs */
458 writel(0, spicc->base + SPICC_INTREG);
459
460 /* Disable controller */
461 writel_bits_relaxed(SPICC_ENABLE, 0, spicc->base + SPICC_CONREG);
462
463 device_reset_optional(&spicc->pdev->dev);
464
465 return 0;
466}
467
468static int meson_spicc_setup(struct spi_device *spi)
469{
470 int ret = 0;
471
472 if (!spi->controller_state)
473 spi->controller_state = spi_master_get_devdata(spi->master);
474 else if (gpio_is_valid(spi->cs_gpio))
475 goto out_gpio;
476 else if (spi->cs_gpio == -ENOENT)
477 return 0;
478
479 if (gpio_is_valid(spi->cs_gpio)) {
480 ret = gpio_request(spi->cs_gpio, dev_name(&spi->dev));
481 if (ret) {
482 dev_err(&spi->dev, "failed to request cs gpio\n");
483 return ret;
484 }
485 }
486
487out_gpio:
488 ret = gpio_direction_output(spi->cs_gpio,
489 !(spi->mode & SPI_CS_HIGH));
490
491 return ret;
492}
493
494static void meson_spicc_cleanup(struct spi_device *spi)
495{
496 if (gpio_is_valid(spi->cs_gpio))
497 gpio_free(spi->cs_gpio);
498
499 spi->controller_state = NULL;
500}
501
502static int meson_spicc_probe(struct platform_device *pdev)
503{
504 struct spi_master *master;
505 struct meson_spicc_device *spicc;
506 struct resource *res;
507 int ret, irq, rate;
508
509 master = spi_alloc_master(&pdev->dev, sizeof(*spicc));
510 if (!master) {
511 dev_err(&pdev->dev, "master allocation failed\n");
512 return -ENOMEM;
513 }
514 spicc = spi_master_get_devdata(master);
515 spicc->master = master;
516
517 spicc->pdev = pdev;
518 platform_set_drvdata(pdev, spicc);
519
520 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
521 spicc->base = devm_ioremap_resource(&pdev->dev, res);
522 if (IS_ERR(spicc->base)) {
523 dev_err(&pdev->dev, "io resource mapping failed\n");
524 ret = PTR_ERR(spicc->base);
525 goto out_master;
526 }
527
528 /* Disable all IRQs */
529 writel_relaxed(0, spicc->base + SPICC_INTREG);
530
531 irq = platform_get_irq(pdev, 0);
532 ret = devm_request_irq(&pdev->dev, irq, meson_spicc_irq,
533 0, NULL, spicc);
534 if (ret) {
535 dev_err(&pdev->dev, "irq request failed\n");
536 goto out_master;
537 }
538
539 spicc->core = devm_clk_get(&pdev->dev, "core");
540 if (IS_ERR(spicc->core)) {
541 dev_err(&pdev->dev, "core clock request failed\n");
542 ret = PTR_ERR(spicc->core);
543 goto out_master;
544 }
545
546 ret = clk_prepare_enable(spicc->core);
547 if (ret) {
548 dev_err(&pdev->dev, "core clock enable failed\n");
549 goto out_master;
550 }
551 rate = clk_get_rate(spicc->core);
552
553 device_reset_optional(&pdev->dev);
554
555 master->num_chipselect = 4;
556 master->dev.of_node = pdev->dev.of_node;
557 master->mode_bits = SPI_CPHA | SPI_CPOL | SPI_CS_HIGH;
558 master->bits_per_word_mask = SPI_BPW_MASK(32) |
559 SPI_BPW_MASK(24) |
560 SPI_BPW_MASK(16) |
561 SPI_BPW_MASK(8);
562 master->flags = (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX);
563 master->min_speed_hz = rate >> 9;
564 master->setup = meson_spicc_setup;
565 master->cleanup = meson_spicc_cleanup;
566 master->prepare_message = meson_spicc_prepare_message;
567 master->unprepare_transfer_hardware = meson_spicc_unprepare_transfer;
568 master->transfer_one = meson_spicc_transfer_one;
569
570 /* Setup max rate according to the Meson GX datasheet */
571 if ((rate >> 2) > SPICC_MAX_FREQ)
572 master->max_speed_hz = SPICC_MAX_FREQ;
573 else
574 master->max_speed_hz = rate >> 2;
575
576 ret = devm_spi_register_master(&pdev->dev, master);
577 if (!ret)
578 return 0;
579
580 dev_err(&pdev->dev, "spi master registration failed\n");
581
582out_master:
583 spi_master_put(master);
584
585 return ret;
586}
587
588static int meson_spicc_remove(struct platform_device *pdev)
589{
590 struct meson_spicc_device *spicc = platform_get_drvdata(pdev);
591
592 /* Disable SPI */
593 writel(0, spicc->base + SPICC_CONREG);
594
595 clk_disable_unprepare(spicc->core);
596
597 return 0;
598}
599
600static const struct of_device_id meson_spicc_of_match[] = {
601 { .compatible = "amlogic,meson-gx-spicc", },
602 { /* sentinel */ }
603};
604MODULE_DEVICE_TABLE(of, meson_spicc_of_match);
605
606static struct platform_driver meson_spicc_driver = {
607 .probe = meson_spicc_probe,
608 .remove = meson_spicc_remove,
609 .driver = {
610 .name = "meson-spicc",
611 .of_match_table = of_match_ptr(meson_spicc_of_match),
612 },
613};
614
615module_platform_driver(meson_spicc_driver);
616
617MODULE_DESCRIPTION("Meson SPI Communication Controller driver");
618MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
619MODULE_LICENSE("GPL");
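The driver completes transfers asynchronously: transfer_one() primes the first 16-word burst and returns 1, then meson_spicc_irq() drains the RX FIFO, chains the next burst, and only calls spi_finalize_current_transfer() once xfer_remain reaches zero. The contract in skeleton form, with start_burst()/burst_done() as hypothetical stand-ins for the register sequences above:

static int sketch_transfer_one(struct spi_master *master,
			       struct spi_device *spi,
			       struct spi_transfer *xfer)
{
	start_burst(master, xfer);	/* hypothetical helper */
	return 1;			/* >0: completion arrives later */
}

static irqreturn_t sketch_irq(int irq, void *data)
{
	struct spi_master *master = data;

	if (burst_done(master))		/* hypothetical helper */
		spi_finalize_current_transfer(master);

	return IRQ_HANDLED;
}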
diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c
index 278867a31950..86bf45667a04 100644
--- a/drivers/spi/spi-mt65xx.c
+++ b/drivers/spi/spi-mt65xx.c
@@ -35,11 +35,15 @@
35#define SPI_CMD_REG 0x0018 35#define SPI_CMD_REG 0x0018
36#define SPI_STATUS0_REG 0x001c 36#define SPI_STATUS0_REG 0x001c
37#define SPI_PAD_SEL_REG 0x0024 37#define SPI_PAD_SEL_REG 0x0024
38#define SPI_CFG2_REG 0x0028
38 39
39#define SPI_CFG0_SCK_HIGH_OFFSET 0 40#define SPI_CFG0_SCK_HIGH_OFFSET 0
40#define SPI_CFG0_SCK_LOW_OFFSET 8 41#define SPI_CFG0_SCK_LOW_OFFSET 8
41#define SPI_CFG0_CS_HOLD_OFFSET 16 42#define SPI_CFG0_CS_HOLD_OFFSET 16
42#define SPI_CFG0_CS_SETUP_OFFSET 24 43#define SPI_CFG0_CS_SETUP_OFFSET 24
44#define SPI_ADJUST_CFG0_SCK_LOW_OFFSET 16
45#define SPI_ADJUST_CFG0_CS_HOLD_OFFSET 0
46#define SPI_ADJUST_CFG0_CS_SETUP_OFFSET 16
43 47
44#define SPI_CFG1_CS_IDLE_OFFSET 0 48#define SPI_CFG1_CS_IDLE_OFFSET 0
45#define SPI_CFG1_PACKET_LOOP_OFFSET 8 49#define SPI_CFG1_PACKET_LOOP_OFFSET 8
@@ -55,6 +59,8 @@
55#define SPI_CMD_RST BIT(2) 59#define SPI_CMD_RST BIT(2)
56#define SPI_CMD_PAUSE_EN BIT(4) 60#define SPI_CMD_PAUSE_EN BIT(4)
57#define SPI_CMD_DEASSERT BIT(5) 61#define SPI_CMD_DEASSERT BIT(5)
62#define SPI_CMD_SAMPLE_SEL BIT(6)
63#define SPI_CMD_CS_POL BIT(7)
58#define SPI_CMD_CPHA BIT(8) 64#define SPI_CMD_CPHA BIT(8)
59#define SPI_CMD_CPOL BIT(9) 65#define SPI_CMD_CPOL BIT(9)
60#define SPI_CMD_RX_DMA BIT(10) 66#define SPI_CMD_RX_DMA BIT(10)
@@ -80,6 +86,8 @@ struct mtk_spi_compatible {
80 bool need_pad_sel; 86 bool need_pad_sel;
81 /* Must explicitly send dummy Tx bytes to do Rx only transfer */ 87 /* Must explicitly send dummy Tx bytes to do Rx only transfer */
82 bool must_tx; 88 bool must_tx;
89 /* some IC designs adjust the cfg register to enhance timing accuracy */
90 bool enhance_timing;
83}; 91};
84 92
85struct mtk_spi { 93struct mtk_spi {
@@ -96,6 +104,16 @@ struct mtk_spi {
96}; 104};
97 105
98static const struct mtk_spi_compatible mtk_common_compat; 106static const struct mtk_spi_compatible mtk_common_compat;
107
108static const struct mtk_spi_compatible mt2712_compat = {
109 .must_tx = true,
110};
111
112static const struct mtk_spi_compatible mt7622_compat = {
113 .must_tx = true,
114 .enhance_timing = true,
115};
116
99static const struct mtk_spi_compatible mt8173_compat = { 117static const struct mtk_spi_compatible mt8173_compat = {
100 .need_pad_sel = true, 118 .need_pad_sel = true,
101 .must_tx = true, 119 .must_tx = true,
@@ -108,15 +126,23 @@ static const struct mtk_spi_compatible mt8173_compat = {
108static const struct mtk_chip_config mtk_default_chip_info = { 126static const struct mtk_chip_config mtk_default_chip_info = {
109 .rx_mlsb = 1, 127 .rx_mlsb = 1,
110 .tx_mlsb = 1, 128 .tx_mlsb = 1,
129 .cs_pol = 0,
130 .sample_sel = 0,
111}; 131};
112 132
113static const struct of_device_id mtk_spi_of_match[] = { 133static const struct of_device_id mtk_spi_of_match[] = {
114 { .compatible = "mediatek,mt2701-spi", 134 { .compatible = "mediatek,mt2701-spi",
115 .data = (void *)&mtk_common_compat, 135 .data = (void *)&mtk_common_compat,
116 }, 136 },
137 { .compatible = "mediatek,mt2712-spi",
138 .data = (void *)&mt2712_compat,
139 },
117 { .compatible = "mediatek,mt6589-spi", 140 { .compatible = "mediatek,mt6589-spi",
118 .data = (void *)&mtk_common_compat, 141 .data = (void *)&mtk_common_compat,
119 }, 142 },
143 { .compatible = "mediatek,mt7622-spi",
144 .data = (void *)&mt7622_compat,
145 },
120 { .compatible = "mediatek,mt8135-spi", 146 { .compatible = "mediatek,mt8135-spi",
121 .data = (void *)&mtk_common_compat, 147 .data = (void *)&mtk_common_compat,
122 }, 148 },
@@ -182,6 +208,17 @@ static int mtk_spi_prepare_message(struct spi_master *master,
182 reg_val |= SPI_CMD_RX_ENDIAN; 208 reg_val |= SPI_CMD_RX_ENDIAN;
183#endif 209#endif
184 210
211 if (mdata->dev_comp->enhance_timing) {
212 if (chip_config->cs_pol)
213 reg_val |= SPI_CMD_CS_POL;
214 else
215 reg_val &= ~SPI_CMD_CS_POL;
216 if (chip_config->sample_sel)
217 reg_val |= SPI_CMD_SAMPLE_SEL;
218 else
219 reg_val &= ~SPI_CMD_SAMPLE_SEL;
220 }
221
185 /* set finish and pause interrupt always enable */ 222 /* set finish and pause interrupt always enable */
186 reg_val |= SPI_CMD_FINISH_IE | SPI_CMD_PAUSE_IE; 223 reg_val |= SPI_CMD_FINISH_IE | SPI_CMD_PAUSE_IE;
187 224
@@ -233,11 +270,25 @@ static void mtk_spi_prepare_transfer(struct spi_master *master,
233 sck_time = (div + 1) / 2; 270 sck_time = (div + 1) / 2;
234 cs_time = sck_time * 2; 271 cs_time = sck_time * 2;
235 272
236 reg_val |= (((sck_time - 1) & 0xff) << SPI_CFG0_SCK_HIGH_OFFSET); 273 if (mdata->dev_comp->enhance_timing) {
237 reg_val |= (((sck_time - 1) & 0xff) << SPI_CFG0_SCK_LOW_OFFSET); 274 reg_val |= (((sck_time - 1) & 0xffff)
238 reg_val |= (((cs_time - 1) & 0xff) << SPI_CFG0_CS_HOLD_OFFSET); 275 << SPI_CFG0_SCK_HIGH_OFFSET);
239 reg_val |= (((cs_time - 1) & 0xff) << SPI_CFG0_CS_SETUP_OFFSET); 276 reg_val |= (((sck_time - 1) & 0xffff)
240 writel(reg_val, mdata->base + SPI_CFG0_REG); 277 << SPI_ADJUST_CFG0_SCK_LOW_OFFSET);
278 writel(reg_val, mdata->base + SPI_CFG2_REG);
279 reg_val |= (((cs_time - 1) & 0xffff)
280 << SPI_ADJUST_CFG0_CS_HOLD_OFFSET);
281 reg_val |= (((cs_time - 1) & 0xffff)
282 << SPI_ADJUST_CFG0_CS_SETUP_OFFSET);
283 writel(reg_val, mdata->base + SPI_CFG0_REG);
284 } else {
285 reg_val |= (((sck_time - 1) & 0xff)
286 << SPI_CFG0_SCK_HIGH_OFFSET);
287 reg_val |= (((sck_time - 1) & 0xff) << SPI_CFG0_SCK_LOW_OFFSET);
288 reg_val |= (((cs_time - 1) & 0xff) << SPI_CFG0_CS_HOLD_OFFSET);
289 reg_val |= (((cs_time - 1) & 0xff) << SPI_CFG0_CS_SETUP_OFFSET);
290 writel(reg_val, mdata->base + SPI_CFG0_REG);
291 }
241 292
242 reg_val = readl(mdata->base + SPI_CFG1_REG); 293 reg_val = readl(mdata->base + SPI_CFG1_REG);
243 reg_val &= ~SPI_CFG1_CS_IDLE_MASK; 294 reg_val &= ~SPI_CFG1_CS_IDLE_MASK;
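On enhance_timing parts the SCK and CS counts widen from 8 to 16 bits, which no longer fit together in SPI_CFG0_REG, hence the split across SPI_CFG0_REG and the new SPI_CFG2_REG. Worked numbers for the arithmetic above (the 109.2 MHz parent clock and ceiling division are assumptions for illustration):

div      = DIV_ROUND_UP(109200000, 26000000);	/* = 5 for a 26 MHz target */
sck_time = (div + 1) / 2;			/* = 3 cycles per SCK half-period */
cs_time  = sck_time * 2;			/* = 6 cycles of CS setup/hold */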
diff --git a/drivers/spi/spi-slave-system-control.c b/drivers/spi/spi-slave-system-control.c
new file mode 100644
index 000000000000..c0257e937995
--- /dev/null
+++ b/drivers/spi/spi-slave-system-control.c
@@ -0,0 +1,154 @@
1/*
2 * SPI slave handler controlling system state
3 *
4 * This SPI slave handler allows remote control of system reboot, power off,
5 * halt, and suspend.
6 *
7 * Copyright (C) 2016-2017 Glider bvba
8 *
9 * This file is subject to the terms and conditions of the GNU General Public
10 * License. See the file "COPYING" in the main directory of this archive
11 * for more details.
12 *
13 * Usage (assuming /dev/spidev2.0 corresponds to the SPI master on the remote
14 * system):
15 *
16 * # reboot='\x7c\x50'
17 * # poweroff='\x71\x3f'
18 * # halt='\x38\x76'
19 * # suspend='\x1b\x1b'
20 * # spidev_test -D /dev/spidev2.0 -p $suspend # or $reboot, $poweroff, $halt
21 */
22
23#include <linux/completion.h>
24#include <linux/module.h>
25#include <linux/reboot.h>
26#include <linux/suspend.h>
27#include <linux/spi/spi.h>
28
29/*
30 * The numbers are chosen to display something human-readable on two 7-segment
31 * displays connected to two 74HC595 shift registers
32 */
33#define CMD_REBOOT 0x7c50 /* rb */
34#define CMD_POWEROFF 0x713f /* OF */
35#define CMD_HALT 0x3876 /* HL */
36#define CMD_SUSPEND 0x1b1b /* ZZ */
37
38struct spi_slave_system_control_priv {
39 struct spi_device *spi;
40 struct completion finished;
41 struct spi_transfer xfer;
42 struct spi_message msg;
43 __be16 cmd;
44};
45
46static
47int spi_slave_system_control_submit(struct spi_slave_system_control_priv *priv);
48
49static void spi_slave_system_control_complete(void *arg)
50{
51 struct spi_slave_system_control_priv *priv = arg;
52 u16 cmd;
53 int ret;
54
55 if (priv->msg.status)
56 goto terminate;
57
58 cmd = be16_to_cpu(priv->cmd);
59 switch (cmd) {
60 case CMD_REBOOT:
61 dev_info(&priv->spi->dev, "Rebooting system...\n");
62 kernel_restart(NULL);
63
64 case CMD_POWEROFF:
65 dev_info(&priv->spi->dev, "Powering off system...\n");
66 kernel_power_off();
67 break;
68
69 case CMD_HALT:
70 dev_info(&priv->spi->dev, "Halting system...\n");
71 kernel_halt();
72 break;
73
74 case CMD_SUSPEND:
75 dev_info(&priv->spi->dev, "Suspending system...\n");
76 pm_suspend(PM_SUSPEND_MEM);
77 break;
78
79 default:
80 dev_warn(&priv->spi->dev, "Unknown command 0x%x\n", cmd);
81 break;
82 }
83
84 ret = spi_slave_system_control_submit(priv);
85 if (ret)
86 goto terminate;
87
88 return;
89
90terminate:
91 dev_info(&priv->spi->dev, "Terminating\n");
92 complete(&priv->finished);
93}
94
95static
96int spi_slave_system_control_submit(struct spi_slave_system_control_priv *priv)
97{
98 int ret;
99
100 spi_message_init_with_transfers(&priv->msg, &priv->xfer, 1);
101
102 priv->msg.complete = spi_slave_system_control_complete;
103 priv->msg.context = priv;
104
105 ret = spi_async(priv->spi, &priv->msg);
106 if (ret)
107 dev_err(&priv->spi->dev, "spi_async() failed %d\n", ret);
108
109 return ret;
110}
111
112static int spi_slave_system_control_probe(struct spi_device *spi)
113{
114 struct spi_slave_system_control_priv *priv;
115 int ret;
116
117 priv = devm_kzalloc(&spi->dev, sizeof(*priv), GFP_KERNEL);
118 if (!priv)
119 return -ENOMEM;
120
121 priv->spi = spi;
122 init_completion(&priv->finished);
123 priv->xfer.rx_buf = &priv->cmd;
124 priv->xfer.len = sizeof(priv->cmd);
125
126 ret = spi_slave_system_control_submit(priv);
127 if (ret)
128 return ret;
129
130 spi_set_drvdata(spi, priv);
131 return 0;
132}
133
134static int spi_slave_system_control_remove(struct spi_device *spi)
135{
136 struct spi_slave_system_control_priv *priv = spi_get_drvdata(spi);
137
138 spi_slave_abort(spi);
139 wait_for_completion(&priv->finished);
140 return 0;
141}
142
143static struct spi_driver spi_slave_system_control_driver = {
144 .driver = {
145 .name = "spi-slave-system-control",
146 },
147 .probe = spi_slave_system_control_probe,
148 .remove = spi_slave_system_control_remove,
149};
150module_spi_driver(spi_slave_system_control_driver);
151
152MODULE_AUTHOR("Geert Uytterhoeven <geert+renesas@glider.be>");
153MODULE_DESCRIPTION("SPI slave handler controlling system state");
154MODULE_LICENSE("GPL v2");
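For completeness, the spidev_test invocation in the header comment maps onto a few ioctls; a master-side userspace sketch (device path and error handling kept minimal, names illustrative):

#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/spi/spidev.h>

int send_suspend(const char *dev)		/* e.g. "/dev/spidev2.0" */
{
	uint8_t tx[2] = { 0x1b, 0x1b };		/* CMD_SUSPEND, MSB first */
	struct spi_ioc_transfer xfer = {
		.tx_buf = (unsigned long)tx,
		.len = sizeof(tx),
	};
	int fd = open(dev, O_RDWR);
	int ret;

	if (fd < 0)
		return -1;
	ret = ioctl(fd, SPI_IOC_MESSAGE(1), &xfer);
	close(fd);
	return ret < 0 ? -1 : 0;
}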
diff --git a/drivers/spi/spi-slave-time.c b/drivers/spi/spi-slave-time.c
new file mode 100644
index 000000000000..f2e07a392d68
--- /dev/null
+++ b/drivers/spi/spi-slave-time.c
@@ -0,0 +1,129 @@
1/*
2 * SPI slave handler reporting uptime at reception of previous SPI message
3 *
4 * This SPI slave handler sends the time of reception of the last SPI message
5 * as two 32-bit unsigned integers in binary format and in network byte order,
6 * representing the number of seconds and fractional seconds (in microseconds)
7 * since boot up.
8 *
9 * Copyright (C) 2016-2017 Glider bvba
10 *
11 * This file is subject to the terms and conditions of the GNU General Public
12 * License. See the file "COPYING" in the main directory of this archive
13 * for more details.
14 *
15 * Usage (assuming /dev/spidev2.0 corresponds to the SPI master on the remote
16 * system):
17 *
18 * # spidev_test -D /dev/spidev2.0 -p dummy-8B
19 * spi mode: 0x0
20 * bits per word: 8
21 * max speed: 500000 Hz (500 KHz)
22 * RX | 00 00 04 6D 00 09 5B BB ...
23 * ^^^^^ ^^^^^^^^
24 * seconds microseconds
25 */
26
27#include <linux/completion.h>
28#include <linux/module.h>
29#include <linux/sched/clock.h>
30#include <linux/spi/spi.h>
31
32
33struct spi_slave_time_priv {
34 struct spi_device *spi;
35 struct completion finished;
36 struct spi_transfer xfer;
37 struct spi_message msg;
38 __be32 buf[2];
39};
40
41static int spi_slave_time_submit(struct spi_slave_time_priv *priv);
42
43static void spi_slave_time_complete(void *arg)
44{
45 struct spi_slave_time_priv *priv = arg;
46 int ret;
47
48 ret = priv->msg.status;
49 if (ret)
50 goto terminate;
51
52 ret = spi_slave_time_submit(priv);
53 if (ret)
54 goto terminate;
55
56 return;
57
58terminate:
59 dev_info(&priv->spi->dev, "Terminating\n");
60 complete(&priv->finished);
61}
62
63static int spi_slave_time_submit(struct spi_slave_time_priv *priv)
64{
65 u32 rem_us;
66 int ret;
67 u64 ts;
68
69 ts = local_clock();
70 rem_us = do_div(ts, 1000000000) / 1000;
71
72 priv->buf[0] = cpu_to_be32(ts);
73 priv->buf[1] = cpu_to_be32(rem_us);
74
75 spi_message_init_with_transfers(&priv->msg, &priv->xfer, 1);
76
77 priv->msg.complete = spi_slave_time_complete;
78 priv->msg.context = priv;
79
80 ret = spi_async(priv->spi, &priv->msg);
81 if (ret)
82 dev_err(&priv->spi->dev, "spi_async() failed %d\n", ret);
83
84 return ret;
85}
86
87static int spi_slave_time_probe(struct spi_device *spi)
88{
89 struct spi_slave_time_priv *priv;
90 int ret;
91
92 priv = devm_kzalloc(&spi->dev, sizeof(*priv), GFP_KERNEL);
93 if (!priv)
94 return -ENOMEM;
95
96 priv->spi = spi;
97 init_completion(&priv->finished);
98 priv->xfer.tx_buf = priv->buf;
99 priv->xfer.len = sizeof(priv->buf);
100
101 ret = spi_slave_time_submit(priv);
102 if (ret)
103 return ret;
104
105 spi_set_drvdata(spi, priv);
106 return 0;
107}
108
109static int spi_slave_time_remove(struct spi_device *spi)
110{
111 struct spi_slave_time_priv *priv = spi_get_drvdata(spi);
112
113 spi_slave_abort(spi);
114 wait_for_completion(&priv->finished);
115 return 0;
116}
117
118static struct spi_driver spi_slave_time_driver = {
119 .driver = {
120 .name = "spi-slave-time",
121 },
122 .probe = spi_slave_time_probe,
123 .remove = spi_slave_time_remove,
124};
125module_spi_driver(spi_slave_time_driver);
126
127MODULE_AUTHOR("Geert Uytterhoeven <geert+renesas@glider.be>");
128MODULE_DESCRIPTION("SPI slave reporting uptime at previous SPI message");
129MODULE_LICENSE("GPL v2");
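Decoding the 8-byte reply documented in the header is symmetric; a userspace sketch:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

void print_uptime(const uint8_t rx[8])
{
	uint32_t sec, usec;

	memcpy(&sec, rx, 4);		/* seconds, network byte order */
	memcpy(&usec, rx + 4, 4);	/* microseconds, network byte order */
	printf("uptime: %u.%06u s\n", ntohl(sec), ntohl(usec));
}

Fed the RX bytes from the example above (00 00 04 6D 00 09 5B BB), this prints "uptime: 1133.613307 s".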
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 89254a55eb2e..4fcbb0aa71d3 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -48,11 +48,11 @@ static void spidev_release(struct device *dev)
48{ 48{
49 struct spi_device *spi = to_spi_device(dev); 49 struct spi_device *spi = to_spi_device(dev);
50 50
51 /* spi masters may cleanup for released devices */ 51 /* spi controllers may cleanup for released devices */
52 if (spi->master->cleanup) 52 if (spi->controller->cleanup)
53 spi->master->cleanup(spi); 53 spi->controller->cleanup(spi);
54 54
55 spi_master_put(spi->master); 55 spi_controller_put(spi->controller);
56 kfree(spi); 56 kfree(spi);
57} 57}
58 58
@@ -71,17 +71,17 @@ modalias_show(struct device *dev, struct device_attribute *a, char *buf)
71static DEVICE_ATTR_RO(modalias); 71static DEVICE_ATTR_RO(modalias);
72 72
73#define SPI_STATISTICS_ATTRS(field, file) \ 73#define SPI_STATISTICS_ATTRS(field, file) \
74static ssize_t spi_master_##field##_show(struct device *dev, \ 74static ssize_t spi_controller_##field##_show(struct device *dev, \
75 struct device_attribute *attr, \ 75 struct device_attribute *attr, \
76 char *buf) \ 76 char *buf) \
77{ \ 77{ \
78 struct spi_master *master = container_of(dev, \ 78 struct spi_controller *ctlr = container_of(dev, \
79 struct spi_master, dev); \ 79 struct spi_controller, dev); \
80 return spi_statistics_##field##_show(&master->statistics, buf); \ 80 return spi_statistics_##field##_show(&ctlr->statistics, buf); \
81} \ 81} \
82static struct device_attribute dev_attr_spi_master_##field = { \ 82static struct device_attribute dev_attr_spi_controller_##field = { \
83 .attr = { .name = file, .mode = S_IRUGO }, \ 83 .attr = { .name = file, .mode = 0444 }, \
84 .show = spi_master_##field##_show, \ 84 .show = spi_controller_##field##_show, \
85}; \ 85}; \
86static ssize_t spi_device_##field##_show(struct device *dev, \ 86static ssize_t spi_device_##field##_show(struct device *dev, \
87 struct device_attribute *attr, \ 87 struct device_attribute *attr, \
@@ -91,7 +91,7 @@ static ssize_t spi_device_##field##_show(struct device *dev, \
91 return spi_statistics_##field##_show(&spi->statistics, buf); \ 91 return spi_statistics_##field##_show(&spi->statistics, buf); \
92} \ 92} \
93static struct device_attribute dev_attr_spi_device_##field = { \ 93static struct device_attribute dev_attr_spi_device_##field = { \
94 .attr = { .name = file, .mode = S_IRUGO }, \ 94 .attr = { .name = file, .mode = 0444 }, \
95 .show = spi_device_##field##_show, \ 95 .show = spi_device_##field##_show, \
96} 96}
97 97
@@ -201,51 +201,51 @@ static const struct attribute_group *spi_dev_groups[] = {
201 NULL, 201 NULL,
202}; 202};
203 203
204static struct attribute *spi_master_statistics_attrs[] = { 204static struct attribute *spi_controller_statistics_attrs[] = {
-	&dev_attr_spi_master_messages.attr,
-	&dev_attr_spi_master_transfers.attr,
-	&dev_attr_spi_master_errors.attr,
-	&dev_attr_spi_master_timedout.attr,
-	&dev_attr_spi_master_spi_sync.attr,
-	&dev_attr_spi_master_spi_sync_immediate.attr,
-	&dev_attr_spi_master_spi_async.attr,
-	&dev_attr_spi_master_bytes.attr,
-	&dev_attr_spi_master_bytes_rx.attr,
-	&dev_attr_spi_master_bytes_tx.attr,
-	&dev_attr_spi_master_transfer_bytes_histo0.attr,
-	&dev_attr_spi_master_transfer_bytes_histo1.attr,
-	&dev_attr_spi_master_transfer_bytes_histo2.attr,
-	&dev_attr_spi_master_transfer_bytes_histo3.attr,
-	&dev_attr_spi_master_transfer_bytes_histo4.attr,
-	&dev_attr_spi_master_transfer_bytes_histo5.attr,
-	&dev_attr_spi_master_transfer_bytes_histo6.attr,
-	&dev_attr_spi_master_transfer_bytes_histo7.attr,
-	&dev_attr_spi_master_transfer_bytes_histo8.attr,
-	&dev_attr_spi_master_transfer_bytes_histo9.attr,
-	&dev_attr_spi_master_transfer_bytes_histo10.attr,
-	&dev_attr_spi_master_transfer_bytes_histo11.attr,
-	&dev_attr_spi_master_transfer_bytes_histo12.attr,
-	&dev_attr_spi_master_transfer_bytes_histo13.attr,
-	&dev_attr_spi_master_transfer_bytes_histo14.attr,
-	&dev_attr_spi_master_transfer_bytes_histo15.attr,
-	&dev_attr_spi_master_transfer_bytes_histo16.attr,
-	&dev_attr_spi_master_transfers_split_maxsize.attr,
+	&dev_attr_spi_controller_messages.attr,
+	&dev_attr_spi_controller_transfers.attr,
+	&dev_attr_spi_controller_errors.attr,
+	&dev_attr_spi_controller_timedout.attr,
+	&dev_attr_spi_controller_spi_sync.attr,
+	&dev_attr_spi_controller_spi_sync_immediate.attr,
+	&dev_attr_spi_controller_spi_async.attr,
+	&dev_attr_spi_controller_bytes.attr,
+	&dev_attr_spi_controller_bytes_rx.attr,
+	&dev_attr_spi_controller_bytes_tx.attr,
+	&dev_attr_spi_controller_transfer_bytes_histo0.attr,
+	&dev_attr_spi_controller_transfer_bytes_histo1.attr,
+	&dev_attr_spi_controller_transfer_bytes_histo2.attr,
+	&dev_attr_spi_controller_transfer_bytes_histo3.attr,
+	&dev_attr_spi_controller_transfer_bytes_histo4.attr,
+	&dev_attr_spi_controller_transfer_bytes_histo5.attr,
+	&dev_attr_spi_controller_transfer_bytes_histo6.attr,
+	&dev_attr_spi_controller_transfer_bytes_histo7.attr,
+	&dev_attr_spi_controller_transfer_bytes_histo8.attr,
+	&dev_attr_spi_controller_transfer_bytes_histo9.attr,
+	&dev_attr_spi_controller_transfer_bytes_histo10.attr,
+	&dev_attr_spi_controller_transfer_bytes_histo11.attr,
+	&dev_attr_spi_controller_transfer_bytes_histo12.attr,
+	&dev_attr_spi_controller_transfer_bytes_histo13.attr,
+	&dev_attr_spi_controller_transfer_bytes_histo14.attr,
+	&dev_attr_spi_controller_transfer_bytes_histo15.attr,
+	&dev_attr_spi_controller_transfer_bytes_histo16.attr,
+	&dev_attr_spi_controller_transfers_split_maxsize.attr,
 	NULL,
 };

-static const struct attribute_group spi_master_statistics_group = {
+static const struct attribute_group spi_controller_statistics_group = {
 	.name = "statistics",
-	.attrs = spi_master_statistics_attrs,
+	.attrs = spi_controller_statistics_attrs,
 };

 static const struct attribute_group *spi_master_groups[] = {
-	&spi_master_statistics_group,
+	&spi_controller_statistics_group,
 	NULL,
 };

 void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
 				       struct spi_transfer *xfer,
-				       struct spi_master *master)
+				       struct spi_controller *ctlr)
 {
 	unsigned long flags;
 	int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;
@@ -260,10 +260,10 @@ void spi_statistics_add_transfer_stats(struct spi_statistics *stats,

 	stats->bytes += xfer->len;
 	if ((xfer->tx_buf) &&
-	    (xfer->tx_buf != master->dummy_tx))
+	    (xfer->tx_buf != ctlr->dummy_tx))
 		stats->bytes_tx += xfer->len;
 	if ((xfer->rx_buf) &&
-	    (xfer->rx_buf != master->dummy_rx))
+	    (xfer->rx_buf != ctlr->dummy_rx))
 		stats->bytes_rx += xfer->len;

 	spin_unlock_irqrestore(&stats->lock, flags);
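(For readers tracking the statistics logic above: the histogram index is fls(len) - 1, clamped to the table size, so bucket n counts transfers of 2^n..2^(n+1)-1 bytes with the top bucket as a catch-all. A standalone sketch of that bucketing, with fls() re-implemented here for portability and the sample values invented:)

#include <stdio.h>

#define SPI_STATISTICS_HISTO_SIZE 17	/* buckets histo0..histo16, as above */

/* Like the kernel's fls(): 1-based index of the highest set bit; 0 for 0. */
static int my_fls(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

static int histo_bucket(unsigned int len)
{
	int l2len = my_fls(len);

	if (l2len > SPI_STATISTICS_HISTO_SIZE)
		l2len = SPI_STATISTICS_HISTO_SIZE;
	return l2len - 1;	/* a zero-length transfer would land at -1 */
}

int main(void)
{
	/* 1 -> bucket 0, 16 -> bucket 4, 100000 -> bucket 16 (catch-all) */
	printf("%d %d %d\n", histo_bucket(1), histo_bucket(16),
	       histo_bucket(100000));
	return 0;
}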
@@ -405,7 +405,7 @@ EXPORT_SYMBOL_GPL(__spi_register_driver);
 /*-------------------------------------------------------------------------*/

 /* SPI devices should normally not be created by SPI device drivers; that
- * would make them board-specific. Similarly with SPI master drivers.
+ * would make them board-specific. Similarly with SPI controller drivers.
  * Device registration normally goes into arch/.../mach.../board-YYY.c
  * with other readonly (flashable) information about mainboard devices.
  */
@@ -416,17 +416,17 @@ struct boardinfo {
 };

 static LIST_HEAD(board_list);
-static LIST_HEAD(spi_master_list);
+static LIST_HEAD(spi_controller_list);

 /*
  * Used to protect add/del operation for board_info list and
- * spi_master list, and their matching process
+ * spi_controller list, and their matching process
  */
 static DEFINE_MUTEX(board_lock);

 /**
  * spi_alloc_device - Allocate a new SPI device
- * @master: Controller to which device is connected
+ * @ctlr: Controller to which device is connected
  * Context: can sleep
  *
  * Allows a driver to allocate and initialize a spi_device without
@@ -435,27 +435,27 @@ static DEFINE_MUTEX(board_lock);
  * spi_add_device() on it.
  *
  * Caller is responsible to call spi_add_device() on the returned
- * spi_device structure to add it to the SPI master. If the caller
+ * spi_device structure to add it to the SPI controller. If the caller
  * needs to discard the spi_device without adding it, then it should
  * call spi_dev_put() on it.
  *
  * Return: a pointer to the new device, or NULL.
  */
-struct spi_device *spi_alloc_device(struct spi_master *master)
+struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
 {
 	struct spi_device *spi;

-	if (!spi_master_get(master))
+	if (!spi_controller_get(ctlr))
 		return NULL;

 	spi = kzalloc(sizeof(*spi), GFP_KERNEL);
 	if (!spi) {
-		spi_master_put(master);
+		spi_controller_put(ctlr);
 		return NULL;
 	}

-	spi->master = master;
-	spi->dev.parent = &master->dev;
+	spi->master = spi->controller = ctlr;
+	spi->dev.parent = &ctlr->dev;
 	spi->dev.bus = &spi_bus_type;
 	spi->dev.release = spidev_release;
 	spi->cs_gpio = -ENOENT;
@@ -476,7 +476,7 @@ static void spi_dev_set_name(struct spi_device *spi)
 		return;
 	}

-	dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->master->dev),
+	dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->controller->dev),
 		     spi->chip_select);
 }

@@ -485,7 +485,7 @@ static int spi_dev_check(struct device *dev, void *data)
 	struct spi_device *spi = to_spi_device(dev);
 	struct spi_device *new_spi = data;

-	if (spi->master == new_spi->master &&
+	if (spi->controller == new_spi->controller &&
 	    spi->chip_select == new_spi->chip_select)
 		return -EBUSY;
 	return 0;
@@ -503,15 +503,14 @@ static int spi_dev_check(struct device *dev, void *data)
 int spi_add_device(struct spi_device *spi)
 {
 	static DEFINE_MUTEX(spi_add_lock);
-	struct spi_master *master = spi->master;
-	struct device *dev = master->dev.parent;
+	struct spi_controller *ctlr = spi->controller;
+	struct device *dev = ctlr->dev.parent;
 	int status;

 	/* Chipselects are numbered 0..max; validate. */
-	if (spi->chip_select >= master->num_chipselect) {
-		dev_err(dev, "cs%d >= max %d\n",
-			spi->chip_select,
-			master->num_chipselect);
+	if (spi->chip_select >= ctlr->num_chipselect) {
+		dev_err(dev, "cs%d >= max %d\n", spi->chip_select,
+			ctlr->num_chipselect);
 		return -EINVAL;
 	}

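(A hedged usage sketch of the two calls documented above, under the post-rename naming; the controller pointer, modalias string and chip-select/speed values are invented for illustration:)

/* Sketch only: hand-instantiate one SPI device on a controller. */
static int example_add_chip(struct spi_controller *ctlr)
{
	struct spi_device *spi;
	int ret;

	spi = spi_alloc_device(ctlr);	/* holds a controller reference */
	if (!spi)
		return -ENOMEM;

	strlcpy(spi->modalias, "example-chip", sizeof(spi->modalias));
	spi->chip_select = 0;		/* must be < ctlr->num_chipselect */
	spi->max_speed_hz = 1000000;

	ret = spi_add_device(spi);	/* -EBUSY if the chipselect is taken */
	if (ret)
		spi_dev_put(spi);	/* discard without registering */
	return ret;
}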
@@ -531,8 +530,8 @@ int spi_add_device(struct spi_device *spi)
 		goto done;
 	}

-	if (master->cs_gpios)
-		spi->cs_gpio = master->cs_gpios[spi->chip_select];
+	if (ctlr->cs_gpios)
+		spi->cs_gpio = ctlr->cs_gpios[spi->chip_select];

 	/* Drivers may modify this initial i/o setup, but will
 	 * normally rely on the device being setup.  Devices
@@ -561,7 +560,7 @@ EXPORT_SYMBOL_GPL(spi_add_device);

 /**
  * spi_new_device - instantiate one new SPI device
- * @master: Controller to which device is connected
+ * @ctlr: Controller to which device is connected
  * @chip: Describes the SPI device
  * Context: can sleep
  *
@@ -573,7 +572,7 @@ EXPORT_SYMBOL_GPL(spi_add_device);
  *
  * Return: the new device, or NULL.
  */
-struct spi_device *spi_new_device(struct spi_master *master,
+struct spi_device *spi_new_device(struct spi_controller *ctlr,
 				  struct spi_board_info *chip)
 {
 	struct spi_device *proxy;
@@ -586,7 +585,7 @@ struct spi_device *spi_new_device(struct spi_master *master,
 	 * suggests syslogged diagnostics are best here (ugh).
 	 */

-	proxy = spi_alloc_device(master);
+	proxy = spi_alloc_device(ctlr);
 	if (!proxy)
 		return NULL;

@@ -604,7 +603,7 @@ struct spi_device *spi_new_device(struct spi_master *master,
 	if (chip->properties) {
 		status = device_add_properties(&proxy->dev, chip->properties);
 		if (status) {
-			dev_err(&master->dev,
+			dev_err(&ctlr->dev,
 				"failed to add properties to '%s': %d\n",
 				chip->modalias, status);
 			goto err_dev_put;
@@ -631,7 +630,7 @@ EXPORT_SYMBOL_GPL(spi_new_device);
  * @spi: spi_device to unregister
  *
  * Start making the passed SPI device vanish. Normally this would be handled
- * by spi_unregister_master().
+ * by spi_unregister_controller().
  */
 void spi_unregister_device(struct spi_device *spi)
 {
@@ -648,17 +647,17 @@ void spi_unregister_device(struct spi_device *spi)
 }
 EXPORT_SYMBOL_GPL(spi_unregister_device);

-static void spi_match_master_to_boardinfo(struct spi_master *master,
-					   struct spi_board_info *bi)
+static void spi_match_controller_to_boardinfo(struct spi_controller *ctlr,
+					      struct spi_board_info *bi)
 {
 	struct spi_device *dev;

-	if (master->bus_num != bi->bus_num)
+	if (ctlr->bus_num != bi->bus_num)
 		return;

-	dev = spi_new_device(master, bi);
+	dev = spi_new_device(ctlr, bi);
 	if (!dev)
-		dev_err(master->dev.parent, "can't create new device for %s\n",
+		dev_err(ctlr->dev.parent, "can't create new device for %s\n",
 			bi->modalias);
 }

@@ -697,7 +696,7 @@ int spi_register_board_info(struct spi_board_info const *info, unsigned n)
 		return -ENOMEM;

 	for (i = 0; i < n; i++, bi++, info++) {
-		struct spi_master *master;
+		struct spi_controller *ctlr;

 		memcpy(&bi->board_info, info, sizeof(*info));
 		if (info->properties) {
@@ -709,8 +708,9 @@ int spi_register_board_info(struct spi_board_info const *info, unsigned n)

 		mutex_lock(&board_lock);
 		list_add_tail(&bi->list, &board_list);
-		list_for_each_entry(master, &spi_master_list, list)
-			spi_match_master_to_boardinfo(master, &bi->board_info);
+		list_for_each_entry(ctlr, &spi_controller_list, list)
+			spi_match_controller_to_boardinfo(ctlr,
+							  &bi->board_info);
 		mutex_unlock(&board_lock);
 	}

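(For context, the board_list walked here is populated from static board files; a hypothetical caller looks roughly like this, with modalias, bus and chip-select values invented:)

/* Sketch of a board-file table feeding spi_register_board_info(). */
static struct spi_board_info example_board_info[] __initdata = {
	{
		.modalias	= "example-flash",
		.max_speed_hz	= 25000000,
		.bus_num	= 0,	/* matched against ctlr->bus_num */
		.chip_select	= 1,
		.mode		= SPI_MODE_0,
	},
};

static int __init example_board_init(void)
{
	return spi_register_board_info(example_board_info,
				       ARRAY_SIZE(example_board_info));
}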
@@ -727,16 +727,16 @@ static void spi_set_cs(struct spi_device *spi, bool enable)
 	if (gpio_is_valid(spi->cs_gpio)) {
 		gpio_set_value(spi->cs_gpio, !enable);
 		/* Some SPI masters need both GPIO CS & slave_select */
-		if ((spi->master->flags & SPI_MASTER_GPIO_SS) &&
-		    spi->master->set_cs)
-			spi->master->set_cs(spi, !enable);
-	} else if (spi->master->set_cs) {
-		spi->master->set_cs(spi, !enable);
+		if ((spi->controller->flags & SPI_MASTER_GPIO_SS) &&
+		    spi->controller->set_cs)
+			spi->controller->set_cs(spi, !enable);
+	} else if (spi->controller->set_cs) {
+		spi->controller->set_cs(spi, !enable);
 	}
 }

 #ifdef CONFIG_HAS_DMA
-static int spi_map_buf(struct spi_master *master, struct device *dev,
+static int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
 		       struct sg_table *sgt, void *buf, size_t len,
 		       enum dma_data_direction dir)
 {
@@ -761,7 +761,7 @@ static int spi_map_buf(struct spi_master *master, struct device *dev,
 		desc_len = min_t(int, max_seg_size, PAGE_SIZE);
 		sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
 	} else if (virt_addr_valid(buf)) {
-		desc_len = min_t(int, max_seg_size, master->max_dma_len);
+		desc_len = min_t(int, max_seg_size, ctlr->max_dma_len);
 		sgs = DIV_ROUND_UP(len, desc_len);
 	} else {
 		return -EINVAL;
@@ -811,7 +811,7 @@ static int spi_map_buf(struct spi_master *master, struct device *dev,
 	return 0;
 }

-static void spi_unmap_buf(struct spi_master *master, struct device *dev,
+static void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
 			  struct sg_table *sgt, enum dma_data_direction dir)
 {
 	if (sgt->orig_nents) {
@@ -820,31 +820,31 @@ static void spi_unmap_buf(struct spi_master *master, struct device *dev,
 	}
 }

-static int __spi_map_msg(struct spi_master *master, struct spi_message *msg)
+static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
 {
 	struct device *tx_dev, *rx_dev;
 	struct spi_transfer *xfer;
 	int ret;

-	if (!master->can_dma)
+	if (!ctlr->can_dma)
 		return 0;

-	if (master->dma_tx)
-		tx_dev = master->dma_tx->device->dev;
+	if (ctlr->dma_tx)
+		tx_dev = ctlr->dma_tx->device->dev;
 	else
-		tx_dev = master->dev.parent;
+		tx_dev = ctlr->dev.parent;

-	if (master->dma_rx)
-		rx_dev = master->dma_rx->device->dev;
+	if (ctlr->dma_rx)
+		rx_dev = ctlr->dma_rx->device->dev;
 	else
-		rx_dev = master->dev.parent;
+		rx_dev = ctlr->dev.parent;

 	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
-		if (!master->can_dma(master, msg->spi, xfer))
+		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
 			continue;

 		if (xfer->tx_buf != NULL) {
-			ret = spi_map_buf(master, tx_dev, &xfer->tx_sg,
+			ret = spi_map_buf(ctlr, tx_dev, &xfer->tx_sg,
 					  (void *)xfer->tx_buf, xfer->len,
 					  DMA_TO_DEVICE);
 			if (ret != 0)
@@ -852,79 +852,78 @@ static int __spi_map_msg(struct spi_master *master, struct spi_message *msg)
 		}

 		if (xfer->rx_buf != NULL) {
-			ret = spi_map_buf(master, rx_dev, &xfer->rx_sg,
+			ret = spi_map_buf(ctlr, rx_dev, &xfer->rx_sg,
 					  xfer->rx_buf, xfer->len,
 					  DMA_FROM_DEVICE);
 			if (ret != 0) {
-				spi_unmap_buf(master, tx_dev, &xfer->tx_sg,
+				spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg,
 					      DMA_TO_DEVICE);
 				return ret;
 			}
 		}
 	}

-	master->cur_msg_mapped = true;
+	ctlr->cur_msg_mapped = true;

 	return 0;
 }

-static int __spi_unmap_msg(struct spi_master *master, struct spi_message *msg)
+static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
 {
 	struct spi_transfer *xfer;
 	struct device *tx_dev, *rx_dev;

-	if (!master->cur_msg_mapped || !master->can_dma)
+	if (!ctlr->cur_msg_mapped || !ctlr->can_dma)
 		return 0;

-	if (master->dma_tx)
-		tx_dev = master->dma_tx->device->dev;
+	if (ctlr->dma_tx)
+		tx_dev = ctlr->dma_tx->device->dev;
 	else
-		tx_dev = master->dev.parent;
+		tx_dev = ctlr->dev.parent;

-	if (master->dma_rx)
-		rx_dev = master->dma_rx->device->dev;
+	if (ctlr->dma_rx)
+		rx_dev = ctlr->dma_rx->device->dev;
 	else
-		rx_dev = master->dev.parent;
+		rx_dev = ctlr->dev.parent;

 	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
-		if (!master->can_dma(master, msg->spi, xfer))
+		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
 			continue;

-		spi_unmap_buf(master, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
-		spi_unmap_buf(master, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
+		spi_unmap_buf(ctlr, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
+		spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
 	}

 	return 0;
 }
 #else /* !CONFIG_HAS_DMA */
-static inline int spi_map_buf(struct spi_master *master,
-			      struct device *dev, struct sg_table *sgt,
-			      void *buf, size_t len,
-			      enum dma_data_direction dir)
+static inline int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
+			      struct sg_table *sgt, void *buf, size_t len,
+			      enum dma_data_direction dir)
 {
 	return -EINVAL;
 }

-static inline void spi_unmap_buf(struct spi_master *master,
+static inline void spi_unmap_buf(struct spi_controller *ctlr,
 				 struct device *dev, struct sg_table *sgt,
 				 enum dma_data_direction dir)
 {
 }

-static inline int __spi_map_msg(struct spi_master *master,
+static inline int __spi_map_msg(struct spi_controller *ctlr,
 				struct spi_message *msg)
 {
 	return 0;
 }

-static inline int __spi_unmap_msg(struct spi_master *master,
+static inline int __spi_unmap_msg(struct spi_controller *ctlr,
 				  struct spi_message *msg)
 {
 	return 0;
 }
 #endif /* !CONFIG_HAS_DMA */

-static inline int spi_unmap_msg(struct spi_master *master,
+static inline int spi_unmap_msg(struct spi_controller *ctlr,
 				struct spi_message *msg)
 {
 	struct spi_transfer *xfer;
@@ -934,63 +933,63 @@ static inline int spi_unmap_msg(struct spi_master *master,
 		 * Restore the original value of tx_buf or rx_buf if they are
 		 * NULL.
 		 */
-		if (xfer->tx_buf == master->dummy_tx)
+		if (xfer->tx_buf == ctlr->dummy_tx)
 			xfer->tx_buf = NULL;
-		if (xfer->rx_buf == master->dummy_rx)
+		if (xfer->rx_buf == ctlr->dummy_rx)
 			xfer->rx_buf = NULL;
 	}

-	return __spi_unmap_msg(master, msg);
+	return __spi_unmap_msg(ctlr, msg);
 }

-static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
+static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
 {
 	struct spi_transfer *xfer;
 	void *tmp;
 	unsigned int max_tx, max_rx;

-	if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) {
+	if (ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX)) {
 		max_tx = 0;
 		max_rx = 0;

 		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
-			if ((master->flags & SPI_MASTER_MUST_TX) &&
+			if ((ctlr->flags & SPI_CONTROLLER_MUST_TX) &&
 			    !xfer->tx_buf)
 				max_tx = max(xfer->len, max_tx);
-			if ((master->flags & SPI_MASTER_MUST_RX) &&
+			if ((ctlr->flags & SPI_CONTROLLER_MUST_RX) &&
 			    !xfer->rx_buf)
 				max_rx = max(xfer->len, max_rx);
 		}

 		if (max_tx) {
-			tmp = krealloc(master->dummy_tx, max_tx,
+			tmp = krealloc(ctlr->dummy_tx, max_tx,
 				       GFP_KERNEL | GFP_DMA);
 			if (!tmp)
 				return -ENOMEM;
-			master->dummy_tx = tmp;
+			ctlr->dummy_tx = tmp;
 			memset(tmp, 0, max_tx);
 		}

 		if (max_rx) {
-			tmp = krealloc(master->dummy_rx, max_rx,
+			tmp = krealloc(ctlr->dummy_rx, max_rx,
 				       GFP_KERNEL | GFP_DMA);
 			if (!tmp)
 				return -ENOMEM;
-			master->dummy_rx = tmp;
+			ctlr->dummy_rx = tmp;
 		}

 		if (max_tx || max_rx) {
 			list_for_each_entry(xfer, &msg->transfers,
 					    transfer_list) {
 				if (!xfer->tx_buf)
-					xfer->tx_buf = master->dummy_tx;
+					xfer->tx_buf = ctlr->dummy_tx;
 				if (!xfer->rx_buf)
-					xfer->rx_buf = master->dummy_rx;
+					xfer->rx_buf = ctlr->dummy_rx;
 			}
 		}
 	}

-	return __spi_map_msg(master, msg);
+	return __spi_map_msg(ctlr, msg);
 }

 /*
@@ -1000,14 +999,14 @@ static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
  * drivers which implement a transfer_one() operation.  It provides
  * standard handling of delays and chip select management.
  */
-static int spi_transfer_one_message(struct spi_master *master,
+static int spi_transfer_one_message(struct spi_controller *ctlr,
 				    struct spi_message *msg)
 {
 	struct spi_transfer *xfer;
 	bool keep_cs = false;
 	int ret = 0;
 	unsigned long long ms = 1;
-	struct spi_statistics *statm = &master->statistics;
+	struct spi_statistics *statm = &ctlr->statistics;
 	struct spi_statistics *stats = &msg->spi->statistics;

 	spi_set_cs(msg->spi, true);
@@ -1018,13 +1017,13 @@ static int spi_transfer_one_message(struct spi_master *master,
 	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
 		trace_spi_transfer_start(msg, xfer);

-		spi_statistics_add_transfer_stats(statm, xfer, master);
-		spi_statistics_add_transfer_stats(stats, xfer, master);
+		spi_statistics_add_transfer_stats(statm, xfer, ctlr);
+		spi_statistics_add_transfer_stats(stats, xfer, ctlr);

 		if (xfer->tx_buf || xfer->rx_buf) {
-			reinit_completion(&master->xfer_completion);
+			reinit_completion(&ctlr->xfer_completion);

-			ret = master->transfer_one(master, msg->spi, xfer);
+			ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
 			if (ret < 0) {
 				SPI_STATISTICS_INCREMENT_FIELD(statm,
 							       errors);
@@ -1044,7 +1043,7 @@ static int spi_transfer_one_message(struct spi_master *master,
 			if (ms > UINT_MAX)
 				ms = UINT_MAX;

-			ms = wait_for_completion_timeout(&master->xfer_completion,
+			ms = wait_for_completion_timeout(&ctlr->xfer_completion,
 							 msecs_to_jiffies(ms));
 		}

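(The ms value clamped and waited on above is computed a few lines earlier in this function, outside the hunk; from the surrounding code it amounts to roughly twice the theoretical transfer time plus fixed slack, along these lines. A sketch, not the verbatim source:)

/* Sketch of the timeout heuristic feeding the wait above. */
unsigned long long ms;

ms = 8LL * 1000LL * xfer->len;	/* bits to send, scaled to milliseconds */
do_div(ms, xfer->speed_hz);	/* divided by the bit rate */
ms += ms + 200;			/* ~2x the expected time plus 200 ms slack */
if (ms > UINT_MAX)
	ms = UINT_MAX;		/* clamp before msecs_to_jiffies(), as shown */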
@@ -1099,33 +1098,33 @@ out:
 	if (msg->status == -EINPROGRESS)
 		msg->status = ret;

-	if (msg->status && master->handle_err)
-		master->handle_err(master, msg);
+	if (msg->status && ctlr->handle_err)
+		ctlr->handle_err(ctlr, msg);

-	spi_res_release(master, msg);
+	spi_res_release(ctlr, msg);

-	spi_finalize_current_message(master);
+	spi_finalize_current_message(ctlr);

 	return ret;
 }

 /**
  * spi_finalize_current_transfer - report completion of a transfer
- * @master: the master reporting completion
+ * @ctlr: the controller reporting completion
  *
  * Called by SPI drivers using the core transfer_one_message()
  * implementation to notify it that the current interrupt driven
  * transfer has finished and the next one may be scheduled.
  */
-void spi_finalize_current_transfer(struct spi_master *master)
+void spi_finalize_current_transfer(struct spi_controller *ctlr)
 {
-	complete(&master->xfer_completion);
+	complete(&ctlr->xfer_completion);
 }
 EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
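(A hedged sketch of the driver side of this helper, with all names hypothetical: transfer_one() starts the hardware and returns a positive value, and the interrupt handler later signals completion.)

/* Hypothetical interrupt-driven driver pairing with the helper above. */
static irqreturn_t example_spi_irq(int irq, void *dev_id)
{
	struct spi_controller *ctlr = dev_id;

	/* ... drain FIFOs, latch error status ... */
	spi_finalize_current_transfer(ctlr);	/* completes xfer_completion */
	return IRQ_HANDLED;
}

static int example_transfer_one(struct spi_controller *ctlr,
				struct spi_device *spi,
				struct spi_transfer *xfer)
{
	/* ... program clock/word size, kick off the transfer ... */
	return 1;	/* positive: in flight, the core waits for the IRQ */
}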

 /**
  * __spi_pump_messages - function which processes spi message queue
- * @master: master to process queue for
+ * @ctlr: controller to process queue for
  * @in_kthread: true if we are in the context of the message pump thread
  *
  * This function checks if there is any spi message in the queue that
@@ -1136,136 +1135,136 @@ EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
  * inside spi_sync(); the queue extraction handling at the top of the
  * function should deal with this safely.
  */
-static void __spi_pump_messages(struct spi_master *master, bool in_kthread)
+static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
 {
 	unsigned long flags;
 	bool was_busy = false;
 	int ret;

 	/* Lock queue */
-	spin_lock_irqsave(&master->queue_lock, flags);
+	spin_lock_irqsave(&ctlr->queue_lock, flags);

 	/* Make sure we are not already running a message */
-	if (master->cur_msg) {
-		spin_unlock_irqrestore(&master->queue_lock, flags);
+	if (ctlr->cur_msg) {
+		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
 		return;
 	}

 	/* If another context is idling the device then defer */
-	if (master->idling) {
-		kthread_queue_work(&master->kworker, &master->pump_messages);
-		spin_unlock_irqrestore(&master->queue_lock, flags);
+	if (ctlr->idling) {
+		kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
+		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
 		return;
 	}

 	/* Check if the queue is idle */
-	if (list_empty(&master->queue) || !master->running) {
-		if (!master->busy) {
-			spin_unlock_irqrestore(&master->queue_lock, flags);
+	if (list_empty(&ctlr->queue) || !ctlr->running) {
+		if (!ctlr->busy) {
+			spin_unlock_irqrestore(&ctlr->queue_lock, flags);
 			return;
 		}

 		/* Only do teardown in the thread */
 		if (!in_kthread) {
-			kthread_queue_work(&master->kworker,
-					   &master->pump_messages);
-			spin_unlock_irqrestore(&master->queue_lock, flags);
+			kthread_queue_work(&ctlr->kworker,
+					   &ctlr->pump_messages);
+			spin_unlock_irqrestore(&ctlr->queue_lock, flags);
 			return;
 		}

-		master->busy = false;
-		master->idling = true;
-		spin_unlock_irqrestore(&master->queue_lock, flags);
+		ctlr->busy = false;
+		ctlr->idling = true;
+		spin_unlock_irqrestore(&ctlr->queue_lock, flags);

-		kfree(master->dummy_rx);
-		master->dummy_rx = NULL;
-		kfree(master->dummy_tx);
-		master->dummy_tx = NULL;
-		if (master->unprepare_transfer_hardware &&
-		    master->unprepare_transfer_hardware(master))
-			dev_err(&master->dev,
+		kfree(ctlr->dummy_rx);
+		ctlr->dummy_rx = NULL;
+		kfree(ctlr->dummy_tx);
+		ctlr->dummy_tx = NULL;
+		if (ctlr->unprepare_transfer_hardware &&
+		    ctlr->unprepare_transfer_hardware(ctlr))
+			dev_err(&ctlr->dev,
 				"failed to unprepare transfer hardware\n");
-		if (master->auto_runtime_pm) {
-			pm_runtime_mark_last_busy(master->dev.parent);
-			pm_runtime_put_autosuspend(master->dev.parent);
+		if (ctlr->auto_runtime_pm) {
+			pm_runtime_mark_last_busy(ctlr->dev.parent);
+			pm_runtime_put_autosuspend(ctlr->dev.parent);
 		}
-		trace_spi_master_idle(master);
+		trace_spi_controller_idle(ctlr);

-		spin_lock_irqsave(&master->queue_lock, flags);
-		master->idling = false;
-		spin_unlock_irqrestore(&master->queue_lock, flags);
+		spin_lock_irqsave(&ctlr->queue_lock, flags);
+		ctlr->idling = false;
+		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
 		return;
 	}

 	/* Extract head of queue */
-	master->cur_msg =
-		list_first_entry(&master->queue, struct spi_message, queue);
+	ctlr->cur_msg =
+		list_first_entry(&ctlr->queue, struct spi_message, queue);

-	list_del_init(&master->cur_msg->queue);
-	if (master->busy)
+	list_del_init(&ctlr->cur_msg->queue);
+	if (ctlr->busy)
 		was_busy = true;
 	else
-		master->busy = true;
-	spin_unlock_irqrestore(&master->queue_lock, flags);
+		ctlr->busy = true;
+	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

-	mutex_lock(&master->io_mutex);
+	mutex_lock(&ctlr->io_mutex);

-	if (!was_busy && master->auto_runtime_pm) {
-		ret = pm_runtime_get_sync(master->dev.parent);
+	if (!was_busy && ctlr->auto_runtime_pm) {
+		ret = pm_runtime_get_sync(ctlr->dev.parent);
 		if (ret < 0) {
-			dev_err(&master->dev, "Failed to power device: %d\n",
+			dev_err(&ctlr->dev, "Failed to power device: %d\n",
 				ret);
-			mutex_unlock(&master->io_mutex);
+			mutex_unlock(&ctlr->io_mutex);
 			return;
 		}
 	}

 	if (!was_busy)
-		trace_spi_master_busy(master);
+		trace_spi_controller_busy(ctlr);

-	if (!was_busy && master->prepare_transfer_hardware) {
-		ret = master->prepare_transfer_hardware(master);
+	if (!was_busy && ctlr->prepare_transfer_hardware) {
+		ret = ctlr->prepare_transfer_hardware(ctlr);
 		if (ret) {
-			dev_err(&master->dev,
+			dev_err(&ctlr->dev,
 				"failed to prepare transfer hardware\n");

-			if (master->auto_runtime_pm)
-				pm_runtime_put(master->dev.parent);
-			mutex_unlock(&master->io_mutex);
+			if (ctlr->auto_runtime_pm)
+				pm_runtime_put(ctlr->dev.parent);
+			mutex_unlock(&ctlr->io_mutex);
 			return;
 		}
 	}

-	trace_spi_message_start(master->cur_msg);
+	trace_spi_message_start(ctlr->cur_msg);

-	if (master->prepare_message) {
-		ret = master->prepare_message(master, master->cur_msg);
+	if (ctlr->prepare_message) {
+		ret = ctlr->prepare_message(ctlr, ctlr->cur_msg);
 		if (ret) {
-			dev_err(&master->dev,
-				"failed to prepare message: %d\n", ret);
-			master->cur_msg->status = ret;
-			spi_finalize_current_message(master);
+			dev_err(&ctlr->dev, "failed to prepare message: %d\n",
+				ret);
+			ctlr->cur_msg->status = ret;
+			spi_finalize_current_message(ctlr);
 			goto out;
 		}
-		master->cur_msg_prepared = true;
+		ctlr->cur_msg_prepared = true;
 	}

-	ret = spi_map_msg(master, master->cur_msg);
+	ret = spi_map_msg(ctlr, ctlr->cur_msg);
 	if (ret) {
-		master->cur_msg->status = ret;
-		spi_finalize_current_message(master);
+		ctlr->cur_msg->status = ret;
+		spi_finalize_current_message(ctlr);
 		goto out;
 	}

-	ret = master->transfer_one_message(master, master->cur_msg);
+	ret = ctlr->transfer_one_message(ctlr, ctlr->cur_msg);
 	if (ret) {
-		dev_err(&master->dev,
+		dev_err(&ctlr->dev,
 			"failed to transfer one message from queue\n");
 		goto out;
 	}

 out:
-	mutex_unlock(&master->io_mutex);
+	mutex_unlock(&ctlr->io_mutex);

 	/* Prod the scheduler in case transfer_one() was busy waiting */
 	if (!ret)
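(To see where the hooks exercised by this pump come from, a minimal and entirely hypothetical probe; with transfer_one() set and no custom ->transfer(), registration routes I/O through the queued path above:)

/* Sketch only: wiring a controller into the queued message pump. */
static int example_spi_probe(struct platform_device *pdev)
{
	struct spi_controller *ctlr;

	ctlr = spi_alloc_master(&pdev->dev, 0);	/* no private data here */
	if (!ctlr)
		return -ENOMEM;

	ctlr->bus_num = pdev->id;
	ctlr->num_chipselect = 4;		/* illustrative */
	ctlr->transfer_one = example_transfer_one;

	return devm_spi_register_master(&pdev->dev, ctlr);
}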
@@ -1274,44 +1273,43 @@ out:

 /**
  * spi_pump_messages - kthread work function which processes spi message queue
- * @work: pointer to kthread work struct contained in the master struct
+ * @work: pointer to kthread work struct contained in the controller struct
  */
 static void spi_pump_messages(struct kthread_work *work)
 {
-	struct spi_master *master =
-		container_of(work, struct spi_master, pump_messages);
+	struct spi_controller *ctlr =
+		container_of(work, struct spi_controller, pump_messages);

-	__spi_pump_messages(master, true);
+	__spi_pump_messages(ctlr, true);
 }

-static int spi_init_queue(struct spi_master *master)
+static int spi_init_queue(struct spi_controller *ctlr)
 {
 	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };

-	master->running = false;
-	master->busy = false;
+	ctlr->running = false;
+	ctlr->busy = false;

-	kthread_init_worker(&master->kworker);
-	master->kworker_task = kthread_run(kthread_worker_fn,
-					   &master->kworker, "%s",
-					   dev_name(&master->dev));
-	if (IS_ERR(master->kworker_task)) {
-		dev_err(&master->dev, "failed to create message pump task\n");
-		return PTR_ERR(master->kworker_task);
+	kthread_init_worker(&ctlr->kworker);
+	ctlr->kworker_task = kthread_run(kthread_worker_fn, &ctlr->kworker,
+					 "%s", dev_name(&ctlr->dev));
+	if (IS_ERR(ctlr->kworker_task)) {
+		dev_err(&ctlr->dev, "failed to create message pump task\n");
+		return PTR_ERR(ctlr->kworker_task);
 	}
-	kthread_init_work(&master->pump_messages, spi_pump_messages);
+	kthread_init_work(&ctlr->pump_messages, spi_pump_messages);

 	/*
-	 * Master config will indicate if this controller should run the
+	 * Controller config will indicate if this controller should run the
 	 * message pump with high (realtime) priority to reduce the transfer
 	 * latency on the bus by minimising the delay between a transfer
 	 * request and the scheduling of the message pump thread. Without this
 	 * setting the message pump thread will remain at default priority.
 	 */
-	if (master->rt) {
-		dev_info(&master->dev,
+	if (ctlr->rt) {
+		dev_info(&ctlr->dev,
 			"will run message pump with realtime priority\n");
-		sched_setscheduler(master->kworker_task, SCHED_FIFO, &param);
+		sched_setscheduler(ctlr->kworker_task, SCHED_FIFO, &param);
 	}

 	return 0;
@@ -1320,23 +1318,23 @@ static int spi_init_queue(struct spi_master *master)
 /**
  * spi_get_next_queued_message() - called by driver to check for queued
  *				   messages
- * @master: the master to check for queued messages
+ * @ctlr: the controller to check for queued messages
  *
  * If there are more messages in the queue, the next message is returned from
  * this call.
  *
  * Return: the next message in the queue, else NULL if the queue is empty.
  */
-struct spi_message *spi_get_next_queued_message(struct spi_master *master)
+struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr)
 {
 	struct spi_message *next;
 	unsigned long flags;

 	/* get a pointer to the next message, if any */
-	spin_lock_irqsave(&master->queue_lock, flags);
-	next = list_first_entry_or_null(&master->queue, struct spi_message,
+	spin_lock_irqsave(&ctlr->queue_lock, flags);
+	next = list_first_entry_or_null(&ctlr->queue, struct spi_message,
 					queue);
-	spin_unlock_irqrestore(&master->queue_lock, flags);
+	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

 	return next;
 }
@@ -1344,36 +1342,36 @@ EXPORT_SYMBOL_GPL(spi_get_next_queued_message);

 /**
  * spi_finalize_current_message() - the current message is complete
- * @master: the master to return the message to
+ * @ctlr: the controller to return the message to
  *
  * Called by the driver to notify the core that the message in the front of the
  * queue is complete and can be removed from the queue.
  */
-void spi_finalize_current_message(struct spi_master *master)
+void spi_finalize_current_message(struct spi_controller *ctlr)
 {
 	struct spi_message *mesg;
 	unsigned long flags;
 	int ret;

-	spin_lock_irqsave(&master->queue_lock, flags);
-	mesg = master->cur_msg;
-	spin_unlock_irqrestore(&master->queue_lock, flags);
+	spin_lock_irqsave(&ctlr->queue_lock, flags);
+	mesg = ctlr->cur_msg;
+	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

-	spi_unmap_msg(master, mesg);
+	spi_unmap_msg(ctlr, mesg);

-	if (master->cur_msg_prepared && master->unprepare_message) {
-		ret = master->unprepare_message(master, mesg);
+	if (ctlr->cur_msg_prepared && ctlr->unprepare_message) {
+		ret = ctlr->unprepare_message(ctlr, mesg);
 		if (ret) {
-			dev_err(&master->dev,
-				"failed to unprepare message: %d\n", ret);
+			dev_err(&ctlr->dev, "failed to unprepare message: %d\n",
+				ret);
 		}
 	}

-	spin_lock_irqsave(&master->queue_lock, flags);
-	master->cur_msg = NULL;
-	master->cur_msg_prepared = false;
-	kthread_queue_work(&master->kworker, &master->pump_messages);
-	spin_unlock_irqrestore(&master->queue_lock, flags);
+	spin_lock_irqsave(&ctlr->queue_lock, flags);
+	ctlr->cur_msg = NULL;
+	ctlr->cur_msg_prepared = false;
+	kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
+	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

 	trace_spi_message_done(mesg);

@@ -1383,66 +1381,65 @@ void spi_finalize_current_message(struct spi_master *master)
 }
 EXPORT_SYMBOL_GPL(spi_finalize_current_message);

-static int spi_start_queue(struct spi_master *master)
+static int spi_start_queue(struct spi_controller *ctlr)
 {
 	unsigned long flags;

-	spin_lock_irqsave(&master->queue_lock, flags);
+	spin_lock_irqsave(&ctlr->queue_lock, flags);

-	if (master->running || master->busy) {
-		spin_unlock_irqrestore(&master->queue_lock, flags);
+	if (ctlr->running || ctlr->busy) {
+		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
 		return -EBUSY;
 	}

-	master->running = true;
-	master->cur_msg = NULL;
-	spin_unlock_irqrestore(&master->queue_lock, flags);
+	ctlr->running = true;
+	ctlr->cur_msg = NULL;
+	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

-	kthread_queue_work(&master->kworker, &master->pump_messages);
+	kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);

 	return 0;
 }

-static int spi_stop_queue(struct spi_master *master)
+static int spi_stop_queue(struct spi_controller *ctlr)
 {
 	unsigned long flags;
 	unsigned limit = 500;
 	int ret = 0;

-	spin_lock_irqsave(&master->queue_lock, flags);
+	spin_lock_irqsave(&ctlr->queue_lock, flags);

 	/*
 	 * This is a bit lame, but is optimized for the common execution path.
-	 * A wait_queue on the master->busy could be used, but then the common
+	 * A wait_queue on the ctlr->busy could be used, but then the common
 	 * execution path (pump_messages) would be required to call wake_up or
 	 * friends on every SPI message. Do this instead.
 	 */
-	while ((!list_empty(&master->queue) || master->busy) && limit--) {
-		spin_unlock_irqrestore(&master->queue_lock, flags);
+	while ((!list_empty(&ctlr->queue) || ctlr->busy) && limit--) {
+		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
 		usleep_range(10000, 11000);
-		spin_lock_irqsave(&master->queue_lock, flags);
+		spin_lock_irqsave(&ctlr->queue_lock, flags);
 	}

-	if (!list_empty(&master->queue) || master->busy)
+	if (!list_empty(&ctlr->queue) || ctlr->busy)
 		ret = -EBUSY;
 	else
-		master->running = false;
+		ctlr->running = false;

-	spin_unlock_irqrestore(&master->queue_lock, flags);
+	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

 	if (ret) {
-		dev_warn(&master->dev,
-			 "could not stop message queue\n");
+		dev_warn(&ctlr->dev, "could not stop message queue\n");
 		return ret;
 	}
 	return ret;
 }
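(Worth noting: the polling loop above bounds spi_stop_queue() at 500 iterations of a 10-11 ms sleep, so a stuck queue is abandoned with -EBUSY after roughly 5.0-5.5 seconds. A quick back-of-the-envelope check:)

/* Worst-case wait before spi_stop_queue() reports -EBUSY (illustrative). */
#include <stdio.h>

int main(void)
{
	unsigned int limit = 500;			/* loop bound above */
	unsigned int lo_us = 10000, hi_us = 11000;	/* usleep_range() */

	printf("%u-%u ms\n", limit * lo_us / 1000, limit * hi_us / 1000);
	/* prints "5000-5500 ms" */
	return 0;
}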

-static int spi_destroy_queue(struct spi_master *master)
+static int spi_destroy_queue(struct spi_controller *ctlr)
 {
 	int ret;

-	ret = spi_stop_queue(master);
+	ret = spi_stop_queue(ctlr);

 	/*
 	 * kthread_flush_worker will block until all work is done.
@@ -1451,12 +1448,12 @@ static int spi_destroy_queue(struct spi_master *master)
 	 * return anyway.
 	 */
 	if (ret) {
-		dev_err(&master->dev, "problem destroying queue\n");
+		dev_err(&ctlr->dev, "problem destroying queue\n");
 		return ret;
 	}

-	kthread_flush_worker(&master->kworker);
-	kthread_stop(master->kworker_task);
+	kthread_flush_worker(&ctlr->kworker);
+	kthread_stop(ctlr->kworker_task);

 	return 0;
 }
@@ -1465,23 +1462,23 @@ static int __spi_queued_transfer(struct spi_device *spi,
 				 struct spi_message *msg,
 				 bool need_pump)
 {
-	struct spi_master *master = spi->master;
+	struct spi_controller *ctlr = spi->controller;
 	unsigned long flags;

-	spin_lock_irqsave(&master->queue_lock, flags);
+	spin_lock_irqsave(&ctlr->queue_lock, flags);

-	if (!master->running) {
-		spin_unlock_irqrestore(&master->queue_lock, flags);
+	if (!ctlr->running) {
+		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
 		return -ESHUTDOWN;
 	}
 	msg->actual_length = 0;
 	msg->status = -EINPROGRESS;

-	list_add_tail(&msg->queue, &master->queue);
-	if (!master->busy && need_pump)
-		kthread_queue_work(&master->kworker, &master->pump_messages);
+	list_add_tail(&msg->queue, &ctlr->queue);
+	if (!ctlr->busy && need_pump)
+		kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);

-	spin_unlock_irqrestore(&master->queue_lock, flags);
+	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
 	return 0;
 }

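(For the submitter's view of this queue, a hedged sketch with helper names invented: spi_async() lands the message here via the controller's ->transfer() hook and returns as soon as it is queued.)

/* Sketch only: async submission into the queue managed above. */
static void example_complete(void *context)
{
	complete(context);	/* wake the submitter */
}

static int example_async_read(struct spi_device *spi, void *buf, size_t len,
			      struct spi_transfer *xfer,
			      struct spi_message *msg,
			      struct completion *done)
{
	/* xfer/msg must outlive the call; callers embed them in state. */
	memset(xfer, 0, sizeof(*xfer));
	xfer->rx_buf = buf;
	xfer->len = len;

	spi_message_init(msg);
	spi_message_add_tail(xfer, msg);
	msg->complete = example_complete;
	msg->context = done;

	return spi_async(spi, msg);	/* returns once queued, not done */
}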
@@ -1497,31 +1494,31 @@ static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
 	return __spi_queued_transfer(spi, msg, true);
 }

-static int spi_master_initialize_queue(struct spi_master *master)
+static int spi_controller_initialize_queue(struct spi_controller *ctlr)
 {
 	int ret;

-	master->transfer = spi_queued_transfer;
-	if (!master->transfer_one_message)
-		master->transfer_one_message = spi_transfer_one_message;
+	ctlr->transfer = spi_queued_transfer;
+	if (!ctlr->transfer_one_message)
+		ctlr->transfer_one_message = spi_transfer_one_message;

 	/* Initialize and start queue */
-	ret = spi_init_queue(master);
+	ret = spi_init_queue(ctlr);
 	if (ret) {
-		dev_err(&master->dev, "problem initializing queue\n");
+		dev_err(&ctlr->dev, "problem initializing queue\n");
 		goto err_init_queue;
 	}
-	master->queued = true;
-	ret = spi_start_queue(master);
+	ctlr->queued = true;
+	ret = spi_start_queue(ctlr);
 	if (ret) {
-		dev_err(&master->dev, "problem starting queue\n");
+		dev_err(&ctlr->dev, "problem starting queue\n");
 		goto err_start_queue;
 	}

 	return 0;

 err_start_queue:
-	spi_destroy_queue(master);
+	spi_destroy_queue(ctlr);
 err_init_queue:
 	return ret;
 }
@@ -1529,21 +1526,12 @@ err_init_queue:
 /*-------------------------------------------------------------------------*/

 #if defined(CONFIG_OF)
-static int of_spi_parse_dt(struct spi_master *master, struct spi_device *spi,
+static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
 			   struct device_node *nc)
 {
 	u32 value;
 	int rc;

-	/* Device address */
-	rc = of_property_read_u32(nc, "reg", &value);
-	if (rc) {
-		dev_err(&master->dev, "%s has no valid 'reg' property (%d)\n",
-			nc->full_name, rc);
-		return rc;
-	}
-	spi->chip_select = value;
-
 	/* Mode (clock phase/polarity/etc.) */
 	if (of_find_property(nc, "spi-cpha", NULL))
 		spi->mode |= SPI_CPHA;
@@ -1568,7 +1556,7 @@ static int of_spi_parse_dt(struct spi_master *master, struct spi_device *spi,
 			spi->mode |= SPI_TX_QUAD;
 			break;
 		default:
-			dev_warn(&master->dev,
+			dev_warn(&ctlr->dev,
 				"spi-tx-bus-width %d not supported\n",
 				value);
 			break;
@@ -1586,17 +1574,36 @@ static int of_spi_parse_dt(struct spi_master *master, struct spi_device *spi,
 			spi->mode |= SPI_RX_QUAD;
 			break;
 		default:
-			dev_warn(&master->dev,
+			dev_warn(&ctlr->dev,
 				"spi-rx-bus-width %d not supported\n",
 				value);
 			break;
 		}
 	}

+	if (spi_controller_is_slave(ctlr)) {
+		if (strcmp(nc->name, "slave")) {
+			dev_err(&ctlr->dev, "%s is not called 'slave'\n",
+				nc->full_name);
+			return -EINVAL;
+		}
+		return 0;
+	}
+
+	/* Device address */
+	rc = of_property_read_u32(nc, "reg", &value);
+	if (rc) {
+		dev_err(&ctlr->dev, "%s has no valid 'reg' property (%d)\n",
+			nc->full_name, rc);
+		return rc;
+	}
+	spi->chip_select = value;
+
 	/* Device speed */
 	rc = of_property_read_u32(nc, "spi-max-frequency", &value);
 	if (rc) {
-		dev_err(&master->dev, "%s has no valid 'spi-max-frequency' property (%d)\n",
+		dev_err(&ctlr->dev,
+			"%s has no valid 'spi-max-frequency' property (%d)\n",
 			nc->full_name, rc);
 		return rc;
 	}
@@ -1606,15 +1613,15 @@ static int of_spi_parse_dt(struct spi_master *master, struct spi_device *spi,
 }

 static struct spi_device *
-of_register_spi_device(struct spi_master *master, struct device_node *nc)
+of_register_spi_device(struct spi_controller *ctlr, struct device_node *nc)
 {
 	struct spi_device *spi;
 	int rc;

 	/* Alloc an spi_device */
-	spi = spi_alloc_device(master);
+	spi = spi_alloc_device(ctlr);
 	if (!spi) {
-		dev_err(&master->dev, "spi_device alloc error for %s\n",
+		dev_err(&ctlr->dev, "spi_device alloc error for %s\n",
 			nc->full_name);
 		rc = -ENOMEM;
 		goto err_out;
@@ -1624,12 +1631,12 @@ of_register_spi_device(struct spi_master *master, struct device_node *nc)
 	rc = of_modalias_node(nc, spi->modalias,
 			      sizeof(spi->modalias));
 	if (rc < 0) {
-		dev_err(&master->dev, "cannot find modalias for %s\n",
+		dev_err(&ctlr->dev, "cannot find modalias for %s\n",
 			nc->full_name);
 		goto err_out;
 	}

-	rc = of_spi_parse_dt(master, spi, nc);
+	rc = of_spi_parse_dt(ctlr, spi, nc);
 	if (rc)
 		goto err_out;

@@ -1640,7 +1647,7 @@ of_register_spi_device(struct spi_master *master, struct device_node *nc)
 	/* Register the new device */
 	rc = spi_add_device(spi);
 	if (rc) {
-		dev_err(&master->dev, "spi_device register error %s\n",
+		dev_err(&ctlr->dev, "spi_device register error %s\n",
 			nc->full_name);
 		goto err_of_node_put;
 	}
@@ -1656,39 +1663,40 @@ err_out:

 /**
  * of_register_spi_devices() - Register child devices onto the SPI bus
- * @master: Pointer to spi_master device
+ * @ctlr: Pointer to spi_controller device
  *
- * Registers an spi_device for each child node of master node which has a 'reg'
- * property.
+ * Registers an spi_device for each child node of controller node which
+ * represents a valid SPI slave.
  */
-static void of_register_spi_devices(struct spi_master *master)
+static void of_register_spi_devices(struct spi_controller *ctlr)
 {
 	struct spi_device *spi;
 	struct device_node *nc;

-	if (!master->dev.of_node)
+	if (!ctlr->dev.of_node)
 		return;

-	for_each_available_child_of_node(master->dev.of_node, nc) {
+	for_each_available_child_of_node(ctlr->dev.of_node, nc) {
 		if (of_node_test_and_set_flag(nc, OF_POPULATED))
 			continue;
-		spi = of_register_spi_device(master, nc);
+		spi = of_register_spi_device(ctlr, nc);
 		if (IS_ERR(spi)) {
-			dev_warn(&master->dev, "Failed to create SPI device for %s\n",
-				nc->full_name);
+			dev_warn(&ctlr->dev,
+				 "Failed to create SPI device for %s\n",
+				 nc->full_name);
 			of_node_clear_flag(nc, OF_POPULATED);
 		}
 	}
 }
 #else
-static void of_register_spi_devices(struct spi_master *master) { }
+static void of_register_spi_devices(struct spi_controller *ctlr) { }
 #endif

 #ifdef CONFIG_ACPI
 static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
 {
 	struct spi_device *spi = data;
-	struct spi_master *master = spi->master;
+	struct spi_controller *ctlr = spi->controller;

 	if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
1694 struct acpi_resource_spi_serialbus *sb; 1702 struct acpi_resource_spi_serialbus *sb;
@@ -1702,8 +1710,8 @@ static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
1702 * 0 .. max - 1 so we need to ask the driver to 1710 * 0 .. max - 1 so we need to ask the driver to
1703 * translate between the two schemes. 1711 * translate between the two schemes.
1704 */ 1712 */
1705 if (master->fw_translate_cs) { 1713 if (ctlr->fw_translate_cs) {
1706 int cs = master->fw_translate_cs(master, 1714 int cs = ctlr->fw_translate_cs(ctlr,
1707 sb->device_selection); 1715 sb->device_selection);
1708 if (cs < 0) 1716 if (cs < 0)
1709 return cs; 1717 return cs;
@@ -1732,7 +1740,7 @@ static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
 	return 1;
 }

-static acpi_status acpi_register_spi_device(struct spi_master *master,
+static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
 					    struct acpi_device *adev)
 {
 	struct list_head resource_list;
@@ -1743,9 +1751,9 @@ static acpi_status acpi_register_spi_device(struct spi_master *master,
 	    acpi_device_enumerated(adev))
 		return AE_OK;

-	spi = spi_alloc_device(master);
+	spi = spi_alloc_device(ctlr);
 	if (!spi) {
-		dev_err(&master->dev, "failed to allocate SPI device for %s\n",
+		dev_err(&ctlr->dev, "failed to allocate SPI device for %s\n",
 			dev_name(&adev->dev));
 		return AE_NO_MEMORY;
 	}
@@ -1774,7 +1782,7 @@ static acpi_status acpi_register_spi_device(struct spi_master *master,
1774 adev->power.flags.ignore_parent = true; 1782 adev->power.flags.ignore_parent = true;
1775 if (spi_add_device(spi)) { 1783 if (spi_add_device(spi)) {
1776 adev->power.flags.ignore_parent = false; 1784 adev->power.flags.ignore_parent = false;
1777 dev_err(&master->dev, "failed to add SPI device %s from ACPI\n", 1785 dev_err(&ctlr->dev, "failed to add SPI device %s from ACPI\n",
1778 dev_name(&adev->dev)); 1786 dev_name(&adev->dev));
1779 spi_dev_put(spi); 1787 spi_dev_put(spi);
1780 } 1788 }
@@ -1785,104 +1793,211 @@ static acpi_status acpi_register_spi_device(struct spi_master *master,
1785static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level, 1793static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
1786 void *data, void **return_value) 1794 void *data, void **return_value)
1787{ 1795{
1788 struct spi_master *master = data; 1796 struct spi_controller *ctlr = data;
1789 struct acpi_device *adev; 1797 struct acpi_device *adev;
1790 1798
1791 if (acpi_bus_get_device(handle, &adev)) 1799 if (acpi_bus_get_device(handle, &adev))
1792 return AE_OK; 1800 return AE_OK;
1793 1801
1794 return acpi_register_spi_device(master, adev); 1802 return acpi_register_spi_device(ctlr, adev);
1795} 1803}
1796 1804
1797static void acpi_register_spi_devices(struct spi_master *master) 1805static void acpi_register_spi_devices(struct spi_controller *ctlr)
1798{ 1806{
1799 acpi_status status; 1807 acpi_status status;
1800 acpi_handle handle; 1808 acpi_handle handle;
1801 1809
1802 handle = ACPI_HANDLE(master->dev.parent); 1810 handle = ACPI_HANDLE(ctlr->dev.parent);
1803 if (!handle) 1811 if (!handle)
1804 return; 1812 return;
1805 1813
1806 status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1, 1814 status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
1807 acpi_spi_add_device, NULL, 1815 acpi_spi_add_device, NULL, ctlr, NULL);
1808 master, NULL);
1809 if (ACPI_FAILURE(status)) 1816 if (ACPI_FAILURE(status))
1810 dev_warn(&master->dev, "failed to enumerate SPI slaves\n"); 1817 dev_warn(&ctlr->dev, "failed to enumerate SPI slaves\n");
1811} 1818}
1812#else 1819#else
1813static inline void acpi_register_spi_devices(struct spi_master *master) {} 1820static inline void acpi_register_spi_devices(struct spi_controller *ctlr) {}
1814#endif /* CONFIG_ACPI */ 1821#endif /* CONFIG_ACPI */
1815 1822
1816static void spi_master_release(struct device *dev) 1823static void spi_controller_release(struct device *dev)
1817{ 1824{
1818 struct spi_master *master; 1825 struct spi_controller *ctlr;
1819 1826
1820 master = container_of(dev, struct spi_master, dev); 1827 ctlr = container_of(dev, struct spi_controller, dev);
1821 kfree(master); 1828 kfree(ctlr);
1822} 1829}
1823 1830
1824static struct class spi_master_class = { 1831static struct class spi_master_class = {
1825 .name = "spi_master", 1832 .name = "spi_master",
1826 .owner = THIS_MODULE, 1833 .owner = THIS_MODULE,
1827 .dev_release = spi_master_release, 1834 .dev_release = spi_controller_release,
1828 .dev_groups = spi_master_groups, 1835 .dev_groups = spi_master_groups,
1829}; 1836};
1830 1837
1838#ifdef CONFIG_SPI_SLAVE
1839/**
1840 * spi_slave_abort - abort the ongoing transfer request on an SPI slave
1841 * controller
1842 * @spi: device used for the current transfer
1843 */
1844int spi_slave_abort(struct spi_device *spi)
1845{
1846 struct spi_controller *ctlr = spi->controller;
1847
1848 if (spi_controller_is_slave(ctlr) && ctlr->slave_abort)
1849 return ctlr->slave_abort(ctlr);
1850
1851 return -ENOTSUPP;
1852}
1853EXPORT_SYMBOL_GPL(spi_slave_abort);
1854
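A minimal sketch of how a slave protocol driver might call the new helper, e.g. from a timeout or remove() path; my_slave_teardown() is a hypothetical name, not part of this patch:

    #include <linux/spi/spi.h>

    /* Give up on a transfer the remote master never clocked out. */
    static void my_slave_teardown(struct spi_device *spi)
    {
            int ret = spi_slave_abort(spi);

            if (ret == -ENOTSUPP)
                    dev_warn(&spi->dev, "controller cannot abort transfers\n");
    }
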
1855static int match_true(struct device *dev, void *data)
1856{
1857 return 1;
1858}
1859
1860static ssize_t spi_slave_show(struct device *dev,
1861 struct device_attribute *attr, char *buf)
1862{
1863 struct spi_controller *ctlr = container_of(dev, struct spi_controller,
1864 dev);
1865 struct device *child;
1866
1867 child = device_find_child(&ctlr->dev, NULL, match_true);
1868 return sprintf(buf, "%s\n",
1869 child ? to_spi_device(child)->modalias : NULL);
1870}
1871
1872static ssize_t spi_slave_store(struct device *dev,
1873 struct device_attribute *attr, const char *buf,
1874 size_t count)
1875{
1876 struct spi_controller *ctlr = container_of(dev, struct spi_controller,
1877 dev);
1878 struct spi_device *spi;
1879 struct device *child;
1880 char name[32];
1881 int rc;
1882
1883 rc = sscanf(buf, "%31s", name);
1884 if (rc != 1 || !name[0])
1885 return -EINVAL;
1886
1887 child = device_find_child(&ctlr->dev, NULL, match_true);
1888 if (child) {
1889 /* Remove registered slave */
1890 device_unregister(child);
1891 put_device(child);
1892 }
1893
1894 if (strcmp(name, "(null)")) {
1895 /* Register new slave */
1896 spi = spi_alloc_device(ctlr);
1897 if (!spi)
1898 return -ENOMEM;
1899
1900 strlcpy(spi->modalias, name, sizeof(spi->modalias));
1901
1902 rc = spi_add_device(spi);
1903 if (rc) {
1904 spi_dev_put(spi);
1905 return rc;
1906 }
1907 }
1908
1909 return count;
1910}
1911
1912static DEVICE_ATTR(slave, 0644, spi_slave_show, spi_slave_store);
1913
1914static struct attribute *spi_slave_attrs[] = {
1915 &dev_attr_slave.attr,
1916 NULL,
1917};
1918
1919static const struct attribute_group spi_slave_group = {
1920 .attrs = spi_slave_attrs,
1921};
1922
1923static const struct attribute_group *spi_slave_groups[] = {
1924 &spi_controller_statistics_group,
1925 &spi_slave_group,
1926 NULL,
1927};
1928
1929static struct class spi_slave_class = {
1930 .name = "spi_slave",
1931 .owner = THIS_MODULE,
1932 .dev_release = spi_controller_release,
1933 .dev_groups = spi_slave_groups,
1934};
1935#else
1936extern struct class spi_slave_class; /* dummy */
1937#endif
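The sysfs interface above lets userspace instantiate the slave protocol device by writing a modalias to the "slave" attribute, and remove it again by writing "(null)". A minimal userspace sketch, assuming the controller registered as bus spi0 and that a protocol driver matching the "spidev" modalias is available:

    #include <stdio.h>

    /* Bind the "spidev" protocol driver to the slave controller. */
    int main(void)
    {
            FILE *f = fopen("/sys/class/spi_slave/spi0/slave", "w");

            if (!f)
                    return 1;
            fprintf(f, "spidev\n");     /* "(null)" would unregister instead */
            return fclose(f) ? 1 : 0;
    }
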
1831 1938
1832/** 1939/**
1833 * spi_alloc_master - allocate SPI master controller 1940 * __spi_alloc_controller - allocate an SPI master or slave controller
1834 * @dev: the controller, possibly using the platform_bus 1941 * @dev: the controller, possibly using the platform_bus
1835 * @size: how much zeroed driver-private data to allocate; the pointer to this 1942 * @size: how much zeroed driver-private data to allocate; the pointer to this
1836 * memory is in the driver_data field of the returned device, 1943 * memory is in the driver_data field of the returned device,
1837 * accessible with spi_master_get_devdata(). 1944 * accessible with spi_controller_get_devdata().
1945 * @slave: flag indicating whether to allocate an SPI master (false) or SPI
1946 * slave (true) controller
1838 * Context: can sleep 1947 * Context: can sleep
1839 * 1948 *
1840 * This call is used only by SPI master controller drivers, which are the 1949 * This call is used only by SPI controller drivers, which are the
1841 * only ones directly touching chip registers. It's how they allocate 1950 * only ones directly touching chip registers. It's how they allocate
1842 * an spi_master structure, prior to calling spi_register_master(). 1951 * an spi_controller structure, prior to calling spi_register_controller().
1843 * 1952 *
1844 * This must be called from context that can sleep. 1953 * This must be called from context that can sleep.
1845 * 1954 *
1846 * The caller is responsible for assigning the bus number and initializing 1955 * The caller is responsible for assigning the bus number and initializing the
1847 * the master's methods before calling spi_register_master(); and (after errors 1956 * controller's methods before calling spi_register_controller(); and (after
1848 * adding the device) calling spi_master_put() to prevent a memory leak. 1957 * errors adding the device) calling spi_controller_put() to prevent a memory
1958 * leak.
1849 * 1959 *
1850 * Return: the SPI master structure on success, else NULL. 1960 * Return: the SPI controller structure on success, else NULL.
1851 */ 1961 */
1852struct spi_master *spi_alloc_master(struct device *dev, unsigned size) 1962struct spi_controller *__spi_alloc_controller(struct device *dev,
1963 unsigned int size, bool slave)
1853{ 1964{
1854 struct spi_master *master; 1965 struct spi_controller *ctlr;
1855 1966
1856 if (!dev) 1967 if (!dev)
1857 return NULL; 1968 return NULL;
1858 1969
1859 master = kzalloc(size + sizeof(*master), GFP_KERNEL); 1970 ctlr = kzalloc(size + sizeof(*ctlr), GFP_KERNEL);
1860 if (!master) 1971 if (!ctlr)
1861 return NULL; 1972 return NULL;
1862 1973
1863 device_initialize(&master->dev); 1974 device_initialize(&ctlr->dev);
1864 master->bus_num = -1; 1975 ctlr->bus_num = -1;
1865 master->num_chipselect = 1; 1976 ctlr->num_chipselect = 1;
1866 master->dev.class = &spi_master_class; 1977 ctlr->slave = slave;
1867 master->dev.parent = dev; 1978 if (IS_ENABLED(CONFIG_SPI_SLAVE) && slave)
1868 pm_suspend_ignore_children(&master->dev, true); 1979 ctlr->dev.class = &spi_slave_class;
1869 spi_master_set_devdata(master, &master[1]); 1980 else
1981 ctlr->dev.class = &spi_master_class;
1982 ctlr->dev.parent = dev;
1983 pm_suspend_ignore_children(&ctlr->dev, true);
1984 spi_controller_set_devdata(ctlr, &ctlr[1]);
1870 1985
1871 return master; 1986 return ctlr;
1872} 1987}
1873EXPORT_SYMBOL_GPL(spi_alloc_master); 1988EXPORT_SYMBOL_GPL(__spi_alloc_controller);
1874 1989
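Drivers are not expected to call __spi_alloc_controller() directly; a probe sketch using the spi_alloc_slave() wrapper (assumed to be added to <linux/spi/spi.h> by this series alongside spi_alloc_master(); error unwinding trimmed):

    #include <linux/platform_device.h>
    #include <linux/spi/spi.h>

    struct my_priv { int id; };     /* hypothetical driver state */

    static int my_probe(struct platform_device *pdev)
    {
            struct spi_controller *ctlr;

            ctlr = spi_alloc_slave(&pdev->dev, sizeof(struct my_priv));
            if (!ctlr)
                    return -ENOMEM;

            /* ... fill in bus_num, mode_bits, transfer_one, ... */
            return 0;
    }
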
1875#ifdef CONFIG_OF 1990#ifdef CONFIG_OF
1876static int of_spi_register_master(struct spi_master *master) 1991static int of_spi_register_master(struct spi_controller *ctlr)
1877{ 1992{
1878 int nb, i, *cs; 1993 int nb, i, *cs;
1879 struct device_node *np = master->dev.of_node; 1994 struct device_node *np = ctlr->dev.of_node;
1880 1995
1881 if (!np) 1996 if (!np)
1882 return 0; 1997 return 0;
1883 1998
1884 nb = of_gpio_named_count(np, "cs-gpios"); 1999 nb = of_gpio_named_count(np, "cs-gpios");
1885 master->num_chipselect = max_t(int, nb, master->num_chipselect); 2000 ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);
1886 2001
1887 /* Return error only for an incorrectly formed cs-gpios property */ 2002 /* Return error only for an incorrectly formed cs-gpios property */
1888 if (nb == 0 || nb == -ENOENT) 2003 if (nb == 0 || nb == -ENOENT)
@@ -1890,15 +2005,14 @@ static int of_spi_register_master(struct spi_master *master)
1890 else if (nb < 0) 2005 else if (nb < 0)
1891 return nb; 2006 return nb;
1892 2007
1893 cs = devm_kzalloc(&master->dev, 2008 cs = devm_kzalloc(&ctlr->dev, sizeof(int) * ctlr->num_chipselect,
1894 sizeof(int) * master->num_chipselect,
1895 GFP_KERNEL); 2009 GFP_KERNEL);
1896 master->cs_gpios = cs; 2010 ctlr->cs_gpios = cs;
1897 2011
1898 if (!master->cs_gpios) 2012 if (!ctlr->cs_gpios)
1899 return -ENOMEM; 2013 return -ENOMEM;
1900 2014
1901 for (i = 0; i < master->num_chipselect; i++) 2015 for (i = 0; i < ctlr->num_chipselect; i++)
1902 cs[i] = -ENOENT; 2016 cs[i] = -ENOENT;
1903 2017
1904 for (i = 0; i < nb; i++) 2018 for (i = 0; i < nb; i++)
@@ -1907,20 +2021,21 @@ static int of_spi_register_master(struct spi_master *master)
1907 return 0; 2021 return 0;
1908} 2022}
1909#else 2023#else
1910static int of_spi_register_master(struct spi_master *master) 2024static int of_spi_register_master(struct spi_controller *ctlr)
1911{ 2025{
1912 return 0; 2026 return 0;
1913} 2027}
1914#endif 2028#endif
1915 2029
1916/** 2030/**
1917 * spi_register_master - register SPI master controller 2031 * spi_register_controller - register SPI master or slave controller
 1918 * @master: initialized master, originally from spi_alloc_master() 2032 * @ctlr: initialized controller, originally from spi_alloc_master() or
2033 * spi_alloc_slave()
1919 * Context: can sleep 2034 * Context: can sleep
1920 * 2035 *
1921 * SPI master controllers connect to their drivers using some non-SPI bus, 2036 * SPI controllers connect to their drivers using some non-SPI bus,
1922 * such as the platform bus. The final stage of probe() in that code 2037 * such as the platform bus. The final stage of probe() in that code
1923 * includes calling spi_register_master() to hook up to this SPI bus glue. 2038 * includes calling spi_register_controller() to hook up to this SPI bus glue.
1924 * 2039 *
1925 * SPI controllers use board specific (often SOC specific) bus numbers, 2040 * SPI controllers use board specific (often SOC specific) bus numbers,
1926 * and board-specific addressing for SPI devices combines those numbers 2041 * and board-specific addressing for SPI devices combines those numbers
@@ -1929,16 +2044,16 @@ static int of_spi_register_master(struct spi_master *master)
1929 * chip is at which address. 2044 * chip is at which address.
1930 * 2045 *
1931 * This must be called from context that can sleep. It returns zero on 2046 * This must be called from context that can sleep. It returns zero on
1932 * success, else a negative error code (dropping the master's refcount). 2047 * success, else a negative error code (dropping the controller's refcount).
1933 * After a successful return, the caller is responsible for calling 2048 * After a successful return, the caller is responsible for calling
1934 * spi_unregister_master(). 2049 * spi_unregister_controller().
1935 * 2050 *
1936 * Return: zero on success, else a negative error code. 2051 * Return: zero on success, else a negative error code.
1937 */ 2052 */
1938int spi_register_master(struct spi_master *master) 2053int spi_register_controller(struct spi_controller *ctlr)
1939{ 2054{
1940 static atomic_t dyn_bus_id = ATOMIC_INIT((1<<15) - 1); 2055 static atomic_t dyn_bus_id = ATOMIC_INIT((1<<15) - 1);
1941 struct device *dev = master->dev.parent; 2056 struct device *dev = ctlr->dev.parent;
1942 struct boardinfo *bi; 2057 struct boardinfo *bi;
1943 int status = -ENODEV; 2058 int status = -ENODEV;
1944 int dynamic = 0; 2059 int dynamic = 0;
@@ -1946,103 +2061,109 @@ int spi_register_master(struct spi_master *master)
1946 if (!dev) 2061 if (!dev)
1947 return -ENODEV; 2062 return -ENODEV;
1948 2063
1949 status = of_spi_register_master(master); 2064 if (!spi_controller_is_slave(ctlr)) {
1950 if (status) 2065 status = of_spi_register_master(ctlr);
1951 return status; 2066 if (status)
2067 return status;
2068 }
1952 2069
1953 /* even if it's just one always-selected device, there must 2070 /* even if it's just one always-selected device, there must
1954 * be at least one chipselect 2071 * be at least one chipselect
1955 */ 2072 */
1956 if (master->num_chipselect == 0) 2073 if (ctlr->num_chipselect == 0)
1957 return -EINVAL; 2074 return -EINVAL;
1958 2075
1959 if ((master->bus_num < 0) && master->dev.of_node) 2076 if ((ctlr->bus_num < 0) && ctlr->dev.of_node)
1960 master->bus_num = of_alias_get_id(master->dev.of_node, "spi"); 2077 ctlr->bus_num = of_alias_get_id(ctlr->dev.of_node, "spi");
1961 2078
1962 /* convention: dynamically assigned bus IDs count down from the max */ 2079 /* convention: dynamically assigned bus IDs count down from the max */
1963 if (master->bus_num < 0) { 2080 if (ctlr->bus_num < 0) {
1964 /* FIXME switch to an IDR based scheme, something like 2081 /* FIXME switch to an IDR based scheme, something like
1965 * I2C now uses, so we can't run out of "dynamic" IDs 2082 * I2C now uses, so we can't run out of "dynamic" IDs
1966 */ 2083 */
1967 master->bus_num = atomic_dec_return(&dyn_bus_id); 2084 ctlr->bus_num = atomic_dec_return(&dyn_bus_id);
1968 dynamic = 1; 2085 dynamic = 1;
1969 } 2086 }
1970 2087
1971 INIT_LIST_HEAD(&master->queue); 2088 INIT_LIST_HEAD(&ctlr->queue);
1972 spin_lock_init(&master->queue_lock); 2089 spin_lock_init(&ctlr->queue_lock);
1973 spin_lock_init(&master->bus_lock_spinlock); 2090 spin_lock_init(&ctlr->bus_lock_spinlock);
1974 mutex_init(&master->bus_lock_mutex); 2091 mutex_init(&ctlr->bus_lock_mutex);
1975 mutex_init(&master->io_mutex); 2092 mutex_init(&ctlr->io_mutex);
1976 master->bus_lock_flag = 0; 2093 ctlr->bus_lock_flag = 0;
1977 init_completion(&master->xfer_completion); 2094 init_completion(&ctlr->xfer_completion);
1978 if (!master->max_dma_len) 2095 if (!ctlr->max_dma_len)
1979 master->max_dma_len = INT_MAX; 2096 ctlr->max_dma_len = INT_MAX;
1980 2097
1981 /* register the device, then userspace will see it. 2098 /* register the device, then userspace will see it.
1982 * registration fails if the bus ID is in use. 2099 * registration fails if the bus ID is in use.
1983 */ 2100 */
1984 dev_set_name(&master->dev, "spi%u", master->bus_num); 2101 dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num);
1985 status = device_add(&master->dev); 2102 status = device_add(&ctlr->dev);
1986 if (status < 0) 2103 if (status < 0)
1987 goto done; 2104 goto done;
1988 dev_dbg(dev, "registered master %s%s\n", dev_name(&master->dev), 2105 dev_dbg(dev, "registered %s %s%s\n",
1989 dynamic ? " (dynamic)" : ""); 2106 spi_controller_is_slave(ctlr) ? "slave" : "master",
2107 dev_name(&ctlr->dev), dynamic ? " (dynamic)" : "");
1990 2108
1991 /* If we're using a queued driver, start the queue */ 2109 /* If we're using a queued driver, start the queue */
1992 if (master->transfer) 2110 if (ctlr->transfer)
1993 dev_info(dev, "master is unqueued, this is deprecated\n"); 2111 dev_info(dev, "controller is unqueued, this is deprecated\n");
1994 else { 2112 else {
1995 status = spi_master_initialize_queue(master); 2113 status = spi_controller_initialize_queue(ctlr);
1996 if (status) { 2114 if (status) {
1997 device_del(&master->dev); 2115 device_del(&ctlr->dev);
1998 goto done; 2116 goto done;
1999 } 2117 }
2000 } 2118 }
2001 /* add statistics */ 2119 /* add statistics */
2002 spin_lock_init(&master->statistics.lock); 2120 spin_lock_init(&ctlr->statistics.lock);
2003 2121
2004 mutex_lock(&board_lock); 2122 mutex_lock(&board_lock);
2005 list_add_tail(&master->list, &spi_master_list); 2123 list_add_tail(&ctlr->list, &spi_controller_list);
2006 list_for_each_entry(bi, &board_list, list) 2124 list_for_each_entry(bi, &board_list, list)
2007 spi_match_master_to_boardinfo(master, &bi->board_info); 2125 spi_match_controller_to_boardinfo(ctlr, &bi->board_info);
2008 mutex_unlock(&board_lock); 2126 mutex_unlock(&board_lock);
2009 2127
2010 /* Register devices from the device tree and ACPI */ 2128 /* Register devices from the device tree and ACPI */
2011 of_register_spi_devices(master); 2129 of_register_spi_devices(ctlr);
2012 acpi_register_spi_devices(master); 2130 acpi_register_spi_devices(ctlr);
2013done: 2131done:
2014 return status; 2132 return status;
2015} 2133}
2016EXPORT_SYMBOL_GPL(spi_register_master); 2134EXPORT_SYMBOL_GPL(spi_register_controller);
2017 2135
2018static void devm_spi_unregister(struct device *dev, void *res) 2136static void devm_spi_unregister(struct device *dev, void *res)
2019{ 2137{
2020 spi_unregister_master(*(struct spi_master **)res); 2138 spi_unregister_controller(*(struct spi_controller **)res);
2021} 2139}
2022 2140
2023/** 2141/**
2024 * dev_spi_register_master - register managed SPI master controller 2142 * devm_spi_register_controller - register managed SPI master or slave
2025 * @dev: device managing SPI master 2143 * controller
2026 * @master: initialized master, originally from spi_alloc_master() 2144 * @dev: device managing SPI controller
2145 * @ctlr: initialized controller, originally from spi_alloc_master() or
2146 * spi_alloc_slave()
2027 * Context: can sleep 2147 * Context: can sleep
2028 * 2148 *
2029 * Register a SPI device as with spi_register_master() which will 2149 * Register a SPI device as with spi_register_controller() which will
 2030 * automatically be unregistered. 2150 * automatically be unregistered.
2031 * 2151 *
2032 * Return: zero on success, else a negative error code. 2152 * Return: zero on success, else a negative error code.
2033 */ 2153 */
2034int devm_spi_register_master(struct device *dev, struct spi_master *master) 2154int devm_spi_register_controller(struct device *dev,
2155 struct spi_controller *ctlr)
2035{ 2156{
2036 struct spi_master **ptr; 2157 struct spi_controller **ptr;
2037 int ret; 2158 int ret;
2038 2159
2039 ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL); 2160 ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
2040 if (!ptr) 2161 if (!ptr)
2041 return -ENOMEM; 2162 return -ENOMEM;
2042 2163
2043 ret = spi_register_master(master); 2164 ret = spi_register_controller(ctlr);
2044 if (!ret) { 2165 if (!ret) {
2045 *ptr = master; 2166 *ptr = ctlr;
2046 devres_add(dev, ptr); 2167 devres_add(dev, ptr);
2047 } else { 2168 } else {
2048 devres_free(ptr); 2169 devres_free(ptr);
@@ -2050,7 +2171,7 @@ int devm_spi_register_master(struct device *dev, struct spi_master *master)
2050 2171
2051 return ret; 2172 return ret;
2052} 2173}
2053EXPORT_SYMBOL_GPL(devm_spi_register_master); 2174EXPORT_SYMBOL_GPL(devm_spi_register_controller);
2054 2175
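Continuing the probe sketch above, registration through the managed helper ties spi_unregister_controller() to driver unbind (my_register() is a hypothetical helper):

    /* Register the controller; devres unregisters it automatically. */
    static int my_register(struct platform_device *pdev,
                           struct spi_controller *ctlr)
    {
            ctlr->dev.of_node = pdev->dev.of_node;
            return devm_spi_register_controller(&pdev->dev, ctlr);
    }
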
2055static int __unregister(struct device *dev, void *null) 2176static int __unregister(struct device *dev, void *null)
2056{ 2177{
@@ -2059,71 +2180,71 @@ static int __unregister(struct device *dev, void *null)
2059} 2180}
2060 2181
2061/** 2182/**
2062 * spi_unregister_master - unregister SPI master controller 2183 * spi_unregister_controller - unregister SPI master or slave controller
2063 * @master: the master being unregistered 2184 * @ctlr: the controller being unregistered
2064 * Context: can sleep 2185 * Context: can sleep
2065 * 2186 *
2066 * This call is used only by SPI master controller drivers, which are the 2187 * This call is used only by SPI controller drivers, which are the
2067 * only ones directly touching chip registers. 2188 * only ones directly touching chip registers.
2068 * 2189 *
2069 * This must be called from context that can sleep. 2190 * This must be called from context that can sleep.
2070 */ 2191 */
2071void spi_unregister_master(struct spi_master *master) 2192void spi_unregister_controller(struct spi_controller *ctlr)
2072{ 2193{
2073 int dummy; 2194 int dummy;
2074 2195
2075 if (master->queued) { 2196 if (ctlr->queued) {
2076 if (spi_destroy_queue(master)) 2197 if (spi_destroy_queue(ctlr))
2077 dev_err(&master->dev, "queue remove failed\n"); 2198 dev_err(&ctlr->dev, "queue remove failed\n");
2078 } 2199 }
2079 2200
2080 mutex_lock(&board_lock); 2201 mutex_lock(&board_lock);
2081 list_del(&master->list); 2202 list_del(&ctlr->list);
2082 mutex_unlock(&board_lock); 2203 mutex_unlock(&board_lock);
2083 2204
2084 dummy = device_for_each_child(&master->dev, NULL, __unregister); 2205 dummy = device_for_each_child(&ctlr->dev, NULL, __unregister);
2085 device_unregister(&master->dev); 2206 device_unregister(&ctlr->dev);
2086} 2207}
2087EXPORT_SYMBOL_GPL(spi_unregister_master); 2208EXPORT_SYMBOL_GPL(spi_unregister_controller);
2088 2209
2089int spi_master_suspend(struct spi_master *master) 2210int spi_controller_suspend(struct spi_controller *ctlr)
2090{ 2211{
2091 int ret; 2212 int ret;
2092 2213
2093 /* Basically no-ops for non-queued masters */ 2214 /* Basically no-ops for non-queued controllers */
2094 if (!master->queued) 2215 if (!ctlr->queued)
2095 return 0; 2216 return 0;
2096 2217
2097 ret = spi_stop_queue(master); 2218 ret = spi_stop_queue(ctlr);
2098 if (ret) 2219 if (ret)
2099 dev_err(&master->dev, "queue stop failed\n"); 2220 dev_err(&ctlr->dev, "queue stop failed\n");
2100 2221
2101 return ret; 2222 return ret;
2102} 2223}
2103EXPORT_SYMBOL_GPL(spi_master_suspend); 2224EXPORT_SYMBOL_GPL(spi_controller_suspend);
2104 2225
2105int spi_master_resume(struct spi_master *master) 2226int spi_controller_resume(struct spi_controller *ctlr)
2106{ 2227{
2107 int ret; 2228 int ret;
2108 2229
2109 if (!master->queued) 2230 if (!ctlr->queued)
2110 return 0; 2231 return 0;
2111 2232
2112 ret = spi_start_queue(master); 2233 ret = spi_start_queue(ctlr);
2113 if (ret) 2234 if (ret)
2114 dev_err(&master->dev, "queue restart failed\n"); 2235 dev_err(&ctlr->dev, "queue restart failed\n");
2115 2236
2116 return ret; 2237 return ret;
2117} 2238}
2118EXPORT_SYMBOL_GPL(spi_master_resume); 2239EXPORT_SYMBOL_GPL(spi_controller_resume);
2119 2240
2120static int __spi_master_match(struct device *dev, const void *data) 2241static int __spi_controller_match(struct device *dev, const void *data)
2121{ 2242{
2122 struct spi_master *m; 2243 struct spi_controller *ctlr;
2123 const u16 *bus_num = data; 2244 const u16 *bus_num = data;
2124 2245
2125 m = container_of(dev, struct spi_master, dev); 2246 ctlr = container_of(dev, struct spi_controller, dev);
2126 return m->bus_num == *bus_num; 2247 return ctlr->bus_num == *bus_num;
2127} 2248}
2128 2249
2129/** 2250/**
@@ -2133,22 +2254,22 @@ static int __spi_master_match(struct device *dev, const void *data)
2133 * 2254 *
2134 * This call may be used with devices that are registered after 2255 * This call may be used with devices that are registered after
2135 * arch init time. It returns a refcounted pointer to the relevant 2256 * arch init time. It returns a refcounted pointer to the relevant
2136 * spi_master (which the caller must release), or NULL if there is 2257 * spi_controller (which the caller must release), or NULL if there is
2137 * no such master registered. 2258 * no such master registered.
2138 * 2259 *
2139 * Return: the SPI master structure on success, else NULL. 2260 * Return: the SPI master structure on success, else NULL.
2140 */ 2261 */
2141struct spi_master *spi_busnum_to_master(u16 bus_num) 2262struct spi_controller *spi_busnum_to_master(u16 bus_num)
2142{ 2263{
2143 struct device *dev; 2264 struct device *dev;
2144 struct spi_master *master = NULL; 2265 struct spi_controller *ctlr = NULL;
2145 2266
2146 dev = class_find_device(&spi_master_class, NULL, &bus_num, 2267 dev = class_find_device(&spi_master_class, NULL, &bus_num,
2147 __spi_master_match); 2268 __spi_controller_match);
2148 if (dev) 2269 if (dev)
2149 master = container_of(dev, struct spi_master, dev); 2270 ctlr = container_of(dev, struct spi_controller, dev);
2150 /* reference got in class_find_device */ 2271 /* reference got in class_find_device */
2151 return master; 2272 return ctlr;
2152} 2273}
2153EXPORT_SYMBOL_GPL(spi_busnum_to_master); 2274EXPORT_SYMBOL_GPL(spi_busnum_to_master);
2154 2275
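A lookup sketch; the reference taken in class_find_device() must be dropped by the caller. spi_controller_put() is assumed here to be the renamed counterpart of spi_master_put(), which this hunk does not show:

    static void my_lookup(void)
    {
            struct spi_controller *ctlr = spi_busnum_to_master(0);

            if (ctlr) {
                    /* ... inspect ctlr ... */
                    spi_controller_put(ctlr);
            }
    }
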
@@ -2168,7 +2289,7 @@ EXPORT_SYMBOL_GPL(spi_busnum_to_master);
2168 * Return: the pointer to the allocated data 2289 * Return: the pointer to the allocated data
2169 * 2290 *
2170 * This may get enhanced in the future to allocate from a memory pool 2291 * This may get enhanced in the future to allocate from a memory pool
2171 * of the @spi_device or @spi_master to avoid repeated allocations. 2292 * of the @spi_device or @spi_controller to avoid repeated allocations.
2172 */ 2293 */
2173void *spi_res_alloc(struct spi_device *spi, 2294void *spi_res_alloc(struct spi_device *spi,
2174 spi_res_release_t release, 2295 spi_res_release_t release,
@@ -2220,11 +2341,10 @@ EXPORT_SYMBOL_GPL(spi_res_add);
2220 2341
2221/** 2342/**
2222 * spi_res_release - release all spi resources for this message 2343 * spi_res_release - release all spi resources for this message
2223 * @master: the @spi_master 2344 * @ctlr: the @spi_controller
2224 * @message: the @spi_message 2345 * @message: the @spi_message
2225 */ 2346 */
2226void spi_res_release(struct spi_master *master, 2347void spi_res_release(struct spi_controller *ctlr, struct spi_message *message)
2227 struct spi_message *message)
2228{ 2348{
2229 struct spi_res *res; 2349 struct spi_res *res;
2230 2350
@@ -2233,7 +2353,7 @@ void spi_res_release(struct spi_master *master,
2233 struct spi_res, entry); 2353 struct spi_res, entry);
2234 2354
2235 if (res->release) 2355 if (res->release)
2236 res->release(master, message, res->data); 2356 res->release(ctlr, message, res->data);
2237 2357
2238 list_del(&res->entry); 2358 list_del(&res->entry);
2239 2359
@@ -2246,7 +2366,7 @@ EXPORT_SYMBOL_GPL(spi_res_release);
2246 2366
2247/* Core methods for spi_message alterations */ 2367/* Core methods for spi_message alterations */
2248 2368
2249static void __spi_replace_transfers_release(struct spi_master *master, 2369static void __spi_replace_transfers_release(struct spi_controller *ctlr,
2250 struct spi_message *msg, 2370 struct spi_message *msg,
2251 void *res) 2371 void *res)
2252{ 2372{
@@ -2255,7 +2375,7 @@ static void __spi_replace_transfers_release(struct spi_master *master,
2255 2375
2256 /* call extra callback if requested */ 2376 /* call extra callback if requested */
2257 if (rxfer->release) 2377 if (rxfer->release)
2258 rxfer->release(master, msg, res); 2378 rxfer->release(ctlr, msg, res);
2259 2379
2260 /* insert replaced transfers back into the message */ 2380 /* insert replaced transfers back into the message */
2261 list_splice(&rxfer->replaced_transfers, rxfer->replaced_after); 2381 list_splice(&rxfer->replaced_transfers, rxfer->replaced_after);
@@ -2375,7 +2495,7 @@ struct spi_replaced_transfers *spi_replace_transfers(
2375} 2495}
2376EXPORT_SYMBOL_GPL(spi_replace_transfers); 2496EXPORT_SYMBOL_GPL(spi_replace_transfers);
2377 2497
2378static int __spi_split_transfer_maxsize(struct spi_master *master, 2498static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
2379 struct spi_message *msg, 2499 struct spi_message *msg,
2380 struct spi_transfer **xferp, 2500 struct spi_transfer **xferp,
2381 size_t maxsize, 2501 size_t maxsize,
@@ -2437,7 +2557,7 @@ static int __spi_split_transfer_maxsize(struct spi_master *master,
2437 *xferp = &xfers[count - 1]; 2557 *xferp = &xfers[count - 1];
2438 2558
2439 /* increment statistics counters */ 2559 /* increment statistics counters */
2440 SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, 2560 SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics,
2441 transfers_split_maxsize); 2561 transfers_split_maxsize);
2442 SPI_STATISTICS_INCREMENT_FIELD(&msg->spi->statistics, 2562 SPI_STATISTICS_INCREMENT_FIELD(&msg->spi->statistics,
2443 transfers_split_maxsize); 2563 transfers_split_maxsize);
@@ -2449,14 +2569,14 @@ static int __spi_split_transfer_maxsize(struct spi_master *master,
 2449 * spi_split_transfers_maxsize - split spi transfers into multiple transfers 2569 * spi_split_transfers_maxsize - split spi transfers into multiple transfers
2450 * when an individual transfer exceeds a 2570 * when an individual transfer exceeds a
2451 * certain size 2571 * certain size
2452 * @master: the @spi_master for this transfer 2572 * @ctlr: the @spi_controller for this transfer
2453 * @msg: the @spi_message to transform 2573 * @msg: the @spi_message to transform
 2454 * @maxsize: the maximum length a transfer may have before it is split 2574 * @maxsize: the maximum length a transfer may have before it is split
2455 * @gfp: GFP allocation flags 2575 * @gfp: GFP allocation flags
2456 * 2576 *
2457 * Return: status of transformation 2577 * Return: status of transformation
2458 */ 2578 */
2459int spi_split_transfers_maxsize(struct spi_master *master, 2579int spi_split_transfers_maxsize(struct spi_controller *ctlr,
2460 struct spi_message *msg, 2580 struct spi_message *msg,
2461 size_t maxsize, 2581 size_t maxsize,
2462 gfp_t gfp) 2582 gfp_t gfp)
@@ -2472,8 +2592,8 @@ int spi_split_transfers_maxsize(struct spi_master *master,
2472 */ 2592 */
2473 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 2593 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
2474 if (xfer->len > maxsize) { 2594 if (xfer->len > maxsize) {
2475 ret = __spi_split_transfer_maxsize( 2595 ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
2476 master, msg, &xfer, maxsize, gfp); 2596 maxsize, gfp);
2477 if (ret) 2597 if (ret)
2478 return ret; 2598 return ret;
2479 } 2599 }
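A controller driver would typically call this from its ->prepare_message() hook; a sketch with a made-up 64-byte FIFO limit:

    /* Split any transfer larger than the (hypothetical) 64-byte FIFO. */
    static int my_prepare_message(struct spi_controller *ctlr,
                                  struct spi_message *msg)
    {
            return spi_split_transfers_maxsize(ctlr, msg, 64, GFP_KERNEL);
    }

The hook would be wired up with ctlr->prepare_message = my_prepare_message; before registration.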
@@ -2485,18 +2605,18 @@ EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize);
2485 2605
2486/*-------------------------------------------------------------------------*/ 2606/*-------------------------------------------------------------------------*/
2487 2607
2488/* Core methods for SPI master protocol drivers. Some of the 2608/* Core methods for SPI controller protocol drivers. Some of the
2489 * other core methods are currently defined as inline functions. 2609 * other core methods are currently defined as inline functions.
2490 */ 2610 */
2491 2611
2492static int __spi_validate_bits_per_word(struct spi_master *master, u8 bits_per_word) 2612static int __spi_validate_bits_per_word(struct spi_controller *ctlr,
2613 u8 bits_per_word)
2493{ 2614{
2494 if (master->bits_per_word_mask) { 2615 if (ctlr->bits_per_word_mask) {
2495 /* Only 32 bits fit in the mask */ 2616 /* Only 32 bits fit in the mask */
2496 if (bits_per_word > 32) 2617 if (bits_per_word > 32)
2497 return -EINVAL; 2618 return -EINVAL;
2498 if (!(master->bits_per_word_mask & 2619 if (!(ctlr->bits_per_word_mask & SPI_BPW_MASK(bits_per_word)))
2499 SPI_BPW_MASK(bits_per_word)))
2500 return -EINVAL; 2620 return -EINVAL;
2501 } 2621 }
2502 2622
@@ -2542,9 +2662,9 @@ int spi_setup(struct spi_device *spi)
2542 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD))) 2662 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)))
2543 return -EINVAL; 2663 return -EINVAL;
2544 /* help drivers fail *cleanly* when they need options 2664 /* help drivers fail *cleanly* when they need options
2545 * that aren't supported with their current master 2665 * that aren't supported with their current controller
2546 */ 2666 */
2547 bad_bits = spi->mode & ~spi->master->mode_bits; 2667 bad_bits = spi->mode & ~spi->controller->mode_bits;
2548 ugly_bits = bad_bits & 2668 ugly_bits = bad_bits &
2549 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD); 2669 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD);
2550 if (ugly_bits) { 2670 if (ugly_bits) {
@@ -2563,15 +2683,16 @@ int spi_setup(struct spi_device *spi)
2563 if (!spi->bits_per_word) 2683 if (!spi->bits_per_word)
2564 spi->bits_per_word = 8; 2684 spi->bits_per_word = 8;
2565 2685
2566 status = __spi_validate_bits_per_word(spi->master, spi->bits_per_word); 2686 status = __spi_validate_bits_per_word(spi->controller,
2687 spi->bits_per_word);
2567 if (status) 2688 if (status)
2568 return status; 2689 return status;
2569 2690
2570 if (!spi->max_speed_hz) 2691 if (!spi->max_speed_hz)
2571 spi->max_speed_hz = spi->master->max_speed_hz; 2692 spi->max_speed_hz = spi->controller->max_speed_hz;
2572 2693
2573 if (spi->master->setup) 2694 if (spi->controller->setup)
2574 status = spi->master->setup(spi); 2695 status = spi->controller->setup(spi);
2575 2696
2576 spi_set_cs(spi, false); 2697 spi_set_cs(spi, false);
2577 2698
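A protocol driver configures the spi_device and then calls spi_setup(); a sketch with illustrative values (not from this patch):

    static int my_device_init(struct spi_device *spi)
    {
            spi->mode = SPI_MODE_3;
            spi->bits_per_word = 16;
            spi->max_speed_hz = 1000000;

            return spi_setup(spi);
    }
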
@@ -2590,7 +2711,7 @@ EXPORT_SYMBOL_GPL(spi_setup);
2590 2711
2591static int __spi_validate(struct spi_device *spi, struct spi_message *message) 2712static int __spi_validate(struct spi_device *spi, struct spi_message *message)
2592{ 2713{
2593 struct spi_master *master = spi->master; 2714 struct spi_controller *ctlr = spi->controller;
2594 struct spi_transfer *xfer; 2715 struct spi_transfer *xfer;
2595 int w_size; 2716 int w_size;
2596 2717
@@ -2602,16 +2723,16 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
2602 * either MOSI or MISO is missing. They can also be caused by 2723 * either MOSI or MISO is missing. They can also be caused by
2603 * software limitations. 2724 * software limitations.
2604 */ 2725 */
2605 if ((master->flags & SPI_MASTER_HALF_DUPLEX) 2726 if ((ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX) ||
2606 || (spi->mode & SPI_3WIRE)) { 2727 (spi->mode & SPI_3WIRE)) {
2607 unsigned flags = master->flags; 2728 unsigned flags = ctlr->flags;
2608 2729
2609 list_for_each_entry(xfer, &message->transfers, transfer_list) { 2730 list_for_each_entry(xfer, &message->transfers, transfer_list) {
2610 if (xfer->rx_buf && xfer->tx_buf) 2731 if (xfer->rx_buf && xfer->tx_buf)
2611 return -EINVAL; 2732 return -EINVAL;
2612 if ((flags & SPI_MASTER_NO_TX) && xfer->tx_buf) 2733 if ((flags & SPI_CONTROLLER_NO_TX) && xfer->tx_buf)
2613 return -EINVAL; 2734 return -EINVAL;
2614 if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf) 2735 if ((flags & SPI_CONTROLLER_NO_RX) && xfer->rx_buf)
2615 return -EINVAL; 2736 return -EINVAL;
2616 } 2737 }
2617 } 2738 }
@@ -2631,13 +2752,12 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
2631 if (!xfer->speed_hz) 2752 if (!xfer->speed_hz)
2632 xfer->speed_hz = spi->max_speed_hz; 2753 xfer->speed_hz = spi->max_speed_hz;
2633 if (!xfer->speed_hz) 2754 if (!xfer->speed_hz)
2634 xfer->speed_hz = master->max_speed_hz; 2755 xfer->speed_hz = ctlr->max_speed_hz;
2635 2756
2636 if (master->max_speed_hz && 2757 if (ctlr->max_speed_hz && xfer->speed_hz > ctlr->max_speed_hz)
2637 xfer->speed_hz > master->max_speed_hz) 2758 xfer->speed_hz = ctlr->max_speed_hz;
2638 xfer->speed_hz = master->max_speed_hz;
2639 2759
2640 if (__spi_validate_bits_per_word(master, xfer->bits_per_word)) 2760 if (__spi_validate_bits_per_word(ctlr, xfer->bits_per_word))
2641 return -EINVAL; 2761 return -EINVAL;
2642 2762
2643 /* 2763 /*
@@ -2655,8 +2775,8 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
2655 if (xfer->len % w_size) 2775 if (xfer->len % w_size)
2656 return -EINVAL; 2776 return -EINVAL;
2657 2777
2658 if (xfer->speed_hz && master->min_speed_hz && 2778 if (xfer->speed_hz && ctlr->min_speed_hz &&
2659 xfer->speed_hz < master->min_speed_hz) 2779 xfer->speed_hz < ctlr->min_speed_hz)
2660 return -EINVAL; 2780 return -EINVAL;
2661 2781
2662 if (xfer->tx_buf && !xfer->tx_nbits) 2782 if (xfer->tx_buf && !xfer->tx_nbits)
@@ -2701,16 +2821,16 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
2701 2821
2702static int __spi_async(struct spi_device *spi, struct spi_message *message) 2822static int __spi_async(struct spi_device *spi, struct spi_message *message)
2703{ 2823{
2704 struct spi_master *master = spi->master; 2824 struct spi_controller *ctlr = spi->controller;
2705 2825
2706 message->spi = spi; 2826 message->spi = spi;
2707 2827
2708 SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, spi_async); 2828 SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_async);
2709 SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_async); 2829 SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_async);
2710 2830
2711 trace_spi_message_submit(message); 2831 trace_spi_message_submit(message);
2712 2832
2713 return master->transfer(spi, message); 2833 return ctlr->transfer(spi, message);
2714} 2834}
2715 2835
2716/** 2836/**
@@ -2746,7 +2866,7 @@ static int __spi_async(struct spi_device *spi, struct spi_message *message)
2746 */ 2866 */
2747int spi_async(struct spi_device *spi, struct spi_message *message) 2867int spi_async(struct spi_device *spi, struct spi_message *message)
2748{ 2868{
2749 struct spi_master *master = spi->master; 2869 struct spi_controller *ctlr = spi->controller;
2750 int ret; 2870 int ret;
2751 unsigned long flags; 2871 unsigned long flags;
2752 2872
@@ -2754,14 +2874,14 @@ int spi_async(struct spi_device *spi, struct spi_message *message)
2754 if (ret != 0) 2874 if (ret != 0)
2755 return ret; 2875 return ret;
2756 2876
2757 spin_lock_irqsave(&master->bus_lock_spinlock, flags); 2877 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
2758 2878
2759 if (master->bus_lock_flag) 2879 if (ctlr->bus_lock_flag)
2760 ret = -EBUSY; 2880 ret = -EBUSY;
2761 else 2881 else
2762 ret = __spi_async(spi, message); 2882 ret = __spi_async(spi, message);
2763 2883
2764 spin_unlock_irqrestore(&master->bus_lock_spinlock, flags); 2884 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
2765 2885
2766 return ret; 2886 return ret;
2767} 2887}
@@ -2800,7 +2920,7 @@ EXPORT_SYMBOL_GPL(spi_async);
2800 */ 2920 */
2801int spi_async_locked(struct spi_device *spi, struct spi_message *message) 2921int spi_async_locked(struct spi_device *spi, struct spi_message *message)
2802{ 2922{
2803 struct spi_master *master = spi->master; 2923 struct spi_controller *ctlr = spi->controller;
2804 int ret; 2924 int ret;
2805 unsigned long flags; 2925 unsigned long flags;
2806 2926
@@ -2808,11 +2928,11 @@ int spi_async_locked(struct spi_device *spi, struct spi_message *message)
2808 if (ret != 0) 2928 if (ret != 0)
2809 return ret; 2929 return ret;
2810 2930
2811 spin_lock_irqsave(&master->bus_lock_spinlock, flags); 2931 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
2812 2932
2813 ret = __spi_async(spi, message); 2933 ret = __spi_async(spi, message);
2814 2934
2815 spin_unlock_irqrestore(&master->bus_lock_spinlock, flags); 2935 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
2816 2936
2817 return ret; 2937 return ret;
2818 2938
@@ -2824,7 +2944,7 @@ int spi_flash_read(struct spi_device *spi,
2824 struct spi_flash_read_message *msg) 2944 struct spi_flash_read_message *msg)
2825 2945
2826{ 2946{
2827 struct spi_master *master = spi->master; 2947 struct spi_controller *master = spi->controller;
2828 struct device *rx_dev = NULL; 2948 struct device *rx_dev = NULL;
2829 int ret; 2949 int ret;
2830 2950
@@ -2878,7 +2998,7 @@ EXPORT_SYMBOL_GPL(spi_flash_read);
2878 2998
2879/*-------------------------------------------------------------------------*/ 2999/*-------------------------------------------------------------------------*/
2880 3000
2881/* Utility methods for SPI master protocol drivers, layered on 3001/* Utility methods for SPI protocol drivers, layered on
2882 * top of the core. Some other utility methods are defined as 3002 * top of the core. Some other utility methods are defined as
2883 * inline functions. 3003 * inline functions.
2884 */ 3004 */
@@ -2892,7 +3012,7 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message)
2892{ 3012{
2893 DECLARE_COMPLETION_ONSTACK(done); 3013 DECLARE_COMPLETION_ONSTACK(done);
2894 int status; 3014 int status;
2895 struct spi_master *master = spi->master; 3015 struct spi_controller *ctlr = spi->controller;
2896 unsigned long flags; 3016 unsigned long flags;
2897 3017
2898 status = __spi_validate(spi, message); 3018 status = __spi_validate(spi, message);
@@ -2903,7 +3023,7 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message)
2903 message->context = &done; 3023 message->context = &done;
2904 message->spi = spi; 3024 message->spi = spi;
2905 3025
2906 SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, spi_sync); 3026 SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_sync);
2907 SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_sync); 3027 SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_sync);
2908 3028
2909 /* If we're not using the legacy transfer method then we will 3029 /* If we're not using the legacy transfer method then we will
@@ -2911,14 +3031,14 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message)
2911 * This code would be less tricky if we could remove the 3031 * This code would be less tricky if we could remove the
2912 * support for driver implemented message queues. 3032 * support for driver implemented message queues.
2913 */ 3033 */
2914 if (master->transfer == spi_queued_transfer) { 3034 if (ctlr->transfer == spi_queued_transfer) {
2915 spin_lock_irqsave(&master->bus_lock_spinlock, flags); 3035 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
2916 3036
2917 trace_spi_message_submit(message); 3037 trace_spi_message_submit(message);
2918 3038
2919 status = __spi_queued_transfer(spi, message, false); 3039 status = __spi_queued_transfer(spi, message, false);
2920 3040
2921 spin_unlock_irqrestore(&master->bus_lock_spinlock, flags); 3041 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
2922 } else { 3042 } else {
2923 status = spi_async_locked(spi, message); 3043 status = spi_async_locked(spi, message);
2924 } 3044 }
@@ -2927,12 +3047,12 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message)
2927 /* Push out the messages in the calling context if we 3047 /* Push out the messages in the calling context if we
2928 * can. 3048 * can.
2929 */ 3049 */
2930 if (master->transfer == spi_queued_transfer) { 3050 if (ctlr->transfer == spi_queued_transfer) {
2931 SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, 3051 SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics,
2932 spi_sync_immediate); 3052 spi_sync_immediate);
2933 SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, 3053 SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics,
2934 spi_sync_immediate); 3054 spi_sync_immediate);
2935 __spi_pump_messages(master, false); 3055 __spi_pump_messages(ctlr, false);
2936 } 3056 }
2937 3057
2938 wait_for_completion(&done); 3058 wait_for_completion(&done);
@@ -2967,9 +3087,9 @@ int spi_sync(struct spi_device *spi, struct spi_message *message)
2967{ 3087{
2968 int ret; 3088 int ret;
2969 3089
2970 mutex_lock(&spi->master->bus_lock_mutex); 3090 mutex_lock(&spi->controller->bus_lock_mutex);
2971 ret = __spi_sync(spi, message); 3091 ret = __spi_sync(spi, message);
2972 mutex_unlock(&spi->master->bus_lock_mutex); 3092 mutex_unlock(&spi->controller->bus_lock_mutex);
2973 3093
2974 return ret; 3094 return ret;
2975} 3095}
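A sketch of one full-duplex transfer through the synchronous API (my_xfer() is a hypothetical helper):

    static int my_xfer(struct spi_device *spi, const void *tx, void *rx,
                       size_t len)
    {
            struct spi_transfer t = {
                    .tx_buf = tx,
                    .rx_buf = rx,
                    .len    = len,
            };
            struct spi_message m;

            spi_message_init(&m);
            spi_message_add_tail(&t, &m);

            return spi_sync(spi, &m);
    }
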
@@ -2999,7 +3119,7 @@ EXPORT_SYMBOL_GPL(spi_sync_locked);
2999 3119
3000/** 3120/**
3001 * spi_bus_lock - obtain a lock for exclusive SPI bus usage 3121 * spi_bus_lock - obtain a lock for exclusive SPI bus usage
3002 * @master: SPI bus master that should be locked for exclusive bus access 3122 * @ctlr: SPI bus master that should be locked for exclusive bus access
3003 * Context: can sleep 3123 * Context: can sleep
3004 * 3124 *
3005 * This call may only be used from a context that may sleep. The sleep 3125 * This call may only be used from a context that may sleep. The sleep
@@ -3012,15 +3132,15 @@ EXPORT_SYMBOL_GPL(spi_sync_locked);
3012 * 3132 *
3013 * Return: always zero. 3133 * Return: always zero.
3014 */ 3134 */
3015int spi_bus_lock(struct spi_master *master) 3135int spi_bus_lock(struct spi_controller *ctlr)
3016{ 3136{
3017 unsigned long flags; 3137 unsigned long flags;
3018 3138
3019 mutex_lock(&master->bus_lock_mutex); 3139 mutex_lock(&ctlr->bus_lock_mutex);
3020 3140
3021 spin_lock_irqsave(&master->bus_lock_spinlock, flags); 3141 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
3022 master->bus_lock_flag = 1; 3142 ctlr->bus_lock_flag = 1;
3023 spin_unlock_irqrestore(&master->bus_lock_spinlock, flags); 3143 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
3024 3144
3025 /* mutex remains locked until spi_bus_unlock is called */ 3145 /* mutex remains locked until spi_bus_unlock is called */
3026 3146
@@ -3030,7 +3150,7 @@ EXPORT_SYMBOL_GPL(spi_bus_lock);
3030 3150
3031/** 3151/**
3032 * spi_bus_unlock - release the lock for exclusive SPI bus usage 3152 * spi_bus_unlock - release the lock for exclusive SPI bus usage
3033 * @master: SPI bus master that was locked for exclusive bus access 3153 * @ctlr: SPI bus master that was locked for exclusive bus access
3034 * Context: can sleep 3154 * Context: can sleep
3035 * 3155 *
3036 * This call may only be used from a context that may sleep. The sleep 3156 * This call may only be used from a context that may sleep. The sleep
@@ -3041,11 +3161,11 @@ EXPORT_SYMBOL_GPL(spi_bus_lock);
3041 * 3161 *
3042 * Return: always zero. 3162 * Return: always zero.
3043 */ 3163 */
3044int spi_bus_unlock(struct spi_master *master) 3164int spi_bus_unlock(struct spi_controller *ctlr)
3045{ 3165{
3046 master->bus_lock_flag = 0; 3166 ctlr->bus_lock_flag = 0;
3047 3167
3048 mutex_unlock(&master->bus_lock_mutex); 3168 mutex_unlock(&ctlr->bus_lock_mutex);
3049 3169
3050 return 0; 3170 return 0;
3051} 3171}
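The lock pairs with spi_sync_locked() (exported just below) to keep a sequence of messages back-to-back on the bus; a sketch:

    /* No other chip select can be asserted between the two messages. */
    static int my_atomic_pair(struct spi_device *spi,
                              struct spi_message *m1,
                              struct spi_message *m2)
    {
            struct spi_controller *ctlr = spi->controller;
            int ret;

            spi_bus_lock(ctlr);
            ret = spi_sync_locked(spi, m1);
            if (!ret)
                    ret = spi_sync_locked(spi, m2);
            spi_bus_unlock(ctlr);

            return ret;
    }
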
@@ -3147,45 +3267,48 @@ static struct spi_device *of_find_spi_device_by_node(struct device_node *node)
3147 return dev ? to_spi_device(dev) : NULL; 3267 return dev ? to_spi_device(dev) : NULL;
3148} 3268}
3149 3269
3150static int __spi_of_master_match(struct device *dev, const void *data) 3270static int __spi_of_controller_match(struct device *dev, const void *data)
3151{ 3271{
3152 return dev->of_node == data; 3272 return dev->of_node == data;
3153} 3273}
3154 3274
 3155 /* the spi masters are not using spi_bus, so we find them another way */ 3275 /* the spi controllers are not using spi_bus, so we find them another way */
3156static struct spi_master *of_find_spi_master_by_node(struct device_node *node) 3276static struct spi_controller *of_find_spi_controller_by_node(struct device_node *node)
3157{ 3277{
3158 struct device *dev; 3278 struct device *dev;
3159 3279
3160 dev = class_find_device(&spi_master_class, NULL, node, 3280 dev = class_find_device(&spi_master_class, NULL, node,
3161 __spi_of_master_match); 3281 __spi_of_controller_match);
3282 if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
3283 dev = class_find_device(&spi_slave_class, NULL, node,
3284 __spi_of_controller_match);
3162 if (!dev) 3285 if (!dev)
3163 return NULL; 3286 return NULL;
3164 3287
3165 /* reference got in class_find_device */ 3288 /* reference got in class_find_device */
3166 return container_of(dev, struct spi_master, dev); 3289 return container_of(dev, struct spi_controller, dev);
3167} 3290}
3168 3291
3169static int of_spi_notify(struct notifier_block *nb, unsigned long action, 3292static int of_spi_notify(struct notifier_block *nb, unsigned long action,
3170 void *arg) 3293 void *arg)
3171{ 3294{
3172 struct of_reconfig_data *rd = arg; 3295 struct of_reconfig_data *rd = arg;
3173 struct spi_master *master; 3296 struct spi_controller *ctlr;
3174 struct spi_device *spi; 3297 struct spi_device *spi;
3175 3298
3176 switch (of_reconfig_get_state_change(action, arg)) { 3299 switch (of_reconfig_get_state_change(action, arg)) {
3177 case OF_RECONFIG_CHANGE_ADD: 3300 case OF_RECONFIG_CHANGE_ADD:
3178 master = of_find_spi_master_by_node(rd->dn->parent); 3301 ctlr = of_find_spi_controller_by_node(rd->dn->parent);
3179 if (master == NULL) 3302 if (ctlr == NULL)
3180 return NOTIFY_OK; /* not for us */ 3303 return NOTIFY_OK; /* not for us */
3181 3304
3182 if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) { 3305 if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
3183 put_device(&master->dev); 3306 put_device(&ctlr->dev);
3184 return NOTIFY_OK; 3307 return NOTIFY_OK;
3185 } 3308 }
3186 3309
3187 spi = of_register_spi_device(master, rd->dn); 3310 spi = of_register_spi_device(ctlr, rd->dn);
3188 put_device(&master->dev); 3311 put_device(&ctlr->dev);
3189 3312
3190 if (IS_ERR(spi)) { 3313 if (IS_ERR(spi)) {
3191 pr_err("%s: failed to create for '%s'\n", 3314 pr_err("%s: failed to create for '%s'\n",
@@ -3224,7 +3347,7 @@ extern struct notifier_block spi_of_notifier;
3224#endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */ 3347#endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
3225 3348
3226#if IS_ENABLED(CONFIG_ACPI) 3349#if IS_ENABLED(CONFIG_ACPI)
3227static int spi_acpi_master_match(struct device *dev, const void *data) 3350static int spi_acpi_controller_match(struct device *dev, const void *data)
3228{ 3351{
3229 return ACPI_COMPANION(dev->parent) == data; 3352 return ACPI_COMPANION(dev->parent) == data;
3230} 3353}
@@ -3234,16 +3357,19 @@ static int spi_acpi_device_match(struct device *dev, void *data)
3234 return ACPI_COMPANION(dev) == data; 3357 return ACPI_COMPANION(dev) == data;
3235} 3358}
3236 3359
3237static struct spi_master *acpi_spi_find_master_by_adev(struct acpi_device *adev) 3360static struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev)
3238{ 3361{
3239 struct device *dev; 3362 struct device *dev;
3240 3363
3241 dev = class_find_device(&spi_master_class, NULL, adev, 3364 dev = class_find_device(&spi_master_class, NULL, adev,
3242 spi_acpi_master_match); 3365 spi_acpi_controller_match);
3366 if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
3367 dev = class_find_device(&spi_slave_class, NULL, adev,
3368 spi_acpi_controller_match);
3243 if (!dev) 3369 if (!dev)
3244 return NULL; 3370 return NULL;
3245 3371
3246 return container_of(dev, struct spi_master, dev); 3372 return container_of(dev, struct spi_controller, dev);
3247} 3373}
3248 3374
3249static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev) 3375static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev)
@@ -3259,17 +3385,17 @@ static int acpi_spi_notify(struct notifier_block *nb, unsigned long value,
3259 void *arg) 3385 void *arg)
3260{ 3386{
3261 struct acpi_device *adev = arg; 3387 struct acpi_device *adev = arg;
3262 struct spi_master *master; 3388 struct spi_controller *ctlr;
3263 struct spi_device *spi; 3389 struct spi_device *spi;
3264 3390
3265 switch (value) { 3391 switch (value) {
3266 case ACPI_RECONFIG_DEVICE_ADD: 3392 case ACPI_RECONFIG_DEVICE_ADD:
3267 master = acpi_spi_find_master_by_adev(adev->parent); 3393 ctlr = acpi_spi_find_controller_by_adev(adev->parent);
3268 if (!master) 3394 if (!ctlr)
3269 break; 3395 break;
3270 3396
3271 acpi_register_spi_device(master, adev); 3397 acpi_register_spi_device(ctlr, adev);
3272 put_device(&master->dev); 3398 put_device(&ctlr->dev);
3273 break; 3399 break;
3274 case ACPI_RECONFIG_DEVICE_REMOVE: 3400 case ACPI_RECONFIG_DEVICE_REMOVE:
3275 if (!acpi_device_enumerated(adev)) 3401 if (!acpi_device_enumerated(adev))
@@ -3312,6 +3438,12 @@ static int __init spi_init(void)
3312 if (status < 0) 3438 if (status < 0)
3313 goto err2; 3439 goto err2;
3314 3440
3441 if (IS_ENABLED(CONFIG_SPI_SLAVE)) {
3442 status = class_register(&spi_slave_class);
3443 if (status < 0)
3444 goto err3;
3445 }
3446
3315 if (IS_ENABLED(CONFIG_OF_DYNAMIC)) 3447 if (IS_ENABLED(CONFIG_OF_DYNAMIC))
3316 WARN_ON(of_reconfig_notifier_register(&spi_of_notifier)); 3448 WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
3317 if (IS_ENABLED(CONFIG_ACPI)) 3449 if (IS_ENABLED(CONFIG_ACPI))
@@ -3319,6 +3451,8 @@ static int __init spi_init(void)
3319 3451
3320 return 0; 3452 return 0;
3321 3453
3454err3:
3455 class_unregister(&spi_master_class);
3322err2: 3456err2:
3323 bus_unregister(&spi_bus_type); 3457 bus_unregister(&spi_bus_type);
3324err1: 3458err1:
diff --git a/drivers/staging/android/ion/devicetree.txt b/drivers/staging/android/ion/devicetree.txt
deleted file mode 100644
index 168715271f06..000000000000
--- a/drivers/staging/android/ion/devicetree.txt
+++ /dev/null
@@ -1,51 +0,0 @@
1Ion Memory Manager
2
3Ion is a memory manager that allows for sharing of buffers via dma-buf.
4Ion allows for different types of allocation via an abstraction called
5a 'heap'. A heap represents a specific type of memory. Each heap has
6a different type. There can be multiple instances of the same heap
7type.
8
9Specific heap instances are tied to heap IDs. Heap IDs are not to be specified
10in the devicetree.
11
12Required properties for Ion
13
14- compatible: "linux,ion" PLUS a compatible property for the device
15
16All child nodes of a linux,ion node are interpreted as heaps
17
18Required properties for heaps
19
20- compatible: compatible string for a heap type PLUS a compatible property
21for the specific instance of the heap. Current heap types
22-- linux,ion-heap-system
23-- linux,ion-heap-system-contig
24-- linux,ion-heap-carveout
25-- linux,ion-heap-chunk
26-- linux,ion-heap-dma
27-- linux,ion-heap-custom
28
29Optional properties
30- memory-region: A phandle to a memory region. Required for DMA heap type
31(see reserved-memory.txt for details on the reservation)
32
33Example:
34
35 ion {
 36 compatible = "hisilicon,ion", "linux,ion";
37
38 ion-system-heap {
 39 compatible = "hisilicon,system-heap", "linux,ion-heap-system";
40 };
41
42 ion-camera-region {
 43 compatible = "hisilicon,camera-heap", "linux,ion-heap-dma";
44 memory-region = <&camera_region>;
45 };
46
47 ion-fb-region {
 48 compatible = "hisilicon,fb-heap", "linux,ion-heap-dma";
49 memory-region = <&fb_region>;
50 };
51 }
diff --git a/drivers/staging/ccree/Kconfig b/drivers/staging/ccree/Kconfig
index ae627049c499..4be87f503e3b 100644
--- a/drivers/staging/ccree/Kconfig
+++ b/drivers/staging/ccree/Kconfig
@@ -1,6 +1,6 @@
1config CRYPTO_DEV_CCREE 1config CRYPTO_DEV_CCREE
2 tristate "Support for ARM TrustZone CryptoCell C7XX family of Crypto accelerators" 2 tristate "Support for ARM TrustZone CryptoCell C7XX family of Crypto accelerators"
3 depends on CRYPTO_HW && OF && HAS_DMA 3 depends on CRYPTO && CRYPTO_HW && OF && HAS_DMA
4 default n 4 default n
5 select CRYPTO_HASH 5 select CRYPTO_HASH
6 select CRYPTO_BLKCIPHER 6 select CRYPTO_BLKCIPHER
diff --git a/drivers/staging/ccree/ssi_buffer_mgr.c b/drivers/staging/ccree/ssi_buffer_mgr.c
index 038e2ff5e545..6471d3d2d375 100644
--- a/drivers/staging/ccree/ssi_buffer_mgr.c
+++ b/drivers/staging/ccree/ssi_buffer_mgr.c
@@ -216,7 +216,8 @@ void ssi_buffer_mgr_copy_scatterlist_portion(
216 uint32_t nents, lbytes; 216 uint32_t nents, lbytes;
217 217
218 nents = ssi_buffer_mgr_get_sgl_nents(sg, end, &lbytes, NULL); 218 nents = ssi_buffer_mgr_get_sgl_nents(sg, end, &lbytes, NULL);
219 sg_copy_buffer(sg, nents, (void *)dest, (end - to_skip), 0, (direct == SSI_SG_TO_BUF)); 219 sg_copy_buffer(sg, nents, (void *)dest, (end - to_skip + 1), to_skip,
220 (direct == SSI_SG_TO_BUF));
220} 221}
221 222
222static inline int ssi_buffer_mgr_render_buff_to_mlli( 223static inline int ssi_buffer_mgr_render_buff_to_mlli(
diff --git a/drivers/staging/ccree/ssi_request_mgr.c b/drivers/staging/ccree/ssi_request_mgr.c
index 522bd62c102e..8611adf3bb2e 100644
--- a/drivers/staging/ccree/ssi_request_mgr.c
+++ b/drivers/staging/ccree/ssi_request_mgr.c
@@ -376,7 +376,6 @@ int send_request(
376 rc = ssi_power_mgr_runtime_get(&drvdata->plat_dev->dev); 376 rc = ssi_power_mgr_runtime_get(&drvdata->plat_dev->dev);
377 if (rc != 0) { 377 if (rc != 0) {
378 SSI_LOG_ERR("ssi_power_mgr_runtime_get returned %x\n",rc); 378 SSI_LOG_ERR("ssi_power_mgr_runtime_get returned %x\n",rc);
379 spin_unlock_bh(&req_mgr_h->hw_lock);
380 return rc; 379 return rc;
381 } 380 }
382#endif 381#endif
diff --git a/drivers/staging/fsl-dpaa2/Kconfig b/drivers/staging/fsl-dpaa2/Kconfig
index 2e325cb747ae..730fd6d4db33 100644
--- a/drivers/staging/fsl-dpaa2/Kconfig
+++ b/drivers/staging/fsl-dpaa2/Kconfig
@@ -12,6 +12,7 @@ config FSL_DPAA2
12config FSL_DPAA2_ETH 12config FSL_DPAA2_ETH
13 tristate "Freescale DPAA2 Ethernet" 13 tristate "Freescale DPAA2 Ethernet"
14 depends on FSL_DPAA2 && FSL_MC_DPIO 14 depends on FSL_DPAA2 && FSL_MC_DPIO
15 depends on NETDEVICES && ETHERNET
15 ---help--- 16 ---help---
16 Ethernet driver for Freescale DPAA2 SoCs, using the 17 Ethernet driver for Freescale DPAA2 SoCs, using the
17 Freescale MC bus driver 18 Freescale MC bus driver
diff --git a/drivers/staging/iio/cdc/ad7152.c b/drivers/staging/iio/cdc/ad7152.c
index dc6ecd824365..ff10d1f0a7e4 100644
--- a/drivers/staging/iio/cdc/ad7152.c
+++ b/drivers/staging/iio/cdc/ad7152.c
@@ -231,16 +231,12 @@ static int ad7152_write_raw_samp_freq(struct device *dev, int val)
231 if (i >= ARRAY_SIZE(ad7152_filter_rate_table)) 231 if (i >= ARRAY_SIZE(ad7152_filter_rate_table))
232 i = ARRAY_SIZE(ad7152_filter_rate_table) - 1; 232 i = ARRAY_SIZE(ad7152_filter_rate_table) - 1;
233 233
234 mutex_lock(&chip->state_lock);
235 ret = i2c_smbus_write_byte_data(chip->client, 234 ret = i2c_smbus_write_byte_data(chip->client,
236 AD7152_REG_CFG2, AD7152_CFG2_OSR(i)); 235 AD7152_REG_CFG2, AD7152_CFG2_OSR(i));
237 if (ret < 0) { 236 if (ret < 0)
238 mutex_unlock(&chip->state_lock);
239 return ret; 237 return ret;
240 }
241 238
242 chip->filter_rate_setup = i; 239 chip->filter_rate_setup = i;
243 mutex_unlock(&chip->state_lock);
244 240
245 return ret; 241 return ret;
246} 242}
diff --git a/drivers/staging/lustre/lustre/lov/lov_pack.c b/drivers/staging/lustre/lustre/lov/lov_pack.c
index 2e1bd47337fd..e6727cefde05 100644
--- a/drivers/staging/lustre/lustre/lov/lov_pack.c
+++ b/drivers/staging/lustre/lustre/lov/lov_pack.c
@@ -293,18 +293,10 @@ int lov_getstripe(struct lov_object *obj, struct lov_stripe_md *lsm,
293 size_t lmmk_size; 293 size_t lmmk_size;
294 size_t lum_size; 294 size_t lum_size;
295 int rc; 295 int rc;
296 mm_segment_t seg;
297 296
298 if (!lsm) 297 if (!lsm)
299 return -ENODATA; 298 return -ENODATA;
300 299
301 /*
302 * "Switch to kernel segment" to allow copying from kernel space by
303 * copy_{to,from}_user().
304 */
305 seg = get_fs();
306 set_fs(KERNEL_DS);
307
308 if (lsm->lsm_magic != LOV_MAGIC_V1 && lsm->lsm_magic != LOV_MAGIC_V3) { 300 if (lsm->lsm_magic != LOV_MAGIC_V1 && lsm->lsm_magic != LOV_MAGIC_V3) {
309 CERROR("bad LSM MAGIC: 0x%08X != 0x%08X nor 0x%08X\n", 301 CERROR("bad LSM MAGIC: 0x%08X != 0x%08X nor 0x%08X\n",
310 lsm->lsm_magic, LOV_MAGIC_V1, LOV_MAGIC_V3); 302 lsm->lsm_magic, LOV_MAGIC_V1, LOV_MAGIC_V3);
@@ -406,6 +398,5 @@ int lov_getstripe(struct lov_object *obj, struct lov_stripe_md *lsm,
406out_free: 398out_free:
407 kvfree(lmmk); 399 kvfree(lmmk);
408out: 400out:
409 set_fs(seg);
410 return rc; 401 return rc;
411} 402}
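For readers unfamiliar with the deleted idiom: get_fs()/set_fs(KERNEL_DS) temporarily widened the user-access window so copy_{to,from}_user() would also accept kernel pointers. A sketch of what the removed lines amounted to, shown only for illustration (the pattern is deprecated, and lov_getstripe() evidently no longer needs it):

    #include <linux/uaccess.h>

    /* Legacy pattern removed above: pretend a kernel buffer is a user
     * buffer for the duration of the copy. New code should keep real
     * __user pointers instead. */
    static int copy_with_kernel_ds(void *dst, const void *src, size_t len)
    {
            mm_segment_t old = get_fs();
            int rc;

            set_fs(KERNEL_DS);
            rc = copy_to_user((void __user *)dst, src, len) ? -EFAULT : 0;
            set_fs(old);
            return rc;
    }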
diff --git a/drivers/staging/media/atomisp/i2c/Makefile b/drivers/staging/media/atomisp/i2c/Makefile
index 8ea01904c0ea..466517c7c8e6 100644
--- a/drivers/staging/media/atomisp/i2c/Makefile
+++ b/drivers/staging/media/atomisp/i2c/Makefile
@@ -19,5 +19,3 @@ obj-$(CONFIG_VIDEO_AP1302) += ap1302.o
19 19
20obj-$(CONFIG_VIDEO_LM3554) += lm3554.o 20obj-$(CONFIG_VIDEO_LM3554) += lm3554.o
21 21
22ccflags-y += -Werror
23
diff --git a/drivers/staging/media/atomisp/i2c/imx/Makefile b/drivers/staging/media/atomisp/i2c/imx/Makefile
index 1d7f7ab94cac..6b13a3a66e49 100644
--- a/drivers/staging/media/atomisp/i2c/imx/Makefile
+++ b/drivers/staging/media/atomisp/i2c/imx/Makefile
@@ -4,5 +4,3 @@ imx1x5-objs := imx.o drv201.o ad5816g.o dw9714.o dw9719.o dw9718.o vcm.o otp.o o
4 4
5ov8858_driver-objs := ../ov8858.o dw9718.o vcm.o 5ov8858_driver-objs := ../ov8858.o dw9718.o vcm.o
6obj-$(CONFIG_VIDEO_OV8858) += ov8858_driver.o 6obj-$(CONFIG_VIDEO_OV8858) += ov8858_driver.o
7
8ccflags-y += -Werror
diff --git a/drivers/staging/media/atomisp/i2c/ov5693/Makefile b/drivers/staging/media/atomisp/i2c/ov5693/Makefile
index fceb9e9b881b..c9c0e1245858 100644
--- a/drivers/staging/media/atomisp/i2c/ov5693/Makefile
+++ b/drivers/staging/media/atomisp/i2c/ov5693/Makefile
@@ -1,3 +1 @@
1obj-$(CONFIG_VIDEO_OV5693) += ov5693.o 1obj-$(CONFIG_VIDEO_OV5693) += ov5693.o
2
3ccflags-y += -Werror
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/Makefile b/drivers/staging/media/atomisp/pci/atomisp2/Makefile
index 3fa7c1c1479f..f126a89a08e9 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/Makefile
+++ b/drivers/staging/media/atomisp/pci/atomisp2/Makefile
@@ -351,5 +351,5 @@ DEFINES := -DHRT_HW -DHRT_ISP_CSS_CUSTOM_HOST -DHRT_USE_VIR_ADDRS -D__HOST__
351DEFINES += -DATOMISP_POSTFIX=\"css2400b0_v21\" -DISP2400B0 351DEFINES += -DATOMISP_POSTFIX=\"css2400b0_v21\" -DISP2400B0
352DEFINES += -DSYSTEM_hive_isp_css_2400_system -DISP2400 352DEFINES += -DSYSTEM_hive_isp_css_2400_system -DISP2400
353 353
354ccflags-y += $(INCLUDES) $(DEFINES) -fno-common -Werror 354ccflags-y += $(INCLUDES) $(DEFINES) -fno-common
355 355
diff --git a/drivers/staging/rtl8188eu/os_dep/mon.c b/drivers/staging/rtl8188eu/os_dep/mon.c
index cfe37eb026d6..859d0d6051cd 100644
--- a/drivers/staging/rtl8188eu/os_dep/mon.c
+++ b/drivers/staging/rtl8188eu/os_dep/mon.c
@@ -152,7 +152,7 @@ static const struct net_device_ops mon_netdev_ops = {
152static void mon_setup(struct net_device *dev) 152static void mon_setup(struct net_device *dev)
153{ 153{
154 dev->netdev_ops = &mon_netdev_ops; 154 dev->netdev_ops = &mon_netdev_ops;
155 dev->destructor = free_netdev; 155 dev->needs_free_netdev = true;
156 ether_setup(dev); 156 ether_setup(dev);
157 dev->priv_flags |= IFF_NO_QUEUE; 157 dev->priv_flags |= IFF_NO_QUEUE;
158 dev->type = ARPHRD_IEEE80211; 158 dev->type = ARPHRD_IEEE80211;
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
index 4723a0bd5067..1c6ed5b2a6f9 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
@@ -97,8 +97,9 @@ void rtl92e_set_reg(struct net_device *dev, u8 variable, u8 *val)
97 97
98 switch (variable) { 98 switch (variable) {
99 case HW_VAR_BSSID: 99 case HW_VAR_BSSID:
100 rtl92e_writel(dev, BSSIDR, ((u32 *)(val))[0]); 100 /* BSSIDR 2 byte alignment */
101 rtl92e_writew(dev, BSSIDR+2, ((u16 *)(val+2))[0]); 101 rtl92e_writew(dev, BSSIDR, *(u16 *)val);
102 rtl92e_writel(dev, BSSIDR + 2, *(u32 *)(val + 2));
102 break; 103 break;
103 104
104 case HW_VAR_MEDIA_STATUS: 105 case HW_VAR_MEDIA_STATUS:
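The BSSID is six bytes and the BSSIDR block is only 16-bit aligned, so the corrected order is a 16-bit write for bytes 0-1 followed by a 32-bit write at +2 for bytes 2-5; the replaced writes used a 32-bit access at the unaligned base (and in this hunk never stored bytes 4-5 at all). Condensed into one illustrative helper (accessors and BSSIDR as in the driver):

    /* Write a 6-byte BSSID to the 2-byte-aligned BSSIDR registers. */
    static void rtl92e_write_bssid(struct net_device *dev, const u8 *bssid)
    {
            rtl92e_writew(dev, BSSIDR, *(const u16 *)bssid);           /* bytes 0-1 */
            rtl92e_writel(dev, BSSIDR + 2, *(const u32 *)(bssid + 2)); /* bytes 2-5 */
    }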
@@ -624,7 +625,7 @@ void rtl92e_get_eeprom_size(struct net_device *dev)
624 struct r8192_priv *priv = rtllib_priv(dev); 625 struct r8192_priv *priv = rtllib_priv(dev);
625 626
626 RT_TRACE(COMP_INIT, "===========>%s()\n", __func__); 627 RT_TRACE(COMP_INIT, "===========>%s()\n", __func__);
627 curCR = rtl92e_readl(dev, EPROM_CMD); 628 curCR = rtl92e_readw(dev, EPROM_CMD);
628 RT_TRACE(COMP_INIT, "read from Reg Cmd9346CR(%x):%x\n", EPROM_CMD, 629 RT_TRACE(COMP_INIT, "read from Reg Cmd9346CR(%x):%x\n", EPROM_CMD,
629 curCR); 630 curCR);
630 priv->epromtype = (curCR & EPROM_CMD_9356SEL) ? EEPROM_93C56 : 631 priv->epromtype = (curCR & EPROM_CMD_9356SEL) ? EEPROM_93C56 :
@@ -961,8 +962,8 @@ static void _rtl92e_net_update(struct net_device *dev)
961 rtl92e_config_rate(dev, &rate_config); 962 rtl92e_config_rate(dev, &rate_config);
962 priv->dot11CurrentPreambleMode = PREAMBLE_AUTO; 963 priv->dot11CurrentPreambleMode = PREAMBLE_AUTO;
963 priv->basic_rate = rate_config &= 0x15f; 964 priv->basic_rate = rate_config &= 0x15f;
964 rtl92e_writel(dev, BSSIDR, ((u32 *)net->bssid)[0]); 965 rtl92e_writew(dev, BSSIDR, *(u16 *)net->bssid);
965 rtl92e_writew(dev, BSSIDR+4, ((u16 *)net->bssid)[2]); 966 rtl92e_writel(dev, BSSIDR + 2, *(u32 *)(net->bssid + 2));
966 967
967 if (priv->rtllib->iw_mode == IW_MODE_ADHOC) { 968 if (priv->rtllib->iw_mode == IW_MODE_ADHOC) {
968 rtl92e_writew(dev, ATIMWND, 2); 969 rtl92e_writew(dev, ATIMWND, 2);
@@ -1182,8 +1183,7 @@ void rtl92e_fill_tx_desc(struct net_device *dev, struct tx_desc *pdesc,
1182 struct cb_desc *cb_desc, struct sk_buff *skb) 1183 struct cb_desc *cb_desc, struct sk_buff *skb)
1183{ 1184{
1184 struct r8192_priv *priv = rtllib_priv(dev); 1185 struct r8192_priv *priv = rtllib_priv(dev);
1185 dma_addr_t mapping = pci_map_single(priv->pdev, skb->data, skb->len, 1186 dma_addr_t mapping;
1186 PCI_DMA_TODEVICE);
1187 struct tx_fwinfo_8190pci *pTxFwInfo; 1187 struct tx_fwinfo_8190pci *pTxFwInfo;
1188 1188
1189 pTxFwInfo = (struct tx_fwinfo_8190pci *)skb->data; 1189 pTxFwInfo = (struct tx_fwinfo_8190pci *)skb->data;
@@ -1194,8 +1194,6 @@ void rtl92e_fill_tx_desc(struct net_device *dev, struct tx_desc *pdesc,
1194 pTxFwInfo->Short = _rtl92e_query_is_short(pTxFwInfo->TxHT, 1194 pTxFwInfo->Short = _rtl92e_query_is_short(pTxFwInfo->TxHT,
1195 pTxFwInfo->TxRate, cb_desc); 1195 pTxFwInfo->TxRate, cb_desc);
1196 1196
1197 if (pci_dma_mapping_error(priv->pdev, mapping))
1198 netdev_err(dev, "%s(): DMA Mapping error\n", __func__);
1199 if (cb_desc->bAMPDUEnable) { 1197 if (cb_desc->bAMPDUEnable) {
1200 pTxFwInfo->AllowAggregation = 1; 1198 pTxFwInfo->AllowAggregation = 1;
1201 pTxFwInfo->RxMF = cb_desc->ampdu_factor; 1199 pTxFwInfo->RxMF = cb_desc->ampdu_factor;
@@ -1230,6 +1228,14 @@ void rtl92e_fill_tx_desc(struct net_device *dev, struct tx_desc *pdesc,
1230 } 1228 }
1231 1229
1232 memset((u8 *)pdesc, 0, 12); 1230 memset((u8 *)pdesc, 0, 12);
1231
1232 mapping = pci_map_single(priv->pdev, skb->data, skb->len,
1233 PCI_DMA_TODEVICE);
1234 if (pci_dma_mapping_error(priv->pdev, mapping)) {
1235 netdev_err(dev, "%s(): DMA Mapping error\n", __func__);
1236 return;
1237 }
1238
1233 pdesc->LINIP = 0; 1239 pdesc->LINIP = 0;
1234 pdesc->CmdInit = 1; 1240 pdesc->CmdInit = 1;
1235 pdesc->Offset = sizeof(struct tx_fwinfo_8190pci) + 8; 1241 pdesc->Offset = sizeof(struct tx_fwinfo_8190pci) + 8;
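fill_tx_desc now maps the skb only after the descriptor memset and aborts on a mapping failure rather than logging and carrying on with a bogus DMA address. The checked-mapping idiom on its own, as a hedged sketch (helper name is illustrative):

    #include <linux/pci.h>
    #include <linux/skbuff.h>

    /* Map an skb for device-bound DMA; fail instead of handing the
     * hardware an invalid address. */
    static int map_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
                          dma_addr_t *mapping)
    {
            *mapping = pci_map_single(pdev, skb->data, skb->len,
                                      PCI_DMA_TODEVICE);
            if (pci_dma_mapping_error(pdev, *mapping))
                    return -ENOMEM;
            return 0;
    }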
diff --git a/drivers/staging/rtl8192e/rtl819x_TSProc.c b/drivers/staging/rtl8192e/rtl819x_TSProc.c
index 48bbd9e8a52f..dcc4eb691889 100644
--- a/drivers/staging/rtl8192e/rtl819x_TSProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_TSProc.c
@@ -306,11 +306,6 @@ static void MakeTSEntry(struct ts_common_info *pTsCommonInfo, u8 *Addr,
306 pTsCommonInfo->TClasNum = TCLAS_Num; 306 pTsCommonInfo->TClasNum = TCLAS_Num;
307} 307}
308 308
309static bool IsACValid(unsigned int tid)
310{
311 return tid < 7;
312}
313
314bool GetTs(struct rtllib_device *ieee, struct ts_common_info **ppTS, 309bool GetTs(struct rtllib_device *ieee, struct ts_common_info **ppTS,
315 u8 *Addr, u8 TID, enum tr_select TxRxSelect, bool bAddNewTs) 310 u8 *Addr, u8 TID, enum tr_select TxRxSelect, bool bAddNewTs)
316{ 311{
@@ -328,12 +323,6 @@ bool GetTs(struct rtllib_device *ieee, struct ts_common_info **ppTS,
328 if (ieee->current_network.qos_data.supported == 0) { 323 if (ieee->current_network.qos_data.supported == 0) {
329 UP = 0; 324 UP = 0;
330 } else { 325 } else {
331 if (!IsACValid(TID)) {
332 netdev_warn(ieee->dev, "%s(): TID(%d) is not valid\n",
333 __func__, TID);
334 return false;
335 }
336
337 switch (TID) { 326 switch (TID) {
338 case 0: 327 case 0:
339 case 3: 328 case 3:
@@ -351,6 +340,10 @@ bool GetTs(struct rtllib_device *ieee, struct ts_common_info **ppTS,
351 case 7: 340 case 7:
352 UP = 7; 341 UP = 7;
353 break; 342 break;
343 default:
344 netdev_warn(ieee->dev, "%s(): TID(%d) is not valid\n",
345 __func__, TID);
346 return false;
354 } 347 }
355 } 348 }
356 349
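Folding the validity check into the switch also fixes an off-by-one: the removed IsACValid() tested tid < 7 and so rejected the valid TID 7, while the new default arm rejects only genuinely out-of-range values. Sketch of the shape (only the TID 7 arm is taken from the hunk; the remaining mappings are elided):

    #include <linux/netdevice.h>

    static bool tid_to_up(struct net_device *dev, u8 tid, u8 *up)
    {
            switch (tid) {
            /* ... cases 0..6 map to their user priorities ... */
            case 7:
                    *up = 7;
                    return true;
            default:
                    netdev_warn(dev, "TID(%u) is not valid\n", tid);
                    return false;
            }
    }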
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
index 5e7a61f24f8d..bd4352fe2de3 100644
--- a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
+++ b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
@@ -2667,7 +2667,8 @@ static int rtw_cfg80211_add_monitor_if (struct adapter *padapter, char *name, st
2667 mon_ndev->type = ARPHRD_IEEE80211_RADIOTAP; 2667 mon_ndev->type = ARPHRD_IEEE80211_RADIOTAP;
2668 strncpy(mon_ndev->name, name, IFNAMSIZ); 2668 strncpy(mon_ndev->name, name, IFNAMSIZ);
2669 mon_ndev->name[IFNAMSIZ - 1] = 0; 2669 mon_ndev->name[IFNAMSIZ - 1] = 0;
2670 mon_ndev->destructor = rtw_ndev_destructor; 2670 mon_ndev->needs_free_netdev = true;
2671 mon_ndev->priv_destructor = rtw_ndev_destructor;
2671 2672
2672 mon_ndev->netdev_ops = &rtw_cfg80211_monitor_if_ops; 2673 mon_ndev->netdev_ops = &rtw_cfg80211_monitor_if_ops;
2673 2674
@@ -3531,7 +3532,6 @@ int rtw_wdev_alloc(struct adapter *padapter, struct device *dev)
3531 pwdev_priv->power_mgmt = true; 3532 pwdev_priv->power_mgmt = true;
3532 else 3533 else
3533 pwdev_priv->power_mgmt = false; 3534 pwdev_priv->power_mgmt = false;
3534 kfree((u8 *)wdev);
3535 3535
3536 return ret; 3536 return ret;
3537 3537
diff --git a/drivers/staging/rtl8723bs/os_dep/os_intfs.c b/drivers/staging/rtl8723bs/os_dep/os_intfs.c
index f83cfc76505c..021589913681 100644
--- a/drivers/staging/rtl8723bs/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8723bs/os_dep/os_intfs.c
@@ -1207,8 +1207,6 @@ void rtw_ndev_destructor(struct net_device *ndev)
1207 1207
1208 if (ndev->ieee80211_ptr) 1208 if (ndev->ieee80211_ptr)
1209 kfree((u8 *)ndev->ieee80211_ptr); 1209 kfree((u8 *)ndev->ieee80211_ptr);
1210
1211 free_netdev(ndev);
1212} 1210}
1213 1211
1214void rtw_dev_unload(struct adapter *padapter) 1212void rtw_dev_unload(struct adapter *padapter)
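Both rtl8723bs hunks follow the 4.12 net_device destructor split: the core frees the device when needs_free_netdev is set, and priv_destructor only releases private state, so the free_netdev() call had to go or it would double-free. The resulting pattern, as a minimal sketch:

    #include <linux/netdevice.h>
    #include <linux/slab.h>

    static void demo_priv_destructor(struct net_device *ndev)
    {
            kfree(ndev->ieee80211_ptr);     /* private state only */
    }

    static void demo_setup(struct net_device *ndev)
    {
            /* the core calls free_netdev() for us */
            ndev->needs_free_netdev = true;
            ndev->priv_destructor = demo_priv_destructor;
    }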
diff --git a/drivers/staging/rtl8723bs/os_dep/osdep_service.c b/drivers/staging/rtl8723bs/os_dep/osdep_service.c
index 02db59e8b593..aa16d1ab955b 100644
--- a/drivers/staging/rtl8723bs/os_dep/osdep_service.c
+++ b/drivers/staging/rtl8723bs/os_dep/osdep_service.c
@@ -160,7 +160,7 @@ static int isFileReadable(char *path)
160 oldfs = get_fs(); set_fs(get_ds()); 160 oldfs = get_fs(); set_fs(get_ds());
161 161
162 if (1!=readFile(fp, &buf, 1)) 162 if (1!=readFile(fp, &buf, 1))
163 ret = PTR_ERR(fp); 163 ret = -EINVAL;
164 164
165 set_fs(oldfs); 165 set_fs(oldfs);
166 filp_close(fp, NULL); 166 filp_close(fp, NULL);
diff --git a/drivers/staging/typec/fusb302/fusb302.c b/drivers/staging/typec/fusb302/fusb302.c
index 2cee9a952c9b..4a356e509fe4 100644
--- a/drivers/staging/typec/fusb302/fusb302.c
+++ b/drivers/staging/typec/fusb302/fusb302.c
@@ -264,22 +264,36 @@ static void fusb302_debugfs_exit(const struct fusb302_chip *chip) { }
264 264
265#define FUSB302_RESUME_RETRY 10 265#define FUSB302_RESUME_RETRY 10
266#define FUSB302_RESUME_RETRY_SLEEP 50 266#define FUSB302_RESUME_RETRY_SLEEP 50
267static int fusb302_i2c_write(struct fusb302_chip *chip, 267
268 u8 address, u8 data) 268static bool fusb302_is_suspended(struct fusb302_chip *chip)
269{ 269{
270 int retry_cnt; 270 int retry_cnt;
271 int ret = 0;
272 271
273 atomic_set(&chip->i2c_busy, 1);
274 for (retry_cnt = 0; retry_cnt < FUSB302_RESUME_RETRY; retry_cnt++) { 272 for (retry_cnt = 0; retry_cnt < FUSB302_RESUME_RETRY; retry_cnt++) {
275 if (atomic_read(&chip->pm_suspend)) { 273 if (atomic_read(&chip->pm_suspend)) {
276 pr_err("fusb302_i2c: pm suspend, retry %d/%d\n", 274 dev_err(chip->dev, "i2c: pm suspend, retry %d/%d\n",
277 retry_cnt + 1, FUSB302_RESUME_RETRY); 275 retry_cnt + 1, FUSB302_RESUME_RETRY);
278 msleep(FUSB302_RESUME_RETRY_SLEEP); 276 msleep(FUSB302_RESUME_RETRY_SLEEP);
279 } else { 277 } else {
280 break; 278 return false;
281 } 279 }
282 } 280 }
281
282 return true;
283}
284
285static int fusb302_i2c_write(struct fusb302_chip *chip,
286 u8 address, u8 data)
287{
288 int ret = 0;
289
290 atomic_set(&chip->i2c_busy, 1);
291
292 if (fusb302_is_suspended(chip)) {
293 atomic_set(&chip->i2c_busy, 0);
294 return -ETIMEDOUT;
295 }
296
283 ret = i2c_smbus_write_byte_data(chip->i2c_client, address, data); 297 ret = i2c_smbus_write_byte_data(chip->i2c_client, address, data);
284 if (ret < 0) 298 if (ret < 0)
285 fusb302_log(chip, "cannot write 0x%02x to 0x%02x, ret=%d", 299 fusb302_log(chip, "cannot write 0x%02x to 0x%02x, ret=%d",
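The same busy-flag-plus-suspend-check preamble now guards all four I2C wrappers; note that the early return must clear i2c_busy again or the flag would stick. Condensed into one hypothetical helper (not a function from the patch):

    static int fusb302_i2c_guard(struct fusb302_chip *chip)
    {
            atomic_set(&chip->i2c_busy, 1);
            if (fusb302_is_suspended(chip)) {
                    atomic_set(&chip->i2c_busy, 0);
                    return -ETIMEDOUT;
            }
            return 0;       /* caller may issue the SMBus transfer */
    }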
@@ -292,21 +306,17 @@ static int fusb302_i2c_write(struct fusb302_chip *chip,
292static int fusb302_i2c_block_write(struct fusb302_chip *chip, u8 address, 306static int fusb302_i2c_block_write(struct fusb302_chip *chip, u8 address,
293 u8 length, const u8 *data) 307 u8 length, const u8 *data)
294{ 308{
295 int retry_cnt;
296 int ret = 0; 309 int ret = 0;
297 310
298 if (length <= 0) 311 if (length <= 0)
299 return ret; 312 return ret;
300 atomic_set(&chip->i2c_busy, 1); 313 atomic_set(&chip->i2c_busy, 1);
301 for (retry_cnt = 0; retry_cnt < FUSB302_RESUME_RETRY; retry_cnt++) { 314
302 if (atomic_read(&chip->pm_suspend)) { 315 if (fusb302_is_suspended(chip)) {
303 pr_err("fusb302_i2c: pm suspend, retry %d/%d\n", 316 atomic_set(&chip->i2c_busy, 0);
304 retry_cnt + 1, FUSB302_RESUME_RETRY); 317 return -ETIMEDOUT;
305 msleep(FUSB302_RESUME_RETRY_SLEEP);
306 } else {
307 break;
308 }
309 } 318 }
319
310 ret = i2c_smbus_write_i2c_block_data(chip->i2c_client, address, 320 ret = i2c_smbus_write_i2c_block_data(chip->i2c_client, address,
311 length, data); 321 length, data);
312 if (ret < 0) 322 if (ret < 0)
@@ -320,19 +330,15 @@ static int fusb302_i2c_block_write(struct fusb302_chip *chip, u8 address,
320static int fusb302_i2c_read(struct fusb302_chip *chip, 330static int fusb302_i2c_read(struct fusb302_chip *chip,
321 u8 address, u8 *data) 331 u8 address, u8 *data)
322{ 332{
323 int retry_cnt;
324 int ret = 0; 333 int ret = 0;
325 334
326 atomic_set(&chip->i2c_busy, 1); 335 atomic_set(&chip->i2c_busy, 1);
327 for (retry_cnt = 0; retry_cnt < FUSB302_RESUME_RETRY; retry_cnt++) { 336
328 if (atomic_read(&chip->pm_suspend)) { 337 if (fusb302_is_suspended(chip)) {
329 pr_err("fusb302_i2c: pm suspend, retry %d/%d\n", 338 atomic_set(&chip->i2c_busy, 0);
330 retry_cnt + 1, FUSB302_RESUME_RETRY); 339 return -ETIMEDOUT;
331 msleep(FUSB302_RESUME_RETRY_SLEEP);
332 } else {
333 break;
334 }
335 } 340 }
341
336 ret = i2c_smbus_read_byte_data(chip->i2c_client, address); 342 ret = i2c_smbus_read_byte_data(chip->i2c_client, address);
337 *data = (u8)ret; 343 *data = (u8)ret;
338 if (ret < 0) 344 if (ret < 0)
@@ -345,33 +351,31 @@ static int fusb302_i2c_read(struct fusb302_chip *chip,
345static int fusb302_i2c_block_read(struct fusb302_chip *chip, u8 address, 351static int fusb302_i2c_block_read(struct fusb302_chip *chip, u8 address,
346 u8 length, u8 *data) 352 u8 length, u8 *data)
347{ 353{
348 int retry_cnt;
349 int ret = 0; 354 int ret = 0;
350 355
351 if (length <= 0) 356 if (length <= 0)
352 return ret; 357 return ret;
353 atomic_set(&chip->i2c_busy, 1); 358 atomic_set(&chip->i2c_busy, 1);
354 for (retry_cnt = 0; retry_cnt < FUSB302_RESUME_RETRY; retry_cnt++) { 359
355 if (atomic_read(&chip->pm_suspend)) { 360 if (fusb302_is_suspended(chip)) {
356 pr_err("fusb302_i2c: pm suspend, retry %d/%d\n", 361 atomic_set(&chip->i2c_busy, 0);
357 retry_cnt + 1, FUSB302_RESUME_RETRY); 362 return -ETIMEDOUT;
358 msleep(FUSB302_RESUME_RETRY_SLEEP);
359 } else {
360 break;
361 }
362 } 363 }
364
363 ret = i2c_smbus_read_i2c_block_data(chip->i2c_client, address, 365 ret = i2c_smbus_read_i2c_block_data(chip->i2c_client, address,
364 length, data); 366 length, data);
365 if (ret < 0) { 367 if (ret < 0) {
366 fusb302_log(chip, "cannot block read 0x%02x, len=%d, ret=%d", 368 fusb302_log(chip, "cannot block read 0x%02x, len=%d, ret=%d",
367 address, length, ret); 369 address, length, ret);
368 return ret; 370 goto done;
369 } 371 }
370 if (ret != length) { 372 if (ret != length) {
371 fusb302_log(chip, "only read %d/%d bytes from 0x%02x", 373 fusb302_log(chip, "only read %d/%d bytes from 0x%02x",
372 ret, length, address); 374 ret, length, address);
373 return -EIO; 375 ret = -EIO;
374 } 376 }
377
378done:
375 atomic_set(&chip->i2c_busy, 0); 379 atomic_set(&chip->i2c_busy, 0);
376 380
377 return ret; 381 return ret;
@@ -489,7 +493,7 @@ static int tcpm_init(struct tcpc_dev *dev)
489 ret = fusb302_i2c_read(chip, FUSB_REG_STATUS0, &data); 493 ret = fusb302_i2c_read(chip, FUSB_REG_STATUS0, &data);
490 if (ret < 0) 494 if (ret < 0)
491 return ret; 495 return ret;
492 chip->vbus_present = !!(FUSB_REG_STATUS0 & FUSB_REG_STATUS0_VBUSOK); 496 chip->vbus_present = !!(data & FUSB_REG_STATUS0_VBUSOK);
493 ret = fusb302_i2c_read(chip, FUSB_REG_DEVICE_ID, &data); 497 ret = fusb302_i2c_read(chip, FUSB_REG_DEVICE_ID, &data);
494 if (ret < 0) 498 if (ret < 0)
495 return ret; 499 return ret;
@@ -1025,7 +1029,7 @@ static int fusb302_pd_send_message(struct fusb302_chip *chip,
1025 buf[pos++] = FUSB302_TKN_SYNC1; 1029 buf[pos++] = FUSB302_TKN_SYNC1;
1026 buf[pos++] = FUSB302_TKN_SYNC2; 1030 buf[pos++] = FUSB302_TKN_SYNC2;
1027 1031
1028 len = pd_header_cnt(msg->header) * 4; 1032 len = pd_header_cnt_le(msg->header) * 4;
1029 /* plug 2 for header */ 1033 /* plug 2 for header */
1030 len += 2; 1034 len += 2;
1031 if (len > 0x1F) { 1035 if (len > 0x1F) {
@@ -1481,7 +1485,7 @@ static int fusb302_pd_read_message(struct fusb302_chip *chip,
1481 (u8 *)&msg->header); 1485 (u8 *)&msg->header);
1482 if (ret < 0) 1486 if (ret < 0)
1483 return ret; 1487 return ret;
1484 len = pd_header_cnt(msg->header) * 4; 1488 len = pd_header_cnt_le(msg->header) * 4;
1485 /* add 4 to length to include the CRC */ 1489 /* add 4 to length to include the CRC */
1486 if (len > PD_MAX_PAYLOAD * 4) { 1490 if (len > PD_MAX_PAYLOAD * 4) {
1487 fusb302_log(chip, "PD message too long %d", len); 1491 fusb302_log(chip, "PD message too long %d", len);
@@ -1663,14 +1667,12 @@ static int init_gpio(struct fusb302_chip *chip)
1663 if (ret < 0) { 1667 if (ret < 0) {
1664 fusb302_log(chip, 1668 fusb302_log(chip,
1665 "cannot set GPIO Int_N to input, ret=%d", ret); 1669 "cannot set GPIO Int_N to input, ret=%d", ret);
1666 gpio_free(chip->gpio_int_n);
1667 return ret; 1670 return ret;
1668 } 1671 }
1669 ret = gpio_to_irq(chip->gpio_int_n); 1672 ret = gpio_to_irq(chip->gpio_int_n);
1670 if (ret < 0) { 1673 if (ret < 0) {
1671 fusb302_log(chip, 1674 fusb302_log(chip,
1672 "cannot request IRQ for GPIO Int_N, ret=%d", ret); 1675 "cannot request IRQ for GPIO Int_N, ret=%d", ret);
1673 gpio_free(chip->gpio_int_n);
1674 return ret; 1676 return ret;
1675 } 1677 }
1676 chip->gpio_int_n_irq = ret; 1678 chip->gpio_int_n_irq = ret;
@@ -1787,11 +1789,13 @@ static const struct of_device_id fusb302_dt_match[] = {
1787 {.compatible = "fcs,fusb302"}, 1789 {.compatible = "fcs,fusb302"},
1788 {}, 1790 {},
1789}; 1791};
1792MODULE_DEVICE_TABLE(of, fusb302_dt_match);
1790 1793
1791static const struct i2c_device_id fusb302_i2c_device_id[] = { 1794static const struct i2c_device_id fusb302_i2c_device_id[] = {
1792 {"typec_fusb302", 0}, 1795 {"typec_fusb302", 0},
1793 {}, 1796 {},
1794}; 1797};
1798MODULE_DEVICE_TABLE(i2c, fusb302_i2c_device_id);
1795 1799
1796static const struct dev_pm_ops fusb302_pm_ops = { 1800static const struct dev_pm_ops fusb302_pm_ops = {
1797 .suspend = fusb302_pm_suspend, 1801 .suspend = fusb302_pm_suspend,
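The two MODULE_DEVICE_TABLE() additions are what allow automatic module loading: each emits the match table as modalias data in the module image, which modprobe compares against the ids the bus reports. Generic shape with demo identifiers (not the driver's names):

    #include <linux/module.h>
    #include <linux/i2c.h>

    static const struct i2c_device_id demo_id[] = {
            { "typec_fusb302", 0 },
            { }
    };
    /* export the table so udev/modprobe can autoload on a match */
    MODULE_DEVICE_TABLE(i2c, demo_id);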
diff --git a/drivers/staging/typec/pd.h b/drivers/staging/typec/pd.h
index 8d97bdb95f23..510ef7279900 100644
--- a/drivers/staging/typec/pd.h
+++ b/drivers/staging/typec/pd.h
@@ -92,6 +92,16 @@ static inline unsigned int pd_header_type_le(__le16 header)
92 return pd_header_type(le16_to_cpu(header)); 92 return pd_header_type(le16_to_cpu(header));
93} 93}
94 94
95static inline unsigned int pd_header_msgid(u16 header)
96{
97 return (header >> PD_HEADER_ID_SHIFT) & PD_HEADER_ID_MASK;
98}
99
100static inline unsigned int pd_header_msgid_le(__le16 header)
101{
102 return pd_header_msgid(le16_to_cpu(header));
103}
104
95#define PD_MAX_PAYLOAD 7 105#define PD_MAX_PAYLOAD 7
96 106
97struct pd_message { 107struct pd_message {
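The new accessors assume the usual USB PD 2.0 header layout, where the 3-bit MessageID occupies bits 11:9 (PD_HEADER_ID_SHIFT = 9, PD_HEADER_ID_MASK = 0x7 in this header). A host-side demo of the same extraction:

    #include <stdio.h>
    #include <stdint.h>

    #define PD_HEADER_ID_SHIFT 9
    #define PD_HEADER_ID_MASK  0x7

    static unsigned int pd_header_msgid(uint16_t header)
    {
            return (header >> PD_HEADER_ID_SHIFT) & PD_HEADER_ID_MASK;
    }

    int main(void)
    {
            uint16_t hdr = 0x0841;  /* example header, MessageID = 4 */

            printf("msgid = %u\n", pd_header_msgid(hdr));
            return 0;
    }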
diff --git a/drivers/staging/typec/pd_vdo.h b/drivers/staging/typec/pd_vdo.h
index dba172e0e0d1..d92259f8de0a 100644
--- a/drivers/staging/typec/pd_vdo.h
+++ b/drivers/staging/typec/pd_vdo.h
@@ -22,6 +22,9 @@
22 * VDM object is minimum of VDM header + 6 additional data objects. 22 * VDM object is minimum of VDM header + 6 additional data objects.
23 */ 23 */
24 24
25#define VDO_MAX_OBJECTS 6
26#define VDO_MAX_SIZE (VDO_MAX_OBJECTS + 1)
27
25/* 28/*
26 * VDM header 29 * VDM header
27 * ---------- 30 * ----------
@@ -34,7 +37,6 @@
34 * <5> :: reserved (SVDM), command type (UVDM) 37 * <5> :: reserved (SVDM), command type (UVDM)
35 * <4:0> :: command 38 * <4:0> :: command
36 */ 39 */
37#define VDO_MAX_SIZE 7
38#define VDO(vid, type, custom) \ 40#define VDO(vid, type, custom) \
39 (((vid) << 16) | \ 41 (((vid) << 16) | \
40 ((type) << 15) | \ 42 ((type) << 15) | \
diff --git a/drivers/staging/typec/tcpci.c b/drivers/staging/typec/tcpci.c
index 5e5be74c7850..df72d8b01e73 100644
--- a/drivers/staging/typec/tcpci.c
+++ b/drivers/staging/typec/tcpci.c
@@ -425,7 +425,7 @@ static const struct regmap_config tcpci_regmap_config = {
425 .max_register = 0x7F, /* 0x80 .. 0xFF are vendor defined */ 425 .max_register = 0x7F, /* 0x80 .. 0xFF are vendor defined */
426}; 426};
427 427
428const struct tcpc_config tcpci_tcpc_config = { 428static const struct tcpc_config tcpci_tcpc_config = {
429 .type = TYPEC_PORT_DFP, 429 .type = TYPEC_PORT_DFP,
430 .default_role = TYPEC_SINK, 430 .default_role = TYPEC_SINK,
431}; 431};
diff --git a/drivers/staging/typec/tcpm.c b/drivers/staging/typec/tcpm.c
index abba655ba00a..20eb4ebcf8c3 100644
--- a/drivers/staging/typec/tcpm.c
+++ b/drivers/staging/typec/tcpm.c
@@ -238,6 +238,7 @@ struct tcpm_port {
238 unsigned int hard_reset_count; 238 unsigned int hard_reset_count;
239 bool pd_capable; 239 bool pd_capable;
240 bool explicit_contract; 240 bool explicit_contract;
241 unsigned int rx_msgid;
241 242
242 /* Partner capabilities/requests */ 243 /* Partner capabilities/requests */
243 u32 sink_request; 244 u32 sink_request;
@@ -251,6 +252,8 @@ struct tcpm_port {
251 unsigned int nr_src_pdo; 252 unsigned int nr_src_pdo;
252 u32 snk_pdo[PDO_MAX_OBJECTS]; 253 u32 snk_pdo[PDO_MAX_OBJECTS];
253 unsigned int nr_snk_pdo; 254 unsigned int nr_snk_pdo;
255 u32 snk_vdo[VDO_MAX_OBJECTS];
256 unsigned int nr_snk_vdo;
254 257
255 unsigned int max_snk_mv; 258 unsigned int max_snk_mv;
256 unsigned int max_snk_ma; 259 unsigned int max_snk_ma;
@@ -997,6 +1000,7 @@ static int tcpm_pd_svdm(struct tcpm_port *port, const __le32 *payload, int cnt,
997 struct pd_mode_data *modep; 1000 struct pd_mode_data *modep;
998 int rlen = 0; 1001 int rlen = 0;
999 u16 svid; 1002 u16 svid;
1003 int i;
1000 1004
1001 tcpm_log(port, "Rx VDM cmd 0x%x type %d cmd %d len %d", 1005 tcpm_log(port, "Rx VDM cmd 0x%x type %d cmd %d len %d",
1002 p0, cmd_type, cmd, cnt); 1006 p0, cmd_type, cmd, cnt);
@@ -1007,6 +1011,14 @@ static int tcpm_pd_svdm(struct tcpm_port *port, const __le32 *payload, int cnt,
1007 case CMDT_INIT: 1011 case CMDT_INIT:
1008 switch (cmd) { 1012 switch (cmd) {
1009 case CMD_DISCOVER_IDENT: 1013 case CMD_DISCOVER_IDENT:
1014 /* 6.4.4.3.1: Only respond as UFP (device) */
1015 if (port->data_role == TYPEC_DEVICE &&
1016 port->nr_snk_vdo) {
1017 for (i = 0; i < port->nr_snk_vdo; i++)
1018 response[i + 1]
1019 = cpu_to_le32(port->snk_vdo[i]);
1020 rlen = port->nr_snk_vdo + 1;
1021 }
1010 break; 1022 break;
1011 case CMD_DISCOVER_SVID: 1023 case CMD_DISCOVER_SVID:
1012 break; 1024 break;
@@ -1415,6 +1427,7 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
1415 break; 1427 break;
1416 case SOFT_RESET_SEND: 1428 case SOFT_RESET_SEND:
1417 port->message_id = 0; 1429 port->message_id = 0;
1430 port->rx_msgid = -1;
1418 if (port->pwr_role == TYPEC_SOURCE) 1431 if (port->pwr_role == TYPEC_SOURCE)
1419 next_state = SRC_SEND_CAPABILITIES; 1432 next_state = SRC_SEND_CAPABILITIES;
1420 else 1433 else
@@ -1503,6 +1516,22 @@ static void tcpm_pd_rx_handler(struct work_struct *work)
1503 port->attached); 1516 port->attached);
1504 1517
1505 if (port->attached) { 1518 if (port->attached) {
1519 enum pd_ctrl_msg_type type = pd_header_type_le(msg->header);
1520 unsigned int msgid = pd_header_msgid_le(msg->header);
1521
1522 /*
1523 * USB PD standard, 6.6.1.2:
1524 * "... if MessageID value in a received Message is the
1525 * same as the stored value, the receiver shall return a
1526 * GoodCRC Message with that MessageID value and drop
1527 * the Message (this is a retry of an already received
1528 * Message). Note: this shall not apply to the Soft_Reset
1529 * Message which always has a MessageID value of zero."
1530 */
1531 if (msgid == port->rx_msgid && type != PD_CTRL_SOFT_RESET)
1532 goto done;
1533 port->rx_msgid = msgid;
1534
1506 /* 1535 /*
1507 * If both ends believe to be DFP/host, we have a data role 1536 * If both ends believe to be DFP/host, we have a data role
1508 * mismatch. 1537 * mismatch.
@@ -1520,6 +1549,7 @@ static void tcpm_pd_rx_handler(struct work_struct *work)
1520 } 1549 }
1521 } 1550 }
1522 1551
1552done:
1523 mutex_unlock(&port->lock); 1553 mutex_unlock(&port->lock);
1524 kfree(event); 1554 kfree(event);
1525} 1555}
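The quoted 6.6.1.2 rule reduces to a three-line filter. rx_msgid is reset to the sentinel -1 after every reset, so the first real message, MessageID 0, is never mistaken for a retry, and Soft_Reset is exempt because it always carries MessageID 0. Sketch (names mirror the patch; the helper itself is illustrative):

    static bool pd_rx_is_retry(int *rx_msgid, unsigned int msgid,
                               enum pd_ctrl_msg_type type)
    {
            if ((int)msgid == *rx_msgid && type != PD_CTRL_SOFT_RESET)
                    return true;    /* retry: ack with GoodCRC, drop */
            *rx_msgid = msgid;      /* remember the newest MessageID */
            return false;
    }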
@@ -1719,8 +1749,7 @@ static int tcpm_pd_build_request(struct tcpm_port *port, u32 *rdo)
1719 } 1749 }
1720 ma = min(ma, port->max_snk_ma); 1750 ma = min(ma, port->max_snk_ma);
1721 1751
1722 /* XXX: Any other flags need to be set? */ 1752 flags = RDO_USB_COMM | RDO_NO_SUSPEND;
1723 flags = 0;
1724 1753
1725 /* Set mismatch bit if offered power is less than operating power */ 1754 /* Set mismatch bit if offered power is less than operating power */
1726 mw = ma * mv / 1000; 1755 mw = ma * mv / 1000;
@@ -1957,6 +1986,12 @@ static void tcpm_reset_port(struct tcpm_port *port)
1957 port->attached = false; 1986 port->attached = false;
1958 port->pd_capable = false; 1987 port->pd_capable = false;
1959 1988
1989 /*
1990 * First Rx ID should be 0; set this to a sentinel of -1 so that
1991 * we can check tcpm_pd_rx_handler() if we had seen it before.
1992 */
1993 port->rx_msgid = -1;
1994
1960 port->tcpc->set_pd_rx(port->tcpc, false); 1995 port->tcpc->set_pd_rx(port->tcpc, false);
1961 tcpm_init_vbus(port); /* also disables charging */ 1996 tcpm_init_vbus(port); /* also disables charging */
1962 tcpm_init_vconn(port); 1997 tcpm_init_vconn(port);
@@ -2170,6 +2205,7 @@ static void run_state_machine(struct tcpm_port *port)
2170 port->pwr_opmode = TYPEC_PWR_MODE_USB; 2205 port->pwr_opmode = TYPEC_PWR_MODE_USB;
2171 port->caps_count = 0; 2206 port->caps_count = 0;
2172 port->message_id = 0; 2207 port->message_id = 0;
2208 port->rx_msgid = -1;
2173 port->explicit_contract = false; 2209 port->explicit_contract = false;
2174 tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0); 2210 tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0);
2175 break; 2211 break;
@@ -2329,6 +2365,7 @@ static void run_state_machine(struct tcpm_port *port)
2329 typec_set_pwr_opmode(port->typec_port, TYPEC_PWR_MODE_USB); 2365 typec_set_pwr_opmode(port->typec_port, TYPEC_PWR_MODE_USB);
2330 port->pwr_opmode = TYPEC_PWR_MODE_USB; 2366 port->pwr_opmode = TYPEC_PWR_MODE_USB;
2331 port->message_id = 0; 2367 port->message_id = 0;
2368 port->rx_msgid = -1;
2332 port->explicit_contract = false; 2369 port->explicit_contract = false;
2333 tcpm_set_state(port, SNK_DISCOVERY, 0); 2370 tcpm_set_state(port, SNK_DISCOVERY, 0);
2334 break; 2371 break;
@@ -2496,6 +2533,7 @@ static void run_state_machine(struct tcpm_port *port)
2496 /* Soft_Reset states */ 2533 /* Soft_Reset states */
2497 case SOFT_RESET: 2534 case SOFT_RESET:
2498 port->message_id = 0; 2535 port->message_id = 0;
2536 port->rx_msgid = -1;
2499 tcpm_pd_send_control(port, PD_CTRL_ACCEPT); 2537 tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
2500 if (port->pwr_role == TYPEC_SOURCE) 2538 if (port->pwr_role == TYPEC_SOURCE)
2501 tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0); 2539 tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0);
@@ -2504,6 +2542,7 @@ static void run_state_machine(struct tcpm_port *port)
2504 break; 2542 break;
2505 case SOFT_RESET_SEND: 2543 case SOFT_RESET_SEND:
2506 port->message_id = 0; 2544 port->message_id = 0;
2545 port->rx_msgid = -1;
2507 if (tcpm_pd_send_control(port, PD_CTRL_SOFT_RESET)) 2546 if (tcpm_pd_send_control(port, PD_CTRL_SOFT_RESET))
2508 tcpm_set_state_cond(port, hard_reset_state(port), 0); 2547 tcpm_set_state_cond(port, hard_reset_state(port), 0);
2509 else 2548 else
@@ -2568,6 +2607,14 @@ static void run_state_machine(struct tcpm_port *port)
2568 break; 2607 break;
2569 case PR_SWAP_SRC_SNK_SOURCE_OFF: 2608 case PR_SWAP_SRC_SNK_SOURCE_OFF:
2570 tcpm_set_cc(port, TYPEC_CC_RD); 2609 tcpm_set_cc(port, TYPEC_CC_RD);
2610 /*
2611 * USB-PD standard, 6.2.1.4, Port Power Role:
2612 * "During the Power Role Swap Sequence, for the initial Source
2613 * Port, the Port Power Role field shall be set to Sink in the
2614 * PS_RDY Message indicating that the initial Source’s power
2615 * supply is turned off"
2616 */
2617 tcpm_set_pwr_role(port, TYPEC_SINK);
2571 if (tcpm_pd_send_control(port, PD_CTRL_PS_RDY)) { 2618 if (tcpm_pd_send_control(port, PD_CTRL_PS_RDY)) {
2572 tcpm_set_state(port, ERROR_RECOVERY, 0); 2619 tcpm_set_state(port, ERROR_RECOVERY, 0);
2573 break; 2620 break;
@@ -2575,7 +2622,6 @@ static void run_state_machine(struct tcpm_port *port)
2575 tcpm_set_state_cond(port, SNK_UNATTACHED, PD_T_PS_SOURCE_ON); 2622 tcpm_set_state_cond(port, SNK_UNATTACHED, PD_T_PS_SOURCE_ON);
2576 break; 2623 break;
2577 case PR_SWAP_SRC_SNK_SINK_ON: 2624 case PR_SWAP_SRC_SNK_SINK_ON:
2578 tcpm_set_pwr_role(port, TYPEC_SINK);
2579 tcpm_swap_complete(port, 0); 2625 tcpm_swap_complete(port, 0);
2580 tcpm_set_state(port, SNK_STARTUP, 0); 2626 tcpm_set_state(port, SNK_STARTUP, 0);
2581 break; 2627 break;
@@ -2587,8 +2633,15 @@ static void run_state_machine(struct tcpm_port *port)
2587 case PR_SWAP_SNK_SRC_SOURCE_ON: 2633 case PR_SWAP_SNK_SRC_SOURCE_ON:
2588 tcpm_set_cc(port, tcpm_rp_cc(port)); 2634 tcpm_set_cc(port, tcpm_rp_cc(port));
2589 tcpm_set_vbus(port, true); 2635 tcpm_set_vbus(port, true);
2590 tcpm_pd_send_control(port, PD_CTRL_PS_RDY); 2636 /*
2637 * USB PD standard, 6.2.1.4:
2638 * "Subsequent Messages initiated by the Policy Engine,
2639 * such as the PS_RDY Message sent to indicate that Vbus
2640 * is ready, will have the Port Power Role field set to
2641 * Source."
2642 */
2591 tcpm_set_pwr_role(port, TYPEC_SOURCE); 2643 tcpm_set_pwr_role(port, TYPEC_SOURCE);
2644 tcpm_pd_send_control(port, PD_CTRL_PS_RDY);
2592 tcpm_swap_complete(port, 0); 2645 tcpm_swap_complete(port, 0);
2593 tcpm_set_state(port, SRC_STARTUP, 0); 2646 tcpm_set_state(port, SRC_STARTUP, 0);
2594 break; 2647 break;
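Both swap hunks enforce the same 6.2.1.4 ordering: the Port Power Role field of a message reflects the role at transmit time, so the role must be flipped before PS_RDY goes out, in either swap direction. As one illustrative helper (not a function from the patch):

    static int tcpm_ps_rdy_as(struct tcpm_port *port, enum typec_role role)
    {
            int ret = tcpm_set_pwr_role(port, role);

            if (ret < 0)
                    return ret;
            /* the PS_RDY header now carries the new role */
            return tcpm_pd_send_control(port, PD_CTRL_PS_RDY);
    }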
@@ -3292,6 +3345,20 @@ static int tcpm_copy_pdos(u32 *dest_pdo, const u32 *src_pdo,
3292 return nr_pdo; 3345 return nr_pdo;
3293} 3346}
3294 3347
3348static int tcpm_copy_vdos(u32 *dest_vdo, const u32 *src_vdo,
3349 unsigned int nr_vdo)
3350{
3351 unsigned int i;
3352
3353 if (nr_vdo > VDO_MAX_OBJECTS)
3354 nr_vdo = VDO_MAX_OBJECTS;
3355
3356 for (i = 0; i < nr_vdo; i++)
3357 dest_vdo[i] = src_vdo[i];
3358
3359 return nr_vdo;
3360}
3361
3295void tcpm_update_source_capabilities(struct tcpm_port *port, const u32 *pdo, 3362void tcpm_update_source_capabilities(struct tcpm_port *port, const u32 *pdo,
3296 unsigned int nr_pdo) 3363 unsigned int nr_pdo)
3297{ 3364{
@@ -3382,6 +3449,8 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
3382 tcpc->config->nr_src_pdo); 3449 tcpc->config->nr_src_pdo);
3383 port->nr_snk_pdo = tcpm_copy_pdos(port->snk_pdo, tcpc->config->snk_pdo, 3450 port->nr_snk_pdo = tcpm_copy_pdos(port->snk_pdo, tcpc->config->snk_pdo,
3384 tcpc->config->nr_snk_pdo); 3451 tcpc->config->nr_snk_pdo);
3452 port->nr_snk_vdo = tcpm_copy_vdos(port->snk_vdo, tcpc->config->snk_vdo,
3453 tcpc->config->nr_snk_vdo);
3385 3454
3386 port->max_snk_mv = tcpc->config->max_snk_mv; 3455 port->max_snk_mv = tcpc->config->max_snk_mv;
3387 port->max_snk_ma = tcpc->config->max_snk_ma; 3456 port->max_snk_ma = tcpc->config->max_snk_ma;
diff --git a/drivers/staging/typec/tcpm.h b/drivers/staging/typec/tcpm.h
index 969b365e6549..19c307d31a5a 100644
--- a/drivers/staging/typec/tcpm.h
+++ b/drivers/staging/typec/tcpm.h
@@ -60,6 +60,9 @@ struct tcpc_config {
60 const u32 *snk_pdo; 60 const u32 *snk_pdo;
61 unsigned int nr_snk_pdo; 61 unsigned int nr_snk_pdo;
62 62
63 const u32 *snk_vdo;
64 unsigned int nr_snk_vdo;
65
63 unsigned int max_snk_mv; 66 unsigned int max_snk_mv;
64 unsigned int max_snk_ma; 67 unsigned int max_snk_ma;
65 unsigned int max_snk_mw; 68 unsigned int max_snk_mw;
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
index 988ee61fb4a7..d04db3f55519 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
@@ -502,8 +502,15 @@ create_pagelist(char __user *buf, size_t count, unsigned short type,
502 */ 502 */
503 sg_init_table(scatterlist, num_pages); 503 sg_init_table(scatterlist, num_pages);
504 /* Now set the pages for each scatterlist */ 504 /* Now set the pages for each scatterlist */
505 for (i = 0; i < num_pages; i++) 505 for (i = 0; i < num_pages; i++) {
506 sg_set_page(scatterlist + i, pages[i], PAGE_SIZE, 0); 506 unsigned int len = PAGE_SIZE - offset;
507
508 if (len > count)
509 len = count;
510 sg_set_page(scatterlist + i, pages[i], len, offset);
511 offset = 0;
512 count -= len;
513 }
507 514
508 dma_buffers = dma_map_sg(g_dev, 515 dma_buffers = dma_map_sg(g_dev,
509 scatterlist, 516 scatterlist,
@@ -524,20 +531,20 @@ create_pagelist(char __user *buf, size_t count, unsigned short type,
524 u32 addr = sg_dma_address(sg); 531 u32 addr = sg_dma_address(sg);
525 532
526 /* Note: addrs is the address + page_count - 1 533 /* Note: addrs is the address + page_count - 1
527 * The firmware expects the block to be page 534 * The firmware expects blocks after the first to be page-
528 * aligned and a multiple of the page size 535 * aligned and a multiple of the page size
529 */ 536 */
530 WARN_ON(len == 0); 537 WARN_ON(len == 0);
531 WARN_ON(len & ~PAGE_MASK); 538 WARN_ON(i && (i != (dma_buffers - 1)) && (len & ~PAGE_MASK));
532 WARN_ON(addr & ~PAGE_MASK); 539 WARN_ON(i && (addr & ~PAGE_MASK));
533 if (k > 0 && 540 if (k > 0 &&
534 ((addrs[k - 1] & PAGE_MASK) | 541 ((addrs[k - 1] & PAGE_MASK) +
535 ((addrs[k - 1] & ~PAGE_MASK) + 1) << PAGE_SHIFT) 542 (((addrs[k - 1] & ~PAGE_MASK) + 1) << PAGE_SHIFT))
536 == addr) { 543 == (addr & PAGE_MASK))
537 addrs[k - 1] += (len >> PAGE_SHIFT); 544 addrs[k - 1] += ((len + PAGE_SIZE - 1) >> PAGE_SHIFT);
538 } else { 545 else
539 addrs[k++] = addr | ((len >> PAGE_SHIFT) - 1); 546 addrs[k++] = (addr & PAGE_MASK) |
540 } 547 (((len + PAGE_SIZE - 1) >> PAGE_SHIFT) - 1);
541 } 548 }
542 549
543 /* Partial cache lines (fragments) require special measures */ 550 /* Partial cache lines (fragments) require special measures */
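The addrs[] rework keeps the firmware's packed block format: the page-aligned DMA address in the high bits and (page count - 1) in the low PAGE_SHIFT bits, now rounding partial pages up instead of assuming whole pages. A host-side demo of that encoding, assuming 4 KiB pages:

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1u << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))

    static uint32_t encode_block(uint32_t addr, uint32_t len)
    {
            /* round the length up to whole pages, store count - 1 */
            uint32_t pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;

            return (addr & PAGE_MASK) | (pages - 1);
    }

    int main(void)
    {
            uint32_t blk = encode_block(0x12345000, 3 * PAGE_SIZE + 100);

            printf("addr=0x%08x pages=%u\n",
                   blk & PAGE_MASK, (blk & ~PAGE_MASK) + 1);
            return 0;
    }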
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 26a9bcd5ee6a..3fdca2cdd8da 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -1279,6 +1279,18 @@ iscsit_get_immediate_data(struct iscsi_cmd *cmd, struct iscsi_scsi_req *hdr,
1279 */ 1279 */
1280 if (dump_payload) 1280 if (dump_payload)
1281 goto after_immediate_data; 1281 goto after_immediate_data;
1282 /*
1283 * Check for underflow case where both EDTL and immediate data payload
1284 * exceed what is presented by CDB's TRANSFER LENGTH, and what has
1285 * already been set in target_cmd_size_check() as se_cmd->data_length.
1286 *
1287 * For this special case, fail the command and dump the immediate data
1288 * payload.
1289 */
1290 if (cmd->first_burst_len > cmd->se_cmd.data_length) {
1291 cmd->sense_reason = TCM_INVALID_CDB_FIELD;
1292 goto after_immediate_data;
1293 }
1282 1294
1283 immed_ret = iscsit_handle_immediate_data(cmd, hdr, 1295 immed_ret = iscsit_handle_immediate_data(cmd, hdr,
1284 cmd->first_burst_len); 1296 cmd->first_burst_len);
@@ -3790,6 +3802,8 @@ int iscsi_target_tx_thread(void *arg)
3790{ 3802{
3791 int ret = 0; 3803 int ret = 0;
3792 struct iscsi_conn *conn = arg; 3804 struct iscsi_conn *conn = arg;
3805 bool conn_freed = false;
3806
3793 /* 3807 /*
3794 * Allow ourselves to be interrupted by SIGINT so that a 3808 * Allow ourselves to be interrupted by SIGINT so that a
3795 * connection recovery / failure event can be triggered externally. 3809 * connection recovery / failure event can be triggered externally.
@@ -3815,12 +3829,14 @@ get_immediate:
3815 goto transport_err; 3829 goto transport_err;
3816 3830
3817 ret = iscsit_handle_response_queue(conn); 3831 ret = iscsit_handle_response_queue(conn);
3818 if (ret == 1) 3832 if (ret == 1) {
3819 goto get_immediate; 3833 goto get_immediate;
3820 else if (ret == -ECONNRESET) 3834 } else if (ret == -ECONNRESET) {
3835 conn_freed = true;
3821 goto out; 3836 goto out;
3822 else if (ret < 0) 3837 } else if (ret < 0) {
3823 goto transport_err; 3838 goto transport_err;
3839 }
3824 } 3840 }
3825 3841
3826transport_err: 3842transport_err:
@@ -3830,8 +3846,13 @@ transport_err:
3830 * responsible for cleaning up the early connection failure. 3846 * responsible for cleaning up the early connection failure.
3831 */ 3847 */
3832 if (conn->conn_state != TARG_CONN_STATE_IN_LOGIN) 3848 if (conn->conn_state != TARG_CONN_STATE_IN_LOGIN)
3833 iscsit_take_action_for_connection_exit(conn); 3849 iscsit_take_action_for_connection_exit(conn, &conn_freed);
3834out: 3850out:
3851 if (!conn_freed) {
3852 while (!kthread_should_stop()) {
3853 msleep(100);
3854 }
3855 }
3835 return 0; 3856 return 0;
3836} 3857}
3837 3858
@@ -4004,6 +4025,7 @@ int iscsi_target_rx_thread(void *arg)
4004{ 4025{
4005 int rc; 4026 int rc;
4006 struct iscsi_conn *conn = arg; 4027 struct iscsi_conn *conn = arg;
4028 bool conn_freed = false;
4007 4029
4008 /* 4030 /*
4009 * Allow ourselves to be interrupted by SIGINT so that a 4031 * Allow ourselves to be interrupted by SIGINT so that a
@@ -4016,7 +4038,7 @@ int iscsi_target_rx_thread(void *arg)
4016 */ 4038 */
4017 rc = wait_for_completion_interruptible(&conn->rx_login_comp); 4039 rc = wait_for_completion_interruptible(&conn->rx_login_comp);
4018 if (rc < 0 || iscsi_target_check_conn_state(conn)) 4040 if (rc < 0 || iscsi_target_check_conn_state(conn))
4019 return 0; 4041 goto out;
4020 4042
4021 if (!conn->conn_transport->iscsit_get_rx_pdu) 4043 if (!conn->conn_transport->iscsit_get_rx_pdu)
4022 return 0; 4044 return 0;
@@ -4025,7 +4047,15 @@ int iscsi_target_rx_thread(void *arg)
4025 4047
4026 if (!signal_pending(current)) 4048 if (!signal_pending(current))
4027 atomic_set(&conn->transport_failed, 1); 4049 atomic_set(&conn->transport_failed, 1);
4028 iscsit_take_action_for_connection_exit(conn); 4050 iscsit_take_action_for_connection_exit(conn, &conn_freed);
4051
4052out:
4053 if (!conn_freed) {
4054 while (!kthread_should_stop()) {
4055 msleep(100);
4056 }
4057 }
4058
4029 return 0; 4059 return 0;
4030} 4060}
4031 4061
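Both thread exits now share one pattern: if this thread did not free the connection itself, it parks until whoever owns the kthread calls kthread_stop(), keeping the task_struct valid for that caller. Minimal sketch (helper name is illustrative):

    #include <linux/kthread.h>
    #include <linux/delay.h>

    static void wait_until_stopped(void)
    {
            while (!kthread_should_stop())
                    msleep(100);
    }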
@@ -4405,8 +4435,11 @@ static void iscsit_logout_post_handler_closesession(
4405 * always sleep waiting for RX/TX thread shutdown to complete 4435 * always sleep waiting for RX/TX thread shutdown to complete
4406 * within iscsit_close_connection(). 4436 * within iscsit_close_connection().
4407 */ 4437 */
4408 if (!conn->conn_transport->rdma_shutdown) 4438 if (!conn->conn_transport->rdma_shutdown) {
4409 sleep = cmpxchg(&conn->tx_thread_active, true, false); 4439 sleep = cmpxchg(&conn->tx_thread_active, true, false);
4440 if (!sleep)
4441 return;
4442 }
4410 4443
4411 atomic_set(&conn->conn_logout_remove, 0); 4444 atomic_set(&conn->conn_logout_remove, 0);
4412 complete(&conn->conn_logout_comp); 4445 complete(&conn->conn_logout_comp);
@@ -4422,8 +4455,11 @@ static void iscsit_logout_post_handler_samecid(
4422{ 4455{
4423 int sleep = 1; 4456 int sleep = 1;
4424 4457
4425 if (!conn->conn_transport->rdma_shutdown) 4458 if (!conn->conn_transport->rdma_shutdown) {
4426 sleep = cmpxchg(&conn->tx_thread_active, true, false); 4459 sleep = cmpxchg(&conn->tx_thread_active, true, false);
4460 if (!sleep)
4461 return;
4462 }
4427 4463
4428 atomic_set(&conn->conn_logout_remove, 0); 4464 atomic_set(&conn->conn_logout_remove, 0);
4429 complete(&conn->conn_logout_comp); 4465 complete(&conn->conn_logout_comp);
diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c
index 9a96e17bf7cd..7fe2aa73cff6 100644
--- a/drivers/target/iscsi/iscsi_target_erl0.c
+++ b/drivers/target/iscsi/iscsi_target_erl0.c
@@ -930,8 +930,10 @@ static void iscsit_handle_connection_cleanup(struct iscsi_conn *conn)
930 } 930 }
931} 931}
932 932
933void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn) 933void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn, bool *conn_freed)
934{ 934{
935 *conn_freed = false;
936
935 spin_lock_bh(&conn->state_lock); 937 spin_lock_bh(&conn->state_lock);
936 if (atomic_read(&conn->connection_exit)) { 938 if (atomic_read(&conn->connection_exit)) {
937 spin_unlock_bh(&conn->state_lock); 939 spin_unlock_bh(&conn->state_lock);
@@ -942,6 +944,7 @@ void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn)
942 if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) { 944 if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) {
943 spin_unlock_bh(&conn->state_lock); 945 spin_unlock_bh(&conn->state_lock);
944 iscsit_close_connection(conn); 946 iscsit_close_connection(conn);
947 *conn_freed = true;
945 return; 948 return;
946 } 949 }
947 950
@@ -955,4 +958,5 @@ void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn)
955 spin_unlock_bh(&conn->state_lock); 958 spin_unlock_bh(&conn->state_lock);
956 959
957 iscsit_handle_connection_cleanup(conn); 960 iscsit_handle_connection_cleanup(conn);
961 *conn_freed = true;
958} 962}
diff --git a/drivers/target/iscsi/iscsi_target_erl0.h b/drivers/target/iscsi/iscsi_target_erl0.h
index 60e69e2af6ed..3822d9cd1230 100644
--- a/drivers/target/iscsi/iscsi_target_erl0.h
+++ b/drivers/target/iscsi/iscsi_target_erl0.h
@@ -15,6 +15,6 @@ extern int iscsit_stop_time2retain_timer(struct iscsi_session *);
15extern void iscsit_connection_reinstatement_rcfr(struct iscsi_conn *); 15extern void iscsit_connection_reinstatement_rcfr(struct iscsi_conn *);
16extern void iscsit_cause_connection_reinstatement(struct iscsi_conn *, int); 16extern void iscsit_cause_connection_reinstatement(struct iscsi_conn *, int);
17extern void iscsit_fall_back_to_erl0(struct iscsi_session *); 17extern void iscsit_fall_back_to_erl0(struct iscsi_session *);
18extern void iscsit_take_action_for_connection_exit(struct iscsi_conn *); 18extern void iscsit_take_action_for_connection_exit(struct iscsi_conn *, bool *);
19 19
20#endif /*** ISCSI_TARGET_ERL0_H ***/ 20#endif /*** ISCSI_TARGET_ERL0_H ***/
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 66238477137b..92b96b51d506 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -1464,5 +1464,9 @@ int iscsi_target_login_thread(void *arg)
1464 break; 1464 break;
1465 } 1465 }
1466 1466
1467 while (!kthread_should_stop()) {
1468 msleep(100);
1469 }
1470
1467 return 0; 1471 return 0;
1468} 1472}
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index 7ccc9c1cbfd1..6f88b31242b0 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -493,14 +493,60 @@ static void iscsi_target_restore_sock_callbacks(struct iscsi_conn *conn)
493 493
494static int iscsi_target_do_login(struct iscsi_conn *, struct iscsi_login *); 494static int iscsi_target_do_login(struct iscsi_conn *, struct iscsi_login *);
495 495
496static bool iscsi_target_sk_state_check(struct sock *sk) 496static bool __iscsi_target_sk_check_close(struct sock *sk)
497{ 497{
498 if (sk->sk_state == TCP_CLOSE_WAIT || sk->sk_state == TCP_CLOSE) { 498 if (sk->sk_state == TCP_CLOSE_WAIT || sk->sk_state == TCP_CLOSE) {
499 pr_debug("iscsi_target_sk_state_check: TCP_CLOSE_WAIT|TCP_CLOSE," 499 pr_debug("__iscsi_target_sk_check_close: TCP_CLOSE_WAIT|TCP_CLOSE,"
500 "returning FALSE\n"); 500 "returning FALSE\n");
501 return false; 501 return true;
502 } 502 }
503 return true; 503 return false;
504}
505
506static bool iscsi_target_sk_check_close(struct iscsi_conn *conn)
507{
508 bool state = false;
509
510 if (conn->sock) {
511 struct sock *sk = conn->sock->sk;
512
513 read_lock_bh(&sk->sk_callback_lock);
514 state = (__iscsi_target_sk_check_close(sk) ||
515 test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags));
516 read_unlock_bh(&sk->sk_callback_lock);
517 }
518 return state;
519}
520
521static bool iscsi_target_sk_check_flag(struct iscsi_conn *conn, unsigned int flag)
522{
523 bool state = false;
524
525 if (conn->sock) {
526 struct sock *sk = conn->sock->sk;
527
528 read_lock_bh(&sk->sk_callback_lock);
529 state = test_bit(flag, &conn->login_flags);
530 read_unlock_bh(&sk->sk_callback_lock);
531 }
532 return state;
533}
534
535static bool iscsi_target_sk_check_and_clear(struct iscsi_conn *conn, unsigned int flag)
536{
537 bool state = false;
538
539 if (conn->sock) {
540 struct sock *sk = conn->sock->sk;
541
542 write_lock_bh(&sk->sk_callback_lock);
543 state = (__iscsi_target_sk_check_close(sk) ||
544 test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags));
545 if (!state)
546 clear_bit(flag, &conn->login_flags);
547 write_unlock_bh(&sk->sk_callback_lock);
548 }
549 return state;
504} 550}
505 551
506static void iscsi_target_login_drop(struct iscsi_conn *conn, struct iscsi_login *login) 552static void iscsi_target_login_drop(struct iscsi_conn *conn, struct iscsi_login *login)
@@ -540,6 +586,20 @@ static void iscsi_target_do_login_rx(struct work_struct *work)
540 586
541 pr_debug("entering iscsi_target_do_login_rx, conn: %p, %s:%d\n", 587 pr_debug("entering iscsi_target_do_login_rx, conn: %p, %s:%d\n",
542 conn, current->comm, current->pid); 588 conn, current->comm, current->pid);
589 /*
590 * If iscsi_target_do_login_rx() has been invoked by ->sk_data_ready()
591 * before initial PDU processing in iscsi_target_start_negotiation()
592 * has completed, go ahead and retry until it's cleared.
593 *
594 * Otherwise if the TCP connection drops while this is occurring,
595 * iscsi_target_start_negotiation() will detect the failure, call
596 * cancel_delayed_work_sync(&conn->login_work), and clean up the
597 * remaining iscsi connection resources from iscsi_np process context.
598 */
599 if (iscsi_target_sk_check_flag(conn, LOGIN_FLAGS_INITIAL_PDU)) {
600 schedule_delayed_work(&conn->login_work, msecs_to_jiffies(10));
601 return;
602 }
543 603
544 spin_lock(&tpg->tpg_state_lock); 604 spin_lock(&tpg->tpg_state_lock);
545 state = (tpg->tpg_state == TPG_STATE_ACTIVE); 605 state = (tpg->tpg_state == TPG_STATE_ACTIVE);
@@ -547,26 +607,12 @@ static void iscsi_target_do_login_rx(struct work_struct *work)
547 607
548 if (!state) { 608 if (!state) {
549 pr_debug("iscsi_target_do_login_rx: tpg_state != TPG_STATE_ACTIVE\n"); 609 pr_debug("iscsi_target_do_login_rx: tpg_state != TPG_STATE_ACTIVE\n");
550 iscsi_target_restore_sock_callbacks(conn); 610 goto err;
551 iscsi_target_login_drop(conn, login);
552 iscsit_deaccess_np(np, tpg, tpg_np);
553 return;
554 } 611 }
555 612
556 if (conn->sock) { 613 if (iscsi_target_sk_check_close(conn)) {
557 struct sock *sk = conn->sock->sk; 614 pr_debug("iscsi_target_do_login_rx, TCP state CLOSE\n");
558 615 goto err;
559 read_lock_bh(&sk->sk_callback_lock);
560 state = iscsi_target_sk_state_check(sk);
561 read_unlock_bh(&sk->sk_callback_lock);
562
563 if (!state) {
564 pr_debug("iscsi_target_do_login_rx, TCP state CLOSE\n");
565 iscsi_target_restore_sock_callbacks(conn);
566 iscsi_target_login_drop(conn, login);
567 iscsit_deaccess_np(np, tpg, tpg_np);
568 return;
569 }
570 } 616 }
571 617
572 conn->login_kworker = current; 618 conn->login_kworker = current;
@@ -584,34 +630,29 @@ static void iscsi_target_do_login_rx(struct work_struct *work)
584 flush_signals(current); 630 flush_signals(current);
585 conn->login_kworker = NULL; 631 conn->login_kworker = NULL;
586 632
587 if (rc < 0) { 633 if (rc < 0)
588 iscsi_target_restore_sock_callbacks(conn); 634 goto err;
589 iscsi_target_login_drop(conn, login);
590 iscsit_deaccess_np(np, tpg, tpg_np);
591 return;
592 }
593 635
594 pr_debug("iscsi_target_do_login_rx after rx_login_io, %p, %s:%d\n", 636 pr_debug("iscsi_target_do_login_rx after rx_login_io, %p, %s:%d\n",
595 conn, current->comm, current->pid); 637 conn, current->comm, current->pid);
596 638
597 rc = iscsi_target_do_login(conn, login); 639 rc = iscsi_target_do_login(conn, login);
598 if (rc < 0) { 640 if (rc < 0) {
599 iscsi_target_restore_sock_callbacks(conn); 641 goto err;
600 iscsi_target_login_drop(conn, login);
601 iscsit_deaccess_np(np, tpg, tpg_np);
602 } else if (!rc) { 642 } else if (!rc) {
603 if (conn->sock) { 643 if (iscsi_target_sk_check_and_clear(conn, LOGIN_FLAGS_READ_ACTIVE))
-			struct sock *sk = conn->sock->sk;
-
-			write_lock_bh(&sk->sk_callback_lock);
-			clear_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags);
-			write_unlock_bh(&sk->sk_callback_lock);
-		}
+		goto err;
 	} else if (rc == 1) {
 		iscsi_target_nego_release(conn);
 		iscsi_post_login_handler(np, conn, zero_tsih);
 		iscsit_deaccess_np(np, tpg, tpg_np);
 	}
+	return;
+
+err:
+	iscsi_target_restore_sock_callbacks(conn);
+	iscsi_target_login_drop(conn, login);
+	iscsit_deaccess_np(np, tpg, tpg_np);
 }
 
 static void iscsi_target_do_cleanup(struct work_struct *work)
@@ -659,31 +700,54 @@ static void iscsi_target_sk_state_change(struct sock *sk)
 		orig_state_change(sk);
 		return;
 	}
+	state = __iscsi_target_sk_check_close(sk);
+	pr_debug("__iscsi_target_sk_close_change: state: %d\n", state);
+
 	if (test_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags)) {
 		pr_debug("Got LOGIN_FLAGS_READ_ACTIVE=1 sk_state_change"
 			" conn: %p\n", conn);
+		if (state)
+			set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags);
 		write_unlock_bh(&sk->sk_callback_lock);
 		orig_state_change(sk);
 		return;
 	}
-	if (test_and_set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)) {
+	if (test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)) {
 		pr_debug("Got LOGIN_FLAGS_CLOSED=1 sk_state_change conn: %p\n",
 			conn);
 		write_unlock_bh(&sk->sk_callback_lock);
 		orig_state_change(sk);
 		return;
 	}
+	/*
+	 * If the TCP connection has dropped, go ahead and set LOGIN_FLAGS_CLOSED,
+	 * but only queue conn->login_work -> iscsi_target_do_login_rx()
+	 * processing if LOGIN_FLAGS_INITIAL_PDU has already been cleared.
+	 *
+	 * When iscsi_target_do_login_rx() runs, iscsi_target_sk_check_close()
+	 * will detect the dropped TCP connection from delayed workqueue context.
+	 *
+	 * If LOGIN_FLAGS_INITIAL_PDU is still set, which means the initial
+	 * iscsi_target_start_negotiation() is running, iscsi_target_do_login()
+	 * via iscsi_target_sk_check_close() or iscsi_target_start_negotiation()
+	 * via iscsi_target_sk_check_and_clear() is responsible for detecting the
+	 * dropped TCP connection in iscsi_np process context, and cleaning up
+	 * the remaining iscsi connection resources.
+	 */
+	if (state) {
+		pr_debug("iscsi_target_sk_state_change got failed state\n");
+		set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags);
+		state = test_bit(LOGIN_FLAGS_INITIAL_PDU, &conn->login_flags);
+		write_unlock_bh(&sk->sk_callback_lock);
 
-	state = iscsi_target_sk_state_check(sk);
-	write_unlock_bh(&sk->sk_callback_lock);
-
-	pr_debug("iscsi_target_sk_state_change: state: %d\n", state);
+		orig_state_change(sk);
 
-	if (!state) {
-		pr_debug("iscsi_target_sk_state_change got failed state\n");
-		schedule_delayed_work(&conn->login_cleanup_work, 0);
+		if (!state)
+			schedule_delayed_work(&conn->login_work, 0);
 		return;
 	}
+	write_unlock_bh(&sk->sk_callback_lock);
+
 	orig_state_change(sk);
 }
 
@@ -946,6 +1010,15 @@ static int iscsi_target_do_login(struct iscsi_conn *conn, struct iscsi_login *lo
 		if (iscsi_target_handle_csg_one(conn, login) < 0)
 			return -1;
 		if (login_rsp->flags & ISCSI_FLAG_LOGIN_TRANSIT) {
+			/*
+			 * Check to make sure the TCP connection has not
+			 * dropped asynchronously while session reinstatement
+			 * was occurring in this kthread context, before
+			 * transitioning to full feature phase operation.
+			 */
+			if (iscsi_target_sk_check_close(conn))
+				return -1;
+
 			login->tsih = conn->sess->tsih;
 			login->login_complete = 1;
 			iscsi_target_restore_sock_callbacks(conn);
@@ -972,21 +1045,6 @@ static int iscsi_target_do_login(struct iscsi_conn *conn, struct iscsi_login *lo
 		break;
 	}
 
-	if (conn->sock) {
-		struct sock *sk = conn->sock->sk;
-		bool state;
-
-		read_lock_bh(&sk->sk_callback_lock);
-		state = iscsi_target_sk_state_check(sk);
-		read_unlock_bh(&sk->sk_callback_lock);
-
-		if (!state) {
-			pr_debug("iscsi_target_do_login() failed state for"
-				" conn: %p\n", conn);
-			return -1;
-		}
-	}
-
 	return 0;
 }
 
@@ -1255,10 +1313,22 @@ int iscsi_target_start_negotiation(
 
 		write_lock_bh(&sk->sk_callback_lock);
 		set_bit(LOGIN_FLAGS_READY, &conn->login_flags);
+		set_bit(LOGIN_FLAGS_INITIAL_PDU, &conn->login_flags);
 		write_unlock_bh(&sk->sk_callback_lock);
 	}
-
+	/*
+	 * If iscsi_target_do_login returns zero to signal more PDU
+	 * exchanges are required to complete the login, go ahead and
+	 * clear LOGIN_FLAGS_INITIAL_PDU but only if the TCP connection
+	 * is still active.
+	 *
+	 * Otherwise if TCP connection dropped asynchronously, go ahead
+	 * and perform connection cleanup now.
+	 */
 	ret = iscsi_target_do_login(conn, login);
+	if (!ret && iscsi_target_sk_check_and_clear(conn, LOGIN_FLAGS_INITIAL_PDU))
+		ret = -1;
+
 	if (ret < 0) {
 		cancel_delayed_work_sync(&conn->login_work);
 		cancel_delayed_work_sync(&conn->login_cleanup_work);
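
For context, the whole fix above rests on the driver owning sk->sk_state_change while login is in flight. A minimal sketch of that callback pattern, reduced to its essentials (the struct, flag names, and TCP-state test below are illustrative stand-ins, not the driver's actual code):

#include <net/sock.h>
#include <net/tcp_states.h>
#include <linux/workqueue.h>

#define FLAG_CLOSED		0
#define FLAG_INITIAL_PDU	1

struct login_conn {
	unsigned long flags;
	struct delayed_work login_work;
	void (*orig_state_change)(struct sock *);
};

static void login_sk_state_change(struct sock *sk)
{
	struct login_conn *conn = sk->sk_user_data;

	write_lock_bh(&sk->sk_callback_lock);
	if (sk->sk_state != TCP_ESTABLISHED) {
		set_bit(FLAG_CLOSED, &conn->flags);
		/* defer cleanup only once the initial PDU was consumed */
		if (!test_bit(FLAG_INITIAL_PDU, &conn->flags))
			schedule_delayed_work(&conn->login_work, 0);
	}
	write_unlock_bh(&sk->sk_callback_lock);
	conn->orig_state_change(sk);	/* chain to the saved callback */
}

The invariant matching the hunks above: every flag decision happens under sk_callback_lock, and the saved callback runs only after the lock is dropped.
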
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index 9ab7090f7c83..0912de7c0cf8 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -136,7 +136,7 @@ int init_se_kmem_caches(void);
 void release_se_kmem_caches(void);
 u32 scsi_get_new_index(scsi_index_t);
 void transport_subsystem_check_init(void);
-void transport_cmd_finish_abort(struct se_cmd *, int);
+int transport_cmd_finish_abort(struct se_cmd *, int);
 unsigned char *transport_dump_cmd_direction(struct se_cmd *);
 void transport_dump_dev_state(struct se_device *, char *, int *);
 void transport_dump_dev_info(struct se_device *, struct se_lun *,
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index dce1e1b47316..13f47bf4d16b 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -75,7 +75,7 @@ void core_tmr_release_req(struct se_tmr_req *tmr)
 	kfree(tmr);
 }
 
-static void core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas)
+static int core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas)
 {
 	unsigned long flags;
 	bool remove = true, send_tas;
@@ -91,7 +91,7 @@ static void core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas)
 		transport_send_task_abort(cmd);
 	}
 
-	transport_cmd_finish_abort(cmd, remove);
+	return transport_cmd_finish_abort(cmd, remove);
 }
 
 static int target_check_cdb_and_preempt(struct list_head *list,
@@ -184,8 +184,8 @@ void core_tmr_abort_task(
 		cancel_work_sync(&se_cmd->work);
 		transport_wait_for_tasks(se_cmd);
 
-		transport_cmd_finish_abort(se_cmd, true);
-		target_put_sess_cmd(se_cmd);
+		if (!transport_cmd_finish_abort(se_cmd, true))
+			target_put_sess_cmd(se_cmd);
 
 		printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for"
 			" ref_tag: %llu\n", ref_tag);
@@ -281,8 +281,8 @@ static void core_tmr_drain_tmr_list(
 		cancel_work_sync(&cmd->work);
 		transport_wait_for_tasks(cmd);
 
-		transport_cmd_finish_abort(cmd, 1);
-		target_put_sess_cmd(cmd);
+		if (!transport_cmd_finish_abort(cmd, 1))
+			target_put_sess_cmd(cmd);
 	}
 }
 
@@ -380,8 +380,8 @@ static void core_tmr_drain_state_list(
 		cancel_work_sync(&cmd->work);
 		transport_wait_for_tasks(cmd);
 
-		core_tmr_handle_tas_abort(cmd, tas);
-		target_put_sess_cmd(cmd);
+		if (!core_tmr_handle_tas_abort(cmd, tas))
+			target_put_sess_cmd(cmd);
 	}
 }
 
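
All four hunks apply one idiom: the callee reports whether it already released the command, and the caller compensates with its own put only when it did not. A loose sketch of that contract with a bare kref (hypothetical names, not the LIO code itself):

#include <linux/kref.h>
#include <linux/slab.h>

struct cmd {
	struct kref refcount;
};

static void cmd_release(struct kref *kref)
{
	kfree(container_of(kref, struct cmd, refcount));
}

/* Returns nonzero when this call released the object. */
static int cmd_finish_abort(struct cmd *cmd)
{
	return kref_put(&cmd->refcount, cmd_release);
}

static void drain_one(struct cmd *cmd)
{
	/* put our reference only if the callee's put did not free it */
	if (!cmd_finish_abort(cmd))
		kref_put(&cmd->refcount, cmd_release);
}
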
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 37f57357d4a0..f1b3a46bdcaf 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -651,9 +651,10 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd)
 	percpu_ref_put(&lun->lun_ref);
 }
 
-void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
+int transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
 {
 	bool ack_kref = (cmd->se_cmd_flags & SCF_ACK_KREF);
+	int ret = 0;
 
 	if (cmd->se_cmd_flags & SCF_SE_LUN_CMD)
 		transport_lun_remove_cmd(cmd);
@@ -665,9 +666,11 @@ void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
 		cmd->se_tfo->aborted_task(cmd);
 
 	if (transport_cmd_check_stop_to_fabric(cmd))
-		return;
+		return 1;
 	if (remove && ack_kref)
-		transport_put_cmd(cmd);
+		ret = transport_put_cmd(cmd);
+
+	return ret;
 }
 
 static void target_complete_failure_work(struct work_struct *work)
@@ -1160,15 +1163,28 @@ target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
 	if (cmd->unknown_data_length) {
 		cmd->data_length = size;
 	} else if (size != cmd->data_length) {
-		pr_warn("TARGET_CORE[%s]: Expected Transfer Length:"
+		pr_warn_ratelimited("TARGET_CORE[%s]: Expected Transfer Length:"
 			" %u does not match SCSI CDB Length: %u for SAM Opcode:"
 			" 0x%02x\n", cmd->se_tfo->get_fabric_name(),
 			cmd->data_length, size, cmd->t_task_cdb[0]);
 
-		if (cmd->data_direction == DMA_TO_DEVICE &&
-		    cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
-			pr_err("Rejecting underflow/overflow WRITE data\n");
-			return TCM_INVALID_CDB_FIELD;
+		if (cmd->data_direction == DMA_TO_DEVICE) {
+			if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
+				pr_err_ratelimited("Rejecting underflow/overflow"
+						   " for WRITE data CDB\n");
+				return TCM_INVALID_CDB_FIELD;
+			}
+			/*
+			 * Some fabric drivers like iscsi-target still expect to
+			 * always reject overflow writes. Reject this case until
+			 * full fabric driver level support for overflow writes
+			 * is introduced tree-wide.
+			 */
+			if (size > cmd->data_length) {
+				pr_err_ratelimited("Rejecting overflow for"
+						   " WRITE control CDB\n");
+				return TCM_INVALID_CDB_FIELD;
+			}
 		}
 		/*
 		 * Reject READ_* or WRITE_* with overflow/underflow for
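
Condensing the new WRITE-direction policy into one predicate may help reading the hunk above (plain C, simplified types; an aid, not the function itself): data CDBs reject any length mismatch, control CDBs reject only overflow.

#include <stdbool.h>
#include <stdint.h>

/* true when the command must be rejected as an invalid CDB field */
static bool reject_write_mismatch(bool is_data_cdb, uint32_t cdb_size,
				  uint32_t expected_len)
{
	if (cdb_size == expected_len)
		return false;			/* no mismatch at all */
	if (is_data_cdb)
		return true;			/* underflow or overflow */
	return cdb_size > expected_len;		/* overflow only */
}
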
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 9045837f748b..beb5f098f32d 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -97,7 +97,7 @@ struct tcmu_hba {
 
 struct tcmu_dev {
 	struct list_head node;
-
+	struct kref kref;
 	struct se_device se_dev;
 
 	char *name;
@@ -969,6 +969,7 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
 	udev = kzalloc(sizeof(struct tcmu_dev), GFP_KERNEL);
 	if (!udev)
 		return NULL;
+	kref_init(&udev->kref);
 
 	udev->name = kstrdup(name, GFP_KERNEL);
 	if (!udev->name) {
@@ -1145,6 +1146,24 @@ static int tcmu_open(struct uio_info *info, struct inode *inode)
 	return 0;
 }
 
+static void tcmu_dev_call_rcu(struct rcu_head *p)
+{
+	struct se_device *dev = container_of(p, struct se_device, rcu_head);
+	struct tcmu_dev *udev = TCMU_DEV(dev);
+
+	kfree(udev->uio_info.name);
+	kfree(udev->name);
+	kfree(udev);
+}
+
+static void tcmu_dev_kref_release(struct kref *kref)
+{
+	struct tcmu_dev *udev = container_of(kref, struct tcmu_dev, kref);
+	struct se_device *dev = &udev->se_dev;
+
+	call_rcu(&dev->rcu_head, tcmu_dev_call_rcu);
+}
+
 static int tcmu_release(struct uio_info *info, struct inode *inode)
 {
 	struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
@@ -1152,7 +1171,8 @@ static int tcmu_release(struct uio_info *info, struct inode *inode)
 	clear_bit(TCMU_DEV_BIT_OPEN, &udev->flags);
 
 	pr_debug("close\n");
-
+	/* release ref from configure */
+	kref_put(&udev->kref, tcmu_dev_kref_release);
 	return 0;
 }
 
@@ -1272,6 +1292,12 @@ static int tcmu_configure_device(struct se_device *dev)
 	dev->dev_attrib.hw_max_sectors = 128;
 	dev->dev_attrib.hw_queue_depth = 128;
 
+	/*
+	 * Get a ref in case userspace does a close on the uio device before
+	 * LIO has initiated tcmu_free_device.
+	 */
+	kref_get(&udev->kref);
+
 	ret = tcmu_netlink_event(TCMU_CMD_ADDED_DEVICE, udev->uio_info.name,
 				 udev->uio_info.uio_dev->minor);
 	if (ret)
@@ -1284,11 +1310,13 @@ static int tcmu_configure_device(struct se_device *dev)
 	return 0;
 
 err_netlink:
+	kref_put(&udev->kref, tcmu_dev_kref_release);
 	uio_unregister_device(&udev->uio_info);
 err_register:
 	vfree(udev->mb_addr);
 err_vzalloc:
 	kfree(info->name);
+	info->name = NULL;
 
 	return ret;
 }
@@ -1302,14 +1330,6 @@ static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd)
 	return -EINVAL;
 }
 
-static void tcmu_dev_call_rcu(struct rcu_head *p)
-{
-	struct se_device *dev = container_of(p, struct se_device, rcu_head);
-	struct tcmu_dev *udev = TCMU_DEV(dev);
-
-	kfree(udev);
-}
-
 static bool tcmu_dev_configured(struct tcmu_dev *udev)
 {
 	return udev->uio_info.uio_dev ? true : false;
@@ -1364,10 +1384,10 @@ static void tcmu_free_device(struct se_device *dev)
 			   udev->uio_info.uio_dev->minor);
 
 		uio_unregister_device(&udev->uio_info);
-		kfree(udev->uio_info.name);
-		kfree(udev->name);
 	}
-	call_rcu(&dev->rcu_head, tcmu_dev_call_rcu);
+
+	/* release ref from init */
+	kref_put(&udev->kref, tcmu_dev_kref_release);
 }
 
 enum {
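
The tcmu hunks are a textbook kref-plus-RCU lifetime: both the LIO core and the open uio file hold their own reference, and the last kref_put() frees the object through call_rcu(). A self-contained sketch of the pattern (names hypothetical):

#include <linux/kref.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct obj {
	struct kref kref;
	struct rcu_head rcu;
};

static void obj_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct obj, rcu));
}

static void obj_release(struct kref *kref)
{
	struct obj *o = container_of(kref, struct obj, kref);

	call_rcu(&o->rcu, obj_free_rcu);	/* free after a grace period */
}

/* each independent user (core teardown, uio close) drops its own ref */
static void obj_put(struct obj *o)
{
	kref_put(&o->kref, obj_release);
}

Moving the kfree() calls into the release path is what closes the window where the uio close could run after tcmu_free_device() had already freed the strings.
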
diff --git a/drivers/tee/Kconfig b/drivers/tee/Kconfig
index 2330a4eb4e8b..a6df12d88f90 100644
--- a/drivers/tee/Kconfig
+++ b/drivers/tee/Kconfig
@@ -1,6 +1,7 @@
 # Generic Trusted Execution Environment Configuration
 config TEE
 	tristate "Trusted Execution Environment support"
+	depends on HAVE_ARM_SMCCC || COMPILE_TEST
 	select DMA_SHARED_BUFFER
 	select GENERIC_ALLOCATOR
 	help
diff --git a/drivers/thermal/broadcom/Kconfig b/drivers/thermal/broadcom/Kconfig
index ab08af4654ef..42c098e86f84 100644
--- a/drivers/thermal/broadcom/Kconfig
+++ b/drivers/thermal/broadcom/Kconfig
@@ -9,8 +9,9 @@ config BCM2835_THERMAL
 config BCM_NS_THERMAL
 	tristate "Northstar thermal driver"
 	depends on ARCH_BCM_IPROC || COMPILE_TEST
+	default y if ARCH_BCM_IPROC
 	help
-	  Northstar is a family of SoCs that includes e.g. BCM4708, BCM47081,
-	  BCM4709 and BCM47094. It contains DMU (Device Management Unit) block
-	  with a thermal sensor that allows checking CPU temperature. This
-	  driver provides support for it.
+	  Support for the Northstar and Northstar Plus family of SoCs (e.g.
+	  BCM4708, BCM4709, BCM5301x, BCM95852X, etc). It contains DMU (Device
+	  Management Unit) block with a thermal sensor that allows checking CPU
+	  temperature.
diff --git a/drivers/thermal/qoriq_thermal.c b/drivers/thermal/qoriq_thermal.c
index 644ba526d9ea..4362a69ac88d 100644
--- a/drivers/thermal/qoriq_thermal.c
+++ b/drivers/thermal/qoriq_thermal.c
@@ -195,7 +195,6 @@ static struct thermal_zone_of_device_ops tmu_tz_ops = {
 static int qoriq_tmu_probe(struct platform_device *pdev)
 {
 	int ret;
-	const struct thermal_trip *trip;
 	struct qoriq_tmu_data *data;
 	struct device_node *np = pdev->dev.of_node;
 	u32 site = 0;
@@ -243,8 +242,6 @@ static int qoriq_tmu_probe(struct platform_device *pdev)
 		goto err_tmu;
 	}
 
-	trip = of_thermal_get_trip_points(data->tz);
-
 	/* Enable monitoring */
 	site |= 0x1 << (15 - data->sensor_id);
 	tmu_write(data, site | TMR_ME | TMR_ALPF, &data->regs->tmr);
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index b21b9cc2c8d6..5a51c740e372 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -359,7 +359,7 @@ static DECLARE_DELAYED_WORK(thermal_emergency_poweroff_work,
  * This may be called from any critical situation to trigger a system shutdown
  * after a known period of time. By default this is not scheduled.
  */
-void thermal_emergency_poweroff(void)
+static void thermal_emergency_poweroff(void)
 {
 	int poweroff_delay_ms = CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS;
 	/*
diff --git a/drivers/thermal/ti-soc-thermal/ti-bandgap.c b/drivers/thermal/ti-soc-thermal/ti-bandgap.c
index ba9c302454fb..696ab3046b87 100644
--- a/drivers/thermal/ti-soc-thermal/ti-bandgap.c
+++ b/drivers/thermal/ti-soc-thermal/ti-bandgap.c
@@ -1010,7 +1010,7 @@ ti_bandgap_force_single_read(struct ti_bandgap *bgp, int id)
 }
 
 /**
- * ti_bandgap_set_continous_mode() - One time enabling of continuous mode
+ * ti_bandgap_set_continuous_mode() - One time enabling of continuous mode
  * @bgp: pointer to struct ti_bandgap
  *
  * Call this function only if HAS(MODE_CONFIG) is set. As this driver may
@@ -1214,22 +1214,18 @@ static struct ti_bandgap *ti_bandgap_build(struct platform_device *pdev)
 	}
 
 	bgp = devm_kzalloc(&pdev->dev, sizeof(*bgp), GFP_KERNEL);
-	if (!bgp) {
-		dev_err(&pdev->dev, "Unable to allocate mem for driver ref\n");
+	if (!bgp)
 		return ERR_PTR(-ENOMEM);
-	}
 
 	of_id = of_match_device(of_ti_bandgap_match, &pdev->dev);
 	if (of_id)
 		bgp->conf = of_id->data;
 
 	/* register shadow for context save and restore */
-	bgp->regval = devm_kzalloc(&pdev->dev, sizeof(*bgp->regval) *
-				   bgp->conf->sensor_count, GFP_KERNEL);
-	if (!bgp->regval) {
-		dev_err(&pdev->dev, "Unable to allocate mem for driver ref\n");
+	bgp->regval = devm_kcalloc(&pdev->dev, bgp->conf->sensor_count,
+				   sizeof(*bgp->regval), GFP_KERNEL);
+	if (!bgp->regval)
 		return ERR_PTR(-ENOMEM);
-	}
 
 	i = 0;
 	do {
diff --git a/drivers/tty/ehv_bytechan.c b/drivers/tty/ehv_bytechan.c
index 7ac9bcdf1e61..61fe8d6fd24e 100644
--- a/drivers/tty/ehv_bytechan.c
+++ b/drivers/tty/ehv_bytechan.c
@@ -764,7 +764,7 @@ static int __init ehv_bc_init(void)
 	ehv_bc_driver = alloc_tty_driver(count);
 	if (!ehv_bc_driver) {
 		ret = -ENOMEM;
-		goto error;
+		goto err_free_bcs;
 	}
 
 	ehv_bc_driver->driver_name = "ehv-bc";
@@ -778,24 +778,23 @@ static int __init ehv_bc_init(void)
 	ret = tty_register_driver(ehv_bc_driver);
 	if (ret) {
 		pr_err("ehv-bc: could not register tty driver (ret=%i)\n", ret);
-		goto error;
+		goto err_put_tty_driver;
 	}
 
 	ret = platform_driver_register(&ehv_bc_tty_driver);
 	if (ret) {
 		pr_err("ehv-bc: could not register platform driver (ret=%i)\n",
 		       ret);
-		goto error;
+		goto err_deregister_tty_driver;
 	}
 
 	return 0;
 
-error:
-	if (ehv_bc_driver) {
-		tty_unregister_driver(ehv_bc_driver);
-		put_tty_driver(ehv_bc_driver);
-	}
-
+err_deregister_tty_driver:
+	tty_unregister_driver(ehv_bc_driver);
+err_put_tty_driver:
+	put_tty_driver(ehv_bc_driver);
+err_free_bcs:
 	kfree(bcs);
 
 	return ret;
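
The rework swaps a single catch-all label for the conventional layered unwind, where each label undoes exactly what succeeded before the jump. In schematic form (stub names, not this driver's symbols):

static int alloc_thing(void) { return 0; }
static int register_thing(void) { return 0; }
static void free_thing(void) { }

static int init_example(void)
{
	int ret;

	ret = alloc_thing();
	if (ret)
		return ret;

	ret = register_thing();
	if (ret)
		goto err_free_thing;	/* undo the alloc only */

	return 0;

err_free_thing:
	free_thing();
	return ret;
}
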
diff --git a/drivers/tty/serdev/core.c b/drivers/tty/serdev/core.c
index 433de5ea9b02..f71b47334149 100644
--- a/drivers/tty/serdev/core.c
+++ b/drivers/tty/serdev/core.c
@@ -122,6 +122,18 @@ void serdev_device_write_wakeup(struct serdev_device *serdev)
 }
 EXPORT_SYMBOL_GPL(serdev_device_write_wakeup);
 
+int serdev_device_write_buf(struct serdev_device *serdev,
+			    const unsigned char *buf, size_t count)
+{
+	struct serdev_controller *ctrl = serdev->ctrl;
+
+	if (!ctrl || !ctrl->ops->write_buf)
+		return -EINVAL;
+
+	return ctrl->ops->write_buf(ctrl, buf, count);
+}
+EXPORT_SYMBOL_GPL(serdev_device_write_buf);
+
 int serdev_device_write(struct serdev_device *serdev,
 			const unsigned char *buf, size_t count,
 			unsigned long timeout)
diff --git a/drivers/tty/serdev/serdev-ttyport.c b/drivers/tty/serdev/serdev-ttyport.c
index 487c88f6aa0e..d0a021c93986 100644
--- a/drivers/tty/serdev/serdev-ttyport.c
+++ b/drivers/tty/serdev/serdev-ttyport.c
@@ -102,9 +102,6 @@ static int ttyport_open(struct serdev_controller *ctrl)
 		return PTR_ERR(tty);
 	serport->tty = tty;
 
-	serport->port->client_ops = &client_ops;
-	serport->port->client_data = ctrl;
-
 	if (tty->ops->open)
 		tty->ops->open(serport->tty, NULL);
 	else
@@ -215,6 +212,7 @@ struct device *serdev_tty_port_register(struct tty_port *port,
 					struct device *parent,
 					struct tty_driver *drv, int idx)
 {
+	const struct tty_port_client_operations *old_ops;
 	struct serdev_controller *ctrl;
 	struct serport *serport;
 	int ret;
@@ -233,28 +231,37 @@ struct device *serdev_tty_port_register(struct tty_port *port,
 
 	ctrl->ops = &ctrl_ops;
 
+	old_ops = port->client_ops;
+	port->client_ops = &client_ops;
+	port->client_data = ctrl;
+
 	ret = serdev_controller_add(ctrl);
 	if (ret)
-		goto err_controller_put;
+		goto err_reset_data;
 
 	dev_info(&ctrl->dev, "tty port %s%d registered\n", drv->name, idx);
 	return &ctrl->dev;
 
-err_controller_put:
+err_reset_data:
+	port->client_data = NULL;
+	port->client_ops = old_ops;
 	serdev_controller_put(ctrl);
+
 	return ERR_PTR(ret);
 }
 
-void serdev_tty_port_unregister(struct tty_port *port)
+int serdev_tty_port_unregister(struct tty_port *port)
 {
 	struct serdev_controller *ctrl = port->client_data;
 	struct serport *serport = serdev_controller_get_drvdata(ctrl);
 
 	if (!serport)
-		return;
+		return -ENODEV;
 
 	serdev_controller_remove(ctrl);
 	port->client_ops = NULL;
 	port->client_data = NULL;
 	serdev_controller_put(ctrl);
+
+	return 0;
 }
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index 09a65a3ec7f7..68fd045a7025 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -47,6 +47,7 @@
 /*
  * These are definitions for the Exar XR17V35X and XR17(C|D)15X
  */
+#define UART_EXAR_INT0		0x80
 #define UART_EXAR_SLEEP		0x8b	/* Sleep mode */
 #define UART_EXAR_DVID		0x8d	/* Device identification */
 
@@ -1337,7 +1338,7 @@ out_lock:
 	/*
 	 * Check if the device is a Fintek F81216A
 	 */
-	if (port->type == PORT_16550A)
+	if (port->type == PORT_16550A && port->iotype == UPIO_PORT)
 		fintek_8250_probe(up);
 
 	if (up->capabilities != old_capabilities) {
@@ -1869,17 +1870,13 @@ static int serial8250_default_handle_irq(struct uart_port *port)
 static int exar_handle_irq(struct uart_port *port)
 {
 	unsigned int iir = serial_port_in(port, UART_IIR);
-	int ret;
+	int ret = 0;
 
-	ret = serial8250_handle_irq(port, iir);
+	if (((port->type == PORT_XR17V35X) || (port->type == PORT_XR17D15X)) &&
+	    serial_port_in(port, UART_EXAR_INT0) != 0)
+		ret = 1;
 
-	if ((port->type == PORT_XR17V35X) ||
-	    (port->type == PORT_XR17D15X)) {
-		serial_port_in(port, 0x80);
-		serial_port_in(port, 0x81);
-		serial_port_in(port, 0x82);
-		serial_port_in(port, 0x83);
-	}
+	ret |= serial8250_handle_irq(port, iir);
 
 	return ret;
 }
@@ -2177,6 +2174,8 @@ int serial8250_do_startup(struct uart_port *port)
 	serial_port_in(port, UART_RX);
 	serial_port_in(port, UART_IIR);
 	serial_port_in(port, UART_MSR);
+	if ((port->type == PORT_XR17V35X) || (port->type == PORT_XR17D15X))
+		serial_port_in(port, UART_EXAR_INT0);
 
 	/*
 	 * At this point, there's no way the LSR could still be 0xff;
@@ -2335,6 +2334,8 @@ dont_test_tx_en:
 	serial_port_in(port, UART_RX);
 	serial_port_in(port, UART_IIR);
 	serial_port_in(port, UART_MSR);
+	if ((port->type == PORT_XR17V35X) || (port->type == PORT_XR17D15X))
+		serial_port_in(port, UART_EXAR_INT0);
 	up->lsr_saved_flags = 0;
 	up->msr_saved_flags = 0;
 
diff --git a/drivers/tty/serial/altera_jtaguart.c b/drivers/tty/serial/altera_jtaguart.c
index 18e3f8342b85..0475f5d261ce 100644
--- a/drivers/tty/serial/altera_jtaguart.c
+++ b/drivers/tty/serial/altera_jtaguart.c
@@ -478,6 +478,7 @@ static int altera_jtaguart_remove(struct platform_device *pdev)
 
 	port = &altera_jtaguart_ports[i].port;
 	uart_remove_one_port(&altera_jtaguart_driver, port);
+	iounmap(port->membase);
 
 	return 0;
 }
diff --git a/drivers/tty/serial/altera_uart.c b/drivers/tty/serial/altera_uart.c
index 46d3438a0d27..3e4b717670d7 100644
--- a/drivers/tty/serial/altera_uart.c
+++ b/drivers/tty/serial/altera_uart.c
@@ -615,6 +615,7 @@ static int altera_uart_remove(struct platform_device *pdev)
 	if (port) {
 		uart_remove_one_port(&altera_uart_driver, port);
 		port->mapbase = 0;
+		iounmap(port->membase);
 	}
 
 	return 0;
diff --git a/drivers/tty/serial/efm32-uart.c b/drivers/tty/serial/efm32-uart.c
index ebd8569f9ad5..9fff25be87f9 100644
--- a/drivers/tty/serial/efm32-uart.c
+++ b/drivers/tty/serial/efm32-uart.c
@@ -27,6 +27,7 @@
 #define UARTn_FRAME		0x04
 #define UARTn_FRAME_DATABITS__MASK	0x000f
 #define UARTn_FRAME_DATABITS(n)	((n) - 3)
+#define UARTn_FRAME_PARITY__MASK	0x0300
 #define UARTn_FRAME_PARITY_NONE	0x0000
 #define UARTn_FRAME_PARITY_EVEN	0x0200
 #define UARTn_FRAME_PARITY_ODD	0x0300
@@ -572,12 +573,16 @@ static void efm32_uart_console_get_options(struct efm32_uart_port *efm_port,
 			16 * (4 + (clkdiv >> 6)));
 
 	frame = efm32_uart_read32(efm_port, UARTn_FRAME);
-	if (frame & UARTn_FRAME_PARITY_ODD)
+	switch (frame & UARTn_FRAME_PARITY__MASK) {
+	case UARTn_FRAME_PARITY_ODD:
 		*parity = 'o';
-	else if (frame & UARTn_FRAME_PARITY_EVEN)
+		break;
+	case UARTn_FRAME_PARITY_EVEN:
 		*parity = 'e';
-	else
+		break;
+	default:
 		*parity = 'n';
+	}
 
 	*bits = (frame & UARTn_FRAME_DATABITS__MASK) -
 			UARTn_FRAME_DATABITS(4) + 4;
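
The defect fixed here is worth spelling out: UARTn_FRAME_PARITY_ODD is 0x0300, a two-bit field value, so the old test `frame & UARTn_FRAME_PARITY_ODD` is also nonzero when even parity (0x0200) is configured, and the console reported 'o' for even parity. A standalone demonstration of the difference:

#include <stdio.h>

#define PARITY_MASK 0x0300
#define PARITY_EVEN 0x0200
#define PARITY_ODD  0x0300

int main(void)
{
	unsigned int frame = PARITY_EVEN;	/* even parity configured */

	/* old test: nonzero because ODD's value overlaps EVEN's bit */
	printf("old: %s\n", (frame & PARITY_ODD) ? "odd (wrong)" : "not odd");
	/* fixed test: mask out the whole field, then compare */
	printf("new: %s\n",
	       (frame & PARITY_MASK) == PARITY_ODD ? "odd" : "not odd");
	return 0;
}
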
diff --git a/drivers/tty/serial/ifx6x60.c b/drivers/tty/serial/ifx6x60.c
index 157883653256..f190a84a0246 100644
--- a/drivers/tty/serial/ifx6x60.c
+++ b/drivers/tty/serial/ifx6x60.c
@@ -1382,9 +1382,9 @@ static struct spi_driver ifx_spi_driver = {
 static void __exit ifx_spi_exit(void)
 {
 	/* unregister */
+	spi_unregister_driver(&ifx_spi_driver);
 	tty_unregister_driver(tty_drv);
 	put_tty_driver(tty_drv);
-	spi_unregister_driver(&ifx_spi_driver);
 	unregister_reboot_notifier(&ifx_modem_reboot_notifier_block);
 }
 
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 33509b4beaec..bbefddd92bfe 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -2184,7 +2184,9 @@ static int serial_imx_probe(struct platform_device *pdev)
 	 * and DCD (when they are outputs) or enables the respective
 	 * irqs. So set this bit early, i.e. before requesting irqs.
 	 */
-	writel(UFCR_DCEDTE, sport->port.membase + UFCR);
+	reg = readl(sport->port.membase + UFCR);
+	if (!(reg & UFCR_DCEDTE))
+		writel(reg | UFCR_DCEDTE, sport->port.membase + UFCR);
 
 	/*
 	 * Disable UCR3_RI and UCR3_DCD irqs. They are also not
@@ -2195,7 +2197,15 @@ static int serial_imx_probe(struct platform_device *pdev)
 			sport->port.membase + UCR3);
 
 	} else {
-		writel(0, sport->port.membase + UFCR);
+		unsigned long ucr3 = UCR3_DSR;
+
+		reg = readl(sport->port.membase + UFCR);
+		if (reg & UFCR_DCEDTE)
+			writel(reg & ~UFCR_DCEDTE, sport->port.membase + UFCR);
+
+		if (!is_imx1_uart(sport))
+			ucr3 |= IMX21_UCR3_RXDMUXSEL | UCR3_ADNIMP;
+		writel(ucr3, sport->port.membase + UCR3);
 	}
 
 	clk_disable_unprepare(sport->clk_ipg);
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 0f45b7884a2c..13bfd5dcffce 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -2083,7 +2083,7 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport)
 	mutex_lock(&port->mutex);
 
 	tty_dev = device_find_child(uport->dev, &match, serial_match_port);
-	if (device_may_wakeup(tty_dev)) {
+	if (tty_dev && device_may_wakeup(tty_dev)) {
 		if (!enable_irq_wake(uport->irq))
 			uport->irq_wake = 1;
 		put_device(tty_dev);
@@ -2782,7 +2782,7 @@ int uart_add_one_port(struct uart_driver *drv, struct uart_port *uport)
 	 * Register the port whether it's detected or not. This allows
 	 * setserial to be used to alter this port's parameters.
 	 */
-	tty_dev = tty_port_register_device_attr(port, drv->tty_driver,
+	tty_dev = tty_port_register_device_attr_serdev(port, drv->tty_driver,
 			uport->line, uport->dev, port, uport->tty_groups);
 	if (likely(!IS_ERR(tty_dev))) {
 		device_set_wakeup_capable(tty_dev, 1);
@@ -2845,7 +2845,7 @@ int uart_remove_one_port(struct uart_driver *drv, struct uart_port *uport)
 	/*
 	 * Remove the devices from the tty layer
 	 */
-	tty_unregister_device(drv->tty_driver, uport->line);
+	tty_port_unregister_device(port, drv->tty_driver, uport->line);
 
 	tty = tty_port_tty_get(port);
 	if (tty) {
diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
index 1d21a9c1d33e..6b137194069f 100644
--- a/drivers/tty/tty_port.c
+++ b/drivers/tty/tty_port.c
@@ -129,19 +129,85 @@ struct device *tty_port_register_device_attr(struct tty_port *port,
 		struct device *device, void *drvdata,
 		const struct attribute_group **attr_grp)
 {
+	tty_port_link_device(port, driver, index);
+	return tty_register_device_attr(driver, index, device, drvdata,
+			attr_grp);
+}
+EXPORT_SYMBOL_GPL(tty_port_register_device_attr);
+
+/**
+ * tty_port_register_device_attr_serdev - register tty or serdev device
+ * @port: tty_port of the device
+ * @driver: tty_driver for this device
+ * @index: index of the tty
+ * @device: parent if exists, otherwise NULL
+ * @drvdata: driver data for the device
+ * @attr_grp: attribute group for the device
+ *
+ * Register a serdev or tty device depending on whether the parent device has
+ * any defined serdev clients or not.
+ */
+struct device *tty_port_register_device_attr_serdev(struct tty_port *port,
+		struct tty_driver *driver, unsigned index,
+		struct device *device, void *drvdata,
+		const struct attribute_group **attr_grp)
+{
 	struct device *dev;
 
 	tty_port_link_device(port, driver, index);
 
 	dev = serdev_tty_port_register(port, device, driver, index);
-	if (PTR_ERR(dev) != -ENODEV)
+	if (PTR_ERR(dev) != -ENODEV) {
 		/* Skip creating cdev if we registered a serdev device */
 		return dev;
+	}
 
 	return tty_register_device_attr(driver, index, device, drvdata,
 			attr_grp);
 }
-EXPORT_SYMBOL_GPL(tty_port_register_device_attr);
+EXPORT_SYMBOL_GPL(tty_port_register_device_attr_serdev);
+
+/**
+ * tty_port_register_device_serdev - register tty or serdev device
+ * @port: tty_port of the device
+ * @driver: tty_driver for this device
+ * @index: index of the tty
+ * @device: parent if exists, otherwise NULL
+ *
+ * Register a serdev or tty device depending on whether the parent device has
+ * any defined serdev clients or not.
+ */
+struct device *tty_port_register_device_serdev(struct tty_port *port,
+		struct tty_driver *driver, unsigned index,
+		struct device *device)
+{
+	return tty_port_register_device_attr_serdev(port, driver, index,
+			device, NULL, NULL);
+}
+EXPORT_SYMBOL_GPL(tty_port_register_device_serdev);
+
+/**
+ * tty_port_unregister_device - deregister a tty or serdev device
+ * @port: tty_port of the device
+ * @driver: tty_driver for this device
+ * @index: index of the tty
+ *
+ * If a tty or serdev device is registered with a call to
+ * tty_port_register_device_serdev() then this function must be called when
+ * the device is gone.
+ */
+void tty_port_unregister_device(struct tty_port *port,
+		struct tty_driver *driver, unsigned index)
+{
+	int ret;
+
+	ret = serdev_tty_port_unregister(port);
+	if (ret == 0)
+		return;
+
+	tty_unregister_device(driver, index);
+}
+EXPORT_SYMBOL_GPL(tty_port_unregister_device);
 
 int tty_port_alloc_xmit_buf(struct tty_port *port)
 {
@@ -189,9 +255,6 @@ static void tty_port_destructor(struct kref *kref)
 	/* check if last port ref was dropped before tty release */
 	if (WARN_ON(port->itty))
 		return;
-
-	serdev_tty_port_unregister(port);
-
 	if (port->xmit_buf)
 		free_page((unsigned long)port->xmit_buf);
 	tty_port_destroy(port);
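
From a tty driver's point of view the API change is just a symmetric pair. A minimal sketch of the call sites (the wrapper names here are invented for illustration):

#include <linux/tty.h>

/* register a port that may be claimed by a serdev client */
static struct device *register_port(struct tty_port *port,
				    struct tty_driver *drv,
				    unsigned int index,
				    struct device *parent)
{
	return tty_port_register_device_serdev(port, drv, index, parent);
}

/* must be paired with the serdev-aware unregister, not tty_unregister_device() */
static void unregister_port(struct tty_port *port, struct tty_driver *drv,
			    unsigned int index)
{
	tty_port_unregister_device(port, drv, index);
}
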
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index 1c196f87e9d9..ff04b7f8549f 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -279,7 +279,7 @@ static int uio_dev_add_attributes(struct uio_device *idev)
 		map = kzalloc(sizeof(*map), GFP_KERNEL);
 		if (!map) {
 			ret = -ENOMEM;
-			goto err_map_kobj;
+			goto err_map;
 		}
 		kobject_init(&map->kobj, &map_attr_type);
 		map->mem = mem;
@@ -289,7 +289,7 @@ static int uio_dev_add_attributes(struct uio_device *idev)
 			goto err_map_kobj;
 		ret = kobject_uevent(&map->kobj, KOBJ_ADD);
 		if (ret)
-			goto err_map;
+			goto err_map_kobj;
 	}
 
 	for (pi = 0; pi < MAX_UIO_PORT_REGIONS; pi++) {
@@ -308,7 +308,7 @@ static int uio_dev_add_attributes(struct uio_device *idev)
 		portio = kzalloc(sizeof(*portio), GFP_KERNEL);
 		if (!portio) {
 			ret = -ENOMEM;
-			goto err_portio_kobj;
+			goto err_portio;
 		}
 		kobject_init(&portio->kobj, &portio_attr_type);
 		portio->port = port;
@@ -319,7 +319,7 @@ static int uio_dev_add_attributes(struct uio_device *idev)
 			goto err_portio_kobj;
 		ret = kobject_uevent(&portio->kobj, KOBJ_ADD);
 		if (ret)
-			goto err_portio_kobj;
 	}
 
 	return 0;
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 9e217b1361ea..fe4fe2440729 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -843,7 +843,10 @@ static ssize_t ci_role_show(struct device *dev, struct device_attribute *attr,
 {
 	struct ci_hdrc *ci = dev_get_drvdata(dev);
 
-	return sprintf(buf, "%s\n", ci_role(ci)->name);
+	if (ci->role != CI_ROLE_END)
+		return sprintf(buf, "%s\n", ci_role(ci)->name);
+
+	return 0;
 }
 
 static ssize_t ci_role_store(struct device *dev,
diff --git a/drivers/usb/chipidea/debug.c b/drivers/usb/chipidea/debug.c
index 6d23eede4d8c..1c31e8a08810 100644
--- a/drivers/usb/chipidea/debug.c
+++ b/drivers/usb/chipidea/debug.c
@@ -294,7 +294,8 @@ static int ci_role_show(struct seq_file *s, void *data)
 {
 	struct ci_hdrc *ci = s->private;
 
-	seq_printf(s, "%s\n", ci_role(ci)->name);
+	if (ci->role != CI_ROLE_END)
+		seq_printf(s, "%s\n", ci_role(ci)->name);
 
 	return 0;
 }
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index 56d2d3213076..d68b125796f9 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -1993,6 +1993,7 @@ static void udc_id_switch_for_host(struct ci_hdrc *ci)
 int ci_hdrc_gadget_init(struct ci_hdrc *ci)
 {
 	struct ci_role_driver *rdrv;
+	int ret;
 
 	if (!hw_read(ci, CAP_DCCPARAMS, DCCPARAMS_DC))
 		return -ENXIO;
@@ -2005,7 +2006,10 @@ int ci_hdrc_gadget_init(struct ci_hdrc *ci)
 	rdrv->stop	= udc_id_switch_for_host;
 	rdrv->irq	= udc_irq;
 	rdrv->name	= "gadget";
-	ci->roles[CI_ROLE_GADGET] = rdrv;
 
-	return udc_start(ci);
+	ret = udc_start(ci);
+	if (!ret)
+		ci->roles[CI_ROLE_GADGET] = rdrv;
+
+	return ret;
 }
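
The udc change is the usual publish-after-success ordering: nothing global may point at the role driver until its start routine has succeeded. Stripped of driver detail (all names hypothetical):

enum { ROLE_GADGET, ROLE_MAX };

struct role_driver { const char *name; };

struct controller {
	struct role_driver *roles[ROLE_MAX];
};

static int start_hw(struct controller *ci) { return 0; }	/* may fail */

/* sketch: fully initialize first, publish the pointer only on success */
static int role_init(struct controller *ci, struct role_driver *rdrv)
{
	int ret = start_hw(ci);

	if (!ret)
		ci->roles[ROLE_GADGET] = rdrv;
	return ret;
}
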
diff --git a/drivers/usb/chipidea/usbmisc_imx.c b/drivers/usb/chipidea/usbmisc_imx.c
index e77a4ed4f021..9f4a0185dd60 100644
--- a/drivers/usb/chipidea/usbmisc_imx.c
+++ b/drivers/usb/chipidea/usbmisc_imx.c
@@ -108,6 +108,8 @@ struct imx_usbmisc {
 	const struct usbmisc_ops *ops;
 };
 
+static inline bool is_imx53_usbmisc(struct imx_usbmisc_data *data);
+
 static int usbmisc_imx25_init(struct imx_usbmisc_data *data)
 {
 	struct imx_usbmisc *usbmisc = dev_get_drvdata(data->dev);
@@ -242,10 +244,15 @@ static int usbmisc_imx53_init(struct imx_usbmisc_data *data)
 		val = readl(reg) | MX53_USB_UHx_CTRL_WAKE_UP_EN
 			| MX53_USB_UHx_CTRL_ULPI_INT_EN;
 		writel(val, reg);
-		/* Disable internal 60Mhz clock */
-		reg = usbmisc->base + MX53_USB_CLKONOFF_CTRL_OFFSET;
-		val = readl(reg) | MX53_USB_CLKONOFF_CTRL_H2_INT60CKOFF;
-		writel(val, reg);
+		if (is_imx53_usbmisc(data)) {
+			/* Disable internal 60MHz clock */
+			reg = usbmisc->base +
+				MX53_USB_CLKONOFF_CTRL_OFFSET;
+			val = readl(reg) |
+				MX53_USB_CLKONOFF_CTRL_H2_INT60CKOFF;
+			writel(val, reg);
+		}
+
 	}
 	if (data->disable_oc) {
 		reg = usbmisc->base + MX53_USB_UH2_CTRL_OFFSET;
@@ -267,10 +274,15 @@ static int usbmisc_imx53_init(struct imx_usbmisc_data *data)
 		val = readl(reg) | MX53_USB_UHx_CTRL_WAKE_UP_EN
 			| MX53_USB_UHx_CTRL_ULPI_INT_EN;
 		writel(val, reg);
-		/* Disable internal 60Mhz clock */
-		reg = usbmisc->base + MX53_USB_CLKONOFF_CTRL_OFFSET;
-		val = readl(reg) | MX53_USB_CLKONOFF_CTRL_H3_INT60CKOFF;
-		writel(val, reg);
+
+		if (is_imx53_usbmisc(data)) {
+			/* Disable internal 60MHz clock */
+			reg = usbmisc->base +
+				MX53_USB_CLKONOFF_CTRL_OFFSET;
+			val = readl(reg) |
+				MX53_USB_CLKONOFF_CTRL_H3_INT60CKOFF;
+			writel(val, reg);
+		}
 	}
 	if (data->disable_oc) {
 		reg = usbmisc->base + MX53_USB_UH3_CTRL_OFFSET;
@@ -456,6 +468,10 @@ static const struct usbmisc_ops imx27_usbmisc_ops = {
 	.init = usbmisc_imx27_init,
 };
 
+static const struct usbmisc_ops imx51_usbmisc_ops = {
+	.init = usbmisc_imx53_init,
+};
+
 static const struct usbmisc_ops imx53_usbmisc_ops = {
 	.init = usbmisc_imx53_init,
 };
@@ -479,6 +495,13 @@ static const struct usbmisc_ops imx7d_usbmisc_ops = {
 	.set_wakeup = usbmisc_imx7d_set_wakeup,
 };
 
+static inline bool is_imx53_usbmisc(struct imx_usbmisc_data *data)
+{
+	struct imx_usbmisc *usbmisc = dev_get_drvdata(data->dev);
+
+	return usbmisc->ops == &imx53_usbmisc_ops;
+}
+
 int imx_usbmisc_init(struct imx_usbmisc_data *data)
 {
 	struct imx_usbmisc *usbmisc;
@@ -536,7 +559,7 @@ static const struct of_device_id usbmisc_imx_dt_ids[] = {
 	},
 	{
 		.compatible = "fsl,imx51-usbmisc",
-		.data = &imx53_usbmisc_ops,
+		.data = &imx51_usbmisc_ops,
 	},
 	{
 		.compatible = "fsl,imx53-usbmisc",
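
Giving i.MX51 its own ops table, even though it reuses usbmisc_imx53_init(), is what makes the is_imx53_usbmisc() test possible: the SoC variant is encoded in which table was matched, not in an extra flag. The idiom in miniature (hypothetical names):

struct ops { int (*init)(void *data); };

static const struct ops imx51_ops;	/* distinct identities, */
static const struct ops imx53_ops;	/* possibly equal contents */

struct device_priv { const struct ops *ops; };

/* variant check by ops-table identity; no per-device flag needed */
static int is_imx53(const struct device_priv *priv)
{
	return priv->ops == &imx53_ops;
}
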
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index cfc3cff6e8d5..8e6ef671be9b 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -475,11 +475,11 @@ static void snoop_urb(struct usb_device *udev,
 
 	if (userurb) {		/* Async */
 		if (when == SUBMIT)
-			dev_info(&udev->dev, "userurb %p, ep%d %s-%s, "
+			dev_info(&udev->dev, "userurb %pK, ep%d %s-%s, "
 				"length %u\n",
 				userurb, ep, t, d, length);
 		else
-			dev_info(&udev->dev, "userurb %p, ep%d %s-%s, "
+			dev_info(&udev->dev, "userurb %pK, ep%d %s-%s, "
 				"actual_length %u status %d\n",
 				userurb, ep, t, d, length,
 				timeout_or_status);
@@ -1895,7 +1895,7 @@ static int proc_reapurb(struct usb_dev_state *ps, void __user *arg)
 	if (as) {
 		int retval;
 
-		snoop(&ps->dev->dev, "reap %p\n", as->userurb);
+		snoop(&ps->dev->dev, "reap %pK\n", as->userurb);
 		retval = processcompl(as, (void __user * __user *)arg);
 		free_async(as);
 		return retval;
@@ -1912,7 +1912,7 @@ static int proc_reapurbnonblock(struct usb_dev_state *ps, void __user *arg)
 
 	as = async_getcompleted(ps);
 	if (as) {
-		snoop(&ps->dev->dev, "reap %p\n", as->userurb);
+		snoop(&ps->dev->dev, "reap %pK\n", as->userurb);
 		retval = processcompl(as, (void __user * __user *)arg);
 		free_async(as);
 	} else {
@@ -2043,7 +2043,7 @@ static int proc_reapurb_compat(struct usb_dev_state *ps, void __user *arg)
 	if (as) {
 		int retval;
 
-		snoop(&ps->dev->dev, "reap %p\n", as->userurb);
+		snoop(&ps->dev->dev, "reap %pK\n", as->userurb);
 		retval = processcompl_compat(as, (void __user * __user *)arg);
 		free_async(as);
 		return retval;
@@ -2060,7 +2060,7 @@ static int proc_reapurbnonblock_compat(struct usb_dev_state *ps, void __user *ar
 
 	as = async_getcompleted(ps);
 	if (as) {
-		snoop(&ps->dev->dev, "reap %p\n", as->userurb);
+		snoop(&ps->dev->dev, "reap %pK\n", as->userurb);
 		retval = processcompl_compat(as, (void __user * __user *)arg);
 		free_async(as);
 	} else {
@@ -2489,7 +2489,7 @@ static long usbdev_do_ioctl(struct file *file, unsigned int cmd,
 #endif
 
 	case USBDEVFS_DISCARDURB:
-		snoop(&dev->dev, "%s: DISCARDURB %p\n", __func__, p);
+		snoop(&dev->dev, "%s: DISCARDURB %pK\n", __func__, p);
 		ret = proc_unlinkurb(ps, p);
 		break;
 
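
These devio.c hunks, and the hcd.c and urb.c hunks below, all make the same substitution: %pK instead of %p, so the printed pointer honors the kptr_restrict sysctl and unprivileged readers see a censored value rather than a raw kernel address. Usage is unchanged apart from the specifier:

#include <linux/printk.h>

static void log_urb(const void *urb)
{
	/* %pK may print as zeros, depending on kernel.kptr_restrict */
	pr_info("urb %pK\n", urb);
}
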
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 49550790a3cb..5dea98358c05 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1723,7 +1723,7 @@ int usb_hcd_unlink_urb (struct urb *urb, int status)
 		if (retval == 0)
 			retval = -EINPROGRESS;
 		else if (retval != -EIDRM && retval != -EBUSY)
-			dev_dbg(&udev->dev, "hcd_unlink_urb %p fail %d\n",
+			dev_dbg(&udev->dev, "hcd_unlink_urb %pK fail %d\n",
 					urb, retval);
 		usb_put_dev(udev);
 	}
@@ -1890,7 +1890,7 @@ rescan:
 		/* kick hcd */
 		unlink1(hcd, urb, -ESHUTDOWN);
 		dev_dbg (hcd->self.controller,
-			"shutdown urb %p ep%d%s%s\n",
+			"shutdown urb %pK ep%d%s%s\n",
 			urb, usb_endpoint_num(&ep->desc),
 			is_in ? "in" : "out",
 			({ char *s;
@@ -2520,6 +2520,7 @@ struct usb_hcd *__usb_create_hcd(const struct hc_driver *driver,
 	hcd->bandwidth_mutex = kmalloc(sizeof(*hcd->bandwidth_mutex),
 			GFP_KERNEL);
 	if (!hcd->bandwidth_mutex) {
+		kfree(hcd->address0_mutex);
 		kfree(hcd);
 		dev_dbg(dev, "hcd bandwidth mutex alloc failed\n");
 		return NULL;
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 9dca59ef18b3..b8bb20d7acdb 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -362,7 +362,8 @@ static void usb_set_lpm_parameters(struct usb_device *udev)
 }
 
 /* USB 2.0 spec Section 11.24.4.5 */
-static int get_hub_descriptor(struct usb_device *hdev, void *data)
+static int get_hub_descriptor(struct usb_device *hdev,
+		struct usb_hub_descriptor *desc)
 {
 	int i, ret, size;
 	unsigned dtype;
@@ -378,10 +379,18 @@ static int get_hub_descriptor(struct usb_device *hdev, void *data)
 	for (i = 0; i < 3; i++) {
 		ret = usb_control_msg(hdev, usb_rcvctrlpipe(hdev, 0),
 			USB_REQ_GET_DESCRIPTOR, USB_DIR_IN | USB_RT_HUB,
-			dtype << 8, 0, data, size,
+			dtype << 8, 0, desc, size,
 			USB_CTRL_GET_TIMEOUT);
-		if (ret >= (USB_DT_HUB_NONVAR_SIZE + 2))
+		if (hub_is_superspeed(hdev)) {
+			if (ret == size)
+				return ret;
+		} else if (ret >= USB_DT_HUB_NONVAR_SIZE + 2) {
+			/* Make sure we have the DeviceRemovable field. */
+			size = USB_DT_HUB_NONVAR_SIZE + desc->bNbrPorts / 8 + 1;
+			if (ret < size)
+				return -EMSGSIZE;
 			return ret;
+		}
 	}
 	return -EINVAL;
 }
@@ -1313,7 +1322,7 @@ static int hub_configure(struct usb_hub *hub,
 	}
 	mutex_init(&hub->status_mutex);
 
-	hub->descriptor = kmalloc(sizeof(*hub->descriptor), GFP_KERNEL);
+	hub->descriptor = kzalloc(sizeof(*hub->descriptor), GFP_KERNEL);
 	if (!hub->descriptor) {
 		ret = -ENOMEM;
 		goto fail;
@@ -1321,13 +1330,19 @@ static int hub_configure(struct usb_hub *hub,
 
 	/* Request the entire hub descriptor.
 	 * hub->descriptor can handle USB_MAXCHILDREN ports,
-	 * but the hub can/will return fewer bytes here.
+	 * but a (non-SS) hub can/will return fewer bytes here.
	 */
 	ret = get_hub_descriptor(hdev, hub->descriptor);
 	if (ret < 0) {
 		message = "can't read hub descriptor";
 		goto fail;
-	} else if (hub->descriptor->bNbrPorts > USB_MAXCHILDREN) {
+	}
+
+	maxchild = USB_MAXCHILDREN;
+	if (hub_is_superspeed(hdev))
+		maxchild = min_t(unsigned, maxchild, USB_SS_MAXPORTS);
+
+	if (hub->descriptor->bNbrPorts > maxchild) {
 		message = "hub has too many ports!";
 		ret = -ENODEV;
 		goto fail;
diff --git a/drivers/usb/core/of.c b/drivers/usb/core/of.c
index d787f195a9a6..d563cbcf76cf 100644
--- a/drivers/usb/core/of.c
+++ b/drivers/usb/core/of.c
@@ -53,6 +53,9 @@ EXPORT_SYMBOL_GPL(usb_of_get_child_node);
  *
  * Find the companion device from platform bus.
  *
+ * Takes a reference to the returned struct device which needs to be dropped
+ * after use.
+ *
  * Return: On success, a pointer to the companion device, %NULL on failure.
  */
 struct device *usb_of_get_companion_dev(struct device *dev)
diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
index d75cb8c0f7df..47903d510955 100644
--- a/drivers/usb/core/urb.c
+++ b/drivers/usb/core/urb.c
@@ -338,7 +338,7 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
 	if (!urb || !urb->complete)
 		return -EINVAL;
 	if (urb->hcpriv) {
-		WARN_ONCE(1, "URB %p submitted while active\n", urb);
+		WARN_ONCE(1, "URB %pK submitted while active\n", urb);
 		return -EBUSY;
 	}
 
diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c
index 9cd8722f24f6..a3ffe97170ff 100644
--- a/drivers/usb/dwc2/params.c
+++ b/drivers/usb/dwc2/params.c
@@ -144,6 +144,8 @@ const struct of_device_id dwc2_of_match_table[] = {
 	{ .compatible = "lantiq,xrx200-usb", .data = dwc2_set_ltq_params },
 	{ .compatible = "snps,dwc2" },
 	{ .compatible = "samsung,s3c6400-hsotg" },
+	{ .compatible = "amlogic,meson8-usb",
+	  .data = dwc2_set_amlogic_params },
 	{ .compatible = "amlogic,meson8b-usb",
 	  .data = dwc2_set_amlogic_params },
 	{ .compatible = "amlogic,meson-gxbb-usb",
diff --git a/drivers/usb/dwc3/dwc3-keystone.c b/drivers/usb/dwc3/dwc3-keystone.c
index 72664700b8a2..12ee23f53cdd 100644
--- a/drivers/usb/dwc3/dwc3-keystone.c
+++ b/drivers/usb/dwc3/dwc3-keystone.c
@@ -107,6 +107,10 @@ static int kdwc3_probe(struct platform_device *pdev)
107 return PTR_ERR(kdwc->usbss); 107 return PTR_ERR(kdwc->usbss);
108 108
109 kdwc->clk = devm_clk_get(kdwc->dev, "usb"); 109 kdwc->clk = devm_clk_get(kdwc->dev, "usb");
110 if (IS_ERR(kdwc->clk)) {
111 dev_err(kdwc->dev, "unable to get usb clock\n");
112 return PTR_ERR(kdwc->clk);
113 }
110 114
111 error = clk_prepare_enable(kdwc->clk); 115 error = clk_prepare_enable(kdwc->clk);
112 if (error < 0) { 116 if (error < 0) {
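
The new check matters because devm_clk_get() reports failure through ERR_PTR() encoding, never NULL here; a minimal probe fragment, assuming a simplified context:

    /* Sketch: devm_clk_get() returns an ERR_PTR() on failure (possibly
     * -EPROBE_DEFER), so test with IS_ERR() before touching the clock.
     */
    struct clk *clk = devm_clk_get(&pdev->dev, "usb");
    int error;

    if (IS_ERR(clk)) {
        dev_err(&pdev->dev, "unable to get usb clock\n");
        return PTR_ERR(clk);
    }

    error = clk_prepare_enable(clk);
    if (error < 0)
        return error;
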
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index a15ec71d0423..84a2cebfc712 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -39,6 +39,8 @@
39#define PCI_DEVICE_ID_INTEL_APL 0x5aaa 39#define PCI_DEVICE_ID_INTEL_APL 0x5aaa
40#define PCI_DEVICE_ID_INTEL_KBP 0xa2b0 40#define PCI_DEVICE_ID_INTEL_KBP 0xa2b0
41#define PCI_DEVICE_ID_INTEL_GLK 0x31aa 41#define PCI_DEVICE_ID_INTEL_GLK 0x31aa
42#define PCI_DEVICE_ID_INTEL_CNPLP 0x9dee
43#define PCI_DEVICE_ID_INTEL_CNPH 0xa36e
42 44
43#define PCI_INTEL_BXT_DSM_UUID "732b85d5-b7a7-4a1b-9ba0-4bbd00ffd511" 45#define PCI_INTEL_BXT_DSM_UUID "732b85d5-b7a7-4a1b-9ba0-4bbd00ffd511"
44#define PCI_INTEL_BXT_FUNC_PMU_PWR 4 46#define PCI_INTEL_BXT_FUNC_PMU_PWR 4
@@ -270,6 +272,8 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
270 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_APL), }, 272 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_APL), },
271 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBP), }, 273 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBP), },
272 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_GLK), }, 274 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_GLK), },
275 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CNPLP), },
276 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CNPH), },
273 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB), }, 277 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB), },
274 { } /* Terminating Entry */ 278 { } /* Terminating Entry */
275}; 279};
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 6f6f0b3be3ad..aea9a5b948b4 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1261,14 +1261,24 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
1261 __dwc3_gadget_start_isoc(dwc, dep, cur_uf); 1261 __dwc3_gadget_start_isoc(dwc, dep, cur_uf);
1262 dep->flags &= ~DWC3_EP_PENDING_REQUEST; 1262 dep->flags &= ~DWC3_EP_PENDING_REQUEST;
1263 } 1263 }
1264 return 0;
1264 } 1265 }
1265 return 0; 1266
1267 if ((dep->flags & DWC3_EP_BUSY) &&
1268 !(dep->flags & DWC3_EP_MISSED_ISOC)) {
1269 WARN_ON_ONCE(!dep->resource_index);
1270 ret = __dwc3_gadget_kick_transfer(dep,
1271 dep->resource_index);
1272 }
1273
1274 goto out;
1266 } 1275 }
1267 1276
1268 if (!dwc3_calc_trbs_left(dep)) 1277 if (!dwc3_calc_trbs_left(dep))
1269 return 0; 1278 return 0;
1270 1279
1271 ret = __dwc3_gadget_kick_transfer(dep, 0); 1280 ret = __dwc3_gadget_kick_transfer(dep, 0);
1281out:
1272 if (ret == -EBUSY) 1282 if (ret == -EBUSY)
1273 ret = 0; 1283 ret = 0;
1274 1284
@@ -3026,6 +3036,15 @@ static irqreturn_t dwc3_check_event_buf(struct dwc3_event_buffer *evt)
3026 return IRQ_HANDLED; 3036 return IRQ_HANDLED;
3027 } 3037 }
3028 3038
3039 /*
3040 * With PCIe legacy interrupt, test shows that top-half irq handler can
3041 * be called again after HW interrupt deassertion. Check if bottom-half
3042 * irq event handler completes before caching new event to prevent
3043 * losing events.
3044 */
3045 if (evt->flags & DWC3_EVENT_PENDING)
3046 return IRQ_HANDLED;
3047
3029 count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0)); 3048 count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0));
3030 count &= DWC3_GEVNTCOUNT_MASK; 3049 count &= DWC3_GEVNTCOUNT_MASK;
3031 if (!count) 3050 if (!count)
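
The DWC3_EVENT_PENDING test above is the classic guard for a top half that can re-fire before its threaded bottom half has drained the cached events. A schematic sketch of the split, not the driver's exact code:

    /* Sketch: the top half claims the event buffer once; a spurious
     * re-entry while the bottom half still owns it must not cache
     * (and overwrite) events again.
     */
    static irqreturn_t top_half(struct dwc3_event_buffer *evt)
    {
        if (evt->flags & DWC3_EVENT_PENDING)
            return IRQ_HANDLED;        /* bottom half not done yet */

        /* read GEVNTCOUNT, copy events, mask the interrupt ... */
        evt->flags |= DWC3_EVENT_PENDING;
        return IRQ_WAKE_THREAD;        /* bottom half clears the flag */
    }
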
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 49d685ad0da9..45b554032332 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -315,6 +315,9 @@ void usb_remove_function(struct usb_configuration *c, struct usb_function *f)
315 list_del(&f->list); 315 list_del(&f->list);
316 if (f->unbind) 316 if (f->unbind)
317 f->unbind(c, f); 317 f->unbind(c, f);
318
319 if (f->bind_deactivated)
320 usb_function_activate(f);
318} 321}
319EXPORT_SYMBOL_GPL(usb_remove_function); 322EXPORT_SYMBOL_GPL(usb_remove_function);
320 323
@@ -956,12 +959,8 @@ static void remove_config(struct usb_composite_dev *cdev,
956 959
957 f = list_first_entry(&config->functions, 960 f = list_first_entry(&config->functions,
958 struct usb_function, list); 961 struct usb_function, list);
959 list_del(&f->list); 962
960 if (f->unbind) { 963 usb_remove_function(config, f);
961 DBG(cdev, "unbind function '%s'/%p\n", f->name, f);
962 f->unbind(config, f);
963 /* may free memory for "f" */
964 }
965 } 964 }
966 list_del(&config->list); 965 list_del(&config->list);
967 if (config->unbind) { 966 if (config->unbind) {
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 71dd27c0d7f2..47dda3450abd 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -1858,12 +1858,12 @@ static int ffs_func_eps_enable(struct ffs_function *func)
1858 ep->ep->driver_data = ep; 1858 ep->ep->driver_data = ep;
1859 ep->ep->desc = ds; 1859 ep->ep->desc = ds;
1860 1860
1861 comp_desc = (struct usb_ss_ep_comp_descriptor *)(ds + 1861 if (needs_comp_desc) {
1862 USB_DT_ENDPOINT_SIZE); 1862 comp_desc = (struct usb_ss_ep_comp_descriptor *)(ds +
1863 ep->ep->maxburst = comp_desc->bMaxBurst + 1; 1863 USB_DT_ENDPOINT_SIZE);
1864 1864 ep->ep->maxburst = comp_desc->bMaxBurst + 1;
1865 if (needs_comp_desc)
1866 ep->ep->comp_desc = comp_desc; 1865 ep->ep->comp_desc = comp_desc;
1866 }
1867 1867
1868 ret = usb_ep_enable(ep->ep); 1868 ret = usb_ep_enable(ep->ep);
1869 if (likely(!ret)) { 1869 if (likely(!ret)) {
diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
index 4c8aacc232c0..74d57d6994da 100644
--- a/drivers/usb/gadget/function/f_mass_storage.c
+++ b/drivers/usb/gadget/function/f_mass_storage.c
@@ -396,7 +396,11 @@ static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep)
396/* Caller must hold fsg->lock */ 396/* Caller must hold fsg->lock */
397static void wakeup_thread(struct fsg_common *common) 397static void wakeup_thread(struct fsg_common *common)
398{ 398{
399 smp_wmb(); /* ensure the write of bh->state is complete */ 399 /*
400 * Ensure the reading of thread_wakeup_needed
401 * and the writing of bh->state are completed
402 */
403 smp_mb();
400 /* Tell the main thread that something has happened */ 404 /* Tell the main thread that something has happened */
401 common->thread_wakeup_needed = 1; 405 common->thread_wakeup_needed = 1;
402 if (common->thread_task) 406 if (common->thread_task)
@@ -627,7 +631,12 @@ static int sleep_thread(struct fsg_common *common, bool can_freeze)
627 } 631 }
628 __set_current_state(TASK_RUNNING); 632 __set_current_state(TASK_RUNNING);
629 common->thread_wakeup_needed = 0; 633 common->thread_wakeup_needed = 0;
630 smp_rmb(); /* ensure the latest bh->state is visible */ 634
635 /*
636 * Ensure the writing of thread_wakeup_needed
637 * and the reading of bh->state are completed
638 */
639 smp_mb();
631 return rc; 640 return rc;
632} 641}
633 642
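
Why full barriers: the sleeper must order its store of thread_wakeup_needed = 0 against its subsequent load of bh->state, and a store-then-load ordering is exactly what smp_wmb()/smp_rmb() cannot provide. The intended pairing, roughly:

    /*
     * waker                          sleeper
     * -----                          -------
     * bh->state = BUF_STATE_FULL;    common->thread_wakeup_needed = 0;
     * smp_mb();                      smp_mb();
     * thread_wakeup_needed = 1;      state = bh->state;
     *
     * Only smp_mb() orders a store before a later load, so both sides
     * use the full barrier to keep the handshake symmetric.
     */
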
diff --git a/drivers/usb/gadget/function/f_phonet.c b/drivers/usb/gadget/function/f_phonet.c
index b4058f0000e4..6a1ce6a55158 100644
--- a/drivers/usb/gadget/function/f_phonet.c
+++ b/drivers/usb/gadget/function/f_phonet.c
@@ -281,7 +281,7 @@ static void pn_net_setup(struct net_device *dev)
281 dev->tx_queue_len = 1; 281 dev->tx_queue_len = 1;
282 282
283 dev->netdev_ops = &pn_netdev_ops; 283 dev->netdev_ops = &pn_netdev_ops;
284 dev->destructor = free_netdev; 284 dev->needs_free_netdev = true;
285 dev->header_ops = &phonet_header_ops; 285 dev->header_ops = &phonet_header_ops;
286} 286}
287 287
diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
index 000677c991b0..9b0805f55ad7 100644
--- a/drivers/usb/gadget/function/u_serial.c
+++ b/drivers/usb/gadget/function/u_serial.c
@@ -1256,7 +1256,7 @@ static void gserial_console_exit(void)
1256 struct gscons_info *info = &gscons_info; 1256 struct gscons_info *info = &gscons_info;
1257 1257
1258 unregister_console(&gserial_cons); 1258 unregister_console(&gserial_cons);
1259 if (info->console_thread != NULL) 1259 if (!IS_ERR_OR_NULL(info->console_thread))
1260 kthread_stop(info->console_thread); 1260 kthread_stop(info->console_thread);
1261 gs_buf_free(&info->con_buf); 1261 gs_buf_free(&info->con_buf);
1262} 1262}
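
The IS_ERR_OR_NULL() test is needed because kthread_run() reports failure with an ERR_PTR(), which is non-NULL; calling kthread_stop() on such a value would oops. A hedged sketch (thread function and data are hypothetical):

    /* Sketch: a failed kthread_run() leaves an error pointer behind,
     * so guard kthread_stop() with IS_ERR_OR_NULL(), not a NULL test.
     */
    struct task_struct *task = kthread_run(console_fn, info, "gs_console");

    /* ... later, on teardown ... */
    if (!IS_ERR_OR_NULL(task))
        kthread_stop(task);
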
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index b9ca0a26cbd9..684900fcfe24 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -1183,8 +1183,10 @@ dev_release (struct inode *inode, struct file *fd)
1183 1183
1184 /* closing ep0 === shutdown all */ 1184 /* closing ep0 === shutdown all */
1185 1185
1186 if (dev->gadget_registered) 1186 if (dev->gadget_registered) {
1187 usb_gadget_unregister_driver (&gadgetfs_driver); 1187 usb_gadget_unregister_driver (&gadgetfs_driver);
1188 dev->gadget_registered = false;
1189 }
1188 1190
1189 /* at this point "good" hardware has disconnected the 1191 /* at this point "good" hardware has disconnected the
1190 * device from USB; the host won't see it any more. 1192 * device from USB; the host won't see it any more.
@@ -1677,9 +1679,10 @@ static void
1677gadgetfs_suspend (struct usb_gadget *gadget) 1679gadgetfs_suspend (struct usb_gadget *gadget)
1678{ 1680{
1679 struct dev_data *dev = get_gadget_data (gadget); 1681 struct dev_data *dev = get_gadget_data (gadget);
1682 unsigned long flags;
1680 1683
1681 INFO (dev, "suspended from state %d\n", dev->state); 1684 INFO (dev, "suspended from state %d\n", dev->state);
1682 spin_lock (&dev->lock); 1685 spin_lock_irqsave(&dev->lock, flags);
1683 switch (dev->state) { 1686 switch (dev->state) {
1684 case STATE_DEV_SETUP: // VERY odd... host died?? 1687 case STATE_DEV_SETUP: // VERY odd... host died??
1685 case STATE_DEV_CONNECTED: 1688 case STATE_DEV_CONNECTED:
@@ -1690,7 +1693,7 @@ gadgetfs_suspend (struct usb_gadget *gadget)
1690 default: 1693 default:
1691 break; 1694 break;
1692 } 1695 }
1693 spin_unlock (&dev->lock); 1696 spin_unlock_irqrestore(&dev->lock, flags);
1694} 1697}
1695 1698
1696static struct usb_gadget_driver gadgetfs_driver = { 1699static struct usb_gadget_driver gadgetfs_driver = {
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index c79081952ea0..7635fd7cc328 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -442,23 +442,16 @@ static void set_link_state(struct dummy_hcd *dum_hcd)
442 /* Report reset and disconnect events to the driver */ 442 /* Report reset and disconnect events to the driver */
443 if (dum->driver && (disconnect || reset)) { 443 if (dum->driver && (disconnect || reset)) {
444 stop_activity(dum); 444 stop_activity(dum);
445 spin_unlock(&dum->lock);
446 if (reset) 445 if (reset)
447 usb_gadget_udc_reset(&dum->gadget, dum->driver); 446 usb_gadget_udc_reset(&dum->gadget, dum->driver);
448 else 447 else
449 dum->driver->disconnect(&dum->gadget); 448 dum->driver->disconnect(&dum->gadget);
450 spin_lock(&dum->lock);
451 } 449 }
452 } else if (dum_hcd->active != dum_hcd->old_active) { 450 } else if (dum_hcd->active != dum_hcd->old_active) {
453 if (dum_hcd->old_active && dum->driver->suspend) { 451 if (dum_hcd->old_active && dum->driver->suspend)
454 spin_unlock(&dum->lock);
455 dum->driver->suspend(&dum->gadget); 452 dum->driver->suspend(&dum->gadget);
456 spin_lock(&dum->lock); 453 else if (!dum_hcd->old_active && dum->driver->resume)
457 } else if (!dum_hcd->old_active && dum->driver->resume) {
458 spin_unlock(&dum->lock);
459 dum->driver->resume(&dum->gadget); 454 dum->driver->resume(&dum->gadget);
460 spin_lock(&dum->lock);
461 }
462 } 455 }
463 456
464 dum_hcd->old_status = dum_hcd->port_status; 457 dum_hcd->old_status = dum_hcd->port_status;
@@ -983,7 +976,9 @@ static int dummy_udc_stop(struct usb_gadget *g)
983 struct dummy_hcd *dum_hcd = gadget_to_dummy_hcd(g); 976 struct dummy_hcd *dum_hcd = gadget_to_dummy_hcd(g);
984 struct dummy *dum = dum_hcd->dum; 977 struct dummy *dum = dum_hcd->dum;
985 978
979 spin_lock_irq(&dum->lock);
986 dum->driver = NULL; 980 dum->driver = NULL;
981 spin_unlock_irq(&dum->lock);
987 982
988 return 0; 983 return 0;
989} 984}
@@ -2008,7 +2003,7 @@ ss_hub_descriptor(struct usb_hub_descriptor *desc)
2008 HUB_CHAR_COMMON_OCPM); 2003 HUB_CHAR_COMMON_OCPM);
2009 desc->bNbrPorts = 1; 2004 desc->bNbrPorts = 1;
2010 desc->u.ss.bHubHdrDecLat = 0x04; /* Worst case: 0.4 micro sec*/ 2005 desc->u.ss.bHubHdrDecLat = 0x04; /* Worst case: 0.4 micro sec*/
2011 desc->u.ss.DeviceRemovable = 0xffff; 2006 desc->u.ss.DeviceRemovable = 0;
2012} 2007}
2013 2008
2014static inline void hub_descriptor(struct usb_hub_descriptor *desc) 2009static inline void hub_descriptor(struct usb_hub_descriptor *desc)
@@ -2020,8 +2015,8 @@ static inline void hub_descriptor(struct usb_hub_descriptor *desc)
2020 HUB_CHAR_INDV_PORT_LPSM | 2015 HUB_CHAR_INDV_PORT_LPSM |
2021 HUB_CHAR_COMMON_OCPM); 2016 HUB_CHAR_COMMON_OCPM);
2022 desc->bNbrPorts = 1; 2017 desc->bNbrPorts = 1;
2023 desc->u.hs.DeviceRemovable[0] = 0xff; 2018 desc->u.hs.DeviceRemovable[0] = 0;
2024 desc->u.hs.DeviceRemovable[1] = 0xff; 2019 desc->u.hs.DeviceRemovable[1] = 0xff; /* PortPwrCtrlMask */
2025} 2020}
2026 2021
2027static int dummy_hub_control( 2022static int dummy_hub_control(
diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c
index 6cf07857eaca..f2cbd7f8005e 100644
--- a/drivers/usb/gadget/udc/net2280.c
+++ b/drivers/usb/gadget/udc/net2280.c
@@ -2470,11 +2470,8 @@ static void stop_activity(struct net2280 *dev, struct usb_gadget_driver *driver)
2470 nuke(&dev->ep[i]); 2470 nuke(&dev->ep[i]);
2471 2471
2472 /* report disconnect; the driver is already quiesced */ 2472 /* report disconnect; the driver is already quiesced */
2473 if (driver) { 2473 if (driver)
2474 spin_unlock(&dev->lock);
2475 driver->disconnect(&dev->gadget); 2474 driver->disconnect(&dev->gadget);
2476 spin_lock(&dev->lock);
2477 }
2478 2475
2479 usb_reinit(dev); 2476 usb_reinit(dev);
2480} 2477}
@@ -3348,8 +3345,6 @@ next_endpoints:
3348 BIT(PCI_RETRY_ABORT_INTERRUPT)) 3345 BIT(PCI_RETRY_ABORT_INTERRUPT))
3349 3346
3350static void handle_stat1_irqs(struct net2280 *dev, u32 stat) 3347static void handle_stat1_irqs(struct net2280 *dev, u32 stat)
3351__releases(dev->lock)
3352__acquires(dev->lock)
3353{ 3348{
3354 struct net2280_ep *ep; 3349 struct net2280_ep *ep;
3355 u32 tmp, num, mask, scratch; 3350 u32 tmp, num, mask, scratch;
@@ -3390,14 +3385,12 @@ __acquires(dev->lock)
3390 if (disconnect || reset) { 3385 if (disconnect || reset) {
3391 stop_activity(dev, dev->driver); 3386 stop_activity(dev, dev->driver);
3392 ep0_start(dev); 3387 ep0_start(dev);
3393 spin_unlock(&dev->lock);
3394 if (reset) 3388 if (reset)
3395 usb_gadget_udc_reset 3389 usb_gadget_udc_reset
3396 (&dev->gadget, dev->driver); 3390 (&dev->gadget, dev->driver);
3397 else 3391 else
3398 (dev->driver->disconnect) 3392 (dev->driver->disconnect)
3399 (&dev->gadget); 3393 (&dev->gadget);
3400 spin_lock(&dev->lock);
3401 return; 3394 return;
3402 } 3395 }
3403 } 3396 }
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
index 5a2d845fb1a6..cd4c88529721 100644
--- a/drivers/usb/gadget/udc/renesas_usb3.c
+++ b/drivers/usb/gadget/udc/renesas_usb3.c
@@ -623,7 +623,6 @@ static void renesas_usb3_stop_controller(struct renesas_usb3 *usb3)
623{ 623{
624 usb3_disconnect(usb3); 624 usb3_disconnect(usb3);
625 usb3_write(usb3, 0, USB3_P0_INT_ENA); 625 usb3_write(usb3, 0, USB3_P0_INT_ENA);
626 usb3_write(usb3, 0, USB3_PN_INT_ENA);
627 usb3_write(usb3, 0, USB3_USB_OTG_INT_ENA); 626 usb3_write(usb3, 0, USB3_USB_OTG_INT_ENA);
628 usb3_write(usb3, 0, USB3_USB_INT_ENA_1); 627 usb3_write(usb3, 0, USB3_USB_INT_ENA_1);
629 usb3_write(usb3, 0, USB3_USB_INT_ENA_2); 628 usb3_write(usb3, 0, USB3_USB_INT_ENA_2);
@@ -1475,7 +1474,13 @@ static void usb3_request_done_pipen(struct renesas_usb3 *usb3,
1475 struct renesas_usb3_request *usb3_req, 1474 struct renesas_usb3_request *usb3_req,
1476 int status) 1475 int status)
1477{ 1476{
1478 usb3_pn_stop(usb3); 1477 unsigned long flags;
1478
1479 spin_lock_irqsave(&usb3->lock, flags);
1480 if (usb3_pn_change(usb3, usb3_ep->num))
1481 usb3_pn_stop(usb3);
1482 spin_unlock_irqrestore(&usb3->lock, flags);
1483
1479 usb3_disable_pipe_irq(usb3, usb3_ep->num); 1484 usb3_disable_pipe_irq(usb3, usb3_ep->num);
1480 usb3_request_done(usb3_ep, usb3_req, status); 1485 usb3_request_done(usb3_ep, usb3_req, status);
1481 1486
@@ -1504,30 +1509,46 @@ static void usb3_irq_epc_pipen_bfrdy(struct renesas_usb3 *usb3, int num)
1504{ 1509{
1505 struct renesas_usb3_ep *usb3_ep = usb3_get_ep(usb3, num); 1510 struct renesas_usb3_ep *usb3_ep = usb3_get_ep(usb3, num);
1506 struct renesas_usb3_request *usb3_req = usb3_get_request(usb3_ep); 1511 struct renesas_usb3_request *usb3_req = usb3_get_request(usb3_ep);
1512 bool done = false;
1507 1513
1508 if (!usb3_req) 1514 if (!usb3_req)
1509 return; 1515 return;
1510 1516
1517 spin_lock(&usb3->lock);
1518 if (usb3_pn_change(usb3, num))
1519 goto out;
1520
1511 if (usb3_ep->dir_in) { 1521 if (usb3_ep->dir_in) {
1512 /* Do not stop the IN pipe here to detect LSTTR interrupt */ 1522 /* Do not stop the IN pipe here to detect LSTTR interrupt */
1513 if (!usb3_write_pipe(usb3_ep, usb3_req, USB3_PN_WRITE)) 1523 if (!usb3_write_pipe(usb3_ep, usb3_req, USB3_PN_WRITE))
1514 usb3_clear_bit(usb3, PN_INT_BFRDY, USB3_PN_INT_ENA); 1524 usb3_clear_bit(usb3, PN_INT_BFRDY, USB3_PN_INT_ENA);
1515 } else { 1525 } else {
1516 if (!usb3_read_pipe(usb3_ep, usb3_req, USB3_PN_READ)) 1526 if (!usb3_read_pipe(usb3_ep, usb3_req, USB3_PN_READ))
1517 usb3_request_done_pipen(usb3, usb3_ep, usb3_req, 0); 1527 done = true;
1518 } 1528 }
1529
1530out:
1531 /* need to unlock because usb3_request_done_pipen() locks it */
1532 spin_unlock(&usb3->lock);
1533
1534 if (done)
1535 usb3_request_done_pipen(usb3, usb3_ep, usb3_req, 0);
1519} 1536}
1520 1537
1521static void usb3_irq_epc_pipen(struct renesas_usb3 *usb3, int num) 1538static void usb3_irq_epc_pipen(struct renesas_usb3 *usb3, int num)
1522{ 1539{
1523 u32 pn_int_sta; 1540 u32 pn_int_sta;
1524 1541
1525 if (usb3_pn_change(usb3, num) < 0) 1542 spin_lock(&usb3->lock);
1543 if (usb3_pn_change(usb3, num) < 0) {
1544 spin_unlock(&usb3->lock);
1526 return; 1545 return;
1546 }
1527 1547
1528 pn_int_sta = usb3_read(usb3, USB3_PN_INT_STA); 1548 pn_int_sta = usb3_read(usb3, USB3_PN_INT_STA);
1529 pn_int_sta &= usb3_read(usb3, USB3_PN_INT_ENA); 1549 pn_int_sta &= usb3_read(usb3, USB3_PN_INT_ENA);
1530 usb3_write(usb3, pn_int_sta, USB3_PN_INT_STA); 1550 usb3_write(usb3, pn_int_sta, USB3_PN_INT_STA);
1551 spin_unlock(&usb3->lock);
1531 if (pn_int_sta & PN_INT_LSTTR) 1552 if (pn_int_sta & PN_INT_LSTTR)
1532 usb3_irq_epc_pipen_lsttr(usb3, num); 1553 usb3_irq_epc_pipen_lsttr(usb3, num);
1533 if (pn_int_sta & PN_INT_BFRDY) 1554 if (pn_int_sta & PN_INT_BFRDY)
@@ -1660,6 +1681,7 @@ static int usb3_disable_pipe_n(struct renesas_usb3_ep *usb3_ep)
1660 1681
1661 spin_lock_irqsave(&usb3->lock, flags); 1682 spin_lock_irqsave(&usb3->lock, flags);
1662 if (!usb3_pn_change(usb3, usb3_ep->num)) { 1683 if (!usb3_pn_change(usb3, usb3_ep->num)) {
1684 usb3_write(usb3, 0, USB3_PN_INT_ENA);
1663 usb3_write(usb3, 0, USB3_PN_RAMMAP); 1685 usb3_write(usb3, 0, USB3_PN_RAMMAP);
1664 usb3_clear_bit(usb3, PN_CON_EN, USB3_PN_CON); 1686 usb3_clear_bit(usb3, PN_CON_EN, USB3_PN_CON);
1665 } 1687 }
@@ -1799,6 +1821,9 @@ static int renesas_usb3_start(struct usb_gadget *gadget,
1799 /* hook up the driver */ 1821 /* hook up the driver */
1800 usb3->driver = driver; 1822 usb3->driver = driver;
1801 1823
1824 pm_runtime_enable(usb3_to_dev(usb3));
1825 pm_runtime_get_sync(usb3_to_dev(usb3));
1826
1802 renesas_usb3_init_controller(usb3); 1827 renesas_usb3_init_controller(usb3);
1803 1828
1804 return 0; 1829 return 0;
@@ -1807,14 +1832,14 @@ static int renesas_usb3_start(struct usb_gadget *gadget,
1807static int renesas_usb3_stop(struct usb_gadget *gadget) 1832static int renesas_usb3_stop(struct usb_gadget *gadget)
1808{ 1833{
1809 struct renesas_usb3 *usb3 = gadget_to_renesas_usb3(gadget); 1834 struct renesas_usb3 *usb3 = gadget_to_renesas_usb3(gadget);
1810 unsigned long flags;
1811 1835
1812 spin_lock_irqsave(&usb3->lock, flags);
1813 usb3->softconnect = false; 1836 usb3->softconnect = false;
1814 usb3->gadget.speed = USB_SPEED_UNKNOWN; 1837 usb3->gadget.speed = USB_SPEED_UNKNOWN;
1815 usb3->driver = NULL; 1838 usb3->driver = NULL;
1816 renesas_usb3_stop_controller(usb3); 1839 renesas_usb3_stop_controller(usb3);
1817 spin_unlock_irqrestore(&usb3->lock, flags); 1840
1841 pm_runtime_put(usb3_to_dev(usb3));
1842 pm_runtime_disable(usb3_to_dev(usb3));
1818 1843
1819 return 0; 1844 return 0;
1820} 1845}
@@ -1891,9 +1916,6 @@ static int renesas_usb3_remove(struct platform_device *pdev)
1891 1916
1892 device_remove_file(&pdev->dev, &dev_attr_role); 1917 device_remove_file(&pdev->dev, &dev_attr_role);
1893 1918
1894 pm_runtime_put(&pdev->dev);
1895 pm_runtime_disable(&pdev->dev);
1896
1897 usb_del_gadget_udc(&usb3->gadget); 1919 usb_del_gadget_udc(&usb3->gadget);
1898 1920
1899 __renesas_usb3_ep_free_request(usb3->ep0_req); 1921 __renesas_usb3_ep_free_request(usb3->ep0_req);
@@ -2099,9 +2121,6 @@ static int renesas_usb3_probe(struct platform_device *pdev)
2099 2121
2100 usb3->workaround_for_vbus = priv->workaround_for_vbus; 2122 usb3->workaround_for_vbus = priv->workaround_for_vbus;
2101 2123
2102 pm_runtime_enable(&pdev->dev);
2103 pm_runtime_get_sync(&pdev->dev);
2104
2105 dev_info(&pdev->dev, "probed\n"); 2124 dev_info(&pdev->dev, "probed\n");
2106 2125
2107 return 0; 2126 return 0;
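
Net effect of the renesas_usb3 changes: runtime-PM enablement now brackets the bound gadget driver instead of the whole platform-device lifetime, so the controller may power down while no driver is attached. A schematic of the pairing, with dev standing in for usb3_to_dev(usb3) and the function names hypothetical:

    /* Sketch: enable/get on UDC start, put/disable on UDC stop. */
    static int udc_start(struct device *dev)
    {
        pm_runtime_enable(dev);
        pm_runtime_get_sync(dev);    /* powered before touching regs */
        /* ... init controller ... */
        return 0;
    }

    static int udc_stop(struct device *dev)
    {
        /* ... stop controller ... */
        pm_runtime_put(dev);         /* allow suspend */
        pm_runtime_disable(dev);     /* balances pm_runtime_enable() */
        return 0;
    }
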
diff --git a/drivers/usb/host/ehci-platform.c b/drivers/usb/host/ehci-platform.c
index bc7b9be12f54..f1908ea9fbd8 100644
--- a/drivers/usb/host/ehci-platform.c
+++ b/drivers/usb/host/ehci-platform.c
@@ -384,8 +384,10 @@ static int ehci_platform_resume(struct device *dev)
384 } 384 }
385 385
386 companion_dev = usb_of_get_companion_dev(hcd->self.controller); 386 companion_dev = usb_of_get_companion_dev(hcd->self.controller);
387 if (companion_dev) 387 if (companion_dev) {
388 device_pm_wait_for_dev(hcd->self.controller, companion_dev); 388 device_pm_wait_for_dev(hcd->self.controller, companion_dev);
389 put_device(companion_dev);
390 }
389 391
390 ehci_resume(hcd, priv->reset_on_resume); 392 ehci_resume(hcd, priv->reset_on_resume);
391 return 0; 393 return 0;
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
index bfa7fa3d2eea..7bf78be1fd32 100644
--- a/drivers/usb/host/r8a66597-hcd.c
+++ b/drivers/usb/host/r8a66597-hcd.c
@@ -1269,7 +1269,7 @@ static void set_td_timer(struct r8a66597 *r8a66597, struct r8a66597_td *td)
1269 time = 30; 1269 time = 30;
1270 break; 1270 break;
1271 default: 1271 default:
1272 time = 300; 1272 time = 50;
1273 break; 1273 break;
1274 } 1274 }
1275 1275
@@ -1785,6 +1785,7 @@ static void r8a66597_td_timer(unsigned long _r8a66597)
1785 pipe = td->pipe; 1785 pipe = td->pipe;
1786 pipe_stop(r8a66597, pipe); 1786 pipe_stop(r8a66597, pipe);
1787 1787
1788 /* Select a different address or endpoint */
1788 new_td = td; 1789 new_td = td;
1789 do { 1790 do {
1790 list_move_tail(&new_td->queue, 1791 list_move_tail(&new_td->queue,
@@ -1794,7 +1795,8 @@ static void r8a66597_td_timer(unsigned long _r8a66597)
1794 new_td = td; 1795 new_td = td;
1795 break; 1796 break;
1796 } 1797 }
1797 } while (td != new_td && td->address == new_td->address); 1798 } while (td != new_td && td->address == new_td->address &&
1799 td->pipe->info.epnum == new_td->pipe->info.epnum);
1798 1800
1799 start_transfer(r8a66597, new_td); 1801 start_transfer(r8a66597, new_td);
1800 1802
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 5e3e9d4c6956..0dde49c35dd2 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -419,7 +419,7 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
419 wait_for_completion(cmd->completion); 419 wait_for_completion(cmd->completion);
420 420
421 if (cmd->status == COMP_COMMAND_ABORTED || 421 if (cmd->status == COMP_COMMAND_ABORTED ||
422 cmd->status == COMP_STOPPED) { 422 cmd->status == COMP_COMMAND_RING_STOPPED) {
423 xhci_warn(xhci, "Timeout while waiting for stop endpoint command\n"); 423 xhci_warn(xhci, "Timeout while waiting for stop endpoint command\n");
424 ret = -ETIME; 424 ret = -ETIME;
425 } 425 }
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index bbe22bcc550a..fddf2731f798 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -56,7 +56,7 @@ static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci,
56 } 56 }
57 57
58 if (max_packet) { 58 if (max_packet) {
59 seg->bounce_buf = kzalloc(max_packet, flags | GFP_DMA); 59 seg->bounce_buf = kzalloc(max_packet, flags);
60 if (!seg->bounce_buf) { 60 if (!seg->bounce_buf) {
61 dma_pool_free(xhci->segment_pool, seg->trbs, dma); 61 dma_pool_free(xhci->segment_pool, seg->trbs, dma);
62 kfree(seg); 62 kfree(seg);
@@ -1724,7 +1724,7 @@ static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
1724 xhci->dcbaa->dev_context_ptrs[0] = cpu_to_le64(xhci->scratchpad->sp_dma); 1724 xhci->dcbaa->dev_context_ptrs[0] = cpu_to_le64(xhci->scratchpad->sp_dma);
1725 for (i = 0; i < num_sp; i++) { 1725 for (i = 0; i < num_sp; i++) {
1726 dma_addr_t dma; 1726 dma_addr_t dma;
1727 void *buf = dma_alloc_coherent(dev, xhci->page_size, &dma, 1727 void *buf = dma_zalloc_coherent(dev, xhci->page_size, &dma,
1728 flags); 1728 flags);
1729 if (!buf) 1729 if (!buf)
1730 goto fail_sp4; 1730 goto fail_sp4;
@@ -2119,11 +2119,12 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
2119{ 2119{
2120 u32 temp, port_offset, port_count; 2120 u32 temp, port_offset, port_count;
2121 int i; 2121 int i;
2122 u8 major_revision; 2122 u8 major_revision, minor_revision;
2123 struct xhci_hub *rhub; 2123 struct xhci_hub *rhub;
2124 2124
2125 temp = readl(addr); 2125 temp = readl(addr);
2126 major_revision = XHCI_EXT_PORT_MAJOR(temp); 2126 major_revision = XHCI_EXT_PORT_MAJOR(temp);
2127 minor_revision = XHCI_EXT_PORT_MINOR(temp);
2127 2128
2128 if (major_revision == 0x03) { 2129 if (major_revision == 0x03) {
2129 rhub = &xhci->usb3_rhub; 2130 rhub = &xhci->usb3_rhub;
@@ -2137,7 +2138,9 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
2137 return; 2138 return;
2138 } 2139 }
2139 rhub->maj_rev = XHCI_EXT_PORT_MAJOR(temp); 2140 rhub->maj_rev = XHCI_EXT_PORT_MAJOR(temp);
2140 rhub->min_rev = XHCI_EXT_PORT_MINOR(temp); 2141
2142 if (rhub->min_rev < minor_revision)
2143 rhub->min_rev = minor_revision;
2141 2144
2142 /* Port offset and count in the third dword, see section 7.2 */ 2145 /* Port offset and count in the third dword, see section 7.2 */
2143 temp = readl(addr + 2); 2146 temp = readl(addr + 2);
@@ -2307,10 +2310,11 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
2307 /* Place limits on the number of roothub ports so that the hub 2310 /* Place limits on the number of roothub ports so that the hub
2308 * descriptors aren't longer than the USB core will allocate. 2311 * descriptors aren't longer than the USB core will allocate.
2309 */ 2312 */
2310 if (xhci->num_usb3_ports > 15) { 2313 if (xhci->num_usb3_ports > USB_SS_MAXPORTS) {
2311 xhci_dbg_trace(xhci, trace_xhci_dbg_init, 2314 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2312 "Limiting USB 3.0 roothub ports to 15."); 2315 "Limiting USB 3.0 roothub ports to %u.",
2313 xhci->num_usb3_ports = 15; 2316 USB_SS_MAXPORTS);
2317 xhci->num_usb3_ports = USB_SS_MAXPORTS;
2314 } 2318 }
2315 if (xhci->num_usb2_ports > USB_MAXCHILDREN) { 2319 if (xhci->num_usb2_ports > USB_MAXCHILDREN) {
2316 xhci_dbg_trace(xhci, trace_xhci_dbg_init, 2320 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 7b86508ac8cf..1bcf971141c0 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -52,6 +52,7 @@
52#define PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI 0x0aa8 52#define PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI 0x0aa8
53#define PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI 0x1aa8 53#define PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI 0x1aa8
54#define PCI_DEVICE_ID_INTEL_APL_XHCI 0x5aa8 54#define PCI_DEVICE_ID_INTEL_APL_XHCI 0x5aa8
55#define PCI_DEVICE_ID_INTEL_DNV_XHCI 0x19d0
55 56
56static const char hcd_name[] = "xhci_hcd"; 57static const char hcd_name[] = "xhci_hcd";
57 58
@@ -166,7 +167,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
166 pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI || 167 pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
167 pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI || 168 pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI ||
168 pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI || 169 pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI ||
169 pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI)) { 170 pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI ||
171 pdev->device == PCI_DEVICE_ID_INTEL_DNV_XHCI)) {
170 xhci->quirks |= XHCI_PME_STUCK_QUIRK; 172 xhci->quirks |= XHCI_PME_STUCK_QUIRK;
171 } 173 }
172 if (pdev->vendor == PCI_VENDOR_ID_INTEL && 174 if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
@@ -175,7 +177,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
175 } 177 }
176 if (pdev->vendor == PCI_VENDOR_ID_INTEL && 178 if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
177 (pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI || 179 (pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
178 pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI)) 180 pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI ||
181 pdev->device == PCI_DEVICE_ID_INTEL_DNV_XHCI))
179 xhci->quirks |= XHCI_MISSING_CAS; 182 xhci->quirks |= XHCI_MISSING_CAS;
180 183
181 if (pdev->vendor == PCI_VENDOR_ID_ETRON && 184 if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
@@ -198,6 +201,9 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
198 if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && 201 if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
199 pdev->device == 0x1042) 202 pdev->device == 0x1042)
200 xhci->quirks |= XHCI_BROKEN_STREAMS; 203 xhci->quirks |= XHCI_BROKEN_STREAMS;
204 if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
205 pdev->device == 0x1142)
206 xhci->quirks |= XHCI_TRUST_TX_LENGTH;
201 207
202 if (pdev->vendor == PCI_VENDOR_ID_TI && pdev->device == 0x8241) 208 if (pdev->vendor == PCI_VENDOR_ID_TI && pdev->device == 0x8241)
203 xhci->quirks |= XHCI_LIMIT_ENDPOINT_INTERVAL_7; 209 xhci->quirks |= XHCI_LIMIT_ENDPOINT_INTERVAL_7;
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 7c2a9e7c8e0f..c04144b25a67 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -177,7 +177,7 @@ static int xhci_plat_probe(struct platform_device *pdev)
177 177
178 irq = platform_get_irq(pdev, 0); 178 irq = platform_get_irq(pdev, 0);
179 if (irq < 0) 179 if (irq < 0)
180 return -ENODEV; 180 return irq;
181 181
182 /* 182 /*
183 * sysdev must point to a device that is known to the system firmware 183 * sysdev must point to a device that is known to the system firmware
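
Returning irq instead of -ENODEV preserves the encoded reason, most importantly -EPROBE_DEFER, so the probe is retried once the interrupt controller shows up. Sketch:

    /* Sketch: propagate platform_get_irq() errors verbatim. */
    int irq = platform_get_irq(pdev, 0);

    if (irq < 0)
        return irq;    /* may be -EPROBE_DEFER; don't flatten to -ENODEV */
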
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 74bf5c60a260..03f63f50afb6 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -323,7 +323,7 @@ static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci,
323 if (i_cmd->status != COMP_COMMAND_ABORTED) 323 if (i_cmd->status != COMP_COMMAND_ABORTED)
324 continue; 324 continue;
325 325
326 i_cmd->status = COMP_STOPPED; 326 i_cmd->status = COMP_COMMAND_RING_STOPPED;
327 327
328 xhci_dbg(xhci, "Turn aborted command %p to no-op\n", 328 xhci_dbg(xhci, "Turn aborted command %p to no-op\n",
329 i_cmd->command_trb); 329 i_cmd->command_trb);
@@ -641,8 +641,8 @@ static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
641 xhci_urb_free_priv(urb_priv); 641 xhci_urb_free_priv(urb_priv);
642 usb_hcd_unlink_urb_from_ep(hcd, urb); 642 usb_hcd_unlink_urb_from_ep(hcd, urb);
643 spin_unlock(&xhci->lock); 643 spin_unlock(&xhci->lock);
644 usb_hcd_giveback_urb(hcd, urb, status);
645 trace_xhci_urb_giveback(urb); 644 trace_xhci_urb_giveback(urb);
645 usb_hcd_giveback_urb(hcd, urb, status);
646 spin_lock(&xhci->lock); 646 spin_lock(&xhci->lock);
647} 647}
648 648
@@ -1380,7 +1380,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
1380 cmd_comp_code = GET_COMP_CODE(le32_to_cpu(event->status)); 1380 cmd_comp_code = GET_COMP_CODE(le32_to_cpu(event->status));
1381 1381
1382 /* If CMD ring stopped we own the trbs between enqueue and dequeue */ 1382 /* If CMD ring stopped we own the trbs between enqueue and dequeue */
1383 if (cmd_comp_code == COMP_STOPPED) { 1383 if (cmd_comp_code == COMP_COMMAND_RING_STOPPED) {
1384 complete_all(&xhci->cmd_ring_stop_completion); 1384 complete_all(&xhci->cmd_ring_stop_completion);
1385 return; 1385 return;
1386 } 1386 }
@@ -1436,8 +1436,8 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
1436 break; 1436 break;
1437 case TRB_CMD_NOOP: 1437 case TRB_CMD_NOOP:
1438 /* Is this an aborted command turned to NO-OP? */ 1438 /* Is this an aborted command turned to NO-OP? */
1439 if (cmd->status == COMP_STOPPED) 1439 if (cmd->status == COMP_COMMAND_RING_STOPPED)
1440 cmd_comp_code = COMP_STOPPED; 1440 cmd_comp_code = COMP_COMMAND_RING_STOPPED;
1441 break; 1441 break;
1442 case TRB_RESET_EP: 1442 case TRB_RESET_EP:
1443 WARN_ON(slot_id != TRB_TO_SLOT_ID( 1443 WARN_ON(slot_id != TRB_TO_SLOT_ID(
@@ -2677,11 +2677,12 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
2677 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 2677 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
2678 union xhci_trb *event_ring_deq; 2678 union xhci_trb *event_ring_deq;
2679 irqreturn_t ret = IRQ_NONE; 2679 irqreturn_t ret = IRQ_NONE;
2680 unsigned long flags;
2680 dma_addr_t deq; 2681 dma_addr_t deq;
2681 u64 temp_64; 2682 u64 temp_64;
2682 u32 status; 2683 u32 status;
2683 2684
2684 spin_lock(&xhci->lock); 2685 spin_lock_irqsave(&xhci->lock, flags);
2685 /* Check if the xHC generated the interrupt, or the irq is shared */ 2686 /* Check if the xHC generated the interrupt, or the irq is shared */
2686 status = readl(&xhci->op_regs->status); 2687 status = readl(&xhci->op_regs->status);
2687 if (status == ~(u32)0) { 2688 if (status == ~(u32)0) {
@@ -2707,12 +2708,9 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
2707 */ 2708 */
2708 status |= STS_EINT; 2709 status |= STS_EINT;
2709 writel(status, &xhci->op_regs->status); 2710 writel(status, &xhci->op_regs->status);
2710 /* FIXME when MSI-X is supported and there are multiple vectors */
2711 /* Clear the MSI-X event interrupt status */
2712 2711
2713 if (hcd->irq) { 2712 if (!hcd->msi_enabled) {
2714 u32 irq_pending; 2713 u32 irq_pending;
2715 /* Acknowledge the PCI interrupt */
2716 irq_pending = readl(&xhci->ir_set->irq_pending); 2714 irq_pending = readl(&xhci->ir_set->irq_pending);
2717 irq_pending |= IMAN_IP; 2715 irq_pending |= IMAN_IP;
2718 writel(irq_pending, &xhci->ir_set->irq_pending); 2716 writel(irq_pending, &xhci->ir_set->irq_pending);
@@ -2757,7 +2755,7 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
2757 ret = IRQ_HANDLED; 2755 ret = IRQ_HANDLED;
2758 2756
2759out: 2757out:
2760 spin_unlock(&xhci->lock); 2758 spin_unlock_irqrestore(&xhci->lock, flags);
2761 2759
2762 return ret; 2760 return ret;
2763} 2761}
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 2d1310220832..30f47d92a610 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -359,9 +359,10 @@ static int xhci_try_enable_msi(struct usb_hcd *hcd)
359 /* fall back to msi*/ 359 /* fall back to msi*/
360 ret = xhci_setup_msi(xhci); 360 ret = xhci_setup_msi(xhci);
361 361
362 if (!ret) 362 if (!ret) {
363 /* hcd->irq is 0, we have MSI */ 363 hcd->msi_enabled = 1;
364 return 0; 364 return 0;
365 }
365 366
366 if (!pdev->irq) { 367 if (!pdev->irq) {
367 xhci_err(xhci, "No msi-x/msi found and no IRQ in BIOS\n"); 368 xhci_err(xhci, "No msi-x/msi found and no IRQ in BIOS\n");
@@ -1763,7 +1764,7 @@ static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
1763 1764
1764 switch (*cmd_status) { 1765 switch (*cmd_status) {
1765 case COMP_COMMAND_ABORTED: 1766 case COMP_COMMAND_ABORTED:
1766 case COMP_STOPPED: 1767 case COMP_COMMAND_RING_STOPPED:
1767 xhci_warn(xhci, "Timeout while waiting for configure endpoint command\n"); 1768 xhci_warn(xhci, "Timeout while waiting for configure endpoint command\n");
1768 ret = -ETIME; 1769 ret = -ETIME;
1769 break; 1770 break;
@@ -1813,7 +1814,7 @@ static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
1813 1814
1814 switch (*cmd_status) { 1815 switch (*cmd_status) {
1815 case COMP_COMMAND_ABORTED: 1816 case COMP_COMMAND_ABORTED:
1816 case COMP_STOPPED: 1817 case COMP_COMMAND_RING_STOPPED:
1817 xhci_warn(xhci, "Timeout while waiting for evaluate context command\n"); 1818 xhci_warn(xhci, "Timeout while waiting for evaluate context command\n");
1818 ret = -ETIME; 1819 ret = -ETIME;
1819 break; 1820 break;
@@ -3432,7 +3433,7 @@ static int xhci_discover_or_reset_device(struct usb_hcd *hcd,
3432 ret = reset_device_cmd->status; 3433 ret = reset_device_cmd->status;
3433 switch (ret) { 3434 switch (ret) {
3434 case COMP_COMMAND_ABORTED: 3435 case COMP_COMMAND_ABORTED:
3435 case COMP_STOPPED: 3436 case COMP_COMMAND_RING_STOPPED:
3436 xhci_warn(xhci, "Timeout waiting for reset device command\n"); 3437 xhci_warn(xhci, "Timeout waiting for reset device command\n");
3437 ret = -ETIME; 3438 ret = -ETIME;
3438 goto command_cleanup; 3439 goto command_cleanup;
@@ -3817,7 +3818,7 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
3817 */ 3818 */
3818 switch (command->status) { 3819 switch (command->status) {
3819 case COMP_COMMAND_ABORTED: 3820 case COMP_COMMAND_ABORTED:
3820 case COMP_STOPPED: 3821 case COMP_COMMAND_RING_STOPPED:
3821 xhci_warn(xhci, "Timeout while waiting for setup device command\n"); 3822 xhci_warn(xhci, "Timeout while waiting for setup device command\n");
3822 ret = -ETIME; 3823 ret = -ETIME;
3823 break; 3824 break;
diff --git a/drivers/usb/misc/chaoskey.c b/drivers/usb/misc/chaoskey.c
index e9cae4d82af2..15d4e64d3b65 100644
--- a/drivers/usb/misc/chaoskey.c
+++ b/drivers/usb/misc/chaoskey.c
@@ -192,7 +192,7 @@ static int chaoskey_probe(struct usb_interface *interface,
192 192
193 dev->in_ep = in_ep; 193 dev->in_ep = in_ep;
194 194
195 if (udev->descriptor.idVendor != ALEA_VENDOR_ID) 195 if (le16_to_cpu(udev->descriptor.idVendor) != ALEA_VENDOR_ID)
196 dev->reads_started = 1; 196 dev->reads_started = 1;
197 197
198 dev->size = size; 198 dev->size = size;
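
Descriptor fields such as idVendor are little-endian on the wire (__le16 in struct usb_device_descriptor); comparing them raw happens to work on x86 but fails on big-endian CPUs. Sketch:

    /* Sketch: always byte-swap wire-order descriptor fields before
     * comparing against host-order constants.
     */
    u16 vid = le16_to_cpu(udev->descriptor.idVendor);

    if (vid != ALEA_VENDOR_ID)
        dev->reads_started = 1;
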
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
index 77569531b78a..83b05a287b0c 100644
--- a/drivers/usb/misc/iowarrior.c
+++ b/drivers/usb/misc/iowarrior.c
@@ -554,7 +554,7 @@ static long iowarrior_ioctl(struct file *file, unsigned int cmd,
554 info.revision = le16_to_cpu(dev->udev->descriptor.bcdDevice); 554 info.revision = le16_to_cpu(dev->udev->descriptor.bcdDevice);
555 555
556 /* 0==UNKNOWN, 1==LOW(usb1.1) ,2=FULL(usb1.1), 3=HIGH(usb2.0) */ 556 /* 0==UNKNOWN, 1==LOW(usb1.1) ,2=FULL(usb1.1), 3=HIGH(usb2.0) */
557 info.speed = le16_to_cpu(dev->udev->speed); 557 info.speed = dev->udev->speed;
558 info.if_num = dev->interface->cur_altsetting->desc.bInterfaceNumber; 558 info.if_num = dev->interface->cur_altsetting->desc.bInterfaceNumber;
559 info.report_size = dev->report_size; 559 info.report_size = dev->report_size;
560 560
diff --git a/drivers/usb/misc/legousbtower.c b/drivers/usb/misc/legousbtower.c
index aa3c280fdf8d..0782ac6f5edf 100644
--- a/drivers/usb/misc/legousbtower.c
+++ b/drivers/usb/misc/legousbtower.c
@@ -926,6 +926,7 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device
926 USB_MAJOR, dev->minor); 926 USB_MAJOR, dev->minor);
927 927
928exit: 928exit:
929 kfree(get_version_reply);
929 return retval; 930 return retval;
930 931
 931error: 932error:
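
The kfree() at the common exit label plugs a leak of the temporary reply buffer; since kfree(NULL) is defined as a no-op, the label stays safe for paths that never allocated it. A self-contained sketch of the pattern (function and sizes hypothetical):

    static int probe_like(void)
    {
        u8 *reply = kzalloc(64, GFP_KERNEL);
        int retval = reply ? 0 : -ENOMEM;

        /* ... use reply on the success path ... */
        kfree(reply);    /* kfree(NULL) is a no-op */
        return retval;
    }
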
diff --git a/drivers/usb/misc/sisusbvga/sisusb_con.c b/drivers/usb/misc/sisusbvga/sisusb_con.c
index 3c6948af726a..f019d80ca9e4 100644
--- a/drivers/usb/misc/sisusbvga/sisusb_con.c
+++ b/drivers/usb/misc/sisusbvga/sisusb_con.c
@@ -973,7 +973,7 @@ sisusbcon_set_origin(struct vc_data *c)
973 973
974 mutex_unlock(&sisusb->lock); 974 mutex_unlock(&sisusb->lock);
975 975
976 return 1; 976 return true;
977} 977}
978 978
979/* Interface routine */ 979/* Interface routine */
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
index 9c7ee26ef388..bc6a9be2ccc5 100644
--- a/drivers/usb/musb/musb_dsps.c
+++ b/drivers/usb/musb/musb_dsps.c
@@ -245,6 +245,11 @@ static int dsps_check_status(struct musb *musb, void *unused)
245 dsps_mod_timer_optional(glue); 245 dsps_mod_timer_optional(glue);
246 break; 246 break;
247 case OTG_STATE_A_WAIT_BCON: 247 case OTG_STATE_A_WAIT_BCON:
248 /* keep VBUS on for host-only mode */
249 if (musb->port_mode == MUSB_PORT_MODE_HOST) {
250 dsps_mod_timer_optional(glue);
251 break;
252 }
248 musb_writeb(musb->mregs, MUSB_DEVCTL, 0); 253 musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
249 skip_session = 1; 254 skip_session = 1;
250 /* fall */ 255 /* fall */
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index ac3a4952abb4..dbe617a735d8 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -2780,10 +2780,11 @@ int musb_host_setup(struct musb *musb, int power_budget)
2780 int ret; 2780 int ret;
2781 struct usb_hcd *hcd = musb->hcd; 2781 struct usb_hcd *hcd = musb->hcd;
2782 2782
2783 MUSB_HST_MODE(musb); 2783 if (musb->port_mode == MUSB_PORT_MODE_HOST) {
2784 musb->xceiv->otg->default_a = 1; 2784 MUSB_HST_MODE(musb);
2785 musb->xceiv->otg->state = OTG_STATE_A_IDLE; 2785 musb->xceiv->otg->default_a = 1;
2786 2786 musb->xceiv->otg->state = OTG_STATE_A_IDLE;
2787 }
2787 otg_set_host(musb->xceiv->otg, &hcd->self); 2788 otg_set_host(musb->xceiv->otg, &hcd->self);
2788 hcd->self.otg_port = 1; 2789 hcd->self.otg_port = 1;
2789 musb->xceiv->otg->host = &hcd->self; 2790 musb->xceiv->otg->host = &hcd->self;
diff --git a/drivers/usb/musb/tusb6010_omap.c b/drivers/usb/musb/tusb6010_omap.c
index 8b43c4b99f04..7870b37e0ea5 100644
--- a/drivers/usb/musb/tusb6010_omap.c
+++ b/drivers/usb/musb/tusb6010_omap.c
@@ -219,6 +219,7 @@ static int tusb_omap_dma_program(struct dma_channel *channel, u16 packet_sz,
219 u32 dma_remaining; 219 u32 dma_remaining;
220 int src_burst, dst_burst; 220 int src_burst, dst_burst;
221 u16 csr; 221 u16 csr;
222 u32 psize;
222 int ch; 223 int ch;
223 s8 dmareq; 224 s8 dmareq;
224 s8 sync_dev; 225 s8 sync_dev;
@@ -390,15 +391,19 @@ static int tusb_omap_dma_program(struct dma_channel *channel, u16 packet_sz,
390 391
391 if (chdat->tx) { 392 if (chdat->tx) {
392 /* Send transfer_packet_sz packets at a time */ 393 /* Send transfer_packet_sz packets at a time */
393 musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET, 394 psize = musb_readl(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET);
394 chdat->transfer_packet_sz); 395 psize &= ~0x7ff;
396 psize |= chdat->transfer_packet_sz;
397 musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET, psize);
395 398
396 musb_writel(ep_conf, TUSB_EP_TX_OFFSET, 399 musb_writel(ep_conf, TUSB_EP_TX_OFFSET,
397 TUSB_EP_CONFIG_XFR_SIZE(chdat->transfer_len)); 400 TUSB_EP_CONFIG_XFR_SIZE(chdat->transfer_len));
398 } else { 401 } else {
399 /* Receive transfer_packet_sz packets at a time */ 402 /* Receive transfer_packet_sz packets at a time */
400 musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET, 403 psize = musb_readl(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET);
401 chdat->transfer_packet_sz << 16); 404 psize &= ~(0x7ff << 16);
405 psize |= (chdat->transfer_packet_sz << 16);
406 musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET, psize);
402 407
403 musb_writel(ep_conf, TUSB_EP_RX_OFFSET, 408 musb_writel(ep_conf, TUSB_EP_RX_OFFSET,
404 TUSB_EP_CONFIG_XFR_SIZE(chdat->transfer_len)); 409 TUSB_EP_CONFIG_XFR_SIZE(chdat->transfer_len));
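
Per the masks above, TUSB_EP_MAX_PACKET_SIZE holds the TX packet size in bits 0-10 and the RX packet size in bits 16-26, so the old blind musb_writel() for one direction wiped out the other. The fix is a plain read-modify-write; sketch for the TX case:

    /* Sketch: update only the TX field of the shared register. */
    u32 psize = musb_readl(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET);

    psize &= ~0x7ff;                        /* clear TX size field */
    psize |= chdat->transfer_packet_sz;     /* bits 0-10 */
    musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET, psize);
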
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index d38780fa8788..aba74f817dc6 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -809,10 +809,10 @@ static const struct usb_device_id id_table_combined[] = {
809 { USB_DEVICE(FTDI_VID, FTDI_PROPOX_ISPCABLEIII_PID) }, 809 { USB_DEVICE(FTDI_VID, FTDI_PROPOX_ISPCABLEIII_PID) },
810 { USB_DEVICE(FTDI_VID, CYBER_CORTEX_AV_PID), 810 { USB_DEVICE(FTDI_VID, CYBER_CORTEX_AV_PID),
811 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, 811 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
812 { USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_PID), 812 { USB_DEVICE_INTERFACE_NUMBER(OLIMEX_VID, OLIMEX_ARM_USB_OCD_PID, 1) },
813 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, 813 { USB_DEVICE_INTERFACE_NUMBER(OLIMEX_VID, OLIMEX_ARM_USB_OCD_H_PID, 1) },
814 { USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_H_PID), 814 { USB_DEVICE_INTERFACE_NUMBER(OLIMEX_VID, OLIMEX_ARM_USB_TINY_PID, 1) },
815 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, 815 { USB_DEVICE_INTERFACE_NUMBER(OLIMEX_VID, OLIMEX_ARM_USB_TINY_H_PID, 1) },
816 { USB_DEVICE(FIC_VID, FIC_NEO1973_DEBUG_PID), 816 { USB_DEVICE(FIC_VID, FIC_NEO1973_DEBUG_PID),
817 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, 817 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
818 { USB_DEVICE(FTDI_VID, FTDI_OOCDLINK_PID), 818 { USB_DEVICE(FTDI_VID, FTDI_OOCDLINK_PID),
@@ -1527,9 +1527,9 @@ static int set_serial_info(struct tty_struct *tty,
1527 (new_serial.flags & ASYNC_FLAGS)); 1527 (new_serial.flags & ASYNC_FLAGS));
1528 priv->custom_divisor = new_serial.custom_divisor; 1528 priv->custom_divisor = new_serial.custom_divisor;
1529 1529
1530check_and_exit:
1530 write_latency_timer(port); 1531 write_latency_timer(port);
1531 1532
1532check_and_exit:
1533 if ((old_priv.flags & ASYNC_SPD_MASK) != 1533 if ((old_priv.flags & ASYNC_SPD_MASK) !=
1534 (priv->flags & ASYNC_SPD_MASK)) { 1534 (priv->flags & ASYNC_SPD_MASK)) {
1535 if ((priv->flags & ASYNC_SPD_MASK) == ASYNC_SPD_HI) 1535 if ((priv->flags & ASYNC_SPD_MASK) == ASYNC_SPD_HI)
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 71fb9e59db71..4fcf1cecb6d7 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -882,6 +882,8 @@
882/* Olimex */ 882/* Olimex */
883#define OLIMEX_VID 0x15BA 883#define OLIMEX_VID 0x15BA
884#define OLIMEX_ARM_USB_OCD_PID 0x0003 884#define OLIMEX_ARM_USB_OCD_PID 0x0003
885#define OLIMEX_ARM_USB_TINY_PID 0x0004
886#define OLIMEX_ARM_USB_TINY_H_PID 0x002a
885#define OLIMEX_ARM_USB_OCD_H_PID 0x002b 887#define OLIMEX_ARM_USB_OCD_H_PID 0x002b
886 888
887/* 889/*
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
index 87798e625d6c..6cefb9cb133d 100644
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -2336,8 +2336,11 @@ static void change_port_settings(struct tty_struct *tty,
2336 if (!baud) { 2336 if (!baud) {
2337 /* pick a default, any default... */ 2337 /* pick a default, any default... */
2338 baud = 9600; 2338 baud = 9600;
2339 } else 2339 } else {
2340 /* Avoid a zero divisor. */
2341 baud = min(baud, 461550);
2340 tty_encode_baud_rate(tty, baud, baud); 2342 tty_encode_baud_rate(tty, baud, baud);
2343 }
2341 2344
2342 edge_port->baud_rate = baud; 2345 edge_port->baud_rate = baud;
2343 config->wBaudRate = (__u16)((461550L + baud/2) / baud); 2346 config->wBaudRate = (__u16)((461550L + baud/2) / baud);
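
The clamp exists because wBaudRate is computed as a rounded divisor of the 461550 base rate; once baud is large enough, integer division yields zero. Worked through:

    /* Sketch of the failure mode the clamp prevents:
     *
     *   wBaudRate = (461550 + baud/2) / baud    (round to nearest)
     *
     *   baud = 1000000:  (461550 + 500000) / 1000000 == 0   -> invalid
     *   baud =  461550:  (461550 + 230775) /  461550 == 1   -> ok
     */
    baud = min(baud, 461550);
    config->wBaudRate = (__u16)((461550L + baud / 2) / baud);
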
diff --git a/drivers/usb/serial/ir-usb.c b/drivers/usb/serial/ir-usb.c
index 73956d48a0c5..f9734a96d516 100644
--- a/drivers/usb/serial/ir-usb.c
+++ b/drivers/usb/serial/ir-usb.c
@@ -197,6 +197,7 @@ static u8 ir_xbof_change(u8 xbof)
197static int ir_startup(struct usb_serial *serial) 197static int ir_startup(struct usb_serial *serial)
198{ 198{
199 struct usb_irda_cs_descriptor *irda_desc; 199 struct usb_irda_cs_descriptor *irda_desc;
200 int rates;
200 201
201 irda_desc = irda_usb_find_class_desc(serial, 0); 202 irda_desc = irda_usb_find_class_desc(serial, 0);
202 if (!irda_desc) { 203 if (!irda_desc) {
@@ -205,18 +206,20 @@ static int ir_startup(struct usb_serial *serial)
205 return -ENODEV; 206 return -ENODEV;
206 } 207 }
207 208
209 rates = le16_to_cpu(irda_desc->wBaudRate);
210
208 dev_dbg(&serial->dev->dev, 211 dev_dbg(&serial->dev->dev,
209 "%s - Baud rates supported:%s%s%s%s%s%s%s%s%s\n", 212 "%s - Baud rates supported:%s%s%s%s%s%s%s%s%s\n",
210 __func__, 213 __func__,
211 (irda_desc->wBaudRate & USB_IRDA_BR_2400) ? " 2400" : "", 214 (rates & USB_IRDA_BR_2400) ? " 2400" : "",
212 (irda_desc->wBaudRate & USB_IRDA_BR_9600) ? " 9600" : "", 215 (rates & USB_IRDA_BR_9600) ? " 9600" : "",
213 (irda_desc->wBaudRate & USB_IRDA_BR_19200) ? " 19200" : "", 216 (rates & USB_IRDA_BR_19200) ? " 19200" : "",
214 (irda_desc->wBaudRate & USB_IRDA_BR_38400) ? " 38400" : "", 217 (rates & USB_IRDA_BR_38400) ? " 38400" : "",
215 (irda_desc->wBaudRate & USB_IRDA_BR_57600) ? " 57600" : "", 218 (rates & USB_IRDA_BR_57600) ? " 57600" : "",
216 (irda_desc->wBaudRate & USB_IRDA_BR_115200) ? " 115200" : "", 219 (rates & USB_IRDA_BR_115200) ? " 115200" : "",
217 (irda_desc->wBaudRate & USB_IRDA_BR_576000) ? " 576000" : "", 220 (rates & USB_IRDA_BR_576000) ? " 576000" : "",
218 (irda_desc->wBaudRate & USB_IRDA_BR_1152000) ? " 1152000" : "", 221 (rates & USB_IRDA_BR_1152000) ? " 1152000" : "",
219 (irda_desc->wBaudRate & USB_IRDA_BR_4000000) ? " 4000000" : ""); 222 (rates & USB_IRDA_BR_4000000) ? " 4000000" : "");
220 223
221 switch (irda_desc->bmAdditionalBOFs) { 224 switch (irda_desc->bmAdditionalBOFs) {
222 case USB_IRDA_AB_48: 225 case USB_IRDA_AB_48:
diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c
index edbc81f205c2..70f346f1aa86 100644
--- a/drivers/usb/serial/mct_u232.c
+++ b/drivers/usb/serial/mct_u232.c
@@ -189,7 +189,7 @@ static int mct_u232_set_baud_rate(struct tty_struct *tty,
189 return -ENOMEM; 189 return -ENOMEM;
190 190
191 divisor = mct_u232_calculate_baud_rate(serial, value, &speed); 191 divisor = mct_u232_calculate_baud_rate(serial, value, &speed);
192 put_unaligned_le32(cpu_to_le32(divisor), buf); 192 put_unaligned_le32(divisor, buf);
193 rc = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), 193 rc = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
194 MCT_U232_SET_BAUD_RATE_REQUEST, 194 MCT_U232_SET_BAUD_RATE_REQUEST,
195 MCT_U232_SET_REQUEST_TYPE, 195 MCT_U232_SET_REQUEST_TYPE,
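
put_unaligned_le32() already takes a CPU-order value and stores it little-endian, so the removed cpu_to_le32() was a double conversion that corrupted the divisor on big-endian hosts. Sketch:

    /* Sketch: convert exactly once. */
    put_unaligned_le32(divisor, buf);                 /* correct */
    /* put_unaligned_le32(cpu_to_le32(divisor), buf); double-swap on BE */
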
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index af67a0de6b5d..3bf61acfc26b 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -281,6 +281,7 @@ static void option_instat_callback(struct urb *urb);
281#define TELIT_PRODUCT_LE922_USBCFG0 0x1042 281#define TELIT_PRODUCT_LE922_USBCFG0 0x1042
282#define TELIT_PRODUCT_LE922_USBCFG3 0x1043 282#define TELIT_PRODUCT_LE922_USBCFG3 0x1043
283#define TELIT_PRODUCT_LE922_USBCFG5 0x1045 283#define TELIT_PRODUCT_LE922_USBCFG5 0x1045
284#define TELIT_PRODUCT_ME910 0x1100
284#define TELIT_PRODUCT_LE920 0x1200 285#define TELIT_PRODUCT_LE920 0x1200
285#define TELIT_PRODUCT_LE910 0x1201 286#define TELIT_PRODUCT_LE910 0x1201
286#define TELIT_PRODUCT_LE910_USBCFG4 0x1206 287#define TELIT_PRODUCT_LE910_USBCFG4 0x1206
@@ -640,6 +641,11 @@ static const struct option_blacklist_info simcom_sim7100e_blacklist = {
640 .reserved = BIT(5) | BIT(6), 641 .reserved = BIT(5) | BIT(6),
641}; 642};
642 643
644static const struct option_blacklist_info telit_me910_blacklist = {
645 .sendsetup = BIT(0),
646 .reserved = BIT(1) | BIT(3),
647};
648
643static const struct option_blacklist_info telit_le910_blacklist = { 649static const struct option_blacklist_info telit_le910_blacklist = {
644 .sendsetup = BIT(0), 650 .sendsetup = BIT(0),
645 .reserved = BIT(1) | BIT(2), 651 .reserved = BIT(1) | BIT(2),
@@ -1235,6 +1241,8 @@ static const struct usb_device_id option_ids[] = {
1235 .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 }, 1241 .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
1236 { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG5, 0xff), 1242 { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG5, 0xff),
1237 .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 }, 1243 .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
1244 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
1245 .driver_info = (kernel_ulong_t)&telit_me910_blacklist },
1238 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910), 1246 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
1239 .driver_info = (kernel_ulong_t)&telit_le910_blacklist }, 1247 .driver_info = (kernel_ulong_t)&telit_le910_blacklist },
1240 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4), 1248 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4),
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 38b3f0d8cd58..fd509ed6cf70 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -162,6 +162,8 @@ static const struct usb_device_id id_table[] = {
162 {DEVICE_SWI(0x1199, 0x9071)}, /* Sierra Wireless MC74xx */ 162 {DEVICE_SWI(0x1199, 0x9071)}, /* Sierra Wireless MC74xx */
163 {DEVICE_SWI(0x1199, 0x9078)}, /* Sierra Wireless EM74xx */ 163 {DEVICE_SWI(0x1199, 0x9078)}, /* Sierra Wireless EM74xx */
164 {DEVICE_SWI(0x1199, 0x9079)}, /* Sierra Wireless EM74xx */ 164 {DEVICE_SWI(0x1199, 0x9079)}, /* Sierra Wireless EM74xx */
165 {DEVICE_SWI(0x1199, 0x907a)}, /* Sierra Wireless EM74xx QDL */
166 {DEVICE_SWI(0x1199, 0x907b)}, /* Sierra Wireless EM74xx */
165 {DEVICE_SWI(0x413c, 0x81a2)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */ 167 {DEVICE_SWI(0x413c, 0x81a2)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */
166 {DEVICE_SWI(0x413c, 0x81a3)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */ 168 {DEVICE_SWI(0x413c, 0x81a3)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */
167 {DEVICE_SWI(0x413c, 0x81a4)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */ 169 {DEVICE_SWI(0x413c, 0x81a4)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
diff --git a/drivers/usb/storage/ene_ub6250.c b/drivers/usb/storage/ene_ub6250.c
index 369f3c24815a..44af719194b2 100644
--- a/drivers/usb/storage/ene_ub6250.c
+++ b/drivers/usb/storage/ene_ub6250.c
@@ -446,6 +446,10 @@ struct ms_lib_ctrl {
446#define SD_BLOCK_LEN 9 446#define SD_BLOCK_LEN 9
447 447
448struct ene_ub6250_info { 448struct ene_ub6250_info {
449
450 /* I/O bounce buffer */
451 u8 *bbuf;
452
449 /* for 6250 code */ 453 /* for 6250 code */
450 struct SD_STATUS SD_Status; 454 struct SD_STATUS SD_Status;
451 struct MS_STATUS MS_Status; 455 struct MS_STATUS MS_Status;
@@ -493,8 +497,11 @@ static int ene_load_bincode(struct us_data *us, unsigned char flag);
493 497
494static void ene_ub6250_info_destructor(void *extra) 498static void ene_ub6250_info_destructor(void *extra)
495{ 499{
500 struct ene_ub6250_info *info = (struct ene_ub6250_info *) extra;
501
496 if (!extra) 502 if (!extra)
497 return; 503 return;
504 kfree(info->bbuf);
498} 505}
499 506
500static int ene_send_scsi_cmd(struct us_data *us, u8 fDir, void *buf, int use_sg) 507static int ene_send_scsi_cmd(struct us_data *us, u8 fDir, void *buf, int use_sg)
@@ -860,8 +867,9 @@ static int ms_read_readpage(struct us_data *us, u32 PhyBlockAddr,
860 u8 PageNum, u32 *PageBuf, struct ms_lib_type_extdat *ExtraDat) 867 u8 PageNum, u32 *PageBuf, struct ms_lib_type_extdat *ExtraDat)
861{ 868{
862 struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf; 869 struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
870 struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
871 u8 *bbuf = info->bbuf;
863 int result; 872 int result;
864 u8 ExtBuf[4];
865 u32 bn = PhyBlockAddr * 0x20 + PageNum; 873 u32 bn = PhyBlockAddr * 0x20 + PageNum;
866 874
867 result = ene_load_bincode(us, MS_RW_PATTERN); 875 result = ene_load_bincode(us, MS_RW_PATTERN);
@@ -901,7 +909,7 @@ static int ms_read_readpage(struct us_data *us, u32 PhyBlockAddr,
901 bcb->CDB[2] = (unsigned char)(PhyBlockAddr>>16); 909 bcb->CDB[2] = (unsigned char)(PhyBlockAddr>>16);
902 bcb->CDB[6] = 0x01; 910 bcb->CDB[6] = 0x01;
903 911
904 result = ene_send_scsi_cmd(us, FDIR_READ, &ExtBuf, 0); 912 result = ene_send_scsi_cmd(us, FDIR_READ, bbuf, 0);
905 if (result != USB_STOR_XFER_GOOD) 913 if (result != USB_STOR_XFER_GOOD)
906 return USB_STOR_TRANSPORT_ERROR; 914 return USB_STOR_TRANSPORT_ERROR;
907 915
@@ -910,9 +918,9 @@ static int ms_read_readpage(struct us_data *us, u32 PhyBlockAddr,
910 ExtraDat->status0 = 0x10; /* Not yet,fireware support */ 918 ExtraDat->status0 = 0x10; /* Not yet,fireware support */
911 919
912 ExtraDat->status1 = 0x00; /* Not yet,fireware support */ 920 ExtraDat->status1 = 0x00; /* Not yet,fireware support */
913 ExtraDat->ovrflg = ExtBuf[0]; 921 ExtraDat->ovrflg = bbuf[0];
914 ExtraDat->mngflg = ExtBuf[1]; 922 ExtraDat->mngflg = bbuf[1];
915 ExtraDat->logadr = memstick_logaddr(ExtBuf[2], ExtBuf[3]); 923 ExtraDat->logadr = memstick_logaddr(bbuf[2], bbuf[3]);
916 924
917 return USB_STOR_TRANSPORT_GOOD; 925 return USB_STOR_TRANSPORT_GOOD;
918} 926}
@@ -1332,8 +1340,9 @@ static int ms_lib_read_extra(struct us_data *us, u32 PhyBlock,
1332 u8 PageNum, struct ms_lib_type_extdat *ExtraDat) 1340 u8 PageNum, struct ms_lib_type_extdat *ExtraDat)
1333{ 1341{
1334 struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf; 1342 struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
1343 struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
1344 u8 *bbuf = info->bbuf;
1335 int result; 1345 int result;
1336 u8 ExtBuf[4];
1337 1346
1338 memset(bcb, 0, sizeof(struct bulk_cb_wrap)); 1347 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
1339 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 1348 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
@@ -1347,7 +1356,7 @@ static int ms_lib_read_extra(struct us_data *us, u32 PhyBlock,
1347 bcb->CDB[2] = (unsigned char)(PhyBlock>>16); 1356 bcb->CDB[2] = (unsigned char)(PhyBlock>>16);
1348 bcb->CDB[6] = 0x01; 1357 bcb->CDB[6] = 0x01;
1349 1358
1350 result = ene_send_scsi_cmd(us, FDIR_READ, &ExtBuf, 0); 1359 result = ene_send_scsi_cmd(us, FDIR_READ, bbuf, 0);
1351 if (result != USB_STOR_XFER_GOOD) 1360 if (result != USB_STOR_XFER_GOOD)
1352 return USB_STOR_TRANSPORT_ERROR; 1361 return USB_STOR_TRANSPORT_ERROR;
1353 1362
@@ -1355,9 +1364,9 @@ static int ms_lib_read_extra(struct us_data *us, u32 PhyBlock,
1355 ExtraDat->intr = 0x80; /* Not yet, waiting for fireware support */ 1364 ExtraDat->intr = 0x80; /* Not yet, waiting for fireware support */
1356 ExtraDat->status0 = 0x10; /* Not yet, waiting for fireware support */ 1365 ExtraDat->status0 = 0x10; /* Not yet, waiting for fireware support */
1357 ExtraDat->status1 = 0x00; /* Not yet, waiting for fireware support */ 1366 ExtraDat->status1 = 0x00; /* Not yet, waiting for fireware support */
1358 ExtraDat->ovrflg = ExtBuf[0]; 1367 ExtraDat->ovrflg = bbuf[0];
1359 ExtraDat->mngflg = ExtBuf[1]; 1368 ExtraDat->mngflg = bbuf[1];
1360 ExtraDat->logadr = memstick_logaddr(ExtBuf[2], ExtBuf[3]); 1369 ExtraDat->logadr = memstick_logaddr(bbuf[2], bbuf[3]);
1361 1370
1362 return USB_STOR_TRANSPORT_GOOD; 1371 return USB_STOR_TRANSPORT_GOOD;
1363} 1372}
@@ -1556,9 +1565,9 @@ static int ms_lib_scan_logicalblocknumber(struct us_data *us, u16 btBlk1st)
1556 u16 PhyBlock, newblk, i; 1565 u16 PhyBlock, newblk, i;
1557 u16 LogStart, LogEnde; 1566 u16 LogStart, LogEnde;
1558 struct ms_lib_type_extdat extdat; 1567 struct ms_lib_type_extdat extdat;
1559 u8 buf[0x200];
1560 u32 count = 0, index = 0; 1568 u32 count = 0, index = 0;
1561 struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra; 1569 struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
1570 u8 *bbuf = info->bbuf;
1562 1571
1563 for (PhyBlock = 0; PhyBlock < info->MS_Lib.NumberOfPhyBlock;) { 1572 for (PhyBlock = 0; PhyBlock < info->MS_Lib.NumberOfPhyBlock;) {
1564 ms_lib_phy_to_log_range(PhyBlock, &LogStart, &LogEnde); 1573 ms_lib_phy_to_log_range(PhyBlock, &LogStart, &LogEnde);
@@ -1572,14 +1581,16 @@ static int ms_lib_scan_logicalblocknumber(struct us_data *us, u16 btBlk1st)
1572 } 1581 }
1573 1582
1574 if (count == PhyBlock) { 1583 if (count == PhyBlock) {
1575 ms_lib_read_extrablock(us, PhyBlock, 0, 0x80, &buf); 1584 ms_lib_read_extrablock(us, PhyBlock, 0, 0x80,
1585 bbuf);
1576 count += 0x80; 1586 count += 0x80;
1577 } 1587 }
1578 index = (PhyBlock % 0x80) * 4; 1588 index = (PhyBlock % 0x80) * 4;
1579 1589
1580 extdat.ovrflg = buf[index]; 1590 extdat.ovrflg = bbuf[index];
1581 extdat.mngflg = buf[index+1]; 1591 extdat.mngflg = bbuf[index+1];
1582 extdat.logadr = memstick_logaddr(buf[index+2], buf[index+3]); 1592 extdat.logadr = memstick_logaddr(bbuf[index+2],
1593 bbuf[index+3]);
1583 1594
1584 if ((extdat.ovrflg & MS_REG_OVR_BKST) != MS_REG_OVR_BKST_OK) { 1595 if ((extdat.ovrflg & MS_REG_OVR_BKST) != MS_REG_OVR_BKST_OK) {
1585 ms_lib_setacquired_errorblock(us, PhyBlock); 1596 ms_lib_setacquired_errorblock(us, PhyBlock);
@@ -2062,9 +2073,9 @@ static int ene_ms_init(struct us_data *us)
2062{ 2073{
2063 struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf; 2074 struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
2064 int result; 2075 int result;
2065 u8 buf[0x200];
2066 u16 MSP_BlockSize, MSP_UserAreaBlocks; 2076 u16 MSP_BlockSize, MSP_UserAreaBlocks;
2067 struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra; 2077 struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
2078 u8 *bbuf = info->bbuf;
2068 2079
2069 printk(KERN_INFO "transport --- ENE_MSInit\n"); 2080 printk(KERN_INFO "transport --- ENE_MSInit\n");
2070 2081
@@ -2083,13 +2094,13 @@ static int ene_ms_init(struct us_data *us)
2083 bcb->CDB[0] = 0xF1; 2094 bcb->CDB[0] = 0xF1;
2084 bcb->CDB[1] = 0x01; 2095 bcb->CDB[1] = 0x01;
2085 2096
2086 result = ene_send_scsi_cmd(us, FDIR_READ, &buf, 0); 2097 result = ene_send_scsi_cmd(us, FDIR_READ, bbuf, 0);
2087 if (result != USB_STOR_XFER_GOOD) { 2098 if (result != USB_STOR_XFER_GOOD) {
2088 printk(KERN_ERR "Execution MS Init Code Fail !!\n"); 2099 printk(KERN_ERR "Execution MS Init Code Fail !!\n");
2089 return USB_STOR_TRANSPORT_ERROR; 2100 return USB_STOR_TRANSPORT_ERROR;
2090 } 2101 }
2091 /* the same part to test ENE */ 2102 /* the same part to test ENE */
2092 info->MS_Status = *(struct MS_STATUS *)&buf[0]; 2103 info->MS_Status = *(struct MS_STATUS *) bbuf;
2093 2104
2094 if (info->MS_Status.Insert && info->MS_Status.Ready) { 2105 if (info->MS_Status.Insert && info->MS_Status.Ready) {
2095 printk(KERN_INFO "Insert = %x\n", info->MS_Status.Insert); 2106 printk(KERN_INFO "Insert = %x\n", info->MS_Status.Insert);
@@ -2098,15 +2109,15 @@ static int ene_ms_init(struct us_data *us)
2098 printk(KERN_INFO "IsMSPHG = %x\n", info->MS_Status.IsMSPHG); 2109 printk(KERN_INFO "IsMSPHG = %x\n", info->MS_Status.IsMSPHG);
2099 printk(KERN_INFO "WtP= %x\n", info->MS_Status.WtP); 2110 printk(KERN_INFO "WtP= %x\n", info->MS_Status.WtP);
2100 if (info->MS_Status.IsMSPro) { 2111 if (info->MS_Status.IsMSPro) {
2101 MSP_BlockSize = (buf[6] << 8) | buf[7]; 2112 MSP_BlockSize = (bbuf[6] << 8) | bbuf[7];
2102 MSP_UserAreaBlocks = (buf[10] << 8) | buf[11]; 2113 MSP_UserAreaBlocks = (bbuf[10] << 8) | bbuf[11];
2103 info->MSP_TotalBlock = MSP_BlockSize * MSP_UserAreaBlocks; 2114 info->MSP_TotalBlock = MSP_BlockSize * MSP_UserAreaBlocks;
2104 } else { 2115 } else {
2105 ms_card_init(us); /* Card is MS (to ms.c)*/ 2116 ms_card_init(us); /* Card is MS (to ms.c)*/
2106 } 2117 }
2107 usb_stor_dbg(us, "MS Init Code OK !!\n"); 2118 usb_stor_dbg(us, "MS Init Code OK !!\n");
2108 } else { 2119 } else {
2109 usb_stor_dbg(us, "MS Card Not Ready --- %x\n", buf[0]); 2120 usb_stor_dbg(us, "MS Card Not Ready --- %x\n", bbuf[0]);
2110 return USB_STOR_TRANSPORT_ERROR; 2121 return USB_STOR_TRANSPORT_ERROR;
2111 } 2122 }
2112 2123
@@ -2116,9 +2127,9 @@ static int ene_ms_init(struct us_data *us)
2116static int ene_sd_init(struct us_data *us) 2127static int ene_sd_init(struct us_data *us)
2117{ 2128{
2118 int result; 2129 int result;
2119 u8 buf[0x200];
2120 struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf; 2130 struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
2121 struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra; 2131 struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
2132 u8 *bbuf = info->bbuf;
2122 2133
2123 usb_stor_dbg(us, "transport --- ENE_SDInit\n"); 2134 usb_stor_dbg(us, "transport --- ENE_SDInit\n");
2124 /* SD Init Part-1 */ 2135 /* SD Init Part-1 */
@@ -2152,17 +2163,17 @@ static int ene_sd_init(struct us_data *us)
2152 bcb->Flags = US_BULK_FLAG_IN; 2163 bcb->Flags = US_BULK_FLAG_IN;
2153 bcb->CDB[0] = 0xF1; 2164 bcb->CDB[0] = 0xF1;
2154 2165
2155 result = ene_send_scsi_cmd(us, FDIR_READ, &buf, 0); 2166 result = ene_send_scsi_cmd(us, FDIR_READ, bbuf, 0);
2156 if (result != USB_STOR_XFER_GOOD) { 2167 if (result != USB_STOR_XFER_GOOD) {
2157 usb_stor_dbg(us, "Execution SD Init Code Fail !!\n"); 2168 usb_stor_dbg(us, "Execution SD Init Code Fail !!\n");
2158 return USB_STOR_TRANSPORT_ERROR; 2169 return USB_STOR_TRANSPORT_ERROR;
2159 } 2170 }
2160 2171
2161 info->SD_Status = *(struct SD_STATUS *)&buf[0]; 2172 info->SD_Status = *(struct SD_STATUS *) bbuf;
2162 if (info->SD_Status.Insert && info->SD_Status.Ready) { 2173 if (info->SD_Status.Insert && info->SD_Status.Ready) {
2163 struct SD_STATUS *s = &info->SD_Status; 2174 struct SD_STATUS *s = &info->SD_Status;
2164 2175
2165 ene_get_card_status(us, (unsigned char *)&buf); 2176 ene_get_card_status(us, bbuf);
2166 usb_stor_dbg(us, "Insert = %x\n", s->Insert); 2177 usb_stor_dbg(us, "Insert = %x\n", s->Insert);
2167 usb_stor_dbg(us, "Ready = %x\n", s->Ready); 2178 usb_stor_dbg(us, "Ready = %x\n", s->Ready);
2168 usb_stor_dbg(us, "IsMMC = %x\n", s->IsMMC); 2179 usb_stor_dbg(us, "IsMMC = %x\n", s->IsMMC);
@@ -2170,7 +2181,7 @@ static int ene_sd_init(struct us_data *us)
2170 usb_stor_dbg(us, "HiSpeed = %x\n", s->HiSpeed); 2181 usb_stor_dbg(us, "HiSpeed = %x\n", s->HiSpeed);
2171 usb_stor_dbg(us, "WtP = %x\n", s->WtP); 2182 usb_stor_dbg(us, "WtP = %x\n", s->WtP);
2172 } else { 2183 } else {
2173 usb_stor_dbg(us, "SD Card Not Ready --- %x\n", buf[0]); 2184 usb_stor_dbg(us, "SD Card Not Ready --- %x\n", bbuf[0]);
2174 return USB_STOR_TRANSPORT_ERROR; 2185 return USB_STOR_TRANSPORT_ERROR;
2175 } 2186 }
2176 return USB_STOR_TRANSPORT_GOOD; 2187 return USB_STOR_TRANSPORT_GOOD;
@@ -2180,13 +2191,15 @@ static int ene_sd_init(struct us_data *us)
2180static int ene_init(struct us_data *us) 2191static int ene_init(struct us_data *us)
2181{ 2192{
2182 int result; 2193 int result;
2183 u8 misc_reg03 = 0; 2194 u8 misc_reg03;
2184 struct ene_ub6250_info *info = (struct ene_ub6250_info *)(us->extra); 2195 struct ene_ub6250_info *info = (struct ene_ub6250_info *)(us->extra);
2196 u8 *bbuf = info->bbuf;
2185 2197
2186 result = ene_get_card_type(us, REG_CARD_STATUS, &misc_reg03); 2198 result = ene_get_card_type(us, REG_CARD_STATUS, bbuf);
2187 if (result != USB_STOR_XFER_GOOD) 2199 if (result != USB_STOR_XFER_GOOD)
2188 return USB_STOR_TRANSPORT_ERROR; 2200 return USB_STOR_TRANSPORT_ERROR;
2189 2201
2202 misc_reg03 = bbuf[0];
2190 if (misc_reg03 & 0x01) { 2203 if (misc_reg03 & 0x01) {
2191 if (!info->SD_Status.Ready) { 2204 if (!info->SD_Status.Ready) {
2192 result = ene_sd_init(us); 2205 result = ene_sd_init(us);
@@ -2303,8 +2316,9 @@ static int ene_ub6250_probe(struct usb_interface *intf,
2303 const struct usb_device_id *id) 2316 const struct usb_device_id *id)
2304{ 2317{
2305 int result; 2318 int result;
2306 u8 misc_reg03 = 0; 2319 u8 misc_reg03;
2307 struct us_data *us; 2320 struct us_data *us;
2321 struct ene_ub6250_info *info;
2308 2322
2309 result = usb_stor_probe1(&us, intf, id, 2323 result = usb_stor_probe1(&us, intf, id,
2310 (id - ene_ub6250_usb_ids) + ene_ub6250_unusual_dev_list, 2324 (id - ene_ub6250_usb_ids) + ene_ub6250_unusual_dev_list,
@@ -2313,11 +2327,16 @@ static int ene_ub6250_probe(struct usb_interface *intf,
2313 return result; 2327 return result;
2314 2328
2315 /* FIXME: where should the code alloc extra buf ? */ 2329 /* FIXME: where should the code alloc extra buf ? */
2316 if (!us->extra) { 2330 us->extra = kzalloc(sizeof(struct ene_ub6250_info), GFP_KERNEL);
2317 us->extra = kzalloc(sizeof(struct ene_ub6250_info), GFP_KERNEL); 2331 if (!us->extra)
2318 if (!us->extra) 2332 return -ENOMEM;
2319 return -ENOMEM; 2333 us->extra_destructor = ene_ub6250_info_destructor;
2320 us->extra_destructor = ene_ub6250_info_destructor; 2334
2335 info = (struct ene_ub6250_info *)(us->extra);
2336 info->bbuf = kmalloc(512, GFP_KERNEL);
2337 if (!info->bbuf) {
2338 kfree(us->extra);
2339 return -ENOMEM;
2321 } 2340 }
2322 2341
2323 us->transport_name = "ene_ub6250"; 2342 us->transport_name = "ene_ub6250";
@@ -2329,12 +2348,13 @@ static int ene_ub6250_probe(struct usb_interface *intf,
2329 return result; 2348 return result;
2330 2349
2331 /* probe card type */ 2350 /* probe card type */
2332 result = ene_get_card_type(us, REG_CARD_STATUS, &misc_reg03); 2351 result = ene_get_card_type(us, REG_CARD_STATUS, info->bbuf);
2333 if (result != USB_STOR_XFER_GOOD) { 2352 if (result != USB_STOR_XFER_GOOD) {
2334 usb_stor_disconnect(intf); 2353 usb_stor_disconnect(intf);
2335 return USB_STOR_TRANSPORT_ERROR; 2354 return USB_STOR_TRANSPORT_ERROR;
2336 } 2355 }
2337 2356
2357 misc_reg03 = info->bbuf[0];
2338 if (!(misc_reg03 & 0x01)) { 2358 if (!(misc_reg03 & 0x01)) {
2339 pr_info("ums_eneub6250: This driver only supports SD/MS cards. " 2359 pr_info("ums_eneub6250: This driver only supports SD/MS cards. "
2340 "It does not support SM cards.\n"); 2360 "It does not support SM cards.\n");
diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
index 5d8b2c261940..0585078638db 100644
--- a/drivers/usb/usbip/vhci_hcd.c
+++ b/drivers/usb/usbip/vhci_hcd.c
@@ -235,14 +235,19 @@ done:
235 235
236static inline void hub_descriptor(struct usb_hub_descriptor *desc) 236static inline void hub_descriptor(struct usb_hub_descriptor *desc)
237{ 237{
238 int width;
239
238 memset(desc, 0, sizeof(*desc)); 240 memset(desc, 0, sizeof(*desc));
239 desc->bDescriptorType = USB_DT_HUB; 241 desc->bDescriptorType = USB_DT_HUB;
240 desc->bDescLength = 9;
241 desc->wHubCharacteristics = cpu_to_le16( 242 desc->wHubCharacteristics = cpu_to_le16(
242 HUB_CHAR_INDV_PORT_LPSM | HUB_CHAR_COMMON_OCPM); 243 HUB_CHAR_INDV_PORT_LPSM | HUB_CHAR_COMMON_OCPM);
244
243 desc->bNbrPorts = VHCI_HC_PORTS; 245 desc->bNbrPorts = VHCI_HC_PORTS;
244 desc->u.hs.DeviceRemovable[0] = 0xff; 246 BUILD_BUG_ON(VHCI_HC_PORTS > USB_MAXCHILDREN);
245 desc->u.hs.DeviceRemovable[1] = 0xff; 247 width = desc->bNbrPorts / 8 + 1;
248 desc->bDescLength = USB_DT_HUB_NONVAR_SIZE + 2 * width;
249 memset(&desc->u.hs.DeviceRemovable[0], 0, width);
250 memset(&desc->u.hs.DeviceRemovable[width], 0xff, width);
246} 251}
247 252
248static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, 253static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
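
The vhci_hcd hunk sizes the hub descriptor from the port count instead of hard-coding two DeviceRemovable bytes. Roughly, with USB_DT_HUB_NONVAR_SIZE = 7 and an assumed port count of 8, the arithmetic works out as in this illustrative snippet:

    #include <stdio.h>

    int main(void)
    {
            int ports = 8;                 /* assumed example for VHCI_HC_PORTS */
            int width = ports / 8 + 1;     /* bytes in the DeviceRemovable bitmap */
            int len   = 7 + 2 * width;     /* header + bitmap + PortPwrCtrlMask */

            printf("width=%d bDescLength=%d\n", width, len);  /* width=2, len=11 */
            return 0;
    }
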
diff --git a/drivers/uwb/i1480/dfu/usb.c b/drivers/uwb/i1480/dfu/usb.c
index 6345e85822a4..a50cf45e530f 100644
--- a/drivers/uwb/i1480/dfu/usb.c
+++ b/drivers/uwb/i1480/dfu/usb.c
@@ -341,6 +341,7 @@ error_submit_ep1:
341static 341static
342int i1480_usb_probe(struct usb_interface *iface, const struct usb_device_id *id) 342int i1480_usb_probe(struct usb_interface *iface, const struct usb_device_id *id)
343{ 343{
344 struct usb_device *udev = interface_to_usbdev(iface);
344 struct i1480_usb *i1480_usb; 345 struct i1480_usb *i1480_usb;
345 struct i1480 *i1480; 346 struct i1480 *i1480;
346 struct device *dev = &iface->dev; 347 struct device *dev = &iface->dev;
@@ -352,8 +353,8 @@ int i1480_usb_probe(struct usb_interface *iface, const struct usb_device_id *id)
352 iface->cur_altsetting->desc.bInterfaceNumber); 353 iface->cur_altsetting->desc.bInterfaceNumber);
353 goto error; 354 goto error;
354 } 355 }
355 if (iface->num_altsetting > 1 356 if (iface->num_altsetting > 1 &&
356 && interface_to_usbdev(iface)->descriptor.idProduct == 0xbabe) { 357 le16_to_cpu(udev->descriptor.idProduct) == 0xbabe) {
357 /* Need altsetting #1 [HW QUIRK] or EP1 won't work */ 358 /* Need altsetting #1 [HW QUIRK] or EP1 won't work */
358 result = usb_set_interface(interface_to_usbdev(iface), 0, 1); 359 result = usb_set_interface(interface_to_usbdev(iface), 0, 1);
359 if (result < 0) 360 if (result < 0)
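
The i1480 fix (and the fbdev ones below) wrap multi-byte USB descriptor fields in le16_to_cpu(): descriptors are little-endian on the wire, so raw comparisons or printouts are wrong on big-endian hosts. A hedged kernel-style sketch, where 0xbabe is the quirky product ID taken from the diff:

    #include <linux/types.h>  /* __le16, bool; le16_to_cpu via byteorder headers */

    /* Illustrative helper, not the driver's exact code. */
    static bool needs_altsetting_quirk(__le16 id_product)
    {
            return le16_to_cpu(id_product) == 0xbabe;
    }
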
diff --git a/drivers/video/fbdev/core/fbmon.c b/drivers/video/fbdev/core/fbmon.c
index 687ebb053438..41d7979d81c5 100644
--- a/drivers/video/fbdev/core/fbmon.c
+++ b/drivers/video/fbdev/core/fbmon.c
@@ -1048,7 +1048,7 @@ void fb_edid_add_monspecs(unsigned char *edid, struct fb_monspecs *specs)
1048 1048
1049 for (i = 0; i < (128 - edid[2]) / DETAILED_TIMING_DESCRIPTION_SIZE; 1049 for (i = 0; i < (128 - edid[2]) / DETAILED_TIMING_DESCRIPTION_SIZE;
1050 i++, block += DETAILED_TIMING_DESCRIPTION_SIZE) 1050 i++, block += DETAILED_TIMING_DESCRIPTION_SIZE)
1051 if (PIXEL_CLOCK) 1051 if (PIXEL_CLOCK != 0)
1052 edt[num++] = block - edid; 1052 edt[num++] = block - edid;
1053 1053
1054 /* Yikes, EDID data is totally useless */ 1054 /* Yikes, EDID data is totally useless */
diff --git a/drivers/video/fbdev/smscufx.c b/drivers/video/fbdev/smscufx.c
index ec2e7e353685..449fceaf79d5 100644
--- a/drivers/video/fbdev/smscufx.c
+++ b/drivers/video/fbdev/smscufx.c
@@ -1646,8 +1646,9 @@ static int ufx_usb_probe(struct usb_interface *interface,
1646 dev_dbg(dev->gdev, "%s %s - serial #%s\n", 1646 dev_dbg(dev->gdev, "%s %s - serial #%s\n",
1647 usbdev->manufacturer, usbdev->product, usbdev->serial); 1647 usbdev->manufacturer, usbdev->product, usbdev->serial);
1648 dev_dbg(dev->gdev, "vid_%04x&pid_%04x&rev_%04x driver's ufx_data struct at %p\n", 1648 dev_dbg(dev->gdev, "vid_%04x&pid_%04x&rev_%04x driver's ufx_data struct at %p\n",
1649 usbdev->descriptor.idVendor, usbdev->descriptor.idProduct, 1649 le16_to_cpu(usbdev->descriptor.idVendor),
1650 usbdev->descriptor.bcdDevice, dev); 1650 le16_to_cpu(usbdev->descriptor.idProduct),
1651 le16_to_cpu(usbdev->descriptor.bcdDevice), dev);
1651 dev_dbg(dev->gdev, "console enable=%d\n", console); 1652 dev_dbg(dev->gdev, "console enable=%d\n", console);
1652 dev_dbg(dev->gdev, "fb_defio enable=%d\n", fb_defio); 1653 dev_dbg(dev->gdev, "fb_defio enable=%d\n", fb_defio);
1653 1654
diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
index 6a3c353de7c3..05ef657235df 100644
--- a/drivers/video/fbdev/udlfb.c
+++ b/drivers/video/fbdev/udlfb.c
@@ -1105,8 +1105,8 @@ static int dlfb_ops_blank(int blank_mode, struct fb_info *info)
1105 char *bufptr; 1105 char *bufptr;
1106 struct urb *urb; 1106 struct urb *urb;
1107 1107
1108 pr_info("/dev/fb%d FB_BLANK mode %d --> %d\n", 1108 pr_debug("/dev/fb%d FB_BLANK mode %d --> %d\n",
1109 info->node, dev->blank_mode, blank_mode); 1109 info->node, dev->blank_mode, blank_mode);
1110 1110
1111 if ((dev->blank_mode == FB_BLANK_POWERDOWN) && 1111 if ((dev->blank_mode == FB_BLANK_POWERDOWN) &&
1112 (blank_mode != FB_BLANK_POWERDOWN)) { 1112 (blank_mode != FB_BLANK_POWERDOWN)) {
@@ -1613,8 +1613,9 @@ static int dlfb_usb_probe(struct usb_interface *interface,
1613 pr_info("%s %s - serial #%s\n", 1613 pr_info("%s %s - serial #%s\n",
1614 usbdev->manufacturer, usbdev->product, usbdev->serial); 1614 usbdev->manufacturer, usbdev->product, usbdev->serial);
1615 pr_info("vid_%04x&pid_%04x&rev_%04x driver's dlfb_data struct at %p\n", 1615 pr_info("vid_%04x&pid_%04x&rev_%04x driver's dlfb_data struct at %p\n",
1616 usbdev->descriptor.idVendor, usbdev->descriptor.idProduct, 1616 le16_to_cpu(usbdev->descriptor.idVendor),
1617 usbdev->descriptor.bcdDevice, dev); 1617 le16_to_cpu(usbdev->descriptor.idProduct),
1618 le16_to_cpu(usbdev->descriptor.bcdDevice), dev);
1618 pr_info("console enable=%d\n", console); 1619 pr_info("console enable=%d\n", console);
1619 pr_info("fb_defio enable=%d\n", fb_defio); 1620 pr_info("fb_defio enable=%d\n", fb_defio);
1620 pr_info("shadow enable=%d\n", shadow); 1621 pr_info("shadow enable=%d\n", shadow);
diff --git a/drivers/video/fbdev/via/viafbdev.c b/drivers/video/fbdev/via/viafbdev.c
index f9718f012aae..badee04ef496 100644
--- a/drivers/video/fbdev/via/viafbdev.c
+++ b/drivers/video/fbdev/via/viafbdev.c
@@ -1630,16 +1630,14 @@ static void viafb_init_proc(struct viafb_shared *shared)
1630} 1630}
1631static void viafb_remove_proc(struct viafb_shared *shared) 1631static void viafb_remove_proc(struct viafb_shared *shared)
1632{ 1632{
1633 struct proc_dir_entry *viafb_entry = shared->proc_entry, 1633 struct proc_dir_entry *viafb_entry = shared->proc_entry;
1634 *iga1_entry = shared->iga1_proc_entry,
1635 *iga2_entry = shared->iga2_proc_entry;
1636 1634
1637 if (!viafb_entry) 1635 if (!viafb_entry)
1638 return; 1636 return;
1639 1637
1640 remove_proc_entry("output_devices", iga2_entry); 1638 remove_proc_entry("output_devices", shared->iga2_proc_entry);
1641 remove_proc_entry("iga2", viafb_entry); 1639 remove_proc_entry("iga2", viafb_entry);
1642 remove_proc_entry("output_devices", iga1_entry); 1640 remove_proc_entry("output_devices", shared->iga1_proc_entry);
1643 remove_proc_entry("iga1", viafb_entry); 1641 remove_proc_entry("iga1", viafb_entry);
1644 remove_proc_entry("supported_output_devices", viafb_entry); 1642 remove_proc_entry("supported_output_devices", viafb_entry);
1645 1643
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 408c174ef0d5..22caf808bfab 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -663,6 +663,12 @@ static int virtballoon_restore(struct virtio_device *vdev)
663} 663}
664#endif 664#endif
665 665
666static int virtballoon_validate(struct virtio_device *vdev)
667{
668 __virtio_clear_bit(vdev, VIRTIO_F_IOMMU_PLATFORM);
669 return 0;
670}
671
666static unsigned int features[] = { 672static unsigned int features[] = {
667 VIRTIO_BALLOON_F_MUST_TELL_HOST, 673 VIRTIO_BALLOON_F_MUST_TELL_HOST,
668 VIRTIO_BALLOON_F_STATS_VQ, 674 VIRTIO_BALLOON_F_STATS_VQ,
@@ -675,6 +681,7 @@ static struct virtio_driver virtio_balloon_driver = {
675 .driver.name = KBUILD_MODNAME, 681 .driver.name = KBUILD_MODNAME,
676 .driver.owner = THIS_MODULE, 682 .driver.owner = THIS_MODULE,
677 .id_table = id_table, 683 .id_table = id_table,
684 .validate = virtballoon_validate,
678 .probe = virtballoon_probe, 685 .probe = virtballoon_probe,
679 .remove = virtballoon_remove, 686 .remove = virtballoon_remove,
680 .config_changed = virtballoon_changed, 687 .config_changed = virtballoon_changed,
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 52a70ee6014f..8b9049dac094 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -452,7 +452,7 @@ config DAVINCI_WATCHDOG
452 452
453config ORION_WATCHDOG 453config ORION_WATCHDOG
454 tristate "Orion watchdog" 454 tristate "Orion watchdog"
455 depends on ARCH_ORION5X || ARCH_DOVE || MACH_DOVE || ARCH_MVEBU || COMPILE_TEST 455 depends on ARCH_ORION5X || ARCH_DOVE || MACH_DOVE || ARCH_MVEBU || (COMPILE_TEST && !ARCH_EBSA110)
456 depends on ARM 456 depends on ARM
457 select WATCHDOG_CORE 457 select WATCHDOG_CORE
458 help 458 help
diff --git a/drivers/watchdog/bcm_kona_wdt.c b/drivers/watchdog/bcm_kona_wdt.c
index 6fce17d5b9f1..a5775dfd8d5f 100644
--- a/drivers/watchdog/bcm_kona_wdt.c
+++ b/drivers/watchdog/bcm_kona_wdt.c
@@ -304,6 +304,8 @@ static int bcm_kona_wdt_probe(struct platform_device *pdev)
304 if (!wdt) 304 if (!wdt)
305 return -ENOMEM; 305 return -ENOMEM;
306 306
307 spin_lock_init(&wdt->lock);
308
307 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 309 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
308 wdt->base = devm_ioremap_resource(dev, res); 310 wdt->base = devm_ioremap_resource(dev, res);
309 if (IS_ERR(wdt->base)) 311 if (IS_ERR(wdt->base))
@@ -316,7 +318,6 @@ static int bcm_kona_wdt_probe(struct platform_device *pdev)
316 return ret; 318 return ret;
317 } 319 }
318 320
319 spin_lock_init(&wdt->lock);
320 platform_set_drvdata(pdev, wdt); 321 platform_set_drvdata(pdev, wdt);
321 watchdog_set_drvdata(&bcm_kona_wdt_wdd, wdt); 322 watchdog_set_drvdata(&bcm_kona_wdt_wdd, wdt);
322 bcm_kona_wdt_wdd.parent = &pdev->dev; 323 bcm_kona_wdt_wdd.parent = &pdev->dev;
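
The bcm_kona_wdt change moves spin_lock_init() ahead of the rest of probe, so any setup path that ends up taking the lock finds it initialized. A sketch of the ordering rule (illustrative names):

    #include <linux/spinlock.h>
    #include <linux/io.h>

    struct example_wdt {
            spinlock_t lock;
            void __iomem *base;
    };

    static int example_probe(struct example_wdt *wdt)
    {
            spin_lock_init(&wdt->lock);  /* first: later setup may take it */
            /* ... ioremap, debugfs, IRQ registration can now use the lock ... */
            return 0;
    }
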
diff --git a/drivers/watchdog/cadence_wdt.c b/drivers/watchdog/cadence_wdt.c
index 8d61e8bfe60b..86e0b5d2e761 100644
--- a/drivers/watchdog/cadence_wdt.c
+++ b/drivers/watchdog/cadence_wdt.c
@@ -49,7 +49,7 @@
49/* Counter maximum value */ 49/* Counter maximum value */
50#define CDNS_WDT_COUNTER_MAX 0xFFF 50#define CDNS_WDT_COUNTER_MAX 0xFFF
51 51
52static int wdt_timeout = CDNS_WDT_DEFAULT_TIMEOUT; 52static int wdt_timeout;
53static int nowayout = WATCHDOG_NOWAYOUT; 53static int nowayout = WATCHDOG_NOWAYOUT;
54 54
55module_param(wdt_timeout, int, 0); 55module_param(wdt_timeout, int, 0);
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
index 347f0389b089..c4f65873bfa4 100644
--- a/drivers/watchdog/iTCO_wdt.c
+++ b/drivers/watchdog/iTCO_wdt.c
@@ -306,16 +306,15 @@ static int iTCO_wdt_ping(struct watchdog_device *wd_dev)
306 306
307 iTCO_vendor_pre_keepalive(p->smi_res, wd_dev->timeout); 307 iTCO_vendor_pre_keepalive(p->smi_res, wd_dev->timeout);
308 308
309 /* Reset the timeout status bit so that the timer
310 * needs to count down twice again before rebooting */
311 outw(0x0008, TCO1_STS(p)); /* write 1 to clear bit */
312
309 /* Reload the timer by writing to the TCO Timer Counter register */ 313 /* Reload the timer by writing to the TCO Timer Counter register */
310 if (p->iTCO_version >= 2) { 314 if (p->iTCO_version >= 2)
311 outw(0x01, TCO_RLD(p)); 315 outw(0x01, TCO_RLD(p));
312 } else if (p->iTCO_version == 1) { 316 else if (p->iTCO_version == 1)
313 /* Reset the timeout status bit so that the timer
314 * needs to count down twice again before rebooting */
315 outw(0x0008, TCO1_STS(p)); /* write 1 to clear bit */
316
317 outb(0x01, TCO_RLD(p)); 317 outb(0x01, TCO_RLD(p));
318 }
319 318
320 spin_unlock(&p->io_lock); 319 spin_unlock(&p->io_lock);
321 return 0; 320 return 0;
@@ -328,11 +327,8 @@ static int iTCO_wdt_set_timeout(struct watchdog_device *wd_dev, unsigned int t)
328 unsigned char val8; 327 unsigned char val8;
329 unsigned int tmrval; 328 unsigned int tmrval;
330 329
331 tmrval = seconds_to_ticks(p, t); 330 /* The timer counts down twice before rebooting */
332 331 tmrval = seconds_to_ticks(p, t) / 2;
333 /* For TCO v1 the timer counts down twice before rebooting */
334 if (p->iTCO_version == 1)
335 tmrval /= 2;
336 332
337 /* from the specs: */ 333 /* from the specs: */
338 /* "Values of 0h-3h are ignored and should not be attempted" */ 334 /* "Values of 0h-3h are ignored and should not be attempted" */
@@ -385,6 +381,8 @@ static unsigned int iTCO_wdt_get_timeleft(struct watchdog_device *wd_dev)
385 spin_lock(&p->io_lock); 381 spin_lock(&p->io_lock);
386 val16 = inw(TCO_RLD(p)); 382 val16 = inw(TCO_RLD(p));
387 val16 &= 0x3ff; 383 val16 &= 0x3ff;
384 if (!(inw(TCO1_STS(p)) & 0x0008))
385 val16 += (inw(TCOv2_TMR(p)) & 0x3ff);
388 spin_unlock(&p->io_lock); 386 spin_unlock(&p->io_lock);
389 387
390 time_left = ticks_to_seconds(p, val16); 388 time_left = ticks_to_seconds(p, val16);
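
With the iTCO change, every hardware version counts down twice before rebooting, so set_timeout programs half the requested value and get_timeleft adds a full timer period back while the first countdown is still in progress. Illustrative arithmetic, assuming one tick per second:

    #include <stdio.h>

    int main(void)
    {
            unsigned int want = 60;          /* requested timeout, seconds */
            unsigned int tmrval = want / 2;  /* timer fires twice before reset */

            printf("program %u ticks for a %us timeout\n", tmrval, want);
            return 0;
    }
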
diff --git a/drivers/watchdog/pcwd_usb.c b/drivers/watchdog/pcwd_usb.c
index 99ebf6ea3de6..5615f4013924 100644
--- a/drivers/watchdog/pcwd_usb.c
+++ b/drivers/watchdog/pcwd_usb.c
@@ -630,6 +630,9 @@ static int usb_pcwd_probe(struct usb_interface *interface,
630 return -ENODEV; 630 return -ENODEV;
631 } 631 }
632 632
633 if (iface_desc->desc.bNumEndpoints < 1)
634 return -ENODEV;
635
633 /* check out the endpoint: it has to be Interrupt & IN */ 636 /* check out the endpoint: it has to be Interrupt & IN */
634 endpoint = &iface_desc->endpoint[0].desc; 637 endpoint = &iface_desc->endpoint[0].desc;
635 638
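
The pcwd_usb hunk is the usual probe-time hardening: check bNumEndpoints before dereferencing endpoint[0], since a malicious or broken device can present fewer endpoints than the driver expects. A minimal sketch:

    #include <linux/usb.h>

    /* Illustrative bounds check before touching endpoint[0]. */
    static int example_validate(struct usb_host_interface *alt)
    {
            if (alt->desc.bNumEndpoints < 1)
                    return -ENODEV;  /* endpoint[0] would be out of bounds */
            return 0;
    }
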
diff --git a/drivers/watchdog/sama5d4_wdt.c b/drivers/watchdog/sama5d4_wdt.c
index f709962018ac..362fd229786d 100644
--- a/drivers/watchdog/sama5d4_wdt.c
+++ b/drivers/watchdog/sama5d4_wdt.c
@@ -6,6 +6,7 @@
6 * Licensed under GPLv2. 6 * Licensed under GPLv2.
7 */ 7 */
8 8
9#include <linux/delay.h>
9#include <linux/interrupt.h> 10#include <linux/interrupt.h>
10#include <linux/io.h> 11#include <linux/io.h>
11#include <linux/kernel.h> 12#include <linux/kernel.h>
@@ -29,6 +30,7 @@ struct sama5d4_wdt {
29 struct watchdog_device wdd; 30 struct watchdog_device wdd;
30 void __iomem *reg_base; 31 void __iomem *reg_base;
31 u32 mr; 32 u32 mr;
33 unsigned long last_ping;
32}; 34};
33 35
34static int wdt_timeout = WDT_DEFAULT_TIMEOUT; 36static int wdt_timeout = WDT_DEFAULT_TIMEOUT;
@@ -44,11 +46,34 @@ MODULE_PARM_DESC(nowayout,
44 "Watchdog cannot be stopped once started (default=" 46 "Watchdog cannot be stopped once started (default="
45 __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); 47 __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
46 48
49#define wdt_enabled (!(wdt->mr & AT91_WDT_WDDIS))
50
47#define wdt_read(wdt, field) \ 51#define wdt_read(wdt, field) \
48 readl_relaxed((wdt)->reg_base + (field)) 52 readl_relaxed((wdt)->reg_base + (field))
49 53
50#define wdt_write(wtd, field, val) \ 54/* 4 slow clock periods is 4/32768 = 122.07 µs */
51 writel_relaxed((val), (wdt)->reg_base + (field)) 55#define WDT_DELAY usecs_to_jiffies(123)
56
57static void wdt_write(struct sama5d4_wdt *wdt, u32 field, u32 val)
58{
59 /*
60 * WDT_CR and WDT_MR must not be modified within three slow clock
61 * periods following a restart of the watchdog performed by a write
62 * access in WDT_CR.
63 */
64 while (time_before(jiffies, wdt->last_ping + WDT_DELAY))
65 usleep_range(30, 125);
66 writel_relaxed(val, wdt->reg_base + field);
67 wdt->last_ping = jiffies;
68}
69
70static void wdt_write_nosleep(struct sama5d4_wdt *wdt, u32 field, u32 val)
71{
72 if (time_before(jiffies, wdt->last_ping + WDT_DELAY))
73 udelay(123);
74 writel_relaxed(val, wdt->reg_base + field);
75 wdt->last_ping = jiffies;
76}
52 77
53static int sama5d4_wdt_start(struct watchdog_device *wdd) 78static int sama5d4_wdt_start(struct watchdog_device *wdd)
54{ 79{
@@ -89,7 +114,16 @@ static int sama5d4_wdt_set_timeout(struct watchdog_device *wdd,
89 wdt->mr &= ~AT91_WDT_WDD; 114 wdt->mr &= ~AT91_WDT_WDD;
90 wdt->mr |= AT91_WDT_SET_WDV(value); 115 wdt->mr |= AT91_WDT_SET_WDV(value);
91 wdt->mr |= AT91_WDT_SET_WDD(value); 116 wdt->mr |= AT91_WDT_SET_WDD(value);
92 wdt_write(wdt, AT91_WDT_MR, wdt->mr); 117
118 /*
119 * WDDIS has to be 0 when updating WDD/WDV. The datasheet states: When
120 * setting the WDDIS bit, and while it is set, the fields WDV and WDD
121 * must not be modified.
122 * If the watchdog is enabled, then the timeout can be updated. Else,
123 * wait that the user enables it.
124 */
125 if (wdt_enabled)
126 wdt_write(wdt, AT91_WDT_MR, wdt->mr & ~AT91_WDT_WDDIS);
93 127
94 wdd->timeout = timeout; 128 wdd->timeout = timeout;
95 129
@@ -145,23 +179,21 @@ static int of_sama5d4_wdt_init(struct device_node *np, struct sama5d4_wdt *wdt)
145 179
146static int sama5d4_wdt_init(struct sama5d4_wdt *wdt) 180static int sama5d4_wdt_init(struct sama5d4_wdt *wdt)
147{ 181{
148 struct watchdog_device *wdd = &wdt->wdd;
149 u32 value = WDT_SEC2TICKS(wdd->timeout);
150 u32 reg; 182 u32 reg;
151
152 /* 183 /*
153 * Because the fields WDV and WDD must not be modified when the WDDIS 184 * When booting and resuming, the bootloader may have changed the
154 * bit is set, so clear the WDDIS bit before writing the WDT_MR. 185 * watchdog configuration.
186 * If the watchdog is already running, we can safely update it.
187 * Else, we have to disable it properly.
155 */ 188 */
156 reg = wdt_read(wdt, AT91_WDT_MR); 189 if (wdt_enabled) {
157 reg &= ~AT91_WDT_WDDIS; 190 wdt_write_nosleep(wdt, AT91_WDT_MR, wdt->mr);
158 wdt_write(wdt, AT91_WDT_MR, reg); 191 } else {
159 192 reg = wdt_read(wdt, AT91_WDT_MR);
160 wdt->mr |= AT91_WDT_SET_WDD(value); 193 if (!(reg & AT91_WDT_WDDIS))
161 wdt->mr |= AT91_WDT_SET_WDV(value); 194 wdt_write_nosleep(wdt, AT91_WDT_MR,
162 195 reg | AT91_WDT_WDDIS);
163 wdt_write(wdt, AT91_WDT_MR, wdt->mr); 196 }
164
165 return 0; 197 return 0;
166} 198}
167 199
@@ -172,6 +204,7 @@ static int sama5d4_wdt_probe(struct platform_device *pdev)
172 struct resource *res; 204 struct resource *res;
173 void __iomem *regs; 205 void __iomem *regs;
174 u32 irq = 0; 206 u32 irq = 0;
207 u32 timeout;
175 int ret; 208 int ret;
176 209
177 wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL); 210 wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
@@ -184,6 +217,7 @@ static int sama5d4_wdt_probe(struct platform_device *pdev)
184 wdd->ops = &sama5d4_wdt_ops; 217 wdd->ops = &sama5d4_wdt_ops;
185 wdd->min_timeout = MIN_WDT_TIMEOUT; 218 wdd->min_timeout = MIN_WDT_TIMEOUT;
186 wdd->max_timeout = MAX_WDT_TIMEOUT; 219 wdd->max_timeout = MAX_WDT_TIMEOUT;
220 wdt->last_ping = jiffies;
187 221
188 watchdog_set_drvdata(wdd, wdt); 222 watchdog_set_drvdata(wdd, wdt);
189 223
@@ -221,6 +255,11 @@ static int sama5d4_wdt_probe(struct platform_device *pdev)
221 return ret; 255 return ret;
222 } 256 }
223 257
258 timeout = WDT_SEC2TICKS(wdd->timeout);
259
260 wdt->mr |= AT91_WDT_SET_WDD(timeout);
261 wdt->mr |= AT91_WDT_SET_WDV(timeout);
262
224 ret = sama5d4_wdt_init(wdt); 263 ret = sama5d4_wdt_init(wdt);
225 if (ret) 264 if (ret)
226 return ret; 265 return ret;
@@ -263,9 +302,7 @@ static int sama5d4_wdt_resume(struct device *dev)
263{ 302{
264 struct sama5d4_wdt *wdt = dev_get_drvdata(dev); 303 struct sama5d4_wdt *wdt = dev_get_drvdata(dev);
265 304
266 wdt_write(wdt, AT91_WDT_MR, wdt->mr & ~AT91_WDT_WDDIS); 305 sama5d4_wdt_init(wdt);
267 if (wdt->mr & AT91_WDT_WDDIS)
268 wdt_write(wdt, AT91_WDT_MR, wdt->mr);
269 306
270 return 0; 307 return 0;
271} 308}
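
The sama5d4_wdt rework spaces WDT_CR/WDT_MR writes at least four slow-clock periods apart by stamping jiffies on every write. A compact sketch of the jiffies-based throttle, assuming the ~123 µs window from the diff and a sleeping context:

    #include <linux/jiffies.h>
    #include <linux/delay.h>
    #include <linux/io.h>
    #include <linux/types.h>

    #define WRITE_DELAY usecs_to_jiffies(123)

    static void throttled_write(void __iomem *reg, u32 val,
                                unsigned long *last_write)
    {
            while (time_before(jiffies, *last_write + WRITE_DELAY))
                    usleep_range(30, 125);   /* wait out the forbidden window */
            writel_relaxed(val, reg);
            *last_write = jiffies;           /* stamp for the next caller */
    }
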
diff --git a/drivers/watchdog/wdt_pci.c b/drivers/watchdog/wdt_pci.c
index 48b2c058b009..bc7addc2dc06 100644
--- a/drivers/watchdog/wdt_pci.c
+++ b/drivers/watchdog/wdt_pci.c
@@ -332,7 +332,7 @@ static irqreturn_t wdtpci_interrupt(int irq, void *dev_id)
332 pr_crit("Would Reboot\n"); 332 pr_crit("Would Reboot\n");
333#else 333#else
334 pr_crit("Initiating system reboot\n"); 334 pr_crit("Initiating system reboot\n");
335 emergency_restart(NULL); 335 emergency_restart();
336#endif 336#endif
337#else 337#else
338 pr_crit("Reset in 5ms\n"); 338 pr_crit("Reset in 5ms\n");
diff --git a/drivers/watchdog/zx2967_wdt.c b/drivers/watchdog/zx2967_wdt.c
index e290d5a13a6d..c98252733c30 100644
--- a/drivers/watchdog/zx2967_wdt.c
+++ b/drivers/watchdog/zx2967_wdt.c
@@ -211,10 +211,8 @@ static int zx2967_wdt_probe(struct platform_device *pdev)
211 211
212 base = platform_get_resource(pdev, IORESOURCE_MEM, 0); 212 base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
213 wdt->reg_base = devm_ioremap_resource(dev, base); 213 wdt->reg_base = devm_ioremap_resource(dev, base);
214 if (IS_ERR(wdt->reg_base)) { 214 if (IS_ERR(wdt->reg_base))
215 dev_err(dev, "ioremap failed\n");
216 return PTR_ERR(wdt->reg_base); 215 return PTR_ERR(wdt->reg_base);
217 }
218 216
219 zx2967_wdt_reset_sysctrl(dev); 217 zx2967_wdt_reset_sysctrl(dev);
220 218
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 7a92a5e1d40c..feca75b07fdd 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -362,8 +362,8 @@ static int mmap_batch_fn(void *data, int nr, void *state)
362 st->global_error = 1; 362 st->global_error = 1;
363 } 363 }
364 } 364 }
365 st->va += PAGE_SIZE * nr; 365 st->va += XEN_PAGE_SIZE * nr;
366 st->index += nr; 366 st->index += nr / XEN_PFN_PER_PAGE;
367 367
368 return 0; 368 return 0;
369} 369}
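
The privcmd fix matters on configurations where the kernel page size exceeds Xen's fixed 4 KiB frame size (e.g. 64 KiB pages on arm64): nr counts Xen frames, not kernel pages, so the virtual address must advance by XEN_PAGE_SIZE per frame. Illustrative numbers:

    #include <stdio.h>

    int main(void)
    {
            unsigned long xen_page = 4096, kernel_page = 65536;
            unsigned long pfn_per_page = kernel_page / xen_page;

            printf("XEN_PFN_PER_PAGE=%lu\n", pfn_per_page);  /* 16 */
            return 0;
    }
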
diff --git a/fs/autofs4/dev-ioctl.c b/fs/autofs4/dev-ioctl.c
index 734cbf8d9676..dd9f1bebb5a3 100644
--- a/fs/autofs4/dev-ioctl.c
+++ b/fs/autofs4/dev-ioctl.c
@@ -344,7 +344,7 @@ static int autofs_dev_ioctl_fail(struct file *fp,
344 int status; 344 int status;
345 345
346 token = (autofs_wqt_t) param->fail.token; 346 token = (autofs_wqt_t) param->fail.token;
347 status = param->fail.status ? param->fail.status : -ENOENT; 347 status = param->fail.status < 0 ? param->fail.status : -ENOENT;
348 return autofs4_wait_release(sbi, token, status); 348 return autofs4_wait_release(sbi, token, status);
349} 349}
350 350
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 519599dddd36..0a7404ef9335 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -263,7 +263,10 @@ __blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter,
263 kfree(vecs); 263 kfree(vecs);
264 264
265 if (unlikely(bio.bi_error)) 265 if (unlikely(bio.bi_error))
266 return bio.bi_error; 266 ret = bio.bi_error;
267
268 bio_uninit(&bio);
269
267 return ret; 270 return ret;
268} 271}
269 272
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 643c70d2b2e6..4f8f75d9e839 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -2563,7 +2563,7 @@ u64 btrfs_csum_bytes_to_leaves(struct btrfs_fs_info *fs_info, u64 csum_bytes);
2563static inline u64 btrfs_calc_trans_metadata_size(struct btrfs_fs_info *fs_info, 2563static inline u64 btrfs_calc_trans_metadata_size(struct btrfs_fs_info *fs_info,
2564 unsigned num_items) 2564 unsigned num_items)
2565{ 2565{
2566 return fs_info->nodesize * BTRFS_MAX_LEVEL * 2 * num_items; 2566 return (u64)fs_info->nodesize * BTRFS_MAX_LEVEL * 2 * num_items;
2567} 2567}
2568 2568
2569/* 2569/*
@@ -2573,7 +2573,7 @@ static inline u64 btrfs_calc_trans_metadata_size(struct btrfs_fs_info *fs_info,
2573static inline u64 btrfs_calc_trunc_metadata_size(struct btrfs_fs_info *fs_info, 2573static inline u64 btrfs_calc_trunc_metadata_size(struct btrfs_fs_info *fs_info,
2574 unsigned num_items) 2574 unsigned num_items)
2575{ 2575{
2576 return fs_info->nodesize * BTRFS_MAX_LEVEL * num_items; 2576 return (u64)fs_info->nodesize * BTRFS_MAX_LEVEL * num_items;
2577} 2577}
2578 2578
2579int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans, 2579int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
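
The (u64) casts in ctree.h prevent the multiplication from being done in 32 bits: nodesize is a u32, so nodesize * BTRFS_MAX_LEVEL * 2 * num_items can wrap before the result is widened. A demonstration with plausible values (64 KiB nodes, BTRFS_MAX_LEVEL of 8, 4096 items):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t nodesize = 65536;
            unsigned num_items = 4096;

            uint64_t wrong = nodesize * 8 * 2 * num_items;           /* wraps to 0 */
            uint64_t right = (uint64_t)nodesize * 8 * 2 * num_items; /* 2^32 */

            printf("wrong=%llu right=%llu\n",
                   (unsigned long long)wrong, (unsigned long long)right);
            return 0;
    }
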
diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c
index 60a750678a82..c24d615e3d7f 100644
--- a/fs/btrfs/dir-item.c
+++ b/fs/btrfs/dir-item.c
@@ -468,7 +468,7 @@ int verify_dir_item(struct btrfs_fs_info *fs_info,
468 468
469 if (btrfs_dir_name_len(leaf, dir_item) > namelen) { 469 if (btrfs_dir_name_len(leaf, dir_item) > namelen) {
470 btrfs_crit(fs_info, "invalid dir item name len: %u", 470 btrfs_crit(fs_info, "invalid dir item name len: %u",
471 (unsigned)btrfs_dir_data_len(leaf, dir_item)); 471 (unsigned)btrfs_dir_name_len(leaf, dir_item));
472 return 1; 472 return 1;
473 } 473 }
474 474
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 8685d67185d0..5f678dcb20e6 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -3467,10 +3467,12 @@ static int write_dev_supers(struct btrfs_device *device,
3467 * we fua the first super. The others we allow 3467 * we fua the first super. The others we allow
3468 * to go down lazy. 3468 * to go down lazy.
3469 */ 3469 */
3470 if (i == 0) 3470 if (i == 0) {
3471 ret = btrfsic_submit_bh(REQ_OP_WRITE, REQ_FUA, bh); 3471 ret = btrfsic_submit_bh(REQ_OP_WRITE,
3472 else 3472 REQ_SYNC | REQ_FUA, bh);
3473 } else {
3473 ret = btrfsic_submit_bh(REQ_OP_WRITE, REQ_SYNC, bh); 3474 ret = btrfsic_submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);
3475 }
3474 if (ret) 3476 if (ret)
3475 errors++; 3477 errors++;
3476 } 3478 }
@@ -3535,7 +3537,7 @@ static int write_dev_flush(struct btrfs_device *device, int wait)
3535 3537
3536 bio->bi_end_io = btrfs_end_empty_barrier; 3538 bio->bi_end_io = btrfs_end_empty_barrier;
3537 bio->bi_bdev = device->bdev; 3539 bio->bi_bdev = device->bdev;
3538 bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; 3540 bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH;
3539 init_completion(&device->flush_wait); 3541 init_completion(&device->flush_wait);
3540 bio->bi_private = &device->flush_wait; 3542 bio->bi_private = &device->flush_wait;
3541 device->flush_bio = bio; 3543 device->flush_bio = bio;
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index e390451c72e6..33d979e9ea2a 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3993,6 +3993,7 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags,
3993 info->space_info_kobj, "%s", 3993 info->space_info_kobj, "%s",
3994 alloc_name(found->flags)); 3994 alloc_name(found->flags));
3995 if (ret) { 3995 if (ret) {
3996 percpu_counter_destroy(&found->total_bytes_pinned);
3996 kfree(found); 3997 kfree(found);
3997 return ret; 3998 return ret;
3998 } 3999 }
@@ -4844,7 +4845,7 @@ static int may_commit_transaction(struct btrfs_fs_info *fs_info,
4844 spin_unlock(&delayed_rsv->lock); 4845 spin_unlock(&delayed_rsv->lock);
4845 4846
4846commit: 4847commit:
4847 trans = btrfs_join_transaction(fs_info->fs_root); 4848 trans = btrfs_join_transaction(fs_info->extent_root);
4848 if (IS_ERR(trans)) 4849 if (IS_ERR(trans))
4849 return -ENOSPC; 4850 return -ENOSPC;
4850 4851
@@ -4862,7 +4863,7 @@ static int flush_space(struct btrfs_fs_info *fs_info,
4862 struct btrfs_space_info *space_info, u64 num_bytes, 4863 struct btrfs_space_info *space_info, u64 num_bytes,
4863 u64 orig_bytes, int state) 4864 u64 orig_bytes, int state)
4864{ 4865{
4865 struct btrfs_root *root = fs_info->fs_root; 4866 struct btrfs_root *root = fs_info->extent_root;
4866 struct btrfs_trans_handle *trans; 4867 struct btrfs_trans_handle *trans;
4867 int nr; 4868 int nr;
4868 int ret = 0; 4869 int ret = 0;
@@ -5062,7 +5063,7 @@ static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info,
5062 int flush_state = FLUSH_DELAYED_ITEMS_NR; 5063 int flush_state = FLUSH_DELAYED_ITEMS_NR;
5063 5064
5064 spin_lock(&space_info->lock); 5065 spin_lock(&space_info->lock);
5065 to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info->fs_root, 5066 to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info->extent_root,
5066 space_info); 5067 space_info);
5067 if (!to_reclaim) { 5068 if (!to_reclaim) {
5068 spin_unlock(&space_info->lock); 5069 spin_unlock(&space_info->lock);
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index d8da3edf2ac3..d3619e010005 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2458,7 +2458,7 @@ void end_extent_writepage(struct page *page, int err, u64 start, u64 end)
2458 if (!uptodate) { 2458 if (!uptodate) {
2459 ClearPageUptodate(page); 2459 ClearPageUptodate(page);
2460 SetPageError(page); 2460 SetPageError(page);
2461 ret = ret < 0 ? ret : -EIO; 2461 ret = err < 0 ? err : -EIO;
2462 mapping_set_error(page->mapping, ret); 2462 mapping_set_error(page->mapping, ret);
2463 } 2463 }
2464} 2464}
@@ -4377,6 +4377,123 @@ static struct extent_map *get_extent_skip_holes(struct inode *inode,
4377 return NULL; 4377 return NULL;
4378} 4378}
4379 4379
4380/*
4381 * To cache the previous fiemap extent
4382 *
4383 * Will be used for merging fiemap extents
4384 */
4385struct fiemap_cache {
4386 u64 offset;
4387 u64 phys;
4388 u64 len;
4389 u32 flags;
4390 bool cached;
4391};
4392
4393/*
4394 * Helper to submit fiemap extent.
4395 *
4396 * Will try to merge the current fiemap extent, specified by @offset, @phys,
4397 * @len and @flags, with the cached one.
4398 * Only when the merge fails is the cached extent submitted as a
4399 * fiemap extent.
4400 *
4401 * Return value is the same as fiemap_fill_next_extent().
4402 */
4403static int emit_fiemap_extent(struct fiemap_extent_info *fieinfo,
4404 struct fiemap_cache *cache,
4405 u64 offset, u64 phys, u64 len, u32 flags)
4406{
4407 int ret = 0;
4408
4409 if (!cache->cached)
4410 goto assign;
4411
4412 /*
4413 * Sanity check: extent_fiemap() should have ensured that the new
4414 * fiemap extent won't overlap with the cached one.
4415 * Not recoverable.
4416 *
4417 * NOTE: Physical address can overlap, due to compression
4418 */
4419 if (cache->offset + cache->len > offset) {
4420 WARN_ON(1);
4421 return -EINVAL;
4422 }
4423
4424 /*
4425 * Only merge fiemap extents if
4426 * 1) Their logical addresses are contiguous
4427 *
4428 * 2) Their physical addresses are contiguous
4429 * So truly compressed (physical size smaller than logical size)
4430 * extents won't get merged with each other
4431 *
4432 * 3) They share the same flags except FIEMAP_EXTENT_LAST
4433 * So a regular extent won't get merged with a prealloc extent
4434 */
4435 if (cache->offset + cache->len == offset &&
4436 cache->phys + cache->len == phys &&
4437 (cache->flags & ~FIEMAP_EXTENT_LAST) ==
4438 (flags & ~FIEMAP_EXTENT_LAST)) {
4439 cache->len += len;
4440 cache->flags |= flags;
4441 goto try_submit_last;
4442 }
4443
4444 /* Not mergeable, need to submit cached one */
4445 ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
4446 cache->len, cache->flags);
4447 cache->cached = false;
4448 if (ret)
4449 return ret;
4450assign:
4451 cache->cached = true;
4452 cache->offset = offset;
4453 cache->phys = phys;
4454 cache->len = len;
4455 cache->flags = flags;
4456try_submit_last:
4457 if (cache->flags & FIEMAP_EXTENT_LAST) {
4458 ret = fiemap_fill_next_extent(fieinfo, cache->offset,
4459 cache->phys, cache->len, cache->flags);
4460 cache->cached = false;
4461 }
4462 return ret;
4463}
4464
4465/*
4466 * Sanity check for fiemap cache
4467 *
4468 * All fiemap cache entries should have been submitted by emit_fiemap_extent().
4469 * Iteration should be terminated either by the last fiemap extent or by
4470 * fieinfo->fi_extents_max.
4471 * So no cached fiemap should remain.
4472 */
4473static int check_fiemap_cache(struct btrfs_fs_info *fs_info,
4474 struct fiemap_extent_info *fieinfo,
4475 struct fiemap_cache *cache)
4476{
4477 int ret;
4478
4479 if (!cache->cached)
4480 return 0;
4481
4482 /* Small and recoverable problem, only to inform the developer */
4483#ifdef CONFIG_BTRFS_DEBUG
4484 WARN_ON(1);
4485#endif
4486 btrfs_warn(fs_info,
4487 "unhandled fiemap cache detected: offset=%llu phys=%llu len=%llu flags=0x%x",
4488 cache->offset, cache->phys, cache->len, cache->flags);
4489 ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
4490 cache->len, cache->flags);
4491 cache->cached = false;
4492 if (ret > 0)
4493 ret = 0;
4494 return ret;
4495}
4496
4380int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, 4497int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
4381 __u64 start, __u64 len, get_extent_t *get_extent) 4498 __u64 start, __u64 len, get_extent_t *get_extent)
4382{ 4499{
@@ -4394,6 +4511,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
4394 struct extent_state *cached_state = NULL; 4511 struct extent_state *cached_state = NULL;
4395 struct btrfs_path *path; 4512 struct btrfs_path *path;
4396 struct btrfs_root *root = BTRFS_I(inode)->root; 4513 struct btrfs_root *root = BTRFS_I(inode)->root;
4514 struct fiemap_cache cache = { 0 };
4397 int end = 0; 4515 int end = 0;
4398 u64 em_start = 0; 4516 u64 em_start = 0;
4399 u64 em_len = 0; 4517 u64 em_len = 0;
@@ -4573,8 +4691,8 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
4573 flags |= FIEMAP_EXTENT_LAST; 4691 flags |= FIEMAP_EXTENT_LAST;
4574 end = 1; 4692 end = 1;
4575 } 4693 }
4576 ret = fiemap_fill_next_extent(fieinfo, em_start, disko, 4694 ret = emit_fiemap_extent(fieinfo, &cache, em_start, disko,
4577 em_len, flags); 4695 em_len, flags);
4578 if (ret) { 4696 if (ret) {
4579 if (ret == 1) 4697 if (ret == 1)
4580 ret = 0; 4698 ret = 0;
@@ -4582,6 +4700,8 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
4582 } 4700 }
4583 } 4701 }
4584out_free: 4702out_free:
4703 if (!ret)
4704 ret = check_fiemap_cache(root->fs_info, fieinfo, &cache);
4585 free_extent_map(em); 4705 free_extent_map(em);
4586out: 4706out:
4587 btrfs_free_path(path); 4707 btrfs_free_path(path);
diff --git a/fs/btrfs/hash.c b/fs/btrfs/hash.c
index a97fdc156a03..baacc1866861 100644
--- a/fs/btrfs/hash.c
+++ b/fs/btrfs/hash.c
@@ -38,6 +38,7 @@ u32 btrfs_crc32c(u32 crc, const void *address, unsigned int length)
38{ 38{
39 SHASH_DESC_ON_STACK(shash, tfm); 39 SHASH_DESC_ON_STACK(shash, tfm);
40 u32 *ctx = (u32 *)shash_desc_ctx(shash); 40 u32 *ctx = (u32 *)shash_desc_ctx(shash);
41 u32 retval;
41 int err; 42 int err;
42 43
43 shash->tfm = tfm; 44 shash->tfm = tfm;
@@ -47,5 +48,7 @@ u32 btrfs_crc32c(u32 crc, const void *address, unsigned int length)
47 err = crypto_shash_update(shash, address, length); 48 err = crypto_shash_update(shash, address, length);
48 BUG_ON(err); 49 BUG_ON(err);
49 50
50 return *ctx; 51 retval = *ctx;
52 barrier_data(ctx);
53 return retval;
51} 54}
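
barrier_data() in the hash.c hunk is a compiler barrier that treats the pointed-to buffer as consumed, so stores to it cannot be discarded as dead before the on-stack descriptor goes out of scope. The same primitive is what memzero_explicit() uses to clear secrets on the stack; a minimal sketch:

    #include <linux/compiler.h>
    #include <linux/string.h>
    #include <linux/types.h>

    static void clear_secret(u8 *buf, size_t len)
    {
            memset(buf, 0, len);
            barrier_data(buf);  /* keeps the memset from being optimized away */
    }
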
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 17cbe9306faf..ef3c98c527c1 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -2952,7 +2952,7 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
2952 2952
2953 ret = test_range_bit(io_tree, ordered_extent->file_offset, 2953 ret = test_range_bit(io_tree, ordered_extent->file_offset,
2954 ordered_extent->file_offset + ordered_extent->len - 1, 2954 ordered_extent->file_offset + ordered_extent->len - 1,
2955 EXTENT_DEFRAG, 1, cached_state); 2955 EXTENT_DEFRAG, 0, cached_state);
2956 if (ret) { 2956 if (ret) {
2957 u64 last_snapshot = btrfs_root_last_snapshot(&root->root_item); 2957 u64 last_snapshot = btrfs_root_last_snapshot(&root->root_item);
2958 if (0 && last_snapshot >= BTRFS_I(inode)->generation) 2958 if (0 && last_snapshot >= BTRFS_I(inode)->generation)
@@ -7483,8 +7483,8 @@ bool btrfs_page_exists_in_range(struct inode *inode, loff_t start, loff_t end)
7483 int found = false; 7483 int found = false;
7484 void **pagep = NULL; 7484 void **pagep = NULL;
7485 struct page *page = NULL; 7485 struct page *page = NULL;
7486 int start_idx; 7486 unsigned long start_idx;
7487 int end_idx; 7487 unsigned long end_idx;
7488 7488
7489 start_idx = start >> PAGE_SHIFT; 7489 start_idx = start >> PAGE_SHIFT;
7490 7490
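
The inode.c type change avoids signed 32-bit page indices: with 4 KiB pages an int wraps at 2^31 pages, i.e. at 8 TiB file offsets. Illustrative userspace arithmetic:

    #include <stdio.h>

    int main(void)
    {
            unsigned long long start = 9ULL << 40;   /* a 9 TiB offset */
            int bad = (int)(start >> 12);   /* out of range: wraps negative */
            unsigned long long good = start >> 12;

            printf("bad=%d good=%llu\n", bad, good);
            return 0;
    }
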
diff --git a/fs/ceph/acl.c b/fs/ceph/acl.c
index 987044bca1c2..59cb307b15fb 100644
--- a/fs/ceph/acl.c
+++ b/fs/ceph/acl.c
@@ -131,6 +131,7 @@ int ceph_set_acl(struct inode *inode, struct posix_acl *acl, int type)
131 } 131 }
132 132
133 if (new_mode != old_mode) { 133 if (new_mode != old_mode) {
134 newattrs.ia_ctime = current_time(inode);
134 newattrs.ia_mode = new_mode; 135 newattrs.ia_mode = new_mode;
135 newattrs.ia_valid = ATTR_MODE; 136 newattrs.ia_valid = ATTR_MODE;
136 ret = __ceph_setattr(inode, &newattrs); 137 ret = __ceph_setattr(inode, &newattrs);
diff --git a/fs/ceph/export.c b/fs/ceph/export.c
index e8f11fa565c5..7df550c13d7f 100644
--- a/fs/ceph/export.c
+++ b/fs/ceph/export.c
@@ -91,6 +91,10 @@ static struct dentry *__fh_to_dentry(struct super_block *sb, u64 ino)
91 ceph_mdsc_put_request(req); 91 ceph_mdsc_put_request(req);
92 if (!inode) 92 if (!inode)
93 return ERR_PTR(-ESTALE); 93 return ERR_PTR(-ESTALE);
94 if (inode->i_nlink == 0) {
95 iput(inode);
96 return ERR_PTR(-ESTALE);
97 }
94 } 98 }
95 99
96 return d_obtain_alias(inode); 100 return d_obtain_alias(inode);
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 3fdde0b283c9..29308a80d66f 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -1671,8 +1671,12 @@ static long ceph_fallocate(struct file *file, int mode,
1671 } 1671 }
1672 1672
1673 size = i_size_read(inode); 1673 size = i_size_read(inode);
1674 if (!(mode & FALLOC_FL_KEEP_SIZE)) 1674 if (!(mode & FALLOC_FL_KEEP_SIZE)) {
1675 endoff = offset + length; 1675 endoff = offset + length;
1676 ret = inode_newsize_ok(inode, endoff);
1677 if (ret)
1678 goto unlock;
1679 }
1676 1680
1677 if (fi->fmode & CEPH_FILE_MODE_LAZY) 1681 if (fi->fmode & CEPH_FILE_MODE_LAZY)
1678 want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO; 1682 want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index dcce79b84406..4de6cdddf059 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -2022,7 +2022,6 @@ int __ceph_setattr(struct inode *inode, struct iattr *attr)
2022 attr->ia_size > inode->i_size) { 2022 attr->ia_size > inode->i_size) {
2023 i_size_write(inode, attr->ia_size); 2023 i_size_write(inode, attr->ia_size);
2024 inode->i_blocks = calc_inode_blocks(attr->ia_size); 2024 inode->i_blocks = calc_inode_blocks(attr->ia_size);
2025 inode->i_ctime = attr->ia_ctime;
2026 ci->i_reported_size = attr->ia_size; 2025 ci->i_reported_size = attr->ia_size;
2027 dirtied |= CEPH_CAP_FILE_EXCL; 2026 dirtied |= CEPH_CAP_FILE_EXCL;
2028 } else if ((issued & CEPH_CAP_FILE_SHARED) == 0 || 2027 } else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
@@ -2044,7 +2043,6 @@ int __ceph_setattr(struct inode *inode, struct iattr *attr)
2044 inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec, 2043 inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
2045 attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec, 2044 attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec,
2046 only ? "ctime only" : "ignored"); 2045 only ? "ctime only" : "ignored");
2047 inode->i_ctime = attr->ia_ctime;
2048 if (only) { 2046 if (only) {
2049 /* 2047 /*
2050 * if kernel wants to dirty ctime but nothing else, 2048 * if kernel wants to dirty ctime but nothing else,
@@ -2067,7 +2065,7 @@ int __ceph_setattr(struct inode *inode, struct iattr *attr)
2067 if (dirtied) { 2065 if (dirtied) {
2068 inode_dirty_flags = __ceph_mark_dirty_caps(ci, dirtied, 2066 inode_dirty_flags = __ceph_mark_dirty_caps(ci, dirtied,
2069 &prealloc_cf); 2067 &prealloc_cf);
2070 inode->i_ctime = current_time(inode); 2068 inode->i_ctime = attr->ia_ctime;
2071 } 2069 }
2072 2070
2073 release &= issued; 2071 release &= issued;
@@ -2085,6 +2083,7 @@ int __ceph_setattr(struct inode *inode, struct iattr *attr)
2085 req->r_inode_drop = release; 2083 req->r_inode_drop = release;
2086 req->r_args.setattr.mask = cpu_to_le32(mask); 2084 req->r_args.setattr.mask = cpu_to_le32(mask);
2087 req->r_num_caps = 1; 2085 req->r_num_caps = 1;
2086 req->r_stamp = attr->ia_ctime;
2088 err = ceph_mdsc_do_request(mdsc, NULL, req); 2087 err = ceph_mdsc_do_request(mdsc, NULL, req);
2089 } 2088 }
2090 dout("setattr %p result=%d (%s locally, %d remote)\n", inode, err, 2089 dout("setattr %p result=%d (%s locally, %d remote)\n", inode, err,
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index f38e56fa9712..0c05df44cc6c 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -1687,7 +1687,6 @@ struct ceph_mds_request *
1687ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode) 1687ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode)
1688{ 1688{
1689 struct ceph_mds_request *req = kzalloc(sizeof(*req), GFP_NOFS); 1689 struct ceph_mds_request *req = kzalloc(sizeof(*req), GFP_NOFS);
1690 struct timespec ts;
1691 1690
1692 if (!req) 1691 if (!req)
1693 return ERR_PTR(-ENOMEM); 1692 return ERR_PTR(-ENOMEM);
@@ -1706,8 +1705,7 @@ ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode)
1706 init_completion(&req->r_safe_completion); 1705 init_completion(&req->r_safe_completion);
1707 INIT_LIST_HEAD(&req->r_unsafe_item); 1706 INIT_LIST_HEAD(&req->r_unsafe_item);
1708 1707
1709 ktime_get_real_ts(&ts); 1708 req->r_stamp = timespec_trunc(current_kernel_time(), mdsc->fsc->sb->s_time_gran);
1710 req->r_stamp = timespec_trunc(ts, mdsc->fsc->sb->s_time_gran);
1711 1709
1712 req->r_op = op; 1710 req->r_op = op;
1713 req->r_direct_mode = mode; 1711 req->r_direct_mode = mode;
diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
index 15bac390dff9..b98436f5c7c7 100644
--- a/fs/cifs/cifsacl.c
+++ b/fs/cifs/cifsacl.c
@@ -1135,20 +1135,19 @@ cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr,
 	u32 acllen = 0;
 	int rc = 0;
 	struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
-	struct cifs_tcon *tcon;
+	struct smb_version_operations *ops;
 
 	cifs_dbg(NOISY, "converting ACL to mode for %s\n", path);
 
 	if (IS_ERR(tlink))
 		return PTR_ERR(tlink);
-	tcon = tlink_tcon(tlink);
 
-	if (pfid && (tcon->ses->server->ops->get_acl_by_fid))
-		pntsd = tcon->ses->server->ops->get_acl_by_fid(cifs_sb, pfid,
-							&acllen);
-	else if (tcon->ses->server->ops->get_acl)
-		pntsd = tcon->ses->server->ops->get_acl(cifs_sb, inode, path,
-							&acllen);
+	ops = tlink_tcon(tlink)->ses->server->ops;
+
+	if (pfid && (ops->get_acl_by_fid))
+		pntsd = ops->get_acl_by_fid(cifs_sb, pfid, &acllen);
+	else if (ops->get_acl)
+		pntsd = ops->get_acl(cifs_sb, inode, path, &acllen);
 	else {
 		cifs_put_tlink(tlink);
 		return -EOPNOTSUPP;
@@ -1181,23 +1180,23 @@ id_mode_to_cifs_acl(struct inode *inode, const char *path, __u64 nmode,
 	struct cifs_ntsd *pnntsd = NULL; /* modified acl to be sent to server */
 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
 	struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
-	struct cifs_tcon *tcon;
+	struct smb_version_operations *ops;
 
 	if (IS_ERR(tlink))
 		return PTR_ERR(tlink);
-	tcon = tlink_tcon(tlink);
+
+	ops = tlink_tcon(tlink)->ses->server->ops;
 
 	cifs_dbg(NOISY, "set ACL from mode for %s\n", path);
 
 	/* Get the security descriptor */
 
-	if (tcon->ses->server->ops->get_acl == NULL) {
+	if (ops->get_acl == NULL) {
 		cifs_put_tlink(tlink);
 		return -EOPNOTSUPP;
 	}
 
-	pntsd = tcon->ses->server->ops->get_acl(cifs_sb, inode, path,
-						&secdesclen);
+	pntsd = ops->get_acl(cifs_sb, inode, path, &secdesclen);
 	if (IS_ERR(pntsd)) {
 		rc = PTR_ERR(pntsd);
 		cifs_dbg(VFS, "%s: error %d getting sec desc\n", __func__, rc);
@@ -1224,13 +1223,12 @@ id_mode_to_cifs_acl(struct inode *inode, const char *path, __u64 nmode,
 
 	cifs_dbg(NOISY, "build_sec_desc rc: %d\n", rc);
 
-	if (tcon->ses->server->ops->set_acl == NULL)
+	if (ops->set_acl == NULL)
 		rc = -EOPNOTSUPP;
 
 	if (!rc) {
 		/* Set the security descriptor */
-		rc = tcon->ses->server->ops->set_acl(pnntsd, secdesclen, inode,
-				path, aclflag);
+		rc = ops->set_acl(pnntsd, secdesclen, inode, path, aclflag);
 		cifs_dbg(NOISY, "set_cifs_acl rc: %d\n", rc);
 	}
 	cifs_put_tlink(tlink);
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 8be55be70faf..bcc7d9acad64 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -418,7 +418,7 @@ struct smb_version_operations {
 	int (*validate_negotiate)(const unsigned int, struct cifs_tcon *);
 	ssize_t (*query_all_EAs)(const unsigned int, struct cifs_tcon *,
 			const unsigned char *, const unsigned char *, char *,
-			size_t, const struct nls_table *, int);
+			size_t, struct cifs_sb_info *);
 	int (*set_EA)(const unsigned int, struct cifs_tcon *, const char *,
 			const char *, const void *, const __u16,
 			const struct nls_table *, int);
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index e49958c3f8bb..6eb3147132e3 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -480,8 +480,7 @@ extern int CIFSSMBCopy(unsigned int xid,
 extern ssize_t CIFSSMBQAllEAs(const unsigned int xid, struct cifs_tcon *tcon,
 			const unsigned char *searchName,
 			const unsigned char *ea_name, char *EAData,
-			size_t bufsize, const struct nls_table *nls_codepage,
-			int remap_special_chars);
+			size_t bufsize, struct cifs_sb_info *cifs_sb);
 extern int CIFSSMBSetEA(const unsigned int xid, struct cifs_tcon *tcon,
 			const char *fileName, const char *ea_name,
 			const void *ea_value, const __u16 ea_value_len,
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 4c01b3f9abf0..fbb0d4cbda41 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -697,9 +697,7 @@ cifs_echo_callback(struct mid_q_entry *mid)
 {
 	struct TCP_Server_Info *server = mid->callback_data;
 
-	mutex_lock(&server->srv_mutex);
 	DeleteMidQEntry(mid);
-	mutex_unlock(&server->srv_mutex);
 	add_credits(server, 1, CIFS_ECHO_OP);
 }
 
@@ -1599,9 +1597,7 @@ cifs_readv_callback(struct mid_q_entry *mid)
 	}
 
 	queue_work(cifsiod_wq, &rdata->work);
-	mutex_lock(&server->srv_mutex);
 	DeleteMidQEntry(mid);
-	mutex_unlock(&server->srv_mutex);
 	add_credits(server, 1, 0);
 }
 
@@ -2058,7 +2054,6 @@ cifs_writev_callback(struct mid_q_entry *mid)
 {
 	struct cifs_writedata *wdata = mid->callback_data;
 	struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
-	struct TCP_Server_Info *server = tcon->ses->server;
 	unsigned int written;
 	WRITE_RSP *smb = (WRITE_RSP *)mid->resp_buf;
 
@@ -2095,9 +2090,7 @@ cifs_writev_callback(struct mid_q_entry *mid)
 	}
 
 	queue_work(cifsiod_wq, &wdata->work);
-	mutex_lock(&server->srv_mutex);
 	DeleteMidQEntry(mid);
-	mutex_unlock(&server->srv_mutex);
 	add_credits(tcon->ses->server, 1, 0);
 }
 
@@ -6076,11 +6069,13 @@ ssize_t
 CIFSSMBQAllEAs(const unsigned int xid, struct cifs_tcon *tcon,
 	       const unsigned char *searchName, const unsigned char *ea_name,
 	       char *EAData, size_t buf_size,
-	       const struct nls_table *nls_codepage, int remap)
+	       struct cifs_sb_info *cifs_sb)
 {
 	/* BB assumes one setup word */
 	TRANSACTION2_QPI_REQ *pSMB = NULL;
 	TRANSACTION2_QPI_RSP *pSMBr = NULL;
+	int remap = cifs_remap(cifs_sb);
+	struct nls_table *nls_codepage = cifs_sb->local_nls;
 	int rc = 0;
 	int bytes_returned;
 	int list_len;
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 6ef78ad838e6..fcef70602b27 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -582,7 +582,7 @@ cifs_relock_file(struct cifsFileInfo *cfile)
 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
 	int rc = 0;
 
-	down_read(&cinode->lock_sem);
+	down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING);
 	if (cinode->can_cache_brlcks) {
 		/* can cache locks - no need to relock */
 		up_read(&cinode->lock_sem);
@@ -3271,7 +3271,7 @@ ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to)
 	if (!is_sync_kiocb(iocb))
 		ctx->iocb = iocb;
 
-	if (to->type & ITER_IOVEC)
+	if (to->type == ITER_IOVEC)
 		ctx->should_dirty = true;
 
 	rc = setup_aio_ctx_iter(ctx, to, READ);
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index c3b2fa0b2ec8..4d1fcd76d022 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -563,8 +563,7 @@ static int cifs_sfu_mode(struct cifs_fattr *fattr, const unsigned char *path,
 
 	rc = tcon->ses->server->ops->query_all_EAs(xid, tcon, path,
 			"SETFILEBITS", ea_value, 4 /* size of buf */,
-			cifs_sb->local_nls,
-			cifs_remap(cifs_sb));
+			cifs_sb);
 	cifs_put_tlink(tlink);
 	if (rc < 0)
 		return (int)rc;
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index b08531977daa..3b147dc6af63 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -810,7 +810,7 @@ setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw)
 
 	if (!pages) {
 		pages = vmalloc(max_pages * sizeof(struct page *));
-		if (!bv) {
+		if (!pages) {
 			kvfree(bv);
 			return -ENOMEM;
 		}
diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
index 27bc360c7ffd..a723df3e0197 100644
--- a/fs/cifs/smb1ops.c
+++ b/fs/cifs/smb1ops.c
@@ -849,8 +849,13 @@ cifs_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
 		     struct cifs_fid *fid, __u16 search_flags,
 		     struct cifs_search_info *srch_inf)
 {
-	return CIFSFindFirst(xid, tcon, path, cifs_sb,
-			     &fid->netfid, search_flags, srch_inf, true);
+	int rc;
+
+	rc = CIFSFindFirst(xid, tcon, path, cifs_sb,
+			   &fid->netfid, search_flags, srch_inf, true);
+	if (rc)
+		cifs_dbg(FYI, "find first failed=%d\n", rc);
+	return rc;
 }
 
 static int
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index c58691834eb2..7e48561abd29 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -982,7 +982,7 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
 	rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL);
 	kfree(utf16_path);
 	if (rc) {
-		cifs_dbg(VFS, "open dir failed\n");
+		cifs_dbg(FYI, "open dir failed rc=%d\n", rc);
 		return rc;
 	}
 
@@ -992,7 +992,7 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
 	rc = SMB2_query_directory(xid, tcon, fid->persistent_fid,
 				  fid->volatile_fid, 0, srch_inf);
 	if (rc) {
-		cifs_dbg(VFS, "query directory failed\n");
+		cifs_dbg(FYI, "query directory failed rc=%d\n", rc);
 		SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
 	}
 	return rc;
@@ -1809,7 +1809,8 @@ crypt_message(struct TCP_Server_Info *server, struct smb_rqst *rqst, int enc)
 
 	sg = init_sg(rqst, sign);
 	if (!sg) {
-		cifs_dbg(VFS, "%s: Failed to init sg %d", __func__, rc);
+		cifs_dbg(VFS, "%s: Failed to init sg", __func__);
+		rc = -ENOMEM;
 		goto free_req;
 	}
 
@@ -1817,6 +1818,7 @@ crypt_message(struct TCP_Server_Info *server, struct smb_rqst *rqst, int enc)
 	iv = kzalloc(iv_len, GFP_KERNEL);
 	if (!iv) {
 		cifs_dbg(VFS, "%s: Failed to alloc IV", __func__);
+		rc = -ENOMEM;
 		goto free_sg;
 	}
 	iv[0] = 3;
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 48ff7703b919..e4afdaae743f 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -1240,15 +1240,19 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
 		goto tcon_exit;
 	}
 
-	if (rsp->ShareType & SMB2_SHARE_TYPE_DISK)
+	switch (rsp->ShareType) {
+	case SMB2_SHARE_TYPE_DISK:
 		cifs_dbg(FYI, "connection to disk share\n");
-	else if (rsp->ShareType & SMB2_SHARE_TYPE_PIPE) {
+		break;
+	case SMB2_SHARE_TYPE_PIPE:
 		tcon->ipc = true;
 		cifs_dbg(FYI, "connection to pipe share\n");
-	} else if (rsp->ShareType & SMB2_SHARE_TYPE_PRINT) {
-		tcon->print = true;
+		break;
+	case SMB2_SHARE_TYPE_PRINT:
+		tcon->ipc = true;
 		cifs_dbg(FYI, "connection to printer\n");
-	} else {
+		break;
+	default:
 		cifs_dbg(VFS, "unknown share type %d\n", rsp->ShareType);
 		rc = -EOPNOTSUPP;
 		goto tcon_error_exit;
@@ -2173,9 +2177,7 @@ smb2_echo_callback(struct mid_q_entry *mid)
 	if (mid->mid_state == MID_RESPONSE_RECEIVED)
 		credits_received = le16_to_cpu(rsp->hdr.sync_hdr.CreditRequest);
 
-	mutex_lock(&server->srv_mutex);
 	DeleteMidQEntry(mid);
-	mutex_unlock(&server->srv_mutex);
 	add_credits(server, credits_received, CIFS_ECHO_OP);
 }
 
@@ -2433,9 +2435,7 @@ smb2_readv_callback(struct mid_q_entry *mid)
 		cifs_stats_fail_inc(tcon, SMB2_READ_HE);
 
 	queue_work(cifsiod_wq, &rdata->work);
-	mutex_lock(&server->srv_mutex);
 	DeleteMidQEntry(mid);
-	mutex_unlock(&server->srv_mutex);
 	add_credits(server, credits_received, 0);
 }
 
@@ -2594,7 +2594,6 @@ smb2_writev_callback(struct mid_q_entry *mid)
 {
 	struct cifs_writedata *wdata = mid->callback_data;
 	struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
-	struct TCP_Server_Info *server = tcon->ses->server;
 	unsigned int written;
 	struct smb2_write_rsp *rsp = (struct smb2_write_rsp *)mid->resp_buf;
 	unsigned int credits_received = 1;
@@ -2634,9 +2633,7 @@ smb2_writev_callback(struct mid_q_entry *mid)
 		cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
 
 	queue_work(cifsiod_wq, &wdata->work);
-	mutex_lock(&server->srv_mutex);
 	DeleteMidQEntry(mid);
-	mutex_unlock(&server->srv_mutex);
 	add_credits(tcon->ses->server, credits_received, 0);
 }
 
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 4d64b5b8fc9c..47a125ece11e 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -94,7 +94,7 @@ DeleteMidQEntry(struct mid_q_entry *midEntry)
 	now = jiffies;
 	/* commands taking longer than one second are indications that
 	   something is wrong, unless it is quite a slow link or server */
-	if ((now - midEntry->when_alloc) > HZ) {
+	if (time_after(now, midEntry->when_alloc + HZ)) {
 		if ((cifsFYI & CIFS_TIMER) && (midEntry->command != command)) {
 			pr_debug(" CIFS slow rsp: cmd %d mid %llu",
 			       midEntry->command, midEntry->mid);
@@ -613,9 +613,7 @@ cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
 	}
 	spin_unlock(&GlobalMid_Lock);
 
-	mutex_lock(&server->srv_mutex);
 	DeleteMidQEntry(mid);
-	mutex_unlock(&server->srv_mutex);
 	return rc;
 }
 
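time_after() in the DeleteMidQEntry() hunk is the idiomatic, wraparound-safe way to order jiffies values: it takes the unsigned difference and tests its sign instead of comparing the raw counters. A userspace sketch of the idea (ticks_after() is a hypothetical stand-in, not the kernel macro):

	#include <stdio.h>

	/* Free-running unsigned tick counter, a stand-in for jiffies. */
	typedef unsigned long ticks_t;

	#define ticks_after(a, b) ((long)((b) - (a)) < 0)

	int main(void)
	{
		ticks_t hz = 100;			/* one "second" of ticks */
		ticks_t when_alloc = (ticks_t)-201;	/* shortly before the counter wraps */
		ticks_t now = 50;			/* 251 ticks later; counter has wrapped */

		/* raw comparison misses the timeout across the wrap */
		printf("naive a > b: %d\n", now > when_alloc + hz);		/* 0 */
		/* signed-difference test still sees that now is past the deadline */
		printf("ticks_after: %d\n", ticks_after(now, when_alloc + hz));	/* 1 */
		return 0;
	}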
diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c
index 20af5187ba63..de50e749ff05 100644
--- a/fs/cifs/xattr.c
+++ b/fs/cifs/xattr.c
@@ -188,8 +188,6 @@ static int cifs_creation_time_get(struct dentry *dentry, struct inode *inode,
 	pcreatetime = (__u64 *)value;
 	*pcreatetime = CIFS_I(inode)->createtime;
 	return sizeof(__u64);
-
-	return rc;
 }
 
 
@@ -235,8 +233,7 @@ static int cifs_xattr_get(const struct xattr_handler *handler,
 
 		if (pTcon->ses->server->ops->query_all_EAs)
 			rc = pTcon->ses->server->ops->query_all_EAs(xid, pTcon,
-				full_path, name, value, size,
-				cifs_sb->local_nls, cifs_remap(cifs_sb));
+				full_path, name, value, size, cifs_sb);
 		break;
 
 	case XATTR_CIFS_ACL: {
@@ -336,8 +333,7 @@ ssize_t cifs_listxattr(struct dentry *direntry, char *data, size_t buf_size)
 
 	if (pTcon->ses->server->ops->query_all_EAs)
 		rc = pTcon->ses->server->ops->query_all_EAs(xid, pTcon,
-			full_path, NULL, data, buf_size,
-			cifs_sb->local_nls, cifs_remap(cifs_sb));
+			full_path, NULL, data, buf_size, cifs_sb);
 list_ea_exit:
 	kfree(full_path);
 	free_xid(xid);
diff --git a/fs/configfs/item.c b/fs/configfs/item.c
index 8b2a994042dd..a66f6624d899 100644
--- a/fs/configfs/item.c
+++ b/fs/configfs/item.c
@@ -138,6 +138,14 @@ struct config_item *config_item_get(struct config_item *item)
 }
 EXPORT_SYMBOL(config_item_get);
 
+struct config_item *config_item_get_unless_zero(struct config_item *item)
+{
+	if (item && kref_get_unless_zero(&item->ci_kref))
+		return item;
+	return NULL;
+}
+EXPORT_SYMBOL(config_item_get_unless_zero);
+
 static void config_item_cleanup(struct config_item *item)
 {
 	struct config_item_type *t = item->ci_type;
diff --git a/fs/configfs/symlink.c b/fs/configfs/symlink.c
index a6ab012a2c6a..c8aabba502f6 100644
--- a/fs/configfs/symlink.c
+++ b/fs/configfs/symlink.c
@@ -83,14 +83,13 @@ static int create_link(struct config_item *parent_item,
 	ret = -ENOMEM;
 	sl = kmalloc(sizeof(struct configfs_symlink), GFP_KERNEL);
 	if (sl) {
-		sl->sl_target = config_item_get(item);
 		spin_lock(&configfs_dirent_lock);
 		if (target_sd->s_type & CONFIGFS_USET_DROPPING) {
 			spin_unlock(&configfs_dirent_lock);
-			config_item_put(item);
 			kfree(sl);
 			return -ENOENT;
 		}
+		sl->sl_target = config_item_get(item);
 		list_add(&sl->sl_list, &target_sd->s_links);
 		spin_unlock(&configfs_dirent_lock);
 		ret = configfs_create_link(sl, parent_item->ci_dentry,
diff --git a/fs/dax.c b/fs/dax.c
index c22eaf162f95..9187f3b07f3e 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -859,6 +859,7 @@ int dax_writeback_mapping_range(struct address_space *mapping,
 			if (ret < 0)
 				goto out;
 		}
+		start_index = indices[pvec.nr - 1] + 1;
 	}
 out:
 	put_dax(dax_dev);
@@ -1155,6 +1156,17 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf,
 	}
 
 	/*
+	 * It is possible, particularly with mixed reads & writes to private
+	 * mappings, that we have raced with a PMD fault that overlaps with
+	 * the PTE we need to set up. If so just return and the fault will be
+	 * retried.
+	 */
+	if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) {
+		vmf_ret = VM_FAULT_NOPAGE;
+		goto unlock_entry;
+	}
+
+	/*
 	 * Note that we don't bother to use iomap_apply here: DAX required
 	 * the file system block size to be equal the page size, which means
 	 * that we never have to deal with more than a single extent here.
@@ -1398,6 +1410,18 @@ static int dax_iomap_pmd_fault(struct vm_fault *vmf,
 		goto fallback;
 
 	/*
+	 * It is possible, particularly with mixed reads & writes to private
+	 * mappings, that we have raced with a PTE fault that overlaps with
+	 * the PMD we need to set up. If so just return and the fault will be
+	 * retried.
+	 */
+	if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) &&
+	    !pmd_devmap(*vmf->pmd)) {
+		result = 0;
+		goto unlock_entry;
+	}
+
+	/*
 	 * Note that we don't use iomap_apply here. We aren't doing I/O, only
 	 * setting up a mapping, so really we're using iomap_begin() as a way
 	 * to look up our filesystem block.
diff --git a/fs/dcache.c b/fs/dcache.c
index cddf39777835..a9f995f6859e 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1494,7 +1494,7 @@ static void check_and_drop(void *_data)
 {
 	struct detach_data *data = _data;
 
-	if (!data->mountpoint && !data->select.found)
+	if (!data->mountpoint && list_empty(&data->select.dispose))
 		__d_drop(data->select.start);
 }
 
@@ -1536,17 +1536,15 @@ void d_invalidate(struct dentry *dentry)
 
 		d_walk(dentry, &data, detach_and_collect, check_and_drop);
 
-		if (data.select.found)
+		if (!list_empty(&data.select.dispose))
 			shrink_dentry_list(&data.select.dispose);
+		else if (!data.mountpoint)
+			return;
 
 		if (data.mountpoint) {
 			detach_mounts(data.mountpoint);
 			dput(data.mountpoint);
 		}
-
-		if (!data.mountpoint && !data.select.found)
-			break;
-
 		cond_resched();
 	}
 }
diff --git a/fs/exec.c b/fs/exec.c
index 72934df68471..904199086490 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -220,8 +220,26 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
 
 	if (write) {
 		unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start;
+		unsigned long ptr_size;
 		struct rlimit *rlim;
 
+		/*
+		 * Since the stack will hold pointers to the strings, we
+		 * must account for them as well.
+		 *
+		 * The size calculation is the entire vma while each arg page is
+		 * built, so each time we get here it's calculating how far it
+		 * is currently (rather than each call being just the newly
+		 * added size from the arg page). As a result, we need to
+		 * always add the entire size of the pointers, so that on the
+		 * last call to get_arg_page() we'll actually have the entire
+		 * correct size.
+		 */
+		ptr_size = (bprm->argc + bprm->envc) * sizeof(void *);
+		if (ptr_size > ULONG_MAX - size)
+			goto fail;
+		size += ptr_size;
+
 		acct_arg_size(bprm, size / PAGE_SIZE);
 
 		/*
@@ -239,13 +257,15 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
 		 * to work from.
 		 */
 		rlim = current->signal->rlim;
-		if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4) {
-			put_page(page);
-			return NULL;
-		}
+		if (size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4)
+			goto fail;
 	}
 
 	return page;
+
+fail:
+	put_page(page);
+	return NULL;
 }
 
 static void put_arg_page(struct page *page)
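The get_arg_page() fix accounts for the argv/envp pointer array and guards the addition against unsigned wraparound before applying the RLIMIT_STACK/4 cap. A standalone sketch of that overflow-safe accumulate-then-check shape (the limit value here is arbitrary):

	#include <limits.h>
	#include <stdio.h>

	/* Returns 0 if adding ptr_size to size would wrap, or if the total
	 * would exceed a quarter of limit; 1 otherwise. Mirrors the
	 * "check against ULONG_MAX - size before adding" shape above. */
	static int arg_size_ok(unsigned long size, unsigned long ptr_size,
			       unsigned long limit)
	{
		if (ptr_size > ULONG_MAX - size)	/* size + ptr_size would wrap */
			return 0;
		size += ptr_size;
		return size <= limit / 4;		/* same quarter-of-the-limit rule */
	}

	int main(void)
	{
		printf("%d\n", arg_size_ok(4096, 64 * sizeof(void *), 8UL << 20)); /* 1 */
		printf("%d\n", arg_size_ok(ULONG_MAX - 8, 64, 8UL << 20));	   /* 0 */
		return 0;
	}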
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index 26d77f9f8c12..2dcbd5698884 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -817,7 +817,7 @@ static int ext2_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
 	iomap->bdev = bdev;
 	iomap->offset = (u64)first_block << blkbits;
 	if (blk_queue_dax(bdev->bd_queue))
-		iomap->dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
+		iomap->dax_dev = fs_dax_get_by_host(bdev->bd_disk->disk_name);
 	else
 		iomap->dax_dev = NULL;
 
@@ -841,7 +841,7 @@ static int
 ext2_iomap_end(struct inode *inode, loff_t offset, loff_t length,
 		ssize_t written, unsigned flags, struct iomap *iomap)
 {
-	put_dax(iomap->dax_dev);
+	fs_put_dax(iomap->dax_dev);
 	if (iomap->type == IOMAP_MAPPED &&
 	    written < length &&
 	    (flags & IOMAP_WRITE))
diff --git a/fs/ext4/acl.c b/fs/ext4/acl.c
index fd389935ecd1..3ec0e46de95f 100644
--- a/fs/ext4/acl.c
+++ b/fs/ext4/acl.c
@@ -4,6 +4,7 @@
  * Copyright (C) 2001-2003 Andreas Gruenbacher, <agruen@suse.de>
  */
 
+#include <linux/quotaops.h>
 #include "ext4_jbd2.h"
 #include "ext4.h"
 #include "xattr.h"
@@ -232,6 +233,9 @@ ext4_set_acl(struct inode *inode, struct posix_acl *acl, int type)
 	handle_t *handle;
 	int error, retries = 0;
 
+	error = dquot_initialize(inode);
+	if (error)
+		return error;
 retry:
 	handle = ext4_journal_start(inode, EXT4_HT_XATTR,
 				    ext4_jbd2_credits_xattr(inode));
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 8e8046104f4d..32191548abed 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -2523,7 +2523,6 @@ extern int ext4_search_dir(struct buffer_head *bh,
 			   int buf_size,
 			   struct inode *dir,
 			   struct ext4_filename *fname,
-			   const struct qstr *d_name,
 			   unsigned int offset,
 			   struct ext4_dir_entry_2 **res_dir);
 extern int ext4_generic_delete_entry(handle_t *handle,
@@ -3007,7 +3006,6 @@ extern int htree_inlinedir_to_tree(struct file *dir_file,
 				   int *has_inline_data);
 extern struct buffer_head *ext4_find_inline_entry(struct inode *dir,
 					struct ext4_filename *fname,
-					const struct qstr *d_name,
 					struct ext4_dir_entry_2 **res_dir,
 					int *has_inline_data);
 extern int ext4_delete_inline_entry(handle_t *handle,
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 2a97dff87b96..3e36508610b7 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -3413,13 +3413,13 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
 	struct ext4_sb_info *sbi;
 	struct ext4_extent_header *eh;
 	struct ext4_map_blocks split_map;
-	struct ext4_extent zero_ex;
+	struct ext4_extent zero_ex1, zero_ex2;
 	struct ext4_extent *ex, *abut_ex;
 	ext4_lblk_t ee_block, eof_block;
 	unsigned int ee_len, depth, map_len = map->m_len;
 	int allocated = 0, max_zeroout = 0;
 	int err = 0;
-	int split_flag = 0;
+	int split_flag = EXT4_EXT_DATA_VALID2;
 
 	ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical"
 		"block %llu, max_blocks %u\n", inode->i_ino,
@@ -3436,7 +3436,8 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
 	ex = path[depth].p_ext;
 	ee_block = le32_to_cpu(ex->ee_block);
 	ee_len = ext4_ext_get_actual_len(ex);
-	zero_ex.ee_len = 0;
+	zero_ex1.ee_len = 0;
+	zero_ex2.ee_len = 0;
 
 	trace_ext4_ext_convert_to_initialized_enter(inode, map, ex);
 
@@ -3576,62 +3577,52 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
 	if (ext4_encrypted_inode(inode))
 		max_zeroout = 0;
 
-	/* If extent is less than s_max_zeroout_kb, zeroout directly */
-	if (max_zeroout && (ee_len <= max_zeroout)) {
-		err = ext4_ext_zeroout(inode, ex);
-		if (err)
-			goto out;
-		zero_ex.ee_block = ex->ee_block;
-		zero_ex.ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex));
-		ext4_ext_store_pblock(&zero_ex, ext4_ext_pblock(ex));
-
-		err = ext4_ext_get_access(handle, inode, path + depth);
-		if (err)
-			goto out;
-		ext4_ext_mark_initialized(ex);
-		ext4_ext_try_to_merge(handle, inode, path, ex);
-		err = ext4_ext_dirty(handle, inode, path + path->p_depth);
-		goto out;
-	}
-
 	/*
-	 * four cases:
+	 * five cases:
 	 * 1. split the extent into three extents.
-	 * 2. split the extent into two extents, zeroout the first half.
-	 * 3. split the extent into two extents, zeroout the second half.
+	 * 2. split the extent into two extents, zeroout the head of the first
+	 *    extent.
+	 * 3. split the extent into two extents, zeroout the tail of the second
+	 *    extent.
	 * 4. split the extent into two extents with out zeroout.
+	 * 5. no splitting needed, just possibly zeroout the head and / or the
+	 *    tail of the extent.
 	 */
 	split_map.m_lblk = map->m_lblk;
 	split_map.m_len = map->m_len;
 
-	if (max_zeroout && (allocated > map->m_len)) {
+	if (max_zeroout && (allocated > split_map.m_len)) {
 		if (allocated <= max_zeroout) {
-			/* case 3 */
-			zero_ex.ee_block =
-					 cpu_to_le32(map->m_lblk);
-			zero_ex.ee_len = cpu_to_le16(allocated);
-			ext4_ext_store_pblock(&zero_ex,
-				ext4_ext_pblock(ex) + map->m_lblk - ee_block);
-			err = ext4_ext_zeroout(inode, &zero_ex);
+			/* case 3 or 5 */
+			zero_ex1.ee_block =
+				 cpu_to_le32(split_map.m_lblk +
+					     split_map.m_len);
+			zero_ex1.ee_len =
+				cpu_to_le16(allocated - split_map.m_len);
+			ext4_ext_store_pblock(&zero_ex1,
+				ext4_ext_pblock(ex) + split_map.m_lblk +
+				split_map.m_len - ee_block);
+			err = ext4_ext_zeroout(inode, &zero_ex1);
 			if (err)
 				goto out;
-			split_map.m_lblk = map->m_lblk;
 			split_map.m_len = allocated;
-		} else if (map->m_lblk - ee_block + map->m_len < max_zeroout) {
-			/* case 2 */
-			if (map->m_lblk != ee_block) {
-				zero_ex.ee_block = ex->ee_block;
-				zero_ex.ee_len = cpu_to_le16(map->m_lblk -
+		}
+		if (split_map.m_lblk - ee_block + split_map.m_len <
+								max_zeroout) {
+			/* case 2 or 5 */
+			if (split_map.m_lblk != ee_block) {
+				zero_ex2.ee_block = ex->ee_block;
+				zero_ex2.ee_len = cpu_to_le16(split_map.m_lblk -
 							ee_block);
-				ext4_ext_store_pblock(&zero_ex,
+				ext4_ext_store_pblock(&zero_ex2,
 						      ext4_ext_pblock(ex));
-				err = ext4_ext_zeroout(inode, &zero_ex);
+				err = ext4_ext_zeroout(inode, &zero_ex2);
 				if (err)
 					goto out;
 			}
 
+			split_map.m_len += split_map.m_lblk - ee_block;
 			split_map.m_lblk = ee_block;
-			split_map.m_len = map->m_lblk - ee_block + map->m_len;
 			allocated = map->m_len;
 		}
 	}
@@ -3642,8 +3633,11 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
 	err = 0;
 out:
 	/* If we have gotten a failure, don't zero out status tree */
-	if (!err)
-		err = ext4_zeroout_es(inode, &zero_ex);
+	if (!err) {
+		err = ext4_zeroout_es(inode, &zero_ex1);
+		if (!err)
+			err = ext4_zeroout_es(inode, &zero_ex2);
+	}
 	return err ? err : allocated;
 }
 
@@ -4883,6 +4877,8 @@ static long ext4_zero_range(struct file *file, loff_t offset,
 
 	/* Zero out partial block at the edges of the range */
 	ret = ext4_zero_partial_blocks(handle, inode, offset, len);
+	if (ret >= 0)
+		ext4_update_inode_fsync_trans(handle, inode, 1);
 
 	if (file->f_flags & O_SYNC)
 		ext4_handle_sync(handle);
@@ -5569,6 +5565,7 @@ int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
 		ext4_handle_sync(handle);
 	inode->i_mtime = inode->i_ctime = current_time(inode);
 	ext4_mark_inode_dirty(handle, inode);
+	ext4_update_inode_fsync_trans(handle, inode, 1);
 
 out_stop:
 	ext4_journal_stop(handle);
@@ -5742,6 +5739,8 @@ int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
 	up_write(&EXT4_I(inode)->i_data_sem);
 	if (IS_SYNC(inode))
 		ext4_handle_sync(handle);
+	if (ret >= 0)
+		ext4_update_inode_fsync_trans(handle, inode, 1);
 
 out_stop:
 	ext4_journal_stop(handle);
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 831fd6beebf0..02ce7e7bbdf5 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -474,57 +474,37 @@ static int ext4_find_unwritten_pgoff(struct inode *inode,
 	endoff = (loff_t)end_blk << blkbits;
 
 	index = startoff >> PAGE_SHIFT;
-	end = endoff >> PAGE_SHIFT;
+	end = (endoff - 1) >> PAGE_SHIFT;
 
 	pagevec_init(&pvec, 0);
 	do {
 		int i, num;
 		unsigned long nr_pages;
 
-		num = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
+		num = min_t(pgoff_t, end - index, PAGEVEC_SIZE - 1) + 1;
 		nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
 					  (pgoff_t)num);
-		if (nr_pages == 0) {
-			if (whence == SEEK_DATA)
-				break;
-
-			BUG_ON(whence != SEEK_HOLE);
-			/*
-			 * If this is the first time to go into the loop and
-			 * offset is not beyond the end offset, it will be a
-			 * hole at this offset
-			 */
-			if (lastoff == startoff || lastoff < endoff)
-				found = 1;
-			break;
-		}
-
-		/*
-		 * If this is the first time to go into the loop and
-		 * offset is smaller than the first page offset, it will be a
-		 * hole at this offset.
-		 */
-		if (lastoff == startoff && whence == SEEK_HOLE &&
-		    lastoff < page_offset(pvec.pages[0])) {
-			found = 1;
+		if (nr_pages == 0)
 			break;
-		}
 
 		for (i = 0; i < nr_pages; i++) {
 			struct page *page = pvec.pages[i];
 			struct buffer_head *bh, *head;
 
 			/*
-			 * If the current offset is not beyond the end of given
-			 * range, it will be a hole.
+			 * If current offset is smaller than the page offset,
+			 * there is a hole at this offset.
 			 */
-			if (lastoff < endoff && whence == SEEK_HOLE &&
-			    page->index > end) {
+			if (whence == SEEK_HOLE && lastoff < endoff &&
+			    lastoff < page_offset(pvec.pages[i])) {
 				found = 1;
 				*offset = lastoff;
 				goto out;
 			}
 
+			if (page->index > end)
+				goto out;
+
 			lock_page(page);
 
 			if (unlikely(page->mapping != inode->i_mapping)) {
@@ -564,20 +544,18 @@ static int ext4_find_unwritten_pgoff(struct inode *inode,
 			unlock_page(page);
 		}
 
-		/*
-		 * The no. of pages is less than our desired, that would be a
-		 * hole in there.
-		 */
-		if (nr_pages < num && whence == SEEK_HOLE) {
-			found = 1;
-			*offset = lastoff;
+		/* The no. of pages is less than our desired, we are done. */
+		if (nr_pages < num)
 			break;
-		}
 
 		index = pvec.pages[i - 1]->index + 1;
 		pagevec_release(&pvec);
 	} while (index <= end);
 
+	if (whence == SEEK_HOLE && lastoff < endoff) {
+		found = 1;
+		*offset = lastoff;
+	}
 out:
 	pagevec_release(&pvec);
 	return found;
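The ext4_find_unwritten_pgoff() rewrite makes `end` the index of the last page that actually contains a byte of the range; `endoff >> PAGE_SHIFT` names one page too many whenever endoff is page-aligned. The arithmetic, assuming 4 KiB pages for illustration:

	#include <stdio.h>

	#define PAGE_SHIFT 12	/* assume 4 KiB pages for the example */

	int main(void)
	{
		unsigned long long endoff = 8192;	/* range [0, 8192): pages 0 and 1 */

		/* old form: names page 2, which holds no byte of the range */
		printf("exclusive: last page = %llu\n", endoff >> PAGE_SHIFT);
		/* fixed form: index of the page holding the last byte */
		printf("inclusive: last page = %llu\n", (endoff - 1) >> PAGE_SHIFT);
		return 0;
	}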
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index d5dea4c293ef..8d141c0c8ff9 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -1627,7 +1627,6 @@ out:
 
 struct buffer_head *ext4_find_inline_entry(struct inode *dir,
 					struct ext4_filename *fname,
-					const struct qstr *d_name,
 					struct ext4_dir_entry_2 **res_dir,
 					int *has_inline_data)
 {
@@ -1649,7 +1648,7 @@ struct buffer_head *ext4_find_inline_entry(struct inode *dir,
 			EXT4_INLINE_DOTDOT_SIZE;
 	inline_size = EXT4_MIN_INLINE_DATA_SIZE - EXT4_INLINE_DOTDOT_SIZE;
 	ret = ext4_search_dir(iloc.bh, inline_start, inline_size,
-			      dir, fname, d_name, 0, res_dir);
+			      dir, fname, 0, res_dir);
 	if (ret == 1)
 		goto out_find;
 	if (ret < 0)
@@ -1662,7 +1661,7 @@ struct buffer_head *ext4_find_inline_entry(struct inode *dir,
 	inline_size = ext4_get_inline_size(dir) - EXT4_MIN_INLINE_DATA_SIZE;
 
 	ret = ext4_search_dir(iloc.bh, inline_start, inline_size,
-			      dir, fname, d_name, 0, res_dir);
+			      dir, fname, 0, res_dir);
 	if (ret == 1)
 		goto out_find;
 
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 5834c4d76be8..5cf82d03968c 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -2124,15 +2124,29 @@ static int ext4_writepage(struct page *page,
 static int mpage_submit_page(struct mpage_da_data *mpd, struct page *page)
 {
 	int len;
-	loff_t size = i_size_read(mpd->inode);
+	loff_t size;
 	int err;
 
 	BUG_ON(page->index != mpd->first_page);
+	clear_page_dirty_for_io(page);
+	/*
+	 * We have to be very careful here! Nothing protects writeback path
+	 * against i_size changes and the page can be writeably mapped into
+	 * page tables. So an application can be growing i_size and writing
+	 * data through mmap while writeback runs. clear_page_dirty_for_io()
+	 * write-protects our page in page tables and the page cannot get
+	 * written to again until we release page lock. So only after
+	 * clear_page_dirty_for_io() we are safe to sample i_size for
+	 * ext4_bio_write_page() to zero-out tail of the written page. We rely
+	 * on the barrier provided by TestClearPageDirty in
+	 * clear_page_dirty_for_io() to make sure i_size is really sampled only
+	 * after page tables are updated.
+	 */
+	size = i_size_read(mpd->inode);
 	if (page->index == size >> PAGE_SHIFT)
 		len = size & ~PAGE_MASK;
 	else
 		len = PAGE_SIZE;
-	clear_page_dirty_for_io(page);
 	err = ext4_bio_write_page(&mpd->io_submit, page, len, mpd->wbc, false);
 	if (!err)
 		mpd->wbc->nr_to_write--;
@@ -3412,7 +3426,7 @@ retry:
 	bdev = inode->i_sb->s_bdev;
 	iomap->bdev = bdev;
 	if (blk_queue_dax(bdev->bd_queue))
-		iomap->dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
+		iomap->dax_dev = fs_dax_get_by_host(bdev->bd_disk->disk_name);
 	else
 		iomap->dax_dev = NULL;
 	iomap->offset = first_block << blkbits;
@@ -3447,7 +3461,7 @@ static int ext4_iomap_end(struct inode *inode, loff_t offset, loff_t length,
 	int blkbits = inode->i_blkbits;
 	bool truncate = false;
 
-	put_dax(iomap->dax_dev);
+	fs_put_dax(iomap->dax_dev);
 	if (!(flags & IOMAP_WRITE) || (flags & IOMAP_FAULT))
 		return 0;
 
@@ -3629,9 +3643,6 @@ static ssize_t ext4_direct_IO_write(struct kiocb *iocb, struct iov_iter *iter)
 		get_block_func = ext4_dio_get_block_unwritten_async;
 		dio_flags = DIO_LOCKING;
 	}
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
-	BUG_ON(ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode));
-#endif
 	ret = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter,
 				   get_block_func, ext4_end_io_dio, NULL,
 				   dio_flags);
@@ -3713,7 +3724,7 @@ static ssize_t ext4_direct_IO_read(struct kiocb *iocb, struct iov_iter *iter)
 	 */
 	inode_lock_shared(inode);
 	ret = filemap_write_and_wait_range(mapping, iocb->ki_pos,
-					   iocb->ki_pos + count);
+					   iocb->ki_pos + count - 1);
 	if (ret)
 		goto out_unlock;
 	ret = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev,
@@ -4207,6 +4218,8 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
 
 	inode->i_mtime = inode->i_ctime = current_time(inode);
 	ext4_mark_inode_dirty(handle, inode);
+	if (ret >= 0)
+		ext4_update_inode_fsync_trans(handle, inode, 1);
 out_stop:
 	ext4_journal_stop(handle);
 out_dio:
@@ -5637,8 +5650,9 @@ static int ext4_expand_extra_isize(struct inode *inode,
 	/* No extended attributes present */
 	if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
 	    header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
-		memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0,
-			new_extra_isize);
+		memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE +
+		       EXT4_I(inode)->i_extra_isize, 0,
+		       new_extra_isize - EXT4_I(inode)->i_extra_isize);
 		EXT4_I(inode)->i_extra_isize = new_extra_isize;
 		return 0;
 	}
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 5083bce20ac4..b7928cddd539 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -3887,7 +3887,8 @@ ext4_mb_discard_group_preallocations(struct super_block *sb,
 
 	err = ext4_mb_load_buddy(sb, group, &e4b);
 	if (err) {
-		ext4_error(sb, "Error loading buddy information for %u", group);
+		ext4_warning(sb, "Error %d loading buddy information for %u",
+			     err, group);
 		put_bh(bitmap_bh);
 		return 0;
 	}
@@ -4044,10 +4045,11 @@ repeat:
 		BUG_ON(pa->pa_type != MB_INODE_PA);
 		group = ext4_get_group_number(sb, pa->pa_pstart);
 
-		err = ext4_mb_load_buddy(sb, group, &e4b);
+		err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
+					     GFP_NOFS|__GFP_NOFAIL);
 		if (err) {
-			ext4_error(sb, "Error loading buddy information for %u",
-					group);
+			ext4_error(sb, "Error %d loading buddy information for %u",
+				   err, group);
 			continue;
 		}
 
@@ -4303,11 +4305,14 @@ ext4_mb_discard_lg_preallocations(struct super_block *sb,
 	spin_unlock(&lg->lg_prealloc_lock);
 
 	list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) {
+		int err;
 
 		group = ext4_get_group_number(sb, pa->pa_pstart);
-		if (ext4_mb_load_buddy(sb, group, &e4b)) {
-			ext4_error(sb, "Error loading buddy information for %u",
-					group);
+		err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
+					     GFP_NOFS|__GFP_NOFAIL);
+		if (err) {
+			ext4_error(sb, "Error %d loading buddy information for %u",
+				   err, group);
 			continue;
 		}
 		ext4_lock_group(sb, group);
@@ -5127,8 +5132,8 @@ ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
 
 	ret = ext4_mb_load_buddy(sb, group, &e4b);
 	if (ret) {
-		ext4_error(sb, "Error in loading buddy "
-				"information for %u", group);
+		ext4_warning(sb, "Error %d loading buddy information for %u",
+			     ret, group);
 		return ret;
 	}
 	bitmap = e4b.bd_bitmap;
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index b81f7d46f344..404256caf9cf 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -1155,12 +1155,11 @@ errout:
 static inline int search_dirblock(struct buffer_head *bh,
 				  struct inode *dir,
 				  struct ext4_filename *fname,
-				  const struct qstr *d_name,
 				  unsigned int offset,
 				  struct ext4_dir_entry_2 **res_dir)
 {
 	return ext4_search_dir(bh, bh->b_data, dir->i_sb->s_blocksize, dir,
-			       fname, d_name, offset, res_dir);
+			       fname, offset, res_dir);
 }
 
 /*
@@ -1262,7 +1261,6 @@ static inline bool ext4_match(const struct ext4_filename *fname,
 */
 int ext4_search_dir(struct buffer_head *bh, char *search_buf, int buf_size,
		    struct inode *dir, struct ext4_filename *fname,
-		    const struct qstr *d_name,
		    unsigned int offset, struct ext4_dir_entry_2 **res_dir)
 {
 	struct ext4_dir_entry_2 * de;
@@ -1355,7 +1353,7 @@ static struct buffer_head * ext4_find_entry (struct inode *dir,
 
 	if (ext4_has_inline_data(dir)) {
 		int has_inline_data = 1;
-		ret = ext4_find_inline_entry(dir, &fname, d_name, res_dir,
+		ret = ext4_find_inline_entry(dir, &fname, res_dir,
 					     &has_inline_data);
 		if (has_inline_data) {
 			if (inlined)
@@ -1447,7 +1445,7 @@ restart:
 			goto next;
 		}
 		set_buffer_verified(bh);
-		i = search_dirblock(bh, dir, &fname, d_name,
+		i = search_dirblock(bh, dir, &fname,
 			    block << EXT4_BLOCK_SIZE_BITS(sb), res_dir);
 		if (i == 1) {
 			EXT4_I(dir)->i_dir_start_lookup = block;
@@ -1488,7 +1486,6 @@ static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
 {
 	struct super_block * sb = dir->i_sb;
 	struct dx_frame frames[2], *frame;
-	const struct qstr *d_name = fname->usr_fname;
 	struct buffer_head *bh;
 	ext4_lblk_t block;
 	int retval;
@@ -1505,7 +1502,7 @@ static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
 	if (IS_ERR(bh))
 		goto errout;
 
-	retval = search_dirblock(bh, dir, fname, d_name,
+	retval = search_dirblock(bh, dir, fname,
 				 block << EXT4_BLOCK_SIZE_BITS(sb),
 				 res_dir);
 	if (retval == 1)
@@ -1530,7 +1527,7 @@ static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
 
 	bh = NULL;
 errout:
-	dxtrace(printk(KERN_DEBUG "%s not found\n", d_name->name));
+	dxtrace(printk(KERN_DEBUG "%s not found\n", fname->usr_fname->name));
 success:
 	dx_release(frames);
 	return bh;
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 0b177da9ea82..d37c81f327e7 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -848,14 +848,9 @@ static inline void ext4_quota_off_umount(struct super_block *sb)
 {
 	int type;
 
-	if (ext4_has_feature_quota(sb)) {
-		dquot_disable(sb, -1,
-			      DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
-	} else {
-		/* Use our quota_off function to clear inode flags etc. */
-		for (type = 0; type < EXT4_MAXQUOTAS; type++)
-			ext4_quota_off(sb, type);
-	}
+	/* Use our quota_off function to clear inode flags etc. */
+	for (type = 0; type < EXT4_MAXQUOTAS; type++)
+		ext4_quota_off(sb, type);
 }
 #else
 static inline void ext4_quota_off_umount(struct super_block *sb)
@@ -1179,6 +1174,9 @@ static int ext4_set_context(struct inode *inode, const void *ctx, size_t len,
 		return res;
 	}
 
+	res = dquot_initialize(inode);
+	if (res)
+		return res;
 retry:
 	handle = ext4_journal_start(inode, EXT4_HT_MISC,
 				    ext4_jbd2_credits_xattr(inode));
@@ -5485,7 +5483,7 @@ static int ext4_quota_off(struct super_block *sb, int type)
 		goto out;
 
 	err = dquot_quota_off(sb, type);
-	if (err)
+	if (err || ext4_has_feature_quota(sb))
 		goto out_put;
 
 	inode_lock(inode);
@@ -5505,6 +5503,7 @@ static int ext4_quota_off(struct super_block *sb, int type)
 out_unlock:
 	inode_unlock(inode);
 out_put:
+	lockdep_set_quota_inode(inode, I_DATA_SEM_NORMAL);
 	iput(inode);
 	return err;
 out:
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 8fb7ce14e6eb..5d3c2536641c 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -888,6 +888,8 @@ inserted:
 			else {
 				u32 ref;
 
+				WARN_ON_ONCE(dquot_initialize_needed(inode));
+
 				/* The old block is released after updating
 				   the inode. */
 				error = dquot_alloc_block(inode,
@@ -954,6 +956,8 @@ inserted:
 			/* We need to allocate a new block */
 			ext4_fsblk_t goal, block;
 
+			WARN_ON_ONCE(dquot_initialize_needed(inode));
+
 			goal = ext4_group_first_block_no(sb,
 						EXT4_I(inode)->i_block_group);
 
@@ -1166,6 +1170,7 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
 		return -EINVAL;
 	if (strlen(name) > 255)
 		return -ERANGE;
+
 	ext4_write_lock_xattr(inode, &no_expand);
 
 	error = ext4_reserve_inode_write(handle, inode, &is.iloc);
@@ -1267,6 +1272,9 @@ ext4_xattr_set(struct inode *inode, int name_index, const char *name,
 	int error, retries = 0;
 	int credits = ext4_jbd2_credits_xattr(inode);
 
+	error = dquot_initialize(inode);
+	if (error)
+		return error;
 retry:
 	handle = ext4_journal_start(inode, EXT4_HT_XATTR, credits);
 	if (IS_ERR(handle)) {
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 2185c7a040a1..fd2e651bad6d 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -1078,6 +1078,7 @@ static inline u32 f2fs_crc32(struct f2fs_sb_info *sbi, const void *address,
1078{ 1078{
1079 SHASH_DESC_ON_STACK(shash, sbi->s_chksum_driver); 1079 SHASH_DESC_ON_STACK(shash, sbi->s_chksum_driver);
1080 u32 *ctx = (u32 *)shash_desc_ctx(shash); 1080 u32 *ctx = (u32 *)shash_desc_ctx(shash);
1081 u32 retval;
1081 int err; 1082 int err;
1082 1083
1083 shash->tfm = sbi->s_chksum_driver; 1084 shash->tfm = sbi->s_chksum_driver;
@@ -1087,7 +1088,9 @@ static inline u32 f2fs_crc32(struct f2fs_sb_info *sbi, const void *address,
1087 err = crypto_shash_update(shash, address, length); 1088 err = crypto_shash_update(shash, address, length);
1088 BUG_ON(err); 1089 BUG_ON(err);
1089 1090
1090 return *ctx; 1091 retval = *ctx;
1092 barrier_data(ctx);
1093 return retval;
1091} 1094}
1092 1095
1093static inline bool f2fs_crc_valid(struct f2fs_sb_info *sbi, __u32 blk_crc, 1096static inline bool f2fs_crc_valid(struct f2fs_sb_info *sbi, __u32 blk_crc,
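The barrier_data() call in the f2fs hunk keeps the on-stack shash context alive until the CRC has been copied into retval, so the compiler cannot elide or reorder the accesses around the return. In the kernel the macro is roughly an empty asm that consumes the pointer and clobbers memory; a hedged userspace sketch of the same copy-out pattern:

    #include <stdint.h>
    #include <string.h>

    /* Userspace analogue of the kernel's barrier_data(ptr): an empty
     * asm that consumes the pointer and clobbers memory, forcing the
     * compiler to treat the buffer as live up to this point. */
    #define barrier_data(ptr) __asm__ __volatile__("" : : "r"(ptr) : "memory")

    static uint32_t crc_copy_out(const void *addr, size_t len)
    {
        uint32_t ctx[16];   /* stands in for SHASH_DESC_ON_STACK state */
        uint32_t retval;

        (void)addr; (void)len;
        memset(ctx, 0, sizeof(ctx));   /* stands in for the hash update */

        retval = ctx[0];    /* copy the result out of the stack buffer... */
        barrier_data(ctx);  /* ...before the buffer may be considered dead */
        return retval;
    }

    int main(void)
    {
        return (int)crc_copy_out("x", 1);
    }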
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 5a1b58f8fef4..65c88379a3a1 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -975,8 +975,15 @@ static int fuse_bdi_init(struct fuse_conn *fc, struct super_block *sb)
975 int err; 975 int err;
976 char *suffix = ""; 976 char *suffix = "";
977 977
978 if (sb->s_bdev) 978 if (sb->s_bdev) {
979 suffix = "-fuseblk"; 979 suffix = "-fuseblk";
980 /*
981 * sb->s_bdi points to blkdev's bdi; however, we want to redirect
982 * it to our private bdi...
983 */
984 bdi_put(sb->s_bdi);
985 sb->s_bdi = &noop_backing_dev_info;
986 }
980 err = super_setup_bdi_name(sb, "%u:%u%s", MAJOR(fc->dev), 987 err = super_setup_bdi_name(sb, "%u:%u%s", MAJOR(fc->dev),
981 MINOR(fc->dev), suffix); 988 MINOR(fc->dev), suffix);
982 if (err) 989 if (err)
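The new fuse branch drops the reference that sb->s_bdi borrowed from the block device and parks the pointer on the static noop bdi before super_setup_bdi_name() installs a private one. A toy sketch of that handover pattern (names and refcounting are illustrative, not the kernel's):

    #include <stdio.h>

    struct bdi { int refcount; };

    static struct bdi noop_bdi;   /* static placeholder, never freed */

    static void bdi_ref_put(struct bdi *b)
    {
        if (b != &noop_bdi && --b->refcount == 0)
            printf("freeing bdi\n");
    }

    /* Release the borrowed object and park the slot on the placeholder,
     * so later error paths never free or double-drop the borrowed one. */
    static void redirect_to_private(struct bdi **slot, struct bdi *priv)
    {
        bdi_ref_put(*slot);
        *slot = &noop_bdi;
        /* ... setup that may fail leaves *slot safely on the noop bdi ... */
        priv->refcount++;
        *slot = priv;
    }

    int main(void)
    {
        struct bdi blkdev = { .refcount = 2 }, priv = { .refcount = 0 };
        struct bdi *slot = &blkdev;

        blkdev.refcount++;            /* the borrowed reference */
        redirect_to_private(&slot, &priv);
        return slot == &priv ? 0 : 1;
    }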
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index f865b96374df..d2955daf17a4 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -659,7 +659,7 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags)
659 struct gfs2_log_header *lh; 659 struct gfs2_log_header *lh;
660 unsigned int tail; 660 unsigned int tail;
661 u32 hash; 661 u32 hash;
662 int op_flags = REQ_PREFLUSH | REQ_FUA | REQ_META; 662 int op_flags = REQ_PREFLUSH | REQ_FUA | REQ_META | REQ_SYNC;
663 struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO); 663 struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
664 enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state); 664 enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);
665 lh = page_address(page); 665 lh = page_address(page);
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index dde861387a40..d44f5456eb9b 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -200,7 +200,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
200 addr = ALIGN(addr, huge_page_size(h)); 200 addr = ALIGN(addr, huge_page_size(h));
201 vma = find_vma(mm, addr); 201 vma = find_vma(mm, addr);
202 if (TASK_SIZE - len >= addr && 202 if (TASK_SIZE - len >= addr &&
203 (!vma || addr + len <= vma->vm_start)) 203 (!vma || addr + len <= vm_start_gap(vma)))
204 return addr; 204 return addr;
205 } 205 }
206 206
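This hugetlbfs hunk is part of the stack guard gap rework: vm_start_gap() lowers vm_start by the configurable guard gap for VM_GROWSDOWN mappings, so placement checks keep the gap free. A self-contained sketch of that check, assuming the 256-page default gap from that series:

    #include <stdbool.h>
    #include <stdio.h>

    #define VM_GROWSDOWN 0x0100u
    #define PAGE_SHIFT   12

    struct vm_area_struct { unsigned long vm_start, vm_end, vm_flags; };

    /* Assumed default from the guard gap rework: 256 pages. */
    static unsigned long stack_guard_gap = 256UL << PAGE_SHIFT;

    /* A stack that grows down reserves a gap below vm_start; placement
     * checks must compare against the gapped start, not vm_start. */
    static unsigned long vm_start_gap(const struct vm_area_struct *vma)
    {
        unsigned long start = vma->vm_start;

        if (vma->vm_flags & VM_GROWSDOWN) {
            start -= stack_guard_gap;
            if (start > vma->vm_start)   /* underflowed past zero */
                start = 0;
        }
        return start;
    }

    static bool fits_before(const struct vm_area_struct *vma,
                            unsigned long addr, unsigned long len)
    {
        return !vma || addr + len <= vm_start_gap(vma);
    }

    int main(void)
    {
        struct vm_area_struct stack = {
            .vm_start = 0x7f0000400000UL, .vm_flags = VM_GROWSDOWN,
        };
        /* An allocation ending inside the guard gap must be rejected. */
        printf("%d\n", fits_before(&stack, 0x7f0000300000UL, 0x100000UL));
        return 0;
    }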
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 9ee4832b6f8b..2d30a6da7013 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -680,6 +680,12 @@ int jbd2__journal_restart(handle_t *handle, int nblocks, gfp_t gfp_mask)
680 680
681 rwsem_release(&journal->j_trans_commit_map, 1, _THIS_IP_); 681 rwsem_release(&journal->j_trans_commit_map, 1, _THIS_IP_);
682 handle->h_buffer_credits = nblocks; 682 handle->h_buffer_credits = nblocks;
683 /*
684 * Restore the original nofs context because the journal restart
685 * is basically the same thing as journal stop and start.
686 * start_this_handle will start a new nofs context.
687 */
688 memalloc_nofs_restore(handle->saved_alloc_context);
683 ret = start_this_handle(journal, handle, gfp_mask); 689 ret = start_this_handle(journal, handle, gfp_mask);
684 return ret; 690 return ret;
685} 691}
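The jbd2 comment above is the whole story: a restart is a stop plus a start, so the handle's saved NOFS allocation context must be restored before start_this_handle() saves a fresh one, or PF_MEMALLOC_NOFS would stick. A minimal single-threaded sketch of the save/restore pairing (a userspace analogue, not the kernel implementation):

    #include <assert.h>

    static unsigned int pflags;          /* stands in for current->flags */
    #define PF_MEMALLOC_NOFS 0x1u

    /* Sketches of the kernel helpers: save() returns the previous state
     * so scopes nest, restore() reinstates exactly that state. */
    static unsigned int memalloc_nofs_save(void)
    {
        unsigned int old = pflags & PF_MEMALLOC_NOFS;

        pflags |= PF_MEMALLOC_NOFS;
        return old;
    }

    static void memalloc_nofs_restore(unsigned int old)
    {
        pflags = (pflags & ~PF_MEMALLOC_NOFS) | old;
    }

    struct handle { unsigned int saved_alloc_context; };

    static void start_this_handle(struct handle *h)
    {
        h->saved_alloc_context = memalloc_nofs_save();
    }

    static void journal_restart(struct handle *h)
    {
        /* Restart == stop + start: restore this handle's saved context
         * first, or the nested save would make PF_MEMALLOC_NOFS stick. */
        memalloc_nofs_restore(h->saved_alloc_context);
        start_this_handle(h);
    }

    int main(void)
    {
        struct handle h;

        start_this_handle(&h);
        journal_restart(&h);
        memalloc_nofs_restore(h.saved_alloc_context);
        assert(pflags == 0);
        return 0;
    }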
diff --git a/fs/namespace.c b/fs/namespace.c
index 8bd3e4d448b9..5a4438445bf7 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -3488,6 +3488,8 @@ static int mntns_install(struct nsproxy *nsproxy, struct ns_common *ns)
3488 return err; 3488 return err;
3489 } 3489 }
3490 3490
3491 put_mnt_ns(old_mnt_ns);
3492
3491 /* Update the pwd and root */ 3493 /* Update the pwd and root */
3492 set_fs_pwd(fs, &root); 3494 set_fs_pwd(fs, &root);
3493 set_fs_root(fs, &root); 3495 set_fs_root(fs, &root);
diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
index c14758e08d73..390ac9c39c59 100644
--- a/fs/nfs/callback_xdr.c
+++ b/fs/nfs/callback_xdr.c
@@ -753,7 +753,6 @@ static void nfs4_callback_free_slot(struct nfs4_session *session,
753 * A single slot, so highest used slotid is either 0 or -1 753 * A single slot, so highest used slotid is either 0 or -1
754 */ 754 */
755 nfs4_free_slot(tbl, slot); 755 nfs4_free_slot(tbl, slot);
756 nfs4_slot_tbl_drain_complete(tbl);
757 spin_unlock(&tbl->slot_tbl_lock); 756 spin_unlock(&tbl->slot_tbl_lock);
758} 757}
759 758
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 32ccd7754f8a..2ac00bf4ecf1 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1946,29 +1946,6 @@ nfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
1946} 1946}
1947EXPORT_SYMBOL_GPL(nfs_link); 1947EXPORT_SYMBOL_GPL(nfs_link);
1948 1948
1949static void
1950nfs_complete_rename(struct rpc_task *task, struct nfs_renamedata *data)
1951{
1952 struct dentry *old_dentry = data->old_dentry;
1953 struct dentry *new_dentry = data->new_dentry;
1954 struct inode *old_inode = d_inode(old_dentry);
1955 struct inode *new_inode = d_inode(new_dentry);
1956
1957 nfs_mark_for_revalidate(old_inode);
1958
1959 switch (task->tk_status) {
1960 case 0:
1961 if (new_inode != NULL)
1962 nfs_drop_nlink(new_inode);
1963 d_move(old_dentry, new_dentry);
1964 nfs_set_verifier(new_dentry,
1965 nfs_save_change_attribute(data->new_dir));
1966 break;
1967 case -ENOENT:
1968 nfs_dentry_handle_enoent(old_dentry);
1969 }
1970}
1971
1972/* 1949/*
1973 * RENAME 1950 * RENAME
1974 * FIXME: Some nfsds, like the Linux user space nfsd, may generate a 1951 * FIXME: Some nfsds, like the Linux user space nfsd, may generate a
@@ -1999,7 +1976,7 @@ int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
1999{ 1976{
2000 struct inode *old_inode = d_inode(old_dentry); 1977 struct inode *old_inode = d_inode(old_dentry);
2001 struct inode *new_inode = d_inode(new_dentry); 1978 struct inode *new_inode = d_inode(new_dentry);
2002 struct dentry *dentry = NULL; 1979 struct dentry *dentry = NULL, *rehash = NULL;
2003 struct rpc_task *task; 1980 struct rpc_task *task;
2004 int error = -EBUSY; 1981 int error = -EBUSY;
2005 1982
@@ -2022,8 +1999,10 @@ int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
2022 * To prevent any new references to the target during the 1999 * To prevent any new references to the target during the
2023 * rename, we unhash the dentry in advance. 2000 * rename, we unhash the dentry in advance.
2024 */ 2001 */
2025 if (!d_unhashed(new_dentry)) 2002 if (!d_unhashed(new_dentry)) {
2026 d_drop(new_dentry); 2003 d_drop(new_dentry);
2004 rehash = new_dentry;
2005 }
2027 2006
2028 if (d_count(new_dentry) > 2) { 2007 if (d_count(new_dentry) > 2) {
2029 int err; 2008 int err;
@@ -2040,6 +2019,7 @@ int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
2040 goto out; 2019 goto out;
2041 2020
2042 new_dentry = dentry; 2021 new_dentry = dentry;
2022 rehash = NULL;
2043 new_inode = NULL; 2023 new_inode = NULL;
2044 } 2024 }
2045 } 2025 }
@@ -2048,8 +2028,7 @@ int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
2048 if (new_inode != NULL) 2028 if (new_inode != NULL)
2049 NFS_PROTO(new_inode)->return_delegation(new_inode); 2029 NFS_PROTO(new_inode)->return_delegation(new_inode);
2050 2030
2051 task = nfs_async_rename(old_dir, new_dir, old_dentry, new_dentry, 2031 task = nfs_async_rename(old_dir, new_dir, old_dentry, new_dentry, NULL);
2052 nfs_complete_rename);
2053 if (IS_ERR(task)) { 2032 if (IS_ERR(task)) {
2054 error = PTR_ERR(task); 2033 error = PTR_ERR(task);
2055 goto out; 2034 goto out;
@@ -2059,9 +2038,27 @@ int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
2059 if (error == 0) 2038 if (error == 0)
2060 error = task->tk_status; 2039 error = task->tk_status;
2061 rpc_put_task(task); 2040 rpc_put_task(task);
2041 nfs_mark_for_revalidate(old_inode);
2062out: 2042out:
2043 if (rehash)
2044 d_rehash(rehash);
2063 trace_nfs_rename_exit(old_dir, old_dentry, 2045 trace_nfs_rename_exit(old_dir, old_dentry,
2064 new_dir, new_dentry, error); 2046 new_dir, new_dentry, error);
2047 if (!error) {
2048 if (new_inode != NULL)
2049 nfs_drop_nlink(new_inode);
2050 /*
2051 * The d_move() should be here instead of in an async RPC completion
2052 * handler because we need the proper locks to move the dentry. If
2053 * we're interrupted by a signal, the async RPC completion handler
2054 * should mark the directories for revalidation.
2055 */
2056 d_move(old_dentry, new_dentry);
2057 nfs_set_verifier(new_dentry,
2058 nfs_save_change_attribute(new_dir));
2059 } else if (error == -ENOENT)
2060 nfs_dentry_handle_enoent(old_dentry);
2061
2065 /* new dentry created? */ 2062 /* new dentry created? */
2066 if (dentry) 2063 if (dentry)
2067 dput(dentry); 2064 dput(dentry);
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
index f5714ee01000..23542dc44a25 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.c
+++ b/fs/nfs/flexfilelayout/flexfilelayout.c
@@ -454,6 +454,7 @@ ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh,
454 goto out_err_free; 454 goto out_err_free;
455 455
456 /* fh */ 456 /* fh */
457 rc = -EIO;
457 p = xdr_inline_decode(&stream, 4); 458 p = xdr_inline_decode(&stream, 4);
458 if (!p) 459 if (!p)
459 goto out_err_free; 460 goto out_err_free;
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index e9b4c3320e37..3e24392f2caa 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -398,7 +398,6 @@ extern struct file_system_type nfs4_referral_fs_type;
398bool nfs_auth_info_match(const struct nfs_auth_info *, rpc_authflavor_t); 398bool nfs_auth_info_match(const struct nfs_auth_info *, rpc_authflavor_t);
399struct dentry *nfs_try_mount(int, const char *, struct nfs_mount_info *, 399struct dentry *nfs_try_mount(int, const char *, struct nfs_mount_info *,
400 struct nfs_subversion *); 400 struct nfs_subversion *);
401void nfs_initialise_sb(struct super_block *);
402int nfs_set_sb_security(struct super_block *, struct dentry *, struct nfs_mount_info *); 401int nfs_set_sb_security(struct super_block *, struct dentry *, struct nfs_mount_info *);
403int nfs_clone_sb_security(struct super_block *, struct dentry *, struct nfs_mount_info *); 402int nfs_clone_sb_security(struct super_block *, struct dentry *, struct nfs_mount_info *);
404struct dentry *nfs_fs_mount_common(struct nfs_server *, int, const char *, 403struct dentry *nfs_fs_mount_common(struct nfs_server *, int, const char *,
@@ -458,7 +457,6 @@ extern void nfs_read_prepare(struct rpc_task *task, void *calldata);
458extern void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio); 457extern void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio);
459 458
460/* super.c */ 459/* super.c */
461void nfs_clone_super(struct super_block *, struct nfs_mount_info *);
462void nfs_umount_begin(struct super_block *); 460void nfs_umount_begin(struct super_block *);
463int nfs_statfs(struct dentry *, struct kstatfs *); 461int nfs_statfs(struct dentry *, struct kstatfs *);
464int nfs_show_options(struct seq_file *, struct dentry *); 462int nfs_show_options(struct seq_file *, struct dentry *);
diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
index 1a224a33a6c2..e5686be67be8 100644
--- a/fs/nfs/namespace.c
+++ b/fs/nfs/namespace.c
@@ -246,7 +246,7 @@ struct vfsmount *nfs_do_submount(struct dentry *dentry, struct nfs_fh *fh,
246 246
247 devname = nfs_devname(dentry, page, PAGE_SIZE); 247 devname = nfs_devname(dentry, page, PAGE_SIZE);
248 if (IS_ERR(devname)) 248 if (IS_ERR(devname))
249 mnt = (struct vfsmount *)devname; 249 mnt = ERR_CAST(devname);
250 else 250 else
251 mnt = nfs_do_clone_mount(NFS_SB(dentry->d_sb), devname, &mountdata); 251 mnt = nfs_do_clone_mount(NFS_SB(dentry->d_sb), devname, &mountdata);
252 252
diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
index 929d09a5310a..319a47db218d 100644
--- a/fs/nfs/nfs42proc.c
+++ b/fs/nfs/nfs42proc.c
@@ -177,7 +177,7 @@ static ssize_t _nfs42_proc_copy(struct file *src,
177 if (status) 177 if (status)
178 goto out; 178 goto out;
179 179
180 if (!nfs_write_verifier_cmp(&res->write_res.verifier.verifier, 180 if (nfs_write_verifier_cmp(&res->write_res.verifier.verifier,
181 &res->commit_res.verf->verifier)) { 181 &res->commit_res.verf->verifier)) {
182 status = -EAGAIN; 182 status = -EAGAIN;
183 goto out; 183 goto out;
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index 692a7a8bfc7a..66776f022111 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -582,7 +582,6 @@ int nfs40_walk_client_list(struct nfs_client *new,
582 */ 582 */
583 nfs4_schedule_path_down_recovery(pos); 583 nfs4_schedule_path_down_recovery(pos);
584 default: 584 default:
585 spin_lock(&nn->nfs_client_lock);
586 goto out; 585 goto out;
587 } 586 }
588 587
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index c08c46a3b8cd..dbfa18900e25 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -2589,7 +2589,8 @@ static inline void nfs4_exclusive_attrset(struct nfs4_opendata *opendata,
2589 2589
2590 /* Except for MODE, it seems harmless to set it twice. */ 2590 /* Except for MODE, it seems harmless to set it twice. */
2591 if (opendata->o_arg.createmode != NFS4_CREATE_EXCLUSIVE && 2591 if (opendata->o_arg.createmode != NFS4_CREATE_EXCLUSIVE &&
2592 attrset[1] & FATTR4_WORD1_MODE) 2592 (attrset[1] & FATTR4_WORD1_MODE ||
2593 attrset[2] & FATTR4_WORD2_MODE_UMASK))
2593 sattr->ia_valid &= ~ATTR_MODE; 2594 sattr->ia_valid &= ~ATTR_MODE;
2594 2595
2595 if (attrset[2] & FATTR4_WORD2_SECURITY_LABEL) 2596 if (attrset[2] & FATTR4_WORD2_SECURITY_LABEL)
@@ -8416,6 +8417,7 @@ static void nfs4_layoutget_release(void *calldata)
8416 size_t max_pages = max_response_pages(server); 8417 size_t max_pages = max_response_pages(server);
8417 8418
8418 dprintk("--> %s\n", __func__); 8419 dprintk("--> %s\n", __func__);
8420 nfs4_sequence_free_slot(&lgp->res.seq_res);
8419 nfs4_free_pages(lgp->args.layout.pages, max_pages); 8421 nfs4_free_pages(lgp->args.layout.pages, max_pages);
8420 pnfs_put_layout_hdr(NFS_I(inode)->layout); 8422 pnfs_put_layout_hdr(NFS_I(inode)->layout);
8421 put_nfs_open_context(lgp->args.ctx); 8423 put_nfs_open_context(lgp->args.ctx);
@@ -8490,7 +8492,6 @@ nfs4_proc_layoutget(struct nfs4_layoutget *lgp, long *timeout, gfp_t gfp_flags)
8490 /* if layoutp->len is 0, nfs4_layoutget_prepare called rpc_exit */ 8492 /* if layoutp->len is 0, nfs4_layoutget_prepare called rpc_exit */
8491 if (status == 0 && lgp->res.layoutp->len) 8493 if (status == 0 && lgp->res.layoutp->len)
8492 lseg = pnfs_layout_process(lgp); 8494 lseg = pnfs_layout_process(lgp);
8493 nfs4_sequence_free_slot(&lgp->res.seq_res);
8494 rpc_put_task(task); 8495 rpc_put_task(task);
8495 dprintk("<-- %s status=%d\n", __func__, status); 8496 dprintk("<-- %s status=%d\n", __func__, status);
8496 if (status) 8497 if (status)
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index b34de036501b..cbf82b0d4467 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -2134,6 +2134,8 @@ again:
2134 put_rpccred(cred); 2134 put_rpccred(cred);
2135 switch (status) { 2135 switch (status) {
2136 case 0: 2136 case 0:
2137 case -EINTR:
2138 case -ERESTARTSYS:
2137 break; 2139 break;
2138 case -ETIMEDOUT: 2140 case -ETIMEDOUT:
2139 if (clnt->cl_softrtry) 2141 if (clnt->cl_softrtry)
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index adc6ec28d4b5..c383d0913b54 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -2094,12 +2094,26 @@ pnfs_generic_pg_check_layout(struct nfs_pageio_descriptor *pgio)
2094} 2094}
2095EXPORT_SYMBOL_GPL(pnfs_generic_pg_check_layout); 2095EXPORT_SYMBOL_GPL(pnfs_generic_pg_check_layout);
2096 2096
2097/*
2098 * Check for any intersection between the request and the pgio->pg_lseg,
2099 * and if none, put this pgio->pg_lseg away.
2100 */
2101static void
2102pnfs_generic_pg_check_range(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
2103{
2104 if (pgio->pg_lseg && !pnfs_lseg_request_intersecting(pgio->pg_lseg, req)) {
2105 pnfs_put_lseg(pgio->pg_lseg);
2106 pgio->pg_lseg = NULL;
2107 }
2108}
2109
2097void 2110void
2098pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req) 2111pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
2099{ 2112{
2100 u64 rd_size = req->wb_bytes; 2113 u64 rd_size = req->wb_bytes;
2101 2114
2102 pnfs_generic_pg_check_layout(pgio); 2115 pnfs_generic_pg_check_layout(pgio);
2116 pnfs_generic_pg_check_range(pgio, req);
2103 if (pgio->pg_lseg == NULL) { 2117 if (pgio->pg_lseg == NULL) {
2104 if (pgio->pg_dreq == NULL) 2118 if (pgio->pg_dreq == NULL)
2105 rd_size = i_size_read(pgio->pg_inode) - req_offset(req); 2119 rd_size = i_size_read(pgio->pg_inode) - req_offset(req);
@@ -2131,6 +2145,7 @@ pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio,
2131 struct nfs_page *req, u64 wb_size) 2145 struct nfs_page *req, u64 wb_size)
2132{ 2146{
2133 pnfs_generic_pg_check_layout(pgio); 2147 pnfs_generic_pg_check_layout(pgio);
2148 pnfs_generic_pg_check_range(pgio, req);
2134 if (pgio->pg_lseg == NULL) { 2149 if (pgio->pg_lseg == NULL) {
2135 pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode, 2150 pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
2136 req->wb_context, 2151 req->wb_context,
@@ -2191,16 +2206,10 @@ pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio,
2191 seg_end = pnfs_end_offset(pgio->pg_lseg->pls_range.offset, 2206 seg_end = pnfs_end_offset(pgio->pg_lseg->pls_range.offset,
2192 pgio->pg_lseg->pls_range.length); 2207 pgio->pg_lseg->pls_range.length);
2193 req_start = req_offset(req); 2208 req_start = req_offset(req);
2194 WARN_ON_ONCE(req_start >= seg_end); 2209
2195 /* start of request is past the last byte of this segment */ 2210 /* start of request is past the last byte of this segment */
2196 if (req_start >= seg_end) { 2211 if (req_start >= seg_end)
2197 /* reference the new lseg */
2198 if (pgio->pg_ops->pg_cleanup)
2199 pgio->pg_ops->pg_cleanup(pgio);
2200 if (pgio->pg_ops->pg_init)
2201 pgio->pg_ops->pg_init(pgio, req);
2202 return 0; 2212 return 0;
2203 }
2204 2213
2205 /* adjust 'size' iff there are fewer bytes left in the 2214 /* adjust 'size' iff there are fewer bytes left in the
2206 * segment than what nfs_generic_pg_test returned */ 2215 * segment than what nfs_generic_pg_test returned */
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
index 2d05b756a8d6..99731e3e332f 100644
--- a/fs/nfs/pnfs.h
+++ b/fs/nfs/pnfs.h
@@ -593,6 +593,16 @@ pnfs_lseg_range_intersecting(const struct pnfs_layout_range *l1,
593 return pnfs_is_range_intersecting(l1->offset, end1, l2->offset, end2); 593 return pnfs_is_range_intersecting(l1->offset, end1, l2->offset, end2);
594} 594}
595 595
596static inline bool
597pnfs_lseg_request_intersecting(struct pnfs_layout_segment *lseg, struct nfs_page *req)
598{
599 u64 seg_last = pnfs_end_offset(lseg->pls_range.offset, lseg->pls_range.length);
600 u64 req_last = req_offset(req) + req->wb_bytes;
601
602 return pnfs_is_range_intersecting(lseg->pls_range.offset, seg_last,
603 req_offset(req), req_last);
604}
605
596extern unsigned int layoutstats_timer; 606extern unsigned int layoutstats_timer;
597 607
598#ifdef NFS_DEBUG 608#ifdef NFS_DEBUG
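pnfs_lseg_request_intersecting() above compares the request's byte range against the cached segment's, and pnfs_generic_pg_check_range() drops the lseg when they no longer overlap. A self-contained sketch of the intersection test, assuming (as pnfs appears to with NFS4_MAX_UINT64) that a maximal end value means "to end of file":

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define RANGE_EOF UINT64_MAX   /* stand-in for NFS4_MAX_UINT64 */

    /* Half-open [start, end) intersection; an end of RANGE_EOF means
     * the range runs to EOF and overlaps anything that starts later. */
    static bool ranges_intersect(uint64_t s1, uint64_t e1,
                                 uint64_t s2, uint64_t e2)
    {
        return (e1 == RANGE_EOF || s2 < e1) &&
               (e2 == RANGE_EOF || s1 < e2);
    }

    int main(void)
    {
        /* A 4K request at offset 8192 against a segment [0, 8192). */
        assert(!ranges_intersect(0, 8192, 8192, 8192 + 4096));
        /* The same request against a whole-file segment. */
        assert(ranges_intersect(0, RANGE_EOF, 8192, 8192 + 4096));
        return 0;
    }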
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 2f3822a4a7d5..eceb4eabb064 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -2301,7 +2301,7 @@ EXPORT_SYMBOL_GPL(nfs_remount);
2301/* 2301/*
2302 * Initialise the common bits of the superblock 2302 * Initialise the common bits of the superblock
2303 */ 2303 */
2304inline void nfs_initialise_sb(struct super_block *sb) 2304static void nfs_initialise_sb(struct super_block *sb)
2305{ 2305{
2306 struct nfs_server *server = NFS_SB(sb); 2306 struct nfs_server *server = NFS_SB(sb);
2307 2307
@@ -2348,7 +2348,8 @@ EXPORT_SYMBOL_GPL(nfs_fill_super);
2348/* 2348/*
2349 * Finish setting up a cloned NFS2/3/4 superblock 2349 * Finish setting up a cloned NFS2/3/4 superblock
2350 */ 2350 */
2351void nfs_clone_super(struct super_block *sb, struct nfs_mount_info *mount_info) 2351static void nfs_clone_super(struct super_block *sb,
2352 struct nfs_mount_info *mount_info)
2352{ 2353{
2353 const struct super_block *old_sb = mount_info->cloned->sb; 2354 const struct super_block *old_sb = mount_info->cloned->sb;
2354 struct nfs_server *server = NFS_SB(sb); 2355 struct nfs_server *server = NFS_SB(sb);
diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c
index 12feac6ee2fd..452334694a5d 100644
--- a/fs/nfsd/nfs3xdr.c
+++ b/fs/nfsd/nfs3xdr.c
@@ -334,11 +334,8 @@ nfs3svc_decode_readargs(struct svc_rqst *rqstp, __be32 *p,
334 if (!p) 334 if (!p)
335 return 0; 335 return 0;
336 p = xdr_decode_hyper(p, &args->offset); 336 p = xdr_decode_hyper(p, &args->offset);
337 args->count = ntohl(*p++);
338
339 if (!xdr_argsize_check(rqstp, p))
340 return 0;
341 337
338 args->count = ntohl(*p++);
342 len = min(args->count, max_blocksize); 339 len = min(args->count, max_blocksize);
343 340
344 /* set up the kvec */ 341 /* set up the kvec */
@@ -352,7 +349,7 @@ nfs3svc_decode_readargs(struct svc_rqst *rqstp, __be32 *p,
352 v++; 349 v++;
353 } 350 }
354 args->vlen = v; 351 args->vlen = v;
355 return 1; 352 return xdr_argsize_check(rqstp, p);
356} 353}
357 354
358int 355int
@@ -544,11 +541,9 @@ nfs3svc_decode_readlinkargs(struct svc_rqst *rqstp, __be32 *p,
544 p = decode_fh(p, &args->fh); 541 p = decode_fh(p, &args->fh);
545 if (!p) 542 if (!p)
546 return 0; 543 return 0;
547 if (!xdr_argsize_check(rqstp, p))
548 return 0;
549 args->buffer = page_address(*(rqstp->rq_next_page++)); 544 args->buffer = page_address(*(rqstp->rq_next_page++));
550 545
551 return 1; 546 return xdr_argsize_check(rqstp, p);
552} 547}
553 548
554int 549int
@@ -574,14 +569,10 @@ nfs3svc_decode_readdirargs(struct svc_rqst *rqstp, __be32 *p,
574 args->verf = p; p += 2; 569 args->verf = p; p += 2;
575 args->dircount = ~0; 570 args->dircount = ~0;
576 args->count = ntohl(*p++); 571 args->count = ntohl(*p++);
577
578 if (!xdr_argsize_check(rqstp, p))
579 return 0;
580
581 args->count = min_t(u32, args->count, PAGE_SIZE); 572 args->count = min_t(u32, args->count, PAGE_SIZE);
582 args->buffer = page_address(*(rqstp->rq_next_page++)); 573 args->buffer = page_address(*(rqstp->rq_next_page++));
583 574
584 return 1; 575 return xdr_argsize_check(rqstp, p);
585} 576}
586 577
587int 578int
@@ -599,9 +590,6 @@ nfs3svc_decode_readdirplusargs(struct svc_rqst *rqstp, __be32 *p,
599 args->dircount = ntohl(*p++); 590 args->dircount = ntohl(*p++);
600 args->count = ntohl(*p++); 591 args->count = ntohl(*p++);
601 592
602 if (!xdr_argsize_check(rqstp, p))
603 return 0;
604
605 len = args->count = min(args->count, max_blocksize); 593 len = args->count = min(args->count, max_blocksize);
606 while (len > 0) { 594 while (len > 0) {
607 struct page *p = *(rqstp->rq_next_page++); 595 struct page *p = *(rqstp->rq_next_page++);
@@ -609,7 +597,8 @@ nfs3svc_decode_readdirplusargs(struct svc_rqst *rqstp, __be32 *p,
609 args->buffer = page_address(p); 597 args->buffer = page_address(p);
610 len -= PAGE_SIZE; 598 len -= PAGE_SIZE;
611 } 599 }
612 return 1; 600
601 return xdr_argsize_check(rqstp, p);
613} 602}
614 603
615int 604int
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index c453a1998e00..dadb3bf305b2 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -1769,6 +1769,12 @@ nfsd4_proc_compound(struct svc_rqst *rqstp,
1769 opdesc->op_get_currentstateid(cstate, &op->u); 1769 opdesc->op_get_currentstateid(cstate, &op->u);
1770 op->status = opdesc->op_func(rqstp, cstate, &op->u); 1770 op->status = opdesc->op_func(rqstp, cstate, &op->u);
1771 1771
1772 /* Only from SEQUENCE */
1773 if (cstate->status == nfserr_replay_cache) {
1774 dprintk("%s NFS4.1 replay from cache\n", __func__);
1775 status = op->status;
1776 goto out;
1777 }
1772 if (!op->status) { 1778 if (!op->status) {
1773 if (opdesc->op_set_currentstateid) 1779 if (opdesc->op_set_currentstateid)
1774 opdesc->op_set_currentstateid(cstate, &op->u); 1780 opdesc->op_set_currentstateid(cstate, &op->u);
@@ -1779,14 +1785,7 @@ nfsd4_proc_compound(struct svc_rqst *rqstp,
1779 if (need_wrongsec_check(rqstp)) 1785 if (need_wrongsec_check(rqstp))
1780 op->status = check_nfsd_access(current_fh->fh_export, rqstp); 1786 op->status = check_nfsd_access(current_fh->fh_export, rqstp);
1781 } 1787 }
1782
1783encode_op: 1788encode_op:
1784 /* Only from SEQUENCE */
1785 if (cstate->status == nfserr_replay_cache) {
1786 dprintk("%s NFS4.1 replay from cache\n", __func__);
1787 status = op->status;
1788 goto out;
1789 }
1790 if (op->status == nfserr_replay_me) { 1789 if (op->status == nfserr_replay_me) {
1791 op->replay = &cstate->replay_owner->so_replay; 1790 op->replay = &cstate->replay_owner->so_replay;
1792 nfsd4_encode_replay(&resp->xdr, op); 1791 nfsd4_encode_replay(&resp->xdr, op);
diff --git a/fs/nfsd/nfsxdr.c b/fs/nfsd/nfsxdr.c
index 6a4947a3f4fa..de07ff625777 100644
--- a/fs/nfsd/nfsxdr.c
+++ b/fs/nfsd/nfsxdr.c
@@ -257,9 +257,6 @@ nfssvc_decode_readargs(struct svc_rqst *rqstp, __be32 *p,
257 len = args->count = ntohl(*p++); 257 len = args->count = ntohl(*p++);
258 p++; /* totalcount - unused */ 258 p++; /* totalcount - unused */
259 259
260 if (!xdr_argsize_check(rqstp, p))
261 return 0;
262
263 len = min_t(unsigned int, len, NFSSVC_MAXBLKSIZE_V2); 260 len = min_t(unsigned int, len, NFSSVC_MAXBLKSIZE_V2);
264 261
265 /* set up somewhere to store response. 262 /* set up somewhere to store response.
@@ -275,7 +272,7 @@ nfssvc_decode_readargs(struct svc_rqst *rqstp, __be32 *p,
275 v++; 272 v++;
276 } 273 }
277 args->vlen = v; 274 args->vlen = v;
278 return 1; 275 return xdr_argsize_check(rqstp, p);
279} 276}
280 277
281int 278int
@@ -365,11 +362,9 @@ nfssvc_decode_readlinkargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd_readli
365 p = decode_fh(p, &args->fh); 362 p = decode_fh(p, &args->fh);
366 if (!p) 363 if (!p)
367 return 0; 364 return 0;
368 if (!xdr_argsize_check(rqstp, p))
369 return 0;
370 args->buffer = page_address(*(rqstp->rq_next_page++)); 365 args->buffer = page_address(*(rqstp->rq_next_page++));
371 366
372 return 1; 367 return xdr_argsize_check(rqstp, p);
373} 368}
374 369
375int 370int
@@ -407,11 +402,9 @@ nfssvc_decode_readdirargs(struct svc_rqst *rqstp, __be32 *p,
407 args->cookie = ntohl(*p++); 402 args->cookie = ntohl(*p++);
408 args->count = ntohl(*p++); 403 args->count = ntohl(*p++);
409 args->count = min_t(u32, args->count, PAGE_SIZE); 404 args->count = min_t(u32, args->count, PAGE_SIZE);
410 if (!xdr_argsize_check(rqstp, p))
411 return 0;
412 args->buffer = page_address(*(rqstp->rq_next_page++)); 405 args->buffer = page_address(*(rqstp->rq_next_page++));
413 406
414 return 1; 407 return xdr_argsize_check(rqstp, p);
415} 408}
416 409
417/* 410/*
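The recurring change in the two nfsd xdr files above is mechanical: instead of validating the cursor partway through, each decoder finishes consuming its fixed-size arguments and then returns xdr_argsize_check() once, so the single check covers every word actually read. A sketch of that shape (names illustrative, not the kernel's):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct rq_buf { const uint32_t *base, *end; };

    /* True when the cursor stayed within the received request buffer;
     * running the check last rejects under-sized requests in one place. */
    static bool argsize_check(const struct rq_buf *b, const uint32_t *p)
    {
        return p <= b->end;
    }

    static bool decode_read_args(const struct rq_buf *b,
                                 uint64_t *offset, uint32_t *count)
    {
        const uint32_t *p = b->base;

        *offset = ((uint64_t)p[0] << 32) | p[1];   /* xdr "hyper" */
        p += 2;
        *count = *p++;

        /* One check at the end covers everything consumed above. */
        return argsize_check(b, p);
    }

    int main(void)
    {
        uint32_t words[3] = { 0, 4096, 8192 };
        struct rq_buf b = { words, words + 3 };
        uint64_t off;
        uint32_t cnt;

        printf("%d\n", decode_read_args(&b, &off, &cnt));
        return 0;
    }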
diff --git a/fs/ntfs/namei.c b/fs/ntfs/namei.c
index 358258364616..4690cd75d8d7 100644
--- a/fs/ntfs/namei.c
+++ b/fs/ntfs/namei.c
@@ -159,7 +159,7 @@ static struct dentry *ntfs_lookup(struct inode *dir_ino, struct dentry *dent,
159 PTR_ERR(dent_inode)); 159 PTR_ERR(dent_inode));
160 kfree(name); 160 kfree(name);
161 /* Return the error code. */ 161 /* Return the error code. */
162 return (struct dentry *)dent_inode; 162 return ERR_CAST(dent_inode);
163 } 163 }
164 /* It is guaranteed that @name is no longer allocated at this point. */ 164 /* It is guaranteed that @name is no longer allocated at this point. */
165 if (MREF_ERR(mref) == -ENOENT) { 165 if (MREF_ERR(mref) == -ENOENT) {
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index 3b7c937a36b5..4689940a953c 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -2591,6 +2591,10 @@ void ocfs2_inode_unlock_tracker(struct inode *inode,
2591 struct ocfs2_lock_res *lockres; 2591 struct ocfs2_lock_res *lockres;
2592 2592
2593 lockres = &OCFS2_I(inode)->ip_inode_lockres; 2593 lockres = &OCFS2_I(inode)->ip_inode_lockres;
2594 /* had_lock means that the current process already took the cluster
2595 * lock previously. If had_lock is 1, we have nothing to do here, and
2596 * it will be unlocked where the lock was taken.
2597 */
2594 if (!had_lock) { 2598 if (!had_lock) {
2595 ocfs2_remove_holder(lockres, oh); 2599 ocfs2_remove_holder(lockres, oh);
2596 ocfs2_inode_unlock(inode, ex); 2600 ocfs2_inode_unlock(inode, ex);
diff --git a/fs/ocfs2/export.c b/fs/ocfs2/export.c
index 827fc9809bc2..9f88188060db 100644
--- a/fs/ocfs2/export.c
+++ b/fs/ocfs2/export.c
@@ -119,7 +119,7 @@ check_err:
119 119
120 if (IS_ERR(inode)) { 120 if (IS_ERR(inode)) {
121 mlog_errno(PTR_ERR(inode)); 121 mlog_errno(PTR_ERR(inode));
122 result = (void *)inode; 122 result = ERR_CAST(inode);
123 goto bail; 123 goto bail;
124 } 124 }
125 125
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index 3c5384d9b3a5..f70c3778d600 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -1328,20 +1328,21 @@ static int ocfs2_xattr_get(struct inode *inode,
1328 void *buffer, 1328 void *buffer,
1329 size_t buffer_size) 1329 size_t buffer_size)
1330{ 1330{
1331 int ret; 1331 int ret, had_lock;
1332 struct buffer_head *di_bh = NULL; 1332 struct buffer_head *di_bh = NULL;
1333 struct ocfs2_lock_holder oh;
1333 1334
1334 ret = ocfs2_inode_lock(inode, &di_bh, 0); 1335 had_lock = ocfs2_inode_lock_tracker(inode, &di_bh, 0, &oh);
1335 if (ret < 0) { 1336 if (had_lock < 0) {
1336 mlog_errno(ret); 1337 mlog_errno(had_lock);
1337 return ret; 1338 return had_lock;
1338 } 1339 }
1339 down_read(&OCFS2_I(inode)->ip_xattr_sem); 1340 down_read(&OCFS2_I(inode)->ip_xattr_sem);
1340 ret = ocfs2_xattr_get_nolock(inode, di_bh, name_index, 1341 ret = ocfs2_xattr_get_nolock(inode, di_bh, name_index,
1341 name, buffer, buffer_size); 1342 name, buffer, buffer_size);
1342 up_read(&OCFS2_I(inode)->ip_xattr_sem); 1343 up_read(&OCFS2_I(inode)->ip_xattr_sem);
1343 1344
1344 ocfs2_inode_unlock(inode, 0); 1345 ocfs2_inode_unlock_tracker(inode, 0, &oh, had_lock);
1345 1346
1346 brelse(di_bh); 1347 brelse(di_bh);
1347 1348
@@ -3537,11 +3538,12 @@ int ocfs2_xattr_set(struct inode *inode,
3537{ 3538{
3538 struct buffer_head *di_bh = NULL; 3539 struct buffer_head *di_bh = NULL;
3539 struct ocfs2_dinode *di; 3540 struct ocfs2_dinode *di;
3540 int ret, credits, ref_meta = 0, ref_credits = 0; 3541 int ret, credits, had_lock, ref_meta = 0, ref_credits = 0;
3541 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); 3542 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
3542 struct inode *tl_inode = osb->osb_tl_inode; 3543 struct inode *tl_inode = osb->osb_tl_inode;
3543 struct ocfs2_xattr_set_ctxt ctxt = { NULL, NULL, NULL, }; 3544 struct ocfs2_xattr_set_ctxt ctxt = { NULL, NULL, NULL, };
3544 struct ocfs2_refcount_tree *ref_tree = NULL; 3545 struct ocfs2_refcount_tree *ref_tree = NULL;
3546 struct ocfs2_lock_holder oh;
3545 3547
3546 struct ocfs2_xattr_info xi = { 3548 struct ocfs2_xattr_info xi = {
3547 .xi_name_index = name_index, 3549 .xi_name_index = name_index,
@@ -3572,8 +3574,9 @@ int ocfs2_xattr_set(struct inode *inode,
3572 return -ENOMEM; 3574 return -ENOMEM;
3573 } 3575 }
3574 3576
3575 ret = ocfs2_inode_lock(inode, &di_bh, 1); 3577 had_lock = ocfs2_inode_lock_tracker(inode, &di_bh, 1, &oh);
3576 if (ret < 0) { 3578 if (had_lock < 0) {
3579 ret = had_lock;
3577 mlog_errno(ret); 3580 mlog_errno(ret);
3578 goto cleanup_nolock; 3581 goto cleanup_nolock;
3579 } 3582 }
@@ -3670,7 +3673,7 @@ cleanup:
3670 if (ret) 3673 if (ret)
3671 mlog_errno(ret); 3674 mlog_errno(ret);
3672 } 3675 }
3673 ocfs2_inode_unlock(inode, 1); 3676 ocfs2_inode_unlock_tracker(inode, 1, &oh, had_lock);
3674cleanup_nolock: 3677cleanup_nolock:
3675 brelse(di_bh); 3678 brelse(di_bh);
3676 brelse(xbs.xattr_bh); 3679 brelse(xbs.xattr_bh);
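The ocfs2 conversion above switches ocfs2_xattr_get()/ocfs2_xattr_set() to the lock-tracker helpers: the return value records whether this process already held the cluster lock, and the unlock side drops it only when had_lock is 0. A toy single-threaded sketch of that protocol (hypothetical names, no blocking):

    #include <assert.h>
    #include <stddef.h>

    struct lockres { const void *holder; };

    /* <0 error, 0 taken here, 1 this task already held it. */
    static int lock_tracker(struct lockres *l, const void *task)
    {
        if (l->holder == task)
            return 1;          /* recursive entry: nothing to take */
        if (l->holder)
            return -1;         /* stand-in for blocking on contention */
        l->holder = task;
        return 0;
    }

    /* Drop the lock only if the matching lock_tracker() call took it. */
    static void unlock_tracker(struct lockres *l, const void *task,
                               int had_lock)
    {
        if (!had_lock && l->holder == task)
            l->holder = NULL;
    }

    int main(void)
    {
        struct lockres l = { NULL };
        int me;
        int outer = lock_tracker(&l, &me);   /* takes the lock: 0 */
        int inner = lock_tracker(&l, &me);   /* recursive: 1 */

        unlock_tracker(&l, &me, inner);      /* no-op */
        assert(l.holder == &me);
        unlock_tracker(&l, &me, outer);      /* really unlocks */
        assert(l.holder == NULL);
        return 0;
    }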
diff --git a/fs/overlayfs/Kconfig b/fs/overlayfs/Kconfig
index 0daac5112f7a..c0c9683934b7 100644
--- a/fs/overlayfs/Kconfig
+++ b/fs/overlayfs/Kconfig
@@ -1,5 +1,6 @@
1config OVERLAY_FS 1config OVERLAY_FS
2 tristate "Overlay filesystem support" 2 tristate "Overlay filesystem support"
3 select EXPORTFS
3 help 4 help
4 An overlay filesystem combines two filesystems - an 'upper' filesystem 5 An overlay filesystem combines two filesystems - an 'upper' filesystem
5 and a 'lower' filesystem. When a name exists in both filesystems, the 6 and a 'lower' filesystem. When a name exists in both filesystems, the
diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
index 9008ab9fbd2e..a2a65120c9d0 100644
--- a/fs/overlayfs/copy_up.c
+++ b/fs/overlayfs/copy_up.c
@@ -300,7 +300,11 @@ static int ovl_set_origin(struct dentry *dentry, struct dentry *lower,
300 return PTR_ERR(fh); 300 return PTR_ERR(fh);
301 } 301 }
302 302
303 err = ovl_do_setxattr(upper, OVL_XATTR_ORIGIN, fh, fh ? fh->len : 0, 0); 303 /*
304 * Do not fail when upper doesn't support xattrs.
305 */
306 err = ovl_check_setxattr(dentry, upper, OVL_XATTR_ORIGIN, fh,
307 fh ? fh->len : 0, 0);
304 kfree(fh); 308 kfree(fh);
305 309
306 return err; 310 return err;
@@ -326,15 +330,9 @@ static int ovl_copy_up_locked(struct dentry *workdir, struct dentry *upperdir,
326 .link = link 330 .link = link
327 }; 331 };
328 332
329 upper = lookup_one_len(dentry->d_name.name, upperdir,
330 dentry->d_name.len);
331 err = PTR_ERR(upper);
332 if (IS_ERR(upper))
333 goto out;
334
335 err = security_inode_copy_up(dentry, &new_creds); 333 err = security_inode_copy_up(dentry, &new_creds);
336 if (err < 0) 334 if (err < 0)
337 goto out1; 335 goto out;
338 336
339 if (new_creds) 337 if (new_creds)
340 old_creds = override_creds(new_creds); 338 old_creds = override_creds(new_creds);
@@ -342,13 +340,14 @@ static int ovl_copy_up_locked(struct dentry *workdir, struct dentry *upperdir,
342 if (tmpfile) 340 if (tmpfile)
343 temp = ovl_do_tmpfile(upperdir, stat->mode); 341 temp = ovl_do_tmpfile(upperdir, stat->mode);
344 else 342 else
345 temp = ovl_lookup_temp(workdir, dentry); 343 temp = ovl_lookup_temp(workdir);
346 err = PTR_ERR(temp);
347 if (IS_ERR(temp))
348 goto out1;
349
350 err = 0; 344 err = 0;
351 if (!tmpfile) 345 if (IS_ERR(temp)) {
346 err = PTR_ERR(temp);
347 temp = NULL;
348 }
349
350 if (!err && !tmpfile)
352 err = ovl_create_real(wdir, temp, &cattr, NULL, true); 351 err = ovl_create_real(wdir, temp, &cattr, NULL, true);
353 352
354 if (new_creds) { 353 if (new_creds) {
@@ -357,7 +356,7 @@ static int ovl_copy_up_locked(struct dentry *workdir, struct dentry *upperdir,
357 } 356 }
358 357
359 if (err) 358 if (err)
360 goto out2; 359 goto out;
361 360
362 if (S_ISREG(stat->mode)) { 361 if (S_ISREG(stat->mode)) {
363 struct path upperpath; 362 struct path upperpath;
@@ -393,10 +392,23 @@ static int ovl_copy_up_locked(struct dentry *workdir, struct dentry *upperdir,
393 /* 392 /*
394 * Store identifier of lower inode in upper inode xattr to 393 * Store identifier of lower inode in upper inode xattr to
395 * allow lookup of the copy up origin inode. 394 * allow lookup of the copy up origin inode.
395 *
396 * Don't set origin when we are breaking the association with a lower
397 * hard link.
396 */ 398 */
397 err = ovl_set_origin(dentry, lowerpath->dentry, temp); 399 if (S_ISDIR(stat->mode) || stat->nlink == 1) {
398 if (err) 400 err = ovl_set_origin(dentry, lowerpath->dentry, temp);
401 if (err)
402 goto out_cleanup;
403 }
404
405 upper = lookup_one_len(dentry->d_name.name, upperdir,
406 dentry->d_name.len);
407 if (IS_ERR(upper)) {
408 err = PTR_ERR(upper);
409 upper = NULL;
399 goto out_cleanup; 410 goto out_cleanup;
411 }
400 412
401 if (tmpfile) 413 if (tmpfile)
402 err = ovl_do_link(temp, udir, upper, true); 414 err = ovl_do_link(temp, udir, upper, true);
@@ -411,17 +423,15 @@ static int ovl_copy_up_locked(struct dentry *workdir, struct dentry *upperdir,
411 423
412 /* Restore timestamps on parent (best effort) */ 424 /* Restore timestamps on parent (best effort) */
413 ovl_set_timestamps(upperdir, pstat); 425 ovl_set_timestamps(upperdir, pstat);
414out2: 426out:
415 dput(temp); 427 dput(temp);
416out1:
417 dput(upper); 428 dput(upper);
418out:
419 return err; 429 return err;
420 430
421out_cleanup: 431out_cleanup:
422 if (!tmpfile) 432 if (!tmpfile)
423 ovl_cleanup(wdir, temp); 433 ovl_cleanup(wdir, temp);
424 goto out2; 434 goto out;
425} 435}
426 436
427/* 437/*
@@ -454,6 +464,11 @@ static int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry,
454 ovl_path_upper(parent, &parentpath); 464 ovl_path_upper(parent, &parentpath);
455 upperdir = parentpath.dentry; 465 upperdir = parentpath.dentry;
456 466
467 /* Mark parent "impure" because it may now contain non-pure upper */
468 err = ovl_set_impure(parent, upperdir);
469 if (err)
470 return err;
471
457 err = vfs_getattr(&parentpath, &pstat, 472 err = vfs_getattr(&parentpath, &pstat,
458 STATX_ATIME | STATX_MTIME, AT_STATX_SYNC_AS_STAT); 473 STATX_ATIME | STATX_MTIME, AT_STATX_SYNC_AS_STAT);
459 if (err) 474 if (err)
diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
index 723b98b90698..a63a71656e9b 100644
--- a/fs/overlayfs/dir.c
+++ b/fs/overlayfs/dir.c
@@ -41,7 +41,7 @@ void ovl_cleanup(struct inode *wdir, struct dentry *wdentry)
41 } 41 }
42} 42}
43 43
44struct dentry *ovl_lookup_temp(struct dentry *workdir, struct dentry *dentry) 44struct dentry *ovl_lookup_temp(struct dentry *workdir)
45{ 45{
46 struct dentry *temp; 46 struct dentry *temp;
47 char name[20]; 47 char name[20];
@@ -68,7 +68,7 @@ static struct dentry *ovl_whiteout(struct dentry *workdir,
68 struct dentry *whiteout; 68 struct dentry *whiteout;
69 struct inode *wdir = workdir->d_inode; 69 struct inode *wdir = workdir->d_inode;
70 70
71 whiteout = ovl_lookup_temp(workdir, dentry); 71 whiteout = ovl_lookup_temp(workdir);
72 if (IS_ERR(whiteout)) 72 if (IS_ERR(whiteout))
73 return whiteout; 73 return whiteout;
74 74
@@ -127,17 +127,28 @@ int ovl_create_real(struct inode *dir, struct dentry *newdentry,
127 return err; 127 return err;
128} 128}
129 129
130static int ovl_set_opaque(struct dentry *dentry, struct dentry *upperdentry) 130static int ovl_set_opaque_xerr(struct dentry *dentry, struct dentry *upper,
131 int xerr)
131{ 132{
132 int err; 133 int err;
133 134
134 err = ovl_do_setxattr(upperdentry, OVL_XATTR_OPAQUE, "y", 1, 0); 135 err = ovl_check_setxattr(dentry, upper, OVL_XATTR_OPAQUE, "y", 1, xerr);
135 if (!err) 136 if (!err)
136 ovl_dentry_set_opaque(dentry); 137 ovl_dentry_set_opaque(dentry);
137 138
138 return err; 139 return err;
139} 140}
140 141
142static int ovl_set_opaque(struct dentry *dentry, struct dentry *upperdentry)
143{
144 /*
145 * Fail with -EIO when trying to create opaque dir and upper doesn't
146 * support xattrs. ovl_rename() calls ovl_set_opaque_xerr(-EXDEV) to
147 * return a specific error for noxattr case.
148 */
149 return ovl_set_opaque_xerr(dentry, upperdentry, -EIO);
150}
151
141/* Common operations required to be done after creation of file on upper */ 152/* Common operations required to be done after creation of file on upper */
142static void ovl_instantiate(struct dentry *dentry, struct inode *inode, 153static void ovl_instantiate(struct dentry *dentry, struct inode *inode,
143 struct dentry *newdentry, bool hardlink) 154 struct dentry *newdentry, bool hardlink)
@@ -162,6 +173,11 @@ static bool ovl_type_merge(struct dentry *dentry)
162 return OVL_TYPE_MERGE(ovl_path_type(dentry)); 173 return OVL_TYPE_MERGE(ovl_path_type(dentry));
163} 174}
164 175
176static bool ovl_type_origin(struct dentry *dentry)
177{
178 return OVL_TYPE_ORIGIN(ovl_path_type(dentry));
179}
180
165static int ovl_create_upper(struct dentry *dentry, struct inode *inode, 181static int ovl_create_upper(struct dentry *dentry, struct inode *inode,
166 struct cattr *attr, struct dentry *hardlink) 182 struct cattr *attr, struct dentry *hardlink)
167{ 183{
@@ -250,7 +266,7 @@ static struct dentry *ovl_clear_empty(struct dentry *dentry,
250 if (upper->d_parent->d_inode != udir) 266 if (upper->d_parent->d_inode != udir)
251 goto out_unlock; 267 goto out_unlock;
252 268
253 opaquedir = ovl_lookup_temp(workdir, dentry); 269 opaquedir = ovl_lookup_temp(workdir);
254 err = PTR_ERR(opaquedir); 270 err = PTR_ERR(opaquedir);
255 if (IS_ERR(opaquedir)) 271 if (IS_ERR(opaquedir))
256 goto out_unlock; 272 goto out_unlock;
@@ -382,7 +398,7 @@ static int ovl_create_over_whiteout(struct dentry *dentry, struct inode *inode,
382 if (err) 398 if (err)
383 goto out; 399 goto out;
384 400
385 newdentry = ovl_lookup_temp(workdir, dentry); 401 newdentry = ovl_lookup_temp(workdir);
386 err = PTR_ERR(newdentry); 402 err = PTR_ERR(newdentry);
387 if (IS_ERR(newdentry)) 403 if (IS_ERR(newdentry))
388 goto out_unlock; 404 goto out_unlock;
@@ -846,18 +862,16 @@ static int ovl_set_redirect(struct dentry *dentry, bool samedir)
846 if (IS_ERR(redirect)) 862 if (IS_ERR(redirect))
847 return PTR_ERR(redirect); 863 return PTR_ERR(redirect);
848 864
849 err = ovl_do_setxattr(ovl_dentry_upper(dentry), OVL_XATTR_REDIRECT, 865 err = ovl_check_setxattr(dentry, ovl_dentry_upper(dentry),
850 redirect, strlen(redirect), 0); 866 OVL_XATTR_REDIRECT,
867 redirect, strlen(redirect), -EXDEV);
851 if (!err) { 868 if (!err) {
852 spin_lock(&dentry->d_lock); 869 spin_lock(&dentry->d_lock);
853 ovl_dentry_set_redirect(dentry, redirect); 870 ovl_dentry_set_redirect(dentry, redirect);
854 spin_unlock(&dentry->d_lock); 871 spin_unlock(&dentry->d_lock);
855 } else { 872 } else {
856 kfree(redirect); 873 kfree(redirect);
857 if (err == -EOPNOTSUPP) 874 pr_warn_ratelimited("overlay: failed to set redirect (%i)\n", err);
858 ovl_clear_redirect_dir(dentry->d_sb);
859 else
860 pr_warn_ratelimited("overlay: failed to set redirect (%i)\n", err);
861 /* Fall back to userspace copy-up */ 875 /* Fall back to userspace copy-up */
862 err = -EXDEV; 876 err = -EXDEV;
863 } 877 }
@@ -943,6 +957,25 @@ static int ovl_rename(struct inode *olddir, struct dentry *old,
943 old_upperdir = ovl_dentry_upper(old->d_parent); 957 old_upperdir = ovl_dentry_upper(old->d_parent);
944 new_upperdir = ovl_dentry_upper(new->d_parent); 958 new_upperdir = ovl_dentry_upper(new->d_parent);
945 959
960 if (!samedir) {
961 /*
962 * When moving a merge dir or non-dir with copy up origin into
963 * a new parent, we are marking the new parent dir "impure".
964 * When ovl_iterate() iterates an "impure" upper dir, it will
965 * look up the origin inodes of the entries to fill d_ino.
966 */
967 if (ovl_type_origin(old)) {
968 err = ovl_set_impure(new->d_parent, new_upperdir);
969 if (err)
970 goto out_revert_creds;
971 }
972 if (!overwrite && ovl_type_origin(new)) {
973 err = ovl_set_impure(old->d_parent, old_upperdir);
974 if (err)
975 goto out_revert_creds;
976 }
977 }
978
946 trap = lock_rename(new_upperdir, old_upperdir); 979 trap = lock_rename(new_upperdir, old_upperdir);
947 980
948 olddentry = lookup_one_len(old->d_name.name, old_upperdir, 981 olddentry = lookup_one_len(old->d_name.name, old_upperdir,
@@ -992,7 +1025,7 @@ static int ovl_rename(struct inode *olddir, struct dentry *old,
992 if (ovl_type_merge_or_lower(old)) 1025 if (ovl_type_merge_or_lower(old))
993 err = ovl_set_redirect(old, samedir); 1026 err = ovl_set_redirect(old, samedir);
994 else if (!old_opaque && ovl_type_merge(new->d_parent)) 1027 else if (!old_opaque && ovl_type_merge(new->d_parent))
995 err = ovl_set_opaque(old, olddentry); 1028 err = ovl_set_opaque_xerr(old, olddentry, -EXDEV);
996 if (err) 1029 if (err)
997 goto out_dput; 1030 goto out_dput;
998 } 1031 }
@@ -1000,7 +1033,7 @@ static int ovl_rename(struct inode *olddir, struct dentry *old,
1000 if (ovl_type_merge_or_lower(new)) 1033 if (ovl_type_merge_or_lower(new))
1001 err = ovl_set_redirect(new, samedir); 1034 err = ovl_set_redirect(new, samedir);
1002 else if (!new_opaque && ovl_type_merge(old->d_parent)) 1035 else if (!new_opaque && ovl_type_merge(old->d_parent))
1003 err = ovl_set_opaque(new, newdentry); 1036 err = ovl_set_opaque_xerr(new, newdentry, -EXDEV);
1004 if (err) 1037 if (err)
1005 goto out_dput; 1038 goto out_dput;
1006 } 1039 }
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
index ad9547f82da5..d613e2c41242 100644
--- a/fs/overlayfs/inode.c
+++ b/fs/overlayfs/inode.c
@@ -240,6 +240,16 @@ int ovl_xattr_get(struct dentry *dentry, const char *name,
240 return res; 240 return res;
241} 241}
242 242
243static bool ovl_can_list(const char *s)
244{
245 /* List all non-trusted xattrs */
246 if (strncmp(s, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) != 0)
247 return true;
248
249 /* Never list trusted.overlay, list other trusted for superuser only */
250 return !ovl_is_private_xattr(s) && capable(CAP_SYS_ADMIN);
251}
252
243ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size) 253ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size)
244{ 254{
245 struct dentry *realdentry = ovl_dentry_real(dentry); 255 struct dentry *realdentry = ovl_dentry_real(dentry);
@@ -263,7 +273,7 @@ ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size)
263 return -EIO; 273 return -EIO;
264 274
265 len -= slen; 275 len -= slen;
266 if (ovl_is_private_xattr(s)) { 276 if (!ovl_can_list(s)) {
267 res -= slen; 277 res -= slen;
268 memmove(s, s + slen, len); 278 memmove(s, s + slen, len);
269 } else { 279 } else {
diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c
index bad0f665a635..f3136c31e72a 100644
--- a/fs/overlayfs/namei.c
+++ b/fs/overlayfs/namei.c
@@ -169,17 +169,7 @@ invalid:
169 169
170static bool ovl_is_opaquedir(struct dentry *dentry) 170static bool ovl_is_opaquedir(struct dentry *dentry)
171{ 171{
172 int res; 172 return ovl_check_dir_xattr(dentry, OVL_XATTR_OPAQUE);
173 char val;
174
175 if (!d_is_dir(dentry))
176 return false;
177
178 res = vfs_getxattr(dentry, OVL_XATTR_OPAQUE, &val, 1);
179 if (res == 1 && val == 'y')
180 return true;
181
182 return false;
183} 173}
184 174
185static int ovl_lookup_single(struct dentry *base, struct ovl_lookup_data *d, 175static int ovl_lookup_single(struct dentry *base, struct ovl_lookup_data *d,
@@ -351,6 +341,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
351 unsigned int ctr = 0; 341 unsigned int ctr = 0;
352 struct inode *inode = NULL; 342 struct inode *inode = NULL;
353 bool upperopaque = false; 343 bool upperopaque = false;
344 bool upperimpure = false;
354 char *upperredirect = NULL; 345 char *upperredirect = NULL;
355 struct dentry *this; 346 struct dentry *this;
356 unsigned int i; 347 unsigned int i;
@@ -395,6 +386,8 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
395 poe = roe; 386 poe = roe;
396 } 387 }
397 upperopaque = d.opaque; 388 upperopaque = d.opaque;
389 if (upperdentry && d.is_dir)
390 upperimpure = ovl_is_impuredir(upperdentry);
398 } 391 }
399 392
400 if (!d.stop && poe->numlower) { 393 if (!d.stop && poe->numlower) {
@@ -463,6 +456,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
463 456
464 revert_creds(old_cred); 457 revert_creds(old_cred);
465 oe->opaque = upperopaque; 458 oe->opaque = upperopaque;
459 oe->impure = upperimpure;
466 oe->redirect = upperredirect; 460 oe->redirect = upperredirect;
467 oe->__upperdentry = upperdentry; 461 oe->__upperdentry = upperdentry;
468 memcpy(oe->lowerstack, stack, sizeof(struct path) * ctr); 462 memcpy(oe->lowerstack, stack, sizeof(struct path) * ctr);
diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
index caa36cb9c46d..0623cebeefff 100644
--- a/fs/overlayfs/overlayfs.h
+++ b/fs/overlayfs/overlayfs.h
@@ -24,6 +24,7 @@ enum ovl_path_type {
24#define OVL_XATTR_OPAQUE OVL_XATTR_PREFIX "opaque" 24#define OVL_XATTR_OPAQUE OVL_XATTR_PREFIX "opaque"
25#define OVL_XATTR_REDIRECT OVL_XATTR_PREFIX "redirect" 25#define OVL_XATTR_REDIRECT OVL_XATTR_PREFIX "redirect"
26#define OVL_XATTR_ORIGIN OVL_XATTR_PREFIX "origin" 26#define OVL_XATTR_ORIGIN OVL_XATTR_PREFIX "origin"
27#define OVL_XATTR_IMPURE OVL_XATTR_PREFIX "impure"
27 28
28/* 29/*
29 * The tuple (fh,uuid) is a universal unique identifier for a copy up origin, 30 * The tuple (fh,uuid) is a universal unique identifier for a copy up origin,
@@ -203,10 +204,10 @@ struct dentry *ovl_dentry_real(struct dentry *dentry);
203struct ovl_dir_cache *ovl_dir_cache(struct dentry *dentry); 204struct ovl_dir_cache *ovl_dir_cache(struct dentry *dentry);
204void ovl_set_dir_cache(struct dentry *dentry, struct ovl_dir_cache *cache); 205void ovl_set_dir_cache(struct dentry *dentry, struct ovl_dir_cache *cache);
205bool ovl_dentry_is_opaque(struct dentry *dentry); 206bool ovl_dentry_is_opaque(struct dentry *dentry);
207bool ovl_dentry_is_impure(struct dentry *dentry);
206bool ovl_dentry_is_whiteout(struct dentry *dentry); 208bool ovl_dentry_is_whiteout(struct dentry *dentry);
207void ovl_dentry_set_opaque(struct dentry *dentry); 209void ovl_dentry_set_opaque(struct dentry *dentry);
208bool ovl_redirect_dir(struct super_block *sb); 210bool ovl_redirect_dir(struct super_block *sb);
209void ovl_clear_redirect_dir(struct super_block *sb);
210const char *ovl_dentry_get_redirect(struct dentry *dentry); 211const char *ovl_dentry_get_redirect(struct dentry *dentry);
211void ovl_dentry_set_redirect(struct dentry *dentry, const char *redirect); 212void ovl_dentry_set_redirect(struct dentry *dentry, const char *redirect);
212void ovl_dentry_update(struct dentry *dentry, struct dentry *upperdentry); 213void ovl_dentry_update(struct dentry *dentry, struct dentry *upperdentry);
@@ -219,6 +220,17 @@ bool ovl_is_whiteout(struct dentry *dentry);
219struct file *ovl_path_open(struct path *path, int flags); 220struct file *ovl_path_open(struct path *path, int flags);
220int ovl_copy_up_start(struct dentry *dentry); 221int ovl_copy_up_start(struct dentry *dentry);
221void ovl_copy_up_end(struct dentry *dentry); 222void ovl_copy_up_end(struct dentry *dentry);
223bool ovl_check_dir_xattr(struct dentry *dentry, const char *name);
224int ovl_check_setxattr(struct dentry *dentry, struct dentry *upperdentry,
225 const char *name, const void *value, size_t size,
226 int xerr);
227int ovl_set_impure(struct dentry *dentry, struct dentry *upperdentry);
228
229static inline bool ovl_is_impuredir(struct dentry *dentry)
230{
231 return ovl_check_dir_xattr(dentry, OVL_XATTR_IMPURE);
232}
233
222 234
223/* namei.c */ 235/* namei.c */
224int ovl_path_next(int idx, struct dentry *dentry, struct path *path); 236int ovl_path_next(int idx, struct dentry *dentry, struct path *path);
@@ -263,7 +275,7 @@ static inline void ovl_copyattr(struct inode *from, struct inode *to)
263 275
264/* dir.c */ 276/* dir.c */
265extern const struct inode_operations ovl_dir_inode_operations; 277extern const struct inode_operations ovl_dir_inode_operations;
266struct dentry *ovl_lookup_temp(struct dentry *workdir, struct dentry *dentry); 278struct dentry *ovl_lookup_temp(struct dentry *workdir);
267struct cattr { 279struct cattr {
268 dev_t rdev; 280 dev_t rdev;
269 umode_t mode; 281 umode_t mode;
diff --git a/fs/overlayfs/ovl_entry.h b/fs/overlayfs/ovl_entry.h
index b2023ddb8532..34bc4a9f5c61 100644
--- a/fs/overlayfs/ovl_entry.h
+++ b/fs/overlayfs/ovl_entry.h
@@ -28,6 +28,7 @@ struct ovl_fs {
28 /* creds of process who forced instantiation of super block */ 28 /* creds of process who forced instantiation of super block */
29 const struct cred *creator_cred; 29 const struct cred *creator_cred;
30 bool tmpfile; 30 bool tmpfile;
31 bool noxattr;
31 wait_queue_head_t copyup_wq; 32 wait_queue_head_t copyup_wq;
32 /* sb common to all layers */ 33 /* sb common to all layers */
33 struct super_block *same_sb; 34 struct super_block *same_sb;
@@ -42,6 +43,7 @@ struct ovl_entry {
42 u64 version; 43 u64 version;
43 const char *redirect; 44 const char *redirect;
44 bool opaque; 45 bool opaque;
46 bool impure;
45 bool copying; 47 bool copying;
46 }; 48 };
47 struct rcu_head rcu; 49 struct rcu_head rcu;
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index 9828b7de8999..4882ffb37bae 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -891,6 +891,19 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
891 dput(temp); 891 dput(temp);
892 else 892 else
893 pr_warn("overlayfs: upper fs does not support tmpfile.\n"); 893 pr_warn("overlayfs: upper fs does not support tmpfile.\n");
894
895 /*
896 * Check if upper/work fs supports trusted.overlay.*
897 * xattr
898 */
899 err = ovl_do_setxattr(ufs->workdir, OVL_XATTR_OPAQUE,
900 "0", 1, 0);
901 if (err) {
902 ufs->noxattr = true;
903 pr_warn("overlayfs: upper fs does not support xattr.\n");
904 } else {
905 vfs_removexattr(ufs->workdir, OVL_XATTR_OPAQUE);
906 }
894 } 907 }
895 } 908 }
896 909
@@ -961,7 +974,10 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
961 path_put(&workpath); 974 path_put(&workpath);
962 kfree(lowertmp); 975 kfree(lowertmp);
963 976
964 oe->__upperdentry = upperpath.dentry; 977 if (upperpath.dentry) {
978 oe->__upperdentry = upperpath.dentry;
979 oe->impure = ovl_is_impuredir(upperpath.dentry);
980 }
965 for (i = 0; i < numlower; i++) { 981 for (i = 0; i < numlower; i++) {
966 oe->lowerstack[i].dentry = stack[i].dentry; 982 oe->lowerstack[i].dentry = stack[i].dentry;
967 oe->lowerstack[i].mnt = ufs->lower_mnt[i]; 983 oe->lowerstack[i].mnt = ufs->lower_mnt[i];
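The new super.c block probes the upper/work filesystem once at mount time: set a throwaway trusted.overlay xattr on the workdir, remember noxattr on failure, and remove the probe value on success. A userspace analogue of the same probe using setxattr(2) (path handling is illustrative):

    #include <errno.h>
    #include <stdio.h>
    #include <sys/xattr.h>

    /* Returns 1 if the fs supports the namespace, 0 if not, -errno on
     * other errors. Mirrors the mount-time probe in the hunk above.
     * Note that trusted.* xattrs need CAP_SYS_ADMIN. */
    static int supports_trusted_xattr(const char *workdir)
    {
        if (setxattr(workdir, "trusted.overlay.opaque", "0", 1, 0) < 0)
            return errno == ENOTSUP ? 0 : -errno;
        removexattr(workdir, "trusted.overlay.opaque");
        return 1;
    }

    int main(int argc, char **argv)
    {
        if (argc < 2)
            return 2;
        printf("xattr support: %d\n", supports_trusted_xattr(argv[1]));
        return 0;
    }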
diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c
index cfdea47313a1..809048913889 100644
--- a/fs/overlayfs/util.c
+++ b/fs/overlayfs/util.c
@@ -175,6 +175,13 @@ bool ovl_dentry_is_opaque(struct dentry *dentry)
175 return oe->opaque; 175 return oe->opaque;
176} 176}
177 177
178bool ovl_dentry_is_impure(struct dentry *dentry)
179{
180 struct ovl_entry *oe = dentry->d_fsdata;
181
182 return oe->impure;
183}
184
178bool ovl_dentry_is_whiteout(struct dentry *dentry) 185bool ovl_dentry_is_whiteout(struct dentry *dentry)
179{ 186{
180 return !dentry->d_inode && ovl_dentry_is_opaque(dentry); 187 return !dentry->d_inode && ovl_dentry_is_opaque(dentry);
@@ -191,14 +198,7 @@ bool ovl_redirect_dir(struct super_block *sb)
191{ 198{
192 struct ovl_fs *ofs = sb->s_fs_info; 199 struct ovl_fs *ofs = sb->s_fs_info;
193 200
194 return ofs->config.redirect_dir; 201 return ofs->config.redirect_dir && !ofs->noxattr;
195}
196
197void ovl_clear_redirect_dir(struct super_block *sb)
198{
199 struct ovl_fs *ofs = sb->s_fs_info;
200
201 ofs->config.redirect_dir = false;
202} 202}
203 203
204const char *ovl_dentry_get_redirect(struct dentry *dentry) 204const char *ovl_dentry_get_redirect(struct dentry *dentry)
@@ -303,3 +303,59 @@ void ovl_copy_up_end(struct dentry *dentry)
303 wake_up_locked(&ofs->copyup_wq); 303 wake_up_locked(&ofs->copyup_wq);
304 spin_unlock(&ofs->copyup_wq.lock); 304 spin_unlock(&ofs->copyup_wq.lock);
305} 305}
306
307bool ovl_check_dir_xattr(struct dentry *dentry, const char *name)
308{
309 int res;
310 char val;
311
312 if (!d_is_dir(dentry))
313 return false;
314
315 res = vfs_getxattr(dentry, name, &val, 1);
316 if (res == 1 && val == 'y')
317 return true;
318
319 return false;
320}
321
322int ovl_check_setxattr(struct dentry *dentry, struct dentry *upperdentry,
323 const char *name, const void *value, size_t size,
324 int xerr)
325{
326 int err;
327 struct ovl_fs *ofs = dentry->d_sb->s_fs_info;
328
329 if (ofs->noxattr)
330 return xerr;
331
332 err = ovl_do_setxattr(upperdentry, name, value, size, 0);
333
334 if (err == -EOPNOTSUPP) {
335 pr_warn("overlayfs: cannot set %s xattr on upper\n", name);
336 ofs->noxattr = true;
337 return xerr;
338 }
339
340 return err;
341}
342
343int ovl_set_impure(struct dentry *dentry, struct dentry *upperdentry)
344{
345 int err;
346 struct ovl_entry *oe = dentry->d_fsdata;
347
348 if (oe->impure)
349 return 0;
350
351 /*
352 * Do not fail when upper doesn't support xattrs.
353 * Upper inodes won't have origin or redirect xattrs anyway.
354 */
355 err = ovl_check_setxattr(dentry, upperdentry, OVL_XATTR_IMPURE,
356 "y", 1, 0);
357 if (!err)
358 oe->impure = true;
359
360 return err;
361}
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 45f6bf68fff3..f1e1927ccd48 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -821,7 +821,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
821 if (!mmget_not_zero(mm)) 821 if (!mmget_not_zero(mm))
822 goto free; 822 goto free;
823 823
824 flags = write ? FOLL_WRITE : 0; 824 flags = FOLL_FORCE | (write ? FOLL_WRITE : 0);
825 825
826 while (count > 0) { 826 while (count > 0) {
827 int this_len = min_t(int, count, PAGE_SIZE); 827 int this_len = min_t(int, count, PAGE_SIZE);
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index f0c8b33d99b1..520802da059c 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -300,11 +300,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
 
 	/* We don't show the stack guard page in /proc/maps */
 	start = vma->vm_start;
-	if (stack_guard_page_start(vma, start))
-		start += PAGE_SIZE;
 	end = vma->vm_end;
-	if (stack_guard_page_end(vma, end))
-		end -= PAGE_SIZE;
 
 	seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
 	seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index ebf80c7739e1..48813aeaab80 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -1512,6 +1512,22 @@ int dquot_initialize(struct inode *inode)
 }
 EXPORT_SYMBOL(dquot_initialize);
 
+bool dquot_initialize_needed(struct inode *inode)
+{
+	struct dquot **dquots;
+	int i;
+
+	if (!dquot_active(inode))
+		return false;
+
+	dquots = i_dquot(inode);
+	for (i = 0; i < MAXQUOTAS; i++)
+		if (!dquots[i] && sb_has_quota_active(inode->i_sb, i))
+			return true;
+	return false;
+}
+EXPORT_SYMBOL(dquot_initialize_needed);
+
 /*
  * Release all quotas referenced by inode.
  *
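
The new dquot_initialize_needed() answers one question: is some quota type
active on the superblock while the inode still lacks the matching dquot?
A standalone sketch of that predicate, with stand-in types rather than the
kernel quota API:

#include <stdbool.h>

#define MAXQUOTAS 3

struct dquot;	/* opaque stand-in */

/* True iff any enabled quota type has no dquot attached yet. */
static bool needs_init(struct dquot **dquots, const bool *type_active)
{
	for (int i = 0; i < MAXQUOTAS; i++)
		if (!dquots[i] && type_active[i])
			return true;
	return false;
}
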
diff --git a/fs/read_write.c b/fs/read_write.c
index 47c1d4484df9..19d4d88fa285 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -1285,7 +1285,7 @@ static size_t compat_writev(struct file *file,
 	if (!(file->f_mode & FMODE_CAN_WRITE))
 		goto out;
 
-	ret = compat_do_readv_writev(WRITE, file, vec, vlen, pos, 0);
+	ret = compat_do_readv_writev(WRITE, file, vec, vlen, pos, flags);
 
 out:
 	if (ret > 0)
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index da01f497180a..39bb1e838d8d 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -1112,7 +1112,7 @@ static int flush_commit_list(struct super_block *s,
 	depth = reiserfs_write_unlock_nested(s);
 	if (reiserfs_barrier_flush(s))
 		__sync_dirty_buffer(jl->j_commit_bh,
-				    REQ_PREFLUSH | REQ_FUA);
+				    REQ_SYNC | REQ_PREFLUSH | REQ_FUA);
 	else
 		sync_dirty_buffer(jl->j_commit_bh);
 	reiserfs_write_lock_nested(s, depth);
@@ -1271,7 +1271,7 @@ static int _update_journal_header_block(struct super_block *sb,
 
 	if (reiserfs_barrier_flush(sb))
 		__sync_dirty_buffer(journal->j_header_bh,
-				    REQ_PREFLUSH | REQ_FUA);
+				    REQ_SYNC | REQ_PREFLUSH | REQ_FUA);
 	else
 		sync_dirty_buffer(journal->j_header_bh);
 
diff --git a/fs/stat.c b/fs/stat.c
index f494b182c7c7..c35610845ab1 100644
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -672,6 +672,7 @@ void __inode_add_bytes(struct inode *inode, loff_t bytes)
 		inode->i_bytes -= 512;
 	}
 }
+EXPORT_SYMBOL(__inode_add_bytes);
 
 void inode_add_bytes(struct inode *inode, loff_t bytes)
 {
diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c
index a0376a2c1c29..f80be4c5df9d 100644
--- a/fs/ufs/balloc.c
+++ b/fs/ufs/balloc.c
@@ -82,7 +82,8 @@ void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
 		ufs_error (sb, "ufs_free_fragments",
 			   "bit already cleared for fragment %u", i);
 	}
 
+	inode_sub_bytes(inode, count << uspi->s_fshift);
 	fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
 	uspi->cs_total.cs_nffree += count;
 	fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
@@ -184,6 +185,7 @@ do_more:
 		ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
 	}
 	ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
+	inode_sub_bytes(inode, uspi->s_fpb << uspi->s_fshift);
 	if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
 		ufs_clusteracct (sb, ucpi, blkno, 1);
 
@@ -398,10 +400,12 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
 	/*
 	 * There is not enough space for user on the device
 	 */
-	if (!capable(CAP_SYS_RESOURCE) && ufs_freespace(uspi, UFS_MINFREE) <= 0) {
-		mutex_unlock(&UFS_SB(sb)->s_lock);
-		UFSD("EXIT (FAILED)\n");
-		return 0;
+	if (unlikely(ufs_freefrags(uspi) <= uspi->s_root_blocks)) {
+		if (!capable(CAP_SYS_RESOURCE)) {
+			mutex_unlock(&UFS_SB(sb)->s_lock);
+			UFSD("EXIT (FAILED)\n");
+			return 0;
+		}
 	}
 
 	if (goal >= uspi->s_size)
@@ -419,12 +423,12 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
 	if (result) {
 		ufs_clear_frags(inode, result + oldcount,
 				newcount - oldcount, locked_page != NULL);
+		*err = 0;
 		write_seqlock(&UFS_I(inode)->meta_lock);
 		ufs_cpu_to_data_ptr(sb, p, result);
-		write_sequnlock(&UFS_I(inode)->meta_lock);
-		*err = 0;
 		UFS_I(inode)->i_lastfrag =
 			max(UFS_I(inode)->i_lastfrag, fragment + count);
+		write_sequnlock(&UFS_I(inode)->meta_lock);
 	}
 	mutex_unlock(&UFS_SB(sb)->s_lock);
 	UFSD("EXIT, result %llu\n", (unsigned long long)result);
@@ -437,8 +441,10 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
 	result = ufs_add_fragments(inode, tmp, oldcount, newcount);
 	if (result) {
 		*err = 0;
+		read_seqlock_excl(&UFS_I(inode)->meta_lock);
 		UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
 						fragment + count);
+		read_sequnlock_excl(&UFS_I(inode)->meta_lock);
 		ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
 				locked_page != NULL);
 		mutex_unlock(&UFS_SB(sb)->s_lock);
@@ -449,39 +455,29 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
 	/*
 	 * allocate new block and move data
 	 */
-	switch (fs32_to_cpu(sb, usb1->fs_optim)) {
-	case UFS_OPTSPACE:
+	if (fs32_to_cpu(sb, usb1->fs_optim) == UFS_OPTSPACE) {
 		request = newcount;
-		if (uspi->s_minfree < 5 || uspi->cs_total.cs_nffree
-		    > uspi->s_dsize * uspi->s_minfree / (2 * 100))
-			break;
-		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
-		break;
-	default:
-		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
-
-	case UFS_OPTTIME:
+		if (uspi->cs_total.cs_nffree < uspi->s_space_to_time)
+			usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
+	} else {
 		request = uspi->s_fpb;
-		if (uspi->cs_total.cs_nffree < uspi->s_dsize *
-		    (uspi->s_minfree - 2) / 100)
-			break;
-		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
-		break;
+		if (uspi->cs_total.cs_nffree > uspi->s_time_to_space)
+			usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTSPACE);
 	}
 	result = ufs_alloc_fragments (inode, cgno, goal, request, err);
 	if (result) {
 		ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
 				locked_page != NULL);
+		mutex_unlock(&UFS_SB(sb)->s_lock);
 		ufs_change_blocknr(inode, fragment - oldcount, oldcount,
 				   uspi->s_sbbase + tmp,
 				   uspi->s_sbbase + result, locked_page);
+		*err = 0;
 		write_seqlock(&UFS_I(inode)->meta_lock);
 		ufs_cpu_to_data_ptr(sb, p, result);
-		write_sequnlock(&UFS_I(inode)->meta_lock);
-		*err = 0;
 		UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
 					       fragment + count);
-		mutex_unlock(&UFS_SB(sb)->s_lock);
+		write_sequnlock(&UFS_I(inode)->meta_lock);
 		if (newcount < request)
 			ufs_free_fragments (inode, result + newcount, request - newcount);
 		ufs_free_fragments (inode, tmp, oldcount);
@@ -494,6 +490,20 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
 	return 0;
 }
 
+static bool try_add_frags(struct inode *inode, unsigned frags)
+{
+	unsigned size = frags * i_blocksize(inode);
+	spin_lock(&inode->i_lock);
+	__inode_add_bytes(inode, size);
+	if (unlikely((u32)inode->i_blocks != inode->i_blocks)) {
+		__inode_sub_bytes(inode, size);
+		spin_unlock(&inode->i_lock);
+		return false;
+	}
+	spin_unlock(&inode->i_lock);
+	return true;
+}
+
 static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
 			     unsigned oldcount, unsigned newcount)
 {
@@ -530,6 +540,9 @@ static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
 	for (i = oldcount; i < newcount; i++)
 		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
 			return 0;
+
+	if (!try_add_frags(inode, count))
+		return 0;
 	/*
 	 * Block can be extended
 	 */
@@ -647,6 +660,7 @@ cg_found:
 		ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i);
 	i = uspi->s_fpb - count;
 
+	inode_sub_bytes(inode, i << uspi->s_fshift);
 	fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
 	uspi->cs_total.cs_nffree += i;
 	fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i);
@@ -657,6 +671,8 @@ cg_found:
 	result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
 	if (result == INVBLOCK)
 		return 0;
+	if (!try_add_frags(inode, count))
+		return 0;
 	for (i = 0; i < count; i++)
 		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, result + i);
 
@@ -716,6 +732,8 @@ norot:
 		return INVBLOCK;
 	ucpi->c_rotor = result;
 gotit:
+	if (!try_add_frags(inode, uspi->s_fpb))
+		return 0;
 	blkno = ufs_fragstoblks(result);
 	ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
 	if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
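
The new try_add_frags() in the hunks above charges i_blocks first, then
checks that the counter still round-trips through 32 bits, and rolls the
charge back if it does not; the core trick is the (u32) truncation compare.
A standalone sketch, assuming a 64-bit in-memory counter that must fit a
32-bit on-disk field:

#include <stdbool.h>
#include <stdint.h>

struct fake_inode {
	uint64_t i_blocks;	/* 512-byte sectors, hypothetical type */
};

static bool try_charge(struct fake_inode *inode, uint64_t sectors)
{
	inode->i_blocks += sectors;
	if ((uint32_t)inode->i_blocks != inode->i_blocks) {
		inode->i_blocks -= sectors;	/* would overflow u32: undo */
		return false;
	}
	return true;
}
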
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
index 7e41aee7b69a..f36d6a53687d 100644
--- a/fs/ufs/inode.c
+++ b/fs/ufs/inode.c
@@ -235,7 +235,8 @@ ufs_extend_tail(struct inode *inode, u64 writes_to,
 
 	p = ufs_get_direct_data_ptr(uspi, ufsi, block);
 	tmp = ufs_new_fragments(inode, p, lastfrag, ufs_data_ptr_to_cpu(sb, p),
-				new_size, err, locked_page);
+				new_size - (lastfrag & uspi->s_fpbmask), err,
+				locked_page);
 	return tmp != 0;
 }
 
@@ -284,7 +285,7 @@ ufs_inode_getfrag(struct inode *inode, unsigned index,
 			goal += uspi->s_fpb;
 	}
 	tmp = ufs_new_fragments(inode, p, ufs_blknum(new_fragment),
-				goal, uspi->s_fpb, err, locked_page);
+				goal, nfrags, err, locked_page);
 
 	if (!tmp) {
 		*err = -ENOSPC;
@@ -400,11 +401,20 @@ static int ufs_getfrag_block(struct inode *inode, sector_t fragment, struct buff
 	u64 phys64 = 0;
 	unsigned frag = fragment & uspi->s_fpbmask;
 
-	if (!create) {
-		phys64 = ufs_frag_map(inode, offsets, depth);
-		goto out;
-	}
+	phys64 = ufs_frag_map(inode, offsets, depth);
+	if (!create)
+		goto done;
 
+	if (phys64) {
+		if (fragment >= UFS_NDIR_FRAGMENT)
+			goto done;
+		read_seqlock_excl(&UFS_I(inode)->meta_lock);
+		if (fragment < UFS_I(inode)->i_lastfrag) {
+			read_sequnlock_excl(&UFS_I(inode)->meta_lock);
+			goto done;
+		}
+		read_sequnlock_excl(&UFS_I(inode)->meta_lock);
+	}
 	/* This code entered only while writing ....? */
 
 	mutex_lock(&UFS_I(inode)->truncate_mutex);
@@ -448,6 +458,11 @@ out:
 	}
 	mutex_unlock(&UFS_I(inode)->truncate_mutex);
 	return err;
+
+done:
+	if (phys64)
+		map_bh(bh_result, sb, phys64 + frag);
+	return 0;
 }
 
 static int ufs_writepage(struct page *page, struct writeback_control *wbc)
@@ -551,10 +566,8 @@ static int ufs1_read_inode(struct inode *inode, struct ufs_inode *ufs_inode)
 	 */
 	inode->i_mode = mode = fs16_to_cpu(sb, ufs_inode->ui_mode);
 	set_nlink(inode, fs16_to_cpu(sb, ufs_inode->ui_nlink));
-	if (inode->i_nlink == 0) {
-		ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino);
-		return -1;
-	}
+	if (inode->i_nlink == 0)
+		return -ESTALE;
 
 	/*
 	 * Linux now has 32-bit uid and gid, so we can support EFT.
@@ -563,9 +576,9 @@ static int ufs1_read_inode(struct inode *inode, struct ufs_inode *ufs_inode)
 	i_gid_write(inode, ufs_get_inode_gid(sb, ufs_inode));
 
 	inode->i_size = fs64_to_cpu(sb, ufs_inode->ui_size);
-	inode->i_atime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_atime.tv_sec);
-	inode->i_ctime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_ctime.tv_sec);
-	inode->i_mtime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_mtime.tv_sec);
+	inode->i_atime.tv_sec = (signed)fs32_to_cpu(sb, ufs_inode->ui_atime.tv_sec);
+	inode->i_ctime.tv_sec = (signed)fs32_to_cpu(sb, ufs_inode->ui_ctime.tv_sec);
+	inode->i_mtime.tv_sec = (signed)fs32_to_cpu(sb, ufs_inode->ui_mtime.tv_sec);
 	inode->i_mtime.tv_nsec = 0;
 	inode->i_atime.tv_nsec = 0;
 	inode->i_ctime.tv_nsec = 0;
@@ -599,10 +612,8 @@ static int ufs2_read_inode(struct inode *inode, struct ufs2_inode *ufs2_inode)
 	 */
 	inode->i_mode = mode = fs16_to_cpu(sb, ufs2_inode->ui_mode);
 	set_nlink(inode, fs16_to_cpu(sb, ufs2_inode->ui_nlink));
-	if (inode->i_nlink == 0) {
-		ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino);
-		return -1;
-	}
+	if (inode->i_nlink == 0)
+		return -ESTALE;
 
 	/*
 	 * Linux now has 32-bit uid and gid, so we can support EFT.
@@ -642,7 +653,7 @@ struct inode *ufs_iget(struct super_block *sb, unsigned long ino)
 	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
 	struct buffer_head * bh;
 	struct inode *inode;
-	int err;
+	int err = -EIO;
 
 	UFSD("ENTER, ino %lu\n", ino);
 
@@ -677,9 +688,10 @@ struct inode *ufs_iget(struct super_block *sb, unsigned long ino)
 		err = ufs1_read_inode(inode,
 				      ufs_inode + ufs_inotofsbo(inode->i_ino));
 	}
-
+	brelse(bh);
 	if (err)
 		goto bad_inode;
+
 	inode->i_version++;
 	ufsi->i_lastfrag =
 		(inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift;
@@ -688,15 +700,13 @@ struct inode *ufs_iget(struct super_block *sb, unsigned long ino)
 
 	ufs_set_inode_ops(inode);
 
-	brelse(bh);
-
 	UFSD("EXIT\n");
 	unlock_new_inode(inode);
 	return inode;
 
 bad_inode:
 	iget_failed(inode);
-	return ERR_PTR(-EIO);
+	return ERR_PTR(err);
 }
 
 static void ufs1_update_inode(struct inode *inode, struct ufs_inode *ufs_inode)
@@ -841,8 +851,11 @@ void ufs_evict_inode(struct inode * inode)
 	truncate_inode_pages_final(&inode->i_data);
 	if (want_delete) {
 		inode->i_size = 0;
-		if (inode->i_blocks)
+		if (inode->i_blocks &&
+		    (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
+		     S_ISLNK(inode->i_mode)))
 			ufs_truncate_blocks(inode);
+		ufs_update_inode(inode, inode_needs_sync(inode));
 	}
 
 	invalidate_inode_buffers(inode);
@@ -868,7 +881,6 @@ static inline void free_data(struct to_free *ctx, u64 from, unsigned count)
 		ctx->to = from + count;
 }
 
-#define DIRECT_BLOCK ((inode->i_size + uspi->s_bsize - 1) >> uspi->s_bshift)
 #define DIRECT_FRAGMENT ((inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift)
 
 static void ufs_trunc_direct(struct inode *inode)
@@ -1100,25 +1112,30 @@ out:
 	return err;
 }
 
-static void __ufs_truncate_blocks(struct inode *inode)
+static void ufs_truncate_blocks(struct inode *inode)
 {
 	struct ufs_inode_info *ufsi = UFS_I(inode);
 	struct super_block *sb = inode->i_sb;
 	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
 	unsigned offsets[4];
-	int depth = ufs_block_to_path(inode, DIRECT_BLOCK, offsets);
+	int depth;
 	int depth2;
 	unsigned i;
 	struct ufs_buffer_head *ubh[3];
 	void *p;
 	u64 block;
 
-	if (!depth)
-		return;
+	if (inode->i_size) {
+		sector_t last = (inode->i_size - 1) >> uspi->s_bshift;
+		depth = ufs_block_to_path(inode, last, offsets);
+		if (!depth)
+			return;
+	} else {
+		depth = 1;
+	}
 
-	/* find the last non-zero in offsets[] */
 	for (depth2 = depth - 1; depth2; depth2--)
-		if (offsets[depth2])
+		if (offsets[depth2] != uspi->s_apb - 1)
 			break;
 
 	mutex_lock(&ufsi->truncate_mutex);
@@ -1127,9 +1144,8 @@ static void __ufs_truncate_blocks(struct inode *inode)
 		offsets[0] = UFS_IND_BLOCK;
 	} else {
 		/* get the blocks that should be partially emptied */
-		p = ufs_get_direct_data_ptr(uspi, ufsi, offsets[0]);
+		p = ufs_get_direct_data_ptr(uspi, ufsi, offsets[0]++);
 		for (i = 0; i < depth2; i++) {
-			offsets[i]++; /* next branch is fully freed */
 			block = ufs_data_ptr_to_cpu(sb, p);
 			if (!block)
 				break;
@@ -1140,7 +1156,7 @@ static void __ufs_truncate_blocks(struct inode *inode)
 				write_sequnlock(&ufsi->meta_lock);
 				break;
 			}
-			p = ubh_get_data_ptr(uspi, ubh[i], offsets[i + 1]);
+			p = ubh_get_data_ptr(uspi, ubh[i], offsets[i + 1]++);
 		}
 		while (i--)
 			free_branch_tail(inode, offsets[i + 1], ubh[i], depth - i - 1);
@@ -1155,7 +1171,9 @@ static void __ufs_truncate_blocks(struct inode *inode)
 			free_full_branch(inode, block, i - UFS_IND_BLOCK + 1);
 		}
 	}
+	read_seqlock_excl(&ufsi->meta_lock);
 	ufsi->i_lastfrag = DIRECT_FRAGMENT;
+	read_sequnlock_excl(&ufsi->meta_lock);
 	mark_inode_dirty(inode);
 	mutex_unlock(&ufsi->truncate_mutex);
 }
@@ -1183,7 +1201,7 @@ static int ufs_truncate(struct inode *inode, loff_t size)
 
 	truncate_setsize(inode, size);
 
-	__ufs_truncate_blocks(inode);
+	ufs_truncate_blocks(inode);
 	inode->i_mtime = inode->i_ctime = current_time(inode);
 	mark_inode_dirty(inode);
 out:
@@ -1191,16 +1209,6 @@ out:
 	return err;
 }
 
-static void ufs_truncate_blocks(struct inode *inode)
-{
-	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
-	      S_ISLNK(inode->i_mode)))
-		return;
-	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
-		return;
-	__ufs_truncate_blocks(inode);
-}
-
 int ufs_setattr(struct dentry *dentry, struct iattr *attr)
 {
 	struct inode *inode = d_inode(dentry);
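
The rewritten truncate path above walks to the block holding the last byte
that is kept, (i_size - 1) >> bshift, instead of the old round-up
DIRECT_BLOCK macro, which for a block-aligned size points one block past
the data. A standalone arithmetic check with illustrative constants:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const unsigned bshift = 12;		/* 4K blocks, illustrative */
	const uint64_t bsize = 1ULL << bshift;
	uint64_t size = 2 * bsize;		/* exactly two blocks of data */

	uint64_t round_up = (size + bsize - 1) >> bshift;	/* old form: 2 */
	uint64_t last_kept = (size - 1) >> bshift;		/* new form: 1 */

	printf("round-up index: %llu, last kept block: %llu\n",
	       (unsigned long long)round_up, (unsigned long long)last_kept);
	return 0;
}
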
diff --git a/fs/ufs/super.c b/fs/ufs/super.c
index 131b2b77c818..0a4f58a5073c 100644
--- a/fs/ufs/super.c
+++ b/fs/ufs/super.c
@@ -480,7 +480,7 @@ static void ufs_setup_cstotal(struct super_block *sb)
 	usb3 = ubh_get_usb_third(uspi);
 
 	if ((mtype == UFS_MOUNT_UFSTYPE_44BSD &&
-	     (usb1->fs_flags & UFS_FLAGS_UPDATED)) ||
+	     (usb2->fs_un.fs_u2.fs_maxbsize == usb1->fs_bsize)) ||
 	    mtype == UFS_MOUNT_UFSTYPE_UFS2) {
 		/*we have statistic in different place, then usual*/
 		uspi->cs_total.cs_ndir = fs64_to_cpu(sb, usb2->fs_un.fs_u2.cs_ndir);
@@ -596,9 +596,7 @@ static void ufs_put_cstotal(struct super_block *sb)
 	usb2 = ubh_get_usb_second(uspi);
 	usb3 = ubh_get_usb_third(uspi);
 
-	if ((mtype == UFS_MOUNT_UFSTYPE_44BSD &&
-	     (usb1->fs_flags & UFS_FLAGS_UPDATED)) ||
-	    mtype == UFS_MOUNT_UFSTYPE_UFS2) {
+	if (mtype == UFS_MOUNT_UFSTYPE_UFS2) {
 		/*we have statistic in different place, then usual*/
 		usb2->fs_un.fs_u2.cs_ndir =
 			cpu_to_fs64(sb, uspi->cs_total.cs_ndir);
@@ -608,16 +606,26 @@ static void ufs_put_cstotal(struct super_block *sb)
 			cpu_to_fs64(sb, uspi->cs_total.cs_nifree);
 		usb3->fs_un1.fs_u2.cs_nffree =
 			cpu_to_fs64(sb, uspi->cs_total.cs_nffree);
-	} else {
-		usb1->fs_cstotal.cs_ndir =
-			cpu_to_fs32(sb, uspi->cs_total.cs_ndir);
-		usb1->fs_cstotal.cs_nbfree =
-			cpu_to_fs32(sb, uspi->cs_total.cs_nbfree);
-		usb1->fs_cstotal.cs_nifree =
-			cpu_to_fs32(sb, uspi->cs_total.cs_nifree);
-		usb1->fs_cstotal.cs_nffree =
-			cpu_to_fs32(sb, uspi->cs_total.cs_nffree);
+		goto out;
+	}
+
+	if (mtype == UFS_MOUNT_UFSTYPE_44BSD &&
+	    (usb2->fs_un.fs_u2.fs_maxbsize == usb1->fs_bsize)) {
+		/* store stats in both old and new places */
+		usb2->fs_un.fs_u2.cs_ndir =
+			cpu_to_fs64(sb, uspi->cs_total.cs_ndir);
+		usb2->fs_un.fs_u2.cs_nbfree =
+			cpu_to_fs64(sb, uspi->cs_total.cs_nbfree);
+		usb3->fs_un1.fs_u2.cs_nifree =
+			cpu_to_fs64(sb, uspi->cs_total.cs_nifree);
+		usb3->fs_un1.fs_u2.cs_nffree =
+			cpu_to_fs64(sb, uspi->cs_total.cs_nffree);
 	}
+	usb1->fs_cstotal.cs_ndir = cpu_to_fs32(sb, uspi->cs_total.cs_ndir);
+	usb1->fs_cstotal.cs_nbfree = cpu_to_fs32(sb, uspi->cs_total.cs_nbfree);
+	usb1->fs_cstotal.cs_nifree = cpu_to_fs32(sb, uspi->cs_total.cs_nifree);
+	usb1->fs_cstotal.cs_nffree = cpu_to_fs32(sb, uspi->cs_total.cs_nffree);
+out:
 	ubh_mark_buffer_dirty(USPI_UBH(uspi));
 	ufs_print_super_stuff(sb, usb1, usb2, usb3);
 	UFSD("EXIT\n");
@@ -746,6 +754,23 @@ static void ufs_put_super(struct super_block *sb)
 	return;
 }
 
+static u64 ufs_max_bytes(struct super_block *sb)
+{
+	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
+	int bits = uspi->s_apbshift;
+	u64 res;
+
+	if (bits > 21)
+		res = ~0ULL;
+	else
+		res = UFS_NDADDR + (1LL << bits) + (1LL << (2*bits)) +
+			(1LL << (3*bits));
+
+	if (res >= (MAX_LFS_FILESIZE >> uspi->s_bshift))
+		return MAX_LFS_FILESIZE;
+	return res << uspi->s_bshift;
+}
+
 static int ufs_fill_super(struct super_block *sb, void *data, int silent)
 {
 	struct ufs_sb_info * sbi;
@@ -812,9 +837,8 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent)
 	uspi->s_dirblksize = UFS_SECTOR_SIZE;
 	super_block_offset=UFS_SBLOCK;
 
-	/* Keep 2Gig file limit. Some UFS variants need to override
-	   this but as I don't know which I'll let those in the know loosen
-	   the rules */
+	sb->s_maxbytes = MAX_LFS_FILESIZE;
+
 	switch (sbi->s_mount_opt & UFS_MOUNT_UFSTYPE) {
 	case UFS_MOUNT_UFSTYPE_44BSD:
 		UFSD("ufstype=44bsd\n");
@@ -980,6 +1004,13 @@ again:
 		flags |= UFS_ST_SUN;
 	}
 
+	if ((flags & UFS_ST_MASK) == UFS_ST_44BSD &&
+	    uspi->s_postblformat == UFS_42POSTBLFMT) {
+		if (!silent)
+			pr_err("this is not a 44bsd filesystem");
+		goto failed;
+	}
+
 	/*
 	 * Check ufs magic number
 	 */
@@ -1127,8 +1158,8 @@ magic_found:
 	uspi->s_cgmask = fs32_to_cpu(sb, usb1->fs_cgmask);
 
 	if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) {
-		uspi->s_u2_size = fs64_to_cpu(sb, usb3->fs_un1.fs_u2.fs_size);
-		uspi->s_u2_dsize = fs64_to_cpu(sb, usb3->fs_un1.fs_u2.fs_dsize);
+		uspi->s_size = fs64_to_cpu(sb, usb3->fs_un1.fs_u2.fs_size);
+		uspi->s_dsize = fs64_to_cpu(sb, usb3->fs_un1.fs_u2.fs_dsize);
 	} else {
 		uspi->s_size = fs32_to_cpu(sb, usb1->fs_size);
 		uspi->s_dsize = fs32_to_cpu(sb, usb1->fs_dsize);
@@ -1177,6 +1208,18 @@ magic_found:
 	uspi->s_postbloff = fs32_to_cpu(sb, usb3->fs_postbloff);
 	uspi->s_rotbloff = fs32_to_cpu(sb, usb3->fs_rotbloff);
 
+	uspi->s_root_blocks = mul_u64_u32_div(uspi->s_dsize,
+					      uspi->s_minfree, 100);
+	if (uspi->s_minfree <= 5) {
+		uspi->s_time_to_space = ~0ULL;
+		uspi->s_space_to_time = 0;
+		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTSPACE);
+	} else {
+		uspi->s_time_to_space = (uspi->s_root_blocks / 2) + 1;
+		uspi->s_space_to_time = mul_u64_u32_div(uspi->s_dsize,
+					uspi->s_minfree - 2, 100) - 1;
+	}
+
 	/*
 	 * Compute another frequently used values
 	 */
@@ -1212,6 +1255,7 @@ magic_found:
 			"fast symlink size (%u)\n", uspi->s_maxsymlinklen);
 		uspi->s_maxsymlinklen = maxsymlen;
 	}
+	sb->s_maxbytes = ufs_max_bytes(sb);
 	sb->s_max_links = UFS_LINK_MAX;
 
 	inode = ufs_iget(sb, UFS_ROOTINO);
@@ -1365,19 +1409,17 @@ static int ufs_statfs(struct dentry *dentry, struct kstatfs *buf)
 	mutex_lock(&UFS_SB(sb)->s_lock);
 	usb3 = ubh_get_usb_third(uspi);
 
-	if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) {
+	if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
 		buf->f_type = UFS2_MAGIC;
-		buf->f_blocks = fs64_to_cpu(sb, usb3->fs_un1.fs_u2.fs_dsize);
-	} else {
+	else
 		buf->f_type = UFS_MAGIC;
-		buf->f_blocks = uspi->s_dsize;
-	}
-	buf->f_bfree = ufs_blkstofrags(uspi->cs_total.cs_nbfree) +
-		uspi->cs_total.cs_nffree;
+
+	buf->f_blocks = uspi->s_dsize;
+	buf->f_bfree = ufs_freefrags(uspi);
 	buf->f_ffree = uspi->cs_total.cs_nifree;
 	buf->f_bsize = sb->s_blocksize;
-	buf->f_bavail = (buf->f_bfree > (((long)buf->f_blocks / 100) * uspi->s_minfree))
-		? (buf->f_bfree - (((long)buf->f_blocks / 100) * uspi->s_minfree)) : 0;
+	buf->f_bavail = (buf->f_bfree > uspi->s_root_blocks)
+		? (buf->f_bfree - uspi->s_root_blocks) : 0;
 	buf->f_files = uspi->s_ncg * uspi->s_ipg;
 	buf->f_namelen = UFS_MAXNAMLEN;
 	buf->f_fsid.val[0] = (u32)id;
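
ufs_max_bytes() above bounds the file size by the block-tree geometry:
UFS_NDADDR direct pointers plus one, two and three levels of indirection,
each level multiplying capacity by the 1 << s_apbshift addresses per
block, clamped to MAX_LFS_FILESIZE. A standalone re-derivation with
illustrative constants (the clamp value here is not the kernel's exact
definition):

#include <stdio.h>
#include <stdint.h>

#define UFS_NDADDR		12
#define MAX_LFS_FILESIZE	0x7fffffffffffffffLL	/* illustrative */

static uint64_t max_bytes(unsigned apbshift, unsigned bshift)
{
	uint64_t blocks;

	if (apbshift > 21)	/* 3 * 22 bits would overflow the sum */
		blocks = ~0ULL;
	else
		blocks = UFS_NDADDR + (1ULL << apbshift) +
			 (1ULL << (2 * apbshift)) + (1ULL << (3 * apbshift));

	if (blocks >= ((uint64_t)MAX_LFS_FILESIZE >> bshift))
		return MAX_LFS_FILESIZE;
	return blocks << bshift;
}

int main(void)
{
	/* 4K blocks, 1024 block pointers per indirect block */
	printf("%llu\n", (unsigned long long)max_bytes(10, 12));
	return 0;
}
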
diff --git a/fs/ufs/ufs_fs.h b/fs/ufs/ufs_fs.h
index 0cbd5d340b67..150eef6f1233 100644
--- a/fs/ufs/ufs_fs.h
+++ b/fs/ufs/ufs_fs.h
@@ -733,10 +733,8 @@ struct ufs_sb_private_info {
 	__u32	s_dblkno;	/* offset of first data after cg */
 	__u32	s_cgoffset;	/* cylinder group offset in cylinder */
 	__u32	s_cgmask;	/* used to calc mod fs_ntrak */
-	__u32	s_size;		/* number of blocks (fragments) in fs */
-	__u32	s_dsize;	/* number of data blocks in fs */
-	__u64	s_u2_size;	/* ufs2: number of blocks (fragments) in fs */
-	__u64	s_u2_dsize;	/*ufs2: number of data blocks in fs */
+	__u64	s_size;		/* number of blocks (fragments) in fs */
+	__u64	s_dsize;	/* number of data blocks in fs */
 	__u32	s_ncg;		/* number of cylinder groups */
 	__u32	s_bsize;	/* size of basic blocks */
 	__u32	s_fsize;	/* size of fragments */
@@ -793,6 +791,9 @@ struct ufs_sb_private_info {
 	__u32	s_maxsymlinklen;/* upper limit on fast symlinks' size */
 	__s32	fs_magic;	/* filesystem magic */
 	unsigned int s_dirblksize;
+	__u64	s_root_blocks;
+	__u64	s_time_to_space;
+	__u64	s_space_to_time;
 };
 
 /*
diff --git a/fs/ufs/util.c b/fs/ufs/util.c
index f41ad0a6106f..02497a492eb2 100644
--- a/fs/ufs/util.c
+++ b/fs/ufs/util.c
@@ -243,9 +243,8 @@ ufs_set_inode_dev(struct super_block *sb, struct ufs_inode_info *ufsi, dev_t dev
 struct page *ufs_get_locked_page(struct address_space *mapping,
 				 pgoff_t index)
 {
-	struct page *page;
-
-	page = find_lock_page(mapping, index);
+	struct inode *inode = mapping->host;
+	struct page *page = find_lock_page(mapping, index);
 	if (!page) {
 		page = read_mapping_page(mapping, index, NULL);
 
@@ -253,7 +252,7 @@ struct page *ufs_get_locked_page(struct address_space *mapping,
 			printk(KERN_ERR "ufs_change_blocknr: "
 			       "read_mapping_page error: ino %lu, index: %lu\n",
 			       mapping->host->i_ino, index);
-			goto out;
+			return page;
 		}
 
 		lock_page(page);
@@ -262,8 +261,7 @@ struct page *ufs_get_locked_page(struct address_space *mapping,
 			/* Truncate got there first */
 			unlock_page(page);
 			put_page(page);
-			page = NULL;
-			goto out;
+			return NULL;
 		}
 
 		if (!PageUptodate(page) || PageError(page)) {
@@ -272,11 +270,12 @@ struct page *ufs_get_locked_page(struct address_space *mapping,
 
 			printk(KERN_ERR "ufs_change_blocknr: "
 			       "can not read page: ino %lu, index: %lu\n",
-			       mapping->host->i_ino, index);
+			       inode->i_ino, index);
 
-			page = ERR_PTR(-EIO);
+			return ERR_PTR(-EIO);
 		}
 	}
-out:
+	if (!page_has_buffers(page))
+		create_empty_buffers(page, 1 << inode->i_blkbits, 0);
 	return page;
 }
diff --git a/fs/ufs/util.h b/fs/ufs/util.h
index b7fbf53dbc81..9fc7119a1551 100644
--- a/fs/ufs/util.h
+++ b/fs/ufs/util.h
@@ -350,16 +350,11 @@ static inline void *ubh_get_data_ptr(struct ufs_sb_private_info *uspi,
 #define ubh_blkmap(ubh,begin,bit) \
 	((*ubh_get_addr(ubh, (begin) + ((bit) >> 3)) >> ((bit) & 7)) & (0xff >> (UFS_MAXFRAG - uspi->s_fpb)))
 
-/*
- * Determine the number of available frags given a
- * percentage to hold in reserve.
- */
 static inline u64
-ufs_freespace(struct ufs_sb_private_info *uspi, int percentreserved)
+ufs_freefrags(struct ufs_sb_private_info *uspi)
 {
 	return ufs_blkstofrags(uspi->cs_total.cs_nbfree) +
-		uspi->cs_total.cs_nffree -
-		(uspi->s_dsize * (percentreserved) / 100);
+		uspi->cs_total.cs_nffree;
 }
 
 /*
@@ -473,15 +468,19 @@ static inline unsigned _ubh_find_last_zero_bit_(
 static inline int _ubh_isblockset_(struct ufs_sb_private_info * uspi,
 	struct ufs_buffer_head * ubh, unsigned begin, unsigned block)
 {
+	u8 mask;
 	switch (uspi->s_fpb) {
 	case 8:
 		return (*ubh_get_addr (ubh, begin + block) == 0xff);
 	case 4:
-		return (*ubh_get_addr (ubh, begin + (block >> 1)) == (0x0f << ((block & 0x01) << 2)));
+		mask = 0x0f << ((block & 0x01) << 2);
+		return (*ubh_get_addr (ubh, begin + (block >> 1)) & mask) == mask;
 	case 2:
-		return (*ubh_get_addr (ubh, begin + (block >> 2)) == (0x03 << ((block & 0x03) << 1)));
+		mask = 0x03 << ((block & 0x03) << 1);
+		return (*ubh_get_addr (ubh, begin + (block >> 2)) & mask) == mask;
 	case 1:
-		return (*ubh_get_addr (ubh, begin + (block >> 3)) == (0x01 << (block & 0x07)));
+		mask = 0x01 << (block & 0x07);
+		return (*ubh_get_addr (ubh, begin + (block >> 3)) & mask) == mask;
 	}
 	return 0;
 }
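
The util.h hunk fixes _ubh_isblockset_(): comparing the whole bitmap byte
against the block's mask reports "clear" whenever a neighbouring block's
bits are also set in that byte, while masking first tests only the bits
that belong to the block. A standalone demo of the s_fpb == 4 case:

#include <stdio.h>

int main(void)
{
	unsigned char byte = 0xff;	/* both half-byte blocks in use */
	unsigned block = 0;
	unsigned char mask = 0x0f << ((block & 0x01) << 2);

	printf("old test: %d\n", byte == mask);			/* 0: wrong */
	printf("new test: %d\n", (byte & mask) == mask);	/* 1: right */
	return 0;
}
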
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index f7555fc25877..1d622f276e3a 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -340,9 +340,28 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason)
 	bool must_wait, return_to_userland;
 	long blocking_state;
 
-	BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
-
 	ret = VM_FAULT_SIGBUS;
+
+	/*
+	 * We don't do userfault handling for the final child pid update.
+	 *
+	 * We also don't do userfault handling during
+	 * coredumping. hugetlbfs has the special
+	 * follow_hugetlb_page() to skip missing pages in the
+	 * FOLL_DUMP case, anon memory also checks for FOLL_DUMP with
+	 * the no_page_table() helper in follow_page_mask(), but the
+	 * shmem_vm_ops->fault method is invoked even during
+	 * coredumping without mmap_sem and it ends up here.
+	 */
+	if (current->flags & (PF_EXITING|PF_DUMPCORE))
+		goto out;
+
+	/*
+	 * Coredumping runs without mmap_sem so we can only check that
+	 * the mmap_sem is held, if PF_DUMPCORE was not set.
+	 */
+	WARN_ON_ONCE(!rwsem_is_locked(&mm->mmap_sem));
+
 	ctx = vmf->vma->vm_userfaultfd_ctx.ctx;
 	if (!ctx)
 		goto out;
@@ -361,12 +380,6 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason)
 		goto out;
 
 	/*
-	 * We don't do userfault handling for the final child pid update.
-	 */
-	if (current->flags & PF_EXITING)
-		goto out;
-
-	/*
 	 * Check that we can return VM_FAULT_RETRY.
 	 *
 	 * NOTE: it should become possible to return VM_FAULT_RETRY
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index f02eb7673392..a7048eafa8e6 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -1280,7 +1280,6 @@ xfs_bmap_read_extents(
 	xfs_bmbt_rec_t		*frp;
 	xfs_fsblock_t		nextbno;
 	xfs_extnum_t		num_recs;
-	xfs_extnum_t		start;
 
 	num_recs = xfs_btree_get_numrecs(block);
 	if (unlikely(i + num_recs > room)) {
@@ -1303,7 +1302,6 @@ xfs_bmap_read_extents(
 	 * Copy records into the extent records.
 	 */
 	frp = XFS_BMBT_REC_ADDR(mp, block, 1);
-	start = i;
 	for (j = 0; j < num_recs; j++, i++, frp++) {
 		xfs_bmbt_rec_host_t *trp = xfs_iext_get_ext(ifp, i);
 		trp->l0 = be64_to_cpu(frp->l0);
@@ -2065,8 +2063,10 @@ xfs_bmap_add_extent_delay_real(
 		}
 		temp = xfs_bmap_worst_indlen(bma->ip, temp);
 		temp2 = xfs_bmap_worst_indlen(bma->ip, temp2);
-		diff = (int)(temp + temp2 - startblockval(PREV.br_startblock) -
-			(bma->cur ? bma->cur->bc_private.b.allocated : 0));
+		diff = (int)(temp + temp2 -
+			     (startblockval(PREV.br_startblock) -
+			      (bma->cur ?
+			       bma->cur->bc_private.b.allocated : 0)));
 		if (diff > 0) {
 			error = xfs_mod_fdblocks(bma->ip->i_mount,
 						 -((int64_t)diff), false);
@@ -2123,7 +2123,6 @@ xfs_bmap_add_extent_delay_real(
 		temp = da_new;
 		if (bma->cur)
 			temp += bma->cur->bc_private.b.allocated;
-		ASSERT(temp <= da_old);
 		if (temp < da_old)
 			xfs_mod_fdblocks(bma->ip->i_mount,
 					(int64_t)(da_old - temp), false);
diff --git a/fs/xfs/libxfs/xfs_btree.c b/fs/xfs/libxfs/xfs_btree.c
index 5392674bf893..3a673ba201aa 100644
--- a/fs/xfs/libxfs/xfs_btree.c
+++ b/fs/xfs/libxfs/xfs_btree.c
@@ -4395,7 +4395,7 @@ xfs_btree_visit_blocks(
 		xfs_btree_readahead_ptr(cur, ptr, 1);
 
 		/* save for the next iteration of the loop */
-		lptr = *ptr;
+		xfs_btree_copy_ptrs(cur, &lptr, ptr, 1);
 	}
 
 	/* for each buffer in the level */
diff --git a/fs/xfs/libxfs/xfs_refcount.c b/fs/xfs/libxfs/xfs_refcount.c
index b177ef33cd4c..82a38d86ebad 100644
--- a/fs/xfs/libxfs/xfs_refcount.c
+++ b/fs/xfs/libxfs/xfs_refcount.c
@@ -1629,13 +1629,28 @@ xfs_refcount_recover_cow_leftovers(
 	if (mp->m_sb.sb_agblocks >= XFS_REFC_COW_START)
 		return -EOPNOTSUPP;
 
-	error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp);
+	INIT_LIST_HEAD(&debris);
+
+	/*
+	 * In this first part, we use an empty transaction to gather up
+	 * all the leftover CoW extents so that we can subsequently
+	 * delete them.  The empty transaction is used to avoid
+	 * a buffer lock deadlock if there happens to be a loop in the
+	 * refcountbt because we're allowed to re-grab a buffer that is
+	 * already attached to our transaction.  When we're done
+	 * recording the CoW debris we cancel the (empty) transaction
+	 * and everything goes away cleanly.
+	 */
+	error = xfs_trans_alloc_empty(mp, &tp);
 	if (error)
 		return error;
-	cur = xfs_refcountbt_init_cursor(mp, NULL, agbp, agno, NULL);
+
+	error = xfs_alloc_read_agf(mp, tp, agno, 0, &agbp);
+	if (error)
+		goto out_trans;
+	cur = xfs_refcountbt_init_cursor(mp, tp, agbp, agno, NULL);
 
 	/* Find all the leftover CoW staging extents. */
-	INIT_LIST_HEAD(&debris);
 	memset(&low, 0, sizeof(low));
 	memset(&high, 0, sizeof(high));
 	low.rc.rc_startblock = XFS_REFC_COW_START;
@@ -1645,10 +1660,11 @@ xfs_refcount_recover_cow_leftovers(
 	if (error)
 		goto out_cursor;
 	xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
-	xfs_buf_relse(agbp);
+	xfs_trans_brelse(tp, agbp);
+	xfs_trans_cancel(tp);
 
 	/* Now iterate the list to free the leftovers */
-	list_for_each_entry(rr, &debris, rr_list) {
+	list_for_each_entry_safe(rr, n, &debris, rr_list) {
 		/* Set up transaction. */
 		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, 0, &tp);
 		if (error)
@@ -1676,8 +1692,16 @@ xfs_refcount_recover_cow_leftovers(
 		error = xfs_trans_commit(tp);
 		if (error)
 			goto out_free;
+
+		list_del(&rr->rr_list);
+		kmem_free(rr);
 	}
 
+	return error;
+out_defer:
+	xfs_defer_cancel(&dfops);
+out_trans:
+	xfs_trans_cancel(tp);
 out_free:
 	/* Free the leftover list */
 	list_for_each_entry_safe(rr, n, &debris, rr_list) {
@@ -1688,11 +1712,6 @@ out_free:
 
 out_cursor:
 	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
-	xfs_buf_relse(agbp);
-	goto out_free;
-
-out_defer:
-	xfs_defer_cancel(&dfops);
-	xfs_trans_cancel(tp);
-	goto out_free;
+	xfs_trans_brelse(tp, agbp);
+	goto out_trans;
 }
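
Because each leftover record is now freed inside the loop (list_del() plus
kmem_free()), the walk has to use the _safe iterator, which caches the next
pointer before the current node is invalidated. The same rule in a minimal
singly-linked sketch with hypothetical types, not the kernel list API:

#include <stdlib.h>

struct node {
	struct node *next;
};

static void free_all(struct node *head)
{
	struct node *cur = head, *next;

	while (cur) {
		next = cur->next;	/* grab the link before freeing */
		free(cur);
		cur = next;
	}
}
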
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 09af0f7cd55e..3b91faacc1ba 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -1316,9 +1316,12 @@ xfs_vm_bmap(
1316 * The swap code (ab-)uses ->bmap to get a block mapping and then 1316 * The swap code (ab-)uses ->bmap to get a block mapping and then
1317 * bypasseѕ the file system for actual I/O. We really can't allow 1317 * bypasseѕ the file system for actual I/O. We really can't allow
1318 * that on reflinks inodes, so we have to skip out here. And yes, 1318 * that on reflinks inodes, so we have to skip out here. And yes,
1319 * 0 is the magic code for a bmap error.. 1319 * 0 is the magic code for a bmap error.
1320 *
1321 * Since we don't pass back blockdev info, we can't return bmap
1322 * information for rt files either.
1320 */ 1323 */
1321 if (xfs_is_reflink_inode(ip)) 1324 if (xfs_is_reflink_inode(ip) || XFS_IS_REALTIME_INODE(ip))
1322 return 0; 1325 return 0;
1323 1326
1324 filemap_write_and_wait(mapping); 1327 filemap_write_and_wait(mapping);
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index 2b954308a1d6..9e3cc2146d5b 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -582,9 +582,13 @@ xfs_getbmap(
 		}
 		break;
 	default:
+		/* Local format data forks report no extents. */
+		if (ip->i_d.di_format == XFS_DINODE_FMT_LOCAL) {
+			bmv->bmv_entries = 0;
+			return 0;
+		}
 		if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS &&
-		    ip->i_d.di_format != XFS_DINODE_FMT_BTREE &&
-		    ip->i_d.di_format != XFS_DINODE_FMT_LOCAL)
+		    ip->i_d.di_format != XFS_DINODE_FMT_BTREE)
 			return -EINVAL;
 
 		if (xfs_get_extsz_hint(ip) ||
@@ -712,7 +716,7 @@ xfs_getbmap(
 		 * extents.
 		 */
 		if (map[i].br_startblock == DELAYSTARTBLOCK &&
-		    map[i].br_startoff <= XFS_B_TO_FSB(mp, XFS_ISIZE(ip)))
+		    map[i].br_startoff < XFS_B_TO_FSB(mp, XFS_ISIZE(ip)))
 			ASSERT((iflags & BMV_IF_DELALLOC) != 0);
 
 		if (map[i].br_startblock == HOLESTARTBLOCK &&
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 62fa39276a24..16d6a578fc16 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -97,12 +97,16 @@ static inline void
 xfs_buf_ioacct_inc(
 	struct xfs_buf	*bp)
 {
-	if (bp->b_flags & (XBF_NO_IOACCT|_XBF_IN_FLIGHT))
+	if (bp->b_flags & XBF_NO_IOACCT)
 		return;
 
 	ASSERT(bp->b_flags & XBF_ASYNC);
-	bp->b_flags |= _XBF_IN_FLIGHT;
-	percpu_counter_inc(&bp->b_target->bt_io_count);
+	spin_lock(&bp->b_lock);
+	if (!(bp->b_state & XFS_BSTATE_IN_FLIGHT)) {
+		bp->b_state |= XFS_BSTATE_IN_FLIGHT;
+		percpu_counter_inc(&bp->b_target->bt_io_count);
+	}
+	spin_unlock(&bp->b_lock);
 }
 
 /*
@@ -110,14 +114,24 @@ xfs_buf_ioacct_inc(
  * freed and unaccount from the buftarg.
  */
 static inline void
-xfs_buf_ioacct_dec(
+__xfs_buf_ioacct_dec(
 	struct xfs_buf	*bp)
 {
-	if (!(bp->b_flags & _XBF_IN_FLIGHT))
-		return;
+	lockdep_assert_held(&bp->b_lock);
 
-	bp->b_flags &= ~_XBF_IN_FLIGHT;
-	percpu_counter_dec(&bp->b_target->bt_io_count);
+	if (bp->b_state & XFS_BSTATE_IN_FLIGHT) {
+		bp->b_state &= ~XFS_BSTATE_IN_FLIGHT;
+		percpu_counter_dec(&bp->b_target->bt_io_count);
+	}
+}
+
+static inline void
+xfs_buf_ioacct_dec(
+	struct xfs_buf	*bp)
+{
+	spin_lock(&bp->b_lock);
+	__xfs_buf_ioacct_dec(bp);
+	spin_unlock(&bp->b_lock);
 }
 
 /*
@@ -149,9 +163,9 @@ xfs_buf_stale(
 	 * unaccounted (released to LRU) before that occurs. Drop in-flight
 	 * status now to preserve accounting consistency.
 	 */
-	xfs_buf_ioacct_dec(bp);
-
 	spin_lock(&bp->b_lock);
+	__xfs_buf_ioacct_dec(bp);
+
 	atomic_set(&bp->b_lru_ref, 0);
 	if (!(bp->b_state & XFS_BSTATE_DISPOSE) &&
 	    (list_lru_del(&bp->b_target->bt_lru, &bp->b_lru)))
@@ -979,12 +993,12 @@ xfs_buf_rele(
 		 * ensures the decrement occurs only once per-buf.
 		 */
 		if ((atomic_read(&bp->b_hold) == 1) && !list_empty(&bp->b_lru))
-			xfs_buf_ioacct_dec(bp);
+			__xfs_buf_ioacct_dec(bp);
 		goto out_unlock;
 	}
 
 	/* the last reference has been dropped ... */
-	xfs_buf_ioacct_dec(bp);
+	__xfs_buf_ioacct_dec(bp);
 	if (!(bp->b_flags & XBF_STALE) && atomic_read(&bp->b_lru_ref)) {
 		/*
 		 * If the buffer is added to the LRU take a new reference to the
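
The xfs_buf.c change closes a race where two contexts could each observe
"not in flight" and double-account: the flag becomes a b_state bit that is
tested and flipped under b_lock, and the double-underscore helpers assert
the lock rather than take it. A userspace sketch of that shape, assuming a
plain long for the counter instead of a percpu counter:

#include <pthread.h>
#include <stdbool.h>

struct buf {
	pthread_mutex_t lock;	/* plays the role of bp->b_lock */
	bool in_flight;		/* plays the role of XFS_BSTATE_IN_FLIGHT */
	long *io_count;		/* plays the role of the percpu counter */
};

static void ioacct_inc(struct buf *bp)
{
	pthread_mutex_lock(&bp->lock);
	if (!bp->in_flight) {	/* test-and-set is atomic under the lock */
		bp->in_flight = true;
		++*bp->io_count;
	}
	pthread_mutex_unlock(&bp->lock);
}

static void ioacct_dec(struct buf *bp)
{
	pthread_mutex_lock(&bp->lock);
	if (bp->in_flight) {	/* pairs with the increment exactly once */
		bp->in_flight = false;
		--*bp->io_count;
	}
	pthread_mutex_unlock(&bp->lock);
}
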
diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h
index 8d1d44f87ce9..1508121f29f2 100644
--- a/fs/xfs/xfs_buf.h
+++ b/fs/xfs/xfs_buf.h
@@ -63,7 +63,6 @@ typedef enum {
 #define _XBF_KMEM	 (1 << 21)/* backed by heap memory */
 #define _XBF_DELWRI_Q	 (1 << 22)/* buffer on a delwri queue */
 #define _XBF_COMPOUND	 (1 << 23)/* compound buffer */
-#define _XBF_IN_FLIGHT	 (1 << 25) /* I/O in flight, for accounting purposes */
 
 typedef unsigned int xfs_buf_flags_t;
 
@@ -84,14 +83,14 @@ typedef unsigned int xfs_buf_flags_t;
 	{ _XBF_PAGES,		"PAGES" }, \
 	{ _XBF_KMEM,		"KMEM" }, \
 	{ _XBF_DELWRI_Q,	"DELWRI_Q" }, \
-	{ _XBF_COMPOUND,	"COMPOUND" }, \
-	{ _XBF_IN_FLIGHT,	"IN_FLIGHT" }
+	{ _XBF_COMPOUND,	"COMPOUND" }
 
 
 /*
  * Internal state flags.
  */
 #define XFS_BSTATE_DISPOSE	 (1 << 0)	/* buffer being discarded */
+#define XFS_BSTATE_IN_FLIGHT	 (1 << 1)	/* I/O in flight */
 
 /*
  * The xfs_buftarg contains 2 notions of "sector size" -
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 35703a801372..5fb5a0958a14 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -1043,49 +1043,17 @@ xfs_find_get_desired_pgoff(
1043 1043
1044 index = startoff >> PAGE_SHIFT; 1044 index = startoff >> PAGE_SHIFT;
1045 endoff = XFS_FSB_TO_B(mp, map->br_startoff + map->br_blockcount); 1045 endoff = XFS_FSB_TO_B(mp, map->br_startoff + map->br_blockcount);
1046 end = endoff >> PAGE_SHIFT; 1046 end = (endoff - 1) >> PAGE_SHIFT;
1047 do { 1047 do {
1048 int want; 1048 int want;
1049 unsigned nr_pages; 1049 unsigned nr_pages;
1050 unsigned int i; 1050 unsigned int i;
1051 1051
1052 want = min_t(pgoff_t, end - index, PAGEVEC_SIZE); 1052 want = min_t(pgoff_t, end - index, PAGEVEC_SIZE - 1) + 1;
1053 nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index, 1053 nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
1054 want); 1054 want);
1055 /* 1055 if (nr_pages == 0)
1056 * No page mapped into given range. If we are searching holes
1057 * and if this is the first time we got into the loop, it means
1058 * that the given offset is landed in a hole, return it.
1059 *
1060 * If we have already stepped through some block buffers to find
1061 * holes but they all contains data. In this case, the last
1062 * offset is already updated and pointed to the end of the last
1063 * mapped page, if it does not reach the endpoint to search,
1064 * that means there should be a hole between them.
1065 */
1066 if (nr_pages == 0) {
1067 /* Data search found nothing */
1068 if (type == DATA_OFF)
1069 break;
1070
1071 ASSERT(type == HOLE_OFF);
1072 if (lastoff == startoff || lastoff < endoff) {
1073 found = true;
1074 *offset = lastoff;
1075 }
1076 break;
1077 }
1078
1079 /*
1080 * At lease we found one page. If this is the first time we
1081 * step into the loop, and if the first page index offset is
1082 * greater than the given search offset, a hole was found.
1083 */
1084 if (type == HOLE_OFF && lastoff == startoff &&
1085 lastoff < page_offset(pvec.pages[0])) {
1086 found = true;
1087 break; 1056 break;
1088 }
1089 1057
1090 for (i = 0; i < nr_pages; i++) { 1058 for (i = 0; i < nr_pages; i++) {
1091 struct page *page = pvec.pages[i]; 1059 struct page *page = pvec.pages[i];
@@ -1098,18 +1066,18 @@ xfs_find_get_desired_pgoff(
1098 * file mapping. However, page->index will not change 1066 * file mapping. However, page->index will not change
1099 * because we have a reference on the page. 1067 * because we have a reference on the page.
1100 * 1068 *
1101 * Searching done if the page index is out of range. 1069 * If current page offset is beyond where we've ended,
1102 * If the current offset is not reaches the end of 1070 * we've found a hole.
1103 * the specified search range, there should be a hole
1104 * between them.
1105 */ 1071 */
1106 if (page->index > end) { 1072 if (type == HOLE_OFF && lastoff < endoff &&
1107 if (type == HOLE_OFF && lastoff < endoff) { 1073 lastoff < page_offset(pvec.pages[i])) {
1108 *offset = lastoff; 1074 found = true;
1109 found = true; 1075 *offset = lastoff;
1110 }
1111 goto out; 1076 goto out;
1112 } 1077 }
1078 /* Searching done if the page index is out of range. */
1079 if (page->index > end)
1080 goto out;
1113 1081
1114 lock_page(page); 1082 lock_page(page);
1115 /* 1083 /*
@@ -1151,21 +1119,20 @@ xfs_find_get_desired_pgoff(
1151 1119
1152 /* 1120 /*
1153 * The number of returned pages less than our desired, search 1121 * The number of returned pages less than our desired, search
1154 * done. In this case, nothing was found for searching data, 1122 * done.
1155 * but we found a hole behind the last offset.
1156 */ 1123 */
1157 if (nr_pages < want) { 1124 if (nr_pages < want)
1158 if (type == HOLE_OFF) {
1159 *offset = lastoff;
1160 found = true;
1161 }
1162 break; 1125 break;
1163 }
1164 1126
1165 index = pvec.pages[i - 1]->index + 1; 1127 index = pvec.pages[i - 1]->index + 1;
1166 pagevec_release(&pvec); 1128 pagevec_release(&pvec);
1167 } while (index <= end); 1129 } while (index <= end);
1168 1130
1131 /* No page at lastoff and we are not done - we found a hole. */
1132 if (type == HOLE_OFF && lastoff < endoff) {
1133 *offset = lastoff;
1134 found = true;
1135 }
1169out: 1136out:
1170 pagevec_release(&pvec); 1137 pagevec_release(&pvec);
1171 return found; 1138 return found;
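The arithmetic in the xfs_find_get_desired_pgoff() hunk above is easier to check with concrete numbers. Below is a minimal user-space sketch (PAGE_SHIFT, PAGEVEC_SIZE and min_ul() are stand-ins for the kernel's constants and min_t(), not the real definitions): with a page-aligned endoff, the old "end" sat one page past the data, so the loop could run one extra pass that asked pagevec_lookup() for zero pages and then misread the empty pagevec as a hole; the reworked "end" and "want" keep every lookup request at least one page and stop the loop on time.

    #include <stdio.h>

    #define PAGE_SHIFT   12
    #define PAGEVEC_SIZE 14UL

    static unsigned long min_ul(unsigned long a, unsigned long b)
    {
            return a < b ? a : b;
    }

    int main(void)
    {
            unsigned long endoff = 8UL << PAGE_SHIFT; /* page-aligned end: pages 0..7 */
            unsigned long index  = 8;                 /* loop state after page 7 is done */

            unsigned long end_old = endoff >> PAGE_SHIFT;       /* 8: one past the data */
            unsigned long end_new = (endoff - 1) >> PAGE_SHIFT; /* 7: the last real page */

            if (index <= end_old)  /* old condition: one pass too many... */
                    printf("old: want = %lu\n",
                           min_ul(end_old - index, PAGEVEC_SIZE)); /* ...asks for 0 pages */

            if (index > end_new)   /* new condition: already finished */
                    printf("new: loop done, last pass wanted %lu\n",
                           min_ul(end_new - (index - 1), PAGEVEC_SIZE - 1) + 1); /* >= 1 */
            return 0;
    }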
diff --git a/fs/xfs/xfs_fsmap.c b/fs/xfs/xfs_fsmap.c
index 3683819887a5..814ed729881d 100644
--- a/fs/xfs/xfs_fsmap.c
+++ b/fs/xfs/xfs_fsmap.c
@@ -828,6 +828,7 @@ xfs_getfsmap(
828 struct xfs_fsmap dkeys[2]; /* per-dev keys */ 828 struct xfs_fsmap dkeys[2]; /* per-dev keys */
829 struct xfs_getfsmap_dev handlers[XFS_GETFSMAP_DEVS]; 829 struct xfs_getfsmap_dev handlers[XFS_GETFSMAP_DEVS];
830 struct xfs_getfsmap_info info = { NULL }; 830 struct xfs_getfsmap_info info = { NULL };
831 bool use_rmap;
831 int i; 832 int i;
832 int error = 0; 833 int error = 0;
833 834
@@ -837,12 +838,14 @@ xfs_getfsmap(
837 !xfs_getfsmap_is_valid_device(mp, &head->fmh_keys[1])) 838 !xfs_getfsmap_is_valid_device(mp, &head->fmh_keys[1]))
838 return -EINVAL; 839 return -EINVAL;
839 840
841 use_rmap = capable(CAP_SYS_ADMIN) &&
842 xfs_sb_version_hasrmapbt(&mp->m_sb);
840 head->fmh_entries = 0; 843 head->fmh_entries = 0;
841 844
842 /* Set up our device handlers. */ 845 /* Set up our device handlers. */
843 memset(handlers, 0, sizeof(handlers)); 846 memset(handlers, 0, sizeof(handlers));
844 handlers[0].dev = new_encode_dev(mp->m_ddev_targp->bt_dev); 847 handlers[0].dev = new_encode_dev(mp->m_ddev_targp->bt_dev);
845 if (xfs_sb_version_hasrmapbt(&mp->m_sb)) 848 if (use_rmap)
846 handlers[0].fn = xfs_getfsmap_datadev_rmapbt; 849 handlers[0].fn = xfs_getfsmap_datadev_rmapbt;
847 else 850 else
848 handlers[0].fn = xfs_getfsmap_datadev_bnobt; 851 handlers[0].fn = xfs_getfsmap_datadev_bnobt;
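As a sketch of what the new use_rmap gate buys (stand-in types and names, not the kernel's): the rmapbt backend reports per-owner mapping detail, so it is selected only when the caller holds CAP_SYS_ADMIN and the filesystem actually carries a reverse-mapping btree; everyone else falls back to the free-space btree view.

    #include <stdbool.h>
    #include <stdio.h>

    typedef void (*fsmap_fn)(void);

    static void datadev_rmapbt(void) { puts("rmapbt: detailed ownership info"); }
    static void datadev_bnobt(void)  { puts("bnobt: free-space summary only"); }

    static fsmap_fn pick_handler(bool cap_sys_admin, bool has_rmapbt)
    {
            /* both conditions must hold, mirroring the use_rmap expression */
            return (cap_sys_admin && has_rmapbt) ? datadev_rmapbt : datadev_bnobt;
    }

    int main(void)
    {
            pick_handler(false, true)(); /* unprivileged caller: bnobt */
            pick_handler(true,  true)(); /* privileged on an rmapbt fs: rmapbt */
            return 0;
    }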
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index f61c84f8e31a..990210fcb9c3 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -66,7 +66,6 @@ xfs_inode_alloc(
66 66
67 XFS_STATS_INC(mp, vn_active); 67 XFS_STATS_INC(mp, vn_active);
68 ASSERT(atomic_read(&ip->i_pincount) == 0); 68 ASSERT(atomic_read(&ip->i_pincount) == 0);
69 ASSERT(!spin_is_locked(&ip->i_flags_lock));
70 ASSERT(!xfs_isiflocked(ip)); 69 ASSERT(!xfs_isiflocked(ip));
71 ASSERT(ip->i_ino == 0); 70 ASSERT(ip->i_ino == 0);
72 71
@@ -190,7 +189,7 @@ xfs_perag_set_reclaim_tag(
190{ 189{
191 struct xfs_mount *mp = pag->pag_mount; 190 struct xfs_mount *mp = pag->pag_mount;
192 191
193 ASSERT(spin_is_locked(&pag->pag_ici_lock)); 192 lockdep_assert_held(&pag->pag_ici_lock);
194 if (pag->pag_ici_reclaimable++) 193 if (pag->pag_ici_reclaimable++)
195 return; 194 return;
196 195
@@ -212,7 +211,7 @@ xfs_perag_clear_reclaim_tag(
212{ 211{
213 struct xfs_mount *mp = pag->pag_mount; 212 struct xfs_mount *mp = pag->pag_mount;
214 213
215 ASSERT(spin_is_locked(&pag->pag_ici_lock)); 214 lockdep_assert_held(&pag->pag_ici_lock);
216 if (--pag->pag_ici_reclaimable) 215 if (--pag->pag_ici_reclaimable)
217 return; 216 return;
218 217
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index a63f61c256bd..94e5bdf7304c 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -1068,7 +1068,7 @@ xfs_file_iomap_begin(
1068 /* optionally associate a dax device with the iomap bdev */ 1068 /* optionally associate a dax device with the iomap bdev */
1069 bdev = iomap->bdev; 1069 bdev = iomap->bdev;
1070 if (blk_queue_dax(bdev->bd_queue)) 1070 if (blk_queue_dax(bdev->bd_queue))
1071 iomap->dax_dev = dax_get_by_host(bdev->bd_disk->disk_name); 1071 iomap->dax_dev = fs_dax_get_by_host(bdev->bd_disk->disk_name);
1072 else 1072 else
1073 iomap->dax_dev = NULL; 1073 iomap->dax_dev = NULL;
1074 1074
@@ -1149,7 +1149,7 @@ xfs_file_iomap_end(
1149 unsigned flags, 1149 unsigned flags,
1150 struct iomap *iomap) 1150 struct iomap *iomap)
1151{ 1151{
1152 put_dax(iomap->dax_dev); 1152 fs_put_dax(iomap->dax_dev);
1153 if ((flags & IOMAP_WRITE) && iomap->type == IOMAP_DELALLOC) 1153 if ((flags & IOMAP_WRITE) && iomap->type == IOMAP_DELALLOC)
1154 return xfs_file_iomap_end_delalloc(XFS_I(inode), offset, 1154 return xfs_file_iomap_end_delalloc(XFS_I(inode), offset,
1155 length, written, iomap); 1155 length, written, iomap);
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 197f3fffc9a7..408c7820e200 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -210,7 +210,8 @@ struct acpi_device_flags {
210 u32 of_compatible_ok:1; 210 u32 of_compatible_ok:1;
211 u32 coherent_dma:1; 211 u32 coherent_dma:1;
212 u32 cca_seen:1; 212 u32 cca_seen:1;
213 u32 reserved:20; 213 u32 spi_i2c_slave:1;
214 u32 reserved:19;
214}; 215};
215 216
216/* File System */ 217/* File System */
diff --git a/include/acpi/actbl.h b/include/acpi/actbl.h
index d92543f3bbfd..bdc55c0da19c 100644
--- a/include/acpi/actbl.h
+++ b/include/acpi/actbl.h
@@ -374,6 +374,20 @@ struct acpi_table_desc {
374 u16 validation_count; 374 u16 validation_count;
375}; 375};
376 376
377/*
378 * Maximum value of the validation_count field in struct acpi_table_desc.
379 * When reached, validation_count cannot be changed any more and the table will
380 * be permanently regarded as validated.
381 *
382 * This is to prevent situations in which unbalanced table get/put operations
 383 * may cause premature table unmapping in the OS.
 384 *
 385 * The maximum validation count can be set to any value, but should be
386 * greater than the maximum number of OS early stage mapping slots to avoid
387 * leaking early stage table mappings to the late stage.
388 */
389#define ACPI_MAX_TABLE_VALIDATIONS ACPI_UINT16_MAX
390
377/* Masks for Flags field above */ 391/* Masks for Flags field above */
378 392
379#define ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL (0) /* Virtual address, external maintained */ 393#define ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL (0) /* Virtual address, external maintained */
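A small sketch of the saturation rule the new constant encodes (hypothetical names; the real logic lives in ACPICA's table get/put helpers): once validation_count reaches the ceiling it is frozen, so a later unbalanced put can never drive it back to zero and unmap a table that early-boot code still relies on.

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_TABLE_VALIDATIONS UINT16_MAX  /* mirrors ACPI_MAX_TABLE_VALIDATIONS */

    struct table_desc {
            uint16_t validation_count;
    };

    static void table_get(struct table_desc *t)
    {
            if (t->validation_count < MAX_TABLE_VALIDATIONS)
                    t->validation_count++;   /* sticks at the ceiling forever */
    }

    static void table_put(struct table_desc *t)
    {
            if (t->validation_count == 0 ||
                t->validation_count == MAX_TABLE_VALIDATIONS)
                    return;                  /* saturated: permanently validated */
            if (--t->validation_count == 0)
                    puts("unmap table");     /* only while gets/puts stay balanced */
    }

    int main(void)
    {
            struct table_desc t = { .validation_count = MAX_TABLE_VALIDATIONS - 1 };

            table_get(&t);                   /* hits the ceiling */
            table_put(&t);                   /* no-op from here on */
            printf("count=%u\n", t.validation_count);
            return 0;
    }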
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index c0bd0d7651a9..bb837310c07e 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -913,4 +913,55 @@ void drm_dp_aux_unregister(struct drm_dp_aux *aux);
913int drm_dp_start_crc(struct drm_dp_aux *aux, struct drm_crtc *crtc); 913int drm_dp_start_crc(struct drm_dp_aux *aux, struct drm_crtc *crtc);
914int drm_dp_stop_crc(struct drm_dp_aux *aux); 914int drm_dp_stop_crc(struct drm_dp_aux *aux);
915 915
916struct drm_dp_dpcd_ident {
917 u8 oui[3];
918 u8 device_id[6];
919 u8 hw_rev;
920 u8 sw_major_rev;
921 u8 sw_minor_rev;
922} __packed;
923
924/**
925 * struct drm_dp_desc - DP branch/sink device descriptor
926 * @ident: DP device identification from DPCD 0x400 (sink) or 0x500 (branch).
927 * @quirks: Quirks; use drm_dp_has_quirk() to query for the quirks.
928 */
929struct drm_dp_desc {
930 struct drm_dp_dpcd_ident ident;
931 u32 quirks;
932};
933
934int drm_dp_read_desc(struct drm_dp_aux *aux, struct drm_dp_desc *desc,
935 bool is_branch);
936
937/**
938 * enum drm_dp_quirk - Display Port sink/branch device specific quirks
939 *
 940 * Display Port sink and branch devices in the wild have a variety of bugs; try
941 * to collect them here. The quirks are shared, but it's up to the drivers to
942 * implement workarounds for them.
943 */
944enum drm_dp_quirk {
945 /**
946 * @DP_DPCD_QUIRK_LIMITED_M_N:
947 *
948 * The device requires main link attributes Mvid and Nvid to be limited
949 * to 16 bits.
950 */
951 DP_DPCD_QUIRK_LIMITED_M_N,
952};
953
954/**
955 * drm_dp_has_quirk() - does the DP device have a specific quirk
 956 * @desc: Device descriptor filled by drm_dp_read_desc()
957 * @quirk: Quirk to query for
958 *
959 * Return true if DP device identified by @desc has @quirk.
960 */
961static inline bool
962drm_dp_has_quirk(const struct drm_dp_desc *desc, enum drm_dp_quirk quirk)
963{
964 return desc->quirks & BIT(quirk);
965}
966
916#endif /* _DRM_DP_HELPER_H_ */ 967#endif /* _DRM_DP_HELPER_H_ */
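Usage-wise the new helpers compose as below. This is a self-contained sketch with stand-in names, since in the kernel drm_dp_read_desc() fills the descriptor from DPCD and matches the OUI/device id against a quirk table before a driver queries drm_dp_has_quirk().

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define BIT(n) (1UL << (n))

    enum dp_quirk { QUIRK_LIMITED_M_N };  /* stand-in for DP_DPCD_QUIRK_LIMITED_M_N */

    struct dp_desc {
            uint32_t quirks;              /* one bit per enum dp_quirk value */
    };

    static bool dp_has_quirk(const struct dp_desc *desc, enum dp_quirk q)
    {
            return desc->quirks & BIT(q);
    }

    int main(void)
    {
            struct dp_desc desc = { .quirks = BIT(QUIRK_LIMITED_M_N) };

            if (dp_has_quirk(&desc, QUIRK_LIMITED_M_N))
                    puts("limit Mvid/Nvid to 16 bits for this sink");
            return 0;
    }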
diff --git a/include/dt-bindings/clock/sun50i-a64-ccu.h b/include/dt-bindings/clock/sun50i-a64-ccu.h
index 370c0a0473fc..d66432c6e675 100644
--- a/include/dt-bindings/clock/sun50i-a64-ccu.h
+++ b/include/dt-bindings/clock/sun50i-a64-ccu.h
@@ -43,6 +43,8 @@
43#ifndef _DT_BINDINGS_CLK_SUN50I_A64_H_ 43#ifndef _DT_BINDINGS_CLK_SUN50I_A64_H_
44#define _DT_BINDINGS_CLK_SUN50I_A64_H_ 44#define _DT_BINDINGS_CLK_SUN50I_A64_H_
45 45
46#define CLK_PLL_PERIPH0 11
47
46#define CLK_BUS_MIPI_DSI 28 48#define CLK_BUS_MIPI_DSI 28
47#define CLK_BUS_CE 29 49#define CLK_BUS_CE 29
48#define CLK_BUS_DMA 30 50#define CLK_BUS_DMA 30
diff --git a/include/dt-bindings/clock/sun8i-h3-ccu.h b/include/dt-bindings/clock/sun8i-h3-ccu.h
index c2afc41d6964..e139fe5c62ec 100644
--- a/include/dt-bindings/clock/sun8i-h3-ccu.h
+++ b/include/dt-bindings/clock/sun8i-h3-ccu.h
@@ -43,6 +43,8 @@
43#ifndef _DT_BINDINGS_CLK_SUN8I_H3_H_ 43#ifndef _DT_BINDINGS_CLK_SUN8I_H3_H_
44#define _DT_BINDINGS_CLK_SUN8I_H3_H_ 44#define _DT_BINDINGS_CLK_SUN8I_H3_H_
45 45
46#define CLK_PLL_PERIPH0 9
47
46#define CLK_CPUX 14 48#define CLK_CPUX 14
47 49
48#define CLK_BUS_CE 20 50#define CLK_BUS_CE 20
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 97b8d3728b31..ef718586321c 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -195,7 +195,10 @@ struct vgic_dist {
195 /* either a GICv2 CPU interface */ 195 /* either a GICv2 CPU interface */
196 gpa_t vgic_cpu_base; 196 gpa_t vgic_cpu_base;
197 /* or a number of GICv3 redistributor regions */ 197 /* or a number of GICv3 redistributor regions */
198 gpa_t vgic_redist_base; 198 struct {
199 gpa_t vgic_redist_base;
200 gpa_t vgic_redist_free_offset;
201 };
199 }; 202 };
200 203
201 /* distributor enabled */ 204 /* distributor enabled */
diff --git a/include/linux/bio.h b/include/linux/bio.h
index d1b04b0e99cf..a7e29fa0981f 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -426,6 +426,7 @@ extern void bio_advance(struct bio *, unsigned);
426 426
427extern void bio_init(struct bio *bio, struct bio_vec *table, 427extern void bio_init(struct bio *bio, struct bio_vec *table,
428 unsigned short max_vecs); 428 unsigned short max_vecs);
429extern void bio_uninit(struct bio *);
429extern void bio_reset(struct bio *); 430extern void bio_reset(struct bio *);
430void bio_chain(struct bio *, struct bio *); 431void bio_chain(struct bio *, struct bio *);
431 432
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index c47aa248c640..fcd641032f8d 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -238,7 +238,6 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
238 bool kick_requeue_list); 238 bool kick_requeue_list);
239void blk_mq_kick_requeue_list(struct request_queue *q); 239void blk_mq_kick_requeue_list(struct request_queue *q);
240void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs); 240void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
241void blk_mq_abort_requeue_list(struct request_queue *q);
242void blk_mq_complete_request(struct request *rq); 241void blk_mq_complete_request(struct request *rq);
243 242
244bool blk_mq_queue_stopped(struct request_queue *q); 243bool blk_mq_queue_stopped(struct request_queue *q);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index ab92c4ea138b..1ddd36bd2173 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -391,6 +391,8 @@ struct request_queue {
391 int nr_rqs[2]; /* # allocated [a]sync rqs */ 391 int nr_rqs[2]; /* # allocated [a]sync rqs */
392 int nr_rqs_elvpriv; /* # allocated rqs w/ elvpriv */ 392 int nr_rqs_elvpriv; /* # allocated rqs w/ elvpriv */
393 393
394 atomic_t shared_hctx_restart;
395
394 struct blk_queue_stats *stats; 396 struct blk_queue_stats *stats;
395 struct rq_wb *rq_wb; 397 struct rq_wb *rq_wb;
396 398
@@ -586,6 +588,8 @@ struct request_queue {
586 588
587 size_t cmd_size; 589 size_t cmd_size;
588 void *rq_alloc_data; 590 void *rq_alloc_data;
591
592 struct work_struct release_work;
589}; 593};
590 594
591#define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */ 595#define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 5efb4db44e1e..d5093b52b485 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -40,6 +40,9 @@ struct bpf_reg_state {
40 */ 40 */
41 s64 min_value; 41 s64 min_value;
42 u64 max_value; 42 u64 max_value;
43 u32 min_align;
44 u32 aux_off;
45 u32 aux_off_align;
43}; 46};
44 47
45enum bpf_stack_slot_type { 48enum bpf_stack_slot_type {
@@ -87,6 +90,7 @@ struct bpf_verifier_env {
87 struct bpf_prog *prog; /* eBPF program being verified */ 90 struct bpf_prog *prog; /* eBPF program being verified */
88 struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */ 91 struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */
89 int stack_size; /* number of states to be processed */ 92 int stack_size; /* number of states to be processed */
93 bool strict_alignment; /* perform strict pointer alignment checks */
90 struct bpf_verifier_state cur_state; /* current verifier state */ 94 struct bpf_verifier_state cur_state; /* current verifier state */
91 struct bpf_verifier_state_list **explored_states; /* search pruning optimization */ 95 struct bpf_verifier_state_list **explored_states; /* search pruning optimization */
92 const struct bpf_ext_analyzer_ops *analyzer_ops; /* external analyzer ops */ 96 const struct bpf_ext_analyzer_ops *analyzer_ops; /* external analyzer ops */
diff --git a/include/linux/ceph/ceph_debug.h b/include/linux/ceph/ceph_debug.h
index aa2e19182d99..51c5bd64bd00 100644
--- a/include/linux/ceph/ceph_debug.h
+++ b/include/linux/ceph/ceph_debug.h
@@ -3,6 +3,8 @@
3 3
4#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 4#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
5 5
6#include <linux/string.h>
7
6#ifdef CONFIG_CEPH_LIB_PRETTYDEBUG 8#ifdef CONFIG_CEPH_LIB_PRETTYDEBUG
7 9
8/* 10/*
@@ -12,12 +14,10 @@
12 */ 14 */
13 15
14# if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG) 16# if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG)
15extern const char *ceph_file_part(const char *s, int len);
16# define dout(fmt, ...) \ 17# define dout(fmt, ...) \
17 pr_debug("%.*s %12.12s:%-4d : " fmt, \ 18 pr_debug("%.*s %12.12s:%-4d : " fmt, \
18 8 - (int)sizeof(KBUILD_MODNAME), " ", \ 19 8 - (int)sizeof(KBUILD_MODNAME), " ", \
19 ceph_file_part(__FILE__, sizeof(__FILE__)), \ 20 kbasename(__FILE__), __LINE__, ##__VA_ARGS__)
20 __LINE__, ##__VA_ARGS__)
21# else 21# else
22/* faux printk call just to see any compiler warnings. */ 22/* faux printk call just to see any compiler warnings. */
23# define dout(fmt, ...) do { \ 23# define dout(fmt, ...) do { \
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 21745946cae1..ec47101cb1bf 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -48,6 +48,7 @@ enum {
48 CSS_ONLINE = (1 << 1), /* between ->css_online() and ->css_offline() */ 48 CSS_ONLINE = (1 << 1), /* between ->css_online() and ->css_offline() */
49 CSS_RELEASED = (1 << 2), /* refcnt reached zero, released */ 49 CSS_RELEASED = (1 << 2), /* refcnt reached zero, released */
50 CSS_VISIBLE = (1 << 3), /* css is visible to userland */ 50 CSS_VISIBLE = (1 << 3), /* css is visible to userland */
51 CSS_DYING = (1 << 4), /* css is dying */
51}; 52};
52 53
53/* bits in struct cgroup flags field */ 54/* bits in struct cgroup flags field */
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index ed2573e149fa..710a005c6b7a 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -344,6 +344,26 @@ static inline bool css_tryget_online(struct cgroup_subsys_state *css)
344} 344}
345 345
346/** 346/**
347 * css_is_dying - test whether the specified css is dying
348 * @css: target css
349 *
350 * Test whether @css is in the process of offlining or already offline. In
351 * most cases, ->css_online() and ->css_offline() callbacks should be
352 * enough; however, the actual offline operations are RCU delayed and this
353 * test returns %true also when @css is scheduled to be offlined.
354 *
355 * This is useful, for example, when the use case requires synchronous
356 * behavior with respect to cgroup removal. cgroup removal schedules css
357 * offlining but the css can seem alive while the operation is being
358 * delayed. If the delay affects user visible semantics, this test can be
359 * used to resolve the situation.
360 */
361static inline bool css_is_dying(struct cgroup_subsys_state *css)
362{
363 return !(css->flags & CSS_NO_REF) && percpu_ref_is_dying(&css->refcnt);
364}
365
366/**
347 * css_put - put a css reference 367 * css_put - put a css reference
348 * @css: target css 368 * @css: target css
349 * 369 *
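The shape of the css_is_dying() test, reduced to a user-space sketch (the struct and the dying flag are stand-ins for the percpu refcount): a refcount-less css never reports dying, and for everything else "dying" becomes true as soon as the kill is scheduled, even though the RCU-delayed ->css_offline() has not run yet.

    #include <stdbool.h>
    #include <stdio.h>

    #define CSS_NO_REF (1 << 0)  /* stand-in: css does not use refcounting */

    struct css {
            unsigned flags;
            bool ref_dying;      /* stand-in for percpu_ref_is_dying(&css->refcnt) */
    };

    static bool css_is_dying(const struct css *css)
    {
            return !(css->flags & CSS_NO_REF) && css->ref_dying;
    }

    int main(void)
    {
            struct css doomed = { .flags = 0,          .ref_dying = true };
            struct css root   = { .flags = CSS_NO_REF, .ref_dying = true };

            printf("doomed: %d\n", css_is_dying(&doomed)); /* 1: treat as gone */
            printf("root:   %d\n", css_is_dying(&root));   /* 0: never dies */
            return 0;
    }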
diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
index de179993e039..d614c5ea1b5e 100644
--- a/include/linux/compiler-clang.h
+++ b/include/linux/compiler-clang.h
@@ -15,3 +15,11 @@
15 * with any version that can compile the kernel 15 * with any version that can compile the kernel
16 */ 16 */
17#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__) 17#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
18
19/*
20 * GCC does not warn about unused static inline functions for
21 * -Wunused-function. This turns out to avoid the need for complex #ifdef
22 * directives. Suppress the warning in clang as well.
23 */
24#undef inline
25#define inline inline __attribute__((unused)) notrace
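A sketch of the effect, compilable on its own (the redefined keyword is the real trick from the hunk, the rest is scaffolding): with clang, -Wunused-function can flag an unused static inline like the one below, which gcc never warned about; carrying __attribute__((unused)) on every inline silences it without sprinkling #ifdefs at each definition site.

    #undef inline
    #define inline inline __attribute__((unused))

    static inline int never_called(int x)
    {
            return x + 1;        /* clang could flag this definition otherwise */
    }

    int main(void)
    {
            return 0;            /* never_called() deliberately stays unused */
    }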
diff --git a/include/linux/configfs.h b/include/linux/configfs.h
index 2319b8c108e8..c96709049683 100644
--- a/include/linux/configfs.h
+++ b/include/linux/configfs.h
@@ -74,7 +74,8 @@ extern void config_item_init_type_name(struct config_item *item,
74 const char *name, 74 const char *name,
75 struct config_item_type *type); 75 struct config_item_type *type);
76 76
77extern struct config_item * config_item_get(struct config_item *); 77extern struct config_item *config_item_get(struct config_item *);
78extern struct config_item *config_item_get_unless_zero(struct config_item *);
78extern void config_item_put(struct config_item *); 79extern void config_item_put(struct config_item *);
79 80
80struct config_item_type { 81struct config_item_type {
diff --git a/include/linux/dax.h b/include/linux/dax.h
index 00ebac854bb7..5ec1f6c47716 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -18,6 +18,20 @@ struct dax_operations {
18 void **, pfn_t *); 18 void **, pfn_t *);
19}; 19};
20 20
21#if IS_ENABLED(CONFIG_DAX)
22struct dax_device *dax_get_by_host(const char *host);
23void put_dax(struct dax_device *dax_dev);
24#else
25static inline struct dax_device *dax_get_by_host(const char *host)
26{
27 return NULL;
28}
29
30static inline void put_dax(struct dax_device *dax_dev)
31{
32}
33#endif
34
21int bdev_dax_pgoff(struct block_device *, sector_t, size_t, pgoff_t *pgoff); 35int bdev_dax_pgoff(struct block_device *, sector_t, size_t, pgoff_t *pgoff);
22#if IS_ENABLED(CONFIG_FS_DAX) 36#if IS_ENABLED(CONFIG_FS_DAX)
23int __bdev_dax_supported(struct super_block *sb, int blocksize); 37int __bdev_dax_supported(struct super_block *sb, int blocksize);
@@ -25,23 +39,29 @@ static inline int bdev_dax_supported(struct super_block *sb, int blocksize)
25{ 39{
26 return __bdev_dax_supported(sb, blocksize); 40 return __bdev_dax_supported(sb, blocksize);
27} 41}
42
43static inline struct dax_device *fs_dax_get_by_host(const char *host)
44{
45 return dax_get_by_host(host);
46}
47
48static inline void fs_put_dax(struct dax_device *dax_dev)
49{
50 put_dax(dax_dev);
51}
52
28#else 53#else
29static inline int bdev_dax_supported(struct super_block *sb, int blocksize) 54static inline int bdev_dax_supported(struct super_block *sb, int blocksize)
30{ 55{
31 return -EOPNOTSUPP; 56 return -EOPNOTSUPP;
32} 57}
33#endif
34 58
35#if IS_ENABLED(CONFIG_DAX) 59static inline struct dax_device *fs_dax_get_by_host(const char *host)
36struct dax_device *dax_get_by_host(const char *host);
37void put_dax(struct dax_device *dax_dev);
38#else
39static inline struct dax_device *dax_get_by_host(const char *host)
40{ 60{
41 return NULL; 61 return NULL;
42} 62}
43 63
44static inline void put_dax(struct dax_device *dax_dev) 64static inline void fs_put_dax(struct dax_device *dax_dev)
45{ 65{
46} 66}
47#endif 67#endif
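The net effect of the dax.h reshuffle, in one sentence: the raw dax_get_by_host()/put_dax() pair follows CONFIG_DAX, while the new fs_ prefixed wrappers additionally collapse to no-ops when CONFIG_FS_DAX is off, so callers such as the xfs_file_iomap_begin()/xfs_file_iomap_end() hunks earlier in this patch need no #ifdefs of their own. A condensed sketch of that layering (config macros spelled as plain defines for illustration):

    struct dax_device;

    #ifdef CONFIG_FS_DAX
    static inline struct dax_device *fs_dax_get_by_host(const char *host)
    {
            return dax_get_by_host(host);   /* forwards to the CONFIG_DAX layer */
    }
    static inline void fs_put_dax(struct dax_device *dax_dev)
    {
            put_dax(dax_dev);
    }
    #else
    /* fs callers compile unchanged; the calls just vanish */
    static inline struct dax_device *fs_dax_get_by_host(const char *host)
    {
            (void)host;
            return 0;
    }
    static inline void fs_put_dax(struct dax_device *dax_dev) { (void)dax_dev; }
    #endif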
diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h
index 4eac2670bfa1..92f20832fd28 100644
--- a/include/linux/dma-iommu.h
+++ b/include/linux/dma-iommu.h
@@ -78,6 +78,7 @@ void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list);
78 78
79struct iommu_domain; 79struct iommu_domain;
80struct msi_msg; 80struct msi_msg;
81struct device;
81 82
82static inline int iommu_dma_init(void) 83static inline int iommu_dma_init(void)
83{ 84{
diff --git a/include/linux/dmi.h b/include/linux/dmi.h
index 5e9c74cf8894..9bbf21a516e4 100644
--- a/include/linux/dmi.h
+++ b/include/linux/dmi.h
@@ -136,7 +136,7 @@ static inline int dmi_name_in_vendors(const char *s) { return 0; }
136static inline int dmi_name_in_serial(const char *s) { return 0; } 136static inline int dmi_name_in_serial(const char *s) { return 0; }
137#define dmi_available 0 137#define dmi_available 0
138static inline int dmi_walk(void (*decode)(const struct dmi_header *, void *), 138static inline int dmi_walk(void (*decode)(const struct dmi_header *, void *),
139 void *private_data) { return -1; } 139 void *private_data) { return -ENXIO; }
140static inline bool dmi_match(enum dmi_field f, const char *str) 140static inline bool dmi_match(enum dmi_field f, const char *str)
141 { return false; } 141 { return false; }
142static inline void dmi_memdev_name(u16 handle, const char **bank, 142static inline void dmi_memdev_name(u16 handle, const char **bank,
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 9ec5e22846e0..0e306c5a86d6 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -153,7 +153,7 @@ struct elevator_type
153#endif 153#endif
154 154
155 /* managed by elevator core */ 155 /* managed by elevator core */
156 char icq_cache_name[ELV_NAME_MAX + 5]; /* elvname + "_io_cq" */ 156 char icq_cache_name[ELV_NAME_MAX + 6]; /* elvname + "_io_cq" */
157 struct list_head list; 157 struct list_head list;
158}; 158};
159 159
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 56197f82af45..62d948f80730 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -272,6 +272,16 @@ struct bpf_prog_aux;
272 .off = OFF, \ 272 .off = OFF, \
273 .imm = IMM }) 273 .imm = IMM })
274 274
275/* Unconditional jumps, goto pc + off16 */
276
277#define BPF_JMP_A(OFF) \
278 ((struct bpf_insn) { \
279 .code = BPF_JMP | BPF_JA, \
280 .dst_reg = 0, \
281 .src_reg = 0, \
282 .off = OFF, \
283 .imm = 0 })
284
275/* Function call */ 285/* Function call */
276 286
277#define BPF_EMIT_CALL(FUNC) \ 287#define BPF_EMIT_CALL(FUNC) \
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 2b1a44f5bdb6..a89d37e8b387 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -41,7 +41,7 @@ struct vm_area_struct;
41#define ___GFP_WRITE 0x800000u 41#define ___GFP_WRITE 0x800000u
42#define ___GFP_KSWAPD_RECLAIM 0x1000000u 42#define ___GFP_KSWAPD_RECLAIM 0x1000000u
43#ifdef CONFIG_LOCKDEP 43#ifdef CONFIG_LOCKDEP
44#define ___GFP_NOLOCKDEP 0x4000000u 44#define ___GFP_NOLOCKDEP 0x2000000u
45#else 45#else
46#define ___GFP_NOLOCKDEP 0 46#define ___GFP_NOLOCKDEP 0
47#endif 47#endif
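From the hunk alone one can infer that the lockdep bit was moved to sit directly above ___GFP_KSWAPD_RECLAIM rather than one position higher. A compile-time check in this style (an illustration, not kernel code) is how such a relocation can be pinned down so the flag neither overlaps a neighbour nor drifts outside the bits the gfp mask accounts for:

    #include <assert.h>

    #define ___GFP_WRITE          0x800000u
    #define ___GFP_KSWAPD_RECLAIM 0x1000000u
    #define ___GFP_NOLOCKDEP      0x2000000u   /* was 0x4000000u */

    static_assert((___GFP_NOLOCKDEP & ___GFP_KSWAPD_RECLAIM) == 0,
                  "gfp flag bits must not overlap");
    static_assert(___GFP_NOLOCKDEP == ___GFP_KSWAPD_RECLAIM << 1,
                  "lockdep bit sits directly above the last real flag");

    int main(void) { return 0; }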
diff --git a/include/linux/gpio/machine.h b/include/linux/gpio/machine.h
index c0d712d22b07..f738d50cc17d 100644
--- a/include/linux/gpio/machine.h
+++ b/include/linux/gpio/machine.h
@@ -56,7 +56,14 @@ struct gpiod_lookup_table {
56 .flags = _flags, \ 56 .flags = _flags, \
57} 57}
58 58
59#ifdef CONFIG_GPIOLIB
59void gpiod_add_lookup_table(struct gpiod_lookup_table *table); 60void gpiod_add_lookup_table(struct gpiod_lookup_table *table);
60void gpiod_remove_lookup_table(struct gpiod_lookup_table *table); 61void gpiod_remove_lookup_table(struct gpiod_lookup_table *table);
62#else
63static inline
64void gpiod_add_lookup_table(struct gpiod_lookup_table *table) {}
65static inline
66void gpiod_remove_lookup_table(struct gpiod_lookup_table *table) {}
67#endif
61 68
62#endif /* __LINUX_GPIO_MACHINE_H */ 69#endif /* __LINUX_GPIO_MACHINE_H */
diff --git a/include/linux/hashtable.h b/include/linux/hashtable.h
index 661e5c2a8e2a..082dc1bd0801 100644
--- a/include/linux/hashtable.h
+++ b/include/linux/hashtable.h
@@ -167,7 +167,6 @@ static inline void hash_del_rcu(struct hlist_node *node)
167/** 167/**
168 * hash_for_each_possible_rcu - iterate over all possible objects hashing to the 168 * hash_for_each_possible_rcu - iterate over all possible objects hashing to the
169 * same bucket in an rcu enabled hashtable 169 * same bucket in an rcu enabled hashtable
170 * in a rcu enabled hashtable
171 * @name: hashtable to iterate 170 * @name: hashtable to iterate
172 * @obj: the type * to use as a loop cursor for each entry 171 * @obj: the type * to use as a loop cursor for each entry
173 * @member: the name of the hlist_node within the struct 172 * @member: the name of the hlist_node within the struct
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 8d5fcd6284ce..283dc2f5364d 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -614,14 +614,16 @@ static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb)
614static inline netdev_features_t vlan_features_check(const struct sk_buff *skb, 614static inline netdev_features_t vlan_features_check(const struct sk_buff *skb,
615 netdev_features_t features) 615 netdev_features_t features)
616{ 616{
617 if (skb_vlan_tagged_multi(skb)) 617 if (skb_vlan_tagged_multi(skb)) {
618 features = netdev_intersect_features(features, 618 /* In the case of multi-tagged packets, use a direct mask
 619 NETIF_F_SG | 619 * instead of using netdev_intersect_features(), to make
620 NETIF_F_HIGHDMA | 620 * sure that only devices supporting NETIF_F_HW_CSUM will
621 NETIF_F_FRAGLIST | 621 * have checksum offloading support.
622 NETIF_F_HW_CSUM | 622 */
623 NETIF_F_HW_VLAN_CTAG_TX | 623 features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_HW_CSUM |
624 NETIF_F_HW_VLAN_STAG_TX); 624 NETIF_F_FRAGLIST | NETIF_F_HW_VLAN_CTAG_TX |
625 NETIF_F_HW_VLAN_STAG_TX;
626 }
625 627
626 return features; 628 return features;
627} 629}
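A rough model of why the direct mask matters (the feature bits and the intersect helper are simplified stand-ins): netdev_intersect_features() treats the generic NETIF_F_HW_CSUM as implying the protocol-specific checksum bits, so a device advertising only IP checksumming kept a checksum feature it cannot apply to multi-tagged frames, while the plain mask keeps checksum offload only for true NETIF_F_HW_CSUM hardware.

    #include <stdio.h>

    #define F_IP_CSUM (1u << 0)  /* checksums plain IPv4 TCP/UDP only */
    #define F_HW_CSUM (1u << 1)  /* checksums any protocol layout */
    #define F_SG      (1u << 2)

    /* simplified model of netdev_intersect_features() */
    static unsigned intersect(unsigned dev, unsigned wanted)
    {
            if (wanted & F_HW_CSUM)
                    wanted |= F_IP_CSUM;   /* HW_CSUM implies the specific bits */
            return dev & wanted;
    }

    int main(void)
    {
            unsigned dev    = F_SG | F_IP_CSUM;  /* no generic HW_CSUM */
            unsigned wanted = F_SG | F_HW_CSUM;

            printf("intersect:   %#x\n", intersect(dev, wanted)); /* keeps IP_CSUM */
            printf("direct mask: %#x\n", dev & wanted);           /* drops checksum */
            return 0;
    }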
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index fffb91202bc9..1fa293a37f4a 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -417,6 +417,10 @@
417#define ICH_HCR_EN (1 << 0) 417#define ICH_HCR_EN (1 << 0)
418#define ICH_HCR_UIE (1 << 1) 418#define ICH_HCR_UIE (1 << 1)
419 419
420#define ICH_VMCR_ACK_CTL_SHIFT 2
421#define ICH_VMCR_ACK_CTL_MASK (1 << ICH_VMCR_ACK_CTL_SHIFT)
422#define ICH_VMCR_FIQ_EN_SHIFT 3
423#define ICH_VMCR_FIQ_EN_MASK (1 << ICH_VMCR_FIQ_EN_SHIFT)
420#define ICH_VMCR_CBPR_SHIFT 4 424#define ICH_VMCR_CBPR_SHIFT 4
421#define ICH_VMCR_CBPR_MASK (1 << ICH_VMCR_CBPR_SHIFT) 425#define ICH_VMCR_CBPR_MASK (1 << ICH_VMCR_CBPR_SHIFT)
422#define ICH_VMCR_EOIM_SHIFT 9 426#define ICH_VMCR_EOIM_SHIFT 9
diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
index dc30f3d057eb..d3453ee072fc 100644
--- a/include/linux/irqchip/arm-gic.h
+++ b/include/linux/irqchip/arm-gic.h
@@ -25,7 +25,18 @@
25#define GICC_ENABLE 0x1 25#define GICC_ENABLE 0x1
26#define GICC_INT_PRI_THRESHOLD 0xf0 26#define GICC_INT_PRI_THRESHOLD 0xf0
27 27
28#define GIC_CPU_CTRL_EOImodeNS (1 << 9) 28#define GIC_CPU_CTRL_EnableGrp0_SHIFT 0
29#define GIC_CPU_CTRL_EnableGrp0 (1 << GIC_CPU_CTRL_EnableGrp0_SHIFT)
30#define GIC_CPU_CTRL_EnableGrp1_SHIFT 1
31#define GIC_CPU_CTRL_EnableGrp1 (1 << GIC_CPU_CTRL_EnableGrp1_SHIFT)
32#define GIC_CPU_CTRL_AckCtl_SHIFT 2
33#define GIC_CPU_CTRL_AckCtl (1 << GIC_CPU_CTRL_AckCtl_SHIFT)
34#define GIC_CPU_CTRL_FIQEn_SHIFT 3
35#define GIC_CPU_CTRL_FIQEn (1 << GIC_CPU_CTRL_FIQEn_SHIFT)
36#define GIC_CPU_CTRL_CBPR_SHIFT 4
37#define GIC_CPU_CTRL_CBPR (1 << GIC_CPU_CTRL_CBPR_SHIFT)
38#define GIC_CPU_CTRL_EOImodeNS_SHIFT 9
39#define GIC_CPU_CTRL_EOImodeNS (1 << GIC_CPU_CTRL_EOImodeNS_SHIFT)
29 40
30#define GICC_IAR_INT_ID_MASK 0x3ff 41#define GICC_IAR_INT_ID_MASK 0x3ff
31#define GICC_INT_SPURIOUS 1023 42#define GICC_INT_SPURIOUS 1023
@@ -84,8 +95,19 @@
84#define GICH_LR_EOI (1 << 19) 95#define GICH_LR_EOI (1 << 19)
85#define GICH_LR_HW (1 << 31) 96#define GICH_LR_HW (1 << 31)
86 97
87#define GICH_VMCR_CTRL_SHIFT 0 98#define GICH_VMCR_ENABLE_GRP0_SHIFT 0
88#define GICH_VMCR_CTRL_MASK (0x21f << GICH_VMCR_CTRL_SHIFT) 99#define GICH_VMCR_ENABLE_GRP0_MASK (1 << GICH_VMCR_ENABLE_GRP0_SHIFT)
100#define GICH_VMCR_ENABLE_GRP1_SHIFT 1
101#define GICH_VMCR_ENABLE_GRP1_MASK (1 << GICH_VMCR_ENABLE_GRP1_SHIFT)
102#define GICH_VMCR_ACK_CTL_SHIFT 2
103#define GICH_VMCR_ACK_CTL_MASK (1 << GICH_VMCR_ACK_CTL_SHIFT)
104#define GICH_VMCR_FIQ_EN_SHIFT 3
105#define GICH_VMCR_FIQ_EN_MASK (1 << GICH_VMCR_FIQ_EN_SHIFT)
106#define GICH_VMCR_CBPR_SHIFT 4
107#define GICH_VMCR_CBPR_MASK (1 << GICH_VMCR_CBPR_SHIFT)
108#define GICH_VMCR_EOI_MODE_SHIFT 9
109#define GICH_VMCR_EOI_MODE_MASK (1 << GICH_VMCR_EOI_MODE_SHIFT)
110
89#define GICH_VMCR_PRIMASK_SHIFT 27 111#define GICH_VMCR_PRIMASK_SHIFT 27
90#define GICH_VMCR_PRIMASK_MASK (0x1f << GICH_VMCR_PRIMASK_SHIFT) 112#define GICH_VMCR_PRIMASK_MASK (0x1f << GICH_VMCR_PRIMASK_SHIFT)
91#define GICH_VMCR_BINPOINT_SHIFT 21 113#define GICH_VMCR_BINPOINT_SHIFT 21
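The replaced 0x21f "CTRL" blob and the new per-field definitions describe the same six bits; a quick standalone sketch makes the correspondence explicit and shows the field-by-field packing the GICv2 VMCR code can now do instead of moving the block as one opaque mask.

    #include <stdint.h>
    #include <stdio.h>

    #define ENABLE_GRP0_SHIFT 0
    #define ENABLE_GRP1_SHIFT 1
    #define ACK_CTL_SHIFT     2
    #define FIQ_EN_SHIFT      3
    #define CBPR_SHIFT        4
    #define EOI_MODE_SHIFT    9

    static uint32_t vmcr_pack(int grp0, int grp1, int ack, int fiq, int cbpr, int eoi)
    {
            return (uint32_t)grp0 << ENABLE_GRP0_SHIFT |
                   (uint32_t)grp1 << ENABLE_GRP1_SHIFT |
                   (uint32_t)ack  << ACK_CTL_SHIFT |
                   (uint32_t)fiq  << FIQ_EN_SHIFT |
                   (uint32_t)cbpr << CBPR_SHIFT |
                   (uint32_t)eoi  << EOI_MODE_SHIFT;
    }

    int main(void)
    {
            /* all six control fields set reproduces the old CTRL mask */
            printf("%#x\n", vmcr_pack(1, 1, 1, 1, 1, 1));  /* prints 0x21f */
            return 0;
    }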
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index 36872fbb815d..734377ad42e9 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -64,13 +64,17 @@ extern int register_refined_jiffies(long clock_tick_rate);
64/* TICK_USEC is the time between ticks in usec assuming fake USER_HZ */ 64/* TICK_USEC is the time between ticks in usec assuming fake USER_HZ */
65#define TICK_USEC ((1000000UL + USER_HZ/2) / USER_HZ) 65#define TICK_USEC ((1000000UL + USER_HZ/2) / USER_HZ)
66 66
67#ifndef __jiffy_arch_data
68#define __jiffy_arch_data
69#endif
70
67/* 71/*
68 * The 64-bit value is not atomic - you MUST NOT read it 72 * The 64-bit value is not atomic - you MUST NOT read it
69 * without sampling the sequence number in jiffies_lock. 73 * without sampling the sequence number in jiffies_lock.
70 * get_jiffies_64() will do this for you as appropriate. 74 * get_jiffies_64() will do this for you as appropriate.
71 */ 75 */
72extern u64 __cacheline_aligned_in_smp jiffies_64; 76extern u64 __cacheline_aligned_in_smp jiffies_64;
73extern unsigned long volatile __cacheline_aligned_in_smp jiffies; 77extern unsigned long volatile __cacheline_aligned_in_smp __jiffy_arch_data jiffies;
74 78
75#if (BITS_PER_LONG < 64) 79#if (BITS_PER_LONG < 64)
76u64 get_jiffies_64(void); 80u64 get_jiffies_64(void);
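The #ifndef/#define pair added above is the usual override hook: an architecture header included earlier may define __jiffy_arch_data to attach an attribute to the jiffies declaration, and every other architecture compiles it away to nothing. A standalone sketch of the pattern, with a made-up attribute and opt-in macro (which architecture consumes the real hook is outside this hunk):

    #ifdef ARCH_WANTS_TAGGED_JIFFIES                     /* hypothetical arch opt-in */
    #define __jiffy_arch_data __attribute__((section(".jiffies")))
    #endif

    #ifndef __jiffy_arch_data
    #define __jiffy_arch_data                            /* default: nothing */
    #endif

    unsigned long volatile __jiffy_arch_data jiffies;

    int main(void)
    {
            return (int)jiffies;                         /* 0: zero-initialized */
    }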
diff --git a/include/linux/key.h b/include/linux/key.h
index 0c9b93b0d1f7..78e25aabedaf 100644
--- a/include/linux/key.h
+++ b/include/linux/key.h
@@ -173,7 +173,6 @@ struct key {
173#ifdef KEY_DEBUGGING 173#ifdef KEY_DEBUGGING
174 unsigned magic; 174 unsigned magic;
175#define KEY_DEBUG_MAGIC 0x18273645u 175#define KEY_DEBUG_MAGIC 0x18273645u
176#define KEY_DEBUG_MAGIC_X 0xf8e9dacbu
177#endif 176#endif
178 177
179 unsigned long flags; /* status flags (change with bitops) */ 178 unsigned long flags; /* status flags (change with bitops) */
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 30f90c1a0aaf..541df0b5b815 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -349,6 +349,9 @@ extern int proc_kprobes_optimization_handler(struct ctl_table *table,
349 int write, void __user *buffer, 349 int write, void __user *buffer,
350 size_t *length, loff_t *ppos); 350 size_t *length, loff_t *ppos);
351#endif 351#endif
352extern void wait_for_kprobe_optimizer(void);
353#else
354static inline void wait_for_kprobe_optimizer(void) { }
352#endif /* CONFIG_OPTPROBES */ 355#endif /* CONFIG_OPTPROBES */
353#ifdef CONFIG_KPROBES_ON_FTRACE 356#ifdef CONFIG_KPROBES_ON_FTRACE
354extern void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, 357extern void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index 4ce24a376262..8098695e5d8d 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -425,12 +425,20 @@ static inline void early_memtest(phys_addr_t start, phys_addr_t end)
425} 425}
426#endif 426#endif
427 427
428extern unsigned long memblock_reserved_memory_within(phys_addr_t start_addr,
429 phys_addr_t end_addr);
428#else 430#else
429static inline phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align) 431static inline phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align)
430{ 432{
431 return 0; 433 return 0;
432} 434}
433 435
436static inline unsigned long memblock_reserved_memory_within(phys_addr_t start_addr,
437 phys_addr_t end_addr)
438{
439 return 0;
440}
441
434#endif /* CONFIG_HAVE_MEMBLOCK */ 442#endif /* CONFIG_HAVE_MEMBLOCK */
435 443
436#endif /* __KERNEL__ */ 444#endif /* __KERNEL__ */
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index b4ee8f62ce8d..8e2828d48d7f 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -470,6 +470,7 @@ struct mlx4_update_qp_params {
470 u16 rate_val; 470 u16 rate_val;
471}; 471};
472 472
473struct mlx4_qp *mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn);
473int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn, 474int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
474 enum mlx4_update_qp_attr attr, 475 enum mlx4_update_qp_attr attr,
475 struct mlx4_update_qp_params *params); 476 struct mlx4_update_qp_params *params);
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index dd9a263ed368..a940ec6a046c 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -787,8 +787,14 @@ enum {
787}; 787};
788 788
789enum { 789enum {
790 CQE_RSS_HTYPE_IP = 0x3 << 6, 790 CQE_RSS_HTYPE_IP = 0x3 << 2,
791 CQE_RSS_HTYPE_L4 = 0x3 << 2, 791 /* cqe->rss_hash_type[3:2] - IP destination selected for hash
792 * (00 = none, 01 = IPv4, 10 = IPv6, 11 = Reserved)
793 */
794 CQE_RSS_HTYPE_L4 = 0x3 << 6,
795 /* cqe->rss_hash_type[7:6] - L4 destination selected for hash
 796 * (00 = none, 01 = TCP, 10 = UDP, 11 = IPSEC.SPI)
797 */
792}; 798};
793 799
794enum { 800enum {
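Decoding with the corrected masks, as a standalone sketch (the example CQE byte is made up): the IP type now comes from rss_hash_type bits [3:2] and the L4 type from bits [7:6], matching the new comments, where the old definitions read the two fields swapped.

    #include <stdint.h>
    #include <stdio.h>

    #define HTYPE_IP_SHIFT 2
    #define HTYPE_IP_MASK  (0x3u << HTYPE_IP_SHIFT)  /* rss_hash_type[3:2] */
    #define HTYPE_L4_SHIFT 6
    #define HTYPE_L4_MASK  (0x3u << HTYPE_L4_SHIFT)  /* rss_hash_type[7:6] */

    int main(void)
    {
            uint8_t rss_hash_type = 0x44;  /* example: IPv4 (01) + TCP (01) */

            unsigned ip = (rss_hash_type & HTYPE_IP_MASK) >> HTYPE_IP_SHIFT;
            unsigned l4 = (rss_hash_type & HTYPE_L4_MASK) >> HTYPE_L4_SHIFT;

            printf("ip=%u (0=none 1=IPv4 2=IPv6)\n", ip);  /* 1 */
            printf("l4=%u (0=none 1=TCP 2=UDP)\n", l4);    /* 1 */
            return 0;
    }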
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index bcdf739ee41a..93273d9ea4d1 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -787,7 +787,12 @@ enum {
787 787
788typedef void (*mlx5_cmd_cbk_t)(int status, void *context); 788typedef void (*mlx5_cmd_cbk_t)(int status, void *context);
789 789
790enum {
791 MLX5_CMD_ENT_STATE_PENDING_COMP,
792};
793
790struct mlx5_cmd_work_ent { 794struct mlx5_cmd_work_ent {
795 unsigned long state;
791 struct mlx5_cmd_msg *in; 796 struct mlx5_cmd_msg *in;
792 struct mlx5_cmd_msg *out; 797 struct mlx5_cmd_msg *out;
793 void *uout; 798 void *uout;
@@ -976,7 +981,7 @@ void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn);
976void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type); 981void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type);
977void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type); 982void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type);
978struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn); 983struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn);
979void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec); 984void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced);
980void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type); 985void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type);
981int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, 986int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
982 int nent, u64 mask, const char *name, 987 int nent, u64 mask, const char *name,
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index 1b166d2e19c5..b25e7baa273e 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -109,7 +109,6 @@ struct mlx5_flow_table_attr {
109 int max_fte; 109 int max_fte;
110 u32 level; 110 u32 level;
111 u32 flags; 111 u32 flags;
112 u32 underlay_qpn;
113}; 112};
114 113
115struct mlx5_flow_table * 114struct mlx5_flow_table *
@@ -167,4 +166,7 @@ struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging);
167void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter); 166void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter);
168void mlx5_fc_query_cached(struct mlx5_fc *counter, 167void mlx5_fc_query_cached(struct mlx5_fc *counter,
169 u64 *bytes, u64 *packets, u64 *lastuse); 168 u64 *bytes, u64 *packets, u64 *lastuse);
169int mlx5_fs_add_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn);
170int mlx5_fs_remove_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn);
171
170#endif 172#endif
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 32de0724b400..edafedb7b509 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -766,6 +766,12 @@ enum {
766 MLX5_CAP_PORT_TYPE_ETH = 0x1, 766 MLX5_CAP_PORT_TYPE_ETH = 0x1,
767}; 767};
768 768
769enum {
770 MLX5_CAP_UMR_FENCE_STRONG = 0x0,
771 MLX5_CAP_UMR_FENCE_SMALL = 0x1,
772 MLX5_CAP_UMR_FENCE_NONE = 0x2,
773};
774
769struct mlx5_ifc_cmd_hca_cap_bits { 775struct mlx5_ifc_cmd_hca_cap_bits {
770 u8 reserved_at_0[0x80]; 776 u8 reserved_at_0[0x80];
771 777
@@ -875,7 +881,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
875 u8 reserved_at_202[0x1]; 881 u8 reserved_at_202[0x1];
876 u8 ipoib_enhanced_offloads[0x1]; 882 u8 ipoib_enhanced_offloads[0x1];
877 u8 ipoib_basic_offloads[0x1]; 883 u8 ipoib_basic_offloads[0x1];
878 u8 reserved_at_205[0xa]; 884 u8 reserved_at_205[0x5];
885 u8 umr_fence[0x2];
886 u8 reserved_at_20c[0x3];
879 u8 drain_sigerr[0x1]; 887 u8 drain_sigerr[0x1];
880 u8 cmdif_checksum[0x2]; 888 u8 cmdif_checksum[0x2];
881 u8 sigerr_cqe[0x1]; 889 u8 sigerr_cqe[0x1];
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 7cb17c6b97de..6f543a47fc92 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1393,12 +1393,6 @@ int clear_page_dirty_for_io(struct page *page);
1393 1393
1394int get_cmdline(struct task_struct *task, char *buffer, int buflen); 1394int get_cmdline(struct task_struct *task, char *buffer, int buflen);
1395 1395
1396/* Is the vma a continuation of the stack vma above it? */
1397static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
1398{
1399 return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
1400}
1401
1402static inline bool vma_is_anonymous(struct vm_area_struct *vma) 1396static inline bool vma_is_anonymous(struct vm_area_struct *vma)
1403{ 1397{
1404 return !vma->vm_ops; 1398 return !vma->vm_ops;
@@ -1414,28 +1408,6 @@ bool vma_is_shmem(struct vm_area_struct *vma);
1414static inline bool vma_is_shmem(struct vm_area_struct *vma) { return false; } 1408static inline bool vma_is_shmem(struct vm_area_struct *vma) { return false; }
1415#endif 1409#endif
1416 1410
1417static inline int stack_guard_page_start(struct vm_area_struct *vma,
1418 unsigned long addr)
1419{
1420 return (vma->vm_flags & VM_GROWSDOWN) &&
1421 (vma->vm_start == addr) &&
1422 !vma_growsdown(vma->vm_prev, addr);
1423}
1424
1425/* Is the vma a continuation of the stack vma below it? */
1426static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
1427{
1428 return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
1429}
1430
1431static inline int stack_guard_page_end(struct vm_area_struct *vma,
1432 unsigned long addr)
1433{
1434 return (vma->vm_flags & VM_GROWSUP) &&
1435 (vma->vm_end == addr) &&
1436 !vma_growsup(vma->vm_next, addr);
1437}
1438
1439int vma_is_stack_for_current(struct vm_area_struct *vma); 1411int vma_is_stack_for_current(struct vm_area_struct *vma);
1440 1412
1441extern unsigned long move_page_tables(struct vm_area_struct *vma, 1413extern unsigned long move_page_tables(struct vm_area_struct *vma,
@@ -2222,6 +2194,7 @@ void page_cache_async_readahead(struct address_space *mapping,
2222 pgoff_t offset, 2194 pgoff_t offset,
2223 unsigned long size); 2195 unsigned long size);
2224 2196
2197extern unsigned long stack_guard_gap;
2225/* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */ 2198/* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
2226extern int expand_stack(struct vm_area_struct *vma, unsigned long address); 2199extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
2227 2200
@@ -2250,6 +2223,30 @@ static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * m
2250 return vma; 2223 return vma;
2251} 2224}
2252 2225
2226static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
2227{
2228 unsigned long vm_start = vma->vm_start;
2229
2230 if (vma->vm_flags & VM_GROWSDOWN) {
2231 vm_start -= stack_guard_gap;
2232 if (vm_start > vma->vm_start)
2233 vm_start = 0;
2234 }
2235 return vm_start;
2236}
2237
2238static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
2239{
2240 unsigned long vm_end = vma->vm_end;
2241
2242 if (vma->vm_flags & VM_GROWSUP) {
2243 vm_end += stack_guard_gap;
2244 if (vm_end < vma->vm_end)
2245 vm_end = -PAGE_SIZE;
2246 }
2247 return vm_end;
2248}
2249
2253static inline unsigned long vma_pages(struct vm_area_struct *vma) 2250static inline unsigned long vma_pages(struct vm_area_struct *vma)
2254{ 2251{
2255 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; 2252 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
@@ -2327,6 +2324,17 @@ static inline struct page *follow_page(struct vm_area_struct *vma,
2327#define FOLL_REMOTE 0x2000 /* we are working on non-current tsk/mm */ 2324#define FOLL_REMOTE 0x2000 /* we are working on non-current tsk/mm */
2328#define FOLL_COW 0x4000 /* internal GUP flag */ 2325#define FOLL_COW 0x4000 /* internal GUP flag */
2329 2326
2327static inline int vm_fault_to_errno(int vm_fault, int foll_flags)
2328{
2329 if (vm_fault & VM_FAULT_OOM)
2330 return -ENOMEM;
2331 if (vm_fault & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
2332 return (foll_flags & FOLL_HWPOISON) ? -EHWPOISON : -EFAULT;
2333 if (vm_fault & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
2334 return -EFAULT;
2335 return 0;
2336}
2337
2330typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr, 2338typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
2331 void *data); 2339 void *data);
2332extern int apply_to_page_range(struct mm_struct *mm, unsigned long address, 2340extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
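The clamping inside vm_start_gap()/vm_end_gap() guards against unsigned wrap-around at the ends of the address space. A user-space sketch of the downward-growing case (constants illustrative, not necessarily the kernel's defaults):

    #include <stdio.h>

    #define PAGE_SIZE_ 4096UL

    static unsigned long stack_guard_gap = 256UL * PAGE_SIZE_;

    /* effective start of a VM_GROWSDOWN vma: gap applied, then clamped */
    static unsigned long vm_start_gap_(unsigned long vm_start)
    {
            unsigned long start = vm_start - stack_guard_gap;

            if (start > vm_start)   /* subtraction wrapped below zero */
                    start = 0;
            return start;
    }

    int main(void)
    {
            printf("%#lx\n", vm_start_gap_(0x7ffd00000000UL)); /* gap applied */
            printf("%#lx\n", vm_start_gap_(0x1000UL));         /* clamped to 0 */
            return 0;
    }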
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index ebaccd4e7d8c..ef6a13b7bd3e 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -678,6 +678,7 @@ typedef struct pglist_data {
678 * is the first PFN that needs to be initialised. 678 * is the first PFN that needs to be initialised.
679 */ 679 */
680 unsigned long first_deferred_pfn; 680 unsigned long first_deferred_pfn;
681 unsigned long static_init_size;
681#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ 682#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
682 683
683#ifdef CONFIG_TRANSPARENT_HUGEPAGE 684#ifdef CONFIG_TRANSPARENT_HUGEPAGE
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 566fda587fcf..3f74ef2281e8 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -467,6 +467,7 @@ enum dmi_field {
467 DMI_PRODUCT_VERSION, 467 DMI_PRODUCT_VERSION,
468 DMI_PRODUCT_SERIAL, 468 DMI_PRODUCT_SERIAL,
469 DMI_PRODUCT_UUID, 469 DMI_PRODUCT_UUID,
470 DMI_PRODUCT_FAMILY,
470 DMI_BOARD_VENDOR, 471 DMI_BOARD_VENDOR,
471 DMI_BOARD_NAME, 472 DMI_BOARD_NAME,
472 DMI_BOARD_VERSION, 473 DMI_BOARD_VERSION,
diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
index 6be1949ebcdf..1ee7b30dafec 100644
--- a/include/linux/moduleparam.h
+++ b/include/linux/moduleparam.h
@@ -457,7 +457,7 @@ enum hwparam_type {
457 hwparam_ioport, /* Module parameter configures an I/O port */ 457 hwparam_ioport, /* Module parameter configures an I/O port */
458 hwparam_iomem, /* Module parameter configures an I/O mem address */ 458 hwparam_iomem, /* Module parameter configures an I/O mem address */
459 hwparam_ioport_or_iomem, /* Module parameter could be either, depending on other option */ 459 hwparam_ioport_or_iomem, /* Module parameter could be either, depending on other option */
460 hwparam_irq, /* Module parameter configures an I/O port */ 460 hwparam_irq, /* Module parameter configures an IRQ */
461 hwparam_dma, /* Module parameter configures a DMA channel */ 461 hwparam_dma, /* Module parameter configures a DMA channel */
462 hwparam_dma_addr, /* Module parameter configures a DMA buffer address */ 462 hwparam_dma_addr, /* Module parameter configures a DMA buffer address */
463 hwparam_other, /* Module parameter configures some other value */ 463 hwparam_other, /* Module parameter configures some other value */
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 9c23bd2efb56..4ed952c17fc7 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -914,8 +914,7 @@ struct xfrmdev_ops {
914 * 914 *
915 * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu); 915 * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu);
916 * Called when a user wants to change the Maximum Transfer Unit 916 * Called when a user wants to change the Maximum Transfer Unit
917 * of a device. If not defined, any request to change MTU will 917 * of a device.
918 * will return an error.
919 * 918 *
920 * void (*ndo_tx_timeout)(struct net_device *dev); 919 * void (*ndo_tx_timeout)(struct net_device *dev);
921 * Callback used when the transmitter has not made any progress 920 * Callback used when the transmitter has not made any progress
@@ -1596,8 +1595,8 @@ enum netdev_priv_flags {
1596 * @rtnl_link_state: This enum represents the phases of creating 1595 * @rtnl_link_state: This enum represents the phases of creating
1597 * a new link 1596 * a new link
1598 * 1597 *
1599 * @destructor: Called from unregister, 1598 * @needs_free_netdev: Should unregister perform free_netdev?
1600 * can be used to call free_netdev 1599 * @priv_destructor: Called from unregister
1601 * @npinfo: XXX: need comments on this one 1600 * @npinfo: XXX: need comments on this one
1602 * @nd_net: Network namespace this network device is inside 1601 * @nd_net: Network namespace this network device is inside
1603 * 1602 *
@@ -1858,7 +1857,8 @@ struct net_device {
1858 RTNL_LINK_INITIALIZING, 1857 RTNL_LINK_INITIALIZING,
1859 } rtnl_link_state:16; 1858 } rtnl_link_state:16;
1860 1859
1861 void (*destructor)(struct net_device *dev); 1860 bool needs_free_netdev;
1861 void (*priv_destructor)(struct net_device *dev);
1862 1862
1863#ifdef CONFIG_NETPOLL 1863#ifdef CONFIG_NETPOLL
1864 struct netpoll_info __rcu *npinfo; 1864 struct netpoll_info __rcu *npinfo;
@@ -3296,11 +3296,15 @@ int dev_get_phys_port_id(struct net_device *dev,
3296int dev_get_phys_port_name(struct net_device *dev, 3296int dev_get_phys_port_name(struct net_device *dev,
3297 char *name, size_t len); 3297 char *name, size_t len);
3298int dev_change_proto_down(struct net_device *dev, bool proto_down); 3298int dev_change_proto_down(struct net_device *dev, bool proto_down);
3299int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
3300 int fd, u32 flags);
3301struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev); 3299struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev);
3302struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, 3300struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
3303 struct netdev_queue *txq, int *ret); 3301 struct netdev_queue *txq, int *ret);
3302
3303typedef int (*xdp_op_t)(struct net_device *dev, struct netdev_xdp *xdp);
3304int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
3305 int fd, u32 flags);
3306bool __dev_xdp_attached(struct net_device *dev, xdp_op_t xdp_op);
3307
3304int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb); 3308int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
3305int dev_forward_skb(struct net_device *dev, struct sk_buff *skb); 3309int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
3306bool is_skb_forwardable(const struct net_device *dev, 3310bool is_skb_forwardable(const struct net_device *dev,
@@ -4257,6 +4261,11 @@ static inline const char *netdev_name(const struct net_device *dev)
4257 return dev->name; 4261 return dev->name;
4258} 4262}
4259 4263
4264static inline bool netdev_unregistering(const struct net_device *dev)
4265{
4266 return dev->reg_state == NETREG_UNREGISTERING;
4267}
4268
4260static inline const char *netdev_reg_state(const struct net_device *dev) 4269static inline const char *netdev_reg_state(const struct net_device *dev)
4261{ 4270{
4262 switch (dev->reg_state) { 4271 switch (dev->reg_state) {
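A sketch of the teardown contract behind the destructor split above (stand-in types; the relative ordering shown is an assumption drawn from the new kernel-doc, not quoted code): drivers stop pointing one callback at free_netdev() and instead state separately what private cleanup they need and whether the core should free the device.

    #include <stdio.h>

    struct net_device_s {
            int needs_free_netdev;
            void (*priv_destructor)(struct net_device_s *);
    };

    static void drv_cleanup(struct net_device_s *dev)
    {
            (void)dev;
            puts("driver private teardown");
    }

    static void free_netdev_s(struct net_device_s *dev)
    {
            (void)dev;
            puts("core frees the device");
    }

    static void unregister_teardown(struct net_device_s *dev)
    {
            if (dev->priv_destructor)
                    dev->priv_destructor(dev);   /* driver cleanup first */
            if (dev->needs_free_netdev)
                    free_netdev_s(dev);          /* then the core frees, if asked */
    }

    int main(void)
    {
            struct net_device_s dev = { 1, drv_cleanup };
            unregister_teardown(&dev);
            return 0;
    }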
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index be378cf47fcc..b3044c2c62cb 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -294,7 +294,7 @@ int xt_match_to_user(const struct xt_entry_match *m,
294int xt_target_to_user(const struct xt_entry_target *t, 294int xt_target_to_user(const struct xt_entry_target *t,
295 struct xt_entry_target __user *u); 295 struct xt_entry_target __user *u);
296int xt_data_to_user(void __user *dst, const void *src, 296int xt_data_to_user(void __user *dst, const void *src,
297 int usersize, int size); 297 int usersize, int size, int aligned_size);
298 298
299void *xt_copy_counters_from_user(const void __user *user, unsigned int len, 299void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
300 struct xt_counters_info *info, bool compat); 300 struct xt_counters_info *info, bool compat);
diff --git a/include/linux/netfilter_bridge/ebtables.h b/include/linux/netfilter_bridge/ebtables.h
index a30efb437e6d..e0cbf17af780 100644
--- a/include/linux/netfilter_bridge/ebtables.h
+++ b/include/linux/netfilter_bridge/ebtables.h
@@ -125,4 +125,9 @@ extern unsigned int ebt_do_table(struct sk_buff *skb,
125/* True if the target is not a standard target */ 125/* True if the target is not a standard target */
126#define INVALID_TARGET (info->target < -NUM_STANDARD_TARGETS || info->target >= 0) 126#define INVALID_TARGET (info->target < -NUM_STANDARD_TARGETS || info->target >= 0)
127 127
128static inline bool ebt_invalid_target(int target)
129{
130 return (target < -NUM_STANDARD_TARGETS || target >= 0);
131}
132
128#endif 133#endif
diff --git a/include/linux/nvme-fc-driver.h b/include/linux/nvme-fc-driver.h
index 0db37158a61d..6c8c5d8041b7 100644
--- a/include/linux/nvme-fc-driver.h
+++ b/include/linux/nvme-fc-driver.h
@@ -27,8 +27,8 @@
27 27
28/* FC Port role bitmask - can merge with FC Port Roles in fc transport */ 28/* FC Port role bitmask - can merge with FC Port Roles in fc transport */
29#define FC_PORT_ROLE_NVME_INITIATOR 0x10 29#define FC_PORT_ROLE_NVME_INITIATOR 0x10
30#define FC_PORT_ROLE_NVME_TARGET 0x11 30#define FC_PORT_ROLE_NVME_TARGET 0x20
31#define FC_PORT_ROLE_NVME_DISCOVERY 0x12 31#define FC_PORT_ROLE_NVME_DISCOVERY 0x40
32 32
33 33
34/** 34/**
@@ -642,15 +642,7 @@ enum {
642 * sequence in one LLDD operation. Errors during Data 642 * sequence in one LLDD operation. Errors during Data
643 * sequence transmit must not allow RSP sequence to be sent. 643 * sequence transmit must not allow RSP sequence to be sent.
644 */ 644 */
645 NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED = (1 << 1), 645 NVMET_FCTGTFEAT_CMD_IN_ISR = (1 << 1),
646 /* Bit 1: When 0, the LLDD will deliver FCP CMD
647 * on the CPU it should be affinitized to. Thus work will
648 * be scheduled on the cpu received on. When 1, the LLDD
649 * may not deliver the CMD on the CPU it should be worked
650 * on. The transport should pick a cpu to schedule the work
651 * on.
652 */
653 NVMET_FCTGTFEAT_CMD_IN_ISR = (1 << 2),
654 /* Bit 2: When 0, the LLDD is calling the cmd rcv handler 646 /* Bit 2: When 0, the LLDD is calling the cmd rcv handler
655 * in a non-isr context, allowing the transport to finish 647 * in a non-isr context, allowing the transport to finish
656 * op completion in the calling context. When 1, the LLDD 648 * op completion in the calling context. When 1, the LLDD
@@ -658,7 +650,7 @@ enum {
658 * requiring the transport to transition to a workqueue 650 * requiring the transport to transition to a workqueue
659 * for op completion. 651 * for op completion.
660 */ 652 */
661 NVMET_FCTGTFEAT_OPDONE_IN_ISR = (1 << 3), 653 NVMET_FCTGTFEAT_OPDONE_IN_ISR = (1 << 2),
662 /* Bit 3: When 0, the LLDD is calling the op done handler 654 /* Bit 3: When 0, the LLDD is calling the op done handler
663 * in a non-isr context, allowing the transport to finish 655 * in a non-isr context, allowing the transport to finish
664 * op completion in the calling context. When 1, the LLDD 656 * op completion in the calling context. When 1, the LLDD
diff --git a/include/linux/of_irq.h b/include/linux/of_irq.h
index ec6b11deb773..1e0deb8e8494 100644
--- a/include/linux/of_irq.h
+++ b/include/linux/of_irq.h
@@ -8,7 +8,7 @@
8#include <linux/ioport.h> 8#include <linux/ioport.h>
9#include <linux/of.h> 9#include <linux/of.h>
10 10
11typedef int const (*of_irq_init_cb_t)(struct device_node *, struct device_node *); 11typedef int (*of_irq_init_cb_t)(struct device_node *, struct device_node *);
12 12
13/* 13/*
14 * Workarounds only applied to 32bit powermac machines 14 * Workarounds only applied to 32bit powermac machines
diff --git a/include/linux/of_platform.h b/include/linux/of_platform.h
index dc8224ae28d5..e0d1946270f3 100644
--- a/include/linux/of_platform.h
+++ b/include/linux/of_platform.h
@@ -64,6 +64,7 @@ extern struct platform_device *of_platform_device_create(struct device_node *np,
64 const char *bus_id, 64 const char *bus_id,
65 struct device *parent); 65 struct device *parent);
66 66
67extern int of_platform_device_destroy(struct device *dev, void *data);
67extern int of_platform_bus_probe(struct device_node *root, 68extern int of_platform_bus_probe(struct device_node *root,
68 const struct of_device_id *matches, 69 const struct of_device_id *matches,
69 struct device *parent); 70 struct device *parent);
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 33c2b0b77429..8039f9f0ca05 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -183,6 +183,11 @@ enum pci_dev_flags {
183 PCI_DEV_FLAGS_BRIDGE_XLATE_ROOT = (__force pci_dev_flags_t) (1 << 9), 183 PCI_DEV_FLAGS_BRIDGE_XLATE_ROOT = (__force pci_dev_flags_t) (1 << 9),
184 /* Do not use FLR even if device advertises PCI_AF_CAP */ 184 /* Do not use FLR even if device advertises PCI_AF_CAP */
185 PCI_DEV_FLAGS_NO_FLR_RESET = (__force pci_dev_flags_t) (1 << 10), 185 PCI_DEV_FLAGS_NO_FLR_RESET = (__force pci_dev_flags_t) (1 << 10),
186 /*
187 * Resume before calling the driver's system suspend hooks, disabling
188 * the direct_complete optimization.
189 */
190 PCI_DEV_FLAGS_NEEDS_RESUME = (__force pci_dev_flags_t) (1 << 11),
186}; 191};
187 192
188enum pci_irq_reroute_variant { 193enum pci_irq_reroute_variant {
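A quirk-style sketch of a driver opting a device out of direct_complete with the new flag (the function name is illustrative):

    #include <linux/pci.h>

    static void example_pci_quirk(struct pci_dev *pdev)
    {
            /* Have the PCI core resume this device before its system
             * suspend callbacks run, bypassing direct_complete.
             */
            pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME;
    }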
@@ -1342,9 +1347,9 @@ pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
1342 unsigned int max_vecs, unsigned int flags, 1347 unsigned int max_vecs, unsigned int flags,
1343 const struct irq_affinity *aff_desc) 1348 const struct irq_affinity *aff_desc)
1344{ 1349{
1345 if (min_vecs > 1) 1350 if ((flags & PCI_IRQ_LEGACY) && min_vecs == 1 && dev->irq)
1346 return -EINVAL; 1351 return 1;
1347 return 1; 1352 return -ENOSPC;
1348} 1353}
1349 1354
1350static inline void pci_free_irq_vectors(struct pci_dev *dev) 1355static inline void pci_free_irq_vectors(struct pci_dev *dev)
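With this change the !CONFIG_PCI_MSI stub succeeds only when the caller allowed legacy interrupts and the device actually has one, matching the MSI-enabled semantics. A caller pattern that behaves the same under both configs:

    #include <linux/pci.h>

    static int example_setup_irqs(struct pci_dev *pdev)
    {
            int nvec;

            /* Prefer MSI-X/MSI, fall back to INTx if permitted. */
            nvec = pci_alloc_irq_vectors(pdev, 1, 4, PCI_IRQ_ALL_TYPES);
            if (nvec < 0)
                    return nvec;    /* -ENOSPC: no MSI and no usable INTx */

            return 0;
    }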
diff --git a/include/linux/pinctrl/pinconf-generic.h b/include/linux/pinctrl/pinconf-generic.h
index 279e3c5326e3..7620eb127cff 100644
--- a/include/linux/pinctrl/pinconf-generic.h
+++ b/include/linux/pinctrl/pinconf-generic.h
@@ -42,8 +42,6 @@
42 * @PIN_CONFIG_BIAS_PULL_UP: the pin will be pulled up (usually with high 42 * @PIN_CONFIG_BIAS_PULL_UP: the pin will be pulled up (usually with high
43 * impedance to VDD). If the argument is != 0 pull-up is enabled, 43 * impedance to VDD). If the argument is != 0 pull-up is enabled,
44 * if it is 0, pull-up is total, i.e. the pin is connected to VDD. 44 * if it is 0, pull-up is total, i.e. the pin is connected to VDD.
45 * @PIN_CONFIG_BIDIRECTIONAL: the pin will be configured to allow simultaneous
46 * input and output operations.
47 * @PIN_CONFIG_DRIVE_OPEN_DRAIN: the pin will be driven with open drain (open 45 * @PIN_CONFIG_DRIVE_OPEN_DRAIN: the pin will be driven with open drain (open
48 * collector) which means it is usually wired with other output ports 46 * collector) which means it is usually wired with other output ports
49 * which are then pulled up with an external resistor. Setting this 47 * which are then pulled up with an external resistor. Setting this
@@ -98,7 +96,6 @@ enum pin_config_param {
98 PIN_CONFIG_BIAS_PULL_DOWN, 96 PIN_CONFIG_BIAS_PULL_DOWN,
99 PIN_CONFIG_BIAS_PULL_PIN_DEFAULT, 97 PIN_CONFIG_BIAS_PULL_PIN_DEFAULT,
100 PIN_CONFIG_BIAS_PULL_UP, 98 PIN_CONFIG_BIAS_PULL_UP,
101 PIN_CONFIG_BIDIRECTIONAL,
102 PIN_CONFIG_DRIVE_OPEN_DRAIN, 99 PIN_CONFIG_DRIVE_OPEN_DRAIN,
103 PIN_CONFIG_DRIVE_OPEN_SOURCE, 100 PIN_CONFIG_DRIVE_OPEN_SOURCE,
104 PIN_CONFIG_DRIVE_PUSH_PULL, 101 PIN_CONFIG_DRIVE_PUSH_PULL,
diff --git a/include/linux/platform_data/spi-mt65xx.h b/include/linux/platform_data/spi-mt65xx.h
index 54b04483976c..ba4e4bb70262 100644
--- a/include/linux/platform_data/spi-mt65xx.h
+++ b/include/linux/platform_data/spi-mt65xx.h
@@ -16,5 +16,7 @@
16struct mtk_chip_config { 16struct mtk_chip_config {
17 u32 tx_mlsb; 17 u32 tx_mlsb;
18 u32 rx_mlsb; 18 u32 rx_mlsb;
19 u32 cs_pol;
20 u32 sample_sel;
19}; 21};
20#endif 22#endif
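An illustrative board-data initializer using the two new knobs; the values are placeholders and the exact semantics (chip-select polarity, sample-edge selection) are defined by the spi-mt65xx driver:

    #include <linux/platform_data/spi-mt65xx.h>

    static const struct mtk_chip_config example_mtk_cfg = {
            .tx_mlsb        = 1,    /* MSB first on TX */
            .rx_mlsb        = 1,    /* MSB first on RX */
            .cs_pol         = 1,    /* assumed: active-high chip select */
            .sample_sel     = 1,    /* assumed: delayed sample point */
    };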
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index 422bc2e4cb6a..ef3eb8bbfee4 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -54,7 +54,8 @@ extern int ptrace_request(struct task_struct *child, long request,
54 unsigned long addr, unsigned long data); 54 unsigned long addr, unsigned long data);
55extern void ptrace_notify(int exit_code); 55extern void ptrace_notify(int exit_code);
56extern void __ptrace_link(struct task_struct *child, 56extern void __ptrace_link(struct task_struct *child,
57 struct task_struct *new_parent); 57 struct task_struct *new_parent,
58 const struct cred *ptracer_cred);
58extern void __ptrace_unlink(struct task_struct *child); 59extern void __ptrace_unlink(struct task_struct *child);
59extern void exit_ptrace(struct task_struct *tracer, struct list_head *dead); 60extern void exit_ptrace(struct task_struct *tracer, struct list_head *dead);
60#define PTRACE_MODE_READ 0x01 61#define PTRACE_MODE_READ 0x01
@@ -206,7 +207,7 @@ static inline void ptrace_init_task(struct task_struct *child, bool ptrace)
206 207
207 if (unlikely(ptrace) && current->ptrace) { 208 if (unlikely(ptrace) && current->ptrace) {
208 child->ptrace = current->ptrace; 209 child->ptrace = current->ptrace;
209 __ptrace_link(child, current->parent); 210 __ptrace_link(child, current->parent, current->ptracer_cred);
210 211
211 if (child->ptrace & PT_SEIZED) 212 if (child->ptrace & PT_SEIZED)
212 task_set_jobctl_pending(child, JOBCTL_TRAP_STOP); 213 task_set_jobctl_pending(child, JOBCTL_TRAP_STOP);
@@ -215,6 +216,8 @@ static inline void ptrace_init_task(struct task_struct *child, bool ptrace)
215 216
216 set_tsk_thread_flag(child, TIF_SIGPENDING); 217 set_tsk_thread_flag(child, TIF_SIGPENDING);
217 } 218 }
219 else
220 child->ptracer_cred = NULL;
218} 221}
219 222
220/** 223/**
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index 9c6f768b7d32..dda22f45fc1b 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -44,6 +44,7 @@ void inode_sub_rsv_space(struct inode *inode, qsize_t number);
44void inode_reclaim_rsv_space(struct inode *inode, qsize_t number); 44void inode_reclaim_rsv_space(struct inode *inode, qsize_t number);
45 45
46int dquot_initialize(struct inode *inode); 46int dquot_initialize(struct inode *inode);
47bool dquot_initialize_needed(struct inode *inode);
47void dquot_drop(struct inode *inode); 48void dquot_drop(struct inode *inode);
48struct dquot *dqget(struct super_block *sb, struct kqid qid); 49struct dquot *dqget(struct super_block *sb, struct kqid qid);
49static inline struct dquot *dqgrab(struct dquot *dquot) 50static inline struct dquot *dqgrab(struct dquot *dquot)
@@ -207,6 +208,11 @@ static inline int dquot_initialize(struct inode *inode)
207 return 0; 208 return 0;
208} 209}
209 210
211static inline bool dquot_initialize_needed(struct inode *inode)
212{
213 return false;
214}
215
210static inline void dquot_drop(struct inode *inode) 216static inline void dquot_drop(struct inode *inode)
211{ 217{
212} 218}
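dquot_initialize_needed() lets a filesystem cheaply test whether dquot_initialize() still has work to do for an inode; the !CONFIG_QUOTA stub above always answers no. A sketch of the intended pattern, with a hypothetical caller:

    #include <linux/fs.h>
    #include <linux/quotaops.h>

    static int example_prepare_inode(struct inode *inode)
    {
            /* Only pay for quota setup when it is actually needed. */
            if (dquot_initialize_needed(inode))
                    return dquot_initialize(inode);
            return 0;
    }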
diff --git a/include/linux/serdev.h b/include/linux/serdev.h
index cda76c6506ca..e69402d4a8ae 100644
--- a/include/linux/serdev.h
+++ b/include/linux/serdev.h
@@ -195,6 +195,7 @@ int serdev_device_open(struct serdev_device *);
195void serdev_device_close(struct serdev_device *); 195void serdev_device_close(struct serdev_device *);
196unsigned int serdev_device_set_baudrate(struct serdev_device *, unsigned int); 196unsigned int serdev_device_set_baudrate(struct serdev_device *, unsigned int);
197void serdev_device_set_flow_control(struct serdev_device *, bool); 197void serdev_device_set_flow_control(struct serdev_device *, bool);
198int serdev_device_write_buf(struct serdev_device *, const unsigned char *, size_t);
198void serdev_device_wait_until_sent(struct serdev_device *, long); 199void serdev_device_wait_until_sent(struct serdev_device *, long);
199int serdev_device_get_tiocm(struct serdev_device *); 200int serdev_device_get_tiocm(struct serdev_device *);
200int serdev_device_set_tiocm(struct serdev_device *, int, int); 201int serdev_device_set_tiocm(struct serdev_device *, int, int);
@@ -236,6 +237,12 @@ static inline unsigned int serdev_device_set_baudrate(struct serdev_device *sdev
236 return 0; 237 return 0;
237} 238}
238static inline void serdev_device_set_flow_control(struct serdev_device *sdev, bool enable) {} 239static inline void serdev_device_set_flow_control(struct serdev_device *sdev, bool enable) {}
240static inline int serdev_device_write_buf(struct serdev_device *serdev,
241 const unsigned char *buf,
242 size_t count)
243{
244 return -ENODEV;
245}
239static inline void serdev_device_wait_until_sent(struct serdev_device *sdev, long timeout) {} 246static inline void serdev_device_wait_until_sent(struct serdev_device *sdev, long timeout) {}
240static inline int serdev_device_get_tiocm(struct serdev_device *serdev) 247static inline int serdev_device_get_tiocm(struct serdev_device *serdev)
241{ 248{
@@ -301,7 +308,7 @@ struct tty_driver;
301struct device *serdev_tty_port_register(struct tty_port *port, 308struct device *serdev_tty_port_register(struct tty_port *port,
302 struct device *parent, 309 struct device *parent,
303 struct tty_driver *drv, int idx); 310 struct tty_driver *drv, int idx);
304void serdev_tty_port_unregister(struct tty_port *port); 311int serdev_tty_port_unregister(struct tty_port *port);
305#else 312#else
306static inline struct device *serdev_tty_port_register(struct tty_port *port, 313static inline struct device *serdev_tty_port_register(struct tty_port *port,
307 struct device *parent, 314 struct device *parent,
@@ -309,14 +316,10 @@ static inline struct device *serdev_tty_port_register(struct tty_port *port,
309{ 316{
310 return ERR_PTR(-ENODEV); 317 return ERR_PTR(-ENODEV);
311} 318}
312static inline void serdev_tty_port_unregister(struct tty_port *port) {} 319static inline int serdev_tty_port_unregister(struct tty_port *port)
313#endif /* CONFIG_SERIAL_DEV_CTRL_TTYPORT */
314
315static inline int serdev_device_write_buf(struct serdev_device *serdev,
316 const unsigned char *data,
317 size_t count)
318{ 320{
319 return serdev_device_write(serdev, data, count, 0); 321 return -ENODEV;
320} 322}
323#endif /* CONFIG_SERIAL_DEV_CTRL_TTYPORT */
321 324
322#endif /*_LINUX_SERDEV_H */ 325#endif /*_LINUX_SERDEV_H */
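serdev_device_write_buf() is now a real symbol with an -ENODEV stub rather than an inline wrapper, so client drivers can call it unconditionally. A minimal client write path (the names and command bytes are hypothetical):

    #include <linux/jiffies.h>
    #include <linux/serdev.h>

    static int example_send_cmd(struct serdev_device *serdev)
    {
            static const unsigned char cmd[] = { 0xaa, 0x55, 0x01 };
            int ret;

            ret = serdev_device_write_buf(serdev, cmd, sizeof(cmd));
            if (ret < 0)
                    return ret;     /* -ENODEV when serdev is disabled */

            serdev_device_wait_until_sent(serdev, msecs_to_jiffies(100));
            return 0;
    }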
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 07ef550c6627..93315d6b21a8 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -84,6 +84,7 @@ struct kmem_cache {
84 int red_left_pad; /* Left redzone padding size */ 84 int red_left_pad; /* Left redzone padding size */
85#ifdef CONFIG_SYSFS 85#ifdef CONFIG_SYSFS
86 struct kobject kobj; /* For sysfs */ 86 struct kobject kobj; /* For sysfs */
87 struct work_struct kobj_remove_work;
87#endif 88#endif
88#ifdef CONFIG_MEMCG 89#ifdef CONFIG_MEMCG
89 struct memcg_cache_params memcg_params; 90 struct memcg_cache_params memcg_params;
diff --git a/include/linux/soc/renesas/rcar-rst.h b/include/linux/soc/renesas/rcar-rst.h
index a18e0783946b..787e7ad53d45 100644
--- a/include/linux/soc/renesas/rcar-rst.h
+++ b/include/linux/soc/renesas/rcar-rst.h
@@ -1,6 +1,11 @@
1#ifndef __LINUX_SOC_RENESAS_RCAR_RST_H__ 1#ifndef __LINUX_SOC_RENESAS_RCAR_RST_H__
2#define __LINUX_SOC_RENESAS_RCAR_RST_H__ 2#define __LINUX_SOC_RENESAS_RCAR_RST_H__
3 3
4#if defined(CONFIG_ARCH_RCAR_GEN1) || defined(CONFIG_ARCH_RCAR_GEN2) || \
5 defined(CONFIG_ARCH_R8A7795) || defined(CONFIG_ARCH_R8A7796)
4int rcar_rst_read_mode_pins(u32 *mode); 6int rcar_rst_read_mode_pins(u32 *mode);
7#else
8static inline int rcar_rst_read_mode_pins(u32 *mode) { return -ENODEV; }
9#endif
5 10
6#endif /* __LINUX_SOC_RENESAS_RCAR_RST_H__ */ 11#endif /* __LINUX_SOC_RENESAS_RCAR_RST_H__ */
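With the stub in place, callers no longer need their own config guards; a sketch:

    #include <linux/soc/renesas/rcar-rst.h>

    static int example_read_md_pins(void)
    {
            u32 mode;
            int ret;

            ret = rcar_rst_read_mode_pins(&mode);
            if (ret)                /* -ENODEV on non-R-Car configs */
                    return ret;

            return mode & 0xff;     /* illustrative use of the MD bits */
    }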
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 935bd2854ff1..7b2170bfd6e7 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -24,13 +24,13 @@
24 24
25struct dma_chan; 25struct dma_chan;
26struct property_entry; 26struct property_entry;
27struct spi_master; 27struct spi_controller;
28struct spi_transfer; 28struct spi_transfer;
29struct spi_flash_read_message; 29struct spi_flash_read_message;
30 30
31/* 31/*
32 * INTERFACES between SPI master-side drivers and SPI infrastructure. 32 * INTERFACES between SPI master-side drivers and SPI slave protocol handlers,
33 * (There's no SPI slave support for Linux yet...) 33 * and SPI infrastructure.
34 */ 34 */
35extern struct bus_type spi_bus_type; 35extern struct bus_type spi_bus_type;
36 36
@@ -84,7 +84,7 @@ struct spi_statistics {
84 84
85void spi_statistics_add_transfer_stats(struct spi_statistics *stats, 85void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
86 struct spi_transfer *xfer, 86 struct spi_transfer *xfer,
87 struct spi_master *master); 87 struct spi_controller *ctlr);
88 88
89#define SPI_STATISTICS_ADD_TO_FIELD(stats, field, count) \ 89#define SPI_STATISTICS_ADD_TO_FIELD(stats, field, count) \
90 do { \ 90 do { \
@@ -98,13 +98,14 @@ void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
98 SPI_STATISTICS_ADD_TO_FIELD(stats, field, 1) 98 SPI_STATISTICS_ADD_TO_FIELD(stats, field, 1)
99 99
100/** 100/**
101 * struct spi_device - Master side proxy for an SPI slave device 101 * struct spi_device - Controller side proxy for an SPI slave device
102 * @dev: Driver model representation of the device. 102 * @dev: Driver model representation of the device.
103 * @master: SPI controller used with the device. 103 * @controller: SPI controller used with the device.
104 * @master: Copy of controller, for backwards compatibility.
104 * @max_speed_hz: Maximum clock rate to be used with this chip 105 * @max_speed_hz: Maximum clock rate to be used with this chip
105 * (on this board); may be changed by the device's driver. 106 * (on this board); may be changed by the device's driver.
106 * The spi_transfer.speed_hz can override this for each transfer. 107 * The spi_transfer.speed_hz can override this for each transfer.
107 * @chip_select: Chipselect, distinguishing chips handled by @master. 108 * @chip_select: Chipselect, distinguishing chips handled by @controller.
108 * @mode: The spi mode defines how data is clocked out and in. 109 * @mode: The spi mode defines how data is clocked out and in.
109 * This may be changed by the device's driver. 110 * This may be changed by the device's driver.
110 * The "active low" default for chipselect mode can be overridden 111 * The "active low" default for chipselect mode can be overridden
@@ -140,7 +141,8 @@ void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
140 */ 141 */
141struct spi_device { 142struct spi_device {
142 struct device dev; 143 struct device dev;
143 struct spi_master *master; 144 struct spi_controller *controller;
145 struct spi_controller *master; /* compatibility layer */
144 u32 max_speed_hz; 146 u32 max_speed_hz;
145 u8 chip_select; 147 u8 chip_select;
146 u8 bits_per_word; 148 u8 bits_per_word;
@@ -198,7 +200,7 @@ static inline void spi_dev_put(struct spi_device *spi)
198 put_device(&spi->dev); 200 put_device(&spi->dev);
199} 201}
200 202
201/* ctldata is for the bus_master driver's runtime state */ 203/* ctldata is for the bus_controller driver's runtime state */
202static inline void *spi_get_ctldata(struct spi_device *spi) 204static inline void *spi_get_ctldata(struct spi_device *spi)
203{ 205{
204 return spi->controller_state; 206 return spi->controller_state;
@@ -292,9 +294,9 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
292 spi_unregister_driver) 294 spi_unregister_driver)
293 295
294/** 296/**
295 * struct spi_master - interface to SPI master controller 297 * struct spi_controller - interface to SPI master or slave controller
296 * @dev: device interface to this driver 298 * @dev: device interface to this driver
297 * @list: link with the global spi_master list 299 * @list: link with the global spi_controller list
298 * @bus_num: board-specific (and often SOC-specific) identifier for a 300 * @bus_num: board-specific (and often SOC-specific) identifier for a
299 * given SPI controller. 301 * given SPI controller.
300 * @num_chipselect: chipselects are used to distinguish individual 302 * @num_chipselect: chipselects are used to distinguish individual
@@ -311,6 +313,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
311 * @min_speed_hz: Lowest supported transfer speed 313 * @min_speed_hz: Lowest supported transfer speed
312 * @max_speed_hz: Highest supported transfer speed 314 * @max_speed_hz: Highest supported transfer speed
313 * @flags: other constraints relevant to this driver 315 * @flags: other constraints relevant to this driver
316 * @slave: indicates that this is an SPI slave controller
314 * @max_transfer_size: function that returns the max transfer size for 317 * @max_transfer_size: function that returns the max transfer size for
315 * a &spi_device; may be %NULL, so the default %SIZE_MAX will be used. 318 * a &spi_device; may be %NULL, so the default %SIZE_MAX will be used.
316 * @max_message_size: function that returns the max message size for 319 * @max_message_size: function that returns the max message size for
@@ -326,8 +329,8 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
326 * the device whose settings are being modified. 329 * the device whose settings are being modified.
327 * @transfer: adds a message to the controller's transfer queue. 330 * @transfer: adds a message to the controller's transfer queue.
328 * @cleanup: frees controller-specific state 331 * @cleanup: frees controller-specific state
329 * @can_dma: determine whether this master supports DMA 332 * @can_dma: determine whether this controller supports DMA
330 * @queued: whether this master is providing an internal message queue 333 * @queued: whether this controller is providing an internal message queue
331 * @kworker: thread struct for message pump 334 * @kworker: thread struct for message pump
332 * @kworker_task: pointer to task for message pump kworker thread 335 * @kworker_task: pointer to task for message pump kworker thread
333 * @pump_messages: work struct for scheduling work to the message pump 336 * @pump_messages: work struct for scheduling work to the message pump
@@ -374,6 +377,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
374 * @handle_err: the subsystem calls the driver to handle an error that occurs 377 * @handle_err: the subsystem calls the driver to handle an error that occurs
375 * in the generic implementation of transfer_one_message(). 378 * in the generic implementation of transfer_one_message().
376 * @unprepare_message: undo any work done by prepare_message(). 379 * @unprepare_message: undo any work done by prepare_message().
380 * @slave_abort: abort the ongoing transfer request on an SPI slave controller
377 * @spi_flash_read: to support spi-controller hardwares that provide 381 * @spi_flash_read: to support spi-controller hardwares that provide
378 * accelerated interface to read from flash devices. 382 * accelerated interface to read from flash devices.
379 * @spi_flash_can_dma: analogous to can_dma() interface, but for 383 * @spi_flash_can_dma: analogous to can_dma() interface, but for
@@ -382,7 +386,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
382 * @cs_gpios: Array of GPIOs to use as chip select lines; one per CS 386 * @cs_gpios: Array of GPIOs to use as chip select lines; one per CS
383 * number. Any individual value may be -ENOENT for CS lines that 387 * number. Any individual value may be -ENOENT for CS lines that
384 * are not GPIOs (driven by the SPI controller itself). 388 * are not GPIOs (driven by the SPI controller itself).
385 * @statistics: statistics for the spi_master 389 * @statistics: statistics for the spi_controller
386 * @dma_tx: DMA transmit channel 390 * @dma_tx: DMA transmit channel
387 * @dma_rx: DMA receive channel 391 * @dma_rx: DMA receive channel
388 * @dummy_rx: dummy receive buffer for full-duplex devices 392 * @dummy_rx: dummy receive buffer for full-duplex devices
@@ -391,7 +395,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
391 * what Linux expects, this optional hook can be used to translate 395 * what Linux expects, this optional hook can be used to translate
392 * between the two. 396 * between the two.
393 * 397 *
394 * Each SPI master controller can communicate with one or more @spi_device 398 * Each SPI controller can communicate with one or more @spi_device
395 * children. These make a small bus, sharing MOSI, MISO and SCK signals 399 * children. These make a small bus, sharing MOSI, MISO and SCK signals
396 * but not chip select signals. Each device may be configured to use a 400 * but not chip select signals. Each device may be configured to use a
397 * different clock rate, since those shared signals are ignored unless 401 * different clock rate, since those shared signals are ignored unless
@@ -402,7 +406,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
402 * an SPI slave device. For each such message it queues, it calls the 406 * an SPI slave device. For each such message it queues, it calls the
403 * message's completion function when the transaction completes. 407 * message's completion function when the transaction completes.
404 */ 408 */
405struct spi_master { 409struct spi_controller {
406 struct device dev; 410 struct device dev;
407 411
408 struct list_head list; 412 struct list_head list;
@@ -440,12 +444,16 @@ struct spi_master {
440 444
441 /* other constraints relevant to this driver */ 445 /* other constraints relevant to this driver */
442 u16 flags; 446 u16 flags;
443#define SPI_MASTER_HALF_DUPLEX BIT(0) /* can't do full duplex */ 447#define SPI_CONTROLLER_HALF_DUPLEX BIT(0) /* can't do full duplex */
444#define SPI_MASTER_NO_RX BIT(1) /* can't do buffer read */ 448#define SPI_CONTROLLER_NO_RX BIT(1) /* can't do buffer read */
445#define SPI_MASTER_NO_TX BIT(2) /* can't do buffer write */ 449#define SPI_CONTROLLER_NO_TX BIT(2) /* can't do buffer write */
446#define SPI_MASTER_MUST_RX BIT(3) /* requires rx */ 450#define SPI_CONTROLLER_MUST_RX BIT(3) /* requires rx */
447#define SPI_MASTER_MUST_TX BIT(4) /* requires tx */ 451#define SPI_CONTROLLER_MUST_TX BIT(4) /* requires tx */
448#define SPI_MASTER_GPIO_SS BIT(5) /* GPIO CS must select slave */ 452
453#define SPI_MASTER_GPIO_SS BIT(5) /* GPIO CS must select slave */
454
455 /* flag indicating this is an SPI slave controller */
456 bool slave;
449 457
450 /* 458 /*
451 * on some hardware transfer / message size may be constrained 459 * on some hardware transfer / message size may be constrained
@@ -480,8 +488,8 @@ struct spi_master {
480 * any other request management 488 * any other request management
481 * + To a given spi_device, message queueing is pure fifo 489 * + To a given spi_device, message queueing is pure fifo
482 * 490 *
483 * + The master's main job is to process its message queue, 491 * + The controller's main job is to process its message queue,
484 * selecting a chip then transferring data 492 * selecting a chip (for masters), then transferring data
485 * + If there are multiple spi_device children, the i/o queue 493 * + If there are multiple spi_device children, the i/o queue
486 * arbitration algorithm is unspecified (round robin, fifo, 494 * arbitration algorithm is unspecified (round robin, fifo,
487 * priority, reservations, preemption, etc) 495 * priority, reservations, preemption, etc)
@@ -494,7 +502,7 @@ struct spi_master {
494 int (*transfer)(struct spi_device *spi, 502 int (*transfer)(struct spi_device *spi,
495 struct spi_message *mesg); 503 struct spi_message *mesg);
496 504
497 /* called on release() to free memory provided by spi_master */ 505 /* called on release() to free memory provided by spi_controller */
498 void (*cleanup)(struct spi_device *spi); 506 void (*cleanup)(struct spi_device *spi);
499 507
500 /* 508 /*
@@ -504,13 +512,13 @@ struct spi_master {
504 * not modify or store xfer and dma_tx and dma_rx must be set 512 * not modify or store xfer and dma_tx and dma_rx must be set
505 * while the device is prepared. 513 * while the device is prepared.
506 */ 514 */
507 bool (*can_dma)(struct spi_master *master, 515 bool (*can_dma)(struct spi_controller *ctlr,
508 struct spi_device *spi, 516 struct spi_device *spi,
509 struct spi_transfer *xfer); 517 struct spi_transfer *xfer);
510 518
511 /* 519 /*
512 * These hooks are for drivers that want to use the generic 520 * These hooks are for drivers that want to use the generic
513 * master transfer queueing mechanism. If these are used, the 521 * controller transfer queueing mechanism. If these are used, the
514 * transfer() function above must NOT be specified by the driver. 522 * transfer() function above must NOT be specified by the driver.
515 * Over time we expect SPI drivers to be phased over to this API. 523 * Over time we expect SPI drivers to be phased over to this API.
516 */ 524 */
@@ -531,14 +539,15 @@ struct spi_master {
531 struct completion xfer_completion; 539 struct completion xfer_completion;
532 size_t max_dma_len; 540 size_t max_dma_len;
533 541
534 int (*prepare_transfer_hardware)(struct spi_master *master); 542 int (*prepare_transfer_hardware)(struct spi_controller *ctlr);
535 int (*transfer_one_message)(struct spi_master *master, 543 int (*transfer_one_message)(struct spi_controller *ctlr,
536 struct spi_message *mesg); 544 struct spi_message *mesg);
537 int (*unprepare_transfer_hardware)(struct spi_master *master); 545 int (*unprepare_transfer_hardware)(struct spi_controller *ctlr);
538 int (*prepare_message)(struct spi_master *master, 546 int (*prepare_message)(struct spi_controller *ctlr,
539 struct spi_message *message); 547 struct spi_message *message);
540 int (*unprepare_message)(struct spi_master *master, 548 int (*unprepare_message)(struct spi_controller *ctlr,
541 struct spi_message *message); 549 struct spi_message *message);
550 int (*slave_abort)(struct spi_controller *ctlr);
542 int (*spi_flash_read)(struct spi_device *spi, 551 int (*spi_flash_read)(struct spi_device *spi,
543 struct spi_flash_read_message *msg); 552 struct spi_flash_read_message *msg);
544 bool (*spi_flash_can_dma)(struct spi_device *spi, 553 bool (*spi_flash_can_dma)(struct spi_device *spi,
@@ -550,9 +559,9 @@ struct spi_master {
550 * of transfer_one_message() provided by the core. 559 * of transfer_one_message() provided by the core.
551 */ 560 */
552 void (*set_cs)(struct spi_device *spi, bool enable); 561 void (*set_cs)(struct spi_device *spi, bool enable);
553 int (*transfer_one)(struct spi_master *master, struct spi_device *spi, 562 int (*transfer_one)(struct spi_controller *ctlr, struct spi_device *spi,
554 struct spi_transfer *transfer); 563 struct spi_transfer *transfer);
555 void (*handle_err)(struct spi_master *master, 564 void (*handle_err)(struct spi_controller *ctlr,
556 struct spi_message *message); 565 struct spi_message *message);
557 566
558 /* gpio chip select */ 567 /* gpio chip select */
@@ -569,57 +578,78 @@ struct spi_master {
569 void *dummy_rx; 578 void *dummy_rx;
570 void *dummy_tx; 579 void *dummy_tx;
571 580
572 int (*fw_translate_cs)(struct spi_master *master, unsigned cs); 581 int (*fw_translate_cs)(struct spi_controller *ctlr, unsigned cs);
573}; 582};
574 583
575static inline void *spi_master_get_devdata(struct spi_master *master) 584static inline void *spi_controller_get_devdata(struct spi_controller *ctlr)
576{ 585{
577 return dev_get_drvdata(&master->dev); 586 return dev_get_drvdata(&ctlr->dev);
578} 587}
579 588
580static inline void spi_master_set_devdata(struct spi_master *master, void *data) 589static inline void spi_controller_set_devdata(struct spi_controller *ctlr,
590 void *data)
581{ 591{
582 dev_set_drvdata(&master->dev, data); 592 dev_set_drvdata(&ctlr->dev, data);
583} 593}
584 594
585static inline struct spi_master *spi_master_get(struct spi_master *master) 595static inline struct spi_controller *spi_controller_get(struct spi_controller *ctlr)
586{ 596{
587 if (!master || !get_device(&master->dev)) 597 if (!ctlr || !get_device(&ctlr->dev))
588 return NULL; 598 return NULL;
589 return master; 599 return ctlr;
600}
601
602static inline void spi_controller_put(struct spi_controller *ctlr)
603{
604 if (ctlr)
605 put_device(&ctlr->dev);
590} 606}
591 607
592static inline void spi_master_put(struct spi_master *master) 608static inline bool spi_controller_is_slave(struct spi_controller *ctlr)
593{ 609{
594 if (master) 610 return IS_ENABLED(CONFIG_SPI_SLAVE) && ctlr->slave;
595 put_device(&master->dev);
596} 611}
597 612
598/* PM calls that need to be issued by the driver */ 613/* PM calls that need to be issued by the driver */
599extern int spi_master_suspend(struct spi_master *master); 614extern int spi_controller_suspend(struct spi_controller *ctlr);
600extern int spi_master_resume(struct spi_master *master); 615extern int spi_controller_resume(struct spi_controller *ctlr);
601 616
602/* Calls the driver make to interact with the message queue */ 617/* Calls the driver make to interact with the message queue */
603extern struct spi_message *spi_get_next_queued_message(struct spi_master *master); 618extern struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr);
604extern void spi_finalize_current_message(struct spi_master *master); 619extern void spi_finalize_current_message(struct spi_controller *ctlr);
605extern void spi_finalize_current_transfer(struct spi_master *master); 620extern void spi_finalize_current_transfer(struct spi_controller *ctlr);
606 621
607/* the spi driver core manages memory for the spi_master classdev */ 622/* the spi driver core manages memory for the spi_controller classdev */
608extern struct spi_master * 623extern struct spi_controller *__spi_alloc_controller(struct device *host,
609spi_alloc_master(struct device *host, unsigned size); 624 unsigned int size, bool slave);
610 625
611extern int spi_register_master(struct spi_master *master); 626static inline struct spi_controller *spi_alloc_master(struct device *host,
612extern int devm_spi_register_master(struct device *dev, 627 unsigned int size)
613 struct spi_master *master); 628{
614extern void spi_unregister_master(struct spi_master *master); 629 return __spi_alloc_controller(host, size, false);
630}
615 631
616extern struct spi_master *spi_busnum_to_master(u16 busnum); 632static inline struct spi_controller *spi_alloc_slave(struct device *host,
633 unsigned int size)
634{
635 if (!IS_ENABLED(CONFIG_SPI_SLAVE))
636 return NULL;
637
638 return __spi_alloc_controller(host, size, true);
639}
640
641extern int spi_register_controller(struct spi_controller *ctlr);
642extern int devm_spi_register_controller(struct device *dev,
643 struct spi_controller *ctlr);
644extern void spi_unregister_controller(struct spi_controller *ctlr);
645
646extern struct spi_controller *spi_busnum_to_master(u16 busnum);
617 647
618/* 648/*
619 * SPI resource management while processing a SPI message 649 * SPI resource management while processing a SPI message
620 */ 650 */
621 651
622typedef void (*spi_res_release_t)(struct spi_master *master, 652typedef void (*spi_res_release_t)(struct spi_controller *ctlr,
623 struct spi_message *msg, 653 struct spi_message *msg,
624 void *res); 654 void *res);
625 655
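A hedged sketch of the new slave-side flow in a controller driver's probe; the private struct and the elided ops are placeholders:

    #include <linux/module.h>
    #include <linux/platform_device.h>
    #include <linux/spi/spi.h>

    struct example_priv {
            void __iomem *base;
    };

    static int example_probe(struct platform_device *pdev)
    {
            struct spi_controller *ctlr;
            int ret;

            /* Returns NULL when CONFIG_SPI_SLAVE is disabled. */
            ctlr = spi_alloc_slave(&pdev->dev, sizeof(struct example_priv));
            if (!ctlr)
                    return -ENOMEM;

            ctlr->dev.of_node = pdev->dev.of_node;
            ctlr->num_chipselect = 1;
            /* ->transfer_one and ->slave_abort would be wired up here;
             * shared code can branch on spi_controller_is_slave(ctlr).
             */

            ret = devm_spi_register_controller(&pdev->dev, ctlr);
            if (ret)
                    spi_controller_put(ctlr);
            return ret;
    }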
@@ -644,7 +674,7 @@ extern void *spi_res_alloc(struct spi_device *spi,
644extern void spi_res_add(struct spi_message *message, void *res); 674extern void spi_res_add(struct spi_message *message, void *res);
645extern void spi_res_free(void *res); 675extern void spi_res_free(void *res);
646 676
647extern void spi_res_release(struct spi_master *master, 677extern void spi_res_release(struct spi_controller *ctlr,
648 struct spi_message *message); 678 struct spi_message *message);
649 679
650/*---------------------------------------------------------------------------*/ 680/*---------------------------------------------------------------------------*/
@@ -828,7 +858,7 @@ struct spi_message {
828 858
829 /* for optional use by whatever driver currently owns the 859 /* for optional use by whatever driver currently owns the
830 * spi_message ... between calls to spi_async and then later 860 * spi_message ... between calls to spi_async and then later
831 * complete(), that's the spi_master controller driver. 861 * complete(), that's the spi_controller controller driver.
832 */ 862 */
833 struct list_head queue; 863 struct list_head queue;
834 void *state; 864 void *state;
@@ -912,25 +942,27 @@ extern int spi_setup(struct spi_device *spi);
912extern int spi_async(struct spi_device *spi, struct spi_message *message); 942extern int spi_async(struct spi_device *spi, struct spi_message *message);
913extern int spi_async_locked(struct spi_device *spi, 943extern int spi_async_locked(struct spi_device *spi,
914 struct spi_message *message); 944 struct spi_message *message);
945extern int spi_slave_abort(struct spi_device *spi);
915 946
916static inline size_t 947static inline size_t
917spi_max_message_size(struct spi_device *spi) 948spi_max_message_size(struct spi_device *spi)
918{ 949{
919 struct spi_master *master = spi->master; 950 struct spi_controller *ctlr = spi->controller;
920 if (!master->max_message_size) 951
952 if (!ctlr->max_message_size)
921 return SIZE_MAX; 953 return SIZE_MAX;
922 return master->max_message_size(spi); 954 return ctlr->max_message_size(spi);
923} 955}
924 956
925static inline size_t 957static inline size_t
926spi_max_transfer_size(struct spi_device *spi) 958spi_max_transfer_size(struct spi_device *spi)
927{ 959{
928 struct spi_master *master = spi->master; 960 struct spi_controller *ctlr = spi->controller;
929 size_t tr_max = SIZE_MAX; 961 size_t tr_max = SIZE_MAX;
930 size_t msg_max = spi_max_message_size(spi); 962 size_t msg_max = spi_max_message_size(spi);
931 963
932 if (master->max_transfer_size) 964 if (ctlr->max_transfer_size)
933 tr_max = master->max_transfer_size(spi); 965 tr_max = ctlr->max_transfer_size(spi);
934 966
935 /* transfer size limit must not be greater than message size limit */ 967 /* transfer size limit must not be greater than message size limit */
936 return min(tr_max, msg_max); 968 return min(tr_max, msg_max);
@@ -941,7 +973,7 @@ spi_max_transfer_size(struct spi_device *spi)
941/* SPI transfer replacement methods which make use of spi_res */ 973/* SPI transfer replacement methods which make use of spi_res */
942 974
943struct spi_replaced_transfers; 975struct spi_replaced_transfers;
944typedef void (*spi_replaced_release_t)(struct spi_master *master, 976typedef void (*spi_replaced_release_t)(struct spi_controller *ctlr,
945 struct spi_message *msg, 977 struct spi_message *msg,
946 struct spi_replaced_transfers *res); 978 struct spi_replaced_transfers *res);
947/** 979/**
@@ -985,7 +1017,7 @@ extern struct spi_replaced_transfers *spi_replace_transfers(
985 1017
986/* SPI transfer transformation methods */ 1018/* SPI transfer transformation methods */
987 1019
988extern int spi_split_transfers_maxsize(struct spi_master *master, 1020extern int spi_split_transfers_maxsize(struct spi_controller *ctlr,
989 struct spi_message *msg, 1021 struct spi_message *msg,
990 size_t maxsize, 1022 size_t maxsize,
991 gfp_t gfp); 1023 gfp_t gfp);
@@ -999,8 +1031,8 @@ extern int spi_split_transfers_maxsize(struct spi_master *master,
999 1031
1000extern int spi_sync(struct spi_device *spi, struct spi_message *message); 1032extern int spi_sync(struct spi_device *spi, struct spi_message *message);
1001extern int spi_sync_locked(struct spi_device *spi, struct spi_message *message); 1033extern int spi_sync_locked(struct spi_device *spi, struct spi_message *message);
1002extern int spi_bus_lock(struct spi_master *master); 1034extern int spi_bus_lock(struct spi_controller *ctlr);
1003extern int spi_bus_unlock(struct spi_master *master); 1035extern int spi_bus_unlock(struct spi_controller *ctlr);
1004 1036
1005/** 1037/**
1006 * spi_sync_transfer - synchronous SPI data transfer 1038 * spi_sync_transfer - synchronous SPI data transfer
@@ -1185,9 +1217,9 @@ struct spi_flash_read_message {
1185/* SPI core interface for flash read support */ 1217/* SPI core interface for flash read support */
1186static inline bool spi_flash_read_supported(struct spi_device *spi) 1218static inline bool spi_flash_read_supported(struct spi_device *spi)
1187{ 1219{
1188 return spi->master->spi_flash_read && 1220 return spi->controller->spi_flash_read &&
1189 (!spi->master->flash_read_supported || 1221 (!spi->controller->flash_read_supported ||
1190 spi->master->flash_read_supported(spi)); 1222 spi->controller->flash_read_supported(spi));
1191} 1223}
1192 1224
1193int spi_flash_read(struct spi_device *spi, 1225int spi_flash_read(struct spi_device *spi,
@@ -1220,7 +1252,7 @@ int spi_flash_read(struct spi_device *spi,
1220 * @irq: Initializes spi_device.irq; depends on how the board is wired. 1252 * @irq: Initializes spi_device.irq; depends on how the board is wired.
1221 * @max_speed_hz: Initializes spi_device.max_speed_hz; based on limits 1253 * @max_speed_hz: Initializes spi_device.max_speed_hz; based on limits
1222 * from the chip datasheet and board-specific signal quality issues. 1254 * from the chip datasheet and board-specific signal quality issues.
1223 * @bus_num: Identifies which spi_master parents the spi_device; unused 1255 * @bus_num: Identifies which spi_controller parents the spi_device; unused
1224 * by spi_new_device(), and otherwise depends on board wiring. 1256 * by spi_new_device(), and otherwise depends on board wiring.
1225 * @chip_select: Initializes spi_device.chip_select; depends on how 1257 * @chip_select: Initializes spi_device.chip_select; depends on how
1226 * the board is wired. 1258 * the board is wired.
@@ -1261,7 +1293,7 @@ struct spi_board_info {
1261 1293
1262 1294
1263 /* bus_num is board specific and matches the bus_num of some 1295 /* bus_num is board specific and matches the bus_num of some
1264 * spi_master that will probably be registered later. 1296 * spi_controller that will probably be registered later.
1265 * 1297 *
1266 * chip_select reflects how this chip is wired to that master; 1298 * chip_select reflects how this chip is wired to that master;
1267 * it's less than num_chipselect. 1299 * it's less than num_chipselect.
@@ -1295,7 +1327,7 @@ spi_register_board_info(struct spi_board_info const *info, unsigned n)
1295/* If you're hotplugging an adapter with devices (parport, usb, etc) 1327/* If you're hotplugging an adapter with devices (parport, usb, etc)
1296 * use spi_new_device() to describe each device. You can also call 1328 * use spi_new_device() to describe each device. You can also call
1297 * spi_unregister_device() to start making that device vanish, but 1329 * spi_unregister_device() to start making that device vanish, but
1298 * normally that would be handled by spi_unregister_master(). 1330 * normally that would be handled by spi_unregister_controller().
1299 * 1331 *
1300 * You can also use spi_alloc_device() and spi_add_device() to use a two 1332 * You can also use spi_alloc_device() and spi_add_device() to use a two
1301 * stage registration sequence for each spi_device. This gives the caller 1333 * stage registration sequence for each spi_device. This gives the caller
@@ -1304,13 +1336,13 @@ spi_register_board_info(struct spi_board_info const *info, unsigned n)
1304 * be defined using the board info. 1336 * be defined using the board info.
1305 */ 1337 */
1306extern struct spi_device * 1338extern struct spi_device *
1307spi_alloc_device(struct spi_master *master); 1339spi_alloc_device(struct spi_controller *ctlr);
1308 1340
1309extern int 1341extern int
1310spi_add_device(struct spi_device *spi); 1342spi_add_device(struct spi_device *spi);
1311 1343
1312extern struct spi_device * 1344extern struct spi_device *
1313spi_new_device(struct spi_master *, struct spi_board_info *); 1345spi_new_device(struct spi_controller *, struct spi_board_info *);
1314 1346
1315extern void spi_unregister_device(struct spi_device *spi); 1347extern void spi_unregister_device(struct spi_device *spi);
1316 1348
@@ -1318,9 +1350,32 @@ extern const struct spi_device_id *
1318spi_get_device_id(const struct spi_device *sdev); 1350spi_get_device_id(const struct spi_device *sdev);
1319 1351
1320static inline bool 1352static inline bool
1321spi_transfer_is_last(struct spi_master *master, struct spi_transfer *xfer) 1353spi_transfer_is_last(struct spi_controller *ctlr, struct spi_transfer *xfer)
1322{ 1354{
1323 return list_is_last(&xfer->transfer_list, &master->cur_msg->transfers); 1355 return list_is_last(&xfer->transfer_list, &ctlr->cur_msg->transfers);
1324} 1356}
1325 1357
1358
1359/* Compatibility layer */
1360#define spi_master spi_controller
1361
1362#define SPI_MASTER_HALF_DUPLEX SPI_CONTROLLER_HALF_DUPLEX
1363#define SPI_MASTER_NO_RX SPI_CONTROLLER_NO_RX
1364#define SPI_MASTER_NO_TX SPI_CONTROLLER_NO_TX
1365#define SPI_MASTER_MUST_RX SPI_CONTROLLER_MUST_RX
1366#define SPI_MASTER_MUST_TX SPI_CONTROLLER_MUST_TX
1367
1368#define spi_master_get_devdata(_ctlr) spi_controller_get_devdata(_ctlr)
1369#define spi_master_set_devdata(_ctlr, _data) \
1370 spi_controller_set_devdata(_ctlr, _data)
1371#define spi_master_get(_ctlr) spi_controller_get(_ctlr)
1372#define spi_master_put(_ctlr) spi_controller_put(_ctlr)
1373#define spi_master_suspend(_ctlr) spi_controller_suspend(_ctlr)
1374#define spi_master_resume(_ctlr) spi_controller_resume(_ctlr)
1375
1376#define spi_register_master(_ctlr) spi_register_controller(_ctlr)
1377#define devm_spi_register_master(_dev, _ctlr) \
1378 devm_spi_register_controller(_dev, _ctlr)
1379#define spi_unregister_master(_ctlr) spi_unregister_controller(_ctlr)
1380
1326#endif /* __LINUX_SPI_H */ 1381#endif /* __LINUX_SPI_H */
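Because of the compatibility defines, master-only drivers compile unchanged; this legacy-style fragment now expands onto the spi_controller API (a sketch):

    #include <linux/spi/spi.h>

    static int legacy_style_probe(struct device *dev)
    {
            struct spi_master *master;      /* really struct spi_controller */

            master = spi_alloc_master(dev, 0);
            if (!master)
                    return -ENOMEM;

            /* Expands to devm_spi_register_controller(). */
            return devm_spi_register_master(dev, master);
    }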
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index 167ad8831aaf..4c1d5f7e62c4 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -172,9 +172,7 @@ static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp)
172{ 172{
173 int retval; 173 int retval;
174 174
175 preempt_disable();
176 retval = __srcu_read_lock(sp); 175 retval = __srcu_read_lock(sp);
177 preempt_enable();
178 rcu_lock_acquire(&(sp)->dep_map); 176 rcu_lock_acquire(&(sp)->dep_map);
179 return retval; 177 return retval;
180} 178}
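The reader pattern is unchanged by dropping the preempt_disable() pair; for reference:

    #include <linux/srcu.h>

    DEFINE_STATIC_SRCU(example_srcu);

    static void example_reader(void)
    {
            int idx;

            idx = srcu_read_lock(&example_srcu);
            /* ... dereference SRCU-protected data; sleeping is allowed ... */
            srcu_read_unlock(&example_srcu, idx);
    }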
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index 94631026f79c..11cef5a7bc87 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -336,7 +336,8 @@ xdr_argsize_check(struct svc_rqst *rqstp, __be32 *p)
336{ 336{
337 char *cp = (char *)p; 337 char *cp = (char *)p;
338 struct kvec *vec = &rqstp->rq_arg.head[0]; 338 struct kvec *vec = &rqstp->rq_arg.head[0];
339 return cp == (char *)vec->iov_base + vec->iov_len; 339 return cp >= (char*)vec->iov_base
340 && cp <= (char*)vec->iov_base + vec->iov_len;
340} 341}
341 342
342static inline int 343static inline int
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 0b1cf32edfd7..d9718378a8be 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -189,8 +189,6 @@ struct platform_suspend_ops {
189struct platform_freeze_ops { 189struct platform_freeze_ops {
190 int (*begin)(void); 190 int (*begin)(void);
191 int (*prepare)(void); 191 int (*prepare)(void);
192 void (*wake)(void);
193 void (*sync)(void);
194 void (*restore)(void); 192 void (*restore)(void);
195 void (*end)(void); 193 void (*end)(void);
196}; 194};
@@ -430,8 +428,7 @@ extern unsigned int pm_wakeup_irq;
430 428
431extern bool pm_wakeup_pending(void); 429extern bool pm_wakeup_pending(void);
432extern void pm_system_wakeup(void); 430extern void pm_system_wakeup(void);
433extern void pm_system_cancel_wakeup(void); 431extern void pm_wakeup_clear(void);
434extern void pm_wakeup_clear(bool reset);
435extern void pm_system_irq_wakeup(unsigned int irq_number); 432extern void pm_system_irq_wakeup(unsigned int irq_number);
436extern bool pm_get_wakeup_count(unsigned int *count, bool block); 433extern bool pm_get_wakeup_count(unsigned int *count, bool block);
437extern bool pm_save_wakeup_count(unsigned int count); 434extern bool pm_save_wakeup_count(unsigned int count);
@@ -481,7 +478,7 @@ static inline int unregister_pm_notifier(struct notifier_block *nb)
481 478
482static inline bool pm_wakeup_pending(void) { return false; } 479static inline bool pm_wakeup_pending(void) { return false; }
483static inline void pm_system_wakeup(void) {} 480static inline void pm_system_wakeup(void) {}
484static inline void pm_wakeup_clear(bool reset) {} 481static inline void pm_wakeup_clear(void) {}
485static inline void pm_system_irq_wakeup(unsigned int irq_number) {} 482static inline void pm_system_irq_wakeup(unsigned int irq_number) {}
486 483
487static inline void lock_system_sleep(void) {} 484static inline void lock_system_sleep(void) {}
diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h
index 110f4532188c..f7043ccca81c 100644
--- a/include/linux/timekeeper_internal.h
+++ b/include/linux/timekeeper_internal.h
@@ -29,7 +29,6 @@
29 */ 29 */
30struct tk_read_base { 30struct tk_read_base {
31 struct clocksource *clock; 31 struct clocksource *clock;
32 u64 (*read)(struct clocksource *cs);
33 u64 mask; 32 u64 mask;
34 u64 cycle_last; 33 u64 cycle_last;
35 u32 mult; 34 u32 mult;
@@ -58,7 +57,7 @@ struct tk_read_base {
58 * interval. 57 * interval.
59 * @xtime_remainder: Shifted nano seconds left over when rounding 58 * @xtime_remainder: Shifted nano seconds left over when rounding
60 * @cycle_interval 59 * @cycle_interval
61 * @raw_interval: Raw nano seconds accumulated per NTP interval. 60 * @raw_interval: Shifted raw nano seconds accumulated per NTP interval.
62 * @ntp_error: Difference between accumulated time and NTP time in ntp 61 * @ntp_error: Difference between accumulated time and NTP time in ntp
63 * shifted nano seconds. 62 * shifted nano seconds.
64 * @ntp_error_shift: Shift conversion between clock shifted nano seconds and 63 * @ntp_error_shift: Shift conversion between clock shifted nano seconds and
@@ -100,7 +99,7 @@ struct timekeeper {
100 u64 cycle_interval; 99 u64 cycle_interval;
101 u64 xtime_interval; 100 u64 xtime_interval;
102 s64 xtime_remainder; 101 s64 xtime_remainder;
103 u32 raw_interval; 102 u64 raw_interval;
104 /* The ntp_tick_length() value currently being used. 103 /* The ntp_tick_length() value currently being used.
105 * This cached copy ensures we consistently apply the tick 104 * This cached copy ensures we consistently apply the tick
106 * length for an entire tick, as ntp_tick_length may change 105 * length for an entire tick, as ntp_tick_length may change
diff --git a/include/linux/tty.h b/include/linux/tty.h
index d07cd2105a6c..eccb4ec30a8a 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -558,6 +558,15 @@ extern struct device *tty_port_register_device_attr(struct tty_port *port,
558 struct tty_driver *driver, unsigned index, 558 struct tty_driver *driver, unsigned index,
559 struct device *device, void *drvdata, 559 struct device *device, void *drvdata,
560 const struct attribute_group **attr_grp); 560 const struct attribute_group **attr_grp);
561extern struct device *tty_port_register_device_serdev(struct tty_port *port,
562 struct tty_driver *driver, unsigned index,
563 struct device *device);
564extern struct device *tty_port_register_device_attr_serdev(struct tty_port *port,
565 struct tty_driver *driver, unsigned index,
566 struct device *device, void *drvdata,
567 const struct attribute_group **attr_grp);
568extern void tty_port_unregister_device(struct tty_port *port,
569 struct tty_driver *driver, unsigned index);
561extern int tty_port_alloc_xmit_buf(struct tty_port *port); 570extern int tty_port_alloc_xmit_buf(struct tty_port *port);
562extern void tty_port_free_xmit_buf(struct tty_port *port); 571extern void tty_port_free_xmit_buf(struct tty_port *port);
563extern void tty_port_destroy(struct tty_port *port); 572extern void tty_port_destroy(struct tty_port *port);
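The *_serdev variants let a tty driver expose a port that the serdev core may claim in place of a /dev/tty node; a registration sketch (setup of the port and driver is elided):

    #include <linux/tty.h>

    static struct device *example_register_port(struct tty_port *port,
                                                struct tty_driver *drv,
                                                unsigned int index,
                                                struct device *parent)
    {
            /* May hand the port to serdev instead of creating a tty dev. */
            return tty_port_register_device_serdev(port, drv, index, parent);
    }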
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index a469999a106d..50398b69ca44 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -148,6 +148,7 @@ struct usb_hcd {
148 unsigned rh_registered:1;/* is root hub registered? */ 148 unsigned rh_registered:1;/* is root hub registered? */
149 unsigned rh_pollable:1; /* may we poll the root hub? */ 149 unsigned rh_pollable:1; /* may we poll the root hub? */
150 unsigned msix_enabled:1; /* driver has MSI-X enabled? */ 150 unsigned msix_enabled:1; /* driver has MSI-X enabled? */
151 unsigned msi_enabled:1; /* driver has MSI enabled? */
151 unsigned remove_phy:1; /* auto-remove USB phy */ 152 unsigned remove_phy:1; /* auto-remove USB phy */
152 153
153 /* The next flag is a stopgap, to be removed when all the HCDs 154 /* The next flag is a stopgap, to be removed when all the HCDs
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
index 7dffa5624ea6..97116379db5f 100644
--- a/include/linux/usb/usbnet.h
+++ b/include/linux/usb/usbnet.h
@@ -206,6 +206,7 @@ struct cdc_state {
206}; 206};
207 207
208extern int usbnet_generic_cdc_bind(struct usbnet *, struct usb_interface *); 208extern int usbnet_generic_cdc_bind(struct usbnet *, struct usb_interface *);
209extern int usbnet_ether_cdc_bind(struct usbnet *dev, struct usb_interface *intf);
209extern int usbnet_cdc_bind(struct usbnet *, struct usb_interface *); 210extern int usbnet_cdc_bind(struct usbnet *, struct usb_interface *);
210extern void usbnet_cdc_unbind(struct usbnet *, struct usb_interface *); 211extern void usbnet_cdc_unbind(struct usbnet *, struct usb_interface *);
211extern void usbnet_cdc_status(struct usbnet *, struct urb *); 212extern void usbnet_cdc_status(struct usbnet *, struct urb *);
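A sketch of a usbnet minidriver using the new helper, which does the generic CDC parsing plus the Ethernet-specific setup in one call; everything around the bind hook is illustrative:

    #include <linux/usb/usbnet.h>

    static int example_bind(struct usbnet *dev, struct usb_interface *intf)
    {
            return usbnet_ether_cdc_bind(dev, intf);
    }

    static const struct driver_info example_cdc_info = {
            .description = "example CDC Ethernet device",
            .flags       = FLAG_ETHER,
            .bind        = example_bind,
            .unbind      = usbnet_cdc_unbind,
            .status      = usbnet_cdc_status,
    };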
diff --git a/include/media/cec-notifier.h b/include/media/cec-notifier.h
index eb50ce54b759..298f996969df 100644
--- a/include/media/cec-notifier.h
+++ b/include/media/cec-notifier.h
@@ -29,7 +29,7 @@ struct edid;
29struct cec_adapter; 29struct cec_adapter;
30struct cec_notifier; 30struct cec_notifier;
31 31
32#ifdef CONFIG_MEDIA_CEC_NOTIFIER 32#if IS_REACHABLE(CONFIG_CEC_CORE) && IS_ENABLED(CONFIG_CEC_NOTIFIER)
33 33
34/** 34/**
35 * cec_notifier_get - find or create a new cec_notifier for the given device. 35 * cec_notifier_get - find or create a new cec_notifier for the given device.
@@ -106,6 +106,16 @@ static inline void cec_notifier_set_phys_addr_from_edid(struct cec_notifier *n,
106{ 106{
107} 107}
108 108
109static inline void cec_notifier_register(struct cec_notifier *n,
110 struct cec_adapter *adap,
111 void (*callback)(struct cec_adapter *adap, u16 pa))
112{
113}
114
115static inline void cec_notifier_unregister(struct cec_notifier *n)
116{
117}
118
109#endif 119#endif
110 120
111#endif 121#endif
diff --git a/include/media/cec.h b/include/media/cec.h
index b8eb895731d5..201f060978da 100644
--- a/include/media/cec.h
+++ b/include/media/cec.h
@@ -173,7 +173,7 @@ struct cec_adapter {
173 bool passthrough; 173 bool passthrough;
174 struct cec_log_addrs log_addrs; 174 struct cec_log_addrs log_addrs;
175 175
176#ifdef CONFIG_MEDIA_CEC_NOTIFIER 176#ifdef CONFIG_CEC_NOTIFIER
177 struct cec_notifier *notifier; 177 struct cec_notifier *notifier;
178#endif 178#endif
179 179
@@ -206,7 +206,7 @@ static inline bool cec_is_sink(const struct cec_adapter *adap)
206#define cec_phys_addr_exp(pa) \ 206#define cec_phys_addr_exp(pa) \
207 ((pa) >> 12), ((pa) >> 8) & 0xf, ((pa) >> 4) & 0xf, (pa) & 0xf 207 ((pa) >> 12), ((pa) >> 8) & 0xf, ((pa) >> 4) & 0xf, (pa) & 0xf
208 208
209#if IS_ENABLED(CONFIG_CEC_CORE) 209#if IS_REACHABLE(CONFIG_CEC_CORE)
210struct cec_adapter *cec_allocate_adapter(const struct cec_adap_ops *ops, 210struct cec_adapter *cec_allocate_adapter(const struct cec_adap_ops *ops,
211 void *priv, const char *name, u32 caps, u8 available_las); 211 void *priv, const char *name, u32 caps, u8 available_las);
212int cec_register_adapter(struct cec_adapter *adap, struct device *parent); 212int cec_register_adapter(struct cec_adapter *adap, struct device *parent);
@@ -300,7 +300,7 @@ u16 cec_phys_addr_for_input(u16 phys_addr, u8 input);
300 */ 300 */
301int cec_phys_addr_validate(u16 phys_addr, u16 *parent, u16 *port); 301int cec_phys_addr_validate(u16 phys_addr, u16 *parent, u16 *port);
302 302
303#ifdef CONFIG_MEDIA_CEC_NOTIFIER 303#ifdef CONFIG_CEC_NOTIFIER
304void cec_register_cec_notifier(struct cec_adapter *adap, 304void cec_register_cec_notifier(struct cec_adapter *adap,
305 struct cec_notifier *notifier); 305 struct cec_notifier *notifier);
306#endif 306#endif
diff --git a/include/net/dst.h b/include/net/dst.h
index 049af33da3b6..cfc043784166 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -107,10 +107,16 @@ struct dst_entry {
107 }; 107 };
108}; 108};
109 109
110struct dst_metrics {
111 u32 metrics[RTAX_MAX];
112 atomic_t refcnt;
113};
114extern const struct dst_metrics dst_default_metrics;
115
110u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old); 116u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old);
111extern const u32 dst_default_metrics[];
112 117
113#define DST_METRICS_READ_ONLY 0x1UL 118#define DST_METRICS_READ_ONLY 0x1UL
119#define DST_METRICS_REFCOUNTED 0x2UL
114#define DST_METRICS_FLAGS 0x3UL 120#define DST_METRICS_FLAGS 0x3UL
115#define __DST_METRICS_PTR(Y) \ 121#define __DST_METRICS_PTR(Y) \
116 ((u32 *)((Y) & ~DST_METRICS_FLAGS)) 122 ((u32 *)((Y) & ~DST_METRICS_FLAGS))
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 6692c5758b33..f7f6aa789c61 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -114,11 +114,11 @@ struct fib_info {
114 __be32 fib_prefsrc; 114 __be32 fib_prefsrc;
115 u32 fib_tb_id; 115 u32 fib_tb_id;
116 u32 fib_priority; 116 u32 fib_priority;
117 u32 *fib_metrics; 117 struct dst_metrics *fib_metrics;
118#define fib_mtu fib_metrics[RTAX_MTU-1] 118#define fib_mtu fib_metrics->metrics[RTAX_MTU-1]
119#define fib_window fib_metrics[RTAX_WINDOW-1] 119#define fib_window fib_metrics->metrics[RTAX_WINDOW-1]
120#define fib_rtt fib_metrics[RTAX_RTT-1] 120#define fib_rtt fib_metrics->metrics[RTAX_RTT-1]
121#define fib_advmss fib_metrics[RTAX_ADVMSS-1] 121#define fib_advmss fib_metrics->metrics[RTAX_ADVMSS-1]
122 int fib_nhs; 122 int fib_nhs;
123#ifdef CONFIG_IP_ROUTE_MULTIPATH 123#ifdef CONFIG_IP_ROUTE_MULTIPATH
124 int fib_weight; 124 int fib_weight;
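The metrics array now lives inside a refcounted wrapper, and the fib_* macros dereference through ->metrics. Loosely modeled on how the IPv4 route code pins a fib_info's metrics before pointing a dst_entry at them:

    #include <net/dst.h>
    #include <net/ip_fib.h>

    static void example_attach_metrics(struct dst_entry *dst,
                                       struct fib_info *fi)
    {
            if (fi->fib_metrics != &dst_default_metrics) {
                    /* Pin the block and tell the dst release path that
                     * a reference must be dropped later.
                     */
                    atomic_inc(&fi->fib_metrics->refcnt);
                    dst_init_metrics(dst, fi->fib_metrics->metrics, true);
                    dst->_metrics |= DST_METRICS_REFCOUNTED;
            }
    }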
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index dbf0abba33b8..3e505bbff8ca 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -1007,6 +1007,7 @@ int inet6_hash_connect(struct inet_timewait_death_row *death_row,
1007 */ 1007 */
1008extern const struct proto_ops inet6_stream_ops; 1008extern const struct proto_ops inet6_stream_ops;
1009extern const struct proto_ops inet6_dgram_ops; 1009extern const struct proto_ops inet6_dgram_ops;
1010extern const struct proto_ops inet6_sockraw_ops;
1010 1011
1011struct group_source_req; 1012struct group_source_req;
1012struct group_filter; 1013struct group_filter;
diff --git a/include/net/netfilter/nf_conntrack_helper.h b/include/net/netfilter/nf_conntrack_helper.h
index e04fa7691e5d..c519bb5b5bb8 100644
--- a/include/net/netfilter/nf_conntrack_helper.h
+++ b/include/net/netfilter/nf_conntrack_helper.h
@@ -9,6 +9,7 @@
9 9
10#ifndef _NF_CONNTRACK_HELPER_H 10#ifndef _NF_CONNTRACK_HELPER_H
11#define _NF_CONNTRACK_HELPER_H 11#define _NF_CONNTRACK_HELPER_H
12#include <linux/refcount.h>
12#include <net/netfilter/nf_conntrack.h> 13#include <net/netfilter/nf_conntrack.h>
13#include <net/netfilter/nf_conntrack_extend.h> 14#include <net/netfilter/nf_conntrack_extend.h>
14#include <net/netfilter/nf_conntrack_expect.h> 15#include <net/netfilter/nf_conntrack_expect.h>
@@ -26,6 +27,7 @@ struct nf_conntrack_helper {
26 struct hlist_node hnode; /* Internal use. */ 27 struct hlist_node hnode; /* Internal use. */
27 28
28 char name[NF_CT_HELPER_NAME_LEN]; /* name of the module */ 29 char name[NF_CT_HELPER_NAME_LEN]; /* name of the module */
30 refcount_t refcnt;
29 struct module *me; /* pointer to self */ 31 struct module *me; /* pointer to self */
30 const struct nf_conntrack_expect_policy *expect_policy; 32 const struct nf_conntrack_expect_policy *expect_policy;
31 33
@@ -79,6 +81,8 @@ struct nf_conntrack_helper *__nf_conntrack_helper_find(const char *name,
79struct nf_conntrack_helper *nf_conntrack_helper_try_module_get(const char *name, 81struct nf_conntrack_helper *nf_conntrack_helper_try_module_get(const char *name,
80 u16 l3num, 82 u16 l3num,
81 u8 protonum); 83 u8 protonum);
84void nf_conntrack_helper_put(struct nf_conntrack_helper *helper);
85
82void nf_ct_helper_init(struct nf_conntrack_helper *helper, 86void nf_ct_helper_init(struct nf_conntrack_helper *helper,
83 u16 l3num, u16 protonum, const char *name, 87 u16 l3num, u16 protonum, const char *name,
84 u16 default_port, u16 spec_port, u32 id, 88 u16 default_port, u16 spec_port, u32 id,
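Helpers now carry their own refcount_t alongside the module reference; a lookup pairs with the new put, sketched here:

    #include <linux/in.h>
    #include <linux/socket.h>
    #include <net/netfilter/nf_conntrack_helper.h>

    static int example_use_ftp_helper(void)
    {
            struct nf_conntrack_helper *helper;

            /* Takes the module ref and, after this change, a refcnt ref. */
            helper = nf_conntrack_helper_try_module_get("ftp", AF_INET,
                                                        IPPROTO_TCP);
            if (!helper)
                    return -ENOENT;

            /* ... attach the helper to a conntrack entry ... */

            nf_conntrack_helper_put(helper);        /* drops both */
            return 0;
    }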
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 028faec8fc27..8a8bab8d7b15 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -176,7 +176,7 @@ struct nft_data_desc {
176int nft_data_init(const struct nft_ctx *ctx, 176int nft_data_init(const struct nft_ctx *ctx,
177 struct nft_data *data, unsigned int size, 177 struct nft_data *data, unsigned int size,
178 struct nft_data_desc *desc, const struct nlattr *nla); 178 struct nft_data_desc *desc, const struct nlattr *nla);
179void nft_data_uninit(const struct nft_data *data, enum nft_data_types type); 179void nft_data_release(const struct nft_data *data, enum nft_data_types type);
180int nft_data_dump(struct sk_buff *skb, int attr, const struct nft_data *data, 180int nft_data_dump(struct sk_buff *skb, int attr, const struct nft_data *data,
181 enum nft_data_types type, unsigned int len); 181 enum nft_data_types type, unsigned int len);
182 182
diff --git a/include/net/tc_act/tc_csum.h b/include/net/tc_act/tc_csum.h
index f31fb6331a53..3248beaf16b0 100644
--- a/include/net/tc_act/tc_csum.h
+++ b/include/net/tc_act/tc_csum.h
@@ -3,6 +3,7 @@
3 3
4#include <linux/types.h> 4#include <linux/types.h>
5#include <net/act_api.h> 5#include <net/act_api.h>
6#include <linux/tc_act/tc_csum.h>
6 7
7struct tcf_csum { 8struct tcf_csum {
8 struct tc_action common; 9 struct tc_action common;
@@ -11,4 +12,18 @@ struct tcf_csum {
11}; 12};
12#define to_tcf_csum(a) ((struct tcf_csum *)a) 13#define to_tcf_csum(a) ((struct tcf_csum *)a)
13 14
15static inline bool is_tcf_csum(const struct tc_action *a)
16{
17#ifdef CONFIG_NET_CLS_ACT
18 if (a->ops && a->ops->type == TCA_ACT_CSUM)
19 return true;
20#endif
21 return false;
22}
23
24static inline u32 tcf_csum_update_flags(const struct tc_action *a)
25{
26 return to_tcf_csum(a)->update_flags;
27}
28
14#endif /* __NET_TC_CSUM_H */ 29#endif /* __NET_TC_CSUM_H */
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 38a7427ae902..be6223c586fa 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -924,7 +924,7 @@ struct tcp_congestion_ops {
924 void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev); 924 void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
925 /* call when ack arrives (optional) */ 925 /* call when ack arrives (optional) */
926 void (*in_ack_event)(struct sock *sk, u32 flags); 926 void (*in_ack_event)(struct sock *sk, u32 flags);
927 /* new value of cwnd after loss (optional) */ 927 /* new value of cwnd after loss (required) */
928 u32 (*undo_cwnd)(struct sock *sk); 928 u32 (*undo_cwnd)(struct sock *sk);
929 /* hook for packet ack accounting (optional) */ 929 /* hook for packet ack accounting (optional) */
930 void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample); 930 void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample);
diff --git a/include/net/wext.h b/include/net/wext.h
index 345911965dbb..454ff763eeba 100644
--- a/include/net/wext.h
+++ b/include/net/wext.h
@@ -6,7 +6,7 @@
6struct net; 6struct net;
7 7
8#ifdef CONFIG_WEXT_CORE 8#ifdef CONFIG_WEXT_CORE
9int wext_handle_ioctl(struct net *net, struct ifreq *ifr, unsigned int cmd, 9int wext_handle_ioctl(struct net *net, struct iwreq *iwr, unsigned int cmd,
10 void __user *arg); 10 void __user *arg);
11int compat_wext_handle_ioctl(struct net *net, unsigned int cmd, 11int compat_wext_handle_ioctl(struct net *net, unsigned int cmd,
12 unsigned long arg); 12 unsigned long arg);
@@ -14,7 +14,7 @@ int compat_wext_handle_ioctl(struct net *net, unsigned int cmd,
14struct iw_statistics *get_wireless_stats(struct net_device *dev); 14struct iw_statistics *get_wireless_stats(struct net_device *dev);
15int call_commit_handler(struct net_device *dev); 15int call_commit_handler(struct net_device *dev);
16#else 16#else
17static inline int wext_handle_ioctl(struct net *net, struct ifreq *ifr, unsigned int cmd, 17static inline int wext_handle_ioctl(struct net *net, struct iwreq *iwr, unsigned int cmd,
18 void __user *arg) 18 void __user *arg)
19{ 19{
20 return -EINVAL; 20 return -EINVAL;
diff --git a/include/net/x25.h b/include/net/x25.h
index c383aa4edbf0..6d30a01d281d 100644
--- a/include/net/x25.h
+++ b/include/net/x25.h
@@ -298,10 +298,10 @@ void x25_check_rbuf(struct sock *);
298 298
299/* sysctl_net_x25.c */ 299/* sysctl_net_x25.c */
300#ifdef CONFIG_SYSCTL 300#ifdef CONFIG_SYSCTL
301void x25_register_sysctl(void); 301int x25_register_sysctl(void);
302void x25_unregister_sysctl(void); 302void x25_unregister_sysctl(void);
303#else 303#else
304static inline void x25_register_sysctl(void) {}; 304static inline int x25_register_sysctl(void) { return 0; };
305static inline void x25_unregister_sysctl(void) {}; 305static inline void x25_unregister_sysctl(void) {};
306#endif /* CONFIG_SYSCTL */ 306#endif /* CONFIG_SYSCTL */
307 307
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 6793a30c66b1..62f5a259e597 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -979,10 +979,6 @@ struct xfrm_dst {
979 struct flow_cache_object flo; 979 struct flow_cache_object flo;
980 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX]; 980 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
981 int num_pols, num_xfrms; 981 int num_pols, num_xfrms;
982#ifdef CONFIG_XFRM_SUB_POLICY
983 struct flowi *origin;
984 struct xfrm_selector *partner;
985#endif
986 u32 xfrm_genid; 982 u32 xfrm_genid;
987 u32 policy_genid; 983 u32 policy_genid;
988 u32 route_mtu_cached; 984 u32 route_mtu_cached;
@@ -998,12 +994,6 @@ static inline void xfrm_dst_destroy(struct xfrm_dst *xdst)
998 dst_release(xdst->route); 994 dst_release(xdst->route);
999 if (likely(xdst->u.dst.xfrm)) 995 if (likely(xdst->u.dst.xfrm))
1000 xfrm_state_put(xdst->u.dst.xfrm); 996 xfrm_state_put(xdst->u.dst.xfrm);
1001#ifdef CONFIG_XFRM_SUB_POLICY
1002 kfree(xdst->origin);
1003 xdst->origin = NULL;
1004 kfree(xdst->partner);
1005 xdst->partner = NULL;
1006#endif
1007} 997}
1008#endif 998#endif
1009 999
@@ -1860,8 +1850,9 @@ static inline struct xfrm_offload *xfrm_offload(struct sk_buff *skb)
1860} 1850}
1861#endif 1851#endif
1862 1852
1863#ifdef CONFIG_XFRM_OFFLOAD
1864void __net_init xfrm_dev_init(void); 1853void __net_init xfrm_dev_init(void);
1854
1855#ifdef CONFIG_XFRM_OFFLOAD
1865int validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features); 1856int validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features);
1866int xfrm_dev_state_add(struct net *net, struct xfrm_state *x, 1857int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
1867 struct xfrm_user_offload *xuo); 1858 struct xfrm_user_offload *xuo);
@@ -1887,10 +1878,6 @@ static inline void xfrm_dev_state_free(struct xfrm_state *x)
1887 } 1878 }
1888} 1879}
1889#else 1880#else
1890static inline void __net_init xfrm_dev_init(void)
1891{
1892}
1893
1894static inline int validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features) 1881static inline int validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features)
1895{ 1882{
1896 return 0; 1883 return 0;
diff --git a/include/rdma/ib_sa.h b/include/rdma/ib_sa.h
index f5f70e345318..355b81f4242d 100644
--- a/include/rdma/ib_sa.h
+++ b/include/rdma/ib_sa.h
@@ -158,7 +158,6 @@ enum sa_path_rec_type {
158}; 158};
159 159
160struct sa_path_rec_ib { 160struct sa_path_rec_ib {
161 __be64 service_id;
162 __be16 dlid; 161 __be16 dlid;
163 __be16 slid; 162 __be16 slid;
164 u8 raw_traffic; 163 u8 raw_traffic;
@@ -174,7 +173,6 @@ struct sa_path_rec_roce {
174}; 173};
175 174
176struct sa_path_rec_opa { 175struct sa_path_rec_opa {
177 __be64 service_id;
178 __be32 dlid; 176 __be32 dlid;
179 __be32 slid; 177 __be32 slid;
180 u8 raw_traffic; 178 u8 raw_traffic;
@@ -189,6 +187,7 @@ struct sa_path_rec_opa {
189struct sa_path_rec { 187struct sa_path_rec {
190 union ib_gid dgid; 188 union ib_gid dgid;
191 union ib_gid sgid; 189 union ib_gid sgid;
190 __be64 service_id;
192 /* reserved */ 191 /* reserved */
193 __be32 flow_label; 192 __be32 flow_label;
194 u8 hop_limit; 193 u8 hop_limit;
@@ -262,7 +261,7 @@ static inline void path_conv_opa_to_ib(struct sa_path_rec *ib,
262 ib->ib.dlid = htons(ntohl(opa->opa.dlid)); 261 ib->ib.dlid = htons(ntohl(opa->opa.dlid));
263 ib->ib.slid = htons(ntohl(opa->opa.slid)); 262 ib->ib.slid = htons(ntohl(opa->opa.slid));
264 } 263 }
265 ib->ib.service_id = opa->opa.service_id; 264 ib->service_id = opa->service_id;
266 ib->ib.raw_traffic = opa->opa.raw_traffic; 265 ib->ib.raw_traffic = opa->opa.raw_traffic;
267} 266}
268 267
@@ -281,7 +280,7 @@ static inline void path_conv_ib_to_opa(struct sa_path_rec *opa,
281 } 280 }
282 opa->opa.slid = slid; 281 opa->opa.slid = slid;
283 opa->opa.dlid = dlid; 282 opa->opa.dlid = dlid;
284 opa->opa.service_id = ib->ib.service_id; 283 opa->service_id = ib->service_id;
285 opa->opa.raw_traffic = ib->ib.raw_traffic; 284 opa->opa.raw_traffic = ib->ib.raw_traffic;
286} 285}
287 286
@@ -591,15 +590,6 @@ static inline bool sa_path_is_roce(struct sa_path_rec *rec)
591 (rec->rec_type == SA_PATH_REC_TYPE_ROCE_V2)); 590 (rec->rec_type == SA_PATH_REC_TYPE_ROCE_V2));
592} 591}
593 592
594static inline void sa_path_set_service_id(struct sa_path_rec *rec,
595 __be64 service_id)
596{
597 if (rec->rec_type == SA_PATH_REC_TYPE_IB)
598 rec->ib.service_id = service_id;
599 else if (rec->rec_type == SA_PATH_REC_TYPE_OPA)
600 rec->opa.service_id = service_id;
601}
602
603static inline void sa_path_set_slid(struct sa_path_rec *rec, __be32 slid) 593static inline void sa_path_set_slid(struct sa_path_rec *rec, __be32 slid)
604{ 594{
605 if (rec->rec_type == SA_PATH_REC_TYPE_IB) 595 if (rec->rec_type == SA_PATH_REC_TYPE_IB)
@@ -625,15 +615,6 @@ static inline void sa_path_set_raw_traffic(struct sa_path_rec *rec,
625 rec->opa.raw_traffic = raw_traffic; 615 rec->opa.raw_traffic = raw_traffic;
626} 616}
627 617
628static inline __be64 sa_path_get_service_id(struct sa_path_rec *rec)
629{
630 if (rec->rec_type == SA_PATH_REC_TYPE_IB)
631 return rec->ib.service_id;
632 else if (rec->rec_type == SA_PATH_REC_TYPE_OPA)
633 return rec->opa.service_id;
634 return 0;
635}
636
637static inline __be32 sa_path_get_slid(struct sa_path_rec *rec) 618static inline __be32 sa_path_get_slid(struct sa_path_rec *rec)
638{ 619{
639 if (rec->rec_type == SA_PATH_REC_TYPE_IB) 620 if (rec->rec_type == SA_PATH_REC_TYPE_IB)
diff --git a/include/rdma/rdma_netlink.h b/include/rdma/rdma_netlink.h
index 585266144329..348c102cb5f6 100644
--- a/include/rdma/rdma_netlink.h
+++ b/include/rdma/rdma_netlink.h
@@ -10,9 +10,6 @@ struct ibnl_client_cbs {
10 struct module *module; 10 struct module *module;
11}; 11};
12 12
13int ibnl_init(void);
14void ibnl_cleanup(void);
15
16/** 13/**
17 * Add a client to the list of IB netlink exporters. 14
18 * @index: Index of the added client 15 * @index: Index of the added client
@@ -77,11 +74,4 @@ int ibnl_unicast(struct sk_buff *skb, struct nlmsghdr *nlh,
77int ibnl_multicast(struct sk_buff *skb, struct nlmsghdr *nlh, 74int ibnl_multicast(struct sk_buff *skb, struct nlmsghdr *nlh,
78 unsigned int group, gfp_t flags); 75 unsigned int group, gfp_t flags);
79 76
80/**
81 * Check if there are any listeners to the netlink group
82 * @group: the netlink group ID
83 * Returns 0 on success or a negative for no listeners.
84 */
85int ibnl_chk_listeners(unsigned int group);
86
87#endif /* _RDMA_NETLINK_H */ 77#endif /* _RDMA_NETLINK_H */
diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
index 275581d483dd..5f17fb770477 100644
--- a/include/target/iscsi/iscsi_target_core.h
+++ b/include/target/iscsi/iscsi_target_core.h
@@ -557,6 +557,7 @@ struct iscsi_conn {
557#define LOGIN_FLAGS_READ_ACTIVE 1 557#define LOGIN_FLAGS_READ_ACTIVE 1
558#define LOGIN_FLAGS_CLOSED 2 558#define LOGIN_FLAGS_CLOSED 2
559#define LOGIN_FLAGS_READY 4 559#define LOGIN_FLAGS_READY 4
560#define LOGIN_FLAGS_INITIAL_PDU 8
560 unsigned long login_flags; 561 unsigned long login_flags;
561 struct delayed_work login_work; 562 struct delayed_work login_work;
562 struct delayed_work login_cleanup_work; 563 struct delayed_work login_cleanup_work;
diff --git a/include/trace/events/spi.h b/include/trace/events/spi.h
index 7e02c983bbe2..f9f702b6ae2e 100644
--- a/include/trace/events/spi.h
+++ b/include/trace/events/spi.h
@@ -7,37 +7,37 @@
7#include <linux/ktime.h> 7#include <linux/ktime.h>
8#include <linux/tracepoint.h> 8#include <linux/tracepoint.h>
9 9
10DECLARE_EVENT_CLASS(spi_master, 10DECLARE_EVENT_CLASS(spi_controller,
11 11
12 TP_PROTO(struct spi_master *master), 12 TP_PROTO(struct spi_controller *controller),
13 13
14 TP_ARGS(master), 14 TP_ARGS(controller),
15 15
16 TP_STRUCT__entry( 16 TP_STRUCT__entry(
17 __field( int, bus_num ) 17 __field( int, bus_num )
18 ), 18 ),
19 19
20 TP_fast_assign( 20 TP_fast_assign(
21 __entry->bus_num = master->bus_num; 21 __entry->bus_num = controller->bus_num;
22 ), 22 ),
23 23
24 TP_printk("spi%d", (int)__entry->bus_num) 24 TP_printk("spi%d", (int)__entry->bus_num)
25 25
26); 26);
27 27
28DEFINE_EVENT(spi_master, spi_master_idle, 28DEFINE_EVENT(spi_controller, spi_controller_idle,
29 29
30 TP_PROTO(struct spi_master *master), 30 TP_PROTO(struct spi_controller *controller),
31 31
32 TP_ARGS(master) 32 TP_ARGS(controller)
33 33
34); 34);
35 35
36DEFINE_EVENT(spi_master, spi_master_busy, 36DEFINE_EVENT(spi_controller, spi_controller_busy,
37 37
38 TP_PROTO(struct spi_master *master), 38 TP_PROTO(struct spi_controller *controller),
39 39
40 TP_ARGS(master) 40 TP_ARGS(controller)
41 41
42); 42);
43 43
@@ -54,7 +54,7 @@ DECLARE_EVENT_CLASS(spi_message,
54 ), 54 ),
55 55
56 TP_fast_assign( 56 TP_fast_assign(
57 __entry->bus_num = msg->spi->master->bus_num; 57 __entry->bus_num = msg->spi->controller->bus_num;
58 __entry->chip_select = msg->spi->chip_select; 58 __entry->chip_select = msg->spi->chip_select;
59 __entry->msg = msg; 59 __entry->msg = msg;
60 ), 60 ),
@@ -95,7 +95,7 @@ TRACE_EVENT(spi_message_done,
95 ), 95 ),
96 96
97 TP_fast_assign( 97 TP_fast_assign(
98 __entry->bus_num = msg->spi->master->bus_num; 98 __entry->bus_num = msg->spi->controller->bus_num;
99 __entry->chip_select = msg->spi->chip_select; 99 __entry->chip_select = msg->spi->chip_select;
100 __entry->msg = msg; 100 __entry->msg = msg;
101 __entry->frame = msg->frame_length; 101 __entry->frame = msg->frame_length;
@@ -122,7 +122,7 @@ DECLARE_EVENT_CLASS(spi_transfer,
122 ), 122 ),
123 123
124 TP_fast_assign( 124 TP_fast_assign(
125 __entry->bus_num = msg->spi->master->bus_num; 125 __entry->bus_num = msg->spi->controller->bus_num;
126 __entry->chip_select = msg->spi->chip_select; 126 __entry->chip_select = msg->spi->chip_select;
127 __entry->xfer = xfer; 127 __entry->xfer = xfer;
128 __entry->len = xfer->len; 128 __entry->len = xfer->len;
diff --git a/include/uapi/linux/a.out.h b/include/uapi/linux/a.out.h
index 7caf44c7fa51..295cd3ef6330 100644
--- a/include/uapi/linux/a.out.h
+++ b/include/uapi/linux/a.out.h
@@ -112,24 +112,7 @@ enum machine_type {
112#define N_TXTADDR(x) (N_MAGIC(x) == QMAGIC ? PAGE_SIZE : 0) 112#define N_TXTADDR(x) (N_MAGIC(x) == QMAGIC ? PAGE_SIZE : 0)
113#endif 113#endif
114 114
115/* Address of data segment in memory after it is loaded. 115/* Address of data segment in memory after it is loaded. */
116 Note that it is up to you to define SEGMENT_SIZE
117 on machines not listed here. */
118#if defined(vax) || defined(hp300) || defined(pyr)
119#define SEGMENT_SIZE page_size
120#endif
121#ifdef sony
122#define SEGMENT_SIZE 0x2000
123#endif /* Sony. */
124#ifdef is68k
125#define SEGMENT_SIZE 0x20000
126#endif
127#if defined(m68k) && defined(PORTAR)
128#define PAGE_SIZE 0x400
129#define SEGMENT_SIZE PAGE_SIZE
130#endif
131
132#ifdef linux
133#ifndef __KERNEL__ 116#ifndef __KERNEL__
134#include <unistd.h> 117#include <unistd.h>
135#endif 118#endif
@@ -142,7 +125,6 @@ enum machine_type {
142#endif 125#endif
143#endif 126#endif
144#endif 127#endif
145#endif
146 128
147#define _N_SEGMENT_ROUND(x) ALIGN(x, SEGMENT_SIZE) 129#define _N_SEGMENT_ROUND(x) ALIGN(x, SEGMENT_SIZE)
148 130
@@ -260,13 +242,7 @@ struct relocation_info
260 unsigned int r_extern:1; 242 unsigned int r_extern:1;
261 /* Four bits that aren't used, but when writing an object file 243 /* Four bits that aren't used, but when writing an object file
262 it is desirable to clear them. */ 244 it is desirable to clear them. */
263#ifdef NS32K
264 unsigned r_bsr:1;
265 unsigned r_disp:1;
266 unsigned r_pad:2;
267#else
268 unsigned int r_pad:4; 245 unsigned int r_pad:4;
269#endif
270}; 246};
271#endif /* no N_RELOCATION_INFO_DECLARED. */ 247#endif /* no N_RELOCATION_INFO_DECLARED. */
272 248
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 945a1f5f63c5..94dfa9def355 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -132,6 +132,13 @@ enum bpf_attach_type {
132 */ 132 */
133#define BPF_F_ALLOW_OVERRIDE (1U << 0) 133#define BPF_F_ALLOW_OVERRIDE (1U << 0)
134 134
135/* If BPF_F_STRICT_ALIGNMENT is used in BPF_PROG_LOAD command, the
136 * verifier will perform strict alignment checking as if the kernel
137 * has been built with CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS not set,
138 * and NET_IP_ALIGN defined to 2.
139 */
140#define BPF_F_STRICT_ALIGNMENT (1U << 0)
141
135#define BPF_PSEUDO_MAP_FD 1 142#define BPF_PSEUDO_MAP_FD 1
136 143
137/* flags for BPF_MAP_UPDATE_ELEM command */ 144/* flags for BPF_MAP_UPDATE_ELEM command */
@@ -177,6 +184,7 @@ union bpf_attr {
177 __u32 log_size; /* size of user buffer */ 184 __u32 log_size; /* size of user buffer */
178 __aligned_u64 log_buf; /* user supplied buffer */ 185 __aligned_u64 log_buf; /* user supplied buffer */
179 __u32 kern_version; /* checked when prog_type=kprobe */ 186 __u32 kern_version; /* checked when prog_type=kprobe */
187 __u32 prog_flags;
180 }; 188 };
181 189
182 struct { /* anonymous struct used by BPF_OBJ_* commands */ 190 struct { /* anonymous struct used by BPF_OBJ_* commands */
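
For readers tracing the new flag end to end: prog_flags travels in the same union bpf_attr that BPF_PROG_LOAD already consumes, and the kernel/bpf/syscall.c hunk later in this patch rejects any bit other than BPF_F_STRICT_ALIGNMENT. A minimal userspace sketch of opting in, compiled against the updated uapi header (an illustration, not code from this patch; the two-instruction program just returns 0):

    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/bpf.h>

    static int load_with_strict_alignment(void)
    {
            struct bpf_insn insns[2] = {
                    { .code = BPF_ALU64 | BPF_MOV | BPF_K, .dst_reg = BPF_REG_0 },
                    { .code = BPF_JMP | BPF_EXIT },
            };
            union bpf_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
            attr.insns = (__u64)(unsigned long)insns;
            attr.insn_cnt = 2;
            attr.license = (__u64)(unsigned long)"GPL";
            attr.prog_flags = BPF_F_STRICT_ALIGNMENT;  /* the new flag */

            return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
    }
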
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index d179d7767f51..7d4a594d5d58 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -1486,8 +1486,10 @@ enum ethtool_link_mode_bit_indices {
1486 * it was forced up into this mode or autonegotiated. 1486 * it was forced up into this mode or autonegotiated.
1487 */ 1487 */
1488 1488
1489/* The forced speed, in units of 1Mb. All values 0 to INT_MAX are legal. */ 1489/* The forced speed, in units of 1Mb. All values 0 to INT_MAX are legal.
1490/* Update drivers/net/phy/phy.c:phy_speed_to_str() when adding new values */ 1490 * Update drivers/net/phy/phy.c:phy_speed_to_str() and
1491 * drivers/net/bonding/bond_3ad.c:__get_link_speed() when adding new values.
1492 */
1491#define SPEED_10 10 1493#define SPEED_10 10
1492#define SPEED_100 100 1494#define SPEED_100 100
1493#define SPEED_1000 1000 1495#define SPEED_1000 1000
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index 8e56ac70e0d1..15ac20382aba 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -888,9 +888,18 @@ enum {
888/* XDP section */ 888/* XDP section */
889 889
890#define XDP_FLAGS_UPDATE_IF_NOEXIST (1U << 0) 890#define XDP_FLAGS_UPDATE_IF_NOEXIST (1U << 0)
891#define XDP_FLAGS_SKB_MODE (2U << 0) 891#define XDP_FLAGS_SKB_MODE (1U << 1)
892#define XDP_FLAGS_DRV_MODE (1U << 2)
892#define XDP_FLAGS_MASK (XDP_FLAGS_UPDATE_IF_NOEXIST | \ 893#define XDP_FLAGS_MASK (XDP_FLAGS_UPDATE_IF_NOEXIST | \
893 XDP_FLAGS_SKB_MODE) 894 XDP_FLAGS_SKB_MODE | \
895 XDP_FLAGS_DRV_MODE)
896
897/* These are stored into IFLA_XDP_ATTACHED on dump. */
898enum {
899 XDP_ATTACHED_NONE = 0,
900 XDP_ATTACHED_DRV,
901 XDP_ATTACHED_SKB,
902};
894 903
895enum { 904enum {
896 IFLA_XDP_UNSPEC, 905 IFLA_XDP_UNSPEC,
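
Three things happen in the XDP hunk above: XDP_FLAGS_SKB_MODE keeps its value (2) but is rewritten from the misleading (2U << 0) to (1U << 1) so it reads as a single bit, a genuine XDP_FLAGS_DRV_MODE bit is added next to it, and the new enum lets a dump report which mode is attached. A compile-time sanity check of the bit layout (an illustration, not part of the patch):

    #include <linux/if_link.h>

    _Static_assert(XDP_FLAGS_SKB_MODE == 2, "same value as the old (2U << 0)");
    _Static_assert((XDP_FLAGS_SKB_MODE & XDP_FLAGS_DRV_MODE) == 0,
                   "generic (skb) and native (drv) mode are distinct bits");
    _Static_assert((XDP_FLAGS_MASK & XDP_FLAGS_DRV_MODE) != 0,
                   "the mask accepts the new mode bit");
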
diff --git a/include/uapi/linux/keyctl.h b/include/uapi/linux/keyctl.h
index 201c6644b237..ef16df06642a 100644
--- a/include/uapi/linux/keyctl.h
+++ b/include/uapi/linux/keyctl.h
@@ -70,8 +70,8 @@ struct keyctl_dh_params {
70}; 70};
71 71
72struct keyctl_kdf_params { 72struct keyctl_kdf_params {
73 char *hashname; 73 char __user *hashname;
74 char *otherinfo; 74 char __user *otherinfo;
75 __u32 otherinfolen; 75 __u32 otherinfolen;
76 __u32 __spare[8]; 76 __u32 __spare[8];
77}; 77};
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index 61b7d36dfe34..156ee4cab82e 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -343,6 +343,7 @@ enum ovs_key_attr {
343#define OVS_KEY_ATTR_MAX (__OVS_KEY_ATTR_MAX - 1) 343#define OVS_KEY_ATTR_MAX (__OVS_KEY_ATTR_MAX - 1)
344 344
345enum ovs_tunnel_key_attr { 345enum ovs_tunnel_key_attr {
346 /* OVS_TUNNEL_KEY_ATTR_NONE, standard nl API requires this attribute! */
346 OVS_TUNNEL_KEY_ATTR_ID, /* be64 Tunnel ID */ 347 OVS_TUNNEL_KEY_ATTR_ID, /* be64 Tunnel ID */
347 OVS_TUNNEL_KEY_ATTR_IPV4_SRC, /* be32 src IP address. */ 348 OVS_TUNNEL_KEY_ATTR_IPV4_SRC, /* be32 src IP address. */
348 OVS_TUNNEL_KEY_ATTR_IPV4_DST, /* be32 dst IP address. */ 349 OVS_TUNNEL_KEY_ATTR_IPV4_DST, /* be32 dst IP address. */
diff --git a/include/uapi/linux/usb/ch11.h b/include/uapi/linux/usb/ch11.h
index 361297e96f58..576c704e3fb8 100644
--- a/include/uapi/linux/usb/ch11.h
+++ b/include/uapi/linux/usb/ch11.h
@@ -22,6 +22,9 @@
22 */ 22 */
23#define USB_MAXCHILDREN 31 23#define USB_MAXCHILDREN 31
24 24
25/* See USB 3.1 spec Table 10-5 */
26#define USB_SS_MAXPORTS 15
27
25/* 28/*
26 * Hub request types 29 * Hub request types
27 */ 30 */
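
USB_SS_MAXPORTS gives hub code a constant against which to validate the port count read from a possibly broken or malicious SuperSpeed hub descriptor. The companion driver change is, as best as can be recalled, a bound check in hub_configure() along these lines (treat the exact message and error code as illustrative):

    if (hub_is_superspeed(hdev) && hdev->maxchild > USB_SS_MAXPORTS) {
            message = "hub has too many ports!";
            ret = -ENODEV;
            goto fail;
    }
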
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index 5e00b2333c26..172dc8ee0e3b 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -86,6 +86,7 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
86 array->map.key_size = attr->key_size; 86 array->map.key_size = attr->key_size;
87 array->map.value_size = attr->value_size; 87 array->map.value_size = attr->value_size;
88 array->map.max_entries = attr->max_entries; 88 array->map.max_entries = attr->max_entries;
89 array->map.map_flags = attr->map_flags;
89 array->elem_size = elem_size; 90 array->elem_size = elem_size;
90 91
91 if (!percpu) 92 if (!percpu)
diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c
index 39cfafd895b8..b09185f0f17d 100644
--- a/kernel/bpf/lpm_trie.c
+++ b/kernel/bpf/lpm_trie.c
@@ -432,6 +432,7 @@ static struct bpf_map *trie_alloc(union bpf_attr *attr)
432 trie->map.key_size = attr->key_size; 432 trie->map.key_size = attr->key_size;
433 trie->map.value_size = attr->value_size; 433 trie->map.value_size = attr->value_size;
434 trie->map.max_entries = attr->max_entries; 434 trie->map.max_entries = attr->max_entries;
435 trie->map.map_flags = attr->map_flags;
435 trie->data_size = attr->key_size - 436 trie->data_size = attr->key_size -
436 offsetof(struct bpf_lpm_trie_key, data); 437 offsetof(struct bpf_lpm_trie_key, data);
437 trie->max_prefixlen = trie->data_size * 8; 438 trie->max_prefixlen = trie->data_size * 8;
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index 4dfd6f2ec2f9..31147d730abf 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -88,6 +88,7 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
88 smap->map.key_size = attr->key_size; 88 smap->map.key_size = attr->key_size;
89 smap->map.value_size = value_size; 89 smap->map.value_size = value_size;
90 smap->map.max_entries = attr->max_entries; 90 smap->map.max_entries = attr->max_entries;
91 smap->map.map_flags = attr->map_flags;
91 smap->n_buckets = n_buckets; 92 smap->n_buckets = n_buckets;
92 smap->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT; 93 smap->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
93 94
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index fd2411fd6914..265a0d854e33 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -783,7 +783,7 @@ struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type)
783EXPORT_SYMBOL_GPL(bpf_prog_get_type); 783EXPORT_SYMBOL_GPL(bpf_prog_get_type);
784 784
785/* last field in 'union bpf_attr' used by this command */ 785/* last field in 'union bpf_attr' used by this command */
786#define BPF_PROG_LOAD_LAST_FIELD kern_version 786#define BPF_PROG_LOAD_LAST_FIELD prog_flags
787 787
788static int bpf_prog_load(union bpf_attr *attr) 788static int bpf_prog_load(union bpf_attr *attr)
789{ 789{
@@ -796,6 +796,9 @@ static int bpf_prog_load(union bpf_attr *attr)
796 if (CHECK_ATTR(BPF_PROG_LOAD)) 796 if (CHECK_ATTR(BPF_PROG_LOAD))
797 return -EINVAL; 797 return -EINVAL;
798 798
799 if (attr->prog_flags & ~BPF_F_STRICT_ALIGNMENT)
800 return -EINVAL;
801
799 /* copy eBPF program license from user space */ 802 /* copy eBPF program license from user space */
800 if (strncpy_from_user(license, u64_to_user_ptr(attr->license), 803 if (strncpy_from_user(license, u64_to_user_ptr(attr->license),
801 sizeof(license) - 1) < 0) 804 sizeof(license) - 1) < 0)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index c5b56c92f8e2..a8a725697bed 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -140,7 +140,7 @@ struct bpf_verifier_stack_elem {
140 struct bpf_verifier_stack_elem *next; 140 struct bpf_verifier_stack_elem *next;
141}; 141};
142 142
143#define BPF_COMPLEXITY_LIMIT_INSNS 65536 143#define BPF_COMPLEXITY_LIMIT_INSNS 98304
144#define BPF_COMPLEXITY_LIMIT_STACK 1024 144#define BPF_COMPLEXITY_LIMIT_STACK 1024
145 145
146#define BPF_MAP_PTR_POISON ((void *)0xeB9F + POISON_POINTER_DELTA) 146#define BPF_MAP_PTR_POISON ((void *)0xeB9F + POISON_POINTER_DELTA)
@@ -241,6 +241,12 @@ static void print_verifier_state(struct bpf_verifier_state *state)
241 if (reg->max_value != BPF_REGISTER_MAX_RANGE) 241 if (reg->max_value != BPF_REGISTER_MAX_RANGE)
242 verbose(",max_value=%llu", 242 verbose(",max_value=%llu",
243 (unsigned long long)reg->max_value); 243 (unsigned long long)reg->max_value);
244 if (reg->min_align)
245 verbose(",min_align=%u", reg->min_align);
246 if (reg->aux_off)
247 verbose(",aux_off=%u", reg->aux_off);
248 if (reg->aux_off_align)
249 verbose(",aux_off_align=%u", reg->aux_off_align);
244 } 250 }
245 for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) { 251 for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) {
246 if (state->stack_slot_type[i] == STACK_SPILL) 252 if (state->stack_slot_type[i] == STACK_SPILL)
@@ -457,16 +463,22 @@ static const int caller_saved[CALLER_SAVED_REGS] = {
457 BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5 463 BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5
458}; 464};
459 465
466static void mark_reg_not_init(struct bpf_reg_state *regs, u32 regno)
467{
468 BUG_ON(regno >= MAX_BPF_REG);
469
470 memset(&regs[regno], 0, sizeof(regs[regno]));
471 regs[regno].type = NOT_INIT;
472 regs[regno].min_value = BPF_REGISTER_MIN_RANGE;
473 regs[regno].max_value = BPF_REGISTER_MAX_RANGE;
474}
475
460static void init_reg_state(struct bpf_reg_state *regs) 476static void init_reg_state(struct bpf_reg_state *regs)
461{ 477{
462 int i; 478 int i;
463 479
464 for (i = 0; i < MAX_BPF_REG; i++) { 480 for (i = 0; i < MAX_BPF_REG; i++)
465 regs[i].type = NOT_INIT; 481 mark_reg_not_init(regs, i);
466 regs[i].imm = 0;
467 regs[i].min_value = BPF_REGISTER_MIN_RANGE;
468 regs[i].max_value = BPF_REGISTER_MAX_RANGE;
469 }
470 482
471 /* frame pointer */ 483 /* frame pointer */
472 regs[BPF_REG_FP].type = FRAME_PTR; 484 regs[BPF_REG_FP].type = FRAME_PTR;
@@ -492,6 +504,7 @@ static void reset_reg_range_values(struct bpf_reg_state *regs, u32 regno)
492{ 504{
493 regs[regno].min_value = BPF_REGISTER_MIN_RANGE; 505 regs[regno].min_value = BPF_REGISTER_MIN_RANGE;
494 regs[regno].max_value = BPF_REGISTER_MAX_RANGE; 506 regs[regno].max_value = BPF_REGISTER_MAX_RANGE;
507 regs[regno].min_align = 0;
495} 508}
496 509
497static void mark_reg_unknown_value_and_range(struct bpf_reg_state *regs, 510static void mark_reg_unknown_value_and_range(struct bpf_reg_state *regs,
@@ -779,17 +792,37 @@ static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
779} 792}
780 793
781static int check_pkt_ptr_alignment(const struct bpf_reg_state *reg, 794static int check_pkt_ptr_alignment(const struct bpf_reg_state *reg,
782 int off, int size) 795 int off, int size, bool strict)
783{ 796{
784 if (reg->id && size != 1) { 797 int ip_align;
785 verbose("Unknown alignment. Only byte-sized access allowed in packet access.\n"); 798 int reg_off;
786 return -EACCES; 799
800 /* Byte size accesses are always allowed. */
801 if (!strict || size == 1)
802 return 0;
803
804 reg_off = reg->off;
805 if (reg->id) {
806 if (reg->aux_off_align % size) {
807 verbose("Packet access is only %u byte aligned, %d byte access not allowed\n",
808 reg->aux_off_align, size);
809 return -EACCES;
810 }
811 reg_off += reg->aux_off;
787 } 812 }
788 813
789 /* skb->data is NET_IP_ALIGN-ed */ 814 /* For platforms that do not have a Kconfig enabling
790 if ((NET_IP_ALIGN + reg->off + off) % size != 0) { 815 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS the value of
816 * NET_IP_ALIGN is universally set to '2'. And on platforms
817 * that do set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, we get
818 * to this code only in strict mode where we want to emulate
819 * the NET_IP_ALIGN==2 checking. Therefore use an
820 * unconditional IP align value of '2'.
821 */
822 ip_align = 2;
823 if ((ip_align + reg_off + off) % size != 0) {
791 verbose("misaligned packet access off %d+%d+%d size %d\n", 824 verbose("misaligned packet access off %d+%d+%d size %d\n",
792 NET_IP_ALIGN, reg->off, off, size); 825 ip_align, reg_off, off, size);
793 return -EACCES; 826 return -EACCES;
794 } 827 }
795 828
@@ -797,9 +830,9 @@ static int check_pkt_ptr_alignment(const struct bpf_reg_state *reg,
797} 830}
798 831
799static int check_val_ptr_alignment(const struct bpf_reg_state *reg, 832static int check_val_ptr_alignment(const struct bpf_reg_state *reg,
800 int size) 833 int size, bool strict)
801{ 834{
802 if (size != 1) { 835 if (strict && size != 1) {
803 verbose("Unknown alignment. Only byte-sized access allowed in value access.\n"); 836 verbose("Unknown alignment. Only byte-sized access allowed in value access.\n");
804 return -EACCES; 837 return -EACCES;
805 } 838 }
@@ -807,16 +840,17 @@ static int check_val_ptr_alignment(const struct bpf_reg_state *reg,
807 return 0; 840 return 0;
808} 841}
809 842
810static int check_ptr_alignment(const struct bpf_reg_state *reg, 843static int check_ptr_alignment(struct bpf_verifier_env *env,
844 const struct bpf_reg_state *reg,
811 int off, int size) 845 int off, int size)
812{ 846{
847 bool strict = env->strict_alignment;
848
813 switch (reg->type) { 849 switch (reg->type) {
814 case PTR_TO_PACKET: 850 case PTR_TO_PACKET:
815 return IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ? 0 : 851 return check_pkt_ptr_alignment(reg, off, size, strict);
816 check_pkt_ptr_alignment(reg, off, size);
817 case PTR_TO_MAP_VALUE_ADJ: 852 case PTR_TO_MAP_VALUE_ADJ:
818 return IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ? 0 : 853 return check_val_ptr_alignment(reg, size, strict);
819 check_val_ptr_alignment(reg, size);
820 default: 854 default:
821 if (off % size != 0) { 855 if (off % size != 0) {
822 verbose("misaligned access off %d size %d\n", 856 verbose("misaligned access off %d size %d\n",
@@ -849,7 +883,7 @@ static int check_mem_access(struct bpf_verifier_env *env, u32 regno, int off,
849 if (size < 0) 883 if (size < 0)
850 return size; 884 return size;
851 885
852 err = check_ptr_alignment(reg, off, size); 886 err = check_ptr_alignment(env, reg, off, size);
853 if (err) 887 if (err)
854 return err; 888 return err;
855 889
@@ -883,6 +917,8 @@ static int check_mem_access(struct bpf_verifier_env *env, u32 regno, int off,
883 value_regno); 917 value_regno);
884 /* note that reg.[id|off|range] == 0 */ 918 /* note that reg.[id|off|range] == 0 */
885 state->regs[value_regno].type = reg_type; 919 state->regs[value_regno].type = reg_type;
920 state->regs[value_regno].aux_off = 0;
921 state->regs[value_regno].aux_off_align = 0;
886 } 922 }
887 923
888 } else if (reg->type == FRAME_PTR || reg->type == PTR_TO_STACK) { 924 } else if (reg->type == FRAME_PTR || reg->type == PTR_TO_STACK) {
@@ -953,6 +989,11 @@ static int check_xadd(struct bpf_verifier_env *env, struct bpf_insn *insn)
953 if (err) 989 if (err)
954 return err; 990 return err;
955 991
992 if (is_pointer_value(env, insn->src_reg)) {
993 verbose("R%d leaks addr into mem\n", insn->src_reg);
994 return -EACCES;
995 }
996
956 /* check whether atomic_add can read the memory */ 997 /* check whether atomic_add can read the memory */
957 err = check_mem_access(env, insn->dst_reg, insn->off, 998 err = check_mem_access(env, insn->dst_reg, insn->off,
958 BPF_SIZE(insn->code), BPF_READ, -1); 999 BPF_SIZE(insn->code), BPF_READ, -1);
@@ -1313,7 +1354,6 @@ static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
1313 struct bpf_verifier_state *state = &env->cur_state; 1354 struct bpf_verifier_state *state = &env->cur_state;
1314 const struct bpf_func_proto *fn = NULL; 1355 const struct bpf_func_proto *fn = NULL;
1315 struct bpf_reg_state *regs = state->regs; 1356 struct bpf_reg_state *regs = state->regs;
1316 struct bpf_reg_state *reg;
1317 struct bpf_call_arg_meta meta; 1357 struct bpf_call_arg_meta meta;
1318 bool changes_data; 1358 bool changes_data;
1319 int i, err; 1359 int i, err;
@@ -1380,11 +1420,8 @@ static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
1380 } 1420 }
1381 1421
1382 /* reset caller saved regs */ 1422 /* reset caller saved regs */
1383 for (i = 0; i < CALLER_SAVED_REGS; i++) { 1423 for (i = 0; i < CALLER_SAVED_REGS; i++)
1384 reg = regs + caller_saved[i]; 1424 mark_reg_not_init(regs, caller_saved[i]);
1385 reg->type = NOT_INIT;
1386 reg->imm = 0;
1387 }
1388 1425
1389 /* update return register */ 1426 /* update return register */
1390 if (fn->ret_type == RET_INTEGER) { 1427 if (fn->ret_type == RET_INTEGER) {
@@ -1455,6 +1492,8 @@ add_imm:
1455 */ 1492 */
1456 dst_reg->off += imm; 1493 dst_reg->off += imm;
1457 } else { 1494 } else {
1495 bool had_id;
1496
1458 if (src_reg->type == PTR_TO_PACKET) { 1497 if (src_reg->type == PTR_TO_PACKET) {
1459 /* R6=pkt(id=0,off=0,r=62) R7=imm22; r7 += r6 */ 1498 /* R6=pkt(id=0,off=0,r=62) R7=imm22; r7 += r6 */
1460 tmp_reg = *dst_reg; /* save r7 state */ 1499 tmp_reg = *dst_reg; /* save r7 state */
@@ -1488,14 +1527,23 @@ add_imm:
1488 src_reg->imm); 1527 src_reg->imm);
1489 return -EACCES; 1528 return -EACCES;
1490 } 1529 }
1530
1531 had_id = (dst_reg->id != 0);
1532
1491 /* dst_reg stays as pkt_ptr type and since some positive 1533 /* dst_reg stays as pkt_ptr type and since some positive
1492 * integer value was added to the pointer, increment its 'id' 1534 * integer value was added to the pointer, increment its 'id'
1493 */ 1535 */
1494 dst_reg->id = ++env->id_gen; 1536 dst_reg->id = ++env->id_gen;
1495 1537
1496 /* something was added to pkt_ptr, set range and off to zero */ 1538 /* something was added to pkt_ptr, set range to zero */
1539 dst_reg->aux_off += dst_reg->off;
1497 dst_reg->off = 0; 1540 dst_reg->off = 0;
1498 dst_reg->range = 0; 1541 dst_reg->range = 0;
1542 if (had_id)
1543 dst_reg->aux_off_align = min(dst_reg->aux_off_align,
1544 src_reg->min_align);
1545 else
1546 dst_reg->aux_off_align = src_reg->min_align;
1499 } 1547 }
1500 return 0; 1548 return 0;
1501} 1549}
@@ -1669,6 +1717,13 @@ static void check_reg_overflow(struct bpf_reg_state *reg)
1669 reg->min_value = BPF_REGISTER_MIN_RANGE; 1717 reg->min_value = BPF_REGISTER_MIN_RANGE;
1670} 1718}
1671 1719
1720static u32 calc_align(u32 imm)
1721{
1722 if (!imm)
1723 return 1U << 31;
1724 return imm - ((imm - 1) & imm);
1725}
1726
1672static void adjust_reg_min_max_vals(struct bpf_verifier_env *env, 1727static void adjust_reg_min_max_vals(struct bpf_verifier_env *env,
1673 struct bpf_insn *insn) 1728 struct bpf_insn *insn)
1674{ 1729{
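
The helper added above, calc_align(), isolates the lowest set bit of imm, i.e. the largest power of two guaranteed to divide it, mapping 0 to the maximal alignment 1U << 31. A standalone self-test of that identity (plain userspace C, an illustration only):

    #include <assert.h>
    #include <stdint.h>

    static uint32_t calc_align(uint32_t imm)
    {
            if (!imm)
                    return 1U << 31;
            return imm - ((imm - 1) & imm);  /* clears all but the lowest set bit */
    }

    int main(void)
    {
            assert(calc_align(24) == 8);              /* 24 = 2^3 * 3 */
            assert(calc_align(6) == 2);               /* 6 = 2 * 3 */
            assert(calc_align(7) == 1);               /* odd, so byte-aligned only */
            assert(calc_align(1U << 12) == 1U << 12);
            assert(calc_align(0) == 1U << 31);
            return 0;
    }
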
@@ -1676,8 +1731,10 @@ static void adjust_reg_min_max_vals(struct bpf_verifier_env *env,
1676 s64 min_val = BPF_REGISTER_MIN_RANGE; 1731 s64 min_val = BPF_REGISTER_MIN_RANGE;
1677 u64 max_val = BPF_REGISTER_MAX_RANGE; 1732 u64 max_val = BPF_REGISTER_MAX_RANGE;
1678 u8 opcode = BPF_OP(insn->code); 1733 u8 opcode = BPF_OP(insn->code);
1734 u32 dst_align, src_align;
1679 1735
1680 dst_reg = &regs[insn->dst_reg]; 1736 dst_reg = &regs[insn->dst_reg];
1737 src_align = 0;
1681 if (BPF_SRC(insn->code) == BPF_X) { 1738 if (BPF_SRC(insn->code) == BPF_X) {
1682 check_reg_overflow(&regs[insn->src_reg]); 1739 check_reg_overflow(&regs[insn->src_reg]);
1683 min_val = regs[insn->src_reg].min_value; 1740 min_val = regs[insn->src_reg].min_value;
@@ -1693,12 +1750,18 @@ static void adjust_reg_min_max_vals(struct bpf_verifier_env *env,
1693 regs[insn->src_reg].type != UNKNOWN_VALUE) { 1750 regs[insn->src_reg].type != UNKNOWN_VALUE) {
1694 min_val = BPF_REGISTER_MIN_RANGE; 1751 min_val = BPF_REGISTER_MIN_RANGE;
1695 max_val = BPF_REGISTER_MAX_RANGE; 1752 max_val = BPF_REGISTER_MAX_RANGE;
1753 src_align = 0;
1754 } else {
1755 src_align = regs[insn->src_reg].min_align;
1696 } 1756 }
1697 } else if (insn->imm < BPF_REGISTER_MAX_RANGE && 1757 } else if (insn->imm < BPF_REGISTER_MAX_RANGE &&
1698 (s64)insn->imm > BPF_REGISTER_MIN_RANGE) { 1758 (s64)insn->imm > BPF_REGISTER_MIN_RANGE) {
1699 min_val = max_val = insn->imm; 1759 min_val = max_val = insn->imm;
1760 src_align = calc_align(insn->imm);
1700 } 1761 }
1701 1762
1763 dst_align = dst_reg->min_align;
1764
1702 /* We don't know anything about what was done to this register, mark it 1765 /* We don't know anything about what was done to this register, mark it
1703 * as unknown. 1766 * as unknown.
1704 */ 1767 */
@@ -1723,18 +1786,21 @@ static void adjust_reg_min_max_vals(struct bpf_verifier_env *env,
1723 dst_reg->min_value += min_val; 1786 dst_reg->min_value += min_val;
1724 if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE) 1787 if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE)
1725 dst_reg->max_value += max_val; 1788 dst_reg->max_value += max_val;
1789 dst_reg->min_align = min(src_align, dst_align);
1726 break; 1790 break;
1727 case BPF_SUB: 1791 case BPF_SUB:
1728 if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE) 1792 if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE)
1729 dst_reg->min_value -= min_val; 1793 dst_reg->min_value -= min_val;
1730 if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE) 1794 if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE)
1731 dst_reg->max_value -= max_val; 1795 dst_reg->max_value -= max_val;
1796 dst_reg->min_align = min(src_align, dst_align);
1732 break; 1797 break;
1733 case BPF_MUL: 1798 case BPF_MUL:
1734 if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE) 1799 if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE)
1735 dst_reg->min_value *= min_val; 1800 dst_reg->min_value *= min_val;
1736 if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE) 1801 if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE)
1737 dst_reg->max_value *= max_val; 1802 dst_reg->max_value *= max_val;
1803 dst_reg->min_align = max(src_align, dst_align);
1738 break; 1804 break;
1739 case BPF_AND: 1805 case BPF_AND:
1740 /* Disallow AND'ing of negative numbers, ain't nobody got time 1806 /* Disallow AND'ing of negative numbers, ain't nobody got time
@@ -1746,17 +1812,23 @@ static void adjust_reg_min_max_vals(struct bpf_verifier_env *env,
1746 else 1812 else
1747 dst_reg->min_value = 0; 1813 dst_reg->min_value = 0;
1748 dst_reg->max_value = max_val; 1814 dst_reg->max_value = max_val;
1815 dst_reg->min_align = max(src_align, dst_align);
1749 break; 1816 break;
1750 case BPF_LSH: 1817 case BPF_LSH:
1751 /* Gotta have special overflow logic here, if we're shifting 1818 /* Gotta have special overflow logic here, if we're shifting
1752 * more than MAX_RANGE then just assume we have an invalid 1819 * more than MAX_RANGE then just assume we have an invalid
1753 * range. 1820 * range.
1754 */ 1821 */
1755 if (min_val > ilog2(BPF_REGISTER_MAX_RANGE)) 1822 if (min_val > ilog2(BPF_REGISTER_MAX_RANGE)) {
1756 dst_reg->min_value = BPF_REGISTER_MIN_RANGE; 1823 dst_reg->min_value = BPF_REGISTER_MIN_RANGE;
1757 else if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE) 1824 dst_reg->min_align = 1;
1758 dst_reg->min_value <<= min_val; 1825 } else {
1759 1826 if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE)
1827 dst_reg->min_value <<= min_val;
1828 if (!dst_reg->min_align)
1829 dst_reg->min_align = 1;
1830 dst_reg->min_align <<= min_val;
1831 }
1760 if (max_val > ilog2(BPF_REGISTER_MAX_RANGE)) 1832 if (max_val > ilog2(BPF_REGISTER_MAX_RANGE))
1761 dst_reg->max_value = BPF_REGISTER_MAX_RANGE; 1833 dst_reg->max_value = BPF_REGISTER_MAX_RANGE;
1762 else if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE) 1834 else if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE)
@@ -1766,11 +1838,19 @@ static void adjust_reg_min_max_vals(struct bpf_verifier_env *env,
1766 /* RSH by a negative number is undefined, and the BPF_RSH is an 1838 /* RSH by a negative number is undefined, and the BPF_RSH is an
1767 * unsigned shift, so make the appropriate casts. 1839 * unsigned shift, so make the appropriate casts.
1768 */ 1840 */
1769 if (min_val < 0 || dst_reg->min_value < 0) 1841 if (min_val < 0 || dst_reg->min_value < 0) {
1770 dst_reg->min_value = BPF_REGISTER_MIN_RANGE; 1842 dst_reg->min_value = BPF_REGISTER_MIN_RANGE;
1771 else 1843 } else {
1772 dst_reg->min_value = 1844 dst_reg->min_value =
1773 (u64)(dst_reg->min_value) >> min_val; 1845 (u64)(dst_reg->min_value) >> min_val;
1846 }
1847 if (min_val < 0) {
1848 dst_reg->min_align = 1;
1849 } else {
1850 dst_reg->min_align >>= (u64) min_val;
1851 if (!dst_reg->min_align)
1852 dst_reg->min_align = 1;
1853 }
1774 if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE) 1854 if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE)
1775 dst_reg->max_value >>= max_val; 1855 dst_reg->max_value >>= max_val;
1776 break; 1856 break;
@@ -1872,6 +1952,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
1872 regs[insn->dst_reg].imm = insn->imm; 1952 regs[insn->dst_reg].imm = insn->imm;
1873 regs[insn->dst_reg].max_value = insn->imm; 1953 regs[insn->dst_reg].max_value = insn->imm;
1874 regs[insn->dst_reg].min_value = insn->imm; 1954 regs[insn->dst_reg].min_value = insn->imm;
1955 regs[insn->dst_reg].min_align = calc_align(insn->imm);
1875 } 1956 }
1876 1957
1877 } else if (opcode > BPF_END) { 1958 } else if (opcode > BPF_END) {
@@ -2368,7 +2449,6 @@ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
2368{ 2449{
2369 struct bpf_reg_state *regs = env->cur_state.regs; 2450 struct bpf_reg_state *regs = env->cur_state.regs;
2370 u8 mode = BPF_MODE(insn->code); 2451 u8 mode = BPF_MODE(insn->code);
2371 struct bpf_reg_state *reg;
2372 int i, err; 2452 int i, err;
2373 2453
2374 if (!may_access_skb(env->prog->type)) { 2454 if (!may_access_skb(env->prog->type)) {
@@ -2401,11 +2481,8 @@ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
2401 } 2481 }
2402 2482
2403 /* reset caller saved regs to unreadable */ 2483 /* reset caller saved regs to unreadable */
2404 for (i = 0; i < CALLER_SAVED_REGS; i++) { 2484 for (i = 0; i < CALLER_SAVED_REGS; i++)
2405 reg = regs + caller_saved[i]; 2485 mark_reg_not_init(regs, caller_saved[i]);
2406 reg->type = NOT_INIT;
2407 reg->imm = 0;
2408 }
2409 2486
2410 /* mark destination R0 register as readable, since it contains 2487 /* mark destination R0 register as readable, since it contains
2411 * the value fetched from the packet 2488 * the value fetched from the packet
@@ -2564,6 +2641,7 @@ peek_stack:
2564 env->explored_states[t + 1] = STATE_LIST_MARK; 2641 env->explored_states[t + 1] = STATE_LIST_MARK;
2565 } else { 2642 } else {
2566 /* conditional jump with two edges */ 2643 /* conditional jump with two edges */
2644 env->explored_states[t] = STATE_LIST_MARK;
2567 ret = push_insn(t, t + 1, FALLTHROUGH, env); 2645 ret = push_insn(t, t + 1, FALLTHROUGH, env);
2568 if (ret == 1) 2646 if (ret == 1)
2569 goto peek_stack; 2647 goto peek_stack;
@@ -2615,7 +2693,8 @@ err_free:
2615/* the following conditions reduce the number of explored insns 2693/* the following conditions reduce the number of explored insns
2616 * from ~140k to ~80k for ultra large programs that use a lot of ptr_to_packet 2694 * from ~140k to ~80k for ultra large programs that use a lot of ptr_to_packet
2617 */ 2695 */
2618static bool compare_ptrs_to_packet(struct bpf_reg_state *old, 2696static bool compare_ptrs_to_packet(struct bpf_verifier_env *env,
2697 struct bpf_reg_state *old,
2619 struct bpf_reg_state *cur) 2698 struct bpf_reg_state *cur)
2620{ 2699{
2621 if (old->id != cur->id) 2700 if (old->id != cur->id)
@@ -2658,7 +2737,7 @@ static bool compare_ptrs_to_packet(struct bpf_reg_state *old,
2658 * 'if (R4 > data_end)' and all further insn were already good with r=20, 2737 * 'if (R4 > data_end)' and all further insn were already good with r=20,
2659 * so they will be good with r=30 and we can prune the search. 2738 * so they will be good with r=30 and we can prune the search.
2660 */ 2739 */
2661 if (old->off <= cur->off && 2740 if (!env->strict_alignment && old->off <= cur->off &&
2662 old->off >= old->range && cur->off >= cur->range) 2741 old->off >= old->range && cur->off >= cur->range)
2663 return true; 2742 return true;
2664 2743
@@ -2722,8 +2801,14 @@ static bool states_equal(struct bpf_verifier_env *env,
2722 rcur->type != NOT_INIT)) 2801 rcur->type != NOT_INIT))
2723 continue; 2802 continue;
2724 2803
2804 /* Don't care about the reg->id in this case. */
2805 if (rold->type == PTR_TO_MAP_VALUE_OR_NULL &&
2806 rcur->type == PTR_TO_MAP_VALUE_OR_NULL &&
2807 rold->map_ptr == rcur->map_ptr)
2808 continue;
2809
2725 if (rold->type == PTR_TO_PACKET && rcur->type == PTR_TO_PACKET && 2810 if (rold->type == PTR_TO_PACKET && rcur->type == PTR_TO_PACKET &&
2726 compare_ptrs_to_packet(rold, rcur)) 2811 compare_ptrs_to_packet(env, rold, rcur))
2727 continue; 2812 continue;
2728 2813
2729 return false; 2814 return false;
@@ -2856,8 +2941,15 @@ static int do_check(struct bpf_verifier_env *env)
2856 goto process_bpf_exit; 2941 goto process_bpf_exit;
2857 } 2942 }
2858 2943
2859 if (log_level && do_print_state) { 2944 if (need_resched())
2860 verbose("\nfrom %d to %d:", prev_insn_idx, insn_idx); 2945 cond_resched();
2946
2947 if (log_level > 1 || (log_level && do_print_state)) {
2948 if (log_level > 1)
2949 verbose("%d:", insn_idx);
2950 else
2951 verbose("\nfrom %d to %d:",
2952 prev_insn_idx, insn_idx);
2861 print_verifier_state(&env->cur_state); 2953 print_verifier_state(&env->cur_state);
2862 do_print_state = false; 2954 do_print_state = false;
2863 } 2955 }
@@ -3495,6 +3587,10 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
3495 log_level = 0; 3587 log_level = 0;
3496 } 3588 }
3497 3589
3590 env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT);
3591 if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
3592 env->strict_alignment = true;
3593
3498 ret = replace_map_fd_with_map_ptr(env); 3594 ret = replace_map_fd_with_map_ptr(env);
3499 if (ret < 0) 3595 if (ret < 0)
3500 goto skip_full_check; 3596 goto skip_full_check;
@@ -3600,6 +3696,10 @@ int bpf_analyzer(struct bpf_prog *prog, const struct bpf_ext_analyzer_ops *ops,
3600 3696
3601 log_level = 0; 3697 log_level = 0;
3602 3698
3699 env->strict_alignment = false;
3700 if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
3701 env->strict_alignment = true;
3702
3603 env->explored_states = kcalloc(env->prog->len, 3703 env->explored_states = kcalloc(env->prog->len,
3604 sizeof(struct bpf_verifier_state_list *), 3704 sizeof(struct bpf_verifier_state_list *),
3605 GFP_KERNEL); 3705 GFP_KERNEL);
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index c3c9a0e1b3c9..8d4e85eae42c 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -4265,6 +4265,11 @@ static void kill_css(struct cgroup_subsys_state *css)
4265{ 4265{
4266 lockdep_assert_held(&cgroup_mutex); 4266 lockdep_assert_held(&cgroup_mutex);
4267 4267
4268 if (css->flags & CSS_DYING)
4269 return;
4270
4271 css->flags |= CSS_DYING;
4272
4268 /* 4273 /*
4269 * This must happen before css is disassociated with its cgroup. 4274 * This must happen before css is disassociated with its cgroup.
4270 * See seq_css() for details. 4275 * See seq_css() for details.
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index f6501f4f6040..ae643412948a 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -176,9 +176,9 @@ typedef enum {
176} cpuset_flagbits_t; 176} cpuset_flagbits_t;
177 177
178/* convenient tests for these bits */ 178/* convenient tests for these bits */
179static inline bool is_cpuset_online(const struct cpuset *cs) 179static inline bool is_cpuset_online(struct cpuset *cs)
180{ 180{
181 return test_bit(CS_ONLINE, &cs->flags); 181 return test_bit(CS_ONLINE, &cs->flags) && !css_is_dying(&cs->css);
182} 182}
183 183
184static inline int is_cpu_exclusive(const struct cpuset *cs) 184static inline int is_cpu_exclusive(const struct cpuset *cs)
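
The cgroup.c and cpuset.c hunks cooperate: kill_css() now stamps a css with CSS_DYING exactly once, under cgroup_mutex, and is_cpuset_online() additionally requires the css not to be dying, which closes the window in which operations racing with rmdir could still treat the cpuset as online. The css_is_dying() helper used here arrives in the same series; assuming it is the straightforward flag test, it reads roughly:

    static inline bool css_is_dying(struct cgroup_subsys_state *css)
    {
            return css->flags & CSS_DYING;
    }
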
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 9ae6fbe5b5cf..cb5103413bd8 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -1658,13 +1658,13 @@ static ssize_t write_cpuhp_target(struct device *dev,
1658 ret = !sp->name || sp->cant_stop ? -EINVAL : 0; 1658 ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
1659 mutex_unlock(&cpuhp_state_mutex); 1659 mutex_unlock(&cpuhp_state_mutex);
1660 if (ret) 1660 if (ret)
1661 return ret; 1661 goto out;
1662 1662
1663 if (st->state < target) 1663 if (st->state < target)
1664 ret = do_cpu_up(dev->id, target); 1664 ret = do_cpu_up(dev->id, target);
1665 else 1665 else
1666 ret = do_cpu_down(dev->id, target); 1666 ret = do_cpu_down(dev->id, target);
1667 1667out:
1668 unlock_device_hotplug(); 1668 unlock_device_hotplug();
1669 return ret ? ret : count; 1669 return ret ? ret : count;
1670} 1670}
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 6e75a5c9412d..6c4e523dc1e2 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -7316,6 +7316,21 @@ int perf_event_account_interrupt(struct perf_event *event)
7316 return __perf_event_account_interrupt(event, 1); 7316 return __perf_event_account_interrupt(event, 1);
7317} 7317}
7318 7318
7319static bool sample_is_allowed(struct perf_event *event, struct pt_regs *regs)
7320{
7321 /*
7322 * Due to interrupt latency (AKA "skid"), we may enter the
7323 * kernel before taking an overflow, even if the PMU is only
7324 * counting user events.
7325 * To avoid leaking information to userspace, we must always
7326 * reject kernel samples when exclude_kernel is set.
7327 */
7328 if (event->attr.exclude_kernel && !user_mode(regs))
7329 return false;
7330
7331 return true;
7332}
7333
7319/* 7334/*
7320 * Generic event overflow handling, sampling. 7335 * Generic event overflow handling, sampling.
7321 */ 7336 */
@@ -7337,6 +7352,12 @@ static int __perf_event_overflow(struct perf_event *event,
7337 ret = __perf_event_account_interrupt(event, throttle); 7352 ret = __perf_event_account_interrupt(event, throttle);
7338 7353
7339 /* 7354 /*
7355 * For security, drop the skid kernel samples if necessary.
7356 */
7357 if (!sample_is_allowed(event, regs))
7358 return ret;
7359
7360 /*
7340 * XXX event_limit might not quite work as expected on inherited 7361 * XXX event_limit might not quite work as expected on inherited
7341 * events 7362 * events
7342 */ 7363 */
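
The new sample_is_allowed() check matters for events created with exclude_kernel set: before it, interrupt skid could deliver a sample whose regs pointed into the kernel even though the user asked for user-only profiling. Such an event is requested from userspace roughly like this (a sketch, not code from the patch):

    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/perf_event.h>

    static int open_user_only_cycles(void)
    {
            struct perf_event_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.size = sizeof(attr);
            attr.type = PERF_TYPE_HARDWARE;
            attr.config = PERF_COUNT_HW_CPU_CYCLES;
            attr.sample_period = 100000;
            attr.exclude_kernel = 1;  /* skid samples landing in the kernel are now dropped */
            attr.exclude_hv = 1;

            return syscall(__NR_perf_event_open, &attr,
                           0 /* this task */, -1 /* any cpu */,
                           -1 /* no group */, 0);
    }
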
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index 2831480c63a2..ee97196bb151 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -580,7 +580,7 @@ int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
580 int ret = -ENOMEM, max_order = 0; 580 int ret = -ENOMEM, max_order = 0;
581 581
582 if (!has_aux(event)) 582 if (!has_aux(event))
583 return -ENOTSUPP; 583 return -EOPNOTSUPP;
584 584
585 if (event->pmu->capabilities & PERF_PMU_CAP_AUX_NO_SG) { 585 if (event->pmu->capabilities & PERF_PMU_CAP_AUX_NO_SG) {
586 /* 586 /*
diff --git a/kernel/fork.c b/kernel/fork.c
index 06d759ab4c62..e53770d2bf95 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1577,6 +1577,18 @@ static __latent_entropy struct task_struct *copy_process(
1577 if (!p) 1577 if (!p)
1578 goto fork_out; 1578 goto fork_out;
1579 1579
1580 /*
1581 * This _must_ happen before we call free_task(), i.e. before we jump
1582 * to any of the bad_fork_* labels. This is to avoid freeing
1583 * p->set_child_tid which is (ab)used as a kthread's data pointer for
1584 * kernel threads (PF_KTHREAD).
1585 */
1586 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
1587 /*
1588 * Clear TID on mm_release()?
1589 */
1590 p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL;
1591
1580 ftrace_graph_init_task(p); 1592 ftrace_graph_init_task(p);
1581 1593
1582 rt_mutex_init_task(p); 1594 rt_mutex_init_task(p);
@@ -1743,11 +1755,6 @@ static __latent_entropy struct task_struct *copy_process(
1743 } 1755 }
1744 } 1756 }
1745 1757
1746 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
1747 /*
1748 * Clear TID on mm_release()?
1749 */
1750 p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL;
1751#ifdef CONFIG_BLOCK 1758#ifdef CONFIG_BLOCK
1752 p->plug = NULL; 1759 p->plug = NULL;
1753#endif 1760#endif
@@ -1845,11 +1852,13 @@ static __latent_entropy struct task_struct *copy_process(
1845 */ 1852 */
1846 recalc_sigpending(); 1853 recalc_sigpending();
1847 if (signal_pending(current)) { 1854 if (signal_pending(current)) {
1848 spin_unlock(&current->sighand->siglock);
1849 write_unlock_irq(&tasklist_lock);
1850 retval = -ERESTARTNOINTR; 1855 retval = -ERESTARTNOINTR;
1851 goto bad_fork_cancel_cgroup; 1856 goto bad_fork_cancel_cgroup;
1852 } 1857 }
1858 if (unlikely(!(ns_of_pid(pid)->nr_hashed & PIDNS_HASH_ADDING))) {
1859 retval = -ENOMEM;
1860 goto bad_fork_cancel_cgroup;
1861 }
1853 1862
1854 if (likely(p->pid)) { 1863 if (likely(p->pid)) {
1855 ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace); 1864 ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
@@ -1907,6 +1916,8 @@ static __latent_entropy struct task_struct *copy_process(
1907 return p; 1916 return p;
1908 1917
1909bad_fork_cancel_cgroup: 1918bad_fork_cancel_cgroup:
1919 spin_unlock(&current->sighand->siglock);
1920 write_unlock_irq(&tasklist_lock);
1910 cgroup_cancel_fork(p); 1921 cgroup_cancel_fork(p);
1911bad_fork_free_pid: 1922bad_fork_free_pid:
1912 cgroup_threadgroup_change_end(current); 1923 cgroup_threadgroup_change_end(current);
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 686be4b73018..c94da688ee9b 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -880,8 +880,8 @@ irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle,
880 if (!desc) 880 if (!desc)
881 return; 881 return;
882 882
883 __irq_do_set_handler(desc, handle, 1, NULL);
884 desc->irq_common_data.handler_data = data; 883 desc->irq_common_data.handler_data = data;
884 __irq_do_set_handler(desc, handle, 1, NULL);
885 885
886 irq_put_desc_busunlock(desc, flags); 886 irq_put_desc_busunlock(desc, flags);
887} 887}
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 070be980c37a..425170d4439b 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -1312,8 +1312,10 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
1312 ret = __irq_set_trigger(desc, 1312 ret = __irq_set_trigger(desc,
1313 new->flags & IRQF_TRIGGER_MASK); 1313 new->flags & IRQF_TRIGGER_MASK);
1314 1314
1315 if (ret) 1315 if (ret) {
1316 irq_release_resources(desc);
1316 goto out_mask; 1317 goto out_mask;
1318 }
1317 } 1319 }
1318 1320
1319 desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \ 1321 desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 7367e0ec6f81..adfe3b4cfe05 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -122,7 +122,7 @@ static void *alloc_insn_page(void)
122 return module_alloc(PAGE_SIZE); 122 return module_alloc(PAGE_SIZE);
123} 123}
124 124
125static void free_insn_page(void *page) 125void __weak free_insn_page(void *page)
126{ 126{
127 module_memfree(page); 127 module_memfree(page);
128} 128}
@@ -595,7 +595,7 @@ static void kprobe_optimizer(struct work_struct *work)
595} 595}
596 596
597/* Wait for completing optimization and unoptimization */ 597/* Wait for completing optimization and unoptimization */
598static void wait_for_kprobe_optimizer(void) 598void wait_for_kprobe_optimizer(void)
599{ 599{
600 mutex_lock(&kprobe_mutex); 600 mutex_lock(&kprobe_mutex);
601 601
@@ -2183,6 +2183,12 @@ static int kprobes_module_callback(struct notifier_block *nb,
2183 * The vaddr this probe is installed will soon 2183 * The vaddr this probe is installed will soon
2184 * be vfreed but not synced to disk. Hence, 2184 * be vfreed but not synced to disk. Hence,
2185 * disarming the breakpoint isn't needed. 2185 * disarming the breakpoint isn't needed.
2186 *
2187 * Note, this will also move any optimized probes
2188 * that are pending removal from their corresponding
2189 * lists to the freeing_list, so they will not be
2190 * touched by the delayed kprobe_optimizer work
2191 * handler.
2186 */ 2192 */
2187 kill_kprobe(p); 2193 kill_kprobe(p);
2188 } 2194 }
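
Dropping static and adding __weak turns free_insn_page() into a default that an architecture overrides simply by defining a strong symbol of the same name; the linker then prefers the arch version. Schematically, across two translation units (the arch body shown is an assumed example, not actual arch code):

    /* kernel/kprobes.c: default, used when no arch override exists */
    void __weak free_insn_page(void *page)
    {
            module_memfree(page);
    }

    /* arch/<arch>/kernel/kprobes.c: strong definition wins at link time */
    void free_insn_page(void *page)
    {
            /* e.g. make the page writable again before handing it back */
            set_memory_rw((unsigned long)page, 1);
            module_memfree(page);
    }
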
diff --git a/kernel/livepatch/Kconfig b/kernel/livepatch/Kconfig
index 045022557936..ec4565122e65 100644
--- a/kernel/livepatch/Kconfig
+++ b/kernel/livepatch/Kconfig
@@ -10,6 +10,7 @@ config LIVEPATCH
10 depends on SYSFS 10 depends on SYSFS
11 depends on KALLSYMS_ALL 11 depends on KALLSYMS_ALL
12 depends on HAVE_LIVEPATCH 12 depends on HAVE_LIVEPATCH
13 depends on !TRIM_UNUSED_KSYMS
13 help 14 help
14 Say Y here if you want to support kernel live patching. 15 Say Y here if you want to support kernel live patching.
15 This option has no runtime impact until a kernel "patch" 16 This option has no runtime impact until a kernel "patch"
diff --git a/kernel/livepatch/patch.c b/kernel/livepatch/patch.c
index f8269036bf0b..52c4e907c14b 100644
--- a/kernel/livepatch/patch.c
+++ b/kernel/livepatch/patch.c
@@ -59,7 +59,11 @@ static void notrace klp_ftrace_handler(unsigned long ip,
59 59
60 ops = container_of(fops, struct klp_ops, fops); 60 ops = container_of(fops, struct klp_ops, fops);
61 61
62 rcu_read_lock(); 62 /*
63 * A variant of synchronize_sched() is used to allow patching functions
64 * where RCU is not watching, see klp_synchronize_transition().
65 */
66 preempt_disable_notrace();
63 67
64 func = list_first_or_null_rcu(&ops->func_stack, struct klp_func, 68 func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
65 stack_node); 69 stack_node);
@@ -115,7 +119,7 @@ static void notrace klp_ftrace_handler(unsigned long ip,
115 119
116 klp_arch_set_pc(regs, (unsigned long)func->new_func); 120 klp_arch_set_pc(regs, (unsigned long)func->new_func);
117unlock: 121unlock:
118 rcu_read_unlock(); 122 preempt_enable_notrace();
119} 123}
120 124
121/* 125/*
diff --git a/kernel/livepatch/transition.c b/kernel/livepatch/transition.c
index adc0cc64aa4b..b004a1fb6032 100644
--- a/kernel/livepatch/transition.c
+++ b/kernel/livepatch/transition.c
@@ -49,6 +49,28 @@ static void klp_transition_work_fn(struct work_struct *work)
49static DECLARE_DELAYED_WORK(klp_transition_work, klp_transition_work_fn); 49static DECLARE_DELAYED_WORK(klp_transition_work, klp_transition_work_fn);
50 50
51/* 51/*
52 * This function is just a stub to implement a hard force
53 * of synchronize_sched(). This requires synchronizing
54 * tasks even in userspace and idle.
55 */
56static void klp_sync(struct work_struct *work)
57{
58}
59
60/*
61 * We also allow patching functions where RCU is not watching,
62 * e.g. before user_exit(). We cannot rely on the RCU infrastructure
63 * to do the synchronization. Instead, hard-force the sched synchronization.
64 *
65 * This approach allows the use of RCU functions for manipulating func_stack
66 * safely.
67 */
68static void klp_synchronize_transition(void)
69{
70 schedule_on_each_cpu(klp_sync);
71}
72
73/*
52 * The transition to the target patch state is complete. Clean up the data 74 * The transition to the target patch state is complete. Clean up the data
53 * structures. 75 * structures.
54 */ 76 */
@@ -73,7 +95,7 @@ static void klp_complete_transition(void)
73 * func->transition gets cleared, the handler may choose a 95 * func->transition gets cleared, the handler may choose a
74 * removed function. 96 * removed function.
75 */ 97 */
76 synchronize_rcu(); 98 klp_synchronize_transition();
77 } 99 }
78 100
79 if (klp_transition_patch->immediate) 101 if (klp_transition_patch->immediate)
@@ -92,7 +114,7 @@ static void klp_complete_transition(void)
92 114
93 /* Prevent klp_ftrace_handler() from seeing KLP_UNDEFINED state */ 115 /* Prevent klp_ftrace_handler() from seeing KLP_UNDEFINED state */
94 if (klp_target_state == KLP_PATCHED) 116 if (klp_target_state == KLP_PATCHED)
95 synchronize_rcu(); 117 klp_synchronize_transition();
96 118
97 read_lock(&tasklist_lock); 119 read_lock(&tasklist_lock);
98 for_each_process_thread(g, task) { 120 for_each_process_thread(g, task) {
@@ -136,7 +158,11 @@ void klp_cancel_transition(void)
136 */ 158 */
137void klp_update_patch_state(struct task_struct *task) 159void klp_update_patch_state(struct task_struct *task)
138{ 160{
139 rcu_read_lock(); 161 /*
162 * A variant of synchronize_sched() is used to allow patching functions
163 * where RCU is not watching, see klp_synchronize_transition().
164 */
165 preempt_disable_notrace();
140 166
141 /* 167 /*
142 * This test_and_clear_tsk_thread_flag() call also serves as a read 168 * This test_and_clear_tsk_thread_flag() call also serves as a read
@@ -153,7 +179,7 @@ void klp_update_patch_state(struct task_struct *task)
153 if (test_and_clear_tsk_thread_flag(task, TIF_PATCH_PENDING)) 179 if (test_and_clear_tsk_thread_flag(task, TIF_PATCH_PENDING))
154 task->patch_state = READ_ONCE(klp_target_state); 180 task->patch_state = READ_ONCE(klp_target_state);
155 181
156 rcu_read_unlock(); 182 preempt_enable_notrace();
157} 183}
158 184
159/* 185/*
@@ -539,7 +565,7 @@ void klp_reverse_transition(void)
539 clear_tsk_thread_flag(idle_task(cpu), TIF_PATCH_PENDING); 565 clear_tsk_thread_flag(idle_task(cpu), TIF_PATCH_PENDING);
540 566
541 /* Let any remaining calls to klp_update_patch_state() complete */ 567 /* Let any remaining calls to klp_update_patch_state() complete */
542 synchronize_rcu(); 568 klp_synchronize_transition();
543 569
544 klp_start_transition(); 570 klp_start_transition();
545} 571}
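
Both livepatch hunks swap RCU read-side protection for preempt_disable_notrace(), and the writer side pairs that with klp_synchronize_transition(), which forces every CPU through the scheduler by queuing an empty work item (schedule_on_each_cpu(klp_sync)). A toy pthread model of that pairing, where each "CPU" is a mutex held across the read-side critical section; this is an analogy, not the kernel mechanism itself:

    #include <pthread.h>
    #include <stdio.h>

    /* Toy model: a reader holds its CPU's mutex across the
     * preempt-disabled region; the writer drains all of them once, as
     * schedule_on_each_cpu(klp_sync) drains every CPU. */
    #define NR_CPUS 4
    static pthread_mutex_t cpu_lock[NR_CPUS] = {
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
    };

    static void reader(int cpu)
    {
        pthread_mutex_lock(&cpu_lock[cpu]);    /* ~ preempt_disable_notrace() */
        /* ... walk func_stack, call old or new function ... */
        pthread_mutex_unlock(&cpu_lock[cpu]);  /* ~ preempt_enable_notrace() */
    }

    static void klp_synchronize_transition_model(void)
    {
        for (int cpu = 0; cpu < NR_CPUS; cpu++) {
            pthread_mutex_lock(&cpu_lock[cpu]);   /* empty "work" on this CPU */
            pthread_mutex_unlock(&cpu_lock[cpu]);
        }
    }

    int main(void)
    {
        reader(0);
        klp_synchronize_transition_model();
        puts("no reader that started before the sync is still running");
        return 0;
    }
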
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index b95509416909..28cd09e635ed 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1785,12 +1785,14 @@ int rt_mutex_wait_proxy_lock(struct rt_mutex *lock,
1785 int ret; 1785 int ret;
1786 1786
1787 raw_spin_lock_irq(&lock->wait_lock); 1787 raw_spin_lock_irq(&lock->wait_lock);
1788
1789 set_current_state(TASK_INTERRUPTIBLE);
1790
1791 /* sleep on the mutex */ 1788 /* sleep on the mutex */
1789 set_current_state(TASK_INTERRUPTIBLE);
1792 ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter); 1790 ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);
1793 1791 /*
1792 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
1793 * have to fix that up.
1794 */
1795 fixup_rt_mutex_waiters(lock);
1794 raw_spin_unlock_irq(&lock->wait_lock); 1796 raw_spin_unlock_irq(&lock->wait_lock);
1795 1797
1796 return ret; 1798 return ret;
@@ -1822,15 +1824,25 @@ bool rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock,
1822 1824
1823 raw_spin_lock_irq(&lock->wait_lock); 1825 raw_spin_lock_irq(&lock->wait_lock);
1824 /* 1826 /*
1827 * Do an unconditional try-lock, this deals with the lock stealing
1828 * state where __rt_mutex_futex_unlock() -> mark_wakeup_next_waiter()
1829 * sets a NULL owner.
1830 *
1831 * We're not interested in the return value, because the subsequent
1832 * test on rt_mutex_owner() will infer that. If the trylock succeeded,
1833 * we will own the lock and it will have removed the waiter. If we
1834 * failed the trylock, we're still not owner and we need to remove
1835 * ourselves.
1836 */
1837 try_to_take_rt_mutex(lock, current, waiter);
1838 /*
1825 * Unless we're the owner, we're still enqueued on the wait_list. 1839 * Unless we're the owner, we're still enqueued on the wait_list.
1826 * So check if we became owner, if not, take us off the wait_list. 1840 * So check if we became owner, if not, take us off the wait_list.
1827 */ 1841 */
1828 if (rt_mutex_owner(lock) != current) { 1842 if (rt_mutex_owner(lock) != current) {
1829 remove_waiter(lock, waiter); 1843 remove_waiter(lock, waiter);
1830 fixup_rt_mutex_waiters(lock);
1831 cleanup = true; 1844 cleanup = true;
1832 } 1845 }
1833
1834 /* 1846 /*
1835 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might 1847 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
1836 * have to fix that up. 1848 * have to fix that up.
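
The cleanup path now does an unconditional try-lock first because the unlocker may have picked us as the next owner and left a NULL owner behind (lock stealing); only if we are still not the owner may we dequeue ourselves. A toy model of that decision, with invented types rather than the rtmutex API:

    #include <stdbool.h>
    #include <stdio.h>

    struct toy_lock { void *owner; bool queued; };

    static void try_to_take(struct toy_lock *l, void *me)
    {
        if (!l->owner) {        /* NULL owner: lock was handed to us */
            l->owner = me;
            l->queued = false;  /* taking the lock dequeues the waiter */
        }
    }

    static bool cleanup_proxy_lock(struct toy_lock *l, void *me)
    {
        try_to_take(l, me);     /* the new unconditional try-lock */
        if (l->owner != me) {   /* still not owner: remove ourselves */
            l->queued = false;
            return true;        /* caller must clean up the waiter */
        }
        return false;           /* we won the race and own the lock */
    }

    int main(void)
    {
        struct toy_lock stolen = { .owner = NULL, .queued = true };
        int me;

        printf("cleanup needed: %d, owner is us: %d\n",
               cleanup_proxy_lock(&stolen, &me), stolen.owner == &me);
        return 0;
    }
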
diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
index d1f3e9f558b8..74a5a7255b4d 100644
--- a/kernel/pid_namespace.c
+++ b/kernel/pid_namespace.c
@@ -277,7 +277,7 @@ void zap_pid_ns_processes(struct pid_namespace *pid_ns)
277 * if reparented. 277 * if reparented.
278 */ 278 */
279 for (;;) { 279 for (;;) {
280 set_current_state(TASK_UNINTERRUPTIBLE); 280 set_current_state(TASK_INTERRUPTIBLE);
281 if (pid_ns->nr_hashed == init_pids) 281 if (pid_ns->nr_hashed == init_pids)
282 break; 282 break;
283 schedule(); 283 schedule();
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 78672d324a6e..c7209f060eeb 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -132,7 +132,7 @@ int freeze_processes(void)
132 if (!pm_freezing) 132 if (!pm_freezing)
133 atomic_inc(&system_freezing_cnt); 133 atomic_inc(&system_freezing_cnt);
134 134
135 pm_wakeup_clear(true); 135 pm_wakeup_clear();
136 pr_info("Freezing user space processes ... "); 136 pr_info("Freezing user space processes ... ");
137 pm_freezing = true; 137 pm_freezing = true;
138 error = try_to_freeze_tasks(true); 138 error = try_to_freeze_tasks(true);
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 3b1e0f3ad07f..fa46606f3356 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -1425,7 +1425,7 @@ static unsigned int nr_meta_pages;
1425 * Numbers of normal and highmem page frames allocated for hibernation image 1425 * Numbers of normal and highmem page frames allocated for hibernation image
1426 * before suspending devices. 1426 * before suspending devices.
1427 */ 1427 */
1428unsigned int alloc_normal, alloc_highmem; 1428static unsigned int alloc_normal, alloc_highmem;
1429/* 1429/*
1430 * Memory bitmap used for marking saveable pages (during hibernation) or 1430 * Memory bitmap used for marking saveable pages (during hibernation) or
1431 * hibernation image pages (during restore) 1431 * hibernation image pages (during restore)
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index c0248c74d6d4..15e6baef5c73 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -72,8 +72,6 @@ static void freeze_begin(void)
72 72
73static void freeze_enter(void) 73static void freeze_enter(void)
74{ 74{
75 trace_suspend_resume(TPS("machine_suspend"), PM_SUSPEND_FREEZE, true);
76
77 spin_lock_irq(&suspend_freeze_lock); 75 spin_lock_irq(&suspend_freeze_lock);
78 if (pm_wakeup_pending()) 76 if (pm_wakeup_pending())
79 goto out; 77 goto out;
@@ -100,27 +98,6 @@ static void freeze_enter(void)
100 out: 98 out:
101 suspend_freeze_state = FREEZE_STATE_NONE; 99 suspend_freeze_state = FREEZE_STATE_NONE;
102 spin_unlock_irq(&suspend_freeze_lock); 100 spin_unlock_irq(&suspend_freeze_lock);
103
104 trace_suspend_resume(TPS("machine_suspend"), PM_SUSPEND_FREEZE, false);
105}
106
107static void s2idle_loop(void)
108{
109 do {
110 freeze_enter();
111
112 if (freeze_ops && freeze_ops->wake)
113 freeze_ops->wake();
114
115 dpm_resume_noirq(PMSG_RESUME);
116 if (freeze_ops && freeze_ops->sync)
117 freeze_ops->sync();
118
119 if (pm_wakeup_pending())
120 break;
121
122 pm_wakeup_clear(false);
123 } while (!dpm_suspend_noirq(PMSG_SUSPEND));
124} 101}
125 102
126void freeze_wake(void) 103void freeze_wake(void)
@@ -394,8 +371,10 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
394 * all the devices are suspended. 371 * all the devices are suspended.
395 */ 372 */
396 if (state == PM_SUSPEND_FREEZE) { 373 if (state == PM_SUSPEND_FREEZE) {
397 s2idle_loop(); 374 trace_suspend_resume(TPS("machine_suspend"), state, true);
398 goto Platform_early_resume; 375 freeze_enter();
376 trace_suspend_resume(TPS("machine_suspend"), state, false);
377 goto Platform_wake;
399 } 378 }
400 379
401 error = disable_nonboot_cpus(); 380 error = disable_nonboot_cpus();
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index a1aecf44ab07..a1db38abac5b 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -269,7 +269,6 @@ static struct console *exclusive_console;
269#define MAX_CMDLINECONSOLES 8 269#define MAX_CMDLINECONSOLES 8
270 270
271static struct console_cmdline console_cmdline[MAX_CMDLINECONSOLES]; 271static struct console_cmdline console_cmdline[MAX_CMDLINECONSOLES];
272static int console_cmdline_cnt;
273 272
274static int preferred_console = -1; 273static int preferred_console = -1;
275int console_set_on_cmdline; 274int console_set_on_cmdline;
@@ -1906,25 +1905,12 @@ static int __add_preferred_console(char *name, int idx, char *options,
1906 * See if this tty is not yet registered, and 1905 * See if this tty is not yet registered, and
1907 * if we have a slot free. 1906 * if we have a slot free.
1908 */ 1907 */
1909 for (i = 0, c = console_cmdline; i < console_cmdline_cnt; i++, c++) { 1908 for (i = 0, c = console_cmdline;
1909 i < MAX_CMDLINECONSOLES && c->name[0];
1910 i++, c++) {
1910 if (strcmp(c->name, name) == 0 && c->index == idx) { 1911 if (strcmp(c->name, name) == 0 && c->index == idx) {
1911 if (brl_options) 1912 if (!brl_options)
1912 return 0; 1913 preferred_console = i;
1913
1914 /*
1915 * Maintain an invariant that will help to find if
1916 * the matching console is preferred, see
1917 * register_console():
1918 *
1919 * The last non-braille console is always
1920 * the preferred one.
1921 */
1922 if (i != console_cmdline_cnt - 1)
1923 swap(console_cmdline[i],
1924 console_cmdline[console_cmdline_cnt - 1]);
1925
1926 preferred_console = console_cmdline_cnt - 1;
1927
1928 return 0; 1914 return 0;
1929 } 1915 }
1930 } 1916 }
@@ -1937,7 +1923,6 @@ static int __add_preferred_console(char *name, int idx, char *options,
1937 braille_set_options(c, brl_options); 1923 braille_set_options(c, brl_options);
1938 1924
1939 c->index = idx; 1925 c->index = idx;
1940 console_cmdline_cnt++;
1941 return 0; 1926 return 0;
1942} 1927}
1943/* 1928/*
@@ -2477,23 +2462,12 @@ void register_console(struct console *newcon)
2477 } 2462 }
2478 2463
2479 /* 2464 /*
2480 * See if this console matches one we selected on the command line. 2465 * See if this console matches one we selected on
2481 * 2466 * the command line.
2482 * There may be several entries in the console_cmdline array matching
2483 * with the same console, one with newcon->match(), another by
2484 * name/index:
2485 *
2486 * pl011,mmio,0x87e024000000,115200 -- added from SPCR
2487 * ttyAMA0 -- added from command line
2488 *
2489 * Traverse the console_cmdline array in reverse order to be
2490 * sure that if this console is preferred then it will be the first
2491 * matching entry. We use the invariant that is maintained in
2492 * __add_preferred_console().
2493 */ 2467 */
2494 for (i = console_cmdline_cnt - 1; i >= 0; i--) { 2468 for (i = 0, c = console_cmdline;
2495 c = console_cmdline + i; 2469 i < MAX_CMDLINECONSOLES && c->name[0];
2496 2470 i++, c++) {
2497 if (!newcon->match || 2471 if (!newcon->match ||
2498 newcon->match(newcon, c->name, c->index, c->options) != 0) { 2472 newcon->match(newcon, c->name, c->index, c->options) != 0) {
2499 /* default matching */ 2473 /* default matching */
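
This revert returns __add_preferred_console() to the simple scan: the array is walked up to the first entry with an empty name, and a non-braille match records its slot as the preferred console, with no swapping. A sketch of that restored scan (invented struct and sizes):

    #include <stdio.h>
    #include <string.h>

    #define MAX_CONSOLES 8

    struct entry { char name[16]; int index; };

    /* An empty name terminates the used part of the array. */
    static struct entry cmdline[MAX_CONSOLES] = {
        { "ttyS", 0 }, { "tty", 0 },
    };

    static int preferred_slot(const char *name, int idx)
    {
        for (int i = 0; i < MAX_CONSOLES && cmdline[i].name[0]; i++)
            if (!strcmp(cmdline[i].name, name) && cmdline[i].index == idx)
                return i;       /* the patch sets preferred_console = i */
        return -1;
    }

    int main(void)
    {
        printf("preferred slot for tty0: %d\n", preferred_slot("tty", 0));
        return 0;
    }
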
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 266ddcc1d8bb..60f356d91060 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -60,19 +60,25 @@ int ptrace_access_vm(struct task_struct *tsk, unsigned long addr,
60} 60}
61 61
62 62
63void __ptrace_link(struct task_struct *child, struct task_struct *new_parent,
64 const struct cred *ptracer_cred)
65{
66 BUG_ON(!list_empty(&child->ptrace_entry));
67 list_add(&child->ptrace_entry, &new_parent->ptraced);
68 child->parent = new_parent;
69 child->ptracer_cred = get_cred(ptracer_cred);
70}
71
63/* 72/*
64 * ptrace a task: make the debugger its new parent and 73 * ptrace a task: make the debugger its new parent and
65 * move it to the ptrace list. 74 * move it to the ptrace list.
66 * 75 *
67 * Must be called with the tasklist lock write-held. 76 * Must be called with the tasklist lock write-held.
68 */ 77 */
69void __ptrace_link(struct task_struct *child, struct task_struct *new_parent) 78static void ptrace_link(struct task_struct *child, struct task_struct *new_parent)
70{ 79{
71 BUG_ON(!list_empty(&child->ptrace_entry));
72 list_add(&child->ptrace_entry, &new_parent->ptraced);
73 child->parent = new_parent;
74 rcu_read_lock(); 80 rcu_read_lock();
75 child->ptracer_cred = get_cred(__task_cred(new_parent)); 81 __ptrace_link(child, new_parent, __task_cred(new_parent));
76 rcu_read_unlock(); 82 rcu_read_unlock();
77} 83}
78 84
@@ -386,7 +392,7 @@ static int ptrace_attach(struct task_struct *task, long request,
386 flags |= PT_SEIZED; 392 flags |= PT_SEIZED;
387 task->ptrace = flags; 393 task->ptrace = flags;
388 394
389 __ptrace_link(task, current); 395 ptrace_link(task, current);
390 396
391 /* SEIZE doesn't trap tracee on attach */ 397 /* SEIZE doesn't trap tracee on attach */
392 if (!seize) 398 if (!seize)
@@ -459,7 +465,7 @@ static int ptrace_traceme(void)
459 */ 465 */
460 if (!ret && !(current->real_parent->flags & PF_EXITING)) { 466 if (!ret && !(current->real_parent->flags & PF_EXITING)) {
461 current->ptrace = PT_PTRACED; 467 current->ptrace = PT_PTRACED;
462 __ptrace_link(current, current->real_parent); 468 ptrace_link(current, current->real_parent);
463 } 469 }
464 } 470 }
465 write_unlock_irq(&tasklist_lock); 471 write_unlock_irq(&tasklist_lock);
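
The refactor splits linking from credential lookup: __ptrace_link() now takes the ptracer credentials explicitly, and ptrace_link() is the thin wrapper that resolves the new parent's own creds (which the kernel does under rcu_read_lock()). The shape of the split, with stand-in types:

    #include <stdio.h>

    struct cred { int uid; };
    struct task {
        struct task *parent;
        const struct cred *cred;          /* the task's own credentials */
        const struct cred *ptracer_cred;  /* creds of whoever traces it */
    };

    static void __ptrace_link_model(struct task *child, struct task *new_parent,
                                    const struct cred *ptracer_cred)
    {
        child->parent = new_parent;
        child->ptracer_cred = ptracer_cred;   /* caller picks the creds */
    }

    static void ptrace_link_model(struct task *child, struct task *new_parent)
    {
        /* common case: charge the tracing to the new parent's own creds */
        __ptrace_link_model(child, new_parent, new_parent->cred);
    }

    int main(void)
    {
        struct cred c = { 1000 };
        struct task parent = { .cred = &c }, child = { 0 };

        ptrace_link_model(&child, &parent);
        printf("child traced under uid %d\n", child.ptracer_cred->uid);
        return 0;
    }
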
diff --git a/kernel/rcu/srcu.c b/kernel/rcu/srcu.c
index 584d8a983883..dea03614263f 100644
--- a/kernel/rcu/srcu.c
+++ b/kernel/rcu/srcu.c
@@ -263,7 +263,7 @@ EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
263 263
264/* 264/*
265 * Counts the new reader in the appropriate per-CPU element of the 265 * Counts the new reader in the appropriate per-CPU element of the
266 * srcu_struct. Must be called from process context. 266 * srcu_struct.
267 * Returns an index that must be passed to the matching srcu_read_unlock(). 267 * Returns an index that must be passed to the matching srcu_read_unlock().
268 */ 268 */
269int __srcu_read_lock(struct srcu_struct *sp) 269int __srcu_read_lock(struct srcu_struct *sp)
@@ -271,7 +271,7 @@ int __srcu_read_lock(struct srcu_struct *sp)
271 int idx; 271 int idx;
272 272
273 idx = READ_ONCE(sp->completed) & 0x1; 273 idx = READ_ONCE(sp->completed) & 0x1;
274 __this_cpu_inc(sp->per_cpu_ref->lock_count[idx]); 274 this_cpu_inc(sp->per_cpu_ref->lock_count[idx]);
275 smp_mb(); /* B */ /* Avoid leaking the critical section. */ 275 smp_mb(); /* B */ /* Avoid leaking the critical section. */
276 return idx; 276 return idx;
277} 277}
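
The switch from __this_cpu_inc() to this_cpu_inc() makes the read-side counter update safe against interrupts on the same CPU: the double-underscore form is an open-coded load/add/store, while the plain form is a single indivisible per-CPU operation. A userspace analogy using a C11 atomic for the indivisible case (the kernel uses per-CPU ops, not atomics, but the distinction is the same):

    #include <stdatomic.h>
    #include <stdio.h>

    static unsigned long plain_count;   /* ~ __this_cpu_inc() */
    static atomic_ulong  safe_count;    /* ~ this_cpu_inc() */

    int main(void)
    {
        plain_count = plain_count + 1;     /* three interruptible steps */
        atomic_fetch_add(&safe_count, 1);  /* one indivisible update */
        printf("plain=%lu safe=%lu\n", plain_count,
               (unsigned long)atomic_load(&safe_count));
        return 0;
    }
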
@@ -281,7 +281,6 @@ EXPORT_SYMBOL_GPL(__srcu_read_lock);
281 * Removes the count for the old reader from the appropriate per-CPU 281 * Removes the count for the old reader from the appropriate per-CPU
282 * element of the srcu_struct. Note that this may well be a different 282 * element of the srcu_struct. Note that this may well be a different
283 * CPU than that which was incremented by the corresponding srcu_read_lock(). 283 * CPU than that which was incremented by the corresponding srcu_read_lock().
284 * Must be called from process context.
285 */ 284 */
286void __srcu_read_unlock(struct srcu_struct *sp, int idx) 285void __srcu_read_unlock(struct srcu_struct *sp, int idx)
287{ 286{
diff --git a/kernel/rcu/srcutiny.c b/kernel/rcu/srcutiny.c
index 36e1f82faed1..32798eb14853 100644
--- a/kernel/rcu/srcutiny.c
+++ b/kernel/rcu/srcutiny.c
@@ -97,8 +97,9 @@ EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
97 97
98/* 98/*
99 * Counts the new reader in the appropriate per-CPU element of the 99 * Counts the new reader in the appropriate per-CPU element of the
100 * srcu_struct. Must be called from process context. 100 * srcu_struct. Can be invoked from irq/bh handlers, but the matching
101 * Returns an index that must be passed to the matching srcu_read_unlock(). 101 * __srcu_read_unlock() must be in the same handler instance. Returns an
102 * index that must be passed to the matching srcu_read_unlock().
102 */ 103 */
103int __srcu_read_lock(struct srcu_struct *sp) 104int __srcu_read_lock(struct srcu_struct *sp)
104{ 105{
@@ -112,7 +113,7 @@ EXPORT_SYMBOL_GPL(__srcu_read_lock);
112 113
113/* 114/*
114 * Removes the count for the old reader from the appropriate element of 115 * Removes the count for the old reader from the appropriate element of
115 * the srcu_struct. Must be called from process context. 116 * the srcu_struct.
116 */ 117 */
117void __srcu_read_unlock(struct srcu_struct *sp, int idx) 118void __srcu_read_unlock(struct srcu_struct *sp, int idx)
118{ 119{
diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c
index 3ae8474557df..157654fa436a 100644
--- a/kernel/rcu/srcutree.c
+++ b/kernel/rcu/srcutree.c
@@ -357,7 +357,7 @@ EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
357 357
358/* 358/*
359 * Counts the new reader in the appropriate per-CPU element of the 359 * Counts the new reader in the appropriate per-CPU element of the
360 * srcu_struct. Must be called from process context. 360 * srcu_struct.
361 * Returns an index that must be passed to the matching srcu_read_unlock(). 361 * Returns an index that must be passed to the matching srcu_read_unlock().
362 */ 362 */
363int __srcu_read_lock(struct srcu_struct *sp) 363int __srcu_read_lock(struct srcu_struct *sp)
@@ -365,7 +365,7 @@ int __srcu_read_lock(struct srcu_struct *sp)
365 int idx; 365 int idx;
366 366
367 idx = READ_ONCE(sp->srcu_idx) & 0x1; 367 idx = READ_ONCE(sp->srcu_idx) & 0x1;
368 __this_cpu_inc(sp->sda->srcu_lock_count[idx]); 368 this_cpu_inc(sp->sda->srcu_lock_count[idx]);
369 smp_mb(); /* B */ /* Avoid leaking the critical section. */ 369 smp_mb(); /* B */ /* Avoid leaking the critical section. */
370 return idx; 370 return idx;
371} 371}
@@ -375,7 +375,6 @@ EXPORT_SYMBOL_GPL(__srcu_read_lock);
375 * Removes the count for the old reader from the appropriate per-CPU 375 * Removes the count for the old reader from the appropriate per-CPU
376 * element of the srcu_struct. Note that this may well be a different 376 * element of the srcu_struct. Note that this may well be a different
377 * CPU than that which was incremented by the corresponding srcu_read_lock(). 377 * CPU than that which was incremented by the corresponding srcu_read_lock().
378 * Must be called from process context.
379 */ 378 */
380void __srcu_read_unlock(struct srcu_struct *sp, int idx) 379void __srcu_read_unlock(struct srcu_struct *sp, int idx)
381{ 380{
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 759f4bd52cd6..326d4f88e2b1 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3502,6 +3502,31 @@ asmlinkage __visible void __sched schedule(void)
3502} 3502}
3503EXPORT_SYMBOL(schedule); 3503EXPORT_SYMBOL(schedule);
3504 3504
3505/*
3506 * synchronize_rcu_tasks() makes sure that no task is stuck in preempted
3507 * state (i.e. has scheduled out non-voluntarily) by making sure that all
3508 * tasks have either left the run queue or have gone into user space.
3509 * As idle tasks do not do either, they must not ever be preempted
3510 * (schedule out non-voluntarily).
3511 *
3512 * schedule_idle() is similar to schedule_preempt_disabled() except that it
3513 * never enables preemption because it does not call sched_submit_work().
3514 */
3515void __sched schedule_idle(void)
3516{
3517 /*
3518 * As this skips calling sched_submit_work(), which the idle task does
3519 * regardless because that function is a nop when the task is in a
3520 * TASK_RUNNING state, make sure this isn't used someplace that the
3521 * current task can be in any other state. Note, idle is always in the
3522 * TASK_RUNNING state.
3523 */
3524 WARN_ON_ONCE(current->state);
3525 do {
3526 __schedule(false);
3527 } while (need_resched());
3528}
3529
3505#ifdef CONFIG_CONTEXT_TRACKING 3530#ifdef CONFIG_CONTEXT_TRACKING
3506asmlinkage __visible void __sched schedule_user(void) 3531asmlinkage __visible void __sched schedule_user(void)
3507{ 3532{
@@ -5580,7 +5605,7 @@ void idle_task_exit(void)
5580 BUG_ON(cpu_online(smp_processor_id())); 5605 BUG_ON(cpu_online(smp_processor_id()));
5581 5606
5582 if (mm != &init_mm) { 5607 if (mm != &init_mm) {
5583 switch_mm_irqs_off(mm, &init_mm, current); 5608 switch_mm(mm, &init_mm, current);
5584 finish_arch_post_lock_switch(); 5609 finish_arch_post_lock_switch();
5585 } 5610 }
5586 mmdrop(mm); 5611 mmdrop(mm);
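
schedule_idle() above exists so the idle loop can reschedule without ever enabling preemption. Its control flow reduces to a small loop, sketched here with stubs for __schedule() and need_resched():

    #include <stdbool.h>
    #include <stdio.h>

    /* Stubs: two pending reschedules, then quiescence. */
    static int pending = 2;
    static bool need_resched(void) { return pending-- > 0; }
    static void __schedule(bool preempt) { (void)preempt; puts("pick next task"); }

    static void schedule_idle_model(void)
    {
        do {
            __schedule(false);      /* never a preemption-path entry */
        } while (need_resched());   /* loop while work is still pending */
    }

    int main(void)
    {
        schedule_idle_model();
        return 0;
    }
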
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 76877a62b5fa..076a2e31951c 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -101,9 +101,6 @@ static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
101 if (sg_policy->next_freq == next_freq) 101 if (sg_policy->next_freq == next_freq)
102 return; 102 return;
103 103
104 if (sg_policy->next_freq > next_freq)
105 next_freq = (sg_policy->next_freq + next_freq) >> 1;
106
107 sg_policy->next_freq = next_freq; 104 sg_policy->next_freq = next_freq;
108 sg_policy->last_freq_update_time = time; 105 sg_policy->last_freq_update_time = time;
109 106
@@ -245,11 +242,10 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
245 sugov_update_commit(sg_policy, time, next_f); 242 sugov_update_commit(sg_policy, time, next_f);
246} 243}
247 244
248static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu) 245static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
249{ 246{
250 struct sugov_policy *sg_policy = sg_cpu->sg_policy; 247 struct sugov_policy *sg_policy = sg_cpu->sg_policy;
251 struct cpufreq_policy *policy = sg_policy->policy; 248 struct cpufreq_policy *policy = sg_policy->policy;
252 u64 last_freq_update_time = sg_policy->last_freq_update_time;
253 unsigned long util = 0, max = 1; 249 unsigned long util = 0, max = 1;
254 unsigned int j; 250 unsigned int j;
255 251
@@ -265,7 +261,7 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu)
265 * enough, don't take the CPU into account as it probably is 261 * enough, don't take the CPU into account as it probably is
266 * idle now (and clear iowait_boost for it). 262 * idle now (and clear iowait_boost for it).
267 */ 263 */
268 delta_ns = last_freq_update_time - j_sg_cpu->last_update; 264 delta_ns = time - j_sg_cpu->last_update;
269 if (delta_ns > TICK_NSEC) { 265 if (delta_ns > TICK_NSEC) {
270 j_sg_cpu->iowait_boost = 0; 266 j_sg_cpu->iowait_boost = 0;
271 continue; 267 continue;
@@ -309,7 +305,7 @@ static void sugov_update_shared(struct update_util_data *hook, u64 time,
309 if (flags & SCHED_CPUFREQ_RT_DL) 305 if (flags & SCHED_CPUFREQ_RT_DL)
310 next_f = sg_policy->policy->cpuinfo.max_freq; 306 next_f = sg_policy->policy->cpuinfo.max_freq;
311 else 307 else
312 next_f = sugov_next_freq_shared(sg_cpu); 308 next_f = sugov_next_freq_shared(sg_cpu, time);
313 309
314 sugov_update_commit(sg_policy, time, next_f); 310 sugov_update_commit(sg_policy, time, next_f);
315 } 311 }
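
With `time` passed down, each remote CPU's utilization sample is aged against the current update time rather than the time of the last frequency change, so stale CPUs are ignored (and their iowait boost cleared). A sketch of that staleness test, with an invented TICK_NS constant:

    #include <stdint.h>
    #include <stdio.h>

    #define TICK_NS 4000000ULL      /* invented ~4ms tick for the example */

    /* Returns the CPU's contribution to the shared frequency request:
     * zero if its last sample is older than a tick relative to "now". */
    static unsigned long cpu_util_if_fresh(uint64_t now, uint64_t last_update,
                                           unsigned long util)
    {
        uint64_t delta_ns = now - last_update;

        if (delta_ns > TICK_NS)
            return 0;               /* CPU looks idle: skip it */
        return util;
    }

    int main(void)
    {
        printf("fresh sample: %lu, stale sample: %lu\n",
               cpu_util_if_fresh(10000000, 9000000, 500),
               cpu_util_if_fresh(10000000, 1000000, 500));
        return 0;
    }
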
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index d71109321841..c77e4b1d51c0 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3563,7 +3563,7 @@ static inline void check_schedstat_required(void)
3563 trace_sched_stat_runtime_enabled()) { 3563 trace_sched_stat_runtime_enabled()) {
3564 printk_deferred_once("Scheduler tracepoints stat_sleep, stat_iowait, " 3564 printk_deferred_once("Scheduler tracepoints stat_sleep, stat_iowait, "
3565 "stat_blocked and stat_runtime require the " 3565 "stat_blocked and stat_runtime require the "
3566 "kernel parameter schedstats=enabled or " 3566 "kernel parameter schedstats=enable or "
3567 "kernel.sched_schedstats=1\n"); 3567 "kernel.sched_schedstats=1\n");
3568 } 3568 }
3569#endif 3569#endif
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 2a25a9ec2c6e..ef63adce0c9c 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -265,7 +265,7 @@ static void do_idle(void)
265 smp_mb__after_atomic(); 265 smp_mb__after_atomic();
266 266
267 sched_ttwu_pending(); 267 sched_ttwu_pending();
268 schedule_preempt_disabled(); 268 schedule_idle();
269 269
270 if (unlikely(klp_patch_pending(current))) 270 if (unlikely(klp_patch_pending(current)))
271 klp_update_patch_state(current); 271 klp_update_patch_state(current);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 7808ab050599..6dda2aab731e 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1467,6 +1467,8 @@ static inline struct cpuidle_state *idle_get_state(struct rq *rq)
1467} 1467}
1468#endif 1468#endif
1469 1469
1470extern void schedule_idle(void);
1471
1470extern void sysrq_sched_debug_show(void); 1472extern void sysrq_sched_debug_show(void);
1471extern void sched_init_granularity(void); 1473extern void sched_init_granularity(void);
1472extern void update_max_interval(void); 1474extern void update_max_interval(void);
diff --git a/kernel/signal.c b/kernel/signal.c
index ca92bcfeb322..45b4c1ffe14e 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -510,7 +510,8 @@ int unhandled_signal(struct task_struct *tsk, int sig)
510 return !tsk->ptrace; 510 return !tsk->ptrace;
511} 511}
512 512
513static void collect_signal(int sig, struct sigpending *list, siginfo_t *info) 513static void collect_signal(int sig, struct sigpending *list, siginfo_t *info,
514 bool *resched_timer)
514{ 515{
515 struct sigqueue *q, *first = NULL; 516 struct sigqueue *q, *first = NULL;
516 517
@@ -532,6 +533,12 @@ static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
532still_pending: 533still_pending:
533 list_del_init(&first->list); 534 list_del_init(&first->list);
534 copy_siginfo(info, &first->info); 535 copy_siginfo(info, &first->info);
536
537 *resched_timer =
538 (first->flags & SIGQUEUE_PREALLOC) &&
539 (info->si_code == SI_TIMER) &&
540 (info->si_sys_private);
541
535 __sigqueue_free(first); 542 __sigqueue_free(first);
536 } else { 543 } else {
537 /* 544 /*
@@ -548,12 +555,12 @@ still_pending:
548} 555}
549 556
550static int __dequeue_signal(struct sigpending *pending, sigset_t *mask, 557static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
551 siginfo_t *info) 558 siginfo_t *info, bool *resched_timer)
552{ 559{
553 int sig = next_signal(pending, mask); 560 int sig = next_signal(pending, mask);
554 561
555 if (sig) 562 if (sig)
556 collect_signal(sig, pending, info); 563 collect_signal(sig, pending, info, resched_timer);
557 return sig; 564 return sig;
558} 565}
559 566
@@ -565,15 +572,16 @@ static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
565 */ 572 */
566int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) 573int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
567{ 574{
575 bool resched_timer = false;
568 int signr; 576 int signr;
569 577
570 /* We only dequeue private signals from ourselves, we don't let 578 /* We only dequeue private signals from ourselves, we don't let
571 * signalfd steal them 579 * signalfd steal them
572 */ 580 */
573 signr = __dequeue_signal(&tsk->pending, mask, info); 581 signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
574 if (!signr) { 582 if (!signr) {
575 signr = __dequeue_signal(&tsk->signal->shared_pending, 583 signr = __dequeue_signal(&tsk->signal->shared_pending,
576 mask, info); 584 mask, info, &resched_timer);
577#ifdef CONFIG_POSIX_TIMERS 585#ifdef CONFIG_POSIX_TIMERS
578 /* 586 /*
579 * itimer signal ? 587 * itimer signal ?
@@ -621,7 +629,7 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
621 current->jobctl |= JOBCTL_STOP_DEQUEUED; 629 current->jobctl |= JOBCTL_STOP_DEQUEUED;
622 } 630 }
623#ifdef CONFIG_POSIX_TIMERS 631#ifdef CONFIG_POSIX_TIMERS
624 if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) { 632 if (resched_timer) {
625 /* 633 /*
626 * Release the siglock to ensure proper locking order 634 * Release the siglock to ensure proper locking order
627 * of timer locks outside of siglocks. Note, we leave 635 * of timer locks outside of siglocks. Note, we leave
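
collect_signal() must decide whether a posix timer needs re-arming while the queue entry still exists, so the verdict travels out through a new bool parameter instead of being reconstructed from si_code after __sigqueue_free(). A toy rendition of that out-parameter pattern (invented types; SI_TIMER and the field names are stand-ins):

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define SI_TIMER 1
    struct sigqueue { bool prealloc; int si_code; int sys_private; };

    static void collect_signal(struct sigqueue *q, bool *resched_timer)
    {
        /* decide while the entry is still alive ... */
        *resched_timer = q->prealloc &&
                         q->si_code == SI_TIMER &&
                         q->sys_private;
        free(q);    /* ... because after this its fields are gone */
    }

    int main(void)
    {
        struct sigqueue *q = malloc(sizeof(*q));
        bool resched;

        if (!q)
            return 1;
        *q = (struct sigqueue){ .prealloc = true, .si_code = SI_TIMER,
                                .sys_private = 1 };
        collect_signal(q, &resched);
        printf("resched_timer=%d\n", resched);
        return 0;
    }
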
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index 5cb5b0008d97..ee2f4202d82a 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -387,7 +387,7 @@ void alarm_start_relative(struct alarm *alarm, ktime_t start)
387{ 387{
388 struct alarm_base *base = &alarm_bases[alarm->type]; 388 struct alarm_base *base = &alarm_bases[alarm->type];
389 389
390 start = ktime_add(start, base->gettime()); 390 start = ktime_add_safe(start, base->gettime());
391 alarm_start(alarm, start); 391 alarm_start(alarm, start);
392} 392}
393EXPORT_SYMBOL_GPL(alarm_start_relative); 393EXPORT_SYMBOL_GPL(alarm_start_relative);
@@ -475,7 +475,7 @@ u64 alarm_forward(struct alarm *alarm, ktime_t now, ktime_t interval)
475 overrun++; 475 overrun++;
476 } 476 }
477 477
478 alarm->node.expires = ktime_add(alarm->node.expires, interval); 478 alarm->node.expires = ktime_add_safe(alarm->node.expires, interval);
479 return overrun; 479 return overrun;
480} 480}
481EXPORT_SYMBOL_GPL(alarm_forward); 481EXPORT_SYMBOL_GPL(alarm_forward);
@@ -660,13 +660,21 @@ static int alarm_timer_set(struct k_itimer *timr, int flags,
660 660
661 /* start the timer */ 661 /* start the timer */
662 timr->it.alarm.interval = timespec64_to_ktime(new_setting->it_interval); 662 timr->it.alarm.interval = timespec64_to_ktime(new_setting->it_interval);
663
664 /*
665 * Rate limit to the tick as a hot fix to prevent DoS. Will be
666 * mopped up later.
667 */
668 if (timr->it.alarm.interval < TICK_NSEC)
669 timr->it.alarm.interval = TICK_NSEC;
670
663 exp = timespec64_to_ktime(new_setting->it_value); 671 exp = timespec64_to_ktime(new_setting->it_value);
664 /* Convert (if necessary) to absolute time */ 672 /* Convert (if necessary) to absolute time */
665 if (flags != TIMER_ABSTIME) { 673 if (flags != TIMER_ABSTIME) {
666 ktime_t now; 674 ktime_t now;
667 675
668 now = alarm_bases[timr->it.alarm.alarmtimer.type].gettime(); 676 now = alarm_bases[timr->it.alarm.alarmtimer.type].gettime();
669 exp = ktime_add(now, exp); 677 exp = ktime_add_safe(now, exp);
670 } 678 }
671 679
672 alarm_start(&timr->it.alarm.alarmtimer, exp); 680 alarm_start(&timr->it.alarm.alarmtimer, exp);
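
Each ktime_add() → ktime_add_safe() conversion closes a signed-overflow hole: a huge relative expiry wraps negative when the current time is added, which can make the timer fire immediately or loop. ktime_add_safe() saturates at KTIME_MAX instead; a minimal reimplementation of the idea, not the kernel's exact code:

    #include <stdint.h>
    #include <stdio.h>

    typedef int64_t ktime;
    #define KTIME_MAX INT64_MAX

    /* Both operands are non-negative here, so a negative sum can only
     * mean signed overflow, and we clamp instead of wrapping. */
    static ktime ktime_add_safe_model(ktime a, ktime b)
    {
        ktime res = (ktime)((uint64_t)a + (uint64_t)b);

        return res < 0 ? KTIME_MAX : res;
    }

    int main(void)
    {
        printf("clamped: %lld\n",
               (long long)ktime_add_safe_model(KTIME_MAX - 5, 100));
        return 0;
    }
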
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index 1370f067fb51..d2a1e6dd0291 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -825,8 +825,10 @@ static void check_thread_timers(struct task_struct *tsk,
825 * At the hard limit, we just die. 825 * At the hard limit, we just die.
826 * No need to calculate anything else now. 826 * No need to calculate anything else now.
827 */ 827 */
828 pr_info("CPU Watchdog Timeout (hard): %s[%d]\n", 828 if (print_fatal_signals) {
829 tsk->comm, task_pid_nr(tsk)); 829 pr_info("CPU Watchdog Timeout (hard): %s[%d]\n",
830 tsk->comm, task_pid_nr(tsk));
831 }
830 __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk); 832 __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
831 return; 833 return;
832 } 834 }
@@ -838,8 +840,10 @@ static void check_thread_timers(struct task_struct *tsk,
838 soft += USEC_PER_SEC; 840 soft += USEC_PER_SEC;
839 sig->rlim[RLIMIT_RTTIME].rlim_cur = soft; 841 sig->rlim[RLIMIT_RTTIME].rlim_cur = soft;
840 } 842 }
841 pr_info("RT Watchdog Timeout (soft): %s[%d]\n", 843 if (print_fatal_signals) {
842 tsk->comm, task_pid_nr(tsk)); 844 pr_info("RT Watchdog Timeout (soft): %s[%d]\n",
845 tsk->comm, task_pid_nr(tsk));
846 }
843 __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk); 847 __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
844 } 848 }
845 } 849 }
@@ -936,8 +940,10 @@ static void check_process_timers(struct task_struct *tsk,
936 * At the hard limit, we just die. 940 * At the hard limit, we just die.
937 * No need to calculate anything else now. 941 * No need to calculate anything else now.
938 */ 942 */
939 pr_info("RT Watchdog Timeout (hard): %s[%d]\n", 943 if (print_fatal_signals) {
940 tsk->comm, task_pid_nr(tsk)); 944 pr_info("RT Watchdog Timeout (hard): %s[%d]\n",
945 tsk->comm, task_pid_nr(tsk));
946 }
941 __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk); 947 __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
942 return; 948 return;
943 } 949 }
@@ -945,8 +951,10 @@ static void check_process_timers(struct task_struct *tsk,
945 /* 951 /*
946 * At the soft limit, send a SIGXCPU every second. 952 * At the soft limit, send a SIGXCPU every second.
947 */ 953 */
948 pr_info("CPU Watchdog Timeout (soft): %s[%d]\n", 954 if (print_fatal_signals) {
949 tsk->comm, task_pid_nr(tsk)); 955 pr_info("CPU Watchdog Timeout (soft): %s[%d]\n",
956 tsk->comm, task_pid_nr(tsk));
957 }
950 __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk); 958 __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
951 if (soft < hard) { 959 if (soft < hard) {
952 soft++; 960 soft++;
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 987e496bb51a..b398c2ea69b2 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -37,9 +37,11 @@ static int tick_broadcast_forced;
37static __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(tick_broadcast_lock); 37static __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(tick_broadcast_lock);
38 38
39#ifdef CONFIG_TICK_ONESHOT 39#ifdef CONFIG_TICK_ONESHOT
40static void tick_broadcast_setup_oneshot(struct clock_event_device *bc);
40static void tick_broadcast_clear_oneshot(int cpu); 41static void tick_broadcast_clear_oneshot(int cpu);
41static void tick_resume_broadcast_oneshot(struct clock_event_device *bc); 42static void tick_resume_broadcast_oneshot(struct clock_event_device *bc);
42#else 43#else
44static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc) { BUG(); }
43static inline void tick_broadcast_clear_oneshot(int cpu) { } 45static inline void tick_broadcast_clear_oneshot(int cpu) { }
44static inline void tick_resume_broadcast_oneshot(struct clock_event_device *bc) { } 46static inline void tick_resume_broadcast_oneshot(struct clock_event_device *bc) { }
45#endif 47#endif
@@ -867,7 +869,7 @@ static void tick_broadcast_init_next_event(struct cpumask *mask,
867/** 869/**
868 * tick_broadcast_setup_oneshot - setup the broadcast device 870 * tick_broadcast_setup_oneshot - setup the broadcast device
869 */ 871 */
870void tick_broadcast_setup_oneshot(struct clock_event_device *bc) 872static void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
871{ 873{
872 int cpu = smp_processor_id(); 874 int cpu = smp_processor_id();
873 875
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index f738251000fe..be0ac01f2e12 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -126,7 +126,6 @@ static inline int tick_check_oneshot_change(int allow_nohz) { return 0; }
126 126
127/* Functions related to oneshot broadcasting */ 127/* Functions related to oneshot broadcasting */
128#if defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) && defined(CONFIG_TICK_ONESHOT) 128#if defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) && defined(CONFIG_TICK_ONESHOT)
129extern void tick_broadcast_setup_oneshot(struct clock_event_device *bc);
130extern void tick_broadcast_switch_to_oneshot(void); 129extern void tick_broadcast_switch_to_oneshot(void);
131extern void tick_shutdown_broadcast_oneshot(unsigned int cpu); 130extern void tick_shutdown_broadcast_oneshot(unsigned int cpu);
132extern int tick_broadcast_oneshot_active(void); 131extern int tick_broadcast_oneshot_active(void);
@@ -134,7 +133,6 @@ extern void tick_check_oneshot_broadcast_this_cpu(void);
134bool tick_broadcast_oneshot_available(void); 133bool tick_broadcast_oneshot_available(void);
135extern struct cpumask *tick_get_broadcast_oneshot_mask(void); 134extern struct cpumask *tick_get_broadcast_oneshot_mask(void);
136#else /* !(BROADCAST && ONESHOT): */ 135#else /* !(BROADCAST && ONESHOT): */
137static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc) { BUG(); }
138static inline void tick_broadcast_switch_to_oneshot(void) { } 136static inline void tick_broadcast_switch_to_oneshot(void) { }
139static inline void tick_shutdown_broadcast_oneshot(unsigned int cpu) { } 137static inline void tick_shutdown_broadcast_oneshot(unsigned int cpu) { }
140static inline int tick_broadcast_oneshot_active(void) { return 0; } 138static inline int tick_broadcast_oneshot_active(void) { return 0; }
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 9652bc57fd09..b602c48cb841 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -118,6 +118,26 @@ static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta)
118 tk->offs_boot = ktime_add(tk->offs_boot, delta); 118 tk->offs_boot = ktime_add(tk->offs_boot, delta);
119} 119}
120 120
121/*
122 * tk_clock_read - atomic clocksource read() helper
123 *
124 * This helper is needed in the read paths because, while the
125 * seqlock ensures we don't return a bad value while structures are updated,
126 * it doesn't protect from potential crashes. There is the possibility that
127 * the tkr's clocksource may change between the read reference and the
128 * clock reference passed to the read function. This can cause crashes if
129 * the wrong clocksource is passed to the wrong read function.
130 * It isn't needed when holding the timekeeper_lock or doing
131 * a read of the fast-timekeeper tkrs (which is protected by its own locking
132 * and update logic).
133 */
134static inline u64 tk_clock_read(struct tk_read_base *tkr)
135{
136 struct clocksource *clock = READ_ONCE(tkr->clock);
137
138 return clock->read(clock);
139}
140
121#ifdef CONFIG_DEBUG_TIMEKEEPING 141#ifdef CONFIG_DEBUG_TIMEKEEPING
122#define WARNING_FREQ (HZ*300) /* 5 minute rate-limiting */ 142#define WARNING_FREQ (HZ*300) /* 5 minute rate-limiting */
123 143
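
tk_clock_read() exists so readers always pair the clocksource pointer with that same clocksource's read() function; the old code cached ->read separately and could mix an old function with a new clock across an update. A compilable sketch, using a volatile load where the kernel uses READ_ONCE():

    #include <stdio.h>

    struct clocksource {
        unsigned long long (*read)(struct clocksource *cs);
    };

    struct tk_read_base {
        struct clocksource *clock;
    };

    /* One load of the pointer, then read through that same pointer, so
     * the function and its clocksource can never come from different
     * updates. */
    static unsigned long long tk_clock_read_model(struct tk_read_base *tkr)
    {
        struct clocksource *clock =
            *(struct clocksource * volatile *)&tkr->clock;

        return clock->read(clock);
    }

    static unsigned long long fake_read(struct clocksource *cs)
    {
        (void)cs;
        return 42;
    }

    int main(void)
    {
        struct clocksource cs = { .read = fake_read };
        struct tk_read_base tkr = { .clock = &cs };

        printf("cycles=%llu\n", tk_clock_read_model(&tkr));
        return 0;
    }
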
@@ -175,7 +195,7 @@ static inline u64 timekeeping_get_delta(struct tk_read_base *tkr)
175 */ 195 */
176 do { 196 do {
177 seq = read_seqcount_begin(&tk_core.seq); 197 seq = read_seqcount_begin(&tk_core.seq);
178 now = tkr->read(tkr->clock); 198 now = tk_clock_read(tkr);
179 last = tkr->cycle_last; 199 last = tkr->cycle_last;
180 mask = tkr->mask; 200 mask = tkr->mask;
181 max = tkr->clock->max_cycles; 201 max = tkr->clock->max_cycles;
@@ -209,7 +229,7 @@ static inline u64 timekeeping_get_delta(struct tk_read_base *tkr)
209 u64 cycle_now, delta; 229 u64 cycle_now, delta;
210 230
211 /* read clocksource */ 231 /* read clocksource */
212 cycle_now = tkr->read(tkr->clock); 232 cycle_now = tk_clock_read(tkr);
213 233
214 /* calculate the delta since the last update_wall_time */ 234 /* calculate the delta since the last update_wall_time */
215 delta = clocksource_delta(cycle_now, tkr->cycle_last, tkr->mask); 235 delta = clocksource_delta(cycle_now, tkr->cycle_last, tkr->mask);
@@ -238,12 +258,10 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
238 ++tk->cs_was_changed_seq; 258 ++tk->cs_was_changed_seq;
239 old_clock = tk->tkr_mono.clock; 259 old_clock = tk->tkr_mono.clock;
240 tk->tkr_mono.clock = clock; 260 tk->tkr_mono.clock = clock;
241 tk->tkr_mono.read = clock->read;
242 tk->tkr_mono.mask = clock->mask; 261 tk->tkr_mono.mask = clock->mask;
243 tk->tkr_mono.cycle_last = tk->tkr_mono.read(clock); 262 tk->tkr_mono.cycle_last = tk_clock_read(&tk->tkr_mono);
244 263
245 tk->tkr_raw.clock = clock; 264 tk->tkr_raw.clock = clock;
246 tk->tkr_raw.read = clock->read;
247 tk->tkr_raw.mask = clock->mask; 265 tk->tkr_raw.mask = clock->mask;
248 tk->tkr_raw.cycle_last = tk->tkr_mono.cycle_last; 266 tk->tkr_raw.cycle_last = tk->tkr_mono.cycle_last;
249 267
@@ -262,7 +280,7 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
262 /* Go back from cycles -> shifted ns */ 280 /* Go back from cycles -> shifted ns */
263 tk->xtime_interval = interval * clock->mult; 281 tk->xtime_interval = interval * clock->mult;
264 tk->xtime_remainder = ntpinterval - tk->xtime_interval; 282 tk->xtime_remainder = ntpinterval - tk->xtime_interval;
265 tk->raw_interval = (interval * clock->mult) >> clock->shift; 283 tk->raw_interval = interval * clock->mult;
266 284
267 /* if changing clocks, convert xtime_nsec shift units */ 285 /* if changing clocks, convert xtime_nsec shift units */
268 if (old_clock) { 286 if (old_clock) {
@@ -404,7 +422,7 @@ static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
404 422
405 now += timekeeping_delta_to_ns(tkr, 423 now += timekeeping_delta_to_ns(tkr,
406 clocksource_delta( 424 clocksource_delta(
407 tkr->read(tkr->clock), 425 tk_clock_read(tkr),
408 tkr->cycle_last, 426 tkr->cycle_last,
409 tkr->mask)); 427 tkr->mask));
410 } while (read_seqcount_retry(&tkf->seq, seq)); 428 } while (read_seqcount_retry(&tkf->seq, seq));
@@ -461,6 +479,10 @@ static u64 dummy_clock_read(struct clocksource *cs)
461 return cycles_at_suspend; 479 return cycles_at_suspend;
462} 480}
463 481
482static struct clocksource dummy_clock = {
483 .read = dummy_clock_read,
484};
485
464/** 486/**
465 * halt_fast_timekeeper - Prevent fast timekeeper from accessing clocksource. 487 * halt_fast_timekeeper - Prevent fast timekeeper from accessing clocksource.
466 * @tk: Timekeeper to snapshot. 488 * @tk: Timekeeper to snapshot.
@@ -477,13 +499,13 @@ static void halt_fast_timekeeper(struct timekeeper *tk)
477 struct tk_read_base *tkr = &tk->tkr_mono; 499 struct tk_read_base *tkr = &tk->tkr_mono;
478 500
479 memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy)); 501 memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
480 cycles_at_suspend = tkr->read(tkr->clock); 502 cycles_at_suspend = tk_clock_read(tkr);
481 tkr_dummy.read = dummy_clock_read; 503 tkr_dummy.clock = &dummy_clock;
482 update_fast_timekeeper(&tkr_dummy, &tk_fast_mono); 504 update_fast_timekeeper(&tkr_dummy, &tk_fast_mono);
483 505
484 tkr = &tk->tkr_raw; 506 tkr = &tk->tkr_raw;
485 memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy)); 507 memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
486 tkr_dummy.read = dummy_clock_read; 508 tkr_dummy.clock = &dummy_clock;
487 update_fast_timekeeper(&tkr_dummy, &tk_fast_raw); 509 update_fast_timekeeper(&tkr_dummy, &tk_fast_raw);
488} 510}
489 511
@@ -649,11 +671,10 @@ static void timekeeping_update(struct timekeeper *tk, unsigned int action)
649 */ 671 */
650static void timekeeping_forward_now(struct timekeeper *tk) 672static void timekeeping_forward_now(struct timekeeper *tk)
651{ 673{
652 struct clocksource *clock = tk->tkr_mono.clock;
653 u64 cycle_now, delta; 674 u64 cycle_now, delta;
654 u64 nsec; 675 u64 nsec;
655 676
656 cycle_now = tk->tkr_mono.read(clock); 677 cycle_now = tk_clock_read(&tk->tkr_mono);
657 delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, tk->tkr_mono.mask); 678 delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
658 tk->tkr_mono.cycle_last = cycle_now; 679 tk->tkr_mono.cycle_last = cycle_now;
659 tk->tkr_raw.cycle_last = cycle_now; 680 tk->tkr_raw.cycle_last = cycle_now;
@@ -929,8 +950,7 @@ void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot)
929 950
930 do { 951 do {
931 seq = read_seqcount_begin(&tk_core.seq); 952 seq = read_seqcount_begin(&tk_core.seq);
932 953 now = tk_clock_read(&tk->tkr_mono);
933 now = tk->tkr_mono.read(tk->tkr_mono.clock);
934 systime_snapshot->cs_was_changed_seq = tk->cs_was_changed_seq; 954 systime_snapshot->cs_was_changed_seq = tk->cs_was_changed_seq;
935 systime_snapshot->clock_was_set_seq = tk->clock_was_set_seq; 955 systime_snapshot->clock_was_set_seq = tk->clock_was_set_seq;
936 base_real = ktime_add(tk->tkr_mono.base, 956 base_real = ktime_add(tk->tkr_mono.base,
@@ -1108,7 +1128,7 @@ int get_device_system_crosststamp(int (*get_time_fn)
1108 * Check whether the system counter value provided by the 1128 * Check whether the system counter value provided by the
1109 * device driver is on the current timekeeping interval. 1129 * device driver is on the current timekeeping interval.
1110 */ 1130 */
1111 now = tk->tkr_mono.read(tk->tkr_mono.clock); 1131 now = tk_clock_read(&tk->tkr_mono);
1112 interval_start = tk->tkr_mono.cycle_last; 1132 interval_start = tk->tkr_mono.cycle_last;
1113 if (!cycle_between(interval_start, cycles, now)) { 1133 if (!cycle_between(interval_start, cycles, now)) {
1114 clock_was_set_seq = tk->clock_was_set_seq; 1134 clock_was_set_seq = tk->clock_was_set_seq;
@@ -1629,7 +1649,7 @@ void timekeeping_resume(void)
1629 * The less preferred source will only be tried if there is no better 1649 * The less preferred source will only be tried if there is no better
1630 * usable source. The rtc part is handled separately in rtc core code. 1650 * usable source. The rtc part is handled separately in rtc core code.
1631 */ 1651 */
1632 cycle_now = tk->tkr_mono.read(clock); 1652 cycle_now = tk_clock_read(&tk->tkr_mono);
1633 if ((clock->flags & CLOCK_SOURCE_SUSPEND_NONSTOP) && 1653 if ((clock->flags & CLOCK_SOURCE_SUSPEND_NONSTOP) &&
1634 cycle_now > tk->tkr_mono.cycle_last) { 1654 cycle_now > tk->tkr_mono.cycle_last) {
1635 u64 nsec, cyc_delta; 1655 u64 nsec, cyc_delta;
@@ -1976,7 +1996,7 @@ static u64 logarithmic_accumulation(struct timekeeper *tk, u64 offset,
1976 u32 shift, unsigned int *clock_set) 1996 u32 shift, unsigned int *clock_set)
1977{ 1997{
1978 u64 interval = tk->cycle_interval << shift; 1998 u64 interval = tk->cycle_interval << shift;
1979 u64 raw_nsecs; 1999 u64 snsec_per_sec;
1980 2000
1981 /* If the offset is smaller than a shifted interval, do nothing */ 2001 /* If the offset is smaller than a shifted interval, do nothing */
1982 if (offset < interval) 2002 if (offset < interval)
@@ -1991,14 +2011,15 @@ static u64 logarithmic_accumulation(struct timekeeper *tk, u64 offset,
1991 *clock_set |= accumulate_nsecs_to_secs(tk); 2011 *clock_set |= accumulate_nsecs_to_secs(tk);
1992 2012
1993 /* Accumulate raw time */ 2013 /* Accumulate raw time */
1994 raw_nsecs = (u64)tk->raw_interval << shift; 2014 tk->tkr_raw.xtime_nsec += (u64)tk->raw_time.tv_nsec << tk->tkr_raw.shift;
1995 raw_nsecs += tk->raw_time.tv_nsec; 2015 tk->tkr_raw.xtime_nsec += tk->raw_interval << shift;
1996 if (raw_nsecs >= NSEC_PER_SEC) { 2016 snsec_per_sec = (u64)NSEC_PER_SEC << tk->tkr_raw.shift;
1997 u64 raw_secs = raw_nsecs; 2017 while (tk->tkr_raw.xtime_nsec >= snsec_per_sec) {
1998 raw_nsecs = do_div(raw_secs, NSEC_PER_SEC); 2018 tk->tkr_raw.xtime_nsec -= snsec_per_sec;
1999 tk->raw_time.tv_sec += raw_secs; 2019 tk->raw_time.tv_sec++;
2000 } 2020 }
2001 tk->raw_time.tv_nsec = raw_nsecs; 2021 tk->raw_time.tv_nsec = tk->tkr_raw.xtime_nsec >> tk->tkr_raw.shift;
2022 tk->tkr_raw.xtime_nsec -= (u64)tk->raw_time.tv_nsec << tk->tkr_raw.shift;
2002 2023
2003 /* Accumulate error between NTP and clock interval */ 2024 /* Accumulate error between NTP and clock interval */
2004 tk->ntp_error += tk->ntp_tick << shift; 2025 tk->ntp_error += tk->ntp_tick << shift;
@@ -2030,7 +2051,7 @@ void update_wall_time(void)
2030#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET 2051#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
2031 offset = real_tk->cycle_interval; 2052 offset = real_tk->cycle_interval;
2032#else 2053#else
2033 offset = clocksource_delta(tk->tkr_mono.read(tk->tkr_mono.clock), 2054 offset = clocksource_delta(tk_clock_read(&tk->tkr_mono),
2034 tk->tkr_mono.cycle_last, tk->tkr_mono.mask); 2055 tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
2035#endif 2056#endif
2036 2057
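
The logarithmic_accumulation() rework keeps raw time in shifted ("fixed point") nanoseconds and only carries whole seconds out, so the sub-nanosecond remainder of each interval is preserved instead of being truncated every tick. A self-contained numeric sketch with an invented shift value:

    #include <stdint.h>
    #include <stdio.h>

    #define SHIFT 8     /* invented; the kernel uses the clock's own shift */

    int main(void)
    {
        uint64_t raw_interval = (1000000000ULL << SHIFT) / 3; /* ~1/3s, shifted */
        uint64_t xtime_nsec = 0;
        uint64_t snsec_per_sec = 1000000000ULL << SHIFT;
        unsigned long tv_sec = 0;

        for (int tick = 0; tick < 6; tick++) {
            xtime_nsec += raw_interval;           /* keep full precision */
            while (xtime_nsec >= snsec_per_sec) { /* carry whole seconds */
                xtime_nsec -= snsec_per_sec;
                tv_sec++;
            }
        }
        printf("raw time: %lus %lluns\n", tv_sec,
               (unsigned long long)(xtime_nsec >> SHIFT));
        return 0;
    }
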
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index bd8ae8d5ae9c..193c5f5e3f79 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -1662,14 +1662,14 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
1662 goto out; 1662 goto out;
1663 1663
1664 if (attr == &dev_attr_act_mask) { 1664 if (attr == &dev_attr_act_mask) {
1665 if (sscanf(buf, "%llx", &value) != 1) { 1665 if (kstrtoull(buf, 0, &value)) {
1666 /* Assume it is a list of trace category names */ 1666 /* Assume it is a list of trace category names */
1667 ret = blk_trace_str2mask(buf); 1667 ret = blk_trace_str2mask(buf);
1668 if (ret < 0) 1668 if (ret < 0)
1669 goto out; 1669 goto out;
1670 value = ret; 1670 value = ret;
1671 } 1671 }
1672 } else if (sscanf(buf, "%llu", &value) != 1) 1672 } else if (kstrtoull(buf, 0, &value))
1673 goto out; 1673 goto out;
1674 1674
1675 ret = -ENXIO; 1675 ret = -ENXIO;
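
kstrtoull() is strict where sscanf("%llx") is permissive: input with trailing garbage is rejected outright rather than half-parsed, which is what lets the act_mask store fall back cleanly to a list of category names. A userspace approximation built on strtoull() plus a full-consumption check:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Approximation of kstrtoull() semantics on top of strtoull(). */
    static int strict_strtoull(const char *s, int base, unsigned long long *res)
    {
        char *end;

        errno = 0;
        *res = strtoull(s, &end, base);
        if (errno || end == s)
            return -EINVAL;        /* range error or no digits at all */
        if (*end == '\n' && end[1] == '\0')
            return 0;              /* tolerate a single trailing newline */
        return *end ? -EINVAL : 0; /* reject trailing garbage */
    }

    int main(void)
    {
        unsigned long long v;

        printf("\"0x10\"   -> %d\n", strict_strtoull("0x10", 0, &v));
        printf("\"12junk\" -> %d\n", strict_strtoull("12junk", 0, &v));
        return 0;
    }
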
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 39dca4e86a94..b308be30dfb9 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -4144,9 +4144,9 @@ unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
4144 int i, ret = -ENODEV; 4144 int i, ret = -ENODEV;
4145 int size; 4145 int size;
4146 4146
4147 if (glob && (strcmp(glob, "*") == 0 || !strlen(glob))) 4147 if (!glob || !strlen(glob) || !strcmp(glob, "*"))
4148 func_g.search = NULL; 4148 func_g.search = NULL;
4149 else if (glob) { 4149 else {
4150 int not; 4150 int not;
4151 4151
4152 func_g.type = filter_parse_regex(glob, strlen(glob), 4152 func_g.type = filter_parse_regex(glob, strlen(glob),
@@ -4256,6 +4256,14 @@ unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
4256 return ret; 4256 return ret;
4257} 4257}
4258 4258
4259void clear_ftrace_function_probes(struct trace_array *tr)
4260{
4261 struct ftrace_func_probe *probe, *n;
4262
4263 list_for_each_entry_safe(probe, n, &tr->func_probes, list)
4264 unregister_ftrace_function_probe_func(NULL, tr, probe->probe_ops);
4265}
4266
4259static LIST_HEAD(ftrace_commands); 4267static LIST_HEAD(ftrace_commands);
4260static DEFINE_MUTEX(ftrace_cmd_mutex); 4268static DEFINE_MUTEX(ftrace_cmd_mutex);
4261 4269
@@ -4329,9 +4337,6 @@ static int ftrace_process_regex(struct ftrace_iterator *iter,
4329 4337
4330 command = strsep(&next, ":"); 4338 command = strsep(&next, ":");
4331 4339
4332 if (WARN_ON_ONCE(!tr))
4333 return -EINVAL;
4334
4335 mutex_lock(&ftrace_cmd_mutex); 4340 mutex_lock(&ftrace_cmd_mutex);
4336 list_for_each_entry(p, &ftrace_commands, list) { 4341 list_for_each_entry(p, &ftrace_commands, list) {
4337 if (strcmp(p->name, command) == 0) { 4342 if (strcmp(p->name, command) == 0) {
@@ -5055,7 +5060,7 @@ ftrace_graph_release(struct inode *inode, struct file *file)
5055 } 5060 }
5056 5061
5057 out: 5062 out:
5058 kfree(fgd->new_hash); 5063 free_ftrace_hash(fgd->new_hash);
5059 kfree(fgd); 5064 kfree(fgd);
5060 5065
5061 return ret; 5066 return ret;
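
clear_ftrace_function_probes() walks tr->func_probes with list_for_each_entry_safe() because each unregister call tears down and frees the node being visited. The same must-grab-next-first discipline, in a minimal singly-linked userspace version:

    #include <stdio.h>
    #include <stdlib.h>

    struct probe { struct probe *next; int id; };

    /* Unlinks and frees one node, as the unregister call does. */
    static void unregister(struct probe **head, struct probe *p)
    {
        for (struct probe **pp = head; *pp; pp = &(*pp)->next)
            if (*pp == p) {
                *pp = p->next;
                break;
            }
        printf("freed probe %d\n", p->id);
        free(p);
    }

    int main(void)
    {
        struct probe *head = NULL;

        for (int i = 0; i < 3; i++) {
            struct probe *p = malloc(sizeof(*p));
            p->id = i;
            p->next = head;
            head = p;
        }
        /* the "_safe" walk: save next before the body frees the node */
        for (struct probe *p = head, *n; p; p = n) {
            n = p->next;
            unregister(&head, p);
        }
        return 0;
    }
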
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index c4536c449021..091e801145c9 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1558,7 +1558,7 @@ static __init int init_trace_selftests(void)
1558 1558
1559 return 0; 1559 return 0;
1560} 1560}
1561early_initcall(init_trace_selftests); 1561core_initcall(init_trace_selftests);
1562#else 1562#else
1563static inline int run_tracer_selftest(struct tracer *type) 1563static inline int run_tracer_selftest(struct tracer *type)
1564{ 1564{
@@ -2568,7 +2568,36 @@ static inline void ftrace_trace_stack(struct trace_array *tr,
2568void __trace_stack(struct trace_array *tr, unsigned long flags, int skip, 2568void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
2569 int pc) 2569 int pc)
2570{ 2570{
2571 __ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL); 2571 struct ring_buffer *buffer = tr->trace_buffer.buffer;
2572
2573 if (rcu_is_watching()) {
2574 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
2575 return;
2576 }
2577
2578 /*
2579 * When an NMI triggers, RCU is enabled via rcu_nmi_enter(),
2580 * but if the above rcu_is_watching() failed, then the NMI
2581 * triggered someplace critical, and rcu_irq_enter() should
2582 * not be called from NMI.
2583 */
2584 if (unlikely(in_nmi()))
2585 return;
2586
2587 /*
2588 * It is possible that a function is being traced in a
2589 * location that RCU is not watching. A call to
2590 * rcu_irq_enter() will make sure that it is, but there are
2591 * a few internal rcu functions that could be traced
2592 * where that won't work either. In those cases, we just
2593 * do nothing.
2594 */
2595 if (unlikely(rcu_irq_enter_disabled()))
2596 return;
2597
2598 rcu_irq_enter_irqson();
2599 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
2600 rcu_irq_exit_irqson();
2572} 2601}
2573 2602
2574/** 2603/**
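
The hardened __trace_stack() only takes the rcu_irq_enter_irqson() detour when RCU is not already watching, bails out in NMI context where that call is forbidden, and also gives up while tracing RCU's own internals. The control flow, with every predicate stubbed out:

    #include <stdbool.h>
    #include <stdio.h>

    /* All predicates are stubs; only the branch order is the point. */
    static bool rcu_is_watching(void) { return false; }
    static bool in_nmi(void) { return false; }
    static bool rcu_irq_enter_disabled(void) { return false; }
    static void rcu_irq_enter_irqson(void) { puts("rcu_irq_enter"); }
    static void rcu_irq_exit_irqson(void) { puts("rcu_irq_exit"); }
    static void record_stack(void) { puts("stack trace recorded"); }

    static void trace_stack_model(void)
    {
        if (rcu_is_watching()) {
            record_stack();            /* fast path: RCU already active */
            return;
        }
        if (in_nmi())                  /* rcu_irq_enter() forbidden in NMI */
            return;
        if (rcu_irq_enter_disabled())  /* tracing RCU's own internals */
            return;

        rcu_irq_enter_irqson();
        record_stack();
        rcu_irq_exit_irqson();
    }

    int main(void)
    {
        trace_stack_model();
        return 0;
    }
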
@@ -6852,6 +6881,9 @@ ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
6852 char *number; 6881 char *number;
6853 int ret; 6882 int ret;
6854 6883
6884 if (!tr)
6885 return -ENODEV;
6886
6855 /* hash funcs only work with set_ftrace_filter */ 6887 /* hash funcs only work with set_ftrace_filter */
6856 if (!enable) 6888 if (!enable)
6857 return -EINVAL; 6889 return -EINVAL;
@@ -7550,6 +7582,7 @@ static int instance_rmdir(const char *name)
7550 } 7582 }
7551 7583
7552 tracing_set_nop(tr); 7584 tracing_set_nop(tr);
7585 clear_ftrace_function_probes(tr);
7553 event_trace_del_tracer(tr); 7586 event_trace_del_tracer(tr);
7554 ftrace_clear_pids(tr); 7587 ftrace_clear_pids(tr);
7555 ftrace_destroy_function_files(tr); 7588 ftrace_destroy_function_files(tr);
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 291a1bca5748..39fd77330aab 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -980,6 +980,7 @@ register_ftrace_function_probe(char *glob, struct trace_array *tr,
 extern int
 unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
 				      struct ftrace_probe_ops *ops);
+extern void clear_ftrace_function_probes(struct trace_array *tr);
 
 int register_ftrace_command(struct ftrace_func_command *cmd);
 int unregister_ftrace_command(struct ftrace_func_command *cmd);
@@ -998,6 +999,10 @@ static inline __init int unregister_ftrace_command(char *cmd_name)
 {
 	return -EINVAL;
 }
+static inline void clear_ftrace_function_probes(struct trace_array *tr)
+{
+}
+
 /*
  * The ops parameter passed in is usually undefined.
  * This must be a macro.
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index a3bddbfd0874..a0910c0cdf2e 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -654,6 +654,9 @@ ftrace_trace_onoff_callback(struct trace_array *tr, struct ftrace_hash *hash,
 {
 	struct ftrace_probe_ops *ops;
 
+	if (!tr)
+		return -ENODEV;
+
 	/* we register both traceon and traceoff to this callback */
 	if (strcmp(cmd, "traceon") == 0)
 		ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
@@ -670,6 +673,9 @@ ftrace_stacktrace_callback(struct trace_array *tr, struct ftrace_hash *hash,
 {
 	struct ftrace_probe_ops *ops;
 
+	if (!tr)
+		return -ENODEV;
+
 	ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;
 
 	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
@@ -682,6 +688,9 @@ ftrace_dump_callback(struct trace_array *tr, struct ftrace_hash *hash,
 {
 	struct ftrace_probe_ops *ops;
 
+	if (!tr)
+		return -ENODEV;
+
 	ops = &dump_probe_ops;
 
 	/* Only dump once. */
@@ -695,6 +704,9 @@ ftrace_cpudump_callback(struct trace_array *tr, struct ftrace_hash *hash,
 {
 	struct ftrace_probe_ops *ops;
 
+	if (!tr)
+		return -ENODEV;
+
 	ops = &cpudump_probe_ops;
 
 	/* Only dump once. */
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 8485f6738a87..b53c8d369163 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -707,20 +707,16 @@ static int create_trace_kprobe(int argc, char **argv)
 		pr_info("Probe point is not specified.\n");
 		return -EINVAL;
 	}
-	if (isdigit(argv[1][0])) {
-		/* an address specified */
-		ret = kstrtoul(&argv[1][0], 0, (unsigned long *)&addr);
-		if (ret) {
-			pr_info("Failed to parse address.\n");
-			return ret;
-		}
-	} else {
+
+	/* try to parse an address. if that fails, try to read the
+	 * input as a symbol. */
+	if (kstrtoul(argv[1], 0, (unsigned long *)&addr)) {
 		/* a symbol specified */
 		symbol = argv[1];
 		/* TODO: support .init module functions */
 		ret = traceprobe_split_symbol_offset(symbol, &offset);
 		if (ret) {
-			pr_info("Failed to parse symbol.\n");
+			pr_info("Failed to parse either an address or a symbol.\n");
 			return ret;
 		}
 		if (offset && is_return &&
@@ -1535,6 +1531,11 @@ static __init int kprobe_trace_self_tests_init(void)
 
 end:
 	release_all_trace_kprobes();
+	/*
+	 * Wait for the optimizer work to finish. Otherwise it might fiddle
+	 * with probes in already freed __init text.
+	 */
+	wait_for_kprobe_optimizer();
 	if (warn)
 		pr_cont("NG: Some tests are failed. Please check them.\n");
 	else
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 76aa04d4c925..b4a751e8f9d6 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -409,7 +409,9 @@ static const struct file_operations stack_trace_fops = {
 static int
 stack_trace_filter_open(struct inode *inode, struct file *file)
 {
-	return ftrace_regex_open(&trace_ops, FTRACE_ITER_FILTER,
+	struct ftrace_ops *ops = inode->i_private;
+
+	return ftrace_regex_open(ops, FTRACE_ITER_FILTER,
 				 inode, file);
 }
 
@@ -476,7 +478,7 @@ static __init int stack_trace_init(void)
 			NULL, &stack_trace_fops);
 
 	trace_create_file("stack_trace_filter", 0444, d_tracer,
-			  NULL, &stack_trace_filter_fops);
+			  &trace_ops, &stack_trace_filter_fops);
 
 	if (stack_trace_filter_buf[0])
 		ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1);
diff --git a/lib/cmdline.c b/lib/cmdline.c
index 3c6432df7e63..4c0888c4a68d 100644
--- a/lib/cmdline.c
+++ b/lib/cmdline.c
@@ -23,14 +23,14 @@
  *	the values[M, M+1, ..., N] into the ints array in get_options.
  */
 
-static int get_range(char **str, int *pint)
+static int get_range(char **str, int *pint, int n)
 {
 	int x, inc_counter, upper_range;
 
 	(*str)++;
 	upper_range = simple_strtol((*str), NULL, 0);
 	inc_counter = upper_range - *pint;
-	for (x = *pint; x < upper_range; x++)
+	for (x = *pint; n && x < upper_range; x++, n--)
 		*pint++ = x;
 	return inc_counter;
 }
@@ -97,7 +97,7 @@ char *get_options(const char *str, int nints, int *ints)
 			break;
 		if (res == 3) {
 			int range_nums;
-			range_nums = get_range((char **)&str, ints + i);
+			range_nums = get_range((char **)&str, ints + i, nints - i);
 			if (range_nums < 0)
 				break;
 			/*
diff --git a/lib/libcrc32c.c b/lib/libcrc32c.c
index 74a54b7f2562..9f79547d1b97 100644
--- a/lib/libcrc32c.c
+++ b/lib/libcrc32c.c
@@ -43,7 +43,7 @@ static struct crypto_shash *tfm;
 u32 crc32c(u32 crc, const void *address, unsigned int length)
 {
 	SHASH_DESC_ON_STACK(shash, tfm);
-	u32 *ctx = (u32 *)shash_desc_ctx(shash);
+	u32 ret, *ctx = (u32 *)shash_desc_ctx(shash);
 	int err;
 
 	shash->tfm = tfm;
@@ -53,7 +53,9 @@ u32 crc32c(u32 crc, const void *address, unsigned int length)
 	err = crypto_shash_update(shash, address, length);
 	BUG_ON(err);
 
-	return *ctx;
+	ret = *ctx;
+	barrier_data(ctx);
+	return ret;
 }
 
 EXPORT_SYMBOL(crc32c);
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 889bc31785be..be88cbaadde3 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -4504,6 +4504,44 @@ static struct bpf_test tests[] = {
 		{ },
 		{ { 0, 1 } },
 	},
+	{
+		"JMP_JSGE_K: Signed jump: value walk 1",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_LD_IMM64(R1, -3),
+			BPF_JMP_IMM(BPF_JSGE, R1, 0, 6),
+			BPF_ALU64_IMM(BPF_ADD, R1, 1),
+			BPF_JMP_IMM(BPF_JSGE, R1, 0, 4),
+			BPF_ALU64_IMM(BPF_ADD, R1, 1),
+			BPF_JMP_IMM(BPF_JSGE, R1, 0, 2),
+			BPF_ALU64_IMM(BPF_ADD, R1, 1),
+			BPF_JMP_IMM(BPF_JSGE, R1, 0, 1),
+			BPF_EXIT_INSN(),		/* bad exit */
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),	/* good exit */
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
+	{
+		"JMP_JSGE_K: Signed jump: value walk 2",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_LD_IMM64(R1, -3),
+			BPF_JMP_IMM(BPF_JSGE, R1, 0, 4),
+			BPF_ALU64_IMM(BPF_ADD, R1, 2),
+			BPF_JMP_IMM(BPF_JSGE, R1, 0, 2),
+			BPF_ALU64_IMM(BPF_ADD, R1, 2),
+			BPF_JMP_IMM(BPF_JSGE, R1, 0, 1),
+			BPF_EXIT_INSN(),		/* bad exit */
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),	/* good exit */
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
 	/* BPF_JMP | BPF_JGT | BPF_K */
 	{
 		"JMP_JGT_K: if (3 > 2) return 1",
diff --git a/mm/gup.c b/mm/gup.c
index d9e6fddcc51f..576c4df58882 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -387,11 +387,6 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
 	/* mlock all present pages, but do not fault in new pages */
 	if ((*flags & (FOLL_POPULATE | FOLL_MLOCK)) == FOLL_MLOCK)
 		return -ENOENT;
-	/* For mm_populate(), just skip the stack guard page. */
-	if ((*flags & FOLL_POPULATE) &&
-			(stack_guard_page_start(vma, address) ||
-			 stack_guard_page_end(vma, address + PAGE_SIZE)))
-		return -ENOENT;
 	if (*flags & FOLL_WRITE)
 		fault_flags |= FAULT_FLAG_WRITE;
 	if (*flags & FOLL_REMOTE)
@@ -407,12 +402,10 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
 
 	ret = handle_mm_fault(vma, address, fault_flags);
 	if (ret & VM_FAULT_ERROR) {
-		if (ret & VM_FAULT_OOM)
-			return -ENOMEM;
-		if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
-			return *flags & FOLL_HWPOISON ? -EHWPOISON : -EFAULT;
-		if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
-			return -EFAULT;
+		int err = vm_fault_to_errno(ret, *flags);
+
+		if (err)
+			return err;
 		BUG();
 	}
 
@@ -723,12 +716,10 @@ retry:
 		ret = handle_mm_fault(vma, address, fault_flags);
 		major |= ret & VM_FAULT_MAJOR;
 		if (ret & VM_FAULT_ERROR) {
-			if (ret & VM_FAULT_OOM)
-				return -ENOMEM;
-			if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
-				return -EHWPOISON;
-			if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
-				return -EFAULT;
+			int err = vm_fault_to_errno(ret, 0);
+
+			if (err)
+				return err;
 			BUG();
 		}
 
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index a84909cf20d3..88c6167f194d 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1426,8 +1426,11 @@ int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd)
 	 */
 	if (unlikely(pmd_trans_migrating(*vmf->pmd))) {
 		page = pmd_page(*vmf->pmd);
+		if (!get_page_unless_zero(page))
+			goto out_unlock;
 		spin_unlock(vmf->ptl);
 		wait_on_page_locked(page);
+		put_page(page);
 		goto out;
 	}
 
@@ -1459,9 +1462,12 @@ int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd)
 
 	/* Migration could have started since the pmd_trans_migrating check */
 	if (!page_locked) {
+		page_nid = -1;
+		if (!get_page_unless_zero(page))
+			goto out_unlock;
 		spin_unlock(vmf->ptl);
 		wait_on_page_locked(page);
-		page_nid = -1;
+		put_page(page);
 		goto out;
 	}
 
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index e5828875f7bb..3eedb187e549 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -4170,6 +4170,11 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		}
 		ret = hugetlb_fault(mm, vma, vaddr, fault_flags);
 		if (ret & VM_FAULT_ERROR) {
+			int err = vm_fault_to_errno(ret, flags);
+
+			if (err)
+				return err;
+
 			remainder = 0;
 			break;
 		}
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 945fd1ca49b5..df4ebdb2b10a 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -652,7 +652,6 @@ static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
 			spin_unlock(ptl);
 			free_page_and_swap_cache(src_page);
 		}
-		cond_resched();
 	}
 }
 
diff --git a/mm/ksm.c b/mm/ksm.c
index d9fc0e456128..216184af0e19 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1028,8 +1028,7 @@ static int try_to_merge_one_page(struct vm_area_struct *vma,
 		goto out;
 
 	if (PageTransCompound(page)) {
-		err = split_huge_page(page);
-		if (err)
+		if (split_huge_page(page))
 			goto out_unlock;
 	}
 
diff --git a/mm/memblock.c b/mm/memblock.c
index b049c9b2dba8..7b8a5db76a2f 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -1739,6 +1739,29 @@ static void __init_memblock memblock_dump(struct memblock_type *type)
 	}
 }
 
+extern unsigned long __init_memblock
+memblock_reserved_memory_within(phys_addr_t start_addr, phys_addr_t end_addr)
+{
+	struct memblock_region *rgn;
+	unsigned long size = 0;
+	int idx;
+
+	for_each_memblock_type((&memblock.reserved), rgn) {
+		phys_addr_t start, end;
+
+		if (rgn->base + rgn->size < start_addr)
+			continue;
+		if (rgn->base > end_addr)
+			continue;
+
+		start = rgn->base;
+		end = start + rgn->size;
+		size += end - start;
+	}
+
+	return size;
+}
+
 void __init_memblock __memblock_dump_all(void)
 {
 	pr_info("MEMBLOCK configuration:\n");
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 2527dfeddb00..ecc183fd94f3 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1184,7 +1184,10 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
 	 * page_remove_rmap() in try_to_unmap_one(). So to determine page status
 	 * correctly, we save a copy of the page flags at this time.
 	 */
-	page_flags = p->flags;
+	if (PageHuge(p))
+		page_flags = hpage->flags;
+	else
+		page_flags = p->flags;
 
 	/*
 	 * unpoison always clear PG_hwpoison inside page lock
@@ -1595,12 +1598,8 @@ static int soft_offline_huge_page(struct page *page, int flags)
 	if (ret) {
 		pr_info("soft offline: %#lx: migration failed %d, type %lx (%pGp)\n",
 			pfn, ret, page->flags, &page->flags);
-		/*
-		 * We know that soft_offline_huge_page() tries to migrate
-		 * only one hugepage pointed to by hpage, so we need not
-		 * run through the pagelist here.
-		 */
-		putback_active_hugepage(hpage);
+		if (!list_empty(&pagelist))
+			putback_movable_pages(&pagelist);
 		if (ret > 0)
 			ret = -EIO;
 	} else {
diff --git a/mm/memory.c b/mm/memory.c
index 6ff5d729ded0..bb11c474857e 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2855,40 +2855,6 @@ out_release:
 }
 
 /*
- * This is like a special single-page "expand_{down|up}wards()",
- * except we must first make sure that 'address{-|+}PAGE_SIZE'
- * doesn't hit another vma.
- */
-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
-{
-	address &= PAGE_MASK;
-	if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
-		struct vm_area_struct *prev = vma->vm_prev;
-
-		/*
-		 * Is there a mapping abutting this one below?
-		 *
-		 * That's only ok if it's the same stack mapping
-		 * that has gotten split..
-		 */
-		if (prev && prev->vm_end == address)
-			return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
-
-		return expand_downwards(vma, address - PAGE_SIZE);
-	}
-	if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
-		struct vm_area_struct *next = vma->vm_next;
-
-		/* As VM_GROWSDOWN but s/below/above/ */
-		if (next && next->vm_start == address + PAGE_SIZE)
-			return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
-
-		return expand_upwards(vma, address + PAGE_SIZE);
-	}
-	return 0;
-}
-
-/*
  * We enter with non-exclusive mmap_sem (to exclude vma changes,
  * but allow concurrent faults), and pte mapped but not yet locked.
  * We return with mmap_sem still held, but pte unmapped and unlocked.
@@ -2904,10 +2870,6 @@ static int do_anonymous_page(struct vm_fault *vmf)
 	if (vma->vm_flags & VM_SHARED)
 		return VM_FAULT_SIGBUS;
 
-	/* Check if we need to add a guard page to the stack */
-	if (check_stack_guard_page(vma, vmf->address) < 0)
-		return VM_FAULT_SIGSEGV;
-
 	/*
 	 * Use pte_alloc() instead of pte_alloc_map(). We can't run
 	 * pte_offset_map() on pmds where a huge pmd might be created
@@ -3029,6 +2991,17 @@ static int __do_fault(struct vm_fault *vmf)
 	return ret;
 }
 
+/*
+ * The ordering of these checks is important for pmds with _PAGE_DEVMAP set.
+ * If we check pmd_trans_unstable() first we will trip the bad_pmd() check
+ * inside of pmd_none_or_trans_huge_or_clear_bad(). This will end up correctly
+ * returning 1 but not before it spams dmesg with the pmd_clear_bad() output.
+ */
+static int pmd_devmap_trans_unstable(pmd_t *pmd)
+{
+	return pmd_devmap(*pmd) || pmd_trans_unstable(pmd);
+}
+
 static int pte_alloc_one_map(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
@@ -3052,18 +3025,27 @@ static int pte_alloc_one_map(struct vm_fault *vmf)
 map_pte:
 	/*
 	 * If a huge pmd materialized under us just retry later. Use
-	 * pmd_trans_unstable() instead of pmd_trans_huge() to ensure the pmd
-	 * didn't become pmd_trans_huge under us and then back to pmd_none, as
-	 * a result of MADV_DONTNEED running immediately after a huge pmd fault
-	 * in a different thread of this mm, in turn leading to a misleading
-	 * pmd_trans_huge() retval. All we have to ensure is that it is a
-	 * regular pmd that we can walk with pte_offset_map() and we can do that
-	 * through an atomic read in C, which is what pmd_trans_unstable()
-	 * provides.
+	 * pmd_trans_unstable() via pmd_devmap_trans_unstable() instead of
+	 * pmd_trans_huge() to ensure the pmd didn't become pmd_trans_huge
+	 * under us and then back to pmd_none, as a result of MADV_DONTNEED
+	 * running immediately after a huge pmd fault in a different thread of
+	 * this mm, in turn leading to a misleading pmd_trans_huge() retval.
+	 * All we have to ensure is that it is a regular pmd that we can walk
+	 * with pte_offset_map() and we can do that through an atomic read in
+	 * C, which is what pmd_trans_unstable() provides.
 	 */
-	if (pmd_trans_unstable(vmf->pmd) || pmd_devmap(*vmf->pmd))
+	if (pmd_devmap_trans_unstable(vmf->pmd))
 		return VM_FAULT_NOPAGE;
 
+	/*
+	 * At this point we know that our vmf->pmd points to a page of ptes
+	 * and it cannot become pmd_none(), pmd_devmap() or pmd_trans_huge()
+	 * for the duration of the fault. If a racing MADV_DONTNEED runs and
+	 * we zap the ptes pointed to by our vmf->pmd, the vmf->ptl will still
+	 * be valid and we will re-check to make sure the vmf->pte isn't
+	 * pte_none() under vmf->ptl protection when we return to
+	 * alloc_set_pte().
+	 */
 	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
 			&vmf->ptl);
 	return 0;
@@ -3690,7 +3672,7 @@ static int handle_pte_fault(struct vm_fault *vmf)
 		vmf->pte = NULL;
 	} else {
 		/* See comment in pte_alloc_one_map() */
-		if (pmd_trans_unstable(vmf->pmd) || pmd_devmap(*vmf->pmd))
+		if (pmd_devmap_trans_unstable(vmf->pmd))
 			return 0;
 		/*
 		 * A regular pmd is established and it can't morph into a huge
diff --git a/mm/mlock.c b/mm/mlock.c
index c483c5c20b4b..b562b5523a65 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -284,7 +284,7 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
 {
 	int i;
 	int nr = pagevec_count(pvec);
-	int delta_munlocked;
+	int delta_munlocked = -nr;
 	struct pagevec pvec_putback;
 	int pgrescued = 0;
 
@@ -304,6 +304,8 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
 				continue;
 			else
 				__munlock_isolation_failed(page);
+		} else {
+			delta_munlocked++;
 		}
 
 		/*
@@ -315,7 +317,6 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
 		pagevec_add(&pvec_putback, pvec->pages[i]);
 		pvec->pages[i] = NULL;
 	}
-	delta_munlocked = -nr + pagevec_count(&pvec_putback);
 	__mod_zone_page_state(zone, NR_MLOCK, delta_munlocked);
 	spin_unlock_irq(zone_lru_lock(zone));
 
diff --git a/mm/mmap.c b/mm/mmap.c
index f82741e199c0..a5e3dcd75e79 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -183,6 +183,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
 	unsigned long retval;
 	unsigned long newbrk, oldbrk;
 	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *next;
 	unsigned long min_brk;
 	bool populate;
 	LIST_HEAD(uf);
@@ -229,7 +230,8 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
 	}
 
 	/* Check against existing mmap mappings. */
-	if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
+	next = find_vma(mm, oldbrk);
+	if (next && newbrk + PAGE_SIZE > vm_start_gap(next))
 		goto out;
 
 	/* Ok, looks good - let it rip. */
@@ -253,10 +255,22 @@ out:
 
 static long vma_compute_subtree_gap(struct vm_area_struct *vma)
 {
-	unsigned long max, subtree_gap;
-	max = vma->vm_start;
-	if (vma->vm_prev)
-		max -= vma->vm_prev->vm_end;
+	unsigned long max, prev_end, subtree_gap;
+
+	/*
+	 * Note: in the rare case of a VM_GROWSDOWN above a VM_GROWSUP, we
+	 * allow two stack_guard_gaps between them here, and when choosing
+	 * an unmapped area; whereas when expanding we only require one.
+	 * That's a little inconsistent, but keeps the code here simpler.
+	 */
+	max = vm_start_gap(vma);
+	if (vma->vm_prev) {
+		prev_end = vm_end_gap(vma->vm_prev);
+		if (max > prev_end)
+			max -= prev_end;
+		else
+			max = 0;
+	}
 	if (vma->vm_rb.rb_left) {
 		subtree_gap = rb_entry(vma->vm_rb.rb_left,
 				struct vm_area_struct, vm_rb)->rb_subtree_gap;
@@ -352,7 +366,7 @@ static void validate_mm(struct mm_struct *mm)
 			anon_vma_unlock_read(anon_vma);
 		}
 
-		highest_address = vma->vm_end;
+		highest_address = vm_end_gap(vma);
 		vma = vma->vm_next;
 		i++;
 	}
@@ -541,7 +555,7 @@ void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (vma->vm_next)
 		vma_gap_update(vma->vm_next);
 	else
-		mm->highest_vm_end = vma->vm_end;
+		mm->highest_vm_end = vm_end_gap(vma);
 
 	/*
 	 * vma->vm_prev wasn't known when we followed the rbtree to find the
@@ -856,7 +870,7 @@ again:
 			vma_gap_update(vma);
 		if (end_changed) {
 			if (!next)
-				mm->highest_vm_end = end;
+				mm->highest_vm_end = vm_end_gap(vma);
 			else if (!adjust_next)
 				vma_gap_update(next);
 		}
@@ -941,7 +955,7 @@ again:
 			 * mm->highest_vm_end doesn't need any update
 			 * in remove_next == 1 case.
 			 */
-			VM_WARN_ON(mm->highest_vm_end != end);
+			VM_WARN_ON(mm->highest_vm_end != vm_end_gap(vma));
 		}
 	}
 	if (insert && file)
@@ -1787,7 +1801,7 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
 
 	while (true) {
 		/* Visit left subtree if it looks promising */
-		gap_end = vma->vm_start;
+		gap_end = vm_start_gap(vma);
 		if (gap_end >= low_limit && vma->vm_rb.rb_left) {
 			struct vm_area_struct *left =
 				rb_entry(vma->vm_rb.rb_left,
@@ -1798,12 +1812,13 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
 			}
 		}
 
-		gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
+		gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0;
 check_current:
 		/* Check if current node has a suitable gap */
 		if (gap_start > high_limit)
 			return -ENOMEM;
-		if (gap_end >= low_limit && gap_end - gap_start >= length)
+		if (gap_end >= low_limit &&
+		    gap_end > gap_start && gap_end - gap_start >= length)
 			goto found;
 
 		/* Visit right subtree if it looks promising */
@@ -1825,8 +1840,8 @@ check_current:
 			vma = rb_entry(rb_parent(prev),
 				       struct vm_area_struct, vm_rb);
 			if (prev == vma->vm_rb.rb_left) {
-				gap_start = vma->vm_prev->vm_end;
-				gap_end = vma->vm_start;
+				gap_start = vm_end_gap(vma->vm_prev);
+				gap_end = vm_start_gap(vma);
 				goto check_current;
 			}
 		}
@@ -1890,7 +1905,7 @@ unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
 
 	while (true) {
 		/* Visit right subtree if it looks promising */
-		gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
+		gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0;
 		if (gap_start <= high_limit && vma->vm_rb.rb_right) {
 			struct vm_area_struct *right =
 				rb_entry(vma->vm_rb.rb_right,
@@ -1903,10 +1918,11 @@ unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
 
 check_current:
 		/* Check if current node has a suitable gap */
-		gap_end = vma->vm_start;
+		gap_end = vm_start_gap(vma);
 		if (gap_end < low_limit)
 			return -ENOMEM;
-		if (gap_start <= high_limit && gap_end - gap_start >= length)
+		if (gap_start <= high_limit &&
+		    gap_end > gap_start && gap_end - gap_start >= length)
 			goto found;
 
 		/* Visit left subtree if it looks promising */
@@ -1929,7 +1945,7 @@ check_current:
 				       struct vm_area_struct, vm_rb);
 			if (prev == vma->vm_rb.rb_right) {
 				gap_start = vma->vm_prev ?
-					vma->vm_prev->vm_end : 0;
+					vm_end_gap(vma->vm_prev) : 0;
 				goto check_current;
 			}
 		}
@@ -1967,7 +1983,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 		unsigned long len, unsigned long pgoff, unsigned long flags)
 {
 	struct mm_struct *mm = current->mm;
-	struct vm_area_struct *vma;
+	struct vm_area_struct *vma, *prev;
 	struct vm_unmapped_area_info info;
 
 	if (len > TASK_SIZE - mmap_min_addr)
@@ -1978,9 +1994,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 
 	if (addr) {
 		addr = PAGE_ALIGN(addr);
-		vma = find_vma(mm, addr);
+		vma = find_vma_prev(mm, addr, &prev);
 		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)) &&
+		    (!prev || addr >= vm_end_gap(prev)))
 			return addr;
 	}
 
@@ -2003,7 +2020,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 			  const unsigned long len, const unsigned long pgoff,
 			  const unsigned long flags)
 {
-	struct vm_area_struct *vma;
+	struct vm_area_struct *vma, *prev;
 	struct mm_struct *mm = current->mm;
 	unsigned long addr = addr0;
 	struct vm_unmapped_area_info info;
@@ -2018,9 +2035,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 	/* requesting a specific address */
 	if (addr) {
 		addr = PAGE_ALIGN(addr);
-		vma = find_vma(mm, addr);
+		vma = find_vma_prev(mm, addr, &prev);
 		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
-				(!vma || addr + len <= vma->vm_start))
+				(!vma || addr + len <= vm_start_gap(vma)) &&
+				(!prev || addr >= vm_end_gap(prev)))
 			return addr;
 	}
 
@@ -2155,21 +2173,19 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
  * update accounting. This is shared with both the
  * grow-up and grow-down cases.
  */
-static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, unsigned long grow)
+static int acct_stack_growth(struct vm_area_struct *vma,
+			     unsigned long size, unsigned long grow)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	struct rlimit *rlim = current->signal->rlim;
-	unsigned long new_start, actual_size;
+	unsigned long new_start;
 
 	/* address space limit tests */
 	if (!may_expand_vm(mm, vma->vm_flags, grow))
 		return -ENOMEM;
 
 	/* Stack limit test */
-	actual_size = size;
-	if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN)))
-		actual_size -= PAGE_SIZE;
-	if (actual_size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur))
+	if (size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur))
 		return -ENOMEM;
 
 	/* mlock limit tests */
@@ -2207,16 +2223,32 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 {
 	struct mm_struct *mm = vma->vm_mm;
+	struct vm_area_struct *next;
+	unsigned long gap_addr;
 	int error = 0;
 
 	if (!(vma->vm_flags & VM_GROWSUP))
 		return -EFAULT;
 
-	/* Guard against wrapping around to address 0. */
-	if (address < PAGE_ALIGN(address+4))
-		address = PAGE_ALIGN(address+4);
-	else
+	/* Guard against exceeding limits of the address space. */
+	address &= PAGE_MASK;
+	if (address >= TASK_SIZE)
 		return -ENOMEM;
+	address += PAGE_SIZE;
+
+	/* Enforce stack_guard_gap */
+	gap_addr = address + stack_guard_gap;
+
+	/* Guard against overflow */
+	if (gap_addr < address || gap_addr > TASK_SIZE)
+		gap_addr = TASK_SIZE;
+
+	next = vma->vm_next;
+	if (next && next->vm_start < gap_addr) {
+		if (!(next->vm_flags & VM_GROWSUP))
+			return -ENOMEM;
+		/* Check that both stack segments have the same anon_vma? */
+	}
 
 	/* We must make sure the anon_vma is allocated. */
 	if (unlikely(anon_vma_prepare(vma)))
@@ -2261,7 +2293,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 		if (vma->vm_next)
 			vma_gap_update(vma->vm_next);
 		else
-			mm->highest_vm_end = address;
+			mm->highest_vm_end = vm_end_gap(vma);
 		spin_unlock(&mm->page_table_lock);
 
 		perf_event_mmap(vma);
@@ -2282,6 +2314,8 @@ int expand_downwards(struct vm_area_struct *vma,
 				   unsigned long address)
 {
 	struct mm_struct *mm = vma->vm_mm;
+	struct vm_area_struct *prev;
+	unsigned long gap_addr;
 	int error;
 
 	address &= PAGE_MASK;
@@ -2289,6 +2323,17 @@ int expand_downwards(struct vm_area_struct *vma,
 	if (error)
 		return error;
 
+	/* Enforce stack_guard_gap */
+	gap_addr = address - stack_guard_gap;
+	if (gap_addr > address)
+		return -ENOMEM;
+	prev = vma->vm_prev;
+	if (prev && prev->vm_end > gap_addr) {
+		if (!(prev->vm_flags & VM_GROWSDOWN))
+			return -ENOMEM;
+		/* Check that both stack segments have the same anon_vma? */
+	}
+
 	/* We must make sure the anon_vma is allocated. */
 	if (unlikely(anon_vma_prepare(vma)))
 		return -ENOMEM;
@@ -2343,28 +2388,25 @@ int expand_downwards(struct vm_area_struct *vma,
 	return error;
 }
 
-/*
- * Note how expand_stack() refuses to expand the stack all the way to
- * abut the next virtual mapping, *unless* that mapping itself is also
- * a stack mapping. We want to leave room for a guard page, after all
- * (the guard page itself is not added here, that is done by the
- * actual page faulting logic)
- *
- * This matches the behavior of the guard page logic (see mm/memory.c:
- * check_stack_guard_page()), which only allows the guard page to be
- * removed under these circumstances.
- */
+/* enforced gap between the expanding stack and other mappings. */
+unsigned long stack_guard_gap = 256UL<<PAGE_SHIFT;
+
+static int __init cmdline_parse_stack_guard_gap(char *p)
+{
+	unsigned long val;
+	char *endptr;
+
+	val = simple_strtoul(p, &endptr, 10);
+	if (!*endptr)
+		stack_guard_gap = val << PAGE_SHIFT;
+
+	return 0;
+}
+__setup("stack_guard_gap=", cmdline_parse_stack_guard_gap);
+
 #ifdef CONFIG_STACK_GROWSUP
 int expand_stack(struct vm_area_struct *vma, unsigned long address)
 {
-	struct vm_area_struct *next;
-
-	address &= PAGE_MASK;
-	next = vma->vm_next;
-	if (next && next->vm_start == address + PAGE_SIZE) {
-		if (!(next->vm_flags & VM_GROWSUP))
-			return -ENOMEM;
-	}
 	return expand_upwards(vma, address);
 }
 
@@ -2386,14 +2428,6 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
 #else
 int expand_stack(struct vm_area_struct *vma, unsigned long address)
 {
-	struct vm_area_struct *prev;
-
-	address &= PAGE_MASK;
-	prev = vma->vm_prev;
-	if (prev && prev->vm_end == address) {
-		if (!(prev->vm_flags & VM_GROWSDOWN))
-			return -ENOMEM;
-	}
 	return expand_downwards(vma, address);
 }
 
@@ -2491,7 +2525,7 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
 		vma->vm_prev = prev;
 		vma_gap_update(vma);
 	} else
-		mm->highest_vm_end = prev ? prev->vm_end : 0;
+		mm->highest_vm_end = prev ? vm_end_gap(prev) : 0;
 	tail_vma->vm_next = NULL;
 
 	/* Kill the cache */
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index f9e450c6b6e4..2302f250d6b1 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -292,6 +292,26 @@ int page_group_by_mobility_disabled __read_mostly;
 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
 static inline void reset_deferred_meminit(pg_data_t *pgdat)
 {
+	unsigned long max_initialise;
+	unsigned long reserved_lowmem;
+
+	/*
+	 * Initialise at least 2G of a node but also take into account that
+	 * two large system hashes that can take up 1GB for 0.25TB/node.
+	 */
+	max_initialise = max(2UL << (30 - PAGE_SHIFT),
+		(pgdat->node_spanned_pages >> 8));
+
+	/*
+	 * Compensate for all the memblock reservations (e.g. crash kernel)
+	 * from the initial estimation to make sure we will initialize enough
+	 * memory to boot.
+	 */
+	reserved_lowmem = memblock_reserved_memory_within(pgdat->node_start_pfn,
+			pgdat->node_start_pfn + max_initialise);
+	max_initialise += reserved_lowmem;
+
+	pgdat->static_init_size = min(max_initialise, pgdat->node_spanned_pages);
 	pgdat->first_deferred_pfn = ULONG_MAX;
 }
 
@@ -314,20 +334,11 @@ static inline bool update_defer_init(pg_data_t *pgdat,
 				unsigned long pfn, unsigned long zone_end,
 				unsigned long *nr_initialised)
 {
-	unsigned long max_initialise;
-
 	/* Always populate low zones for address-constrained allocations */
 	if (zone_end < pgdat_end_pfn(pgdat))
 		return true;
-	/*
-	 * Initialise at least 2G of a node but also take into account that
-	 * two large system hashes that can take up 1GB for 0.25TB/node.
-	 */
-	max_initialise = max(2UL << (30 - PAGE_SHIFT),
-		(pgdat->node_spanned_pages >> 8));
-
 	(*nr_initialised)++;
-	if ((*nr_initialised > max_initialise) &&
+	if ((*nr_initialised > pgdat->static_init_size) &&
 	    (pfn & (PAGES_PER_SECTION - 1)) == 0) {
 		pgdat->first_deferred_pfn = pfn;
 		return false;
@@ -3870,7 +3881,9 @@ retry:
 		goto got_pg;
 
 	/* Avoid allocations with no watermarks from looping endlessly */
-	if (test_thread_flag(TIF_MEMDIE))
+	if (test_thread_flag(TIF_MEMDIE) &&
+	    (alloc_flags == ALLOC_NO_WATERMARKS ||
+	     (gfp_mask & __GFP_NOMEMALLOC)))
 		goto nopage;
 
 	/* Retry as long as the OOM killer is making progress */
@@ -6136,7 +6149,6 @@ void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
 	/* pg_data_t should be reset to zero when it's allocated */
 	WARN_ON(pgdat->nr_zones || pgdat->kswapd_classzone_idx);
 
-	reset_deferred_meminit(pgdat);
 	pgdat->node_id = nid;
 	pgdat->node_start_pfn = node_start_pfn;
 	pgdat->per_cpu_nodestats = NULL;
@@ -6158,6 +6170,7 @@ void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
 		(unsigned long)pgdat->node_mem_map);
 #endif
 
+	reset_deferred_meminit(pgdat);
 	free_area_init_core(pgdat);
 }
 
diff --git a/mm/slub.c b/mm/slub.c
index 57e5156f02be..8addc535bcdc 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -5512,6 +5512,7 @@ static void memcg_propagate_slab_attrs(struct kmem_cache *s)
 		char mbuf[64];
 		char *buf;
 		struct slab_attribute *attr = to_slab_attr(slab_attrs[i]);
+		ssize_t len;
 
 		if (!attr || !attr->store || !attr->show)
 			continue;
@@ -5536,8 +5537,9 @@ static void memcg_propagate_slab_attrs(struct kmem_cache *s)
 			buf = buffer;
 		}
 
-		attr->show(root_cache, buf);
-		attr->store(s, buf, strlen(buf));
+		len = attr->show(root_cache, buf);
+		if (len > 0)
+			attr->store(s, buf, len);
 	}
 
 	if (buffer)
@@ -5623,6 +5625,28 @@ static char *create_unique_id(struct kmem_cache *s)
 	return name;
 }
 
+static void sysfs_slab_remove_workfn(struct work_struct *work)
+{
+	struct kmem_cache *s =
+		container_of(work, struct kmem_cache, kobj_remove_work);
+
+	if (!s->kobj.state_in_sysfs)
+		/*
+		 * For a memcg cache, this may be called during
+		 * deactivation and again on shutdown. Remove only once.
+		 * A cache is never shut down before deactivation is
+		 * complete, so no need to worry about synchronization.
+		 */
+		return;
+
+#ifdef CONFIG_MEMCG
+	kset_unregister(s->memcg_kset);
+#endif
+	kobject_uevent(&s->kobj, KOBJ_REMOVE);
+	kobject_del(&s->kobj);
+	kobject_put(&s->kobj);
+}
+
 static int sysfs_slab_add(struct kmem_cache *s)
 {
 	int err;
@@ -5630,6 +5654,8 @@ static int sysfs_slab_add(struct kmem_cache *s)
 	struct kset *kset = cache_kset(s);
 	int unmergeable = slab_unmergeable(s);
 
+	INIT_WORK(&s->kobj_remove_work, sysfs_slab_remove_workfn);
+
 	if (!kset) {
 		kobject_init(&s->kobj, &slab_ktype);
 		return 0;
@@ -5693,20 +5719,8 @@ static void sysfs_slab_remove(struct kmem_cache *s)
 	 */
 		return;
 
-	if (!s->kobj.state_in_sysfs)
-		/*
-		 * For a memcg cache, this may be called during
-		 * deactivation and again on shutdown. Remove only once.
-		 * A cache is never shut down before deactivation is
-		 * complete, so no need to worry about synchronization.
-		 */
-		return;
-
-#ifdef CONFIG_MEMCG
-	kset_unregister(s->memcg_kset);
-#endif
-	kobject_uevent(&s->kobj, KOBJ_REMOVE);
-	kobject_del(&s->kobj);
+	kobject_get(&s->kobj);
+	schedule_work(&s->kobj_remove_work);
 }
 
 void sysfs_slab_release(struct kmem_cache *s)
diff --git a/mm/swap_cgroup.c b/mm/swap_cgroup.c
index ac6318a064d3..3405b4ee1757 100644
--- a/mm/swap_cgroup.c
+++ b/mm/swap_cgroup.c
@@ -48,6 +48,9 @@ static int swap_cgroup_prepare(int type)
 		if (!page)
 			goto not_enough_page;
 		ctrl->map[idx] = page;
+
+		if (!(idx % SWAP_CLUSTER_MAX))
+			cond_resched();
 	}
 	return 0;
 not_enough_page:
diff --git a/mm/util.c b/mm/util.c
index 464df3489903..26be6407abd7 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -357,8 +357,11 @@ void *kvmalloc_node(size_t size, gfp_t flags, int node)
 	WARN_ON_ONCE((flags & GFP_KERNEL) != GFP_KERNEL);
 
 	/*
-	 * Make sure that larger requests are not too disruptive - no OOM
-	 * killer and no allocation failure warnings as we have a fallback
+	 * We want to attempt a large physically contiguous block first because
+	 * it is less likely to fragment multiple larger blocks and therefore
+	 * contribute to a long term fragmentation less than vmalloc fallback.
+	 * However make sure that larger requests are not too disruptive - no
+	 * OOM killer and no allocation failure warnings as we have a fallback.
 	 */
 	if (size > PAGE_SIZE) {
 		kmalloc_flags |= __GFP_NOWARN;
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 34a1c3e46ed7..ecc97f74ab18 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -287,10 +287,21 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
 	if (p4d_none(*p4d))
 		return NULL;
 	pud = pud_offset(p4d, addr);
-	if (pud_none(*pud))
+
+	/*
+	 * Don't dereference bad PUD or PMD (below) entries. This will also
+	 * identify huge mappings, which we may encounter on architectures
+	 * that define CONFIG_HAVE_ARCH_HUGE_VMAP=y. Such regions will be
+	 * identified as vmalloc addresses by is_vmalloc_addr(), but are
+	 * not [unambiguously] associated with a struct page, so there is
+	 * no correct value to return for them.
+	 */
+	WARN_ON_ONCE(pud_bad(*pud));
+	if (pud_none(*pud) || pud_bad(*pud))
 		return NULL;
 	pmd = pmd_offset(pud, addr);
-	if (pmd_none(*pmd))
+	WARN_ON_ONCE(pmd_bad(*pmd));
+	if (pmd_none(*pmd) || pmd_bad(*pmd))
 		return NULL;
 
 	ptep = pte_offset_map(pmd, addr);
diff --git a/mm/vmpressure.c b/mm/vmpressure.c
index 6063581f705c..ce0618bfa8d0 100644
--- a/mm/vmpressure.c
+++ b/mm/vmpressure.c
@@ -115,9 +115,9 @@ static enum vmpressure_levels vmpressure_calc_level(unsigned long scanned,
 	unsigned long pressure = 0;
 
 	/*
-	 * reclaimed can be greater than scanned in cases
-	 * like THP, where the scanned is 1 and reclaimed
-	 * could be 512
+	 * reclaimed can be greater than scanned for things such as reclaimed
+	 * slab pages. shrink_node() just adds reclaimed pages without a
+	 * related increment to scanned pages.
 	 */
 	if (reclaimed >= scanned)
 		goto out;
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 467069b73ce1..9649579b5b9f 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -277,7 +277,8 @@ static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
 	return 0;
 
 out_free_newdev:
-	free_netdev(new_dev);
+	if (new_dev->reg_state == NETREG_UNINITIALIZED)
+		free_netdev(new_dev);
 	return err;
 }
 
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 953b6728bd00..abc5f400fc71 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -813,7 +813,6 @@ static void vlan_dev_free(struct net_device *dev)
 
 	free_percpu(vlan->vlan_pcpu_stats);
 	vlan->vlan_pcpu_stats = NULL;
-	free_netdev(dev);
 }
 
 void vlan_setup(struct net_device *dev)
@@ -826,7 +825,8 @@ void vlan_setup(struct net_device *dev)
 	netif_keep_dst(dev);
 
 	dev->netdev_ops		= &vlan_netdev_ops;
-	dev->destructor		= vlan_dev_free;
+	dev->needs_free_netdev	= true;
+	dev->priv_destructor	= vlan_dev_free;
 	dev->ethtool_ops	= &vlan_ethtool_ops;
 
 	dev->min_mtu		= 0;
diff --git a/net/9p/trans_xen.c b/net/9p/trans_xen.c
index 71e85643b3f9..6ad3e043c617 100644
--- a/net/9p/trans_xen.c
+++ b/net/9p/trans_xen.c
@@ -454,8 +454,8 @@ static int xen_9pfs_front_probe(struct xenbus_device *dev,
 		goto error_xenbus;
 	}
 	priv->tag = xenbus_read(xbt, dev->nodename, "tag", NULL);
-	if (!priv->tag) {
-		ret = -EINVAL;
+	if (IS_ERR(priv->tag)) {
+		ret = PTR_ERR(priv->tag);
 		goto error_xenbus;
 	}
 	ret = xenbus_transaction_end(xbt, 0);
@@ -525,7 +525,7 @@ static struct xenbus_driver xen_9pfs_front_driver = {
 	.otherend_changed = xen_9pfs_front_changed,
 };
 
-int p9_trans_xen_init(void)
+static int p9_trans_xen_init(void)
 {
 	if (!xen_domain())
 		return -ENODEV;
@@ -537,7 +537,7 @@
 }
 module_init(p9_trans_xen_init);
 
-void p9_trans_xen_exit(void)
+static void p9_trans_xen_exit(void)
 {
 	v9fs_unregister_trans(&p9_xen_trans);
 	return xenbus_unregister_driver(&xen_9pfs_front_driver);
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index 013e970eff39..000ca2f113ab 100644
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@ -1064,8 +1064,9 @@ bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
 
 	skb_new->protocol = eth_type_trans(skb_new, soft_iface);
 
-	soft_iface->stats.rx_packets++;
-	soft_iface->stats.rx_bytes += skb->len + ETH_HLEN + hdr_size;
+	batadv_inc_counter(bat_priv, BATADV_CNT_RX);
+	batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
+			   skb->len + ETH_HLEN + hdr_size);
 
 	netif_rx(skb_new);
 	batadv_dbg(BATADV_DBG_DAT, bat_priv, "ARP request replied locally\n");
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index e1ebe14ee2a6..ae9f4d37d34f 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -987,7 +987,7 @@ int batadv_recv_unicast_packet(struct sk_buff *skb,
 			batadv_dbg(BATADV_DBG_BLA, bat_priv,
 				   "recv_unicast_packet(): Dropped unicast pkt received from another backbone gw %pM.\n",
 				   orig_addr_gw);
-			return NET_RX_DROP;
+			goto free_skb;
 		}
 	}
 
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index b25789abf7b9..10f7edfb176e 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -1034,8 +1034,6 @@ static void batadv_softif_free(struct net_device *dev)
 	 * netdev and its private data (bat_priv)
 	 */
 	rcu_barrier();
-
-	free_netdev(dev);
 }
 
 /**
@@ -1047,7 +1045,8 @@ static void batadv_softif_init_early(struct net_device *dev)
 	ether_setup(dev);
 
 	dev->netdev_ops = &batadv_netdev_ops;
-	dev->destructor = batadv_softif_free;
+	dev->needs_free_netdev = true;
+	dev->priv_destructor = batadv_softif_free;
 	dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_NETNS_LOCAL;
 	dev->priv_flags |= IFF_NO_QUEUE;
 
diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c
index 608959989f8e..ab3b654b05cc 100644
--- a/net/bluetooth/6lowpan.c
+++ b/net/bluetooth/6lowpan.c
@@ -598,7 +598,7 @@ static void netdev_setup(struct net_device *dev)
 
 	dev->netdev_ops		= &netdev_ops;
 	dev->header_ops		= &header_ops;
-	dev->destructor		= free_netdev;
+	dev->needs_free_netdev	= true;
 }
 
 static struct device_type bt_type = {
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 430b53e7d941..f0f3447e8aa4 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -379,7 +379,7 @@ void br_dev_setup(struct net_device *dev)
 	ether_setup(dev);
 
 	dev->netdev_ops = &br_netdev_ops;
-	dev->destructor = free_netdev;
+	dev->needs_free_netdev = true;
 	dev->ethtool_ops = &br_ethtool_ops;
 	SET_NETDEV_DEVTYPE(dev, &br_type);
 	dev->priv_flags = IFF_EBRIDGE | IFF_NO_QUEUE;
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index c5ce7745b230..32bd3ead9ba1 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -595,7 +595,7 @@ static int br_afspec(struct net_bridge *br,
 		err = 0;
 		switch (nla_type(attr)) {
 		case IFLA_BRIDGE_VLAN_TUNNEL_INFO:
-			if (!(p->flags & BR_VLAN_TUNNEL))
+			if (!p || !(p->flags & BR_VLAN_TUNNEL))
 				return -EINVAL;
 			err = br_parse_vlan_tunnel_info(attr, &tinfo_curr);
 			if (err)
@@ -835,6 +835,13 @@ static int br_validate(struct nlattr *tb[], struct nlattr *data[])
 			return -EPROTONOSUPPORT;
 		}
 	}
+
+	if (data[IFLA_BR_VLAN_DEFAULT_PVID]) {
+		__u16 defpvid = nla_get_u16(data[IFLA_BR_VLAN_DEFAULT_PVID]);
+
+		if (defpvid >= VLAN_VID_MASK)
+			return -EINVAL;
+	}
 #endif
 
 	return 0;
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index 08341d2aa9c9..6f12a5271219 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -179,6 +179,8 @@ static void br_stp_start(struct net_bridge *br)
 		br_debug(br, "using kernel STP\n");
 
 		/* To start timers on any ports left in blocking */
+		if (br->dev->flags & IFF_UP)
+			mod_timer(&br->hello_timer, jiffies + br->hello_time);
 		br_port_state_selection(br);
 	}
 
diff --git a/net/bridge/br_stp_timer.c b/net/bridge/br_stp_timer.c
index c98b3e5c140a..60b6fe277a8b 100644
--- a/net/bridge/br_stp_timer.c
+++ b/net/bridge/br_stp_timer.c
@@ -40,7 +40,7 @@ static void br_hello_timer_expired(unsigned long arg)
 	if (br->dev->flags & IFF_UP) {
 		br_config_bpdu_generation(br);
 
-		if (br->stp_enabled != BR_USER_STP)
+		if (br->stp_enabled == BR_KERNEL_STP)
 			mod_timer(&br->hello_timer,
 				  round_jiffies(jiffies + br->hello_time));
 	}
diff --git a/net/bridge/netfilter/ebt_arpreply.c b/net/bridge/netfilter/ebt_arpreply.c
index 5929309beaa1..db85230e49c3 100644
--- a/net/bridge/netfilter/ebt_arpreply.c
+++ b/net/bridge/netfilter/ebt_arpreply.c
@@ -68,6 +68,9 @@ static int ebt_arpreply_tg_check(const struct xt_tgchk_param *par)
 	if (e->ethproto != htons(ETH_P_ARP) ||
 	    e->invflags & EBT_IPROTO)
 		return -EINVAL;
+	if (ebt_invalid_target(info->target))
+		return -EINVAL;
+
 	return 0;
 }
 
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 9ec0c9f908fa..9c6e619f452b 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -1373,7 +1373,8 @@ static inline int ebt_obj_to_user(char __user *um, const char *_name,
 	strlcpy(name, _name, sizeof(name));
 	if (copy_to_user(um, name, EBT_FUNCTION_MAXNAMELEN) ||
 	    put_user(datasize, (int __user *)(um + EBT_FUNCTION_MAXNAMELEN)) ||
-	    xt_data_to_user(um + entrysize, data, usersize, datasize))
+	    xt_data_to_user(um + entrysize, data, usersize, datasize,
+			    XT_ALIGN(datasize)))
 		return -EFAULT;
 
 	return 0;
@@ -1658,7 +1659,8 @@ static int compat_match_to_user(struct ebt_entry_match *m, void __user **dstptr,
 		if (match->compat_to_user(cm->data, m->data))
 			return -EFAULT;
 	} else {
-		if (xt_data_to_user(cm->data, m->data, match->usersize, msize))
+		if (xt_data_to_user(cm->data, m->data, match->usersize, msize,
+				    COMPAT_XT_ALIGN(msize)))
 			return -EFAULT;
 	}
 
@@ -1687,7 +1689,8 @@ static int compat_target_to_user(struct ebt_entry_target *t,
 		if (target->compat_to_user(cm->data, t->data))
 			return -EFAULT;
 	} else {
-		if (xt_data_to_user(cm->data, t->data, target->usersize, tsize))
+		if (xt_data_to_user(cm->data, t->data, target->usersize, tsize,
+				    COMPAT_XT_ALIGN(tsize)))
 			return -EFAULT;
 	}
 
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index adcad344c843..21f18ea2fce4 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -754,6 +754,10 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
 
 	lock_sock(sk);
 
+	err = -EINVAL;
+	if (addr_len < offsetofend(struct sockaddr, sa_family))
+		goto out;
+
 	err = -EAFNOSUPPORT;
 	if (uaddr->sa_family != AF_CAIF)
 		goto out;
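The caif_connect() fix above validates addr_len before dereferencing uaddr->sa_family; offsetofend() yields the offset of the first byte past a member, so the comparison rejects any address too short to contain the family field. A small userspace-style sketch of the same check (types and names are illustrative, not from the patch):

    #include <stddef.h>
    
    /* offsetofend() as defined in the kernel */
    #define offsetofend(TYPE, MEMBER) \
    	(offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))
    
    struct sockaddr_like {
    	unsigned short sa_family;
    	char sa_data[14];
    };
    
    /* Only read sa_family if the caller supplied at least that many bytes. */
    static int addr_family_checked(const struct sockaddr_like *sa, size_t addr_len)
    {
    	if (addr_len < offsetofend(struct sockaddr_like, sa_family))
    		return -1;	/* short address: reading would overrun */
    	return sa->sa_family;
    }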
diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c
index 59ce1fcc220c..71b6ab240dea 100644
--- a/net/caif/cfpkt_skbuff.c
+++ b/net/caif/cfpkt_skbuff.c
@@ -81,11 +81,7 @@ static struct cfpkt *cfpkt_create_pfx(u16 len, u16 pfx)
 {
 	struct sk_buff *skb;
 
-	if (likely(in_interrupt()))
-		skb = alloc_skb(len + pfx, GFP_ATOMIC);
-	else
-		skb = alloc_skb(len + pfx, GFP_KERNEL);
-
+	skb = alloc_skb(len + pfx, GFP_ATOMIC);
 	if (unlikely(skb == NULL))
 		return NULL;
 
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
index 1816fc9f1ee7..fe3c53efb949 100644
--- a/net/caif/chnl_net.c
+++ b/net/caif/chnl_net.c
@@ -392,14 +392,14 @@ static void chnl_net_destructor(struct net_device *dev)
 {
 	struct chnl_net *priv = netdev_priv(dev);
 	caif_free_client(&priv->chnl);
-	free_netdev(dev);
 }
 
 static void ipcaif_net_setup(struct net_device *dev)
 {
 	struct chnl_net *priv;
 	dev->netdev_ops = &netdev_ops;
-	dev->destructor = chnl_net_destructor;
+	dev->needs_free_netdev = true;
+	dev->priv_destructor = chnl_net_destructor;
 	dev->flags |= IFF_NOARP;
 	dev->flags |= IFF_POINTOPOINT;
 	dev->mtu = GPRS_PDP_MTU;
diff --git a/net/can/af_can.c b/net/can/af_can.c
index b6406fe33c76..88edac0f3e36 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -872,8 +872,7 @@ static int can_notifier(struct notifier_block *nb, unsigned long msg,
 
 static int can_pernet_init(struct net *net)
 {
-	net->can.can_rcvlists_lock =
-		__SPIN_LOCK_UNLOCKED(net->can.can_rcvlists_lock);
+	spin_lock_init(&net->can.can_rcvlists_lock);
 	net->can.can_rx_alldev_list =
 		kzalloc(sizeof(struct dev_rcv_lists), GFP_KERNEL);
 
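The af_can fix above replaces a __SPIN_LOCK_UNLOCKED() assignment with spin_lock_init(). The former is a static initializer: assigning it to a lock embedded in a dynamically created struct net skips proper runtime initialization and gives lockdep a bogus key. A sketch of the two legitimate forms, assuming nothing beyond standard kernel APIs:

    static DEFINE_SPINLOCK(global_lock);	/* static object: initializer is fine */
    
    struct runtime_ctx {
    	spinlock_t lock;
    };
    
    static void runtime_ctx_init(struct runtime_ctx *ctx)
    {
    	spin_lock_init(&ctx->lock);	/* runtime object: must init at runtime */
    }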
diff --git a/net/ceph/auth_x.c b/net/ceph/auth_x.c
index 2034fb926670..8757fb87dab8 100644
--- a/net/ceph/auth_x.c
+++ b/net/ceph/auth_x.c
@@ -151,7 +151,7 @@ static int process_one_ticket(struct ceph_auth_client *ac,
 	struct timespec validity;
 	void *tp, *tpend;
 	void **ptp;
-	struct ceph_crypto_key new_session_key;
+	struct ceph_crypto_key new_session_key = { 0 };
 	struct ceph_buffer *new_ticket_blob;
 	unsigned long new_expires, new_renew_after;
 	u64 new_secret_id;
@@ -215,6 +215,9 @@ static int process_one_ticket(struct ceph_auth_client *ac,
 	dout(" ticket blob is %d bytes\n", dlen);
 	ceph_decode_need(ptp, tpend, 1 + sizeof(u64), bad);
 	blob_struct_v = ceph_decode_8(ptp);
+	if (blob_struct_v != 1)
+		goto bad;
+
 	new_secret_id = ceph_decode_64(ptp);
 	ret = ceph_decode_buffer(&new_ticket_blob, ptp, tpend);
 	if (ret)
@@ -234,13 +237,13 @@ static int process_one_ticket(struct ceph_auth_client *ac,
 	     type, ceph_entity_type_name(type), th->secret_id,
 	     (int)th->ticket_blob->vec.iov_len);
 	xi->have_keys |= th->service;
-
-out:
-	return ret;
+	return 0;
 
 bad:
 	ret = -EINVAL;
-	goto out;
+out:
+	ceph_crypto_key_destroy(&new_session_key);
+	return ret;
 }
 
 static int ceph_x_proc_ticket_reply(struct ceph_auth_client *ac,
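The auth_x changes combine three defenses: new_session_key is zero-initialized so it is always safe to destroy, an unknown blob_struct_v is rejected instead of being mis-parsed, and every failure path funnels through one exit that destroys the key before returning. A generic sketch of that single-exit cleanup shape (all names illustrative):

    static int decode_one(void)
    {
    	struct key_like key = { 0 };	/* destroyable even if never filled */
    	int ret;
    
    	ret = parse_into(&key);		/* hypothetical parse step */
    	if (ret)
    		goto bad;
    	if (!version_supported())	/* hypothetical version check */
    		goto bad;
    
    	commit(&key);			/* success: ownership handed off */
    	return 0;
    
    bad:
    	ret = -EINVAL;
    	key_destroy(&key);		/* one cleanup point for all errors */
    	return ret;
    }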
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
index 4fd02831beed..47e94b560ba0 100644
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
@@ -56,19 +56,6 @@ static const struct kernel_param_ops param_ops_supported_features = {
 module_param_cb(supported_features, &param_ops_supported_features, NULL,
 		S_IRUGO);
 
-/*
- * find filename portion of a path (/foo/bar/baz -> baz)
- */
-const char *ceph_file_part(const char *s, int len)
-{
-	const char *e = s + len;
-
-	while (e != s && *(e-1) != '/')
-		e--;
-	return e;
-}
-EXPORT_SYMBOL(ceph_file_part);
-
 const char *ceph_msg_type_name(int type)
 {
 	switch (type) {
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 5766a6c896c4..588a91930051 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -1174,8 +1174,8 @@ static struct page *ceph_msg_data_next(struct ceph_msg_data_cursor *cursor,
  * Returns true if the result moves the cursor on to the next piece
  * of the data item.
  */
-static bool ceph_msg_data_advance(struct ceph_msg_data_cursor *cursor,
+static void ceph_msg_data_advance(struct ceph_msg_data_cursor *cursor,
 				  size_t bytes)
 {
 	bool new_piece;
 
@@ -1207,8 +1207,6 @@ static bool ceph_msg_data_advance(struct ceph_msg_data_cursor *cursor,
 		new_piece = true;
 	}
 	cursor->need_crc = new_piece;
-
-	return new_piece;
 }
 
 static size_t sizeof_footer(struct ceph_connection *con)
@@ -1577,7 +1575,6 @@ static int write_partial_message_data(struct ceph_connection *con)
 		size_t page_offset;
 		size_t length;
 		bool last_piece;
-		bool need_crc;
 		int ret;
 
 		page = ceph_msg_data_next(cursor, &page_offset, &length,
@@ -1592,7 +1589,7 @@ static int write_partial_message_data(struct ceph_connection *con)
 		}
 		if (do_datacrc && cursor->need_crc)
 			crc = ceph_crc32c_page(crc, page, page_offset, length);
-		need_crc = ceph_msg_data_advance(cursor, (size_t)ret);
+		ceph_msg_data_advance(cursor, (size_t)ret);
 	}
 
 	dout("%s %p msg %p done\n", __func__, con, msg);
@@ -2231,10 +2228,18 @@ static void process_ack(struct ceph_connection *con)
 	struct ceph_msg *m;
 	u64 ack = le64_to_cpu(con->in_temp_ack);
 	u64 seq;
+	bool reconnect = (con->in_tag == CEPH_MSGR_TAG_SEQ);
+	struct list_head *list = reconnect ? &con->out_queue : &con->out_sent;
 
-	while (!list_empty(&con->out_sent)) {
-		m = list_first_entry(&con->out_sent, struct ceph_msg,
-				     list_head);
+	/*
+	 * In the reconnect case, con_fault() has requeued messages
+	 * in out_sent. We should cleanup old messages according to
+	 * the reconnect seq.
+	 */
+	while (!list_empty(list)) {
+		m = list_first_entry(list, struct ceph_msg, list_head);
+		if (reconnect && m->needs_out_seq)
+			break;
 		seq = le64_to_cpu(m->hdr.seq);
 		if (seq > ack)
 			break;
@@ -2243,6 +2248,7 @@ static void process_ack(struct ceph_connection *con)
 		m->ack_stamp = jiffies;
 		ceph_msg_remove(m);
 	}
+
 	prepare_read_tag(con);
 }
 
@@ -2299,7 +2305,7 @@ static int read_partial_msg_data(struct ceph_connection *con)
 
 		if (do_datacrc)
 			crc = ceph_crc32c_page(crc, page, page_offset, ret);
-		(void) ceph_msg_data_advance(cursor, (size_t)ret);
+		ceph_msg_data_advance(cursor, (size_t)ret);
 	}
 	if (do_datacrc)
 		con->in_data_crc = crc;
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
index 29a0ef351c5e..250f11f78609 100644
--- a/net/ceph/mon_client.c
+++ b/net/ceph/mon_client.c
@@ -43,15 +43,13 @@ struct ceph_monmap *ceph_monmap_decode(void *p, void *end)
 	int i, err = -EINVAL;
 	struct ceph_fsid fsid;
 	u32 epoch, num_mon;
-	u16 version;
 	u32 len;
 
 	ceph_decode_32_safe(&p, end, len, bad);
 	ceph_decode_need(&p, end, len, bad);
 
 	dout("monmap_decode %p %p len %d\n", p, end, (int)(end-p));
-
-	ceph_decode_16_safe(&p, end, version, bad);
+	p += sizeof(u16);	/* skip version */
 
 	ceph_decode_need(&p, end, sizeof(fsid) + 2*sizeof(u32), bad);
 	ceph_decode_copy(&p, &fsid, sizeof(fsid));
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
index ffe9e904d4d1..55e3a477f92d 100644
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -317,6 +317,7 @@ static struct crush_map *crush_decode(void *pbyval, void *end)
 		u32 yes;
 		struct crush_rule *r;
 
+		err = -EINVAL;
 		ceph_decode_32_safe(p, end, yes, bad);
 		if (!yes) {
 			dout("crush_decode NO rule %d off %x %p to %p\n",
diff --git a/net/core/dev.c b/net/core/dev.c
index 96cf83da0d66..416137c64bf8 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1253,8 +1253,9 @@ int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
 	if (!new_ifalias)
 		return -ENOMEM;
 	dev->ifalias = new_ifalias;
+	memcpy(dev->ifalias, alias, len);
+	dev->ifalias[len] = 0;
 
-	strlcpy(dev->ifalias, alias, len+1);
 	return len;
 }
 
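dev_set_alias() now copies with memcpy() plus an explicit terminator instead of strlcpy(). strlcpy() always walks the source until it finds a NUL (to compute its return value), and the alias arrives as a length-counted netlink attribute that need not be NUL-terminated, so the old code could read past the attribute. A userspace sketch of the distinction (hypothetical wrapper):

    #include <string.h>
    
    /* Copy a length-counted, possibly unterminated buffer into a string. */
    static void set_alias(char *dst, const char *src, size_t len)
    {
    	memcpy(dst, src, len);	/* reads exactly len bytes of src */
    	dst[len] = '\0';	/* terminate by hand */
    }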
@@ -4766,6 +4767,13 @@ struct packet_offload *gro_find_complete_by_type(__be16 type)
 }
 EXPORT_SYMBOL(gro_find_complete_by_type);
 
+static void napi_skb_free_stolen_head(struct sk_buff *skb)
+{
+	skb_dst_drop(skb);
+	secpath_reset(skb);
+	kmem_cache_free(skbuff_head_cache, skb);
+}
+
 static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
 {
 	switch (ret) {
@@ -4779,13 +4787,10 @@ static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
 		break;
 
 	case GRO_MERGED_FREE:
-		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) {
-			skb_dst_drop(skb);
-			secpath_reset(skb);
-			kmem_cache_free(skbuff_head_cache, skb);
-		} else {
+		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
+			napi_skb_free_stolen_head(skb);
+		else
 			__kfree_skb(skb);
-		}
 		break;
 
 	case GRO_HELD:
@@ -4857,10 +4862,16 @@ static gro_result_t napi_frags_finish(struct napi_struct *napi,
 		break;
 
 	case GRO_DROP:
-	case GRO_MERGED_FREE:
 		napi_reuse_skb(napi, skb);
 		break;
 
+	case GRO_MERGED_FREE:
+		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
+			napi_skb_free_stolen_head(skb);
+		else
+			napi_reuse_skb(napi, skb);
+		break;
+
 	case GRO_MERGED:
 	case GRO_CONSUMED:
 		break;
@@ -4948,6 +4959,19 @@ __sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
 }
 EXPORT_SYMBOL(__skb_gro_checksum_complete);
 
+static void net_rps_send_ipi(struct softnet_data *remsd)
+{
+#ifdef CONFIG_RPS
+	while (remsd) {
+		struct softnet_data *next = remsd->rps_ipi_next;
+
+		if (cpu_online(remsd->cpu))
+			smp_call_function_single_async(remsd->cpu, &remsd->csd);
+		remsd = next;
+	}
+#endif
+}
+
 /*
  * net_rps_action_and_irq_enable sends any pending IPI's for rps.
  * Note: called with local irq disabled, but exits with local irq enabled.
@@ -4963,14 +4987,7 @@ static void net_rps_action_and_irq_enable(struct softnet_data *sd)
 		local_irq_enable();
 
 		/* Send pending IPI's to kick RPS processing on remote cpus. */
-		while (remsd) {
-			struct softnet_data *next = remsd->rps_ipi_next;
-
-			if (cpu_online(remsd->cpu))
-				smp_call_function_single_async(remsd->cpu,
-							       &remsd->csd);
-			remsd = next;
-		}
+		net_rps_send_ipi(remsd);
 	} else
 #endif
 		local_irq_enable();
@@ -5199,8 +5216,6 @@ static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock)
 	if (rc == BUSY_POLL_BUDGET)
 		__napi_schedule(napi);
 	local_bh_enable();
-	if (local_softirq_pending())
-		do_softirq();
 }
 
 void napi_busy_loop(unsigned int napi_id,
@@ -6852,6 +6867,32 @@ int dev_change_proto_down(struct net_device *dev, bool proto_down)
 }
 EXPORT_SYMBOL(dev_change_proto_down);
 
+bool __dev_xdp_attached(struct net_device *dev, xdp_op_t xdp_op)
+{
+	struct netdev_xdp xdp;
+
+	memset(&xdp, 0, sizeof(xdp));
+	xdp.command = XDP_QUERY_PROG;
+
+	/* Query must always succeed. */
+	WARN_ON(xdp_op(dev, &xdp) < 0);
+	return xdp.prog_attached;
+}
+
+static int dev_xdp_install(struct net_device *dev, xdp_op_t xdp_op,
+			   struct netlink_ext_ack *extack,
+			   struct bpf_prog *prog)
+{
+	struct netdev_xdp xdp;
+
+	memset(&xdp, 0, sizeof(xdp));
+	xdp.command = XDP_SETUP_PROG;
+	xdp.extack = extack;
+	xdp.prog = prog;
+
+	return xdp_op(dev, &xdp);
+}
+
 /**
  *	dev_change_xdp_fd - set or clear a bpf program for a device rx path
  *	@dev: device
@@ -6864,41 +6905,34 @@ EXPORT_SYMBOL(dev_change_proto_down);
 int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
 		      int fd, u32 flags)
 {
-	int (*xdp_op)(struct net_device *dev, struct netdev_xdp *xdp);
 	const struct net_device_ops *ops = dev->netdev_ops;
 	struct bpf_prog *prog = NULL;
-	struct netdev_xdp xdp;
+	xdp_op_t xdp_op, xdp_chk;
 	int err;
 
 	ASSERT_RTNL();
 
-	xdp_op = ops->ndo_xdp;
+	xdp_op = xdp_chk = ops->ndo_xdp;
+	if (!xdp_op && (flags & XDP_FLAGS_DRV_MODE))
+		return -EOPNOTSUPP;
 	if (!xdp_op || (flags & XDP_FLAGS_SKB_MODE))
 		xdp_op = generic_xdp_install;
+	if (xdp_op == xdp_chk)
+		xdp_chk = generic_xdp_install;
 
 	if (fd >= 0) {
-		if (flags & XDP_FLAGS_UPDATE_IF_NOEXIST) {
-			memset(&xdp, 0, sizeof(xdp));
-			xdp.command = XDP_QUERY_PROG;
-
-			err = xdp_op(dev, &xdp);
-			if (err < 0)
-				return err;
-			if (xdp.prog_attached)
-				return -EBUSY;
-		}
+		if (xdp_chk && __dev_xdp_attached(dev, xdp_chk))
+			return -EEXIST;
+		if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) &&
+		    __dev_xdp_attached(dev, xdp_op))
+			return -EBUSY;
 
 		prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_XDP);
 		if (IS_ERR(prog))
 			return PTR_ERR(prog);
 	}
 
-	memset(&xdp, 0, sizeof(xdp));
-	xdp.command = XDP_SETUP_PROG;
-	xdp.extack = extack;
-	xdp.prog = prog;
-
-	err = xdp_op(dev, &xdp);
+	err = dev_xdp_install(dev, xdp_op, extack, prog);
 	if (err < 0 && prog)
 		bpf_prog_put(prog);
 
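The rewritten dev_change_xdp_fd() tracks two ops: xdp_op is where the new program will be installed, while xdp_chk names the opposite attach path, queried so that installing in one mode while a program is live in the other returns -EEXIST (XDP_FLAGS_UPDATE_IF_NOEXIST still yields -EBUSY against the same mode). A compact userspace model of those rules, with hypothetical names, not the kernel's types:

    #include <errno.h>
    #include <stdbool.h>
    
    enum xdp_mode { MODE_NONE, MODE_DRV, MODE_SKB };
    
    struct dev_model {
    	bool has_ndo_xdp;	/* driver implements native XDP */
    	enum xdp_mode attached;	/* mode of any currently attached program */
    };
    
    static int pick_attach_mode(const struct dev_model *d,
    			    bool want_drv, bool want_skb)
    {
    	enum xdp_mode m;
    
    	if (want_drv && !d->has_ndo_xdp)
    		return -EOPNOTSUPP;	/* native mode needs driver support */
    	m = (!d->has_ndo_xdp || want_skb) ? MODE_SKB : MODE_DRV;
    	if (d->attached != MODE_NONE && d->attached != m)
    		return -EEXIST;		/* other mode already has a program */
    	return m;
    }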
@@ -7482,6 +7516,8 @@ out:
 err_uninit:
 	if (dev->netdev_ops->ndo_uninit)
 		dev->netdev_ops->ndo_uninit(dev);
+	if (dev->priv_destructor)
+		dev->priv_destructor(dev);
 	goto out;
 }
 EXPORT_SYMBOL(register_netdevice);
@@ -7689,8 +7725,10 @@ void netdev_run_todo(void)
 		WARN_ON(rcu_access_pointer(dev->ip6_ptr));
 		WARN_ON(dev->dn_ptr);
 
-		if (dev->destructor)
-			dev->destructor(dev);
+		if (dev->priv_destructor)
+			dev->priv_destructor(dev);
+		if (dev->needs_free_netdev)
+			free_netdev(dev);
 
 		/* Report a network device has been unregistered */
 		rtnl_lock();
@@ -7755,9 +7793,9 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
 	} else {
 		netdev_stats_to_stats64(storage, &dev->stats);
 	}
-	storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
-	storage->tx_dropped += atomic_long_read(&dev->tx_dropped);
-	storage->rx_nohandler += atomic_long_read(&dev->rx_nohandler);
+	storage->rx_dropped += (unsigned long)atomic_long_read(&dev->rx_dropped);
+	storage->tx_dropped += (unsigned long)atomic_long_read(&dev->tx_dropped);
+	storage->rx_nohandler += (unsigned long)atomic_long_read(&dev->rx_nohandler);
 	return storage;
 }
 EXPORT_SYMBOL(dev_get_stats);
@@ -8173,7 +8211,7 @@ static int dev_cpu_dead(unsigned int oldcpu)
 	struct sk_buff **list_skb;
 	struct sk_buff *skb;
 	unsigned int cpu;
-	struct softnet_data *sd, *oldsd;
+	struct softnet_data *sd, *oldsd, *remsd = NULL;
 
 	local_irq_disable();
 	cpu = smp_processor_id();
@@ -8214,6 +8252,13 @@ static int dev_cpu_dead(unsigned int oldcpu)
 	raise_softirq_irqoff(NET_TX_SOFTIRQ);
 	local_irq_enable();
 
+#ifdef CONFIG_RPS
+	remsd = oldsd->rps_ipi_list;
+	oldsd->rps_ipi_list = NULL;
+#endif
+	/* send out pending IPI's on offline CPU */
+	net_rps_send_ipi(remsd);
+
 	/* Process offline CPU's input_pkt_queue */
 	while ((skb = __skb_dequeue(&oldsd->process_queue))) {
 		netif_rx_ni(skb);
diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
index b94b1d293506..27fad31784a8 100644
--- a/net/core/dev_ioctl.c
+++ b/net/core/dev_ioctl.c
@@ -410,6 +410,22 @@ int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
 	if (cmd == SIOCGIFNAME)
 		return dev_ifname(net, (struct ifreq __user *)arg);
 
+	/*
+	 * Take care of Wireless Extensions. Unfortunately struct iwreq
+	 * isn't a proper subset of struct ifreq (it's 8 byte shorter)
+	 * so we need to treat it specially, otherwise applications may
+	 * fault if the struct they're passing happens to land at the
+	 * end of a mapped page.
+	 */
+	if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) {
+		struct iwreq iwr;
+
+		if (copy_from_user(&iwr, arg, sizeof(iwr)))
+			return -EFAULT;
+
+		return wext_handle_ioctl(net, &iwr, cmd, arg);
+	}
+
 	if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
 		return -EFAULT;
 
@@ -559,9 +575,6 @@ int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
 			ret = -EFAULT;
 		return ret;
 	}
-	/* Take care of Wireless Extensions */
-	if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
-		return wext_handle_ioctl(net, &ifr, cmd, arg);
 	return -ENOTTY;
 	}
 }
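Both dev_ioctl.c hunks are one fix: wireless-extension ioctls are intercepted before the generic copy, because struct iwreq is 8 bytes shorter than struct ifreq, and copying sizeof(struct ifreq) from a buffer that is really an iwreq can fault when the buffer ends a mapped page. A toy illustration of the size mismatch (layouts simplified, not the real definitions):

    #include <stdio.h>
    
    struct ifreq_like { char name[16]; char data[24]; };	/* 40 bytes */
    struct iwreq_like { char name[16]; char data[16]; };	/* 32 bytes */
    
    int main(void)
    {
    	/* copying sizeof(struct ifreq_like) from an iwreq_like overruns by 8 */
    	printf("overrun: %zu bytes\n",
    	       sizeof(struct ifreq_like) - sizeof(struct iwreq_like));
    	return 0;
    }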
diff --git a/net/core/devlink.c b/net/core/devlink.c
index b0b87a292e7c..a0adfc31a3fe 100644
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@ -1680,8 +1680,10 @@ start_again:
 
 	hdr = genlmsg_put(skb, info->snd_portid, info->snd_seq,
 			  &devlink_nl_family, NLM_F_MULTI, cmd);
-	if (!hdr)
+	if (!hdr) {
+		nlmsg_free(skb);
 		return -EMSGSIZE;
+	}
 
 	if (devlink_nl_put_handle(skb, devlink))
 		goto nla_put_failure;
@@ -2098,8 +2100,10 @@ start_again:
 
 	hdr = genlmsg_put(skb, info->snd_portid, info->snd_seq,
 			  &devlink_nl_family, NLM_F_MULTI, cmd);
-	if (!hdr)
+	if (!hdr) {
+		nlmsg_free(skb);
 		return -EMSGSIZE;
+	}
 
 	if (devlink_nl_put_handle(skb, devlink))
 		goto nla_put_failure;
diff --git a/net/core/dst.c b/net/core/dst.c
index 960e503b5a52..13ba4a090c41 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -151,13 +151,13 @@ int dst_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
 }
 EXPORT_SYMBOL(dst_discard_out);
 
-const u32 dst_default_metrics[RTAX_MAX + 1] = {
+const struct dst_metrics dst_default_metrics = {
 	/* This initializer is needed to force linker to place this variable
 	 * into const section. Otherwise it might end into bss section.
 	 * We really want to avoid false sharing on this variable, and catch
 	 * any writes on it.
 	 */
-	[RTAX_MAX] = 0xdeadbeef,
+	.refcnt = ATOMIC_INIT(1),
 };
 
 void dst_init(struct dst_entry *dst, struct dst_ops *ops,
@@ -169,7 +169,7 @@ void dst_init(struct dst_entry *dst, struct dst_ops *ops,
 	if (dev)
 		dev_hold(dev);
 	dst->ops = ops;
-	dst_init_metrics(dst, dst_default_metrics, true);
+	dst_init_metrics(dst, dst_default_metrics.metrics, true);
 	dst->expires = 0UL;
 	dst->path = dst;
 	dst->from = NULL;
@@ -314,25 +314,30 @@ EXPORT_SYMBOL(dst_release);
 
 u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old)
 {
-	u32 *p = kmalloc(sizeof(u32) * RTAX_MAX, GFP_ATOMIC);
+	struct dst_metrics *p = kmalloc(sizeof(*p), GFP_ATOMIC);
 
 	if (p) {
-		u32 *old_p = __DST_METRICS_PTR(old);
+		struct dst_metrics *old_p = (struct dst_metrics *)__DST_METRICS_PTR(old);
 		unsigned long prev, new;
 
-		memcpy(p, old_p, sizeof(u32) * RTAX_MAX);
+		atomic_set(&p->refcnt, 1);
+		memcpy(p->metrics, old_p->metrics, sizeof(p->metrics));
 
 		new = (unsigned long) p;
 		prev = cmpxchg(&dst->_metrics, old, new);
 
 		if (prev != old) {
 			kfree(p);
-			p = __DST_METRICS_PTR(prev);
+			p = (struct dst_metrics *)__DST_METRICS_PTR(prev);
 			if (prev & DST_METRICS_READ_ONLY)
 				p = NULL;
+		} else if (prev & DST_METRICS_REFCOUNTED) {
+			if (atomic_dec_and_test(&old_p->refcnt))
+				kfree(old_p);
 		}
 	}
-	return p;
+	BUILD_BUG_ON(offsetof(struct dst_metrics, metrics) != 0);
+	return (u32 *)p;
 }
 EXPORT_SYMBOL(dst_cow_metrics_generic);
 
@@ -341,7 +346,7 @@ void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old)
 {
 	unsigned long prev, new;
 
-	new = ((unsigned long) dst_default_metrics) | DST_METRICS_READ_ONLY;
+	new = ((unsigned long) &dst_default_metrics) | DST_METRICS_READ_ONLY;
 	prev = cmpxchg(&dst->_metrics, old, new);
 	if (prev == old)
 		kfree(__DST_METRICS_PTR(old));
@@ -464,6 +469,20 @@ static int dst_dev_event(struct notifier_block *this, unsigned long event,
 		spin_lock_bh(&dst_garbage.lock);
 		dst = dst_garbage.list;
 		dst_garbage.list = NULL;
+		/* The code in dst_ifdown places a hold on the loopback device.
+		 * If the gc entry processing is set to expire after a lengthy
+		 * interval, this hold can cause netdev_wait_allrefs() to hang
+		 * out and wait for a long time -- until the the loopback
+		 * interface is released.  If we're really unlucky, it'll emit
+		 * pr_emerg messages to console too.  Reset the interval here,
+		 * so dst cleanups occur in a more timely fashion.
+		 */
+		if (dst_garbage.timer_inc > DST_GC_INC) {
+			dst_garbage.timer_inc = DST_GC_INC;
+			dst_garbage.timer_expires = DST_GC_MIN;
+			mod_delayed_work(system_wq, &dst_gc_work,
+					 dst_garbage.timer_expires);
+		}
 		spin_unlock_bh(&dst_garbage.lock);
 
 		if (last)
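The dst.c hunks turn the bare u32 metrics array into a struct dst_metrics that carries a refcount, so copy-on-write metric blocks are freed exactly once even when several writers race through cmpxchg(). The BUILD_BUG_ON works because the array stays at offset 0, keeping struct pointers and __DST_METRICS_PTR() values interchangeable. Roughly, the shape is (a sketch, not the exact header definition):

    struct dst_metrics {
    	u32 metrics[RTAX_MAX];	/* must stay first: offset 0 */
    	atomic_t refcnt;
    };
    
    /* Hypothetical put helper matching the hunk's dec-and-test idiom */
    static void dst_metrics_put(struct dst_metrics *p)
    {
    	if (atomic_dec_and_test(&p->refcnt))
    		kfree(p);	/* last reference frees the shared block */
    }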
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index f21c4d3aeae0..3bba291c6c32 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -568,7 +568,7 @@ int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh,
 	struct net *net = sock_net(skb->sk);
 	struct fib_rule_hdr *frh = nlmsg_data(nlh);
 	struct fib_rules_ops *ops = NULL;
-	struct fib_rule *rule, *tmp;
+	struct fib_rule *rule, *r;
 	struct nlattr *tb[FRA_MAX+1];
 	struct fib_kuid_range range;
 	int err = -EINVAL;
@@ -668,16 +668,23 @@ int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh,
 
 	/*
 	 * Check if this rule is a target to any of them. If so,
+	 * adjust to the next one with the same preference or
 	 * disable them. As this operation is eventually very
-	 * expensive, it is only performed if goto rules have
-	 * actually been added.
+	 * expensive, it is only performed if goto rules, except
+	 * current if it is goto rule, have actually been added.
 	 */
 	if (ops->nr_goto_rules > 0) {
-		list_for_each_entry(tmp, &ops->rules_list, list) {
-			if (rtnl_dereference(tmp->ctarget) == rule) {
-				RCU_INIT_POINTER(tmp->ctarget, NULL);
+		struct fib_rule *n;
+
+		n = list_next_entry(rule, list);
+		if (&n->list == &ops->rules_list || n->pref != rule->pref)
+			n = NULL;
+		list_for_each_entry(r, &ops->rules_list, list) {
+			if (rtnl_dereference(r->ctarget) != rule)
+				continue;
+			rcu_assign_pointer(r->ctarget, n);
+			if (!n)
 				ops->unresolved_rules++;
-			}
 		}
 	}
 
diff --git a/net/core/filter.c b/net/core/filter.c
index a253a6197e6b..a6bb95fa87b2 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -2281,6 +2281,7 @@ bool bpf_helper_changes_pkt_data(void *func)
 	       func == bpf_skb_change_head ||
 	       func == bpf_skb_change_tail ||
 	       func == bpf_skb_pull_data ||
+	       func == bpf_clone_redirect ||
 	       func == bpf_l3_csum_replace ||
 	       func == bpf_l4_csum_replace ||
 	       func == bpf_xdp_adjust_head)
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 58b0bcc125b5..d274f81fcc2c 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1132,10 +1132,6 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
 		lladdr = neigh->ha;
 	}
 
-	if (new & NUD_CONNECTED)
-		neigh->confirmed = jiffies;
-	neigh->updated = jiffies;
-
 	/* If entry was valid and address is not changed,
 	   do not change entry state, if new one is STALE.
 	 */
@@ -1157,6 +1153,16 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
 		}
 	}
 
+	/* Update timestamps only once we know we will make a change to the
+	 * neighbour entry. Otherwise we risk to move the locktime window with
+	 * noop updates and ignore relevant ARP updates.
+	 */
+	if (new != old || lladdr != neigh->ha) {
+		if (new & NUD_CONNECTED)
+			neigh->confirmed = jiffies;
+		neigh->updated = jiffies;
+	}
+
 	if (new != old) {
 		neigh_del_timer(neigh);
 		if (new & NUD_PROBE)
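The neigh_update() change moves the timestamp bumps below the no-op checks: confirmed and updated advance only when the state or the link-layer address actually changes. Otherwise a stream of identical updates keeps sliding the locktime window and genuine ARP updates get discarded as too recent. Condensed, the rule the hunk enforces is (sketch only, mirroring the code above):

    /* Touch timestamps only for real changes, never for no-op updates. */
    if (new != old || lladdr != neigh->ha) {
    	if (new & NUD_CONNECTED)
    		neigh->confirmed = jiffies;
    	neigh->updated = jiffies;
    }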
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 1934efd4a9d4..26bbfababff2 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -315,6 +315,25 @@ out_undo:
 	goto out;
 }
 
+static int __net_init net_defaults_init_net(struct net *net)
+{
+	net->core.sysctl_somaxconn = SOMAXCONN;
+	return 0;
+}
+
+static struct pernet_operations net_defaults_ops = {
+	.init = net_defaults_init_net,
+};
+
+static __init int net_defaults_init(void)
+{
+	if (register_pernet_subsys(&net_defaults_ops))
+		panic("Cannot initialize net default settings");
+
+	return 0;
+}
+
+core_initcall(net_defaults_init);
 
 #ifdef CONFIG_NET_NS
 static struct ucounts *inc_net_namespaces(struct user_namespace *ns)
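The somaxconn default moves out of the sysctl table setup (see the sysctl_net_core.c hunk below) into a dedicated pernet_operations ->init() registered at core_initcall time, so every new namespace gets the default whether or not the sysctl machinery runs for it. The minimal form of that pattern, with hypothetical foo_* names:

    static int __net_init foo_defaults_init_net(struct net *net)
    {
    	/* seed this namespace's defaults here */
    	return 0;
    }
    
    static struct pernet_operations foo_defaults_ops = {
    	.init = foo_defaults_init_net,
    };
    
    static int __init foo_defaults_init(void)
    {
    	return register_pernet_subsys(&foo_defaults_ops);
    }
    core_initcall(foo_defaults_init);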
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index bcb0f610ee42..467a2f4510a7 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -899,8 +899,7 @@ static size_t rtnl_port_size(const struct net_device *dev,
 static size_t rtnl_xdp_size(void)
 {
 	size_t xdp_size = nla_total_size(0) +	/* nest IFLA_XDP */
-			  nla_total_size(1) +	/* XDP_ATTACHED */
-			  nla_total_size(4);	/* XDP_FLAGS */
+			  nla_total_size(1);	/* XDP_ATTACHED */
 
 	return xdp_size;
 }
@@ -932,6 +931,7 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev,
 	       + nla_total_size(1) /* IFLA_LINKMODE */
 	       + nla_total_size(4) /* IFLA_CARRIER_CHANGES */
 	       + nla_total_size(4) /* IFLA_LINK_NETNSID */
+	       + nla_total_size(4) /* IFLA_GROUP */
 	       + nla_total_size(ext_filter_mask
 			        & RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */
 	       + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
@@ -1125,6 +1125,8 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
 	struct ifla_vf_mac vf_mac;
 	struct ifla_vf_info ivi;
 
+	memset(&ivi, 0, sizeof(ivi));
+
 	/* Not all SR-IOV capable drivers support the
 	 * spoofcheck and "RSS query enable" query. Preset to
 	 * -1 so the user space tool can detect that the driver
@@ -1133,7 +1135,6 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
 	ivi.spoofchk = -1;
 	ivi.rss_query_en = -1;
 	ivi.trusted = -1;
-	memset(ivi.mac, 0, sizeof(ivi.mac));
 	/* The default value for VF link state is "auto"
 	 * IFLA_VF_LINK_STATE_AUTO which equals zero
 	 */
@@ -1247,37 +1248,34 @@ static int rtnl_fill_link_ifmap(struct sk_buff *skb, struct net_device *dev)
 	return 0;
 }
 
+static u8 rtnl_xdp_attached_mode(struct net_device *dev)
+{
+	const struct net_device_ops *ops = dev->netdev_ops;
+
+	ASSERT_RTNL();
+
+	if (rcu_access_pointer(dev->xdp_prog))
+		return XDP_ATTACHED_SKB;
+	if (ops->ndo_xdp && __dev_xdp_attached(dev, ops->ndo_xdp))
+		return XDP_ATTACHED_DRV;
+
+	return XDP_ATTACHED_NONE;
+}
+
 static int rtnl_xdp_fill(struct sk_buff *skb, struct net_device *dev)
 {
 	struct nlattr *xdp;
-	u32 xdp_flags = 0;
-	u8 val = 0;
 	int err;
 
 	xdp = nla_nest_start(skb, IFLA_XDP);
 	if (!xdp)
 		return -EMSGSIZE;
-	if (rcu_access_pointer(dev->xdp_prog)) {
-		xdp_flags = XDP_FLAGS_SKB_MODE;
-		val = 1;
-	} else if (dev->netdev_ops->ndo_xdp) {
-		struct netdev_xdp xdp_op = {};
-
-		xdp_op.command = XDP_QUERY_PROG;
-		err = dev->netdev_ops->ndo_xdp(dev, &xdp_op);
-		if (err)
-			goto err_cancel;
-		val = xdp_op.prog_attached;
-	}
-	err = nla_put_u8(skb, IFLA_XDP_ATTACHED, val);
+
+	err = nla_put_u8(skb, IFLA_XDP_ATTACHED,
+			 rtnl_xdp_attached_mode(dev));
 	if (err)
 		goto err_cancel;
 
-	if (xdp_flags) {
-		err = nla_put_u32(skb, IFLA_XDP_FLAGS, xdp_flags);
-		if (err)
-			goto err_cancel;
-	}
 	nla_nest_end(skb, xdp);
 	return 0;
 
@@ -1471,6 +1469,7 @@ static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
 	[IFLA_LINK_NETNSID]	= { .type = NLA_S32 },
 	[IFLA_PROTO_DOWN]	= { .type = NLA_U8 },
 	[IFLA_XDP]		= { .type = NLA_NESTED },
+	[IFLA_GROUP]		= { .type = NLA_U32 },
 };
 
 static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
@@ -1631,13 +1630,13 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
 					       cb->nlh->nlmsg_seq, 0,
 					       flags,
 					       ext_filter_mask);
-			/* If we ran out of room on the first message,
-			 * we're in trouble
-			 */
-			WARN_ON((err == -EMSGSIZE) && (skb->len == 0));
 
-			if (err < 0)
-				goto out;
+			if (err < 0) {
+				if (likely(skb->len))
+					goto out;
+
+				goto out_err;
+			}
 
 			nl_dump_check_consistent(cb, nlmsg_hdr(skb));
 cont:
@@ -1645,10 +1644,12 @@ cont:
 		}
 	}
 out:
+	err = skb->len;
+out_err:
 	cb->args[1] = idx;
 	cb->args[0] = h;
 
-	return skb->len;
+	return err;
 }
 
@@ -2199,6 +2200,11 @@ static int do_setlink(const struct sk_buff *skb,
 			err = -EINVAL;
 			goto errout;
 		}
+		if ((xdp_flags & XDP_FLAGS_SKB_MODE) &&
+		    (xdp_flags & XDP_FLAGS_DRV_MODE)) {
+			err = -EINVAL;
+			goto errout;
+		}
 	}
 
 	if (xdp[IFLA_XDP_FD]) {
@@ -3228,8 +3234,11 @@ static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
 	int err = 0;
 	int fidx = 0;
 
-	if (nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb,
-			IFLA_MAX, ifla_policy, NULL) == 0) {
+	err = nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb,
+			  IFLA_MAX, ifla_policy, NULL);
+	if (err < 0) {
+		return -EINVAL;
+	} else if (err == 0) {
 		if (tb[IFLA_MASTER])
 			br_idx = nla_get_u32(tb[IFLA_MASTER]);
 	}
@@ -3452,8 +3461,12 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
 				err = br_dev->netdev_ops->ndo_bridge_getlink(
 						skb, portid, seq, dev,
 						filter_mask, NLM_F_MULTI);
-				if (err < 0 && err != -EOPNOTSUPP)
-					break;
+				if (err < 0 && err != -EOPNOTSUPP) {
+					if (likely(skb->len))
+						break;
+
+					goto out_err;
+				}
 			}
 			idx++;
 		}
@@ -3464,16 +3477,22 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
 							    seq, dev,
 							    filter_mask,
 							    NLM_F_MULTI);
-			if (err < 0 && err != -EOPNOTSUPP)
-				break;
+			if (err < 0 && err != -EOPNOTSUPP) {
+				if (likely(skb->len))
+					break;
+
+				goto out_err;
+			}
 		}
 		idx++;
 	}
 	}
+	err = skb->len;
+out_err:
 	rcu_read_unlock();
 	cb->args[0] = idx;
 
-	return skb->len;
+	return err;
 }
 
 static inline size_t bridge_nlmsg_size(void)
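Several rtnetlink hunks above implement one convention: a netlink dump callback that has already written messages into the skb must return skb->len, so userspace consumes the partial batch and calls back for more, and may only return a negative error while the skb is still empty, since a negative return with queued data would lose it. Shape of a dump callback following that rule (the helpers are hypothetical):

    static int example_dump(struct sk_buff *skb, struct netlink_callback *cb)
    {
    	int err;
    
    	while (dump_has_more(cb)) {		/* hypothetical cursor */
    		err = dump_fill_one(skb, cb);	/* hypothetical fill step */
    		if (err < 0) {
    			if (likely(skb->len))
    				break;	/* partial batch: report its length */
    			return err;	/* empty skb: safe to report error */
    		}
    	}
    	return skb->len;
    }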
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 346d3e85dfbc..b1be7c01efe2 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3754,8 +3754,11 @@ struct sk_buff *sock_dequeue_err_skb(struct sock *sk)
 
 	spin_lock_irqsave(&q->lock, flags);
 	skb = __skb_dequeue(q);
-	if (skb && (skb_next = skb_peek(q)))
+	if (skb && (skb_next = skb_peek(q))) {
 		icmp_next = is_icmp_err_skb(skb_next);
+		if (icmp_next)
+			sk->sk_err = SKB_EXT_ERR(skb_next)->ee.ee_origin;
+	}
 	spin_unlock_irqrestore(&q->lock, flags);
 
 	if (is_icmp_err_skb(skb) && !icmp_next)
diff --git a/net/core/sock.c b/net/core/sock.c
index 79c6aee6af9b..727f924b7f91 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -139,10 +139,7 @@
 
 #include <trace/events/sock.h>
 
-#ifdef CONFIG_INET
 #include <net/tcp.h>
-#endif
-
 #include <net/busy_poll.h>
 
 static DEFINE_MUTEX(proto_list_mutex);
@@ -1803,28 +1800,24 @@ EXPORT_SYMBOL(skb_set_owner_w);
  * delay queue. We want to allow the owner socket to send more
  * packets, as if they were already TX completed by a typical driver.
  * But we also want to keep skb->sk set because some packet schedulers
- * rely on it (sch_fq for example). So we set skb->truesize to a small
- * amount (1) and decrease sk_wmem_alloc accordingly.
+ * rely on it (sch_fq for example).
  */
 void skb_orphan_partial(struct sk_buff *skb)
 {
-	/* If this skb is a TCP pure ACK or already went here,
-	 * we have nothing to do. 2 is already a very small truesize.
-	 */
-	if (skb->truesize <= 2)
+	if (skb_is_tcp_pure_ack(skb))
 		return;
 
-	/* TCP stack sets skb->ooo_okay based on sk_wmem_alloc,
-	 * so we do not completely orphan skb, but transfert all
-	 * accounted bytes but one, to avoid unexpected reorders.
-	 */
 	if (skb->destructor == sock_wfree
 #ifdef CONFIG_INET
 	    || skb->destructor == tcp_wfree
 #endif
 	    ) {
-		atomic_sub(skb->truesize - 1, &skb->sk->sk_wmem_alloc);
-		skb->truesize = 1;
+		struct sock *sk = skb->sk;
+
+		if (atomic_inc_not_zero(&sk->sk_refcnt)) {
+			atomic_sub(skb->truesize, &sk->sk_wmem_alloc);
+			skb->destructor = sock_efree;
+		}
 	} else {
 		skb_orphan(skb);
 	}
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index ea23254b2457..b7cd9aafe99e 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -479,8 +479,6 @@ static __net_init int sysctl_core_net_init(struct net *net)
 {
 	struct ctl_table *tbl;
 
-	net->core.sysctl_somaxconn = SOMAXCONN;
-
 	tbl = netns_core_table;
 	if (!net_eq(net, &init_net)) {
 		tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL);
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 840f14aaa016..992621172220 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -426,6 +426,9 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk,
 		newsk->sk_backlog_rcv = dccp_v4_do_rcv;
 		newnp->pktoptions  = NULL;
 		newnp->opt	   = NULL;
+		newnp->ipv6_mc_list = NULL;
+		newnp->ipv6_ac_list = NULL;
+		newnp->ipv6_fl_list = NULL;
 		newnp->mcast_oif   = inet6_iif(skb);
 		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;
 
@@ -490,6 +493,9 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk,
 	/* Clone RX bits */
 	newnp->rxopt.all = np->rxopt.all;
 
+	newnp->ipv6_mc_list = NULL;
+	newnp->ipv6_ac_list = NULL;
+	newnp->ipv6_fl_list = NULL;
 	newnp->pktoptions = NULL;
 	newnp->opt	  = NULL;
 	newnp->mcast_oif  = inet6_iif(skb);
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 4b9518a0d248..6f95612b4d32 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -188,12 +188,6 @@ static inline void dnrt_free(struct dn_route *rt)
 	call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
 }
 
-static inline void dnrt_drop(struct dn_route *rt)
-{
-	dst_release(&rt->dst);
-	call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
-}
-
 static void dn_dst_check_expire(unsigned long dummy)
 {
 	int i;
@@ -248,7 +242,7 @@ static int dn_dst_gc(struct dst_ops *ops)
 		}
 		*rtp = rt->dst.dn_next;
 		rt->dst.dn_next = NULL;
-		dnrt_drop(rt);
+		dnrt_free(rt);
 		break;
 	}
 	spin_unlock_bh(&dn_rt_hash_table[i].lock);
@@ -350,7 +344,7 @@ static int dn_insert_route(struct dn_route *rt, unsigned int hash, struct dn_rou
 			dst_use(&rth->dst, now);
 			spin_unlock_bh(&dn_rt_hash_table[hash].lock);
 
-			dnrt_drop(rt);
+			dst_free(&rt->dst);
 			*rp = rth;
 			return 0;
 		}
@@ -380,7 +374,7 @@ static void dn_run_flush(unsigned long dummy)
 	for(; rt; rt = next) {
 		next = rcu_dereference_raw(rt->dst.dn_next);
 		RCU_INIT_POINTER(rt->dst.dn_next, NULL);
-		dst_free((struct dst_entry *)rt);
+		dnrt_free(rt);
 	}
 
 nothing_to_declare:
@@ -1187,7 +1181,7 @@ make_route:
 	if (dev_out->flags & IFF_LOOPBACK)
 		flags |= RTCF_LOCAL;
 
-	rt = dst_alloc(&dn_dst_ops, dev_out, 1, DST_OBSOLETE_NONE, DST_HOST);
+	rt = dst_alloc(&dn_dst_ops, dev_out, 0, DST_OBSOLETE_NONE, DST_HOST);
 	if (rt == NULL)
 		goto e_nobufs;
 
diff --git a/net/decnet/netfilter/dn_rtmsg.c b/net/decnet/netfilter/dn_rtmsg.c
index 1ed81ac6dd1a..aa8ffecc46a4 100644
--- a/net/decnet/netfilter/dn_rtmsg.c
+++ b/net/decnet/netfilter/dn_rtmsg.c
@@ -102,7 +102,9 @@ static inline void dnrmg_receive_user_skb(struct sk_buff *skb)
 {
 	struct nlmsghdr *nlh = nlmsg_hdr(skb);
 
-	if (nlh->nlmsg_len < sizeof(*nlh) || skb->len < nlh->nlmsg_len)
+	if (skb->len < sizeof(*nlh) ||
+	    nlh->nlmsg_len < sizeof(*nlh) ||
+	    skb->len < nlh->nlmsg_len)
 		return;
 
 	if (!netlink_capable(skb, CAP_NET_ADMIN))
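The dn_rtmsg check now validates skb->len before trusting nlh->nlmsg_len: the buffer must hold a complete header, the header's claimed length must itself be at least a header, and the claim must fit inside the buffer. Checked in that order, nothing out of bounds is ever read. A standalone C version of the predicate (header layout simplified):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    
    struct nlmsghdr_like {
    	uint32_t nlmsg_len;	/* total length claimed by the sender */
    	/* remaining header fields omitted */
    };
    
    static bool nlmsg_sane(const struct nlmsghdr_like *nlh, size_t buf_len)
    {
    	return buf_len >= sizeof(*nlh) &&		/* header fully present */
    	       nlh->nlmsg_len >= sizeof(*nlh) &&	/* claim covers a header */
    	       buf_len >= nlh->nlmsg_len;		/* claim fits the buffer */
    }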
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index 26130ae438da..90038d45a547 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -223,6 +223,53 @@ static int dsa_switch_rcv(struct sk_buff *skb, struct net_device *dev,
 	return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
+int dsa_switch_suspend(struct dsa_switch *ds)
+{
+	int i, ret = 0;
+
+	/* Suspend slave network devices */
+	for (i = 0; i < ds->num_ports; i++) {
+		if (!dsa_is_port_initialized(ds, i))
+			continue;
+
+		ret = dsa_slave_suspend(ds->ports[i].netdev);
+		if (ret)
+			return ret;
+	}
+
+	if (ds->ops->suspend)
+		ret = ds->ops->suspend(ds);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dsa_switch_suspend);
+
+int dsa_switch_resume(struct dsa_switch *ds)
+{
+	int i, ret = 0;
+
+	if (ds->ops->resume)
+		ret = ds->ops->resume(ds);
+
+	if (ret)
+		return ret;
+
+	/* Resume slave network devices */
+	for (i = 0; i < ds->num_ports; i++) {
+		if (!dsa_is_port_initialized(ds, i))
+			continue;
+
+		ret = dsa_slave_resume(ds->ports[i].netdev);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(dsa_switch_resume);
+#endif
+
 static struct packet_type dsa_pack_type __read_mostly = {
 	.type	= cpu_to_be16(ETH_P_XDSA),
 	.func	= dsa_switch_rcv,
diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
index 033b3bfb63dc..7796580e99ee 100644
--- a/net/dsa/dsa2.c
+++ b/net/dsa/dsa2.c
@@ -484,8 +484,10 @@ static void dsa_dst_unapply(struct dsa_switch_tree *dst)
 		dsa_ds_unapply(dst, ds);
 	}
 
-	if (dst->cpu_switch)
+	if (dst->cpu_switch) {
 		dsa_cpu_port_ethtool_restore(dst->cpu_switch);
+		dst->cpu_switch = NULL;
+	}
 
 	pr_info("DSA: tree %d unapplied\n", dst->tree);
 	dst->applied = false;
diff --git a/net/dsa/legacy.c b/net/dsa/legacy.c
index ad345c8b0b06..7281098df04e 100644
--- a/net/dsa/legacy.c
+++ b/net/dsa/legacy.c
@@ -289,53 +289,6 @@ static void dsa_switch_destroy(struct dsa_switch *ds)
 	dsa_switch_unregister_notifier(ds);
 }
 
-#ifdef CONFIG_PM_SLEEP
-int dsa_switch_suspend(struct dsa_switch *ds)
-{
-	int i, ret = 0;
-
-	/* Suspend slave network devices */
-	for (i = 0; i < ds->num_ports; i++) {
-		if (!dsa_is_port_initialized(ds, i))
-			continue;
-
-		ret = dsa_slave_suspend(ds->ports[i].netdev);
-		if (ret)
-			return ret;
-	}
-
-	if (ds->ops->suspend)
-		ret = ds->ops->suspend(ds);
-
-	return ret;
-}
-EXPORT_SYMBOL_GPL(dsa_switch_suspend);
-
-int dsa_switch_resume(struct dsa_switch *ds)
-{
-	int i, ret = 0;
-
-	if (ds->ops->resume)
-		ret = ds->ops->resume(ds);
-
-	if (ret)
-		return ret;
-
-	/* Resume slave network devices */
-	for (i = 0; i < ds->num_ports; i++) {
-		if (!dsa_is_port_initialized(ds, i))
-			continue;
-
-		ret = dsa_slave_resume(ds->ports[i].netdev);
-		if (ret)
-			return ret;
-	}
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(dsa_switch_resume);
-#endif
-
 /* platform driver init and cleanup *****************************************/
 static int dev_is_class(struct device *dev, void *class)
 {
diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
index c73160fb11e7..0a0a392dc2bd 100644
--- a/net/hsr/hsr_device.c
+++ b/net/hsr/hsr_device.c
@@ -378,7 +378,6 @@ static void hsr_dev_destroy(struct net_device *hsr_dev)
 	del_timer_sync(&hsr->announce_timer);
 
 	synchronize_rcu();
-	free_netdev(hsr_dev);
 }
 
 static const struct net_device_ops hsr_device_ops = {
@@ -404,7 +403,8 @@ void hsr_dev_setup(struct net_device *dev)
 	SET_NETDEV_DEVTYPE(dev, &hsr_type);
 	dev->priv_flags |= IFF_NO_QUEUE;
 
-	dev->destructor = hsr_dev_destroy;
+	dev->needs_free_netdev = true;
+	dev->priv_destructor = hsr_dev_destroy;
 
 	dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA |
 			   NETIF_F_GSO_MASK | NETIF_F_HW_CSUM |
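This is the recurring pattern in this series: the single dev->destructor, which both released private state and called free_netdev(), is split into a priv_destructor that only tears down private state and a needs_free_netdev flag telling the core to do the final free. A userspace sketch of the split, with illustrative names rather than the real netdev structures:

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Toy model: the owner-specific callback releases only private
     * state; the core frees the object itself when needs_free is set.
     */
    struct dev {
        void (*priv_destructor)(struct dev *);
        bool needs_free;
        void *priv;
    };

    static void my_destroy(struct dev *d)
    {
        free(d->priv);          /* private teardown only, no free(d) */
        printf("priv released\n");
    }

    static void core_unregister(struct dev *d)
    {
        if (d->priv_destructor)
            d->priv_destructor(d);  /* driver hook */
        if (d->needs_free)
            free(d);                /* core owns the final free */
    }

    int main(void)
    {
        struct dev *d = calloc(1, sizeof(*d));

        if (!d)
            return 1;
        d->priv = malloc(16);
        d->priv_destructor = my_destroy;
        d->needs_free = true;
        core_unregister(d);
        return 0;
    }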
diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c
index 4ebe2aa3e7d3..04b5450c5a55 100644
--- a/net/hsr/hsr_forward.c
+++ b/net/hsr/hsr_forward.c
@@ -324,8 +324,7 @@ static int hsr_fill_frame_info(struct hsr_frame_info *frame,
 	unsigned long irqflags;
 
 	frame->is_supervision = is_supervision_frame(port->hsr, skb);
-	frame->node_src = hsr_get_node(&port->hsr->node_db, skb,
-				       frame->is_supervision);
+	frame->node_src = hsr_get_node(port, skb, frame->is_supervision);
 	if (frame->node_src == NULL)
 		return -1; /* Unknown node and !is_supervision, or no mem */
 
diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c
index 7ea925816f79..284a9b820df8 100644
--- a/net/hsr/hsr_framereg.c
+++ b/net/hsr/hsr_framereg.c
@@ -158,9 +158,10 @@ struct hsr_node *hsr_add_node(struct list_head *node_db, unsigned char addr[],
 
 /* Get the hsr_node from which 'skb' was sent.
  */
-struct hsr_node *hsr_get_node(struct list_head *node_db, struct sk_buff *skb,
+struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb,
 			      bool is_sup)
 {
+	struct list_head *node_db = &port->hsr->node_db;
 	struct hsr_node *node;
 	struct ethhdr *ethhdr;
 	u16 seq_out;
@@ -186,7 +187,11 @@ struct hsr_node *hsr_get_node(struct list_head *node_db, struct sk_buff *skb,
 		 */
 		seq_out = hsr_get_skb_sequence_nr(skb) - 1;
 	} else {
-		WARN_ONCE(1, "%s: Non-HSR frame\n", __func__);
+		/* this is called also for frames from master port and
+		 * so warn only for non master ports
+		 */
+		if (port->type != HSR_PT_MASTER)
+			WARN_ONCE(1, "%s: Non-HSR frame\n", __func__);
 		seq_out = HSR_SEQNR_START;
 	}
 
diff --git a/net/hsr/hsr_framereg.h b/net/hsr/hsr_framereg.h
index 438b40f98f5a..4e04f0e868e9 100644
--- a/net/hsr/hsr_framereg.h
+++ b/net/hsr/hsr_framereg.h
@@ -18,7 +18,7 @@ struct hsr_node;
 
 struct hsr_node *hsr_add_node(struct list_head *node_db, unsigned char addr[],
 			      u16 seq_out);
-struct hsr_node *hsr_get_node(struct list_head *node_db, struct sk_buff *skb,
+struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb,
 			      bool is_sup);
 void hsr_handle_sup_frame(struct sk_buff *skb, struct hsr_node *node_curr,
 			  struct hsr_port *port);
diff --git a/net/ieee802154/6lowpan/core.c b/net/ieee802154/6lowpan/core.c
index d7efbf0dad20..0a866f332290 100644
--- a/net/ieee802154/6lowpan/core.c
+++ b/net/ieee802154/6lowpan/core.c
@@ -107,7 +107,7 @@ static void lowpan_setup(struct net_device *ldev)
 
 	ldev->netdev_ops	= &lowpan_netdev_ops;
 	ldev->header_ops	= &lowpan_header_ops;
-	ldev->destructor	= free_netdev;
+	ldev->needs_free_netdev	= true;
 	ldev->features		|= NETIF_F_NETNS_LOCAL;
 }
 
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index f3dad1661343..58925b6597de 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1043,7 +1043,7 @@ static struct inet_protosw inetsw_array[] =
 		.type =       SOCK_DGRAM,
 		.protocol =   IPPROTO_ICMP,
 		.prot =       &ping_prot,
-		.ops =        &inet_dgram_ops,
+		.ops =        &inet_sockraw_ops,
 		.flags =      INET_PROTOSW_REUSE,
 	},
 
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 0937b34c27ca..e9f3386a528b 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -641,6 +641,32 @@ void arp_xmit(struct sk_buff *skb)
 }
 EXPORT_SYMBOL(arp_xmit);
 
+static bool arp_is_garp(struct net *net, struct net_device *dev,
+			int *addr_type, __be16 ar_op,
+			__be32 sip, __be32 tip,
+			unsigned char *sha, unsigned char *tha)
+{
+	bool is_garp = tip == sip;
+
+	/* Gratuitous ARP _replies_ also require target hwaddr to be
+	 * the same as source.
+	 */
+	if (is_garp && ar_op == htons(ARPOP_REPLY))
+		is_garp =
+			/* IPv4 over IEEE 1394 doesn't provide target
+			 * hardware address field in its ARP payload.
+			 */
+			tha &&
+			!memcmp(tha, sha, dev->addr_len);
+
+	if (is_garp) {
+		*addr_type = inet_addr_type_dev_table(net, dev, sip);
+		if (*addr_type != RTN_UNICAST)
+			is_garp = false;
+	}
+	return is_garp;
+}
+
 /*
  *	Process an arp request.
  */
@@ -653,6 +679,7 @@ static int arp_process(struct net *net, struct sock *sk, struct sk_buff *skb)
 	unsigned char *arp_ptr;
 	struct rtable *rt;
 	unsigned char *sha;
+	unsigned char *tha = NULL;
 	__be32 sip, tip;
 	u16 dev_type = dev->type;
 	int addr_type;
@@ -724,6 +751,7 @@ static int arp_process(struct net *net, struct sock *sk, struct sk_buff *skb)
 		break;
 #endif
 	default:
+		tha = arp_ptr;
 		arp_ptr += dev->addr_len;
 	}
 	memcpy(&tip, arp_ptr, 4);
@@ -835,19 +863,25 @@ static int arp_process(struct net *net, struct sock *sk, struct sk_buff *skb)
 
 	n = __neigh_lookup(&arp_tbl, &sip, dev, 0);
 
-	if (IN_DEV_ARP_ACCEPT(in_dev)) {
-		unsigned int addr_type = inet_addr_type_dev_table(net, dev, sip);
+	addr_type = -1;
+	if (n || IN_DEV_ARP_ACCEPT(in_dev)) {
+		is_garp = arp_is_garp(net, dev, &addr_type, arp->ar_op,
+				      sip, tip, sha, tha);
+	}
 
+	if (IN_DEV_ARP_ACCEPT(in_dev)) {
 		/* Unsolicited ARP is not accepted by default.
 		   It is possible, that this option should be enabled for some
 		   devices (strip is candidate)
 		 */
-		is_garp = arp->ar_op == htons(ARPOP_REQUEST) && tip == sip &&
-			  addr_type == RTN_UNICAST;
-
 		if (!n &&
-		    ((arp->ar_op == htons(ARPOP_REPLY) &&
-		      addr_type == RTN_UNICAST) || is_garp))
+		    (is_garp ||
+		     (arp->ar_op == htons(ARPOP_REPLY) &&
+		      (addr_type == RTN_UNICAST ||
+		       (addr_type < 0 &&
+			/* postpone calculation to as late as possible */
+			inet_addr_type_dev_table(net, dev, sip) ==
+				RTN_UNICAST)))))
 			n = __neigh_lookup(&arp_tbl, &sip, dev, 1);
 	}
 
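The helper factored out above encodes what counts as gratuitous ARP: target IP equal to sender IP, and for replies also a target MAC equal to the sender MAC when the link type carries one (IPv4 over IEEE 1394 does not). A standalone restatement of that predicate, with the kernel's unicast route check stubbed out:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define OP_REQUEST 1
    #define OP_REPLY   2

    static bool is_garp(uint16_t op, uint32_t sip, uint32_t tip,
                        const uint8_t *sha, const uint8_t *tha, size_t alen)
    {
        bool garp = tip == sip;

        /* Replies must also carry a matching target MAC, when present. */
        if (garp && op == OP_REPLY)
            garp = tha && !memcmp(tha, sha, alen);
        return garp;
    }

    int main(void)
    {
        uint8_t mac[6] = { 0xde, 0xad, 0xbe, 0xef, 0x00, 0x01 };

        /* request announcing 10.0.0.1 from itself: gratuitous */
        printf("%d\n", is_garp(OP_REQUEST, 0x0a000001, 0x0a000001,
                               mac, NULL, 6));
        /* reply with missing target MAC: not accepted as gratuitous */
        printf("%d\n", is_garp(OP_REPLY, 0x0a000001, 0x0a000001,
                               mac, NULL, 6));
        return 0;
    }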
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 65cc02bd82bc..93322f895eab 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -248,6 +248,7 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
 	u8 *tail;
 	u8 *vaddr;
 	int nfrags;
+	int esph_offset;
 	struct page *page;
 	struct sk_buff *trailer;
 	int tailen = esp->tailen;
@@ -313,11 +314,13 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
 	}
 
 cow:
+	esph_offset = (unsigned char *)esp->esph - skb_transport_header(skb);
+
 	nfrags = skb_cow_data(skb, tailen, &trailer);
 	if (nfrags < 0)
 		goto out;
 	tail = skb_tail_pointer(trailer);
-	esp->esph = ip_esp_hdr(skb);
+	esp->esph = (struct ip_esp_hdr *)(skb_transport_header(skb) + esph_offset);
 
 skip_cow:
 	esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);
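The esp4 change guards against skb_cow_data() reallocating the buffer that esp->esph points into: the pointer is converted to an offset before the call and rebuilt from the new transport header afterwards. The same discipline in plain userspace C, using realloc() as the stand-in for skb_cow_data():

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
        char *buf = malloc(16);

        if (!buf)
            return 1;
        strcpy(buf, "hdr:payload");

        char *hdr = buf + 4;            /* points at "payload" */
        size_t off = hdr - buf;         /* save offset, not pointer */

        char *tmp = realloc(buf, 4096); /* may move the block */
        if (!tmp) {
            free(buf);
            return 1;
        }
        buf = tmp;
        hdr = buf + off;                /* recompute from new base */
        printf("%s\n", hdr);            /* prints "payload" */

        free(buf);
        return 0;
    }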
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 39bd1edee676..83e3ed258467 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -763,7 +763,7 @@ static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
 	unsigned int e = 0, s_e;
 	struct fib_table *tb;
 	struct hlist_head *head;
-	int dumped = 0;
+	int dumped = 0, err;
 
 	if (nlmsg_len(cb->nlh) >= sizeof(struct rtmsg) &&
 	    ((struct rtmsg *) nlmsg_data(cb->nlh))->rtm_flags & RTM_F_CLONED)
@@ -783,20 +783,27 @@ static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
 			if (dumped)
 				memset(&cb->args[2], 0, sizeof(cb->args) -
 						 2 * sizeof(cb->args[0]));
-			if (fib_table_dump(tb, skb, cb) < 0)
-				goto out;
+			err = fib_table_dump(tb, skb, cb);
+			if (err < 0) {
+				if (likely(skb->len))
+					goto out;
+
+				goto out_err;
+			}
 			dumped = 1;
 next:
 			e++;
 		}
 	}
 out:
+	err = skb->len;
+out_err:
 	rcu_read_unlock();
 
 	cb->args[1] = e;
 	cb->args[0] = h;
 
-	return skb->len;
+	return err;
 }
 
 /* Prepare and feed intra-kernel routing request.
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index da449ddb8cc1..ad9ad4aab5da 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -203,6 +203,7 @@ static void rt_fibinfo_free_cpus(struct rtable __rcu * __percpu *rtp)
 static void free_fib_info_rcu(struct rcu_head *head)
 {
 	struct fib_info *fi = container_of(head, struct fib_info, rcu);
+	struct dst_metrics *m;
 
 	change_nexthops(fi) {
 		if (nexthop_nh->nh_dev)
@@ -213,8 +214,9 @@ static void free_fib_info_rcu(struct rcu_head *head)
 		rt_fibinfo_free(&nexthop_nh->nh_rth_input);
 	} endfor_nexthops(fi);
 
-	if (fi->fib_metrics != (u32 *) dst_default_metrics)
-		kfree(fi->fib_metrics);
+	m = fi->fib_metrics;
+	if (m != &dst_default_metrics && atomic_dec_and_test(&m->refcnt))
+		kfree(m);
 	kfree(fi);
 }
 
@@ -971,11 +973,11 @@ fib_convert_metrics(struct fib_info *fi, const struct fib_config *cfg)
 			val = 255;
 		if (type == RTAX_FEATURES && (val & ~RTAX_FEATURE_MASK))
 			return -EINVAL;
-		fi->fib_metrics[type - 1] = val;
+		fi->fib_metrics->metrics[type - 1] = val;
 	}
 
 	if (ecn_ca)
-		fi->fib_metrics[RTAX_FEATURES - 1] |= DST_FEATURE_ECN_CA;
+		fi->fib_metrics->metrics[RTAX_FEATURES - 1] |= DST_FEATURE_ECN_CA;
 
 	return 0;
 }
@@ -1033,11 +1035,12 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
 		goto failure;
 	fib_info_cnt++;
 	if (cfg->fc_mx) {
-		fi->fib_metrics = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
+		fi->fib_metrics = kzalloc(sizeof(*fi->fib_metrics), GFP_KERNEL);
 		if (!fi->fib_metrics)
 			goto failure;
+		atomic_set(&fi->fib_metrics->refcnt, 1);
 	} else
-		fi->fib_metrics = (u32 *) dst_default_metrics;
+		fi->fib_metrics = (struct dst_metrics *)&dst_default_metrics;
 
 	fi->fib_net = net;
 	fi->fib_protocol = cfg->fc_protocol;
@@ -1238,7 +1241,7 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
 	if (fi->fib_priority &&
 	    nla_put_u32(skb, RTA_PRIORITY, fi->fib_priority))
 		goto nla_put_failure;
-	if (rtnetlink_put_metrics(skb, fi->fib_metrics) < 0)
+	if (rtnetlink_put_metrics(skb, fi->fib_metrics->metrics) < 0)
 		goto nla_put_failure;
 
 	if (fi->fib_prefsrc &&
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 1201409ba1dc..51182ff2b441 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1983,6 +1983,8 @@ static int fn_trie_dump_leaf(struct key_vector *l, struct fib_table *tb,
 
 	/* rcu_read_lock is hold by caller */
 	hlist_for_each_entry_rcu(fa, &l->leaf, fa_list) {
+		int err;
+
 		if (i < s_i) {
 			i++;
 			continue;
@@ -1993,17 +1995,14 @@ static int fn_trie_dump_leaf(struct key_vector *l, struct fib_table *tb,
 			continue;
 		}
 
-		if (fib_dump_info(skb, NETLINK_CB(cb->skb).portid,
-				  cb->nlh->nlmsg_seq,
-				  RTM_NEWROUTE,
-				  tb->tb_id,
-				  fa->fa_type,
-				  xkey,
-				  KEYLENGTH - fa->fa_slen,
-				  fa->fa_tos,
-				  fa->fa_info, NLM_F_MULTI) < 0) {
+		err = fib_dump_info(skb, NETLINK_CB(cb->skb).portid,
+				    cb->nlh->nlmsg_seq, RTM_NEWROUTE,
+				    tb->tb_id, fa->fa_type,
+				    xkey, KEYLENGTH - fa->fa_slen,
+				    fa->fa_tos, fa->fa_info, NLM_F_MULTI);
+		if (err < 0) {
 			cb->args[4] = i;
-			return -1;
+			return err;
 		}
 		i++;
 	}
@@ -2025,10 +2024,13 @@ int fib_table_dump(struct fib_table *tb, struct sk_buff *skb,
 	t_key key = cb->args[3];
 
 	while ((l = leaf_walk_rcu(&tp, key)) != NULL) {
-		if (fn_trie_dump_leaf(l, tb, skb, cb) < 0) {
+		int err;
+
+		err = fn_trie_dump_leaf(l, tb, skb, cb);
+		if (err < 0) {
 			cb->args[3] = key;
 			cb->args[2] = count;
-			return -1;
+			return err;
 		}
 
 		++count;
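Both fib hunks above move the dump path from a bare -1 to the usual netlink dump convention: inner helpers return a negative errno, and the top level returns the bytes written unless nothing was written at all, in which case the real error is surfaced. A compact sketch of that convention with hypothetical helpers:

    #include <errno.h>
    #include <stdio.h>

    /* Pretend to emit one record; report a full buffer at i == 3. */
    static int fill_one(int i, int *buflen)
    {
        if (i == 3)
            return -EMSGSIZE;   /* buffer full: not a real failure */
        *buflen += 8;           /* one record emitted */
        return 0;
    }

    static int dump(void)
    {
        int buflen = 0;

        for (int i = 0; i < 10; i++) {
            int err = fill_one(i, &buflen);

            if (err < 0) {
                if (buflen)     /* partial dump: report progress */
                    return buflen;
                return err;     /* empty dump: surface the error */
            }
        }
        return buflen;
    }

    int main(void)
    {
        printf("dump() = %d\n", dump());    /* 24: three records fit */
        return 0;
    }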
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 43318b5f5647..9144fa7df2ad 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -657,8 +657,12 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
 	/* Needed by both icmp_global_allow and icmp_xmit_lock */
 	local_bh_disable();
 
-	/* Check global sysctl_icmp_msgs_per_sec ratelimit */
-	if (!icmpv4_global_allow(net, type, code))
+	/* Check global sysctl_icmp_msgs_per_sec ratelimit, unless
+	 * incoming dev is loopback. If outgoing dev change to not be
+	 * loopback, then peer ratelimit still work (in icmpv4_xrlim_allow)
+	 */
+	if (!(skb_in->dev && (skb_in->dev->flags&IFF_LOOPBACK)) &&
+	    !icmpv4_global_allow(net, type, code))
 		goto out_bh_enable;
 
 	sk = icmp_xmit_lock(net);
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 44fd86de2823..ec9a396fa466 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -1112,6 +1112,7 @@ static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im)
 	pmc = kzalloc(sizeof(*pmc), GFP_KERNEL);
 	if (!pmc)
 		return;
+	spin_lock_init(&pmc->lock);
 	spin_lock_bh(&im->lock);
 	pmc->interface = im->interface;
 	in_dev_hold(in_dev);
@@ -2071,21 +2072,26 @@ static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
 
 static void ip_mc_clear_src(struct ip_mc_list *pmc)
 {
-	struct ip_sf_list *psf, *nextpsf;
+	struct ip_sf_list *psf, *nextpsf, *tomb, *sources;
 
-	for (psf = pmc->tomb; psf; psf = nextpsf) {
+	spin_lock_bh(&pmc->lock);
+	tomb = pmc->tomb;
+	pmc->tomb = NULL;
+	sources = pmc->sources;
+	pmc->sources = NULL;
+	pmc->sfmode = MCAST_EXCLUDE;
+	pmc->sfcount[MCAST_INCLUDE] = 0;
+	pmc->sfcount[MCAST_EXCLUDE] = 1;
+	spin_unlock_bh(&pmc->lock);
+
+	for (psf = tomb; psf; psf = nextpsf) {
 		nextpsf = psf->sf_next;
 		kfree(psf);
 	}
-	pmc->tomb = NULL;
-	for (psf = pmc->sources; psf; psf = nextpsf) {
+	for (psf = sources; psf; psf = nextpsf) {
 		nextpsf = psf->sf_next;
 		kfree(psf);
 	}
-	pmc->sources = NULL;
-	pmc->sfmode = MCAST_EXCLUDE;
-	pmc->sfcount[MCAST_INCLUDE] = 0;
-	pmc->sfcount[MCAST_EXCLUDE] = 1;
 }
 
 /* Join a multicast group
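The reworked ip_mc_clear_src() is the classic detach-then-free shape: the lists are unhooked and the counters reset while the lock is held, and the actual kfree() calls run with the lock dropped, so free() never executes under the spinlock and concurrent readers see either the full list or an empty one. A userspace sketch of the same shape using a pthread mutex:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct node {
        struct node *next;
    };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static struct node *head;

    static void clear_list(void)
    {
        struct node *n, *next;

        pthread_mutex_lock(&lock);
        n = head;               /* detach under the lock... */
        head = NULL;
        pthread_mutex_unlock(&lock);

        for (; n; n = next) {   /* ...free outside it */
            next = n->next;
            free(n);
        }
    }

    int main(void)
    {
        for (int i = 0; i < 3; i++) {
            struct node *n = malloc(sizeof(*n));

            if (!n)
                return 1;
            n->next = head;
            head = n;
        }
        clear_list();
        printf("list cleared\n");
        return 0;
    }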
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 7a3fd25e8913..532b36e9ce2a 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -964,7 +964,8 @@ static int __ip_append_data(struct sock *sk,
 		csummode = CHECKSUM_PARTIAL;
 
 	cork->length += length;
-	if ((((length + fragheaderlen) > mtu) || (skb && skb_is_gso(skb))) &&
+	if ((((length + (skb ? skb->len : fragheaderlen)) > mtu) ||
+	     (skb && skb_is_gso(skb))) &&
 	    (sk->sk_protocol == IPPROTO_UDP) &&
 	    (rt->dst.dev->features & NETIF_F_UFO) && !dst_xfrm(&rt->dst) &&
 	    (sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx) {
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index b878ecbc0608..129d1a3616f8 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -446,6 +446,8 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
 	return 0;
 
 drop:
+	if (tun_dst)
+		dst_release((struct dst_entry *)tun_dst);
 	kfree_skb(skb);
 	return 0;
 }
@@ -967,7 +969,6 @@ static void ip_tunnel_dev_free(struct net_device *dev)
 	gro_cells_destroy(&tunnel->gro_cells);
 	dst_cache_destroy(&tunnel->dst_cache);
 	free_percpu(dev->tstats);
-	free_netdev(dev);
 }
 
 void ip_tunnel_dellink(struct net_device *dev, struct list_head *head)
@@ -1155,7 +1156,8 @@ int ip_tunnel_init(struct net_device *dev)
 	struct iphdr *iph = &tunnel->parms.iph;
 	int err;
 
-	dev->destructor	= ip_tunnel_dev_free;
+	dev->needs_free_netdev = true;
+	dev->priv_destructor = ip_tunnel_dev_free;
 	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
 	if (!dev->tstats)
 		return -ENOMEM;
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 3a02d52ed50e..8ae425cad818 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -101,8 +101,8 @@ static struct mr_table *ipmr_new_table(struct net *net, u32 id);
 static void ipmr_free_table(struct mr_table *mrt);
 
 static void ip_mr_forward(struct net *net, struct mr_table *mrt,
-			  struct sk_buff *skb, struct mfc_cache *cache,
-			  int local);
+			  struct net_device *dev, struct sk_buff *skb,
+			  struct mfc_cache *cache, int local);
 static int ipmr_cache_report(struct mr_table *mrt,
 			     struct sk_buff *pkt, vifi_t vifi, int assert);
 static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
@@ -501,7 +501,7 @@ static void reg_vif_setup(struct net_device *dev)
 	dev->mtu		= ETH_DATA_LEN - sizeof(struct iphdr) - 8;
 	dev->flags		= IFF_NOARP;
 	dev->netdev_ops		= &reg_vif_netdev_ops;
-	dev->destructor		= free_netdev;
+	dev->needs_free_netdev	= true;
 	dev->features		|= NETIF_F_NETNS_LOCAL;
 }
 
@@ -988,7 +988,7 @@ static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt,
 
 			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
 		} else {
-			ip_mr_forward(net, mrt, skb, c, 0);
+			ip_mr_forward(net, mrt, skb->dev, skb, c, 0);
 		}
 	}
 }
@@ -1073,7 +1073,7 @@ static int ipmr_cache_report(struct mr_table *mrt,
 
 /* Queue a packet for resolution. It gets locked cache entry! */
 static int ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi,
-				 struct sk_buff *skb)
+				 struct sk_buff *skb, struct net_device *dev)
 {
 	const struct iphdr *iph = ip_hdr(skb);
 	struct mfc_cache *c;
@@ -1130,6 +1130,10 @@ static int ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi,
 		kfree_skb(skb);
 		err = -ENOBUFS;
 	} else {
+		if (dev) {
+			skb->dev = dev;
+			skb->skb_iif = dev->ifindex;
+		}
 		skb_queue_tail(&c->mfc_un.unres.unresolved, skb);
 		err = 0;
 	}
@@ -1828,10 +1832,10 @@ static int ipmr_find_vif(struct mr_table *mrt, struct net_device *dev)
 
 /* "local" means that we should preserve one skb (for local delivery) */
 static void ip_mr_forward(struct net *net, struct mr_table *mrt,
-			  struct sk_buff *skb, struct mfc_cache *cache,
-			  int local)
+			  struct net_device *dev, struct sk_buff *skb,
+			  struct mfc_cache *cache, int local)
 {
-	int true_vifi = ipmr_find_vif(mrt, skb->dev);
+	int true_vifi = ipmr_find_vif(mrt, dev);
 	int psend = -1;
 	int vif, ct;
 
@@ -1853,13 +1857,7 @@ static void ip_mr_forward(struct net *net, struct mr_table *mrt,
 	}
 
 	/* Wrong interface: drop packet and (maybe) send PIM assert. */
-	if (mrt->vif_table[vif].dev != skb->dev) {
-		struct net_device *mdev;
-
-		mdev = l3mdev_master_dev_rcu(mrt->vif_table[vif].dev);
-		if (mdev == skb->dev)
-			goto forward;
-
+	if (mrt->vif_table[vif].dev != dev) {
 		if (rt_is_output_route(skb_rtable(skb))) {
 			/* It is our own packet, looped back.
 			 * Very complicated situation...
@@ -1980,6 +1978,20 @@ int ip_mr_input(struct sk_buff *skb)
 	struct net *net = dev_net(skb->dev);
 	int local = skb_rtable(skb)->rt_flags & RTCF_LOCAL;
 	struct mr_table *mrt;
+	struct net_device *dev;
+
+	/* skb->dev passed in is the loX master dev for vrfs.
+	 * As there are no vifs associated with loopback devices,
+	 * get the proper interface that does have a vif associated with it.
+	 */
+	dev = skb->dev;
+	if (netif_is_l3_master(skb->dev)) {
+		dev = dev_get_by_index_rcu(net, IPCB(skb)->iif);
+		if (!dev) {
+			kfree_skb(skb);
+			return -ENODEV;
+		}
+	}
 
 	/* Packet is looped back after forward, it should not be
 	 * forwarded second time, but still can be delivered locally.
@@ -2017,7 +2029,7 @@ int ip_mr_input(struct sk_buff *skb)
 	/* already under rcu_read_lock() */
 	cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);
 	if (!cache) {
-		int vif = ipmr_find_vif(mrt, skb->dev);
+		int vif = ipmr_find_vif(mrt, dev);
 
 		if (vif >= 0)
 			cache = ipmr_cache_find_any(mrt, ip_hdr(skb)->daddr,
@@ -2037,9 +2049,9 @@ int ip_mr_input(struct sk_buff *skb)
 	}
 
 	read_lock(&mrt_lock);
-	vif = ipmr_find_vif(mrt, skb->dev);
+	vif = ipmr_find_vif(mrt, dev);
 	if (vif >= 0) {
-		int err2 = ipmr_cache_unresolved(mrt, vif, skb);
+		int err2 = ipmr_cache_unresolved(mrt, vif, skb, dev);
 		read_unlock(&mrt_lock);
 
 		return err2;
@@ -2050,7 +2062,7 @@ int ip_mr_input(struct sk_buff *skb)
 	}
 
 	read_lock(&mrt_lock);
-	ip_mr_forward(net, mrt, skb, cache, local);
+	ip_mr_forward(net, mrt, dev, skb, cache, local);
 	read_unlock(&mrt_lock);
 
 	if (local)
@@ -2224,7 +2236,7 @@ int ipmr_get_route(struct net *net, struct sk_buff *skb,
 		iph->saddr = saddr;
 		iph->daddr = daddr;
 		iph->version = 0;
-		err = ipmr_cache_unresolved(mrt, vif, skb2);
+		err = ipmr_cache_unresolved(mrt, vif, skb2, dev);
 		read_unlock(&mrt_lock);
 		rcu_read_unlock();
 		return err;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 655d9eebe43e..6883b3d4ba8f 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1385,8 +1385,12 @@ static void rt_add_uncached_list(struct rtable *rt)
 
 static void ipv4_dst_destroy(struct dst_entry *dst)
 {
+	struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst);
 	struct rtable *rt = (struct rtable *) dst;
 
+	if (p != &dst_default_metrics && atomic_dec_and_test(&p->refcnt))
+		kfree(p);
+
 	if (!list_empty(&rt->rt_uncached)) {
 		struct uncached_list *ul = rt->rt_uncached_list;
 
@@ -1438,7 +1442,11 @@ static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
 			rt->rt_gateway = nh->nh_gw;
 			rt->rt_uses_gateway = 1;
 		}
-		dst_init_metrics(&rt->dst, fi->fib_metrics, true);
+		dst_init_metrics(&rt->dst, fi->fib_metrics->metrics, true);
+		if (fi->fib_metrics != &dst_default_metrics) {
+			rt->dst._metrics |= DST_METRICS_REFCOUNTED;
+			atomic_inc(&fi->fib_metrics->refcnt);
+		}
 #ifdef CONFIG_IP_ROUTE_CLASSID
 		rt->dst.tclassid = nh->nh_tclassid;
 #endif
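Taken together with the fib_semantics hunks, this gives the metrics block a proper reference count: fib_create_info() starts it at one, each route that adopts the block takes another reference, and both release paths free only on the final decrement, with the static default block exempt. A self-contained sketch of that lifecycle using C11 atomics (illustrative types, not the kernel's dst_metrics):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct metrics {
        atomic_int refcnt;
        unsigned int values[4];
    };

    static struct metrics default_metrics;  /* static: never freed */

    static struct metrics *metrics_get(struct metrics *m)
    {
        if (m != &default_metrics)
            atomic_fetch_add(&m->refcnt, 1);
        return m;
    }

    static void metrics_put(struct metrics *m)
    {
        if (m != &default_metrics &&
            atomic_fetch_sub(&m->refcnt, 1) == 1)
            free(m);    /* last reference dropped */
    }

    int main(void)
    {
        struct metrics *m = calloc(1, sizeof(*m));

        if (!m)
            return 1;
        atomic_init(&m->refcnt, 1);     /* creator's reference */

        struct metrics *route_ref = metrics_get(m);  /* route takes one */

        metrics_put(m);         /* creator goes away: block still alive */
        metrics_put(route_ref); /* route goes away: freed here */
        printf("ok\n");
        return 0;
    }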
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 1e4c76d2b827..40aca7803cf2 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1084,9 +1084,12 @@ static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct inet_sock *inet = inet_sk(sk);
+	struct sockaddr *uaddr = msg->msg_name;
 	int err, flags;
 
-	if (!(sysctl_tcp_fastopen & TFO_CLIENT_ENABLE))
+	if (!(sysctl_tcp_fastopen & TFO_CLIENT_ENABLE) ||
+	    (uaddr && msg->msg_namelen >= sizeof(uaddr->sa_family) &&
+	     uaddr->sa_family == AF_UNSPEC))
 		return -EOPNOTSUPP;
 	if (tp->fastopen_req)
 		return -EALREADY; /* Another Fast Open is in progress */
@@ -1108,7 +1111,7 @@ static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
 		}
 	}
 	flags = (msg->msg_flags & MSG_DONTWAIT) ? O_NONBLOCK : 0;
-	err = __inet_stream_connect(sk->sk_socket, msg->msg_name,
+	err = __inet_stream_connect(sk->sk_socket, uaddr,
 				    msg->msg_namelen, flags, 1);
 	/* fastopen_req could already be freed in __inet_stream_connect
 	 * if the connection times out or gets rst
@@ -2320,9 +2323,15 @@ int tcp_disconnect(struct sock *sk, int flags)
 	tcp_set_ca_state(sk, TCP_CA_Open);
 	tcp_clear_retrans(tp);
 	inet_csk_delack_init(sk);
+	/* Initialize rcv_mss to TCP_MIN_MSS to avoid division by 0
+	 * issue in __tcp_select_window()
+	 */
+	icsk->icsk_ack.rcv_mss = TCP_MIN_MSS;
 	tcp_init_send_head(sk);
 	memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
 	__sk_dst_reset(sk);
+	dst_release(sk->sk_rx_dst);
+	sk->sk_rx_dst = NULL;
 	tcp_saved_syn_free(tp);
 
 	/* Clean up fastopen related fields */
@@ -2374,9 +2383,10 @@ static int tcp_repair_set_window(struct tcp_sock *tp, char __user *optbuf, int l
 	return 0;
 }
 
-static int tcp_repair_options_est(struct tcp_sock *tp,
+static int tcp_repair_options_est(struct sock *sk,
 		struct tcp_repair_opt __user *optbuf, unsigned int len)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
 	struct tcp_repair_opt opt;
 
 	while (len >= sizeof(opt)) {
@@ -2389,6 +2399,7 @@ static int tcp_repair_options_est(struct tcp_sock *tp,
 		switch (opt.opt_code) {
 		case TCPOPT_MSS:
 			tp->rx_opt.mss_clamp = opt.opt_val;
+			tcp_mtup_init(sk);
 			break;
 		case TCPOPT_WINDOW:
 		{
@@ -2548,7 +2559,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
 		if (!tp->repair)
 			err = -EINVAL;
 		else if (sk->sk_state == TCP_ESTABLISHED)
-			err = tcp_repair_options_est(tp,
+			err = tcp_repair_options_est(sk,
 					(struct tcp_repair_opt __user *)optval,
 					optlen);
 		else
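The fastopen guard added above is a general rule for caller-supplied addresses: check that the buffer is large enough to contain sa_family before reading it, and reject families the path cannot handle. A userspace sketch of the same check:

    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>

    static int check_addr(const struct sockaddr *uaddr, socklen_t len)
    {
        /* sa_family is only readable once len covers it */
        if (uaddr && len >= sizeof(uaddr->sa_family) &&
            uaddr->sa_family == AF_UNSPEC)
            return -1;  /* the kernel returns -EOPNOTSUPP here */
        return 0;
    }

    int main(void)
    {
        struct sockaddr sa;

        memset(&sa, 0, sizeof(sa));
        sa.sa_family = AF_UNSPEC;
        printf("%d\n", check_addr(&sa, sizeof(sa)));    /* -1 */
        sa.sa_family = AF_INET;
        printf("%d\n", check_addr(&sa, sizeof(sa)));    /* 0 */
        return 0;
    }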
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 6e3c512054a6..324c9bcc5456 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -180,6 +180,7 @@ void tcp_init_congestion_control(struct sock *sk)
 {
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 
+	tcp_sk(sk)->prior_ssthresh = 0;
 	if (icsk->icsk_ca_ops->init)
 		icsk->icsk_ca_ops->init(sk);
 	if (tcp_ca_needs_ecn(sk))
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 5a3ad09e2786..174d4376baa5 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1179,13 +1179,14 @@ static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
 	 */
 	if (pkt_len > mss) {
 		unsigned int new_len = (pkt_len / mss) * mss;
-		if (!in_sack && new_len < pkt_len) {
+		if (!in_sack && new_len < pkt_len)
 			new_len += mss;
-			if (new_len >= skb->len)
-				return 0;
-		}
 		pkt_len = new_len;
 	}
+
+	if (pkt_len >= skb->len && !in_sack)
+		return 0;
+
 	err = tcp_fragment(sk, skb, pkt_len, mss, GFP_ATOMIC);
 	if (err < 0)
 		return err;
@@ -3189,7 +3190,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
 			int delta;
 
 			/* Non-retransmitted hole got filled? That's reordering */
-			if (reord < prior_fackets)
+			if (reord < prior_fackets && reord <= tp->fackets_out)
 				tcp_update_reordering(sk, tp->fackets_out - reord, 0);
 
 			delta = tcp_is_fack(tp) ? pkts_acked :
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index ea6e4cff9faf..1d6219bf2d6b 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1612,7 +1612,7 @@ static void udp_v4_rehash(struct sock *sk)
 	udp_lib_rehash(sk, new_hash);
 }
 
-int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 {
 	int rc;
 
@@ -1657,7 +1657,7 @@ EXPORT_SYMBOL(udp_encap_enable);
  * Note that in the success and error cases, the skb is assumed to
  * have either been requeued or freed.
  */
-int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+static int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 {
 	struct udp_sock *up = udp_sk(sk);
 	int is_udplite = IS_UDPLITE(sk);
diff --git a/net/ipv4/udp_impl.h b/net/ipv4/udp_impl.h
index feb50a16398d..a8cf8c6fb60c 100644
--- a/net/ipv4/udp_impl.h
+++ b/net/ipv4/udp_impl.h
@@ -25,7 +25,6 @@ int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
 		int flags, int *addr_len);
 int udp_sendpage(struct sock *sk, struct page *page, int offset, size_t size,
 		 int flags);
-int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
 void udp_destroy_sock(struct sock *sk);
 
 #ifdef CONFIG_PROC_FS
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 8d297a79b568..1d2dbace42ff 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -332,9 +332,9 @@ static void addrconf_mod_rs_timer(struct inet6_dev *idev,
 static void addrconf_mod_dad_work(struct inet6_ifaddr *ifp,
 				  unsigned long delay)
 {
-	if (!delayed_work_pending(&ifp->dad_work))
-		in6_ifa_hold(ifp);
-	mod_delayed_work(addrconf_wq, &ifp->dad_work, delay);
+	in6_ifa_hold(ifp);
+	if (mod_delayed_work(addrconf_wq, &ifp->dad_work, delay))
+		in6_ifa_put(ifp);
 }
 
 static int snmp6_alloc_dev(struct inet6_dev *idev)
@@ -1022,7 +1022,10 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr,
 	INIT_HLIST_NODE(&ifa->addr_lst);
 	ifa->scope = scope;
 	ifa->prefix_len = pfxlen;
-	ifa->flags = flags | IFA_F_TENTATIVE;
+	ifa->flags = flags;
+	/* No need to add the TENTATIVE flag for addresses with NODAD */
+	if (!(flags & IFA_F_NODAD))
+		ifa->flags |= IFA_F_TENTATIVE;
 	ifa->valid_lft = valid_lft;
 	ifa->prefered_lft = prefered_lft;
 	ifa->cstamp = ifa->tstamp = jiffies;
@@ -3366,6 +3369,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 	struct netdev_notifier_changeupper_info *info;
 	struct inet6_dev *idev = __in6_dev_get(dev);
+	struct net *net = dev_net(dev);
 	int run_pending = 0;
 	int err;
 
@@ -3381,7 +3385,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
 	case NETDEV_CHANGEMTU:
 		/* if MTU under IPV6_MIN_MTU stop IPv6 on this interface. */
 		if (dev->mtu < IPV6_MIN_MTU) {
-			addrconf_ifdown(dev, 1);
+			addrconf_ifdown(dev, dev != net->loopback_dev);
 			break;
 		}
 
@@ -3497,7 +3501,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
 		 * IPV6_MIN_MTU stop IPv6 on this interface.
 		 */
 		if (dev->mtu < IPV6_MIN_MTU)
-			addrconf_ifdown(dev, 1);
+			addrconf_ifdown(dev, dev != net->loopback_dev);
 	}
 	break;
 
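The addrconf_mod_dad_work() fix is worth dwelling on: the old code took a reference only when the work was not yet pending, which races with the work starting to run between the check and the arm. The new code always takes a reference and drops the surplus one when mod_delayed_work() reports the work was already queued. A toy model of the balanced version (plain counters, not the kernel refcount API):

    #include <stdbool.h>
    #include <stdio.h>

    static int refcnt;
    static bool pending;

    static void hold(void) { refcnt++; }
    static void put(void)  { refcnt--; }

    /* returns true if already queued, like mod_delayed_work() */
    static bool arm_work(void)
    {
        bool was_pending = pending;

        pending = true;
        return was_pending;
    }

    static void schedule_dad(void)
    {
        hold();         /* reference for the queued work */
        if (arm_work())
            put();      /* already queued: it holds a ref already */
    }

    int main(void)
    {
        schedule_dad();
        schedule_dad(); /* second call must not leak a reference */
        printf("refcnt = %d (one queued work holds one ref)\n", refcnt);
        return 0;
    }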
diff --git a/net/ipv6/calipso.c b/net/ipv6/calipso.c
index 37ac9de713c6..8d772fea1dde 100644
--- a/net/ipv6/calipso.c
+++ b/net/ipv6/calipso.c
@@ -1319,7 +1319,7 @@ static int calipso_skbuff_setattr(struct sk_buff *skb,
 	struct ipv6hdr *ip6_hdr;
 	struct ipv6_opt_hdr *hop;
 	unsigned char buf[CALIPSO_MAX_BUFFER];
-	int len_delta, new_end, pad;
+	int len_delta, new_end, pad, payload;
 	unsigned int start, end;
 
 	ip6_hdr = ipv6_hdr(skb);
@@ -1346,6 +1346,8 @@ static int calipso_skbuff_setattr(struct sk_buff *skb,
 	if (ret_val < 0)
 		return ret_val;
 
+	ip6_hdr = ipv6_hdr(skb); /* Reset as skb_cow() may have moved it */
+
 	if (len_delta) {
 		if (len_delta > 0)
 			skb_push(skb, len_delta);
@@ -1355,6 +1357,8 @@ static int calipso_skbuff_setattr(struct sk_buff *skb,
 			       sizeof(*ip6_hdr) + start);
 		skb_reset_network_header(skb);
 		ip6_hdr = ipv6_hdr(skb);
+		payload = ntohs(ip6_hdr->payload_len);
+		ip6_hdr->payload_len = htons(payload + len_delta);
 	}
 
 	hop = (struct ipv6_opt_hdr *)(ip6_hdr + 1);
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index e011122ebd43..5c786f5ab961 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -250,8 +250,14 @@ ipv4_connected:
 	 */
 
 	err = ip6_datagram_dst_update(sk, true);
-	if (err)
+	if (err) {
+		/* Reset daddr and dport so that udp_v6_early_demux()
+		 * fails to find this socket
+		 */
+		memset(&sk->sk_v6_daddr, 0, sizeof(sk->sk_v6_daddr));
+		inet->inet_dport = 0;
 		goto out;
+	}
 
 	sk->sk_state = TCP_ESTABLISHED;
 	sk_set_txhash(sk);
diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c
index d950d43ba255..f02f131f6435 100644
--- a/net/ipv6/esp6_offload.c
+++ b/net/ipv6/esp6_offload.c
@@ -30,6 +30,25 @@
 #include <net/ipv6.h>
 #include <linux/icmpv6.h>
 
+static __u16 esp6_nexthdr_esp_offset(struct ipv6hdr *ipv6_hdr, int nhlen)
+{
+	int off = sizeof(struct ipv6hdr);
+	struct ipv6_opt_hdr *exthdr;
+
+	if (likely(ipv6_hdr->nexthdr == NEXTHDR_ESP))
+		return offsetof(struct ipv6hdr, nexthdr);
+
+	while (off < nhlen) {
+		exthdr = (void *)ipv6_hdr + off;
+		if (exthdr->nexthdr == NEXTHDR_ESP)
+			return off;
+
+		off += ipv6_optlen(exthdr);
+	}
+
+	return 0;
+}
+
 static struct sk_buff **esp6_gro_receive(struct sk_buff **head,
 					 struct sk_buff *skb)
 {
@@ -38,6 +57,7 @@ static struct sk_buff **esp6_gro_receive(struct sk_buff **head,
 	struct xfrm_state *x;
 	__be32 seq;
 	__be32 spi;
+	int nhoff;
 	int err;
 
 	skb_pull(skb, offset);
@@ -72,6 +92,11 @@ static struct sk_buff **esp6_gro_receive(struct sk_buff **head,
 
 	xo->flags |= XFRM_GRO;
 
+	nhoff = esp6_nexthdr_esp_offset(ipv6_hdr(skb), offset);
+	if (!nhoff)
+		goto out;
+
+	IP6CB(skb)->nhoff = nhoff;
 	XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = NULL;
 	XFRM_SPI_SKB_CB(skb)->family = AF_INET6;
 	XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct ipv6hdr, daddr);
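esp6_nexthdr_esp_offset() walks the IPv6 extension header chain until it finds the header whose nexthdr byte announces ESP, returning that byte's offset so IP6CB(skb)->nhoff can be set for GRO. A standalone restatement of the walk over a flat buffer (real option headers encode their length in 8-byte units, as ipv6_optlen() does; mirrored here):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define NEXTHDR_HOP  0
    #define NEXTHDR_ESP 50

    struct opt_hdr {
        uint8_t nexthdr;
        uint8_t hdrlen;     /* length in 8-byte units, minus 1 */
    };

    static int esp_nexthdr_offset(const uint8_t *pkt, int nhlen,
                                  uint8_t first_nexthdr)
    {
        int off = 40;       /* sizeof(struct ipv6hdr) */

        if (first_nexthdr == NEXTHDR_ESP)
            return 6;       /* offsetof(struct ipv6hdr, nexthdr) */

        while (off + (int)sizeof(struct opt_hdr) <= nhlen) {
            struct opt_hdr h;

            memcpy(&h, pkt + off, sizeof(h));
            if (h.nexthdr == NEXTHDR_ESP)
                return off; /* nexthdr byte sits at the header start */
            off += (h.hdrlen + 1) * 8;
        }
        return 0;           /* no ESP in the parsed region */
    }

    int main(void)
    {
        uint8_t pkt[64] = { 0 };

        pkt[40] = NEXTHDR_ESP;  /* hop-by-hop header says ESP is next */
        pkt[41] = 0;            /* 8 bytes long */
        printf("offset = %d\n", esp_nexthdr_offset(pkt, 48, NEXTHDR_HOP));
        return 0;
    }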
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index eea23b57c6a5..ec849d88a662 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -32,7 +32,6 @@ struct fib6_rule {
 struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6,
 				   int flags, pol_lookup_t lookup)
 {
-	struct rt6_info *rt;
 	struct fib_lookup_arg arg = {
 		.lookup_ptr = lookup,
 		.flags = FIB_LOOKUP_NOREF,
@@ -44,21 +43,11 @@ struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6,
 	fib_rules_lookup(net->ipv6.fib6_rules_ops,
 			 flowi6_to_flowi(fl6), flags, &arg);
 
-	rt = arg.result;
+	if (arg.result)
+		return arg.result;
 
-	if (!rt) {
-		dst_hold(&net->ipv6.ip6_null_entry->dst);
-		return &net->ipv6.ip6_null_entry->dst;
-	}
-
-	if (rt->rt6i_flags & RTF_REJECT &&
-	    rt->dst.error == -EAGAIN) {
-		ip6_rt_put(rt);
-		rt = net->ipv6.ip6_null_entry;
-		dst_hold(&rt->dst);
-	}
-
-	return &rt->dst;
+	dst_hold(&net->ipv6.ip6_null_entry->dst);
+	return &net->ipv6.ip6_null_entry->dst;
 }
 
 static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
@@ -121,7 +110,8 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
 			flp6->saddr = saddr;
 		}
 		err = rt->dst.error;
-		goto out;
+		if (err != -EAGAIN)
+			goto out;
 	}
 again:
 	ip6_rt_put(rt);
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 230b5aac9f03..8d7b113958b1 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -491,7 +491,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
 	local_bh_disable();
 
 	/* Check global sysctl_icmp_msgs_per_sec ratelimit */
-	if (!icmpv6_global_allow(type))
+	if (!(skb->dev->flags&IFF_LOOPBACK) && !icmpv6_global_allow(type))
 		goto out_bh_enable;
 
 	mip6_addr_swap(skb);
diff --git a/net/ipv6/ila/ila_xlat.c b/net/ipv6/ila/ila_xlat.c
index 2fd5ca151dcf..77f7f8c7d93d 100644
--- a/net/ipv6/ila/ila_xlat.c
+++ b/net/ipv6/ila/ila_xlat.c
@@ -62,6 +62,7 @@ static inline u32 ila_locator_hash(struct ila_locator loc)
 {
 	u32 *v = (u32 *)loc.v32;
 
+	__ila_hash_secret_init();
 	return jhash_2words(v[0], v[1], hashrnd);
 }
 
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index d4bf2c68a545..e6b78ba0e636 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -289,8 +289,7 @@ struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6,
 	struct rt6_info *rt;
 
 	rt = lookup(net, net->ipv6.fib6_main_tbl, fl6, flags);
-	if (rt->rt6i_flags & RTF_REJECT &&
-	    rt->dst.error == -EAGAIN) {
+	if (rt->dst.error == -EAGAIN) {
 		ip6_rt_put(rt);
 		rt = net->ipv6.ip6_null_entry;
 		dst_hold(&rt->dst);
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 8d128ba79b66..64eea3962733 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -537,11 +537,10 @@ static inline int ip6gre_xmit_ipv4(struct sk_buff *skb, struct net_device *dev)
 
 	memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
 
-	dsfield = ipv4_get_dsfield(iph);
-
 	if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
-		fl6.flowlabel |= htonl((__u32)iph->tos << IPV6_TCLASS_SHIFT)
-					  & IPV6_TCLASS_MASK;
+		dsfield = ipv4_get_dsfield(iph);
+	else
+		dsfield = ip6_tclass(t->parms.flowinfo);
 	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
 		fl6.flowi6_mark = skb->mark;
 	else
@@ -598,9 +597,11 @@ static inline int ip6gre_xmit_ipv6(struct sk_buff *skb, struct net_device *dev)
 
 	memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
 
-	dsfield = ipv6_get_dsfield(ipv6h);
 	if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
-		fl6.flowlabel |= (*(__be32 *) ipv6h & IPV6_TCLASS_MASK);
+		dsfield = ipv6_get_dsfield(ipv6h);
+	else
+		dsfield = ip6_tclass(t->parms.flowinfo);
+
 	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
 		fl6.flowlabel |= ip6_flowlabel(ipv6h);
 	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
@@ -990,13 +991,13 @@ static void ip6gre_dev_free(struct net_device *dev)
 
 	dst_cache_destroy(&t->dst_cache);
 	free_percpu(dev->tstats);
-	free_netdev(dev);
 }
 
 static void ip6gre_tunnel_setup(struct net_device *dev)
 {
 	dev->netdev_ops = &ip6gre_netdev_ops;
-	dev->destructor = ip6gre_dev_free;
+	dev->needs_free_netdev = true;
+	dev->priv_destructor = ip6gre_dev_free;
 
 	dev->type = ARPHRD_IP6GRE;
 
@@ -1147,7 +1148,7 @@ static int __net_init ip6gre_init_net(struct net *net)
 	return 0;
 
 err_reg_dev:
-	ip6gre_dev_free(ign->fb_tunnel_dev);
+	free_netdev(ign->fb_tunnel_dev);
 err_alloc_dev:
 	return err;
 }
@@ -1299,7 +1300,8 @@ static void ip6gre_tap_setup(struct net_device *dev)
 	ether_setup(dev);
 
 	dev->netdev_ops = &ip6gre_tap_netdev_ops;
-	dev->destructor = ip6gre_dev_free;
+	dev->needs_free_netdev = true;
+	dev->priv_destructor = ip6gre_dev_free;
 
 	dev->features |= NETIF_F_NETNS_LOCAL;
 	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index 93e58a5e1837..cdb3728faca7 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -63,7 +63,6 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
 	const struct net_offload *ops;
 	int proto;
 	struct frag_hdr *fptr;
-	unsigned int unfrag_ip6hlen;
 	unsigned int payload_len;
 	u8 *prevhdr;
 	int offset = 0;
@@ -116,8 +115,12 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
 		skb->network_header = (u8 *)ipv6h - skb->head;
 
 		if (udpfrag) {
-			unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr);
-			fptr = (struct frag_hdr *)((u8 *)ipv6h + unfrag_ip6hlen);
+			int err = ip6_find_1stfragopt(skb, &prevhdr);
+			if (err < 0) {
+				kfree_skb_list(segs);
+				return ERR_PTR(err);
+			}
+			fptr = (struct frag_hdr *)((u8 *)ipv6h + err);
 			fptr->frag_off = htons(offset);
 			if (skb->next)
 				fptr->frag_off |= htons(IP6_MF);
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 58f6288e9ba5..1699acb2fa2c 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -597,7 +597,10 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
 	int ptr, offset = 0, err = 0;
 	u8 *prevhdr, nexthdr = 0;
 
-	hlen = ip6_find_1stfragopt(skb, &prevhdr);
+	err = ip6_find_1stfragopt(skb, &prevhdr);
+	if (err < 0)
+		goto fail;
+	hlen = err;
 	nexthdr = *prevhdr;
 
 	mtu = ip6_skb_dst_mtu(skb);
@@ -1387,7 +1390,7 @@ emsgsize:
 	 */
 
 	cork->length += length;
-	if ((((length + fragheaderlen) > mtu) ||
+	if ((((length + (skb ? skb->len : headersize)) > mtu) ||
 	     (skb && skb_is_gso(skb))) &&
 	    (sk->sk_protocol == IPPROTO_UDP) &&
 	    (rt->dst.dev->features & NETIF_F_UFO) && !dst_xfrm(&rt->dst) &&
@@ -1463,6 +1466,11 @@ alloc_new_skb:
 			 */
 			alloclen += sizeof(struct frag_hdr);
 
+			copy = datalen - transhdrlen - fraggap;
+			if (copy < 0) {
+				err = -EINVAL;
+				goto error;
+			}
 			if (transhdrlen) {
 				skb = sock_alloc_send_skb(sk,
 						alloclen + hh_len,
@@ -1512,13 +1520,9 @@ alloc_new_skb:
 				data += fraggap;
 				pskb_trim_unique(skb_prev, maxfraglen);
 			}
-			copy = datalen - transhdrlen - fraggap;
-
-			if (copy < 0) {
-				err = -EINVAL;
-				kfree_skb(skb);
-				goto error;
-			} else if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
+			if (copy > 0 &&
+			    getfrag(from, data + transhdrlen, offset,
+				    copy, fraggap, skb) < 0) {
 				err = -EFAULT;
 				kfree_skb(skb);
 				goto error;
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 6eb2ae507500..8c6c3c8e7eef 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -254,7 +254,6 @@ static void ip6_dev_free(struct net_device *dev)
 	gro_cells_destroy(&t->gro_cells);
 	dst_cache_destroy(&t->dst_cache);
 	free_percpu(dev->tstats);
-	free_netdev(dev);
 }
 
 static int ip6_tnl_create2(struct net_device *dev)
@@ -322,7 +321,7 @@ static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p)
 	return t;
 
 failed_free:
-	ip6_dev_free(dev);
+	free_netdev(dev);
 failed:
 	return ERR_PTR(err);
 }
@@ -859,6 +858,8 @@ static int __ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
 	return 0;
 
 drop:
+	if (tun_dst)
+		dst_release((struct dst_entry *)tun_dst);
 	kfree_skb(skb);
 	return 0;
 }
@@ -1095,6 +1096,9 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
 
 	if (!dst) {
 route_lookup:
+		/* add dsfield to flowlabel for route lookup */
+		fl6->flowlabel = ip6_make_flowinfo(dsfield, fl6->flowlabel);
+
 		dst = ip6_route_output(net, NULL, fl6);
 
 		if (dst->error)
@@ -1196,7 +1200,7 @@ route_lookup:
 	skb_push(skb, sizeof(struct ipv6hdr));
 	skb_reset_network_header(skb);
 	ipv6h = ipv6_hdr(skb);
-	ip6_flow_hdr(ipv6h, INET_ECN_encapsulate(0, dsfield),
+	ip6_flow_hdr(ipv6h, dsfield,
 		     ip6_make_flowlabel(net, skb, fl6->flowlabel, true, fl6));
 	ipv6h->hop_limit = hop_limit;
 	ipv6h->nexthdr = proto;
@@ -1231,8 +1235,6 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (tproto != IPPROTO_IPIP && tproto != 0)
 		return -1;
 
-	dsfield = ipv4_get_dsfield(iph);
-
 	if (t->parms.collect_md) {
 		struct ip_tunnel_info *tun_info;
 		const struct ip_tunnel_key *key;
@@ -1246,6 +1248,7 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
 		fl6.flowi6_proto = IPPROTO_IPIP;
 		fl6.daddr = key->u.ipv6.dst;
 		fl6.flowlabel = key->label;
+		dsfield = key->tos;
 	} else {
 		if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
 			encap_limit = t->parms.encap_limit;
@@ -1254,8 +1257,9 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
 		fl6.flowi6_proto = IPPROTO_IPIP;
 
 		if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
-			fl6.flowlabel |= htonl((__u32)iph->tos << IPV6_TCLASS_SHIFT)
-					 & IPV6_TCLASS_MASK;
+			dsfield = ipv4_get_dsfield(iph);
+		else
+			dsfield = ip6_tclass(t->parms.flowinfo);
 		if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
 			fl6.flowi6_mark = skb->mark;
 		else
@@ -1267,6 +1271,8 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
 		return -1;
 
+	dsfield = INET_ECN_encapsulate(dsfield, ipv4_get_dsfield(iph));
+
 	skb_set_inner_ipproto(skb, IPPROTO_IPIP);
 
 	err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
@@ -1300,8 +1306,6 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
 	    ip6_tnl_addr_conflict(t, ipv6h))
 		return -1;
 
-	dsfield = ipv6_get_dsfield(ipv6h);
-
 	if (t->parms.collect_md) {
 		struct ip_tunnel_info *tun_info;
 		const struct ip_tunnel_key *key;
@@ -1315,6 +1319,7 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
1315 fl6.flowi6_proto = IPPROTO_IPV6; 1319 fl6.flowi6_proto = IPPROTO_IPV6;
1316 fl6.daddr = key->u.ipv6.dst; 1320 fl6.daddr = key->u.ipv6.dst;
1317 fl6.flowlabel = key->label; 1321 fl6.flowlabel = key->label;
1322 dsfield = key->tos;
1318 } else { 1323 } else {
1319 offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb)); 1324 offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
1320 /* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */ 1325 /* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */
@@ -1337,7 +1342,9 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
1337 fl6.flowi6_proto = IPPROTO_IPV6; 1342 fl6.flowi6_proto = IPPROTO_IPV6;
1338 1343
1339 if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS) 1344 if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
1340 fl6.flowlabel |= (*(__be32 *)ipv6h & IPV6_TCLASS_MASK); 1345 dsfield = ipv6_get_dsfield(ipv6h);
1346 else
1347 dsfield = ip6_tclass(t->parms.flowinfo);
1341 if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL) 1348 if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
1342 fl6.flowlabel |= ip6_flowlabel(ipv6h); 1349 fl6.flowlabel |= ip6_flowlabel(ipv6h);
1343 if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK) 1350 if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
@@ -1351,6 +1358,8 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
1351 if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6)) 1358 if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
1352 return -1; 1359 return -1;
1353 1360
1361 dsfield = INET_ECN_encapsulate(dsfield, ipv6_get_dsfield(ipv6h));
1362
1354 skb_set_inner_ipproto(skb, IPPROTO_IPV6); 1363 skb_set_inner_ipproto(skb, IPPROTO_IPV6);
1355 1364
1356 err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu, 1365 err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
@@ -1769,7 +1778,8 @@ static const struct net_device_ops ip6_tnl_netdev_ops = {
1769static void ip6_tnl_dev_setup(struct net_device *dev) 1778static void ip6_tnl_dev_setup(struct net_device *dev)
1770{ 1779{
1771 dev->netdev_ops = &ip6_tnl_netdev_ops; 1780 dev->netdev_ops = &ip6_tnl_netdev_ops;
1772 dev->destructor = ip6_dev_free; 1781 dev->needs_free_netdev = true;
1782 dev->priv_destructor = ip6_dev_free;
1773 1783
1774 dev->type = ARPHRD_TUNNEL6; 1784 dev->type = ARPHRD_TUNNEL6;
1775 dev->flags |= IFF_NOARP; 1785 dev->flags |= IFF_NOARP;
@@ -2216,7 +2226,7 @@ static int __net_init ip6_tnl_init_net(struct net *net)
2216 return 0; 2226 return 0;
2217 2227
2218err_register: 2228err_register:
2219 ip6_dev_free(ip6n->fb_tnl_dev); 2229 free_netdev(ip6n->fb_tnl_dev);
2220err_alloc_dev: 2230err_alloc_dev:
2221 return err; 2231 return err;
2222} 2232}
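The dev->destructor conversion seen here (and repeated across the files below) splits teardown ownership: the driver's priv_destructor releases only its private state, while the networking core frees the net_device itself when needs_free_netdev is set, so error paths call free_netdev() directly and neither side double-frees. A minimal userspace sketch of that ownership split; struct device here is a stand-in, not the kernel's.

#include <stdio.h>
#include <stdlib.h>

struct device {
	void (*priv_destructor)(struct device *);
	int needs_free;
	void *priv;
};

/* Driver hook: private state only - never free(dev) here. */
static void tunnel_destructor(struct device *dev)
{
	free(dev->priv);
}

/* Core teardown: run the hook first, then free the object itself. */
static void core_unregister(struct device *dev)
{
	if (dev->priv_destructor)
		dev->priv_destructor(dev);
	if (dev->needs_free)
		free(dev);
}

int main(void)
{
	struct device *dev = calloc(1, sizeof(*dev));

	if (!dev)
		return 1;
	dev->priv = malloc(16);
	dev->priv_destructor = tunnel_destructor;
	dev->needs_free = 1;
	core_unregister(dev);	/* no double free, no leak */
	puts("torn down");
	return 0;
}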
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index d67ef56454b2..837ea1eefe7f 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -180,7 +180,6 @@ vti6_tnl_unlink(struct vti6_net *ip6n, struct ip6_tnl *t)
 static void vti6_dev_free(struct net_device *dev)
 {
 	free_percpu(dev->tstats);
-	free_netdev(dev);
 }
 
 static int vti6_tnl_create2(struct net_device *dev)
@@ -235,7 +234,7 @@ static struct ip6_tnl *vti6_tnl_create(struct net *net, struct __ip6_tnl_parm *p
 	return t;
 
 failed_free:
-	vti6_dev_free(dev);
+	free_netdev(dev);
 failed:
 	return NULL;
 }
@@ -842,7 +841,8 @@ static const struct net_device_ops vti6_netdev_ops = {
 static void vti6_dev_setup(struct net_device *dev)
 {
 	dev->netdev_ops = &vti6_netdev_ops;
-	dev->destructor = vti6_dev_free;
+	dev->needs_free_netdev = true;
+	dev->priv_destructor = vti6_dev_free;
 
 	dev->type = ARPHRD_TUNNEL6;
 	dev->hard_header_len = LL_MAX_HEADER + sizeof(struct ipv6hdr);
@@ -1100,7 +1100,7 @@ static int __net_init vti6_init_net(struct net *net)
 	return 0;
 
 err_register:
-	vti6_dev_free(ip6n->fb_tnl_dev);
+	free_netdev(ip6n->fb_tnl_dev);
 err_alloc_dev:
 	return err;
 }
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 374997d26488..2ecb39b943b5 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -733,7 +733,7 @@ static void reg_vif_setup(struct net_device *dev)
 	dev->mtu		= 1500 - sizeof(struct ipv6hdr) - 8;
 	dev->flags		= IFF_NOARP;
 	dev->netdev_ops		= &reg_vif_netdev_ops;
-	dev->destructor		= free_netdev;
+	dev->needs_free_netdev	= true;
 	dev->features		|= NETIF_F_NETNS_LOCAL;
 }
 
diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
index cd4252346a32..e9065b8d3af8 100644
--- a/net/ipv6/output_core.c
+++ b/net/ipv6/output_core.c
@@ -79,14 +79,13 @@ EXPORT_SYMBOL(ipv6_select_ident);
 int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
 {
 	u16 offset = sizeof(struct ipv6hdr);
-	struct ipv6_opt_hdr *exthdr =
-			(struct ipv6_opt_hdr *)(ipv6_hdr(skb) + 1);
 	unsigned int packet_len = skb_tail_pointer(skb) -
 		skb_network_header(skb);
 	int found_rhdr = 0;
 	*nexthdr = &ipv6_hdr(skb)->nexthdr;
 
-	while (offset + 1 <= packet_len) {
+	while (offset <= packet_len) {
+		struct ipv6_opt_hdr *exthdr;
 
 		switch (**nexthdr) {
 
@@ -107,13 +106,16 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
 			return offset;
 		}
 
-		offset += ipv6_optlen(exthdr);
-		*nexthdr = &exthdr->nexthdr;
+		if (offset + sizeof(struct ipv6_opt_hdr) > packet_len)
+			return -EINVAL;
+
 		exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) +
 						 offset);
+		offset += ipv6_optlen(exthdr);
+		*nexthdr = &exthdr->nexthdr;
 	}
 
-	return offset;
+	return -EINVAL;
 }
 EXPORT_SYMBOL(ip6_find_1stfragopt);
 
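The hardened loop above proves that each extension header's fixed part lies inside the packet before dereferencing it, and a packet that runs out of data yields -EINVAL instead of a read past the tail. A self-contained userspace sketch of the same bounds-check-before-read shape; the option encoding is deliberately simplified and the names are illustrative, not the kernel's.

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

struct opt_hdr {
	unsigned char nexthdr;
	unsigned char hdrlen;	/* length in 8-byte units, minus one */
};

static int walk_options(const unsigned char *pkt, size_t pkt_len)
{
	size_t offset = 0;

	while (offset <= pkt_len) {
		const struct opt_hdr *h;

		/* validate the fixed part before touching it */
		if (offset + sizeof(*h) > pkt_len)
			return -EINVAL;

		h = (const struct opt_hdr *)(pkt + offset);
		if (h->nexthdr == 59)	/* NEXTHDR_NONE-style terminator */
			return (int)offset;
		offset += ((size_t)h->hdrlen + 1) * 8;
	}
	return -EINVAL;		/* walked off the end: malformed */
}

int main(void)
{
	unsigned char pkt[16] = { 59, 0 };	/* terminator at offset 0 */

	printf("result: %d\n", walk_options(pkt, sizeof(pkt)));
	return 0;
}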
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
index 9b522fa90e6d..ac826dd338ff 100644
--- a/net/ipv6/ping.c
+++ b/net/ipv6/ping.c
@@ -192,7 +192,7 @@ static struct inet_protosw pingv6_protosw = {
 	.type =      SOCK_DGRAM,
 	.protocol =  IPPROTO_ICMPV6,
 	.prot =      &pingv6_prot,
-	.ops =       &inet6_dgram_ops,
+	.ops =       &inet6_sockraw_ops,
 	.flags =     INET_PROTOSW_REUSE,
 };
 
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index cc8e3ae9ca73..e88bcb8ff0fd 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -219,7 +219,7 @@ static void snmp6_seq_show_item64(struct seq_file *seq, void __percpu *mib,
 	u64 buff64[SNMP_MIB_MAX];
 	int i;
 
-	memset(buff64, 0, sizeof(unsigned long) * SNMP_MIB_MAX);
+	memset(buff64, 0, sizeof(u64) * SNMP_MIB_MAX);
 
 	snmp_get_cpu_field64_batch(buff64, itemlist, mib, syncpoff);
 	for (i = 0; itemlist[i].name; i++)
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 1f992d9e261d..60be012fe708 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -1338,7 +1338,7 @@ void raw6_proc_exit(void)
 #endif	/* CONFIG_PROC_FS */
 
 /* Same as inet6_dgram_ops, sans udp_poll.  */
-static const struct proto_ops inet6_sockraw_ops = {
+const struct proto_ops inet6_sockraw_ops = {
 	.family		   = PF_INET6,
 	.owner		   = THIS_MODULE,
 	.release	   = inet6_release,
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index dc61b0b5e64e..322bd62e688b 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -2804,6 +2804,7 @@ static int fib6_ifdown(struct rt6_info *rt, void *arg)
 	if ((rt->dst.dev == dev || !dev) &&
 	    rt != adn->net->ipv6.ip6_null_entry &&
 	    (rt->rt6i_nsiblings == 0 ||
+	     (dev && netdev_unregistering(dev)) ||
 	     !rt->rt6i_idev->cnf.ignore_routes_with_linkdown))
 		return -1;
 
@@ -3721,7 +3722,11 @@ static int ip6_route_dev_notify(struct notifier_block *this,
 		net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
 		net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
 #endif
-	} else if (event == NETDEV_UNREGISTER) {
+	} else if (event == NETDEV_UNREGISTER &&
+		   dev->reg_state != NETREG_UNREGISTERED) {
+		/* NETDEV_UNREGISTER could be fired for multiple times by
+		 * netdev_wait_allrefs(). Make sure we only call this once.
+		 */
 		in6_dev_put(net->ipv6.ip6_null_entry->rt6i_idev);
 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
 		in6_dev_put(net->ipv6.ip6_prohibit_entry->rt6i_idev);
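The reg_state guard above makes a notifier that can fire repeatedly while a device drains references drop its own reference exactly once. A simplified userspace model of that once-only teardown follows; unlike the kernel, where the core sets the registration state, this toy flips the state inside the handler purely to illustrate the idempotence.

#include <stdio.h>

enum reg_state { REGISTERED, UNREGISTERING, UNREGISTERED };

struct dev {
	enum reg_state reg_state;
	int refcnt;
};

static void on_unregister(struct dev *d)
{
	if (d->reg_state == UNREGISTERED)
		return;			/* already handled once */
	d->refcnt--;			/* the in6_dev_put() equivalent */
	d->reg_state = UNREGISTERED;
}

int main(void)
{
	struct dev d = { UNREGISTERING, 1 };

	on_unregister(&d);
	on_unregister(&d);		/* repeated notification is a no-op */
	printf("refcnt = %d\n", d.refcnt);	/* 0, not -1 */
	return 0;
}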
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 61e5902f0687..f8ad15891cd7 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -265,7 +265,7 @@ static struct ip_tunnel *ipip6_tunnel_locate(struct net *net,
 	return nt;
 
 failed_free:
-	ipip6_dev_free(dev);
+	free_netdev(dev);
 failed:
 	return NULL;
 }
@@ -305,7 +305,7 @@ static int ipip6_tunnel_get_prl(struct ip_tunnel *t,
 	 * we try harder to allocate.
 	 */
 	kp = (cmax <= 1 || capable(CAP_NET_ADMIN)) ?
-		kcalloc(cmax, sizeof(*kp), GFP_KERNEL) :
+		kcalloc(cmax, sizeof(*kp), GFP_KERNEL | __GFP_NOWARN) :
 		NULL;
 
 	rcu_read_lock();
@@ -1336,7 +1336,6 @@ static void ipip6_dev_free(struct net_device *dev)
 
 	dst_cache_destroy(&tunnel->dst_cache);
 	free_percpu(dev->tstats);
-	free_netdev(dev);
 }
 
 #define SIT_FEATURES (NETIF_F_SG	   | \
@@ -1351,7 +1350,8 @@ static void ipip6_tunnel_setup(struct net_device *dev)
 	int t_hlen = tunnel->hlen + sizeof(struct iphdr);
 
 	dev->netdev_ops		= &ipip6_netdev_ops;
-	dev->destructor		= ipip6_dev_free;
+	dev->needs_free_netdev	= true;
+	dev->priv_destructor	= ipip6_dev_free;
 
 	dev->type		= ARPHRD_SIT;
 	dev->hard_header_len	= LL_MAX_HEADER + t_hlen;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 7a8237acd210..4f4310a36a04 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1062,6 +1062,7 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
 		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
 #endif
 
+		newnp->ipv6_mc_list = NULL;
 		newnp->ipv6_ac_list = NULL;
 		newnp->ipv6_fl_list = NULL;
 		newnp->pktoptions  = NULL;
@@ -1131,6 +1132,7 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
 	   First: no IPv4 options.
 	 */
 	newinet->inet_opt = NULL;
+	newnp->ipv6_mc_list = NULL;
 	newnp->ipv6_ac_list = NULL;
 	newnp->ipv6_fl_list = NULL;
 
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 04862abfe4ec..75703fda23e7 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -526,7 +526,7 @@ out:
 	return;
 }
 
-int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 {
 	int rc;
 
@@ -569,7 +569,7 @@ void udpv6_encap_enable(void)
 }
 EXPORT_SYMBOL(udpv6_encap_enable);
 
-int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+static int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 {
 	struct udp_sock *up = udp_sk(sk);
 	int is_udplite = IS_UDPLITE(sk);
@@ -879,7 +879,8 @@ static struct sock *__udp6_lib_demux_lookup(struct net *net,
 	struct sock *sk;
 
 	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
-		if (INET6_MATCH(sk, net, rmt_addr, loc_addr, ports, dif))
+		if (sk->sk_state == TCP_ESTABLISHED &&
+		    INET6_MATCH(sk, net, rmt_addr, loc_addr, ports, dif))
 			return sk;
 		/* Only check first socket in chain */
 		break;
diff --git a/net/ipv6/udp_impl.h b/net/ipv6/udp_impl.h
index e78bdc76dcc3..f180b3d85e31 100644
--- a/net/ipv6/udp_impl.h
+++ b/net/ipv6/udp_impl.h
@@ -26,7 +26,6 @@ int compat_udpv6_getsockopt(struct sock *sk, int level, int optname,
 int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len);
 int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
 		  int flags, int *addr_len);
-int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
 void udpv6_destroy_sock(struct sock *sk);
 
 #ifdef CONFIG_PROC_FS
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
index ac858c480f2f..a2267f80febb 100644
--- a/net/ipv6/udp_offload.c
+++ b/net/ipv6/udp_offload.c
@@ -29,6 +29,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
 	u8 frag_hdr_sz = sizeof(struct frag_hdr);
 	__wsum csum;
 	int tnl_hlen;
+	int err;
 
 	mss = skb_shinfo(skb)->gso_size;
 	if (unlikely(skb->len <= mss))
@@ -90,7 +91,10 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
 		/* Find the unfragmentable header and shift it left by frag_hdr_sz
 		 * bytes to insert fragment header.
 		 */
-		unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr);
+		err = ip6_find_1stfragopt(skb, &prevhdr);
+		if (err < 0)
+			return ERR_PTR(err);
+		unfrag_ip6hlen = err;
 		nexthdr = *prevhdr;
 		*prevhdr = NEXTHDR_FRAGMENT;
 		unfrag_len = (skb_network_header(skb) - skb_mac_header(skb)) +
diff --git a/net/ipv6/xfrm6_input.c b/net/ipv6/xfrm6_input.c
index 08a807b29298..3ef5d913e7a3 100644
--- a/net/ipv6/xfrm6_input.c
+++ b/net/ipv6/xfrm6_input.c
@@ -43,8 +43,8 @@ int xfrm6_transport_finish(struct sk_buff *skb, int async)
 	return 1;
 #endif
 
-	ipv6_hdr(skb)->payload_len = htons(skb->len);
 	__skb_push(skb, skb->data - skb_network_header(skb));
+	ipv6_hdr(skb)->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
 
 	if (xo && (xo->flags & XFRM_GRO)) {
 		skb_mac_header_rebuild(skb);
diff --git a/net/ipv6/xfrm6_mode_ro.c b/net/ipv6/xfrm6_mode_ro.c
index 0e015906f9ca..07d36573f50b 100644
--- a/net/ipv6/xfrm6_mode_ro.c
+++ b/net/ipv6/xfrm6_mode_ro.c
@@ -47,6 +47,8 @@ static int xfrm6_ro_output(struct xfrm_state *x, struct sk_buff *skb)
 	iph = ipv6_hdr(skb);
 
 	hdr_len = x->type->hdr_offset(x, skb, &prevhdr);
+	if (hdr_len < 0)
+		return hdr_len;
 	skb_set_mac_header(skb, (prevhdr - x->props.header_len) - skb->data);
 	skb_set_network_header(skb, -x->props.header_len);
 	skb->transport_header = skb->network_header + hdr_len;
diff --git a/net/ipv6/xfrm6_mode_transport.c b/net/ipv6/xfrm6_mode_transport.c
index 7a92c0f31912..9ad07a91708e 100644
--- a/net/ipv6/xfrm6_mode_transport.c
+++ b/net/ipv6/xfrm6_mode_transport.c
@@ -30,6 +30,8 @@ static int xfrm6_transport_output(struct xfrm_state *x, struct sk_buff *skb)
 	skb_set_inner_transport_header(skb, skb_transport_offset(skb));
 
 	hdr_len = x->type->hdr_offset(x, skb, &prevhdr);
+	if (hdr_len < 0)
+		return hdr_len;
 	skb_set_mac_header(skb, (prevhdr - x->props.header_len) - skb->data);
 	skb_set_network_header(skb, -x->props.header_len);
 	skb->transport_header = skb->network_header + hdr_len;
diff --git a/net/irda/irlan/irlan_eth.c b/net/irda/irlan/irlan_eth.c
index 74d09f91709e..3be852808a9d 100644
--- a/net/irda/irlan/irlan_eth.c
+++ b/net/irda/irlan/irlan_eth.c
@@ -65,7 +65,7 @@ static void irlan_eth_setup(struct net_device *dev)
 	ether_setup(dev);
 
 	dev->netdev_ops		= &irlan_eth_netdev_ops;
-	dev->destructor		= free_netdev;
+	dev->needs_free_netdev	= true;
 	dev->min_mtu		= 0;
 	dev->max_mtu		= ETH_MAX_MTU;
 
diff --git a/net/key/af_key.c b/net/key/af_key.c
index c1950bb14735..b1432b668033 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -1157,6 +1157,7 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
 		goto out;
 	}
 
+	err = -ENOBUFS;
 	key = ext_hdrs[SADB_EXT_KEY_AUTH - 1];
 	if (sa->sadb_sa_auth) {
 		int keysize = 0;
@@ -1168,8 +1169,10 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
 		if (key)
 			keysize = (key->sadb_key_bits + 7) / 8;
 		x->aalg = kmalloc(sizeof(*x->aalg) + keysize, GFP_KERNEL);
-		if (!x->aalg)
+		if (!x->aalg) {
+			err = -ENOMEM;
 			goto out;
+		}
 		strcpy(x->aalg->alg_name, a->name);
 		x->aalg->alg_key_len = 0;
 		if (key) {
@@ -1188,8 +1191,10 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
 			goto out;
 		}
 		x->calg = kmalloc(sizeof(*x->calg), GFP_KERNEL);
-		if (!x->calg)
+		if (!x->calg) {
+			err = -ENOMEM;
 			goto out;
+		}
 		strcpy(x->calg->alg_name, a->name);
 		x->props.calgo = sa->sadb_sa_encrypt;
 	} else {
@@ -1203,8 +1208,10 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
 		if (key)
 			keysize = (key->sadb_key_bits + 7) / 8;
 		x->ealg = kmalloc(sizeof(*x->ealg) + keysize, GFP_KERNEL);
-		if (!x->ealg)
+		if (!x->ealg) {
+			err = -ENOMEM;
 			goto out;
+		}
 		strcpy(x->ealg->alg_name, a->name);
 		x->ealg->alg_key_len = 0;
 		if (key) {
@@ -1249,8 +1256,10 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
 		struct xfrm_encap_tmpl *natt;
 
 		x->encap = kmalloc(sizeof(*x->encap), GFP_KERNEL);
-		if (!x->encap)
+		if (!x->encap) {
+			err = -ENOMEM;
 			goto out;
+		}
 
 		natt = x->encap;
 		n_type = ext_hdrs[SADB_X_EXT_NAT_T_TYPE-1];
@@ -2755,6 +2764,8 @@ static int pfkey_spdflush(struct sock *sk, struct sk_buff *skb, const struct sad
 	int err, err2;
 
 	err = xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, true);
+	if (!err)
+		xfrm_garbage_collect(net);
 	err2 = unicast_flush_resp(sk, hdr);
 	if (err || err2) {
 		if (err == -ESRCH) /* empty table - old silent behavior */
@@ -3285,7 +3296,7 @@ static struct xfrm_policy *pfkey_compile_policy(struct sock *sk, int opt,
 		p += pol->sadb_x_policy_len*8;
 		sec_ctx = (struct sadb_x_sec_ctx *)p;
 		if (len < pol->sadb_x_policy_len*8 +
-		    sec_ctx->sadb_x_sec_len) {
+		    sec_ctx->sadb_x_sec_len*8) {
 			*dir = -EINVAL;
 			goto out;
 		}
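The af_key hunks share one shape: a function with a single `goto out` error path must set an accurate errno at every failure site, rather than letting a stale or zero value leak through when an allocation fails. A compact userspace model of that shape (names and the -ENOBUFS default are illustrative only):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int build_state(void)
{
	int err = -ENOBUFS;	/* default for validation failures */
	void *aalg = NULL, *ealg = NULL;

	aalg = malloc(32);
	if (!aalg) {
		err = -ENOMEM;	/* each failure names its own cause */
		goto out;
	}
	ealg = malloc(32);
	if (!ealg) {
		err = -ENOMEM;
		goto out;
	}
	err = 0;
out:
	free(aalg);
	free(ealg);
	return err;
}

int main(void)
{
	printf("build_state() = %d\n", build_state());
	return 0;
}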
diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
index 8b21af7321b9..4de2ec94b08c 100644
--- a/net/l2tp/l2tp_eth.c
+++ b/net/l2tp/l2tp_eth.c
@@ -114,12 +114,13 @@ static void l2tp_eth_get_stats64(struct net_device *dev,
 {
 	struct l2tp_eth *priv = netdev_priv(dev);
 
-	stats->tx_bytes   = atomic_long_read(&priv->tx_bytes);
-	stats->tx_packets = atomic_long_read(&priv->tx_packets);
-	stats->tx_dropped = atomic_long_read(&priv->tx_dropped);
-	stats->rx_bytes   = atomic_long_read(&priv->rx_bytes);
-	stats->rx_packets = atomic_long_read(&priv->rx_packets);
-	stats->rx_errors  = atomic_long_read(&priv->rx_errors);
+	stats->tx_bytes   = (unsigned long) atomic_long_read(&priv->tx_bytes);
+	stats->tx_packets = (unsigned long) atomic_long_read(&priv->tx_packets);
+	stats->tx_dropped = (unsigned long) atomic_long_read(&priv->tx_dropped);
+	stats->rx_bytes   = (unsigned long) atomic_long_read(&priv->rx_bytes);
+	stats->rx_packets = (unsigned long) atomic_long_read(&priv->rx_packets);
+	stats->rx_errors  = (unsigned long) atomic_long_read(&priv->rx_errors);
+
 }
 
 static const struct net_device_ops l2tp_eth_netdev_ops = {
@@ -141,7 +142,7 @@ static void l2tp_eth_dev_setup(struct net_device *dev)
 	dev->priv_flags		&= ~IFF_TX_SKB_SHARING;
 	dev->features		|= NETIF_F_LLTX;
 	dev->netdev_ops		= &l2tp_eth_netdev_ops;
-	dev->destructor		= free_netdev;
+	dev->needs_free_netdev	= true;
 }
 
 static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb, int data_len)
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 8364fe5b59e4..c38d16f22d2a 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -311,6 +311,8 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen)
 	int rc = -EINVAL;
 
 	dprintk("%s: binding %02X\n", __func__, addr->sllc_sap);
+
+	lock_sock(sk);
 	if (unlikely(!sock_flag(sk, SOCK_ZAPPED) || addrlen != sizeof(*addr)))
 		goto out;
 	rc = -EAFNOSUPPORT;
@@ -382,6 +384,7 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen)
 out_put:
 	llc_sap_put(sap);
 out:
+	release_sock(sk);
 	return rc;
 }
 
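The llc_ui_bind() change brackets the whole check-then-bind sequence with lock_sock()/release_sock(), so a concurrent bind cannot slip in between the "already bound?" test and the state update. A toy userspace model of the same test-and-update-under-one-lock pattern, using a pthread mutex as the stand-in lock:

#include <pthread.h>
#include <stdio.h>

struct toy_sock {
	pthread_mutex_t lock;
	int bound;
};

static int toy_bind(struct toy_sock *sk, int sap)
{
	int rc = -1;

	pthread_mutex_lock(&sk->lock);		/* lock_sock(sk) */
	if (sk->bound)				/* test ... */
		goto out;
	sk->bound = sap;			/* ... and update, atomically */
	rc = 0;
out:
	pthread_mutex_unlock(&sk->lock);	/* release_sock(sk) */
	return rc;
}

int main(void)
{
	struct toy_sock sk = { PTHREAD_MUTEX_INITIALIZER, 0 };

	printf("first bind:  %d\n", toy_bind(&sk, 2));
	printf("second bind: %d\n", toy_bind(&sk, 3));	/* rejected */
	return 0;
}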
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index 60e2a62f7bef..cf2392b2ac71 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -7,7 +7,7 @@
  * Copyright 2006-2007	Jiri Benc <jbenc@suse.cz>
  * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
  * Copyright 2007-2010, Intel Corporation
- * Copyright(c) 2015 Intel Deutschland GmbH
+ * Copyright(c) 2015-2017 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -741,46 +741,43 @@ static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
 	ieee80211_agg_start_txq(sta, tid, true);
 }
 
-void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid)
+void ieee80211_start_tx_ba_cb(struct sta_info *sta, int tid,
+			      struct tid_ampdu_tx *tid_tx)
 {
-	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+	struct ieee80211_sub_if_data *sdata = sta->sdata;
 	struct ieee80211_local *local = sdata->local;
-	struct sta_info *sta;
-	struct tid_ampdu_tx *tid_tx;
 
-	trace_api_start_tx_ba_cb(sdata, ra, tid);
+	if (WARN_ON(test_and_set_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state)))
+		return;
+
+	if (test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state))
+		ieee80211_agg_tx_operational(local, sta, tid);
+}
+
+static struct tid_ampdu_tx *
+ieee80211_lookup_tid_tx(struct ieee80211_sub_if_data *sdata,
+			const u8 *ra, u16 tid, struct sta_info **sta)
+{
+	struct tid_ampdu_tx *tid_tx;
 
 	if (tid >= IEEE80211_NUM_TIDS) {
 		ht_dbg(sdata, "Bad TID value: tid = %d (>= %d)\n",
 		       tid, IEEE80211_NUM_TIDS);
-		return;
+		return NULL;
 	}
 
-	mutex_lock(&local->sta_mtx);
-	sta = sta_info_get_bss(sdata, ra);
-	if (!sta) {
-		mutex_unlock(&local->sta_mtx);
+	*sta = sta_info_get_bss(sdata, ra);
+	if (!*sta) {
 		ht_dbg(sdata, "Could not find station: %pM\n", ra);
-		return;
+		return NULL;
 	}
 
-	mutex_lock(&sta->ampdu_mlme.mtx);
-	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
+	tid_tx = rcu_dereference((*sta)->ampdu_mlme.tid_tx[tid]);
 
-	if (WARN_ON(!tid_tx)) {
+	if (WARN_ON(!tid_tx))
 		ht_dbg(sdata, "addBA was not requested!\n");
-		goto unlock;
-	}
 
-	if (WARN_ON(test_and_set_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state)))
-		goto unlock;
-
-	if (test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state))
-		ieee80211_agg_tx_operational(local, sta, tid);
-
- unlock:
-	mutex_unlock(&sta->ampdu_mlme.mtx);
-	mutex_unlock(&local->sta_mtx);
+	return tid_tx;
 }
 
 void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
@@ -788,19 +785,20 @@ void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
 {
 	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
 	struct ieee80211_local *local = sdata->local;
-	struct ieee80211_ra_tid *ra_tid;
-	struct sk_buff *skb = dev_alloc_skb(0);
+	struct sta_info *sta;
+	struct tid_ampdu_tx *tid_tx;
 
-	if (unlikely(!skb))
-		return;
+	trace_api_start_tx_ba_cb(sdata, ra, tid);
 
-	ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
-	memcpy(&ra_tid->ra, ra, ETH_ALEN);
-	ra_tid->tid = tid;
+	rcu_read_lock();
+	tid_tx = ieee80211_lookup_tid_tx(sdata, ra, tid, &sta);
+	if (!tid_tx)
+		goto out;
 
-	skb->pkt_type = IEEE80211_SDATA_QUEUE_AGG_START;
-	skb_queue_tail(&sdata->skb_queue, skb);
-	ieee80211_queue_work(&local->hw, &sdata->work);
+	set_bit(HT_AGG_STATE_START_CB, &tid_tx->state);
+	ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);
+ out:
+	rcu_read_unlock();
 }
 EXPORT_SYMBOL(ieee80211_start_tx_ba_cb_irqsafe);
 
@@ -860,37 +858,18 @@ int ieee80211_stop_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
 }
 EXPORT_SYMBOL(ieee80211_stop_tx_ba_session);
 
-void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid)
+void ieee80211_stop_tx_ba_cb(struct sta_info *sta, int tid,
+			     struct tid_ampdu_tx *tid_tx)
 {
-	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
-	struct ieee80211_local *local = sdata->local;
-	struct sta_info *sta;
-	struct tid_ampdu_tx *tid_tx;
+	struct ieee80211_sub_if_data *sdata = sta->sdata;
 	bool send_delba = false;
 
-	trace_api_stop_tx_ba_cb(sdata, ra, tid);
-
-	if (tid >= IEEE80211_NUM_TIDS) {
-		ht_dbg(sdata, "Bad TID value: tid = %d (>= %d)\n",
-		       tid, IEEE80211_NUM_TIDS);
-		return;
-	}
-
-	ht_dbg(sdata, "Stopping Tx BA session for %pM tid %d\n", ra, tid);
-
-	mutex_lock(&local->sta_mtx);
-
-	sta = sta_info_get_bss(sdata, ra);
-	if (!sta) {
-		ht_dbg(sdata, "Could not find station: %pM\n", ra);
-		goto unlock;
-	}
+	ht_dbg(sdata, "Stopping Tx BA session for %pM tid %d\n",
+	       sta->sta.addr, tid);
 
-	mutex_lock(&sta->ampdu_mlme.mtx);
 	spin_lock_bh(&sta->lock);
-	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
 
-	if (!tid_tx || !test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
+	if (!test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
 		ht_dbg(sdata,
 		       "unexpected callback to A-MPDU stop for %pM tid %d\n",
 		       sta->sta.addr, tid);
@@ -906,12 +885,8 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid)
 	spin_unlock_bh(&sta->lock);
 
 	if (send_delba)
-		ieee80211_send_delba(sdata, ra, tid,
+		ieee80211_send_delba(sdata, sta->sta.addr, tid,
 			WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE);
-
-	mutex_unlock(&sta->ampdu_mlme.mtx);
- unlock:
-	mutex_unlock(&local->sta_mtx);
 }
 
 void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
@@ -919,19 +894,20 @@ void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
 {
 	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
 	struct ieee80211_local *local = sdata->local;
-	struct ieee80211_ra_tid *ra_tid;
-	struct sk_buff *skb = dev_alloc_skb(0);
+	struct sta_info *sta;
+	struct tid_ampdu_tx *tid_tx;
 
-	if (unlikely(!skb))
-		return;
+	trace_api_stop_tx_ba_cb(sdata, ra, tid);
 
-	ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
-	memcpy(&ra_tid->ra, ra, ETH_ALEN);
-	ra_tid->tid = tid;
+	rcu_read_lock();
+	tid_tx = ieee80211_lookup_tid_tx(sdata, ra, tid, &sta);
+	if (!tid_tx)
+		goto out;
 
-	skb->pkt_type = IEEE80211_SDATA_QUEUE_AGG_STOP;
-	skb_queue_tail(&sdata->skb_queue, skb);
-	ieee80211_queue_work(&local->hw, &sdata->work);
+	set_bit(HT_AGG_STATE_STOP_CB, &tid_tx->state);
+	ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);
+ out:
+	rcu_read_unlock();
 }
 EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb_irqsafe);
 
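The agg-tx rework replaces the old skb-as-message scheme (a zero-size skb allocation that could fail and silently drop the event) with state bits on the session itself: the irqsafe driver callback just sets a bit and kicks the per-station work, which later acts on whatever bits are pending. A toy single-threaded model of that deferral follows; the real code uses atomic set_bit()/test_and_clear_bit() and a workqueue, which this sketch deliberately omits.

#include <stdio.h>

enum {
	STATE_START_CB = 1 << 0,
	STATE_STOP_CB  = 1 << 1,
};

struct session {
	unsigned int state;
};

/* Driver-context callback: cannot fail, since nothing is allocated. */
static void driver_cb(struct session *s, unsigned int bit)
{
	s->state |= bit;
	/* the kernel would queue the per-station work here */
}

/* Deferred work: consume and clear each pending callback bit. */
static void session_work(struct session *s)
{
	if (s->state & STATE_START_CB) {
		s->state &= ~STATE_START_CB;
		puts("run start callback");
	}
	if (s->state & STATE_STOP_CB) {
		s->state &= ~STATE_STOP_CB;
		puts("run stop callback");
	}
}

int main(void)
{
	struct session s = { 0 };

	driver_cb(&s, STATE_START_CB);
	driver_cb(&s, STATE_STOP_CB);
	session_work(&s);
	return 0;
}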
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 6c2e6060cd54..4a388fe8c2d1 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -902,6 +902,8 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
 	default:
 		return -EINVAL;
 	}
+	sdata->u.ap.req_smps = sdata->smps_mode;
+
 	sdata->needed_rx_chains = sdata->local->rx_chains;
 
 	sdata->vif.bss_conf.beacon_int = params->beacon_interval;
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index f4a528773563..6ca5442b1e03 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -7,6 +7,7 @@
  * Copyright 2006-2007	Jiri Benc <jbenc@suse.cz>
  * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
  * Copyright 2007-2010, Intel Corporation
+ * Copyright 2017	Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -289,8 +290,6 @@ void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta,
 {
 	int i;
 
-	cancel_work_sync(&sta->ampdu_mlme.work);
-
 	for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
 		__ieee80211_stop_tx_ba_session(sta, i, reason);
 		__ieee80211_stop_rx_ba_session(sta, i, WLAN_BACK_RECIPIENT,
@@ -298,6 +297,9 @@ void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta,
 				 reason != AGG_STOP_DESTROY_STA &&
 				 reason != AGG_STOP_PEER_REQUEST);
 	}
+
+	/* stopping might queue the work again - so cancel only afterwards */
+	cancel_work_sync(&sta->ampdu_mlme.work);
 }
 
 void ieee80211_ba_session_work(struct work_struct *work)
@@ -352,10 +354,16 @@ void ieee80211_ba_session_work(struct work_struct *work)
 		spin_unlock_bh(&sta->lock);
 
 		tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
-		if (tid_tx && test_and_clear_bit(HT_AGG_STATE_WANT_STOP,
-						 &tid_tx->state))
+		if (!tid_tx)
+			continue;
+
+		if (test_and_clear_bit(HT_AGG_STATE_START_CB, &tid_tx->state))
+			ieee80211_start_tx_ba_cb(sta, tid, tid_tx);
+		if (test_and_clear_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state))
 			___ieee80211_stop_tx_ba_session(sta, tid,
 							AGG_STOP_LOCAL_REQUEST);
+		if (test_and_clear_bit(HT_AGG_STATE_STOP_CB, &tid_tx->state))
+			ieee80211_stop_tx_ba_cb(sta, tid, tid_tx);
 	}
 	mutex_unlock(&sta->ampdu_mlme.mtx);
 }
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index f8f6c148f554..5e002f62c235 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -1036,8 +1036,6 @@ struct ieee80211_rx_agg {
 
 enum sdata_queue_type {
 	IEEE80211_SDATA_QUEUE_TYPE_FRAME	= 0,
-	IEEE80211_SDATA_QUEUE_AGG_START		= 1,
-	IEEE80211_SDATA_QUEUE_AGG_STOP		= 2,
 	IEEE80211_SDATA_QUEUE_RX_AGG_START	= 3,
 	IEEE80211_SDATA_QUEUE_RX_AGG_STOP	= 4,
 };
@@ -1427,12 +1425,6 @@ ieee80211_get_sband(struct ieee80211_sub_if_data *sdata)
 	return local->hw.wiphy->bands[band];
 }
 
-/* this struct represents 802.11n's RA/TID combination */
-struct ieee80211_ra_tid {
-	u8 ra[ETH_ALEN];
-	u16 tid;
-};
-
 /* this struct holds the value parsing from channel switch IE */
 struct ieee80211_csa_ie {
 	struct cfg80211_chan_def chandef;
@@ -1539,7 +1531,7 @@ ieee80211_have_rx_timestamp(struct ieee80211_rx_status *status)
 		return true;
 	/* can't handle non-legacy preamble yet */
 	if (status->flag & RX_FLAG_MACTIME_PLCP_START &&
-	    status->encoding != RX_ENC_LEGACY)
+	    status->encoding == RX_ENC_LEGACY)
 		return true;
 	return false;
 }
@@ -1794,8 +1786,10 @@ int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
 			       enum ieee80211_agg_stop_reason reason);
 int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
 				    enum ieee80211_agg_stop_reason reason);
-void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid);
-void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid);
+void ieee80211_start_tx_ba_cb(struct sta_info *sta, int tid,
+			      struct tid_ampdu_tx *tid_tx);
+void ieee80211_stop_tx_ba_cb(struct sta_info *sta, int tid,
+			     struct tid_ampdu_tx *tid_tx);
 void ieee80211_ba_session_work(struct work_struct *work);
 void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid);
 void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid);
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 3bd5b81f5d81..f5f50150ba1c 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -1213,7 +1213,6 @@ static const struct net_device_ops ieee80211_monitorif_ops = {
 static void ieee80211_if_free(struct net_device *dev)
 {
 	free_percpu(dev->tstats);
-	free_netdev(dev);
 }
 
 static void ieee80211_if_setup(struct net_device *dev)
@@ -1221,7 +1220,8 @@ static void ieee80211_if_setup(struct net_device *dev)
 	ether_setup(dev);
 	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
 	dev->netdev_ops = &ieee80211_dataif_ops;
-	dev->destructor = ieee80211_if_free;
+	dev->needs_free_netdev = true;
+	dev->priv_destructor = ieee80211_if_free;
 }
 
 static void ieee80211_if_setup_no_queue(struct net_device *dev)
@@ -1237,7 +1237,6 @@ static void ieee80211_iface_work(struct work_struct *work)
 	struct ieee80211_local *local = sdata->local;
 	struct sk_buff *skb;
 	struct sta_info *sta;
-	struct ieee80211_ra_tid *ra_tid;
 	struct ieee80211_rx_agg *rx_agg;
 
 	if (!ieee80211_sdata_running(sdata))
@@ -1253,15 +1252,7 @@ static void ieee80211_iface_work(struct work_struct *work)
 	while ((skb = skb_dequeue(&sdata->skb_queue))) {
 		struct ieee80211_mgmt *mgmt = (void *)skb->data;
 
-		if (skb->pkt_type == IEEE80211_SDATA_QUEUE_AGG_START) {
-			ra_tid = (void *)&skb->cb;
-			ieee80211_start_tx_ba_cb(&sdata->vif, ra_tid->ra,
-						 ra_tid->tid);
-		} else if (skb->pkt_type == IEEE80211_SDATA_QUEUE_AGG_STOP) {
-			ra_tid = (void *)&skb->cb;
-			ieee80211_stop_tx_ba_cb(&sdata->vif, ra_tid->ra,
-						ra_tid->tid);
-		} else if (skb->pkt_type == IEEE80211_SDATA_QUEUE_RX_AGG_START) {
+		if (skb->pkt_type == IEEE80211_SDATA_QUEUE_RX_AGG_START) {
 			rx_agg = (void *)&skb->cb;
 			mutex_lock(&local->sta_mtx);
 			sta = sta_info_get_bss(sdata, rx_agg->addr);
@@ -1825,6 +1816,7 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
 	ret = dev_alloc_name(ndev, ndev->name);
 	if (ret < 0) {
 		ieee80211_if_free(ndev);
+		free_netdev(ndev);
 		return ret;
 	}
 
@@ -1914,7 +1906,7 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
 
 		ret = register_netdevice(ndev);
 		if (ret) {
-			ieee80211_if_free(ndev);
+			free_netdev(ndev);
 			return ret;
 		}
 	}
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 0ea9712bd99e..cc8e6ea1b27e 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -601,7 +601,7 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
 	struct ieee80211_supported_band *sband;
 	struct ieee80211_chanctx_conf *chanctx_conf;
 	struct ieee80211_channel *chan;
-	u32 rate_flags, rates = 0;
+	u32 rates = 0;
 
 	sdata_assert_lock(sdata);
 
@@ -612,7 +612,6 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
 		return;
 	}
 	chan = chanctx_conf->def.chan;
-	rate_flags = ieee80211_chandef_rate_flags(&chanctx_conf->def);
 	rcu_read_unlock();
 	sband = local->hw.wiphy->bands[chan->band];
 	shift = ieee80211_vif_get_shift(&sdata->vif);
@@ -636,9 +635,6 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
 	 */
 	rates_len = 0;
 	for (i = 0; i < sband->n_bitrates; i++) {
-		if ((rate_flags & sband->bitrates[i].flags)
-		    != rate_flags)
-			continue;
 		rates |= BIT(i);
 		rates_len++;
 	}
@@ -2818,7 +2814,7 @@ static void ieee80211_get_rates(struct ieee80211_supported_band *sband,
 				u32 *rates, u32 *basic_rates,
 				bool *have_higher_than_11mbit,
 				int *min_rate, int *min_rate_index,
-				int shift, u32 rate_flags)
+				int shift)
 {
 	int i, j;
 
@@ -2846,8 +2842,6 @@ static void ieee80211_get_rates(struct ieee80211_supported_band *sband,
 			int brate;
 
 			br = &sband->bitrates[j];
-			if ((rate_flags & br->flags) != rate_flags)
-				continue;
 
 			brate = DIV_ROUND_UP(br->bitrate, (1 << shift) * 5);
 			if (brate == rate) {
@@ -4398,40 +4392,32 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
 		return -ENOMEM;
 	}
 
-	if (new_sta || override) {
-		err = ieee80211_prep_channel(sdata, cbss);
-		if (err) {
-			if (new_sta)
-				sta_info_free(local, new_sta);
-			return -EINVAL;
-		}
-	}
-
+	/*
+	 * Set up the information for the new channel before setting the
+	 * new channel. We can't - completely race-free - change the basic
+	 * rates bitmap and the channel (sband) that it refers to, but if
+	 * we set it up before we at least avoid calling into the driver's
+	 * bss_info_changed() method with invalid information (since we do
+	 * call that from changing the channel - only for IDLE and perhaps
+	 * some others, but ...).
+	 *
+	 * So to avoid that, just set up all the new information before the
+	 * channel, but tell the driver to apply it only afterwards, since
+	 * it might need the new channel for that.
+	 */
 	if (new_sta) {
 		u32 rates = 0, basic_rates = 0;
 		bool have_higher_than_11mbit;
 		int min_rate = INT_MAX, min_rate_index = -1;
-		struct ieee80211_chanctx_conf *chanctx_conf;
 		const struct cfg80211_bss_ies *ies;
 		int shift = ieee80211_vif_get_shift(&sdata->vif);
-		u32 rate_flags;
-
-		rcu_read_lock();
-		chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
-		if (WARN_ON(!chanctx_conf)) {
-			rcu_read_unlock();
-			sta_info_free(local, new_sta);
-			return -EINVAL;
-		}
-		rate_flags = ieee80211_chandef_rate_flags(&chanctx_conf->def);
-		rcu_read_unlock();
 
 		ieee80211_get_rates(sband, bss->supp_rates,
 				    bss->supp_rates_len,
 				    &rates, &basic_rates,
 				    &have_higher_than_11mbit,
 				    &min_rate, &min_rate_index,
-				    shift, rate_flags);
+				    shift);
 
 		/*
 		 * This used to be a workaround for basic rates missing
@@ -4489,8 +4475,22 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
 			sdata->vif.bss_conf.sync_dtim_count = 0;
 		}
 		rcu_read_unlock();
+	}
 
-	/* tell driver about BSSID, basic rates and timing */
+	if (new_sta || override) {
+		err = ieee80211_prep_channel(sdata, cbss);
+		if (err) {
+			if (new_sta)
+				sta_info_free(local, new_sta);
+			return -EINVAL;
+		}
+	}
+
+	if (new_sta) {
+		/*
+		 * tell driver about BSSID, basic rates and timing
+		 * this was set up above, before setting the channel
+		 */
 		ieee80211_bss_info_change_notify(sdata,
 			BSS_CHANGED_BSSID | BSS_CHANGED_BASIC_RATES |
 			BSS_CHANGED_BEACON_INT);
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 35f4c7d7a500..3674fe3d67dc 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -1613,12 +1613,16 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
 	 */
 	if (!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS) &&
 	    !ieee80211_has_morefrags(hdr->frame_control) &&
+	    !ieee80211_is_back_req(hdr->frame_control) &&
 	    !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
 	    (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
 	     rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
-	    /* PM bit is only checked in frames where it isn't reserved,
+	    /*
+	     * PM bit is only checked in frames where it isn't reserved,
 	     * in AP mode it's reserved in non-bufferable management frames
 	     * (cf. IEEE 802.11-2012 8.2.4.1.7 Power Management field)
+	     * BAR frames should be ignored as specified in
+	     * IEEE 802.11-2012 10.2.1.2.
 	     */
 	    (!ieee80211_is_mgmt(hdr->frame_control) ||
 	     ieee80211_is_bufferable_mmpdu(hdr->frame_control))) {
@@ -2492,7 +2496,8 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
 	if (is_multicast_ether_addr(hdr->addr1)) {
 		mpp_addr = hdr->addr3;
 		proxied_addr = mesh_hdr->eaddr1;
-	} else if (mesh_hdr->flags & MESH_FLAGS_AE_A5_A6) {
+	} else if ((mesh_hdr->flags & MESH_FLAGS_AE) ==
+		    MESH_FLAGS_AE_A5_A6) {
 		/* has_a4 already checked in ieee80211_rx_mesh_check */
 		mpp_addr = hdr->addr4;
 		proxied_addr = mesh_hdr->eaddr2;
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 7cdf7a835bb0..403e3cc58b57 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -2155,7 +2155,7 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
 			struct ieee80211_sta_rx_stats *cpurxs;
 
 			cpurxs = per_cpu_ptr(sta->pcpu_rx_stats, cpu);
-			sinfo->rx_packets += cpurxs->dropped;
+			sinfo->rx_dropped_misc += cpurxs->dropped;
 		}
 	}
 
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 5609cacb20d5..ea0747d6a6da 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -116,6 +116,8 @@ enum ieee80211_sta_info_flags {
 #define HT_AGG_STATE_STOPPING		3
 #define HT_AGG_STATE_WANT_START		4
 #define HT_AGG_STATE_WANT_STOP		5
+#define HT_AGG_STATE_START_CB		6
+#define HT_AGG_STATE_STOP_CB		7
 
 enum ieee80211_agg_stop_reason {
 	AGG_STOP_DECLINED,
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index c1ef22df865f..cc19614ff4e6 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -17,6 +17,7 @@
 #include <asm/unaligned.h>
 #include <net/mac80211.h>
 #include <crypto/aes.h>
+#include <crypto/algapi.h>
 
 #include "ieee80211_i.h"
 #include "michael.h"
@@ -153,7 +154,7 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx)
 	data_len = skb->len - hdrlen - MICHAEL_MIC_LEN;
 	key = &rx->key->conf.key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY];
 	michael_mic(key, hdr, data, data_len, mic);
-	if (memcmp(mic, data + data_len, MICHAEL_MIC_LEN) != 0)
+	if (crypto_memneq(mic, data + data_len, MICHAEL_MIC_LEN))
 		goto mic_fail;
 
 	/* remove Michael MIC from payload */
@@ -1048,7 +1049,7 @@ ieee80211_crypto_aes_cmac_decrypt(struct ieee80211_rx_data *rx)
 		bip_aad(skb, aad);
 		ieee80211_aes_cmac(key->u.aes_cmac.tfm, aad,
 				   skb->data + 24, skb->len - 24, mic);
-		if (memcmp(mic, mmie->mic, sizeof(mmie->mic)) != 0) {
+		if (crypto_memneq(mic, mmie->mic, sizeof(mmie->mic))) {
 			key->u.aes_cmac.icverrors++;
 			return RX_DROP_UNUSABLE;
 		}
@@ -1098,7 +1099,7 @@ ieee80211_crypto_aes_cmac_256_decrypt(struct ieee80211_rx_data *rx)
 		bip_aad(skb, aad);
 		ieee80211_aes_cmac_256(key->u.aes_cmac.tfm, aad,
 				       skb->data + 24, skb->len - 24, mic);
-		if (memcmp(mic, mmie->mic, sizeof(mmie->mic)) != 0) {
+		if (crypto_memneq(mic, mmie->mic, sizeof(mmie->mic))) {
 			key->u.aes_cmac.icverrors++;
 			return RX_DROP_UNUSABLE;
 		}
@@ -1202,7 +1203,7 @@ ieee80211_crypto_aes_gmac_decrypt(struct ieee80211_rx_data *rx)
 		if (ieee80211_aes_gmac(key->u.aes_gmac.tfm, aad, nonce,
 				       skb->data + 24, skb->len - 24,
 				       mic) < 0 ||
-		    memcmp(mic, mmie->mic, sizeof(mmie->mic)) != 0) {
+		    crypto_memneq(mic, mmie->mic, sizeof(mmie->mic))) {
 			key->u.aes_gmac.icverrors++;
 			return RX_DROP_UNUSABLE;
 		}
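All four wpa.c hunks make the same change: memcmp() returns as soon as it finds a mismatching byte, so the time a MIC check takes leaks how much of a forged tag was correct, while crypto_memneq() runs in time independent of the data. A minimal userspace sketch of the constant-time idea (the kernel version additionally hides the accumulator from the compiler's optimizer, which this sketch does not attempt):

#include <stddef.h>

/* Returns nonzero if the buffers differ. Unlike memcmp(), the running
 * time does not depend on where the first mismatching byte occurs. */
static int const_time_memneq(const void *a, const void *b, size_t len)
{
	const unsigned char *pa = a, *pb = b;
	unsigned char diff = 0;
	size_t i;

	for (i = 0; i < len; i++)
		diff |= pa[i] ^ pb[i];	/* accumulate, never branch early */
	return diff;			/* 0 only if every byte matched */
}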
diff --git a/net/mac802154/iface.c b/net/mac802154/iface.c
index 06019dba4b10..bd88a9b80773 100644
--- a/net/mac802154/iface.c
+++ b/net/mac802154/iface.c
@@ -526,8 +526,6 @@ static void mac802154_wpan_free(struct net_device *dev)
 	struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
 
 	mac802154_llsec_destroy(&sdata->sec);
-
-	free_netdev(dev);
 }
 
 static void ieee802154_if_setup(struct net_device *dev)
@@ -593,7 +591,8 @@ ieee802154_setup_sdata(struct ieee802154_sub_if_data *sdata,
 					sdata->dev->dev_addr);
 
 		sdata->dev->header_ops = &mac802154_header_ops;
-		sdata->dev->destructor = mac802154_wpan_free;
+		sdata->dev->needs_free_netdev = true;
+		sdata->dev->priv_destructor = mac802154_wpan_free;
 		sdata->dev->netdev_ops = &mac802154_wpan_ops;
 		sdata->dev->ml_priv = &mac802154_mlme_wpan;
 		wpan_dev->promiscuous_mode = false;
@@ -608,7 +607,7 @@ ieee802154_setup_sdata(struct ieee802154_sub_if_data *sdata,
 
 		break;
 	case NL802154_IFTYPE_MONITOR:
-		sdata->dev->destructor = free_netdev;
+		sdata->dev->needs_free_netdev = true;
 		sdata->dev->netdev_ops = &mac802154_monitor_ops;
 		wpan_dev->promiscuous_mode = true;
 		break;
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index 257ec66009da..7b05fd1497ce 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -1418,7 +1418,7 @@ static void mpls_ifup(struct net_device *dev, unsigned int flags)
 				continue;
 			alive++;
 			nh_flags &= ~flags;
-			WRITE_ONCE(nh->nh_flags, flags);
+			WRITE_ONCE(nh->nh_flags, nh_flags);
 		} endfor_nexthops(rt);
 
 		WRITE_ONCE(rt->rt_nhn_alive, alive);
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index d2d7bdf1d510..ad99c1ceea6f 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -849,10 +849,8 @@ static int handle_response_icmp(int af, struct sk_buff *skb,
 {
 	unsigned int verdict = NF_DROP;
 
-	if (IP_VS_FWD_METHOD(cp) != 0) {
-		pr_err("shouldn't reach here, because the box is on the "
-		       "half connection in the tun/dr module.\n");
-	}
+	if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ)
+		goto ignore_cp;
 
 	/* Ensure the checksum is correct */
 	if (!skb_csum_unnecessary(skb) && ip_vs_checksum_complete(skb, ihl)) {
@@ -886,6 +884,8 @@ static int handle_response_icmp(int af, struct sk_buff *skb,
 		ip_vs_notrack(skb);
 	else
 		ip_vs_update_conntrack(skb, cp, 0);
+
+ignore_cp:
 	verdict = NF_ACCEPT;
 
 out:
@@ -1385,8 +1385,11 @@ ip_vs_out(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, in
 	 */
 	cp = pp->conn_out_get(ipvs, af, skb, &iph);
 
-	if (likely(cp))
+	if (likely(cp)) {
+		if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ)
+			goto ignore_cp;
 		return handle_response(af, skb, pd, cp, &iph, hooknum);
+	}
 
 	/* Check for real-server-started requests */
 	if (atomic_read(&ipvs->conn_out_counter)) {
@@ -1444,9 +1447,15 @@ ip_vs_out(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, in
 			}
 		}
 	}
+
+out:
 	IP_VS_DBG_PKT(12, af, pp, skb, iph.off,
 		      "ip_vs_out: packet continues traversal as normal");
 	return NF_ACCEPT;
+
+ignore_cp:
+	__ip_vs_conn_put(cp);
+	goto out;
 }
 
 /*
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index 3a60efa7799b..7f6100ca63be 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -174,6 +174,10 @@ nf_conntrack_helper_try_module_get(const char *name, u16 l3num, u8 protonum)
 #endif
 	if (h != NULL && !try_module_get(h->me))
 		h = NULL;
+	if (h != NULL && !refcount_inc_not_zero(&h->refcnt)) {
+		module_put(h->me);
+		h = NULL;
+	}
 
 	rcu_read_unlock();
 
@@ -181,6 +185,13 @@ nf_conntrack_helper_try_module_get(const char *name, u16 l3num, u8 protonum)
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_helper_try_module_get);
 
+void nf_conntrack_helper_put(struct nf_conntrack_helper *helper)
+{
+	refcount_dec(&helper->refcnt);
+	module_put(helper->me);
+}
+EXPORT_SYMBOL_GPL(nf_conntrack_helper_put);
+
 struct nf_conn_help *
 nf_ct_helper_ext_add(struct nf_conn *ct,
 		     struct nf_conntrack_helper *helper, gfp_t gfp)
@@ -417,6 +428,7 @@ int nf_conntrack_helper_register(struct nf_conntrack_helper *me)
 			}
 		}
 	}
+	refcount_set(&me->refcnt, 1);
 	hlist_add_head_rcu(&me->hnode, &nf_ct_helper_hash[h]);
 	nf_ct_helper_count++;
 out:
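These helper hunks layer an object refcount on top of the existing module reference, so that later callers (xt_CT, openvswitch, nft_ct, nfnetlink_cthelper below) can release both through nf_conntrack_helper_put(), and deletion can refuse with -EBUSY while a helper is in use. A sketch of the resulting get/put discipline, with struct helper_like standing in for struct nf_conntrack_helper:

#include <linux/module.h>
#include <linux/refcount.h>

struct helper_like {
	refcount_t refcnt;
	struct module *me;
};

/* Take both references, backing out of the first if the second fails. */
static struct helper_like *helper_get(struct helper_like *h)
{
	if (!try_module_get(h->me))		/* pin the owning module */
		return NULL;
	if (!refcount_inc_not_zero(&h->refcnt)) {
		module_put(h->me);		/* object already dying: undo */
		return NULL;
	}
	return h;
}

/* Drop in reverse order: the object reference first, then the module pin. */
static void helper_put(struct helper_like *h)
{
	refcount_dec(&h->refcnt);
	module_put(h->me);
}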
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index dcf561b5c97a..a8be9b72e6cd 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -45,6 +45,8 @@
 #include <net/netfilter/nf_conntrack_zones.h>
 #include <net/netfilter/nf_conntrack_timestamp.h>
 #include <net/netfilter/nf_conntrack_labels.h>
+#include <net/netfilter/nf_conntrack_seqadj.h>
+#include <net/netfilter/nf_conntrack_synproxy.h>
 #ifdef CONFIG_NF_NAT_NEEDED
 #include <net/netfilter/nf_nat_core.h>
 #include <net/netfilter/nf_nat_l4proto.h>
@@ -888,8 +890,13 @@ restart:
 	}
 out:
 	local_bh_enable();
-	if (last)
+	if (last) {
+		/* nf ct hash resize happened, now clear the leftover. */
+		if ((struct nf_conn *)cb->args[1] == last)
+			cb->args[1] = 0;
+
 		nf_ct_put(last);
+	}
 
 	while (i) {
 		i--;
@@ -1007,9 +1014,8 @@ static const struct nla_policy tuple_nla_policy[CTA_TUPLE_MAX+1] = {
 
 static int
 ctnetlink_parse_tuple(const struct nlattr * const cda[],
-		      struct nf_conntrack_tuple *tuple,
-		      enum ctattr_type type, u_int8_t l3num,
-		      struct nf_conntrack_zone *zone)
+		      struct nf_conntrack_tuple *tuple, u32 type,
+		      u_int8_t l3num, struct nf_conntrack_zone *zone)
 {
 	struct nlattr *tb[CTA_TUPLE_MAX+1];
 	int err;
@@ -1828,6 +1834,8 @@ ctnetlink_create_conntrack(struct net *net,
 	nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
 	nf_ct_ecache_ext_add(ct, 0, 0, GFP_ATOMIC);
 	nf_ct_labels_ext_add(ct);
+	nfct_seqadj_ext_add(ct);
+	nfct_synproxy_ext_add(ct);
 
 	/* we must add conntrack extensions before confirmation. */
 	ct->status |= IPS_CONFIRMED;
@@ -2447,7 +2455,7 @@ static struct nfnl_ct_hook ctnetlink_glue_hook = {
 
 static int ctnetlink_exp_dump_tuple(struct sk_buff *skb,
 				    const struct nf_conntrack_tuple *tuple,
-				    enum ctattr_expect type)
+				    u32 type)
 {
 	struct nlattr *nest_parms;
 
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index 13875d599a85..1c5b14a6cab3 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -512,16 +512,19 @@ static int sctp_error(struct net *net, struct nf_conn *tpl, struct sk_buff *skb,
 		      u8 pf, unsigned int hooknum)
 {
 	const struct sctphdr *sh;
-	struct sctphdr _sctph;
 	const char *logmsg;
 
-	sh = skb_header_pointer(skb, dataoff, sizeof(_sctph), &_sctph);
-	if (!sh) {
+	if (skb->len < dataoff + sizeof(struct sctphdr)) {
 		logmsg = "nf_ct_sctp: short packet ";
 		goto out_invalid;
 	}
 	if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING &&
 	    skb->ip_summed == CHECKSUM_NONE) {
+		if (!skb_make_writable(skb, dataoff + sizeof(struct sctphdr))) {
+			logmsg = "nf_ct_sctp: failed to read header ";
+			goto out_invalid;
+		}
+		sh = (const struct sctphdr *)(skb->data + dataoff);
 		if (sh->checksum != sctp_compute_cksum(skb, dataoff)) {
 			logmsg = "nf_ct_sctp: bad CRC ";
 			goto out_invalid;
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index b48d6b5aae8a..6c72922d20ca 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -409,6 +409,10 @@ nf_nat_setup_info(struct nf_conn *ct,
 {
 	struct nf_conntrack_tuple curr_tuple, new_tuple;
 
+	/* Can't setup nat info for confirmed ct. */
+	if (nf_ct_is_confirmed(ct))
+		return NF_ACCEPT;
+
 	NF_CT_ASSERT(maniptype == NF_NAT_MANIP_SRC ||
 		     maniptype == NF_NAT_MANIP_DST);
 	BUG_ON(nf_nat_initialized(ct, maniptype));
@@ -562,7 +566,7 @@ static int nf_nat_proto_clean(struct nf_conn *ct, void *data)
 	 * Else, when the conntrack is destoyed, nf_nat_cleanup_conntrack()
 	 * will delete entry from already-freed table.
 	 */
-	ct->status &= ~IPS_NAT_DONE_MASK;
+	clear_bit(IPS_SRC_NAT_DONE_BIT, &ct->status);
 	rhltable_remove(&nf_nat_bysource_table, &ct->nat_bysource,
 			nf_nat_bysource_params);
 
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 559225029740..da314be0c048 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -3367,35 +3367,50 @@ static int nf_tables_dump_setelem(const struct nft_ctx *ctx,
 	return nf_tables_fill_setelem(args->skb, set, elem);
 }
 
+struct nft_set_dump_ctx {
+	const struct nft_set	*set;
+	struct nft_ctx		ctx;
+};
+
 static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
 {
+	struct nft_set_dump_ctx *dump_ctx = cb->data;
 	struct net *net = sock_net(skb->sk);
-	u8 genmask = nft_genmask_cur(net);
+	struct nft_af_info *afi;
+	struct nft_table *table;
 	struct nft_set *set;
 	struct nft_set_dump_args args;
-	struct nft_ctx ctx;
-	struct nlattr *nla[NFTA_SET_ELEM_LIST_MAX + 1];
+	bool set_found = false;
 	struct nfgenmsg *nfmsg;
 	struct nlmsghdr *nlh;
 	struct nlattr *nest;
 	u32 portid, seq;
-	int event, err;
+	int event;
 
-	err = nlmsg_parse(cb->nlh, sizeof(struct nfgenmsg), nla,
-			  NFTA_SET_ELEM_LIST_MAX, nft_set_elem_list_policy,
-			  NULL);
-	if (err < 0)
-		return err;
+	rcu_read_lock();
+	list_for_each_entry_rcu(afi, &net->nft.af_info, list) {
+		if (afi != dump_ctx->ctx.afi)
+			continue;
 
-	err = nft_ctx_init_from_elemattr(&ctx, net, cb->skb, cb->nlh,
-					 (void *)nla, genmask);
-	if (err < 0)
-		return err;
+		list_for_each_entry_rcu(table, &afi->tables, list) {
+			if (table != dump_ctx->ctx.table)
+				continue;
 
-	set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET],
-				   genmask);
-	if (IS_ERR(set))
-		return PTR_ERR(set);
+			list_for_each_entry_rcu(set, &table->sets, list) {
+				if (set == dump_ctx->set) {
+					set_found = true;
+					break;
+				}
+			}
+			break;
+		}
+		break;
+	}
+
+	if (!set_found) {
+		rcu_read_unlock();
+		return -ENOENT;
+	}
 
 	event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, NFT_MSG_NEWSETELEM);
 	portid = NETLINK_CB(cb->skb).portid;
@@ -3407,11 +3422,11 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
 		goto nla_put_failure;
 
 	nfmsg = nlmsg_data(nlh);
-	nfmsg->nfgen_family = ctx.afi->family;
+	nfmsg->nfgen_family = afi->family;
 	nfmsg->version = NFNETLINK_V0;
-	nfmsg->res_id = htons(ctx.net->nft.base_seq & 0xffff);
+	nfmsg->res_id = htons(net->nft.base_seq & 0xffff);
 
-	if (nla_put_string(skb, NFTA_SET_ELEM_LIST_TABLE, ctx.table->name))
+	if (nla_put_string(skb, NFTA_SET_ELEM_LIST_TABLE, table->name))
 		goto nla_put_failure;
 	if (nla_put_string(skb, NFTA_SET_ELEM_LIST_SET, set->name))
 		goto nla_put_failure;
@@ -3422,12 +3437,13 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
 
 	args.cb = cb;
 	args.skb = skb;
-	args.iter.genmask = nft_genmask_cur(ctx.net);
+	args.iter.genmask = nft_genmask_cur(net);
 	args.iter.skip = cb->args[0];
 	args.iter.count = 0;
 	args.iter.err = 0;
 	args.iter.fn = nf_tables_dump_setelem;
-	set->ops->walk(&ctx, set, &args.iter);
+	set->ops->walk(&dump_ctx->ctx, set, &args.iter);
+	rcu_read_unlock();
 
 	nla_nest_end(skb, nest);
 	nlmsg_end(skb, nlh);
@@ -3441,9 +3457,16 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
 	return skb->len;
 
 nla_put_failure:
+	rcu_read_unlock();
 	return -ENOSPC;
 }
 
+static int nf_tables_dump_set_done(struct netlink_callback *cb)
+{
+	kfree(cb->data);
+	return 0;
+}
+
 static int nf_tables_getsetelem(struct net *net, struct sock *nlsk,
 				struct sk_buff *skb, const struct nlmsghdr *nlh,
 				const struct nlattr * const nla[])
@@ -3465,7 +3488,18 @@ static int nf_tables_getsetelem(struct net *net, struct sock *nlsk,
 	if (nlh->nlmsg_flags & NLM_F_DUMP) {
 		struct netlink_dump_control c = {
 			.dump = nf_tables_dump_set,
+			.done = nf_tables_dump_set_done,
 		};
+		struct nft_set_dump_ctx *dump_ctx;
+
+		dump_ctx = kmalloc(sizeof(*dump_ctx), GFP_KERNEL);
+		if (!dump_ctx)
+			return -ENOMEM;
+
+		dump_ctx->set = set;
+		dump_ctx->ctx = ctx;
+
+		c.data = dump_ctx;
 		return netlink_dump_start(nlsk, skb, nlh, &c);
 	}
 	return -EOPNOTSUPP;
@@ -3593,9 +3627,9 @@ void nft_set_elem_destroy(const struct nft_set *set, void *elem,
 {
 	struct nft_set_ext *ext = nft_set_elem_ext(set, elem);
 
-	nft_data_uninit(nft_set_ext_key(ext), NFT_DATA_VALUE);
+	nft_data_release(nft_set_ext_key(ext), NFT_DATA_VALUE);
 	if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA))
-		nft_data_uninit(nft_set_ext_data(ext), set->dtype);
+		nft_data_release(nft_set_ext_data(ext), set->dtype);
 	if (destroy_expr && nft_set_ext_exists(ext, NFT_SET_EXT_EXPR))
 		nf_tables_expr_destroy(NULL, nft_set_ext_expr(ext));
 	if (nft_set_ext_exists(ext, NFT_SET_EXT_OBJREF))
@@ -3604,6 +3638,18 @@ void nft_set_elem_destroy(const struct nft_set *set, void *elem,
 }
 EXPORT_SYMBOL_GPL(nft_set_elem_destroy);
 
+/* Only called from commit path, nft_set_elem_deactivate() already deals with
+ * the refcounting from the preparation phase.
+ */
+static void nf_tables_set_elem_destroy(const struct nft_set *set, void *elem)
+{
+	struct nft_set_ext *ext = nft_set_elem_ext(set, elem);
+
+	if (nft_set_ext_exists(ext, NFT_SET_EXT_EXPR))
+		nf_tables_expr_destroy(NULL, nft_set_ext_expr(ext));
+	kfree(elem);
+}
+
 static int nft_setelem_parse_flags(const struct nft_set *set,
 				   const struct nlattr *attr, u32 *flags)
 {
@@ -3815,9 +3861,9 @@ err4:
 	kfree(elem.priv);
 err3:
 	if (nla[NFTA_SET_ELEM_DATA] != NULL)
-		nft_data_uninit(&data, d2.type);
+		nft_data_release(&data, d2.type);
 err2:
-	nft_data_uninit(&elem.key.val, d1.type);
+	nft_data_release(&elem.key.val, d1.type);
 err1:
 	return err;
 }
@@ -3862,6 +3908,53 @@ static int nf_tables_newsetelem(struct net *net, struct sock *nlsk,
 	return err;
 }
 
+/**
+ * nft_data_hold - hold a nft_data item
+ *
+ * @data: struct nft_data to release
+ * @type: type of data
+ *
+ * Hold a nft_data item. NFT_DATA_VALUE types can be silently discarded,
+ * NFT_DATA_VERDICT bumps the reference to chains in case of NFT_JUMP and
+ * NFT_GOTO verdicts. This function must be called on active data objects
+ * from the second phase of the commit protocol.
+ */
+static void nft_data_hold(const struct nft_data *data, enum nft_data_types type)
+{
+	if (type == NFT_DATA_VERDICT) {
+		switch (data->verdict.code) {
+		case NFT_JUMP:
+		case NFT_GOTO:
+			data->verdict.chain->use++;
+			break;
+		}
+	}
+}
+
+static void nft_set_elem_activate(const struct net *net,
+				  const struct nft_set *set,
+				  struct nft_set_elem *elem)
+{
+	const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
+
+	if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA))
+		nft_data_hold(nft_set_ext_data(ext), set->dtype);
+	if (nft_set_ext_exists(ext, NFT_SET_EXT_OBJREF))
+		(*nft_set_ext_obj(ext))->use++;
+}
+
+static void nft_set_elem_deactivate(const struct net *net,
+				    const struct nft_set *set,
+				    struct nft_set_elem *elem)
+{
+	const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
+
+	if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA))
+		nft_data_release(nft_set_ext_data(ext), set->dtype);
+	if (nft_set_ext_exists(ext, NFT_SET_EXT_OBJREF))
+		(*nft_set_ext_obj(ext))->use--;
+}
+
 static int nft_del_setelem(struct nft_ctx *ctx, struct nft_set *set,
 			   const struct nlattr *attr)
 {
@@ -3927,6 +4020,8 @@ static int nft_del_setelem(struct nft_ctx *ctx, struct nft_set *set,
 	kfree(elem.priv);
 	elem.priv = priv;
 
+	nft_set_elem_deactivate(ctx->net, set, &elem);
+
 	nft_trans_elem(trans) = elem;
 	list_add_tail(&trans->list, &ctx->net->nft.commit_list);
 	return 0;
@@ -3936,7 +4031,7 @@ err4:
 err3:
 	kfree(elem.priv);
 err2:
-	nft_data_uninit(&elem.key.val, desc.type);
+	nft_data_release(&elem.key.val, desc.type);
 err1:
 	return err;
 }
@@ -4743,8 +4838,8 @@ static void nf_tables_commit_release(struct nft_trans *trans)
 		nft_set_destroy(nft_trans_set(trans));
 		break;
 	case NFT_MSG_DELSETELEM:
-		nft_set_elem_destroy(nft_trans_elem_set(trans),
-				     nft_trans_elem(trans).priv, true);
+		nf_tables_set_elem_destroy(nft_trans_elem_set(trans),
+					   nft_trans_elem(trans).priv);
 		break;
	case NFT_MSG_DELOBJ:
 		nft_obj_destroy(nft_trans_obj(trans));
@@ -4979,6 +5074,7 @@ static int nf_tables_abort(struct net *net, struct sk_buff *skb)
 	case NFT_MSG_DELSETELEM:
 		te = (struct nft_trans_elem *)trans->data;
 
+		nft_set_elem_activate(net, te->set, &te->elem);
 		te->set->ops->activate(net, te->set, &te->elem);
 		te->set->ndeact--;
 
@@ -5464,7 +5560,7 @@ int nft_data_init(const struct nft_ctx *ctx,
 EXPORT_SYMBOL_GPL(nft_data_init);
 
 /**
- * nft_data_uninit - release a nft_data item
+ * nft_data_release - release a nft_data item
  *
  * @data: struct nft_data to release
  * @type: type of data
@@ -5472,7 +5568,7 @@ EXPORT_SYMBOL_GPL(nft_data_init);
  * Release a nft_data item. NFT_DATA_VALUE types can be silently discarded,
 * all others need to be released by calling this function.
  */
-void nft_data_uninit(const struct nft_data *data, enum nft_data_types type)
+void nft_data_release(const struct nft_data *data, enum nft_data_types type)
 {
 	if (type < NFT_DATA_VERDICT)
 		return;
@@ -5483,7 +5579,7 @@ void nft_data_uninit(const struct nft_data *data, enum nft_data_types type)
 		WARN_ON(1);
 	}
 }
-EXPORT_SYMBOL_GPL(nft_data_uninit);
+EXPORT_SYMBOL_GPL(nft_data_release);
 
 int nft_data_dump(struct sk_buff *skb, int attr, const struct nft_data *data,
 		  enum nft_data_types type, unsigned int len)
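The dump rework above follows the standard netlink pattern: everything parsed from the original request is copied into a context allocated when the dump starts, handed over through netlink_dump_control.data, reused on every continuation call via cb->data, and freed in the .done callback, which runs even if the dump is aborted. Generic shape of that pattern, with the my_* names as placeholders:

#include <linux/netlink.h>
#include <linux/slab.h>
#include <net/netlink.h>

struct my_dump_ctx {
	u32 resume_id;			/* whatever state the dump resumes from */
};

static int my_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct my_dump_ctx *ctx = cb->data;	/* survives across recvmsg() calls */

	/* fill skb starting at ctx->resume_id, update ctx, then return
	 * skb->len to be called again or 0 once the dump is complete */
	return 0;
}

static int my_dump_done(struct netlink_callback *cb)
{
	kfree(cb->data);			/* always reached, even on abort */
	return 0;
}

static int my_start_dump(struct sock *nlsk, struct sk_buff *skb,
			 const struct nlmsghdr *nlh)
{
	struct netlink_dump_control c = {
		.dump = my_dump,
		.done = my_dump_done,
	};
	struct my_dump_ctx *ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);

	if (!ctx)
		return -ENOMEM;
	ctx->resume_id = 0;
	c.data = ctx;
	return netlink_dump_start(nlsk, skb, nlh, &c);
}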
diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c
index 950bf6eadc65..be678a323598 100644
--- a/net/netfilter/nfnetlink_cthelper.c
+++ b/net/netfilter/nfnetlink_cthelper.c
@@ -686,6 +686,7 @@ static int nfnl_cthelper_del(struct net *net, struct sock *nfnl,
 		tuple_set = true;
 	}
 
+	ret = -ENOENT;
 	list_for_each_entry_safe(nlcth, n, &nfnl_cthelper_list, list) {
 		cur = &nlcth->helper;
 		j++;
@@ -699,16 +700,20 @@ static int nfnl_cthelper_del(struct net *net, struct sock *nfnl,
 		     tuple.dst.protonum != cur->tuple.dst.protonum))
 			continue;
 
-		found = true;
-		nf_conntrack_helper_unregister(cur);
-		kfree(cur->expect_policy);
+		if (refcount_dec_if_one(&cur->refcnt)) {
+			found = true;
+			nf_conntrack_helper_unregister(cur);
+			kfree(cur->expect_policy);
 
-		list_del(&nlcth->list);
-		kfree(nlcth);
+			list_del(&nlcth->list);
+			kfree(nlcth);
+		} else {
+			ret = -EBUSY;
+		}
 	}
 
 	/* Make sure we return success if we flush and there is no helpers */
-	return (found || j == 0) ? 0 : -ENOENT;
+	return (found || j == 0) ? 0 : ret;
 }
 
 static const struct nla_policy nfnl_cthelper_policy[NFCTH_MAX+1] = {
diff --git a/net/netfilter/nft_bitwise.c b/net/netfilter/nft_bitwise.c
index 877d9acd91ef..fff8073e2a56 100644
--- a/net/netfilter/nft_bitwise.c
+++ b/net/netfilter/nft_bitwise.c
@@ -83,17 +83,26 @@ static int nft_bitwise_init(const struct nft_ctx *ctx,
 			    tb[NFTA_BITWISE_MASK]);
 	if (err < 0)
 		return err;
-	if (d1.len != priv->len)
-		return -EINVAL;
+	if (d1.len != priv->len) {
+		err = -EINVAL;
+		goto err1;
+	}
 
 	err = nft_data_init(NULL, &priv->xor, sizeof(priv->xor), &d2,
 			    tb[NFTA_BITWISE_XOR]);
 	if (err < 0)
-		return err;
-	if (d2.len != priv->len)
-		return -EINVAL;
+		goto err1;
+	if (d2.len != priv->len) {
+		err = -EINVAL;
+		goto err2;
+	}
 
 	return 0;
+err2:
+	nft_data_release(&priv->xor, d2.type);
+err1:
+	nft_data_release(&priv->mask, d1.type);
+	return err;
 }
 
 static int nft_bitwise_dump(struct sk_buff *skb, const struct nft_expr *expr)
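The nft_bitwise hunk converts early returns into the kernel's usual goto unwind ladder: once the mask is initialised, any later failure must release it, and a failure after the xor value is initialised must release both, in reverse order. A self-contained toy version of the pattern, with acquire()/release() standing in for nft_data_init()/nft_data_release():

#include <errno.h>
#include <stdio.h>

static int acquire(const char *what)  { printf("acquire %s\n", what); return 0; }
static void release(const char *what) { printf("release %s\n", what); }

/* Each label undoes exactly one step and falls through to the previous,
 * so the release order is always the reverse of the acquire order. */
static int init_two_resources(int fail_validate)
{
	int err;

	err = acquire("mask");
	if (err)
		return err;		/* nothing to undo yet */

	err = acquire("xor");
	if (err)
		goto err_mask;		/* undo mask only */

	if (fail_validate) {
		err = -EINVAL;
		goto err_xor;		/* undo xor, then fall through to mask */
	}
	return 0;

err_xor:
	release("xor");
err_mask:
	release("mask");
	return err;
}

int main(void)
{
	return init_two_resources(1) == -EINVAL ? 0 : 1;
}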
diff --git a/net/netfilter/nft_cmp.c b/net/netfilter/nft_cmp.c
index 2b96effeadc1..c2945eb3397c 100644
--- a/net/netfilter/nft_cmp.c
+++ b/net/netfilter/nft_cmp.c
@@ -201,10 +201,18 @@ nft_cmp_select_ops(const struct nft_ctx *ctx, const struct nlattr * const tb[])
 	if (err < 0)
 		return ERR_PTR(err);
 
+	if (desc.type != NFT_DATA_VALUE) {
+		err = -EINVAL;
+		goto err1;
+	}
+
 	if (desc.len <= sizeof(u32) && op == NFT_CMP_EQ)
 		return &nft_cmp_fast_ops;
-	else
-		return &nft_cmp_ops;
+
+	return &nft_cmp_ops;
+err1:
+	nft_data_release(&data, desc.type);
+	return ERR_PTR(-EINVAL);
 }
 
 struct nft_expr_type nft_cmp_type __read_mostly = {
diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c
index a34ceb38fc55..1678e9e75e8e 100644
--- a/net/netfilter/nft_ct.c
+++ b/net/netfilter/nft_ct.c
@@ -826,9 +826,9 @@ static void nft_ct_helper_obj_destroy(struct nft_object *obj)
 	struct nft_ct_helper_obj *priv = nft_obj_data(obj);
 
 	if (priv->helper4)
-		module_put(priv->helper4->me);
+		nf_conntrack_helper_put(priv->helper4);
 	if (priv->helper6)
-		module_put(priv->helper6->me);
+		nf_conntrack_helper_put(priv->helper6);
 }
 
 static void nft_ct_helper_obj_eval(struct nft_object *obj,
diff --git a/net/netfilter/nft_immediate.c b/net/netfilter/nft_immediate.c
index 728baf88295a..4717d7796927 100644
--- a/net/netfilter/nft_immediate.c
+++ b/net/netfilter/nft_immediate.c
@@ -65,7 +65,7 @@ static int nft_immediate_init(const struct nft_ctx *ctx,
 	return 0;
 
 err1:
-	nft_data_uninit(&priv->data, desc.type);
+	nft_data_release(&priv->data, desc.type);
 	return err;
 }
 
@@ -73,7 +73,8 @@ static void nft_immediate_destroy(const struct nft_ctx *ctx,
 				  const struct nft_expr *expr)
 {
 	const struct nft_immediate_expr *priv = nft_expr_priv(expr);
-	return nft_data_uninit(&priv->data, nft_dreg_to_type(priv->dreg));
+
+	return nft_data_release(&priv->data, nft_dreg_to_type(priv->dreg));
 }
 
 static int nft_immediate_dump(struct sk_buff *skb, const struct nft_expr *expr)
diff --git a/net/netfilter/nft_range.c b/net/netfilter/nft_range.c
index 9edc74eedc10..cedb96c3619f 100644
--- a/net/netfilter/nft_range.c
+++ b/net/netfilter/nft_range.c
@@ -102,9 +102,9 @@ static int nft_range_init(const struct nft_ctx *ctx, const struct nft_expr *expr
 	priv->len = desc_from.len;
 	return 0;
 err2:
-	nft_data_uninit(&priv->data_to, desc_to.type);
+	nft_data_release(&priv->data_to, desc_to.type);
 err1:
-	nft_data_uninit(&priv->data_from, desc_from.type);
+	nft_data_release(&priv->data_from, desc_from.type);
 	return err;
 }
 
diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c
index 8ec086b6b56b..3d3a6df4ce70 100644
--- a/net/netfilter/nft_set_hash.c
+++ b/net/netfilter/nft_set_hash.c
@@ -222,7 +222,7 @@ static void nft_hash_walk(const struct nft_ctx *ctx, struct nft_set *set,
 	struct nft_set_elem elem;
 	int err;
 
-	err = rhashtable_walk_init(&priv->ht, &hti, GFP_KERNEL);
+	err = rhashtable_walk_init(&priv->ht, &hti, GFP_ATOMIC);
 	iter->err = err;
 	if (err)
 		return;
diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
index e97e2fb53f0a..fbdbaa00dd5f 100644
--- a/net/netfilter/nft_set_rbtree.c
+++ b/net/netfilter/nft_set_rbtree.c
@@ -116,17 +116,17 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
 		else if (d > 0)
 			p = &parent->rb_right;
 		else {
-			if (nft_set_elem_active(&rbe->ext, genmask)) {
-				if (nft_rbtree_interval_end(rbe) &&
-				    !nft_rbtree_interval_end(new))
-					p = &parent->rb_left;
-				else if (!nft_rbtree_interval_end(rbe) &&
-					 nft_rbtree_interval_end(new))
-					p = &parent->rb_right;
-				else {
-					*ext = &rbe->ext;
-					return -EEXIST;
-				}
-			}
+			if (nft_rbtree_interval_end(rbe) &&
+			    !nft_rbtree_interval_end(new)) {
+				p = &parent->rb_left;
+			} else if (!nft_rbtree_interval_end(rbe) &&
+				   nft_rbtree_interval_end(new)) {
+				p = &parent->rb_right;
+			} else if (nft_set_elem_active(&rbe->ext, genmask)) {
+				*ext = &rbe->ext;
+				return -EEXIST;
+			} else {
+				p = &parent->rb_left;
+			}
 		}
 	}
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 8876b7da6884..1770c1d9b37f 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -283,28 +283,30 @@ static int xt_obj_to_user(u16 __user *psize, u16 size,
 		    &U->u.user.revision, K->u.kernel.TYPE->revision)
 
 int xt_data_to_user(void __user *dst, const void *src,
-		    int usersize, int size)
+		    int usersize, int size, int aligned_size)
 {
 	usersize = usersize ? : size;
 	if (copy_to_user(dst, src, usersize))
 		return -EFAULT;
-	if (usersize != size && clear_user(dst + usersize, size - usersize))
+	if (usersize != aligned_size &&
+	    clear_user(dst + usersize, aligned_size - usersize))
 		return -EFAULT;
 
 	return 0;
 }
 EXPORT_SYMBOL_GPL(xt_data_to_user);
 
-#define XT_DATA_TO_USER(U, K, TYPE, C_SIZE)			\
+#define XT_DATA_TO_USER(U, K, TYPE)				\
 	xt_data_to_user(U->data, K->data,			\
 			K->u.kernel.TYPE->usersize,		\
-			C_SIZE ? : K->u.kernel.TYPE->TYPE##size)
+			K->u.kernel.TYPE->TYPE##size,		\
+			XT_ALIGN(K->u.kernel.TYPE->TYPE##size))
 
 int xt_match_to_user(const struct xt_entry_match *m,
 		     struct xt_entry_match __user *u)
 {
 	return XT_OBJ_TO_USER(u, m, match, 0) ||
-	       XT_DATA_TO_USER(u, m, match, 0);
+	       XT_DATA_TO_USER(u, m, match);
 }
 EXPORT_SYMBOL_GPL(xt_match_to_user);
 
@@ -312,7 +314,7 @@ int xt_target_to_user(const struct xt_entry_target *t,
 		      struct xt_entry_target __user *u)
 {
 	return XT_OBJ_TO_USER(u, t, target, 0) ||
-	       XT_DATA_TO_USER(u, t, target, 0);
+	       XT_DATA_TO_USER(u, t, target);
 }
 EXPORT_SYMBOL_GPL(xt_target_to_user);
 
@@ -611,6 +613,12 @@ void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
 }
 EXPORT_SYMBOL_GPL(xt_compat_match_from_user);
 
+#define COMPAT_XT_DATA_TO_USER(U, K, TYPE, C_SIZE)		\
+	xt_data_to_user(U->data, K->data,			\
+			K->u.kernel.TYPE->usersize,		\
+			C_SIZE,					\
+			COMPAT_XT_ALIGN(C_SIZE))
+
 int xt_compat_match_to_user(const struct xt_entry_match *m,
 			    void __user **dstptr, unsigned int *size)
 {
@@ -626,7 +634,7 @@ int xt_compat_match_to_user(const struct xt_entry_match *m,
 		if (match->compat_to_user((void __user *)cm->data, m->data))
 			return -EFAULT;
 	} else {
-		if (XT_DATA_TO_USER(cm, m, match, msize - sizeof(*cm)))
+		if (COMPAT_XT_DATA_TO_USER(cm, m, match, msize - sizeof(*cm)))
 			return -EFAULT;
 	}
 
@@ -972,7 +980,7 @@ int xt_compat_target_to_user(const struct xt_entry_target *t,
 		if (target->compat_to_user((void __user *)ct->data, t->data))
 			return -EFAULT;
 	} else {
-		if (XT_DATA_TO_USER(ct, t, target, tsize - sizeof(*ct)))
+		if (COMPAT_XT_DATA_TO_USER(ct, t, target, tsize - sizeof(*ct)))
 			return -EFAULT;
 	}
 
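The x_tables change closes an infoleak: match/target payloads are rounded up to XT_ALIGN() (or COMPAT_XT_ALIGN() for 32-bit callers) when embedded in a ruleset blob, and the old helper only cleared up to the declared size, leaving the alignment padding as uninitialised kernel memory visible to userspace. A reduced sketch of the fixed copy helper, assuming kernel context (copy_to_user()/clear_user() from <linux/uaccess.h>):

#include <linux/uaccess.h>

static int data_to_user(void __user *dst, const void *src,
			int usersize, int size, int aligned_size)
{
	usersize = usersize ? : size;	/* 0 means "the whole struct is ABI" */
	if (copy_to_user(dst, src, usersize))
		return -EFAULT;
	/* zero the private tail *and* the alignment padding after it */
	if (usersize != aligned_size &&
	    clear_user(dst + usersize, aligned_size - usersize))
		return -EFAULT;
	return 0;
}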
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
index bb7ad82dcd56..623ef37de886 100644
--- a/net/netfilter/xt_CT.c
+++ b/net/netfilter/xt_CT.c
@@ -96,7 +96,7 @@ xt_ct_set_helper(struct nf_conn *ct, const char *helper_name,
 
 	help = nf_ct_helper_ext_add(ct, helper, GFP_KERNEL);
 	if (help == NULL) {
-		module_put(helper->me);
+		nf_conntrack_helper_put(helper);
 		return -ENOMEM;
 	}
 
@@ -263,7 +263,7 @@ out:
 err4:
 	help = nfct_help(ct);
 	if (help)
-		module_put(help->helper->me);
+		nf_conntrack_helper_put(help->helper);
 err3:
 	nf_ct_tmpl_free(ct);
 err2:
@@ -346,7 +346,7 @@ static void xt_ct_tg_destroy(const struct xt_tgdtor_param *par,
 	if (ct) {
 		help = nfct_help(ct);
 		if (help)
-			module_put(help->helper->me);
+			nf_conntrack_helper_put(help->helper);
 
 		nf_ct_netns_put(par->net, par->family);
 
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index ee841f00a6ec..7586d446d7dc 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -62,6 +62,7 @@
 #include <asm/cacheflush.h>
 #include <linux/hash.h>
 #include <linux/genetlink.h>
+#include <linux/net_namespace.h>
 
 #include <net/net_namespace.h>
 #include <net/sock.h>
@@ -1415,7 +1416,8 @@ static void do_one_broadcast(struct sock *sk,
 		goto out;
 	}
 	NETLINK_CB(p->skb2).nsid = peernet2id(sock_net(sk), p->net);
-	NETLINK_CB(p->skb2).nsid_is_set = true;
+	if (NETLINK_CB(p->skb2).nsid != NETNSA_NSID_NOT_ASSIGNED)
+		NETLINK_CB(p->skb2).nsid_is_set = true;
 	val = netlink_broadcast_deliver(sk, p->skb2);
 	if (val < 0) {
 		netlink_overrun(sk);
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index bf602e33c40a..08679ebb3068 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -1123,7 +1123,7 @@ static int ovs_ct_add_helper(struct ovs_conntrack_info *info, const char *name,
 
 	help = nf_ct_helper_ext_add(info->ct, helper, GFP_KERNEL);
 	if (!help) {
-		module_put(helper->me);
+		nf_conntrack_helper_put(helper);
 		return -ENOMEM;
 	}
 
@@ -1584,7 +1584,7 @@ void ovs_ct_free_action(const struct nlattr *a)
 static void __ovs_ct_free_action(struct ovs_conntrack_info *ct_info)
 {
 	if (ct_info->helper)
-		module_put(ct_info->helper->me);
+		nf_conntrack_helper_put(ct_info->helper);
 	if (ct_info->ct)
 		nf_ct_tmpl_free(ct_info->ct);
 }
diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c
index 89193a634da4..04a3128adcf0 100644
--- a/net/openvswitch/vport-internal_dev.c
+++ b/net/openvswitch/vport-internal_dev.c
@@ -94,7 +94,6 @@ static void internal_dev_destructor(struct net_device *dev)
 	struct vport *vport = ovs_internal_dev_get_vport(dev);
 
 	ovs_vport_free(vport);
-	free_netdev(dev);
 }
 
 static void
@@ -156,7 +155,8 @@ static void do_setup(struct net_device *netdev)
 	netdev->priv_flags &= ~IFF_TX_SKB_SHARING;
 	netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_OPENVSWITCH |
 			      IFF_PHONY_HEADROOM | IFF_NO_QUEUE;
-	netdev->destructor = internal_dev_destructor;
+	netdev->needs_free_netdev = true;
+	netdev->priv_destructor = internal_dev_destructor;
 	netdev->ethtool_ops = &internal_dev_ethtool_ops;
 	netdev->rtnl_link_ops = &internal_dev_link_ops;
 
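Several hunks in this merge (mac802154, openvswitch, phonet) belong to one conversion: the old net_device->destructor is split into a needs_free_netdev flag, which makes the networking core call free_netdev() itself at the right point, plus an optional priv_destructor for driver-private teardown, so drivers can no longer double-free or leak the netdev. A sketch of what a converted setup() typically looks like (the my_* names are placeholders):

#include <linux/netdevice.h>

static void my_uninit(struct net_device *dev)
{
	/* release only driver-private state; do NOT call free_netdev() */
}

static void my_setup(struct net_device *dev)
{
	dev->needs_free_netdev = true;		/* the core frees the netdev */
	dev->priv_destructor = my_uninit;	/* the driver frees its extras */
}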
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index f4001763134d..e3eeed19cc7a 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2658,13 +2658,6 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
 		dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
 	}
 
-	sockc.tsflags = po->sk.sk_tsflags;
-	if (msg->msg_controllen) {
-		err = sock_cmsg_send(&po->sk, msg, &sockc);
-		if (unlikely(err))
-			goto out;
-	}
-
 	err = -ENXIO;
 	if (unlikely(dev == NULL))
 		goto out;
@@ -2672,6 +2665,13 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
 	if (unlikely(!(dev->flags & IFF_UP)))
 		goto out_put;
 
+	sockc.tsflags = po->sk.sk_tsflags;
+	if (msg->msg_controllen) {
+		err = sock_cmsg_send(&po->sk, msg, &sockc);
+		if (unlikely(err))
+			goto out_put;
+	}
+
 	if (po->sk.sk_socket->type == SOCK_RAW)
 		reserve = dev->hard_header_len;
 	size_max = po->tx_ring.frame_size
diff --git a/net/phonet/pep-gprs.c b/net/phonet/pep-gprs.c
index 21c28b51be94..2c9337946e30 100644
--- a/net/phonet/pep-gprs.c
+++ b/net/phonet/pep-gprs.c
@@ -236,7 +236,7 @@ static void gprs_setup(struct net_device *dev)
 	dev->tx_queue_len = 10;
 
 	dev->netdev_ops = &gprs_netdev_ops;
-	dev->destructor = free_netdev;
+	dev->needs_free_netdev = true;
 }
 
 /*
diff --git a/net/rxrpc/key.c b/net/rxrpc/key.c
index 0a4e28477ad9..54369225766e 100644
--- a/net/rxrpc/key.c
+++ b/net/rxrpc/key.c
@@ -217,7 +217,7 @@ static int rxrpc_krb5_decode_principal(struct krb5_principal *princ,
 				       unsigned int *_toklen)
 {
 	const __be32 *xdr = *_xdr;
-	unsigned int toklen = *_toklen, n_parts, loop, tmp;
+	unsigned int toklen = *_toklen, n_parts, loop, tmp, paddedlen;
 
 	/* there must be at least one name, and at least #names+1 length
 	 * words */
@@ -247,16 +247,16 @@ static int rxrpc_krb5_decode_principal(struct krb5_principal *princ,
 		toklen -= 4;
 		if (tmp <= 0 || tmp > AFSTOKEN_STRING_MAX)
 			return -EINVAL;
-		if (tmp > toklen)
+		paddedlen = (tmp + 3) & ~3;
+		if (paddedlen > toklen)
 			return -EINVAL;
 		princ->name_parts[loop] = kmalloc(tmp + 1, GFP_KERNEL);
 		if (!princ->name_parts[loop])
 			return -ENOMEM;
 		memcpy(princ->name_parts[loop], xdr, tmp);
 		princ->name_parts[loop][tmp] = 0;
-		tmp = (tmp + 3) & ~3;
-		toklen -= tmp;
-		xdr += tmp >> 2;
+		toklen -= paddedlen;
+		xdr += paddedlen >> 2;
 	}
 
 	if (toklen < 4)
@@ -265,16 +265,16 @@ static int rxrpc_krb5_decode_principal(struct krb5_principal *princ,
 	toklen -= 4;
 	if (tmp <= 0 || tmp > AFSTOKEN_K5_REALM_MAX)
 		return -EINVAL;
-	if (tmp > toklen)
+	paddedlen = (tmp + 3) & ~3;
+	if (paddedlen > toklen)
 		return -EINVAL;
 	princ->realm = kmalloc(tmp + 1, GFP_KERNEL);
 	if (!princ->realm)
 		return -ENOMEM;
 	memcpy(princ->realm, xdr, tmp);
 	princ->realm[tmp] = 0;
-	tmp = (tmp + 3) & ~3;
-	toklen -= tmp;
-	xdr += tmp >> 2;
+	toklen -= paddedlen;
+	xdr += paddedlen >> 2;
 
 	_debug("%s/...@%s", princ->name_parts[0], princ->realm);
 
@@ -293,7 +293,7 @@ static int rxrpc_krb5_decode_tagged_data(struct krb5_tagged_data *td,
 					 unsigned int *_toklen)
 {
 	const __be32 *xdr = *_xdr;
-	unsigned int toklen = *_toklen, len;
+	unsigned int toklen = *_toklen, len, paddedlen;
 
 	/* there must be at least one tag and one length word */
 	if (toklen <= 8)
@@ -307,15 +307,17 @@ static int rxrpc_krb5_decode_tagged_data(struct krb5_tagged_data *td,
 	toklen -= 8;
 	if (len > max_data_size)
 		return -EINVAL;
+	paddedlen = (len + 3) & ~3;
+	if (paddedlen > toklen)
+		return -EINVAL;
 	td->data_len = len;
 
 	if (len > 0) {
 		td->data = kmemdup(xdr, len, GFP_KERNEL);
 		if (!td->data)
 			return -ENOMEM;
-		len = (len + 3) & ~3;
-		toklen -= len;
-		xdr += len >> 2;
+		toklen -= paddedlen;
+		xdr += paddedlen >> 2;
 	}
 
 	_debug("tag %x len %x", td->tag, td->data_len);
@@ -387,7 +389,7 @@ static int rxrpc_krb5_decode_ticket(u8 **_ticket, u16 *_tktlen,
 				    const __be32 **_xdr, unsigned int *_toklen)
 {
 	const __be32 *xdr = *_xdr;
-	unsigned int toklen = *_toklen, len;
+	unsigned int toklen = *_toklen, len, paddedlen;
 
 	/* there must be at least one length word */
 	if (toklen <= 4)
@@ -399,6 +401,9 @@ static int rxrpc_krb5_decode_ticket(u8 **_ticket, u16 *_tktlen,
 	toklen -= 4;
 	if (len > AFSTOKEN_K5_TIX_MAX)
 		return -EINVAL;
+	paddedlen = (len + 3) & ~3;
+	if (paddedlen > toklen)
+		return -EINVAL;
 	*_tktlen = len;
 
 	_debug("ticket len %u", len);
@@ -407,9 +412,8 @@ static int rxrpc_krb5_decode_ticket(u8 **_ticket, u16 *_tktlen,
 		*_ticket = kmemdup(xdr, len, GFP_KERNEL);
 		if (!*_ticket)
 			return -ENOMEM;
-		len = (len + 3) & ~3;
-		toklen -= len;
-		xdr += len >> 2;
+		toklen -= paddedlen;
+		xdr += paddedlen >> 2;
 	}
 
 	*_xdr = xdr;
@@ -552,7 +556,7 @@ static int rxrpc_preparse_xdr(struct key_preparsed_payload *prep)
 {
 	const __be32 *xdr = prep->data, *token;
 	const char *cp;
-	unsigned int len, tmp, loop, ntoken, toklen, sec_ix;
+	unsigned int len, paddedlen, loop, ntoken, toklen, sec_ix;
 	size_t datalen = prep->datalen;
 	int ret;
 
@@ -578,22 +582,21 @@ static int rxrpc_preparse_xdr(struct key_preparsed_payload *prep)
 	if (len < 1 || len > AFSTOKEN_CELL_MAX)
 		goto not_xdr;
 	datalen -= 4;
-	tmp = (len + 3) & ~3;
-	if (tmp > datalen)
+	paddedlen = (len + 3) & ~3;
+	if (paddedlen > datalen)
 		goto not_xdr;
 
 	cp = (const char *) xdr;
 	for (loop = 0; loop < len; loop++)
 		if (!isprint(cp[loop]))
 			goto not_xdr;
-	if (len < tmp)
-		for (; loop < tmp; loop++)
-			if (cp[loop])
-				goto not_xdr;
+	for (; loop < paddedlen; loop++)
+		if (cp[loop])
+			goto not_xdr;
 	_debug("cellname: [%u/%u] '%*.*s'",
-	       len, tmp, len, len, (const char *) xdr);
-	datalen -= tmp;
-	xdr += tmp >> 2;
+	       len, paddedlen, len, len, (const char *) xdr);
+	datalen -= paddedlen;
+	xdr += paddedlen >> 2;
 
 	/* get the token count */
 	if (datalen < 12)
@@ -614,10 +617,11 @@ static int rxrpc_preparse_xdr(struct key_preparsed_payload *prep)
 		sec_ix = ntohl(*xdr);
 		datalen -= 4;
 		_debug("token: [%x/%zx] %x", toklen, datalen, sec_ix);
-		if (toklen < 20 || toklen > datalen)
+		paddedlen = (toklen + 3) & ~3;
+		if (toklen < 20 || toklen > datalen || paddedlen > datalen)
 			goto not_xdr;
-		datalen -= (toklen + 3) & ~3;
-		xdr += (toklen + 3) >> 2;
+		datalen -= paddedlen;
+		xdr += paddedlen >> 2;
 
 	} while (--loop > 0);
 
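Every rxrpc hunk above applies the same fix: XDR encodes opaque data padded to a 4-byte boundary, and the old code bounds-checked only the unpadded length before advancing by the padded one, so a length falling close to the end of the buffer let the cursor walk past it. Computing the padded length up front and checking that against the remaining space is the pattern; a standalone sketch:

#include <stddef.h>

/* Consume one XDR opaque of 'len' bytes from a buffer with 'remain'
 * bytes left; returns the padded size actually consumed, or 0 if the
 * item (including its pad bytes) would overrun the buffer. */
static size_t xdr_consume(size_t len, size_t remain)
{
	size_t paddedlen = (len + 3) & ~(size_t)3;	/* round up to 4 */

	if (paddedlen < len)		/* overflow guard for huge len */
		return 0;
	if (paddedlen > remain)		/* the old code compared only len */
		return 0;
	return paddedlen;		/* the caller advances by this much */
}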
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 164b5ac094be..7dc5892671c8 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -94,8 +94,10 @@ static struct tcf_pedit_key_ex *tcf_pedit_keys_ex_parse(struct nlattr *nla,
 		k++;
 	}
 
-	if (n)
+	if (n) {
+		err = -EINVAL;
 		goto err_out;
+	}
 
 	return keys_ex;
 
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index f42008b29311..b062bc80c7cb 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -132,21 +132,21 @@ static int tcf_act_police_init(struct net *net, struct nlattr *nla,
 		}
 	}
 
-	spin_lock_bh(&police->tcf_lock);
 	if (est) {
 		err = gen_replace_estimator(&police->tcf_bstats, NULL,
 					    &police->tcf_rate_est,
 					    &police->tcf_lock,
 					    NULL, est);
 		if (err)
-			goto failure_unlock;
+			goto failure;
 	} else if (tb[TCA_POLICE_AVRATE] &&
 		   (ret == ACT_P_CREATED ||
 		    !gen_estimator_active(&police->tcf_rate_est))) {
 		err = -EINVAL;
-		goto failure_unlock;
+		goto failure;
 	}
 
+	spin_lock_bh(&police->tcf_lock);
 	/* No failure allowed after this point */
 	police->tcfp_mtu = parm->mtu;
 	if (police->tcfp_mtu == 0) {
@@ -192,8 +192,6 @@ static int tcf_act_police_init(struct net *net, struct nlattr *nla,
 
 	return ret;
 
-failure_unlock:
-	spin_unlock_bh(&police->tcf_lock);
 failure:
 	qdisc_put_rtab(P_tab);
 	qdisc_put_rtab(R_tab);
diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c
index dee469fed967..51859b8edd7e 100644
--- a/net/sched/cls_matchall.c
+++ b/net/sched/cls_matchall.c
@@ -203,7 +203,6 @@ static int mall_change(struct net *net, struct sk_buff *in_skb,
 
 	*arg = (unsigned long) head;
 	rcu_assign_pointer(tp->root, new);
-	call_rcu(&head->rcu, mall_destroy_rcu);
 	return 0;
 
 err_replace_hw_filter:
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index bbe57d57b67f..cfdbfa18a95e 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -1019,7 +1019,8 @@ static struct Qdisc *qdisc_create(struct net_device *dev,
 		return sch;
 	}
 	/* ops->init() failed, we call ->destroy() like qdisc_create_dflt() */
-	ops->destroy(sch);
+	if (ops->destroy)
+		ops->destroy(sch);
 err_out3:
 	dev_put(dev);
 	kfree((char *) sch - sch->padded);
@@ -1831,6 +1832,12 @@ static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
 	if (!qdisc_dev(root))
 		return 0;
 
+	if (tcm->tcm_parent) {
+		q = qdisc_match_from_root(root, TC_H_MAJ(tcm->tcm_parent));
+		if (q && tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
+			return -1;
+		return 0;
+	}
 	hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) {
 		if (tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
 			return -1;
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index a9708da28eb5..95238284c422 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -1176,7 +1176,9 @@ void sctp_assoc_update(struct sctp_association *asoc,
 
 	asoc->ctsn_ack_point = asoc->next_tsn - 1;
 	asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
-	if (!asoc->stream) {
+
+	if (sctp_state(asoc, COOKIE_WAIT)) {
+		sctp_stream_free(asoc->stream);
 		asoc->stream = new->stream;
 		new->stream = NULL;
 	}
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index 8c589230794f..3dcd0ecf3d99 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -275,6 +275,7 @@ static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
 	if (sctp_sk(sk)->bind_hash)
 		sctp_put_port(sk);
 
+	sctp_sk(sk)->ep = NULL;
 	sock_put(sk);
 }
 
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 0e06a278d2a9..ba9ad32fc447 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -473,15 +473,14 @@ struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *skb,
 			     struct sctp_association **app,
 			     struct sctp_transport **tpp)
 {
+	struct sctp_init_chunk *chunkhdr, _chunkhdr;
 	union sctp_addr saddr;
 	union sctp_addr daddr;
 	struct sctp_af *af;
 	struct sock *sk = NULL;
 	struct sctp_association *asoc;
 	struct sctp_transport *transport = NULL;
-	struct sctp_init_chunk *chunkhdr;
 	__u32 vtag = ntohl(sctphdr->vtag);
-	int len = skb->len - ((void *)sctphdr - (void *)skb->data);
 
 	*app = NULL; *tpp = NULL;
 
@@ -516,13 +515,16 @@ struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *skb,
 	 * discard the packet.
 	 */
 	if (vtag == 0) {
-		chunkhdr = (void *)sctphdr + sizeof(struct sctphdr);
-		if (len < sizeof(struct sctphdr) + sizeof(sctp_chunkhdr_t)
-		    + sizeof(__be32) ||
+		/* chunk header + first 4 octets of init header */
+		chunkhdr = skb_header_pointer(skb, skb_transport_offset(skb) +
+					      sizeof(struct sctphdr),
+					      sizeof(struct sctp_chunkhdr) +
+					      sizeof(__be32), &_chunkhdr);
+		if (!chunkhdr ||
 		    chunkhdr->chunk_hdr.type != SCTP_CID_INIT ||
-		    ntohl(chunkhdr->init_hdr.init_tag) != asoc->c.my_vtag) {
+		    ntohl(chunkhdr->init_hdr.init_tag) != asoc->c.my_vtag)
 			goto out;
-		}
+
 	} else if (vtag != asoc->c.peer_vtag) {
 		goto out;
 	}
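The sctp_err_lookup fix replaces raw pointer arithmetic into the packet with skb_header_pointer(), which bounds-checks the requested span and copies it into a caller-supplied buffer when the data is not linear. A userspace sketch of that access pattern over a plain byte buffer (the helper name and struct are invented for illustration):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Bounds-checked "give me size bytes at offset" helper, in the spirit of
 * skb_header_pointer(): returns NULL instead of reading out of bounds.
 */
static const void *header_pointer(const uint8_t *pkt, size_t pkt_len,
				  size_t offset, size_t size, void *local)
{
	if (size > pkt_len || offset > pkt_len - size)
		return NULL;		/* truncated packet: refuse */
	memcpy(local, pkt + offset, size);
	return local;
}

struct chunkhdr {
	uint8_t type;
	uint8_t flags;
	uint16_t length;
};

int main(void)
{
	uint8_t pkt[] = { 0x01, 0x00, 0x00, 0x04 };
	struct chunkhdr _hdr;
	const struct chunkhdr *hdr;

	hdr = header_pointer(pkt, sizeof(pkt), 0, sizeof(_hdr), &_hdr);
	printf("type=%u\n", hdr ? hdr->type : 0);

	/* an out-of-bounds request fails cleanly instead of overreading */
	hdr = header_pointer(pkt, sizeof(pkt), 2, sizeof(_hdr), &_hdr);
	printf("%s\n", hdr ? "ok" : "rejected");
	return 0;
}

Passing a local _chunkhdr buffer, as the patch does, means the caller gets a valid pointer whether or not the header was contiguous in the original buffer.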
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 961ee59f696a..f5b45b8b8b16 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -240,12 +240,10 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
 	struct sctp_bind_addr *bp;
 	struct ipv6_pinfo *np = inet6_sk(sk);
 	struct sctp_sockaddr_entry *laddr;
-	union sctp_addr *baddr = NULL;
 	union sctp_addr *daddr = &t->ipaddr;
 	union sctp_addr dst_saddr;
 	struct in6_addr *final_p, final;
 	__u8 matchlen = 0;
-	__u8 bmatchlen;
 	sctp_scope_t scope;
 
 	memset(fl6, 0, sizeof(struct flowi6));
@@ -312,23 +310,37 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
 	 */
 	rcu_read_lock();
 	list_for_each_entry_rcu(laddr, &bp->address_list, list) {
-		if (!laddr->valid)
+		struct dst_entry *bdst;
+		__u8 bmatchlen;
+
+		if (!laddr->valid ||
+		    laddr->state != SCTP_ADDR_SRC ||
+		    laddr->a.sa.sa_family != AF_INET6 ||
+		    scope > sctp_scope(&laddr->a))
 			continue;
-		if ((laddr->state == SCTP_ADDR_SRC) &&
-		    (laddr->a.sa.sa_family == AF_INET6) &&
-		    (scope <= sctp_scope(&laddr->a))) {
-			bmatchlen = sctp_v6_addr_match_len(daddr, &laddr->a);
-			if (!baddr || (matchlen < bmatchlen)) {
-				baddr = &laddr->a;
-				matchlen = bmatchlen;
-			}
-		}
-	}
-	if (baddr) {
-		fl6->saddr = baddr->v6.sin6_addr;
-		fl6->fl6_sport = baddr->v6.sin6_port;
+
+		fl6->saddr = laddr->a.v6.sin6_addr;
+		fl6->fl6_sport = laddr->a.v6.sin6_port;
 		final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final);
-		dst = ip6_dst_lookup_flow(sk, fl6, final_p);
+		bdst = ip6_dst_lookup_flow(sk, fl6, final_p);
+
+		if (!IS_ERR(bdst) &&
+		    ipv6_chk_addr(dev_net(bdst->dev),
+				  &laddr->a.v6.sin6_addr, bdst->dev, 1)) {
+			if (!IS_ERR_OR_NULL(dst))
+				dst_release(dst);
+			dst = bdst;
+			break;
+		}
+
+		bmatchlen = sctp_v6_addr_match_len(daddr, &laddr->a);
+		if (matchlen > bmatchlen)
+			continue;
+
+		if (!IS_ERR_OR_NULL(dst))
+			dst_release(dst);
+		dst = bdst;
+		matchlen = bmatchlen;
 	}
 	rcu_read_unlock();
 
@@ -665,6 +677,9 @@ static struct sock *sctp_v6_create_accept_sk(struct sock *sk,
 	newnp = inet6_sk(newsk);
 
 	memcpy(newnp, np, sizeof(struct ipv6_pinfo));
+	newnp->ipv6_mc_list = NULL;
+	newnp->ipv6_ac_list = NULL;
+	newnp->ipv6_fl_list = NULL;
 
 	rcu_read_lock();
 	opt = rcu_dereference(np->opt);
diff --git a/net/sctp/sctp_diag.c b/net/sctp/sctp_diag.c
index 048954eee984..9a647214a91e 100644
--- a/net/sctp/sctp_diag.c
+++ b/net/sctp/sctp_diag.c
@@ -278,7 +278,6 @@ out:
 
 static int sctp_sock_dump(struct sock *sk, void *p)
 {
-	struct sctp_endpoint *ep = sctp_sk(sk)->ep;
 	struct sctp_comm_param *commp = p;
 	struct sk_buff *skb = commp->skb;
 	struct netlink_callback *cb = commp->cb;
@@ -287,7 +286,9 @@ static int sctp_sock_dump(struct sock *sk, void *p)
 	int err = 0;
 
 	lock_sock(sk);
-	list_for_each_entry(assoc, &ep->asocs, asocs) {
+	if (!sctp_sk(sk)->ep)
+		goto release;
+	list_for_each_entry(assoc, &sctp_sk(sk)->ep->asocs, asocs) {
 		if (cb->args[4] < cb->args[1])
 			goto next;
 
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 8a08f13469c4..92e332e17391 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -2454,16 +2454,11 @@ int sctp_process_init(struct sctp_association *asoc, struct sctp_chunk *chunk,
 	 * stream sequence number shall be set to 0.
 	 */
 
-	/* Allocate storage for the negotiated streams if it is not a temporary
-	 * association.
-	 */
-	if (!asoc->temp) {
-		if (sctp_stream_init(asoc, gfp))
-			goto clean_up;
+	if (sctp_stream_init(asoc, gfp))
+		goto clean_up;
 
-		if (sctp_assoc_set_id(asoc, gfp))
-			goto clean_up;
-	}
+	if (!asoc->temp && sctp_assoc_set_id(asoc, gfp))
+		goto clean_up;
 
 	/* ADDIP Section 4.1 ASCONF Chunk Procedures
 	 *
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 4f5e6cfc7f60..f863b5573e42 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -2088,6 +2088,9 @@ sctp_disposition_t sctp_sf_do_5_2_4_dupcook(struct net *net,
 		}
 	}
 
+	/* Set temp so that it won't be added into hashtable */
+	new_asoc->temp = 1;
+
 	/* Compare the tie_tag in cookie with the verification tag of
 	 * current association.
 	 */
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index f16c8d97b7f3..3a8318e518f1 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -4622,13 +4622,13 @@ int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *),
 
 	for (head = sctp_ep_hashtable; hash < sctp_ep_hashsize;
 	     hash++, head++) {
-		read_lock(&head->lock);
+		read_lock_bh(&head->lock);
 		sctp_for_each_hentry(epb, &head->chain) {
 			err = cb(sctp_ep(epb), p);
 			if (err)
 				break;
 		}
-		read_unlock(&head->lock);
+		read_unlock_bh(&head->lock);
 	}
 
 	return err;
@@ -4666,9 +4666,8 @@ int sctp_for_each_transport(int (*cb)(struct sctp_transport *, void *),
 	if (err)
 		return err;
 
-	sctp_transport_get_idx(net, &hti, pos);
-	obj = sctp_transport_get_next(net, &hti);
-	for (; obj && !IS_ERR(obj); obj = sctp_transport_get_next(net, &hti)) {
+	obj = sctp_transport_get_idx(net, &hti, pos + 1);
+	for (; !IS_ERR_OR_NULL(obj); obj = sctp_transport_get_next(net, &hti)) {
 		struct sctp_transport *transport = obj;
 
 		if (!sctp_transport_hold(transport))
diff --git a/net/smc/Kconfig b/net/smc/Kconfig
index c717ef0896aa..33954852f3f8 100644
--- a/net/smc/Kconfig
+++ b/net/smc/Kconfig
@@ -8,6 +8,10 @@ config SMC
 	  The Linux implementation of the SMC-R solution is designed as
 	  a separate socket family SMC.
 
+	  Warning: SMC will expose all memory for remote reads and writes
+	  once a connection is established. Don't enable this option except
+	  for tightly controlled lab environments.
+
 	  Select this option if you want to run SMC socket applications
 
 config SMC_DIAG
diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c
index e41f594a1e1d..03ec058d18df 100644
--- a/net/smc/smc_clc.c
+++ b/net/smc/smc_clc.c
@@ -204,7 +204,7 @@ int smc_clc_send_confirm(struct smc_sock *smc)
 	memcpy(&cclc.lcl.mac, &link->smcibdev->mac[link->ibport - 1], ETH_ALEN);
 	hton24(cclc.qpn, link->roce_qp->qp_num);
 	cclc.rmb_rkey =
-		htonl(conn->rmb_desc->mr_rx[SMC_SINGLE_LINK]->rkey);
+		htonl(conn->rmb_desc->rkey[SMC_SINGLE_LINK]);
 	cclc.conn_idx = 1; /* for now: 1 RMB = 1 RMBE */
 	cclc.rmbe_alert_token = htonl(conn->alert_token_local);
 	cclc.qp_mtu = min(link->path_mtu, link->peer_mtu);
@@ -256,7 +256,7 @@ int smc_clc_send_accept(struct smc_sock *new_smc, int srv_first_contact)
 	memcpy(&aclc.lcl.mac, link->smcibdev->mac[link->ibport - 1], ETH_ALEN);
 	hton24(aclc.qpn, link->roce_qp->qp_num);
 	aclc.rmb_rkey =
-		htonl(conn->rmb_desc->mr_rx[SMC_SINGLE_LINK]->rkey);
+		htonl(conn->rmb_desc->rkey[SMC_SINGLE_LINK]);
 	aclc.conn_idx = 1; /* as long as 1 RMB = 1 RMBE */
 	aclc.rmbe_alert_token = htonl(conn->alert_token_local);
 	aclc.qp_mtu = link->path_mtu;
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index 65020e93ff21..3ac09a629ea1 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -613,19 +613,8 @@ int smc_rmb_create(struct smc_sock *smc)
 			rmb_desc = NULL;
 			continue; /* if mapping failed, try smaller one */
 		}
-		rc = smc_ib_get_memory_region(lgr->lnk[SMC_SINGLE_LINK].roce_pd,
-					      IB_ACCESS_REMOTE_WRITE |
-					      IB_ACCESS_LOCAL_WRITE,
-					      &rmb_desc->mr_rx[SMC_SINGLE_LINK]);
-		if (rc) {
-			smc_ib_buf_unmap(lgr->lnk[SMC_SINGLE_LINK].smcibdev,
-					 tmp_bufsize, rmb_desc,
-					 DMA_FROM_DEVICE);
-			kfree(rmb_desc->cpu_addr);
-			kfree(rmb_desc);
-			rmb_desc = NULL;
-			continue;
-		}
+		rmb_desc->rkey[SMC_SINGLE_LINK] =
+			lgr->lnk[SMC_SINGLE_LINK].roce_pd->unsafe_global_rkey;
 		rmb_desc->used = 1;
 		write_lock_bh(&lgr->rmbs_lock);
 		list_add(&rmb_desc->list,
@@ -668,6 +657,7 @@ int smc_rmb_rtoken_handling(struct smc_connection *conn,
 
 	for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) {
 		if ((lgr->rtokens[i][SMC_SINGLE_LINK].rkey == rkey) &&
+		    (lgr->rtokens[i][SMC_SINGLE_LINK].dma_addr == dma_addr) &&
 		    test_bit(i, lgr->rtokens_used_mask)) {
 			conn->rtoken_idx = i;
 			return 0;
diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h
index 27eb38056a27..b013cb43a327 100644
--- a/net/smc/smc_core.h
+++ b/net/smc/smc_core.h
@@ -93,7 +93,7 @@ struct smc_buf_desc {
 	u64			dma_addr[SMC_LINKS_PER_LGR_MAX];
 						/* mapped address of buffer */
 	void			*cpu_addr;	/* virtual address of buffer */
-	struct ib_mr		*mr_rx[SMC_LINKS_PER_LGR_MAX];
+	u32			rkey[SMC_LINKS_PER_LGR_MAX];
 						/* for rmb only:
 						 * rkey provided to peer
 						 */
diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c
index cb69ab977cd7..b31715505a35 100644
--- a/net/smc/smc_ib.c
+++ b/net/smc/smc_ib.c
@@ -37,24 +37,6 @@ u8 local_systemid[SMC_SYSTEMID_LEN] = SMC_LOCAL_SYSTEMID_RESET; /* unique system
 						 * identifier
 						 */
 
-int smc_ib_get_memory_region(struct ib_pd *pd, int access_flags,
-			     struct ib_mr **mr)
-{
-	int rc;
-
-	if (*mr)
-		return 0; /* already done */
-
-	/* obtain unique key -
-	 * next invocation of get_dma_mr returns a different key!
-	 */
-	*mr = pd->device->get_dma_mr(pd, access_flags);
-	rc = PTR_ERR_OR_ZERO(*mr);
-	if (IS_ERR(*mr))
-		*mr = NULL;
-	return rc;
-}
-
 static int smc_ib_modify_qp_init(struct smc_link *lnk)
 {
 	struct ib_qp_attr qp_attr;
@@ -210,7 +192,8 @@ int smc_ib_create_protection_domain(struct smc_link *lnk)
 {
 	int rc;
 
-	lnk->roce_pd = ib_alloc_pd(lnk->smcibdev->ibdev, 0);
+	lnk->roce_pd = ib_alloc_pd(lnk->smcibdev->ibdev,
+				   IB_PD_UNSAFE_GLOBAL_RKEY);
 	rc = PTR_ERR_OR_ZERO(lnk->roce_pd);
 	if (IS_ERR(lnk->roce_pd))
 		lnk->roce_pd = NULL;
diff --git a/net/smc/smc_ib.h b/net/smc/smc_ib.h
index 7e1f0e24d177..b567152a526d 100644
--- a/net/smc/smc_ib.h
+++ b/net/smc/smc_ib.h
@@ -61,8 +61,6 @@ void smc_ib_dealloc_protection_domain(struct smc_link *lnk);
 int smc_ib_create_protection_domain(struct smc_link *lnk);
 void smc_ib_destroy_queue_pair(struct smc_link *lnk);
 int smc_ib_create_queue_pair(struct smc_link *lnk);
-int smc_ib_get_memory_region(struct ib_pd *pd, int access_flags,
-			     struct ib_mr **mr);
 int smc_ib_ready_link(struct smc_link *lnk);
 int smc_ib_modify_qp_rts(struct smc_link *lnk);
 int smc_ib_modify_qp_reset(struct smc_link *lnk);
diff --git a/net/sunrpc/xprtrdma/backchannel.c b/net/sunrpc/xprtrdma/backchannel.c
index 24fedd4b117e..03f6b5840764 100644
--- a/net/sunrpc/xprtrdma/backchannel.c
+++ b/net/sunrpc/xprtrdma/backchannel.c
@@ -119,11 +119,9 @@ int xprt_rdma_bc_setup(struct rpc_xprt *xprt, unsigned int reqs)
 
 	for (i = 0; i < (reqs << 1); i++) {
 		rqst = kzalloc(sizeof(*rqst), GFP_KERNEL);
-		if (!rqst) {
-			pr_err("RPC:       %s: Failed to create bc rpc_rqst\n",
-			       __func__);
+		if (!rqst)
 			goto out_free;
-		}
+
 		dprintk("RPC:       %s: new rqst %p\n", __func__, rqst);
 
 		rqst->rq_xprt = &r_xprt->rx_xprt;
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 16aff8ddc16f..d5b54c020dec 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -2432,7 +2432,12 @@ static void xs_tcp_setup_socket(struct work_struct *work)
 	case -ENETUNREACH:
 	case -EADDRINUSE:
 	case -ENOBUFS:
-		/* retry with existing socket, after a delay */
+		/*
+		 * xs_tcp_force_close() wakes tasks with -EIO.
+		 * We need to wake them first to ensure the
+		 * correct error code.
+		 */
+		xprt_wake_pending_tasks(xprt, status);
 		xs_tcp_force_close(xprt);
 		goto out;
 	}
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index 312ef7de57d7..ab3087687a32 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -508,7 +508,7 @@ bool tipc_msg_reverse(u32 own_node, struct sk_buff **skb, int err)
 	}
 
 	if (skb_cloned(_skb) &&
-	    pskb_expand_head(_skb, BUF_HEADROOM, BUF_TAILROOM, GFP_KERNEL))
+	    pskb_expand_head(_skb, BUF_HEADROOM, BUF_TAILROOM, GFP_ATOMIC))
 		goto exit;
 
 	/* Now reverse the concerned fields */
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 0d4f2f455a7c..1b92b72e812f 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -362,25 +362,25 @@ static int tipc_sk_sock_err(struct socket *sock, long *timeout)
 	return 0;
 }
 
-#define tipc_wait_for_cond(sock_, timeout_, condition_)		\
+#define tipc_wait_for_cond(sock_, timeo_, condition_)			\
 ({									\
-	int rc_ = 0;							\
-	int done_ = 0;							\
+	struct sock *sk_;						\
+	int rc_;							\
 									\
-	while (!(condition_) && !done_) {				\
-		struct sock *sk_ = sock->sk;				\
+	while ((rc_ = !(condition_))) {					\
 		DEFINE_WAIT_FUNC(wait_, woken_wake_function);		\
-									\
-		rc_ = tipc_sk_sock_err(sock_, timeout_);		\
+		sk_ = (sock_)->sk;					\
+		rc_ = tipc_sk_sock_err((sock_), timeo_);		\
 		if (rc_)						\
 			break;						\
-		prepare_to_wait(sk_sleep(sk_), &wait_,			\
-				TASK_INTERRUPTIBLE);			\
-		done_ = sk_wait_event(sk_, timeout_,			\
-				      (condition_), &wait_);		\
+		prepare_to_wait(sk_sleep(sk_), &wait_, TASK_INTERRUPTIBLE); \
+		release_sock(sk_);					\
+		*(timeo_) = wait_woken(&wait_, TASK_INTERRUPTIBLE, *(timeo_)); \
+		sched_annotate_sleep();					\
+		lock_sock(sk_);						\
+		remove_wait_queue(sk_sleep(sk_), &wait_);		\
 	}								\
 	rc_;								\
 })
 
 /**
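tipc_wait_for_cond relies on GCC's statement-expression extension: the ({ ... rc_; }) block is an expression whose value is its last statement, which lets a multi-line wait loop be used wherever an int is expected. A compact standalone example of the construct (unrelated to TIPC itself; the macro here is invented):

#include <stdio.h>

/* GNU C statement expression: the block evaluates to its last expression. */
#define retry_until(cond_, max_tries_)				\
({								\
	int tries_ = 0;						\
	while (!(cond_) && tries_ < (max_tries_))		\
		tries_++;					\
	tries_;		/* value of the whole macro */		\
})

int main(void)
{
	int x = 0;
	int used = retry_until(++x >= 3, 10);

	printf("tries used: %d, x=%d\n", used, x);	/* prints: 2, 3 */
	return 0;
}

Note how the rewritten kernel macro also dereferences its arguments with parentheses, (sock_) and *(timeo_), so that it expands safely for any expression the caller passes in.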
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 6a7fe7660551..1a0c961f4ffe 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -999,7 +999,8 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 	struct path path = { };
 
 	err = -EINVAL;
-	if (sunaddr->sun_family != AF_UNIX)
+	if (addr_len < offsetofend(struct sockaddr_un, sun_family) ||
+	    sunaddr->sun_family != AF_UNIX)
 		goto out;
 
 	if (addr_len == sizeof(short)) {
@@ -1110,6 +1111,10 @@ static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
 	unsigned int hash;
 	int err;
 
+	err = -EINVAL;
+	if (alen < offsetofend(struct sockaddr, sa_family))
+		goto out;
+
 	if (addr->sa_family != AF_UNSPEC) {
 		err = unix_mkname(sunaddr, alen, &hash);
 		if (err < 0)
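Both af_unix fixes validate that the caller passed at least enough bytes to cover sa_family before the field is read; offsetofend() expresses "the offset of the byte just past this member". A small sketch of the same check in plain C, with offsetofend defined locally the way the kernel defines it (the sockaddr_demo type is invented):

#include <stdio.h>
#include <stddef.h>

#define offsetofend(TYPE, MEMBER) \
	(offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

struct sockaddr_demo {
	unsigned short sa_family;
	char sa_data[14];
};

/* Reject addresses too short to even contain sa_family. */
static int check_addr(const void *addr, size_t addr_len)
{
	const struct sockaddr_demo *sa = addr;

	if (addr_len < offsetofend(struct sockaddr_demo, sa_family))
		return -1;	/* cannot safely read sa->sa_family */
	return sa->sa_family;
}

int main(void)
{
	struct sockaddr_demo sa = { .sa_family = 1 };

	printf("%d\n", check_addr(&sa, sizeof(sa)));	/* 1 */
	printf("%d\n", check_addr(&sa, 1));		/* -1: too short */
	return 0;
}

Without the length check, a malicious caller could have the kernel read sa_family from past the end of the user-supplied buffer.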
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index 6f7f6757ceef..dfc8c51e4d74 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -1540,8 +1540,7 @@ static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg,
 	long timeout;
 	int err;
 	struct vsock_transport_send_notify_data send_data;
-
-	DEFINE_WAIT(wait);
+	DEFINE_WAIT_FUNC(wait, woken_wake_function);
 
 	sk = sock->sk;
 	vsk = vsock_sk(sk);
@@ -1584,11 +1583,10 @@ static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg,
 	if (err < 0)
 		goto out;
 
-
 	while (total_written < len) {
 		ssize_t written;
 
-		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
+		add_wait_queue(sk_sleep(sk), &wait);
 		while (vsock_stream_has_space(vsk) == 0 &&
 		       sk->sk_err == 0 &&
 		       !(sk->sk_shutdown & SEND_SHUTDOWN) &&
@@ -1597,33 +1595,30 @@ static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg,
 			/* Don't wait for non-blocking sockets. */
 			if (timeout == 0) {
 				err = -EAGAIN;
-				finish_wait(sk_sleep(sk), &wait);
+				remove_wait_queue(sk_sleep(sk), &wait);
 				goto out_err;
 			}
 
 			err = transport->notify_send_pre_block(vsk, &send_data);
 			if (err < 0) {
-				finish_wait(sk_sleep(sk), &wait);
+				remove_wait_queue(sk_sleep(sk), &wait);
 				goto out_err;
 			}
 
 			release_sock(sk);
-			timeout = schedule_timeout(timeout);
+			timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout);
 			lock_sock(sk);
 			if (signal_pending(current)) {
 				err = sock_intr_errno(timeout);
-				finish_wait(sk_sleep(sk), &wait);
+				remove_wait_queue(sk_sleep(sk), &wait);
 				goto out_err;
 			} else if (timeout == 0) {
 				err = -EAGAIN;
-				finish_wait(sk_sleep(sk), &wait);
+				remove_wait_queue(sk_sleep(sk), &wait);
 				goto out_err;
 			}
-
-			prepare_to_wait(sk_sleep(sk), &wait,
-					TASK_INTERRUPTIBLE);
 		}
-		finish_wait(sk_sleep(sk), &wait);
+		remove_wait_queue(sk_sleep(sk), &wait);
 
 		/* These checks occur both as part of and after the loop
 		 * conditional since we need to check before and after
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 14d5f0c8c45f..9f0901f3e42b 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -322,9 +322,9 @@ cfg80211_find_sched_scan_req(struct cfg80211_registered_device *rdev, u64 reqid)
 {
 	struct cfg80211_sched_scan_request *pos;
 
-	ASSERT_RTNL();
+	WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
 
-	list_for_each_entry(pos, &rdev->sched_scan_req_list, list) {
+	list_for_each_entry_rcu(pos, &rdev->sched_scan_req_list, list) {
 		if (pos->reqid == reqid)
 			return pos;
 	}
@@ -398,13 +398,13 @@ void cfg80211_sched_scan_results(struct wiphy *wiphy, u64 reqid)
 	trace_cfg80211_sched_scan_results(wiphy, reqid);
 	/* ignore if we're not scanning */
 
-	rtnl_lock();
+	rcu_read_lock();
 	request = cfg80211_find_sched_scan_req(rdev, reqid);
 	if (request) {
 		request->report_results = true;
 		queue_work(cfg80211_wq, &rdev->sched_scan_res_wk);
 	}
-	rtnl_unlock();
+	rcu_read_unlock();
 }
 EXPORT_SYMBOL(cfg80211_sched_scan_results);
 
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 7198373e2920..4992f1025c9d 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -454,6 +454,8 @@ int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
 	if (iftype == NL80211_IFTYPE_MESH_POINT)
 		skb_copy_bits(skb, hdrlen, &mesh_flags, 1);
 
+	mesh_flags &= MESH_FLAGS_AE;
+
 	switch (hdr->frame_control &
 		cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) {
 	case cpu_to_le16(IEEE80211_FCTL_TODS):
@@ -469,9 +471,9 @@ int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
 			  iftype != NL80211_IFTYPE_STATION))
 			return -1;
 		if (iftype == NL80211_IFTYPE_MESH_POINT) {
-			if (mesh_flags & MESH_FLAGS_AE_A4)
+			if (mesh_flags == MESH_FLAGS_AE_A4)
 				return -1;
-			if (mesh_flags & MESH_FLAGS_AE_A5_A6) {
+			if (mesh_flags == MESH_FLAGS_AE_A5_A6) {
 				skb_copy_bits(skb, hdrlen +
 					offsetof(struct ieee80211s_hdr, eaddr1),
 					tmp.h_dest, 2 * ETH_ALEN);
@@ -487,9 +489,9 @@ int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
 			    ether_addr_equal(tmp.h_source, addr)))
 			return -1;
 		if (iftype == NL80211_IFTYPE_MESH_POINT) {
-			if (mesh_flags & MESH_FLAGS_AE_A5_A6)
+			if (mesh_flags == MESH_FLAGS_AE_A5_A6)
 				return -1;
-			if (mesh_flags & MESH_FLAGS_AE_A4)
+			if (mesh_flags == MESH_FLAGS_AE_A4)
 				skb_copy_bits(skb, hdrlen +
 					offsetof(struct ieee80211s_hdr, eaddr1),
 					tmp.h_source, ETH_ALEN);
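The util.c change first masks mesh_flags down to the MESH_FLAGS_AE bits and then compares with ==; a plain & test matches whenever one bit is set, even when additional AE bits are also present, which is exactly the malformed case the patch rejects. The difference in a few runnable lines of C (constants are stand-ins for the MESH_FLAGS_* values):

#include <stdio.h>

#define AE_A4	 0x1
#define AE_A5_A6 0x2
#define AE_MASK	 (AE_A4 | AE_A5_A6)

int main(void)
{
	unsigned flags = AE_A4 | AE_A5_A6;	/* both bits set: malformed */

	printf("& test:  %d\n", !!(flags & AE_A4));		/* 1: accepts */
	printf("== test: %d\n", (flags & AE_MASK) == AE_A4);	/* 0: rejects */
	return 0;
}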
diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
index 1a4db6790e20..6cdb054484d6 100644
--- a/net/wireless/wext-core.c
+++ b/net/wireless/wext-core.c
@@ -914,13 +914,12 @@ int call_commit_handler(struct net_device *dev)
  * Main IOCTl dispatcher.
  * Check the type of IOCTL and call the appropriate wrapper...
  */
-static int wireless_process_ioctl(struct net *net, struct ifreq *ifr,
+static int wireless_process_ioctl(struct net *net, struct iwreq *iwr,
 				  unsigned int cmd,
 				  struct iw_request_info *info,
 				  wext_ioctl_func standard,
 				  wext_ioctl_func private)
 {
-	struct iwreq *iwr = (struct iwreq *) ifr;
 	struct net_device *dev;
 	iw_handler handler;
 
@@ -928,7 +927,7 @@ static int wireless_process_ioctl(struct net *net, struct ifreq *ifr,
 	 * The copy_to/from_user() of ifr is also dealt with in there */
 
 	/* Make sure the device exist */
-	if ((dev = __dev_get_by_name(net, ifr->ifr_name)) == NULL)
+	if ((dev = __dev_get_by_name(net, iwr->ifr_name)) == NULL)
 		return -ENODEV;
 
 	/* A bunch of special cases, then the generic case...
@@ -957,9 +956,6 @@ static int wireless_process_ioctl(struct net *net, struct ifreq *ifr,
 		else if (private)
 			return private(dev, iwr, cmd, info, handler);
 	}
-	/* Old driver API : call driver ioctl handler */
-	if (dev->netdev_ops->ndo_do_ioctl)
-		return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd);
 	return -EOPNOTSUPP;
 }
 
@@ -977,7 +973,7 @@ static int wext_permission_check(unsigned int cmd)
 }
 
 /* entry point from dev ioctl */
-static int wext_ioctl_dispatch(struct net *net, struct ifreq *ifr,
+static int wext_ioctl_dispatch(struct net *net, struct iwreq *iwr,
 			       unsigned int cmd, struct iw_request_info *info,
 			       wext_ioctl_func standard,
 			       wext_ioctl_func private)
@@ -987,9 +983,9 @@ static int wext_ioctl_dispatch(struct net *net, struct ifreq *ifr,
 	if (ret)
 		return ret;
 
-	dev_load(net, ifr->ifr_name);
+	dev_load(net, iwr->ifr_name);
 	rtnl_lock();
-	ret = wireless_process_ioctl(net, ifr, cmd, info, standard, private);
+	ret = wireless_process_ioctl(net, iwr, cmd, info, standard, private);
 	rtnl_unlock();
 
 	return ret;
@@ -1039,18 +1035,18 @@ static int ioctl_standard_call(struct net_device * dev,
 }
 
 
-int wext_handle_ioctl(struct net *net, struct ifreq *ifr, unsigned int cmd,
+int wext_handle_ioctl(struct net *net, struct iwreq *iwr, unsigned int cmd,
 		      void __user *arg)
 {
 	struct iw_request_info info = { .cmd = cmd, .flags = 0 };
 	int ret;
 
-	ret = wext_ioctl_dispatch(net, ifr, cmd, &info,
+	ret = wext_ioctl_dispatch(net, iwr, cmd, &info,
 				  ioctl_standard_call,
 				  ioctl_private_call);
 	if (ret >= 0 &&
 	    IW_IS_GET(cmd) &&
-	    copy_to_user(arg, ifr, sizeof(struct iwreq)))
+	    copy_to_user(arg, iwr, sizeof(struct iwreq)))
 		return -EFAULT;
 
 	return ret;
@@ -1107,7 +1103,7 @@ int compat_wext_handle_ioctl(struct net *net, unsigned int cmd,
 	info.cmd = cmd;
 	info.flags = IW_REQUEST_FLAG_COMPAT;
 
-	ret = wext_ioctl_dispatch(net, (struct ifreq *) &iwr, cmd, &info,
+	ret = wext_ioctl_dispatch(net, &iwr, cmd, &info,
 				  compat_standard_call,
 				  compat_private_call);
 
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index 8b911c29860e..5a1a98df3499 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -1791,32 +1791,40 @@ void x25_kill_by_neigh(struct x25_neigh *nb)
 
 static int __init x25_init(void)
 {
-	int rc = proto_register(&x25_proto, 0);
+	int rc;
 
-	if (rc != 0)
+	rc = proto_register(&x25_proto, 0);
+	if (rc)
 		goto out;
 
 	rc = sock_register(&x25_family_ops);
-	if (rc != 0)
+	if (rc)
 		goto out_proto;
 
 	dev_add_pack(&x25_packet_type);
 
 	rc = register_netdevice_notifier(&x25_dev_notifier);
-	if (rc != 0)
+	if (rc)
 		goto out_sock;
 
-	pr_info("Linux Version 0.2\n");
+	rc = x25_register_sysctl();
+	if (rc)
+		goto out_dev;
 
-	x25_register_sysctl();
 	rc = x25_proc_init();
-	if (rc != 0)
-		goto out_dev;
+	if (rc)
+		goto out_sysctl;
+
+	pr_info("Linux Version 0.2\n");
+
 out:
 	return rc;
+out_sysctl:
+	x25_unregister_sysctl();
 out_dev:
 	unregister_netdevice_notifier(&x25_dev_notifier);
 out_sock:
+	dev_remove_pack(&x25_packet_type);
 	sock_unregister(AF_X25);
 out_proto:
 	proto_unregister(&x25_proto);
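x25_init now unwinds in strict reverse order of registration, with one label per successfully completed step; each new fallible step adds a label just above the previous unwind target. A reduced sketch of the ladder (the registration functions are stubs invented for illustration):

#include <stdio.h>

static int reg_a(void) { return 0; }
static int reg_b(void) { return 0; }
static int reg_c(void) { return -1; }	/* force a failure */
static void unreg_a(void) { puts("unreg a"); }
static void unreg_b(void) { puts("unreg b"); }

static int subsystem_init(void)
{
	int rc;

	rc = reg_a();
	if (rc)
		goto out;
	rc = reg_b();
	if (rc)
		goto out_a;
	rc = reg_c();
	if (rc)
		goto out_b;		/* unwind everything done so far */
	return 0;

out_b:
	unreg_b();
out_a:
	unreg_a();
out:
	return rc;
}

int main(void)
{
	printf("rc=%d\n", subsystem_init());	/* unreg b, unreg a, rc=-1 */
	return 0;
}

Falling through the labels in order guarantees that exactly the steps that succeeded are undone, which is also why the patch adds dev_remove_pack() under out_sock.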
diff --git a/net/x25/sysctl_net_x25.c b/net/x25/sysctl_net_x25.c
index a06dfe143c67..ba078c85f0a1 100644
--- a/net/x25/sysctl_net_x25.c
+++ b/net/x25/sysctl_net_x25.c
@@ -73,9 +73,12 @@ static struct ctl_table x25_table[] = {
 	{ },
 };
 
-void __init x25_register_sysctl(void)
+int __init x25_register_sysctl(void)
 {
 	x25_table_header = register_net_sysctl(&init_net, "net/x25", x25_table);
+	if (!x25_table_header)
+		return -ENOMEM;
+	return 0;
 }
 
 void x25_unregister_sysctl(void)
diff --git a/net/xfrm/Makefile b/net/xfrm/Makefile
index abf81b329dc1..55b2ac300995 100644
--- a/net/xfrm/Makefile
+++ b/net/xfrm/Makefile
@@ -4,8 +4,7 @@
 
 obj-$(CONFIG_XFRM) := xfrm_policy.o xfrm_state.o xfrm_hash.o \
 		      xfrm_input.o xfrm_output.o \
-		      xfrm_sysctl.o xfrm_replay.o
-obj-$(CONFIG_XFRM_OFFLOAD) += xfrm_device.o
+		      xfrm_sysctl.o xfrm_replay.o xfrm_device.o
 obj-$(CONFIG_XFRM_STATISTICS) += xfrm_proc.o
 obj-$(CONFIG_XFRM_ALGO) += xfrm_algo.o
 obj-$(CONFIG_XFRM_USER) += xfrm_user.o
diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c
index 8ec8a3fcf8d4..5aba03685d7d 100644
--- a/net/xfrm/xfrm_device.c
+++ b/net/xfrm/xfrm_device.c
@@ -22,6 +22,7 @@
 #include <net/xfrm.h>
 #include <linux/notifier.h>
 
+#ifdef CONFIG_XFRM_OFFLOAD
 int validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features)
 {
 	int err;
@@ -137,6 +138,7 @@ ok:
 	return true;
 }
 EXPORT_SYMBOL_GPL(xfrm_dev_offload_ok);
+#endif
 
 int xfrm_dev_register(struct net_device *dev)
 {
@@ -170,7 +172,7 @@ static int xfrm_dev_feat_change(struct net_device *dev)
 
 static int xfrm_dev_down(struct net_device *dev)
 {
-	if (dev->hw_features & NETIF_F_HW_ESP)
+	if (dev->features & NETIF_F_HW_ESP)
 		xfrm_dev_state_flush(dev_net(dev), dev, true);
 
 	xfrm_garbage_collect(dev_net(dev));
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index b00a1d5a7f52..643a18f72032 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1006,10 +1006,6 @@ int xfrm_policy_flush(struct net *net, u8 type, bool task_valid)
 		err = -ESRCH;
 out:
 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
-
-	if (cnt)
-		xfrm_garbage_collect(net);
-
 	return err;
 }
 EXPORT_SYMBOL(xfrm_policy_flush);
@@ -1797,43 +1793,6 @@ free_dst:
 	goto out;
 }
 
-#ifdef CONFIG_XFRM_SUB_POLICY
-static int xfrm_dst_alloc_copy(void **target, const void *src, int size)
-{
-	if (!*target) {
-		*target = kmalloc(size, GFP_ATOMIC);
-		if (!*target)
-			return -ENOMEM;
-	}
-
-	memcpy(*target, src, size);
-	return 0;
-}
-#endif
-
-static int xfrm_dst_update_parent(struct dst_entry *dst,
-				  const struct xfrm_selector *sel)
-{
-#ifdef CONFIG_XFRM_SUB_POLICY
-	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
-	return xfrm_dst_alloc_copy((void **)&(xdst->partner),
-				   sel, sizeof(*sel));
-#else
-	return 0;
-#endif
-}
-
-static int xfrm_dst_update_origin(struct dst_entry *dst,
-				  const struct flowi *fl)
-{
-#ifdef CONFIG_XFRM_SUB_POLICY
-	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
-	return xfrm_dst_alloc_copy((void **)&(xdst->origin), fl, sizeof(*fl));
-#else
-	return 0;
-#endif
-}
-
 static int xfrm_expand_policies(const struct flowi *fl, u16 family,
 				struct xfrm_policy **pols,
 				int *num_pols, int *num_xfrms)
@@ -1905,16 +1864,6 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
 
 	xdst = (struct xfrm_dst *)dst;
 	xdst->num_xfrms = err;
-	if (num_pols > 1)
-		err = xfrm_dst_update_parent(dst, &pols[1]->selector);
-	else
-		err = xfrm_dst_update_origin(dst, fl);
-	if (unlikely(err)) {
-		dst_free(dst);
-		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR);
-		return ERR_PTR(err);
-	}
-
 	xdst->num_pols = num_pols;
 	memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
 	xdst->policy_genid = atomic_read(&pols[0]->genid);
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index fc3c5aa38754..2e291bc5f1fc 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -1383,6 +1383,8 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig)
 	x->curlft.add_time = orig->curlft.add_time;
 	x->km.state = orig->km.state;
 	x->km.seq = orig->km.seq;
+	x->replay = orig->replay;
+	x->preplay = orig->preplay;
 
 	return x;
 
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 38614df33ec8..86116e9aaf3d 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -2027,6 +2027,7 @@ static int xfrm_flush_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
 			return 0;
 		return err;
 	}
+	xfrm_garbage_collect(net);
 
 	c.data.type = type;
 	c.event = nlh->nlmsg_type;
diff --git a/samples/bpf/cookie_uid_helper_example.c b/samples/bpf/cookie_uid_helper_example.c
index b08ab4e88929..9d751e209f31 100644
--- a/samples/bpf/cookie_uid_helper_example.c
+++ b/samples/bpf/cookie_uid_helper_example.c
@@ -306,7 +306,9 @@ int main(int argc, char *argv[])
 	prog_attach_iptables(argv[2]);
 	if (cfg_test_traffic) {
 		if (signal(SIGINT, finish) == SIG_ERR)
-			error(1, errno, "register handler failed");
+			error(1, errno, "register SIGINT handler failed");
+		if (signal(SIGTERM, finish) == SIG_ERR)
+			error(1, errno, "register SIGTERM handler failed");
 		while (!test_finish) {
 			print_table();
 			printf("\n");
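This and the following sample changes register a SIGTERM handler alongside SIGINT so cleanup also runs when the process is terminated politely (for example by kill(1)), not only on Ctrl-C. The registration pattern in isolation, as a self-contained sketch:

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t stop;

static void on_exit_signal(int sig)
{
	(void)sig;
	stop = 1;			/* async-signal-safe flag only */
}

int main(void)
{
	if (signal(SIGINT, on_exit_signal) == SIG_ERR ||
	    signal(SIGTERM, on_exit_signal) == SIG_ERR) {
		perror("signal");
		return 1;
	}
	while (!stop)
		pause();		/* wait for SIGINT or SIGTERM */
	puts("cleaning up");		/* runs for both signals now */
	return 0;
}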
diff --git a/samples/bpf/offwaketime_user.c b/samples/bpf/offwaketime_user.c
index 9cce2a66bd66..512f87a5fd20 100644
--- a/samples/bpf/offwaketime_user.c
+++ b/samples/bpf/offwaketime_user.c
@@ -100,6 +100,7 @@ int main(int argc, char **argv)
 	setrlimit(RLIMIT_MEMLOCK, &r);
 
 	signal(SIGINT, int_exit);
+	signal(SIGTERM, int_exit);
 
 	if (load_kallsyms()) {
 		printf("failed to process /proc/kallsyms\n");
diff --git a/samples/bpf/sampleip_user.c b/samples/bpf/sampleip_user.c
index be59d7dcbdde..4ed690b907ff 100644
--- a/samples/bpf/sampleip_user.c
+++ b/samples/bpf/sampleip_user.c
@@ -180,6 +180,7 @@ int main(int argc, char **argv)
 		return 1;
 	}
 	signal(SIGINT, int_exit);
+	signal(SIGTERM, int_exit);
 
 	/* do sampling */
 	printf("Sampling at %d Hertz for %d seconds. Ctrl-C also ends.\n",
diff --git a/samples/bpf/trace_event_user.c b/samples/bpf/trace_event_user.c
index 0c5561d193a4..fa4336423da5 100644
--- a/samples/bpf/trace_event_user.c
+++ b/samples/bpf/trace_event_user.c
@@ -192,6 +192,7 @@ int main(int argc, char **argv)
 	setrlimit(RLIMIT_MEMLOCK, &r);
 
 	signal(SIGINT, int_exit);
+	signal(SIGTERM, int_exit);
 
 	if (load_kallsyms()) {
 		printf("failed to process /proc/kallsyms\n");
diff --git a/samples/bpf/tracex2_user.c b/samples/bpf/tracex2_user.c
index 7fee0f1ba9a3..7321a3f253c9 100644
--- a/samples/bpf/tracex2_user.c
+++ b/samples/bpf/tracex2_user.c
@@ -127,6 +127,7 @@ int main(int ac, char **argv)
 	}
 
 	signal(SIGINT, int_exit);
+	signal(SIGTERM, int_exit);
 
 	/* start 'ping' in the background to have some kfree_skb events */
 	f = popen("ping -c5 localhost", "r");
diff --git a/samples/bpf/xdp1_user.c b/samples/bpf/xdp1_user.c
index 378850c70eb8..2431c0321b71 100644
--- a/samples/bpf/xdp1_user.c
+++ b/samples/bpf/xdp1_user.c
@@ -62,13 +62,14 @@ static void usage(const char *prog)
 	fprintf(stderr,
 		"usage: %s [OPTS] IFINDEX\n\n"
 		"OPTS:\n"
-		"    -S    use skb-mode\n",
+		"    -S    use skb-mode\n"
+		"    -N    enforce native mode\n",
 		prog);
 }
 
 int main(int argc, char **argv)
 {
-	const char *optstr = "S";
+	const char *optstr = "SN";
 	char filename[256];
 	int opt;
 
@@ -77,6 +78,9 @@ int main(int argc, char **argv)
 		case 'S':
 			xdp_flags |= XDP_FLAGS_SKB_MODE;
 			break;
+		case 'N':
+			xdp_flags |= XDP_FLAGS_DRV_MODE;
+			break;
 		default:
 			usage(basename(argv[0]));
 			return 1;
@@ -102,6 +106,7 @@ int main(int argc, char **argv)
 	}
 
 	signal(SIGINT, int_exit);
+	signal(SIGTERM, int_exit);
 
 	if (set_link_xdp_fd(ifindex, prog_fd[0], xdp_flags) < 0) {
 		printf("link set xdp fd failed\n");
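Adding the -N option here follows the usual getopt() recipe: extend the optstring, add a case that sets the corresponding flag, and document it in usage(). Reduced to its essentials (the FLAG_* values are stand-ins for the real XDP_FLAGS_* constants):

#include <stdio.h>
#include <unistd.h>

#define FLAG_SKB_MODE (1U << 1)	/* stand-ins for XDP_FLAGS_* */
#define FLAG_DRV_MODE (1U << 2)

int main(int argc, char **argv)
{
	const char *optstr = "SN";	/* one letter per boolean flag */
	unsigned int flags = 0;
	int opt;

	while ((opt = getopt(argc, argv, optstr)) != -1) {
		switch (opt) {
		case 'S':
			flags |= FLAG_SKB_MODE;
			break;
		case 'N':
			flags |= FLAG_DRV_MODE;
			break;
		default:
			fprintf(stderr, "usage: %s [-S] [-N]\n", argv[0]);
			return 1;
		}
	}
	printf("flags=0x%x\n", flags);
	return 0;
}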
diff --git a/samples/bpf/xdp_tx_iptunnel_user.c b/samples/bpf/xdp_tx_iptunnel_user.c
index 92b8bde9337c..715cd12eaca5 100644
--- a/samples/bpf/xdp_tx_iptunnel_user.c
+++ b/samples/bpf/xdp_tx_iptunnel_user.c
@@ -79,6 +79,8 @@ static void usage(const char *cmd)
 	printf("    -m <dest-MAC> Used in sending the IP Tunneled pkt\n");
 	printf("    -T <stop-after-X-seconds> Default: 0 (forever)\n");
 	printf("    -P <IP-Protocol> Default is TCP\n");
+	printf("    -S use skb-mode\n");
+	printf("    -N enforce native mode\n");
 	printf("    -h Display this help\n");
 }
 
@@ -138,7 +140,7 @@ int main(int argc, char **argv)
 {
 	unsigned char opt_flags[256] = {};
 	unsigned int kill_after_s = 0;
-	const char *optstr = "i:a:p:s:d:m:T:P:Sh";
+	const char *optstr = "i:a:p:s:d:m:T:P:SNh";
 	int min_port = 0, max_port = 0;
 	struct iptnl_info tnl = {};
 	struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
@@ -206,6 +208,9 @@ int main(int argc, char **argv)
 		case 'S':
 			xdp_flags |= XDP_FLAGS_SKB_MODE;
 			break;
+		case 'N':
+			xdp_flags |= XDP_FLAGS_DRV_MODE;
+			break;
 		default:
 			usage(argv[0]);
 			return 1;
@@ -239,6 +244,7 @@ int main(int argc, char **argv)
 	}
 
 	signal(SIGINT, int_exit);
+	signal(SIGTERM, int_exit);
 
 	while (min_port <= max_port) {
 		vip.dport = htons(min_port++);
diff --git a/scripts/Makefile.headersinst b/scripts/Makefile.headersinst
index 6ba97a1f9c5a..c583a1e1bd3c 100644
--- a/scripts/Makefile.headersinst
+++ b/scripts/Makefile.headersinst
@@ -8,6 +8,37 @@
 #
 # ==========================================================================
 
+PHONY := __headers
+__headers:
+
+include scripts/Kbuild.include
+
+srcdir := $(srctree)/$(obj)
+
+# When make is run under a fakechroot environment, the function
+# $(wildcard $(srcdir)/*/.) doesn't only return directories, but also regular
+# files. So, we are using a combination of sort/dir/wildcard which works
+# with fakechroot.
+subdirs := $(patsubst $(srcdir)/%/,%,\
+	   $(filter-out $(srcdir)/,\
+	   $(sort $(dir $(wildcard $(srcdir)/*/)))))
+
+# caller may set destination dir (when installing to asm/)
+_dst := $(if $(dst),$(dst),$(obj))
+
+# Recursion
+__headers: $(subdirs)
+
+.PHONY: $(subdirs)
+$(subdirs):
+	$(Q)$(MAKE) $(hdr-inst)=$(obj)/$@ dst=$(_dst)/$@
+
+# Skip header install/check for include/uapi and arch/$(hdr-arch)/include/uapi.
+# We have only sub-directories there.
+skip-inst := $(if $(filter %/uapi,$(obj)),1)
+
+ifeq ($(skip-inst),)
+
 # generated header directory
 gen := $(if $(gen),$(gen),$(subst include/,include/generated/,$(obj)))
 
@@ -15,21 +46,14 @@ gen := $(if $(gen),$(gen),$(subst include/,include/generated/,$(obj)))
 kbuild-file := $(srctree)/$(obj)/Kbuild
 -include $(kbuild-file)
 
-# called may set destination dir (when installing to asm/)
-_dst := $(if $(dst),$(dst),$(obj))
-
 old-kbuild-file := $(srctree)/$(subst uapi/,,$(obj))/Kbuild
 ifneq ($(wildcard $(old-kbuild-file)),)
 include $(old-kbuild-file)
 endif
 
-include scripts/Kbuild.include
-
 installdir := $(INSTALL_HDR_PATH)/$(subst uapi/,,$(_dst))
 
-srcdir := $(srctree)/$(obj)
 gendir := $(objtree)/$(gen)
-subdirs := $(patsubst $(srcdir)/%/.,%,$(wildcard $(srcdir)/*/.))
 header-files := $(notdir $(wildcard $(srcdir)/*.h))
 header-files += $(notdir $(wildcard $(srcdir)/*.agh))
 header-files := $(filter-out $(no-export-headers), $(header-files))
@@ -88,11 +112,9 @@ quiet_cmd_check = CHECK $(printdir) ($(words $(all-files)) files)
 	$(PERL) $< $(INSTALL_HDR_PATH)/include $(SRCARCH); \
 	touch $@
 
-PHONY += __headersinst __headerscheck
-
 ifndef HDRCHECK
 # Rules for installing headers
-__headersinst: $(subdirs) $(install-file)
+__headers: $(install-file)
 	@:
 
 targets += $(install-file)
@@ -104,7 +126,7 @@ $(install-file): scripts/headers_install.sh \
 	$(call if_changed,install)
 
 else
-__headerscheck: $(subdirs) $(check-file)
+__headers: $(check-file)
 	@:
 
 targets += $(check-file)
@@ -113,11 +135,6 @@ $(check-file): scripts/headers_check.pl $(output-files) FORCE
 
 endif
 
-# Recursion
-.PHONY: $(subdirs)
-$(subdirs):
-	$(Q)$(MAKE) $(hdr-inst)=$(obj)/$@ dst=$(_dst)/$@
-
 targets := $(wildcard $(sort $(targets)))
 cmd_files := $(wildcard \
              $(foreach f,$(targets),$(dir $(f)).$(notdir $(f)).cmd))
@@ -126,6 +143,8 @@ ifneq ($(cmd_files),)
   include $(cmd_files)
 endif
 
+endif # skip-inst
+
 .PHONY: $(PHONY)
 PHONY += FORCE
 FORCE: ;
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index 6dc1eda13b8e..58c05e5d9870 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -175,7 +175,7 @@ ld_flags = $(LDFLAGS) $(ldflags-y)
 
 dtc_cpp_flags  = -Wp,-MD,$(depfile).pre.tmp -nostdinc                    \
 		 -I$(srctree)/arch/$(SRCARCH)/boot/dts                   \
-		 -I$(srctree)/arch/$(SRCARCH)/boot/dts/include           \
+		 -I$(srctree)/scripts/dtc/include-prefixes               \
 		 -I$(srctree)/drivers/of/testcase-data                   \
 		 -undef -D__DTS__
 
diff --git a/scripts/dtc/checks.c b/scripts/dtc/checks.c
index 5adfc8f52b4f..4b72b530c84f 100644
--- a/scripts/dtc/checks.c
+++ b/scripts/dtc/checks.c
@@ -873,7 +873,7 @@ static void check_simple_bus_reg(struct check *c, struct dt_info *dti, struct no
 	while (size--)
 		reg = (reg << 32) | fdt32_to_cpu(*(cells++));
 
-	snprintf(unit_addr, sizeof(unit_addr), "%lx", reg);
+	snprintf(unit_addr, sizeof(unit_addr), "%zx", reg);
 	if (!streq(unitname, unit_addr))
 		FAIL(c, dti, "Node %s simple-bus unit address format error, expected \"%s\"",
 		     node->fullpath, unit_addr);
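The dtc fix swaps %lx for %zx so that the printf length modifier matches the declared type of reg; when the modifier and argument width diverge (as they can between 32-bit and LP64 targets), compilers warn and the output can be wrong. A tiny demonstration of keeping modifiers in sync with types; %zx pairs with size_t and PRIx64 is the portable choice for a 64-bit value:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>
#include <stddef.h>

int main(void)
{
	size_t off = 0x1000;		/* %zx is the size_t modifier */
	uint64_t reg = 0x20000000ULL;	/* PRIx64 is portable for u64 */

	printf("off=%zx reg=%" PRIx64 "\n", off, reg);
	return 0;
}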
diff --git a/scripts/dtc/include-prefixes/arc b/scripts/dtc/include-prefixes/arc
new file mode 120000
index 000000000000..5d21b5a69a11
--- /dev/null
+++ b/scripts/dtc/include-prefixes/arc
@@ -0,0 +1 @@
+../../../arch/arc/boot/dts
\ No newline at end of file
diff --git a/scripts/dtc/include-prefixes/arm b/scripts/dtc/include-prefixes/arm
new file mode 120000
index 000000000000..eb14d4515a57
--- /dev/null
+++ b/scripts/dtc/include-prefixes/arm
@@ -0,0 +1 @@
+../../../arch/arm/boot/dts
\ No newline at end of file
diff --git a/scripts/dtc/include-prefixes/arm64 b/scripts/dtc/include-prefixes/arm64
new file mode 120000
index 000000000000..275c42c21d71
--- /dev/null
+++ b/scripts/dtc/include-prefixes/arm64
@@ -0,0 +1 @@
+../../../arch/arm64/boot/dts
\ No newline at end of file
diff --git a/scripts/dtc/include-prefixes/c6x b/scripts/dtc/include-prefixes/c6x
new file mode 120000
index 000000000000..49ded4cae2be
--- /dev/null
+++ b/scripts/dtc/include-prefixes/c6x
@@ -0,0 +1 @@
+../../../arch/c6x/boot/dts
\ No newline at end of file
diff --git a/scripts/dtc/include-prefixes/cris b/scripts/dtc/include-prefixes/cris
new file mode 120000
index 000000000000..736d998ba506
--- /dev/null
+++ b/scripts/dtc/include-prefixes/cris
@@ -0,0 +1 @@
+../../../arch/cris/boot/dts
\ No newline at end of file
diff --git a/scripts/dtc/include-prefixes/dt-bindings b/scripts/dtc/include-prefixes/dt-bindings
new file mode 120000
index 000000000000..04fdbb3af016
--- /dev/null
+++ b/scripts/dtc/include-prefixes/dt-bindings
@@ -0,0 +1 @@
+../../../include/dt-bindings
\ No newline at end of file
diff --git a/scripts/dtc/include-prefixes/h8300 b/scripts/dtc/include-prefixes/h8300
new file mode 120000
index 000000000000..3bdaa332c54c
--- /dev/null
+++ b/scripts/dtc/include-prefixes/h8300
@@ -0,0 +1 @@
+../../../arch/h8300/boot/dts
\ No newline at end of file
diff --git a/scripts/dtc/include-prefixes/metag b/scripts/dtc/include-prefixes/metag
new file mode 120000
index 000000000000..87a3c847db8f
--- /dev/null
+++ b/scripts/dtc/include-prefixes/metag
@@ -0,0 +1 @@
+../../../arch/metag/boot/dts
\ No newline at end of file
diff --git a/scripts/dtc/include-prefixes/microblaze b/scripts/dtc/include-prefixes/microblaze
new file mode 120000
index 000000000000..d9830330a21d
--- /dev/null
+++ b/scripts/dtc/include-prefixes/microblaze
@@ -0,0 +1 @@
+../../../arch/microblaze/boot/dts
\ No newline at end of file
diff --git a/scripts/dtc/include-prefixes/mips b/scripts/dtc/include-prefixes/mips
new file mode 120000
index 000000000000..ae8d4948dc8d
--- /dev/null
+++ b/scripts/dtc/include-prefixes/mips
@@ -0,0 +1 @@
+../../../arch/mips/boot/dts
\ No newline at end of file
diff --git a/scripts/dtc/include-prefixes/nios2 b/scripts/dtc/include-prefixes/nios2
new file mode 120000
index 000000000000..51772336d13f
--- /dev/null
+++ b/scripts/dtc/include-prefixes/nios2
@@ -0,0 +1 @@
+../../../arch/nios2/boot/dts
\ No newline at end of file
diff --git a/scripts/dtc/include-prefixes/openrisc b/scripts/dtc/include-prefixes/openrisc
new file mode 120000
index 000000000000..71c3bc75c560
--- /dev/null
+++ b/scripts/dtc/include-prefixes/openrisc
@@ -0,0 +1 @@
+../../../arch/openrisc/boot/dts
\ No newline at end of file
diff --git a/scripts/dtc/include-prefixes/powerpc b/scripts/dtc/include-prefixes/powerpc
new file mode 120000
index 000000000000..7cd6ec16e899
--- /dev/null
+++ b/scripts/dtc/include-prefixes/powerpc
@@ -0,0 +1 @@
+../../../arch/powerpc/boot/dts
\ No newline at end of file
diff --git a/scripts/dtc/include-prefixes/sh b/scripts/dtc/include-prefixes/sh
new file mode 120000
index 000000000000..67d37808c599
--- /dev/null
+++ b/scripts/dtc/include-prefixes/sh
@@ -0,0 +1 @@
+../../../arch/sh/boot/dts
\ No newline at end of file
diff --git a/scripts/dtc/include-prefixes/xtensa b/scripts/dtc/include-prefixes/xtensa
new file mode 120000
index 000000000000..d1eaf6ec7a2b
--- /dev/null
+++ b/scripts/dtc/include-prefixes/xtensa
@@ -0,0 +1 @@
+../../../arch/xtensa/boot/dts
\ No newline at end of file
diff --git a/scripts/gdb/linux/dmesg.py b/scripts/gdb/linux/dmesg.py
index f9b92ece7834..5afd1098e33a 100644
--- a/scripts/gdb/linux/dmesg.py
+++ b/scripts/gdb/linux/dmesg.py
@@ -23,10 +23,11 @@ class LxDmesg(gdb.Command):
         super(LxDmesg, self).__init__("lx-dmesg", gdb.COMMAND_DATA)
 
     def invoke(self, arg, from_tty):
-        log_buf_addr = int(str(gdb.parse_and_eval("log_buf")).split()[0], 16)
-        log_first_idx = int(gdb.parse_and_eval("log_first_idx"))
-        log_next_idx = int(gdb.parse_and_eval("log_next_idx"))
-        log_buf_len = int(gdb.parse_and_eval("log_buf_len"))
+        log_buf_addr = int(str(gdb.parse_and_eval(
+            "'printk.c'::log_buf")).split()[0], 16)
+        log_first_idx = int(gdb.parse_and_eval("'printk.c'::log_first_idx"))
+        log_next_idx = int(gdb.parse_and_eval("'printk.c'::log_next_idx"))
+        log_buf_len = int(gdb.parse_and_eval("'printk.c'::log_buf_len"))
 
         inf = gdb.inferiors()[0]
         start = log_buf_addr + log_first_idx
diff --git a/scripts/genksyms/genksyms.h b/scripts/genksyms/genksyms.h
index 3bffdcaaa274..b724a0290c75 100644
--- a/scripts/genksyms/genksyms.h
+++ b/scripts/genksyms/genksyms.h
@@ -75,7 +75,7 @@ struct string_list *copy_list_range(struct string_list *start,
 int yylex(void);
 int yyparse(void);
 
-void error_with_pos(const char *, ...);
+void error_with_pos(const char *, ...) __attribute__ ((format(printf, 1, 2)));
 
 /*----------------------------------------------------------------------*/
 #define xmalloc(size) ({ void *__ptr = malloc(size); \
diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile
index 90a091b6ae4d..eb8144643b78 100644
--- a/scripts/kconfig/Makefile
+++ b/scripts/kconfig/Makefile
@@ -196,7 +196,7 @@ clean-files += config.pot linux.pot
 
 # Check that we have the required ncurses stuff installed for lxdialog (menuconfig)
 PHONY += $(obj)/dochecklxdialog
-$(addprefix $(obj)/,$(lxdialog)): $(obj)/dochecklxdialog
+$(addprefix $(obj)/, mconf.o $(lxdialog)): $(obj)/dochecklxdialog
 $(obj)/dochecklxdialog:
 	$(Q)$(CONFIG_SHELL) $(check-lxdialog) -check $(HOSTCC) $(HOST_EXTRACFLAGS) $(HOSTLOADLIBES_mconf)
 
diff --git a/scripts/kconfig/nconf.c b/scripts/kconfig/nconf.c
index a9bc5334a478..003114779815 100644
--- a/scripts/kconfig/nconf.c
+++ b/scripts/kconfig/nconf.c
@@ -271,7 +271,7 @@ static struct mitem k_menu_items[MAX_MENU_ITEMS];
 static int items_num;
 static int global_exit;
 /* the currently selected button */
-const char *current_instructions = menu_instructions;
+static const char *current_instructions = menu_instructions;
 
 static char *dialog_input_result;
 static int dialog_input_result_len;
@@ -305,7 +305,7 @@ struct function_keys {
 };
 
 static const int function_keys_num = 9;
-struct function_keys function_keys[] = {
+static struct function_keys function_keys[] = {
 	{
 		.key_str = "F1",
 		.func = "Help",
@@ -508,7 +508,7 @@ static int get_mext_match(const char *match_str, match_f flag)
 	index = (index + items_num) % items_num;
 	while (true) {
 		char *str = k_menu_items[index].str;
-		if (strcasestr(str, match_str) != 0)
+		if (strcasestr(str, match_str) != NULL)
 			return index;
 		if (flag == FIND_NEXT_MATCH_UP ||
 		    flag == MATCH_TINKER_PATTERN_UP)
@@ -1067,7 +1067,7 @@ static int do_match(int key, struct match_state *state, int *ans)
 
 static void conf(struct menu *menu)
 {
-	struct menu *submenu = 0;
+	struct menu *submenu = NULL;
 	const char *prompt = menu_get_prompt(menu);
 	struct symbol *sym;
 	int res;
@@ -1234,7 +1234,7 @@ static void show_help(struct menu *menu)
 static void conf_choice(struct menu *menu)
 {
 	const char *prompt = _(menu_get_prompt(menu));
-	struct menu *child = 0;
+	struct menu *child = NULL;
 	struct symbol *active;
 	int selected_index = 0;
 	int last_top_row = 0;
@@ -1456,7 +1456,7 @@ static void conf_save(void)
 	}
 }
 
-void setup_windows(void)
+static void setup_windows(void)
 {
 	int lines, columns;
 
diff --git a/scripts/kconfig/nconf.gui.c b/scripts/kconfig/nconf.gui.c
index 4b2f44c20caf..a64b1c31253e 100644
--- a/scripts/kconfig/nconf.gui.c
+++ b/scripts/kconfig/nconf.gui.c
@@ -129,7 +129,7 @@ static void no_colors_theme(void)
 	mkattrn(FUNCTION_TEXT, A_REVERSE);
 }
 
-void set_colors()
+void set_colors(void)
 {
 	start_color();
 	use_default_colors();
@@ -192,7 +192,7 @@ const char *get_line(const char *text, int line_no)
 	int lines = 0;
 
 	if (!text)
-		return 0;
+		return NULL;
 
 	for (i = 0; text[i] != '\0' && lines < line_no; i++)
 		if (text[i] == '\n')
diff --git a/scripts/tags.sh b/scripts/tags.sh
index d661f2f3ef61..d23dcbf17457 100755
--- a/scripts/tags.sh
+++ b/scripts/tags.sh
@@ -106,6 +106,7 @@ all_compiled_sources()
 			case "$i" in
 				*.[cS])
 					j=${i/\.[cS]/\.o}
+					j="${j#$tree}"
 					if [ -e $j ]; then
 						echo $i
 					fi
diff --git a/security/keys/Kconfig b/security/keys/Kconfig
index 6fd95f76bfae..a7a23b5541f8 100644
--- a/security/keys/Kconfig
+++ b/security/keys/Kconfig
@@ -20,6 +20,10 @@ config KEYS
 
 	  If you are unsure as to whether this is required, answer N.
 
+config KEYS_COMPAT
+	def_bool y
+	depends on COMPAT && KEYS
+
 config PERSISTENT_KEYRINGS
 	bool "Enable register of persistent per-UID keyrings"
 	depends on KEYS
@@ -89,9 +93,9 @@ config ENCRYPTED_KEYS
 config KEY_DH_OPERATIONS
 	bool "Diffie-Hellman operations on retained keys"
 	depends on KEYS
-	select MPILIB
 	select CRYPTO
 	select CRYPTO_HASH
+	select CRYPTO_DH
 	help
 	  This option provides support for calculating Diffie-Hellman
 	  public keys and shared secrets using values stored as keys
diff --git a/security/keys/dh.c b/security/keys/dh.c
index e603bd912e4c..4755d4b4f945 100644
--- a/security/keys/dh.c
+++ b/security/keys/dh.c
@@ -8,34 +8,17 @@
  * 2 of the License, or (at your option) any later version.
  */
 
-#include <linux/mpi.h>
 #include <linux/slab.h>
 #include <linux/uaccess.h>
+#include <linux/scatterlist.h>
 #include <linux/crypto.h>
 #include <crypto/hash.h>
+#include <crypto/kpp.h>
+#include <crypto/dh.h>
 #include <keys/user-type.h>
 #include "internal.h"
 
-/*
- * Public key or shared secret generation function [RFC2631 sec 2.1.1]
- *
- * ya = g^xa mod p;
- * or
- * ZZ = yb^xa mod p;
- *
- * where xa is the local private key, ya is the local public key, g is
- * the generator, p is the prime, yb is the remote public key, and ZZ
- * is the shared secret.
- *
- * Both are the same calculation, so g or yb are the "base" and ya or
- * ZZ are the "result".
- */
-static int do_dh(MPI result, MPI base, MPI xa, MPI p)
-{
-	return mpi_powm(result, base, xa, p);
-}
-
-static ssize_t mpi_from_key(key_serial_t keyid, size_t maxlen, MPI *mpi)
+static ssize_t dh_data_from_key(key_serial_t keyid, void **data)
 {
 	struct key *key;
 	key_ref_t key_ref;
@@ -56,19 +39,17 @@ static ssize_t mpi_from_key(key_serial_t keyid, size_t maxlen, MPI *mpi)
 	status = key_validate(key);
 	if (status == 0) {
 		const struct user_key_payload *payload;
+		uint8_t *duplicate;
 
 		payload = user_key_payload_locked(key);
 
-		if (maxlen == 0) {
-			*mpi = NULL;
+		duplicate = kmemdup(payload->data, payload->datalen,
+				    GFP_KERNEL);
+		if (duplicate) {
+			*data = duplicate;
 			ret = payload->datalen;
-		} else if (payload->datalen <= maxlen) {
-			*mpi = mpi_read_raw_data(payload->data,
-						 payload->datalen);
-			if (*mpi)
-				ret = payload->datalen;
 		} else {
-			ret = -EINVAL;
+			ret = -ENOMEM;
 		}
 	}
 	up_read(&key->sem);
@@ -79,6 +60,29 @@ error:
 	return ret;
 }
 
+static void dh_free_data(struct dh *dh)
+{
+	kzfree(dh->key);
+	kzfree(dh->p);
+	kzfree(dh->g);
+}
+
+struct dh_completion {
+	struct completion completion;
+	int err;
+};
+
+static void dh_crypto_done(struct crypto_async_request *req, int err)
+{
+	struct dh_completion *compl = req->data;
+
+	if (err == -EINPROGRESS)
+		return;
+
+	compl->err = err;
+	complete(&compl->completion);
+}
+
 struct kdf_sdesc {
 	struct shash_desc shash;
 	char ctx[];
@@ -89,6 +93,7 @@ static int kdf_alloc(struct kdf_sdesc **sdesc_ret, char *hashname)
 	struct crypto_shash *tfm;
 	struct kdf_sdesc *sdesc;
 	int size;
+	int err;
 
 	/* allocate synchronous hash */
 	tfm = crypto_alloc_shash(hashname, 0, 0);
@@ -97,16 +102,25 @@ static int kdf_alloc(struct kdf_sdesc **sdesc_ret, char *hashname)
 		return PTR_ERR(tfm);
 	}
 
+	err = -EINVAL;
+	if (crypto_shash_digestsize(tfm) == 0)
+		goto out_free_tfm;
+
+	err = -ENOMEM;
 	size = sizeof(struct shash_desc) + crypto_shash_descsize(tfm);
 	sdesc = kmalloc(size, GFP_KERNEL);
 	if (!sdesc)
-		return -ENOMEM;
+		goto out_free_tfm;
 	sdesc->shash.tfm = tfm;
 	sdesc->shash.flags = 0x0;
 
 	*sdesc_ret = sdesc;
 
 	return 0;
+
+out_free_tfm:
+	crypto_free_shash(tfm);
+	return err;
 }
 
 static void kdf_dealloc(struct kdf_sdesc *sdesc)
@@ -120,14 +134,6 @@ static void kdf_dealloc(struct kdf_sdesc *sdesc)
 	kzfree(sdesc);
 }
 
-/* convert 32 bit integer into its string representation */
-static inline void crypto_kw_cpu_to_be32(u32 val, u8 *buf)
-{
-	__be32 *a = (__be32 *)buf;
-
-	*a = cpu_to_be32(val);
-}
-
 /*
  * Implementation of the KDF in counter mode according to SP800-108 section 5.1
  * as well as SP800-56A section 5.8.1 (Single-step KDF).
@@ -138,25 +144,39 @@ static inline void crypto_kw_cpu_to_be32(u32 val, u8 *buf)
  * 5.8.1.2).
  */
 static int kdf_ctr(struct kdf_sdesc *sdesc, const u8 *src, unsigned int slen,
-		   u8 *dst, unsigned int dlen)
+		   u8 *dst, unsigned int dlen, unsigned int zlen)
 {
 	struct shash_desc *desc = &sdesc->shash;
 	unsigned int h = crypto_shash_digestsize(desc->tfm);
 	int err = 0;
 	u8 *dst_orig = dst;
-	u32 i = 1;
-	u8 iteration[sizeof(u32)];
+	__be32 counter = cpu_to_be32(1);
 
 	while (dlen) {
 		err = crypto_shash_init(desc);
 		if (err)
 			goto err;
 
-		crypto_kw_cpu_to_be32(i, iteration);
-		err = crypto_shash_update(desc, iteration, sizeof(u32));
+		err = crypto_shash_update(desc, (u8 *)&counter, sizeof(__be32));
 		if (err)
 			goto err;
 
+		if (zlen && h) {
+			u8 tmpbuffer[h];
+			size_t chunk = min_t(size_t, zlen, h);
+			memset(tmpbuffer, 0, chunk);
+
+			do {
+				err = crypto_shash_update(desc, tmpbuffer,
+							  chunk);
+				if (err)
+					goto err;
+
+				zlen -= chunk;
+				chunk = min_t(size_t, zlen, h);
+			} while (zlen);
+		}
+
 		if (src && slen) {
 			err = crypto_shash_update(desc, src, slen);
 			if (err)
@@ -179,7 +199,7 @@ static int kdf_ctr(struct kdf_sdesc *sdesc, const u8 *src, unsigned int slen,
 
 			dlen -= h;
 			dst += h;
-			i++;
+			counter = cpu_to_be32(be32_to_cpu(counter) + 1);
 		}
 	}
 
@@ -192,7 +212,7 @@ err:
 
 static int keyctl_dh_compute_kdf(struct kdf_sdesc *sdesc,
 				 char __user *buffer, size_t buflen,
-				 uint8_t *kbuf, size_t kbuflen)
+				 uint8_t *kbuf, size_t kbuflen, size_t lzero)
 {
 	uint8_t *outbuf = NULL;
 	int ret;
@@ -203,7 +223,7 @@ static int keyctl_dh_compute_kdf(struct kdf_sdesc *sdesc,
 		goto err;
 	}
 
-	ret = kdf_ctr(sdesc, kbuf, kbuflen, outbuf, buflen);
+	ret = kdf_ctr(sdesc, kbuf, kbuflen, outbuf, buflen, lzero);
 	if (ret)
 		goto err;
 
@@ -221,21 +241,26 @@ long __keyctl_dh_compute(struct keyctl_dh_params __user *params,
 			 struct keyctl_kdf_params *kdfcopy)
 {
 	long ret;
-	MPI base, private, prime, result;
-	unsigned nbytes;
+	ssize_t dlen;
+	int secretlen;
+	int outlen;
 	struct keyctl_dh_params pcopy;
-	uint8_t *kbuf;
-	ssize_t keylen;
-	size_t resultlen;
+	struct dh dh_inputs;
+	struct scatterlist outsg;
+	struct dh_completion compl;
+	struct crypto_kpp *tfm;
+	struct kpp_request *req;
+	uint8_t *secret;
+	uint8_t *outbuf;
 	struct kdf_sdesc *sdesc = NULL;
 
 	if (!params || (!buffer && buflen)) {
 		ret = -EINVAL;
-		goto out;
+		goto out1;
 	}
 	if (copy_from_user(&pcopy, params, sizeof(pcopy)) != 0) {
 		ret = -EFAULT;
-		goto out;
+		goto out1;
 	}
 
 	if (kdfcopy) {
@@ -244,104 +269,147 @@ long __keyctl_dh_compute(struct keyctl_dh_params __user *params,
 		if (buflen > KEYCTL_KDF_MAX_OUTPUT_LEN ||
 		    kdfcopy->otherinfolen > KEYCTL_KDF_MAX_OI_LEN) {
 			ret = -EMSGSIZE;
-			goto out;
+			goto out1;
 		}
 
 		/* get KDF name string */
 		hashname = strndup_user(kdfcopy->hashname, CRYPTO_MAX_ALG_NAME);
 		if (IS_ERR(hashname)) {
 			ret = PTR_ERR(hashname);
-			goto out;
+			goto out1;
 		}
 
 		/* allocate KDF from the kernel crypto API */
 		ret = kdf_alloc(&sdesc, hashname);
 		kfree(hashname);
 		if (ret)
-			goto out;
+			goto out1;
 	}
 
-	/*
-	 * If the caller requests postprocessing with a KDF, allow an
-	 * arbitrary output buffer size since the KDF ensures proper truncation.
-	 */
-	keylen = mpi_from_key(pcopy.prime, kdfcopy ? SIZE_MAX : buflen, &prime);
-	if (keylen < 0 || !prime) {
-		/* buflen == 0 may be used to query the required buffer size,
-		 * which is the prime key length.
-		 */
-		ret = keylen;
-		goto out;
+	memset(&dh_inputs, 0, sizeof(dh_inputs));
+
+	dlen = dh_data_from_key(pcopy.prime, &dh_inputs.p);
+	if (dlen < 0) {
+		ret = dlen;
+		goto out1;
+	}
+	dh_inputs.p_size = dlen;
+
+	dlen = dh_data_from_key(pcopy.base, &dh_inputs.g);
+	if (dlen < 0) {
+		ret = dlen;
+		goto out2;
 	}
+	dh_inputs.g_size = dlen;
 
-	/* The result is never longer than the prime */
-	resultlen = keylen;
+	dlen = dh_data_from_key(pcopy.private, &dh_inputs.key);
+	if (dlen < 0) {
+		ret = dlen;
+		goto out2;
+	}
+	dh_inputs.key_size = dlen;
 
-	keylen = mpi_from_key(pcopy.base, SIZE_MAX, &base);
-	if (keylen < 0 || !base) {
-		ret = keylen;
-		goto error1;
+	secretlen = crypto_dh_key_len(&dh_inputs);
+	secret = kmalloc(secretlen, GFP_KERNEL);
+	if (!secret) {
+		ret = -ENOMEM;
+		goto out2;
 	}
+	ret = crypto_dh_encode_key(secret, secretlen, &dh_inputs);
+	if (ret)
+		goto out3;
 
-	keylen = mpi_from_key(pcopy.private, SIZE_MAX, &private);
-	if (keylen < 0 || !private) {
-		ret = keylen;
-		goto error2;
+	tfm = crypto_alloc_kpp("dh", CRYPTO_ALG_TYPE_KPP, 0);
+	if (IS_ERR(tfm)) {
+		ret = PTR_ERR(tfm);
+		goto out3;
+	}
+
+	ret = crypto_kpp_set_secret(tfm, secret, secretlen);
+	if (ret)
+		goto out4;
+
+	outlen = crypto_kpp_maxsize(tfm);
+
+	if (!kdfcopy) {
+		/*
+		 * When not using a KDF, buflen 0 is used to read the
+		 * required buffer length
+		 */
+		if (buflen == 0) {
+			ret = outlen;
+			goto out4;
+		} else if (outlen > buflen) {
+			ret = -EOVERFLOW;
+			goto out4;
+		}
 	}
 
-	result = mpi_alloc(0);
-	if (!result) {
+	outbuf = kzalloc(kdfcopy ? (outlen + kdfcopy->otherinfolen) : outlen,
+			 GFP_KERNEL);
+	if (!outbuf) {
 		ret = -ENOMEM;
-		goto error3;
+		goto out4;
 	}
 
-	/* allocate space for DH shared secret and SP800-56A otherinfo */
-	kbuf = kmalloc(kdfcopy ? (resultlen + kdfcopy->otherinfolen) : resultlen,
-		       GFP_KERNEL);
-	if (!kbuf) {
+	sg_init_one(&outsg, outbuf, outlen);
+
+	req = kpp_request_alloc(tfm, GFP_KERNEL);
+	if (!req) {
 		ret = -ENOMEM;
-		goto error4;
+		goto out5;
 	}
 
+	kpp_request_set_input(req, NULL, 0);
+	kpp_request_set_output(req, &outsg, outlen);
+	init_completion(&compl.completion);
+	kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
+				      CRYPTO_TFM_REQ_MAY_SLEEP,
+				 dh_crypto_done, &compl);
+
 	/*
-	 * Concatenate SP800-56A otherinfo past DH shared secret -- the
-	 * input to the KDF is (DH shared secret || otherinfo)
+	 * For DH, generate_public_key and generate_shared_secret are
+	 * the same calculation
 	 */
-	if (kdfcopy && kdfcopy->otherinfo &&
-	    copy_from_user(kbuf + resultlen, kdfcopy->otherinfo,
-			   kdfcopy->otherinfolen) != 0) {
-		ret = -EFAULT;
-		goto error5;
+	ret = crypto_kpp_generate_public_key(req);
+	if (ret == -EINPROGRESS) {
+		wait_for_completion(&compl.completion);
+		ret = compl.err;
+		if (ret)
+			goto out6;
 	}
 
-	ret = do_dh(result, base, private, prime);
-	if (ret)
-		goto error5;
-
-	ret = mpi_read_buffer(result, kbuf, resultlen, &nbytes, NULL);
-	if (ret != 0)
-		goto error5;
-
 	if (kdfcopy) {
-		ret = keyctl_dh_compute_kdf(sdesc, buffer, buflen, kbuf,
-					    resultlen + kdfcopy->otherinfolen);
-	} else {
-		ret = nbytes;
-		if (copy_to_user(buffer, kbuf, nbytes) != 0)
+		/*
+		 * Concatenate SP800-56A otherinfo past DH shared secret -- the
+		 * input to the KDF is (DH shared secret || otherinfo)
+		 */
+		if (copy_from_user(outbuf + req->dst_len, kdfcopy->otherinfo,
+				   kdfcopy->otherinfolen) != 0) {
 			ret = -EFAULT;
+			goto out6;
+		}
+
+		ret = keyctl_dh_compute_kdf(sdesc, buffer, buflen, outbuf,
+					    req->dst_len + kdfcopy->otherinfolen,
+					    outlen - req->dst_len);
+	} else if (copy_to_user(buffer, outbuf, req->dst_len) == 0) {
+		ret = req->dst_len;
+	} else {
+		ret = -EFAULT;
 	}
 
-error5:
-	kzfree(kbuf);
-error4:
-	mpi_free(result);
-error3:
-	mpi_free(private);
-error2:
-	mpi_free(base);
-error1:
-	mpi_free(prime);
-out:
+out6:
+	kpp_request_free(req);
+out5:
+	kzfree(outbuf);
+out4:
+	crypto_free_kpp(tfm);
+out3:
+	kzfree(secret);
+out2:
+	dh_free_data(&dh_inputs);
+out1:
 	kdf_dealloc(sdesc);
 	return ret;
 }
diff --git a/security/keys/encrypted-keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c
index 0010955d7876..bb6324d1ccec 100644
--- a/security/keys/encrypted-keys/encrypted.c
+++ b/security/keys/encrypted-keys/encrypted.c
@@ -30,6 +30,7 @@
 #include <linux/scatterlist.h>
 #include <linux/ctype.h>
 #include <crypto/aes.h>
+#include <crypto/algapi.h>
 #include <crypto/hash.h>
 #include <crypto/sha.h>
 #include <crypto/skcipher.h>
@@ -54,13 +55,7 @@ static int blksize;
 #define MAX_DATA_SIZE 4096
 #define MIN_DATA_SIZE  20
 
-struct sdesc {
-	struct shash_desc shash;
-	char ctx[];
-};
-
-static struct crypto_shash *hashalg;
-static struct crypto_shash *hmacalg;
+static struct crypto_shash *hash_tfm;
 
 enum {
 	Opt_err = -1, Opt_new, Opt_load, Opt_update
@@ -141,23 +136,22 @@ static int valid_ecryptfs_desc(const char *ecryptfs_desc)
  */
 static int valid_master_desc(const char *new_desc, const char *orig_desc)
 {
-	if (!memcmp(new_desc, KEY_TRUSTED_PREFIX, KEY_TRUSTED_PREFIX_LEN)) {
-		if (strlen(new_desc) == KEY_TRUSTED_PREFIX_LEN)
-			goto out;
-		if (orig_desc)
-			if (memcmp(new_desc, orig_desc, KEY_TRUSTED_PREFIX_LEN))
-				goto out;
-	} else if (!memcmp(new_desc, KEY_USER_PREFIX, KEY_USER_PREFIX_LEN)) {
-		if (strlen(new_desc) == KEY_USER_PREFIX_LEN)
-			goto out;
-		if (orig_desc)
-			if (memcmp(new_desc, orig_desc, KEY_USER_PREFIX_LEN))
-				goto out;
-	} else
-		goto out;
+	int prefix_len;
+
+	if (!strncmp(new_desc, KEY_TRUSTED_PREFIX, KEY_TRUSTED_PREFIX_LEN))
+		prefix_len = KEY_TRUSTED_PREFIX_LEN;
+	else if (!strncmp(new_desc, KEY_USER_PREFIX, KEY_USER_PREFIX_LEN))
+		prefix_len = KEY_USER_PREFIX_LEN;
+	else
+		return -EINVAL;
+
+	if (!new_desc[prefix_len])
+		return -EINVAL;
+
+	if (orig_desc && strncmp(new_desc, orig_desc, prefix_len))
+		return -EINVAL;
+
 	return 0;
-out:
-	return -EINVAL;
 }
 
 /*
@@ -321,53 +315,38 @@ error:
 	return ukey;
 }
 
-static struct sdesc *alloc_sdesc(struct crypto_shash *alg)
-{
-	struct sdesc *sdesc;
-	int size;
-
-	size = sizeof(struct shash_desc) + crypto_shash_descsize(alg);
-	sdesc = kmalloc(size, GFP_KERNEL);
-	if (!sdesc)
-		return ERR_PTR(-ENOMEM);
-	sdesc->shash.tfm = alg;
-	sdesc->shash.flags = 0x0;
-	return sdesc;
-}
-
-static int calc_hmac(u8 *digest, const u8 *key, unsigned int keylen,
+static int calc_hash(struct crypto_shash *tfm, u8 *digest,
 		     const u8 *buf, unsigned int buflen)
 {
-	struct sdesc *sdesc;
-	int ret;
+	SHASH_DESC_ON_STACK(desc, tfm);
+	int err;
 
-	sdesc = alloc_sdesc(hmacalg);
-	if (IS_ERR(sdesc)) {
-		pr_info("encrypted_key: can't alloc %s\n", hmac_alg);
-		return PTR_ERR(sdesc);
-	}
+	desc->tfm = tfm;
+	desc->flags = 0;
 
-	ret = crypto_shash_setkey(hmacalg, key, keylen);
-	if (!ret)
-		ret = crypto_shash_digest(&sdesc->shash, buf, buflen, digest);
-	kfree(sdesc);
-	return ret;
+	err = crypto_shash_digest(desc, buf, buflen, digest);
+	shash_desc_zero(desc);
+	return err;
 }
 
-static int calc_hash(u8 *digest, const u8 *buf, unsigned int buflen)
+static int calc_hmac(u8 *digest, const u8 *key, unsigned int keylen,
+		     const u8 *buf, unsigned int buflen)
 {
-	struct sdesc *sdesc;
-	int ret;
+	struct crypto_shash *tfm;
+	int err;
 
-	sdesc = alloc_sdesc(hashalg);
-	if (IS_ERR(sdesc)) {
-		pr_info("encrypted_key: can't alloc %s\n", hash_alg);
-		return PTR_ERR(sdesc);
+	tfm = crypto_alloc_shash(hmac_alg, 0, CRYPTO_ALG_ASYNC);
+	if (IS_ERR(tfm)) {
+		pr_err("encrypted_key: can't alloc %s transform: %ld\n",
+		       hmac_alg, PTR_ERR(tfm));
+		return PTR_ERR(tfm);
 	}
 
-	ret = crypto_shash_digest(&sdesc->shash, buf, buflen, digest);
-	kfree(sdesc);
-	return ret;
+	err = crypto_shash_setkey(tfm, key, keylen);
+	if (!err)
+		err = calc_hash(tfm, digest, buf, buflen);
+	crypto_free_shash(tfm);
+	return err;
 }
 
 enum derived_key_type { ENC_KEY, AUTH_KEY };
@@ -385,10 +364,9 @@ static int get_derived_key(u8 *derived_key, enum derived_key_type key_type,
 	derived_buf_len = HASH_SIZE;
 
 	derived_buf = kzalloc(derived_buf_len, GFP_KERNEL);
-	if (!derived_buf) {
-		pr_err("encrypted_key: out of memory\n");
+	if (!derived_buf)
 		return -ENOMEM;
-	}
+
 	if (key_type)
 		strcpy(derived_buf, "AUTH_KEY");
 	else
@@ -396,8 +374,8 @@ static int get_derived_key(u8 *derived_key, enum derived_key_type key_type,
 
 	memcpy(derived_buf + strlen(derived_buf) + 1, master_key,
 	       master_keylen);
-	ret = calc_hash(derived_key, derived_buf, derived_buf_len);
-	kfree(derived_buf);
+	ret = calc_hash(hash_tfm, derived_key, derived_buf, derived_buf_len);
+	kzfree(derived_buf);
 	return ret;
 }
 
@@ -480,12 +458,9 @@ static int derived_key_encrypt(struct encrypted_key_payload *epayload,
 	struct skcipher_request *req;
 	unsigned int encrypted_datalen;
 	u8 iv[AES_BLOCK_SIZE];
-	unsigned int padlen;
-	char pad[16];
 	int ret;
 
 	encrypted_datalen = roundup(epayload->decrypted_datalen, blksize);
-	padlen = encrypted_datalen - epayload->decrypted_datalen;
 
 	req = init_skcipher_req(derived_key, derived_keylen);
 	ret = PTR_ERR(req);
@@ -493,11 +468,10 @@ static int derived_key_encrypt(struct encrypted_key_payload *epayload,
 		goto out;
 	dump_decrypted_data(epayload);
 
-	memset(pad, 0, sizeof pad);
 	sg_init_table(sg_in, 2);
 	sg_set_buf(&sg_in[0], epayload->decrypted_data,
 		   epayload->decrypted_datalen);
-	sg_set_buf(&sg_in[1], pad, padlen);
+	sg_set_page(&sg_in[1], ZERO_PAGE(0), AES_BLOCK_SIZE, 0);
 
 	sg_init_table(sg_out, 1);
 	sg_set_buf(sg_out, epayload->encrypted_data, encrypted_datalen);
@@ -533,6 +507,7 @@ static int datablob_hmac_append(struct encrypted_key_payload *epayload,
 	if (!ret)
 		dump_hmac(NULL, digest, HASH_SIZE);
 out:
+	memzero_explicit(derived_key, sizeof(derived_key));
 	return ret;
 }
 
@@ -561,8 +536,8 @@ static int datablob_hmac_verify(struct encrypted_key_payload *epayload,
 	ret = calc_hmac(digest, derived_key, sizeof derived_key, p, len);
 	if (ret < 0)
 		goto out;
-	ret = memcmp(digest, epayload->format + epayload->datablob_len,
-		     sizeof digest);
+	ret = crypto_memneq(digest, epayload->format + epayload->datablob_len,
+			    sizeof(digest));
 	if (ret) {
 		ret = -EINVAL;
 		dump_hmac("datablob",
@@ -571,6 +546,7 @@ static int datablob_hmac_verify(struct encrypted_key_payload *epayload,
 		dump_hmac("calc", digest, HASH_SIZE);
 	}
 out:
+	memzero_explicit(derived_key, sizeof(derived_key));
 	return ret;
 }
 
@@ -584,9 +560,14 @@ static int derived_key_decrypt(struct encrypted_key_payload *epayload,
 	struct skcipher_request *req;
 	unsigned int encrypted_datalen;
 	u8 iv[AES_BLOCK_SIZE];
-	char pad[16];
+	u8 *pad;
 	int ret;
 
+	/* Throwaway buffer to hold the unused zero padding at the end */
+	pad = kmalloc(AES_BLOCK_SIZE, GFP_KERNEL);
+	if (!pad)
+		return -ENOMEM;
+
 	encrypted_datalen = roundup(epayload->decrypted_datalen, blksize);
 	req = init_skcipher_req(derived_key, derived_keylen);
 	ret = PTR_ERR(req);
@@ -594,13 +575,12 @@ static int derived_key_decrypt(struct encrypted_key_payload *epayload,
 		goto out;
 	dump_encrypted_data(epayload, encrypted_datalen);
 
-	memset(pad, 0, sizeof pad);
 	sg_init_table(sg_in, 1);
 	sg_init_table(sg_out, 2);
 	sg_set_buf(sg_in, epayload->encrypted_data, encrypted_datalen);
 	sg_set_buf(&sg_out[0], epayload->decrypted_data,
 		   epayload->decrypted_datalen);
-	sg_set_buf(&sg_out[1], pad, sizeof pad);
+	sg_set_buf(&sg_out[1], pad, AES_BLOCK_SIZE);
 
 	memcpy(iv, epayload->iv, sizeof(iv));
 	skcipher_request_set_crypt(req, sg_in, sg_out, encrypted_datalen, iv);
@@ -612,6 +592,7 @@ static int derived_key_decrypt(struct encrypted_key_payload *epayload,
 		goto out;
 	dump_decrypted_data(epayload);
 out:
+	kfree(pad);
 	return ret;
 }
 
@@ -722,6 +703,7 @@ static int encrypted_key_decrypt(struct encrypted_key_payload *epayload,
 out:
 	up_read(&mkey->sem);
 	key_put(mkey);
+	memzero_explicit(derived_key, sizeof(derived_key));
 	return ret;
 }
 
@@ -828,13 +810,13 @@ static int encrypted_instantiate(struct key *key,
 	ret = encrypted_init(epayload, key->description, format, master_desc,
 			     decrypted_datalen, hex_encoded_iv);
 	if (ret < 0) {
-		kfree(epayload);
+		kzfree(epayload);
 		goto out;
 	}
 
 	rcu_assign_keypointer(key, epayload);
 out:
-	kfree(datablob);
+	kzfree(datablob);
 	return ret;
 }
 
@@ -843,8 +825,7 @@ static void encrypted_rcu_free(struct rcu_head *rcu)
 	struct encrypted_key_payload *epayload;
 
 	epayload = container_of(rcu, struct encrypted_key_payload, rcu);
-	memset(epayload->decrypted_data, 0, epayload->decrypted_datalen);
-	kfree(epayload);
+	kzfree(epayload);
 }
 
 /*
@@ -902,7 +883,7 @@ static int encrypted_update(struct key *key, struct key_preparsed_payload *prep)
 	rcu_assign_keypointer(key, new_epayload);
 	call_rcu(&epayload->rcu, encrypted_rcu_free);
 out:
-	kfree(buf);
+	kzfree(buf);
 	return ret;
 }
 
@@ -960,33 +941,26 @@ static long encrypted_read(const struct key *key, char __user *buffer,
 
 	up_read(&mkey->sem);
 	key_put(mkey);
+	memzero_explicit(derived_key, sizeof(derived_key));
 
 	if (copy_to_user(buffer, ascii_buf, asciiblob_len) != 0)
 		ret = -EFAULT;
-	kfree(ascii_buf);
+	kzfree(ascii_buf);
 
 	return asciiblob_len;
 out:
 	up_read(&mkey->sem);
 	key_put(mkey);
+	memzero_explicit(derived_key, sizeof(derived_key));
 	return ret;
 }
 
 /*
- * encrypted_destroy - before freeing the key, clear the decrypted data
- *
- * Before freeing the key, clear the memory containing the decrypted
- * key data.
+ * encrypted_destroy - clear and free the key's payload
  */
 static void encrypted_destroy(struct key *key)
 {
-	struct encrypted_key_payload *epayload = key->payload.data[0];
-
-	if (!epayload)
-		return;
-
-	memzero_explicit(epayload->decrypted_data, epayload->decrypted_datalen);
-	kfree(key->payload.data[0]);
+	kzfree(key->payload.data[0]);
 }
 
 struct key_type key_type_encrypted = {
@@ -999,47 +973,17 @@ struct key_type key_type_encrypted = {
 };
 EXPORT_SYMBOL_GPL(key_type_encrypted);
 
-static void encrypted_shash_release(void)
-{
-	if (hashalg)
-		crypto_free_shash(hashalg);
-	if (hmacalg)
-		crypto_free_shash(hmacalg);
-}
-
-static int __init encrypted_shash_alloc(void)
+static int __init init_encrypted(void)
 {
 	int ret;
 
-	hmacalg = crypto_alloc_shash(hmac_alg, 0, CRYPTO_ALG_ASYNC);
-	if (IS_ERR(hmacalg)) {
-		pr_info("encrypted_key: could not allocate crypto %s\n",
-			hmac_alg);
-		return PTR_ERR(hmacalg);
-	}
-
-	hashalg = crypto_alloc_shash(hash_alg, 0, CRYPTO_ALG_ASYNC);
-	if (IS_ERR(hashalg)) {
-		pr_info("encrypted_key: could not allocate crypto %s\n",
-			hash_alg);
-		ret = PTR_ERR(hashalg);
-		goto hashalg_fail;
+	hash_tfm = crypto_alloc_shash(hash_alg, 0, CRYPTO_ALG_ASYNC);
+	if (IS_ERR(hash_tfm)) {
+		pr_err("encrypted_key: can't allocate %s transform: %ld\n",
+		       hash_alg, PTR_ERR(hash_tfm));
+		return PTR_ERR(hash_tfm);
 	}
 
-	return 0;
-
-hashalg_fail:
-	crypto_free_shash(hmacalg);
-	return ret;
-}
-
-static int __init init_encrypted(void)
-{
-	int ret;
-
-	ret = encrypted_shash_alloc();
-	if (ret < 0)
-		return ret;
 	ret = aes_get_sizes();
 	if (ret < 0)
 		goto out;
@@ -1048,14 +992,14 @@ static int __init init_encrypted(void)
 		goto out;
 	return 0;
 out:
-	encrypted_shash_release();
+	crypto_free_shash(hash_tfm);
 	return ret;
 
 }
 
 static void __exit cleanup_encrypted(void)
 {
-	encrypted_shash_release();
+	crypto_free_shash(hash_tfm);
 	unregister_key_type(&key_type_encrypted);
 }
 
diff --git a/security/keys/gc.c b/security/keys/gc.c
index 595becc6d0d2..87cb260e4890 100644
--- a/security/keys/gc.c
+++ b/security/keys/gc.c
@@ -158,9 +158,7 @@ static noinline void key_gc_unused_keys(struct list_head *keys)
 
 		kfree(key->description);
 
-#ifdef KEY_DEBUGGING
-		key->magic = KEY_DEBUG_MAGIC_X;
-#endif
+		memzero_explicit(key, sizeof(*key));
 		kmem_cache_free(key_jar, key);
 	}
 }
diff --git a/security/keys/key.c b/security/keys/key.c
index 455c04d80bbb..83da68d98b40 100644
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -660,14 +660,11 @@ not_found:
 	goto error;
 
 found:
-	/* pretend it doesn't exist if it is awaiting deletion */
-	if (refcount_read(&key->usage) == 0)
-		goto not_found;
-
-	/* this races with key_put(), but that doesn't matter since key_put()
-	 * doesn't actually change the key
+	/* A key is allowed to be looked up only if someone still owns a
+	 * reference to it - otherwise it's awaiting the gc.
 	 */
-	__key_get(key);
+	if (!refcount_inc_not_zero(&key->usage))
+		goto not_found;
 
 error:
 	spin_unlock(&key_serial_lock);
@@ -966,12 +963,11 @@ int key_update(key_ref_t key_ref, const void *payload, size_t plen)
 	/* the key must be writable */
 	ret = key_permission(key_ref, KEY_NEED_WRITE);
 	if (ret < 0)
-		goto error;
+		return ret;
 
 	/* attempt to update it if supported */
-	ret = -EOPNOTSUPP;
 	if (!key->type->update)
-		goto error;
+		return -EOPNOTSUPP;
 
 	memset(&prep, 0, sizeof(prep));
 	prep.data = payload;
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index 447a7d5cee0f..ab0b337c84b4 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -99,7 +99,7 @@ SYSCALL_DEFINE5(add_key, const char __user *, _type,
 	/* pull the payload in if one was supplied */
 	payload = NULL;
 
-	if (_payload) {
+	if (plen) {
 		ret = -ENOMEM;
 		payload = kvmalloc(plen, GFP_KERNEL);
 		if (!payload)
@@ -132,7 +132,10 @@ SYSCALL_DEFINE5(add_key, const char __user *, _type,
 
 	key_ref_put(keyring_ref);
  error3:
-	kvfree(payload);
+	if (payload) {
+		memzero_explicit(payload, plen);
+		kvfree(payload);
+	}
  error2:
 	kfree(description);
  error:
@@ -324,7 +327,7 @@ long keyctl_update_key(key_serial_t id,
 
 	/* pull the payload in if one was supplied */
 	payload = NULL;
-	if (_payload) {
+	if (plen) {
 		ret = -ENOMEM;
 		payload = kmalloc(plen, GFP_KERNEL);
 		if (!payload)
@@ -347,7 +350,7 @@ long keyctl_update_key(key_serial_t id,
 
 	key_ref_put(key_ref);
 error2:
-	kfree(payload);
+	kzfree(payload);
 error:
 	return ret;
 }
@@ -1093,7 +1096,10 @@ long keyctl_instantiate_key_common(key_serial_t id,
 		keyctl_change_reqkey_auth(NULL);
 
 error2:
-	kvfree(payload);
+	if (payload) {
+		memzero_explicit(payload, plen);
+		kvfree(payload);
+	}
 error:
 	return ret;
 }
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index 4d1678e4586f..de81793f9920 100644
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -706,7 +706,7 @@ descend_to_keyring:
 	 * Non-keyrings avoid the leftmost branch of the root entirely (root
 	 * slots 1-15).
 	 */
-	ptr = ACCESS_ONCE(keyring->keys.root);
+	ptr = READ_ONCE(keyring->keys.root);
 	if (!ptr)
 		goto not_this_keyring;
 
@@ -720,7 +720,7 @@ descend_to_keyring:
 		if ((shortcut->index_key[0] & ASSOC_ARRAY_FAN_MASK) != 0)
 			goto not_this_keyring;
 
-		ptr = ACCESS_ONCE(shortcut->next_node);
+		ptr = READ_ONCE(shortcut->next_node);
 		node = assoc_array_ptr_to_node(ptr);
 		goto begin_node;
 	}
@@ -740,7 +740,7 @@ descend_to_node:
 	if (assoc_array_ptr_is_shortcut(ptr)) {
 		shortcut = assoc_array_ptr_to_shortcut(ptr);
 		smp_read_barrier_depends();
-		ptr = ACCESS_ONCE(shortcut->next_node);
+		ptr = READ_ONCE(shortcut->next_node);
 		BUG_ON(!assoc_array_ptr_is_node(ptr));
 	}
 	node = assoc_array_ptr_to_node(ptr);
@@ -752,7 +752,7 @@ begin_node:
 ascend_to_node:
 	/* Go through the slots in a node */
 	for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
-		ptr = ACCESS_ONCE(node->slots[slot]);
+		ptr = READ_ONCE(node->slots[slot]);
 
 		if (assoc_array_ptr_is_meta(ptr) && node->back_pointer)
 			goto descend_to_node;
@@ -790,13 +790,13 @@ ascend_to_node:
 	/* We've dealt with all the slots in the current node, so now we need
 	 * to ascend to the parent and continue processing there.
 	 */
-	ptr = ACCESS_ONCE(node->back_pointer);
+	ptr = READ_ONCE(node->back_pointer);
 	slot = node->parent_slot;
 
 	if (ptr && assoc_array_ptr_is_shortcut(ptr)) {
 		shortcut = assoc_array_ptr_to_shortcut(ptr);
 		smp_read_barrier_depends();
-		ptr = ACCESS_ONCE(shortcut->back_pointer);
+		ptr = READ_ONCE(shortcut->back_pointer);
 		slot = shortcut->parent_slot;
 	}
 	if (!ptr)
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
index 2217dfec7996..86bced9fdbdf 100644
--- a/security/keys/process_keys.c
+++ b/security/keys/process_keys.c
@@ -809,15 +809,14 @@ long join_session_keyring(const char *name)
 		ret = PTR_ERR(keyring);
 		goto error2;
 	} else if (keyring == new->session_keyring) {
-		key_put(keyring);
 		ret = 0;
-		goto error2;
+		goto error3;
 	}
 
 	/* we've got a keyring - now to install it */
 	ret = install_session_keyring_to_cred(new, keyring);
 	if (ret < 0)
-		goto error2;
+		goto error3;
 
 	commit_creds(new);
 	mutex_unlock(&key_session_mutex);
@@ -827,6 +826,8 @@ long join_session_keyring(const char *name)
 okay:
 	return ret;
 
+error3:
+	key_put(keyring);
 error2:
 	mutex_unlock(&key_session_mutex);
 error:
diff --git a/security/keys/trusted.c b/security/keys/trusted.c
index 2ae31c5a87de..435e86e13879 100644
--- a/security/keys/trusted.c
+++ b/security/keys/trusted.c
@@ -70,7 +70,7 @@ static int TSS_sha1(const unsigned char *data, unsigned int datalen,
 	}
 
 	ret = crypto_shash_digest(&sdesc->shash, data, datalen, digest);
-	kfree(sdesc);
+	kzfree(sdesc);
 	return ret;
 }
 
@@ -114,7 +114,7 @@ static int TSS_rawhmac(unsigned char *digest, const unsigned char *key,
 	if (!ret)
 		ret = crypto_shash_final(&sdesc->shash, digest);
 out:
-	kfree(sdesc);
+	kzfree(sdesc);
 	return ret;
 }
 
@@ -165,7 +165,7 @@ static int TSS_authhmac(unsigned char *digest, const unsigned char *key,
 			  paramdigest, TPM_NONCE_SIZE, h1,
 			  TPM_NONCE_SIZE, h2, 1, &c, 0, 0);
 out:
-	kfree(sdesc);
+	kzfree(sdesc);
 	return ret;
 }
 
@@ -246,7 +246,7 @@ static int TSS_checkhmac1(unsigned char *buffer,
 	if (memcmp(testhmac, authdata, SHA1_DIGEST_SIZE))
 		ret = -EINVAL;
 out:
-	kfree(sdesc);
+	kzfree(sdesc);
 	return ret;
 }
 
@@ -347,7 +347,7 @@ static int TSS_checkhmac2(unsigned char *buffer,
 	if (memcmp(testhmac2, authdata2, SHA1_DIGEST_SIZE))
 		ret = -EINVAL;
 out:
-	kfree(sdesc);
+	kzfree(sdesc);
 	return ret;
 }
 
@@ -564,7 +564,7 @@ static int tpm_seal(struct tpm_buf *tb, uint16_t keytype,
 		*bloblen = storedsize;
 	}
 out:
-	kfree(td);
+	kzfree(td);
 	return ret;
 }
 
@@ -678,7 +678,7 @@ static int key_seal(struct trusted_key_payload *p,
 	if (ret < 0)
 		pr_info("trusted_key: srkseal failed (%d)\n", ret);
 
-	kfree(tb);
+	kzfree(tb);
 	return ret;
 }
 
@@ -703,7 +703,7 @@ static int key_unseal(struct trusted_key_payload *p,
 	/* pull migratable flag out of sealed key */
 	p->migratable = p->key[--p->key_len];
 
-	kfree(tb);
+	kzfree(tb);
 	return ret;
 }
 
@@ -1037,12 +1037,12 @@ static int trusted_instantiate(struct key *key,
 	if (!ret && options->pcrlock)
 		ret = pcrlock(options->pcrlock);
 out:
-	kfree(datablob);
-	kfree(options);
+	kzfree(datablob);
+	kzfree(options);
 	if (!ret)
 		rcu_assign_keypointer(key, payload);
 	else
-		kfree(payload);
+		kzfree(payload);
 	return ret;
 }
 
@@ -1051,8 +1051,7 @@ static void trusted_rcu_free(struct rcu_head *rcu)
 	struct trusted_key_payload *p;
 
 	p = container_of(rcu, struct trusted_key_payload, rcu);
-	memset(p->key, 0, p->key_len);
-	kfree(p);
+	kzfree(p);
 }
 
 /*
@@ -1094,13 +1093,13 @@ static int trusted_update(struct key *key, struct key_preparsed_payload *prep)
 	ret = datablob_parse(datablob, new_p, new_o);
 	if (ret != Opt_update) {
 		ret = -EINVAL;
-		kfree(new_p);
+		kzfree(new_p);
 		goto out;
 	}
 
 	if (!new_o->keyhandle) {
 		ret = -EINVAL;
-		kfree(new_p);
+		kzfree(new_p);
 		goto out;
 	}
 
@@ -1114,22 +1113,22 @@ static int trusted_update(struct key *key, struct key_preparsed_payload *prep)
 	ret = key_seal(new_p, new_o);
 	if (ret < 0) {
 		pr_info("trusted_key: key_seal failed (%d)\n", ret);
-		kfree(new_p);
+		kzfree(new_p);
 		goto out;
 	}
 	if (new_o->pcrlock) {
 		ret = pcrlock(new_o->pcrlock);
 		if (ret < 0) {
 			pr_info("trusted_key: pcrlock failed (%d)\n", ret);
-			kfree(new_p);
+			kzfree(new_p);
 			goto out;
 		}
 	}
 	rcu_assign_keypointer(key, new_p);
 	call_rcu(&p->rcu, trusted_rcu_free);
 out:
-	kfree(datablob);
-	kfree(new_o);
+	kzfree(datablob);
+	kzfree(new_o);
 	return ret;
 }
 
@@ -1158,24 +1157,19 @@ static long trusted_read(const struct key *key, char __user *buffer,
 	for (i = 0; i < p->blob_len; i++)
 		bufp = hex_byte_pack(bufp, p->blob[i]);
 	if ((copy_to_user(buffer, ascii_buf, 2 * p->blob_len)) != 0) {
-		kfree(ascii_buf);
+		kzfree(ascii_buf);
 		return -EFAULT;
 	}
-	kfree(ascii_buf);
+	kzfree(ascii_buf);
 	return 2 * p->blob_len;
 }
 
 /*
- * trusted_destroy - before freeing the key, clear the decrypted data
+ * trusted_destroy - clear and free the key's payload
  */
 static void trusted_destroy(struct key *key)
 {
-	struct trusted_key_payload *p = key->payload.data[0];
-
-	if (!p)
-		return;
-	memset(p->key, 0, p->key_len);
-	kfree(key->payload.data[0]);
+	kzfree(key->payload.data[0]);
 }
 
 struct key_type key_type_trusted = {
diff --git a/security/keys/user_defined.c b/security/keys/user_defined.c
index 26605134f17a..3d8c68eba516 100644
--- a/security/keys/user_defined.c
+++ b/security/keys/user_defined.c
@@ -86,10 +86,18 @@ EXPORT_SYMBOL_GPL(user_preparse);
  */
 void user_free_preparse(struct key_preparsed_payload *prep)
 {
-	kfree(prep->payload.data[0]);
+	kzfree(prep->payload.data[0]);
 }
 EXPORT_SYMBOL_GPL(user_free_preparse);
 
+static void user_free_payload_rcu(struct rcu_head *head)
+{
+	struct user_key_payload *payload;
+
+	payload = container_of(head, struct user_key_payload, rcu);
+	kzfree(payload);
+}
+
 /*
  * update a user defined key
  * - the key's semaphore is write-locked
@@ -112,7 +120,7 @@ int user_update(struct key *key, struct key_preparsed_payload *prep)
 	prep->payload.data[0] = NULL;
 
 	if (zap)
-		kfree_rcu(zap, rcu);
+		call_rcu(&zap->rcu, user_free_payload_rcu);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(user_update);
@@ -130,7 +138,7 @@ void user_revoke(struct key *key)
 
 	if (upayload) {
 		rcu_assign_keypointer(key, NULL);
-		kfree_rcu(upayload, rcu);
+		call_rcu(&upayload->rcu, user_free_payload_rcu);
 	}
 }
 
@@ -143,7 +151,7 @@ void user_destroy(struct key *key)
 {
 	struct user_key_payload *upayload = key->payload.data[0];
 
-	kfree(upayload);
+	kzfree(upayload);
 }
 
 EXPORT_SYMBOL_GPL(user_destroy);
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index e67a526d1f30..819fd6858b49 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -1106,10 +1106,8 @@ static int selinux_parse_opts_str(char *options,
 
 	opts->mnt_opts_flags = kcalloc(NUM_SEL_MNT_OPTS, sizeof(int),
 				       GFP_KERNEL);
-	if (!opts->mnt_opts_flags) {
-		kfree(opts->mnt_opts);
+	if (!opts->mnt_opts_flags)
 		goto out_err;
-	}
 
 	if (fscontext) {
 		opts->mnt_opts[num_mnt_opts] = fscontext;
@@ -1132,6 +1130,7 @@ static int selinux_parse_opts_str(char *options,
 	return 0;
 
 out_err:
+	security_free_mnt_opts(opts);
 	kfree(context);
 	kfree(defcontext);
 	kfree(fscontext);
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index 5088d4b8db22..009e6c98754e 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -2492,7 +2492,7 @@ static int pcm_chmap_ctl_get(struct snd_kcontrol *kcontrol,
 	struct snd_pcm_substream *substream;
 	const struct snd_pcm_chmap_elem *map;
 
-	if (snd_BUG_ON(!info->chmap))
+	if (!info->chmap)
 		return -EINVAL;
 	substream = snd_pcm_chmap_substream(info, idx);
 	if (!substream)
@@ -2524,7 +2524,7 @@ static int pcm_chmap_ctl_tlv(struct snd_kcontrol *kcontrol, int op_flag,
2524 unsigned int __user *dst; 2524 unsigned int __user *dst;
2525 int c, count = 0; 2525 int c, count = 0;
2526 2526
2527 if (snd_BUG_ON(!info->chmap)) 2527 if (!info->chmap)
2528 return -EINVAL; 2528 return -EINVAL;
2529 if (size < 8) 2529 if (size < 8)
2530 return -ENOMEM; 2530 return -ENOMEM;
diff --git a/sound/core/timer.c b/sound/core/timer.c
index 2f836ca09860..cd67d1c12cf1 100644
--- a/sound/core/timer.c
+++ b/sound/core/timer.c
@@ -1618,6 +1618,7 @@ static int snd_timer_user_tselect(struct file *file,
1618 if (err < 0) 1618 if (err < 0)
1619 goto __err; 1619 goto __err;
1620 1620
1621 tu->qhead = tu->qtail = tu->qused = 0;
1621 kfree(tu->queue); 1622 kfree(tu->queue);
1622 tu->queue = NULL; 1623 tu->queue = NULL;
1623 kfree(tu->tqueue); 1624 kfree(tu->tqueue);
@@ -1959,6 +1960,7 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
1959 1960
1960 tu = file->private_data; 1961 tu = file->private_data;
1961 unit = tu->tread ? sizeof(struct snd_timer_tread) : sizeof(struct snd_timer_read); 1962 unit = tu->tread ? sizeof(struct snd_timer_tread) : sizeof(struct snd_timer_read);
1963 mutex_lock(&tu->ioctl_lock);
1962 spin_lock_irq(&tu->qlock); 1964 spin_lock_irq(&tu->qlock);
1963 while ((long)count - result >= unit) { 1965 while ((long)count - result >= unit) {
1964 while (!tu->qused) { 1966 while (!tu->qused) {
@@ -1974,7 +1976,9 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
1974 add_wait_queue(&tu->qchange_sleep, &wait); 1976 add_wait_queue(&tu->qchange_sleep, &wait);
1975 1977
1976 spin_unlock_irq(&tu->qlock); 1978 spin_unlock_irq(&tu->qlock);
1979 mutex_unlock(&tu->ioctl_lock);
1977 schedule(); 1980 schedule();
1981 mutex_lock(&tu->ioctl_lock);
1978 spin_lock_irq(&tu->qlock); 1982 spin_lock_irq(&tu->qlock);
1979 1983
1980 remove_wait_queue(&tu->qchange_sleep, &wait); 1984 remove_wait_queue(&tu->qchange_sleep, &wait);
@@ -1994,7 +1998,6 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
1994 tu->qused--; 1998 tu->qused--;
1995 spin_unlock_irq(&tu->qlock); 1999 spin_unlock_irq(&tu->qlock);
1996 2000
1997 mutex_lock(&tu->ioctl_lock);
1998 if (tu->tread) { 2001 if (tu->tread) {
1999 if (copy_to_user(buffer, &tu->tqueue[qhead], 2002 if (copy_to_user(buffer, &tu->tqueue[qhead],
2000 sizeof(struct snd_timer_tread))) 2003 sizeof(struct snd_timer_tread)))
@@ -2004,7 +2007,6 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
2004 sizeof(struct snd_timer_read))) 2007 sizeof(struct snd_timer_read)))
2005 err = -EFAULT; 2008 err = -EFAULT;
2006 } 2009 }
2007 mutex_unlock(&tu->ioctl_lock);
2008 2010
2009 spin_lock_irq(&tu->qlock); 2011 spin_lock_irq(&tu->qlock);
2010 if (err < 0) 2012 if (err < 0)
@@ -2014,6 +2016,7 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
2014 } 2016 }
2015 _error: 2017 _error:
2016 spin_unlock_irq(&tu->qlock); 2018 spin_unlock_irq(&tu->qlock);
2019 mutex_unlock(&tu->ioctl_lock);
2017 return result > 0 ? result : err; 2020 return result > 0 ? result : err;
2018} 2021}
2019 2022
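The snd_timer changes close a use-after-free: previously tu->ioctl_lock was taken only around copy_to_user(), so a concurrent TSELECT ioctl could kfree() and reallocate the queue between the qused check and the copy (the first hunk also resets qhead/qtail/qused when the queue is swapped). The fix holds the mutex across the whole read and drops both locks only around schedule(). The resulting lock choreography, reduced to its shape with illustrative names:

    #include <linux/mutex.h>
    #include <linux/sched.h>
    #include <linux/sched/signal.h>
    #include <linux/spinlock.h>
    #include <linux/uaccess.h>
    #include <linux/wait.h>

    struct entry { unsigned long data; };

    struct tu_sketch {
            struct mutex ioctl_lock;
            spinlock_t qlock;
            wait_queue_head_t qchange_sleep;
            struct entry *queue;
            int qhead, qused, qsize;
    };

    static ssize_t queued_read_sketch(struct tu_sketch *tu,
                                      char __user *buffer, size_t count)
    {
            ssize_t result = 0;
            int err = 0;

            mutex_lock(&tu->ioctl_lock);    /* held across the copy now */
            spin_lock_irq(&tu->qlock);
            while (count - result >= sizeof(struct entry)) {
                    while (!tu->qused) {
                            wait_queue_t wait;

                            set_current_state(TASK_INTERRUPTIBLE);
                            init_waitqueue_entry(&wait, current);
                            add_wait_queue(&tu->qchange_sleep, &wait);

                            /* drop BOTH locks before sleeping */
                            spin_unlock_irq(&tu->qlock);
                            mutex_unlock(&tu->ioctl_lock);
                            schedule();
                            mutex_lock(&tu->ioctl_lock);
                            spin_lock_irq(&tu->qlock);

                            remove_wait_queue(&tu->qchange_sleep, &wait);
                            if (signal_pending(current)) {
                                    err = -ERESTARTSYS;
                                    goto out;
                            }
                    }
                    spin_unlock_irq(&tu->qlock);
                    /* mutex still held: the queue cannot be reallocated
                     * by a concurrent ioctl while we fault in user pages */
                    if (copy_to_user(buffer + result,
                                     &tu->queue[tu->qhead],
                                     sizeof(struct entry)))
                            err = -EFAULT;
                    spin_lock_irq(&tu->qlock);
                    tu->qhead = (tu->qhead + 1) % tu->qsize;
                    tu->qused--;
                    if (err < 0)
                            goto out;
                    result += sizeof(struct entry);
            }
    out:
            spin_unlock_irq(&tu->qlock);
            mutex_unlock(&tu->ioctl_lock);
            return result > 0 ? result : err;
    }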
diff --git a/sound/firewire/amdtp-stream.c b/sound/firewire/amdtp-stream.c
index 9e6f54f8c45d..1e26854b3425 100644
--- a/sound/firewire/amdtp-stream.c
+++ b/sound/firewire/amdtp-stream.c
@@ -682,7 +682,9 @@ static void out_stream_callback(struct fw_iso_context *context, u32 tstamp,
682 cycle = increment_cycle_count(cycle, 1); 682 cycle = increment_cycle_count(cycle, 1);
683 if (s->handle_packet(s, 0, cycle, i) < 0) { 683 if (s->handle_packet(s, 0, cycle, i) < 0) {
684 s->packet_index = -1; 684 s->packet_index = -1;
685 amdtp_stream_pcm_abort(s); 685 if (in_interrupt())
686 amdtp_stream_pcm_abort(s);
687 WRITE_ONCE(s->pcm_buffer_pointer, SNDRV_PCM_POS_XRUN);
686 return; 688 return;
687 } 689 }
688 } 690 }
@@ -734,7 +736,9 @@ static void in_stream_callback(struct fw_iso_context *context, u32 tstamp,
734 /* Queueing error or detecting invalid payload. */ 736 /* Queueing error or detecting invalid payload. */
735 if (i < packets) { 737 if (i < packets) {
736 s->packet_index = -1; 738 s->packet_index = -1;
737 amdtp_stream_pcm_abort(s); 739 if (in_interrupt())
740 amdtp_stream_pcm_abort(s);
741 WRITE_ONCE(s->pcm_buffer_pointer, SNDRV_PCM_POS_XRUN);
738 return; 742 return;
739 } 743 }
740 744
diff --git a/sound/firewire/amdtp-stream.h b/sound/firewire/amdtp-stream.h
index 7e8831722821..ea1a91e99875 100644
--- a/sound/firewire/amdtp-stream.h
+++ b/sound/firewire/amdtp-stream.h
@@ -135,7 +135,7 @@ struct amdtp_stream {
135 /* For a PCM substream processing. */ 135 /* For a PCM substream processing. */
136 struct snd_pcm_substream *pcm; 136 struct snd_pcm_substream *pcm;
137 struct tasklet_struct period_tasklet; 137 struct tasklet_struct period_tasklet;
138 unsigned int pcm_buffer_pointer; 138 snd_pcm_uframes_t pcm_buffer_pointer;
139 unsigned int pcm_period_pointer; 139 unsigned int pcm_period_pointer;
140 140
141 /* To wait for first packet. */ 141 /* To wait for first packet. */
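In the amdtp hunks, amdtp_stream_pcm_abort() is now called only when the callback runs in interrupt context; in process context (for example a flush while stopping) the stream instead poisons the buffer position with SNDRV_PCM_POS_XRUN, and the field is widened to snd_pcm_uframes_t so that sentinel fits. A sketch of the assumed consuming side, where the pointer callback simply reports the stored value:

    /* Assumed reader for the WRITE_ONCE() above: returning the poisoned
     * value lets the PCM core detect and handle the xrun itself. */
    static snd_pcm_uframes_t sketch_pcm_pointer(struct amdtp_stream *s)
    {
            return READ_ONCE(s->pcm_buffer_pointer);
    }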
diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
index d6fb2d5d01a7..60ce1cfc300f 100644
--- a/sound/pci/hda/hda_codec.h
+++ b/sound/pci/hda/hda_codec.h
@@ -295,6 +295,8 @@ struct hda_codec {
295 295
296#define list_for_each_codec(c, bus) \ 296#define list_for_each_codec(c, bus) \
297 list_for_each_entry(c, &(bus)->core.codec_list, core.list) 297 list_for_each_entry(c, &(bus)->core.codec_list, core.list)
298#define list_for_each_codec_safe(c, n, bus) \
299 list_for_each_entry_safe(c, n, &(bus)->core.codec_list, core.list)
298 300
299/* snd_hda_codec_read/write optional flags */ 301/* snd_hda_codec_read/write optional flags */
300#define HDA_RW_NO_RESPONSE_FALLBACK (1 << 0) 302#define HDA_RW_NO_RESPONSE_FALLBACK (1 << 0)
diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
index 3715a5725613..1c60beb5b70a 100644
--- a/sound/pci/hda/hda_controller.c
+++ b/sound/pci/hda/hda_controller.c
@@ -1337,8 +1337,12 @@ EXPORT_SYMBOL_GPL(azx_probe_codecs);
1337/* configure each codec instance */ 1337/* configure each codec instance */
1338int azx_codec_configure(struct azx *chip) 1338int azx_codec_configure(struct azx *chip)
1339{ 1339{
1340 struct hda_codec *codec; 1340 struct hda_codec *codec, *next;
1341 list_for_each_codec(codec, &chip->bus) { 1341
1342 /* use _safe version here since snd_hda_codec_configure() deregisters
1343 * the device upon error and deletes itself from the bus list.
1344 */
1345 list_for_each_codec_safe(codec, next, &chip->bus) {
1342 snd_hda_codec_configure(codec); 1346 snd_hda_codec_configure(codec);
1343 } 1347 }
1344 return 0; 1348 return 0;
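The hda_controller fix needs the new _safe iterator because snd_hda_codec_configure() can unlink the codec from the bus list on error; the _safe variant caches the next node before the loop body runs, so deleting the current one is harmless. The generic shape, with illustrative types:

    #include <linux/list.h>
    #include <linux/slab.h>

    struct node {
            struct list_head list;
            int bad;
    };

    static void prune(struct list_head *head)
    {
            struct node *n, *next;

            list_for_each_entry_safe(n, next, head, list) {
                    if (n->bad) {
                            /* safe: 'next' was saved before this body */
                            list_del(&n->list);
                            kfree(n);
                    }
            }
    }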
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index 2842c82363c0..71545b56b4c8 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -3174,6 +3174,7 @@ static int check_dyn_adc_switch(struct hda_codec *codec)
3174 spec->input_paths[i][nums]); 3174 spec->input_paths[i][nums]);
3175 spec->input_paths[i][nums] = 3175 spec->input_paths[i][nums] =
3176 spec->input_paths[i][n]; 3176 spec->input_paths[i][n];
3177 spec->input_paths[i][n] = 0;
3177 } 3178 }
3178 } 3179 }
3179 nums++; 3180 nums++;
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 1770f085c2a6..01eb1dc7b5b3 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -370,10 +370,12 @@ enum {
370#define IS_KBL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d71) 370#define IS_KBL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d71)
371#define IS_KBL_H(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa2f0) 371#define IS_KBL_H(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa2f0)
372#define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98) 372#define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98)
373#define IS_BXT_T(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x1a98)
373#define IS_GLK(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x3198) 374#define IS_GLK(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x3198)
374#define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci)) || \ 375#define IS_CFL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa348)
375 IS_KBL(pci) || IS_KBL_LP(pci) || IS_KBL_H(pci) || \ 376#define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci) || \
376 IS_GLK(pci) 377 IS_BXT_T(pci) || IS_KBL(pci) || IS_KBL_LP(pci) || \
378 IS_KBL_H(pci) || IS_GLK(pci) || IS_CFL(pci))
377 379
378static char *driver_short_names[] = { 380static char *driver_short_names[] = {
379 [AZX_DRIVER_ICH] = "HDA Intel", 381 [AZX_DRIVER_ICH] = "HDA Intel",
@@ -2378,6 +2380,9 @@ static const struct pci_device_id azx_ids[] = {
2378 /* Kabylake-H */ 2380 /* Kabylake-H */
2379 { PCI_DEVICE(0x8086, 0xa2f0), 2381 { PCI_DEVICE(0x8086, 0xa2f0),
2380 .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE }, 2382 .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
2383 /* Coffelake */
2384 { PCI_DEVICE(0x8086, 0xa348),
2385 .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE},
2381 /* Broxton-P(Apollolake) */ 2386 /* Broxton-P(Apollolake) */
2382 { PCI_DEVICE(0x8086, 0x5a98), 2387 { PCI_DEVICE(0x8086, 0x5a98),
2383 .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON }, 2388 .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON },
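Besides adding the Coffee Lake and Broxton-T IDs, the hda_intel hunk closes a parenthesization bug: the old IS_SKL_PLUS() body ended its parentheses after IS_BXT(), so any use under a higher-precedence operator grouped only the first three terms. A userspace illustration of the failure mode (macros here are stand-ins, not the driver's):

    #include <stdio.h>

    #define BAD(x)  ((x) == 1) || ((x) == 2)
    #define GOOD(x) (((x) == 1) || ((x) == 2))

    int main(void)
    {
            int v = 2;

            /* !BAD(v) expands to (!(v == 1)) || (v == 2) */
            printf("!BAD(2)  = %d\n", !BAD(v));     /* 1: wrong */
            printf("!GOOD(2) = %d\n", !GOOD(v));    /* 0: intended */
            return 0;
    }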
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 58df440013c5..cbeebc0a9711 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -2324,10 +2324,11 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
2324 SND_PCI_QUIRK(0x106b, 0x4a00, "Macbook 5,2", ALC889_FIXUP_MBA11_VREF), 2324 SND_PCI_QUIRK(0x106b, 0x4a00, "Macbook 5,2", ALC889_FIXUP_MBA11_VREF),
2325 2325
2326 SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC882_FIXUP_EAPD), 2326 SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC882_FIXUP_EAPD),
2327 SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD),
2328 SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3),
2329 SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE), 2327 SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE),
2330 SND_PCI_QUIRK(0x1458, 0xa0b8, "Gigabyte AZ370-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS), 2328 SND_PCI_QUIRK(0x1458, 0xa0b8, "Gigabyte AZ370-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
2329 SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD),
2330 SND_PCI_QUIRK(0x1462, 0xda57, "MSI Z270-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
2331 SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3),
2331 SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX), 2332 SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX),
2332 SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD), 2333 SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD),
2333 SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD), 2334 SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD),
@@ -2342,6 +2343,7 @@ static const struct hda_model_fixup alc882_fixup_models[] = {
2342 {.id = ALC883_FIXUP_ACER_EAPD, .name = "acer-aspire"}, 2343 {.id = ALC883_FIXUP_ACER_EAPD, .name = "acer-aspire"},
2343 {.id = ALC882_FIXUP_INV_DMIC, .name = "inv-dmic"}, 2344 {.id = ALC882_FIXUP_INV_DMIC, .name = "inv-dmic"},
2344 {.id = ALC882_FIXUP_NO_PRIMARY_HP, .name = "no-primary-hp"}, 2345 {.id = ALC882_FIXUP_NO_PRIMARY_HP, .name = "no-primary-hp"},
2346 {.id = ALC1220_FIXUP_GB_DUAL_CODECS, .name = "dual-codecs"},
2345 {} 2347 {}
2346}; 2348};
2347 2349
@@ -5852,7 +5854,11 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5852 SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300), 5854 SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
5853 SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 5855 SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
5854 SND_PCI_QUIRK(0x1043, 0x10c0, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC), 5856 SND_PCI_QUIRK(0x1043, 0x10c0, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
5857 SND_PCI_QUIRK(0x1043, 0x10d0, "ASUS X540LA/X540LJ", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
5855 SND_PCI_QUIRK(0x1043, 0x115d, "Asus 1015E", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 5858 SND_PCI_QUIRK(0x1043, 0x115d, "Asus 1015E", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
5859 SND_PCI_QUIRK(0x1043, 0x11c0, "ASUS X556UR", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
5860 SND_PCI_QUIRK(0x1043, 0x1290, "ASUS X441SA", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE),
5861 SND_PCI_QUIRK(0x1043, 0x12a0, "ASUS X441UV", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE),
5856 SND_PCI_QUIRK(0x1043, 0x12f0, "ASUS X541UV", ALC256_FIXUP_ASUS_MIC), 5862 SND_PCI_QUIRK(0x1043, 0x12f0, "ASUS X541UV", ALC256_FIXUP_ASUS_MIC),
5857 SND_PCI_QUIRK(0x1043, 0x12e0, "ASUS X541SA", ALC256_FIXUP_ASUS_MIC), 5863 SND_PCI_QUIRK(0x1043, 0x12e0, "ASUS X541SA", ALC256_FIXUP_ASUS_MIC),
5858 SND_PCI_QUIRK(0x1043, 0x13b0, "ASUS Z550SA", ALC256_FIXUP_ASUS_MIC), 5864 SND_PCI_QUIRK(0x1043, 0x13b0, "ASUS Z550SA", ALC256_FIXUP_ASUS_MIC),
@@ -5860,13 +5866,10 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5860 SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A), 5866 SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
5861 SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC), 5867 SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
5862 SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW), 5868 SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
5869 SND_PCI_QUIRK(0x1043, 0x1a30, "ASUS X705UD", ALC256_FIXUP_ASUS_MIC),
5863 SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC), 5870 SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC),
5864 SND_PCI_QUIRK(0x1043, 0x1c23, "Asus X55U", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
5865 SND_PCI_QUIRK(0x1043, 0x1bbd, "ASUS Z550MA", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE), 5871 SND_PCI_QUIRK(0x1043, 0x1bbd, "ASUS Z550MA", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
5866 SND_PCI_QUIRK(0x1043, 0x10d0, "ASUS X540LA/X540LJ", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE), 5872 SND_PCI_QUIRK(0x1043, 0x1c23, "Asus X55U", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
5867 SND_PCI_QUIRK(0x1043, 0x11c0, "ASUS X556UR", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
5868 SND_PCI_QUIRK(0x1043, 0x1290, "ASUS X441SA", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE),
5869 SND_PCI_QUIRK(0x1043, 0x12a0, "ASUS X441UV", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE),
5870 SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC), 5873 SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC),
5871 SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2), 5874 SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2),
5872 SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC), 5875 SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC),
@@ -6014,6 +6017,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
6014 {.id = ALC292_FIXUP_TPT440_DOCK, .name = "tpt440-dock"}, 6017 {.id = ALC292_FIXUP_TPT440_DOCK, .name = "tpt440-dock"},
6015 {.id = ALC292_FIXUP_TPT440, .name = "tpt440"}, 6018 {.id = ALC292_FIXUP_TPT440, .name = "tpt440"},
6016 {.id = ALC292_FIXUP_TPT460, .name = "tpt460"}, 6019 {.id = ALC292_FIXUP_TPT460, .name = "tpt460"},
6020 {.id = ALC233_FIXUP_LENOVO_MULTI_CODECS, .name = "dual-codecs"},
6017 {} 6021 {}
6018}; 6022};
6019#define ALC225_STANDARD_PINS \ 6023#define ALC225_STANDARD_PINS \
@@ -6465,8 +6469,11 @@ static int patch_alc269(struct hda_codec *codec)
6465 break; 6469 break;
6466 case 0x10ec0225: 6470 case 0x10ec0225:
6467 case 0x10ec0295: 6471 case 0x10ec0295:
6472 spec->codec_variant = ALC269_TYPE_ALC225;
6473 break;
6468 case 0x10ec0299: 6474 case 0x10ec0299:
6469 spec->codec_variant = ALC269_TYPE_ALC225; 6475 spec->codec_variant = ALC269_TYPE_ALC225;
6476 spec->gen.mixer_nid = 0; /* no loopback on ALC299 */
6470 break; 6477 break;
6471 case 0x10ec0234: 6478 case 0x10ec0234:
6472 case 0x10ec0274: 6479 case 0x10ec0274:
@@ -7338,6 +7345,7 @@ static const struct hda_model_fixup alc662_fixup_models[] = {
7338 {.id = ALC662_FIXUP_ASUS_MODE8, .name = "asus-mode8"}, 7345 {.id = ALC662_FIXUP_ASUS_MODE8, .name = "asus-mode8"},
7339 {.id = ALC662_FIXUP_INV_DMIC, .name = "inv-dmic"}, 7346 {.id = ALC662_FIXUP_INV_DMIC, .name = "inv-dmic"},
7340 {.id = ALC668_FIXUP_DELL_MIC_NO_PRESENCE, .name = "dell-headset-multi"}, 7347 {.id = ALC668_FIXUP_DELL_MIC_NO_PRESENCE, .name = "dell-headset-multi"},
7348 {.id = ALC662_FIXUP_LENOVO_MULTI_CODECS, .name = "dual-codecs"},
7341 {} 7349 {}
7342}; 7350};
7343 7351
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index faa3d38bac0b..6cefdf6c0b75 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -1559,6 +1559,8 @@ static const struct snd_pci_quirk stac9200_fixup_tbl[] = {
1559 "Dell Inspiron 1501", STAC_9200_DELL_M26), 1559 "Dell Inspiron 1501", STAC_9200_DELL_M26),
1560 SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01f6, 1560 SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01f6,
1561 "unknown Dell", STAC_9200_DELL_M26), 1561 "unknown Dell", STAC_9200_DELL_M26),
1562 SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0201,
1563 "Dell Latitude D430", STAC_9200_DELL_M22),
1562 /* Panasonic */ 1564 /* Panasonic */
1563 SND_PCI_QUIRK(0x10f7, 0x8338, "Panasonic CF-74", STAC_9200_PANASONIC), 1565 SND_PCI_QUIRK(0x10f7, 0x8338, "Panasonic CF-74", STAC_9200_PANASONIC),
1564 /* Gateway machines needs EAPD to be set on resume */ 1566 /* Gateway machines needs EAPD to be set on resume */
diff --git a/sound/soc/atmel/atmel-classd.c b/sound/soc/atmel/atmel-classd.c
index 7ae46c2647d4..b7ef8c59b49a 100644
--- a/sound/soc/atmel/atmel-classd.c
+++ b/sound/soc/atmel/atmel-classd.c
@@ -301,6 +301,14 @@ static int atmel_classd_codec_probe(struct snd_soc_codec *codec)
301 return 0; 301 return 0;
302} 302}
303 303
304static int atmel_classd_codec_resume(struct snd_soc_codec *codec)
305{
306 struct snd_soc_card *card = snd_soc_codec_get_drvdata(codec);
307 struct atmel_classd *dd = snd_soc_card_get_drvdata(card);
308
309 return regcache_sync(dd->regmap);
310}
311
304static struct regmap *atmel_classd_codec_get_remap(struct device *dev) 312static struct regmap *atmel_classd_codec_get_remap(struct device *dev)
305{ 313{
306 return dev_get_regmap(dev, NULL); 314 return dev_get_regmap(dev, NULL);
@@ -308,6 +316,7 @@ static struct regmap *atmel_classd_codec_get_remap(struct device *dev)
308 316
309static struct snd_soc_codec_driver soc_codec_dev_classd = { 317static struct snd_soc_codec_driver soc_codec_dev_classd = {
310 .probe = atmel_classd_codec_probe, 318 .probe = atmel_classd_codec_probe,
319 .resume = atmel_classd_codec_resume,
311 .get_regmap = atmel_classd_codec_get_remap, 320 .get_regmap = atmel_classd_codec_get_remap,
312 .component_driver = { 321 .component_driver = {
313 .controls = atmel_classd_snd_controls, 322 .controls = atmel_classd_snd_controls,
diff --git a/sound/soc/codecs/da7213.c b/sound/soc/codecs/da7213.c
index 6dd7578f0bb8..024d83fa6a7f 100644
--- a/sound/soc/codecs/da7213.c
+++ b/sound/soc/codecs/da7213.c
@@ -772,7 +772,7 @@ static int da7213_dai_event(struct snd_soc_dapm_widget *w,
772 ++i; 772 ++i;
773 msleep(50); 773 msleep(50);
774 } 774 }
775 } while ((i < DA7213_SRM_CHECK_RETRIES) & (!srm_lock)); 775 } while ((i < DA7213_SRM_CHECK_RETRIES) && (!srm_lock));
776 776
777 if (!srm_lock) 777 if (!srm_lock)
778 dev_warn(codec->dev, "SRM failed to lock\n"); 778 dev_warn(codec->dev, "SRM failed to lock\n");
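The da7213 fix replaces a bitwise '&' with logical '&&' in the retry-loop condition. With 0/1 operands the truth value happened to match, but '&' never short-circuits, so the right-hand operand is always evaluated. A small demonstration (poll_hw() is a made-up stand-in for a side-effecting check):

    #include <stdio.h>

    static int poll_hw(void)
    {
            puts("hardware polled");        /* side effect we want to skip */
            return 1;
    }

    int main(void)
    {
            int locked = 1;

            if (!locked & poll_hw())        /* '&' evaluates both operands */
                    puts("bitwise: taken");
            if (!locked && poll_hw())       /* '&&' stops at the first 0 */
                    puts("logical: taken");
            return 0;                       /* prints "hardware polled" once */
    }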
diff --git a/sound/soc/codecs/rt286.c b/sound/soc/codecs/rt286.c
index 9c365a7f758d..7899a2cdeb42 100644
--- a/sound/soc/codecs/rt286.c
+++ b/sound/soc/codecs/rt286.c
@@ -1108,6 +1108,13 @@ static const struct dmi_system_id force_combo_jack_table[] = {
1108 DMI_MATCH(DMI_PRODUCT_NAME, "Kabylake Client platform") 1108 DMI_MATCH(DMI_PRODUCT_NAME, "Kabylake Client platform")
1109 } 1109 }
1110 }, 1110 },
1111 {
1112 .ident = "Thinkpad Helix 2nd",
1113 .matches = {
1114 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
1115 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad Helix 2nd")
1116 }
1117 },
1111 1118
1112 { } 1119 { }
1113}; 1120};
diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c
index 2c9dedab5184..bc136d2bd7cd 100644
--- a/sound/soc/generic/simple-card.c
+++ b/sound/soc/generic/simple-card.c
@@ -202,7 +202,7 @@ static int asoc_simple_card_dai_init(struct snd_soc_pcm_runtime *rtd)
202 if (ret < 0) 202 if (ret < 0)
203 return ret; 203 return ret;
204 204
205 ret = asoc_simple_card_init_mic(rtd->card, &priv->hp_jack, PREFIX); 205 ret = asoc_simple_card_init_mic(rtd->card, &priv->mic_jack, PREFIX);
206 if (ret < 0) 206 if (ret < 0)
207 return ret; 207 return ret;
208 208
diff --git a/sound/soc/intel/skylake/skl-sst-ipc.c b/sound/soc/intel/skylake/skl-sst-ipc.c
index 58c525096a7c..498b15345b1a 100644
--- a/sound/soc/intel/skylake/skl-sst-ipc.c
+++ b/sound/soc/intel/skylake/skl-sst-ipc.c
@@ -413,8 +413,11 @@ static void skl_ipc_process_reply(struct sst_generic_ipc *ipc,
413 u32 reply = header.primary & IPC_GLB_REPLY_STATUS_MASK; 413 u32 reply = header.primary & IPC_GLB_REPLY_STATUS_MASK;
414 u64 *ipc_header = (u64 *)(&header); 414 u64 *ipc_header = (u64 *)(&header);
415 struct skl_sst *skl = container_of(ipc, struct skl_sst, ipc); 415 struct skl_sst *skl = container_of(ipc, struct skl_sst, ipc);
416 unsigned long flags;
416 417
418 spin_lock_irqsave(&ipc->dsp->spinlock, flags);
417 msg = skl_ipc_reply_get_msg(ipc, *ipc_header); 419 msg = skl_ipc_reply_get_msg(ipc, *ipc_header);
420 spin_unlock_irqrestore(&ipc->dsp->spinlock, flags);
418 if (msg == NULL) { 421 if (msg == NULL) {
419 dev_dbg(ipc->dev, "ipc: rx list is empty\n"); 422 dev_dbg(ipc->dev, "ipc: rx list is empty\n");
420 return; 423 return;
@@ -456,8 +459,10 @@ static void skl_ipc_process_reply(struct sst_generic_ipc *ipc,
456 } 459 }
457 } 460 }
458 461
462 spin_lock_irqsave(&ipc->dsp->spinlock, flags);
459 list_del(&msg->list); 463 list_del(&msg->list);
460 sst_ipc_tx_msg_reply_complete(ipc, msg); 464 sst_ipc_tx_msg_reply_complete(ipc, msg);
465 spin_unlock_irqrestore(&ipc->dsp->spinlock, flags);
461} 466}
462 467
463irqreturn_t skl_dsp_irq_thread_handler(int irq, void *context) 468irqreturn_t skl_dsp_irq_thread_handler(int irq, void *context)
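The Skylake IPC fix puts both the reply-list lookup and the later list_del()/completion under dsp->spinlock with irqsave, because the tx path touches the same list from other contexts. One way to express the take-from-list pattern in a single critical section (types and find_msg() are illustrative, not the driver's):

    #include <linux/list.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>

    struct ipc_msg {
            struct list_head list;
            u64 header;
    };

    struct my_ipc {
            spinlock_t lock;
            struct list_head rx_list;
    };

    static struct ipc_msg *find_msg(struct my_ipc *ipc, u64 header)
    {
            struct ipc_msg *msg;

            list_for_each_entry(msg, &ipc->rx_list, list)
                    if (msg->header == header)
                            return msg;
            return NULL;
    }

    static struct ipc_msg *reply_take(struct my_ipc *ipc, u64 header)
    {
            struct ipc_msg *msg;
            unsigned long flags;

            spin_lock_irqsave(&ipc->lock, flags);
            msg = find_msg(ipc, header);    /* walk the rx list, locked */
            if (msg)
                    list_del(&msg->list);   /* unlink before unlocking */
            spin_unlock_irqrestore(&ipc->lock, flags);

            return msg;     /* now exclusively owned by the caller */
    }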
diff --git a/sound/soc/intel/skylake/skl-topology.c b/sound/soc/intel/skylake/skl-topology.c
index 3a99712e44a8..64a0f8ed33e1 100644
--- a/sound/soc/intel/skylake/skl-topology.c
+++ b/sound/soc/intel/skylake/skl-topology.c
@@ -2502,7 +2502,7 @@ static int skl_tplg_get_manifest_tkn(struct device *dev,
2502 2502
2503 if (ret < 0) 2503 if (ret < 0)
2504 return ret; 2504 return ret;
2505 tkn_count += ret; 2505 tkn_count = ret;
2506 2506
2507 tuple_size += tkn_count * 2507 tuple_size += tkn_count *
2508 sizeof(struct snd_soc_tplg_vendor_string_elem); 2508 sizeof(struct snd_soc_tplg_vendor_string_elem);
diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c
index 6df3b317a476..4c9b5781282b 100644
--- a/sound/soc/intel/skylake/skl.c
+++ b/sound/soc/intel/skylake/skl.c
@@ -410,7 +410,7 @@ static int skl_free(struct hdac_ext_bus *ebus)
410 struct skl *skl = ebus_to_skl(ebus); 410 struct skl *skl = ebus_to_skl(ebus);
411 struct hdac_bus *bus = ebus_to_hbus(ebus); 411 struct hdac_bus *bus = ebus_to_hbus(ebus);
412 412
413 skl->init_failed = 1; /* to be sure */ 413 skl->init_done = 0; /* to be sure */
414 414
415 snd_hdac_ext_stop_streams(ebus); 415 snd_hdac_ext_stop_streams(ebus);
416 416
@@ -428,8 +428,10 @@ static int skl_free(struct hdac_ext_bus *ebus)
428 428
429 snd_hdac_ext_bus_exit(ebus); 429 snd_hdac_ext_bus_exit(ebus);
430 430
431 cancel_work_sync(&skl->probe_work);
431 if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) 432 if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI))
432 snd_hdac_i915_exit(&ebus->bus); 433 snd_hdac_i915_exit(&ebus->bus);
434
433 return 0; 435 return 0;
434} 436}
435 437
@@ -566,6 +568,84 @@ static const struct hdac_bus_ops bus_core_ops = {
566 .get_response = snd_hdac_bus_get_response, 568 .get_response = snd_hdac_bus_get_response,
567}; 569};
568 570
571static int skl_i915_init(struct hdac_bus *bus)
572{
573 int err;
574
575 /*
576 * The HDMI codec is in GPU so we need to ensure that it is powered
577 * up and ready for probe
578 */
579 err = snd_hdac_i915_init(bus);
580 if (err < 0)
581 return err;
582
583 err = snd_hdac_display_power(bus, true);
584 if (err < 0)
585 dev_err(bus->dev, "Cannot turn on display power on i915\n");
586
587 return err;
588}
589
590static void skl_probe_work(struct work_struct *work)
591{
592 struct skl *skl = container_of(work, struct skl, probe_work);
593 struct hdac_ext_bus *ebus = &skl->ebus;
594 struct hdac_bus *bus = ebus_to_hbus(ebus);
595 struct hdac_ext_link *hlink = NULL;
596 int err;
597
598 if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) {
599 err = skl_i915_init(bus);
600 if (err < 0)
601 return;
602 }
603
604 err = skl_init_chip(bus, true);
605 if (err < 0) {
606 dev_err(bus->dev, "Init chip failed with err: %d\n", err);
607 goto out_err;
608 }
609
610 /* codec detection */
611 if (!bus->codec_mask)
612 dev_info(bus->dev, "no hda codecs found!\n");
613
614 /* create codec instances */
615 err = skl_codec_create(ebus);
616 if (err < 0)
617 goto out_err;
618
619 if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) {
620 err = snd_hdac_display_power(bus, false);
621 if (err < 0) {
622 dev_err(bus->dev, "Cannot turn off display power on i915\n");
623 return;
624 }
625 }
626
627 /* register platform dai and controls */
628 err = skl_platform_register(bus->dev);
629 if (err < 0)
630 return;
631 /*
632 * we are done probing so decrement link counts
633 */
634 list_for_each_entry(hlink, &ebus->hlink_list, list)
635 snd_hdac_ext_bus_link_put(ebus, hlink);
636
637 /* configure PM */
638 pm_runtime_put_noidle(bus->dev);
639 pm_runtime_allow(bus->dev);
640 skl->init_done = 1;
641
642 return;
643
644out_err:
645 if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI))
646 err = snd_hdac_display_power(bus, false);
647}
648
569/* 649/*
570 * constructor 650 * constructor
571 */ 651 */
@@ -593,6 +673,7 @@ static int skl_create(struct pci_dev *pci,
593 snd_hdac_ext_bus_init(ebus, &pci->dev, &bus_core_ops, io_ops); 673 snd_hdac_ext_bus_init(ebus, &pci->dev, &bus_core_ops, io_ops);
594 ebus->bus.use_posbuf = 1; 674 ebus->bus.use_posbuf = 1;
595 skl->pci = pci; 675 skl->pci = pci;
676 INIT_WORK(&skl->probe_work, skl_probe_work);
596 677
597 ebus->bus.bdl_pos_adj = 0; 678 ebus->bus.bdl_pos_adj = 0;
598 679
@@ -601,27 +682,6 @@ static int skl_create(struct pci_dev *pci,
601 return 0; 682 return 0;
602} 683}
603 684
604static int skl_i915_init(struct hdac_bus *bus)
605{
606 int err;
607
608 /*
609 * The HDMI codec is in GPU so we need to ensure that it is powered
610 * up and ready for probe
611 */
612 err = snd_hdac_i915_init(bus);
613 if (err < 0)
614 return err;
615
616 err = snd_hdac_display_power(bus, true);
617 if (err < 0) {
618 dev_err(bus->dev, "Cannot turn on display power on i915\n");
619 return err;
620 }
621
622 return err;
623}
624
625static int skl_first_init(struct hdac_ext_bus *ebus) 685static int skl_first_init(struct hdac_ext_bus *ebus)
626{ 686{
627 struct skl *skl = ebus_to_skl(ebus); 687 struct skl *skl = ebus_to_skl(ebus);
@@ -684,20 +744,7 @@ static int skl_first_init(struct hdac_ext_bus *ebus)
684 /* initialize chip */ 744 /* initialize chip */
685 skl_init_pci(skl); 745 skl_init_pci(skl);
686 746
687 if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) { 747 return skl_init_chip(bus, true);
688 err = skl_i915_init(bus);
689 if (err < 0)
690 return err;
691 }
692
693 skl_init_chip(bus, true);
694
695 /* codec detection */
696 if (!bus->codec_mask) {
697 dev_info(bus->dev, "no hda codecs found!\n");
698 }
699
700 return 0;
701} 748}
702 749
703static int skl_probe(struct pci_dev *pci, 750static int skl_probe(struct pci_dev *pci,
@@ -706,7 +753,6 @@ static int skl_probe(struct pci_dev *pci,
706 struct skl *skl; 753 struct skl *skl;
707 struct hdac_ext_bus *ebus = NULL; 754 struct hdac_ext_bus *ebus = NULL;
708 struct hdac_bus *bus = NULL; 755 struct hdac_bus *bus = NULL;
709 struct hdac_ext_link *hlink = NULL;
710 int err; 756 int err;
711 757
712 /* we use ext core ops, so provide NULL for ops here */ 758 /* we use ext core ops, so provide NULL for ops here */
@@ -729,7 +775,7 @@ static int skl_probe(struct pci_dev *pci,
729 775
730 if (skl->nhlt == NULL) { 776 if (skl->nhlt == NULL) {
731 err = -ENODEV; 777 err = -ENODEV;
732 goto out_display_power_off; 778 goto out_free;
733 } 779 }
734 780
735 err = skl_nhlt_create_sysfs(skl); 781 err = skl_nhlt_create_sysfs(skl);
@@ -760,56 +806,24 @@ static int skl_probe(struct pci_dev *pci,
760 if (bus->mlcap) 806 if (bus->mlcap)
761 snd_hdac_ext_bus_get_ml_capabilities(ebus); 807 snd_hdac_ext_bus_get_ml_capabilities(ebus);
762 808
809 snd_hdac_bus_stop_chip(bus);
810
763 /* create device for soc dmic */ 811 /* create device for soc dmic */
764 err = skl_dmic_device_register(skl); 812 err = skl_dmic_device_register(skl);
765 if (err < 0) 813 if (err < 0)
766 goto out_dsp_free; 814 goto out_dsp_free;
767 815
768 /* register platform dai and controls */ 816 schedule_work(&skl->probe_work);
769 err = skl_platform_register(bus->dev);
770 if (err < 0)
771 goto out_dmic_free;
772
773 /* create codec instances */
774 err = skl_codec_create(ebus);
775 if (err < 0)
776 goto out_unregister;
777
778 if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) {
779 err = snd_hdac_display_power(bus, false);
780 if (err < 0) {
781 dev_err(bus->dev, "Cannot turn off display power on i915\n");
782 return err;
783 }
784 }
785
786 /*
787 * we are done probling so decrement link counts
788 */
789 list_for_each_entry(hlink, &ebus->hlink_list, list)
790 snd_hdac_ext_bus_link_put(ebus, hlink);
791
792 /* configure PM */
793 pm_runtime_put_noidle(bus->dev);
794 pm_runtime_allow(bus->dev);
795 817
796 return 0; 818 return 0;
797 819
798out_unregister:
799 skl_platform_unregister(bus->dev);
800out_dmic_free:
801 skl_dmic_device_unregister(skl);
802out_dsp_free: 820out_dsp_free:
803 skl_free_dsp(skl); 821 skl_free_dsp(skl);
804out_mach_free: 822out_mach_free:
805 skl_machine_device_unregister(skl); 823 skl_machine_device_unregister(skl);
806out_nhlt_free: 824out_nhlt_free:
807 skl_nhlt_free(skl->nhlt); 825 skl_nhlt_free(skl->nhlt);
808out_display_power_off:
809 if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI))
810 snd_hdac_display_power(bus, false);
811out_free: 826out_free:
812 skl->init_failed = 1;
813 skl_free(ebus); 827 skl_free(ebus);
814 828
815 return err; 829 return err;
@@ -828,7 +842,7 @@ static void skl_shutdown(struct pci_dev *pci)
828 842
829 skl = ebus_to_skl(ebus); 843 skl = ebus_to_skl(ebus);
830 844
831 if (skl->init_failed) 845 if (!skl->init_done)
832 return; 846 return;
833 847
834 snd_hdac_ext_stop_streams(ebus); 848 snd_hdac_ext_stop_streams(ebus);
diff --git a/sound/soc/intel/skylake/skl.h b/sound/soc/intel/skylake/skl.h
index a454f6035f3e..2a630fcb7f08 100644
--- a/sound/soc/intel/skylake/skl.h
+++ b/sound/soc/intel/skylake/skl.h
@@ -46,7 +46,7 @@ struct skl {
46 struct hdac_ext_bus ebus; 46 struct hdac_ext_bus ebus;
47 struct pci_dev *pci; 47 struct pci_dev *pci;
48 48
49 unsigned int init_failed:1; /* delayed init failed */ 49 unsigned int init_done:1; /* delayed init status */
50 struct platform_device *dmic_dev; 50 struct platform_device *dmic_dev;
51 struct platform_device *i2s_dev; 51 struct platform_device *i2s_dev;
52 struct snd_soc_platform *platform; 52 struct snd_soc_platform *platform;
@@ -64,6 +64,8 @@ struct skl {
64 const struct firmware *tplg; 64 const struct firmware *tplg;
65 65
66 int supend_active; 66 int supend_active;
67
68 struct work_struct probe_work;
67}; 69};
68 70
69#define skl_to_ebus(s) (&(s)->ebus) 71#define skl_to_ebus(s) (&(s)->ebus)
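The skl.c/skl.h series above moves the tail of PCI probe (i915 power-up, chip init, codec enumeration, platform registration) into a work item, so probe returns quickly and the i915 dependency is handled off the probe path; skl_free() must then cancel_work_sync() before teardown, and the init_failed flag becomes a positive init_done gate for shutdown. The skeleton of that pattern, with illustrative names:

    #include <linux/kernel.h>
    #include <linux/workqueue.h>

    struct mydev {
            struct work_struct probe_work;
            unsigned int init_done:1;
    };

    static void mydev_probe_work(struct work_struct *work)
    {
            struct mydev *d = container_of(work, struct mydev, probe_work);

            /* slow init (firmware, codec enumeration, ...) runs here */
            d->init_done = 1;
    }

    static int mydev_probe(struct mydev *d)
    {
            INIT_WORK(&d->probe_work, mydev_probe_work);
            schedule_work(&d->probe_work);
            return 0;               /* probe returns before init completes */
    }

    static void mydev_remove(struct mydev *d)
    {
            cancel_work_sync(&d->probe_work);   /* never race the teardown */
            if (!d->init_done)
                    return;         /* delayed init never finished */
    }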
diff --git a/sound/soc/sh/rcar/adg.c b/sound/soc/sh/rcar/adg.c
index 66203d107a11..d3b0dc145a56 100644
--- a/sound/soc/sh/rcar/adg.c
+++ b/sound/soc/sh/rcar/adg.c
@@ -507,7 +507,8 @@ static void rsnd_adg_get_clkout(struct rsnd_priv *priv,
507 rbga = rbgx; 507 rbga = rbgx;
508 adg->rbga_rate_for_441khz = rate / div; 508 adg->rbga_rate_for_441khz = rate / div;
509 ckr |= brg_table[i] << 20; 509 ckr |= brg_table[i] << 20;
510 if (req_441kHz_rate) 510 if (req_441kHz_rate &&
511 !(adg_mode_flags(adg) & AUDIO_OUT_48))
511 parent_clk_name = __clk_get_name(clk); 512 parent_clk_name = __clk_get_name(clk);
512 } 513 }
513 } 514 }
@@ -522,7 +523,8 @@ static void rsnd_adg_get_clkout(struct rsnd_priv *priv,
522 rbgb = rbgx; 523 rbgb = rbgx;
523 adg->rbgb_rate_for_48khz = rate / div; 524 adg->rbgb_rate_for_48khz = rate / div;
524 ckr |= brg_table[i] << 16; 525 ckr |= brg_table[i] << 16;
525 if (req_48kHz_rate) 526 if (req_48kHz_rate &&
527 (adg_mode_flags(adg) & AUDIO_OUT_48))
526 parent_clk_name = __clk_get_name(clk); 528 parent_clk_name = __clk_get_name(clk);
527 } 529 }
528 } 530 }
diff --git a/sound/soc/sh/rcar/cmd.c b/sound/soc/sh/rcar/cmd.c
index 7d92a24b7cfa..d879c010cf03 100644
--- a/sound/soc/sh/rcar/cmd.c
+++ b/sound/soc/sh/rcar/cmd.c
@@ -89,6 +89,7 @@ static int rsnd_cmd_init(struct rsnd_mod *mod,
89 dev_dbg(dev, "ctu/mix path = 0x%08x", data); 89 dev_dbg(dev, "ctu/mix path = 0x%08x", data);
90 90
91 rsnd_mod_write(mod, CMD_ROUTE_SLCT, data); 91 rsnd_mod_write(mod, CMD_ROUTE_SLCT, data);
92 rsnd_mod_write(mod, CMD_BUSIF_MODE, rsnd_get_busif_shift(io, mod) | 1);
92 rsnd_mod_write(mod, CMD_BUSIF_DALIGN, rsnd_get_dalign(mod, io)); 93 rsnd_mod_write(mod, CMD_BUSIF_DALIGN, rsnd_get_dalign(mod, io));
93 94
94 rsnd_adg_set_cmd_timsel_gen2(mod, io); 95 rsnd_adg_set_cmd_timsel_gen2(mod, io);
diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
index 1744015408c3..8c1f4e2e0c4f 100644
--- a/sound/soc/sh/rcar/core.c
+++ b/sound/soc/sh/rcar/core.c
@@ -343,6 +343,57 @@ u32 rsnd_get_dalign(struct rsnd_mod *mod, struct rsnd_dai_stream *io)
343 return 0x76543210; 343 return 0x76543210;
344} 344}
345 345
346u32 rsnd_get_busif_shift(struct rsnd_dai_stream *io, struct rsnd_mod *mod)
347{
348 enum rsnd_mod_type playback_mods[] = {
349 RSND_MOD_SRC,
350 RSND_MOD_CMD,
351 RSND_MOD_SSIU,
352 };
353 enum rsnd_mod_type capture_mods[] = {
354 RSND_MOD_CMD,
355 RSND_MOD_SRC,
356 RSND_MOD_SSIU,
357 };
358 struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
359 struct rsnd_mod *tmod = NULL;
360 enum rsnd_mod_type *mods =
361 rsnd_io_is_play(io) ?
362 playback_mods : capture_mods;
363 int i;
364
365 /*
366 * This is needed for 24bit data
367 * We need to shift 8bit
368 *
369 * Linux 24bit data is located as 0x00******
370 * HW 24bit data is located as 0x******00
371 *
372 */
373 switch (runtime->sample_bits) {
374 case 16:
375 return 0;
376 case 32:
377 break;
378 }
379
380 for (i = 0; i < ARRAY_SIZE(playback_mods); i++) {
381 tmod = rsnd_io_to_mod(io, mods[i]);
382 if (tmod)
383 break;
384 }
385
386 if (tmod != mod)
387 return 0;
388
389 if (rsnd_io_is_play(io))
390 return (0 << 20) | /* shift to Left */
391 (8 << 16); /* 8bit */
392 else
393 return (1 << 20) | /* shift to Right */
394 (8 << 16); /* 8bit */
395}
396
346/* 397/*
347 * rsnd_dai functions 398 * rsnd_dai functions
348 */ 399 */
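rsnd_get_busif_shift() exists because, per the comment in the hunk, ALSA keeps 24-bit samples in the low three bytes of a 32-bit slot (0x00******) while the R-Car BUSIF expects them in the high three bytes (0x******00); playback shifts left by 8, capture shifts right, and only the first module in the data path applies the shift. The arithmetic, as a userspace check:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t alsa_sample = 0x00123456;      /* S24 in the low 3 bytes */
            uint32_t hw_sample = alsa_sample << 8;  /* playback: shift left */

            assert(hw_sample == 0x12345600);        /* HW-aligned form */
            assert((hw_sample >> 8) == alsa_sample);/* capture: shift right */
            return 0;
    }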
diff --git a/sound/soc/sh/rcar/gen.c b/sound/soc/sh/rcar/gen.c
index 63b6d3c28021..4b0980728e13 100644
--- a/sound/soc/sh/rcar/gen.c
+++ b/sound/soc/sh/rcar/gen.c
@@ -236,6 +236,7 @@ static int rsnd_gen2_probe(struct rsnd_priv *priv)
236 RSND_GEN_M_REG(SRC_ROUTE_MODE0, 0xc, 0x20), 236 RSND_GEN_M_REG(SRC_ROUTE_MODE0, 0xc, 0x20),
237 RSND_GEN_M_REG(SRC_CTRL, 0x10, 0x20), 237 RSND_GEN_M_REG(SRC_CTRL, 0x10, 0x20),
238 RSND_GEN_M_REG(SRC_INT_ENABLE0, 0x18, 0x20), 238 RSND_GEN_M_REG(SRC_INT_ENABLE0, 0x18, 0x20),
239 RSND_GEN_M_REG(CMD_BUSIF_MODE, 0x184, 0x20),
239 RSND_GEN_M_REG(CMD_BUSIF_DALIGN,0x188, 0x20), 240 RSND_GEN_M_REG(CMD_BUSIF_DALIGN,0x188, 0x20),
240 RSND_GEN_M_REG(CMD_ROUTE_SLCT, 0x18c, 0x20), 241 RSND_GEN_M_REG(CMD_ROUTE_SLCT, 0x18c, 0x20),
241 RSND_GEN_M_REG(CMD_CTRL, 0x190, 0x20), 242 RSND_GEN_M_REG(CMD_CTRL, 0x190, 0x20),
diff --git a/sound/soc/sh/rcar/rsnd.h b/sound/soc/sh/rcar/rsnd.h
index dbf4163427e8..323af41ecfcb 100644
--- a/sound/soc/sh/rcar/rsnd.h
+++ b/sound/soc/sh/rcar/rsnd.h
@@ -73,6 +73,7 @@ enum rsnd_reg {
73 RSND_REG_SCU_SYS_INT_EN0, 73 RSND_REG_SCU_SYS_INT_EN0,
74 RSND_REG_SCU_SYS_INT_EN1, 74 RSND_REG_SCU_SYS_INT_EN1,
75 RSND_REG_CMD_CTRL, 75 RSND_REG_CMD_CTRL,
76 RSND_REG_CMD_BUSIF_MODE,
76 RSND_REG_CMD_BUSIF_DALIGN, 77 RSND_REG_CMD_BUSIF_DALIGN,
77 RSND_REG_CMD_ROUTE_SLCT, 78 RSND_REG_CMD_ROUTE_SLCT,
78 RSND_REG_CMDOUT_TIMSEL, 79 RSND_REG_CMDOUT_TIMSEL,
@@ -204,6 +205,7 @@ void rsnd_bset(struct rsnd_priv *priv, struct rsnd_mod *mod, enum rsnd_reg reg,
204 u32 mask, u32 data); 205 u32 mask, u32 data);
205u32 rsnd_get_adinr_bit(struct rsnd_mod *mod, struct rsnd_dai_stream *io); 206u32 rsnd_get_adinr_bit(struct rsnd_mod *mod, struct rsnd_dai_stream *io);
206u32 rsnd_get_dalign(struct rsnd_mod *mod, struct rsnd_dai_stream *io); 207u32 rsnd_get_dalign(struct rsnd_mod *mod, struct rsnd_dai_stream *io);
208u32 rsnd_get_busif_shift(struct rsnd_dai_stream *io, struct rsnd_mod *mod);
207 209
208/* 210/*
209 * R-Car DMA 211 * R-Car DMA
diff --git a/sound/soc/sh/rcar/src.c b/sound/soc/sh/rcar/src.c
index 20b5b2ec625e..76a477a3ccb5 100644
--- a/sound/soc/sh/rcar/src.c
+++ b/sound/soc/sh/rcar/src.c
@@ -190,11 +190,13 @@ static void rsnd_src_set_convert_rate(struct rsnd_dai_stream *io,
190 struct rsnd_priv *priv = rsnd_mod_to_priv(mod); 190 struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
191 struct device *dev = rsnd_priv_to_dev(priv); 191 struct device *dev = rsnd_priv_to_dev(priv);
192 struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io); 192 struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
193 int is_play = rsnd_io_is_play(io);
193 int use_src = 0; 194 int use_src = 0;
194 u32 fin, fout; 195 u32 fin, fout;
195 u32 ifscr, fsrate, adinr; 196 u32 ifscr, fsrate, adinr;
196 u32 cr, route; 197 u32 cr, route;
197 u32 bsdsr, bsisr; 198 u32 bsdsr, bsisr;
199 u32 i_busif, o_busif, tmp;
198 uint ratio; 200 uint ratio;
199 201
200 if (!runtime) 202 if (!runtime)
@@ -270,6 +272,11 @@ static void rsnd_src_set_convert_rate(struct rsnd_dai_stream *io,
270 break; 272 break;
271 } 273 }
272 274
275 /* BUSIF_MODE */
276 tmp = rsnd_get_busif_shift(io, mod);
277 i_busif = ( is_play ? tmp : 0) | 1;
278 o_busif = (!is_play ? tmp : 0) | 1;
279
273 rsnd_mod_write(mod, SRC_ROUTE_MODE0, route); 280 rsnd_mod_write(mod, SRC_ROUTE_MODE0, route);
274 281
275 rsnd_mod_write(mod, SRC_SRCIR, 1); /* initialize */ 282 rsnd_mod_write(mod, SRC_SRCIR, 1); /* initialize */
@@ -281,8 +288,9 @@ static void rsnd_src_set_convert_rate(struct rsnd_dai_stream *io,
281 rsnd_mod_write(mod, SRC_BSISR, bsisr); 288 rsnd_mod_write(mod, SRC_BSISR, bsisr);
282 rsnd_mod_write(mod, SRC_SRCIR, 0); /* cancel initialize */ 289 rsnd_mod_write(mod, SRC_SRCIR, 0); /* cancel initialize */
283 290
284 rsnd_mod_write(mod, SRC_I_BUSIF_MODE, 1); 291 rsnd_mod_write(mod, SRC_I_BUSIF_MODE, i_busif);
285 rsnd_mod_write(mod, SRC_O_BUSIF_MODE, 1); 292 rsnd_mod_write(mod, SRC_O_BUSIF_MODE, o_busif);
293
286 rsnd_mod_write(mod, SRC_BUSIF_DALIGN, rsnd_get_dalign(mod, io)); 294 rsnd_mod_write(mod, SRC_BUSIF_DALIGN, rsnd_get_dalign(mod, io));
287 295
288 rsnd_adg_set_src_timesel_gen2(mod, io, fin, fout); 296 rsnd_adg_set_src_timesel_gen2(mod, io, fin, fout);
diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c
index 135c5669f796..91e5c07911b4 100644
--- a/sound/soc/sh/rcar/ssi.c
+++ b/sound/soc/sh/rcar/ssi.c
@@ -302,7 +302,7 @@ static void rsnd_ssi_config_init(struct rsnd_mod *mod,
302 * always use 32bit system word. 302 * always use 32bit system word.
303 * see also rsnd_ssi_master_clk_enable() 303 * see also rsnd_ssi_master_clk_enable()
304 */ 304 */
305 cr_own = FORCE | SWL_32 | PDTA; 305 cr_own = FORCE | SWL_32;
306 306
307 if (rdai->bit_clk_inv) 307 if (rdai->bit_clk_inv)
308 cr_own |= SCKP; 308 cr_own |= SCKP;
@@ -550,6 +550,13 @@ static void __rsnd_ssi_interrupt(struct rsnd_mod *mod,
550 struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io); 550 struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
551 u32 *buf = (u32 *)(runtime->dma_area + 551 u32 *buf = (u32 *)(runtime->dma_area +
552 rsnd_dai_pointer_offset(io, 0)); 552 rsnd_dai_pointer_offset(io, 0));
553 int shift = 0;
554
555 switch (runtime->sample_bits) {
556 case 32:
557 shift = 8;
558 break;
559 }
553 560
554 /* 561 /*
555 * 8/16/32 data can be assesse to TDR/RDR register 562 * 8/16/32 data can be assesse to TDR/RDR register
@@ -557,9 +564,9 @@ static void __rsnd_ssi_interrupt(struct rsnd_mod *mod,
557 * see rsnd_ssi_init() 564 * see rsnd_ssi_init()
558 */ 565 */
559 if (rsnd_io_is_play(io)) 566 if (rsnd_io_is_play(io))
560 rsnd_mod_write(mod, SSITDR, *buf); 567 rsnd_mod_write(mod, SSITDR, (*buf) << shift);
561 else 568 else
562 *buf = rsnd_mod_read(mod, SSIRDR); 569 *buf = (rsnd_mod_read(mod, SSIRDR) >> shift);
563 570
564 elapsed = rsnd_dai_pointer_update(io, sizeof(*buf)); 571 elapsed = rsnd_dai_pointer_update(io, sizeof(*buf));
565 } 572 }
@@ -709,6 +716,11 @@ static int rsnd_ssi_dma_remove(struct rsnd_mod *mod,
709 struct rsnd_priv *priv) 716 struct rsnd_priv *priv)
710{ 717{
711 struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod); 718 struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
719 struct rsnd_mod *ssi_parent_mod = rsnd_io_to_mod_ssip(io);
720
721 /* Do nothing for SSI parent mod */
722 if (ssi_parent_mod == mod)
723 return 0;
712 724
713 /* PIO will request IRQ again */ 725 /* PIO will request IRQ again */
714 free_irq(ssi->irq, mod); 726 free_irq(ssi->irq, mod);
diff --git a/sound/soc/sh/rcar/ssiu.c b/sound/soc/sh/rcar/ssiu.c
index 14fafdaf1395..512d238b79e2 100644
--- a/sound/soc/sh/rcar/ssiu.c
+++ b/sound/soc/sh/rcar/ssiu.c
@@ -144,7 +144,8 @@ static int rsnd_ssiu_init_gen2(struct rsnd_mod *mod,
144 (rsnd_io_is_play(io) ? 144 (rsnd_io_is_play(io) ?
145 rsnd_runtime_channel_after_ctu(io) : 145 rsnd_runtime_channel_after_ctu(io) :
146 rsnd_runtime_channel_original(io))); 146 rsnd_runtime_channel_original(io)));
147 rsnd_mod_write(mod, SSI_BUSIF_MODE, 1); 147 rsnd_mod_write(mod, SSI_BUSIF_MODE,
148 rsnd_get_busif_shift(io, mod) | 1);
148 rsnd_mod_write(mod, SSI_BUSIF_DALIGN, 149 rsnd_mod_write(mod, SSI_BUSIF_DALIGN,
149 rsnd_get_dalign(mod, io)); 150 rsnd_get_dalign(mod, io));
150 } 151 }
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index aae099c0e502..754e3ef8d7ae 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -2286,6 +2286,9 @@ static int soc_cleanup_card_resources(struct snd_soc_card *card)
2286 list_for_each_entry(rtd, &card->rtd_list, list) 2286 list_for_each_entry(rtd, &card->rtd_list, list)
2287 flush_delayed_work(&rtd->delayed_work); 2287 flush_delayed_work(&rtd->delayed_work);
2288 2288
2289 /* free the ALSA card at first; this syncs with pending operations */
2290 snd_card_free(card->snd_card);
2291
2289 /* remove and free each DAI */ 2292 /* remove and free each DAI */
2290 soc_remove_dai_links(card); 2293 soc_remove_dai_links(card);
2291 soc_remove_pcm_runtimes(card); 2294 soc_remove_pcm_runtimes(card);
@@ -2300,9 +2303,7 @@ static int soc_cleanup_card_resources(struct snd_soc_card *card)
2300 if (card->remove) 2303 if (card->remove)
2301 card->remove(card); 2304 card->remove(card);
2302 2305
2303 snd_card_free(card->snd_card);
2304 return 0; 2306 return 0;
2305
2306} 2307}
2307 2308
2308/* removes a socdev */ 2309/* removes a socdev */
diff --git a/sound/usb/mixer_us16x08.c b/sound/usb/mixer_us16x08.c
index dc48eedea92e..26ed23b18b77 100644
--- a/sound/usb/mixer_us16x08.c
+++ b/sound/usb/mixer_us16x08.c
@@ -698,16 +698,18 @@ static int snd_us16x08_meter_get(struct snd_kcontrol *kcontrol,
698 struct snd_usb_audio *chip = elem->head.mixer->chip; 698 struct snd_usb_audio *chip = elem->head.mixer->chip;
699 struct snd_us16x08_meter_store *store = elem->private_data; 699 struct snd_us16x08_meter_store *store = elem->private_data;
700 u8 meter_urb[64]; 700 u8 meter_urb[64];
701 char tmp[sizeof(mix_init_msg2)] = {0};
702 701
703 switch (kcontrol->private_value) { 702 switch (kcontrol->private_value) {
704 case 0: 703 case 0: {
705 snd_us16x08_send_urb(chip, (char *)mix_init_msg1, 704 char tmp[sizeof(mix_init_msg1)];
706 sizeof(mix_init_msg1)); 705
706 memcpy(tmp, mix_init_msg1, sizeof(mix_init_msg1));
707 snd_us16x08_send_urb(chip, tmp, 4);
707 snd_us16x08_recv_urb(chip, meter_urb, 708 snd_us16x08_recv_urb(chip, meter_urb,
708 sizeof(meter_urb)); 709 sizeof(meter_urb));
709 kcontrol->private_value++; 710 kcontrol->private_value++;
710 break; 711 break;
712 }
711 case 1: 713 case 1:
712 snd_us16x08_recv_urb(chip, meter_urb, 714 snd_us16x08_recv_urb(chip, meter_urb,
713 sizeof(meter_urb)); 715 sizeof(meter_urb));
@@ -718,15 +720,18 @@ static int snd_us16x08_meter_get(struct snd_kcontrol *kcontrol,
718 sizeof(meter_urb)); 720 sizeof(meter_urb));
719 kcontrol->private_value++; 721 kcontrol->private_value++;
720 break; 722 break;
721 case 3: 723 case 3: {
724 char tmp[sizeof(mix_init_msg2)];
725
722 memcpy(tmp, mix_init_msg2, sizeof(mix_init_msg2)); 726 memcpy(tmp, mix_init_msg2, sizeof(mix_init_msg2));
723 tmp[2] = snd_get_meter_comp_index(store); 727 tmp[2] = snd_get_meter_comp_index(store);
724 snd_us16x08_send_urb(chip, tmp, sizeof(mix_init_msg2)); 728 snd_us16x08_send_urb(chip, tmp, 10);
725 snd_us16x08_recv_urb(chip, meter_urb, 729 snd_us16x08_recv_urb(chip, meter_urb,
726 sizeof(meter_urb)); 730 sizeof(meter_urb));
727 kcontrol->private_value = 0; 731 kcontrol->private_value = 0;
728 break; 732 break;
729 } 733 }
734 }
730 735
731 for (set = 0; set < 6; set++) 736 for (set = 0; set < 6; set++)
732 get_meter_levels_from_urb(set, store, meter_urb); 737 get_meter_levels_from_urb(set, store, meter_urb);
@@ -1135,7 +1140,7 @@ static const struct snd_us16x08_control_params eq_controls[] = {
1135 .control_id = SND_US16X08_ID_EQLOWMIDWIDTH, 1140 .control_id = SND_US16X08_ID_EQLOWMIDWIDTH,
1136 .type = USB_MIXER_U8, 1141 .type = USB_MIXER_U8,
1137 .num_channels = 16, 1142 .num_channels = 16,
1138 .name = "EQ MidQLow Q", 1143 .name = "EQ MidLow Q",
1139 }, 1144 },
1140 { /* EQ mid high gain */ 1145 { /* EQ mid high gain */
1141 .kcontrol_new = &snd_us16x08_eq_gain_ctl, 1146 .kcontrol_new = &snd_us16x08_eq_gain_ctl,
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 01eff6ce6401..d7b0b0a3a2db 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1364,7 +1364,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
1364 /* Amanero Combo384 USB interface with native DSD support */ 1364 /* Amanero Combo384 USB interface with native DSD support */
1365 case USB_ID(0x16d0, 0x071a): 1365 case USB_ID(0x16d0, 0x071a):
1366 if (fp->altsetting == 2) { 1366 if (fp->altsetting == 2) {
1367 switch (chip->dev->descriptor.bcdDevice) { 1367 switch (le16_to_cpu(chip->dev->descriptor.bcdDevice)) {
1368 case 0x199: 1368 case 0x199:
1369 return SNDRV_PCM_FMTBIT_DSD_U32_LE; 1369 return SNDRV_PCM_FMTBIT_DSD_U32_LE;
1370 case 0x19b: 1370 case 0x19b:
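The quirks fix matters on big-endian hosts: USB descriptor fields such as bcdDevice are little-endian on the wire, so comparing the raw field against host-order constants like 0x199 only worked on little-endian CPUs. A portable reconstruction of what le16_to_cpu() guarantees (helper name is illustrative):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static uint16_t le16_to_cpu_portable(uint16_t raw)
    {
            const uint8_t *b = (const uint8_t *)&raw;

            return (uint16_t)(b[0] | (b[1] << 8));  /* byte 0 is the wire LSB */
    }

    int main(void)
    {
            uint16_t raw;
            const uint8_t wire[2] = { 0x99, 0x01 }; /* bcdDevice 0x0199 */

            memcpy(&raw, wire, sizeof(raw));
            /* prints 0x0199 on both little- and big-endian hosts */
            printf("bcdDevice = 0x%04x\n", le16_to_cpu_portable(raw));
            return 0;
    }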
diff --git a/sound/x86/intel_hdmi_audio.c b/sound/x86/intel_hdmi_audio.c
index 664b7fe206d6..b11d3920b9a5 100644
--- a/sound/x86/intel_hdmi_audio.c
+++ b/sound/x86/intel_hdmi_audio.c
@@ -1809,10 +1809,6 @@ static int hdmi_lpe_audio_probe(struct platform_device *pdev)
1809 pdata->notify_pending = false; 1809 pdata->notify_pending = false;
1810 spin_unlock_irq(&pdata->lpe_audio_slock); 1810 spin_unlock_irq(&pdata->lpe_audio_slock);
1811 1811
1812 /* runtime PM isn't enabled as default, since it won't save much on
1813 * BYT/CHT devices; user who want the runtime PM should adjust the
1814 * power/ontrol and power/autosuspend_delay_ms sysfs entries instead
1815 */
1816 pm_runtime_use_autosuspend(&pdev->dev); 1812 pm_runtime_use_autosuspend(&pdev->dev);
1817 pm_runtime_mark_last_busy(&pdev->dev); 1813 pm_runtime_mark_last_busy(&pdev->dev);
1818 pm_runtime_set_active(&pdev->dev); 1814 pm_runtime_set_active(&pdev->dev);
diff --git a/tools/arch/arm/include/uapi/asm/kvm.h b/tools/arch/arm/include/uapi/asm/kvm.h
index 6ebd3e6a1fd1..5e3c673fa3f4 100644
--- a/tools/arch/arm/include/uapi/asm/kvm.h
+++ b/tools/arch/arm/include/uapi/asm/kvm.h
@@ -27,6 +27,8 @@
27#define __KVM_HAVE_IRQ_LINE 27#define __KVM_HAVE_IRQ_LINE
28#define __KVM_HAVE_READONLY_MEM 28#define __KVM_HAVE_READONLY_MEM
29 29
30#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
31
30#define KVM_REG_SIZE(id) \ 32#define KVM_REG_SIZE(id) \
31 (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT)) 33 (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
32 34
@@ -114,6 +116,8 @@ struct kvm_debug_exit_arch {
114}; 116};
115 117
116struct kvm_sync_regs { 118struct kvm_sync_regs {
119 /* Used with KVM_CAP_ARM_USER_IRQ */
120 __u64 device_irq_level;
117}; 121};
118 122
119struct kvm_arch_memory_slot { 123struct kvm_arch_memory_slot {
@@ -192,13 +196,17 @@ struct kvm_arch_memory_slot {
192#define KVM_DEV_ARM_VGIC_GRP_REDIST_REGS 5 196#define KVM_DEV_ARM_VGIC_GRP_REDIST_REGS 5
193#define KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS 6 197#define KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS 6
194#define KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO 7 198#define KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO 7
199#define KVM_DEV_ARM_VGIC_GRP_ITS_REGS 8
195#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT 10 200#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT 10
196#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK \ 201#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK \
197 (0x3fffffULL << KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT) 202 (0x3fffffULL << KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT)
198#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INTID_MASK 0x3ff 203#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INTID_MASK 0x3ff
199#define VGIC_LEVEL_INFO_LINE_LEVEL 0 204#define VGIC_LEVEL_INFO_LINE_LEVEL 0
200 205
201#define KVM_DEV_ARM_VGIC_CTRL_INIT 0 206#define KVM_DEV_ARM_VGIC_CTRL_INIT 0
207#define KVM_DEV_ARM_ITS_SAVE_TABLES 1
208#define KVM_DEV_ARM_ITS_RESTORE_TABLES 2
209#define KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES 3
202 210
203/* KVM_IRQ_LINE irq field index values */ 211/* KVM_IRQ_LINE irq field index values */
204#define KVM_ARM_IRQ_TYPE_SHIFT 24 212#define KVM_ARM_IRQ_TYPE_SHIFT 24
diff --git a/tools/arch/arm64/include/uapi/asm/kvm.h b/tools/arch/arm64/include/uapi/asm/kvm.h
index c2860358ae3e..70eea2ecc663 100644
--- a/tools/arch/arm64/include/uapi/asm/kvm.h
+++ b/tools/arch/arm64/include/uapi/asm/kvm.h
@@ -39,6 +39,8 @@
39#define __KVM_HAVE_IRQ_LINE 39#define __KVM_HAVE_IRQ_LINE
40#define __KVM_HAVE_READONLY_MEM 40#define __KVM_HAVE_READONLY_MEM
41 41
42#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
43
42#define KVM_REG_SIZE(id) \ 44#define KVM_REG_SIZE(id) \
43 (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT)) 45 (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
44 46
@@ -143,6 +145,8 @@ struct kvm_debug_exit_arch {
143#define KVM_GUESTDBG_USE_HW (1 << 17) 145#define KVM_GUESTDBG_USE_HW (1 << 17)
144 146
145struct kvm_sync_regs { 147struct kvm_sync_regs {
148 /* Used with KVM_CAP_ARM_USER_IRQ */
149 __u64 device_irq_level;
146}; 150};
147 151
148struct kvm_arch_memory_slot { 152struct kvm_arch_memory_slot {
@@ -212,13 +216,17 @@ struct kvm_arch_memory_slot {
212#define KVM_DEV_ARM_VGIC_GRP_REDIST_REGS 5 216#define KVM_DEV_ARM_VGIC_GRP_REDIST_REGS 5
213#define KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS 6 217#define KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS 6
214#define KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO 7 218#define KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO 7
219#define KVM_DEV_ARM_VGIC_GRP_ITS_REGS 8
215#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT 10 220#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT 10
216#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK \ 221#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK \
217 (0x3fffffULL << KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT) 222 (0x3fffffULL << KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT)
218#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INTID_MASK 0x3ff 223#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INTID_MASK 0x3ff
219#define VGIC_LEVEL_INFO_LINE_LEVEL 0 224#define VGIC_LEVEL_INFO_LINE_LEVEL 0
220 225
221#define KVM_DEV_ARM_VGIC_CTRL_INIT 0 226#define KVM_DEV_ARM_VGIC_CTRL_INIT 0
227#define KVM_DEV_ARM_ITS_SAVE_TABLES 1
228#define KVM_DEV_ARM_ITS_RESTORE_TABLES 2
229#define KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES 3
222 230
223/* Device Control API on vcpu fd */ 231/* Device Control API on vcpu fd */
224#define KVM_ARM_VCPU_PMU_V3_CTRL 0 232#define KVM_ARM_VCPU_PMU_V3_CTRL 0
diff --git a/tools/arch/powerpc/include/uapi/asm/kvm.h b/tools/arch/powerpc/include/uapi/asm/kvm.h
index 4edbe4bb0e8b..07fbeb927834 100644
--- a/tools/arch/powerpc/include/uapi/asm/kvm.h
+++ b/tools/arch/powerpc/include/uapi/asm/kvm.h
@@ -29,6 +29,9 @@
29#define __KVM_HAVE_IRQ_LINE 29#define __KVM_HAVE_IRQ_LINE
30#define __KVM_HAVE_GUEST_DEBUG 30#define __KVM_HAVE_GUEST_DEBUG
31 31
32/* Not always available, but if it is, this is the correct offset. */
33#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
34
32struct kvm_regs { 35struct kvm_regs {
33 __u64 pc; 36 __u64 pc;
34 __u64 cr; 37 __u64 cr;
diff --git a/tools/arch/s390/include/uapi/asm/kvm.h b/tools/arch/s390/include/uapi/asm/kvm.h
index 7f4fd65e9208..3dd2a1d308dd 100644
--- a/tools/arch/s390/include/uapi/asm/kvm.h
+++ b/tools/arch/s390/include/uapi/asm/kvm.h
@@ -26,6 +26,8 @@
26#define KVM_DEV_FLIC_ADAPTER_REGISTER 6 26#define KVM_DEV_FLIC_ADAPTER_REGISTER 6
27#define KVM_DEV_FLIC_ADAPTER_MODIFY 7 27#define KVM_DEV_FLIC_ADAPTER_MODIFY 7
28#define KVM_DEV_FLIC_CLEAR_IO_IRQ 8 28#define KVM_DEV_FLIC_CLEAR_IO_IRQ 8
29#define KVM_DEV_FLIC_AISM 9
30#define KVM_DEV_FLIC_AIRQ_INJECT 10
29/* 31/*
30 * We can have up to 4*64k pending subchannels + 8 adapter interrupts, 32 * We can have up to 4*64k pending subchannels + 8 adapter interrupts,
31 * as well as up to ASYNC_PF_PER_VCPU*KVM_MAX_VCPUS pfault done interrupts. 33 * as well as up to ASYNC_PF_PER_VCPU*KVM_MAX_VCPUS pfault done interrupts.
@@ -41,7 +43,14 @@ struct kvm_s390_io_adapter {
41 __u8 isc; 43 __u8 isc;
42 __u8 maskable; 44 __u8 maskable;
43 __u8 swap; 45 __u8 swap;
44 __u8 pad; 46 __u8 flags;
47};
48
49#define KVM_S390_ADAPTER_SUPPRESSIBLE 0x01
50
51struct kvm_s390_ais_req {
52 __u8 isc;
53 __u16 mode;
45}; 54};
46 55
47#define KVM_S390_IO_ADAPTER_MASK 1 56#define KVM_S390_IO_ADAPTER_MASK 1
@@ -110,6 +119,7 @@ struct kvm_s390_vm_cpu_machine {
110#define KVM_S390_VM_CPU_FEAT_CMMA 10 119#define KVM_S390_VM_CPU_FEAT_CMMA 10
111#define KVM_S390_VM_CPU_FEAT_PFMFI 11 120#define KVM_S390_VM_CPU_FEAT_PFMFI 11
112#define KVM_S390_VM_CPU_FEAT_SIGPIF 12 121#define KVM_S390_VM_CPU_FEAT_SIGPIF 12
122#define KVM_S390_VM_CPU_FEAT_KSS 13
113struct kvm_s390_vm_cpu_feat { 123struct kvm_s390_vm_cpu_feat {
114 __u64 feat[16]; 124 __u64 feat[16];
115}; 125};
@@ -198,6 +208,10 @@ struct kvm_guest_debug_arch {
198#define KVM_SYNC_VRS (1UL << 6) 208#define KVM_SYNC_VRS (1UL << 6)
199#define KVM_SYNC_RICCB (1UL << 7) 209#define KVM_SYNC_RICCB (1UL << 7)
200#define KVM_SYNC_FPRS (1UL << 8) 210#define KVM_SYNC_FPRS (1UL << 8)
211#define KVM_SYNC_GSCB (1UL << 9)
212/* length and alignment of the sdnx as a power of two */
213#define SDNXC 8
214#define SDNXL (1UL << SDNXC)
201/* definition of registers in kvm_run */ 215/* definition of registers in kvm_run */
202struct kvm_sync_regs { 216struct kvm_sync_regs {
203 __u64 prefix; /* prefix register */ 217 __u64 prefix; /* prefix register */
@@ -218,8 +232,16 @@ struct kvm_sync_regs {
218 }; 232 };
219 __u8 reserved[512]; /* for future vector expansion */ 233 __u8 reserved[512]; /* for future vector expansion */
220 __u32 fpc; /* valid on KVM_SYNC_VRS or KVM_SYNC_FPRS */ 234 __u32 fpc; /* valid on KVM_SYNC_VRS or KVM_SYNC_FPRS */
221 __u8 padding[52]; /* riccb needs to be 64byte aligned */ 235 __u8 padding1[52]; /* riccb needs to be 64byte aligned */
222 __u8 riccb[64]; /* runtime instrumentation controls block */ 236 __u8 riccb[64]; /* runtime instrumentation controls block */
237 __u8 padding2[192]; /* sdnx needs to be 256byte aligned */
238 union {
239 __u8 sdnx[SDNXL]; /* state description annex */
240 struct {
241 __u64 reserved1[2];
242 __u64 gscb[4];
243 };
244 };
223}; 245};
224 246
225#define KVM_REG_S390_TODPR (KVM_REG_S390 | KVM_REG_SIZE_U32 | 0x1) 247#define KVM_REG_S390_TODPR (KVM_REG_S390 | KVM_REG_SIZE_U32 | 0x1)
diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h
index 0fe00446f9ca..2701e5f8145b 100644
--- a/tools/arch/x86/include/asm/cpufeatures.h
+++ b/tools/arch/x86/include/asm/cpufeatures.h
@@ -202,6 +202,8 @@
 #define X86_FEATURE_AVX512_4VNNIW (7*32+16) /* AVX-512 Neural Network Instructions */
 #define X86_FEATURE_AVX512_4FMAPS (7*32+17) /* AVX-512 Multiply Accumulation Single precision */
 
+#define X86_FEATURE_MBA ( 7*32+18) /* Memory Bandwidth Allocation */
+
 /* Virtualization flags: Linux defined, word 8 */
 #define X86_FEATURE_TPR_SHADOW  ( 8*32+ 0) /* Intel TPR Shadow */
 #define X86_FEATURE_VNMI        ( 8*32+ 1) /* Intel Virtual NMI */
diff --git a/tools/arch/x86/include/asm/disabled-features.h b/tools/arch/x86/include/asm/disabled-features.h
index 85599ad4d024..5dff775af7cd 100644
--- a/tools/arch/x86/include/asm/disabled-features.h
+++ b/tools/arch/x86/include/asm/disabled-features.h
@@ -36,6 +36,12 @@
 # define DISABLE_OSPKE	(1<<(X86_FEATURE_OSPKE & 31))
 #endif /* CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS */
 
+#ifdef CONFIG_X86_5LEVEL
+# define DISABLE_LA57	0
+#else
+# define DISABLE_LA57	(1<<(X86_FEATURE_LA57 & 31))
+#endif
+
 /*
  * Make sure to add features to the correct mask
  */
@@ -55,7 +61,7 @@
 #define DISABLED_MASK13	0
 #define DISABLED_MASK14	0
 #define DISABLED_MASK15	0
-#define DISABLED_MASK16	(DISABLE_PKU|DISABLE_OSPKE)
+#define DISABLED_MASK16	(DISABLE_PKU|DISABLE_OSPKE|DISABLE_LA57)
 #define DISABLED_MASK17	0
 #define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18)
 
diff --git a/tools/arch/x86/include/asm/required-features.h b/tools/arch/x86/include/asm/required-features.h
index fac9a5c0abe9..d91ba04dd007 100644
--- a/tools/arch/x86/include/asm/required-features.h
+++ b/tools/arch/x86/include/asm/required-features.h
@@ -53,6 +53,12 @@
 # define NEED_MOVBE	0
 #endif
 
+#ifdef CONFIG_X86_5LEVEL
+# define NEED_LA57	(1<<(X86_FEATURE_LA57 & 31))
+#else
+# define NEED_LA57	0
+#endif
+
 #ifdef CONFIG_X86_64
 #ifdef CONFIG_PARAVIRT
 /* Paravirtualized systems may not have PSE or PGE available */
@@ -98,7 +104,7 @@
 #define REQUIRED_MASK13	0
 #define REQUIRED_MASK14	0
 #define REQUIRED_MASK15	0
-#define REQUIRED_MASK16	0
+#define REQUIRED_MASK16	(NEED_LA57)
 #define REQUIRED_MASK17	0
 #define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18)
 
diff --git a/tools/arch/x86/include/uapi/asm/kvm.h b/tools/arch/x86/include/uapi/asm/kvm.h
index 739c0c594022..c2824d02ba37 100644
--- a/tools/arch/x86/include/uapi/asm/kvm.h
+++ b/tools/arch/x86/include/uapi/asm/kvm.h
@@ -9,6 +9,9 @@
 #include <linux/types.h>
 #include <linux/ioctl.h>
 
+#define KVM_PIO_PAGE_OFFSET 1
+#define KVM_COALESCED_MMIO_PAGE_OFFSET 2
+
 #define DE_VECTOR 0
 #define DB_VECTOR 1
 #define BP_VECTOR 3
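
The two new defines mirror the kernel's own uapi header: they index pages of a vcpu's mmap window, where page 0 holds the shared kvm_run structure, page 1 the PIO data page, and page 2 the coalesced-MMIO ring. A minimal sketch of mapping the PIO page, assuming vcpu_fd was obtained via ioctl(vm_fd, KVM_CREATE_VCPU, 0):

	#include <linux/kvm.h>
	#include <sys/mman.h>
	#include <unistd.h>
	#include <stdio.h>

	static void *map_pio_page(int vcpu_fd)
	{
		long psz = sysconf(_SC_PAGESIZE);
		void *p = mmap(NULL, psz, PROT_READ | PROT_WRITE, MAP_SHARED,
			       vcpu_fd, KVM_PIO_PAGE_OFFSET * psz);

		if (p == MAP_FAILED)
			perror("mmap pio page");
		return p;
	}
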
diff --git a/tools/arch/x86/include/uapi/asm/vmx.h b/tools/arch/x86/include/uapi/asm/vmx.h
index 14458658e988..690a2dcf4078 100644
--- a/tools/arch/x86/include/uapi/asm/vmx.h
+++ b/tools/arch/x86/include/uapi/asm/vmx.h
@@ -76,7 +76,11 @@
 #define EXIT_REASON_WBINVD              54
 #define EXIT_REASON_XSETBV              55
 #define EXIT_REASON_APIC_WRITE          56
+#define EXIT_REASON_RDRAND              57
 #define EXIT_REASON_INVPCID             58
+#define EXIT_REASON_VMFUNC              59
+#define EXIT_REASON_ENCLS               60
+#define EXIT_REASON_RDSEED              61
 #define EXIT_REASON_PML_FULL            62
 #define EXIT_REASON_XSAVES              63
 #define EXIT_REASON_XRSTORS             64
@@ -90,6 +94,7 @@
 	{ EXIT_REASON_TASK_SWITCH, "TASK_SWITCH" }, \
 	{ EXIT_REASON_CPUID, "CPUID" }, \
 	{ EXIT_REASON_HLT, "HLT" }, \
+	{ EXIT_REASON_INVD, "INVD" }, \
 	{ EXIT_REASON_INVLPG, "INVLPG" }, \
 	{ EXIT_REASON_RDPMC, "RDPMC" }, \
 	{ EXIT_REASON_RDTSC, "RDTSC" }, \
@@ -108,6 +113,8 @@
 	{ EXIT_REASON_IO_INSTRUCTION, "IO_INSTRUCTION" }, \
 	{ EXIT_REASON_MSR_READ, "MSR_READ" }, \
 	{ EXIT_REASON_MSR_WRITE, "MSR_WRITE" }, \
+	{ EXIT_REASON_INVALID_STATE, "INVALID_STATE" }, \
+	{ EXIT_REASON_MSR_LOAD_FAIL, "MSR_LOAD_FAIL" }, \
 	{ EXIT_REASON_MWAIT_INSTRUCTION, "MWAIT_INSTRUCTION" }, \
 	{ EXIT_REASON_MONITOR_TRAP_FLAG, "MONITOR_TRAP_FLAG" }, \
 	{ EXIT_REASON_MONITOR_INSTRUCTION, "MONITOR_INSTRUCTION" }, \
@@ -115,20 +122,24 @@
 	{ EXIT_REASON_MCE_DURING_VMENTRY, "MCE_DURING_VMENTRY" }, \
 	{ EXIT_REASON_TPR_BELOW_THRESHOLD, "TPR_BELOW_THRESHOLD" }, \
 	{ EXIT_REASON_APIC_ACCESS, "APIC_ACCESS" }, \
-	{ EXIT_REASON_GDTR_IDTR, "GDTR_IDTR" }, \
-	{ EXIT_REASON_LDTR_TR, "LDTR_TR" }, \
+	{ EXIT_REASON_EOI_INDUCED, "EOI_INDUCED" }, \
+	{ EXIT_REASON_GDTR_IDTR, "GDTR_IDTR" }, \
+	{ EXIT_REASON_LDTR_TR, "LDTR_TR" }, \
 	{ EXIT_REASON_EPT_VIOLATION, "EPT_VIOLATION" }, \
 	{ EXIT_REASON_EPT_MISCONFIG, "EPT_MISCONFIG" }, \
 	{ EXIT_REASON_INVEPT, "INVEPT" }, \
+	{ EXIT_REASON_RDTSCP, "RDTSCP" }, \
 	{ EXIT_REASON_PREEMPTION_TIMER, "PREEMPTION_TIMER" }, \
+	{ EXIT_REASON_INVVPID, "INVVPID" }, \
 	{ EXIT_REASON_WBINVD, "WBINVD" }, \
+	{ EXIT_REASON_XSETBV, "XSETBV" }, \
 	{ EXIT_REASON_APIC_WRITE, "APIC_WRITE" }, \
-	{ EXIT_REASON_EOI_INDUCED, "EOI_INDUCED" }, \
-	{ EXIT_REASON_INVALID_STATE, "INVALID_STATE" }, \
-	{ EXIT_REASON_MSR_LOAD_FAIL, "MSR_LOAD_FAIL" }, \
-	{ EXIT_REASON_INVD, "INVD" }, \
-	{ EXIT_REASON_INVVPID, "INVVPID" }, \
+	{ EXIT_REASON_RDRAND, "RDRAND" }, \
 	{ EXIT_REASON_INVPCID, "INVPCID" }, \
+	{ EXIT_REASON_VMFUNC, "VMFUNC" }, \
+	{ EXIT_REASON_ENCLS, "ENCLS" }, \
+	{ EXIT_REASON_RDSEED, "RDSEED" }, \
+	{ EXIT_REASON_PML_FULL, "PML_FULL" }, \
 	{ EXIT_REASON_XSAVES, "XSAVES" }, \
 	{ EXIT_REASON_XRSTORS, "XRSTORS" }
 
diff --git a/tools/build/feature/test-bpf.c b/tools/build/feature/test-bpf.c
index ebc6dceddb58..7598361ef1f1 100644
--- a/tools/build/feature/test-bpf.c
+++ b/tools/build/feature/test-bpf.c
@@ -29,6 +29,7 @@ int main(void)
 	attr.log_size = 0;
 	attr.log_level = 0;
 	attr.kern_version = 0;
+	attr.prog_flags = 0;
 
 	/*
 	 * Test existence of __NR_bpf and BPF_PROG_LOAD.
diff --git a/tools/include/linux/filter.h b/tools/include/linux/filter.h
index 390d7c9685fd..4ce25d43e8e3 100644
--- a/tools/include/linux/filter.h
+++ b/tools/include/linux/filter.h
@@ -208,6 +208,16 @@
 		.off   = OFF,		\
 		.imm   = IMM })
 
+/* Unconditional jumps, goto pc + off16 */
+
+#define BPF_JMP_A(OFF)			\
+	((struct bpf_insn) {		\
+		.code  = BPF_JMP | BPF_JA,	\
+		.dst_reg = 0,		\
+		.src_reg = 0,		\
+		.off   = OFF,		\
+		.imm   = 0 })
+
 /* Function call */
 
 #define BPF_EMIT_CALL(FUNC)		\
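
A short sketch of the new macro next to the pre-existing helpers from this header (BPF_MOV64_IMM, BPF_EXIT_INSN); the jump skips one instruction, so the program returns 0, assuming the tools/include copy of linux/filter.h is on the include path:

	#include <linux/filter.h>

	struct bpf_insn prog[] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),	/* r0 = 0 */
		BPF_JMP_A(1),			/* goto pc + 1 */
		BPF_MOV64_IMM(BPF_REG_0, 1),	/* jumped over */
		BPF_EXIT_INSN(),		/* return r0 */
	};
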
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index e553529929f6..94dfa9def355 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -132,6 +132,13 @@ enum bpf_attach_type {
  */
 #define BPF_F_ALLOW_OVERRIDE	(1U << 0)
 
+/* If BPF_F_STRICT_ALIGNMENT is used in BPF_PROG_LOAD command, the
+ * verifier will perform strict alignment checking as if the kernel
+ * has been built with CONFIG_EFFICIENT_UNALIGNED_ACCESS not set,
+ * and NET_IP_ALIGN defined to 2.
+ */
+#define BPF_F_STRICT_ALIGNMENT	(1U << 0)
+
 #define BPF_PSEUDO_MAP_FD	1
 
 /* flags for BPF_MAP_UPDATE_ELEM command */
@@ -177,6 +184,7 @@ union bpf_attr {
 		__u32		log_size;	/* size of user buffer */
 		__aligned_u64	log_buf;	/* user supplied buffer */
 		__u32		kern_version;	/* checked when prog_type=kprobe */
+		__u32		prog_flags;
 	};
 
 	struct { /* anonymous struct used by BPF_OBJ_* commands */
@@ -481,8 +489,7 @@ union bpf_attr {
  * u32 bpf_get_socket_uid(skb)
  *     Get the owner uid of the socket stored inside sk_buff.
  *     @skb: pointer to skb
- *     Return: uid of the socket owner on success or 0 if the socket pointer
- *     inside sk_buff is NULL
+ *     Return: uid of the socket owner on success or overflowuid if failed.
  */
 #define __BPF_FUNC_MAPPER(FN)	\
 	FN(unspec),		\
diff --git a/tools/include/uapi/linux/stat.h b/tools/include/uapi/linux/stat.h
index d538897b8e08..17b10304c393 100644
--- a/tools/include/uapi/linux/stat.h
+++ b/tools/include/uapi/linux/stat.h
@@ -48,17 +48,13 @@
  * tv_sec holds the number of seconds before (negative) or after (positive)
  * 00:00:00 1st January 1970 UTC.
  *
- * tv_nsec holds a number of nanoseconds before (0..-999,999,999 if tv_sec is
- * negative) or after (0..999,999,999 if tv_sec is positive) the tv_sec time.
- *
- * Note that if both tv_sec and tv_nsec are non-zero, then the two values must
- * either be both positive or both negative.
+ * tv_nsec holds a number of nanoseconds (0..999,999,999) after the tv_sec time.
  *
  * __reserved is held in case we need a yet finer resolution.
  */
 struct statx_timestamp {
 	__s64	tv_sec;
-	__s32	tv_nsec;
+	__u32	tv_nsec;
 	__s32	__reserved;
 };
 
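
Because tv_nsec is now an unsigned forward offset in 0..999,999,999 even when tv_sec is negative, an absolute nanosecond value can be formed without the sign gymnastics the old comment described; a minimal sketch:

	#include <linux/stat.h>

	static inline long long statx_ts_to_ns(const struct statx_timestamp *ts)
	{
		/* valid for pre-epoch times too: tv_nsec always counts forward */
		return ts->tv_sec * 1000000000LL + ts->tv_nsec;
	}
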
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index 4fe444b8092e..6e178987af8e 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -117,6 +117,28 @@ int bpf_load_program(enum bpf_prog_type type, const struct bpf_insn *insns,
 	return sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
 }
 
+int bpf_verify_program(enum bpf_prog_type type, const struct bpf_insn *insns,
+		       size_t insns_cnt, int strict_alignment,
+		       const char *license, __u32 kern_version,
+		       char *log_buf, size_t log_buf_sz)
+{
+	union bpf_attr attr;
+
+	bzero(&attr, sizeof(attr));
+	attr.prog_type = type;
+	attr.insn_cnt = (__u32)insns_cnt;
+	attr.insns = ptr_to_u64(insns);
+	attr.license = ptr_to_u64(license);
+	attr.log_buf = ptr_to_u64(log_buf);
+	attr.log_size = log_buf_sz;
+	attr.log_level = 2;
+	log_buf[0] = 0;
+	attr.kern_version = kern_version;
+	attr.prog_flags = strict_alignment ? BPF_F_STRICT_ALIGNMENT : 0;
+
+	return sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
+}
+
 int bpf_map_update_elem(int fd, const void *key, const void *value,
 			__u64 flags)
 {
diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h
index edb4daeff7a5..972bd8333eb7 100644
--- a/tools/lib/bpf/bpf.h
+++ b/tools/lib/bpf/bpf.h
@@ -35,6 +35,10 @@ int bpf_load_program(enum bpf_prog_type type, const struct bpf_insn *insns,
 		     size_t insns_cnt, const char *license,
 		     __u32 kern_version, char *log_buf,
 		     size_t log_buf_sz);
+int bpf_verify_program(enum bpf_prog_type type, const struct bpf_insn *insns,
+		       size_t insns_cnt, int strict_alignment,
+		       const char *license, __u32 kern_version,
+		       char *log_buf, size_t log_buf_sz);
 
 int bpf_map_update_elem(int fd, const void *key, const void *value,
 			__u64 flags);
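
A sketch of the call side, roughly how a verifier test would drive the new entry point; prog and prog_cnt are assumed supplied by the caller, and note that the implementation above forces log_level = 2, so log_buf always receives the full verifier trace:

	#include <stdio.h>
	#include <unistd.h>
	#include "bpf.h"	/* tools/lib/bpf */

	static int try_load_strict(const struct bpf_insn *prog, size_t prog_cnt)
	{
		static char log_buf[65536];
		int fd = bpf_verify_program(BPF_PROG_TYPE_SOCKET_FILTER,
					    prog, prog_cnt, 1 /* strict */,
					    "GPL", 0, log_buf, sizeof(log_buf));

		if (fd < 0) {
			fprintf(stderr, "verifier rejected:\n%s\n", log_buf);
			return -1;
		}
		close(fd);
		return 0;
	}
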
diff --git a/tools/objtool/builtin-check.c b/tools/objtool/builtin-check.c
index 282a60368b14..5f66697fe1e0 100644
--- a/tools/objtool/builtin-check.c
+++ b/tools/objtool/builtin-check.c
@@ -192,7 +192,8 @@ static int __dead_end_function(struct objtool_file *file, struct symbol *func,
 		"complete_and_exit",
 		"kvm_spurious_fault",
 		"__reiserfs_panic",
-		"lbug_with_loc"
+		"lbug_with_loc",
+		"fortify_panic",
 	};
 
 	if (func->bind == STB_WEAK)
diff --git a/tools/perf/Documentation/perf-probe.txt b/tools/perf/Documentation/perf-probe.txt
index e6c9902c6d82..165c2b1d4317 100644
--- a/tools/perf/Documentation/perf-probe.txt
+++ b/tools/perf/Documentation/perf-probe.txt
@@ -240,9 +240,13 @@ Add a probe on schedule() function 12th line with recording cpu local variable:
 or
  ./perf probe --add='schedule:12 cpu'
 
-this will add one or more probes which has the name start with "schedule".
+Add one or more probes which has the name start with "schedule".
 
-Add probes on lines in schedule() function which calls update_rq_clock().
+ ./perf probe schedule*
+or
+ ./perf probe --add='schedule*'
+
+Add probes on lines in schedule() function which calls update_rq_clock().
 
  ./perf probe 'schedule;update_rq_clock*'
 or
diff --git a/tools/perf/Documentation/perf-script-perl.txt b/tools/perf/Documentation/perf-script-perl.txt
index dfbb506d2c34..142606c0ec9c 100644
--- a/tools/perf/Documentation/perf-script-perl.txt
+++ b/tools/perf/Documentation/perf-script-perl.txt
@@ -39,7 +39,7 @@ EVENT HANDLERS
 When perf script is invoked using a trace script, a user-defined
 'handler function' is called for each event in the trace. If there's
 no handler function defined for a given event type, the event is
-ignored (or passed to a 'trace_handled' function, see below) and the
+ignored (or passed to a 'trace_unhandled' function, see below) and the
 next event is processed.
 
 Most of the event's field values are passed as arguments to the
diff --git a/tools/perf/Documentation/perf-script-python.txt b/tools/perf/Documentation/perf-script-python.txt
index 54acba221558..51ec2d20068a 100644
--- a/tools/perf/Documentation/perf-script-python.txt
+++ b/tools/perf/Documentation/perf-script-python.txt
@@ -149,10 +149,8 @@ def raw_syscalls__sys_enter(event_name, context, common_cpu,
 		print "id=%d, args=%s\n" % \
 		(id, args),
 
-def trace_unhandled(event_name, context, common_cpu, common_secs, common_nsecs,
-		common_pid, common_comm):
-	print_header(event_name, common_cpu, common_secs, common_nsecs,
-		common_pid, common_comm)
+def trace_unhandled(event_name, context, event_fields_dict):
+	print ' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())])
 
 def print_header(event_name, cpu, secs, nsecs, pid, comm):
 	print "%-20s %5u %05u.%09u %8u %-20s " % \
@@ -321,7 +319,7 @@ So those are the essential steps in writing and running a script. The
 process can be generalized to any tracepoint or set of tracepoints
 you're interested in - basically find the tracepoint(s) you're
 interested in by looking at the list of available events shown by
-'perf list' and/or look in /sys/kernel/debug/tracing events for
+'perf list' and/or look in /sys/kernel/debug/tracing/events/ for
 detailed event and field info, record the corresponding trace data
 using 'perf record', passing it the list of interesting events,
 generate a skeleton script using 'perf script -g python' and modify the
@@ -334,7 +332,7 @@ right place, you can have your script listed alongside the other
 scripts listed by the 'perf script -l' command e.g.:
 
 ----
-root@tropicana:~# perf script -l
+# perf script -l
 List of available trace scripts:
   wakeup-latency                       system-wide min/max/avg wakeup latency
   rw-by-file <comm>                    r/w activity for a program, by file
@@ -383,8 +381,6 @@ source tree:
 
 ----
 # ls -al kernel-source/tools/perf/scripts/python
-
-root@tropicana:/home/trz/src/tip# ls -al tools/perf/scripts/python
 total 32
 drwxr-xr-x 4 trz trz 4096 2010-01-26 22:30 .
 drwxr-xr-x 4 trz trz 4096 2010-01-26 22:29 ..
@@ -399,7 +395,7 @@ otherwise your script won't show up at run-time), 'perf script -l'
 should show a new entry for your script:
 
 ----
-root@tropicana:~# perf script -l
+# perf script -l
 List of available trace scripts:
   wakeup-latency                       system-wide min/max/avg wakeup latency
   rw-by-file <comm>                    r/w activity for a program, by file
@@ -437,7 +433,7 @@ EVENT HANDLERS
 When perf script is invoked using a trace script, a user-defined
 'handler function' is called for each event in the trace. If there's
 no handler function defined for a given event type, the event is
-ignored (or passed to a 'trace_handled' function, see below) and the
+ignored (or passed to a 'trace_unhandled' function, see below) and the
 next event is processed.
 
 Most of the event's field values are passed as arguments to the
@@ -532,7 +528,7 @@ can implement a set of optional functions:
 gives scripts a chance to do setup tasks:
 
 ----
-def trace_begin:
+def trace_begin():
     pass
 ----
 
@@ -541,7 +537,7 @@ def trace_begin:
 as display results:
 
 ----
-def trace_end:
+def trace_end():
     pass
 ----
 
@@ -550,8 +546,7 @@ def trace_end:
 of common arguments are passed into it:
 
 ----
-def trace_unhandled(event_name, context, common_cpu, common_secs,
-		common_nsecs, common_pid, common_comm):
+def trace_unhandled(event_name, context, event_fields_dict):
     pass
 ----
 
diff --git a/tools/perf/Documentation/perf-script.txt b/tools/perf/Documentation/perf-script.txt
index cb0eda3925e6..3517e204a2b3 100644
--- a/tools/perf/Documentation/perf-script.txt
+++ b/tools/perf/Documentation/perf-script.txt
@@ -311,6 +311,10 @@ include::itrace.txt[]
 	Set the maximum number of program blocks to print with brstackasm for
 	each sample.
 
+--inline::
+	If a callgraph address belongs to an inlined function, the inline stack
+	will be printed. Each entry has function name and file/line.
+
 SEE ALSO
 --------
 linkperf:perf-record[1], linkperf:perf-script-perl[1],
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
index 8354d04b392f..1f4fbc9a3292 100644
--- a/tools/perf/Makefile.config
+++ b/tools/perf/Makefile.config
@@ -19,18 +19,18 @@ CFLAGS := $(EXTRA_CFLAGS) $(EXTRA_WARNINGS)
 
 include $(srctree)/tools/scripts/Makefile.arch
 
-$(call detected_var,ARCH)
+$(call detected_var,SRCARCH)
 
 NO_PERF_REGS := 1
 
 # Additional ARCH settings for ppc
-ifeq ($(ARCH),powerpc)
+ifeq ($(SRCARCH),powerpc)
   NO_PERF_REGS := 0
   LIBUNWIND_LIBS := -lunwind -lunwind-ppc64
 endif
 
 # Additional ARCH settings for x86
-ifeq ($(ARCH),x86)
+ifeq ($(SRCARCH),x86)
   $(call detected,CONFIG_X86)
   ifeq (${IS_64_BIT}, 1)
     CFLAGS += -DHAVE_ARCH_X86_64_SUPPORT -DHAVE_SYSCALL_TABLE -I$(OUTPUT)arch/x86/include/generated
@@ -43,12 +43,12 @@ ifeq ($(ARCH),x86)
   NO_PERF_REGS := 0
 endif
 
-ifeq ($(ARCH),arm)
+ifeq ($(SRCARCH),arm)
   NO_PERF_REGS := 0
   LIBUNWIND_LIBS = -lunwind -lunwind-arm
 endif
 
-ifeq ($(ARCH),arm64)
+ifeq ($(SRCARCH),arm64)
   NO_PERF_REGS := 0
   LIBUNWIND_LIBS = -lunwind -lunwind-aarch64
 endif
@@ -61,7 +61,7 @@ endif
 # Disable it on all other architectures in case libdw unwind
 # support is detected in system. Add supported architectures
 # to the check.
-ifneq ($(ARCH),$(filter $(ARCH),x86 arm))
+ifneq ($(SRCARCH),$(filter $(SRCARCH),x86 arm))
   NO_LIBDW_DWARF_UNWIND := 1
 endif
 
@@ -115,9 +115,9 @@ endif
 FEATURE_CHECK_CFLAGS-libbabeltrace := $(LIBBABELTRACE_CFLAGS)
 FEATURE_CHECK_LDFLAGS-libbabeltrace := $(LIBBABELTRACE_LDFLAGS) -lbabeltrace-ctf
 
-FEATURE_CHECK_CFLAGS-bpf = -I. -I$(srctree)/tools/include -I$(srctree)/tools/arch/$(ARCH)/include/uapi -I$(srctree)/tools/include/uapi
+FEATURE_CHECK_CFLAGS-bpf = -I. -I$(srctree)/tools/include -I$(srctree)/tools/arch/$(SRCARCH)/include/uapi -I$(srctree)/tools/include/uapi
 # include ARCH specific config
--include $(src-perf)/arch/$(ARCH)/Makefile
+-include $(src-perf)/arch/$(SRCARCH)/Makefile
 
 ifdef PERF_HAVE_ARCH_REGS_QUERY_REGISTER_OFFSET
   CFLAGS += -DHAVE_ARCH_REGS_QUERY_REGISTER_OFFSET
@@ -228,12 +228,12 @@ ifeq ($(DEBUG),0)
 endif
 
 INC_FLAGS += -I$(src-perf)/util/include
-INC_FLAGS += -I$(src-perf)/arch/$(ARCH)/include
+INC_FLAGS += -I$(src-perf)/arch/$(SRCARCH)/include
 INC_FLAGS += -I$(srctree)/tools/include/uapi
 INC_FLAGS += -I$(srctree)/tools/include/
-INC_FLAGS += -I$(srctree)/tools/arch/$(ARCH)/include/uapi
-INC_FLAGS += -I$(srctree)/tools/arch/$(ARCH)/include/
-INC_FLAGS += -I$(srctree)/tools/arch/$(ARCH)/
+INC_FLAGS += -I$(srctree)/tools/arch/$(SRCARCH)/include/uapi
+INC_FLAGS += -I$(srctree)/tools/arch/$(SRCARCH)/include/
+INC_FLAGS += -I$(srctree)/tools/arch/$(SRCARCH)/
 
 # $(obj-perf)      for generated common-cmds.h
 # $(obj-perf)/util for generated bison/flex headers
@@ -355,7 +355,7 @@ ifndef NO_LIBELF
 
   ifndef NO_DWARF
     ifeq ($(origin PERF_HAVE_DWARF_REGS), undefined)
-      msg := $(warning DWARF register mappings have not been defined for architecture $(ARCH), DWARF support disabled);
+      msg := $(warning DWARF register mappings have not been defined for architecture $(SRCARCH), DWARF support disabled);
       NO_DWARF := 1
     else
       CFLAGS += -DHAVE_DWARF_SUPPORT $(LIBDW_CFLAGS)
@@ -380,7 +380,7 @@ ifndef NO_LIBELF
         CFLAGS += -DHAVE_BPF_PROLOGUE
         $(call detected,CONFIG_BPF_PROLOGUE)
       else
-        msg := $(warning BPF prologue is not supported by architecture $(ARCH), missing regs_query_register_offset());
+        msg := $(warning BPF prologue is not supported by architecture $(SRCARCH), missing regs_query_register_offset());
       endif
     else
       msg := $(warning DWARF support is off, BPF prologue is disabled);
@@ -406,7 +406,7 @@ ifdef PERF_HAVE_JITDUMP
   endif
 endif
 
-ifeq ($(ARCH),powerpc)
+ifeq ($(SRCARCH),powerpc)
   ifndef NO_DWARF
     CFLAGS += -DHAVE_SKIP_CALLCHAIN_IDX
   endif
@@ -487,7 +487,7 @@ else
 endif
 
 ifndef NO_LOCAL_LIBUNWIND
-  ifeq ($(ARCH),$(filter $(ARCH),arm arm64))
+  ifeq ($(SRCARCH),$(filter $(SRCARCH),arm arm64))
     $(call feature_check,libunwind-debug-frame)
     ifneq ($(feature-libunwind-debug-frame), 1)
       msg := $(warning No debug_frame support found in libunwind);
@@ -740,7 +740,7 @@ ifeq (${IS_64_BIT}, 1)
       NO_PERF_READ_VDSO32 := 1
     endif
   endif
-  ifneq ($(ARCH), x86)
+  ifneq ($(SRCARCH), x86)
     NO_PERF_READ_VDSOX32 := 1
   endif
   ifndef NO_PERF_READ_VDSOX32
@@ -769,7 +769,7 @@ ifdef LIBBABELTRACE
 endif
 
 ifndef NO_AUXTRACE
-  ifeq ($(ARCH),x86)
+  ifeq ($(SRCARCH),x86)
     ifeq ($(feature-get_cpuid), 0)
       msg := $(warning Your gcc lacks the __get_cpuid() builtin, disables support for auxtrace/Intel PT, please install a newer gcc);
       NO_AUXTRACE := 1
@@ -872,7 +872,7 @@ sysconfdir = $(prefix)/etc
 ETC_PERFCONFIG = etc/perfconfig
 endif
 ifndef lib
-ifeq ($(ARCH)$(IS_64_BIT), x861)
+ifeq ($(SRCARCH)$(IS_64_BIT), x861)
 lib = lib64
 else
 lib = lib
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index 79fe31f20a17..5008f51a08a2 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -226,7 +226,7 @@ endif
 
 ifeq ($(config),0)
 include $(srctree)/tools/scripts/Makefile.arch
--include arch/$(ARCH)/Makefile
+-include arch/$(SRCARCH)/Makefile
 endif
 
 # The FEATURE_DUMP_EXPORT holds location of the actual
diff --git a/tools/perf/arch/Build b/tools/perf/arch/Build
index 109eb75cf7de..d9b6af837c7d 100644
--- a/tools/perf/arch/Build
+++ b/tools/perf/arch/Build
@@ -1,2 +1,2 @@
 libperf-y += common.o
-libperf-y += $(ARCH)/
+libperf-y += $(SRCARCH)/
diff --git a/tools/perf/arch/common.c b/tools/perf/arch/common.c
index 837067f48a4c..6b40e9f01740 100644
--- a/tools/perf/arch/common.c
+++ b/tools/perf/arch/common.c
@@ -26,6 +26,7 @@ const char *const arm64_triplets[] = {
 
 const char *const powerpc_triplets[] = {
 	"powerpc-unknown-linux-gnu-",
+	"powerpc-linux-gnu-",
 	"powerpc64-unknown-linux-gnu-",
 	"powerpc64-linux-gnu-",
 	"powerpc64le-linux-gnu-",
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index d05aec491cff..4761b0d7fcb5 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -2494,6 +2494,8 @@ int cmd_script(int argc, const char **argv)
 		   "Enable kernel symbol demangling"),
 	OPT_STRING(0, "time", &script.time_str, "str",
 		   "Time span of interest (start,stop)"),
+	OPT_BOOLEAN(0, "inline", &symbol_conf.inline_name,
+		    "Show inline function"),
 	OPT_END()
 	};
 	const char * const script_subcommands[] = { "record", "report", NULL };
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index a935b5023732..ad9324d1daf9 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -1578,6 +1578,7 @@ static void print_header(int argc, const char **argv)
 static void print_footer(void)
 {
 	FILE *output = stat_config.output;
+	int n;
 
 	if (!null_run)
 		fprintf(output, "\n");
@@ -1590,7 +1591,9 @@ static void print_footer(void)
 	}
 	fprintf(output, "\n\n");
 
-	if (print_free_counters_hint)
+	if (print_free_counters_hint &&
+	    sysctl__read_int("kernel/nmi_watchdog", &n) >= 0 &&
+	    n > 0)
 		fprintf(output,
 "Some events weren't counted. Try disabling the NMI watchdog:\n"
 "	echo 0 > /proc/sys/kernel/nmi_watchdog\n"
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index d014350adc52..4b2a5d298197 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -681,6 +681,10 @@ static struct syscall_fmt {
 	{ .name	    = "mlockall",   .errmsg = true,
 	  .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, },
 	{ .name	    = "mmap",	    .hexret = true,
+/* The standard mmap maps to old_mmap on s390x */
+#if defined(__s390x__)
+	.alias = "old_mmap",
+#endif
 	  .arg_scnprintf = { [0] = SCA_HEX,	  /* addr */
 			     [2] = SCA_MMAP_PROT, /* prot */
 			     [3] = SCA_MMAP_FLAGS, /* flags */ }, },
diff --git a/tools/perf/pmu-events/Build b/tools/perf/pmu-events/Build
index 9213a1273697..999a4e878162 100644
--- a/tools/perf/pmu-events/Build
+++ b/tools/perf/pmu-events/Build
@@ -2,7 +2,7 @@ hostprogs := jevents
 
 jevents-y += json.o jsmn.o jevents.o
 pmu-events-y += pmu-events.o
-JDIR = pmu-events/arch/$(ARCH)
+JDIR = pmu-events/arch/$(SRCARCH)
 JSON = $(shell [ -d $(JDIR) ] && \
 	find $(JDIR) -name '*.json' -o -name 'mapfile.csv')
 #
@@ -10,4 +10,4 @@ JSON = $(shell [ -d $(JDIR) ] && \
 # directory and create tables in pmu-events.c.
 #
 $(OUTPUT)pmu-events/pmu-events.c: $(JSON) $(JEVENTS)
-	$(Q)$(call echo-cmd,gen)$(JEVENTS) $(ARCH) pmu-events/arch $(OUTPUT)pmu-events/pmu-events.c $(V)
+	$(Q)$(call echo-cmd,gen)$(JEVENTS) $(SRCARCH) pmu-events/arch $(OUTPUT)pmu-events/pmu-events.c $(V)
diff --git a/tools/perf/tests/Build b/tools/perf/tests/Build
index af58ebc243ef..84222bdb8689 100644
--- a/tools/perf/tests/Build
+++ b/tools/perf/tests/Build
@@ -75,7 +75,7 @@ $(OUTPUT)tests/llvm-src-relocation.c: tests/bpf-script-test-relocation.c tests/B
 	$(Q)sed -e 's/"/\\"/g' -e 's/\(.*\)/"\1\\n"/g' $< >> $@
 	$(Q)echo ';' >> $@
 
-ifeq ($(ARCH),$(filter $(ARCH),x86 arm arm64 powerpc))
+ifeq ($(SRCARCH),$(filter $(SRCARCH),x86 arm arm64 powerpc))
 perf-$(CONFIG_DWARF_UNWIND) += dwarf-unwind.o
 endif
 
diff --git a/tools/perf/tests/bp_signal.c b/tools/perf/tests/bp_signal.c
index e7664fe3bd33..8ba2c4618fe9 100644
--- a/tools/perf/tests/bp_signal.c
+++ b/tools/perf/tests/bp_signal.c
@@ -288,3 +288,17 @@ int test__bp_signal(int subtest __maybe_unused)
 	return count1 == 1 && overflows == 3 && count2 == 3 && overflows_2 == 3 && count3 == 2 ?
 		TEST_OK : TEST_FAIL;
 }
+
+bool test__bp_signal_is_supported(void)
+{
+/*
+ * The powerpc so far does not have support to even create
+ * instruction breakpoint using the perf event interface.
+ * Once it's there we can release this.
+ */
+#ifdef __powerpc__
+	return false;
+#else
+	return true;
+#endif
+}
diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c
index 9e08d297f1a9..3ccfd58a8c3c 100644
--- a/tools/perf/tests/builtin-test.c
+++ b/tools/perf/tests/builtin-test.c
@@ -97,10 +97,12 @@ static struct test generic_tests[] = {
 	{
 		.desc = "Breakpoint overflow signal handler",
 		.func = test__bp_signal,
+		.is_supported = test__bp_signal_is_supported,
 	},
 	{
 		.desc = "Breakpoint overflow sampling",
 		.func = test__bp_signal_overflow,
+		.is_supported = test__bp_signal_is_supported,
 	},
 	{
 		.desc = "Number of exit events of a simple workload",
@@ -401,6 +403,11 @@ static int __cmd_test(int argc, const char *argv[], struct intlist *skiplist)
 		if (!perf_test__matches(t, curr, argc, argv))
 			continue;
 
+		if (t->is_supported && !t->is_supported()) {
+			pr_debug("%2d: %-*s: Disabled\n", i, width, t->desc);
+			continue;
+		}
+
 		pr_info("%2d: %-*s:", i, width, t->desc);
 
 		if (intlist__find(skiplist, i)) {
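
The hook keeps arch gating declarative: a table entry points at a predicate, and __cmd_test() reports anything the predicate rejects as Disabled instead of running it. An illustrative entry with hypothetical names (test__widget is not part of this patch; tests.h is assumed included for struct test and TEST_OK):

	static int test__widget(int subtest)
	{
		(void)subtest;
		return TEST_OK;
	}

	static bool test__widget_is_supported(void)
	{
	#ifdef __powerpc__
		return false;	/* hypothetical arch restriction */
	#else
		return true;
	#endif
	}

	static struct test widget_tests[] = {
		{
			.desc         = "Hypothetical widget behaviour",
			.func         = test__widget,
			.is_supported = test__widget_is_supported,
		},
		{ .func = NULL, },
	};
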
diff --git a/tools/perf/tests/code-reading.c b/tools/perf/tests/code-reading.c
index 1f14e7612cbb..94b7c7b02bde 100644
--- a/tools/perf/tests/code-reading.c
+++ b/tools/perf/tests/code-reading.c
@@ -229,6 +229,8 @@ static int read_object_code(u64 addr, size_t len, u8 cpumode,
 	unsigned char buf2[BUFSZ];
 	size_t ret_len;
 	u64 objdump_addr;
+	const char *objdump_name;
+	char decomp_name[KMOD_DECOMP_LEN];
 	int ret;
 
 	pr_debug("Reading object code for memory address: %#"PRIx64"\n", addr);
@@ -289,9 +291,25 @@ static int read_object_code(u64 addr, size_t len, u8 cpumode,
 		state->done[state->done_cnt++] = al.map->start;
 	}
 
+	objdump_name = al.map->dso->long_name;
+	if (dso__needs_decompress(al.map->dso)) {
+		if (dso__decompress_kmodule_path(al.map->dso, objdump_name,
+						 decomp_name,
+						 sizeof(decomp_name)) < 0) {
+			pr_debug("decompression failed\n");
+			return -1;
+		}
+
+		objdump_name = decomp_name;
+	}
+
 	/* Read the object code using objdump */
 	objdump_addr = map__rip_2objdump(al.map, al.addr);
-	ret = read_via_objdump(al.map->dso->long_name, objdump_addr, buf2, len);
+	ret = read_via_objdump(objdump_name, objdump_addr, buf2, len);
+
+	if (dso__needs_decompress(al.map->dso))
+		unlink(objdump_name);
+
 	if (ret > 0) {
 		/*
 		 * The kernel maps are inaccurate - assume objdump is right in
diff --git a/tools/perf/tests/task-exit.c b/tools/perf/tests/task-exit.c
index 32873ec91a4e..cf00ebad2ef5 100644
--- a/tools/perf/tests/task-exit.c
+++ b/tools/perf/tests/task-exit.c
@@ -83,7 +83,7 @@ int test__task_exit(int subtest __maybe_unused)
 
 	evsel = perf_evlist__first(evlist);
 	evsel->attr.task = 1;
-	evsel->attr.sample_freq = 0;
+	evsel->attr.sample_freq = 1;
 	evsel->attr.inherit = 0;
 	evsel->attr.watermark = 0;
 	evsel->attr.wakeup_events = 1;
diff --git a/tools/perf/tests/tests.h b/tools/perf/tests/tests.h
index 631859629403..577363809c9b 100644
--- a/tools/perf/tests/tests.h
+++ b/tools/perf/tests/tests.h
@@ -34,6 +34,7 @@ struct test {
 		int (*get_nr)(void);
 		const char *(*get_desc)(int subtest);
 	} subtest;
+	bool (*is_supported)(void);
 };
 
 /* Tests */
@@ -99,6 +100,8 @@ const char *test__clang_subtest_get_desc(int subtest);
 int test__clang_subtest_get_nr(void);
 int test__unit_number__scnprint(int subtest);
 
+bool test__bp_signal_is_supported(void);
+
 #if defined(__arm__) || defined(__aarch64__)
 #ifdef HAVE_DWARF_UNWIND_SUPPORT
 struct thread;
diff --git a/tools/perf/ui/hist.c b/tools/perf/ui/hist.c
index 59addd52d9cd..ddb2c6fbdf91 100644
--- a/tools/perf/ui/hist.c
+++ b/tools/perf/ui/hist.c
@@ -210,6 +210,8 @@ static int __hpp__sort_acc(struct hist_entry *a, struct hist_entry *b,
 			return 0;
 
 		ret = b->callchain->max_depth - a->callchain->max_depth;
+		if (callchain_param.order == ORDER_CALLER)
+			ret = -ret;
 	}
 	return ret;
 }
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index 683f8340460c..ddbd56df9187 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -239,10 +239,20 @@ static int jump__parse(struct arch *arch __maybe_unused, struct ins_operands *op
 	const char *s = strchr(ops->raw, '+');
 	const char *c = strchr(ops->raw, ',');
 
-	if (c++ != NULL)
+	/*
+	 * skip over possible up to 2 operands to get to address, e.g.:
+	 * tbnz	 w0, #26, ffff0000083cd190 <security_file_permission+0xd0>
+	 */
+	if (c++ != NULL) {
 		ops->target.addr = strtoull(c, NULL, 16);
-	else
+		if (!ops->target.addr) {
+			c = strchr(c, ',');
+			if (c++ != NULL)
+				ops->target.addr = strtoull(c, NULL, 16);
+		}
+	} else {
 		ops->target.addr = strtoull(ops->raw, NULL, 16);
+	}
 
 	if (s++ != NULL) {
 		ops->target.offset = strtoull(s, NULL, 16);
@@ -257,10 +267,27 @@ static int jump__parse(struct arch *arch __maybe_unused, struct ins_operands *op
 static int jump__scnprintf(struct ins *ins, char *bf, size_t size,
 			   struct ins_operands *ops)
 {
+	const char *c = strchr(ops->raw, ',');
+
 	if (!ops->target.addr || ops->target.offset < 0)
 		return ins__raw_scnprintf(ins, bf, size, ops);
 
-	return scnprintf(bf, size, "%-6.6s %" PRIx64, ins->name, ops->target.offset);
+	if (c != NULL) {
+		const char *c2 = strchr(c + 1, ',');
+
+		/* check for 3-op insn */
+		if (c2 != NULL)
+			c = c2;
+		c++;
+
+		/* mirror arch objdump's space-after-comma style */
+		if (*c == ' ')
+			c++;
+	}
+
+	return scnprintf(bf, size, "%-6.6s %.*s%" PRIx64,
+			 ins->name, c ? c - ops->raw : 0, ops->raw,
+			 ops->target.offset);
 }
 
 static struct ins_ops jump_ops = {
@@ -1294,6 +1321,7 @@ static int dso__disassemble_filename(struct dso *dso, char *filename, size_t fil
 	char linkname[PATH_MAX];
 	char *build_id_filename;
 	char *build_id_path = NULL;
+	char *pos;
 
 	if (dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS &&
 	    !dso__is_kcore(dso))
@@ -1313,7 +1341,14 @@ static int dso__disassemble_filename(struct dso *dso, char *filename, size_t fil
 	if (!build_id_path)
 		return -1;
 
-	dirname(build_id_path);
+	/*
+	 * old style build-id cache has name of XX/XXXXXXX.. while
+	 * new style has XX/XXXXXXX../{elf,kallsyms,vdso}.
+	 * extract the build-id part of dirname in the new style only.
+	 */
+	pos = strrchr(build_id_path, '/');
+	if (pos && strlen(pos) < SBUILD_ID_SIZE - 2)
+		dirname(build_id_path);
 
 	if (dso__is_kcore(dso) ||
 	    readlink(build_id_path, linkname, sizeof(linkname)) < 0 ||
@@ -1396,31 +1431,10 @@ int symbol__disassemble(struct symbol *sym, struct map *map, const char *arch_na
 				    sizeof(symfs_filename));
 		}
 	} else if (dso__needs_decompress(dso)) {
-		char tmp[PATH_MAX];
-		struct kmod_path m;
-		int fd;
-		bool ret;
-
-		if (kmod_path__parse_ext(&m, symfs_filename))
-			goto out;
-
-		snprintf(tmp, PATH_MAX, "/tmp/perf-kmod-XXXXXX");
-
-		fd = mkstemp(tmp);
-		if (fd < 0) {
-			free(m.ext);
-			goto out;
-		}
-
-		ret = decompress_to_file(m.ext, symfs_filename, fd);
-
-		if (ret)
-			pr_err("Cannot decompress %s %s\n", m.ext, symfs_filename);
-
-		free(m.ext);
-		close(fd);
+		char tmp[KMOD_DECOMP_LEN];
 
-		if (!ret)
+		if (dso__decompress_kmodule_path(dso, symfs_filename,
+						 tmp, sizeof(tmp)) < 0)
 			goto out;
 
 		strcpy(symfs_filename, tmp);
@@ -1429,7 +1443,7 @@ int symbol__disassemble(struct symbol *sym, struct map *map, const char *arch_na
 	snprintf(command, sizeof(command),
 		 "%s %s%s --start-address=0x%016" PRIx64
 		 " --stop-address=0x%016" PRIx64
-		 " -l -d %s %s -C %s 2>/dev/null|grep -v %s:|expand",
+		 " -l -d %s %s -C \"%s\" 2>/dev/null|grep -v \"%s:\"|expand",
 		 objdump_path ? objdump_path : "objdump",
 		 disassembler_style ? "-M " : "",
 		 disassembler_style ? disassembler_style : "",
diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c
index 168cc49654e7..e0148b081bdf 100644
--- a/tools/perf/util/build-id.c
+++ b/tools/perf/util/build-id.c
@@ -278,51 +278,6 @@ char *dso__build_id_filename(const struct dso *dso, char *bf, size_t size)
 	return bf;
 }
 
-bool dso__build_id_is_kmod(const struct dso *dso, char *bf, size_t size)
-{
-	char *id_name = NULL, *ch;
-	struct stat sb;
-	char sbuild_id[SBUILD_ID_SIZE];
-
-	if (!dso->has_build_id)
-		goto err;
-
-	build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id);
-	id_name = build_id_cache__linkname(sbuild_id, NULL, 0);
-	if (!id_name)
-		goto err;
-	if (access(id_name, F_OK))
-		goto err;
-	if (lstat(id_name, &sb) == -1)
-		goto err;
-	if ((size_t)sb.st_size > size - 1)
-		goto err;
-	if (readlink(id_name, bf, size - 1) < 0)
-		goto err;
-
-	bf[sb.st_size] = '\0';
-
-	/*
-	 * link should be:
-	 * ../../lib/modules/4.4.0-rc4/kernel/net/ipv4/netfilter/nf_nat_ipv4.ko/a09fe3eb3147dafa4e3b31dbd6257e4d696bdc92
-	 */
-	ch = strrchr(bf, '/');
-	if (!ch)
-		goto err;
-	if (ch - 3 < bf)
-		goto err;
-
-	free(id_name);
-	return strncmp(".ko", ch - 3, 3) == 0;
-err:
-	pr_err("Invalid build id: %s\n", id_name ? :
-			dso->long_name ? :
-			dso->short_name ? :
-			"[unknown]");
-	free(id_name);
-	return false;
-}
-
 #define dsos__for_each_with_build_id(pos, head)	\
 	list_for_each_entry(pos, head, node)	\
 		if (!pos->has_build_id)		\
diff --git a/tools/perf/util/build-id.h b/tools/perf/util/build-id.h
index 8a89b195c1fc..96690a55c62c 100644
--- a/tools/perf/util/build-id.h
+++ b/tools/perf/util/build-id.h
@@ -17,7 +17,6 @@ char *build_id_cache__kallsyms_path(const char *sbuild_id, char *bf,
 				    size_t size);
 
 char *dso__build_id_filename(const struct dso *dso, char *bf, size_t size);
-bool dso__build_id_is_kmod(const struct dso *dso, char *bf, size_t size);
 
 int build_id__mark_dso_hit(struct perf_tool *tool, union perf_event *event,
 			   struct perf_sample *sample, struct perf_evsel *evsel,
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
index 81fc29ac798f..b4204b43ed58 100644
--- a/tools/perf/util/callchain.c
+++ b/tools/perf/util/callchain.c
@@ -621,14 +621,19 @@ enum match_result {
 static enum match_result match_chain_srcline(struct callchain_cursor_node *node,
 					     struct callchain_list *cnode)
 {
-	char *left = get_srcline(cnode->ms.map->dso,
+	char *left = NULL;
+	char *right = NULL;
+	enum match_result ret = MATCH_EQ;
+	int cmp;
+
+	if (cnode->ms.map)
+		left = get_srcline(cnode->ms.map->dso,
 				 map__rip_2objdump(cnode->ms.map, cnode->ip),
 				 cnode->ms.sym, true, false);
-	char *right = get_srcline(node->map->dso,
+	if (node->map)
+		right = get_srcline(node->map->dso,
 				 map__rip_2objdump(node->map, node->ip),
 				 node->sym, true, false);
-	enum match_result ret = MATCH_EQ;
-	int cmp;
 
 	if (left && right)
 		cmp = strcmp(left, right);
diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
index a96a99d2369f..4e7ab611377a 100644
--- a/tools/perf/util/dso.c
+++ b/tools/perf/util/dso.c
@@ -248,6 +248,64 @@ bool dso__needs_decompress(struct dso *dso)
 		dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;
 }
 
+static int decompress_kmodule(struct dso *dso, const char *name, char *tmpbuf)
+{
+	int fd = -1;
+	struct kmod_path m;
+
+	if (!dso__needs_decompress(dso))
+		return -1;
+
+	if (kmod_path__parse_ext(&m, dso->long_name))
+		return -1;
+
+	if (!m.comp)
+		goto out;
+
+	fd = mkstemp(tmpbuf);
+	if (fd < 0) {
+		dso->load_errno = errno;
+		goto out;
+	}
+
+	if (!decompress_to_file(m.ext, name, fd)) {
+		dso->load_errno = DSO_LOAD_ERRNO__DECOMPRESSION_FAILURE;
+		close(fd);
+		fd = -1;
+	}
+
+out:
+	free(m.ext);
+	return fd;
+}
+
+int dso__decompress_kmodule_fd(struct dso *dso, const char *name)
+{
+	char tmpbuf[] = KMOD_DECOMP_NAME;
+	int fd;
+
+	fd = decompress_kmodule(dso, name, tmpbuf);
+	unlink(tmpbuf);
+	return fd;
+}
+
+int dso__decompress_kmodule_path(struct dso *dso, const char *name,
+				 char *pathname, size_t len)
+{
+	char tmpbuf[] = KMOD_DECOMP_NAME;
+	int fd;
+
+	fd = decompress_kmodule(dso, name, tmpbuf);
+	if (fd < 0) {
+		unlink(tmpbuf);
+		return -1;
+	}
+
+	strncpy(pathname, tmpbuf, len);
+	close(fd);
+	return 0;
+}
+
 /*
  * Parses kernel module specified in @path and updates
  * @m argument like:
@@ -335,6 +393,21 @@ int __kmod_path__parse(struct kmod_path *m, const char *path,
 	return 0;
 }
 
+void dso__set_module_info(struct dso *dso, struct kmod_path *m,
+			  struct machine *machine)
+{
+	if (machine__is_host(machine))
+		dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE;
+	else
+		dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE;
+
+	/* _KMODULE_COMP should be next to _KMODULE */
+	if (m->kmod && m->comp)
+		dso->symtab_type++;
+
+	dso__set_short_name(dso, strdup(m->name), true);
+}
+
 /*
  * Global list of open DSOs and the counter.
  */
@@ -381,7 +454,7 @@ static int do_open(char *name)
 
 static int __open_dso(struct dso *dso, struct machine *machine)
 {
-	int fd;
+	int fd = -EINVAL;
 	char *root_dir = (char *)"";
 	char *name = malloc(PATH_MAX);
 
@@ -392,15 +465,30 @@ static int __open_dso(struct dso *dso, struct machine *machine)
 		root_dir = machine->root_dir;
 
 	if (dso__read_binary_type_filename(dso, dso->binary_type,
-					    root_dir, name, PATH_MAX)) {
-		free(name);
-		return -EINVAL;
-	}
+					    root_dir, name, PATH_MAX))
+		goto out;
 
 	if (!is_regular_file(name))
-		return -EINVAL;
+		goto out;
+
+	if (dso__needs_decompress(dso)) {
+		char newpath[KMOD_DECOMP_LEN];
+		size_t len = sizeof(newpath);
+
+		if (dso__decompress_kmodule_path(dso, name, newpath, len) < 0) {
+			fd = -dso->load_errno;
+			goto out;
+		}
+
+		strcpy(name, newpath);
+	}
 
 	fd = do_open(name);
+
+	if (dso__needs_decompress(dso))
+		unlink(name);
+
+out:
 	free(name);
 	return fd;
 }
diff --git a/tools/perf/util/dso.h b/tools/perf/util/dso.h
index 12350b171727..bd061ba7b47c 100644
--- a/tools/perf/util/dso.h
+++ b/tools/perf/util/dso.h
@@ -244,6 +244,12 @@ bool is_supported_compression(const char *ext);
 bool is_kernel_module(const char *pathname, int cpumode);
 bool decompress_to_file(const char *ext, const char *filename, int output_fd);
 bool dso__needs_decompress(struct dso *dso);
+int dso__decompress_kmodule_fd(struct dso *dso, const char *name);
+int dso__decompress_kmodule_path(struct dso *dso, const char *name,
+				 char *pathname, size_t len);
+
+#define KMOD_DECOMP_NAME  "/tmp/perf-kmod-XXXXXX"
+#define KMOD_DECOMP_LEN   sizeof(KMOD_DECOMP_NAME)
 
 struct kmod_path {
 	char *name;
@@ -259,6 +265,9 @@ int __kmod_path__parse(struct kmod_path *m, const char *path,
 #define kmod_path__parse_name(__m, __p) __kmod_path__parse(__m, __p, true , false)
 #define kmod_path__parse_ext(__m, __p)  __kmod_path__parse(__m, __p, false, true)
 
+void dso__set_module_info(struct dso *dso, struct kmod_path *m,
+			  struct machine *machine);
+
 /*
  * The dso__data_* external interface provides following functions:
  *   dso__data_get_fd
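
Call-side pattern for the new helpers, matching how code-reading.c and annotate.c use them in this series: decompress into a KMOD_DECOMP_LEN-sized buffer, consume the temporary file, then unlink it. A condensed sketch, assuming dso refers to a compressed module:

	char path[KMOD_DECOMP_LEN];

	if (dso__needs_decompress(dso)) {
		if (dso__decompress_kmodule_path(dso, dso->long_name,
						 path, sizeof(path)) < 0)
			return -1;

		/* ... run objdump / open an ELF handle on "path" ... */

		unlink(path);	/* the decompressed copy is temporary */
	}
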
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index e4f7902d5afa..cda44b0e821c 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -273,8 +273,20 @@ struct perf_evsel *perf_evsel__new_cycles(void)
273 struct perf_evsel *evsel; 273 struct perf_evsel *evsel;
274 274
275 event_attr_init(&attr); 275 event_attr_init(&attr);
276 /*
 277 * Unnamed union members are not supported as named struct
 278 * member initializers in older compilers such as gcc 4.4.7.
279 *
280 * Just for probing the precise_ip:
281 */
282 attr.sample_period = 1;
276 283
277 perf_event_attr__set_max_precise_ip(&attr); 284 perf_event_attr__set_max_precise_ip(&attr);
285 /*
286 * Now let the usual logic to set up the perf_event_attr defaults
287 * to kick in when we return and before perf_evsel__open() is called.
288 */
289 attr.sample_period = 0;
278 290
279 evsel = perf_evsel__new(&attr); 291 evsel = perf_evsel__new(&attr);
280 if (evsel == NULL) 292 if (evsel == NULL)
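
The sample_period dance above exists because perf_event_attr__set_max_precise_ip() opens the event with the attr as-is, and the probe needs a non-zero sample_period; the field sits in an unnamed union that gcc 4.4.7 cannot initialize by name, hence the plain assignment. A hedged sketch of what such a probe loop typically looks like (not the exact tools/perf implementation):

    #include <linux/perf_event.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static void set_max_precise_ip(struct perf_event_attr *attr)
    {
            /* precise_ip is a 2-bit field; try 3, 2, 1, then give up. */
            for (attr->precise_ip = 3; attr->precise_ip != 0; attr->precise_ip--) {
                    int fd = syscall(__NR_perf_event_open, attr, 0, -1, -1, 0);

                    if (fd != -1) {
                            close(fd);
                            break;
                    }
            }
    }
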
diff --git a/tools/perf/util/evsel_fprintf.c b/tools/perf/util/evsel_fprintf.c
index e415aee6a245..583f3a602506 100644
--- a/tools/perf/util/evsel_fprintf.c
+++ b/tools/perf/util/evsel_fprintf.c
@@ -7,6 +7,7 @@
7#include "map.h" 7#include "map.h"
8#include "strlist.h" 8#include "strlist.h"
9#include "symbol.h" 9#include "symbol.h"
10#include "srcline.h"
10 11
11static int comma_fprintf(FILE *fp, bool *first, const char *fmt, ...) 12static int comma_fprintf(FILE *fp, bool *first, const char *fmt, ...)
12{ 13{
@@ -168,6 +169,38 @@ int sample__fprintf_callchain(struct perf_sample *sample, int left_alignment,
168 if (!print_oneline) 169 if (!print_oneline)
169 printed += fprintf(fp, "\n"); 170 printed += fprintf(fp, "\n");
170 171
172 if (symbol_conf.inline_name && node->map) {
173 struct inline_node *inode;
174
 175			addr = map__rip_2objdump(node->map, node->ip);
176 inode = dso__parse_addr_inlines(node->map->dso, addr);
177
178 if (inode) {
179 struct inline_list *ilist;
180
181 list_for_each_entry(ilist, &inode->val, list) {
182 if (print_arrow)
183 printed += fprintf(fp, " <-");
184
185 /* IP is same, just skip it */
186 if (print_ip)
187 printed += fprintf(fp, "%c%16s",
188 s, "");
189 if (print_sym)
190 printed += fprintf(fp, " %s",
191 ilist->funcname);
192 if (print_srcline)
193 printed += fprintf(fp, "\n %s:%d",
194 ilist->filename,
195 ilist->line_nr);
196 if (!print_oneline)
197 printed += fprintf(fp, "\n");
198 }
199
200 inline_node__delete(inode);
201 }
202 }
203
171 if (symbol_conf.bt_stop_list && 204 if (symbol_conf.bt_stop_list &&
172 node->sym && 205 node->sym &&
173 strlist__has_entry(symbol_conf.bt_stop_list, 206 strlist__has_entry(symbol_conf.bt_stop_list,
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 314a07151fb7..b5baff3007bb 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -841,7 +841,7 @@ static int write_group_desc(int fd, struct perf_header *h __maybe_unused,
841 841
842/* 842/*
843 * default get_cpuid(): nothing gets recorded 843 * default get_cpuid(): nothing gets recorded
844 * actual implementation must be in arch/$(ARCH)/util/header.c 844 * actual implementation must be in arch/$(SRCARCH)/util/header.c
845 */ 845 */
846int __weak get_cpuid(char *buffer __maybe_unused, size_t sz __maybe_unused) 846int __weak get_cpuid(char *buffer __maybe_unused, size_t sz __maybe_unused)
847{ 847{
@@ -1469,8 +1469,16 @@ static int __event_process_build_id(struct build_id_event *bev,
1469 1469
1470 dso__set_build_id(dso, &bev->build_id); 1470 dso__set_build_id(dso, &bev->build_id);
1471 1471
1472 if (!is_kernel_module(filename, cpumode)) 1472 if (dso_type != DSO_TYPE_USER) {
1473 dso->kernel = dso_type; 1473 struct kmod_path m = { .name = NULL, };
1474
1475 if (!kmod_path__parse_name(&m, filename) && m.kmod)
1476 dso__set_module_info(dso, &m, machine);
1477 else
1478 dso->kernel = dso_type;
1479
1480 free(m.name);
1481 }
1474 1482
1475 build_id__sprintf(dso->build_id, sizeof(dso->build_id), 1483 build_id__sprintf(dso->build_id, sizeof(dso->build_id),
1476 sbuild_id); 1484 sbuild_id);
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index d97e014c3df3..5de2b86b9880 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -572,16 +572,7 @@ static struct dso *machine__findnew_module_dso(struct machine *machine,
572 if (dso == NULL) 572 if (dso == NULL)
573 goto out_unlock; 573 goto out_unlock;
574 574
575 if (machine__is_host(machine)) 575 dso__set_module_info(dso, m, machine);
576 dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE;
577 else
578 dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE;
579
580 /* _KMODULE_COMP should be next to _KMODULE */
581 if (m->kmod && m->comp)
582 dso->symtab_type++;
583
584 dso__set_short_name(dso, strdup(m->name), true);
585 dso__set_long_name(dso, strdup(filename), true); 576 dso__set_long_name(dso, strdup(filename), true);
586 } 577 }
587 578
@@ -1218,10 +1209,12 @@ int machine__create_kernel_maps(struct machine *machine)
1218 */ 1209 */
1219 map_groups__fixup_end(&machine->kmaps); 1210 map_groups__fixup_end(&machine->kmaps);
1220 1211
1221 if (machine__get_running_kernel_start(machine, &name, &addr)) { 1212 if (!machine__get_running_kernel_start(machine, &name, &addr)) {
1222 } else if (maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, name, addr)) { 1213 if (name &&
1223 machine__destroy_kernel_maps(machine); 1214 maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, name, addr)) {
1224 return -1; 1215 machine__destroy_kernel_maps(machine);
1216 return -1;
1217 }
1225 } 1218 }
1226 1219
1227 return 0; 1220 return 0;
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index 84e7e698411e..a2670e9d652d 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -619,7 +619,7 @@ static int post_process_probe_trace_point(struct probe_trace_point *tp,
619 struct map *map, unsigned long offs) 619 struct map *map, unsigned long offs)
620{ 620{
621 struct symbol *sym; 621 struct symbol *sym;
622 u64 addr = tp->address + tp->offset - offs; 622 u64 addr = tp->address - offs;
623 623
624 sym = map__find_symbol(map, addr); 624 sym = map__find_symbol(map, addr);
625 if (!sym) 625 if (!sym)
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index 9d92af7d0718..40de3cb40d21 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -1219,7 +1219,7 @@ static int python_generate_script(struct pevent *pevent, const char *outfile)
1219 fprintf(ofp, "# be retrieved using Python functions of the form " 1219 fprintf(ofp, "# be retrieved using Python functions of the form "
1220 "common_*(context).\n"); 1220 "common_*(context).\n");
1221 1221
1222 fprintf(ofp, "# See the perf-trace-python Documentation for the list " 1222 fprintf(ofp, "# See the perf-script-python Documentation for the list "
1223 "of available functions.\n\n"); 1223 "of available functions.\n\n");
1224 1224
1225 fprintf(ofp, "import os\n"); 1225 fprintf(ofp, "import os\n");
diff --git a/tools/perf/util/srcline.c b/tools/perf/util/srcline.c
index df051a52393c..ebc88a74e67b 100644
--- a/tools/perf/util/srcline.c
+++ b/tools/perf/util/srcline.c
@@ -56,7 +56,10 @@ static int inline_list__append(char *filename, char *funcname, int line_nr,
56 } 56 }
57 } 57 }
58 58
59 list_add_tail(&ilist->list, &node->val); 59 if (callchain_param.order == ORDER_CALLEE)
60 list_add_tail(&ilist->list, &node->val);
61 else
62 list_add(&ilist->list, &node->val);
60 63
61 return 0; 64 return 0;
62} 65}
@@ -200,12 +203,14 @@ static void addr2line_cleanup(struct a2l_data *a2l)
200 203
201#define MAX_INLINE_NEST 1024 204#define MAX_INLINE_NEST 1024
202 205
203static void inline_list__reverse(struct inline_node *node) 206static int inline_list__append_dso_a2l(struct dso *dso,
207 struct inline_node *node)
204{ 208{
205 struct inline_list *ilist, *n; 209 struct a2l_data *a2l = dso->a2l;
210 char *funcname = a2l->funcname ? strdup(a2l->funcname) : NULL;
211 char *filename = a2l->filename ? strdup(a2l->filename) : NULL;
206 212
207 list_for_each_entry_safe_reverse(ilist, n, &node->val, list) 213 return inline_list__append(filename, funcname, a2l->line, node, dso);
208 list_move_tail(&ilist->list, &node->val);
209} 214}
210 215
211static int addr2line(const char *dso_name, u64 addr, 216static int addr2line(const char *dso_name, u64 addr,
@@ -230,36 +235,36 @@ static int addr2line(const char *dso_name, u64 addr,
230 235
231 bfd_map_over_sections(a2l->abfd, find_address_in_section, a2l); 236 bfd_map_over_sections(a2l->abfd, find_address_in_section, a2l);
232 237
233 if (a2l->found && unwind_inlines) { 238 if (!a2l->found)
239 return 0;
240
241 if (unwind_inlines) {
234 int cnt = 0; 242 int cnt = 0;
235 243
244 if (node && inline_list__append_dso_a2l(dso, node))
245 return 0;
246
236 while (bfd_find_inliner_info(a2l->abfd, &a2l->filename, 247 while (bfd_find_inliner_info(a2l->abfd, &a2l->filename,
237 &a2l->funcname, &a2l->line) && 248 &a2l->funcname, &a2l->line) &&
238 cnt++ < MAX_INLINE_NEST) { 249 cnt++ < MAX_INLINE_NEST) {
239 250
240 if (node != NULL) { 251 if (node != NULL) {
241 if (inline_list__append(strdup(a2l->filename), 252 if (inline_list__append_dso_a2l(dso, node))
242 strdup(a2l->funcname),
243 a2l->line, node,
244 dso) != 0)
245 return 0; 253 return 0;
254 // found at least one inline frame
255 ret = 1;
246 } 256 }
247 } 257 }
258 }
248 259
249 if ((node != NULL) && 260 if (file) {
250 (callchain_param.order != ORDER_CALLEE)) { 261 *file = a2l->filename ? strdup(a2l->filename) : NULL;
251 inline_list__reverse(node); 262 ret = *file ? 1 : 0;
252 }
253 } 263 }
254 264
255 if (a2l->found && a2l->filename) { 265 if (line)
256 *file = strdup(a2l->filename);
257 *line = a2l->line; 266 *line = a2l->line;
258 267
259 if (*file)
260 ret = 1;
261 }
262
263 return ret; 268 return ret;
264} 269}
265 270
@@ -278,8 +283,6 @@ void dso__free_a2l(struct dso *dso)
278static struct inline_node *addr2inlines(const char *dso_name, u64 addr, 283static struct inline_node *addr2inlines(const char *dso_name, u64 addr,
279 struct dso *dso) 284 struct dso *dso)
280{ 285{
281 char *file = NULL;
282 unsigned int line = 0;
283 struct inline_node *node; 286 struct inline_node *node;
284 287
285 node = zalloc(sizeof(*node)); 288 node = zalloc(sizeof(*node));
@@ -291,7 +294,7 @@ static struct inline_node *addr2inlines(const char *dso_name, u64 addr,
291 INIT_LIST_HEAD(&node->val); 294 INIT_LIST_HEAD(&node->val);
292 node->addr = addr; 295 node->addr = addr;
293 296
294 if (!addr2line(dso_name, addr, &file, &line, dso, TRUE, node)) 297 if (!addr2line(dso_name, addr, NULL, NULL, dso, TRUE, node))
295 goto out_free_inline_node; 298 goto out_free_inline_node;
296 299
297 if (list_empty(&node->val)) 300 if (list_empty(&node->val))
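
The list_add()/list_add_tail() split above replaces the old inline_list__reverse() pass: by choosing the insertion end up front, the inline list comes out directly in callee or caller order. A toy standalone demo of the ordering effect (plain C, not the perf list API):

    /* Appending at the tail keeps input (callee-first) order; pushing
     * at the head reverses it (caller-first), so no separate reverse
     * pass is needed. */
    #include <stdio.h>

    #define NFRAMES 3

    int main(void)
    {
            const char *frames[NFRAMES] = { "inlined_cb", "caller", "main" };
            int i;

            puts("ORDER_CALLEE (tail insertion):");
            for (i = 0; i < NFRAMES; i++)       /* append order == input order */
                    printf("  %s\n", frames[i]);

            puts("ORDER_CALLER (head insertion):");
            for (i = NFRAMES - 1; i >= 0; i--)  /* head insertion reverses */
                    printf("  %s\n", frames[i]);

            return 0;
    }
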
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index e7ee47f7377a..502505cf236a 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -637,43 +637,6 @@ static int dso__swap_init(struct dso *dso, unsigned char eidata)
637 return 0; 637 return 0;
638} 638}
639 639
640static int decompress_kmodule(struct dso *dso, const char *name,
641 enum dso_binary_type type)
642{
643 int fd = -1;
644 char tmpbuf[] = "/tmp/perf-kmod-XXXXXX";
645 struct kmod_path m;
646
647 if (type != DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP &&
648 type != DSO_BINARY_TYPE__GUEST_KMODULE_COMP &&
649 type != DSO_BINARY_TYPE__BUILD_ID_CACHE)
650 return -1;
651
652 if (type == DSO_BINARY_TYPE__BUILD_ID_CACHE)
653 name = dso->long_name;
654
655 if (kmod_path__parse_ext(&m, name) || !m.comp)
656 return -1;
657
658 fd = mkstemp(tmpbuf);
659 if (fd < 0) {
660 dso->load_errno = errno;
661 goto out;
662 }
663
664 if (!decompress_to_file(m.ext, name, fd)) {
665 dso->load_errno = DSO_LOAD_ERRNO__DECOMPRESSION_FAILURE;
666 close(fd);
667 fd = -1;
668 }
669
670 unlink(tmpbuf);
671
672out:
673 free(m.ext);
674 return fd;
675}
676
677bool symsrc__possibly_runtime(struct symsrc *ss) 640bool symsrc__possibly_runtime(struct symsrc *ss)
678{ 641{
679 return ss->dynsym || ss->opdsec; 642 return ss->dynsym || ss->opdsec;
@@ -705,9 +668,11 @@ int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name,
705 int fd; 668 int fd;
706 669
707 if (dso__needs_decompress(dso)) { 670 if (dso__needs_decompress(dso)) {
708 fd = decompress_kmodule(dso, name, type); 671 fd = dso__decompress_kmodule_fd(dso, name);
709 if (fd < 0) 672 if (fd < 0)
710 return -1; 673 return -1;
674
675 type = dso->symtab_type;
711 } else { 676 } else {
712 fd = open(name, O_RDONLY); 677 fd = open(name, O_RDONLY);
713 if (fd < 0) { 678 if (fd < 0) {
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 8f2b068ff756..e7a98dbd2aed 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -1562,10 +1562,6 @@ int dso__load(struct dso *dso, struct map *map)
1562 if (!runtime_ss && syms_ss) 1562 if (!runtime_ss && syms_ss)
1563 runtime_ss = syms_ss; 1563 runtime_ss = syms_ss;
1564 1564
1565 if (syms_ss && syms_ss->type == DSO_BINARY_TYPE__BUILD_ID_CACHE)
1566 if (dso__build_id_is_kmod(dso, name, PATH_MAX))
1567 kmod = true;
1568
1569 if (syms_ss) 1565 if (syms_ss)
1570 ret = dso__load_sym(dso, map, syms_ss, runtime_ss, kmod); 1566 ret = dso__load_sym(dso, map, syms_ss, runtime_ss, kmod);
1571 else 1567 else
diff --git a/tools/perf/util/unwind-libdw.c b/tools/perf/util/unwind-libdw.c
index f90e11a555b2..7755a5e0fe5e 100644
--- a/tools/perf/util/unwind-libdw.c
+++ b/tools/perf/util/unwind-libdw.c
@@ -39,6 +39,14 @@ static int __report_module(struct addr_location *al, u64 ip,
39 return 0; 39 return 0;
40 40
41 mod = dwfl_addrmodule(ui->dwfl, ip); 41 mod = dwfl_addrmodule(ui->dwfl, ip);
42 if (mod) {
43 Dwarf_Addr s;
44
45 dwfl_module_info(mod, NULL, &s, NULL, NULL, NULL, NULL, NULL);
46 if (s != al->map->start)
47 mod = 0;
48 }
49
42 if (!mod) 50 if (!mod)
43 mod = dwfl_report_elf(ui->dwfl, dso->short_name, 51 mod = dwfl_report_elf(ui->dwfl, dso->short_name,
44 dso->long_name, -1, al->map->start, 52 dso->long_name, -1, al->map->start,
@@ -168,12 +176,24 @@ frame_callback(Dwfl_Frame *state, void *arg)
168{ 176{
169 struct unwind_info *ui = arg; 177 struct unwind_info *ui = arg;
170 Dwarf_Addr pc; 178 Dwarf_Addr pc;
179 bool isactivation;
171 180
172 if (!dwfl_frame_pc(state, &pc, NULL)) { 181 if (!dwfl_frame_pc(state, &pc, NULL)) {
173 pr_err("%s", dwfl_errmsg(-1)); 182 pr_err("%s", dwfl_errmsg(-1));
174 return DWARF_CB_ABORT; 183 return DWARF_CB_ABORT;
175 } 184 }
176 185
186 // report the module before we query for isactivation
187 report_module(pc, ui);
188
189 if (!dwfl_frame_pc(state, &pc, &isactivation)) {
190 pr_err("%s", dwfl_errmsg(-1));
191 return DWARF_CB_ABORT;
192 }
193
194 if (!isactivation)
195 --pc;
196
177 return entry(pc, ui) || !(--ui->max_stack) ? 197 return entry(pc, ui) || !(--ui->max_stack) ?
178 DWARF_CB_ABORT : DWARF_CB_OK; 198 DWARF_CB_ABORT : DWARF_CB_OK;
179} 199}
@@ -220,7 +240,7 @@ int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
220 240
221 err = dwfl_getthread_frames(ui->dwfl, thread->tid, frame_callback, ui); 241 err = dwfl_getthread_frames(ui->dwfl, thread->tid, frame_callback, ui);
222 242
223 if (err && !ui->max_stack) 243 if (err && ui->max_stack != max_stack)
224 err = 0; 244 err = 0;
225 245
226 /* 246 /*
diff --git a/tools/perf/util/unwind-libunwind-local.c b/tools/perf/util/unwind-libunwind-local.c
index f8455bed6e65..672c2ada9357 100644
--- a/tools/perf/util/unwind-libunwind-local.c
+++ b/tools/perf/util/unwind-libunwind-local.c
@@ -692,6 +692,17 @@ static int get_entries(struct unwind_info *ui, unwind_entry_cb_t cb,
692 692
693 while (!ret && (unw_step(&c) > 0) && i < max_stack) { 693 while (!ret && (unw_step(&c) > 0) && i < max_stack) {
694 unw_get_reg(&c, UNW_REG_IP, &ips[i]); 694 unw_get_reg(&c, UNW_REG_IP, &ips[i]);
695
696 /*
697 * Decrement the IP for any non-activation frames.
 698 * This is required to properly find the srcline
699 * for caller frames.
700 * See also the documentation for dwfl_frame_pc(),
701 * which this code tries to replicate.
702 */
703 if (unw_is_signal_frame(&c) <= 0)
704 --ips[i];
705
695 ++i; 706 ++i;
696 } 707 }
697 708
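
Both unwinder hunks apply the same rule: a return address points one instruction past the call, so non-activation frames are decremented by one before symbol/srcline lookup, while signal frames (activations) are left untouched. A hedged standalone sketch of the same adjustment with libunwind (local unwinding; link with -lunwind):

    #define UNW_LOCAL_ONLY
    #include <libunwind.h>
    #include <stdio.h>

    static void backtrace_ips(void)
    {
            unw_context_t uc;
            unw_cursor_t c;
            unw_word_t ip;

            unw_getcontext(&uc);
            unw_init_local(&c, &uc);
            while (unw_step(&c) > 0) {
                    unw_get_reg(&c, UNW_REG_IP, &ip);
                    /* Step back into the call instruction for caller
                     * frames; signal frames are activations. */
                    if (unw_is_signal_frame(&c) <= 0)
                            --ip;
                    printf("%#lx\n", (unsigned long)ip);
            }
    }
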
diff --git a/tools/power/acpi/.gitignore b/tools/power/acpi/.gitignore
new file mode 100644
index 000000000000..cba3d994995c
--- /dev/null
+++ b/tools/power/acpi/.gitignore
@@ -0,0 +1,4 @@
1acpidbg
2acpidump
3ec
4include
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index 91edd0566237..f389b02d43a0 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -11,7 +11,8 @@ endif
11CFLAGS += -Wall -O2 -I$(APIDIR) -I$(LIBDIR) -I$(GENDIR) $(GENFLAGS) -I../../../include 11CFLAGS += -Wall -O2 -I$(APIDIR) -I$(LIBDIR) -I$(GENDIR) $(GENFLAGS) -I../../../include
12LDLIBS += -lcap -lelf 12LDLIBS += -lcap -lelf
13 13
14TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs 14TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \
15 test_align
15 16
16TEST_GEN_FILES = test_pkt_access.o test_xdp.o test_l4lb.o test_tcp_estats.o 17TEST_GEN_FILES = test_pkt_access.o test_xdp.o test_l4lb.o test_tcp_estats.o
17 18
@@ -34,6 +35,7 @@ $(BPFOBJ): force
34CLANG ?= clang 35CLANG ?= clang
35 36
36%.o: %.c 37%.o: %.c
37 $(CLANG) -I. -I../../../include/uapi -I../../../../samples/bpf/ \ 38 $(CLANG) -I. -I./include/uapi -I../../../include/uapi \
39 -I../../../../samples/bpf/ \
38 -Wno-compare-distinct-pointer-types \ 40 -Wno-compare-distinct-pointer-types \
39 -O2 -target bpf -c $< -o $@ 41 -O2 -target bpf -c $< -o $@
diff --git a/tools/testing/selftests/bpf/bpf_endian.h b/tools/testing/selftests/bpf/bpf_endian.h
index 19d0604f8694..487cbfb89beb 100644
--- a/tools/testing/selftests/bpf/bpf_endian.h
+++ b/tools/testing/selftests/bpf/bpf_endian.h
@@ -1,23 +1,42 @@
1#ifndef __BPF_ENDIAN__ 1#ifndef __BPF_ENDIAN__
2#define __BPF_ENDIAN__ 2#define __BPF_ENDIAN__
3 3
4#include <asm/byteorder.h> 4#include <linux/swab.h>
5 5
6#if __BYTE_ORDER == __LITTLE_ENDIAN 6/* LLVM's BPF target selects the endianness of the CPU
7# define __bpf_ntohs(x)	__builtin_bswap16(x)		 7 * it compiles on, or the one the user specifies
8# define __bpf_htons(x)	__builtin_bswap16(x)		 8 * (bpfel/bpfeb). The used __BYTE_ORDER__ is defined by
9#elif __BYTE_ORDER == __BIG_ENDIAN			 9 * the compiler; we cannot rely on __BYTE_ORDER from
10# define __bpf_ntohs(x) (x) 10 * libc headers, since it doesn't reflect the actual
11# define __bpf_htons(x) (x) 11 * requested byte order.
12 *
13 * Note, LLVM's BPF target has different __builtin_bswapX()
14 * semantics. It does map to BPF_ALU | BPF_END | BPF_TO_BE
 15 * in both the bpfel and bpfeb case: below, we map to
 16 * cpu_to_be16(). We could use it unconditionally in the
 17 * BPF case, but it is better not to rely on it, so that
 18 * this header can be used from both the application and
 19 * the BPF program side, which use different targets.
20 */
21#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
22# define __bpf_ntohs(x) __builtin_bswap16(x)
23# define __bpf_htons(x) __builtin_bswap16(x)
24# define __bpf_constant_ntohs(x) ___constant_swab16(x)
25# define __bpf_constant_htons(x) ___constant_swab16(x)
26#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
27# define __bpf_ntohs(x) (x)
28# define __bpf_htons(x) (x)
29# define __bpf_constant_ntohs(x) (x)
30# define __bpf_constant_htons(x) (x)
12#else 31#else
13# error "Fix your __BYTE_ORDER?!" 32# error "Fix your compiler's __BYTE_ORDER__?!"
14#endif 33#endif
15 34
16#define bpf_htons(x) \ 35#define bpf_htons(x) \
17 (__builtin_constant_p(x) ? \ 36 (__builtin_constant_p(x) ? \
18 __constant_htons(x) : __bpf_htons(x)) 37 __bpf_constant_htons(x) : __bpf_htons(x))
19#define bpf_ntohs(x) \ 38#define bpf_ntohs(x) \
20 (__builtin_constant_p(x) ? \ 39 (__builtin_constant_p(x) ? \
21 __constant_ntohs(x) : __bpf_ntohs(x)) 40 __bpf_constant_ntohs(x) : __bpf_ntohs(x))
22 41
23#endif 42#endif /* __BPF_ENDIAN__ */
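
The __bpf_constant_*() pair matters because LLVM's BPF __builtin_bswap16() lowers to a byte-swap instruction, so constant operands are better folded at compile time via ___constant_swab16(); __builtin_constant_p() picks the right path. A hedged usage sketch from the BPF program side (assumes linux/if_ether.h for ethhdr and ETH_P_IP):

    #include <linux/if_ether.h>
    #include "bpf_endian.h"

    static int is_ipv4(struct ethhdr *eth)
    {
            /* ETH_P_IP is a compile-time constant, so bpf_htons()
             * resolves to __bpf_constant_htons() and folds away. */
            return eth->h_proto == bpf_htons(ETH_P_IP);
    }
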
diff --git a/tools/testing/selftests/bpf/include/uapi/linux/types.h b/tools/testing/selftests/bpf/include/uapi/linux/types.h
new file mode 100644
index 000000000000..51841848fbfe
--- /dev/null
+++ b/tools/testing/selftests/bpf/include/uapi/linux/types.h
@@ -0,0 +1,22 @@
1#ifndef _UAPI_LINUX_TYPES_H
2#define _UAPI_LINUX_TYPES_H
3
4#include <asm-generic/int-ll64.h>
5
6/* copied from linux:include/uapi/linux/types.h */
7#define __bitwise
8typedef __u16 __bitwise __le16;
9typedef __u16 __bitwise __be16;
10typedef __u32 __bitwise __le32;
11typedef __u32 __bitwise __be32;
12typedef __u64 __bitwise __le64;
13typedef __u64 __bitwise __be64;
14
15typedef __u16 __bitwise __sum16;
16typedef __u32 __bitwise __wsum;
17
18#define __aligned_u64 __u64 __attribute__((aligned(8)))
19#define __aligned_be64 __be64 __attribute__((aligned(8)))
20#define __aligned_le64 __le64 __attribute__((aligned(8)))
21
22#endif /* _UAPI_LINUX_TYPES_H */
diff --git a/tools/testing/selftests/bpf/test_align.c b/tools/testing/selftests/bpf/test_align.c
new file mode 100644
index 000000000000..9644d4e069de
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_align.c
@@ -0,0 +1,453 @@
1#include <asm/types.h>
2#include <linux/types.h>
3#include <stdint.h>
4#include <stdio.h>
5#include <stdlib.h>
6#include <unistd.h>
7#include <errno.h>
8#include <string.h>
9#include <stddef.h>
10#include <stdbool.h>
11
12#include <linux/unistd.h>
13#include <linux/filter.h>
14#include <linux/bpf_perf_event.h>
15#include <linux/bpf.h>
16
17#include <bpf/bpf.h>
18
19#include "../../../include/linux/filter.h"
20
21#ifndef ARRAY_SIZE
22# define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
23#endif
24
25#define MAX_INSNS 512
26#define MAX_MATCHES 16
27
28struct bpf_align_test {
29 const char *descr;
30 struct bpf_insn insns[MAX_INSNS];
31 enum {
32 UNDEF,
33 ACCEPT,
34 REJECT
35 } result;
36 enum bpf_prog_type prog_type;
37 const char *matches[MAX_MATCHES];
38};
39
40static struct bpf_align_test tests[] = {
41 {
42 .descr = "mov",
43 .insns = {
44 BPF_MOV64_IMM(BPF_REG_3, 2),
45 BPF_MOV64_IMM(BPF_REG_3, 4),
46 BPF_MOV64_IMM(BPF_REG_3, 8),
47 BPF_MOV64_IMM(BPF_REG_3, 16),
48 BPF_MOV64_IMM(BPF_REG_3, 32),
49 BPF_MOV64_IMM(BPF_REG_0, 0),
50 BPF_EXIT_INSN(),
51 },
52 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
53 .matches = {
54 "1: R1=ctx R3=imm2,min_value=2,max_value=2,min_align=2 R10=fp",
55 "2: R1=ctx R3=imm4,min_value=4,max_value=4,min_align=4 R10=fp",
56 "3: R1=ctx R3=imm8,min_value=8,max_value=8,min_align=8 R10=fp",
57 "4: R1=ctx R3=imm16,min_value=16,max_value=16,min_align=16 R10=fp",
58 "5: R1=ctx R3=imm32,min_value=32,max_value=32,min_align=32 R10=fp",
59 },
60 },
61 {
62 .descr = "shift",
63 .insns = {
64 BPF_MOV64_IMM(BPF_REG_3, 1),
65 BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
66 BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
67 BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
68 BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
69 BPF_ALU64_IMM(BPF_RSH, BPF_REG_3, 4),
70 BPF_MOV64_IMM(BPF_REG_4, 32),
71 BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
72 BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
73 BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
74 BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
75 BPF_MOV64_IMM(BPF_REG_0, 0),
76 BPF_EXIT_INSN(),
77 },
78 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
79 .matches = {
80 "1: R1=ctx R3=imm1,min_value=1,max_value=1,min_align=1 R10=fp",
81 "2: R1=ctx R3=imm2,min_value=2,max_value=2,min_align=2 R10=fp",
82 "3: R1=ctx R3=imm4,min_value=4,max_value=4,min_align=4 R10=fp",
83 "4: R1=ctx R3=imm8,min_value=8,max_value=8,min_align=8 R10=fp",
84 "5: R1=ctx R3=imm16,min_value=16,max_value=16,min_align=16 R10=fp",
85 "6: R1=ctx R3=imm1,min_value=1,max_value=1,min_align=1 R10=fp",
86 "7: R1=ctx R3=imm1,min_value=1,max_value=1,min_align=1 R4=imm32,min_value=32,max_value=32,min_align=32 R10=fp",
87 "8: R1=ctx R3=imm1,min_value=1,max_value=1,min_align=1 R4=imm16,min_value=16,max_value=16,min_align=16 R10=fp",
88 "9: R1=ctx R3=imm1,min_value=1,max_value=1,min_align=1 R4=imm8,min_value=8,max_value=8,min_align=8 R10=fp",
89 "10: R1=ctx R3=imm1,min_value=1,max_value=1,min_align=1 R4=imm4,min_value=4,max_value=4,min_align=4 R10=fp",
90 "11: R1=ctx R3=imm1,min_value=1,max_value=1,min_align=1 R4=imm2,min_value=2,max_value=2,min_align=2 R10=fp",
91 },
92 },
93 {
94 .descr = "addsub",
95 .insns = {
96 BPF_MOV64_IMM(BPF_REG_3, 4),
97 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 4),
98 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 2),
99 BPF_MOV64_IMM(BPF_REG_4, 8),
100 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
101 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 2),
102 BPF_MOV64_IMM(BPF_REG_0, 0),
103 BPF_EXIT_INSN(),
104 },
105 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
106 .matches = {
107 "1: R1=ctx R3=imm4,min_value=4,max_value=4,min_align=4 R10=fp",
108 "2: R1=ctx R3=imm8,min_value=8,max_value=8,min_align=4 R10=fp",
109 "3: R1=ctx R3=imm10,min_value=10,max_value=10,min_align=2 R10=fp",
110 "4: R1=ctx R3=imm10,min_value=10,max_value=10,min_align=2 R4=imm8,min_value=8,max_value=8,min_align=8 R10=fp",
111 "5: R1=ctx R3=imm10,min_value=10,max_value=10,min_align=2 R4=imm12,min_value=12,max_value=12,min_align=4 R10=fp",
112 "6: R1=ctx R3=imm10,min_value=10,max_value=10,min_align=2 R4=imm14,min_value=14,max_value=14,min_align=2 R10=fp",
113 },
114 },
115 {
116 .descr = "mul",
117 .insns = {
118 BPF_MOV64_IMM(BPF_REG_3, 7),
119 BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, 1),
120 BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, 2),
121 BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, 4),
122 BPF_MOV64_IMM(BPF_REG_0, 0),
123 BPF_EXIT_INSN(),
124 },
125 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
126 .matches = {
127 "1: R1=ctx R3=imm7,min_value=7,max_value=7,min_align=1 R10=fp",
128 "2: R1=ctx R3=imm7,min_value=7,max_value=7,min_align=1 R10=fp",
129 "3: R1=ctx R3=imm14,min_value=14,max_value=14,min_align=2 R10=fp",
130 "4: R1=ctx R3=imm56,min_value=56,max_value=56,min_align=4 R10=fp",
131 },
132 },
133
134#define PREP_PKT_POINTERS \
135 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, \
136 offsetof(struct __sk_buff, data)), \
137 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, \
138 offsetof(struct __sk_buff, data_end))
139
140#define LOAD_UNKNOWN(DST_REG) \
141 PREP_PKT_POINTERS, \
142 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), \
143 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), \
144 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 1), \
145 BPF_EXIT_INSN(), \
146 BPF_LDX_MEM(BPF_B, DST_REG, BPF_REG_2, 0)
147
148 {
149 .descr = "unknown shift",
150 .insns = {
151 LOAD_UNKNOWN(BPF_REG_3),
152 BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
153 BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
154 BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
155 BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
156 LOAD_UNKNOWN(BPF_REG_4),
157 BPF_ALU64_IMM(BPF_LSH, BPF_REG_4, 5),
158 BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
159 BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
160 BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
161 BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
162 BPF_MOV64_IMM(BPF_REG_0, 0),
163 BPF_EXIT_INSN(),
164 },
165 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
166 .matches = {
167 "7: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv56 R10=fp",
168 "8: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv55,min_align=2 R10=fp",
169 "9: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv54,min_align=4 R10=fp",
170 "10: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv53,min_align=8 R10=fp",
171 "11: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv52,min_align=16 R10=fp",
172 "18: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=inv56 R10=fp",
173 "19: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=inv51,min_align=32 R10=fp",
174 "20: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=inv52,min_align=16 R10=fp",
175 "21: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=inv53,min_align=8 R10=fp",
176 "22: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=inv54,min_align=4 R10=fp",
177 "23: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=inv55,min_align=2 R10=fp",
178 },
179 },
180 {
181 .descr = "unknown mul",
182 .insns = {
183 LOAD_UNKNOWN(BPF_REG_3),
184 BPF_MOV64_REG(BPF_REG_4, BPF_REG_3),
185 BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 1),
186 BPF_MOV64_REG(BPF_REG_4, BPF_REG_3),
187 BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 2),
188 BPF_MOV64_REG(BPF_REG_4, BPF_REG_3),
189 BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 4),
190 BPF_MOV64_REG(BPF_REG_4, BPF_REG_3),
191 BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 8),
192 BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 2),
193 BPF_MOV64_IMM(BPF_REG_0, 0),
194 BPF_EXIT_INSN(),
195 },
196 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
197 .matches = {
198 "7: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv56 R10=fp",
199 "8: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv56 R4=inv56 R10=fp",
200 "9: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv56 R4=inv55,min_align=1 R10=fp",
201 "10: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv56 R4=inv56 R10=fp",
202 "11: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv56 R4=inv54,min_align=2 R10=fp",
203 "12: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv56 R4=inv56 R10=fp",
204 "13: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv56 R4=inv53,min_align=4 R10=fp",
205 "14: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv56 R4=inv56 R10=fp",
206 "15: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv56 R4=inv52,min_align=8 R10=fp",
207 "16: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv56 R4=inv50,min_align=8 R10=fp"
208 },
209 },
210 {
211 .descr = "packet const offset",
212 .insns = {
213 PREP_PKT_POINTERS,
214 BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
215
216 BPF_MOV64_IMM(BPF_REG_0, 0),
217
218 /* Skip over ethernet header. */
219 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
220 BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
221 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
222 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
223 BPF_EXIT_INSN(),
224
225 BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 0),
226 BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 1),
227 BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 2),
228 BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 3),
229 BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_5, 0),
230 BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_5, 2),
231 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0),
232
233 BPF_MOV64_IMM(BPF_REG_0, 0),
234 BPF_EXIT_INSN(),
235 },
236 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
237 .matches = {
238 "4: R0=imm0,min_value=0,max_value=0,min_align=2147483648 R1=ctx R2=pkt(id=0,off=0,r=0) R3=pkt_end R5=pkt(id=0,off=0,r=0) R10=fp",
239 "5: R0=imm0,min_value=0,max_value=0,min_align=2147483648 R1=ctx R2=pkt(id=0,off=0,r=0) R3=pkt_end R5=pkt(id=0,off=14,r=0) R10=fp",
240 "6: R0=imm0,min_value=0,max_value=0,min_align=2147483648 R1=ctx R2=pkt(id=0,off=0,r=0) R3=pkt_end R4=pkt(id=0,off=14,r=0) R5=pkt(id=0,off=14,r=0) R10=fp",
241 "10: R0=imm0,min_value=0,max_value=0,min_align=2147483648 R1=ctx R2=pkt(id=0,off=0,r=18) R3=pkt_end R4=inv56 R5=pkt(id=0,off=14,r=18) R10=fp",
242 "14: R0=imm0,min_value=0,max_value=0,min_align=2147483648 R1=ctx R2=pkt(id=0,off=0,r=18) R3=pkt_end R4=inv48 R5=pkt(id=0,off=14,r=18) R10=fp",
243 "15: R0=imm0,min_value=0,max_value=0,min_align=2147483648 R1=ctx R2=pkt(id=0,off=0,r=18) R3=pkt_end R4=inv48 R5=pkt(id=0,off=14,r=18) R10=fp",
244 },
245 },
246 {
247 .descr = "packet variable offset",
248 .insns = {
249 LOAD_UNKNOWN(BPF_REG_6),
250 BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2),
251
252 /* First, add a constant to the R5 packet pointer,
253 * then a variable with a known alignment.
254 */
255 BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
256 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
257 BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
258 BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
259 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
260 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
261 BPF_EXIT_INSN(),
262 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0),
263
264 /* Now, test in the other direction. Adding first
265 * the variable offset to R5, then the constant.
266 */
267 BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
268 BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
269 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
270 BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
271 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
272 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
273 BPF_EXIT_INSN(),
274 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0),
275
276 /* Test multiple accumulations of unknown values
277 * into a packet pointer.
278 */
279 BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
280 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
281 BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
282 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 4),
283 BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
284 BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
285 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
286 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
287 BPF_EXIT_INSN(),
288 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0),
289
290 BPF_MOV64_IMM(BPF_REG_0, 0),
291 BPF_EXIT_INSN(),
292 },
293 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
294 .matches = {
295 /* Calculated offset in R6 has unknown value, but known
296 * alignment of 4.
297 */
298 "8: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R6=inv54,min_align=4 R10=fp",
299
300 /* Offset is added to packet pointer R5, resulting in known
301 * auxiliary alignment and offset.
302 */
303 "11: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R5=pkt(id=1,off=0,r=0),aux_off=14,aux_off_align=4 R6=inv54,min_align=4 R10=fp",
304
305 /* At the time the word size load is performed from R5,
 306 * its total offset is NET_IP_ALIGN + reg->off (0) +
307 * reg->aux_off (14) which is 16. Then the variable
308 * offset is considered using reg->aux_off_align which
309 * is 4 and meets the load's requirements.
310 */
311 "15: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=pkt(id=1,off=4,r=4),aux_off=14,aux_off_align=4 R5=pkt(id=1,off=0,r=4),aux_off=14,aux_off_align=4 R6=inv54,min_align=4 R10=fp",
312
313
314 /* Variable offset is added to R5 packet pointer,
315 * resulting in auxiliary alignment of 4.
316 */
317 "18: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=inv,aux_off=14,aux_off_align=4 R5=pkt(id=2,off=0,r=0),aux_off_align=4 R6=inv54,min_align=4 R10=fp",
318
319 /* Constant offset is added to R5, resulting in
320 * reg->off of 14.
321 */
322 "19: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=inv,aux_off=14,aux_off_align=4 R5=pkt(id=2,off=14,r=0),aux_off_align=4 R6=inv54,min_align=4 R10=fp",
323
324 /* At the time the word size load is performed from R5,
 325 * its total offset is NET_IP_ALIGN + reg->off (14) which
326 * is 16. Then the variable offset is considered using
327 * reg->aux_off_align which is 4 and meets the load's
328 * requirements.
329 */
330 "23: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=pkt(id=2,off=18,r=18),aux_off_align=4 R5=pkt(id=2,off=14,r=18),aux_off_align=4 R6=inv54,min_align=4 R10=fp",
331
332 /* Constant offset is added to R5 packet pointer,
333 * resulting in reg->off value of 14.
334 */
335 "26: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=inv,aux_off_align=4 R5=pkt(id=0,off=14,r=8) R6=inv54,min_align=4 R10=fp",
336 /* Variable offset is added to R5, resulting in an
337 * auxiliary offset of 14, and an auxiliary alignment of 4.
338 */
339 "27: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=inv,aux_off_align=4 R5=pkt(id=3,off=0,r=0),aux_off=14,aux_off_align=4 R6=inv54,min_align=4 R10=fp",
340 /* Constant is added to R5 again, setting reg->off to 4. */
341 "28: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=inv,aux_off_align=4 R5=pkt(id=3,off=4,r=0),aux_off=14,aux_off_align=4 R6=inv54,min_align=4 R10=fp",
342 /* And once more we add a variable, which causes an accumulation
 343 * of reg->off into reg->aux_off, with a resulting value of
344 * 18. The auxiliary alignment stays at 4.
345 */
346 "29: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=inv,aux_off_align=4 R5=pkt(id=4,off=0,r=0),aux_off=18,aux_off_align=4 R6=inv54,min_align=4 R10=fp",
347 /* At the time the word size load is performed from R5,
 348 * its total offset is NET_IP_ALIGN + reg->off (0) +
349 * reg->aux_off (18) which is 20. Then the variable offset
350 * is considered using reg->aux_off_align which is 4 and meets
351 * the load's requirements.
352 */
353 "33: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=pkt(id=4,off=4,r=4),aux_off=18,aux_off_align=4 R5=pkt(id=4,off=0,r=4),aux_off=18,aux_off_align=4 R6=inv54,min_align=4 R10=fp",
354 },
355 },
356};
357
358static int probe_filter_length(const struct bpf_insn *fp)
359{
360 int len;
361
362 for (len = MAX_INSNS - 1; len > 0; --len)
363 if (fp[len].code != 0 || fp[len].imm != 0)
364 break;
365 return len + 1;
366}
367
368static char bpf_vlog[32768];
369
370static int do_test_single(struct bpf_align_test *test)
371{
372 struct bpf_insn *prog = test->insns;
373 int prog_type = test->prog_type;
374 int prog_len, i;
375 int fd_prog;
376 int ret;
377
378 prog_len = probe_filter_length(prog);
379 fd_prog = bpf_verify_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER,
380 prog, prog_len, 1, "GPL", 0,
381 bpf_vlog, sizeof(bpf_vlog));
382 if (fd_prog < 0) {
383 printf("Failed to load program.\n");
384 printf("%s", bpf_vlog);
385 ret = 1;
386 } else {
387 ret = 0;
388 for (i = 0; i < MAX_MATCHES; i++) {
389 const char *t, *m = test->matches[i];
390
391 if (!m)
392 break;
393 t = strstr(bpf_vlog, m);
394 if (!t) {
395 printf("Failed to find match: %s\n", m);
396 ret = 1;
397 printf("%s", bpf_vlog);
398 break;
399 }
400 }
401 close(fd_prog);
402 }
403 return ret;
404}
405
406static int do_test(unsigned int from, unsigned int to)
407{
408 int all_pass = 0;
409 int all_fail = 0;
410 unsigned int i;
411
412 for (i = from; i < to; i++) {
413 struct bpf_align_test *test = &tests[i];
414 int fail;
415
416 printf("Test %3d: %s ... ",
417 i, test->descr);
418 fail = do_test_single(test);
419 if (fail) {
420 all_fail++;
421 printf("FAIL\n");
422 } else {
423 all_pass++;
424 printf("PASS\n");
425 }
426 }
427 printf("Results: %d pass %d fail\n",
428 all_pass, all_fail);
429 return 0;
430}
431
432int main(int argc, char **argv)
433{
434 unsigned int from = 0, to = ARRAY_SIZE(tests);
435
436 if (argc == 3) {
437 unsigned int l = atoi(argv[argc - 2]);
438 unsigned int u = atoi(argv[argc - 1]);
439
440 if (l < to && u < to) {
441 from = l;
442 to = u + 1;
443 }
444 } else if (argc == 2) {
445 unsigned int t = atoi(argv[argc - 1]);
446
447 if (t < to) {
448 from = t;
449 to = t + 1;
450 }
451 }
452 return do_test(from, to);
453}
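
A worked instance of the arithmetic those match comments describe, for the "packet variable offset" case: the fixed part of the pointer is NET_IP_ALIGN + reg->off + reg->aux_off, and a BPF_W load is accepted when both that sum and aux_off_align are multiples of the access size. A toy re-derivation (assuming NET_IP_ALIGN == 2, as on most architectures):

    #include <assert.h>

    int main(void)
    {
            int net_ip_align = 2, off = 0, aux_off = 14, aux_off_align = 4;
            int size = 4;                           /* BPF_W load */
            int total = net_ip_align + off + aux_off;  /* 2 + 0 + 14 = 16 */

            assert(total % size == 0);              /* fixed part is aligned */
            assert(aux_off_align % size == 0);      /* variable part keeps it */
            return 0;
    }
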
diff --git a/tools/testing/selftests/bpf/test_pkt_access.c b/tools/testing/selftests/bpf/test_pkt_access.c
index 39387bb7e08c..6e11ba11709e 100644
--- a/tools/testing/selftests/bpf/test_pkt_access.c
+++ b/tools/testing/selftests/bpf/test_pkt_access.c
@@ -5,6 +5,7 @@
5 * License as published by the Free Software Foundation. 5 * License as published by the Free Software Foundation.
6 */ 6 */
7#include <stddef.h> 7#include <stddef.h>
8#include <string.h>
8#include <linux/bpf.h> 9#include <linux/bpf.h>
9#include <linux/if_ether.h> 10#include <linux/if_ether.h>
10#include <linux/if_packet.h> 11#include <linux/if_packet.h>
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index 3773562056da..0ff8c55c0464 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -49,6 +49,7 @@
49#define MAX_NR_MAPS 4 49#define MAX_NR_MAPS 4
50 50
51#define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS (1 << 0) 51#define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS (1 << 0)
52#define F_LOAD_WITH_STRICT_ALIGNMENT (1 << 1)
52 53
53struct bpf_test { 54struct bpf_test {
54 const char *descr; 55 const char *descr;
@@ -2615,6 +2616,30 @@ static struct bpf_test tests[] = {
2615 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2616 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2616 }, 2617 },
2617 { 2618 {
2619 "direct packet access: test17 (pruning, alignment)",
2620 .insns = {
2621 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2622 offsetof(struct __sk_buff, data)),
2623 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2624 offsetof(struct __sk_buff, data_end)),
2625 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
2626 offsetof(struct __sk_buff, mark)),
2627 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2628 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 14),
2629 BPF_JMP_IMM(BPF_JGT, BPF_REG_7, 1, 4),
2630 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2631 BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, -4),
2632 BPF_MOV64_IMM(BPF_REG_0, 0),
2633 BPF_EXIT_INSN(),
2634 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
2635 BPF_JMP_A(-6),
2636 },
2637 .errstr = "misaligned packet access off 2+15+-4 size 4",
2638 .result = REJECT,
2639 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2640 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2641 },
2642 {
2618 "helper access to packet: test1, valid packet_ptr range", 2643 "helper access to packet: test1, valid packet_ptr range",
2619 .insns = { 2644 .insns = {
2620 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 2645 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
@@ -3341,6 +3366,70 @@ static struct bpf_test tests[] = {
3341 .prog_type = BPF_PROG_TYPE_SCHED_CLS 3366 .prog_type = BPF_PROG_TYPE_SCHED_CLS
3342 }, 3367 },
3343 { 3368 {
3369 "alu ops on ptr_to_map_value_or_null, 1",
3370 .insns = {
3371 BPF_MOV64_IMM(BPF_REG_1, 10),
3372 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
3373 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3374 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3375 BPF_LD_MAP_FD(BPF_REG_1, 0),
3376 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3377 BPF_FUNC_map_lookup_elem),
3378 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3379 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -2),
3380 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 2),
3381 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3382 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
3383 BPF_EXIT_INSN(),
3384 },
3385 .fixup_map1 = { 4 },
3386 .errstr = "R4 invalid mem access",
3387 .result = REJECT,
3388 .prog_type = BPF_PROG_TYPE_SCHED_CLS
3389 },
3390 {
3391 "alu ops on ptr_to_map_value_or_null, 2",
3392 .insns = {
3393 BPF_MOV64_IMM(BPF_REG_1, 10),
3394 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
3395 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3396 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3397 BPF_LD_MAP_FD(BPF_REG_1, 0),
3398 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3399 BPF_FUNC_map_lookup_elem),
3400 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3401 BPF_ALU64_IMM(BPF_AND, BPF_REG_4, -1),
3402 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3403 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
3404 BPF_EXIT_INSN(),
3405 },
3406 .fixup_map1 = { 4 },
3407 .errstr = "R4 invalid mem access",
3408 .result = REJECT,
3409 .prog_type = BPF_PROG_TYPE_SCHED_CLS
3410 },
3411 {
3412 "alu ops on ptr_to_map_value_or_null, 3",
3413 .insns = {
3414 BPF_MOV64_IMM(BPF_REG_1, 10),
3415 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
3416 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3417 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3418 BPF_LD_MAP_FD(BPF_REG_1, 0),
3419 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3420 BPF_FUNC_map_lookup_elem),
3421 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3422 BPF_ALU64_IMM(BPF_LSH, BPF_REG_4, 1),
3423 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3424 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
3425 BPF_EXIT_INSN(),
3426 },
3427 .fixup_map1 = { 4 },
3428 .errstr = "R4 invalid mem access",
3429 .result = REJECT,
3430 .prog_type = BPF_PROG_TYPE_SCHED_CLS
3431 },
3432 {
3344 "invalid memory access with multiple map_lookup_elem calls", 3433 "invalid memory access with multiple map_lookup_elem calls",
3345 .insns = { 3434 .insns = {
3346 BPF_MOV64_IMM(BPF_REG_1, 10), 3435 BPF_MOV64_IMM(BPF_REG_1, 10),
@@ -3660,6 +3749,72 @@ static struct bpf_test tests[] = {
3660 .errstr = "invalid bpf_context access", 3749 .errstr = "invalid bpf_context access",
3661 }, 3750 },
3662 { 3751 {
3752 "leak pointer into ctx 1",
3753 .insns = {
3754 BPF_MOV64_IMM(BPF_REG_0, 0),
3755 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
3756 offsetof(struct __sk_buff, cb[0])),
3757 BPF_LD_MAP_FD(BPF_REG_2, 0),
3758 BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_2,
3759 offsetof(struct __sk_buff, cb[0])),
3760 BPF_EXIT_INSN(),
3761 },
3762 .fixup_map1 = { 2 },
3763 .errstr_unpriv = "R2 leaks addr into mem",
3764 .result_unpriv = REJECT,
3765 .result = ACCEPT,
3766 },
3767 {
3768 "leak pointer into ctx 2",
3769 .insns = {
3770 BPF_MOV64_IMM(BPF_REG_0, 0),
3771 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
3772 offsetof(struct __sk_buff, cb[0])),
3773 BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_10,
3774 offsetof(struct __sk_buff, cb[0])),
3775 BPF_EXIT_INSN(),
3776 },
3777 .errstr_unpriv = "R10 leaks addr into mem",
3778 .result_unpriv = REJECT,
3779 .result = ACCEPT,
3780 },
3781 {
3782 "leak pointer into ctx 3",
3783 .insns = {
3784 BPF_MOV64_IMM(BPF_REG_0, 0),
3785 BPF_LD_MAP_FD(BPF_REG_2, 0),
3786 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2,
3787 offsetof(struct __sk_buff, cb[0])),
3788 BPF_EXIT_INSN(),
3789 },
3790 .fixup_map1 = { 1 },
3791 .errstr_unpriv = "R2 leaks addr into ctx",
3792 .result_unpriv = REJECT,
3793 .result = ACCEPT,
3794 },
3795 {
3796 "leak pointer into map val",
3797 .insns = {
3798 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
3799 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3800 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3801 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3802 BPF_LD_MAP_FD(BPF_REG_1, 0),
3803 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3804 BPF_FUNC_map_lookup_elem),
3805 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
3806 BPF_MOV64_IMM(BPF_REG_3, 0),
3807 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
3808 BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3809 BPF_MOV64_IMM(BPF_REG_0, 0),
3810 BPF_EXIT_INSN(),
3811 },
3812 .fixup_map1 = { 4 },
3813 .errstr_unpriv = "R6 leaks addr into mem",
3814 .result_unpriv = REJECT,
3815 .result = ACCEPT,
3816 },
3817 {
3663 "helper access to map: full range", 3818 "helper access to map: full range",
3664 .insns = { 3819 .insns = {
3665 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 3820 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
@@ -4937,7 +5092,149 @@ static struct bpf_test tests[] = {
4937 .fixup_map_in_map = { 3 }, 5092 .fixup_map_in_map = { 3 },
4938 .errstr = "R1 type=map_value_or_null expected=map_ptr", 5093 .errstr = "R1 type=map_value_or_null expected=map_ptr",
4939 .result = REJECT, 5094 .result = REJECT,
4940 } 5095 },
5096 {
5097 "ld_abs: check calling conv, r1",
5098 .insns = {
5099 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5100 BPF_MOV64_IMM(BPF_REG_1, 0),
5101 BPF_LD_ABS(BPF_W, -0x200000),
5102 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5103 BPF_EXIT_INSN(),
5104 },
5105 .errstr = "R1 !read_ok",
5106 .result = REJECT,
5107 },
5108 {
5109 "ld_abs: check calling conv, r2",
5110 .insns = {
5111 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5112 BPF_MOV64_IMM(BPF_REG_2, 0),
5113 BPF_LD_ABS(BPF_W, -0x200000),
5114 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5115 BPF_EXIT_INSN(),
5116 },
5117 .errstr = "R2 !read_ok",
5118 .result = REJECT,
5119 },
5120 {
5121 "ld_abs: check calling conv, r3",
5122 .insns = {
5123 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5124 BPF_MOV64_IMM(BPF_REG_3, 0),
5125 BPF_LD_ABS(BPF_W, -0x200000),
5126 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
5127 BPF_EXIT_INSN(),
5128 },
5129 .errstr = "R3 !read_ok",
5130 .result = REJECT,
5131 },
5132 {
5133 "ld_abs: check calling conv, r4",
5134 .insns = {
5135 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5136 BPF_MOV64_IMM(BPF_REG_4, 0),
5137 BPF_LD_ABS(BPF_W, -0x200000),
5138 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
5139 BPF_EXIT_INSN(),
5140 },
5141 .errstr = "R4 !read_ok",
5142 .result = REJECT,
5143 },
5144 {
5145 "ld_abs: check calling conv, r5",
5146 .insns = {
5147 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5148 BPF_MOV64_IMM(BPF_REG_5, 0),
5149 BPF_LD_ABS(BPF_W, -0x200000),
5150 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
5151 BPF_EXIT_INSN(),
5152 },
5153 .errstr = "R5 !read_ok",
5154 .result = REJECT,
5155 },
5156 {
5157 "ld_abs: check calling conv, r7",
5158 .insns = {
5159 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5160 BPF_MOV64_IMM(BPF_REG_7, 0),
5161 BPF_LD_ABS(BPF_W, -0x200000),
5162 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
5163 BPF_EXIT_INSN(),
5164 },
5165 .result = ACCEPT,
5166 },
5167 {
5168 "ld_ind: check calling conv, r1",
5169 .insns = {
5170 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5171 BPF_MOV64_IMM(BPF_REG_1, 1),
5172 BPF_LD_IND(BPF_W, BPF_REG_1, -0x200000),
5173 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5174 BPF_EXIT_INSN(),
5175 },
5176 .errstr = "R1 !read_ok",
5177 .result = REJECT,
5178 },
5179 {
5180 "ld_ind: check calling conv, r2",
5181 .insns = {
5182 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5183 BPF_MOV64_IMM(BPF_REG_2, 1),
5184 BPF_LD_IND(BPF_W, BPF_REG_2, -0x200000),
5185 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5186 BPF_EXIT_INSN(),
5187 },
5188 .errstr = "R2 !read_ok",
5189 .result = REJECT,
5190 },
5191 {
5192 "ld_ind: check calling conv, r3",
5193 .insns = {
5194 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5195 BPF_MOV64_IMM(BPF_REG_3, 1),
5196 BPF_LD_IND(BPF_W, BPF_REG_3, -0x200000),
5197 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
5198 BPF_EXIT_INSN(),
5199 },
5200 .errstr = "R3 !read_ok",
5201 .result = REJECT,
5202 },
5203 {
5204 "ld_ind: check calling conv, r4",
5205 .insns = {
5206 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5207 BPF_MOV64_IMM(BPF_REG_4, 1),
5208 BPF_LD_IND(BPF_W, BPF_REG_4, -0x200000),
5209 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
5210 BPF_EXIT_INSN(),
5211 },
5212 .errstr = "R4 !read_ok",
5213 .result = REJECT,
5214 },
5215 {
5216 "ld_ind: check calling conv, r5",
5217 .insns = {
5218 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5219 BPF_MOV64_IMM(BPF_REG_5, 1),
5220 BPF_LD_IND(BPF_W, BPF_REG_5, -0x200000),
5221 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
5222 BPF_EXIT_INSN(),
5223 },
5224 .errstr = "R5 !read_ok",
5225 .result = REJECT,
5226 },
5227 {
5228 "ld_ind: check calling conv, r7",
5229 .insns = {
5230 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5231 BPF_MOV64_IMM(BPF_REG_7, 1),
5232 BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
5233 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
5234 BPF_EXIT_INSN(),
5235 },
5236 .result = ACCEPT,
5237 },
4941}; 5238};
4942 5239
4943static int probe_filter_length(const struct bpf_insn *fp) 5240static int probe_filter_length(const struct bpf_insn *fp)
@@ -5059,9 +5356,9 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
5059 5356
5060 do_test_fixup(test, prog, map_fds); 5357 do_test_fixup(test, prog, map_fds);
5061 5358
5062 fd_prog = bpf_load_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER, 5359 fd_prog = bpf_verify_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER,
5063 prog, prog_len, "GPL", 0, bpf_vlog, 5360 prog, prog_len, test->flags & F_LOAD_WITH_STRICT_ALIGNMENT,
5064 sizeof(bpf_vlog)); 5361 "GPL", 0, bpf_vlog, sizeof(bpf_vlog));
5065 5362
5066 expected_ret = unpriv && test->result_unpriv != UNDEF ? 5363 expected_ret = unpriv && test->result_unpriv != UNDEF ?
5067 test->result_unpriv : test->result; 5364 test->result_unpriv : test->result;
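
The do_test_single() change above swaps bpf_load_program() for bpf_verify_program(), whose extra argument between insns_cnt and the license toggles kernel strict-alignment checking (BPF_F_STRICT_ALIGNMENT). A hedged sketch mirroring that call shape:

    #include <stdio.h>
    #include <bpf/bpf.h>

    static int load_strict(struct bpf_insn *prog, size_t prog_len)
    {
            static char vlog[65536];
            int fd = bpf_verify_program(BPF_PROG_TYPE_SCHED_CLS, prog,
                                        prog_len, 1 /* strict alignment */,
                                        "GPL", 0, vlog, sizeof(vlog));

            if (fd < 0)
                    fprintf(stderr, "verifier rejected:\n%s\n", vlog);
            return fd;
    }
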
diff --git a/tools/testing/selftests/ftrace/ftracetest b/tools/testing/selftests/ftrace/ftracetest
index 32e6211e1c6e..717581145cfc 100755
--- a/tools/testing/selftests/ftrace/ftracetest
+++ b/tools/testing/selftests/ftrace/ftracetest
@@ -58,7 +58,7 @@ parse_opts() { # opts
58 ;; 58 ;;
59 --verbose|-v|-vv) 59 --verbose|-v|-vv)
60 VERBOSE=$((VERBOSE + 1)) 60 VERBOSE=$((VERBOSE + 1))
61 [ $1 == '-vv' ] && VERBOSE=$((VERBOSE + 1)) 61 [ $1 = '-vv' ] && VERBOSE=$((VERBOSE + 1))
62 shift 1 62 shift 1
63 ;; 63 ;;
64 --debug|-d) 64 --debug|-d)
diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc
index 07bb3e5930b4..aa31368851c9 100644
--- a/tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc
+++ b/tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc
@@ -48,7 +48,7 @@ test_event_enabled() {
48 e=`cat $EVENT_ENABLE` 48 e=`cat $EVENT_ENABLE`
49 if [ "$e" != $val ]; then 49 if [ "$e" != $val ]; then
50 echo "Expected $val but found $e" 50 echo "Expected $val but found $e"
51 exit -1 51 exit 1
52 fi 52 fi
53} 53}
54 54
diff --git a/tools/testing/selftests/ftrace/test.d/functions b/tools/testing/selftests/ftrace/test.d/functions
index 9aec6fcb7729..f2019b37370d 100644
--- a/tools/testing/selftests/ftrace/test.d/functions
+++ b/tools/testing/selftests/ftrace/test.d/functions
@@ -34,10 +34,10 @@ reset_ftrace_filter() { # reset all triggers in set_ftrace_filter
34 echo > set_ftrace_filter 34 echo > set_ftrace_filter
35 grep -v '^#' set_ftrace_filter | while read t; do 35 grep -v '^#' set_ftrace_filter | while read t; do
36 tr=`echo $t | cut -d: -f2` 36 tr=`echo $t | cut -d: -f2`
37 if [ "$tr" == "" ]; then 37 if [ "$tr" = "" ]; then
38 continue 38 continue
39 fi 39 fi
40 if [ $tr == "enable_event" -o $tr == "disable_event" ]; then 40 if [ $tr = "enable_event" -o $tr = "disable_event" ]; then
41 tr=`echo $t | cut -d: -f1-4` 41 tr=`echo $t | cut -d: -f1-4`
42 limit=`echo $t | cut -d: -f5` 42 limit=`echo $t | cut -d: -f5`
43 else 43 else
diff --git a/tools/testing/selftests/ftrace/test.d/instances/instance-event.tc b/tools/testing/selftests/ftrace/test.d/instances/instance-event.tc
index 4c5a061a5b4e..c73db7863adb 100644
--- a/tools/testing/selftests/ftrace/test.d/instances/instance-event.tc
+++ b/tools/testing/selftests/ftrace/test.d/instances/instance-event.tc
@@ -75,9 +75,13 @@ rmdir foo
75if [ -d foo ]; then 75if [ -d foo ]; then
76 fail "foo still exists" 76 fail "foo still exists"
77fi 77fi
78exit 0
79
80 78
79mkdir foo
80echo "schedule:enable_event:sched:sched_switch" > foo/set_ftrace_filter
81rmdir foo
82if [ -d foo ]; then
83 fail "foo still exists"
84fi
81 85
82 86
83instance_slam() { 87instance_slam() {
diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/multiple_kprobes.tc b/tools/testing/selftests/ftrace/test.d/kprobe/multiple_kprobes.tc
new file mode 100644
index 000000000000..f4d1ff785d67
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/kprobe/multiple_kprobes.tc
@@ -0,0 +1,21 @@
1#!/bin/sh
2# description: Register/unregister many kprobe events
3
4# ftrace fentry skip size depends on the machine architecture.
5# Currently HAVE_KPROBES_ON_FTRACE defined on x86 and powerpc
6case `uname -m` in
7 x86_64|i[3456]86) OFFS=5;;
8 ppc*) OFFS=4;;
9 *) OFFS=0;;
10esac
11
12echo "Setup of up to 256 kprobes"
13grep t /proc/kallsyms | cut -f3 -d" " | grep -v .*\\..* | \
14head -n 256 | while read i; do echo p ${i}+${OFFS} ; done > kprobe_events ||:
15
16echo 1 > events/kprobes/enable
17echo 0 > events/kprobes/enable
18echo > kprobe_events
19echo "Waiting for unoptimization & freeing"
20sleep 5
21echo "Done"
diff --git a/tools/testing/selftests/ntb/ntb_test.sh b/tools/testing/selftests/ntb/ntb_test.sh
index a676d3eefefb..13f5198ba0ee 100755
--- a/tools/testing/selftests/ntb/ntb_test.sh
+++ b/tools/testing/selftests/ntb/ntb_test.sh
@@ -305,7 +305,7 @@ function perf_test()
305 echo "Running remote perf test $WITH DMA" 305 echo "Running remote perf test $WITH DMA"
306 write_file "" $REMOTE_PERF/run 306 write_file "" $REMOTE_PERF/run
307 echo -n " " 307 echo -n " "
308 read_file $LOCAL_PERF/run 308 read_file $REMOTE_PERF/run
309 echo " Passed" 309 echo " Passed"
310 310
311 _modprobe -r ntb_perf 311 _modprobe -r ntb_perf
diff --git a/tools/testing/selftests/powerpc/tm/.gitignore b/tools/testing/selftests/powerpc/tm/.gitignore
index 427621792229..2f1f7b013293 100644
--- a/tools/testing/selftests/powerpc/tm/.gitignore
+++ b/tools/testing/selftests/powerpc/tm/.gitignore
@@ -11,3 +11,4 @@ tm-signal-context-chk-fpu
11tm-signal-context-chk-gpr 11tm-signal-context-chk-gpr
12tm-signal-context-chk-vmx 12tm-signal-context-chk-vmx
13tm-signal-context-chk-vsx 13tm-signal-context-chk-vsx
14tm-vmx-unavail
diff --git a/tools/testing/selftests/powerpc/tm/Makefile b/tools/testing/selftests/powerpc/tm/Makefile
index 5576ee6a51f2..958c11c14acd 100644
--- a/tools/testing/selftests/powerpc/tm/Makefile
+++ b/tools/testing/selftests/powerpc/tm/Makefile
@@ -2,7 +2,8 @@ SIGNAL_CONTEXT_CHK_TESTS := tm-signal-context-chk-gpr tm-signal-context-chk-fpu
2 tm-signal-context-chk-vmx tm-signal-context-chk-vsx 2 tm-signal-context-chk-vmx tm-signal-context-chk-vsx
3 3
4TEST_GEN_PROGS := tm-resched-dscr tm-syscall tm-signal-msr-resv tm-signal-stack \ 4TEST_GEN_PROGS := tm-resched-dscr tm-syscall tm-signal-msr-resv tm-signal-stack \
5 tm-vmxcopy tm-fork tm-tar tm-tmspr $(SIGNAL_CONTEXT_CHK_TESTS) 5 tm-vmxcopy tm-fork tm-tar tm-tmspr tm-vmx-unavail \
6 $(SIGNAL_CONTEXT_CHK_TESTS)
6 7
7include ../../lib.mk 8include ../../lib.mk
8 9
@@ -13,6 +14,7 @@ CFLAGS += -mhtm
 $(OUTPUT)/tm-syscall: tm-syscall-asm.S
 $(OUTPUT)/tm-syscall: CFLAGS += -I../../../../../usr/include
 $(OUTPUT)/tm-tmspr: CFLAGS += -pthread
+$(OUTPUT)/tm-vmx-unavail: CFLAGS += -pthread -m64
 
 SIGNAL_CONTEXT_CHK_TESTS := $(patsubst %,$(OUTPUT)/%,$(SIGNAL_CONTEXT_CHK_TESTS))
 $(SIGNAL_CONTEXT_CHK_TESTS): tm-signal.S
diff --git a/tools/testing/selftests/powerpc/tm/tm-resched-dscr.c b/tools/testing/selftests/powerpc/tm/tm-resched-dscr.c
index d9c49f41515e..e79ccd6aada1 100644
--- a/tools/testing/selftests/powerpc/tm/tm-resched-dscr.c
+++ b/tools/testing/selftests/powerpc/tm/tm-resched-dscr.c
@@ -42,12 +42,12 @@ int test_body(void)
42 printf("Check DSCR TM context switch: "); 42 printf("Check DSCR TM context switch: ");
43 fflush(stdout); 43 fflush(stdout);
44 for (;;) { 44 for (;;) {
45 rv = 1;
46 asm __volatile__ ( 45 asm __volatile__ (
47 /* set a known value into the DSCR */ 46 /* set a known value into the DSCR */
48 "ld 3, %[dscr1];" 47 "ld 3, %[dscr1];"
49 "mtspr %[sprn_dscr], 3;" 48 "mtspr %[sprn_dscr], 3;"
50 49
50 "li %[rv], 1;"
51 /* start and suspend a transaction */ 51 /* start and suspend a transaction */
52 "tbegin.;" 52 "tbegin.;"
53 "beq 1f;" 53 "beq 1f;"
diff --git a/tools/testing/selftests/powerpc/tm/tm-vmx-unavail.c b/tools/testing/selftests/powerpc/tm/tm-vmx-unavail.c
new file mode 100644
index 000000000000..137185ba4937
--- /dev/null
+++ b/tools/testing/selftests/powerpc/tm/tm-vmx-unavail.c
@@ -0,0 +1,118 @@
+/*
+ * Copyright 2017, Michael Neuling, IBM Corp.
+ * Licensed under GPLv2.
+ * Original: Breno Leitao <brenohl@br.ibm.com> &
+ *           Gustavo Bueno Romero <gromero@br.ibm.com>
+ * Edited: Michael Neuling
+ *
+ * Force VMX unavailable during a transaction and see if it corrupts
+ * the checkpointed VMX register state after the abort.
+ */
+
+#include <inttypes.h>
+#include <htmintrin.h>
+#include <string.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <pthread.h>
+#include <sys/mman.h>
+#include <unistd.h>
+#include <pthread.h>
+
+#include "tm.h"
+#include "utils.h"
+
+int passed;
+
+void *worker(void *unused)
+{
+	__int128 vmx0;
+	uint64_t texasr;
+
+	asm goto (
+		"li 3, 1;" /* Stick non-zero value in VMX0 */
+		"std 3, 0(%[vmx0_ptr]);"
+		"lvx 0, 0, %[vmx0_ptr];"
+
+		/* Wait here a bit so we get scheduled out 255 times */
+		"lis 3, 0x3fff;"
+		"1: ;"
+		"addi 3, 3, -1;"
+		"cmpdi 3, 0;"
+		"bne 1b;"
+
+		/* Kernel will hopefully turn VMX off now */
+
+		"tbegin. ;"
+		"beq failure;"
+
+		/* Cause VMX unavail. Any VMX instruction */
+		"vaddcuw 0,0,0;"
+
+		"tend. ;"
+		"b %l[success];"
+
+		/* Check VMX0 sanity after abort */
+		"failure: ;"
+		"lvx 1, 0, %[vmx0_ptr];"
+		"vcmpequb. 2, 0, 1;"
+		"bc 4, 24, %l[value_mismatch];"
+		"b %l[value_match];"
+		:
+		: [vmx0_ptr] "r"(&vmx0)
+		: "r3"
+		: success, value_match, value_mismatch
+		);
+
+	/* HTM aborted and VMX0 is corrupted */
+value_mismatch:
+	texasr = __builtin_get_texasr();
+
+	printf("\n\n==============\n\n");
+	printf("Failure with error: %lx\n", _TEXASR_FAILURE_CODE(texasr));
+	printf("Summary error     : %lx\n", _TEXASR_FAILURE_SUMMARY(texasr));
+	printf("TFIAR exact       : %lx\n\n", _TEXASR_TFIAR_EXACT(texasr));
+
+	passed = 0;
+	return NULL;
+
+	/* HTM aborted but VMX0 is correct */
+value_match:
+//	printf("!");
+	return NULL;
+
+success:
+//	printf(".");
+	return NULL;
+}
+
+int tm_vmx_unavail_test()
+{
+	int threads;
+	pthread_t *thread;
+
+	SKIP_IF(!have_htm());
+
+	passed = 1;
+
+	threads = sysconf(_SC_NPROCESSORS_ONLN) * 4;
+	thread = malloc(sizeof(pthread_t)*threads);
+	if (!thread)
+		return EXIT_FAILURE;
+
+	for (uint64_t i = 0; i < threads; i++)
+		pthread_create(&thread[i], NULL, &worker, NULL);
+
+	for (uint64_t i = 0; i < threads; i++)
+		pthread_join(thread[i], NULL);
+
+	free(thread);
+
+	return passed ? EXIT_SUCCESS : EXIT_FAILURE;
+}
+
+
+int main(int argc, char **argv)
+{
+	return test_harness(tm_vmx_unavail_test, "tm_vmx_unavail_test");
+}
diff --git a/usr/Kconfig b/usr/Kconfig
index c0c48507e44e..ad0543e21760 100644
--- a/usr/Kconfig
+++ b/usr/Kconfig
@@ -220,6 +220,7 @@ config INITRAMFS_COMPRESSION_LZ4
 endchoice
 
 config INITRAMFS_COMPRESSION
+	depends on INITRAMFS_SOURCE!=""
 	string
 	default "" if INITRAMFS_COMPRESSION_NONE
 	default ".gz" if INITRAMFS_COMPRESSION_GZIP
diff --git a/virt/kvm/arm/hyp/vgic-v3-sr.c b/virt/kvm/arm/hyp/vgic-v3-sr.c
index bce6037cf01d..87940364570b 100644
--- a/virt/kvm/arm/hyp/vgic-v3-sr.c
+++ b/virt/kvm/arm/hyp/vgic-v3-sr.c
@@ -22,7 +22,7 @@
 #include <asm/kvm_hyp.h>
 
 #define vtr_to_max_lr_idx(v)		((v) & 0xf)
-#define vtr_to_nr_pri_bits(v)		(((u32)(v) >> 29) + 1)
+#define vtr_to_nr_pre_bits(v)		((((u32)(v) >> 26) & 7) + 1)
 
 static u64 __hyp_text __gic_v3_get_lr(unsigned int lr)
 {
@@ -135,13 +135,13 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
 
 	if (used_lrs) {
 		int i;
-		u32 nr_pri_bits;
+		u32 nr_pre_bits;
 
 		cpu_if->vgic_elrsr = read_gicreg(ICH_ELSR_EL2);
 
 		write_gicreg(0, ICH_HCR_EL2);
 		val = read_gicreg(ICH_VTR_EL2);
-		nr_pri_bits = vtr_to_nr_pri_bits(val);
+		nr_pre_bits = vtr_to_nr_pre_bits(val);
 
 		for (i = 0; i < used_lrs; i++) {
 			if (cpu_if->vgic_elrsr & (1 << i))
@@ -152,7 +152,7 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
 				__gic_v3_set_lr(0, i);
 		}
 
-		switch (nr_pri_bits) {
+		switch (nr_pre_bits) {
 		case 7:
 			cpu_if->vgic_ap0r[3] = read_gicreg(ICH_AP0R3_EL2);
 			cpu_if->vgic_ap0r[2] = read_gicreg(ICH_AP0R2_EL2);
@@ -162,7 +162,7 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
 			cpu_if->vgic_ap0r[0] = read_gicreg(ICH_AP0R0_EL2);
 		}
 
-		switch (nr_pri_bits) {
+		switch (nr_pre_bits) {
 		case 7:
 			cpu_if->vgic_ap1r[3] = read_gicreg(ICH_AP1R3_EL2);
 			cpu_if->vgic_ap1r[2] = read_gicreg(ICH_AP1R2_EL2);
@@ -198,7 +198,7 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
 	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
 	u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
 	u64 val;
-	u32 nr_pri_bits;
+	u32 nr_pre_bits;
 	int i;
 
 	/*
@@ -217,12 +217,12 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
 	}
 
 	val = read_gicreg(ICH_VTR_EL2);
-	nr_pri_bits = vtr_to_nr_pri_bits(val);
+	nr_pre_bits = vtr_to_nr_pre_bits(val);
 
 	if (used_lrs) {
 		write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);
 
-		switch (nr_pri_bits) {
+		switch (nr_pre_bits) {
 		case 7:
 			write_gicreg(cpu_if->vgic_ap0r[3], ICH_AP0R3_EL2);
 			write_gicreg(cpu_if->vgic_ap0r[2], ICH_AP0R2_EL2);
@@ -232,7 +232,7 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
 			write_gicreg(cpu_if->vgic_ap0r[0], ICH_AP0R0_EL2);
 		}
 
-		switch (nr_pri_bits) {
+		switch (nr_pre_bits) {
 		case 7:
 			write_gicreg(cpu_if->vgic_ap1r[3], ICH_AP1R3_EL2);
 			write_gicreg(cpu_if->vgic_ap1r[2], ICH_AP1R2_EL2);
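The renamed macro fixes which ICH_VTR_EL2 field is consulted: the number of ICH_APxRn_EL2 active-priority registers to save and restore is determined by the preemption-bits field (PREbits, bits [28:26]), not the priority-bits field (PRIbits, bits [31:29]) that the old macro read. A standalone sketch of the two extractions (field layout taken from the macros above; the example VTR value is arbitrary):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors the fixed macro: PREbits at [28:26], encoded minus one. */
static uint32_t vtr_to_nr_pre_bits(uint32_t vtr)
{
	return ((vtr >> 26) & 7) + 1;
}

/* The old macro read PRIbits at [31:29] instead. */
static uint32_t vtr_to_nr_pri_bits(uint32_t vtr)
{
	return (vtr >> 29) + 1;
}

int main(void)
{
	/* Example VTR: PRIbits = 5 (encoded 4), PREbits = 5 (encoded 4) */
	uint32_t vtr = (4u << 29) | (4u << 26);

	assert(vtr_to_nr_pre_bits(vtr) == 5);
	assert(vtr_to_nr_pri_bits(vtr) == 5);
	printf("pre=%u pri=%u\n", vtr_to_nr_pre_bits(vtr),
	       vtr_to_nr_pri_bits(vtr));
	return 0;
}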
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index 313ee646480f..e2e5effba2a9 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -295,6 +295,13 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
 	assert_spin_locked(&kvm->mmu_lock);
 	pgd = kvm->arch.pgd + stage2_pgd_index(addr);
 	do {
+		/*
+		 * Make sure the page table is still active, as another thread
+		 * could have possibly freed the page table, while we released
+		 * the lock.
+		 */
+		if (!READ_ONCE(kvm->arch.pgd))
+			break;
 		next = stage2_pgd_addr_end(addr, end);
 		if (!stage2_pgd_none(*pgd))
 			unmap_stage2_puds(kvm, pgd, addr, next);
@@ -829,22 +836,22 @@ void stage2_unmap_vm(struct kvm *kvm)
  * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all
  * underlying level-2 and level-3 tables before freeing the actual level-1 table
  * and setting the struct pointer to NULL.
- *
- * Note we don't need locking here as this is only called when the VM is
- * destroyed, which can only be done once.
  */
 void kvm_free_stage2_pgd(struct kvm *kvm)
 {
-	if (kvm->arch.pgd == NULL)
-		return;
+	void *pgd = NULL;
 
 	spin_lock(&kvm->mmu_lock);
-	unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
+	if (kvm->arch.pgd) {
+		unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
+		pgd = READ_ONCE(kvm->arch.pgd);
+		kvm->arch.pgd = NULL;
+	}
 	spin_unlock(&kvm->mmu_lock);
 
 	/* Free the HW pgd, one page at a time */
-	free_pages_exact(kvm->arch.pgd, S2_PGD_SIZE);
-	kvm->arch.pgd = NULL;
+	if (pgd)
+		free_pages_exact(pgd, S2_PGD_SIZE);
 }
 
 static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
@@ -872,6 +879,9 @@ static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache
 	pmd_t *pmd;
 
 	pud = stage2_get_pud(kvm, cache, addr);
+	if (!pud)
+		return NULL;
+
 	if (stage2_pud_none(*pud)) {
 		if (!cache)
 			return NULL;
@@ -1170,11 +1180,13 @@ static void stage2_wp_range(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
 		 * large. Otherwise, we may see kernel panics with
 		 * CONFIG_DETECT_HUNG_TASK, CONFIG_LOCKUP_DETECTOR,
 		 * CONFIG_LOCKDEP. Additionally, holding the lock too long
-		 * will also starve other vCPUs.
+		 * will also starve other vCPUs. We have to also make sure
+		 * that the page tables are not freed while we released
+		 * the lock.
 		 */
-		if (need_resched() || spin_needbreak(&kvm->mmu_lock))
-			cond_resched_lock(&kvm->mmu_lock);
-
+		cond_resched_lock(&kvm->mmu_lock);
+		if (!READ_ONCE(kvm->arch.pgd))
+			break;
 		next = stage2_pgd_addr_end(addr, end);
 		if (stage2_pgd_present(*pgd))
 			stage2_wp_puds(pgd, addr, next);
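All three hunks in this file enforce the same rule: once a walker may drop mmu_lock (here via cond_resched_lock()), it must re-check kvm->arch.pgd on every iteration, because kvm_free_stage2_pgd() can now tear down the tables concurrently, clearing the pointer under the lock and freeing the pages only after it is released. A minimal userspace analogue of the pattern (pthread mutex standing in for mmu_lock; all names are illustrative):

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static long *table;	/* stands in for kvm->arch.pgd */

static void *walker(void *unused)
{
	pthread_mutex_lock(&lock);
	for (int i = 0; i < 1000; i++) {
		/* Emulate cond_resched_lock(): the lock may be dropped here. */
		pthread_mutex_unlock(&lock);
		pthread_mutex_lock(&lock);
		/* Re-check: teardown may have freed the table meanwhile. */
		if (!table)
			break;
		table[i % 64]++;
	}
	pthread_mutex_unlock(&lock);
	return NULL;
}

static void teardown(void)
{
	long *t;

	pthread_mutex_lock(&lock);
	t = table;	/* detach under the lock ... */
	table = NULL;
	pthread_mutex_unlock(&lock);
	free(t);	/* ... free outside it, exactly once */
}

int main(void)
{
	pthread_t th;

	table = calloc(64, sizeof(*table));
	pthread_create(&th, NULL, walker, NULL);
	teardown();
	pthread_join(th, NULL);
	return 0;
}

Detaching the pointer under the lock and freeing it outside guarantees the walker sees either a valid table or NULL, never freed memory.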
diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c
index dc68e2e424ab..3a0b8999f011 100644
--- a/virt/kvm/arm/vgic/vgic-init.c
+++ b/virt/kvm/arm/vgic/vgic-init.c
@@ -242,8 +242,11 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
 	 * If we are creating a VCPU with a GICv3 we must also register the
 	 * KVM io device for the redistributor that belongs to this VCPU.
 	 */
-	if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
+	if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
+		mutex_lock(&vcpu->kvm->lock);
 		ret = vgic_register_redist_iodev(vcpu);
+		mutex_unlock(&vcpu->kvm->lock);
+	}
 	return ret;
 }
 
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v2.c b/virt/kvm/arm/vgic/vgic-mmio-v2.c
index 0a4283ed9aa7..63e0bbdcddcc 100644
--- a/virt/kvm/arm/vgic/vgic-mmio-v2.c
+++ b/virt/kvm/arm/vgic/vgic-mmio-v2.c
@@ -226,7 +226,13 @@ static unsigned long vgic_mmio_read_vcpuif(struct kvm_vcpu *vcpu,
 
 	switch (addr & 0xff) {
 	case GIC_CPU_CTRL:
-		val = vmcr.ctlr;
+		val = vmcr.grpen0 << GIC_CPU_CTRL_EnableGrp0_SHIFT;
+		val |= vmcr.grpen1 << GIC_CPU_CTRL_EnableGrp1_SHIFT;
+		val |= vmcr.ackctl << GIC_CPU_CTRL_AckCtl_SHIFT;
+		val |= vmcr.fiqen << GIC_CPU_CTRL_FIQEn_SHIFT;
+		val |= vmcr.cbpr << GIC_CPU_CTRL_CBPR_SHIFT;
+		val |= vmcr.eoim << GIC_CPU_CTRL_EOImodeNS_SHIFT;
+
 		break;
 	case GIC_CPU_PRIMASK:
 		/*
@@ -267,7 +273,13 @@ static void vgic_mmio_write_vcpuif(struct kvm_vcpu *vcpu,
 
 	switch (addr & 0xff) {
 	case GIC_CPU_CTRL:
-		vmcr.ctlr = val;
+		vmcr.grpen0 = !!(val & GIC_CPU_CTRL_EnableGrp0);
+		vmcr.grpen1 = !!(val & GIC_CPU_CTRL_EnableGrp1);
+		vmcr.ackctl = !!(val & GIC_CPU_CTRL_AckCtl);
+		vmcr.fiqen = !!(val & GIC_CPU_CTRL_FIQEn);
+		vmcr.cbpr = !!(val & GIC_CPU_CTRL_CBPR);
+		vmcr.eoim = !!(val & GIC_CPU_CTRL_EOImodeNS);
+
 		break;
 	case GIC_CPU_PRIMASK:
 		/*
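Rather than treating the guest's GICC_CTRL word as one opaque ctlr value, the emulation now packs and unpacks each architected bit individually, matching the per-field vgic_vmcr layout introduced in vgic.h below. A standalone round-trip sketch of the idea (the shift values mirror the GICv2 GICC_CTLR layout but stand in for the kernel's GIC_CPU_CTRL_* constants, so treat them as illustrative):

#include <assert.h>
#include <stdint.h>

/* Illustrative stand-ins for the kernel's GIC_CPU_CTRL_* shifts. */
enum {
	EnableGrp0_SHIFT = 0,
	EnableGrp1_SHIFT = 1,
	AckCtl_SHIFT     = 2,
	FIQEn_SHIFT      = 3,
	CBPR_SHIFT       = 4,
	EOImodeNS_SHIFT  = 9,
};

struct vmcr {
	uint32_t grpen0, grpen1, ackctl, fiqen, cbpr, eoim;
};

static uint32_t pack_ctrl(const struct vmcr *v)
{
	return (v->grpen0 << EnableGrp0_SHIFT) |
	       (v->grpen1 << EnableGrp1_SHIFT) |
	       (v->ackctl << AckCtl_SHIFT) |
	       (v->fiqen  << FIQEn_SHIFT) |
	       (v->cbpr   << CBPR_SHIFT) |
	       (v->eoim   << EOImodeNS_SHIFT);
}

static void unpack_ctrl(uint32_t val, struct vmcr *v)
{
	v->grpen0 = !!(val & (1u << EnableGrp0_SHIFT));
	v->grpen1 = !!(val & (1u << EnableGrp1_SHIFT));
	v->ackctl = !!(val & (1u << AckCtl_SHIFT));
	v->fiqen  = !!(val & (1u << FIQEn_SHIFT));
	v->cbpr   = !!(val & (1u << CBPR_SHIFT));
	v->eoim   = !!(val & (1u << EOImodeNS_SHIFT));
}

int main(void)
{
	struct vmcr in = { .grpen0 = 1, .fiqen = 1, .eoim = 1 }, out;

	/* Pack then unpack: every field must survive the round trip. */
	unpack_ctrl(pack_ctrl(&in), &out);
	assert(in.grpen0 == out.grpen0 && in.grpen1 == out.grpen1 &&
	       in.ackctl == out.ackctl && in.fiqen == out.fiqen &&
	       in.cbpr == out.cbpr && in.eoim == out.eoim);
	return 0;
}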
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v3.c b/virt/kvm/arm/vgic/vgic-mmio-v3.c
index 99da1a207c19..201d5e2e973d 100644
--- a/virt/kvm/arm/vgic/vgic-mmio-v3.c
+++ b/virt/kvm/arm/vgic/vgic-mmio-v3.c
@@ -586,7 +586,7 @@ int vgic_register_redist_iodev(struct kvm_vcpu *vcpu)
 	if (!vgic_v3_check_base(kvm))
 		return -EINVAL;
 
-	rd_base = vgic->vgic_redist_base + kvm_vcpu_get_idx(vcpu) * SZ_64K * 2;
+	rd_base = vgic->vgic_redist_base + vgic->vgic_redist_free_offset;
 	sgi_base = rd_base + SZ_64K;
 
 	kvm_iodevice_init(&rd_dev->dev, &kvm_io_gic_ops);
@@ -614,11 +614,15 @@ int vgic_register_redist_iodev(struct kvm_vcpu *vcpu)
 	mutex_lock(&kvm->slots_lock);
 	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, sgi_base,
 				      SZ_64K, &sgi_dev->dev);
-	mutex_unlock(&kvm->slots_lock);
-	if (ret)
+	if (ret) {
 		kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS,
 					  &rd_dev->dev);
+		goto out;
+	}
 
+	vgic->vgic_redist_free_offset += 2 * SZ_64K;
+out:
+	mutex_unlock(&kvm->slots_lock);
 	return ret;
 }
 
@@ -644,10 +648,12 @@ static int vgic_register_all_redist_iodevs(struct kvm *kvm)
 
 	if (ret) {
 		/* The current c failed, so we start with the previous one. */
+		mutex_lock(&kvm->slots_lock);
 		for (c--; c >= 0; c--) {
 			vcpu = kvm_get_vcpu(kvm, c);
 			vgic_unregister_redist_iodev(vcpu);
 		}
+		mutex_unlock(&kvm->slots_lock);
 	}
 
 	return ret;
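Redistributor MMIO frames are now handed out from a running vgic_redist_free_offset instead of being derived from the VCPU index, and the offset only advances, under slots_lock, once both the RD and SGI devices registered successfully. A standalone sketch of that bump allocation (struct and function names are illustrative, not the kernel API):

#include <assert.h>
#include <stdint.h>

#define SZ_64K 0x10000ULL

/* Illustrative allocator mirroring vgic_redist_free_offset: each
 * redistributor consumes two contiguous 64K frames (RD + SGI). */
struct vgic {
	uint64_t redist_base;
	uint64_t redist_free_offset;
};

static int register_redist(struct vgic *v, uint64_t *rd_base,
			   uint64_t *sgi_base, int fail)
{
	*rd_base = v->redist_base + v->redist_free_offset;
	*sgi_base = *rd_base + SZ_64K;
	if (fail)
		return -1;	/* offset untouched: the frames stay free */
	v->redist_free_offset += 2 * SZ_64K;
	return 0;
}

int main(void)
{
	struct vgic v = { .redist_base = 0x8000000 };
	uint64_t rd, sgi;

	assert(register_redist(&v, &rd, &sgi, 0) == 0 && rd == 0x8000000);
	assert(register_redist(&v, &rd, &sgi, 1) < 0);	/* no advance */
	assert(register_redist(&v, &rd, &sgi, 0) == 0 && rd == 0x8020000);
	return 0;
}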
diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c
index a65757aab6d3..e4187e52bb26 100644
--- a/virt/kvm/arm/vgic/vgic-v2.c
+++ b/virt/kvm/arm/vgic/vgic-v2.c
@@ -149,6 +149,13 @@ void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
 	if (irq->hw) {
 		val |= GICH_LR_HW;
 		val |= irq->hwintid << GICH_LR_PHYSID_CPUID_SHIFT;
+		/*
+		 * Never set pending+active on a HW interrupt, as the
+		 * pending state is kept at the physical distributor
+		 * level.
+		 */
+		if (irq->active && irq_is_pending(irq))
+			val &= ~GICH_LR_PENDING_BIT;
 	} else {
 		if (irq->config == VGIC_CONFIG_LEVEL)
 			val |= GICH_LR_EOI;
@@ -170,7 +177,18 @@ void vgic_v2_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
 	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
 	u32 vmcr;
 
-	vmcr = (vmcrp->ctlr << GICH_VMCR_CTRL_SHIFT) & GICH_VMCR_CTRL_MASK;
+	vmcr = (vmcrp->grpen0 << GICH_VMCR_ENABLE_GRP0_SHIFT) &
+		GICH_VMCR_ENABLE_GRP0_MASK;
+	vmcr |= (vmcrp->grpen1 << GICH_VMCR_ENABLE_GRP1_SHIFT) &
+		GICH_VMCR_ENABLE_GRP1_MASK;
+	vmcr |= (vmcrp->ackctl << GICH_VMCR_ACK_CTL_SHIFT) &
+		GICH_VMCR_ACK_CTL_MASK;
+	vmcr |= (vmcrp->fiqen << GICH_VMCR_FIQ_EN_SHIFT) &
+		GICH_VMCR_FIQ_EN_MASK;
+	vmcr |= (vmcrp->cbpr << GICH_VMCR_CBPR_SHIFT) &
+		GICH_VMCR_CBPR_MASK;
+	vmcr |= (vmcrp->eoim << GICH_VMCR_EOI_MODE_SHIFT) &
+		GICH_VMCR_EOI_MODE_MASK;
 	vmcr |= (vmcrp->abpr << GICH_VMCR_ALIAS_BINPOINT_SHIFT) &
 		GICH_VMCR_ALIAS_BINPOINT_MASK;
 	vmcr |= (vmcrp->bpr << GICH_VMCR_BINPOINT_SHIFT) &
@@ -188,8 +206,19 @@ void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
 
 	vmcr = cpu_if->vgic_vmcr;
 
-	vmcrp->ctlr = (vmcr & GICH_VMCR_CTRL_MASK) >>
-			GICH_VMCR_CTRL_SHIFT;
+	vmcrp->grpen0 = (vmcr & GICH_VMCR_ENABLE_GRP0_MASK) >>
+		GICH_VMCR_ENABLE_GRP0_SHIFT;
+	vmcrp->grpen1 = (vmcr & GICH_VMCR_ENABLE_GRP1_MASK) >>
+		GICH_VMCR_ENABLE_GRP1_SHIFT;
+	vmcrp->ackctl = (vmcr & GICH_VMCR_ACK_CTL_MASK) >>
+		GICH_VMCR_ACK_CTL_SHIFT;
+	vmcrp->fiqen = (vmcr & GICH_VMCR_FIQ_EN_MASK) >>
+		GICH_VMCR_FIQ_EN_SHIFT;
+	vmcrp->cbpr = (vmcr & GICH_VMCR_CBPR_MASK) >>
+		GICH_VMCR_CBPR_SHIFT;
+	vmcrp->eoim = (vmcr & GICH_VMCR_EOI_MODE_MASK) >>
+		GICH_VMCR_EOI_MODE_SHIFT;
+
 	vmcrp->abpr = (vmcr & GICH_VMCR_ALIAS_BINPOINT_MASK) >>
 		GICH_VMCR_ALIAS_BINPOINT_SHIFT;
 	vmcrp->bpr = (vmcr & GICH_VMCR_BINPOINT_MASK) >>
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
index 8fa737edde6f..030248e669f6 100644
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -127,6 +127,13 @@ void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
 	if (irq->hw) {
 		val |= ICH_LR_HW;
 		val |= ((u64)irq->hwintid) << ICH_LR_PHYS_ID_SHIFT;
+		/*
+		 * Never set pending+active on a HW interrupt, as the
+		 * pending state is kept at the physical distributor
+		 * level.
+		 */
+		if (irq->active && irq_is_pending(irq))
+			val &= ~ICH_LR_PENDING_BIT;
 	} else {
 		if (irq->config == VGIC_CONFIG_LEVEL)
 			val |= ICH_LR_EOI;
@@ -152,15 +159,24 @@ void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr)
 void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
 {
 	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
+	u32 model = vcpu->kvm->arch.vgic.vgic_model;
 	u32 vmcr;
 
-	/*
-	 * Ignore the FIQen bit, because GIC emulation always implies
-	 * SRE=1 which means the vFIQEn bit is also RES1.
-	 */
-	vmcr = ((vmcrp->ctlr >> ICC_CTLR_EL1_EOImode_SHIFT) <<
-		ICH_VMCR_EOIM_SHIFT) & ICH_VMCR_EOIM_MASK;
-	vmcr |= (vmcrp->ctlr << ICH_VMCR_CBPR_SHIFT) & ICH_VMCR_CBPR_MASK;
+	if (model == KVM_DEV_TYPE_ARM_VGIC_V2) {
+		vmcr = (vmcrp->ackctl << ICH_VMCR_ACK_CTL_SHIFT) &
+			ICH_VMCR_ACK_CTL_MASK;
+		vmcr |= (vmcrp->fiqen << ICH_VMCR_FIQ_EN_SHIFT) &
+			ICH_VMCR_FIQ_EN_MASK;
+	} else {
+		/*
+		 * When emulating GICv3 on GICv3 with SRE=1 on the
+		 * VFIQEn bit is RES1 and the VAckCtl bit is RES0.
+		 */
+		vmcr = ICH_VMCR_FIQ_EN_MASK;
+	}
+
+	vmcr |= (vmcrp->cbpr << ICH_VMCR_CBPR_SHIFT) & ICH_VMCR_CBPR_MASK;
+	vmcr |= (vmcrp->eoim << ICH_VMCR_EOIM_SHIFT) & ICH_VMCR_EOIM_MASK;
 	vmcr |= (vmcrp->abpr << ICH_VMCR_BPR1_SHIFT) & ICH_VMCR_BPR1_MASK;
 	vmcr |= (vmcrp->bpr << ICH_VMCR_BPR0_SHIFT) & ICH_VMCR_BPR0_MASK;
 	vmcr |= (vmcrp->pmr << ICH_VMCR_PMR_SHIFT) & ICH_VMCR_PMR_MASK;
@@ -173,17 +189,27 @@ void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
 void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
 {
 	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
+	u32 model = vcpu->kvm->arch.vgic.vgic_model;
 	u32 vmcr;
 
 	vmcr = cpu_if->vgic_vmcr;
 
-	/*
-	 * Ignore the FIQen bit, because GIC emulation always implies
-	 * SRE=1 which means the vFIQEn bit is also RES1.
-	 */
-	vmcrp->ctlr = ((vmcr >> ICH_VMCR_EOIM_SHIFT) <<
-			ICC_CTLR_EL1_EOImode_SHIFT) & ICC_CTLR_EL1_EOImode_MASK;
-	vmcrp->ctlr |= (vmcr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT;
+	if (model == KVM_DEV_TYPE_ARM_VGIC_V2) {
+		vmcrp->ackctl = (vmcr & ICH_VMCR_ACK_CTL_MASK) >>
+			ICH_VMCR_ACK_CTL_SHIFT;
+		vmcrp->fiqen = (vmcr & ICH_VMCR_FIQ_EN_MASK) >>
+			ICH_VMCR_FIQ_EN_SHIFT;
+	} else {
+		/*
+		 * When emulating GICv3 on GICv3 with SRE=1 on the
+		 * VFIQEn bit is RES1 and the VAckCtl bit is RES0.
+		 */
+		vmcrp->fiqen = 1;
+		vmcrp->ackctl = 0;
+	}
+
+	vmcrp->cbpr = (vmcr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT;
+	vmcrp->eoim = (vmcr & ICH_VMCR_EOIM_MASK) >> ICH_VMCR_EOIM_SHIFT;
 	vmcrp->abpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT;
 	vmcrp->bpr = (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT;
 	vmcrp->pmr = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT;
diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h
index da83e4caa272..bba7fa22a7f7 100644
--- a/virt/kvm/arm/vgic/vgic.h
+++ b/virt/kvm/arm/vgic/vgic.h
@@ -111,14 +111,18 @@ static inline bool irq_is_pending(struct vgic_irq *irq)
  * registers regardless of the hardware backed GIC used.
  */
 struct vgic_vmcr {
-	u32 ctlr;
+	u32 grpen0;
+	u32 grpen1;
+
+	u32 ackctl;
+	u32 fiqen;
+	u32 cbpr;
+	u32 eoim;
+
 	u32 abpr;
 	u32 bpr;
 	u32 pmr;  /* Priority mask field in the GICC_PMR and
 		   * ICC_PMR_EL1 priority field format */
-	/* Below member variable are valid only for GICv3 */
-	u32 grpen0;
-	u32 grpen1;
 };
 
 struct vgic_reg_attr {