author     Russell King <rmk+kernel@armlinux.org.uk>  2016-10-06 03:56:43 -0400
committer  Russell King <rmk+kernel@armlinux.org.uk>  2016-10-06 03:56:43 -0400
commit     301a36fa700f9add6e14f5a95c7573e01578343a
tree       6ff94ff3b08b838353b9127eb5f8055b3de6b25f
parent     b828f960215f02e5d2c88bbd27565c694254a15a
parent     b60752f2b20c167859943e001727f0d4da419b23
Merge branches 'misc' and 'sa1111-base' into for-linus
-rw-r--r-- .mailmap | 3
-rw-r--r-- Documentation/ABI/stable/sysfs-devices | 2
-rw-r--r-- Documentation/PCI/MSI-HOWTO.txt | 24
-rw-r--r-- Documentation/PCI/pci.txt | 1
-rw-r--r-- Documentation/arm/CCN.txt | 16
-rw-r--r-- Documentation/arm64/silicon-errata.txt | 1
-rw-r--r-- Documentation/block/queue-sysfs.txt | 18
-rw-r--r-- Documentation/conf.py | 2
-rw-r--r-- Documentation/cpu-freq/cpufreq-stats.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/iio/adc/rockchip-saradc.txt | 7
-rw-r--r-- Documentation/devicetree/bindings/mmc/sdhci-st.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/serial/8250.txt | 19
-rw-r--r-- Documentation/devicetree/bindings/sound/omap-mcpdm.txt | 10
-rw-r--r-- Documentation/devicetree/bindings/thermal/thermal.txt | 10
-rw-r--r-- Documentation/filesystems/overlayfs.txt | 8
-rw-r--r-- Documentation/hwmon/ftsteutates | 4
-rw-r--r-- Documentation/i2c/slave-interface | 5
-rw-r--r-- Documentation/kernel-documentation.rst | 6
-rw-r--r-- Documentation/kernel-parameters.txt | 4
-rw-r--r-- Documentation/networking/dsa/dsa.txt | 20
-rw-r--r-- Documentation/networking/rxrpc.txt | 21
-rw-r--r-- Documentation/power/basic-pm-debugging.txt | 27
-rw-r--r-- Documentation/power/interface.txt | 151
-rw-r--r-- Documentation/powerpc/transactional_memory.txt | 2
-rw-r--r-- Documentation/rapidio/mport_cdev.txt | 4
-rw-r--r-- Documentation/sphinx-static/theme_overrides.css | 3
-rw-r--r-- MAINTAINERS | 62
-rw-r--r-- Makefile | 9
-rw-r--r-- arch/Kconfig | 20
-rw-r--r-- arch/alpha/include/asm/uaccess.h | 19
-rw-r--r-- arch/arc/include/asm/entry.h | 4
-rw-r--r-- arch/arc/include/asm/irqflags-compact.h | 2
-rw-r--r-- arch/arc/include/asm/pgtable.h | 2
-rw-r--r-- arch/arc/include/asm/uaccess.h | 11
-rw-r--r-- arch/arc/include/uapi/asm/elf.h | 11
-rw-r--r-- arch/arc/kernel/arcksyms.c | 2
-rw-r--r-- arch/arc/kernel/process.c | 2
-rw-r--r-- arch/arc/kernel/setup.c | 6
-rw-r--r-- arch/arc/mm/cache.c | 9
-rw-r--r-- arch/arc/mm/highmem.c | 1
-rw-r--r-- arch/arm/Kconfig | 1
-rw-r--r-- arch/arm/Makefile | 2
-rw-r--r-- arch/arm/boot/dts/am335x-baltos.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/am335x-igep0033.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/am335x-phycore-som.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/arm-realview-pbx-a9.dts | 9
-rw-r--r-- arch/arm/boot/dts/armada-388-clearfog.dts | 8
-rw-r--r-- arch/arm/boot/dts/bcm2835-rpi.dtsi | 1
-rw-r--r-- arch/arm/boot/dts/bcm283x.dtsi | 3
-rw-r--r-- arch/arm/boot/dts/exynos5410-odroidxu.dts | 3
-rw-r--r-- arch/arm/boot/dts/imx6qdl.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/imx6sx-sabreauto.dts | 2
-rw-r--r-- arch/arm/boot/dts/imx7d-sdb.dts | 2
-rw-r--r-- arch/arm/boot/dts/integratorap.dts | 2
-rw-r--r-- arch/arm/boot/dts/integratorcp.dts | 2
-rw-r--r-- arch/arm/boot/dts/keystone.dtsi | 8
-rw-r--r-- arch/arm/boot/dts/kirkwood-ib62x0.dts | 2
-rw-r--r-- arch/arm/boot/dts/kirkwood-openrd.dtsi | 4
-rw-r--r-- arch/arm/boot/dts/logicpd-som-lv.dtsi | 11
-rw-r--r-- arch/arm/boot/dts/logicpd-torpedo-som.dtsi | 1
-rw-r--r-- arch/arm/boot/dts/omap3-overo-base.dtsi | 4
-rw-r--r-- arch/arm/boot/dts/omap3-overo-chestnut43-common.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/omap3-overo-tobi-common.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/omap3-overo-tobiduo-common.dtsi | 3
-rw-r--r-- arch/arm/boot/dts/rk3066a.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/rk3288.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/rk3xxx.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/stih407-family.dtsi | 10
-rw-r--r-- arch/arm/boot/dts/stih410.dtsi | 12
-rw-r--r-- arch/arm/boot/dts/sun5i-a13.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/tegra114-dalmore.dts | 2
-rw-r--r-- arch/arm/boot/dts/tegra114-roth.dts | 2
-rw-r--r-- arch/arm/boot/dts/tegra114-tn7.dts | 2
-rw-r--r-- arch/arm/common/locomo.c | 5
-rw-r--r-- arch/arm/common/sa1111.c | 466
-rw-r--r-- arch/arm/configs/aspeed_g4_defconfig | 2
-rw-r--r-- arch/arm/configs/aspeed_g5_defconfig | 2
-rw-r--r-- arch/arm/configs/keystone_defconfig | 1
-rw-r--r-- arch/arm/configs/multi_v7_defconfig | 2
-rw-r--r-- arch/arm/include/asm/hardware/sa1111.h | 4
-rw-r--r-- arch/arm/include/asm/pgtable-2level-hwdef.h | 1
-rw-r--r-- arch/arm/include/asm/pgtable-3level-hwdef.h | 1
-rw-r--r-- arch/arm/include/asm/uaccess.h | 11
-rw-r--r-- arch/arm/kernel/entry-armv.S | 1
-rw-r--r-- arch/arm/kernel/hyp-stub.S | 13
-rw-r--r-- arch/arm/kernel/sys_oabi-compat.c | 8
-rw-r--r-- arch/arm/kvm/arm.c | 8
-rw-r--r-- arch/arm/kvm/mmu.c | 6
-rw-r--r-- arch/arm/mach-clps711x/Kconfig | 2
-rw-r--r-- arch/arm/mach-exynos/suspend.c | 6
-rw-r--r-- arch/arm/mach-imx/gpc.c | 6
-rw-r--r-- arch/arm/mach-imx/mach-imx6ul.c | 1
-rw-r--r-- arch/arm/mach-imx/pm-imx6.c | 4
-rw-r--r-- arch/arm/mach-mvebu/Makefile | 3
-rw-r--r-- arch/arm/mach-omap2/cm33xx.c | 6
-rw-r--r-- arch/arm/mach-omap2/cminst44xx.c | 6
-rw-r--r-- arch/arm/mach-omap2/omap_hwmod.c | 8
-rw-r--r-- arch/arm/mach-omap2/omap_hwmod.h | 4
-rw-r--r-- arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c | 2
-rw-r--r-- arch/arm/mach-omap2/omap_hwmod_3xxx_data.c | 12
-rw-r--r-- arch/arm/mach-oxnas/Kconfig | 2
-rw-r--r-- arch/arm/mach-pxa/corgi.c | 1
-rw-r--r-- arch/arm/mach-pxa/idp.c | 3
-rw-r--r-- arch/arm/mach-pxa/lubbock.c | 14
-rw-r--r-- arch/arm/mach-pxa/spitz.c | 1
-rw-r--r-- arch/arm/mach-pxa/xcep.c | 3
-rw-r--r-- arch/arm/mach-realview/Makefile | 3
-rw-r--r-- arch/arm/mach-realview/core.c | 3
-rw-r--r-- arch/arm/mach-s5pv210/Makefile | 2
-rw-r--r-- arch/arm/mach-sa1100/clock.c | 5
-rw-r--r-- arch/arm/mach-sa1100/generic.c | 4
-rw-r--r-- arch/arm/mach-sa1100/generic.h | 2
-rw-r--r-- arch/arm/mach-sa1100/pleb.c | 2
-rw-r--r-- arch/arm/mach-shmobile/platsmp.c | 3
-rw-r--r-- arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c | 62
-rw-r--r-- arch/arm/mm/mmu.c | 23
-rw-r--r-- arch/arm/mm/proc-v7.S | 1
-rw-r--r-- arch/arm/xen/enlighten.c | 9
-rw-r--r-- arch/arm64/Kconfig | 1
-rw-r--r-- arch/arm64/Kconfig.platforms | 4
-rw-r--r-- arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi | 8
-rw-r--r-- arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi | 8
-rw-r--r-- arch/arm64/boot/dts/apm/apm-storm.dtsi | 8
l--------- arch/arm64/boot/dts/broadcom/bcm2835-rpi.dtsi | 1
-rw-r--r-- arch/arm64/boot/dts/broadcom/bcm2837-rpi-3-b.dts | 4
-rw-r--r-- arch/arm64/boot/dts/broadcom/bcm2837.dtsi | 2
l--------- arch/arm64/boot/dts/broadcom/bcm283x-rpi-smsc9514.dtsi | 1
l--------- arch/arm64/boot/dts/broadcom/bcm283x.dtsi | 1
-rw-r--r-- arch/arm64/boot/dts/broadcom/ns2.dtsi | 8
-rw-r--r-- arch/arm64/boot/dts/cavium/thunder-88xx.dtsi | 8
-rw-r--r-- arch/arm64/boot/dts/exynos/exynos7-espresso.dts | 3
-rw-r--r-- arch/arm64/boot/dts/exynos/exynos7.dtsi | 8
-rw-r--r-- arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi | 8
-rw-r--r-- arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi | 8
-rw-r--r-- arch/arm64/boot/dts/marvell/armada-ap806.dtsi | 8
-rw-r--r-- arch/arm64/boot/dts/rockchip/rk3368.dtsi | 2
-rw-r--r-- arch/arm64/boot/dts/socionext/uniphier-ph1-ld20.dtsi | 8
-rw-r--r-- arch/arm64/boot/dts/xilinx/zynqmp.dtsi | 8
-rw-r--r-- arch/arm64/configs/defconfig | 53
-rw-r--r-- arch/arm64/include/asm/kprobes.h | 2
-rw-r--r-- arch/arm64/include/asm/percpu.h | 8
-rw-r--r-- arch/arm64/include/asm/spinlock.h | 10
-rw-r--r-- arch/arm64/include/asm/uaccess.h | 15
-rw-r--r-- arch/arm64/kernel/entry.S | 7
-rw-r--r-- arch/arm64/kernel/head.S | 3
-rw-r--r-- arch/arm64/kernel/hibernate.c | 82
-rw-r--r-- arch/arm64/kernel/probes/kprobes.c | 31
-rw-r--r-- arch/arm64/kernel/sleep.S | 10
-rw-r--r-- arch/arm64/kernel/smp.c | 8
-rw-r--r-- arch/arm64/kvm/hyp/switch.c | 2
-rw-r--r-- arch/arm64/kvm/sys_regs.c | 10
-rw-r--r-- arch/arm64/mm/dump.c | 6
-rw-r--r-- arch/arm64/mm/fault.c | 14
-rw-r--r-- arch/arm64/mm/numa.c | 2
-rw-r--r-- arch/arm64/mm/proc.S | 9
-rw-r--r-- arch/avr32/include/asm/uaccess.h | 11
-rw-r--r-- arch/avr32/kernel/avr32_ksyms.c | 2
-rw-r--r-- arch/avr32/lib/copy_user.S | 8
-rw-r--r-- arch/blackfin/include/asm/uaccess.h | 9
-rw-r--r-- arch/blackfin/mach-bf561/boards/cm_bf561.c | 3
-rw-r--r-- arch/blackfin/mach-bf561/boards/ezkit.c | 3
-rw-r--r-- arch/cris/include/asm/uaccess.h | 71
-rw-r--r-- arch/frv/include/asm/uaccess.h | 12
-rw-r--r-- arch/h8300/include/asm/io.h | 2
-rw-r--r-- arch/hexagon/include/asm/uaccess.h | 3
-rw-r--r-- arch/ia64/Kconfig | 1
-rw-r--r-- arch/ia64/include/asm/uaccess.h | 31
-rw-r--r-- arch/m32r/include/asm/uaccess.h | 2
-rw-r--r-- arch/m68k/kernel/signal.c | 1
-rw-r--r-- arch/metag/include/asm/uaccess.h | 3
-rw-r--r-- arch/metag/mm/init.c | 1
-rw-r--r-- arch/microblaze/include/asm/uaccess.h | 11
-rw-r--r-- arch/mips/include/asm/page.h | 4
-rw-r--r-- arch/mips/include/asm/uaccess.h | 3
-rw-r--r-- arch/mips/kvm/emulate.c | 35
-rw-r--r-- arch/mips/kvm/mmu.c | 70
-rw-r--r-- arch/mn10300/include/asm/uaccess.h | 1
-rw-r--r-- arch/mn10300/lib/usercopy.c | 4
-rw-r--r-- arch/nios2/include/asm/uaccess.h | 13
-rw-r--r-- arch/openrisc/include/asm/uaccess.h | 35
-rw-r--r-- arch/parisc/Kconfig | 1
-rw-r--r-- arch/parisc/configs/c8000_defconfig | 1
-rw-r--r-- arch/parisc/configs/generic-64bit_defconfig | 1
-rw-r--r-- arch/parisc/include/asm/uaccess.h | 27
-rw-r--r-- arch/parisc/include/uapi/asm/errno.h | 4
-rw-r--r-- arch/parisc/kernel/processor.c | 8
-rw-r--r-- arch/parisc/kernel/time.c | 12
-rw-r--r-- arch/powerpc/Kconfig | 1
-rw-r--r-- arch/powerpc/Makefile | 22
-rw-r--r-- arch/powerpc/crypto/crc32c-vpmsum_glue.c | 3
-rw-r--r-- arch/powerpc/include/asm/cpu_has_feature.h | 2
-rw-r--r-- arch/powerpc/include/asm/cpuidle.h | 13
-rw-r--r-- arch/powerpc/include/asm/cputhreads.h | 1
-rw-r--r-- arch/powerpc/include/asm/feature-fixups.h | 1
-rw-r--r-- arch/powerpc/include/asm/hmi.h | 2
-rw-r--r-- arch/powerpc/include/asm/paca.h | 12
-rw-r--r-- arch/powerpc/include/asm/pci-bridge.h | 1
-rw-r--r-- arch/powerpc/include/asm/switch_to.h | 8
-rw-r--r-- arch/powerpc/include/asm/uaccess.h | 29
-rw-r--r-- arch/powerpc/include/asm/xics.h | 2
-rw-r--r-- arch/powerpc/kernel/Makefile | 2
-rw-r--r-- arch/powerpc/kernel/eeh.c | 4
-rw-r--r-- arch/powerpc/kernel/entry_64.S | 12
-rw-r--r-- arch/powerpc/kernel/exceptions-64s.S | 98
-rw-r--r-- arch/powerpc/kernel/idle_book3s.S | 27
-rw-r--r-- arch/powerpc/kernel/kprobes.c | 2
-rw-r--r-- arch/powerpc/kernel/mce.c | 3
-rw-r--r-- arch/powerpc/kernel/pci-common.c | 43
-rw-r--r-- arch/powerpc/kernel/process.c | 20
-rw-r--r-- arch/powerpc/kernel/prom_init.c | 11
-rw-r--r-- arch/powerpc/kernel/ptrace.c | 19
-rw-r--r-- arch/powerpc/kernel/setup_32.c | 9
-rw-r--r-- arch/powerpc/kernel/setup_64.c | 1
-rw-r--r-- arch/powerpc/kernel/signal_32.c | 14
-rw-r--r-- arch/powerpc/kernel/signal_64.c | 14
-rw-r--r-- arch/powerpc/kernel/smp.c | 2
-rw-r--r-- arch/powerpc/kernel/traps.c | 3
-rw-r--r-- arch/powerpc/kernel/vdso.c | 1
-rw-r--r-- arch/powerpc/kernel/vdso32/Makefile | 6
-rw-r--r-- arch/powerpc/kernel/vdso64/Makefile | 6
-rw-r--r-- arch/powerpc/kvm/Makefile | 1
-rw-r--r-- arch/powerpc/kvm/book3s_hv_hmi.c (renamed from arch/powerpc/kernel/hmi.c) | 0
-rw-r--r-- arch/powerpc/kvm/book3s_xics.c | 12
-rw-r--r-- arch/powerpc/lib/checksum_32.S | 8
-rw-r--r-- arch/powerpc/lib/feature-fixups.c | 3
-rw-r--r-- arch/powerpc/mm/fault.c | 2
-rw-r--r-- arch/powerpc/mm/slb_low.S | 7
-rw-r--r-- arch/powerpc/platforms/512x/mpc512x_lpbfifo.c | 1
-rw-r--r-- arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c | 1
-rw-r--r-- arch/powerpc/platforms/cell/spufs/inode.c | 4
-rw-r--r-- arch/powerpc/platforms/embedded6xx/holly.c | 2
-rw-r--r-- arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c | 2
-rw-r--r-- arch/powerpc/platforms/pasemi/iommu.c | 5
-rw-r--r-- arch/powerpc/platforms/powernv/opal-dump.c | 7
-rw-r--r-- arch/powerpc/platforms/powernv/opal-elog.c | 7
-rw-r--r-- arch/powerpc/platforms/powernv/opal-irqchip.c | 3
-rw-r--r-- arch/powerpc/platforms/powernv/opal.c | 1
-rw-r--r-- arch/powerpc/platforms/powernv/pci-ioda.c | 78
-rw-r--r-- arch/powerpc/platforms/pseries/hotplug-memory.c | 26
-rw-r--r-- arch/powerpc/platforms/pseries/pci.c | 4
-rw-r--r-- arch/powerpc/platforms/pseries/pci_dlpar.c | 7
-rw-r--r-- arch/powerpc/platforms/pseries/setup.c | 2
-rw-r--r-- arch/powerpc/sysdev/cpm1.c | 6
-rw-r--r-- arch/powerpc/sysdev/cpm_common.c | 3
-rw-r--r-- arch/powerpc/sysdev/fsl_rio.c | 2
-rw-r--r-- arch/powerpc/sysdev/xics/Kconfig | 1
-rw-r--r-- arch/powerpc/sysdev/xics/icp-opal.c | 12
-rw-r--r-- arch/powerpc/sysdev/xics/ics-opal.c | 4
-rw-r--r-- arch/powerpc/sysdev/xics/ics-rtas.c | 4
-rw-r--r-- arch/powerpc/sysdev/xics/xics-common.c | 59
-rw-r--r-- arch/s390/Kconfig | 15
-rw-r--r-- arch/s390/boot/compressed/head.S | 11
-rw-r--r-- arch/s390/configs/default_defconfig | 3
-rw-r--r-- arch/s390/configs/gcov_defconfig | 3
-rw-r--r-- arch/s390/configs/performance_defconfig | 3
-rw-r--r-- arch/s390/crypto/crc32-vx.c | 6
-rw-r--r-- arch/s390/defconfig | 3
-rw-r--r-- arch/s390/include/asm/uaccess.h | 27
-rw-r--r-- arch/s390/kernel/head.S | 4
-rw-r--r-- arch/s390/kernel/setup.c | 6
-rw-r--r-- arch/s390/kvm/kvm-s390.c | 15
-rw-r--r-- arch/s390/kvm/vsie.c | 2
-rw-r--r-- arch/s390/lib/string.c | 16
-rw-r--r-- arch/s390/lib/uaccess.c | 2
-rw-r--r-- arch/s390/mm/pageattr.c | 2
-rw-r--r-- arch/score/include/asm/uaccess.h | 46
-rw-r--r-- arch/sh/include/asm/uaccess.h | 5
-rw-r--r-- arch/sh/include/asm/uaccess_64.h | 1
-rw-r--r-- arch/sparc/Kconfig | 1
-rw-r--r-- arch/sparc/include/asm/uaccess_32.h | 13
-rw-r--r-- arch/sparc/include/asm/uaccess_64.h | 10
-rw-r--r-- arch/tile/Kconfig | 1
-rw-r--r-- arch/tile/include/asm/uaccess.h | 22
-rw-r--r-- arch/um/include/asm/common.lds.S | 2
-rw-r--r-- arch/um/kernel/skas/syscall.c | 10
-rw-r--r-- arch/unicore32/include/asm/mmu_context.h | 2
-rw-r--r-- arch/x86/Kconfig | 3
-rw-r--r-- arch/x86/boot/compressed/eboot.c | 134
-rw-r--r-- arch/x86/configs/tiny.config | 2
-rw-r--r-- arch/x86/crypto/sha256-mb/sha256_mb.c | 4
-rw-r--r-- arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S | 7
-rw-r--r-- arch/x86/crypto/sha512-mb/sha512_mb.c | 4
-rw-r--r-- arch/x86/entry/Makefile | 2
-rw-r--r-- arch/x86/entry/entry_64.S | 25
-rw-r--r-- arch/x86/events/amd/core.c | 4
-rw-r--r-- arch/x86/events/amd/uncore.c | 22
-rw-r--r-- arch/x86/events/intel/bts.c | 123
-rw-r--r-- arch/x86/events/intel/core.c | 15
-rw-r--r-- arch/x86/events/intel/cqm.c | 9
-rw-r--r-- arch/x86/events/intel/ds.c | 19
-rw-r--r-- arch/x86/events/intel/pt.c | 18
-rw-r--r-- arch/x86/events/intel/uncore_snb.c | 14
-rw-r--r-- arch/x86/events/intel/uncore_snbep.c | 10
-rw-r--r-- arch/x86/include/asm/apic.h | 2
-rw-r--r-- arch/x86/include/asm/hardirq.h | 4
-rw-r--r-- arch/x86/include/asm/init.h | 4
-rw-r--r-- arch/x86/include/asm/pgtable_64.h | 4
-rw-r--r-- arch/x86/include/asm/realmode.h | 10
-rw-r--r-- arch/x86/include/asm/thread_info.h | 44
-rw-r--r-- arch/x86/include/asm/tlbflush.h | 7
-rw-r--r-- arch/x86/include/asm/uaccess.h | 101
-rw-r--r-- arch/x86/include/asm/uaccess_32.h | 2
-rw-r--r-- arch/x86/include/asm/uaccess_64.h | 2
-rw-r--r-- arch/x86/include/asm/uv/bios.h | 5
-rw-r--r-- arch/x86/kernel/apic/apic.c | 35
-rw-r--r-- arch/x86/kernel/apic/x2apic_cluster.c | 13
-rw-r--r-- arch/x86/kernel/apic/x2apic_uv_x.c | 42
-rw-r--r-- arch/x86/kernel/cpu/amd.c | 12
-rw-r--r-- arch/x86/kernel/cpu/microcode/amd.c | 18
-rw-r--r-- arch/x86/kernel/fpu/xstate.c | 138
-rw-r--r-- arch/x86/kernel/head32.c | 2
-rw-r--r-- arch/x86/kernel/head64.c | 1
-rw-r--r-- arch/x86/kernel/hpet.c | 2
-rw-r--r-- arch/x86/kernel/irq.c | 3
-rw-r--r-- arch/x86/kernel/kvmclock.c | 1
-rw-r--r-- arch/x86/kernel/paravirt.c | 4
-rw-r--r-- arch/x86/kernel/setup.c | 27
-rw-r--r-- arch/x86/kernel/smpboot.c | 25
-rw-r--r-- arch/x86/kernel/tsc.c | 4
-rw-r--r-- arch/x86/kernel/uprobes.c | 22
-rw-r--r-- arch/x86/kvm/ioapic.c | 8
-rw-r--r-- arch/x86/kvm/pmu_amd.c | 4
-rw-r--r-- arch/x86/kvm/vmx.c | 136
-rw-r--r-- arch/x86/kvm/x86.c | 8
-rw-r--r-- arch/x86/lib/hweight.S | 2
-rw-r--r-- arch/x86/lib/kaslr.c | 2
-rw-r--r-- arch/x86/mm/ident_map.c | 19
-rw-r--r-- arch/x86/mm/init.c | 14
-rw-r--r-- arch/x86/mm/kaslr.c | 4
-rw-r--r-- arch/x86/mm/pat.c | 17
-rw-r--r-- arch/x86/pci/fixup.c | 20
-rw-r--r-- arch/x86/pci/vmd.c | 10
-rw-r--r-- arch/x86/platform/efi/quirks.c | 21
-rw-r--r-- arch/x86/platform/uv/bios_uv.c | 11
-rw-r--r-- arch/x86/power/hibernate_64.c | 4
-rw-r--r-- arch/x86/realmode/init.c | 47
-rw-r--r-- arch/x86/um/ptrace_32.c | 3
-rw-r--r-- arch/x86/um/ptrace_64.c | 4
-rw-r--r-- arch/x86/xen/enlighten.c | 2
-rw-r--r-- block/bio.c | 21
-rw-r--r-- block/blk-core.c | 4
-rw-r--r-- block/blk-merge.c | 55
-rw-r--r-- block/blk-mq.c | 60
-rw-r--r-- block/elevator.c | 2
-rw-r--r-- crypto/Kconfig | 2
-rw-r--r-- crypto/cryptd.c | 12
-rw-r--r-- crypto/sha3_generic.c | 16
-rw-r--r-- drivers/acpi/nfit/core.c | 3
-rw-r--r-- drivers/acpi/nfit/mce.c | 2
-rw-r--r-- drivers/acpi/scan.c | 6
-rw-r--r-- drivers/ata/libahci.c | 2
-rw-r--r-- drivers/ata/pata_ninja32.c | 2
-rw-r--r-- drivers/base/power/runtime.c | 14
-rw-r--r-- drivers/base/regmap/regcache-rbtree.c | 38
-rw-r--r-- drivers/base/regmap/regcache.c | 5
-rw-r--r-- drivers/base/regmap/regmap.c | 2
-rw-r--r-- drivers/block/floppy.c | 27
-rw-r--r-- drivers/block/rbd.c | 10
-rw-r--r-- drivers/block/virtio_blk.c | 26
-rw-r--r-- drivers/block/xen-blkfront.c | 97
-rw-r--r-- drivers/bus/arm-cci.c | 2
-rw-r--r-- drivers/bus/arm-ccn.c | 112
-rw-r--r-- drivers/bus/vexpress-config.c | 1
-rw-r--r-- drivers/char/hw_random/Kconfig | 2
-rw-r--r-- drivers/char/tpm/tpm2-cmd.c | 3
-rw-r--r-- drivers/char/virtio_console.c | 23
-rw-r--r-- drivers/clk/renesas/r8a7795-cpg-mssr.c | 9
-rw-r--r-- drivers/clk/rockchip/clk-rk3399.c | 11
-rw-r--r-- drivers/clk/sunxi-ng/ccu-sun8i-h3.c | 16
-rw-r--r-- drivers/clk/sunxi-ng/ccu_common.c | 2
-rw-r--r-- drivers/clk/sunxi-ng/ccu_nk.c | 6
-rw-r--r-- drivers/clk/sunxi/clk-a10-pll2.c | 4
-rw-r--r-- drivers/clk/sunxi/clk-sun8i-mbus.c | 2
-rw-r--r-- drivers/clk/tegra/clk-tegra114.c | 4
-rw-r--r-- drivers/clocksource/arm_arch_timer.c | 26
-rw-r--r-- drivers/clocksource/bcm_kona_timer.c | 16
-rw-r--r-- drivers/clocksource/mips-gic-timer.c | 2
-rw-r--r-- drivers/clocksource/pxa_timer.c | 2
-rw-r--r-- drivers/clocksource/sun4i_timer.c | 9
-rw-r--r-- drivers/clocksource/time-armada-370-xp.c | 1
-rw-r--r-- drivers/clocksource/time-pistachio.c | 8
-rw-r--r-- drivers/clocksource/timer-atmel-pit.c | 7
-rw-r--r-- drivers/cpufreq/cpufreq-dt-platdev.c | 2
-rw-r--r-- drivers/cpufreq/powernv-cpufreq.c | 21
-rw-r--r-- drivers/crypto/caam/caamalg.c | 90
-rw-r--r-- drivers/crypto/caam/caamhash.c | 1
-rw-r--r-- drivers/crypto/qat/qat_common/qat_algs.c | 4
-rw-r--r-- drivers/crypto/vmx/aes_xts.c | 2
-rw-r--r-- drivers/dax/dax.c | 2
-rw-r--r-- drivers/dax/pmem.c | 3
-rw-r--r-- drivers/dma/at_xdmac.c | 4
-rw-r--r-- drivers/dma/fsl_raid.c | 1
-rw-r--r-- drivers/dma/img-mdc-dma.c | 4
-rw-r--r-- drivers/dma/pxa_dma.c | 11
-rw-r--r-- drivers/dma/sh/usb-dmac.c | 19
-rw-r--r-- drivers/edac/Kconfig | 8
-rw-r--r-- drivers/edac/Makefile | 1
-rw-r--r-- drivers/edac/sb_edac.c | 15
-rw-r--r-- drivers/edac/skx_edac.c | 1121
-rw-r--r-- drivers/firmware/arm_scpi.c | 5
-rw-r--r-- drivers/firmware/dmi-id.c | 8
-rw-r--r-- drivers/firmware/efi/capsule-loader.c | 8
-rw-r--r-- drivers/firmware/efi/capsule.c | 6
-rw-r--r-- drivers/firmware/efi/efi.c | 7
-rw-r--r-- drivers/firmware/efi/libstub/efi-stub-helper.c | 169
-rw-r--r-- drivers/firmware/efi/libstub/fdt.c | 54
-rw-r--r-- drivers/firmware/efi/libstub/random.c | 12
-rw-r--r-- drivers/gpio/Kconfig | 12
-rw-r--r-- drivers/gpio/gpio-max730x.c | 8
-rw-r--r-- drivers/gpio/gpio-mcp23s08.c | 2
-rw-r--r-- drivers/gpio/gpio-sa1100.c | 2
-rw-r--r-- drivers/gpio/gpiolib-of.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c | 13
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c | 9
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 7
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 16
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/ci_dpm.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 12
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 12
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 8
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c | 2
-rw-r--r-- drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 2
-rw-r--r-- drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c | 2
-rw-r--r-- drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c | 10
-rw-r--r-- drivers/gpu/drm/cirrus/cirrus_main.c | 13
-rw-r--r-- drivers/gpu/drm/drm_atomic.c | 6
-rw-r--r-- drivers/gpu/drm/drm_crtc.c | 11
-rw-r--r-- drivers/gpu/drm/drm_edid.c | 22
-rw-r--r-- drivers/gpu/drm/drm_fb_helper.c | 2
-rw-r--r-- drivers/gpu/drm/etnaviv/etnaviv_gpu.c | 10
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.c | 5
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.h | 14
-rw-r--r-- drivers/gpu/drm/i915/i915_gem.c | 10
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 13
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_gtt.c | 10
-rw-r--r-- drivers/gpu/drm/i915/i915_reg.h | 14
-rw-r--r-- drivers/gpu/drm/i915/i915_vgpu.c | 3
-rw-r--r-- drivers/gpu/drm/i915/intel_audio.c | 6
-rw-r--r-- drivers/gpu/drm/i915/intel_csr.c | 8
-rw-r--r-- drivers/gpu/drm/i915/intel_ddi.c | 91
-rw-r--r-- drivers/gpu/drm/i915/intel_display.c | 211
-rw-r--r-- drivers/gpu/drm/i915/intel_drv.h | 3
-rw-r--r-- drivers/gpu/drm/i915/intel_dvo.c | 1
-rw-r--r-- drivers/gpu/drm/i915/intel_fbc.c | 20
-rw-r--r-- drivers/gpu/drm/i915/intel_fbdev.c | 25
-rw-r--r-- drivers/gpu/drm/i915/intel_opregion.c | 27
-rw-r--r-- drivers/gpu/drm/i915/intel_pm.c | 280
-rw-r--r-- drivers/gpu/drm/i915/intel_psr.c | 14
-rw-r--r-- drivers/gpu/drm/i915/intel_ringbuffer.c | 8
-rw-r--r-- drivers/gpu/drm/imx/imx-drm-core.c | 26
-rw-r--r-- drivers/gpu/drm/imx/ipuv3-crtc.c | 4
-rw-r--r-- drivers/gpu/drm/imx/ipuv3-plane.c | 21
-rw-r--r-- drivers/gpu/drm/mediatek/Kconfig | 3
-rw-r--r-- drivers/gpu/drm/msm/msm_drv.h | 6
-rw-r--r-- drivers/gpu/drm/msm/msm_gem.c | 9
-rw-r--r-- drivers/gpu/drm/msm/msm_gem_submit.c | 30
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_acpi.c | 11
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_bo.c | 4
-rw-r--r-- drivers/gpu/drm/qxl/qxl_fb.c | 4
-rw-r--r-- drivers/gpu/drm/radeon/atombios_crtc.c | 4
-rw-r--r-- drivers/gpu/drm/radeon/radeon_atpx_handler.c | 9
-rw-r--r-- drivers/gpu/drm/radeon/radeon_ttm.c | 8
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c | 1
-rw-r--r-- drivers/gpu/drm/tegra/dsi.c | 43
-rw-r--r-- drivers/gpu/drm/ttm/ttm_bo.c | 3
-rw-r--r-- drivers/gpu/drm/ttm/ttm_bo_util.c | 10
-rw-r--r-- drivers/gpu/drm/udl/udl_fb.c | 4
-rw-r--r-- drivers/gpu/drm/vc4/vc4_bo.c | 2
-rw-r--r-- drivers/gpu/drm/vc4/vc4_drv.c | 6
-rw-r--r-- drivers/gpu/drm/vc4/vc4_drv.h | 9
-rw-r--r-- drivers/gpu/drm/vc4/vc4_gem.c | 18
-rw-r--r-- drivers/gpu/drm/vc4/vc4_irq.c | 4
-rw-r--r-- drivers/gpu/drm/vc4/vc4_validate_shaders.c | 10
-rw-r--r-- drivers/gpu/host1x/mipi.c | 63
-rw-r--r-- drivers/hwmon/it87.c | 3
-rw-r--r-- drivers/i2c/busses/i2c-at91.c | 24
-rw-r--r-- drivers/i2c/busses/i2c-bcm-iproc.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-bcm-kona.c | 4
-rw-r--r-- drivers/i2c/busses/i2c-brcmstb.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-cadence.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-cros-ec-tunnel.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-designware-core.c | 16
-rw-r--r-- drivers/i2c/busses/i2c-meson.c | 6
-rw-r--r-- drivers/i2c/busses/i2c-ocores.c | 14
-rw-r--r-- drivers/i2c/busses/i2c-rcar.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-rk3x.c | 14
-rw-r--r-- drivers/i2c/busses/i2c-sh_mobile.c | 2
-rw-r--r-- drivers/i2c/muxes/i2c-demux-pinctrl.c | 19
-rw-r--r-- drivers/iio/accel/Kconfig | 5
-rw-r--r-- drivers/iio/accel/bma220_spi.c | 2
-rw-r--r-- drivers/iio/accel/bmc150-accel-core.c | 11
-rw-r--r-- drivers/iio/accel/kxsd9.c | 2
-rw-r--r-- drivers/iio/adc/Kconfig | 1
-rw-r--r-- drivers/iio/adc/ad799x.c | 1
-rw-r--r-- drivers/iio/adc/at91_adc.c | 4
-rw-r--r-- drivers/iio/adc/rockchip_saradc.c | 30
-rw-r--r-- drivers/iio/adc/ti-ads1015.c | 3
-rw-r--r-- drivers/iio/adc/ti_am335x_adc.c | 16
-rw-r--r-- drivers/iio/chemical/atlas-ph-sensor.c | 2
-rw-r--r-- drivers/iio/common/hid-sensors/hid-sensor-attributes.c | 4
-rw-r--r-- drivers/iio/dac/stx104.c | 38
-rw-r--r-- drivers/iio/humidity/Kconfig | 2
-rw-r--r-- drivers/iio/humidity/am2315.c | 2
-rw-r--r-- drivers/iio/humidity/hdc100x.c | 27
-rw-r--r-- drivers/iio/industrialio-buffer.c | 27
-rw-r--r-- drivers/iio/industrialio-core.c | 5
-rw-r--r-- drivers/iio/light/Kconfig | 3
-rw-r--r-- drivers/iio/pressure/bmp280-core.c | 8
-rw-r--r-- drivers/iio/proximity/as3935.c | 2
-rw-r--r-- drivers/infiniband/core/cma.c | 18
-rw-r--r-- drivers/infiniband/core/multicast.c | 13
-rw-r--r-- drivers/infiniband/hw/cxgb4/cm.c | 12
-rw-r--r-- drivers/infiniband/hw/cxgb4/cq.c | 10
-rw-r--r-- drivers/infiniband/hw/cxgb4/device.c | 5
-rw-r--r-- drivers/infiniband/hw/cxgb4/iw_cxgb4.h | 1
-rw-r--r-- drivers/infiniband/hw/cxgb4/qp.c | 2
-rw-r--r-- drivers/infiniband/hw/cxgb4/t4.h | 5
-rw-r--r-- drivers/infiniband/hw/hfi1/affinity.c | 21
-rw-r--r-- drivers/infiniband/hw/hfi1/chip.c | 92
-rw-r--r-- drivers/infiniband/hw/hfi1/chip.h | 1
-rw-r--r-- drivers/infiniband/hw/hfi1/debugfs.c | 146
-rw-r--r-- drivers/infiniband/hw/hfi1/driver.c | 11
-rw-r--r-- drivers/infiniband/hw/hfi1/file_ops.c | 4
-rw-r--r-- drivers/infiniband/hw/hfi1/hfi.h | 24
-rw-r--r-- drivers/infiniband/hw/hfi1/init.c | 5
-rw-r--r-- drivers/infiniband/hw/hfi1/mad.c | 26
-rw-r--r-- drivers/infiniband/hw/hfi1/pio_copy.c | 12
-rw-r--r-- drivers/infiniband/hw/hfi1/qp.c | 4
-rw-r--r-- drivers/infiniband/hw/hfi1/qsfp.c | 32
-rw-r--r-- drivers/infiniband/hw/hfi1/qsfp.h | 3
-rw-r--r-- drivers/infiniband/hw/hfi1/user_sdma.c | 5
-rw-r--r-- drivers/infiniband/hw/i40iw/i40iw.h | 4
-rw-r--r-- drivers/infiniband/hw/i40iw/i40iw_cm.c | 26
-rw-r--r-- drivers/infiniband/hw/i40iw/i40iw_hw.c | 1
-rw-r--r-- drivers/infiniband/hw/i40iw/i40iw_main.c | 12
-rw-r--r-- drivers/infiniband/hw/i40iw/i40iw_utils.c | 5
-rw-r--r-- drivers/infiniband/hw/i40iw/i40iw_verbs.c | 4
-rw-r--r-- drivers/infiniband/hw/mlx4/cq.c | 40
-rw-r--r-- drivers/infiniband/hw/mlx4/mad.c | 23
-rw-r--r-- drivers/infiniband/hw/mlx4/main.c | 3
-rw-r--r-- drivers/infiniband/hw/mlx4/mcg.c | 14
-rw-r--r-- drivers/infiniband/hw/mlx4/mlx4_ib.h | 2
-rw-r--r-- drivers/infiniband/hw/mlx4/qp.c | 37
-rw-r--r-- drivers/infiniband/hw/mlx5/cq.c | 22
-rw-r--r-- drivers/infiniband/hw/mlx5/main.c | 18
-rw-r--r-- drivers/infiniband/hw/mlx5/mem.c | 6
-rw-r--r-- drivers/infiniband/hw/mlx5/mlx5_ib.h | 1
-rw-r--r-- drivers/infiniband/hw/mlx5/qp.c | 13
-rw-r--r-- drivers/infiniband/hw/ocrdma/ocrdma_hw.c | 14
-rw-r--r-- drivers/infiniband/hw/ocrdma/ocrdma_sli.h | 12
-rw-r--r-- drivers/infiniband/hw/ocrdma/ocrdma_verbs.c | 4
-rw-r--r-- drivers/infiniband/hw/qib/qib_debugfs.c | 12
-rw-r--r-- drivers/infiniband/hw/qib/qib_fs.c | 26
-rw-r--r-- drivers/infiniband/hw/qib/qib_qp.c | 4
-rw-r--r-- drivers/infiniband/hw/usnic/usnic_ib_main.c | 3
-rw-r--r-- drivers/infiniband/sw/rdmavt/mr.c | 2
-rw-r--r-- drivers/infiniband/sw/rdmavt/qp.c | 3
-rw-r--r-- drivers/infiniband/sw/rxe/rxe.c | 25
-rw-r--r-- drivers/infiniband/sw/rxe/rxe_comp.c | 13
-rw-r--r-- drivers/infiniband/sw/rxe/rxe_net.c | 55
-rw-r--r-- drivers/infiniband/sw/rxe/rxe_net.h | 5
-rw-r--r-- drivers/infiniband/sw/rxe/rxe_recv.c | 2
-rw-r--r-- drivers/infiniband/sw/rxe/rxe_req.c | 57
-rw-r--r-- drivers/infiniband/sw/rxe/rxe_resp.c | 11
-rw-r--r-- drivers/infiniband/ulp/ipoib/ipoib.h | 1
-rw-r--r-- drivers/infiniband/ulp/ipoib/ipoib_cm.c | 16
-rw-r--r-- drivers/infiniband/ulp/ipoib/ipoib_ib.c | 9
-rw-r--r-- drivers/infiniband/ulp/ipoib/ipoib_main.c | 2
-rw-r--r-- drivers/infiniband/ulp/isert/ib_isert.c | 25
-rw-r--r-- drivers/infiniband/ulp/isert/ib_isert.h | 2
-rw-r--r-- drivers/infiniband/ulp/srpt/ib_srpt.c | 9
-rw-r--r-- drivers/input/keyboard/tegra-kbc.c | 2
-rw-r--r-- drivers/input/rmi4/rmi_driver.c | 3
-rw-r--r-- drivers/input/serio/i8042.c | 1
-rw-r--r-- drivers/input/touchscreen/ads7846.c | 1
-rw-r--r-- drivers/input/touchscreen/silead.c | 2
-rw-r--r-- drivers/iommu/arm-smmu-v3.c | 7
-rw-r--r-- drivers/iommu/arm-smmu.c | 34
-rw-r--r-- drivers/iommu/dma-iommu.c | 14
-rw-r--r-- drivers/iommu/io-pgtable-arm-v7s.c | 4
-rw-r--r-- drivers/iommu/mtk_iommu.h | 6
-rw-r--r-- drivers/irqchip/irq-atmel-aic.c | 5
-rw-r--r-- drivers/irqchip/irq-atmel-aic5.c | 5
-rw-r--r-- drivers/irqchip/irq-gic-v3-its.c | 7
-rw-r--r-- drivers/irqchip/irq-gic-v3.c | 11
-rw-r--r-- drivers/irqchip/irq-gic.c | 7
-rw-r--r-- drivers/irqchip/irq-mips-gic.c | 18
-rw-r--r-- drivers/macintosh/ams/ams-i2c.c | 1
-rw-r--r-- drivers/macintosh/windfarm_pm112.c | 1
-rw-r--r-- drivers/macintosh/windfarm_pm72.c | 1
-rw-r--r-- drivers/macintosh/windfarm_rm31.c | 1
-rw-r--r-- drivers/mailbox/Kconfig | 1
-rw-r--r-- drivers/mailbox/bcm-pdc-mailbox.c | 11
-rw-r--r-- drivers/md/bcache/super.c | 14
-rw-r--r-- drivers/md/bitmap.c | 47
-rw-r--r-- drivers/md/dm-bufio.c | 2
-rw-r--r-- drivers/md/dm-crypt.c | 11
-rw-r--r-- drivers/md/dm-flakey.c | 27
-rw-r--r-- drivers/md/dm-log-writes.c | 10
-rw-r--r-- drivers/md/dm-log.c | 11
-rw-r--r-- drivers/md/dm-raid.c | 82
-rw-r--r-- drivers/md/dm-round-robin.c | 7
-rw-r--r-- drivers/md/md-cluster.c | 12
-rw-r--r-- drivers/md/md.c | 40
-rw-r--r-- drivers/md/raid10.c | 13
-rw-r--r-- drivers/md/raid5-cache.c | 46
-rw-r--r-- drivers/md/raid5.c | 74
-rw-r--r-- drivers/memory/omap-gpmc.c | 21
-rw-r--r-- drivers/misc/Kconfig | 10
-rw-r--r-- drivers/misc/Makefile | 4
-rw-r--r-- drivers/misc/bh1780gli.c | 259
-rw-r--r-- drivers/misc/cxl/context.c | 3
-rw-r--r-- drivers/misc/cxl/cxl.h | 2
-rw-r--r-- drivers/misc/cxl/native.c | 2
-rw-r--r-- drivers/misc/cxl/pci.c | 12
-rw-r--r-- drivers/misc/cxl/vphb.c | 12
-rw-r--r-- drivers/misc/lkdtm_rodata.c | 2
-rw-r--r-- drivers/misc/lkdtm_usercopy.c | 27
-rw-r--r-- drivers/misc/mei/hw-me.c | 10
-rw-r--r-- drivers/misc/mei/pci-me.c | 4
-rw-r--r-- drivers/mmc/card/block.c | 5
-rw-r--r-- drivers/mmc/card/queue.c | 7
-rw-r--r-- drivers/mmc/card/queue.h | 4
-rw-r--r-- drivers/mmc/host/omap.c | 18
-rw-r--r-- drivers/mmc/host/omap_hsmmc.c | 16
-rw-r--r-- drivers/mmc/host/sdhci-st.c | 15
-rw-r--r-- drivers/net/bonding/bond_main.c | 9
-rw-r--r-- drivers/net/dsa/b53/b53_regs.h | 2
-rw-r--r-- drivers/net/dsa/bcm_sf2.h | 2
-rw-r--r-- drivers/net/dsa/mv88e6xxx/chip.c | 10
-rw-r--r-- drivers/net/ethernet/apm/xgene/xgene_enet_hw.c | 2
-rw-r--r-- drivers/net/ethernet/arc/emac_main.c | 6
-rw-r--r-- drivers/net/ethernet/atheros/alx/main.c | 2
-rw-r--r-- drivers/net/ethernet/atheros/alx/reg.h | 1
-rw-r--r-- drivers/net/ethernet/broadcom/bgmac-bcma.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 19
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 4
-rw-r--r-- drivers/net/ethernet/broadcom/tg3.c | 16
-rw-r--r-- drivers/net/ethernet/cadence/macb.c | 23
-rw-r--r-- drivers/net/ethernet/cadence/macb.h | 2
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nic.h | 1
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nic_main.c | 11
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nic_reg.h | 1
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c | 5
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nicvf_main.c | 20
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nicvf_queues.c | 64
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 10
-rw-r--r-- drivers/net/ethernet/davicom/dm9000.c | 8
-rw-r--r-- drivers/net/ethernet/freescale/gianfar.c | 20
-rw-r--r-- drivers/net/ethernet/freescale/gianfar.h | 3
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c | 2
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c | 3
-rw-r--r-- drivers/net/ethernet/intel/e1000e/82571.c | 6
-rw-r--r-- drivers/net/ethernet/intel/e1000e/e1000.h | 1
-rw-r--r-- drivers/net/ethernet/intel/e1000e/ich8lan.c | 3
-rw-r--r-- drivers/net/ethernet/intel/e1000e/netdev.c | 66
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_client.c | 41
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_main.c | 51
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_ptp.c | 26
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_common.c | 4
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 23
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 131
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_eth_soc.h | 22
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c | 57
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 21
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_tx.c | 12
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 15
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/port.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 85
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en.h | 21
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_common.c | 7
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c | 21
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 101
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 112
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 63
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 14
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 68
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 16
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 26
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c | 11
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/main.c | 59
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/port.h | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/reg.h | 11
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 184
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c | 14
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c | 10
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 165
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | 70
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/trap.h | 4
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 5
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c | 1
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c | 7
-rw-r--r-- drivers/net/ethernet/nxp/lpc_eth.c | 20
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed.h | 11
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_dcbx.c | 241
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_hsi.h | 8
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede_main.c | 8
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 4
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c | 2
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h | 9
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c | 95
-rw-r--r-- drivers/net/ethernet/realtek/8139cp.c | 2
-rw-r--r-- drivers/net/ethernet/renesas/sh_eth.c | 7
-rw-r--r-- drivers/net/ethernet/sfc/ef10.c | 3
-rw-r--r-- drivers/net/ethernet/smsc/smc91x.c | 7
-rw-r--r-- drivers/net/ethernet/smsc/smc91x.h | 67
-rw-r--r-- drivers/net/ethernet/smsc/smsc911x.c | 213
-rw-r--r-- drivers/net/ethernet/synopsys/dwc_eth_qos.c | 55
-rw-r--r-- drivers/net/ethernet/tehuti/tehuti.c | 2
-rw-r--r-- drivers/net/ethernet/ti/cpsw.c | 2
-rw-r--r-- drivers/net/ethernet/tundra/tsi108_eth.c | 2
-rw-r--r-- drivers/net/ethernet/xilinx/xilinx_emaclite.c | 8
-rw-r--r-- drivers/net/hyperv/hyperv_net.h | 24
-rw-r--r-- drivers/net/hyperv/netvsc.c | 19
-rw-r--r-- drivers/net/hyperv/netvsc_drv.c | 105
-rw-r--r-- drivers/net/macsec.c | 52
-rw-r--r-- drivers/net/macvlan.c | 2
-rw-r--r-- drivers/net/macvtap.c | 5
-rw-r--r-- drivers/net/phy/Kconfig | 1
-rw-r--r-- drivers/net/phy/micrel.c | 29
-rw-r--r-- drivers/net/phy/phy.c | 6
-rw-r--r-- drivers/net/team/team_mode_loadbalance.c | 15
-rw-r--r-- drivers/net/tun.c | 6
-rw-r--r-- drivers/net/usb/kaweth.c | 10
-rw-r--r-- drivers/net/vmxnet3/vmxnet3_drv.c | 4
-rw-r--r-- drivers/net/vmxnet3/vmxnet3_int.h | 4
-rw-r--r-- drivers/net/vxlan.c | 72
-rw-r--r-- drivers/net/wireless/ath/ath10k/htt_rx.c | 10
-rw-r--r-- drivers/net/wireless/ath/ath10k/pci.c | 1
-rw-r--r-- drivers/net/wireless/ath/ath9k/hw.c | 4
-rw-r--r-- drivers/net/wireless/ath/ath9k/main.c | 18
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c | 28
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h | 2
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c | 2
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c | 8
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h | 2
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c | 3
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.h | 3
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 1
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 2
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/11n_aggr.c | 3
-rw-r--r-- drivers/net/wireless/ti/wlcore/main.c | 5
-rw-r--r-- drivers/nvdimm/btt.c | 1
-rw-r--r-- drivers/nvdimm/btt_devs.c | 20
-rw-r--r-- drivers/nvdimm/bus.c | 6
-rw-r--r-- drivers/nvdimm/nd.h | 1
-rw-r--r-- drivers/nvme/host/Kconfig | 4
-rw-r--r-- drivers/nvme/host/core.c | 11
-rw-r--r-- drivers/nvme/host/fabrics.c | 23
-rw-r--r-- drivers/nvme/host/fabrics.h | 2
-rw-r--r-- drivers/nvme/host/pci.c | 27
-rw-r--r-- drivers/nvme/host/rdma.c | 229
-rw-r--r-- drivers/nvme/target/Kconfig | 2
-rw-r--r-- drivers/nvme/target/admin-cmd.c | 6
-rw-r--r-- drivers/nvme/target/core.c | 4
-rw-r--r-- drivers/nvme/target/loop.c | 8
-rw-r--r-- drivers/nvme/target/nvmet.h | 1
-rw-r--r-- drivers/nvme/target/rdma.c | 105
-rw-r--r-- drivers/of/base.c | 14
-rw-r--r-- drivers/of/fdt.c | 2
-rw-r--r-- drivers/of/irq.c | 5
-rw-r--r-- drivers/of/platform.c | 2
-rw-r--r-- drivers/pci/host-bridge.c | 1
-rw-r--r-- drivers/pci/msi.c | 20
-rw-r--r-- drivers/pci/quirks.c | 7
-rw-r--r-- drivers/pci/remove.c | 3
-rw-r--r-- drivers/pcmcia/ds.c | 12
-rw-r--r-- drivers/pcmcia/pxa2xx_base.c | 9
-rw-r--r-- drivers/pcmcia/pxa2xx_base.h | 2
-rw-r--r-- drivers/pcmcia/sa1111_badge4.c | 22
-rw-r--r-- drivers/pcmcia/sa1111_generic.c | 22
-rw-r--r-- drivers/pcmcia/sa1111_jornada720.c | 25
-rw-r--r-- drivers/pcmcia/sa1111_lubbock.c | 32
-rw-r--r-- drivers/pcmcia/sa1111_neponset.c | 26
-rw-r--r-- drivers/pcmcia/sa11xx_base.c | 8
-rw-r--r-- drivers/pcmcia/soc_common.c | 2
-rw-r--r-- drivers/perf/arm_pmu.c | 26
-rw-r--r-- drivers/phy/phy-brcm-sata.c | 2
-rw-r--r-- drivers/phy/phy-sun4i-usb.c | 68
-rw-r--r-- drivers/phy/phy-sun9i-usb.c | 4
-rw-r--r-- drivers/pinctrl/intel/pinctrl-cherryview.c | 5
-rw-r--r-- drivers/pinctrl/intel/pinctrl-merrifield.c | 1
-rw-r--r-- drivers/pinctrl/meson/pinctrl-meson.c | 8
-rw-r--r-- drivers/pinctrl/pinctrl-amd.c | 20
-rw-r--r-- drivers/pinctrl/pinctrl-pistachio.c | 21
-rw-r--r-- drivers/pinctrl/sunxi/pinctrl-sun8i-a23.c | 4
-rw-r--r-- drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c | 4
-rw-r--r-- drivers/platform/olpc/olpc-ec.c | 8
-rw-r--r-- drivers/platform/x86/dell-wmi.c | 4
-rw-r--r-- drivers/platform/x86/intel_pmic_gpio.c | 8
-rw-r--r-- drivers/power/max17042_battery.c | 15
-rw-r--r-- drivers/power/reset/Kconfig | 2
-rw-r--r-- drivers/power/reset/hisi-reboot.c | 5
-rw-r--r-- drivers/power/tps65217_charger.c | 1
-rw-r--r-- drivers/rapidio/devices/tsi721.c | 2
-rw-r--r-- drivers/rapidio/rio_cm.c | 24
-rw-r--r-- drivers/regulator/max14577-regulator.c | 4
-rw-r--r-- drivers/regulator/max77693-regulator.c | 4
-rw-r--r-- drivers/regulator/qcom_smd-regulator.c | 30
-rw-r--r-- drivers/s390/block/dasd.c | 10
-rw-r--r-- drivers/s390/block/dasd_eckd.c | 10
-rw-r--r-- drivers/s390/cio/device.c | 2
-rw-r--r-- drivers/s390/cio/device_status.c | 7
-rw-r--r-- drivers/s390/cio/io_sch.h | 1
-rw-r--r-- drivers/s390/cio/qdio_main.c | 113
-rw-r--r-- drivers/s390/virtio/Makefile | 6
-rw-r--r-- drivers/s390/virtio/kvm_virtio.c | 4
-rw-r--r-- drivers/scsi/aacraid/commctrl.c | 13
-rw-r--r-- drivers/scsi/constants.c | 5
-rw-r--r-- drivers/scsi/fcoe/fcoe_ctlr.c | 2
-rw-r--r-- drivers/scsi/ipr.c | 11
-rw-r--r-- drivers/scsi/megaraid/megaraid_sas_base.c | 6
-rw-r--r-- drivers/scsi/megaraid/megaraid_sas_fusion.c | 2
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_base.c | 22
-rw-r--r-- drivers/scsi/scsi_devinfo.c | 4
-rw-r--r-- drivers/scsi/scsi_transport_sas.c | 16
-rw-r--r-- drivers/scsi/ses.c | 5
-rw-r--r-- drivers/scsi/wd719x.c | 2
-rw-r--r-- drivers/spi/spi-img-spfi.c | 2
-rw-r--r-- drivers/spi/spi-mt65xx.c | 1
-rw-r--r-- drivers/spi/spi-pxa2xx-pci.c | 1
-rw-r--r-- drivers/spi/spi-qup.c | 1
-rw-r--r-- drivers/spi/spi-sh-msiof.c | 3
-rw-r--r-- drivers/spi/spi.c | 10
-rw-r--r-- drivers/staging/comedi/drivers/adv_pci1760.c | 1
-rw-r--r-- drivers/staging/comedi/drivers/comedi_test.c | 46
-rw-r--r-- drivers/staging/comedi/drivers/daqboard2000.c | 2
-rw-r--r-- drivers/staging/comedi/drivers/dt2811.c | 4
-rw-r--r-- drivers/staging/comedi/drivers/ni_mio_common.c | 12
-rw-r--r-- drivers/staging/iio/impedance-analyzer/ad5933.c | 2
-rw-r--r-- drivers/staging/lustre/lustre/llite/namei.c | 43
-rw-r--r-- drivers/staging/wilc1000/host_interface.c | 3
-rw-r--r-- drivers/staging/wilc1000/linux_wlan.c | 2
-rw-r--r-- drivers/staging/wilc1000/wilc_wfi_cfgoperations.c | 4
-rw-r--r-- drivers/thermal/clock_cooling.c | 1
-rw-r--r-- drivers/thermal/cpu_cooling.c | 21
-rw-r--r-- drivers/thermal/fair_share.c | 2
-rw-r--r-- drivers/thermal/gov_bang_bang.c | 2
-rw-r--r-- drivers/thermal/imx_thermal.c | 4
-rw-r--r-- drivers/thermal/int340x_thermal/int3406_thermal.c | 1
-rw-r--r-- drivers/thermal/intel_pch_thermal.c | 60
-rw-r--r-- drivers/thermal/intel_powerclamp.c | 11
-rw-r--r-- drivers/thermal/power_allocator.c | 2
-rw-r--r-- drivers/thermal/rcar_thermal.c | 1
-rw-r--r-- drivers/thermal/step_wise.c | 2
-rw-r--r-- drivers/thermal/thermal_core.c | 10
-rw-r--r-- drivers/thermal/thermal_hwmon.c | 2
-rw-r--r-- drivers/thunderbolt/nhi.c | 6
-rw-r--r-- drivers/thunderbolt/switch.c | 4
-rw-r--r-- drivers/tty/serial/8250/8250.h | 35
-rw-r--r-- drivers/tty/serial/8250/8250_core.c | 9
-rw-r--r-- drivers/tty/serial/8250/8250_fintek.c | 4
-rw-r--r-- drivers/tty/serial/8250/8250_mid.c | 3
-rw-r--r-- drivers/tty/serial/8250/8250_omap.c | 31
-rw-r--r-- drivers/tty/serial/8250/8250_pci.c | 139
-rw-r--r-- drivers/tty/serial/8250/8250_port.c | 7
-rw-r--r-- drivers/tty/serial/8250/Kconfig | 1
-rw-r--r-- drivers/usb/chipidea/udc.c | 16
-rw-r--r-- drivers/usb/class/cdc-acm.c | 5
-rw-r--r-- drivers/usb/class/cdc-acm.h | 1
-rw-r--r-- drivers/usb/core/config.c | 94
-rw-r--r-- drivers/usb/core/devio.c | 23
-rw-r--r-- drivers/usb/core/hub.c | 23
-rw-r--r-- drivers/usb/dwc2/core.h | 1
-rw-r--r-- drivers/usb/dwc2/platform.c | 22
-rw-r--r-- drivers/usb/dwc3/core.c | 1
-rw-r--r-- drivers/usb/dwc3/debug.h | 2
-rw-r--r-- drivers/usb/dwc3/dwc3-of-simple.c | 1
-rw-r--r-- drivers/usb/dwc3/dwc3-pci.c | 15
-rw-r--r-- drivers/usb/dwc3/gadget.c | 61
-rw-r--r-- drivers/usb/gadget/composite.c | 6
-rw-r--r-- drivers/usb/gadget/configfs.c | 2
-rw-r--r-- drivers/usb/gadget/function/f_eem.c | 12
-rw-r--r-- drivers/usb/gadget/function/f_rndis.c | 3
-rw-r--r-- drivers/usb/gadget/function/rndis.c | 6
-rw-r--r-- drivers/usb/gadget/function/u_ether.c | 3
-rw-r--r-- drivers/usb/gadget/function/u_serial.c | 7
-rw-r--r-- drivers/usb/gadget/function/uvc_configfs.c | 2
-rw-r--r-- drivers/usb/gadget/legacy/inode.c | 4
-rw-r--r-- drivers/usb/gadget/udc/core.c | 7
-rw-r--r-- drivers/usb/gadget/udc/fsl_qe_udc.c | 9
-rw-r--r-- drivers/usb/gadget/udc/renesas_usb3.c | 2
-rw-r--r-- drivers/usb/host/ehci-hcd.c | 4
-rw-r--r-- drivers/usb/host/max3421-hcd.c | 2
-rw-r--r-- drivers/usb/host/xhci-hub.c | 3
-rw-r--r-- drivers/usb/host/xhci-pci.c | 3
-rw-r--r-- drivers/usb/host/xhci-ring.c | 22
-rw-r--r-- drivers/usb/misc/ftdi-elan.c | 10
-rw-r--r-- drivers/usb/misc/usbtest.c | 9
-rw-r--r-- drivers/usb/musb/Kconfig | 2
-rw-r--r-- drivers/usb/musb/musb_virthub.c | 7
-rw-r--r-- drivers/usb/phy/phy-generic.c | 8
-rw-r--r-- drivers/usb/phy/phy-omap-otg.c | 2
-rw-r--r-- drivers/usb/renesas_usbhs/common.c | 3
-rw-r--r-- drivers/usb/renesas_usbhs/fifo.c | 4
-rw-r--r-- drivers/usb/renesas_usbhs/mod.c | 11
-rw-r--r-- drivers/usb/renesas_usbhs/mod_gadget.c | 9
-rw-r--r-- drivers/usb/serial/ftdi_sio.c | 3
-rw-r--r-- drivers/usb/serial/ftdi_sio_ids.h | 12
-rw-r--r-- drivers/usb/serial/mos7720.c | 2
-rw-r--r-- drivers/usb/serial/mos7840.c | 4
-rw-r--r-- drivers/usb/serial/option.c | 31
-rw-r--r-- drivers/usb/serial/usb-serial-simple.c | 3
-rw-r--r-- drivers/usb/serial/usb-serial.c | 4
-rw-r--r-- drivers/vfio/pci/vfio_pci_intrs.c | 85
-rw-r--r-- drivers/vhost/scsi.c | 6
-rw-r--r-- drivers/vhost/test.c | 8
-rw-r--r-- drivers/vhost/vsock.c | 6
-rw-r--r-- drivers/virtio/virtio_ring.c | 5
-rw-r--r-- drivers/xen/xenbus/xenbus_dev_frontend.c | 2
-rw-r--r-- fs/afs/cmservice.c | 78
-rw-r--r-- fs/afs/fsclient.c | 221
-rw-r--r-- fs/afs/internal.h | 14
-rw-r--r-- fs/afs/rxrpc.c | 73
-rw-r--r-- fs/afs/vlclient.c | 11
-rw-r--r-- fs/aio.c | 7
-rw-r--r-- fs/binfmt_elf.c | 2
-rw-r--r-- fs/block_dev.c | 5
-rw-r--r-- fs/btrfs/backref.c | 1
-rw-r--r-- fs/btrfs/ctree.h | 6
-rw-r--r-- fs/btrfs/delayed-ref.c | 34
-rw-r--r-- fs/btrfs/delayed-ref.h | 3
-rw-r--r-- fs/btrfs/disk-io.c | 56
-rw-r--r-- fs/btrfs/disk-io.h | 2
-rw-r--r-- fs/btrfs/extent-tree.c | 214
-rw-r--r-- fs/btrfs/extent_io.h | 1
-rw-r--r-- fs/btrfs/file.c | 36
-rw-r--r-- fs/btrfs/inode-map.c | 3
-rw-r--r-- fs/btrfs/inode.c | 83
-rw-r--r-- fs/btrfs/ioctl.c | 2
-rw-r--r-- fs/btrfs/qgroup.c | 62
-rw-r--r-- fs/btrfs/qgroup.h | 36
-rw-r--r-- fs/btrfs/relocation.c | 128
-rw-r--r-- fs/btrfs/root-tree.c | 27
-rw-r--r-- fs/btrfs/send.c | 181
-rw-r--r-- fs/btrfs/super.c | 16
-rw-r--r-- fs/btrfs/transaction.c | 7
-rw-r--r-- fs/btrfs/tree-log.c | 107
-rw-r--r-- fs/btrfs/tree-log.h | 5
-rw-r--r-- fs/btrfs/volumes.c | 27
-rw-r--r-- fs/ceph/caps.c | 5
-rw-r--r-- fs/ceph/dir.c | 2
-rw-r--r-- fs/ceph/mds_client.c | 1
-rw-r--r-- fs/cifs/cifsfs.c | 29
-rw-r--r-- fs/cifs/cifsproto.h | 2
-rw-r--r-- fs/cifs/connect.c | 31
-rw-r--r-- fs/crypto/policy.c | 41
-rw-r--r-- fs/devpts/inode.c | 3
-rw-r--r-- fs/dlm/debug_fs.c | 62
-rw-r--r-- fs/ext4/inode.c | 2
-rw-r--r-- fs/ext4/ioctl.c | 2
-rw-r--r-- fs/ext4/super.c | 18
-rw-r--r-- fs/ext4/xattr.c | 53
-rw-r--r-- fs/ext4/xattr.h | 1
-rw-r--r-- fs/f2fs/data.c | 2
-rw-r--r-- fs/f2fs/f2fs.h | 12
-rw-r--r-- fs/f2fs/file.c | 22
-rw-r--r-- fs/f2fs/node.c | 47
-rw-r--r-- fs/f2fs/super.c | 6
-rw-r--r-- fs/fs-writeback.c | 6
-rw-r--r-- fs/fuse/file.c | 7
-rw-r--r-- fs/ioctl.c | 6
-rw-r--r-- fs/iomap.c | 26
-rw-r--r-- fs/kernfs/file.c | 28
-rw-r--r-- fs/nfs/blocklayout/blocklayout.c | 2
-rw-r--r-- fs/nfs/blocklayout/blocklayout.h | 3
-rw-r--r-- fs/nfs/blocklayout/extent_tree.c | 10
-rw-r--r-- fs/nfs/callback.c | 1
-rw-r--r-- fs/nfs/callback_proc.c | 8
-rw-r--r-- fs/nfs/client.c | 10
-rw-r--r-- fs/nfs/file.c | 5
-rw-r--r-- fs/nfs/flexfilelayout/flexfilelayout.c | 45
-rw-r--r-- fs/nfs/flexfilelayout/flexfilelayout.h | 2
-rw-r--r-- fs/nfs/flexfilelayout/flexfilelayoutdev.c | 23
-rw-r--r-- fs/nfs/internal.h | 5
-rw-r--r-- fs/nfs/nfs42proc.c | 36
-rw-r--r-- fs/nfs/nfs4_fs.h | 4
-rw-r--r-- fs/nfs/nfs4client.c | 5
-rw-r--r-- fs/nfs/nfs4proc.c | 119
-rw-r--r-- fs/nfs/nfs4renewd.c | 20
-rw-r--r-- fs/nfs/nfs4session.c | 53
-rw-r--r-- fs/nfs/nfs4session.h | 7
-rw-r--r-- fs/nfs/nfs4state.c | 9
-rw-r--r-- fs/nfs/pnfs.c | 44
-rw-r--r-- fs/nfs/super.c | 19
-rw-r--r-- fs/nfsd/nfs4state.c | 65
-rw-r--r-- fs/nfsd/vfs.c | 9
-rw-r--r-- fs/overlayfs/copy_up.c | 2
-rw-r--r-- fs/overlayfs/dir.c | 58
-rw-r--r-- fs/overlayfs/inode.c | 108
-rw-r--r-- fs/overlayfs/overlayfs.h | 17
-rw-r--r-- fs/overlayfs/readdir.c | 63
-rw-r--r-- fs/overlayfs/super.c | 93
-rw-r--r-- fs/pipe.c | 4
-rw-r--r-- fs/proc/base.c | 7
-rw-r--r-- fs/proc/meminfo.c | 2
-rw-r--r-- fs/proc/task_mmu.c | 2
-rw-r--r-- fs/seq_file.c | 4
-rw-r--r-- fs/sysfs/file.c | 8
-rw-r--r-- fs/ubifs/tnc_commit.c | 2
-rw-r--r-- fs/ubifs/xattr.c | 5
-rw-r--r-- fs/xfs/libxfs/xfs_alloc.c | 16
-rw-r--r-- fs/xfs/libxfs/xfs_btree.c | 14
-rw-r--r-- fs/xfs/libxfs/xfs_defer.c | 17
-rw-r--r-- fs/xfs/libxfs/xfs_defer.h | 2
-rw-r--r-- fs/xfs/libxfs/xfs_format.h | 13
-rw-r--r-- fs/xfs/libxfs/xfs_rmap_btree.c | 6
-rw-r--r-- fs/xfs/libxfs/xfs_sb.c | 3
-rw-r--r-- fs/xfs/xfs_buf.c | 3
-rw-r--r-- fs/xfs/xfs_file.c | 13
-rw-r--r-- fs/xfs/xfs_fsops.c | 1
-rw-r--r-- fs/xfs/xfs_iomap.c | 69
-rw-r--r-- fs/xfs/xfs_iomap.h | 1
-rw-r--r-- fs/xfs/xfs_iops.c | 9
-rw-r--r-- fs/xfs/xfs_super.c | 9
-rw-r--r-- fs/xfs/xfs_trace.h | 3
-rw-r--r-- include/asm-generic/qrwlock.h | 27
-rw-r--r-- include/asm-generic/uaccess.h | 21
-rw-r--r-- include/drm/ttm/ttm_bo_driver.h | 3
-rw-r--r-- include/linux/acpi.h | 2
-rw-r--r-- include/linux/bio.h | 10
-rw-r--r-- include/linux/blkdev.h | 6
-rw-r--r-- include/linux/bvec.h | 3
-rw-r--r-- include/linux/compiler-gcc.h | 10
-rw-r--r-- include/linux/compiler.h | 7
-rw-r--r-- include/linux/cpuhotplug.h | 2
-rw-r--r-- include/linux/efi.h | 27
-rw-r--r-- include/linux/fence.h | 2
-rw-r--r-- include/linux/fs.h | 1
-rw-r--r-- include/linux/fscrypto.h | 5
-rw-r--r-- include/linux/host1x.h | 2
-rw-r--r-- include/linux/iio/sw_trigger.h | 2
-rw-r--r-- include/linux/iomap.h | 8
-rw-r--r-- include/linux/irq.h | 10
-rw-r--r-- include/linux/irqchip/arm-gic-v3.h | 1
-rw-r--r-- include/linux/kvm_host.h | 12
-rw-r--r-- include/linux/mempolicy.h | 4
-rw-r--r-- include/linux/mfd/da8xx-cfgchip.h | 153
-rw-r--r-- include/linux/mfd/ti_am335x_tscadc.h | 8
-rw-r--r-- include/linux/mlx5/mlx5_ifc.h | 5
-rw-r--r-- include/linux/mm.h | 1
-rw-r--r-- include/linux/mmzone.h | 18
-rw-r--r-- include/linux/msi.h | 2
-rw-r--r-- include/linux/netdevice.h | 4
-rw-r--r-- include/linux/netfilter/nfnetlink_acct.h | 4
-rw-r--r-- include/linux/nvme.h | 2
-rw-r--r-- include/linux/pci.h | 19
-rw-r--r-- include/linux/perf_event.h | 4
-rw-r--r-- include/linux/printk.h | 48
-rw-r--r-- include/linux/qed/qed_if.h | 8
-rw-r--r-- include/linux/sctp.h | 64
-rw-r--r-- include/linux/serial_8250.h | 1
-rw-r--r-- include/linux/skbuff.h | 52
-rw-r--r-- include/linux/slab.h | 12
-rw-r--r-- include/linux/smc91x.h | 10
-rw-r--r-- include/linux/sunrpc/clnt.h | 2
-rw-r--r-- include/linux/sunrpc/xprt.h | 3
-rw-r--r-- include/linux/sysctl.h | 2
-rw-r--r-- include/linux/thread_info.h | 25
-rw-r--r-- include/linux/uaccess.h | 4
-rw-r--r-- include/linux/uio.h | 2
-rw-r--r-- include/net/act_api.h | 23
-rw-r--r-- include/net/af_rxrpc.h | 2
-rw-r--r-- include/net/af_unix.h | 2
-rw-r--r-- include/net/cfg80211.h | 9
-rw-r--r-- include/net/gre.h | 1
-rw-r--r-- include/net/inet_ecn.h | 3
-rw-r--r-- include/net/ip_fib.h | 3
-rw-r--r-- include/net/mac80211.h | 3
-rw-r--r-- include/net/netfilter/nft_meta.h | 4
-rw-r--r-- include/net/netfilter/nft_reject.h | 4
-rw-r--r-- include/net/pkt_cls.h | 41
-rw-r--r-- include/net/tcp.h | 2
-rw-r--r-- include/rdma/ib_verbs.h | 11
-rw-r--r-- include/scsi/scsi_transport_sas.h | 5
-rw-r--r-- include/trace/events/timer.h | 14
-rw-r--r-- include/uapi/linux/atm_zatm.h | 1
-rw-r--r-- include/uapi/linux/bpf.h | 4
-rw-r--r-- include/uapi/linux/if_pppol2tp.h | 3
-rw-r--r-- include/uapi/linux/if_pppox.h | 3
-rw-r--r-- include/uapi/linux/if_tunnel.h | 3
-rw-r--r-- include/uapi/linux/ipx.h | 13
-rw-r--r-- include/uapi/linux/libc-compat.h | 26
-rw-r--r-- include/uapi/linux/netfilter/nf_tables.h | 2
-rw-r--r-- include/uapi/linux/openvswitch.h | 6
-rw-r--r-- include/uapi/linux/sctp.h | 64
-rw-r--r-- include/uapi/linux/virtio_vsock.h | 2
-rw-r--r-- include/uapi/misc/cxl.h | 4
-rw-r--r-- include/xen/xen-ops.h | 4
-rw-r--r-- init/Kconfig | 2
-rw-r--r-- kernel/audit_watch.c | 8
-rw-r--r-- kernel/bpf/hashtab.c | 84
-rw-r--r-- kernel/bpf/verifier.c | 7
-rw-r--r-- kernel/configs/tiny.config | 8
-rw-r--r-- kernel/cpuset.c | 15
-rw-r--r-- kernel/events/core.c | 211
-rw-r--r-- kernel/events/ring_buffer.c | 15
-rw-r--r-- kernel/events/uprobes.c | 5
-rw-r--r-- kernel/exit.c | 7
-rw-r--r-- kernel/fork.c | 37
-rw-r--r-- kernel/futex.c | 23
-rw-r--r-- kernel/irq/affinity.c | 2
-rw-r--r-- kernel/irq/chip.c | 11
-rw-r--r-- kernel/irq/manage.c | 8
-rw-r--r-- kernel/irq/msi.c | 11
-rw-r--r-- kernel/kexec_file.c | 3
-rw-r--r-- kernel/locking/qspinlock_paravirt.h | 2
-rw-r--r-- kernel/locking/qspinlock_stat.h | 1
-rw-r--r-- kernel/memremap.c | 9
-rw-r--r-- kernel/power/hibernate.c | 4
-rw-r--r-- kernel/power/qos.c | 11
-rw-r--r-- kernel/power/snapshot.c | 10
-rw-r--r-- kernel/printk/braille.c | 4
-rw-r--r-- kernel/printk/internal.h | 16
-rw-r--r-- kernel/printk/nmi.c | 51
-rw-r--r-- kernel/printk/printk.c | 27
-rw-r--r-- kernel/sched/core.c | 41
-rw-r--r-- kernel/sched/cpudeadline.c | 2
-rw-r--r-- kernel/sched/cputime.c | 41
-rw-r--r-- kernel/sched/deadline.c | 5
-rw-r--r-- kernel/sched/fair.c | 2
-rw-r--r-- kernel/seccomp.c | 12
-rw-r--r-- kernel/sysctl.c | 45
-rw-r--r-- kernel/time/tick-sched.c | 3
-rw-r--r-- kernel/time/timekeeping.c | 5
-rw-r--r-- kernel/time/timekeeping_debug.c | 9
-rw-r--r-- kernel/time/timer.c | 5
-rw-r--r-- kernel/trace/blktrace.c | 2
-rw-r--r-- lib/Kconfig.debug | 18
-rw-r--r-- lib/Makefile | 1
-rw-r--r-- lib/iov_iter.c | 24
-rw-r--r-- lib/rhashtable.c | 20
-rw-r--r-- lib/strncpy_from_user.c | 8
-rw-r--r-- lib/strnlen_user.c | 7
-rw-r--r-- lib/test_hash.c | 26
-rw-r--r-- lib/test_rhashtable.c | 2
-rw-r--r-- lib/usercopy.c | 9
-rw-r--r-- mm/Kconfig | 9
-rw-r--r-- mm/Makefile | 4
-rw-r--r-- mm/huge_memory.c | 11
-rw-r--r-- mm/hugetlb.c | 1
-rw-r--r-- mm/kasan/quarantine.c | 7
-rw-r--r-- mm/memcontrol.c | 82
-rw-r--r-- mm/memory_hotplug.c | 2
-rw-r--r-- mm/mempolicy.c | 17
-rw-r--r-- mm/oom_kill.c | 2
-rw-r--r-- mm/page_alloc.c | 121
-rw-r--r-- mm/readahead.c | 9
-rw-r--r-- mm/rmap.c | 7
-rw-r--r-- mm/shmem.c | 4
-rw-r--r-- mm/slab.c | 30
-rw-r--r-- mm/slub.c | 46
-rw-r--r-- mm/usercopy.c | 277
-rw-r--r-- mm/vmscan.c | 22
-rw-r--r-- net/8021q/vlan.c | 2
-rw-r--r-- net/9p/trans_virtio.c | 4
-rw-r--r-- net/bluetooth/af_bluetooth.c | 2
-rw-r--r-- net/bluetooth/hci_request.c | 2
-rw-r--r-- net/bluetooth/hci_sock.c | 2
-rw-r--r-- net/bluetooth/l2cap_core.c | 8
-rw-r--r-- net/bluetooth/l2cap_sock.c | 14
-rw-r--r-- net/bridge/br_fdb.c | 52
-rw-r--r-- net/bridge/br_input.c | 7
-rw-r--r-- net/bridge/br_multicast.c | 2
-rw-r--r-- net/bridge/netfilter/ebtables.c | 2
-rw-r--r-- net/bridge/netfilter/nft_meta_bridge.c | 1
-rw-r--r-- net/ceph/mon_client.c | 2
-rw-r--r-- net/ceph/osd_client.c | 2
-rw-r--r-- net/ceph/string_table.c | 8
-rw-r--r-- net/core/dev.c | 26
-rw-r--r-- net/core/filter.c | 109
-rw-r--r-- net/core/flow_dissector.c | 6
-rw-r--r-- net/ipv4/devinet.c | 11
-rw-r--r-- net/ipv4/fib_frontend.c | 3
-rw-r--r-- net/ipv4/fib_semantics.c | 8
-rw-r--r-- net/ipv4/fib_trie.c | 12
-rw-r--r-- net/ipv4/ip_gre.c | 1
-rw-r--r-- net/ipv4/ip_tunnel_core.c | 8
-rw-r--r-- net/ipv4/ip_vti.c | 31
-rw-r--r-- net/ipv4/netfilter/nft_reject_ipv4.c | 1
-rw-r--r-- net/ipv4/tcp.c | 2
-rw-r--r-- net/ipv4/tcp_diag.c | 7
-rw-r--r-- net/ipv4/tcp_fastopen.c | 2
-rw-r--r-- net/ipv4/tcp_ipv4.c | 8
-rw-r--r-- net/ipv4/tcp_yeah.c | 2
-rw-r--r-- net/ipv4/udp.c | 13
-rw-r--r-- net/ipv4/udplite.c | 1
-rw-r--r-- net/ipv4/xfrm4_policy.c | 2
-rw-r--r-- net/ipv6/addrconf.c | 56
-rw-r--r-- net/ipv6/calipso.c | 4
-rw-r--r-- net/ipv6/ip6_gre.c | 2
-rw-r--r-- net/ipv6/ip6_tunnel.c | 2
-rw-r--r-- net/ipv6/netfilter/nft_reject_ipv6.c | 1
-rw-r--r-- net/ipv6/ping.c | 42
-rw-r--r-- net/ipv6/tcp_ipv6.c | 8
-rw-r--r-- net/ipv6/udp.c | 1
-rw-r--r-- net/ipv6/udplite.c | 1
-rw-r--r-- net/ipv6/xfrm6_input.c | 1
-rw-r--r-- net/ipv6/xfrm6_policy.c | 2
-rw-r--r-- net/irda/iriap.c | 8
-rw-r--r-- net/kcm/kcmsock.c | 3
-rw-r--r-- net/l2tp/l2tp_core.c | 3
-rw-r--r-- net/l2tp/l2tp_ppp.c | 2
-rw-r--r-- net/mac80211/cfg.c | 2
-rw-r--r-- net/mac80211/driver-ops.h | 2
-rw-r--r-- net/mac80211/mesh.c | 10
-rw-r--r-- net/mac80211/rx.c | 2
-rw-r--r-- net/mac80211/status.c | 14
-rw-r--r-- net/mac80211/tdls.c | 7
-rw-r--r-- net/mac80211/tx.c | 6
-rw-r--r-- net/netfilter/nf_conntrack_expect.c | 2
-rw-r--r-- net/netfilter/nf_conntrack_h323_main.c | 3
-rw-r--r-- net/netfilter/nf_conntrack_netlink.c | 10
-rw-r--r-- net/netfilter/nf_conntrack_sip.c | 4
-rw-r--r-- net/netfilter/nf_conntrack_standalone.c | 4
-rw-r--r-- net/netfilter/nf_tables_netdev.c | 1
-rw-r--r-- net/netfilter/nfnetlink_acct.c | 23
-rw-r--r-- net/netfilter/nfnetlink_cttimeout.c | 65
-rw-r--r-- net/netfilter/nfnetlink_log.c | 1
-rw-r--r-- net/netfilter/nfnetlink_queue.c | 6
-rw-r--r-- net/netfilter/nft_exthdr.c | 11
-rw-r--r-- net/netfilter/nft_meta.c | 17
-rw-r--r-- net/netfilter/nft_rbtree.c | 10
-rw-r--r-- net/netfilter/nft_reject.c | 16
-rw-r--r-- net/netfilter/nft_reject_inet.c | 7
-rw-r--r-- net/netfilter/xt_TPROXY.c | 4
-rw-r--r-- net/netfilter/xt_nfacct.c | 2
-rw-r--r-- net/openvswitch/conntrack.c | 8
-rw-r--r-- net/openvswitch/vport-geneve.c | 9
-rw-r--r-- net/openvswitch/vport-gre.c | 11
-rw-r--r-- net/openvswitch/vport-internal_dev.c | 2
-rw-r--r-- net/openvswitch/vport-vxlan.c | 9
-rw-r--r-- net/rxrpc/ar-internal.h | 1
-rw-r--r-- net/rxrpc/call_accept.c | 1
-rw-r--r-- net/rxrpc/call_event.c | 7
-rw-r--r-- net/rxrpc/call_object.c | 11
-rw-r--r-- net/rxrpc/input.c | 39
-rw-r--r-- net/rxrpc/recvmsg.c | 25
-rw-r--r-- net/rxrpc/skbuff.c | 41
-rw-r--r-- net/sched/act_api.c | 34
-rw-r--r-- net/sched/act_ife.c | 18
-rw-r--r-- net/sched/act_police.c | 62
-rw-r--r-- net/sched/cls_api.c | 51
-rw-r--r-- net/sched/sch_generic.c | 9
-rw-r--r-- net/sctp/input.c | 11
-rw-r--r-- net/sctp/inqueue.c | 13
-rw-r--r-- net/sctp/output.c | 13
-rw-r--r-- net/sctp/proc.c | 1
-rw-r--r-- net/sctp/sctp_diag.c | 24
-rw-r--r-- net/sctp/ulpevent.c | 4
-rw-r--r-- net/sunrpc/auth_gss/auth_gss.c | 8
-rw-r--r-- net/sunrpc/auth_gss/svcauth_gss.c | 5
-rw-r--r-- net/sunrpc/clnt.c | 28
-rw-r--r-- net/sunrpc/xprt.c | 26
-rw-r--r-- net/sunrpc/xprtrdma/verbs.c | 45
-rw-r--r-- net/sunrpc/xprtrdma/xprt_rdma.h | 1
-rw-r--r-- net/sunrpc/xprtsock.c | 62
-rw-r--r-- net/tipc/monitor.c | 3
-rw-r--r-- net/tipc/name_distr.c | 8
-rw-r--r-- net/tipc/socket.c | 3
-rw-r--r-- net/tipc/udp_media.c | 5
-rw-r--r-- net/unix/af_unix.c | 111
-rw-r--r-- net/vmw_vsock/virtio_transport.c | 10
-rw-r--r-- net/wireless/chan.c | 1
-rw-r--r-- net/wireless/nl80211.c | 34
-rw-r--r-- net/wireless/wext-core.c | 25
-rw-r--r-- net/xfrm/xfrm_input.c | 14
-rw-r--r-- net/xfrm/xfrm_policy.c | 4
-rw-r--r-- net/xfrm/xfrm_user.c | 13
-rw-r--r-- samples/bpf/bpf_helpers.h | 4
-rw-r--r-- samples/bpf/test_cgrp2_tc_kern.c | 2
-rw-r--r-- samples/bpf/test_maps.c | 15
-rw-r--r-- scripts/Kbuild.include | 10
-rw-r--r-- scripts/Makefile.gcc-plugins | 39
-rwxr-xr-x scripts/checkpatch.pl | 9
-rwxr-xr-x scripts/gcc-plugin.sh | 14
-rw-r--r-- scripts/gcc-plugins/Makefile | 12
-rwxr-xr-x scripts/get_maintainer.pl | 6
-rwxr-xr-x scripts/package/builddeb | 4
-rwxr-xr-x scripts/tags.sh | 1
-rw-r--r-- security/Kconfig | 40
-rw-r--r-- sound/core/rawmidi.c | 4
-rw-r--r-- sound/core/timer.c | 34
-rw-r--r-- sound/firewire/fireworks/fireworks.h | 1
-rw-r--r-- sound/firewire/fireworks/fireworks_hwdep.c | 71
-rw-r--r-- sound/firewire/fireworks/fireworks_proc.c | 4
-rw-r--r-- sound/firewire/fireworks/fireworks_transaction.c | 5
-rw-r--r-- sound/firewire/tascam/tascam-hwdep.c | 33
-rw-r--r-- sound/pci/hda/hda_intel.c | 32
-rw-r--r-- sound/pci/hda/patch_realtek.c | 22
-rw-r--r-- sound/soc/atmel/atmel_ssc_dai.c | 5
-rw-r--r-- sound/soc/codecs/da7213.c | 4
-rw-r--r-- sound/soc/codecs/max98371.c | 1
-rw-r--r-- sound/soc/codecs/nau8825.c | 78
-rw-r--r-- sound/soc/codecs/wm2000.c | 2
-rw-r--r-- sound/soc/generic/Makefile | 6
-rw-r--r-- sound/soc/generic/simple-card-utils.c | 6
-rw-r--r-- sound/soc/intel/skylake/skl-sst-utils.c | 5
-rw-r--r-- sound/soc/intel/skylake/skl.c | 4
-rw-r--r-- sound/soc/omap/omap-abe-twl6040.c | 61
-rw-r--r-- sound/soc/omap/omap-mcpdm.c | 22
-rw-r--r-- sound/soc/samsung/s3c24xx_uda134x.c | 7
-rw-r--r-- sound/soc/sh/rcar/src.c | 6
-rw-r--r-- sound/soc/soc-compress.c | 4
-rw-r--r-- sound/soc/soc-core.c | 5
-rw-r--r-- sound/soc/soc-dapm.c | 10
-rw-r--r-- sound/usb/line6/pcm.c | 3
-rw-r--r-- sound/usb/line6/pod.c | 12
-rw-r--r-- sound/usb/quirks.c | 3
-rw-r--r-- tools/arch/arm64/include/uapi/asm/kvm.h | 2
-rw-r--r-- tools/arch/s390/include/uapi/asm/kvm.h | 41
-rw-r--r-- tools/arch/s390/include/uapi/asm/sie.h | 1
-rw-r--r-- tools/arch/x86/include/asm/cpufeatures.h | 9
-rw-r--r-- tools/arch/x86/include/asm/disabled-features.h | 2
-rw-r--r-- tools/arch/x86/include/asm/required-features.h | 2
-rw-r--r-- tools/arch/x86/include/uapi/asm/vmx.h | 4
-rw-r--r-- tools/gpio/gpio-event-mon.c | 2
-rw-r--r-- tools/iio/iio_generic_buffer.c | 4
-rw-r--r-- tools/include/linux/string.h | 6
-rw-r--r-- tools/include/uapi/linux/bpf.h | 86
-rw-r--r-- tools/lguest/lguest.c | 6
-rw-r--r-- tools/perf/Documentation/perf-probe.txt | 10
-rw-r--r-- tools/perf/Documentation/perf-script.txt | 4
-rw-r--r-- tools/perf/arch/powerpc/util/sym-handling.c | 29
-rw-r--r-- tools/perf/arch/x86/util/intel-pt.c | 6
-rw-r--r-- tools/perf/builtin-mem.c | 3
-rw-r--r-- tools/perf/builtin-script.c | 15
-rw-r--r-- tools/perf/builtin-stat.c | 31
-rw-r--r-- tools/perf/util/evsel.c | 6
-rw-r--r-- tools/perf/util/intel-pt-decoder/intel-pt-decoder.c | 44
-rw-r--r-- tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c | 24
-rw-r--r-- tools/perf/util/jitdump.c | 1
-rw-r--r-- tools/perf/util/probe-event.c | 60
-rw-r--r-- tools/perf/util/probe-event.h | 6
-rw-r--r-- tools/perf/util/probe-file.c | 36
-rw-r--r-- tools/perf/util/probe-finder.c | 15
-rw-r--r-- tools/perf/util/sort.c | 6
-rw-r--r-- tools/perf/util/symbol-elf.c | 3
-rw-r--r-- tools/perf/util/unwind-libdw.c | 2
-rw-r--r-- tools/perf/util/unwind-libunwind-local.c | 2
-rw-r--r-- tools/testing/nvdimm/test/nfit.c | 2
-rw-r--r-- tools/testing/selftests/powerpc/Makefile | 2
-rw-r--r-- tools/virtio/linux/dma-mapping.h | 16
-rw-r--r-- tools/virtio/linux/kernel.h | 14
-rw-r--r-- tools/virtio/linux/slab.h | 4
-rw-r--r-- tools/virtio/linux/virtio.h | 6
-rw-r--r-- tools/virtio/linux/virtio_config.h | 13
-rw-r--r-- tools/virtio/ringtest/ptr_ring.c | 1
-rw-r--r-- virt/kvm/arm/arch_timer.c | 11
-rw-r--r-- virt/kvm/arm/vgic/vgic-init.c | 17
-rw-r--r-- virt/kvm/arm/vgic/vgic-its.c | 158
-rw-r--r-- virt/kvm/arm/vgic/vgic-mmio-v3.c | 26
-rw-r--r-- virt/kvm/arm/vgic/vgic-v3.c | 8
-rw-r--r-- virt/kvm/arm/vgic/vgic.c | 10
-rw-r--r-- virt/kvm/arm/vgic/vgic.h | 6
-rw-r--r-- virt/kvm/kvm_main.c | 16
1370 files changed, 15591 insertions, 8121 deletions
diff --git a/.mailmap b/.mailmap
index 2a91c14c80bf..de22daefd9da 100644
--- a/.mailmap
+++ b/.mailmap
@@ -88,6 +88,7 @@ Kay Sievers <kay.sievers@vrfy.org>
 Kenneth W Chen <kenneth.w.chen@intel.com>
 Konstantin Khlebnikov <koct9i@gmail.com> <k.khlebnikov@samsung.com>
 Koushik <raghavendra.koushik@neterion.com>
+Krzysztof Kozlowski <krzk@kernel.org> <k.kozlowski@samsung.com>
 Krzysztof Kozlowski <krzk@kernel.org> <k.kozlowski.k@gmail.com>
 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
 Leonid I Ananiev <leonid.i.ananiev@intel.com>
@@ -158,6 +159,8 @@ Valdis Kletnieks <Valdis.Kletnieks@vt.edu>
 Viresh Kumar <vireshk@kernel.org> <viresh.kumar@st.com>
 Viresh Kumar <vireshk@kernel.org> <viresh.linux@gmail.com>
 Viresh Kumar <vireshk@kernel.org> <viresh.kumar2@arm.com>
+Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@virtuozzo.com>
+Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@parallels.com>
 Takashi YOSHII <takashi.yoshii.zj@renesas.com>
 Yusuke Goda <goda.yusuke@renesas.com>
 Gustavo Padovan <gustavo@las.ic.unicamp.br>
diff --git a/Documentation/ABI/stable/sysfs-devices b/Documentation/ABI/stable/sysfs-devices
index 43f78b88da28..df449d79b563 100644
--- a/Documentation/ABI/stable/sysfs-devices
+++ b/Documentation/ABI/stable/sysfs-devices
@@ -1,7 +1,7 @@
 # Note: This documents additional properties of any device beyond what
 # is documented in Documentation/sysfs-rules.txt
 
-What:		/sys/devices/*/of_path
+What:		/sys/devices/*/of_node
 Date:		February 2015
 Contact:	Device Tree mailing list <devicetree@vger.kernel.org>
 Description:
diff --git a/Documentation/PCI/MSI-HOWTO.txt b/Documentation/PCI/MSI-HOWTO.txt
index c55df2911136..cd9c9f6a7cd9 100644
--- a/Documentation/PCI/MSI-HOWTO.txt
+++ b/Documentation/PCI/MSI-HOWTO.txt
@@ -94,14 +94,11 @@ has a requirements for a minimum number of vectors the driver can pass a
 min_vecs argument set to this limit, and the PCI core will return -ENOSPC
 if it can't meet the minimum number of vectors.
 
-The flags argument should normally be set to 0, but can be used to pass the
-PCI_IRQ_NOMSI and PCI_IRQ_NOMSIX flag in case a device claims to support
-MSI or MSI-X, but the support is broken, or to pass PCI_IRQ_NOLEGACY in
-case the device does not support legacy interrupt lines.
-
-By default this function will spread the interrupts around the available
-CPUs, but this feature can be disabled by passing the PCI_IRQ_NOAFFINITY
-flag.
+The flags argument is used to specify which type of interrupt can be used
+by the device and the driver (PCI_IRQ_LEGACY, PCI_IRQ_MSI, PCI_IRQ_MSIX).
+A convenient short-hand (PCI_IRQ_ALL_TYPES) is also available to ask for
+any possible kind of interrupt. If the PCI_IRQ_AFFINITY flag is set,
+pci_alloc_irq_vectors() will spread the interrupts around the available CPUs.
 
 To get the Linux IRQ numbers passed to request_irq() and free_irq() and the
 vectors, use the following function:
@@ -131,7 +128,7 @@ larger than the number supported by the device it will automatically be
 capped to the supported limit, so there is no need to query the number of
 vectors supported beforehand:
 
-	nvec = pci_alloc_irq_vectors(pdev, 1, nvec, 0);
+	nvec = pci_alloc_irq_vectors(pdev, 1, nvec, PCI_IRQ_ALL_TYPES)
 	if (nvec < 0)
 		goto out_err;
 
@@ -140,7 +137,7 @@ interrupts it can request a particular number of interrupts by passing that
 number to pci_alloc_irq_vectors() function as both 'min_vecs' and
 'max_vecs' parameters:
 
-	ret = pci_alloc_irq_vectors(pdev, nvec, nvec, 0);
+	ret = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_ALL_TYPES);
 	if (ret < 0)
 		goto out_err;
 
@@ -148,15 +145,14 @@ The most notorious example of the request type described above is enabling
 the single MSI mode for a device. It could be done by passing two 1s as
 'min_vecs' and 'max_vecs':
 
-	ret = pci_alloc_irq_vectors(pdev, 1, 1, 0);
+	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
 	if (ret < 0)
 		goto out_err;
 
 Some devices might not support using legacy line interrupts, in which case
-the PCI_IRQ_NOLEGACY flag can be used to fail the request if the platform
-can't provide MSI or MSI-X interrupts:
+the driver can specify that only MSI or MSI-X is acceptable:
 
-	nvec = pci_alloc_irq_vectors(pdev, 1, nvec, PCI_IRQ_NOLEGACY);
+	nvec = pci_alloc_irq_vectors(pdev, 1, nvec, PCI_IRQ_MSI | PCI_IRQ_MSIX);
 	if (nvec < 0)
 		goto out_err;
 
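
Pulled together, the allocation pattern this updated HOWTO describes ends up
looking roughly like the sketch below. The device setup, the maximum of 8
vectors and example_handler() are hypothetical; pci_alloc_irq_vectors(),
pci_irq_vector() and pci_free_irq_vectors() are the interfaces the text names:

	#include <linux/interrupt.h>
	#include <linux/pci.h>

	static irqreturn_t example_handler(int irq, void *data)
	{
		return IRQ_HANDLED;	/* placeholder handler */
	}

	static int example_setup_irqs(struct pci_dev *pdev)
	{
		int i, ret, nvec;

		/* Accept any interrupt type; spread vectors over the CPUs. */
		nvec = pci_alloc_irq_vectors(pdev, 1, 8,
					     PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY);
		if (nvec < 0)
			return nvec;	/* -ENOSPC if min_vecs cannot be met */

		for (i = 0; i < nvec; i++) {
			ret = request_irq(pci_irq_vector(pdev, i),
					  example_handler, 0, "example", pdev);
			if (ret)
				goto err;
		}
		return 0;
	err:
		while (--i >= 0)
			free_irq(pci_irq_vector(pdev, i), pdev);
		pci_free_irq_vectors(pdev);
		return ret;
	}
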
diff --git a/Documentation/PCI/pci.txt b/Documentation/PCI/pci.txt
index 123881f62219..77f49dc5be23 100644
--- a/Documentation/PCI/pci.txt
+++ b/Documentation/PCI/pci.txt
@@ -124,7 +124,6 @@ initialization with a pointer to a structure describing the driver
 
 The ID table is an array of struct pci_device_id entries ending with an
 all-zero entry. Definitions with static const are generally preferred.
-Use of the deprecated macro DEFINE_PCI_DEVICE_TABLE should be avoided.
 
 Each entry consists of:
 
diff --git a/Documentation/arm/CCN.txt b/Documentation/arm/CCN.txt
index ffca443a19b4..15cdb7bc57c3 100644
--- a/Documentation/arm/CCN.txt
+++ b/Documentation/arm/CCN.txt
@@ -18,13 +18,17 @@ and config2 fields of the perf_event_attr structure. The "events"
 directory provides configuration templates for all documented
 events, that can be used with perf tool. For example "xp_valid_flit"
 is an equivalent of "type=0x8,event=0x4". Other parameters must be
-explicitly specified. For events originating from device, "node"
-defines its index. All crosspoint events require "xp" (index),
-"port" (device port number) and "vc" (virtual channel ID) and
-"dir" (direction). Watchpoints (special "event" value 0xfe) also
-require comparator values ("cmp_l" and "cmp_h") and "mask", being
-index of the comparator mask.
+explicitly specified.
 
+For events originating from device, "node" defines its index.
+
+Crosspoint PMU events require "xp" (index), "bus" (bus number)
+and "vc" (virtual channel ID).
+
+Crosspoint watchpoint-based events (special "event" value 0xfe)
+require "xp" and "vc" as as above plus "port" (device port index),
+"dir" (transmit/receive direction), comparator values ("cmp_l"
+and "cmp_h") and "mask", being index of the comparator mask.
 Masks are defined separately from the event description
 (due to limited number of the config values) in the "cmp_mask"
 directory, with first 8 configurable by user and additional
diff --git a/Documentation/arm64/silicon-errata.txt b/Documentation/arm64/silicon-errata.txt
index 4da60b463995..ccc60324e738 100644
--- a/Documentation/arm64/silicon-errata.txt
+++ b/Documentation/arm64/silicon-errata.txt
@@ -53,6 +53,7 @@ stable kernels.
 | ARM            | Cortex-A57      | #832075         | ARM64_ERRATUM_832075        |
 | ARM            | Cortex-A57      | #852523         | N/A                         |
 | ARM            | Cortex-A57      | #834220         | ARM64_ERRATUM_834220        |
+| ARM            | Cortex-A72      | #853709         | N/A                         |
 | ARM            | MMU-500         | #841119,#826419 | N/A                         |
 |                |                 |                 |                             |
 | Cavium         | ThunderX ITS    | #22375, #24313  | CAVIUM_ERRATUM_22375        |
diff --git a/Documentation/block/queue-sysfs.txt b/Documentation/block/queue-sysfs.txt
index d515d58962b9..2a3904030dea 100644
--- a/Documentation/block/queue-sysfs.txt
+++ b/Documentation/block/queue-sysfs.txt
@@ -14,6 +14,12 @@ add_random (RW)
 This file allows to turn off the disk entropy contribution. Default
 value of this file is '1'(on).
 
+dax (RO)
+--------
+This file indicates whether the device supports Direct Access (DAX),
+used by CPU-addressable storage to bypass the pagecache. It shows '1'
+if true, '0' if not.
+
 discard_granularity (RO)
 -----------------------
 This shows the size of internal allocation of the device in bytes, if
@@ -46,6 +52,12 @@ hw_sector_size (RO)
 -------------------
 This is the hardware sector size of the device, in bytes.
 
+io_poll (RW)
+------------
+When read, this file shows the total number of block IO polls and how
+many returned success. Writing '0' to this file will disable polling
+for this device. Writing any non-zero value will enable this feature.
+
 iostats (RW)
 -------------
 This file is used to control (on/off) the iostats accounting of the
@@ -151,5 +163,11 @@ device state. This means that it might not be safe to toggle the
 setting from "write back" to "write through", since that will also
 eliminate cache flushes issued by the kernel.
 
+write_same_max_bytes (RO)
+-------------------------
+This is the number of bytes the device can write in a single write-same
+command. A value of '0' means write-same is not supported by this
+device.
+
 
 Jens Axboe <jens.axboe@oracle.com>, February 2009
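
The three attributes added above are plain text files under the per-device
queue directory, so they can be inspected with a few lines of C — a sketch
only, and the device name "sda" is just an example:

	#include <stdio.h>

	static void show_attr(const char *name)
	{
		char path[128], buf[64];
		FILE *f;

		snprintf(path, sizeof(path), "/sys/block/sda/queue/%s", name);
		f = fopen(path, "r");
		if (f) {
			if (fgets(buf, sizeof(buf), f))
				printf("%s: %s", name, buf);
			fclose(f);
		}
	}

	int main(void)
	{
		show_attr("dax");			/* '1' if DAX-capable */
		show_attr("io_poll");			/* poll stats, RW */
		show_attr("write_same_max_bytes");	/* 0 if unsupported */
		return 0;
	}
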
diff --git a/Documentation/conf.py b/Documentation/conf.py
index 96b7aa66c89c..106ae9c740b9 100644
--- a/Documentation/conf.py
+++ b/Documentation/conf.py
@@ -131,7 +131,7 @@ pygments_style = 'sphinx'
 todo_include_todos = False
 
 primary_domain = 'C'
-highlight_language = 'C'
+highlight_language = 'guess'
 
 # -- Options for HTML output ----------------------------------------------
 
diff --git a/Documentation/cpu-freq/cpufreq-stats.txt b/Documentation/cpu-freq/cpufreq-stats.txt
index fc647492e940..8d9773f23550 100644
--- a/Documentation/cpu-freq/cpufreq-stats.txt
+++ b/Documentation/cpu-freq/cpufreq-stats.txt
@@ -103,7 +103,7 @@ Config Main Menu
 	Power management options (ACPI, APM)  --->
 		CPU Frequency scaling  --->
 			[*] CPU Frequency scaling
-			<*>   CPU frequency translation statistics
+			[*]   CPU frequency translation statistics
 			[*]     CPU frequency translation statistics details
 
 
diff --git a/Documentation/devicetree/bindings/iio/adc/rockchip-saradc.txt b/Documentation/devicetree/bindings/iio/adc/rockchip-saradc.txt
index bf99e2f24788..205593f56fe7 100644
--- a/Documentation/devicetree/bindings/iio/adc/rockchip-saradc.txt
+++ b/Documentation/devicetree/bindings/iio/adc/rockchip-saradc.txt
@@ -16,6 +16,11 @@ Required properties:
 - vref-supply: The regulator supply ADC reference voltage.
 - #io-channel-cells: Should be 1, see ../iio-bindings.txt
 
+Optional properties:
+- resets: Must contain an entry for each entry in reset-names if need support
+  this option. See ../reset/reset.txt for details.
+- reset-names: Must include the name "saradc-apb".
+
 Example:
 	saradc: saradc@2006c000 {
 		compatible = "rockchip,saradc";
@@ -23,6 +28,8 @@ Example:
 		interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&cru SCLK_SARADC>, <&cru PCLK_SARADC>;
 		clock-names = "saradc", "apb_pclk";
+		resets = <&cru SRST_SARADC>;
+		reset-names = "saradc-apb";
 		#io-channel-cells = <1>;
 		vref-supply = <&vcc18>;
 	};
diff --git a/Documentation/devicetree/bindings/mmc/sdhci-st.txt b/Documentation/devicetree/bindings/mmc/sdhci-st.txt
index 88faa91125bf..3cd4c43a3260 100644
--- a/Documentation/devicetree/bindings/mmc/sdhci-st.txt
+++ b/Documentation/devicetree/bindings/mmc/sdhci-st.txt
@@ -10,7 +10,7 @@ Required properties:
10 subsystem (mmcss) inside the FlashSS (available in STiH407 SoC 10 subsystem (mmcss) inside the FlashSS (available in STiH407 SoC
11 family). 11 family).
12 12
13- clock-names: Should be "mmc". 13- clock-names: Should be "mmc" and "icn". (NB: The latter is not compulsory)
14 See: Documentation/devicetree/bindings/resource-names.txt 14 See: Documentation/devicetree/bindings/resource-names.txt
15- clocks: Phandle to the clock. 15- clocks: Phandle to the clock.
16 See: Documentation/devicetree/bindings/clock/clock-bindings.txt 16 See: Documentation/devicetree/bindings/clock/clock-bindings.txt
diff --git a/Documentation/devicetree/bindings/serial/8250.txt b/Documentation/devicetree/bindings/serial/8250.txt
index f5561ac7e17e..936ab5b87324 100644
--- a/Documentation/devicetree/bindings/serial/8250.txt
+++ b/Documentation/devicetree/bindings/serial/8250.txt
@@ -42,9 +42,6 @@ Optional properties:
 - auto-flow-control: one way to enable automatic flow control support. The
   driver is allowed to detect support for the capability even without this
   property.
-- {rts,cts,dtr,dsr,rng,dcd}-gpios: specify a GPIO for RTS/CTS/DTR/DSR/RI/DCD
-  line respectively. It will use specified GPIO instead of the peripheral
-  function pin for the UART feature. If unsure, don't specify this property.
 
 Note:
 * fsl,ns16550:
@@ -66,19 +63,3 @@ Example:
 		interrupts = <10>;
 		reg-shift = <2>;
 	};
-
-Example for OMAP UART using GPIO-based modem control signals:
-
-	uart4: serial@49042000 {
-		compatible = "ti,omap3-uart";
-		reg = <0x49042000 0x400>;
-		interrupts = <80>;
-		ti,hwmods = "uart4";
-		clock-frequency = <48000000>;
-		cts-gpios = <&gpio3 5 GPIO_ACTIVE_LOW>;
-		rts-gpios = <&gpio3 6 GPIO_ACTIVE_LOW>;
-		dtr-gpios = <&gpio1 12 GPIO_ACTIVE_LOW>;
-		dsr-gpios = <&gpio1 13 GPIO_ACTIVE_LOW>;
-		dcd-gpios = <&gpio1 14 GPIO_ACTIVE_LOW>;
-		rng-gpios = <&gpio1 15 GPIO_ACTIVE_LOW>;
-	};
diff --git a/Documentation/devicetree/bindings/sound/omap-mcpdm.txt b/Documentation/devicetree/bindings/sound/omap-mcpdm.txt
index 6f6c2f8e908d..0741dff048dd 100644
--- a/Documentation/devicetree/bindings/sound/omap-mcpdm.txt
+++ b/Documentation/devicetree/bindings/sound/omap-mcpdm.txt
@@ -8,8 +8,6 @@ Required properties:
 - interrupts: Interrupt number for McPDM
 - interrupt-parent: The parent interrupt controller
 - ti,hwmods: Name of the hwmod associated to the McPDM
-- clocks: phandle for the pdmclk provider, likely <&twl6040>
-- clock-names: Must be "pdmclk"
 
 Example:
 
@@ -21,11 +19,3 @@ mcpdm: mcpdm@40132000 {
 	interrupt-parent = <&gic>;
 	ti,hwmods = "mcpdm";
 };
-
-In board DTS file the pdmclk needs to be added:
-
-&mcpdm {
-	clocks = <&twl6040>;
-	clock-names = "pdmclk";
-	status = "okay";
-};
diff --git a/Documentation/devicetree/bindings/thermal/thermal.txt b/Documentation/devicetree/bindings/thermal/thermal.txt
index 41b817f7b670..88b6ea1ad290 100644
--- a/Documentation/devicetree/bindings/thermal/thermal.txt
+++ b/Documentation/devicetree/bindings/thermal/thermal.txt
@@ -62,7 +62,7 @@ For more examples of cooling devices, refer to the example sections below.
 Required properties:
 - #cooling-cells:	Used to provide cooling device specific information
   Type: unsigned	while referring to it. Must be at least 2, in order
-  Size: one cell	to specify minimum and maximum cooling state used
+  Size: one cell	to specify minimum and maximum cooling state used
 			in the reference. The first cell is the minimum
 			cooling state requested and the second cell is
 			the maximum cooling state requested in the reference.
@@ -119,7 +119,7 @@ Required properties:
 Optional property:
 - contribution:		The cooling contribution to the thermal zone of the
   Type: unsigned	referred cooling device at the referred trip point.
-  Size: one cell	The contribution is a ratio of the sum
+  Size: one cell	The contribution is a ratio of the sum
 			of all cooling contributions within a thermal zone.
 
 Note: Using the THERMAL_NO_LIMIT (-1UL) constant in the cooling-device phandle
@@ -145,7 +145,7 @@ Required properties:
   Size: one cell
 
 - thermal-sensors:	A list of thermal sensor phandles and sensor specifier
-  Type: list of		used while monitoring the thermal zone.
+  Type: list of		used while monitoring the thermal zone.
   phandles + sensor
   specifier
 
@@ -473,7 +473,7 @@ thermal-zones {
 				<&adc>;		/* pcb north */
 
 			/* hotspot = 100 * bandgap - 120 * adc + 484 */
-			coefficients =		<100	-120	484>;
+			coefficients =		<100	-120	484>;
 
 			trips {
 				...
@@ -502,7 +502,7 @@ from the ADC sensor. The binding would be then:
 	thermal-sensors = <&adc>;
 
 	/* hotspot = 1 * adc + 6000 */
-	coefficients =		<1	6000>;
+	coefficients =		<1	6000>;
 
 (d) - Board thermal
 
diff --git a/Documentation/filesystems/overlayfs.txt b/Documentation/filesystems/overlayfs.txt
index d6259c786316..bcbf9710e4af 100644
--- a/Documentation/filesystems/overlayfs.txt
+++ b/Documentation/filesystems/overlayfs.txt
@@ -183,12 +183,10 @@ The copy_up operation essentially creates a new, identical file and
 moves it over to the old name. The new file may be on a different
 filesystem, so both st_dev and st_ino of the file may change.
 
-Any open files referring to this inode will access the old data and
-metadata. Similarly any file locks obtained before copy_up will not
-apply to the copied up file.
+Any open files referring to this inode will access the old data.
 
-On a file opened with O_RDONLY fchmod(2), fchown(2), futimesat(2) and
-fsetxattr(2) will fail with EROFS.
+Any file locks (and leases) obtained before copy_up will not apply
+to the copied up file.
 
 If a file with multiple hard links is copied up, then this will
 "break" the link. Changes will not be propagated to other names
diff --git a/Documentation/hwmon/ftsteutates b/Documentation/hwmon/ftsteutates
index 2a1bf69c6a26..8c10a916de20 100644
--- a/Documentation/hwmon/ftsteutates
+++ b/Documentation/hwmon/ftsteutates
@@ -19,5 +19,5 @@ enhancements. It can monitor up to 4 voltages, 16 temperatures and
 implemented in this driver.
 
 Specification of the chip can be found here:
-ftp:///pub/Mainboard-OEM-Sales/Services/Software&Tools/Linux_SystemMonitoring&Watchdog&GPIO/BMC-Teutates_Specification_V1.21.pdf
-ftp:///pub/Mainboard-OEM-Sales/Services/Software&Tools/Linux_SystemMonitoring&Watchdog&GPIO/Fujitsu_mainboards-1-Sensors_HowTo-en-US.pdf
+ftp://ftp.ts.fujitsu.com/pub/Mainboard-OEM-Sales/Services/Software&Tools/Linux_SystemMonitoring&Watchdog&GPIO/BMC-Teutates_Specification_V1.21.pdf
+ftp://ftp.ts.fujitsu.com/pub/Mainboard-OEM-Sales/Services/Software&Tools/Linux_SystemMonitoring&Watchdog&GPIO/Fujitsu_mainboards-1-Sensors_HowTo-en-US.pdf
diff --git a/Documentation/i2c/slave-interface b/Documentation/i2c/slave-interface
index 80807adb8ded..7e2a228f21bc 100644
--- a/Documentation/i2c/slave-interface
+++ b/Documentation/i2c/slave-interface
@@ -145,6 +145,11 @@ If you want to add slave support to the bus driver:
 
 * Catch the slave interrupts and send appropriate i2c_slave_events to the backend.
 
+Note that most hardware supports being master _and_ slave on the same bus. So,
+if you extend a bus driver, please make sure that the driver supports that as
+well. In almost all cases, slave support does not need to disable the master
+functionality.
+
 Check the i2c-rcar driver as an example.
 
 
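
For context on the note above, a slave backend boils down to an event
callback of the shape sketched here. The one-byte "register" is purely
illustrative; the event names and i2c_slave_register() are the existing
slave API this document describes:

	#include <linux/i2c.h>

	static u8 example_reg;	/* illustrative one-byte register */

	static int example_slave_cb(struct i2c_client *client,
				    enum i2c_slave_event event, u8 *val)
	{
		switch (event) {
		case I2C_SLAVE_WRITE_RECEIVED:
			example_reg = *val;	/* byte written by remote master */
			break;
		case I2C_SLAVE_READ_REQUESTED:
		case I2C_SLAVE_READ_PROCESSED:
			*val = example_reg;	/* byte read by remote master */
			break;
		default:
			break;
		}
		return 0;
	}

	/* Registered from a client driver with
	 *	i2c_slave_register(client, example_slave_cb);
	 * while master transfers on the same adapter keep working. */
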
diff --git a/Documentation/kernel-documentation.rst b/Documentation/kernel-documentation.rst
index c4eb5049da39..391decc66a18 100644
--- a/Documentation/kernel-documentation.rst
+++ b/Documentation/kernel-documentation.rst
@@ -366,8 +366,6 @@ Domain`_ references.
 Cross-referencing from reStructuredText
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-.. highlight:: none
-
 To cross-reference the functions and types defined in the kernel-doc comments
 from reStructuredText documents, please use the `Sphinx C Domain`_
 references. For example::
@@ -390,8 +388,6 @@ For further details, please refer to the `Sphinx C Domain`_ documentation.
 Function documentation
 ----------------------
 
-.. highlight:: c
-
 The general format of a function and function-like macro kernel-doc comment is::
 
 	/**
@@ -572,8 +568,6 @@ DocBook XML [DEPRECATED]
 Converting DocBook to Sphinx
 ----------------------------
 
-.. highlight:: none
-
 Over time, we expect all of the documents under ``Documentation/DocBook`` to be
 converted to Sphinx and reStructuredText. For most DocBook XML documents, a good
 enough solution is to use the simple ``Documentation/sphinx/tmplcvt`` script,
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 46c030a49186..a4f4d693e2c1 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -3032,6 +3032,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			PAGE_SIZE is used as alignment.
 			PCI-PCI bridge can be specified, if resource
 			windows need to be expanded.
+			To specify the alignment for several
+			instances of a device, the PCI vendor,
+			device, subvendor, and subdevice may be
+			specified, e.g., 4096@pci:8086:9c22:103c:198f
 	ecrc=		Enable/disable PCIe ECRC (transaction layer
 			end-to-end CRC checking).
 			bios: Use BIOS/firmware settings. This is the
diff --git a/Documentation/networking/dsa/dsa.txt b/Documentation/networking/dsa/dsa.txt
index 9d05ed7f7da5..f20c884c048a 100644
--- a/Documentation/networking/dsa/dsa.txt
+++ b/Documentation/networking/dsa/dsa.txt
@@ -587,26 +587,6 @@ of DSA, would be the its port-based VLAN, used by the associated bridge device.
 TODO
 ====
 
-The platform device problem
----------------------------
-DSA is currently implemented as a platform device driver which is far from ideal
-as was discussed in this thread:
-
-http://permalink.gmane.org/gmane.linux.network/329848
-
-This basically prevents the device driver model to be properly used and applied,
-and support non-MDIO, non-MMIO Ethernet connected switches.
-
-Another problem with the platform device driver approach is that it prevents the
-use of a modular switch drivers build due to a circular dependency, illustrated
-here:
-
-http://comments.gmane.org/gmane.linux.network/345803
-
-Attempts of reworking this has been done here:
-
-https://lwn.net/Articles/643149/
-
 Making SWITCHDEV and DSA converge towards an unified codebase
 -------------------------------------------------------------
 
diff --git a/Documentation/networking/rxrpc.txt b/Documentation/networking/rxrpc.txt
index 16a924c486bf..70c926ae212d 100644
--- a/Documentation/networking/rxrpc.txt
+++ b/Documentation/networking/rxrpc.txt
@@ -790,13 +790,12 @@ The kernel interface functions are as follows:
     Data messages can have their contents extracted with the usual bunch of
     socket buffer manipulation functions. A data message can be determined to
     be the last one in a sequence with rxrpc_kernel_is_data_last(). When a
-    data message has been used up, rxrpc_kernel_data_delivered() should be
-    called on it..
+    data message has been used up, rxrpc_kernel_data_consumed() should be
+    called on it.
 
-    Non-data messages should be handled to rxrpc_kernel_free_skb() to dispose
-    of. It is possible to get extra refs on all types of message for later
-    freeing, but this may pin the state of a call until the message is finally
-    freed.
+    Messages should be handled to rxrpc_kernel_free_skb() to dispose of. It
+    is possible to get extra refs on all types of message for later freeing,
+    but this may pin the state of a call until the message is finally freed.
 
  (*) Accept an incoming call.
 
@@ -821,12 +820,14 @@ The kernel interface functions are as follows:
     Other errors may be returned if the call had been aborted (-ECONNABORTED)
     or had timed out (-ETIME).
 
- (*) Record the delivery of a data message and free it.
+ (*) Record the delivery of a data message.
 
-	void rxrpc_kernel_data_delivered(struct sk_buff *skb);
+	void rxrpc_kernel_data_consumed(struct rxrpc_call *call,
+					struct sk_buff *skb);
 
-    This is used to record a data message as having been delivered and to
-    update the ACK state for the call. The socket buffer will be freed.
+    This is used to record a data message as having been consumed and to
+    update the ACK state for the call. The message must still be passed to
+    rxrpc_kernel_free_skb() for disposal by the caller.
 
  (*) Free a message.
 
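
The consume-then-free sequence implied by the new wording, as a minimal
sketch (the surrounding call management is elided; rxrpc_kernel_data_consumed()
and rxrpc_kernel_free_skb() are the functions documented above):

	static void example_process_data(struct rxrpc_call *call,
					 struct sk_buff *skb)
	{
		/* ... extract the payload from skb here ... */

		rxrpc_kernel_data_consumed(call, skb);	/* update ACK state */
		rxrpc_kernel_free_skb(skb);		/* then dispose of it */
	}
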
diff --git a/Documentation/power/basic-pm-debugging.txt b/Documentation/power/basic-pm-debugging.txt
index b96098ccfe69..708f87f78a75 100644
--- a/Documentation/power/basic-pm-debugging.txt
+++ b/Documentation/power/basic-pm-debugging.txt
@@ -164,7 +164,32 @@ load n/2 modules more and try again.
 Again, if you find the offending module(s), it(they) must be unloaded every time
 before hibernation, and please report the problem with it(them).
 
-c) Advanced debugging
+c) Using the "test_resume" hibernation option
+
+/sys/power/disk generally tells the kernel what to do after creating a
+hibernation image. One of the available options is "test_resume" which
+causes the just created image to be used for immediate restoration. Namely,
+after doing:
+
+# echo test_resume > /sys/power/disk
+# echo disk > /sys/power/state
+
+a hibernation image will be created and a resume from it will be triggered
+immediately without involving the platform firmware in any way.
+
+That test can be used to check if failures to resume from hibernation are
+related to bad interactions with the platform firmware. That is, if the above
+works every time, but resume from actual hibernation does not work or is
+unreliable, the platform firmware may be responsible for the failures.
+
+On architectures and platforms that support using different kernels to restore
+hibernation images (that is, the kernel used to read the image from storage and
+load it into memory is different from the one included in the image) or support
+kernel address space randomization, it also can be used to check if failures
+to resume may be related to the differences between the restore and image
+kernels.
+
+d) Advanced debugging
 
 In case that hibernation does not work on your system even in the minimal
 configuration and compiling more drivers as modules is not practical or some
diff --git a/Documentation/power/interface.txt b/Documentation/power/interface.txt
index f1f0f59a7c47..974916ff6608 100644
--- a/Documentation/power/interface.txt
+++ b/Documentation/power/interface.txt
@@ -1,75 +1,76 @@
-Power Management Interface
+Power Management Interface for System Sleep
 
-
-The power management subsystem provides a unified sysfs interface to
-userspace, regardless of what architecture or platform one is
-running. The interface exists in /sys/power/ directory (assuming sysfs
-is mounted at /sys).
-
-/sys/power/state controls system power state. Reading from this file
-returns what states are supported, which is hard-coded to 'freeze',
-'standby' (Power-On Suspend), 'mem' (Suspend-to-RAM), and 'disk'
-(Suspend-to-Disk).
+Copyright (c) 2016 Intel Corp., Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+
+The power management subsystem provides userspace with a unified sysfs interface
+for system sleep regardless of the underlying system architecture or platform.
+The interface is located in the /sys/power/ directory (assuming that sysfs is
+mounted at /sys).
+
+/sys/power/state is the system sleep state control file.
+
+Reading from it returns a list of supported sleep states, encoded as:
 
-Writing to this file one of those strings causes the system to
-transition into that state. Please see the file
-Documentation/power/states.txt for a description of each of those
-states.
+'freeze' (Suspend-to-Idle)
+'standby' (Power-On Suspend)
+'mem' (Suspend-to-RAM)
+'disk' (Suspend-to-Disk)
 
-
-/sys/power/disk controls the operating mode of the suspend-to-disk
-mechanism. Suspend-to-disk can be handled in several ways. We have a
-few options for putting the system to sleep - using the platform driver
-(e.g. ACPI or other suspend_ops), powering off the system or rebooting the
-system (for testing).
-
-Additionally, /sys/power/disk can be used to turn on one of the two testing
-modes of the suspend-to-disk mechanism: 'testproc' or 'test'. If the
-suspend-to-disk mechanism is in the 'testproc' mode, writing 'disk' to
-/sys/power/state will cause the kernel to disable nonboot CPUs and freeze
-tasks, wait for 5 seconds, unfreeze tasks and enable nonboot CPUs. If it is
-in the 'test' mode, writing 'disk' to /sys/power/state will cause the kernel
-to disable nonboot CPUs and freeze tasks, shrink memory, suspend devices, wait
-for 5 seconds, resume devices, unfreeze tasks and enable nonboot CPUs. Then,
-we are able to look in the log messages and work out, for example, which code
-is being slow and which device drivers are misbehaving.
-
-Reading from this file will display all supported modes and the currently
-selected one in brackets, for example
-
-	[shutdown] reboot test testproc
-
-Writing to this file will accept one of
-
-	'platform' (only if the platform supports it)
-	'shutdown'
-	'reboot'
-	'testproc'
-	'test'
+Suspend-to-Idle is always supported. Suspend-to-Disk is always supported
+too as long the kernel has been configured to support hibernation at all
+(ie. CONFIG_HIBERNATION is set in the kernel configuration file). Support
+for Suspend-to-RAM and Power-On Suspend depends on the capabilities of the
+platform.
+
+If one of the strings listed in /sys/power/state is written to it, the system
+will attempt to transition into the corresponding sleep state. Refer to
+Documentation/power/states.txt for a description of each of those states.
+
+/sys/power/disk controls the operating mode of hibernation (Suspend-to-Disk).
+Specifically, it tells the kernel what to do after creating a hibernation image.
+
+Reading from it returns a list of supported options encoded as:
+
+'platform' (put the system into sleep using a platform-provided method)
+'shutdown' (shut the system down)
+'reboot' (reboot the system)
+'suspend' (trigger a Suspend-to-RAM transition)
+'test_resume' (resume-after-hibernation test mode)
+
+The currently selected option is printed in square brackets.
+
+The 'platform' option is only available if the platform provides a special
+mechanism to put the system to sleep after creating a hibernation image (ACPI
+does that, for example). The 'suspend' option is available if Suspend-to-RAM
+is supported. Refer to Documentation/power/basic_pm_debugging.txt for the
+description of the 'test_resume' option.
+
+To select an option, write the string representing it to /sys/power/disk.
 
-/sys/power/image_size controls the size of the image created by
-the suspend-to-disk mechanism. It can be written a string
-representing a non-negative integer that will be used as an upper
-limit of the image size, in bytes. The suspend-to-disk mechanism will
-do its best to ensure the image size will not exceed that number. However,
-if this turns out to be impossible, it will try to suspend anyway using the
-smallest image possible. In particular, if "0" is written to this file, the
-suspend image will be as small as possible.
-
-Reading from this file will display the current image size limit, which
-is set to 2/5 of available RAM by default.
-
-/sys/power/pm_trace controls the code which saves the last PM event point in
-the RTC across reboots, so that you can debug a machine that just hangs
-during suspend (or more commonly, during resume). Namely, the RTC is only
-used to save the last PM event point if this file contains '1'. Initially it
-contains '0' which may be changed to '1' by writing a string representing a
-nonzero integer into it.
+/sys/power/image_size controls the size of hibernation images.
+
+It can be written a string representing a non-negative integer that will be
+used as a best-effort upper limit of the image size, in bytes. The hibernation
+core will do its best to ensure that the image size will not exceed that number.
+However, if that turns out to be impossible to achieve, a hibernation image will
+still be created and its size will be as small as possible. In particular,
+writing '0' to this file will enforce hibernation images to be as small as
+possible.
+
+Reading from this file returns the current image size limit, which is set to
+around 2/5 of available RAM by default.
+
+/sys/power/pm_trace controls the PM trace mechanism saving the last suspend
+or resume event point in the RTC across reboots.
+
+It helps to debug hard lockups or reboots due to device driver failures that
+occur during system suspend or resume (which is more common) more effectively.
 
-To use this debugging feature you should attempt to suspend the machine, then
-reboot it and run
-
-	dmesg -s 1000000 | grep 'hash matches'
-
-CAUTION: Using it will cause your machine's real-time (CMOS) clock to be
-set to a random invalid time after a resume.
+If /sys/power/pm_trace contains '1', the fingerprint of each suspend/resume
+event point in turn will be stored in the RTC memory (overwriting the actual
+RTC information), so it will survive a system crash if one occurs right after
+storing it and it can be used later to identify the driver that caused the crash
+to happen (see Documentation/power/s2ram.txt for more information).
+
+Initially it contains '0' which may be changed to '1' by writing a string
+representing a nonzero integer into it.
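
In practice the writes described above are one-liners from a shell; from C
they reduce to the sketch below (paths as documented, error handling minimal,
and the choice of "mem" merely an example of a state read back from
/sys/power/state):

	#include <stdio.h>

	static int write_sysfs(const char *path, const char *val)
	{
		FILE *f = fopen(path, "w");

		if (!f)
			return -1;
		fputs(val, f);
		return fclose(f);	/* the write reaches the kernel here */
	}

	int main(void)
	{
		/* Request Suspend-to-RAM, one of the documented states. */
		return write_sysfs("/sys/power/state", "mem") ? 1 : 0;
	}
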
diff --git a/Documentation/powerpc/transactional_memory.txt b/Documentation/powerpc/transactional_memory.txt
index ba0a2a4a54ba..e32fdbb4c9a7 100644
--- a/Documentation/powerpc/transactional_memory.txt
+++ b/Documentation/powerpc/transactional_memory.txt
@@ -167,6 +167,8 @@ signal will be rolled back anyway.
167For signals taken in non-TM or suspended mode, we use the 167For signals taken in non-TM or suspended mode, we use the
168normal/non-checkpointed stack pointer. 168normal/non-checkpointed stack pointer.
169 169
170Any transaction initiated inside a sighandler and suspended on return
171from the sighandler to the kernel will get reclaimed and discarded.
170 172
171Failure cause codes used by kernel 173Failure cause codes used by kernel
172================================== 174==================================
diff --git a/Documentation/rapidio/mport_cdev.txt b/Documentation/rapidio/mport_cdev.txt
index 6e491a662461..a53f786ee2e9 100644
--- a/Documentation/rapidio/mport_cdev.txt
+++ b/Documentation/rapidio/mport_cdev.txt
@@ -80,6 +80,10 @@ functionality of their platform when planning to use this driver:
80 80
81III. Module parameters 81III. Module parameters
82 82
83- 'dma_timeout' - DMA transfer completion timeout (in msec, default value 3000).
84 This parameter set a maximum completion wait time for SYNC mode DMA
85 transfer requests and for RIO_WAIT_FOR_ASYNC ioctl requests.
86
83- 'dbg_level' - This parameter allows to control amount of debug information 87- 'dbg_level' - This parameter allows to control amount of debug information
84 generated by this device driver. This parameter is formed by set of 88 generated by this device driver. This parameter is formed by set of
85 bit masks that correspond to the specific functional blocks. 89 bit masks that correspond to the specific functional blocks.
diff --git a/Documentation/sphinx-static/theme_overrides.css b/Documentation/sphinx-static/theme_overrides.css
index 3a2ac4bcfd78..e88461c4c1e6 100644
--- a/Documentation/sphinx-static/theme_overrides.css
+++ b/Documentation/sphinx-static/theme_overrides.css
@@ -42,11 +42,12 @@
     caption a.headerlink { opacity: 0; }
     caption a.headerlink:hover { opacity: 1; }
 
-    /* inline literal: drop the borderbox and red color */
+    /* inline literal: drop the borderbox, padding and red color */
 
     code, .rst-content tt, .rst-content code {
         color: inherit;
         border: none;
+        padding: unset;
         background: inherit;
         font-size: 85%;
     }
diff --git a/MAINTAINERS b/MAINTAINERS
index 20bb1d00098c..644ff65d336d 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -798,6 +798,7 @@ M: Laura Abbott <labbott@redhat.com>
 M:	Sumit Semwal <sumit.semwal@linaro.org>
 L:	devel@driverdev.osuosl.org
 S:	Supported
+F:	Documentation/devicetree/bindings/staging/ion/
 F:	drivers/staging/android/ion
 F:	drivers/staging/android/uapi/ion.h
 F:	drivers/staging/android/uapi/ion_test.h
@@ -881,6 +882,15 @@ S: Supported
 F:	drivers/gpu/drm/arc/
 F:	Documentation/devicetree/bindings/display/snps,arcpgu.txt
 
+ARM ARCHITECTED TIMER DRIVER
+M:	Mark Rutland <mark.rutland@arm.com>
+M:	Marc Zyngier <marc.zyngier@arm.com>
+L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S:	Maintained
+F:	arch/arm/include/asm/arch_timer.h
+F:	arch/arm64/include/asm/arch_timer.h
+F:	drivers/clocksource/arm_arch_timer.c
+
 ARM HDLCD DRM DRIVER
 M:	Liviu Dudau <liviu.dudau@arm.com>
 S:	Supported
@@ -1004,6 +1014,7 @@ N: meson
 ARM/Annapurna Labs ALPINE ARCHITECTURE
 M:	Tsahee Zidenberg <tsahee@annapurnalabs.com>
 M:	Antoine Tenart <antoine.tenart@free-electrons.com>
+L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 F:	arch/arm/mach-alpine/
 F:	arch/arm/boot/dts/alpine*
@@ -1613,7 +1624,8 @@ N: rockchip
 
 ARM/SAMSUNG EXYNOS ARM ARCHITECTURES
 M:	Kukjin Kim <kgene@kernel.org>
-M:	Krzysztof Kozlowski <k.kozlowski@samsung.com>
+M:	Krzysztof Kozlowski <krzk@kernel.org>
+R:	Javier Martinez Canillas <javier@osg.samsung.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:	linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
 S:	Maintained
@@ -1633,7 +1645,6 @@ F: drivers/*/*s3c64xx*
 F:	drivers/*/*s5pv210*
 F:	drivers/memory/samsung/*
 F:	drivers/soc/samsung/*
-F:	drivers/spi/spi-s3c*
 F:	Documentation/arm/Samsung/
 F:	Documentation/devicetree/bindings/arm/samsung/
 F:	Documentation/devicetree/bindings/sram/samsung-sram.txt
@@ -1821,6 +1832,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-stericsson.git
 ARM/UNIPHIER ARCHITECTURE
 M:	Masahiro Yamada <yamada.masahiro@socionext.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/masahiroy/linux-uniphier.git
 S:	Maintained
 F:	arch/arm/boot/dts/uniphier*
 F:	arch/arm/include/asm/hardware/cache-uniphier.h
@@ -2474,7 +2486,7 @@ F: include/net/bluetooth/
 BONDING DRIVER
 M:	Jay Vosburgh <j.vosburgh@gmail.com>
 M:	Veaceslav Falico <vfalico@gmail.com>
-M:	Andy Gospodarek <gospo@cumulusnetworks.com>
+M:	Andy Gospodarek <andy@greyhouse.net>
 L:	netdev@vger.kernel.org
 W:	http://sourceforge.net/projects/bonding/
 S:	Supported
@@ -3237,7 +3249,7 @@ F: kernel/cpuset.c
 CONTROL GROUP - MEMORY RESOURCE CONTROLLER (MEMCG)
 M:	Johannes Weiner <hannes@cmpxchg.org>
 M:	Michal Hocko <mhocko@kernel.org>
-M:	Vladimir Davydov <vdavydov@virtuozzo.com>
+M:	Vladimir Davydov <vdavydov.dev@gmail.com>
 L:	cgroups@vger.kernel.org
 L:	linux-mm@kvack.org
 S:	Maintained
@@ -3258,7 +3270,7 @@ S: Maintained
 F:	drivers/net/wan/cosa*
 
 CPMAC ETHERNET DRIVER
-M:	Florian Fainelli <florian@openwrt.org>
+M:	Florian Fainelli <f.fainelli@gmail.com>
 L:	netdev@vger.kernel.org
 S:	Maintained
 F:	drivers/net/ethernet/ti/cpmac.c
@@ -4524,6 +4536,12 @@ L: linux-edac@vger.kernel.org
 S:	Maintained
 F:	drivers/edac/sb_edac.c
 
+EDAC-SKYLAKE
+M:	Tony Luck <tony.luck@intel.com>
+L:	linux-edac@vger.kernel.org
+S:	Maintained
+F:	drivers/edac/skx_edac.c
+
 EDAC-XGENE
 APPLIED MICRO (APM) X-GENE SOC EDAC
 M:	Loc Ho <lho@apm.com>
@@ -7448,7 +7466,8 @@ F: Documentation/devicetree/bindings/sound/max9860.txt
 F:	sound/soc/codecs/max9860.*
 
 MAXIM MUIC CHARGER DRIVERS FOR EXYNOS BASED BOARDS
-M:	Krzysztof Kozlowski <k.kozlowski@samsung.com>
+M:	Krzysztof Kozlowski <krzk@kernel.org>
+M:	Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
 L:	linux-pm@vger.kernel.org
 S:	Supported
 F:	drivers/power/max14577_charger.c
@@ -7464,7 +7483,8 @@ F: include/dt-bindings/*/*max77802.h
 
 MAXIM PMIC AND MUIC DRIVERS FOR EXYNOS BASED BOARDS
 M:	Chanwoo Choi <cw00.choi@samsung.com>
-M:	Krzysztof Kozlowski <k.kozlowski@samsung.com>
+M:	Krzysztof Kozlowski <krzk@kernel.org>
+M:	Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
 L:	linux-kernel@vger.kernel.org
 S:	Supported
 F:	drivers/*/max14577*.c
@@ -7654,7 +7674,7 @@ L: linux-rdma@vger.kernel.org
 S:	Supported
 W:	https://github.com/SoftRoCE/rxe-dev/wiki/rxe-dev:-Home
 Q:	http://patchwork.kernel.org/project/linux-rdma/list/
-F:	drivers/infiniband/hw/rxe/
+F:	drivers/infiniband/sw/rxe/
 F:	include/uapi/rdma/rdma_user_rxe.h
 
 MEMBARRIER SUPPORT
@@ -9230,7 +9250,7 @@ F: drivers/pinctrl/sh-pfc/
 
 PIN CONTROLLER - SAMSUNG
 M:	Tomasz Figa <tomasz.figa@gmail.com>
-M:	Krzysztof Kozlowski <k.kozlowski@samsung.com>
+M:	Krzysztof Kozlowski <krzk@kernel.org>
 M:	Sylwester Nawrocki <s.nawrocki@samsung.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:	linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
@@ -10163,7 +10183,7 @@ S: Maintained
 F:	drivers/platform/x86/samsung-laptop.c
 
 SAMSUNG AUDIO (ASoC) DRIVERS
-M:	Krzysztof Kozlowski <k.kozlowski@samsung.com>
+M:	Krzysztof Kozlowski <krzk@kernel.org>
 M:	Sangbeom Kim <sbkim73@samsung.com>
 M:	Sylwester Nawrocki <s.nawrocki@samsung.com>
 L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
@@ -10178,7 +10198,8 @@ F: drivers/video/fbdev/s3c-fb.c
 
 SAMSUNG MULTIFUNCTION PMIC DEVICE DRIVERS
 M:	Sangbeom Kim <sbkim73@samsung.com>
-M:	Krzysztof Kozlowski <k.kozlowski@samsung.com>
+M:	Krzysztof Kozlowski <krzk@kernel.org>
+M:	Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
 L:	linux-kernel@vger.kernel.org
 L:	linux-samsung-soc@vger.kernel.org
 S:	Supported
@@ -10237,6 +10258,17 @@ S: Supported
 L:	linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
 F:	drivers/clk/samsung/
 
+SAMSUNG SPI DRIVERS
+M:	Kukjin Kim <kgene@kernel.org>
+M:	Krzysztof Kozlowski <krzk@kernel.org>
+M:	Andi Shyti <andi.shyti@samsung.com>
+L:	linux-spi@vger.kernel.org
+L:	linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
+S:	Maintained
+F:	Documentation/devicetree/bindings/spi/spi-samsung.txt
+F:	drivers/spi/spi-s3c*
+F:	include/linux/platform_data/spi-s3c64xx.h
+
 SAMSUNG SXGBE DRIVERS
 M:	Byungho An <bh74.an@samsung.com>
 M:	Girish K S <ks.giri@samsung.com>
@@ -11216,12 +11248,8 @@ S: Odd Fixes
 F:	drivers/staging/vt665?/
 
 STAGING - WILC1000 WIFI DRIVER
-M:	Johnny Kim <johnny.kim@atmel.com>
-M:	Austin Shin <austin.shin@atmel.com>
-M:	Chris Park <chris.park@atmel.com>
-M:	Tony Cho <tony.cho@atmel.com>
-M:	Glen Lee <glen.lee@atmel.com>
-M:	Leo Kim <leo.kim@atmel.com>
+M:	Aditya Shankar <aditya.shankar@microchip.com>
+M:	Ganesh Krishna <ganesh.krishna@microchip.com>
 L:	linux-wireless@vger.kernel.org
 S:	Supported
 F:	drivers/staging/wilc1000/
diff --git a/Makefile b/Makefile
index 70de1448c571..74e22c2f408b 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 8
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc7
 NAME = Psychotic Stoned Sheep
 
 # *DOCUMENTATION*
@@ -635,13 +635,6 @@ endif
 # Tell gcc to never replace conditional load with a non-conditional one
 KBUILD_CFLAGS	+= $(call cc-option,--param=allow-store-data-races=0)
 
-PHONY += gcc-plugins
-gcc-plugins: scripts_basic
-ifdef CONFIG_GCC_PLUGINS
-	$(Q)$(MAKE) $(build)=scripts/gcc-plugins
-endif
-	@:
-
 include scripts/Makefile.gcc-plugins
 
 ifdef CONFIG_READABLE_ASM
diff --git a/arch/Kconfig b/arch/Kconfig
index bd8056b5b246..fd6e9712af81 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -336,17 +336,6 @@ config HAVE_ARCH_SECCOMP_FILTER
336 results in the system call being skipped immediately. 336 results in the system call being skipped immediately.
337 - seccomp syscall wired up 337 - seccomp syscall wired up
338 338
339 For best performance, an arch should use seccomp_phase1 and
340 seccomp_phase2 directly. It should call seccomp_phase1 for all
341 syscalls if TIF_SECCOMP is set, but seccomp_phase1 does not
342 need to be called from a ptrace-safe context. It must then
343 call seccomp_phase2 if seccomp_phase1 returns anything other
344 than SECCOMP_PHASE1_OK or SECCOMP_PHASE1_SKIP.
345
346 As an additional optimization, an arch may provide seccomp_data
347 directly to seccomp_phase1; this avoids multiple calls
348 to the syscall_xyz helpers for every syscall.
349
350config SECCOMP_FILTER 339config SECCOMP_FILTER
351 def_bool y 340 def_bool y
352 depends on HAVE_ARCH_SECCOMP_FILTER && SECCOMP && NET 341 depends on HAVE_ARCH_SECCOMP_FILTER && SECCOMP && NET
@@ -461,6 +450,15 @@ config CC_STACKPROTECTOR_STRONG
461 450
462endchoice 451endchoice
463 452
453config HAVE_ARCH_WITHIN_STACK_FRAMES
454 bool
455 help
456 An architecture should select this if it can walk the kernel stack
457 frames to determine if an object is part of either the arguments
458 or local variables (i.e. that it excludes saved return addresses,
459 and similar) by implementing an inline arch_within_stack_frames(),
460 which is used by CONFIG_HARDENED_USERCOPY.
461
464config HAVE_CONTEXT_TRACKING 462config HAVE_CONTEXT_TRACKING
465 bool 463 bool
466 help 464 help
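
The new HAVE_ARCH_WITHIN_STACK_FRAMES symbol asks an architecture to supply
an inline arch_within_stack_frames() for CONFIG_HARDENED_USERCOPY. A
simplified sketch of what such a walker might look like on a frame-pointer
ABI (hypothetical; real per-arch implementations differ in frame layout and
return convention):

	static inline int arch_within_stack_frames(const void * const stack,
						   const void * const stackend,
						   const void *obj, unsigned long len)
	{
		const void *oldframe = __builtin_frame_address(1);
		const void *frame = oldframe ? __builtin_frame_address(2) : NULL;

		/* Walk saved frame pointers from the current frame outward. */
		while (stack <= frame && frame < stackend) {
			if (obj + len <= frame)
				/* 1: object fits inside one frame's locals/args;
				 * -1: it overlaps the saved fp/return-address pair. */
				return obj >= oldframe + 2 * sizeof(void *) ? 1 : -1;
			oldframe = frame;
			frame = *(const void * const *)frame;
		}
		return -1;
	}
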
diff --git a/arch/alpha/include/asm/uaccess.h b/arch/alpha/include/asm/uaccess.h
index c419b43c461d..466e42e96bfa 100644
--- a/arch/alpha/include/asm/uaccess.h
+++ b/arch/alpha/include/asm/uaccess.h
@@ -371,14 +371,6 @@ __copy_tofrom_user_nocheck(void *to, const void *from, long len)
371 return __cu_len; 371 return __cu_len;
372} 372}
373 373
374extern inline long
375__copy_tofrom_user(void *to, const void *from, long len, const void __user *validate)
376{
377 if (__access_ok((unsigned long)validate, len, get_fs()))
378 len = __copy_tofrom_user_nocheck(to, from, len);
379 return len;
380}
381
382#define __copy_to_user(to, from, n) \ 374#define __copy_to_user(to, from, n) \
383({ \ 375({ \
384 __chk_user_ptr(to); \ 376 __chk_user_ptr(to); \
@@ -393,17 +385,22 @@ __copy_tofrom_user(void *to, const void *from, long len, const void __user *vali
393#define __copy_to_user_inatomic __copy_to_user 385#define __copy_to_user_inatomic __copy_to_user
394#define __copy_from_user_inatomic __copy_from_user 386#define __copy_from_user_inatomic __copy_from_user
395 387
396
397extern inline long 388extern inline long
398copy_to_user(void __user *to, const void *from, long n) 389copy_to_user(void __user *to, const void *from, long n)
399{ 390{
400 return __copy_tofrom_user((__force void *)to, from, n, to); 391 if (likely(__access_ok((unsigned long)to, n, get_fs())))
392 n = __copy_tofrom_user_nocheck((__force void *)to, from, n);
393 return n;
401} 394}
402 395
403extern inline long 396extern inline long
404copy_from_user(void *to, const void __user *from, long n) 397copy_from_user(void *to, const void __user *from, long n)
405{ 398{
406 return __copy_tofrom_user(to, (__force void *)from, n, from); 399 if (likely(__access_ok((unsigned long)from, n, get_fs())))
400 n = __copy_tofrom_user_nocheck(to, (__force void *)from, n);
401 else
402 memset(to, 0, n);
403 return n;
407} 404}
408 405
409extern void __do_clear_user(void); 406extern void __do_clear_user(void);
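
The alpha rework above does more than inline the old __copy_tofrom_user()
wrapper: when __access_ok() rejects the user pointer, copy_from_user() now
memset()s the destination before returning the full length. This matches the
kernel-wide hardening rule that a failed copy_from_user() must not leave
uninitialized kernel memory in the caller's buffer. Caller-side sketch
(uptr is a hypothetical __user pointer):

	char buf[64];

	/* If __access_ok() rejects uptr outright, buf[] comes back fully
	 * zeroed instead of still holding stale kernel stack contents. */
	if (copy_from_user(buf, uptr, sizeof(buf)))
		return -EFAULT;
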
diff --git a/arch/arc/include/asm/entry.h b/arch/arc/include/asm/entry.h
index ad7860c5ce15..51597f344a62 100644
--- a/arch/arc/include/asm/entry.h
+++ b/arch/arc/include/asm/entry.h
@@ -142,7 +142,7 @@
142 142
143#ifdef CONFIG_ARC_CURR_IN_REG 143#ifdef CONFIG_ARC_CURR_IN_REG
144 ; Retrieve orig r25 and save it with rest of callee_regs 144 ; Retrieve orig r25 and save it with rest of callee_regs
145 ld.as r12, [r12, PT_user_r25] 145 ld r12, [r12, PT_user_r25]
146 PUSH r12 146 PUSH r12
147#else 147#else
148 PUSH r25 148 PUSH r25
@@ -198,7 +198,7 @@
198 198
199 ; SP is back to start of pt_regs 199 ; SP is back to start of pt_regs
200#ifdef CONFIG_ARC_CURR_IN_REG 200#ifdef CONFIG_ARC_CURR_IN_REG
201 st.as r12, [sp, PT_user_r25] 201 st r12, [sp, PT_user_r25]
202#endif 202#endif
203.endm 203.endm
204 204
diff --git a/arch/arc/include/asm/irqflags-compact.h b/arch/arc/include/asm/irqflags-compact.h
index c1d36458bfb7..4c6eed80cd8b 100644
--- a/arch/arc/include/asm/irqflags-compact.h
+++ b/arch/arc/include/asm/irqflags-compact.h
@@ -188,10 +188,10 @@ static inline int arch_irqs_disabled(void)
188.endm 188.endm
189 189
190.macro IRQ_ENABLE scratch 190.macro IRQ_ENABLE scratch
191 TRACE_ASM_IRQ_ENABLE
191 lr \scratch, [status32] 192 lr \scratch, [status32]
192 or \scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK) 193 or \scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)
193 flag \scratch 194 flag \scratch
194 TRACE_ASM_IRQ_ENABLE
195.endm 195.endm
196 196
197#endif /* __ASSEMBLY__ */ 197#endif /* __ASSEMBLY__ */
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index 0f92d97432a2..89eeb3720051 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -280,7 +280,7 @@ static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
280 280
281#define pte_page(pte) pfn_to_page(pte_pfn(pte)) 281#define pte_page(pte) pfn_to_page(pte_pfn(pte))
282#define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot) 282#define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot)
283#define pfn_pte(pfn, prot) (__pte(((pte_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))) 283#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
284 284
285/* Don't use virt_to_pfn for macros below: could cause truncations for PAE40*/ 285/* Don't use virt_to_pfn for macros below: could cause truncations for PAE40*/
286#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT) 286#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
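
Dropping the (pte_t) cast is presumably what lets this build when pte_t is
not a plain integer: __pte() already performs the conversion, and under the
usual STRICT_MM_TYPECHECKS pattern (assumed below) an arithmetic cast to a
struct type is a compile error:

	#ifdef STRICT_MM_TYPECHECKS
	/* pte_t is a one-member struct; "(pte_t)(pfn)" cannot compile and
	 * values must be built through the __pte() initializer instead. */
	typedef struct { unsigned long pte; } pte_t;
	#define __pte(x)	((pte_t) { (x) })
	#else
	/* pte_t is a bare integer; the old cast merely happened to work. */
	typedef unsigned long pte_t;
	#define __pte(x)	(x)
	#endif
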
diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h
index a78d5670884f..41faf17cd28d 100644
--- a/arch/arc/include/asm/uaccess.h
+++ b/arch/arc/include/asm/uaccess.h
@@ -83,7 +83,10 @@
83 "2: ;nop\n" \ 83 "2: ;nop\n" \
84 " .section .fixup, \"ax\"\n" \ 84 " .section .fixup, \"ax\"\n" \
85 " .align 4\n" \ 85 " .align 4\n" \
86 "3: mov %0, %3\n" \ 86 "3: # return -EFAULT\n" \
87 " mov %0, %3\n" \
88 " # zero out dst ptr\n" \
89 " mov %1, 0\n" \
87 " j 2b\n" \ 90 " j 2b\n" \
88 " .previous\n" \ 91 " .previous\n" \
89 " .section __ex_table, \"a\"\n" \ 92 " .section __ex_table, \"a\"\n" \
@@ -101,7 +104,11 @@
101 "2: ;nop\n" \ 104 "2: ;nop\n" \
102 " .section .fixup, \"ax\"\n" \ 105 " .section .fixup, \"ax\"\n" \
103 " .align 4\n" \ 106 " .align 4\n" \
104 "3: mov %0, %3\n" \ 107 "3: # return -EFAULT\n" \
108 " mov %0, %3\n" \
109 " # zero out dst ptr\n" \
110 " mov %1, 0\n" \
111 " mov %R1, 0\n" \
105 " j 2b\n" \ 112 " j 2b\n" \
106 " .previous\n" \ 113 " .previous\n" \
107 " .section __ex_table, \"a\"\n" \ 114 " .section __ex_table, \"a\"\n" \
diff --git a/arch/arc/include/uapi/asm/elf.h b/arch/arc/include/uapi/asm/elf.h
index 0f99ac8fcbb2..0037a587320d 100644
--- a/arch/arc/include/uapi/asm/elf.h
+++ b/arch/arc/include/uapi/asm/elf.h
@@ -13,8 +13,15 @@
13 13
14/* Machine specific ELF Hdr flags */ 14/* Machine specific ELF Hdr flags */
15#define EF_ARC_OSABI_MSK 0x00000f00 15#define EF_ARC_OSABI_MSK 0x00000f00
16#define EF_ARC_OSABI_ORIG 0x00000000 /* MUST be zero for back-compat */ 16
17#define EF_ARC_OSABI_CURRENT 0x00000300 /* v3 (no legacy syscalls) */ 17#define EF_ARC_OSABI_V3 0x00000300 /* v3 (no legacy syscalls) */
18#define EF_ARC_OSABI_V4 0x00000400 /* v4 (64bit data any reg align) */
19
20#if __GNUC__ < 6
21#define EF_ARC_OSABI_CURRENT EF_ARC_OSABI_V3
22#else
23#define EF_ARC_OSABI_CURRENT EF_ARC_OSABI_V4
24#endif
18 25
19typedef unsigned long elf_greg_t; 26typedef unsigned long elf_greg_t;
20typedef unsigned long elf_fpregset_t; 27typedef unsigned long elf_fpregset_t;
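
With the OSABI version held in bits 8-11 of e_flags, decoding it is a mask
and shift; an illustrative helper consistent with these definitions (and
with the "EF_ARC_OSABI_CURRENT >> 8" use in setup.c further down):

	/* Illustrative only: v3 yields 3, v4 yields 4. */
	static inline unsigned int arc_elf_osabi(const struct elf32_hdr *eh)
	{
		return (eh->e_flags & EF_ARC_OSABI_MSK) >> 8;
	}
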
diff --git a/arch/arc/kernel/arcksyms.c b/arch/arc/kernel/arcksyms.c
index 4d9e77724bed..000dd041ab42 100644
--- a/arch/arc/kernel/arcksyms.c
+++ b/arch/arc/kernel/arcksyms.c
@@ -28,6 +28,7 @@ extern void __muldf3(void);
28extern void __divdf3(void); 28extern void __divdf3(void);
29extern void __floatunsidf(void); 29extern void __floatunsidf(void);
30extern void __floatunsisf(void); 30extern void __floatunsisf(void);
31extern void __udivdi3(void);
31 32
32EXPORT_SYMBOL(__ashldi3); 33EXPORT_SYMBOL(__ashldi3);
33EXPORT_SYMBOL(__ashrdi3); 34EXPORT_SYMBOL(__ashrdi3);
@@ -45,6 +46,7 @@ EXPORT_SYMBOL(__muldf3);
45EXPORT_SYMBOL(__divdf3); 46EXPORT_SYMBOL(__divdf3);
46EXPORT_SYMBOL(__floatunsidf); 47EXPORT_SYMBOL(__floatunsidf);
47EXPORT_SYMBOL(__floatunsisf); 48EXPORT_SYMBOL(__floatunsisf);
49EXPORT_SYMBOL(__udivdi3);
48 50
49/* ARC optimised assembler routines */ 51/* ARC optimised assembler routines */
50EXPORT_SYMBOL(memset); 52EXPORT_SYMBOL(memset);
diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c
index b5db9e7fd649..be1972bd2729 100644
--- a/arch/arc/kernel/process.c
+++ b/arch/arc/kernel/process.c
@@ -199,7 +199,7 @@ int elf_check_arch(const struct elf32_hdr *x)
199 } 199 }
200 200
201 eflags = x->e_flags; 201 eflags = x->e_flags;
202 if ((eflags & EF_ARC_OSABI_MSK) < EF_ARC_OSABI_CURRENT) { 202 if ((eflags & EF_ARC_OSABI_MSK) != EF_ARC_OSABI_CURRENT) {
203 pr_err("ABI mismatch - you need newer toolchain\n"); 203 pr_err("ABI mismatch - you need newer toolchain\n");
204 force_sigsegv(SIGSEGV, current); 204 force_sigsegv(SIGSEGV, current);
205 return 0; 205 return 0;
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index a946400a86d0..f52a0d0dc462 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -291,8 +291,10 @@ static char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len)
291 cpu->dccm.base_addr, TO_KB(cpu->dccm.sz), 291 cpu->dccm.base_addr, TO_KB(cpu->dccm.sz),
292 cpu->iccm.base_addr, TO_KB(cpu->iccm.sz)); 292 cpu->iccm.base_addr, TO_KB(cpu->iccm.sz));
293 293
294 n += scnprintf(buf + n, len - n, 294 n += scnprintf(buf + n, len - n, "OS ABI [v%d]\t: %s\n",
295 "OS ABI [v3]\t: no-legacy-syscalls\n"); 295 EF_ARC_OSABI_CURRENT >> 8,
296 EF_ARC_OSABI_CURRENT == EF_ARC_OSABI_V3 ?
297 "no-legacy-syscalls" : "64-bit data any register aligned");
296 298
297 return buf; 299 return buf;
298} 300}
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index 5a294b2c3cb3..0b10efe3a6a7 100644
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -921,6 +921,15 @@ void arc_cache_init(void)
921 921
922 printk(arc_cache_mumbojumbo(0, str, sizeof(str))); 922 printk(arc_cache_mumbojumbo(0, str, sizeof(str)));
923 923
924 /*
925 * Only the master CPU needs to execute the rest of this function:
926 * - Assume SMP, so all cores have the same cache config and any
927 * geometry checks will be the same for all
928 * - IOC setup / dma callbacks only need to be setup once
929 */
930 if (cpu)
931 return;
932
924 if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) { 933 if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) {
925 struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache; 934 struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;
926 935
diff --git a/arch/arc/mm/highmem.c b/arch/arc/mm/highmem.c
index 04f83322c9fd..77ff64a874a1 100644
--- a/arch/arc/mm/highmem.c
+++ b/arch/arc/mm/highmem.c
@@ -61,6 +61,7 @@ void *kmap(struct page *page)
61 61
62 return kmap_high(page); 62 return kmap_high(page);
63} 63}
64EXPORT_SYMBOL(kmap);
64 65
65void *kmap_atomic(struct page *page) 66void *kmap_atomic(struct page *page)
66{ 67{
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 2d601d769a1c..a9c4e48bb7ec 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -35,6 +35,7 @@ config ARM
35 select HARDIRQS_SW_RESEND 35 select HARDIRQS_SW_RESEND
36 select HAVE_ARCH_AUDITSYSCALL if (AEABI && !OABI_COMPAT) 36 select HAVE_ARCH_AUDITSYSCALL if (AEABI && !OABI_COMPAT)
37 select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6 37 select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6
38 select HAVE_ARCH_HARDENED_USERCOPY
38 select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU 39 select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU
39 select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32 && MMU 40 select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32 && MMU
40 select HAVE_ARCH_MMAP_RND_BITS if MMU 41 select HAVE_ARCH_MMAP_RND_BITS if MMU
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 6e30b8bc21b9..6be9ee148b78 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -259,12 +259,14 @@ machdirs := $(patsubst %,arch/arm/mach-%/,$(machine-y))
259platdirs := $(patsubst %,arch/arm/plat-%/,$(sort $(plat-y))) 259platdirs := $(patsubst %,arch/arm/plat-%/,$(sort $(plat-y)))
260 260
261ifneq ($(CONFIG_ARCH_MULTIPLATFORM),y) 261ifneq ($(CONFIG_ARCH_MULTIPLATFORM),y)
262ifneq ($(CONFIG_ARM_SINGLE_ARMV7M),y)
262ifeq ($(KBUILD_SRC),) 263ifeq ($(KBUILD_SRC),)
263KBUILD_CPPFLAGS += $(patsubst %,-I%include,$(machdirs) $(platdirs)) 264KBUILD_CPPFLAGS += $(patsubst %,-I%include,$(machdirs) $(platdirs))
264else 265else
265KBUILD_CPPFLAGS += $(patsubst %,-I$(srctree)/%include,$(machdirs) $(platdirs)) 266KBUILD_CPPFLAGS += $(patsubst %,-I$(srctree)/%include,$(machdirs) $(platdirs))
266endif 267endif
267endif 268endif
269endif
268 270
269export TEXT_OFFSET GZFLAGS MMUEXT 271export TEXT_OFFSET GZFLAGS MMUEXT
270 272
diff --git a/arch/arm/boot/dts/am335x-baltos.dtsi b/arch/arm/boot/dts/am335x-baltos.dtsi
index c8609d8d2c55..b689172632ef 100644
--- a/arch/arm/boot/dts/am335x-baltos.dtsi
+++ b/arch/arm/boot/dts/am335x-baltos.dtsi
@@ -226,7 +226,7 @@
226 226
227 #address-cells = <1>; 227 #address-cells = <1>;
228 #size-cells = <1>; 228 #size-cells = <1>;
229 elm_id = <&elm>; 229 ti,elm-id = <&elm>;
230 }; 230 };
231}; 231};
232 232
diff --git a/arch/arm/boot/dts/am335x-igep0033.dtsi b/arch/arm/boot/dts/am335x-igep0033.dtsi
index df63484ef9b3..e7d9ca1305fa 100644
--- a/arch/arm/boot/dts/am335x-igep0033.dtsi
+++ b/arch/arm/boot/dts/am335x-igep0033.dtsi
@@ -161,7 +161,7 @@
161 161
162 #address-cells = <1>; 162 #address-cells = <1>;
163 #size-cells = <1>; 163 #size-cells = <1>;
164 elm_id = <&elm>; 164 ti,elm-id = <&elm>;
165 165
166 /* MTD partition table */ 166 /* MTD partition table */
167 partition@0 { 167 partition@0 {
diff --git a/arch/arm/boot/dts/am335x-phycore-som.dtsi b/arch/arm/boot/dts/am335x-phycore-som.dtsi
index 86f773165d5c..1263c9d4cba3 100644
--- a/arch/arm/boot/dts/am335x-phycore-som.dtsi
+++ b/arch/arm/boot/dts/am335x-phycore-som.dtsi
@@ -197,7 +197,7 @@
197 gpmc,wr-access-ns = <30>; 197 gpmc,wr-access-ns = <30>;
198 gpmc,wr-data-mux-bus-ns = <0>; 198 gpmc,wr-data-mux-bus-ns = <0>;
199 199
200 elm_id = <&elm>; 200 ti,elm-id = <&elm>;
201 201
202 #address-cells = <1>; 202 #address-cells = <1>;
203 #size-cells = <1>; 203 #size-cells = <1>;
diff --git a/arch/arm/boot/dts/arm-realview-pbx-a9.dts b/arch/arm/boot/dts/arm-realview-pbx-a9.dts
index db808f92dd79..90d00b407f85 100644
--- a/arch/arm/boot/dts/arm-realview-pbx-a9.dts
+++ b/arch/arm/boot/dts/arm-realview-pbx-a9.dts
@@ -70,13 +70,12 @@
70 * associativity as these may be erroneously set 70 * associativity as these may be erroneously set
71 * up by boot loader(s). 71 * up by boot loader(s).
72 */ 72 */
73 cache-size = <1048576>; // 1MB 73 cache-size = <131072>; // 128KB
74 cache-sets = <4096>; 74 cache-sets = <512>;
75 cache-line-size = <32>; 75 cache-line-size = <32>;
76 arm,parity-disable; 76 arm,parity-disable;
77 arm,tag-latency = <1>; 77 arm,tag-latency = <1 1 1>;
78 arm,data-latency = <1 1>; 78 arm,data-latency = <1 1 1>;
79 arm,dirty-latency = <1>;
80 }; 79 };
81 80
82 scu: scu@1f000000 { 81 scu: scu@1f000000 {
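
The corrected values are self-consistent under sets = size / (line_size x
associativity): 131072 / (32 x 8) = 512, i.e. the node now describes an
8-way, 128 KB outer cache; the old 1 MB / 4096-set pair was equally
self-consistent but described a far larger cache than the board actually has.
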
diff --git a/arch/arm/boot/dts/armada-388-clearfog.dts b/arch/arm/boot/dts/armada-388-clearfog.dts
index 2e0556af6e5e..d3e6bd805006 100644
--- a/arch/arm/boot/dts/armada-388-clearfog.dts
+++ b/arch/arm/boot/dts/armada-388-clearfog.dts
@@ -390,12 +390,12 @@
390 390
391 port@0 { 391 port@0 {
392 reg = <0>; 392 reg = <0>;
393 label = "lan1"; 393 label = "lan5";
394 }; 394 };
395 395
396 port@1 { 396 port@1 {
397 reg = <1>; 397 reg = <1>;
398 label = "lan2"; 398 label = "lan4";
399 }; 399 };
400 400
401 port@2 { 401 port@2 {
@@ -405,12 +405,12 @@
405 405
406 port@3 { 406 port@3 {
407 reg = <3>; 407 reg = <3>;
408 label = "lan4"; 408 label = "lan2";
409 }; 409 };
410 410
411 port@4 { 411 port@4 {
412 reg = <4>; 412 reg = <4>;
413 label = "lan5"; 413 label = "lan1";
414 }; 414 };
415 415
416 port@5 { 416 port@5 {
diff --git a/arch/arm/boot/dts/bcm2835-rpi.dtsi b/arch/arm/boot/dts/bcm2835-rpi.dtsi
index caf2707680c1..e9b47b2bbc33 100644
--- a/arch/arm/boot/dts/bcm2835-rpi.dtsi
+++ b/arch/arm/boot/dts/bcm2835-rpi.dtsi
@@ -2,6 +2,7 @@
2 2
3/ { 3/ {
4 memory { 4 memory {
5 device_type = "memory";
5 reg = <0 0x10000000>; 6 reg = <0 0x10000000>;
6 }; 7 };
7 8
diff --git a/arch/arm/boot/dts/bcm283x.dtsi b/arch/arm/boot/dts/bcm283x.dtsi
index b98252232d20..445624a1a1de 100644
--- a/arch/arm/boot/dts/bcm283x.dtsi
+++ b/arch/arm/boot/dts/bcm283x.dtsi
@@ -2,7 +2,6 @@
2#include <dt-bindings/clock/bcm2835.h> 2#include <dt-bindings/clock/bcm2835.h>
3#include <dt-bindings/clock/bcm2835-aux.h> 3#include <dt-bindings/clock/bcm2835-aux.h>
4#include <dt-bindings/gpio/gpio.h> 4#include <dt-bindings/gpio/gpio.h>
5#include "skeleton.dtsi"
6 5
7/* This include file covers the common peripherals and configuration between 6/* This include file covers the common peripherals and configuration between
8 * bcm2835 and bcm2836 implementations, leaving the CPU configuration to 7 * bcm2835 and bcm2836 implementations, leaving the CPU configuration to
@@ -13,6 +12,8 @@
13 compatible = "brcm,bcm2835"; 12 compatible = "brcm,bcm2835";
14 model = "BCM2835"; 13 model = "BCM2835";
15 interrupt-parent = <&intc>; 14 interrupt-parent = <&intc>;
15 #address-cells = <1>;
16 #size-cells = <1>;
16 17
17 chosen { 18 chosen {
18 bootargs = "earlyprintk console=ttyAMA0"; 19 bootargs = "earlyprintk console=ttyAMA0";
diff --git a/arch/arm/boot/dts/exynos5410-odroidxu.dts b/arch/arm/boot/dts/exynos5410-odroidxu.dts
index d9499310a301..f6d135245a4b 100644
--- a/arch/arm/boot/dts/exynos5410-odroidxu.dts
+++ b/arch/arm/boot/dts/exynos5410-odroidxu.dts
@@ -447,14 +447,11 @@
447 samsung,dw-mshc-ciu-div = <3>; 447 samsung,dw-mshc-ciu-div = <3>;
448 samsung,dw-mshc-sdr-timing = <0 4>; 448 samsung,dw-mshc-sdr-timing = <0 4>;
449 samsung,dw-mshc-ddr-timing = <0 2>; 449 samsung,dw-mshc-ddr-timing = <0 2>;
450 samsung,dw-mshc-hs400-timing = <0 2>;
451 samsung,read-strobe-delay = <90>;
452 pinctrl-names = "default"; 450 pinctrl-names = "default";
453 pinctrl-0 = <&sd0_clk &sd0_cmd &sd0_bus1 &sd0_bus4 &sd0_bus8 &sd0_cd>; 451 pinctrl-0 = <&sd0_clk &sd0_cmd &sd0_bus1 &sd0_bus4 &sd0_bus8 &sd0_cd>;
454 bus-width = <8>; 452 bus-width = <8>;
455 cap-mmc-highspeed; 453 cap-mmc-highspeed;
456 mmc-hs200-1_8v; 454 mmc-hs200-1_8v;
457 mmc-hs400-1_8v;
458 vmmc-supply = <&ldo20_reg>; 455 vmmc-supply = <&ldo20_reg>;
459 vqmmc-supply = <&ldo11_reg>; 456 vqmmc-supply = <&ldo11_reg>;
460}; 457};
diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi
index b620ac884cfd..b13b0b2db881 100644
--- a/arch/arm/boot/dts/imx6qdl.dtsi
+++ b/arch/arm/boot/dts/imx6qdl.dtsi
@@ -243,7 +243,7 @@
243 clocks = <&clks IMX6QDL_CLK_SPDIF_GCLK>, <&clks IMX6QDL_CLK_OSC>, 243 clocks = <&clks IMX6QDL_CLK_SPDIF_GCLK>, <&clks IMX6QDL_CLK_OSC>,
244 <&clks IMX6QDL_CLK_SPDIF>, <&clks IMX6QDL_CLK_ASRC>, 244 <&clks IMX6QDL_CLK_SPDIF>, <&clks IMX6QDL_CLK_ASRC>,
245 <&clks IMX6QDL_CLK_DUMMY>, <&clks IMX6QDL_CLK_ESAI_EXTAL>, 245 <&clks IMX6QDL_CLK_DUMMY>, <&clks IMX6QDL_CLK_ESAI_EXTAL>,
246 <&clks IMX6QDL_CLK_IPG>, <&clks IMX6QDL_CLK_MLB>, 246 <&clks IMX6QDL_CLK_IPG>, <&clks IMX6QDL_CLK_DUMMY>,
247 <&clks IMX6QDL_CLK_DUMMY>, <&clks IMX6QDL_CLK_SPBA>; 247 <&clks IMX6QDL_CLK_DUMMY>, <&clks IMX6QDL_CLK_SPBA>;
248 clock-names = "core", "rxtx0", 248 clock-names = "core", "rxtx0",
249 "rxtx1", "rxtx2", 249 "rxtx1", "rxtx2",
diff --git a/arch/arm/boot/dts/imx6sx-sabreauto.dts b/arch/arm/boot/dts/imx6sx-sabreauto.dts
index 96ea936eeeb0..240a2864d044 100644
--- a/arch/arm/boot/dts/imx6sx-sabreauto.dts
+++ b/arch/arm/boot/dts/imx6sx-sabreauto.dts
@@ -64,7 +64,7 @@
64 cd-gpios = <&gpio7 11 GPIO_ACTIVE_LOW>; 64 cd-gpios = <&gpio7 11 GPIO_ACTIVE_LOW>;
65 no-1-8-v; 65 no-1-8-v;
66 keep-power-in-suspend; 66 keep-power-in-suspend;
67 enable-sdio-wakup; 67 wakeup-source;
68 status = "okay"; 68 status = "okay";
69}; 69};
70 70
diff --git a/arch/arm/boot/dts/imx7d-sdb.dts b/arch/arm/boot/dts/imx7d-sdb.dts
index 95ee268ed510..2f33c463cbce 100644
--- a/arch/arm/boot/dts/imx7d-sdb.dts
+++ b/arch/arm/boot/dts/imx7d-sdb.dts
@@ -131,7 +131,7 @@
131 ti,y-min = /bits/ 16 <0>; 131 ti,y-min = /bits/ 16 <0>;
132 ti,y-max = /bits/ 16 <0>; 132 ti,y-max = /bits/ 16 <0>;
133 ti,pressure-max = /bits/ 16 <0>; 133 ti,pressure-max = /bits/ 16 <0>;
134 ti,x-plat-ohms = /bits/ 16 <400>; 134 ti,x-plate-ohms = /bits/ 16 <400>;
135 wakeup-source; 135 wakeup-source;
136 }; 136 };
137}; 137};
diff --git a/arch/arm/boot/dts/integratorap.dts b/arch/arm/boot/dts/integratorap.dts
index cf06e32ee108..4b34b54e09a1 100644
--- a/arch/arm/boot/dts/integratorap.dts
+++ b/arch/arm/boot/dts/integratorap.dts
@@ -42,7 +42,7 @@
42 }; 42 };
43 43
44 syscon { 44 syscon {
45 compatible = "arm,integrator-ap-syscon"; 45 compatible = "arm,integrator-ap-syscon", "syscon";
46 reg = <0x11000000 0x100>; 46 reg = <0x11000000 0x100>;
47 interrupt-parent = <&pic>; 47 interrupt-parent = <&pic>;
48 /* These are the logical module IRQs */ 48 /* These are the logical module IRQs */
diff --git a/arch/arm/boot/dts/integratorcp.dts b/arch/arm/boot/dts/integratorcp.dts
index d43f15b4f79a..79430fbfec3b 100644
--- a/arch/arm/boot/dts/integratorcp.dts
+++ b/arch/arm/boot/dts/integratorcp.dts
@@ -94,7 +94,7 @@
94 }; 94 };
95 95
96 syscon { 96 syscon {
97 compatible = "arm,integrator-cp-syscon"; 97 compatible = "arm,integrator-cp-syscon", "syscon";
98 reg = <0xcb000000 0x100>; 98 reg = <0xcb000000 0x100>;
99 }; 99 };
100 100
diff --git a/arch/arm/boot/dts/keystone.dtsi b/arch/arm/boot/dts/keystone.dtsi
index 00cb314d5e4d..e23f46d15c80 100644
--- a/arch/arm/boot/dts/keystone.dtsi
+++ b/arch/arm/boot/dts/keystone.dtsi
@@ -70,14 +70,6 @@
70 cpu_on = <0x84000003>; 70 cpu_on = <0x84000003>;
71 }; 71 };
72 72
73 psci {
74 compatible = "arm,psci";
75 method = "smc";
76 cpu_suspend = <0x84000001>;
77 cpu_off = <0x84000002>;
78 cpu_on = <0x84000003>;
79 };
80
81 soc { 73 soc {
82 #address-cells = <1>; 74 #address-cells = <1>;
83 #size-cells = <1>; 75 #size-cells = <1>;
diff --git a/arch/arm/boot/dts/kirkwood-ib62x0.dts b/arch/arm/boot/dts/kirkwood-ib62x0.dts
index ef84d8699a76..5bf62897014c 100644
--- a/arch/arm/boot/dts/kirkwood-ib62x0.dts
+++ b/arch/arm/boot/dts/kirkwood-ib62x0.dts
@@ -113,7 +113,7 @@
113 113
114 partition@e0000 { 114 partition@e0000 {
115 label = "u-boot environment"; 115 label = "u-boot environment";
116 reg = <0xe0000 0x100000>; 116 reg = <0xe0000 0x20000>;
117 }; 117 };
118 118
119 partition@100000 { 119 partition@100000 {
diff --git a/arch/arm/boot/dts/kirkwood-openrd.dtsi b/arch/arm/boot/dts/kirkwood-openrd.dtsi
index e4ecab112601..7175511a92da 100644
--- a/arch/arm/boot/dts/kirkwood-openrd.dtsi
+++ b/arch/arm/boot/dts/kirkwood-openrd.dtsi
@@ -116,6 +116,10 @@
116 }; 116 };
117}; 117};
118 118
119&pciec {
120 status = "okay";
121};
122
119&pcie0 { 123&pcie0 {
120 status = "okay"; 124 status = "okay";
121}; 125};
diff --git a/arch/arm/boot/dts/logicpd-som-lv.dtsi b/arch/arm/boot/dts/logicpd-som-lv.dtsi
index 365f39ff58bb..0ff1c2de95bf 100644
--- a/arch/arm/boot/dts/logicpd-som-lv.dtsi
+++ b/arch/arm/boot/dts/logicpd-som-lv.dtsi
@@ -35,10 +35,15 @@
35 ranges = <0 0 0x00000000 0x1000000>; /* CS0: 16MB for NAND */ 35 ranges = <0 0 0x00000000 0x1000000>; /* CS0: 16MB for NAND */
36 36
37 nand@0,0 { 37 nand@0,0 {
38 linux,mtd-name = "micron,mt29f4g16abbda3w"; 38 compatible = "ti,omap2-nand";
39 reg = <0 0 4>; /* CS0, offset 0, IO size 4 */ 39 reg = <0 0 4>; /* CS0, offset 0, IO size 4 */
40 interrupt-parent = <&gpmc>;
41 interrupts = <0 IRQ_TYPE_NONE>, /* fifoevent */
42 <1 IRQ_TYPE_NONE>; /* termcount */
43 linux,mtd-name = "micron,mt29f4g16abbda3w";
40 nand-bus-width = <16>; 44 nand-bus-width = <16>;
41 ti,nand-ecc-opt = "bch8"; 45 ti,nand-ecc-opt = "bch8";
46 rb-gpios = <&gpmc 0 GPIO_ACTIVE_HIGH>; /* gpmc_wait0 */
42 gpmc,sync-clk-ps = <0>; 47 gpmc,sync-clk-ps = <0>;
43 gpmc,cs-on-ns = <0>; 48 gpmc,cs-on-ns = <0>;
44 gpmc,cs-rd-off-ns = <44>; 49 gpmc,cs-rd-off-ns = <44>;
@@ -54,10 +59,6 @@
54 gpmc,wr-access-ns = <40>; 59 gpmc,wr-access-ns = <40>;
55 gpmc,wr-data-mux-bus-ns = <0>; 60 gpmc,wr-data-mux-bus-ns = <0>;
56 gpmc,device-width = <2>; 61 gpmc,device-width = <2>;
57
58 gpmc,page-burst-access-ns = <5>;
59 gpmc,cycle2cycle-delay-ns = <50>;
60
61 #address-cells = <1>; 62 #address-cells = <1>;
62 #size-cells = <1>; 63 #size-cells = <1>;
63 64
diff --git a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
index 5e9a13c0eaf7..1c2c74655416 100644
--- a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
+++ b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
@@ -46,6 +46,7 @@
46 linux,mtd-name = "micron,mt29f4g16abbda3w"; 46 linux,mtd-name = "micron,mt29f4g16abbda3w";
47 nand-bus-width = <16>; 47 nand-bus-width = <16>;
48 ti,nand-ecc-opt = "bch8"; 48 ti,nand-ecc-opt = "bch8";
49 rb-gpios = <&gpmc 0 GPIO_ACTIVE_HIGH>; /* gpmc_wait0 */
49 gpmc,sync-clk-ps = <0>; 50 gpmc,sync-clk-ps = <0>;
50 gpmc,cs-on-ns = <0>; 51 gpmc,cs-on-ns = <0>;
51 gpmc,cs-rd-off-ns = <44>; 52 gpmc,cs-rd-off-ns = <44>;
diff --git a/arch/arm/boot/dts/omap3-overo-base.dtsi b/arch/arm/boot/dts/omap3-overo-base.dtsi
index de256fa8da48..3e946cac55f3 100644
--- a/arch/arm/boot/dts/omap3-overo-base.dtsi
+++ b/arch/arm/boot/dts/omap3-overo-base.dtsi
@@ -223,7 +223,9 @@
223}; 223};
224 224
225&gpmc { 225&gpmc {
226 ranges = <0 0 0x00000000 0x20000000>; 226 ranges = <0 0 0x30000000 0x1000000>, /* CS0 */
227 <4 0 0x2b000000 0x1000000>, /* CS4 */
228 <5 0 0x2c000000 0x1000000>; /* CS5 */
227 229
228 nand@0,0 { 230 nand@0,0 {
229 compatible = "ti,omap2-nand"; 231 compatible = "ti,omap2-nand";
diff --git a/arch/arm/boot/dts/omap3-overo-chestnut43-common.dtsi b/arch/arm/boot/dts/omap3-overo-chestnut43-common.dtsi
index 7df27926ead2..4f4c6efbd518 100644
--- a/arch/arm/boot/dts/omap3-overo-chestnut43-common.dtsi
+++ b/arch/arm/boot/dts/omap3-overo-chestnut43-common.dtsi
@@ -55,8 +55,6 @@
55#include "omap-gpmc-smsc9221.dtsi" 55#include "omap-gpmc-smsc9221.dtsi"
56 56
57&gpmc { 57&gpmc {
58 ranges = <5 0 0x2c000000 0x1000000>; /* CS5 */
59
60 ethernet@gpmc { 58 ethernet@gpmc {
61 reg = <5 0 0xff>; 59 reg = <5 0 0xff>;
62 interrupt-parent = <&gpio6>; 60 interrupt-parent = <&gpio6>;
diff --git a/arch/arm/boot/dts/omap3-overo-tobi-common.dtsi b/arch/arm/boot/dts/omap3-overo-tobi-common.dtsi
index 9e24b6a1d07b..1b304e2f1bd2 100644
--- a/arch/arm/boot/dts/omap3-overo-tobi-common.dtsi
+++ b/arch/arm/boot/dts/omap3-overo-tobi-common.dtsi
@@ -27,8 +27,6 @@
27#include "omap-gpmc-smsc9221.dtsi" 27#include "omap-gpmc-smsc9221.dtsi"
28 28
29&gpmc { 29&gpmc {
30 ranges = <5 0 0x2c000000 0x1000000>; /* CS5 */
31
32 ethernet@gpmc { 30 ethernet@gpmc {
33 reg = <5 0 0xff>; 31 reg = <5 0 0xff>;
34 interrupt-parent = <&gpio6>; 32 interrupt-parent = <&gpio6>;
diff --git a/arch/arm/boot/dts/omap3-overo-tobiduo-common.dtsi b/arch/arm/boot/dts/omap3-overo-tobiduo-common.dtsi
index 334109e14613..82e98ee3023a 100644
--- a/arch/arm/boot/dts/omap3-overo-tobiduo-common.dtsi
+++ b/arch/arm/boot/dts/omap3-overo-tobiduo-common.dtsi
@@ -15,9 +15,6 @@
15#include "omap-gpmc-smsc9221.dtsi" 15#include "omap-gpmc-smsc9221.dtsi"
16 16
17&gpmc { 17&gpmc {
18 ranges = <4 0 0x2b000000 0x1000000>, /* CS4 */
19 <5 0 0x2c000000 0x1000000>; /* CS5 */
20
21 smsc1: ethernet@gpmc { 18 smsc1: ethernet@gpmc {
22 reg = <5 0 0xff>; 19 reg = <5 0 0xff>;
23 interrupt-parent = <&gpio6>; 20 interrupt-parent = <&gpio6>;
diff --git a/arch/arm/boot/dts/rk3066a.dtsi b/arch/arm/boot/dts/rk3066a.dtsi
index c0ba86c3a2ab..0d0dae3a1694 100644
--- a/arch/arm/boot/dts/rk3066a.dtsi
+++ b/arch/arm/boot/dts/rk3066a.dtsi
@@ -197,6 +197,8 @@
197 clock-names = "saradc", "apb_pclk"; 197 clock-names = "saradc", "apb_pclk";
198 interrupts = <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>; 198 interrupts = <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>;
199 #io-channel-cells = <1>; 199 #io-channel-cells = <1>;
200 resets = <&cru SRST_SARADC>;
201 reset-names = "saradc-apb";
200 status = "disabled"; 202 status = "disabled";
201 }; 203 };
202 204
diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi
index cd33f0170890..91c4b3c7a8d5 100644
--- a/arch/arm/boot/dts/rk3288.dtsi
+++ b/arch/arm/boot/dts/rk3288.dtsi
@@ -279,6 +279,8 @@
279 #io-channel-cells = <1>; 279 #io-channel-cells = <1>;
280 clocks = <&cru SCLK_SARADC>, <&cru PCLK_SARADC>; 280 clocks = <&cru SCLK_SARADC>, <&cru PCLK_SARADC>;
281 clock-names = "saradc", "apb_pclk"; 281 clock-names = "saradc", "apb_pclk";
282 resets = <&cru SRST_SARADC>;
283 reset-names = "saradc-apb";
282 status = "disabled"; 284 status = "disabled";
283 }; 285 };
284 286
diff --git a/arch/arm/boot/dts/rk3xxx.dtsi b/arch/arm/boot/dts/rk3xxx.dtsi
index 99bbcc2c9b89..e2cd683b4e4b 100644
--- a/arch/arm/boot/dts/rk3xxx.dtsi
+++ b/arch/arm/boot/dts/rk3xxx.dtsi
@@ -399,6 +399,8 @@
399 #io-channel-cells = <1>; 399 #io-channel-cells = <1>;
400 clocks = <&cru SCLK_SARADC>, <&cru PCLK_SARADC>; 400 clocks = <&cru SCLK_SARADC>, <&cru PCLK_SARADC>;
401 clock-names = "saradc", "apb_pclk"; 401 clock-names = "saradc", "apb_pclk";
402 resets = <&cru SRST_SARADC>;
403 reset-names = "saradc-apb";
402 status = "disabled"; 404 status = "disabled";
403 }; 405 };
404 406
diff --git a/arch/arm/boot/dts/stih407-family.dtsi b/arch/arm/boot/dts/stih407-family.dtsi
index d294e82447a2..8b063ab10c19 100644
--- a/arch/arm/boot/dts/stih407-family.dtsi
+++ b/arch/arm/boot/dts/stih407-family.dtsi
@@ -550,8 +550,9 @@
550 interrupt-names = "mmcirq"; 550 interrupt-names = "mmcirq";
551 pinctrl-names = "default"; 551 pinctrl-names = "default";
552 pinctrl-0 = <&pinctrl_mmc0>; 552 pinctrl-0 = <&pinctrl_mmc0>;
553 clock-names = "mmc"; 553 clock-names = "mmc", "icn";
554 clocks = <&clk_s_c0_flexgen CLK_MMC_0>; 554 clocks = <&clk_s_c0_flexgen CLK_MMC_0>,
555 <&clk_s_c0_flexgen CLK_RX_ICN_HVA>;
555 bus-width = <8>; 556 bus-width = <8>;
556 non-removable; 557 non-removable;
557 }; 558 };
@@ -565,8 +566,9 @@
565 interrupt-names = "mmcirq"; 566 interrupt-names = "mmcirq";
566 pinctrl-names = "default"; 567 pinctrl-names = "default";
567 pinctrl-0 = <&pinctrl_sd1>; 568 pinctrl-0 = <&pinctrl_sd1>;
568 clock-names = "mmc"; 569 clock-names = "mmc", "icn";
569 clocks = <&clk_s_c0_flexgen CLK_MMC_1>; 570 clocks = <&clk_s_c0_flexgen CLK_MMC_1>,
571 <&clk_s_c0_flexgen CLK_RX_ICN_HVA>;
570 resets = <&softreset STIH407_MMC1_SOFTRESET>; 572 resets = <&softreset STIH407_MMC1_SOFTRESET>;
571 bus-width = <4>; 573 bus-width = <4>;
572 }; 574 };
diff --git a/arch/arm/boot/dts/stih410.dtsi b/arch/arm/boot/dts/stih410.dtsi
index 18ed1ad10d32..40318869c733 100644
--- a/arch/arm/boot/dts/stih410.dtsi
+++ b/arch/arm/boot/dts/stih410.dtsi
@@ -41,7 +41,8 @@
41 compatible = "st,st-ohci-300x"; 41 compatible = "st,st-ohci-300x";
42 reg = <0x9a03c00 0x100>; 42 reg = <0x9a03c00 0x100>;
43 interrupts = <GIC_SPI 180 IRQ_TYPE_NONE>; 43 interrupts = <GIC_SPI 180 IRQ_TYPE_NONE>;
44 clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>; 44 clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>,
45 <&clk_s_c0_flexgen CLK_RX_ICN_DISP_0>;
45 resets = <&powerdown STIH407_USB2_PORT0_POWERDOWN>, 46 resets = <&powerdown STIH407_USB2_PORT0_POWERDOWN>,
46 <&softreset STIH407_USB2_PORT0_SOFTRESET>; 47 <&softreset STIH407_USB2_PORT0_SOFTRESET>;
47 reset-names = "power", "softreset"; 48 reset-names = "power", "softreset";
@@ -57,7 +58,8 @@
57 interrupts = <GIC_SPI 151 IRQ_TYPE_NONE>; 58 interrupts = <GIC_SPI 151 IRQ_TYPE_NONE>;
58 pinctrl-names = "default"; 59 pinctrl-names = "default";
59 pinctrl-0 = <&pinctrl_usb0>; 60 pinctrl-0 = <&pinctrl_usb0>;
60 clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>; 61 clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>,
62 <&clk_s_c0_flexgen CLK_RX_ICN_DISP_0>;
61 resets = <&powerdown STIH407_USB2_PORT0_POWERDOWN>, 63 resets = <&powerdown STIH407_USB2_PORT0_POWERDOWN>,
62 <&softreset STIH407_USB2_PORT0_SOFTRESET>; 64 <&softreset STIH407_USB2_PORT0_SOFTRESET>;
63 reset-names = "power", "softreset"; 65 reset-names = "power", "softreset";
@@ -71,7 +73,8 @@
71 compatible = "st,st-ohci-300x"; 73 compatible = "st,st-ohci-300x";
72 reg = <0x9a83c00 0x100>; 74 reg = <0x9a83c00 0x100>;
73 interrupts = <GIC_SPI 181 IRQ_TYPE_NONE>; 75 interrupts = <GIC_SPI 181 IRQ_TYPE_NONE>;
74 clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>; 76 clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>,
77 <&clk_s_c0_flexgen CLK_RX_ICN_DISP_0>;
75 resets = <&powerdown STIH407_USB2_PORT1_POWERDOWN>, 78 resets = <&powerdown STIH407_USB2_PORT1_POWERDOWN>,
76 <&softreset STIH407_USB2_PORT1_SOFTRESET>; 79 <&softreset STIH407_USB2_PORT1_SOFTRESET>;
77 reset-names = "power", "softreset"; 80 reset-names = "power", "softreset";
@@ -87,7 +90,8 @@
87 interrupts = <GIC_SPI 153 IRQ_TYPE_NONE>; 90 interrupts = <GIC_SPI 153 IRQ_TYPE_NONE>;
88 pinctrl-names = "default"; 91 pinctrl-names = "default";
89 pinctrl-0 = <&pinctrl_usb1>; 92 pinctrl-0 = <&pinctrl_usb1>;
90 clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>; 93 clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>,
94 <&clk_s_c0_flexgen CLK_RX_ICN_DISP_0>;
91 resets = <&powerdown STIH407_USB2_PORT1_POWERDOWN>, 95 resets = <&powerdown STIH407_USB2_PORT1_POWERDOWN>,
92 <&softreset STIH407_USB2_PORT1_SOFTRESET>; 96 <&softreset STIH407_USB2_PORT1_SOFTRESET>;
93 reset-names = "power", "softreset"; 97 reset-names = "power", "softreset";
diff --git a/arch/arm/boot/dts/sun5i-a13.dtsi b/arch/arm/boot/dts/sun5i-a13.dtsi
index e012890e0cf2..a17ba0243db3 100644
--- a/arch/arm/boot/dts/sun5i-a13.dtsi
+++ b/arch/arm/boot/dts/sun5i-a13.dtsi
@@ -84,7 +84,7 @@
84 trips { 84 trips {
85 cpu_alert0: cpu_alert0 { 85 cpu_alert0: cpu_alert0 {
86 /* milliCelsius */ 86 /* milliCelsius */
87 temperature = <850000>; 87 temperature = <85000>;
88 hysteresis = <2000>; 88 hysteresis = <2000>;
89 type = "passive"; 89 type = "passive";
90 }; 90 };
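
The trip point is specified in millidegrees Celsius, so this is an
order-of-magnitude fix: 85000 / 1000 = 85 °C, a plausible passive-cooling
threshold, while the old 850000 decoded to 850 °C and could never fire.
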
diff --git a/arch/arm/boot/dts/tegra114-dalmore.dts b/arch/arm/boot/dts/tegra114-dalmore.dts
index 1dfc492cc004..1444fbd543e7 100644
--- a/arch/arm/boot/dts/tegra114-dalmore.dts
+++ b/arch/arm/boot/dts/tegra114-dalmore.dts
@@ -897,7 +897,7 @@
897 palmas: tps65913@58 { 897 palmas: tps65913@58 {
898 compatible = "ti,palmas"; 898 compatible = "ti,palmas";
899 reg = <0x58>; 899 reg = <0x58>;
900 interrupts = <0 86 IRQ_TYPE_LEVEL_LOW>; 900 interrupts = <0 86 IRQ_TYPE_LEVEL_HIGH>;
901 901
902 #interrupt-cells = <2>; 902 #interrupt-cells = <2>;
903 interrupt-controller; 903 interrupt-controller;
diff --git a/arch/arm/boot/dts/tegra114-roth.dts b/arch/arm/boot/dts/tegra114-roth.dts
index 70cf40996c3f..966a7fc044af 100644
--- a/arch/arm/boot/dts/tegra114-roth.dts
+++ b/arch/arm/boot/dts/tegra114-roth.dts
@@ -802,7 +802,7 @@
802 palmas: pmic@58 { 802 palmas: pmic@58 {
803 compatible = "ti,palmas"; 803 compatible = "ti,palmas";
804 reg = <0x58>; 804 reg = <0x58>;
805 interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_LOW>; 805 interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
806 806
807 #interrupt-cells = <2>; 807 #interrupt-cells = <2>;
808 interrupt-controller; 808 interrupt-controller;
diff --git a/arch/arm/boot/dts/tegra114-tn7.dts b/arch/arm/boot/dts/tegra114-tn7.dts
index 17dd14545862..a161fa1dfb61 100644
--- a/arch/arm/boot/dts/tegra114-tn7.dts
+++ b/arch/arm/boot/dts/tegra114-tn7.dts
@@ -63,7 +63,7 @@
63 palmas: pmic@58 { 63 palmas: pmic@58 {
64 compatible = "ti,palmas"; 64 compatible = "ti,palmas";
65 reg = <0x58>; 65 reg = <0x58>;
66 interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_LOW>; 66 interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
67 67
68 #interrupt-cells = <2>; 68 #interrupt-cells = <2>;
69 interrupt-controller; 69 interrupt-controller;
diff --git a/arch/arm/common/locomo.c b/arch/arm/common/locomo.c
index 0e97b4b871f9..6c7b06854fce 100644
--- a/arch/arm/common/locomo.c
+++ b/arch/arm/common/locomo.c
@@ -140,7 +140,7 @@ static struct locomo_dev_info locomo_devices[] = {
140 140
141static void locomo_handler(struct irq_desc *desc) 141static void locomo_handler(struct irq_desc *desc)
142{ 142{
143 struct locomo *lchip = irq_desc_get_chip_data(desc); 143 struct locomo *lchip = irq_desc_get_handler_data(desc);
144 int req, i; 144 int req, i;
145 145
146 /* Acknowledge the parent IRQ */ 146 /* Acknowledge the parent IRQ */
@@ -200,8 +200,7 @@ static void locomo_setup_irq(struct locomo *lchip)
200 * Install handler for IRQ_LOCOMO_HW. 200 * Install handler for IRQ_LOCOMO_HW.
201 */ 201 */
202 irq_set_irq_type(lchip->irq, IRQ_TYPE_EDGE_FALLING); 202 irq_set_irq_type(lchip->irq, IRQ_TYPE_EDGE_FALLING);
203 irq_set_chip_data(lchip->irq, lchip); 203 irq_set_chained_handler_and_data(lchip->irq, locomo_handler, lchip);
204 irq_set_chained_handler(lchip->irq, locomo_handler);
205 204
206 /* Install handlers for IRQ_LOCOMO_* */ 205 /* Install handlers for IRQ_LOCOMO_* */
207 for ( ; irq <= lchip->irq_base + 3; irq++) { 206 for ( ; irq <= lchip->irq_base + 3; irq++) {
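
Two related locomo fixes travel together here: irq_set_chained_handler_and_data()
installs the handler and its data in a single step, so the chained handler
can never run before its data is in place, and because that helper stores
the pointer as handler data rather than chip data, the handler's getter
switches to irq_desc_get_handler_data() to match.
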
diff --git a/arch/arm/common/sa1111.c b/arch/arm/common/sa1111.c
index fb0a0a4dfea4..4ecd5120fce7 100644
--- a/arch/arm/common/sa1111.c
+++ b/arch/arm/common/sa1111.c
@@ -15,6 +15,7 @@
15 * from machine specific code with proper arguments when required. 15 * from machine specific code with proper arguments when required.
16 */ 16 */
17#include <linux/module.h> 17#include <linux/module.h>
18#include <linux/gpio/driver.h>
18#include <linux/init.h> 19#include <linux/init.h>
19#include <linux/irq.h> 20#include <linux/irq.h>
20#include <linux/kernel.h> 21#include <linux/kernel.h>
@@ -107,6 +108,7 @@ struct sa1111 {
107 spinlock_t lock; 108 spinlock_t lock;
108 void __iomem *base; 109 void __iomem *base;
109 struct sa1111_platform_data *pdata; 110 struct sa1111_platform_data *pdata;
111 struct gpio_chip gc;
110#ifdef CONFIG_PM 112#ifdef CONFIG_PM
111 void *saved_state; 113 void *saved_state;
112#endif 114#endif
@@ -231,132 +233,44 @@ static void sa1111_irq_handler(struct irq_desc *desc)
231#define SA1111_IRQMASK_LO(x) (1 << (x - sachip->irq_base)) 233#define SA1111_IRQMASK_LO(x) (1 << (x - sachip->irq_base))
232#define SA1111_IRQMASK_HI(x) (1 << (x - sachip->irq_base - 32)) 234#define SA1111_IRQMASK_HI(x) (1 << (x - sachip->irq_base - 32))
233 235
234static void sa1111_ack_irq(struct irq_data *d) 236static u32 sa1111_irqmask(struct irq_data *d)
235{
236}
237
238static void sa1111_mask_lowirq(struct irq_data *d)
239{ 237{
240 struct sa1111 *sachip = irq_data_get_irq_chip_data(d); 238 struct sa1111 *sachip = irq_data_get_irq_chip_data(d);
241 void __iomem *mapbase = sachip->base + SA1111_INTC;
242 unsigned long ie0;
243 239
244 ie0 = sa1111_readl(mapbase + SA1111_INTEN0); 240 return BIT((d->irq - sachip->irq_base) & 31);
245 ie0 &= ~SA1111_IRQMASK_LO(d->irq);
246 writel(ie0, mapbase + SA1111_INTEN0);
247} 241}
248 242
249static void sa1111_unmask_lowirq(struct irq_data *d) 243static int sa1111_irqbank(struct irq_data *d)
250{ 244{
251 struct sa1111 *sachip = irq_data_get_irq_chip_data(d); 245 struct sa1111 *sachip = irq_data_get_irq_chip_data(d);
252 void __iomem *mapbase = sachip->base + SA1111_INTC;
253 unsigned long ie0;
254 246
255 ie0 = sa1111_readl(mapbase + SA1111_INTEN0); 247 return ((d->irq - sachip->irq_base) / 32) * 4;
256 ie0 |= SA1111_IRQMASK_LO(d->irq);
257 sa1111_writel(ie0, mapbase + SA1111_INTEN0);
258} 248}
259 249
260/* 250static void sa1111_ack_irq(struct irq_data *d)
261 * Attempt to re-trigger the interrupt. The SA1111 contains a register
262 * (INTSET) which claims to do this. However, in practice no amount of
263 * manipulation of INTEN and INTSET guarantees that the interrupt will
264 * be triggered. In fact, it's very difficult, if not impossible to get
265 * INTSET to re-trigger the interrupt.
266 */
267static int sa1111_retrigger_lowirq(struct irq_data *d)
268{
269 struct sa1111 *sachip = irq_data_get_irq_chip_data(d);
270 void __iomem *mapbase = sachip->base + SA1111_INTC;
271 unsigned int mask = SA1111_IRQMASK_LO(d->irq);
272 unsigned long ip0;
273 int i;
274
275 ip0 = sa1111_readl(mapbase + SA1111_INTPOL0);
276 for (i = 0; i < 8; i++) {
277 sa1111_writel(ip0 ^ mask, mapbase + SA1111_INTPOL0);
278 sa1111_writel(ip0, mapbase + SA1111_INTPOL0);
279 if (sa1111_readl(mapbase + SA1111_INTSTATCLR0) & mask)
280 break;
281 }
282
283 if (i == 8)
284 pr_err("Danger Will Robinson: failed to re-trigger IRQ%d\n",
285 d->irq);
286 return i == 8 ? -1 : 0;
287}
288
289static int sa1111_type_lowirq(struct irq_data *d, unsigned int flags)
290{
291 struct sa1111 *sachip = irq_data_get_irq_chip_data(d);
292 void __iomem *mapbase = sachip->base + SA1111_INTC;
293 unsigned int mask = SA1111_IRQMASK_LO(d->irq);
294 unsigned long ip0;
295
296 if (flags == IRQ_TYPE_PROBE)
297 return 0;
298
299 if ((!(flags & IRQ_TYPE_EDGE_RISING) ^ !(flags & IRQ_TYPE_EDGE_FALLING)) == 0)
300 return -EINVAL;
301
302 ip0 = sa1111_readl(mapbase + SA1111_INTPOL0);
303 if (flags & IRQ_TYPE_EDGE_RISING)
304 ip0 &= ~mask;
305 else
306 ip0 |= mask;
307 sa1111_writel(ip0, mapbase + SA1111_INTPOL0);
308 sa1111_writel(ip0, mapbase + SA1111_WAKEPOL0);
309
310 return 0;
311}
312
313static int sa1111_wake_lowirq(struct irq_data *d, unsigned int on)
314{ 251{
315 struct sa1111 *sachip = irq_data_get_irq_chip_data(d);
316 void __iomem *mapbase = sachip->base + SA1111_INTC;
317 unsigned int mask = SA1111_IRQMASK_LO(d->irq);
318 unsigned long we0;
319
320 we0 = sa1111_readl(mapbase + SA1111_WAKEEN0);
321 if (on)
322 we0 |= mask;
323 else
324 we0 &= ~mask;
325 sa1111_writel(we0, mapbase + SA1111_WAKEEN0);
326
327 return 0;
328} 252}
329 253
330static struct irq_chip sa1111_low_chip = { 254static void sa1111_mask_irq(struct irq_data *d)
331 .name = "SA1111-l",
332 .irq_ack = sa1111_ack_irq,
333 .irq_mask = sa1111_mask_lowirq,
334 .irq_unmask = sa1111_unmask_lowirq,
335 .irq_retrigger = sa1111_retrigger_lowirq,
336 .irq_set_type = sa1111_type_lowirq,
337 .irq_set_wake = sa1111_wake_lowirq,
338};
339
340static void sa1111_mask_highirq(struct irq_data *d)
341{ 255{
342 struct sa1111 *sachip = irq_data_get_irq_chip_data(d); 256 struct sa1111 *sachip = irq_data_get_irq_chip_data(d);
343 void __iomem *mapbase = sachip->base + SA1111_INTC; 257 void __iomem *mapbase = sachip->base + SA1111_INTC + sa1111_irqbank(d);
344 unsigned long ie1; 258 u32 ie;
345 259
346 ie1 = sa1111_readl(mapbase + SA1111_INTEN1); 260 ie = sa1111_readl(mapbase + SA1111_INTEN0);
347 ie1 &= ~SA1111_IRQMASK_HI(d->irq); 261 ie &= ~sa1111_irqmask(d);
348 sa1111_writel(ie1, mapbase + SA1111_INTEN1); 262 sa1111_writel(ie, mapbase + SA1111_INTEN0);
349} 263}
350 264
351static void sa1111_unmask_highirq(struct irq_data *d) 265static void sa1111_unmask_irq(struct irq_data *d)
352{ 266{
353 struct sa1111 *sachip = irq_data_get_irq_chip_data(d); 267 struct sa1111 *sachip = irq_data_get_irq_chip_data(d);
354 void __iomem *mapbase = sachip->base + SA1111_INTC; 268 void __iomem *mapbase = sachip->base + SA1111_INTC + sa1111_irqbank(d);
355 unsigned long ie1; 269 u32 ie;
356 270
357 ie1 = sa1111_readl(mapbase + SA1111_INTEN1); 271 ie = sa1111_readl(mapbase + SA1111_INTEN0);
358 ie1 |= SA1111_IRQMASK_HI(d->irq); 272 ie |= sa1111_irqmask(d);
359 sa1111_writel(ie1, mapbase + SA1111_INTEN1); 273 sa1111_writel(ie, mapbase + SA1111_INTEN0);
360} 274}
361 275
362/* 276/*
@@ -366,19 +280,18 @@ static void sa1111_unmask_highirq(struct irq_data *d)
366 * be triggered. In fact, it's very difficult, if not impossible to get 280 * be triggered. In fact, it's very difficult, if not impossible to get
367 * INTSET to re-trigger the interrupt. 281 * INTSET to re-trigger the interrupt.
368 */ 282 */
369static int sa1111_retrigger_highirq(struct irq_data *d) 283static int sa1111_retrigger_irq(struct irq_data *d)
370{ 284{
371 struct sa1111 *sachip = irq_data_get_irq_chip_data(d); 285 struct sa1111 *sachip = irq_data_get_irq_chip_data(d);
372 void __iomem *mapbase = sachip->base + SA1111_INTC; 286 void __iomem *mapbase = sachip->base + SA1111_INTC + sa1111_irqbank(d);
373 unsigned int mask = SA1111_IRQMASK_HI(d->irq); 287 u32 ip, mask = sa1111_irqmask(d);
374 unsigned long ip1;
375 int i; 288 int i;
376 289
377 ip1 = sa1111_readl(mapbase + SA1111_INTPOL1); 290 ip = sa1111_readl(mapbase + SA1111_INTPOL0);
378 for (i = 0; i < 8; i++) { 291 for (i = 0; i < 8; i++) {
379 sa1111_writel(ip1 ^ mask, mapbase + SA1111_INTPOL1); 292 sa1111_writel(ip ^ mask, mapbase + SA1111_INTPOL0);
380 sa1111_writel(ip1, mapbase + SA1111_INTPOL1); 293 sa1111_writel(ip, mapbase + SA1111_INTPOL0);
381 if (sa1111_readl(mapbase + SA1111_INTSTATCLR1) & mask) 294 if (sa1111_readl(mapbase + SA1111_INTSTATCLR0) & mask)
382 break; 295 break;
383 } 296 }
384 297
@@ -388,12 +301,11 @@ static int sa1111_retrigger_highirq(struct irq_data *d)
388 return i == 8 ? -1 : 0; 301 return i == 8 ? -1 : 0;
389} 302}
390 303
391static int sa1111_type_highirq(struct irq_data *d, unsigned int flags) 304static int sa1111_type_irq(struct irq_data *d, unsigned int flags)
392{ 305{
393 struct sa1111 *sachip = irq_data_get_irq_chip_data(d); 306 struct sa1111 *sachip = irq_data_get_irq_chip_data(d);
394 void __iomem *mapbase = sachip->base + SA1111_INTC; 307 void __iomem *mapbase = sachip->base + SA1111_INTC + sa1111_irqbank(d);
395 unsigned int mask = SA1111_IRQMASK_HI(d->irq); 308 u32 ip, mask = sa1111_irqmask(d);
396 unsigned long ip1;
397 309
398 if (flags == IRQ_TYPE_PROBE) 310 if (flags == IRQ_TYPE_PROBE)
399 return 0; 311 return 0;
@@ -401,42 +313,41 @@ static int sa1111_type_highirq(struct irq_data *d, unsigned int flags)
401 if ((!(flags & IRQ_TYPE_EDGE_RISING) ^ !(flags & IRQ_TYPE_EDGE_FALLING)) == 0) 313 if ((!(flags & IRQ_TYPE_EDGE_RISING) ^ !(flags & IRQ_TYPE_EDGE_FALLING)) == 0)
402 return -EINVAL; 314 return -EINVAL;
403 315
404 ip1 = sa1111_readl(mapbase + SA1111_INTPOL1); 316 ip = sa1111_readl(mapbase + SA1111_INTPOL0);
405 if (flags & IRQ_TYPE_EDGE_RISING) 317 if (flags & IRQ_TYPE_EDGE_RISING)
406 ip1 &= ~mask; 318 ip &= ~mask;
407 else 319 else
408 ip1 |= mask; 320 ip |= mask;
409 sa1111_writel(ip1, mapbase + SA1111_INTPOL1); 321 sa1111_writel(ip, mapbase + SA1111_INTPOL0);
410 sa1111_writel(ip1, mapbase + SA1111_WAKEPOL1); 322 sa1111_writel(ip, mapbase + SA1111_WAKEPOL0);
411 323
412 return 0; 324 return 0;
413} 325}
414 326
415static int sa1111_wake_highirq(struct irq_data *d, unsigned int on) 327static int sa1111_wake_irq(struct irq_data *d, unsigned int on)
416{ 328{
417 struct sa1111 *sachip = irq_data_get_irq_chip_data(d); 329 struct sa1111 *sachip = irq_data_get_irq_chip_data(d);
418 void __iomem *mapbase = sachip->base + SA1111_INTC; 330 void __iomem *mapbase = sachip->base + SA1111_INTC + sa1111_irqbank(d);
419 unsigned int mask = SA1111_IRQMASK_HI(d->irq); 331 u32 we, mask = sa1111_irqmask(d);
420 unsigned long we1;
421 332
422 we1 = sa1111_readl(mapbase + SA1111_WAKEEN1); 333 we = sa1111_readl(mapbase + SA1111_WAKEEN0);
423 if (on) 334 if (on)
424 we1 |= mask; 335 we |= mask;
425 else 336 else
426 we1 &= ~mask; 337 we &= ~mask;
427 sa1111_writel(we1, mapbase + SA1111_WAKEEN1); 338 sa1111_writel(we, mapbase + SA1111_WAKEEN0);
428 339
429 return 0; 340 return 0;
430} 341}
431 342
432static struct irq_chip sa1111_high_chip = { 343static struct irq_chip sa1111_irq_chip = {
433 .name = "SA1111-h", 344 .name = "SA1111",
434 .irq_ack = sa1111_ack_irq, 345 .irq_ack = sa1111_ack_irq,
435 .irq_mask = sa1111_mask_highirq, 346 .irq_mask = sa1111_mask_irq,
436 .irq_unmask = sa1111_unmask_highirq, 347 .irq_unmask = sa1111_unmask_irq,
437 .irq_retrigger = sa1111_retrigger_highirq, 348 .irq_retrigger = sa1111_retrigger_irq,
438 .irq_set_type = sa1111_type_highirq, 349 .irq_set_type = sa1111_type_irq,
439 .irq_set_wake = sa1111_wake_highirq, 350 .irq_set_wake = sa1111_wake_irq,
440}; 351};
441 352
442static int sa1111_setup_irq(struct sa1111 *sachip, unsigned irq_base) 353static int sa1111_setup_irq(struct sa1111 *sachip, unsigned irq_base)
@@ -472,8 +383,8 @@ static int sa1111_setup_irq(struct sa1111 *sachip, unsigned irq_base)
472 * specifies that S0ReadyInt and S1ReadyInt should be '1'. 383 * specifies that S0ReadyInt and S1ReadyInt should be '1'.
473 */ 384 */
474 sa1111_writel(0, irqbase + SA1111_INTPOL0); 385 sa1111_writel(0, irqbase + SA1111_INTPOL0);
475 sa1111_writel(SA1111_IRQMASK_HI(IRQ_S0_READY_NINT) | 386 sa1111_writel(BIT(IRQ_S0_READY_NINT & 31) |
476 SA1111_IRQMASK_HI(IRQ_S1_READY_NINT), 387 BIT(IRQ_S1_READY_NINT & 31),
477 irqbase + SA1111_INTPOL1); 388 irqbase + SA1111_INTPOL1);
478 389
479 /* clear all IRQs */ 390 /* clear all IRQs */
@@ -482,16 +393,14 @@ static int sa1111_setup_irq(struct sa1111 *sachip, unsigned irq_base)
482 393
483 for (i = IRQ_GPAIN0; i <= SSPROR; i++) { 394 for (i = IRQ_GPAIN0; i <= SSPROR; i++) {
484 irq = sachip->irq_base + i; 395 irq = sachip->irq_base + i;
485 irq_set_chip_and_handler(irq, &sa1111_low_chip, 396 irq_set_chip_and_handler(irq, &sa1111_irq_chip, handle_edge_irq);
486 handle_edge_irq);
487 irq_set_chip_data(irq, sachip); 397 irq_set_chip_data(irq, sachip);
488 irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE); 398 irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
489 } 399 }
490 400
491 for (i = AUDXMTDMADONEA; i <= IRQ_S1_BVD1_STSCHG; i++) { 401 for (i = AUDXMTDMADONEA; i <= IRQ_S1_BVD1_STSCHG; i++) {
492 irq = sachip->irq_base + i; 402 irq = sachip->irq_base + i;
493 irq_set_chip_and_handler(irq, &sa1111_high_chip, 403 irq_set_chip_and_handler(irq, &sa1111_irq_chip, handle_edge_irq);
494 handle_edge_irq);
495 irq_set_chip_data(irq, sachip); 404 irq_set_chip_data(irq, sachip);
496 irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE); 405 irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
497 } 406 }
@@ -509,6 +418,181 @@ static int sa1111_setup_irq(struct sa1111 *sachip, unsigned irq_base)
509 return 0; 418 return 0;
510} 419}
511 420
421static void sa1111_remove_irq(struct sa1111 *sachip)
422{
423 void __iomem *irqbase = sachip->base + SA1111_INTC;
424
425 /* disable all IRQs */
426 sa1111_writel(0, irqbase + SA1111_INTEN0);
427 sa1111_writel(0, irqbase + SA1111_INTEN1);
428 sa1111_writel(0, irqbase + SA1111_WAKEEN0);
429 sa1111_writel(0, irqbase + SA1111_WAKEEN1);
430
431 if (sachip->irq != NO_IRQ) {
432 irq_set_chained_handler_and_data(sachip->irq, NULL, NULL);
433 irq_free_descs(sachip->irq_base, SA1111_IRQ_NR);
434
435 release_mem_region(sachip->phys + SA1111_INTC, 512);
436 }
437}
438
439enum {
440 SA1111_GPIO_PXDDR = (SA1111_GPIO_PADDR - SA1111_GPIO_PADDR),
441 SA1111_GPIO_PXDRR = (SA1111_GPIO_PADRR - SA1111_GPIO_PADDR),
442 SA1111_GPIO_PXDWR = (SA1111_GPIO_PADWR - SA1111_GPIO_PADDR),
443 SA1111_GPIO_PXSDR = (SA1111_GPIO_PASDR - SA1111_GPIO_PADDR),
444 SA1111_GPIO_PXSSR = (SA1111_GPIO_PASSR - SA1111_GPIO_PADDR),
445};
446
447static struct sa1111 *gc_to_sa1111(struct gpio_chip *gc)
448{
449 return container_of(gc, struct sa1111, gc);
450}
451
452static void __iomem *sa1111_gpio_map_reg(struct sa1111 *sachip, unsigned offset)
453{
454 void __iomem *reg = sachip->base + SA1111_GPIO;
455
456 if (offset < 4)
457 return reg + SA1111_GPIO_PADDR;
458 if (offset < 10)
459 return reg + SA1111_GPIO_PBDDR;
460 if (offset < 18)
461 return reg + SA1111_GPIO_PCDDR;
462 return NULL;
463}
464
465static u32 sa1111_gpio_map_bit(unsigned offset)
466{
467 if (offset < 4)
468 return BIT(offset);
469 if (offset < 10)
470 return BIT(offset - 4);
471 if (offset < 18)
472 return BIT(offset - 10);
473 return 0;
474}
475
476static void sa1111_gpio_modify(void __iomem *reg, u32 mask, u32 set)
477{
478 u32 val;
479
480 val = readl_relaxed(reg);
481 val &= ~mask;
482 val |= mask & set;
483 writel_relaxed(val, reg);
484}
485
486static int sa1111_gpio_get_direction(struct gpio_chip *gc, unsigned offset)
487{
488 struct sa1111 *sachip = gc_to_sa1111(gc);
489 void __iomem *reg = sa1111_gpio_map_reg(sachip, offset);
490 u32 mask = sa1111_gpio_map_bit(offset);
491
492 return !!(readl_relaxed(reg + SA1111_GPIO_PXDDR) & mask);
493}
494
495static int sa1111_gpio_direction_input(struct gpio_chip *gc, unsigned offset)
496{
497 struct sa1111 *sachip = gc_to_sa1111(gc);
498 unsigned long flags;
499 void __iomem *reg = sa1111_gpio_map_reg(sachip, offset);
500 u32 mask = sa1111_gpio_map_bit(offset);
501
502 spin_lock_irqsave(&sachip->lock, flags);
503 sa1111_gpio_modify(reg + SA1111_GPIO_PXDDR, mask, mask);
504 sa1111_gpio_modify(reg + SA1111_GPIO_PXSDR, mask, mask);
505 spin_unlock_irqrestore(&sachip->lock, flags);
506
507 return 0;
508}
509
510static int sa1111_gpio_direction_output(struct gpio_chip *gc, unsigned offset,
511 int value)
512{
513 struct sa1111 *sachip = gc_to_sa1111(gc);
514 unsigned long flags;
515 void __iomem *reg = sa1111_gpio_map_reg(sachip, offset);
516 u32 mask = sa1111_gpio_map_bit(offset);
517
518 spin_lock_irqsave(&sachip->lock, flags);
519 sa1111_gpio_modify(reg + SA1111_GPIO_PXDWR, mask, value ? mask : 0);
520 sa1111_gpio_modify(reg + SA1111_GPIO_PXSSR, mask, value ? mask : 0);
521 sa1111_gpio_modify(reg + SA1111_GPIO_PXDDR, mask, 0);
522 sa1111_gpio_modify(reg + SA1111_GPIO_PXSDR, mask, 0);
523 spin_unlock_irqrestore(&sachip->lock, flags);
524
525 return 0;
526}
527
528static int sa1111_gpio_get(struct gpio_chip *gc, unsigned offset)
529{
530 struct sa1111 *sachip = gc_to_sa1111(gc);
531 void __iomem *reg = sa1111_gpio_map_reg(sachip, offset);
532 u32 mask = sa1111_gpio_map_bit(offset);
533
534 return !!(readl_relaxed(reg + SA1111_GPIO_PXDRR) & mask);
535}
536
537static void sa1111_gpio_set(struct gpio_chip *gc, unsigned offset, int value)
538{
539 struct sa1111 *sachip = gc_to_sa1111(gc);
540 unsigned long flags;
541 void __iomem *reg = sa1111_gpio_map_reg(sachip, offset);
542 u32 mask = sa1111_gpio_map_bit(offset);
543
544 spin_lock_irqsave(&sachip->lock, flags);
545 sa1111_gpio_modify(reg + SA1111_GPIO_PXDWR, mask, value ? mask : 0);
546 sa1111_gpio_modify(reg + SA1111_GPIO_PXSSR, mask, value ? mask : 0);
547 spin_unlock_irqrestore(&sachip->lock, flags);
548}
549
550static void sa1111_gpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
551 unsigned long *bits)
552{
553 struct sa1111 *sachip = gc_to_sa1111(gc);
554 unsigned long flags;
555 void __iomem *reg = sachip->base + SA1111_GPIO;
556 u32 msk, val;
557
558 msk = *mask;
559 val = *bits;
560
561 spin_lock_irqsave(&sachip->lock, flags);
562 sa1111_gpio_modify(reg + SA1111_GPIO_PADWR, msk & 15, val);
563 sa1111_gpio_modify(reg + SA1111_GPIO_PASSR, msk & 15, val);
564 sa1111_gpio_modify(reg + SA1111_GPIO_PBDWR, (msk >> 4) & 255, val >> 4);
565 sa1111_gpio_modify(reg + SA1111_GPIO_PBSSR, (msk >> 4) & 255, val >> 4);
566 sa1111_gpio_modify(reg + SA1111_GPIO_PCDWR, (msk >> 12) & 255, val >> 12);
567 sa1111_gpio_modify(reg + SA1111_GPIO_PCSSR, (msk >> 12) & 255, val >> 12);
568 spin_unlock_irqrestore(&sachip->lock, flags);
569}
570
571static int sa1111_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
572{
573 struct sa1111 *sachip = gc_to_sa1111(gc);
574
575 return sachip->irq_base + offset;
576}
577
578static int sa1111_setup_gpios(struct sa1111 *sachip)
579{
580 sachip->gc.label = "sa1111";
581 sachip->gc.parent = sachip->dev;
582 sachip->gc.owner = THIS_MODULE;
583 sachip->gc.get_direction = sa1111_gpio_get_direction;
584 sachip->gc.direction_input = sa1111_gpio_direction_input;
585 sachip->gc.direction_output = sa1111_gpio_direction_output;
586 sachip->gc.get = sa1111_gpio_get;
587 sachip->gc.set = sa1111_gpio_set;
588 sachip->gc.set_multiple = sa1111_gpio_set_multiple;
589 sachip->gc.to_irq = sa1111_gpio_to_irq;
590 sachip->gc.base = -1;
591 sachip->gc.ngpio = 18;
592
593 return devm_gpiochip_add_data(sachip->dev, &sachip->gc, sachip);
594}
595
512/* 596/*
513 * Bring the SA1111 out of reset. This requires a set procedure: 597 * Bring the SA1111 out of reset. This requires a set procedure:
514 * 1. nRESET asserted (by hardware) 598 * 1. nRESET asserted (by hardware)
@@ -607,7 +691,7 @@ sa1111_configure_smc(struct sa1111 *sachip, int sdram, unsigned int drac,
607 691
608static void sa1111_dev_release(struct device *_dev) 692static void sa1111_dev_release(struct device *_dev)
609{ 693{
610 struct sa1111_dev *dev = SA1111_DEV(_dev); 694 struct sa1111_dev *dev = to_sa1111_device(_dev);
611 695
612 kfree(dev); 696 kfree(dev);
613} 697}
@@ -696,19 +780,17 @@ static int __sa1111_probe(struct device *me, struct resource *mem, int irq)
696 if (!pd) 780 if (!pd)
697 return -EINVAL; 781 return -EINVAL;
698 782
699 sachip = kzalloc(sizeof(struct sa1111), GFP_KERNEL); 783 sachip = devm_kzalloc(me, sizeof(struct sa1111), GFP_KERNEL);
700 if (!sachip) 784 if (!sachip)
701 return -ENOMEM; 785 return -ENOMEM;
702 786
703 sachip->clk = clk_get(me, "SA1111_CLK"); 787 sachip->clk = devm_clk_get(me, "SA1111_CLK");
704 if (IS_ERR(sachip->clk)) { 788 if (IS_ERR(sachip->clk))
705 ret = PTR_ERR(sachip->clk); 789 return PTR_ERR(sachip->clk);
706 goto err_free;
707 }
708 790
709 ret = clk_prepare(sachip->clk); 791 ret = clk_prepare(sachip->clk);
710 if (ret) 792 if (ret)
711 goto err_clkput; 793 return ret;
712 794
713 spin_lock_init(&sachip->lock); 795 spin_lock_init(&sachip->lock);
714 796
@@ -754,9 +836,14 @@ static int __sa1111_probe(struct device *me, struct resource *mem, int irq)
754 if (sachip->irq != NO_IRQ) { 836 if (sachip->irq != NO_IRQ) {
755 ret = sa1111_setup_irq(sachip, pd->irq_base); 837 ret = sa1111_setup_irq(sachip, pd->irq_base);
756 if (ret) 838 if (ret)
757 goto err_unmap; 839 goto err_clk;
758 } 840 }
759 841
842 /* Setup the GPIOs - should really be done after the IRQ setup */
843 ret = sa1111_setup_gpios(sachip);
844 if (ret)
845 goto err_irq;
846
760#ifdef CONFIG_ARCH_SA1100 847#ifdef CONFIG_ARCH_SA1100
761 { 848 {
762 unsigned int val; 849 unsigned int val;
@@ -799,20 +886,22 @@ static int __sa1111_probe(struct device *me, struct resource *mem, int irq)
799 886
800 return 0; 887 return 0;
801 888
889 err_irq:
890 sa1111_remove_irq(sachip);
891 err_clk:
892 clk_disable(sachip->clk);
802 err_unmap: 893 err_unmap:
803 iounmap(sachip->base); 894 iounmap(sachip->base);
804 err_clk_unprep: 895 err_clk_unprep:
805 clk_unprepare(sachip->clk); 896 clk_unprepare(sachip->clk);
806 err_clkput:
807 clk_put(sachip->clk);
808 err_free:
809 kfree(sachip);
810 return ret; 897 return ret;
811} 898}
812 899
813static int sa1111_remove_one(struct device *dev, void *data) 900static int sa1111_remove_one(struct device *dev, void *data)
814{ 901{
815 struct sa1111_dev *sadev = SA1111_DEV(dev); 902 struct sa1111_dev *sadev = to_sa1111_device(dev);
903 if (dev->bus != &sa1111_bus_type)
904 return 0;
816 device_del(&sadev->dev); 905 device_del(&sadev->dev);
817 release_resource(&sadev->res); 906 release_resource(&sadev->res);
818 put_device(&sadev->dev); 907 put_device(&sadev->dev);
@@ -821,29 +910,14 @@ static int sa1111_remove_one(struct device *dev, void *data)
821 910
822static void __sa1111_remove(struct sa1111 *sachip) 911static void __sa1111_remove(struct sa1111 *sachip)
823{ 912{
824 void __iomem *irqbase = sachip->base + SA1111_INTC;
825
826 device_for_each_child(sachip->dev, NULL, sa1111_remove_one); 913 device_for_each_child(sachip->dev, NULL, sa1111_remove_one);
827 914
828 /* disable all IRQs */ 915 sa1111_remove_irq(sachip);
829 sa1111_writel(0, irqbase + SA1111_INTEN0);
830 sa1111_writel(0, irqbase + SA1111_INTEN1);
831 sa1111_writel(0, irqbase + SA1111_WAKEEN0);
832 sa1111_writel(0, irqbase + SA1111_WAKEEN1);
833 916
834 clk_disable(sachip->clk); 917 clk_disable(sachip->clk);
835 clk_unprepare(sachip->clk); 918 clk_unprepare(sachip->clk);
836 919
837 if (sachip->irq != NO_IRQ) {
838 irq_set_chained_handler_and_data(sachip->irq, NULL, NULL);
839 irq_free_descs(sachip->irq_base, SA1111_IRQ_NR);
840
841 release_mem_region(sachip->phys + SA1111_INTC, 512);
842 }
843
844 iounmap(sachip->base); 920 iounmap(sachip->base);
845 clk_put(sachip->clk);
846 kfree(sachip);
847} 921}
848 922
849struct sa1111_save_data { 923struct sa1111_save_data {
@@ -869,9 +943,9 @@ struct sa1111_save_data {
869 943
870#ifdef CONFIG_PM 944#ifdef CONFIG_PM
871 945
872static int sa1111_suspend(struct platform_device *dev, pm_message_t state) 946static int sa1111_suspend_noirq(struct device *dev)
873{ 947{
874 struct sa1111 *sachip = platform_get_drvdata(dev); 948 struct sa1111 *sachip = dev_get_drvdata(dev);
875 struct sa1111_save_data *save; 949 struct sa1111_save_data *save;
876 unsigned long flags; 950 unsigned long flags;
877 unsigned int val; 951 unsigned int val;
@@ -934,9 +1008,9 @@ static int sa1111_suspend(struct platform_device *dev, pm_message_t state)
934 * restored by their respective drivers, and must be called 1008 * restored by their respective drivers, and must be called
935 * via LDM after this function. 1009 * via LDM after this function.
936 */ 1010 */
937static int sa1111_resume(struct platform_device *dev) 1011static int sa1111_resume_noirq(struct device *dev)
938{ 1012{
939 struct sa1111 *sachip = platform_get_drvdata(dev); 1013 struct sa1111 *sachip = dev_get_drvdata(dev);
940 struct sa1111_save_data *save; 1014 struct sa1111_save_data *save;
941 unsigned long flags, id; 1015 unsigned long flags, id;
942 void __iomem *base; 1016 void __iomem *base;
@@ -952,7 +1026,7 @@ static int sa1111_resume(struct platform_device *dev)
952 id = sa1111_readl(sachip->base + SA1111_SKID); 1026 id = sa1111_readl(sachip->base + SA1111_SKID);
953 if ((id & SKID_ID_MASK) != SKID_SA1111_ID) { 1027 if ((id & SKID_ID_MASK) != SKID_SA1111_ID) {
954 __sa1111_remove(sachip); 1028 __sa1111_remove(sachip);
955 platform_set_drvdata(dev, NULL); 1029 dev_set_drvdata(dev, NULL);
956 kfree(save); 1030 kfree(save);
957 return 0; 1031 return 0;
958 } 1032 }
@@ -1003,8 +1077,8 @@ static int sa1111_resume(struct platform_device *dev)
1003} 1077}
1004 1078
1005#else 1079#else
1006#define sa1111_suspend NULL 1080#define sa1111_suspend_noirq NULL
1007#define sa1111_resume NULL 1081#define sa1111_resume_noirq NULL
1008#endif 1082#endif
1009 1083
1010static int sa1111_probe(struct platform_device *pdev) 1084static int sa1111_probe(struct platform_device *pdev)
@@ -1017,7 +1091,7 @@ static int sa1111_probe(struct platform_device *pdev)
1017 return -EINVAL; 1091 return -EINVAL;
1018 irq = platform_get_irq(pdev, 0); 1092 irq = platform_get_irq(pdev, 0);
1019 if (irq < 0) 1093 if (irq < 0)
1020 return -ENXIO; 1094 return irq;
1021 1095
1022 return __sa1111_probe(&pdev->dev, mem, irq); 1096 return __sa1111_probe(&pdev->dev, mem, irq);
1023} 1097}
@@ -1038,6 +1112,11 @@ static int sa1111_remove(struct platform_device *pdev)
1038 return 0; 1112 return 0;
1039} 1113}
1040 1114
1115static struct dev_pm_ops sa1111_pm_ops = {
1116 .suspend_noirq = sa1111_suspend_noirq,
1117 .resume_noirq = sa1111_resume_noirq,
1118};
1119
1041/* 1120/*
1042 * Not sure if this should be on the system bus or not yet. 1121 * Not sure if this should be on the system bus or not yet.
1043 * We really want some way to register a system device at 1122 * We really want some way to register a system device at
@@ -1050,10 +1129,9 @@ static int sa1111_remove(struct platform_device *pdev)
1050static struct platform_driver sa1111_device_driver = { 1129static struct platform_driver sa1111_device_driver = {
1051 .probe = sa1111_probe, 1130 .probe = sa1111_probe,
1052 .remove = sa1111_remove, 1131 .remove = sa1111_remove,
1053 .suspend = sa1111_suspend,
1054 .resume = sa1111_resume,
1055 .driver = { 1132 .driver = {
1056 .name = "sa1111", 1133 .name = "sa1111",
1134 .pm = &sa1111_pm_ops,
1057 }, 1135 },
1058}; 1136};
1059 1137
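[These two hunks replace the legacy platform_driver .suspend/.resume hooks with dev_pm_ops, moving the callbacks to the noirq phase, which runs after device interrupts have been disabled. A sketch of the same conversion for a hypothetical driver; the SET_NOIRQ_SYSTEM_SLEEP_PM_OPS() helper would additionally wire up the freeze/thaw hibernation callbacks:]

static int example_suspend_noirq(struct device *dev)
{
        /* runs with device interrupts already disabled */
        return 0;
}

static int example_resume_noirq(struct device *dev)
{
        return 0;
}

static const struct dev_pm_ops example_pm_ops = {
        SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(example_suspend_noirq,
                                      example_resume_noirq)
};

static struct platform_driver example_driver = {
        .driver = {
                .name   = "example",
                .pm     = &example_pm_ops,
        },
};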
@@ -1279,6 +1357,14 @@ void sa1111_disable_device(struct sa1111_dev *sadev)
1279} 1357}
1280EXPORT_SYMBOL(sa1111_disable_device); 1358EXPORT_SYMBOL(sa1111_disable_device);
1281 1359
1360int sa1111_get_irq(struct sa1111_dev *sadev, unsigned num)
1361{
1362 if (num >= ARRAY_SIZE(sadev->irq))
1363 return -EINVAL;
1364 return sadev->irq[num];
1365}
1366EXPORT_SYMBOL_GPL(sa1111_get_irq);
1367
1282/* 1368/*
1283 * SA1111 "Register Access Bus." 1369 * SA1111 "Register Access Bus."
1284 * 1370 *
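[The new accessor lets SA-1111 sub-device drivers stop poking sadev->irq[] directly and gives them a proper error return for an out-of-range index. A minimal consumer sketch, with example_irq_handler standing in for a real handler:]

static int example_attach(struct sa1111_dev *sadev)
{
        int irq = sa1111_get_irq(sadev, 0);

        if (irq < 0)
                return irq;

        return request_irq(irq, example_irq_handler, 0,
                           "example", sadev);
}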
@@ -1287,7 +1373,7 @@ EXPORT_SYMBOL(sa1111_disable_device);
1287 */ 1373 */
1288static int sa1111_match(struct device *_dev, struct device_driver *_drv) 1374static int sa1111_match(struct device *_dev, struct device_driver *_drv)
1289{ 1375{
1290 struct sa1111_dev *dev = SA1111_DEV(_dev); 1376 struct sa1111_dev *dev = to_sa1111_device(_dev);
1291 struct sa1111_driver *drv = SA1111_DRV(_drv); 1377 struct sa1111_driver *drv = SA1111_DRV(_drv);
1292 1378
1293 return !!(dev->devid & drv->devid); 1379 return !!(dev->devid & drv->devid);
@@ -1295,7 +1381,7 @@ static int sa1111_match(struct device *_dev, struct device_driver *_drv)
1295 1381
1296static int sa1111_bus_suspend(struct device *dev, pm_message_t state) 1382static int sa1111_bus_suspend(struct device *dev, pm_message_t state)
1297{ 1383{
1298 struct sa1111_dev *sadev = SA1111_DEV(dev); 1384 struct sa1111_dev *sadev = to_sa1111_device(dev);
1299 struct sa1111_driver *drv = SA1111_DRV(dev->driver); 1385 struct sa1111_driver *drv = SA1111_DRV(dev->driver);
1300 int ret = 0; 1386 int ret = 0;
1301 1387
@@ -1306,7 +1392,7 @@ static int sa1111_bus_suspend(struct device *dev, pm_message_t state)
1306 1392
1307static int sa1111_bus_resume(struct device *dev) 1393static int sa1111_bus_resume(struct device *dev)
1308{ 1394{
1309 struct sa1111_dev *sadev = SA1111_DEV(dev); 1395 struct sa1111_dev *sadev = to_sa1111_device(dev);
1310 struct sa1111_driver *drv = SA1111_DRV(dev->driver); 1396 struct sa1111_driver *drv = SA1111_DRV(dev->driver);
1311 int ret = 0; 1397 int ret = 0;
1312 1398
@@ -1320,12 +1406,12 @@ static void sa1111_bus_shutdown(struct device *dev)
1320 struct sa1111_driver *drv = SA1111_DRV(dev->driver); 1406 struct sa1111_driver *drv = SA1111_DRV(dev->driver);
1321 1407
1322 if (drv && drv->shutdown) 1408 if (drv && drv->shutdown)
1323 drv->shutdown(SA1111_DEV(dev)); 1409 drv->shutdown(to_sa1111_device(dev));
1324} 1410}
1325 1411
1326static int sa1111_bus_probe(struct device *dev) 1412static int sa1111_bus_probe(struct device *dev)
1327{ 1413{
1328 struct sa1111_dev *sadev = SA1111_DEV(dev); 1414 struct sa1111_dev *sadev = to_sa1111_device(dev);
1329 struct sa1111_driver *drv = SA1111_DRV(dev->driver); 1415 struct sa1111_driver *drv = SA1111_DRV(dev->driver);
1330 int ret = -ENODEV; 1416 int ret = -ENODEV;
1331 1417
@@ -1336,7 +1422,7 @@ static int sa1111_bus_probe(struct device *dev)
1336 1422
1337static int sa1111_bus_remove(struct device *dev) 1423static int sa1111_bus_remove(struct device *dev)
1338{ 1424{
1339 struct sa1111_dev *sadev = SA1111_DEV(dev); 1425 struct sa1111_dev *sadev = to_sa1111_device(dev);
1340 struct sa1111_driver *drv = SA1111_DRV(dev->driver); 1426 struct sa1111_driver *drv = SA1111_DRV(dev->driver);
1341 int ret = 0; 1427 int ret = 0;
1342 1428
@@ -1401,7 +1487,7 @@ static int sa1111_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
1401static int sa1111_notifier_call(struct notifier_block *n, unsigned long action, 1487static int sa1111_notifier_call(struct notifier_block *n, unsigned long action,
1402 void *data) 1488 void *data)
1403{ 1489{
1404 struct sa1111_dev *dev = SA1111_DEV(data); 1490 struct sa1111_dev *dev = to_sa1111_device(data);
1405 1491
1406 switch (action) { 1492 switch (action) {
1407 case BUS_NOTIFY_ADD_DEVICE: 1493 case BUS_NOTIFY_ADD_DEVICE:
diff --git a/arch/arm/configs/aspeed_g4_defconfig b/arch/arm/configs/aspeed_g4_defconfig
index b6e54ee9bdbd..ca39c04fec6b 100644
--- a/arch/arm/configs/aspeed_g4_defconfig
+++ b/arch/arm/configs/aspeed_g4_defconfig
@@ -58,7 +58,7 @@ CONFIG_SERIAL_OF_PLATFORM=y
58# CONFIG_IOMMU_SUPPORT is not set 58# CONFIG_IOMMU_SUPPORT is not set
59CONFIG_FIRMWARE_MEMMAP=y 59CONFIG_FIRMWARE_MEMMAP=y
60CONFIG_FANOTIFY=y 60CONFIG_FANOTIFY=y
61CONFIG_PRINTK_TIME=1 61CONFIG_PRINTK_TIME=y
62CONFIG_DYNAMIC_DEBUG=y 62CONFIG_DYNAMIC_DEBUG=y
63CONFIG_STRIP_ASM_SYMS=y 63CONFIG_STRIP_ASM_SYMS=y
64CONFIG_PAGE_POISONING=y 64CONFIG_PAGE_POISONING=y
diff --git a/arch/arm/configs/aspeed_g5_defconfig b/arch/arm/configs/aspeed_g5_defconfig
index 892605167357..4f366b0370e9 100644
--- a/arch/arm/configs/aspeed_g5_defconfig
+++ b/arch/arm/configs/aspeed_g5_defconfig
@@ -59,7 +59,7 @@ CONFIG_SERIAL_OF_PLATFORM=y
59# CONFIG_IOMMU_SUPPORT is not set 59# CONFIG_IOMMU_SUPPORT is not set
60CONFIG_FIRMWARE_MEMMAP=y 60CONFIG_FIRMWARE_MEMMAP=y
61CONFIG_FANOTIFY=y 61CONFIG_FANOTIFY=y
62CONFIG_PRINTK_TIME=1 62CONFIG_PRINTK_TIME=y
63CONFIG_DYNAMIC_DEBUG=y 63CONFIG_DYNAMIC_DEBUG=y
64CONFIG_STRIP_ASM_SYMS=y 64CONFIG_STRIP_ASM_SYMS=y
65CONFIG_PAGE_POISONING=y 65CONFIG_PAGE_POISONING=y
diff --git a/arch/arm/configs/keystone_defconfig b/arch/arm/configs/keystone_defconfig
index 71b42e66488a..78cd2f197e01 100644
--- a/arch/arm/configs/keystone_defconfig
+++ b/arch/arm/configs/keystone_defconfig
@@ -161,6 +161,7 @@ CONFIG_USB_MON=y
161CONFIG_USB_XHCI_HCD=y 161CONFIG_USB_XHCI_HCD=y
162CONFIG_USB_STORAGE=y 162CONFIG_USB_STORAGE=y
163CONFIG_USB_DWC3=y 163CONFIG_USB_DWC3=y
164CONFIG_NOP_USB_XCEIV=y
164CONFIG_KEYSTONE_USB_PHY=y 165CONFIG_KEYSTONE_USB_PHY=y
165CONFIG_NEW_LEDS=y 166CONFIG_NEW_LEDS=y
166CONFIG_LEDS_CLASS=y 167CONFIG_LEDS_CLASS=y
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index 2c8665cd9dc5..ea3566fb92e2 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -781,7 +781,7 @@ CONFIG_MXS_DMA=y
781CONFIG_DMA_BCM2835=y 781CONFIG_DMA_BCM2835=y
782CONFIG_DMA_OMAP=y 782CONFIG_DMA_OMAP=y
783CONFIG_QCOM_BAM_DMA=y 783CONFIG_QCOM_BAM_DMA=y
784CONFIG_XILINX_VDMA=y 784CONFIG_XILINX_DMA=y
785CONFIG_DMA_SUN6I=y 785CONFIG_DMA_SUN6I=y
786CONFIG_STAGING=y 786CONFIG_STAGING=y
787CONFIG_SENSORS_ISL29018=y 787CONFIG_SENSORS_ISL29018=y
diff --git a/arch/arm/include/asm/hardware/sa1111.h b/arch/arm/include/asm/hardware/sa1111.h
index 7c2bbc7f0be1..8979fa3bbf2d 100644
--- a/arch/arm/include/asm/hardware/sa1111.h
+++ b/arch/arm/include/asm/hardware/sa1111.h
@@ -420,7 +420,7 @@ struct sa1111_dev {
420 u64 dma_mask; 420 u64 dma_mask;
421}; 421};
422 422
423#define SA1111_DEV(_d) container_of((_d), struct sa1111_dev, dev) 423#define to_sa1111_device(x) container_of(x, struct sa1111_dev, dev)
424 424
425#define sa1111_get_drvdata(d) dev_get_drvdata(&(d)->dev) 425#define sa1111_get_drvdata(d) dev_get_drvdata(&(d)->dev)
426#define sa1111_set_drvdata(d,p) dev_set_drvdata(&(d)->dev, p) 426#define sa1111_set_drvdata(d,p) dev_set_drvdata(&(d)->dev, p)
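[The SA1111_DEV() -> to_sa1111_device() rename is purely mechanical, following the usual to_<type>_device() convention; the expansion is unchanged:]

/* recover the outer sa1111_dev from its embedded struct device */
struct sa1111_dev *sadev = to_sa1111_device(_dev);
        /* == container_of(_dev, struct sa1111_dev, dev) */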
@@ -446,6 +446,8 @@ struct sa1111_driver {
446int sa1111_enable_device(struct sa1111_dev *); 446int sa1111_enable_device(struct sa1111_dev *);
447void sa1111_disable_device(struct sa1111_dev *); 447void sa1111_disable_device(struct sa1111_dev *);
448 448
449int sa1111_get_irq(struct sa1111_dev *, unsigned num);
450
449unsigned int sa1111_pll_clock(struct sa1111_dev *); 451unsigned int sa1111_pll_clock(struct sa1111_dev *);
450 452
451#define SA1111_AUDIO_ACLINK 0 453#define SA1111_AUDIO_ACLINK 0
diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
index d0131ee6f6af..3f82e9da7cec 100644
--- a/arch/arm/include/asm/pgtable-2level-hwdef.h
+++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
@@ -47,6 +47,7 @@
47#define PMD_SECT_WB (PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE) 47#define PMD_SECT_WB (PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE)
48#define PMD_SECT_MINICACHE (PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE) 48#define PMD_SECT_MINICACHE (PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE)
49#define PMD_SECT_WBWA (PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE) 49#define PMD_SECT_WBWA (PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE)
50#define PMD_SECT_CACHE_MASK (PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE)
50#define PMD_SECT_NONSHARED_DEV (PMD_SECT_TEX(2)) 51#define PMD_SECT_NONSHARED_DEV (PMD_SECT_TEX(2))
51 52
52/* 53/*
diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h
index f8f1cff62065..4cd664abfcd3 100644
--- a/arch/arm/include/asm/pgtable-3level-hwdef.h
+++ b/arch/arm/include/asm/pgtable-3level-hwdef.h
@@ -62,6 +62,7 @@
62#define PMD_SECT_WT (_AT(pmdval_t, 2) << 2) /* normal inner write-through */ 62#define PMD_SECT_WT (_AT(pmdval_t, 2) << 2) /* normal inner write-through */
63#define PMD_SECT_WB (_AT(pmdval_t, 3) << 2) /* normal inner write-back */ 63#define PMD_SECT_WB (_AT(pmdval_t, 3) << 2) /* normal inner write-back */
64#define PMD_SECT_WBWA (_AT(pmdval_t, 7) << 2) /* normal inner write-alloc */ 64#define PMD_SECT_WBWA (_AT(pmdval_t, 7) << 2) /* normal inner write-alloc */
65#define PMD_SECT_CACHE_MASK (_AT(pmdval_t, 7) << 2)
65 66
66/* 67/*
67 * + Level 3 descriptor (PTE) 68 * + Level 3 descriptor (PTE)
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 62a6f65029e6..a93c0f99acf7 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -480,7 +480,10 @@ arm_copy_from_user(void *to, const void __user *from, unsigned long n);
480static inline unsigned long __must_check 480static inline unsigned long __must_check
481__copy_from_user(void *to, const void __user *from, unsigned long n) 481__copy_from_user(void *to, const void __user *from, unsigned long n)
482{ 482{
483 unsigned int __ua_flags = uaccess_save_and_enable(); 483 unsigned int __ua_flags;
484
485 check_object_size(to, n, false);
486 __ua_flags = uaccess_save_and_enable();
484 n = arm_copy_from_user(to, from, n); 487 n = arm_copy_from_user(to, from, n);
485 uaccess_restore(__ua_flags); 488 uaccess_restore(__ua_flags);
486 return n; 489 return n;
@@ -495,11 +498,15 @@ static inline unsigned long __must_check
495__copy_to_user(void __user *to, const void *from, unsigned long n) 498__copy_to_user(void __user *to, const void *from, unsigned long n)
496{ 499{
497#ifndef CONFIG_UACCESS_WITH_MEMCPY 500#ifndef CONFIG_UACCESS_WITH_MEMCPY
498 unsigned int __ua_flags = uaccess_save_and_enable(); 501 unsigned int __ua_flags;
502
503 check_object_size(from, n, true);
504 __ua_flags = uaccess_save_and_enable();
499 n = arm_copy_to_user(to, from, n); 505 n = arm_copy_to_user(to, from, n);
500 uaccess_restore(__ua_flags); 506 uaccess_restore(__ua_flags);
501 return n; 507 return n;
502#else 508#else
509 check_object_size(from, n, true);
503 return arm_copy_to_user(to, from, n); 510 return arm_copy_to_user(to, from, n);
504#endif 511#endif
505} 512}
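[check_object_size() is the hardened-usercopy hook: a no-op unless CONFIG_HARDENED_USERCOPY is enabled, in which case it verifies that the kernel side of the copy spans a single sane object (slab bounds, stack frame, not kernel text) before the copy proceeds. Roughly, from include/linux/thread_info.h:]

static __always_inline void check_object_size(const void *ptr,
                                              unsigned long n, bool to_user)
{
        if (!__builtin_constant_p(n))
                __check_object_size(ptr, n, to_user);  /* mm/usercopy.c */
}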
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index bc5f50799d75..9f157e7c51e7 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -295,6 +295,7 @@ __und_svc_fault:
295 bl __und_fault 295 bl __und_fault
296 296
297__und_svc_finish: 297__und_svc_finish:
298 get_thread_info tsk
298 ldr r5, [sp, #S_PSR] @ Get SVC cpsr 299 ldr r5, [sp, #S_PSR] @ Get SVC cpsr
299 svc_exit r5 @ return from exception 300 svc_exit r5 @ return from exception
300 UNWIND(.fnend ) 301 UNWIND(.fnend )
diff --git a/arch/arm/kernel/hyp-stub.S b/arch/arm/kernel/hyp-stub.S
index 0b1e4a93d67e..15d073ae5da2 100644
--- a/arch/arm/kernel/hyp-stub.S
+++ b/arch/arm/kernel/hyp-stub.S
@@ -142,6 +142,19 @@ ARM_BE8(orr r7, r7, #(1 << 25)) @ HSCTLR.EE
142 and r7, #0x1f @ Preserve HPMN 142 and r7, #0x1f @ Preserve HPMN
143 mcr p15, 4, r7, c1, c1, 1 @ HDCR 143 mcr p15, 4, r7, c1, c1, 1 @ HDCR
144 144
145 @ Make sure NS-SVC is initialised appropriately
146 mrc p15, 0, r7, c1, c0, 0 @ SCTLR
147 orr r7, #(1 << 5) @ CP15 barriers enabled
148 bic r7, #(3 << 7) @ Clear SED/ITD for v8 (RES0 for v7)
149 bic r7, #(3 << 19) @ WXN and UWXN disabled
150 mcr p15, 0, r7, c1, c0, 0 @ SCTLR
151
152 mrc p15, 0, r7, c0, c0, 0 @ MIDR
153 mcr p15, 4, r7, c0, c0, 0 @ VPIDR
154
155 mrc p15, 0, r7, c0, c0, 5 @ MPIDR
156 mcr p15, 4, r7, c0, c0, 5 @ VMPIDR
157
145#if !defined(ZIMAGE) && defined(CONFIG_ARM_ARCH_TIMER) 158#if !defined(ZIMAGE) && defined(CONFIG_ARM_ARCH_TIMER)
146 @ make CNTP_* and CNTPCT accessible from PL1 159 @ make CNTP_* and CNTPCT accessible from PL1
147 mrc p15, 0, r7, c0, c1, 1 @ ID_PFR1 160 mrc p15, 0, r7, c0, c1, 1 @ ID_PFR1
diff --git a/arch/arm/kernel/sys_oabi-compat.c b/arch/arm/kernel/sys_oabi-compat.c
index 087acb569b63..5f221acd21ae 100644
--- a/arch/arm/kernel/sys_oabi-compat.c
+++ b/arch/arm/kernel/sys_oabi-compat.c
@@ -279,8 +279,12 @@ asmlinkage long sys_oabi_epoll_wait(int epfd,
279 mm_segment_t fs; 279 mm_segment_t fs;
280 long ret, err, i; 280 long ret, err, i;
281 281
282 if (maxevents <= 0 || maxevents > (INT_MAX/sizeof(struct epoll_event))) 282 if (maxevents <= 0 ||
283 maxevents > (INT_MAX/sizeof(*kbuf)) ||
284 maxevents > (INT_MAX/sizeof(*events)))
283 return -EINVAL; 285 return -EINVAL;
286 if (!access_ok(VERIFY_WRITE, events, sizeof(*events) * maxevents))
287 return -EFAULT;
284 kbuf = kmalloc(sizeof(*kbuf) * maxevents, GFP_KERNEL); 288 kbuf = kmalloc(sizeof(*kbuf) * maxevents, GFP_KERNEL);
285 if (!kbuf) 289 if (!kbuf)
286 return -ENOMEM; 290 return -ENOMEM;
@@ -317,6 +321,8 @@ asmlinkage long sys_oabi_semtimedop(int semid,
317 321
318 if (nsops < 1 || nsops > SEMOPM) 322 if (nsops < 1 || nsops > SEMOPM)
319 return -EINVAL; 323 return -EINVAL;
324 if (!access_ok(VERIFY_READ, tsops, sizeof(*tsops) * nsops))
325 return -EFAULT;
320 sops = kmalloc(sizeof(*sops) * nsops, GFP_KERNEL); 326 sops = kmalloc(sizeof(*sops) * nsops, GFP_KERNEL);
321 if (!sops) 327 if (!sops)
322 return -ENOMEM; 328 return -ENOMEM;
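[Both hunks apply the same hardening pattern: bound the user-supplied element count before it feeds a kmalloc() size calculation, and probe the whole user range up front so a bogus pointer fails fast with -EFAULT before any allocation. In outline (names hypothetical; access_ok() here takes VERIFY_READ/VERIFY_WRITE in this kernel era):]

if (count <= 0 || count > INT_MAX / sizeof(*kbuf))
        return -EINVAL;
if (!access_ok(VERIFY_READ, uptr, sizeof(*uptr) * count))
        return -EFAULT;
kbuf = kmalloc(sizeof(*kbuf) * count, GFP_KERNEL);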
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index d94bb9093ead..c94b90d43772 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -158,8 +158,6 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
158{ 158{
159 int i; 159 int i;
160 160
161 kvm_free_stage2_pgd(kvm);
162
163 for (i = 0; i < KVM_MAX_VCPUS; ++i) { 161 for (i = 0; i < KVM_MAX_VCPUS; ++i) {
164 if (kvm->vcpus[i]) { 162 if (kvm->vcpus[i]) {
165 kvm_arch_vcpu_free(kvm->vcpus[i]); 163 kvm_arch_vcpu_free(kvm->vcpus[i]);
@@ -1009,9 +1007,13 @@ long kvm_arch_vm_ioctl(struct file *filp,
1009 1007
1010 switch (ioctl) { 1008 switch (ioctl) {
1011 case KVM_CREATE_IRQCHIP: { 1009 case KVM_CREATE_IRQCHIP: {
1010 int ret;
1012 if (!vgic_present) 1011 if (!vgic_present)
1013 return -ENXIO; 1012 return -ENXIO;
1014 return kvm_vgic_create(kvm, KVM_DEV_TYPE_ARM_VGIC_V2); 1013 mutex_lock(&kvm->lock);
1014 ret = kvm_vgic_create(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
1015 mutex_unlock(&kvm->lock);
1016 return ret;
1015 } 1017 }
1016 case KVM_ARM_SET_DEVICE_ADDR: { 1018 case KVM_ARM_SET_DEVICE_ADDR: {
1017 struct kvm_arm_device_addr dev_addr; 1019 struct kvm_arm_device_addr dev_addr;
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index bda27b6b1aa2..e9a5c0e0c115 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -1309,7 +1309,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
1309 smp_rmb(); 1309 smp_rmb();
1310 1310
1311 pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable); 1311 pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable);
1312 if (is_error_pfn(pfn)) 1312 if (is_error_noslot_pfn(pfn))
1313 return -EFAULT; 1313 return -EFAULT;
1314 1314
1315 if (kvm_is_device_pfn(pfn)) { 1315 if (kvm_is_device_pfn(pfn)) {
@@ -1714,7 +1714,8 @@ int kvm_mmu_init(void)
1714 kern_hyp_va(PAGE_OFFSET), kern_hyp_va(~0UL)); 1714 kern_hyp_va(PAGE_OFFSET), kern_hyp_va(~0UL));
1715 1715
1716 if (hyp_idmap_start >= kern_hyp_va(PAGE_OFFSET) && 1716 if (hyp_idmap_start >= kern_hyp_va(PAGE_OFFSET) &&
1717 hyp_idmap_start < kern_hyp_va(~0UL)) { 1717 hyp_idmap_start < kern_hyp_va(~0UL) &&
1718 hyp_idmap_start != (unsigned long)__hyp_idmap_text_start) {
1718 /* 1719 /*
1719 * The idmap page is intersecting with the VA space, 1720 * The idmap page is intersecting with the VA space,
1720 * it is not safe to continue further. 1721 * it is not safe to continue further.
@@ -1893,6 +1894,7 @@ void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots)
1893 1894
1894void kvm_arch_flush_shadow_all(struct kvm *kvm) 1895void kvm_arch_flush_shadow_all(struct kvm *kvm)
1895{ 1896{
1897 kvm_free_stage2_pgd(kvm);
1896} 1898}
1897 1899
1898void kvm_arch_flush_shadow_memslot(struct kvm *kvm, 1900void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
diff --git a/arch/arm/mach-clps711x/Kconfig b/arch/arm/mach-clps711x/Kconfig
index dc7c6edeab39..61284b9389cf 100644
--- a/arch/arm/mach-clps711x/Kconfig
+++ b/arch/arm/mach-clps711x/Kconfig
@@ -1,13 +1,13 @@
1menuconfig ARCH_CLPS711X 1menuconfig ARCH_CLPS711X
2 bool "Cirrus Logic EP721x/EP731x-based" 2 bool "Cirrus Logic EP721x/EP731x-based"
3 depends on ARCH_MULTI_V4T 3 depends on ARCH_MULTI_V4T
4 select ARCH_REQUIRE_GPIOLIB
5 select AUTO_ZRELADDR 4 select AUTO_ZRELADDR
6 select CLKSRC_OF 5 select CLKSRC_OF
7 select CLPS711X_TIMER 6 select CLPS711X_TIMER
8 select COMMON_CLK 7 select COMMON_CLK
9 select CPU_ARM720T 8 select CPU_ARM720T
10 select GENERIC_CLOCKEVENTS 9 select GENERIC_CLOCKEVENTS
10 select GPIOLIB
11 select MFD_SYSCON 11 select MFD_SYSCON
12 select OF_IRQ 12 select OF_IRQ
13 select USE_OF 13 select USE_OF
diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c
index 3750575c73c5..06332f626565 100644
--- a/arch/arm/mach-exynos/suspend.c
+++ b/arch/arm/mach-exynos/suspend.c
@@ -255,6 +255,12 @@ static int __init exynos_pmu_irq_init(struct device_node *node,
255 return -ENOMEM; 255 return -ENOMEM;
256 } 256 }
257 257
258 /*
259 * Clear the OF_POPULATED flag set in of_irq_init so that
260 * later the Exynos PMU platform device won't be skipped.
261 */
262 of_node_clear_flag(node, OF_POPULATED);
263
258 return 0; 264 return 0;
259} 265}
260 266
diff --git a/arch/arm/mach-imx/gpc.c b/arch/arm/mach-imx/gpc.c
index fd8720532471..0df062d8b2c9 100644
--- a/arch/arm/mach-imx/gpc.c
+++ b/arch/arm/mach-imx/gpc.c
@@ -271,6 +271,12 @@ static int __init imx_gpc_init(struct device_node *node,
271 for (i = 0; i < IMR_NUM; i++) 271 for (i = 0; i < IMR_NUM; i++)
272 writel_relaxed(~0, gpc_base + GPC_IMR1 + i * 4); 272 writel_relaxed(~0, gpc_base + GPC_IMR1 + i * 4);
273 273
274 /*
275 * Clear the OF_POPULATED flag set in of_irq_init so that
276 * later the GPC power domain driver will not be skipped.
277 */
278 of_node_clear_flag(node, OF_POPULATED);
279
274 return 0; 280 return 0;
275} 281}
276IRQCHIP_DECLARE(imx_gpc, "fsl,imx6q-gpc", imx_gpc_init); 282IRQCHIP_DECLARE(imx_gpc, "fsl,imx6q-gpc", imx_gpc_init);
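[Background for both OF_POPULATED hunks: of_irq_init() marks every matched node OF_POPULATED, and platform-device creation skips flagged nodes, so an irqchip that shares its DT node with a platform driver (the Exynos PMU, the i.MX GPC power domains) must clear the flag again before returning. The skip on the other side, simplified from drivers/of/platform.c:]

if (!of_device_is_available(np) ||
    of_node_test_and_set_flag(np, OF_POPULATED))
        return NULL;    /* node already populated - skipped */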
diff --git a/arch/arm/mach-imx/mach-imx6ul.c b/arch/arm/mach-imx/mach-imx6ul.c
index 5d9bfab279dd..6bb7d9cf1e38 100644
--- a/arch/arm/mach-imx/mach-imx6ul.c
+++ b/arch/arm/mach-imx/mach-imx6ul.c
@@ -64,6 +64,7 @@ static void __init imx6ul_init_machine(void)
64 if (parent == NULL) 64 if (parent == NULL)
65 pr_warn("failed to initialize soc device\n"); 65 pr_warn("failed to initialize soc device\n");
66 66
67 of_platform_default_populate(NULL, NULL, parent);
67 imx6ul_enet_init(); 68 imx6ul_enet_init();
68 imx_anatop_init(); 69 imx_anatop_init();
69 imx6ul_pm_init(); 70 imx6ul_pm_init();
diff --git a/arch/arm/mach-imx/pm-imx6.c b/arch/arm/mach-imx/pm-imx6.c
index 58924b3844df..fe708e26d021 100644
--- a/arch/arm/mach-imx/pm-imx6.c
+++ b/arch/arm/mach-imx/pm-imx6.c
@@ -295,7 +295,7 @@ int imx6_set_lpm(enum mxc_cpu_pwr_mode mode)
295 val &= ~BM_CLPCR_SBYOS; 295 val &= ~BM_CLPCR_SBYOS;
296 if (cpu_is_imx6sl()) 296 if (cpu_is_imx6sl())
297 val |= BM_CLPCR_BYPASS_PMIC_READY; 297 val |= BM_CLPCR_BYPASS_PMIC_READY;
298 if (cpu_is_imx6sl() || cpu_is_imx6sx()) 298 if (cpu_is_imx6sl() || cpu_is_imx6sx() || cpu_is_imx6ul())
299 val |= BM_CLPCR_BYP_MMDC_CH0_LPM_HS; 299 val |= BM_CLPCR_BYP_MMDC_CH0_LPM_HS;
300 else 300 else
301 val |= BM_CLPCR_BYP_MMDC_CH1_LPM_HS; 301 val |= BM_CLPCR_BYP_MMDC_CH1_LPM_HS;
@@ -310,7 +310,7 @@ int imx6_set_lpm(enum mxc_cpu_pwr_mode mode)
310 val |= 0x3 << BP_CLPCR_STBY_COUNT; 310 val |= 0x3 << BP_CLPCR_STBY_COUNT;
311 val |= BM_CLPCR_VSTBY; 311 val |= BM_CLPCR_VSTBY;
312 val |= BM_CLPCR_SBYOS; 312 val |= BM_CLPCR_SBYOS;
313 if (cpu_is_imx6sl()) 313 if (cpu_is_imx6sl() || cpu_is_imx6sx())
314 val |= BM_CLPCR_BYPASS_PMIC_READY; 314 val |= BM_CLPCR_BYPASS_PMIC_READY;
315 if (cpu_is_imx6sl() || cpu_is_imx6sx() || cpu_is_imx6ul()) 315 if (cpu_is_imx6sl() || cpu_is_imx6sx() || cpu_is_imx6ul())
316 val |= BM_CLPCR_BYP_MMDC_CH0_LPM_HS; 316 val |= BM_CLPCR_BYP_MMDC_CH0_LPM_HS;
diff --git a/arch/arm/mach-mvebu/Makefile b/arch/arm/mach-mvebu/Makefile
index e53c6cfcab51..6c6497e80a7b 100644
--- a/arch/arm/mach-mvebu/Makefile
+++ b/arch/arm/mach-mvebu/Makefile
@@ -1,5 +1,4 @@
1ccflags-$(CONFIG_ARCH_MULTIPLATFORM) := -I$(srctree)/$(src)/include \ 1ccflags-$(CONFIG_ARCH_MULTIPLATFORM) := -I$(srctree)/arch/arm/plat-orion/include
2 -I$(srctree)/arch/arm/plat-orion/include
3 2
4AFLAGS_coherency_ll.o := -Wa,-march=armv7-a 3AFLAGS_coherency_ll.o := -Wa,-march=armv7-a
5CFLAGS_pmsu.o := -march=armv7-a 4CFLAGS_pmsu.o := -march=armv7-a
diff --git a/arch/arm/mach-omap2/cm33xx.c b/arch/arm/mach-omap2/cm33xx.c
index c073fb57dd13..6f2d0aec0513 100644
--- a/arch/arm/mach-omap2/cm33xx.c
+++ b/arch/arm/mach-omap2/cm33xx.c
@@ -220,9 +220,6 @@ static int am33xx_cm_wait_module_ready(u8 part, s16 inst, u16 clkctrl_offs,
220{ 220{
221 int i = 0; 221 int i = 0;
222 222
223 if (!clkctrl_offs)
224 return 0;
225
226 omap_test_timeout(_is_module_ready(inst, clkctrl_offs), 223 omap_test_timeout(_is_module_ready(inst, clkctrl_offs),
227 MAX_MODULE_READY_TIME, i); 224 MAX_MODULE_READY_TIME, i);
228 225
@@ -246,9 +243,6 @@ static int am33xx_cm_wait_module_idle(u8 part, s16 inst, u16 clkctrl_offs,
246{ 243{
247 int i = 0; 244 int i = 0;
248 245
249 if (!clkctrl_offs)
250 return 0;
251
252 omap_test_timeout((_clkctrl_idlest(inst, clkctrl_offs) == 246 omap_test_timeout((_clkctrl_idlest(inst, clkctrl_offs) ==
253 CLKCTRL_IDLEST_DISABLED), 247 CLKCTRL_IDLEST_DISABLED),
254 MAX_MODULE_READY_TIME, i); 248 MAX_MODULE_READY_TIME, i);
diff --git a/arch/arm/mach-omap2/cminst44xx.c b/arch/arm/mach-omap2/cminst44xx.c
index 2c0e07ed6b99..2ab27ade136a 100644
--- a/arch/arm/mach-omap2/cminst44xx.c
+++ b/arch/arm/mach-omap2/cminst44xx.c
@@ -278,9 +278,6 @@ static int omap4_cminst_wait_module_ready(u8 part, s16 inst, u16 clkctrl_offs,
278{ 278{
279 int i = 0; 279 int i = 0;
280 280
281 if (!clkctrl_offs)
282 return 0;
283
284 omap_test_timeout(_is_module_ready(part, inst, clkctrl_offs), 281 omap_test_timeout(_is_module_ready(part, inst, clkctrl_offs),
285 MAX_MODULE_READY_TIME, i); 282 MAX_MODULE_READY_TIME, i);
286 283
@@ -304,9 +301,6 @@ static int omap4_cminst_wait_module_idle(u8 part, s16 inst, u16 clkctrl_offs,
304{ 301{
305 int i = 0; 302 int i = 0;
306 303
307 if (!clkctrl_offs)
308 return 0;
309
310 omap_test_timeout((_clkctrl_idlest(part, inst, clkctrl_offs) == 304 omap_test_timeout((_clkctrl_idlest(part, inst, clkctrl_offs) ==
311 CLKCTRL_IDLEST_DISABLED), 305 CLKCTRL_IDLEST_DISABLED),
312 MAX_MODULE_DISABLE_TIME, i); 306 MAX_MODULE_DISABLE_TIME, i);
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index 5b709383381c..1052b29697b8 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -1053,6 +1053,10 @@ static int _omap4_wait_target_disable(struct omap_hwmod *oh)
1053 if (oh->flags & HWMOD_NO_IDLEST) 1053 if (oh->flags & HWMOD_NO_IDLEST)
1054 return 0; 1054 return 0;
1055 1055
1056 if (!oh->prcm.omap4.clkctrl_offs &&
1057 !(oh->prcm.omap4.flags & HWMOD_OMAP4_ZERO_CLKCTRL_OFFSET))
1058 return 0;
1059
1056 return omap_cm_wait_module_idle(oh->clkdm->prcm_partition, 1060 return omap_cm_wait_module_idle(oh->clkdm->prcm_partition,
1057 oh->clkdm->cm_inst, 1061 oh->clkdm->cm_inst,
1058 oh->prcm.omap4.clkctrl_offs, 0); 1062 oh->prcm.omap4.clkctrl_offs, 0);
@@ -2971,6 +2975,10 @@ static int _omap4_wait_target_ready(struct omap_hwmod *oh)
2971 if (!_find_mpu_rt_port(oh)) 2975 if (!_find_mpu_rt_port(oh))
2972 return 0; 2976 return 0;
2973 2977
2978 if (!oh->prcm.omap4.clkctrl_offs &&
2979 !(oh->prcm.omap4.flags & HWMOD_OMAP4_ZERO_CLKCTRL_OFFSET))
2980 return 0;
2981
2974 /* XXX check module SIDLEMODE, hardreset status */ 2982 /* XXX check module SIDLEMODE, hardreset status */
2975 2983
2976 return omap_cm_wait_module_ready(oh->clkdm->prcm_partition, 2984 return omap_cm_wait_module_ready(oh->clkdm->prcm_partition,
diff --git a/arch/arm/mach-omap2/omap_hwmod.h b/arch/arm/mach-omap2/omap_hwmod.h
index 4041bad79a9a..78904017f18c 100644
--- a/arch/arm/mach-omap2/omap_hwmod.h
+++ b/arch/arm/mach-omap2/omap_hwmod.h
@@ -443,8 +443,12 @@ struct omap_hwmod_omap2_prcm {
443 * HWMOD_OMAP4_NO_CONTEXT_LOSS_BIT: Some IP blocks don't have a PRCM 443 * HWMOD_OMAP4_NO_CONTEXT_LOSS_BIT: Some IP blocks don't have a PRCM
444 * module-level context loss register associated with them; this 444 * module-level context loss register associated with them; this
445 * flag bit should be set in those cases 445 * flag bit should be set in those cases
446 * HWMOD_OMAP4_ZERO_CLKCTRL_OFFSET: Some IP blocks have a valid CLKCTRL
447 * offset of zero; this flag bit should be set in those cases to
448 * distinguish from hwmods that have no clkctrl offset.
446 */ 449 */
447#define HWMOD_OMAP4_NO_CONTEXT_LOSS_BIT (1 << 0) 450#define HWMOD_OMAP4_NO_CONTEXT_LOSS_BIT (1 << 0)
451#define HWMOD_OMAP4_ZERO_CLKCTRL_OFFSET (1 << 1)
448 452
449/** 453/**
450 * struct omap_hwmod_omap4_prcm - OMAP4-specific PRCM data 454 * struct omap_hwmod_omap4_prcm - OMAP4-specific PRCM data
diff --git a/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c b/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c
index 55c5878577f4..e2d84aa7f595 100644
--- a/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c
@@ -29,6 +29,7 @@
29#define CLKCTRL(oh, clkctrl) ((oh).prcm.omap4.clkctrl_offs = (clkctrl)) 29#define CLKCTRL(oh, clkctrl) ((oh).prcm.omap4.clkctrl_offs = (clkctrl))
30#define RSTCTRL(oh, rstctrl) ((oh).prcm.omap4.rstctrl_offs = (rstctrl)) 30#define RSTCTRL(oh, rstctrl) ((oh).prcm.omap4.rstctrl_offs = (rstctrl))
31#define RSTST(oh, rstst) ((oh).prcm.omap4.rstst_offs = (rstst)) 31#define RSTST(oh, rstst) ((oh).prcm.omap4.rstst_offs = (rstst))
32#define PRCM_FLAGS(oh, flag) ((oh).prcm.omap4.flags = (flag))
32 33
33/* 34/*
34 * 'l3' class 35 * 'l3' class
@@ -1296,6 +1297,7 @@ static void omap_hwmod_am33xx_clkctrl(void)
1296 CLKCTRL(am33xx_i2c1_hwmod, AM33XX_CM_WKUP_I2C0_CLKCTRL_OFFSET); 1297 CLKCTRL(am33xx_i2c1_hwmod, AM33XX_CM_WKUP_I2C0_CLKCTRL_OFFSET);
1297 CLKCTRL(am33xx_wd_timer1_hwmod, AM33XX_CM_WKUP_WDT1_CLKCTRL_OFFSET); 1298 CLKCTRL(am33xx_wd_timer1_hwmod, AM33XX_CM_WKUP_WDT1_CLKCTRL_OFFSET);
1298 CLKCTRL(am33xx_rtc_hwmod, AM33XX_CM_RTC_RTC_CLKCTRL_OFFSET); 1299 CLKCTRL(am33xx_rtc_hwmod, AM33XX_CM_RTC_RTC_CLKCTRL_OFFSET);
1300 PRCM_FLAGS(am33xx_rtc_hwmod, HWMOD_OMAP4_ZERO_CLKCTRL_OFFSET);
1299 CLKCTRL(am33xx_mmc2_hwmod, AM33XX_CM_PER_MMC2_CLKCTRL_OFFSET); 1301 CLKCTRL(am33xx_mmc2_hwmod, AM33XX_CM_PER_MMC2_CLKCTRL_OFFSET);
1300 CLKCTRL(am33xx_gpmc_hwmod, AM33XX_CM_PER_GPMC_CLKCTRL_OFFSET); 1302 CLKCTRL(am33xx_gpmc_hwmod, AM33XX_CM_PER_GPMC_CLKCTRL_OFFSET);
1301 CLKCTRL(am33xx_l4_ls_hwmod, AM33XX_CM_PER_L4LS_CLKCTRL_OFFSET); 1303 CLKCTRL(am33xx_l4_ls_hwmod, AM33XX_CM_PER_L4LS_CLKCTRL_OFFSET);
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
index d72ee6185d5e..1cc4a6f3954e 100644
--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
@@ -722,8 +722,20 @@ static struct omap_hwmod omap3xxx_dss_dispc_hwmod = {
722 * display serial interface controller 722 * display serial interface controller
723 */ 723 */
724 724
725static struct omap_hwmod_class_sysconfig omap3xxx_dsi_sysc = {
726 .rev_offs = 0x0000,
727 .sysc_offs = 0x0010,
728 .syss_offs = 0x0014,
729 .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_CLOCKACTIVITY |
730 SYSC_HAS_ENAWAKEUP | SYSC_HAS_SIDLEMODE |
731 SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS),
732 .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
733 .sysc_fields = &omap_hwmod_sysc_type1,
734};
735
725static struct omap_hwmod_class omap3xxx_dsi_hwmod_class = { 736static struct omap_hwmod_class omap3xxx_dsi_hwmod_class = {
726 .name = "dsi", 737 .name = "dsi",
738 .sysc = &omap3xxx_dsi_sysc,
727}; 739};
728 740
729static struct omap_hwmod_irq_info omap3xxx_dsi1_irqs[] = { 741static struct omap_hwmod_irq_info omap3xxx_dsi1_irqs[] = {
diff --git a/arch/arm/mach-oxnas/Kconfig b/arch/arm/mach-oxnas/Kconfig
index 567496bd250a..29100beb2e7f 100644
--- a/arch/arm/mach-oxnas/Kconfig
+++ b/arch/arm/mach-oxnas/Kconfig
@@ -11,11 +11,13 @@ if ARCH_OXNAS
11 11
12config MACH_OX810SE 12config MACH_OX810SE
13 bool "Support OX810SE Based Products" 13 bool "Support OX810SE Based Products"
14 select ARCH_HAS_RESET_CONTROLLER
14 select COMMON_CLK_OXNAS 15 select COMMON_CLK_OXNAS
15 select CPU_ARM926T 16 select CPU_ARM926T
16 select MFD_SYSCON 17 select MFD_SYSCON
17 select OXNAS_RPS_TIMER 18 select OXNAS_RPS_TIMER
18 select PINCTRL_OXNAS 19 select PINCTRL_OXNAS
20 select RESET_CONTROLLER
19 select RESET_OXNAS 21 select RESET_OXNAS
20 select VERSATILE_FPGA_IRQ 22 select VERSATILE_FPGA_IRQ
21 help 23 help
diff --git a/arch/arm/mach-pxa/corgi.c b/arch/arm/mach-pxa/corgi.c
index dc109dc3a622..10bfdb169366 100644
--- a/arch/arm/mach-pxa/corgi.c
+++ b/arch/arm/mach-pxa/corgi.c
@@ -13,6 +13,7 @@
13 */ 13 */
14 14
15#include <linux/kernel.h> 15#include <linux/kernel.h>
16#include <linux/module.h> /* symbol_get ; symbol_put */
16#include <linux/init.h> 17#include <linux/init.h>
17#include <linux/platform_device.h> 18#include <linux/platform_device.h>
18#include <linux/major.h> 19#include <linux/major.h>
diff --git a/arch/arm/mach-pxa/idp.c b/arch/arm/mach-pxa/idp.c
index c410d84b243d..66070acaa888 100644
--- a/arch/arm/mach-pxa/idp.c
+++ b/arch/arm/mach-pxa/idp.c
@@ -83,7 +83,8 @@ static struct resource smc91x_resources[] = {
83}; 83};
84 84
85static struct smc91x_platdata smc91x_platdata = { 85static struct smc91x_platdata smc91x_platdata = {
86 .flags = SMC91X_USE_32BIT | SMC91X_USE_DMA | SMC91X_NOWAIT, 86 .flags = SMC91X_USE_8BIT | SMC91X_USE_16BIT | SMC91X_USE_32BIT |
87 SMC91X_USE_DMA | SMC91X_NOWAIT,
87}; 88};
88 89
89static struct platform_device smc91x_device = { 90static struct platform_device smc91x_device = {
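[The smc91x driver now honours the declared access widths strictly instead of guessing what the bus can do, so each board spells out every width its wiring actually supports; the same change repeats for xcep, realview and pleb below. On the driver side the accessor choice keys off these flags, roughly as in drivers/net/ethernet/smsc/smc91x.h:]

#define SMC_8BIT(p)     ((p)->cfg.flags & SMC91X_USE_8BIT)
#define SMC_16BIT(p)    ((p)->cfg.flags & SMC91X_USE_16BIT)
#define SMC_32BIT(p)    ((p)->cfg.flags & SMC91X_USE_32BIT)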
diff --git a/arch/arm/mach-pxa/lubbock.c b/arch/arm/mach-pxa/lubbock.c
index 7245f3359564..d6159f8ef0c2 100644
--- a/arch/arm/mach-pxa/lubbock.c
+++ b/arch/arm/mach-pxa/lubbock.c
@@ -137,6 +137,18 @@ static struct pxa2xx_udc_mach_info udc_info __initdata = {
137 // no D+ pullup; lubbock can't connect/disconnect in software 137 // no D+ pullup; lubbock can't connect/disconnect in software
138}; 138};
139 139
140static void lubbock_init_pcmcia(void)
141{
142 struct clk *clk;
143
144 /* Add an alias for the SA1111 PCMCIA clock */
145 clk = clk_get_sys("pxa2xx-pcmcia", NULL);
146 if (!IS_ERR(clk)) {
147 clkdev_create(clk, NULL, "1800");
148 clk_put(clk);
149 }
150}
151
140static struct resource sa1111_resources[] = { 152static struct resource sa1111_resources[] = {
141 [0] = { 153 [0] = {
142 .start = 0x10000000, 154 .start = 0x10000000,
@@ -467,6 +479,8 @@ static void __init lubbock_init(void)
467 pxa_set_btuart_info(NULL); 479 pxa_set_btuart_info(NULL);
468 pxa_set_stuart_info(NULL); 480 pxa_set_stuart_info(NULL);
469 481
482 lubbock_init_pcmcia();
483
470 clk_add_alias("SA1111_CLK", NULL, "GPIO11_CLK", NULL); 484 clk_add_alias("SA1111_CLK", NULL, "GPIO11_CLK", NULL);
471 pxa_set_udc_info(&udc_info); 485 pxa_set_udc_info(&udc_info);
472 pxa_set_fb_info(NULL, &sharp_lm8v31); 486 pxa_set_fb_info(NULL, &sharp_lm8v31);
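[clkdev_create() above binds the already-registered pxa2xx-pcmcia clock to a second device name: SA1111 sub-devices are named after their register offset, so "1800" is the PCMCIA block. On the consumer side the lookup then resolves as (illustration only):]

/* in the SA1111 PCMCIA driver; dev_name(&sadev->dev) == "1800" */
struct clk *clk = clk_get(&sadev->dev, NULL);

if (IS_ERR(clk))
        return PTR_ERR(clk);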
diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c
index 1080580b1343..2c150bfc0cd5 100644
--- a/arch/arm/mach-pxa/spitz.c
+++ b/arch/arm/mach-pxa/spitz.c
@@ -13,6 +13,7 @@
13 */ 13 */
14 14
15#include <linux/kernel.h> 15#include <linux/kernel.h>
16#include <linux/module.h> /* symbol_get ; symbol_put */
16#include <linux/platform_device.h> 17#include <linux/platform_device.h>
17#include <linux/delay.h> 18#include <linux/delay.h>
18#include <linux/gpio_keys.h> 19#include <linux/gpio_keys.h>
diff --git a/arch/arm/mach-pxa/xcep.c b/arch/arm/mach-pxa/xcep.c
index 3f06cd90567a..056369ef250e 100644
--- a/arch/arm/mach-pxa/xcep.c
+++ b/arch/arm/mach-pxa/xcep.c
@@ -120,7 +120,8 @@ static struct resource smc91x_resources[] = {
120}; 120};
121 121
122static struct smc91x_platdata xcep_smc91x_info = { 122static struct smc91x_platdata xcep_smc91x_info = {
123 .flags = SMC91X_USE_32BIT | SMC91X_NOWAIT | SMC91X_USE_DMA, 123 .flags = SMC91X_USE_8BIT | SMC91X_USE_16BIT | SMC91X_USE_32BIT |
124 SMC91X_NOWAIT | SMC91X_USE_DMA,
124}; 125};
125 126
126static struct platform_device smc91x_device = { 127static struct platform_device smc91x_device = {
diff --git a/arch/arm/mach-realview/Makefile b/arch/arm/mach-realview/Makefile
index dae8d86ef4cc..404882130956 100644
--- a/arch/arm/mach-realview/Makefile
+++ b/arch/arm/mach-realview/Makefile
@@ -1,8 +1,7 @@
1# 1#
2# Makefile for the linux kernel. 2# Makefile for the linux kernel.
3# 3#
4ccflags-$(CONFIG_ARCH_MULTIPLATFORM) := -I$(srctree)/$(src)/include \ 4ccflags-$(CONFIG_ARCH_MULTIPLATFORM) := -I$(srctree)/arch/arm/plat-versatile/include
5 -I$(srctree)/arch/arm/plat-versatile/include
6 5
7obj-y := core.o 6obj-y := core.o
8obj-$(CONFIG_REALVIEW_DT) += realview-dt.o 7obj-$(CONFIG_REALVIEW_DT) += realview-dt.o
diff --git a/arch/arm/mach-realview/core.c b/arch/arm/mach-realview/core.c
index baf174542e36..a0ead0ae23d6 100644
--- a/arch/arm/mach-realview/core.c
+++ b/arch/arm/mach-realview/core.c
@@ -93,7 +93,8 @@ static struct smsc911x_platform_config smsc911x_config = {
93}; 93};
94 94
95static struct smc91x_platdata smc91x_platdata = { 95static struct smc91x_platdata smc91x_platdata = {
96 .flags = SMC91X_USE_32BIT | SMC91X_NOWAIT, 96 .flags = SMC91X_USE_8BIT | SMC91X_USE_16BIT | SMC91X_USE_32BIT |
97 SMC91X_NOWAIT,
97}; 98};
98 99
99static struct platform_device realview_eth_device = { 100static struct platform_device realview_eth_device = {
diff --git a/arch/arm/mach-s5pv210/Makefile b/arch/arm/mach-s5pv210/Makefile
index 72b9e9671507..fa7fb716e388 100644
--- a/arch/arm/mach-s5pv210/Makefile
+++ b/arch/arm/mach-s5pv210/Makefile
@@ -5,7 +5,7 @@
5# 5#
6# Licensed under GPLv2 6# Licensed under GPLv2
7 7
8ccflags-$(CONFIG_ARCH_MULTIPLATFORM) += -I$(srctree)/$(src)/include -I$(srctree)/arch/arm/plat-samsung/include 8ccflags-$(CONFIG_ARCH_MULTIPLATFORM) += -I$(srctree)/arch/arm/plat-samsung/include
9 9
10# Core 10# Core
11 11
diff --git a/arch/arm/mach-sa1100/clock.c b/arch/arm/mach-sa1100/clock.c
index cbf53bb9c814..0db46895c82a 100644
--- a/arch/arm/mach-sa1100/clock.c
+++ b/arch/arm/mach-sa1100/clock.c
@@ -125,6 +125,8 @@ static unsigned long clk_36864_get_rate(struct clk *clk)
125} 125}
126 126
127static struct clkops clk_36864_ops = { 127static struct clkops clk_36864_ops = {
128 .enable = clk_cpu_enable,
129 .disable = clk_cpu_disable,
128 .get_rate = clk_36864_get_rate, 130 .get_rate = clk_36864_get_rate,
129}; 131};
130 132
@@ -140,9 +142,8 @@ static struct clk_lookup sa11xx_clkregs[] = {
140 CLKDEV_INIT(NULL, "OSTIMER0", &clk_36864), 142 CLKDEV_INIT(NULL, "OSTIMER0", &clk_36864),
141}; 143};
142 144
143static int __init sa11xx_clk_init(void) 145int __init sa11xx_clk_init(void)
144{ 146{
145 clkdev_add_table(sa11xx_clkregs, ARRAY_SIZE(sa11xx_clkregs)); 147 clkdev_add_table(sa11xx_clkregs, ARRAY_SIZE(sa11xx_clkregs));
146 return 0; 148 return 0;
147} 149}
148core_initcall(sa11xx_clk_init);
diff --git a/arch/arm/mach-sa1100/generic.c b/arch/arm/mach-sa1100/generic.c
index 345e63f4eb71..3e09beddb6e8 100644
--- a/arch/arm/mach-sa1100/generic.c
+++ b/arch/arm/mach-sa1100/generic.c
@@ -34,6 +34,7 @@
34 34
35#include <mach/hardware.h> 35#include <mach/hardware.h>
36#include <mach/irqs.h> 36#include <mach/irqs.h>
37#include <mach/reset.h>
37 38
38#include "generic.h" 39#include "generic.h"
39#include <clocksource/pxa.h> 40#include <clocksource/pxa.h>
@@ -95,6 +96,8 @@ static void sa1100_power_off(void)
95 96
96void sa11x0_restart(enum reboot_mode mode, const char *cmd) 97void sa11x0_restart(enum reboot_mode mode, const char *cmd)
97{ 98{
99 clear_reset_status(RESET_STATUS_ALL);
100
98 if (mode == REBOOT_SOFT) { 101 if (mode == REBOOT_SOFT) {
99 /* Jump into ROM at address 0 */ 102 /* Jump into ROM at address 0 */
100 soft_restart(0); 103 soft_restart(0);
@@ -388,6 +391,7 @@ void __init sa1100_init_irq(void)
388 sa11x0_init_irq_nodt(IRQ_GPIO0_SC, irq_resource.start); 391 sa11x0_init_irq_nodt(IRQ_GPIO0_SC, irq_resource.start);
389 392
390 sa1100_init_gpio(); 393 sa1100_init_gpio();
394 sa11xx_clk_init();
391} 395}
392 396
393/* 397/*
diff --git a/arch/arm/mach-sa1100/generic.h b/arch/arm/mach-sa1100/generic.h
index 0d92e119b36b..68199b603ff7 100644
--- a/arch/arm/mach-sa1100/generic.h
+++ b/arch/arm/mach-sa1100/generic.h
@@ -44,3 +44,5 @@ int sa11x0_pm_init(void);
44#else 44#else
45static inline int sa11x0_pm_init(void) { return 0; } 45static inline int sa11x0_pm_init(void) { return 0; }
46#endif 46#endif
47
48int sa11xx_clk_init(void);
diff --git a/arch/arm/mach-sa1100/pleb.c b/arch/arm/mach-sa1100/pleb.c
index 1525d7b5f1b7..88149f85bc49 100644
--- a/arch/arm/mach-sa1100/pleb.c
+++ b/arch/arm/mach-sa1100/pleb.c
@@ -45,7 +45,7 @@ static struct resource smc91x_resources[] = {
45}; 45};
46 46
47static struct smc91x_platdata smc91x_platdata = { 47static struct smc91x_platdata smc91x_platdata = {
48 .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT, 48 .flags = SMC91X_USE_16BIT | SMC91X_USE_8BIT | SMC91X_NOWAIT,
49}; 49};
50 50
51static struct platform_device smc91x_device = { 51static struct platform_device smc91x_device = {
diff --git a/arch/arm/mach-shmobile/platsmp.c b/arch/arm/mach-shmobile/platsmp.c
index f3dba6f356e2..02e21bceb085 100644
--- a/arch/arm/mach-shmobile/platsmp.c
+++ b/arch/arm/mach-shmobile/platsmp.c
@@ -40,5 +40,8 @@ bool shmobile_smp_cpu_can_disable(unsigned int cpu)
40bool __init shmobile_smp_init_fallback_ops(void) 40bool __init shmobile_smp_init_fallback_ops(void)
41{ 41{
42 /* fallback on PSCI/smp_ops if no other DT based method is detected */ 42 /* fallback on PSCI/smp_ops if no other DT based method is detected */
43 if (!IS_ENABLED(CONFIG_SMP))
44 return false;
45
43 return platform_can_secondary_boot() ? true : false; 46 return platform_can_secondary_boot() ? true : false;
44} 47}
diff --git a/arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c b/arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c
index 62437b57813e..73e3adbc1330 100644
--- a/arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c
+++ b/arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c
@@ -41,39 +41,26 @@
41 41
42#define REGULATOR_IRQ_MASK BIT(2) /* IRQ2, active low */ 42#define REGULATOR_IRQ_MASK BIT(2) /* IRQ2, active low */
43 43
44static void __iomem *irqc; 44/* start of DA9210 System Control and Event Registers */
45
46static const u8 da9063_mask_regs[] = {
47 DA9063_REG_IRQ_MASK_A,
48 DA9063_REG_IRQ_MASK_B,
49 DA9063_REG_IRQ_MASK_C,
50 DA9063_REG_IRQ_MASK_D,
51};
52
53/* DA9210 System Control and Event Registers */
54#define DA9210_REG_MASK_A 0x54 45#define DA9210_REG_MASK_A 0x54
55#define DA9210_REG_MASK_B 0x55
56
57static const u8 da9210_mask_regs[] = {
58 DA9210_REG_MASK_A,
59 DA9210_REG_MASK_B,
60};
61
62static void da9xxx_mask_irqs(struct i2c_client *client, const u8 regs[],
63 unsigned int nregs)
64{
65 unsigned int i;
66 46
67 dev_info(&client->dev, "Masking %s interrupt sources\n", client->name); 47static void __iomem *irqc;
68 48
69 for (i = 0; i < nregs; i++) { 49/* first byte sets the memory pointer, following are consecutive reg values */
70 int error = i2c_smbus_write_byte_data(client, regs[i], ~0); 50static u8 da9063_irq_clr[] = { DA9063_REG_IRQ_MASK_A, 0xff, 0xff, 0xff, 0xff };
71 if (error) { 51static u8 da9210_irq_clr[] = { DA9210_REG_MASK_A, 0xff, 0xff };
72 dev_err(&client->dev, "i2c error %d\n", error); 52
73 return; 53static struct i2c_msg da9xxx_msgs[2] = {
74 } 54 {
75 } 55 .addr = 0x58,
76} 56 .len = ARRAY_SIZE(da9063_irq_clr),
57 .buf = da9063_irq_clr,
58 }, {
59 .addr = 0x68,
60 .len = ARRAY_SIZE(da9210_irq_clr),
61 .buf = da9210_irq_clr,
62 },
63};
77 64
78static int regulator_quirk_notify(struct notifier_block *nb, 65static int regulator_quirk_notify(struct notifier_block *nb,
79 unsigned long action, void *data) 66 unsigned long action, void *data)
@@ -93,12 +80,15 @@ static int regulator_quirk_notify(struct notifier_block *nb,
93 client = to_i2c_client(dev); 80 client = to_i2c_client(dev);
94 dev_dbg(dev, "Detected %s\n", client->name); 81 dev_dbg(dev, "Detected %s\n", client->name);
95 82
96 if ((client->addr == 0x58 && !strcmp(client->name, "da9063"))) 83 if ((client->addr == 0x58 && !strcmp(client->name, "da9063")) ||
97 da9xxx_mask_irqs(client, da9063_mask_regs, 84 (client->addr == 0x68 && !strcmp(client->name, "da9210"))) {
98 ARRAY_SIZE(da9063_mask_regs)); 85 int ret;
99 else if (client->addr == 0x68 && !strcmp(client->name, "da9210")) 86
100 da9xxx_mask_irqs(client, da9210_mask_regs, 87 dev_info(&client->dev, "clearing da9063/da9210 interrupts\n");
101 ARRAY_SIZE(da9210_mask_regs)); 88 ret = i2c_transfer(client->adapter, da9xxx_msgs, ARRAY_SIZE(da9xxx_msgs));
89 if (ret != ARRAY_SIZE(da9xxx_msgs))
90 dev_err(&client->dev, "i2c error %d\n", ret);
91 }
102 92
103 mon = ioread32(irqc + IRQC_MONITOR); 93 mon = ioread32(irqc + IRQC_MONITOR);
104 if (mon & REGULATOR_IRQ_MASK) 94 if (mon & REGULATOR_IRQ_MASK)
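[The rework replaces per-register SMBus writes with one raw i2c_transfer() of two write messages, one per PMIC; each buffer's first byte selects the starting register and the chips auto-increment across the remaining mask bytes, as the comment above notes. Anatomy of one of the messages (flags = 0 denotes a write):]

struct i2c_msg msg = {
        .addr   = 0x58,                         /* da9063 */
        .flags  = 0,                            /* write transaction */
        .len    = ARRAY_SIZE(da9063_irq_clr),   /* reg + 4 mask bytes */
        .buf    = da9063_irq_clr,
};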
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 2396935e715c..4001dd15818d 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -137,7 +137,7 @@ void __init init_default_cache_policy(unsigned long pmd)
137 137
138 initial_pmd_value = pmd; 138 initial_pmd_value = pmd;
139 139
140 pmd &= PMD_SECT_TEX(1) | PMD_SECT_BUFFERABLE | PMD_SECT_CACHEABLE; 140 pmd &= PMD_SECT_CACHE_MASK;
141 141
142 for (i = 0; i < ARRAY_SIZE(cache_policies); i++) 142 for (i = 0; i < ARRAY_SIZE(cache_policies); i++)
143 if (cache_policies[i].pmd == pmd) { 143 if (cache_policies[i].pmd == pmd) {
@@ -728,7 +728,8 @@ static void *__init late_alloc(unsigned long sz)
728{ 728{
729 void *ptr = (void *)__get_free_pages(PGALLOC_GFP, get_order(sz)); 729 void *ptr = (void *)__get_free_pages(PGALLOC_GFP, get_order(sz));
730 730
731 BUG_ON(!ptr); 731 if (!ptr || !pgtable_page_ctor(virt_to_page(ptr)))
732 BUG();
732 return ptr; 733 return ptr;
733} 734}
734 735
@@ -1155,10 +1156,19 @@ void __init sanity_check_meminfo(void)
1155{ 1156{
1156 phys_addr_t memblock_limit = 0; 1157 phys_addr_t memblock_limit = 0;
1157 int highmem = 0; 1158 int highmem = 0;
1158 phys_addr_t vmalloc_limit = __pa(vmalloc_min - 1) + 1; 1159 u64 vmalloc_limit;
1159 struct memblock_region *reg; 1160 struct memblock_region *reg;
1160 bool should_use_highmem = false; 1161 bool should_use_highmem = false;
1161 1162
1163 /*
1164 * Let's use our own (unoptimized) equivalent of __pa() that is
1165 * not affected by wrap-arounds when sizeof(phys_addr_t) == 4.
1166 * The result is used as the upper bound on physical memory address
1167 * and may itself be outside the valid range for which phys_addr_t
1168 * and therefore __pa() is defined.
1169 */
1170 vmalloc_limit = (u64)(uintptr_t)vmalloc_min - PAGE_OFFSET + PHYS_OFFSET;
1171
1162 for_each_memblock(memory, reg) { 1172 for_each_memblock(memory, reg) {
1163 phys_addr_t block_start = reg->base; 1173 phys_addr_t block_start = reg->base;
1164 phys_addr_t block_end = reg->base + reg->size; 1174 phys_addr_t block_end = reg->base + reg->size;
@@ -1183,10 +1193,11 @@ void __init sanity_check_meminfo(void)
1183 if (reg->size > size_limit) { 1193 if (reg->size > size_limit) {
1184 phys_addr_t overlap_size = reg->size - size_limit; 1194 phys_addr_t overlap_size = reg->size - size_limit;
1185 1195
1186 pr_notice("Truncating RAM at %pa-%pa to -%pa", 1196 pr_notice("Truncating RAM at %pa-%pa",
1187 &block_start, &block_end, &vmalloc_limit); 1197 &block_start, &block_end);
1188 memblock_remove(vmalloc_limit, overlap_size);
1189 block_end = vmalloc_limit; 1198 block_end = vmalloc_limit;
1199 pr_cont(" to -%pa", &block_end);
1200 memblock_remove(vmalloc_limit, overlap_size);
1190 should_use_highmem = true; 1201 should_use_highmem = true;
1191 } 1202 }
1192 } 1203 }
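[The point of the u64 arithmetic above: with sizeof(phys_addr_t) == 4, __pa(vmalloc_min - 1) + 1 can wrap to a small value when PHYS_OFFSET is high, and the truncation logic then removes the wrong memory; the second hunk also delays the pr_cont() until block_end has been clamped, so the logged range matches what is actually removed. A concrete, hypothetical layout:]

/* 32-bit phys_addr_t, hypothetical values:
 *   PAGE_OFFSET = 0xc0000000, PHYS_OFFSET = 0xe0000000,
 *   vmalloc_min = 0xf0000000
 * __pa() form:  0xf0000000 - 0xc0000000 + 0xe0000000 = 0x110000000,
 * which truncates to 0x10000000 in 32 bits - far below real RAM.
 * The 64-bit form keeps 0x110000000 intact as an upper bound. */
u64 vmalloc_limit = (u64)(uintptr_t)vmalloc_min - PAGE_OFFSET + PHYS_OFFSET;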
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index a7123b4e129d..d00d52c9de3e 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -16,6 +16,7 @@
16#include <asm/hwcap.h> 16#include <asm/hwcap.h>
17#include <asm/pgtable-hwdef.h> 17#include <asm/pgtable-hwdef.h>
18#include <asm/pgtable.h> 18#include <asm/pgtable.h>
19#include <asm/memory.h>
19 20
20#include "proc-macros.S" 21#include "proc-macros.S"
21 22
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index b0b82f5ea338..f193414d0f6f 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -50,7 +50,7 @@ DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
50static struct vcpu_info __percpu *xen_vcpu_info; 50static struct vcpu_info __percpu *xen_vcpu_info;
51 51
52/* Linux <-> Xen vCPU id mapping */ 52/* Linux <-> Xen vCPU id mapping */
53DEFINE_PER_CPU(int, xen_vcpu_id) = -1; 53DEFINE_PER_CPU(uint32_t, xen_vcpu_id);
54EXPORT_PER_CPU_SYMBOL(xen_vcpu_id); 54EXPORT_PER_CPU_SYMBOL(xen_vcpu_id);
55 55
56/* These are unused until we support booting "pre-ballooned" */ 56/* These are unused until we support booting "pre-ballooned" */
@@ -170,9 +170,6 @@ static int xen_starting_cpu(unsigned int cpu)
170 pr_info("Xen: initializing cpu%d\n", cpu); 170 pr_info("Xen: initializing cpu%d\n", cpu);
171 vcpup = per_cpu_ptr(xen_vcpu_info, cpu); 171 vcpup = per_cpu_ptr(xen_vcpu_info, cpu);
172 172
173 /* Direct vCPU id mapping for ARM guests. */
174 per_cpu(xen_vcpu_id, cpu) = cpu;
175
176 info.mfn = virt_to_gfn(vcpup); 173 info.mfn = virt_to_gfn(vcpup);
177 info.offset = xen_offset_in_page(vcpup); 174 info.offset = xen_offset_in_page(vcpup);
178 175
@@ -330,6 +327,7 @@ static int __init xen_guest_init(void)
330{ 327{
331 struct xen_add_to_physmap xatp; 328 struct xen_add_to_physmap xatp;
332 struct shared_info *shared_info_page = NULL; 329 struct shared_info *shared_info_page = NULL;
330 int cpu;
333 331
334 if (!xen_domain()) 332 if (!xen_domain())
335 return 0; 333 return 0;
@@ -380,7 +378,8 @@ static int __init xen_guest_init(void)
380 return -ENOMEM; 378 return -ENOMEM;
381 379
382 /* Direct vCPU id mapping for ARM guests. */ 380 /* Direct vCPU id mapping for ARM guests. */
383 per_cpu(xen_vcpu_id, 0) = 0; 381 for_each_possible_cpu(cpu)
382 per_cpu(xen_vcpu_id, cpu) = cpu;
384 383
385 xen_auto_xlat_grant_frames.count = gnttab_max_grant_frames(); 384 xen_auto_xlat_grant_frames.count = gnttab_max_grant_frames();
386 if (xen_xlate_map_ballooned_pages(&xen_auto_xlat_grant_frames.pfn, 385 if (xen_xlate_map_ballooned_pages(&xen_auto_xlat_grant_frames.pfn,
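[Two things change in the Xen hunks: the per-cpu id becomes uint32_t to match what the hypercall interface consumes, and the direct mapping is set up for every possible CPU in xen_guest_init() rather than lazily in the hotplug callback. Consumers go through the xen_vcpu_nr() accessor, roughly:]

/* include/xen/xen-ops.h */
#define xen_vcpu_nr(cpu)        per_cpu(xen_vcpu_id, cpu)

/* e.g. when registering vcpu info for a CPU: */
err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info,
                         xen_vcpu_nr(cpu), &info);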
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 69c8787bec7d..bc3f00f586f1 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -54,6 +54,7 @@ config ARM64
54 select HAVE_ALIGNED_STRUCT_PAGE if SLUB 54 select HAVE_ALIGNED_STRUCT_PAGE if SLUB
55 select HAVE_ARCH_AUDITSYSCALL 55 select HAVE_ARCH_AUDITSYSCALL
56 select HAVE_ARCH_BITREVERSE 56 select HAVE_ARCH_BITREVERSE
57 select HAVE_ARCH_HARDENED_USERCOPY
57 select HAVE_ARCH_HUGE_VMAP 58 select HAVE_ARCH_HUGE_VMAP
58 select HAVE_ARCH_JUMP_LABEL 59 select HAVE_ARCH_JUMP_LABEL
59 select HAVE_ARCH_KASAN if SPARSEMEM_VMEMMAP && !(ARM64_16K_PAGES && ARM64_VA_BITS_48) 60 select HAVE_ARCH_KASAN if SPARSEMEM_VMEMMAP && !(ARM64_16K_PAGES && ARM64_VA_BITS_48)
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index bb2616b16157..be5d824ebdba 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -8,7 +8,7 @@ config ARCH_SUNXI
8 8
9config ARCH_ALPINE 9config ARCH_ALPINE
10 bool "Annapurna Labs Alpine platform" 10 bool "Annapurna Labs Alpine platform"
11 select ALPINE_MSI 11 select ALPINE_MSI if PCI
12 help 12 help
13 This enables support for the Annapurna Labs Alpine 13 This enables support for the Annapurna Labs Alpine
14 Soc family. 14 Soc family.
@@ -66,7 +66,7 @@ config ARCH_LG1K
66config ARCH_HISI 66config ARCH_HISI
67 bool "Hisilicon SoC Family" 67 bool "Hisilicon SoC Family"
68 select ARM_TIMER_SP804 68 select ARM_TIMER_SP804
69 select HISILICON_IRQ_MBIGEN 69 select HISILICON_IRQ_MBIGEN if PCI
70 help 70 help
71 This enables support for Hisilicon ARMv8 SoC family 71 This enables support for Hisilicon ARMv8 SoC family
72 72
diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
index 445aa678f914..c2b9bcb0ef61 100644
--- a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
+++ b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
@@ -255,10 +255,10 @@
255 /* Local timer */ 255 /* Local timer */
256 timer { 256 timer {
257 compatible = "arm,armv8-timer"; 257 compatible = "arm,armv8-timer";
258 interrupts = <1 13 0xf01>, 258 interrupts = <1 13 0xf08>,
259 <1 14 0xf01>, 259 <1 14 0xf08>,
260 <1 11 0xf01>, 260 <1 11 0xf08>,
261 <1 10 0xf01>; 261 <1 10 0xf08>;
262 }; 262 };
263 263
264 timer0: timer0@ffc03000 { 264 timer0: timer0@ffc03000 {
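[This and the following armv8-timer DT fixes change the third interrupt-specifier cell from 0xf01 to 0xf08: bits [15:8] are the PPI CPU mask and bits [3:0] the trigger type, and the architected timer signals its PPIs level-low, not edge-rising. In terms of the dt-binding macros the meson-gxbb and ns2 hunks below use:]

/* include/dt-bindings/interrupt-controller/irq.h */
#define IRQ_TYPE_EDGE_RISING    1       /* what the DTs wrongly claimed */
#define IRQ_TYPE_LEVEL_LOW      8       /* how the timer PPIs behave */
/* include/dt-bindings/interrupt-controller/arm-gic.h */
#define GIC_CPU_MASK_RAW(x)     ((x) << 8)

/* hence 0xf08 == GIC_CPU_MASK_RAW(0xf) | IRQ_TYPE_LEVEL_LOW */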
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
index e502c24b0ac7..bf6c8d051002 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
@@ -102,13 +102,13 @@
102 timer { 102 timer {
103 compatible = "arm,armv8-timer"; 103 compatible = "arm,armv8-timer";
104 interrupts = <GIC_PPI 13 104 interrupts = <GIC_PPI 13
105 (GIC_CPU_MASK_RAW(0xff) | IRQ_TYPE_EDGE_RISING)>, 105 (GIC_CPU_MASK_RAW(0xff) | IRQ_TYPE_LEVEL_LOW)>,
106 <GIC_PPI 14 106 <GIC_PPI 14
107 (GIC_CPU_MASK_RAW(0xff) | IRQ_TYPE_EDGE_RISING)>, 107 (GIC_CPU_MASK_RAW(0xff) | IRQ_TYPE_LEVEL_LOW)>,
108 <GIC_PPI 11 108 <GIC_PPI 11
109 (GIC_CPU_MASK_RAW(0xff) | IRQ_TYPE_EDGE_RISING)>, 109 (GIC_CPU_MASK_RAW(0xff) | IRQ_TYPE_LEVEL_LOW)>,
110 <GIC_PPI 10 110 <GIC_PPI 10
111 (GIC_CPU_MASK_RAW(0xff) | IRQ_TYPE_EDGE_RISING)>; 111 (GIC_CPU_MASK_RAW(0xff) | IRQ_TYPE_LEVEL_LOW)>;
112 }; 112 };
113 113
114 xtal: xtal-clk { 114 xtal: xtal-clk {
diff --git a/arch/arm64/boot/dts/apm/apm-storm.dtsi b/arch/arm64/boot/dts/apm/apm-storm.dtsi
index f1c2c713f9b0..c29dab9d1834 100644
--- a/arch/arm64/boot/dts/apm/apm-storm.dtsi
+++ b/arch/arm64/boot/dts/apm/apm-storm.dtsi
@@ -110,10 +110,10 @@
 
 	timer {
 		compatible = "arm,armv8-timer";
-		interrupts = <1 0 0xff01>,	/* Secure Phys IRQ */
-			     <1 13 0xff01>,	/* Non-secure Phys IRQ */
-			     <1 14 0xff01>,	/* Virt IRQ */
-			     <1 15 0xff01>;	/* Hyp IRQ */
+		interrupts = <1 0 0xff08>,	/* Secure Phys IRQ */
+			     <1 13 0xff08>,	/* Non-secure Phys IRQ */
+			     <1 14 0xff08>,	/* Virt IRQ */
+			     <1 15 0xff08>;	/* Hyp IRQ */
 		clock-frequency = <50000000>;
 	};
 
diff --git a/arch/arm64/boot/dts/broadcom/bcm2835-rpi.dtsi b/arch/arm64/boot/dts/broadcom/bcm2835-rpi.dtsi
new file mode 120000
index 000000000000..3937b77cb310
--- /dev/null
+++ b/arch/arm64/boot/dts/broadcom/bcm2835-rpi.dtsi
@@ -0,0 +1 @@
+../../../../arm/boot/dts/bcm2835-rpi.dtsi
\ No newline at end of file
diff --git a/arch/arm64/boot/dts/broadcom/bcm2837-rpi-3-b.dts b/arch/arm64/boot/dts/broadcom/bcm2837-rpi-3-b.dts
index 6f47dd2bb1db..7841b724e340 100644
--- a/arch/arm64/boot/dts/broadcom/bcm2837-rpi-3-b.dts
+++ b/arch/arm64/boot/dts/broadcom/bcm2837-rpi-3-b.dts
@@ -1,7 +1,7 @@
 /dts-v1/;
 #include "bcm2837.dtsi"
-#include "../../../../arm/boot/dts/bcm2835-rpi.dtsi"
-#include "../../../../arm/boot/dts/bcm283x-rpi-smsc9514.dtsi"
+#include "bcm2835-rpi.dtsi"
+#include "bcm283x-rpi-smsc9514.dtsi"
 
 / {
 	compatible = "raspberrypi,3-model-b", "brcm,bcm2837";
diff --git a/arch/arm64/boot/dts/broadcom/bcm2837.dtsi b/arch/arm64/boot/dts/broadcom/bcm2837.dtsi
index f2a31d06845d..8216bbb29fe0 100644
--- a/arch/arm64/boot/dts/broadcom/bcm2837.dtsi
+++ b/arch/arm64/boot/dts/broadcom/bcm2837.dtsi
@@ -1,4 +1,4 @@
-#include "../../../../arm/boot/dts/bcm283x.dtsi"
+#include "bcm283x.dtsi"
 
 / {
 	compatible = "brcm,bcm2836";
diff --git a/arch/arm64/boot/dts/broadcom/bcm283x-rpi-smsc9514.dtsi b/arch/arm64/boot/dts/broadcom/bcm283x-rpi-smsc9514.dtsi
new file mode 120000
index 000000000000..dca7c057d5a5
--- /dev/null
+++ b/arch/arm64/boot/dts/broadcom/bcm283x-rpi-smsc9514.dtsi
@@ -0,0 +1 @@
+../../../../arm/boot/dts/bcm283x-rpi-smsc9514.dtsi
\ No newline at end of file
diff --git a/arch/arm64/boot/dts/broadcom/bcm283x.dtsi b/arch/arm64/boot/dts/broadcom/bcm283x.dtsi
new file mode 120000
index 000000000000..5f54e4cab99b
--- /dev/null
+++ b/arch/arm64/boot/dts/broadcom/bcm283x.dtsi
@@ -0,0 +1 @@
+../../../../arm/boot/dts/bcm283x.dtsi
\ No newline at end of file
diff --git a/arch/arm64/boot/dts/broadcom/ns2.dtsi b/arch/arm64/boot/dts/broadcom/ns2.dtsi
index f53b0955bfd3..d4a12fad8afd 100644
--- a/arch/arm64/boot/dts/broadcom/ns2.dtsi
+++ b/arch/arm64/boot/dts/broadcom/ns2.dtsi
@@ -88,13 +88,13 @@
 	timer {
 		compatible = "arm,armv8-timer";
 		interrupts = <GIC_PPI 13 (GIC_CPU_MASK_RAW(0xff) |
-			      IRQ_TYPE_EDGE_RISING)>,
+			      IRQ_TYPE_LEVEL_LOW)>,
 			     <GIC_PPI 14 (GIC_CPU_MASK_RAW(0xff) |
-			      IRQ_TYPE_EDGE_RISING)>,
+			      IRQ_TYPE_LEVEL_LOW)>,
 			     <GIC_PPI 11 (GIC_CPU_MASK_RAW(0xff) |
-			      IRQ_TYPE_EDGE_RISING)>,
+			      IRQ_TYPE_LEVEL_LOW)>,
 			     <GIC_PPI 10 (GIC_CPU_MASK_RAW(0xff) |
-			      IRQ_TYPE_EDGE_RISING)>;
+			      IRQ_TYPE_LEVEL_LOW)>;
 	};
 
 	pmu {
diff --git a/arch/arm64/boot/dts/cavium/thunder-88xx.dtsi b/arch/arm64/boot/dts/cavium/thunder-88xx.dtsi
index 2eb9b225f0bc..04dc8a8d1539 100644
--- a/arch/arm64/boot/dts/cavium/thunder-88xx.dtsi
+++ b/arch/arm64/boot/dts/cavium/thunder-88xx.dtsi
@@ -354,10 +354,10 @@
 
 	timer {
 		compatible = "arm,armv8-timer";
-		interrupts = <1 13 0xff01>,
-			     <1 14 0xff01>,
-			     <1 11 0xff01>,
-			     <1 10 0xff01>;
+		interrupts = <1 13 4>,
+			     <1 14 4>,
+			     <1 11 4>,
+			     <1 10 4>;
 	};
 
 	pmu {
diff --git a/arch/arm64/boot/dts/exynos/exynos7-espresso.dts b/arch/arm64/boot/dts/exynos/exynos7-espresso.dts
index 299f3ce969ab..c528dd52ba2d 100644
--- a/arch/arm64/boot/dts/exynos/exynos7-espresso.dts
+++ b/arch/arm64/boot/dts/exynos/exynos7-espresso.dts
@@ -12,6 +12,7 @@
 /dts-v1/;
 #include "exynos7.dtsi"
 #include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/clock/samsung,s2mps11.h>
 
 / {
 	model = "Samsung Exynos7 Espresso board based on EXYNOS7";
@@ -43,6 +44,8 @@
 
 &rtc {
 	status = "okay";
+	clocks = <&clock_ccore PCLK_RTC>, <&s2mps15_osc S2MPS11_CLK_AP>;
+	clock-names = "rtc", "rtc_src";
 };
 
 &watchdog {
diff --git a/arch/arm64/boot/dts/exynos/exynos7.dtsi b/arch/arm64/boot/dts/exynos/exynos7.dtsi
index ca663dfe5189..162831546e18 100644
--- a/arch/arm64/boot/dts/exynos/exynos7.dtsi
+++ b/arch/arm64/boot/dts/exynos/exynos7.dtsi
@@ -473,10 +473,10 @@
 
 	timer {
 		compatible = "arm,armv8-timer";
-		interrupts = <1 13 0xff01>,
-			     <1 14 0xff01>,
-			     <1 11 0xff01>,
-			     <1 10 0xff01>;
+		interrupts = <1 13 0xff08>,
+			     <1 14 0xff08>,
+			     <1 11 0xff08>,
+			     <1 10 0xff08>;
 	};
 
 	pmu_system_controller: system-controller@105c0000 {
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
index e669fbd7f9c3..a67e210e2019 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
@@ -119,10 +119,10 @@
 
 	timer {
 		compatible = "arm,armv8-timer";
-		interrupts = <1 13 0x1>, /* Physical Secure PPI */
-			     <1 14 0x1>, /* Physical Non-Secure PPI */
-			     <1 11 0x1>, /* Virtual PPI */
-			     <1 10 0x1>; /* Hypervisor PPI */
+		interrupts = <1 13 0xf08>, /* Physical Secure PPI */
+			     <1 14 0xf08>, /* Physical Non-Secure PPI */
+			     <1 11 0xf08>, /* Virtual PPI */
+			     <1 10 0xf08>; /* Hypervisor PPI */
 	};
 
 	pmu {
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi
index 21023a388c29..e3b6034ea5d9 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi
@@ -191,10 +191,10 @@
 
 	timer {
 		compatible = "arm,armv8-timer";
-		interrupts = <1 13 0x8>, /* Physical Secure PPI, active-low */
-			     <1 14 0x8>, /* Physical Non-Secure PPI, active-low */
-			     <1 11 0x8>, /* Virtual PPI, active-low */
-			     <1 10 0x8>; /* Hypervisor PPI, active-low */
+		interrupts = <1 13 4>, /* Physical Secure PPI, active-low */
+			     <1 14 4>, /* Physical Non-Secure PPI, active-low */
+			     <1 11 4>, /* Virtual PPI, active-low */
+			     <1 10 4>; /* Hypervisor PPI, active-low */
 	};
 
 	pmu {
diff --git a/arch/arm64/boot/dts/marvell/armada-ap806.dtsi b/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
index eab1a42fb934..c2a6745f168c 100644
--- a/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
@@ -122,10 +122,10 @@
 
 	timer {
 		compatible = "arm,armv8-timer";
-		interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>,
-			     <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>,
-			     <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>,
-			     <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>;
+		interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+			     <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+			     <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+			     <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
 	};
 
 	odmi: odmi@300000 {
diff --git a/arch/arm64/boot/dts/rockchip/rk3368.dtsi b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
index d02a900378e1..4f44d1191bfd 100644
--- a/arch/arm64/boot/dts/rockchip/rk3368.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
@@ -270,6 +270,8 @@
 		#io-channel-cells = <1>;
 		clocks = <&cru SCLK_SARADC>, <&cru PCLK_SARADC>;
 		clock-names = "saradc", "apb_pclk";
+		resets = <&cru SRST_SARADC>;
+		reset-names = "saradc-apb";
 		status = "disabled";
 	};
 
diff --git a/arch/arm64/boot/dts/socionext/uniphier-ph1-ld20.dtsi b/arch/arm64/boot/dts/socionext/uniphier-ph1-ld20.dtsi
index c223915f0907..d73bdc8c9115 100644
--- a/arch/arm64/boot/dts/socionext/uniphier-ph1-ld20.dtsi
+++ b/arch/arm64/boot/dts/socionext/uniphier-ph1-ld20.dtsi
@@ -129,10 +129,10 @@
 
 	timer {
 		compatible = "arm,armv8-timer";
-		interrupts = <1 13 0xf01>,
-			     <1 14 0xf01>,
-			     <1 11 0xf01>,
-			     <1 10 0xf01>;
+		interrupts = <1 13 4>,
+			     <1 14 4>,
+			     <1 11 4>,
+			     <1 10 4>;
 	};
 
 	soc {
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
index e595f22e7e4b..3e2e51fbd2bc 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
+++ b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
@@ -65,10 +65,10 @@
 	timer {
 		compatible = "arm,armv8-timer";
 		interrupt-parent = <&gic>;
-		interrupts = <1 13 0xf01>,
-			     <1 14 0xf01>,
-			     <1 11 0xf01>,
-			     <1 10 0xf01>;
+		interrupts = <1 13 0xf08>,
+			     <1 14 0xf08>,
+			     <1 11 0xf08>,
+			     <1 10 0xf08>;
 	};
 
 	amba_apu {
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 0555b7caaf2c..eadf4855ad2d 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -1,4 +1,3 @@
-# CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 CONFIG_AUDIT=y
@@ -15,10 +14,14 @@ CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_MEMCG=y
 CONFIG_MEMCG_SWAP=y
+CONFIG_BLK_CGROUP=y
+CONFIG_CGROUP_PIDS=y
 CONFIG_CGROUP_HUGETLB=y
-# CONFIG_UTS_NS is not set
-# CONFIG_IPC_NS is not set
-# CONFIG_NET_NS is not set
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_PERF=y
+CONFIG_USER_NS=y
 CONFIG_SCHED_AUTOGROUP=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_KALLSYMS_ALL=y
@@ -71,6 +74,7 @@ CONFIG_PREEMPT=y
 CONFIG_KSM=y
 CONFIG_TRANSPARENT_HUGEPAGE=y
 CONFIG_CMA=y
+CONFIG_SECCOMP=y
 CONFIG_XEN=y
 CONFIG_KEXEC=y
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
@@ -84,10 +88,37 @@ CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
 CONFIG_IP_PNP_BOOTP=y
-# CONFIG_IPV6 is not set
+CONFIG_IPV6=m
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_NAT=m
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_IP6_NF_IPTABLES=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_NAT=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
+CONFIG_BRIDGE=m
+CONFIG_BRIDGE_VLAN_FILTERING=y
+CONFIG_VLAN_8021Q=m
+CONFIG_VLAN_8021Q_GVRP=y
+CONFIG_VLAN_8021Q_MVRP=y
 CONFIG_BPF_JIT=y
 CONFIG_CFG80211=m
 CONFIG_MAC80211=m
@@ -103,6 +134,7 @@ CONFIG_MTD=y
 CONFIG_MTD_M25P80=y
 CONFIG_MTD_SPI_NOR=y
 CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_NBD=m
 CONFIG_VIRTIO_BLK=y
 CONFIG_SRAM=y
 # CONFIG_SCSI_PROC_FS is not set
@@ -120,7 +152,10 @@ CONFIG_SATA_SIL24=y
 CONFIG_PATA_PLATFORM=y
 CONFIG_PATA_OF_PLATFORM=y
 CONFIG_NETDEVICES=y
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
 CONFIG_TUN=y
+CONFIG_VETH=m
 CONFIG_VIRTIO_NET=y
 CONFIG_AMD_XGBE=y
 CONFIG_NET_XGENE=y
@@ -350,12 +385,16 @@ CONFIG_EXYNOS_ADC=y
 CONFIG_PWM_SAMSUNG=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_BTRFS_FS=m
+CONFIG_BTRFS_FS_POSIX_ACL=y
 CONFIG_FANOTIFY=y
 CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
 CONFIG_QUOTA=y
 CONFIG_AUTOFS4_FS=y
-CONFIG_FUSE_FS=y
-CONFIG_CUSE=y
+CONFIG_FUSE_FS=m
+CONFIG_CUSE=m
+CONFIG_OVERLAY_FS=m
 CONFIG_VFAT_FS=y
 CONFIG_TMPFS=y
 CONFIG_HUGETLBFS=y
diff --git a/arch/arm64/include/asm/kprobes.h b/arch/arm64/include/asm/kprobes.h
index 61b49150dfa3..1737aecfcc5e 100644
--- a/arch/arm64/include/asm/kprobes.h
+++ b/arch/arm64/include/asm/kprobes.h
@@ -22,7 +22,6 @@
 
 #define __ARCH_WANT_KPROBES_INSN_SLOT
 #define MAX_INSN_SIZE			1
-#define MAX_STACK_SIZE			128
 
 #define flush_insn_slot(p)		do { } while (0)
 #define kretprobe_blacklist_size	0
@@ -47,7 +46,6 @@ struct kprobe_ctlblk {
 	struct prev_kprobe prev_kprobe;
 	struct kprobe_step_ctx ss_ctx;
 	struct pt_regs jprobe_saved_regs;
-	char jprobes_stack[MAX_STACK_SIZE];
 };
 
 void arch_remove_kprobe(struct kprobe *);
diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
index 0a456bef8c79..2fee2f59288c 100644
--- a/arch/arm64/include/asm/percpu.h
+++ b/arch/arm64/include/asm/percpu.h
@@ -199,19 +199,19 @@ static inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
 #define _percpu_read(pcp)						\
 ({									\
 	typeof(pcp) __retval;						\
-	preempt_disable();						\
+	preempt_disable_notrace();					\
 	__retval = (typeof(pcp))__percpu_read(raw_cpu_ptr(&(pcp)),	\
 					      sizeof(pcp));		\
-	preempt_enable();						\
+	preempt_enable_notrace();					\
 	__retval;							\
 })
 
 #define _percpu_write(pcp, val)						\
 do {									\
-	preempt_disable();						\
+	preempt_disable_notrace();					\
 	__percpu_write(raw_cpu_ptr(&(pcp)), (unsigned long)(val),	\
 		       sizeof(pcp));					\
-	preempt_enable();						\
+	preempt_enable_notrace();					\
 } while(0)								\
 
 #define _pcp_protect(operation, pcp, val)			\
diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
index e875a5a551d7..89206b568cd4 100644
--- a/arch/arm64/include/asm/spinlock.h
+++ b/arch/arm64/include/asm/spinlock.h
@@ -363,4 +363,14 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
 #define arch_read_relax(lock)	cpu_relax()
 #define arch_write_relax(lock)	cpu_relax()
 
+/*
+ * Accesses appearing in program order before a spin_lock() operation
+ * can be reordered with accesses inside the critical section, by virtue
+ * of arch_spin_lock being constructed using acquire semantics.
+ *
+ * In cases where this is problematic (e.g. try_to_wake_up), an
+ * smp_mb__before_spinlock() can restore the required ordering.
+ */
+#define smp_mb__before_spinlock()	smp_mb()
+
 #endif	/* __ASM_SPINLOCK_H */
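
Aside: the comment added above is the whole rationale for smp_mb__before_spinlock(). A minimal user-space sketch of the pattern it protects, mapped onto C11 atomics; the names and the stand-in primitives are illustrative, not kernel code:

    #include <stdatomic.h>

    static atomic_int cond;
    static atomic_flag lock = ATOMIC_FLAG_INIT;

    void waker(void)
    {
            /* This store must be ordered before the lock is observed taken. */
            atomic_store_explicit(&cond, 1, memory_order_relaxed);
            /* Stand-in for smp_mb__before_spinlock(): a full barrier. */
            atomic_thread_fence(memory_order_seq_cst);
            /* Acquire-only lock, like arch_spin_lock(); without the fence
             * above, the cond store could drift into the critical section. */
            while (atomic_flag_test_and_set_explicit(&lock, memory_order_acquire))
                    ;
            /* ... try_to_wake_up()-style work relying on seeing cond ... */
            atomic_flag_clear_explicit(&lock, memory_order_release);
    }
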
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 5e834d10b291..c47257c91b77 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -265,22 +265,25 @@ extern unsigned long __must_check __clear_user(void __user *addr, unsigned long
 static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
 {
 	kasan_check_write(to, n);
-	return __arch_copy_from_user(to, from, n);
+	check_object_size(to, n, false);
+	return __arch_copy_from_user(to, from, n);
 }
 
 static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
 	kasan_check_read(from, n);
-	return __arch_copy_to_user(to, from, n);
+	check_object_size(from, n, true);
+	return __arch_copy_to_user(to, from, n);
 }
 
 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
 {
 	kasan_check_write(to, n);
 
-	if (access_ok(VERIFY_READ, from, n))
+	if (access_ok(VERIFY_READ, from, n)) {
+		check_object_size(to, n, false);
 		n = __arch_copy_from_user(to, from, n);
-	else /* security hole - plug it */
+	} else /* security hole - plug it */
 		memset(to, 0, n);
 	return n;
 }
@@ -289,8 +292,10 @@ static inline unsigned long __must_check copy_to_user(void __user *to, const voi
 {
 	kasan_check_read(from, n);
 
-	if (access_ok(VERIFY_WRITE, to, n))
+	if (access_ok(VERIFY_WRITE, to, n)) {
+		check_object_size(from, n, true);
 		n = __arch_copy_to_user(to, from, n);
+	}
 	return n;
 }
 
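
Aside: the arm64 and ia64 hunks in this series follow the same HAVE_ARCH_HARDENED_USERCOPY recipe: call check_object_size() on the kernel-side buffer before invoking the raw copy routine. A compilable user-space sketch of that shape, with stand-in stubs for the kernel primitives:

    #include <stddef.h>
    #include <string.h>

    /* Stand-in: the real check walks slab/stack bounds and aborts on overflow. */
    static void check_object_size(const void *ptr, size_t n, int to_user)
    {
            (void)ptr; (void)n; (void)to_user;
    }

    /* Shape of the patched copy_from_user(): validate, bounds-check, copy,
     * and zero the destination when the user range is bogus. */
    static size_t copy_from_user_sketch(void *to, const void *from, size_t n,
                                        int access_ok)
    {
            if (access_ok) {
                    check_object_size(to, n, 0);
                    memcpy(to, from, n);    /* stand-in for __arch_copy_from_user() */
                    return 0;               /* bytes left uncopied */
            }
            memset(to, 0, n);               /* security hole - plug it */
            return n;
    }
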
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 96e4a2b64cc1..441420ca7d08 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -353,6 +353,8 @@ el1_sync:
 	lsr	x24, x1, #ESR_ELx_EC_SHIFT	// exception class
 	cmp	x24, #ESR_ELx_EC_DABT_CUR	// data abort in EL1
 	b.eq	el1_da
+	cmp	x24, #ESR_ELx_EC_IABT_CUR	// instruction abort in EL1
+	b.eq	el1_ia
 	cmp	x24, #ESR_ELx_EC_SYS64		// configurable trap
 	b.eq	el1_undef
 	cmp	x24, #ESR_ELx_EC_SP_ALIGN	// stack alignment exception
@@ -364,6 +366,11 @@ el1_sync:
 	cmp	x24, #ESR_ELx_EC_BREAKPT_CUR	// debug exception in EL1
 	b.ge	el1_dbg
 	b	el1_inv
+
+el1_ia:
+	/*
+	 * Fall through to the Data abort case
+	 */
 el1_da:
 	/*
 	 * Data abort handling
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index b77f58355da1..3e7b050e99dc 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -757,6 +757,9 @@ ENTRY(__enable_mmu)
 	isb
 	bl	__create_page_tables		// recreate kernel mapping
 
+	tlbi	vmalle1				// Remove any stale TLB entries
+	dsb	nsh
+
 	msr	sctlr_el1, x19			// re-enable the MMU
 	isb
 	ic	iallu				// flush instructions fetched
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
index 21ab5df9fa76..65d81f965e74 100644
--- a/arch/arm64/kernel/hibernate.c
+++ b/arch/arm64/kernel/hibernate.c
@@ -35,6 +35,7 @@
 #include <asm/sections.h>
 #include <asm/smp.h>
 #include <asm/suspend.h>
+#include <asm/sysreg.h>
 #include <asm/virt.h>
 
 /*
@@ -217,12 +218,22 @@ static int create_safe_exec_page(void *src_start, size_t length,
 	set_pte(pte, __pte(virt_to_phys((void *)dst) |
 			   pgprot_val(PAGE_KERNEL_EXEC)));
 
-	/* Load our new page tables */
-	asm volatile("msr	ttbr0_el1, %0;"
-		     "isb;"
-		     "tlbi	vmalle1is;"
-		     "dsb	ish;"
-		     "isb" : : "r"(virt_to_phys(pgd)));
+	/*
+	 * Load our new page tables. A strict BBM approach requires that we
+	 * ensure that TLBs are free of any entries that may overlap with the
+	 * global mappings we are about to install.
+	 *
+	 * For a real hibernate/resume cycle TTBR0 currently points to a zero
+	 * page, but TLBs may contain stale ASID-tagged entries (e.g. for EFI
+	 * runtime services), while for a userspace-driven test_resume cycle it
+	 * points to userspace page tables (and we must point it at a zero page
+	 * ourselves). Elsewhere we only (un)install the idmap with preemption
+	 * disabled, so T0SZ should be as required regardless.
+	 */
+	cpu_set_reserved_ttbr0();
+	local_flush_tlb_all();
+	write_sysreg(virt_to_phys(pgd), ttbr0_el1);
+	isb();
 
 	*phys_dst_addr = virt_to_phys((void *)dst);
 
@@ -394,6 +405,38 @@ int swsusp_arch_resume(void)
 			  void *, phys_addr_t, phys_addr_t);
 
 	/*
+	 * Restoring the memory image will overwrite the ttbr1 page tables.
+	 * Create a second copy of just the linear map, and use this when
+	 * restoring.
+	 */
+	tmp_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
+	if (!tmp_pg_dir) {
+		pr_err("Failed to allocate memory for temporary page tables.");
+		rc = -ENOMEM;
+		goto out;
+	}
+	rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, 0);
+	if (rc)
+		goto out;
+
+	/*
+	 * Since we only copied the linear map, we need to find restore_pblist's
+	 * linear map address.
+	 */
+	lm_restore_pblist = LMADDR(restore_pblist);
+
+	/*
+	 * We need a zero page that is zero before & after resume in order to
+	 * to break before make on the ttbr1 page tables.
+	 */
+	zero_page = (void *)get_safe_page(GFP_ATOMIC);
+	if (!zero_page) {
+		pr_err("Failed to allocate zero page.");
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	/*
 	 * Locate the exit code in the bottom-but-one page, so that *NULL
 	 * still has disastrous affects.
 	 */
@@ -419,27 +462,6 @@ int swsusp_arch_resume(void)
 	__flush_dcache_area(hibernate_exit, exit_size);
 
 	/*
-	 * Restoring the memory image will overwrite the ttbr1 page tables.
-	 * Create a second copy of just the linear map, and use this when
-	 * restoring.
-	 */
-	tmp_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
-	if (!tmp_pg_dir) {
-		pr_err("Failed to allocate memory for temporary page tables.");
-		rc = -ENOMEM;
-		goto out;
-	}
-	rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, 0);
-	if (rc)
-		goto out;
-
-	/*
-	 * Since we only copied the linear map, we need to find restore_pblist's
-	 * linear map address.
-	 */
-	lm_restore_pblist = LMADDR(restore_pblist);
-
-	/*
 	 * KASLR will cause the el2 vectors to be in a different location in
 	 * the resumed kernel. Load hibernate's temporary copy into el2.
 	 *
@@ -453,12 +475,6 @@ int swsusp_arch_resume(void)
 		__hyp_set_vectors(el2_vectors);
 	}
 
-	/*
-	 * We need a zero page that is zero before & after resume in order to
-	 * to break before make on the ttbr1 page tables.
-	 */
-	zero_page = (void *)get_safe_page(GFP_ATOMIC);
-
 	hibernate_exit(virt_to_phys(tmp_pg_dir), resume_hdr.ttbr1_el1,
 		       resume_hdr.reenter_kernel, lm_restore_pblist,
 		       resume_hdr.__hyp_stub_vectors, virt_to_phys(zero_page));
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
index bf9768588288..c6b0f40620d8 100644
--- a/arch/arm64/kernel/probes/kprobes.c
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -41,18 +41,6 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
 static void __kprobes
 post_kprobe_handler(struct kprobe_ctlblk *, struct pt_regs *);
 
-static inline unsigned long min_stack_size(unsigned long addr)
-{
-	unsigned long size;
-
-	if (on_irq_stack(addr, raw_smp_processor_id()))
-		size = IRQ_STACK_PTR(raw_smp_processor_id()) - addr;
-	else
-		size = (unsigned long)current_thread_info() + THREAD_START_SP - addr;
-
-	return min(size, FIELD_SIZEOF(struct kprobe_ctlblk, jprobes_stack));
-}
-
 static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
 {
 	/* prepare insn slot */
@@ -489,20 +477,15 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
 {
 	struct jprobe *jp = container_of(p, struct jprobe, kp);
 	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-	long stack_ptr = kernel_stack_pointer(regs);
 
 	kcb->jprobe_saved_regs = *regs;
 	/*
-	 * As Linus pointed out, gcc assumes that the callee
-	 * owns the argument space and could overwrite it, e.g.
-	 * tailcall optimization. So, to be absolutely safe
-	 * we also save and restore enough stack bytes to cover
-	 * the argument area.
+	 * Since we can't be sure where in the stack frame "stacked"
+	 * pass-by-value arguments are stored we just don't try to
+	 * duplicate any of the stack. Do not use jprobes on functions that
+	 * use more than 64 bytes (after padding each to an 8 byte boundary)
+	 * of arguments, or pass individual arguments larger than 16 bytes.
 	 */
-	kasan_disable_current();
-	memcpy(kcb->jprobes_stack, (void *)stack_ptr,
-	       min_stack_size(stack_ptr));
-	kasan_enable_current();
 
 	instruction_pointer_set(regs, (unsigned long) jp->entry);
 	preempt_disable();
@@ -554,10 +537,6 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
 	}
 	unpause_graph_tracing();
 	*regs = kcb->jprobe_saved_regs;
-	kasan_disable_current();
-	memcpy((void *)stack_addr, kcb->jprobes_stack,
-	       min_stack_size(stack_addr));
-	kasan_enable_current();
 	preempt_enable_no_resched();
 	return 1;
 }
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
index 9a3aec97ac09..ccf79d849e0a 100644
--- a/arch/arm64/kernel/sleep.S
+++ b/arch/arm64/kernel/sleep.S
@@ -101,12 +101,20 @@ ENTRY(cpu_resume)
 	bl	el2_setup		// if in EL2 drop to EL1 cleanly
 	/* enable the MMU early - so we can access sleep_save_stash by va */
 	adr_l	lr, __enable_mmu	/* __cpu_setup will return here */
-	ldr	x27, =_cpu_resume	/* __enable_mmu will branch here */
+	adr_l	x27, _resume_switched	/* __enable_mmu will branch here */
 	adrp	x25, idmap_pg_dir
 	adrp	x26, swapper_pg_dir
 	b	__cpu_setup
 ENDPROC(cpu_resume)
 
+	.pushsection	".idmap.text", "ax"
+_resume_switched:
+	ldr	x8, =_cpu_resume
+	br	x8
+ENDPROC(_resume_switched)
+	.ltorg
+	.popsection
+
 ENTRY(_cpu_resume)
 	mrs	x1, mpidr_el1
 	adrp	x8, mpidr_hash
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 76a6d9263908..d93d43352504 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -661,9 +661,9 @@ void __init smp_init_cpus(void)
 		acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
 				      acpi_parse_gic_cpu_interface, 0);
 
-	if (cpu_count > NR_CPUS)
-		pr_warn("no. of cores (%d) greater than configured maximum of %d - clipping\n",
-			cpu_count, NR_CPUS);
+	if (cpu_count > nr_cpu_ids)
+		pr_warn("Number of cores (%d) exceeds configured maximum of %d - clipping\n",
+			cpu_count, nr_cpu_ids);
 
 	if (!bootcpu_valid) {
 		pr_err("missing boot CPU MPIDR, not enabling secondaries\n");
@@ -677,7 +677,7 @@ void __init smp_init_cpus(void)
 	 * with entries in cpu_logical_map while initializing the cpus.
 	 * If the cpu set-up fails, invalidate the cpu_logical_map entry.
 	 */
-	for (i = 1; i < NR_CPUS; i++) {
+	for (i = 1; i < nr_cpu_ids; i++) {
 		if (cpu_logical_map(i) != INVALID_HWID) {
 			if (smp_cpu_setup(i))
 				cpu_logical_map(i) = INVALID_HWID;
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index ae7855f16ec2..5a84b4562603 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -256,7 +256,7 @@ static int __hyp_text __guest_run(struct kvm_vcpu *vcpu)
 
 	/*
 	 * We must restore the 32-bit state before the sysregs, thanks
-	 * to Cortex-A57 erratum #852523.
+	 * to erratum #852523 (Cortex-A57) or #853709 (Cortex-A72).
 	 */
 	__sysreg32_restore_state(vcpu);
 	__sysreg_restore_guest_state(guest_ctxt);
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index b0b225ceca18..e51367d159d0 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -823,14 +823,6 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
  * Architected system registers.
  * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
  *
- * We could trap ID_DFR0 and tell the guest we don't support performance
- * monitoring. Unfortunately the patch to make the kernel check ID_DFR0 was
- * NAKed, so it will read the PMCR anyway.
- *
- * Therefore we tell the guest we have 0 counters. Unfortunately, we
- * must always support PMCCNTR (the cycle counter): we just RAZ/WI for
- * all PM registers, which doesn't crash the guest kernel at least.
- *
  * Debug handling: We do trap most, if not all debug related system
  * registers. The implementation is good enough to ensure that a guest
  * can use these with minimal performance degradation. The drawback is
@@ -1360,7 +1352,7 @@ static const struct sys_reg_desc cp15_regs[] = {
 	{ Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 },
 
 	/* ICC_SRE */
-	{ Op1( 0), CRn(12), CRm(12), Op2( 5), trap_raz_wi },
+	{ Op1( 0), CRn(12), CRm(12), Op2( 5), access_gic_sre },
 
 	{ Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },
 
diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/dump.c
index f94b80eb295d..9c3e75df2180 100644
--- a/arch/arm64/mm/dump.c
+++ b/arch/arm64/mm/dump.c
@@ -242,7 +242,7 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level,
 
 static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start)
 {
-	pte_t *pte = pte_offset_kernel(pmd, 0);
+	pte_t *pte = pte_offset_kernel(pmd, 0UL);
 	unsigned long addr;
 	unsigned i;
 
@@ -254,7 +254,7 @@ static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start)
 
 static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
 {
-	pmd_t *pmd = pmd_offset(pud, 0);
+	pmd_t *pmd = pmd_offset(pud, 0UL);
 	unsigned long addr;
 	unsigned i;
 
@@ -271,7 +271,7 @@ static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
 
 static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start)
 {
-	pud_t *pud = pud_offset(pgd, 0);
+	pud_t *pud = pud_offset(pgd, 0UL);
 	unsigned long addr;
 	unsigned i;
 
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index c8beaa0da7df..05d2bd776c69 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -153,6 +153,11 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
 }
 #endif
 
+static bool is_el1_instruction_abort(unsigned int esr)
+{
+	return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_CUR;
+}
+
 /*
  * The kernel tried to access some page that wasn't present.
  */
@@ -161,8 +166,9 @@ static void __do_kernel_fault(struct mm_struct *mm, unsigned long addr,
 {
 	/*
 	 * Are we prepared to handle this kernel fault?
+	 * We are almost certainly not prepared to handle instruction faults.
 	 */
-	if (fixup_exception(regs))
+	if (!is_el1_instruction_abort(esr) && fixup_exception(regs))
 		return;
 
 	/*
@@ -267,7 +273,8 @@ static inline bool is_permission_fault(unsigned int esr)
 	unsigned int ec       = ESR_ELx_EC(esr);
 	unsigned int fsc_type = esr & ESR_ELx_FSC_TYPE;
 
-	return (ec == ESR_ELx_EC_DABT_CUR && fsc_type == ESR_ELx_FSC_PERM);
+	return (ec == ESR_ELx_EC_DABT_CUR && fsc_type == ESR_ELx_FSC_PERM) ||
+	       (ec == ESR_ELx_EC_IABT_CUR && fsc_type == ESR_ELx_FSC_PERM);
 }
 
 static bool is_el0_instruction_abort(unsigned int esr)
@@ -312,6 +319,9 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
 		if (regs->orig_addr_limit == KERNEL_DS)
 			die("Accessing user space memory with fs=KERNEL_DS", regs, esr);
 
+		if (is_el1_instruction_abort(esr))
+			die("Attempting to execute userspace memory", regs, esr);
+
 		if (!search_exception_tables(regs->pc))
 			die("Accessing user space memory outside uaccess.h routines", regs, esr);
 	}
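
Aside: all of the fault-handling logic above keys off the exception class (EC) field of ESR_EL1, bits [31:26]. A hedged C sketch of the decode; the constants follow the ARMv8 ARM, and the helper mirrors the one added in this hunk:

    #include <stdbool.h>
    #include <stdint.h>

    #define ESR_ELX_EC_SHIFT        26      /* EC field: ESR_ELx[31:26] */
    #define ESR_ELX_EC(esr)         (((esr) >> ESR_ELX_EC_SHIFT) & 0x3f)
    #define ESR_ELX_EC_IABT_CUR     0x21    /* instruction abort, same EL */
    #define ESR_ELX_EC_DABT_CUR     0x25    /* data abort, same EL */

    static bool is_el1_instruction_abort(uint32_t esr)
    {
            return ESR_ELX_EC(esr) == ESR_ELX_EC_IABT_CUR;
    }
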
diff --git a/arch/arm64/mm/numa.c b/arch/arm64/mm/numa.c
index c7fe3ec70774..5bb15eab6f00 100644
--- a/arch/arm64/mm/numa.c
+++ b/arch/arm64/mm/numa.c
@@ -23,6 +23,8 @@
 #include <linux/module.h>
 #include <linux/of.h>
 
+#include <asm/acpi.h>
+
 struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
 EXPORT_SYMBOL(node_data);
 nodemask_t numa_nodes_parsed __initdata;
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 5bb61de23201..9d37e967fa19 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -100,7 +100,16 @@ ENTRY(cpu_do_resume)
 
 	msr	tcr_el1, x8
 	msr	vbar_el1, x9
+
+	/*
+	 * __cpu_setup() cleared MDSCR_EL1.MDE and friends, before unmasking
+	 * debug exceptions. By restoring MDSCR_EL1 here, we may take a debug
+	 * exception. Mask them until local_dbg_restore() in cpu_suspend()
+	 * resets them.
+	 */
+	disable_dbg
 	msr	mdscr_el1, x10
+
 	msr	sctlr_el1, x12
 	/*
 	 * Restore oslsr_el1 by writing oslar_el1
diff --git a/arch/avr32/include/asm/uaccess.h b/arch/avr32/include/asm/uaccess.h
index 68cf638faf48..b1ec1fa06463 100644
--- a/arch/avr32/include/asm/uaccess.h
+++ b/arch/avr32/include/asm/uaccess.h
@@ -74,7 +74,7 @@ extern __kernel_size_t __copy_user(void *to, const void *from,
 
 extern __kernel_size_t copy_to_user(void __user *to, const void *from,
 				    __kernel_size_t n);
-extern __kernel_size_t copy_from_user(void *to, const void __user *from,
+extern __kernel_size_t ___copy_from_user(void *to, const void __user *from,
 				      __kernel_size_t n);
 
 static inline __kernel_size_t __copy_to_user(void __user *to, const void *from,
@@ -88,6 +88,15 @@ static inline __kernel_size_t __copy_from_user(void *to,
 {
 	return __copy_user(to, (const void __force *)from, n);
 }
+static inline __kernel_size_t copy_from_user(void *to,
+					       const void __user *from,
+					       __kernel_size_t n)
+{
+	size_t res = ___copy_from_user(to, from, n);
+	if (unlikely(res))
+		memset(to + (n - res), 0, res);
+	return res;
+}
 
 #define __copy_to_user_inatomic __copy_to_user
 #define __copy_from_user_inatomic __copy_from_user
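
Aside: avr32 is one of many architectures in this series (blackfin, cris, ia64, metag and microblaze follow below) converging on one rule: when copy_from_user() cannot fill the whole buffer, the uncopied tail must be zeroed so stale kernel memory never leaks to the caller. A self-contained sketch of the common shape; raw_copy is a stand-in for the per-arch routine:

    #include <stddef.h>
    #include <string.h>

    /* By kernel convention the raw copy routine returns the number of
     * bytes it could NOT copy (0 on full success). */
    static size_t copy_from_user_shape(void *to, const void *from, size_t n,
                                       size_t (*raw_copy)(void *, const void *, size_t))
    {
            size_t res = raw_copy(to, from, n);

            if (res)                        /* short copy: scrub the tail */
                    memset((char *)to + (n - res), 0, res);
            return res;
    }
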
diff --git a/arch/avr32/kernel/avr32_ksyms.c b/arch/avr32/kernel/avr32_ksyms.c
index d93ead02daed..7c6cf14f0985 100644
--- a/arch/avr32/kernel/avr32_ksyms.c
+++ b/arch/avr32/kernel/avr32_ksyms.c
@@ -36,7 +36,7 @@ EXPORT_SYMBOL(copy_page);
 /*
  * Userspace access stuff.
  */
-EXPORT_SYMBOL(copy_from_user);
+EXPORT_SYMBOL(___copy_from_user);
 EXPORT_SYMBOL(copy_to_user);
 EXPORT_SYMBOL(__copy_user);
 EXPORT_SYMBOL(strncpy_from_user);
diff --git a/arch/avr32/lib/copy_user.S b/arch/avr32/lib/copy_user.S
index ea59c04b07de..075373471da1 100644
--- a/arch/avr32/lib/copy_user.S
+++ b/arch/avr32/lib/copy_user.S
@@ -23,13 +23,13 @@
  */
 	.text
 	.align	1
-	.global	copy_from_user
-	.type	copy_from_user, @function
-copy_from_user:
+	.global	___copy_from_user
+	.type	___copy_from_user, @function
+___copy_from_user:
 	branch_if_kernel r8, __copy_user
 	ret_if_privileged r8, r11, r10, r10
 	rjmp	__copy_user
-	.size	copy_from_user, . - copy_from_user
+	.size	___copy_from_user, . - ___copy_from_user
 
 	.global	copy_to_user
 	.type	copy_to_user, @function
diff --git a/arch/blackfin/include/asm/uaccess.h b/arch/blackfin/include/asm/uaccess.h
index 12f5d6851bbc..0a2a70096d8b 100644
--- a/arch/blackfin/include/asm/uaccess.h
+++ b/arch/blackfin/include/asm/uaccess.h
@@ -171,11 +171,12 @@ static inline int bad_user_access_length(void)
 static inline unsigned long __must_check
 copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-	if (access_ok(VERIFY_READ, from, n))
+	if (likely(access_ok(VERIFY_READ, from, n))) {
 		memcpy(to, (const void __force *)from, n);
-	else
-		return n;
-	return 0;
+		return 0;
+	}
+	memset(to, 0, n);
+	return n;
 }
 
 static inline unsigned long __must_check
diff --git a/arch/blackfin/mach-bf561/boards/cm_bf561.c b/arch/blackfin/mach-bf561/boards/cm_bf561.c
index c6db52ba3a06..10c57771822d 100644
--- a/arch/blackfin/mach-bf561/boards/cm_bf561.c
+++ b/arch/blackfin/mach-bf561/boards/cm_bf561.c
@@ -146,7 +146,8 @@ static struct platform_device hitachi_fb_device = {
 #include <linux/smc91x.h>
 
 static struct smc91x_platdata smc91x_info = {
-	.flags = SMC91X_USE_32BIT | SMC91X_NOWAIT,
+	.flags = SMC91X_USE_8BIT | SMC91X_USE_16BIT | SMC91X_USE_32BIT |
+		 SMC91X_NOWAIT,
 	.leda = RPC_LED_100_10,
 	.ledb = RPC_LED_TX_RX,
 };
diff --git a/arch/blackfin/mach-bf561/boards/ezkit.c b/arch/blackfin/mach-bf561/boards/ezkit.c
index f35525b55819..57d1c43726d9 100644
--- a/arch/blackfin/mach-bf561/boards/ezkit.c
+++ b/arch/blackfin/mach-bf561/boards/ezkit.c
@@ -134,7 +134,8 @@ static struct platform_device net2272_bfin_device = {
 #include <linux/smc91x.h>
 
 static struct smc91x_platdata smc91x_info = {
-	.flags = SMC91X_USE_32BIT | SMC91X_NOWAIT,
+	.flags = SMC91X_USE_8BIT | SMC91X_USE_16BIT | SMC91X_USE_32BIT |
+		 SMC91X_NOWAIT,
 	.leda = RPC_LED_100_10,
 	.ledb = RPC_LED_TX_RX,
 };
diff --git a/arch/cris/include/asm/uaccess.h b/arch/cris/include/asm/uaccess.h
index e3530d0f13ee..56c7d5750abd 100644
--- a/arch/cris/include/asm/uaccess.h
+++ b/arch/cris/include/asm/uaccess.h
@@ -194,30 +194,6 @@ extern unsigned long __copy_user(void __user *to, const void *from, unsigned lon
 extern unsigned long __copy_user_zeroing(void *to, const void __user *from, unsigned long n);
 extern unsigned long __do_clear_user(void __user *to, unsigned long n);
 
-static inline unsigned long
-__generic_copy_to_user(void __user *to, const void *from, unsigned long n)
-{
-	if (access_ok(VERIFY_WRITE, to, n))
-		return __copy_user(to, from, n);
-	return n;
-}
-
-static inline unsigned long
-__generic_copy_from_user(void *to, const void __user *from, unsigned long n)
-{
-	if (access_ok(VERIFY_READ, from, n))
-		return __copy_user_zeroing(to, from, n);
-	return n;
-}
-
-static inline unsigned long
-__generic_clear_user(void __user *to, unsigned long n)
-{
-	if (access_ok(VERIFY_WRITE, to, n))
-		return __do_clear_user(to, n);
-	return n;
-}
-
 static inline long
 __strncpy_from_user(char *dst, const char __user *src, long count)
 {
@@ -282,7 +258,7 @@ __constant_copy_from_user(void *to, const void __user *from, unsigned long n)
 	else if (n == 24)
 		__asm_copy_from_user_24(to, from, ret);
 	else
-		ret = __generic_copy_from_user(to, from, n);
+		ret = __copy_user_zeroing(to, from, n);
 
 	return ret;
 }
@@ -333,7 +309,7 @@ __constant_copy_to_user(void __user *to, const void *from, unsigned long n)
 	else if (n == 24)
 		__asm_copy_to_user_24(to, from, ret);
 	else
-		ret = __generic_copy_to_user(to, from, n);
+		ret = __copy_user(to, from, n);
 
 	return ret;
 }
@@ -366,26 +342,43 @@ __constant_clear_user(void __user *to, unsigned long n)
 	else if (n == 24)
 		__asm_clear_24(to, ret);
 	else
-		ret = __generic_clear_user(to, n);
+		ret = __do_clear_user(to, n);
 
 	return ret;
 }
 
 
-#define clear_user(to, n)			\
-	(__builtin_constant_p(n) ?		\
-	 __constant_clear_user(to, n) :		\
-	 __generic_clear_user(to, n))
+static inline size_t clear_user(void __user *to, size_t n)
+{
+	if (unlikely(!access_ok(VERIFY_WRITE, to, n)))
+		return n;
+	if (__builtin_constant_p(n))
+		return __constant_clear_user(to, n);
+	else
+		return __do_clear_user(to, n);
+}
 
-#define copy_from_user(to, from, n)		\
-	(__builtin_constant_p(n) ?		\
-	 __constant_copy_from_user(to, from, n) : \
-	 __generic_copy_from_user(to, from, n))
+static inline size_t copy_from_user(void *to, const void __user *from, size_t n)
+{
+	if (unlikely(!access_ok(VERIFY_READ, from, n))) {
+		memset(to, 0, n);
+		return n;
+	}
+	if (__builtin_constant_p(n))
+		return __constant_copy_from_user(to, from, n);
+	else
+		return __copy_user_zeroing(to, from, n);
+}
 
-#define copy_to_user(to, from, n)		\
-	(__builtin_constant_p(n) ?		\
-	 __constant_copy_to_user(to, from, n) :	\
-	 __generic_copy_to_user(to, from, n))
+static inline size_t copy_to_user(void __user *to, const void *from, size_t n)
+{
+	if (unlikely(!access_ok(VERIFY_WRITE, to, n)))
+		return n;
+	if (__builtin_constant_p(n))
+		return __constant_copy_to_user(to, from, n);
+	else
+		return __copy_user(to, from, n);
+}
 
 /* We let the __ versions of copy_from/to_user inline, because they're often
  * used in fast paths and have only a small space overhead.
diff --git a/arch/frv/include/asm/uaccess.h b/arch/frv/include/asm/uaccess.h
index 3ac9a59d65d4..87d9e34c5df8 100644
--- a/arch/frv/include/asm/uaccess.h
+++ b/arch/frv/include/asm/uaccess.h
@@ -263,19 +263,25 @@ do { \
 extern long __memset_user(void *dst, unsigned long count);
 extern long __memcpy_user(void *dst, const void *src, unsigned long count);
 
-#define clear_user(dst,count)			__memset_user(____force(dst), (count))
+#define __clear_user(dst,count)			__memset_user(____force(dst), (count))
 #define __copy_from_user_inatomic(to, from, n)	__memcpy_user((to), ____force(from), (n))
 #define __copy_to_user_inatomic(to, from, n)	__memcpy_user(____force(to), (from), (n))
 
 #else
 
-#define clear_user(dst,count)			(memset(____force(dst), 0, (count)), 0)
+#define __clear_user(dst,count)			(memset(____force(dst), 0, (count)), 0)
 #define __copy_from_user_inatomic(to, from, n)	(memcpy((to), ____force(from), (n)), 0)
 #define __copy_to_user_inatomic(to, from, n)	(memcpy(____force(to), (from), (n)), 0)
 
 #endif
 
-#define __clear_user clear_user
+static inline unsigned long __must_check
+clear_user(void __user *to, unsigned long n)
+{
+	if (likely(__access_ok(to, n)))
+		n = __clear_user(to, n);
+	return n;
+}
 
 static inline unsigned long __must_check
 __copy_to_user(void __user *to, const void *from, unsigned long n)
diff --git a/arch/h8300/include/asm/io.h b/arch/h8300/include/asm/io.h
index 2e221c5f0203..f86918aed9e1 100644
--- a/arch/h8300/include/asm/io.h
+++ b/arch/h8300/include/asm/io.h
@@ -3,6 +3,8 @@
 
 #ifdef __KERNEL__
 
+#include <linux/types.h>
+
 /* H8/300 internal I/O functions */
 
 #define __raw_readb __raw_readb
diff --git a/arch/hexagon/include/asm/uaccess.h b/arch/hexagon/include/asm/uaccess.h
index f000a382bc7f..f61cfb28e9f2 100644
--- a/arch/hexagon/include/asm/uaccess.h
+++ b/arch/hexagon/include/asm/uaccess.h
@@ -103,7 +103,8 @@ static inline long hexagon_strncpy_from_user(char *dst, const char __user *src,
 {
 	long res = __strnlen_user(src, n);
 
-	/* return from strnlen can't be zero -- that would be rubbish. */
+	if (unlikely(!res))
+		return -EFAULT;
 
 	if (res > n) {
 		copy_from_user(dst, src, n);
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 6a15083cc366..18ca6a9ce566 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -52,6 +52,7 @@ config IA64
 	select MODULES_USE_ELF_RELA
 	select ARCH_USE_CMPXCHG_LOCKREF
 	select HAVE_ARCH_AUDITSYSCALL
+	select HAVE_ARCH_HARDENED_USERCOPY
 	default y
 	help
 	  The Itanium Processor Family is Intel's 64-bit successor to
diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
index 2189d5ddc1ee..bfe13196f770 100644
--- a/arch/ia64/include/asm/uaccess.h
+++ b/arch/ia64/include/asm/uaccess.h
@@ -241,12 +241,16 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
 static inline unsigned long
 __copy_to_user (void __user *to, const void *from, unsigned long count)
 {
+	check_object_size(from, count, true);
+
 	return __copy_user(to, (__force void __user *) from, count);
 }
 
 static inline unsigned long
 __copy_from_user (void *to, const void __user *from, unsigned long count)
 {
+	check_object_size(to, count, false);
+
 	return __copy_user((__force void __user *) to, from, count);
 }
 
@@ -258,22 +262,23 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
 	const void *__cu_from = (from);						\
 	long __cu_len = (n);							\
 										\
-	if (__access_ok(__cu_to, __cu_len, get_fs()))				\
-		__cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len);	\
+	if (__access_ok(__cu_to, __cu_len, get_fs())) {				\
+		check_object_size(__cu_from, __cu_len, true);			\
+		__cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len);	\
+	}									\
 	__cu_len;								\
 })
 
-#define copy_from_user(to, from, n)						\
-({										\
-	void *__cu_to = (to);							\
-	const void __user *__cu_from = (from);					\
-	long __cu_len = (n);							\
-										\
-	__chk_user_ptr(__cu_from);						\
-	if (__access_ok(__cu_from, __cu_len, get_fs()))				\
-		__cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len);	\
-	__cu_len;								\
-})
+static inline unsigned long
+copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+	check_object_size(to, n, false);
+	if (likely(__access_ok(from, n, get_fs())))
+		n = __copy_user((__force void __user *) to, from, n);
+	else
+		memset(to, 0, n);
+	return n;
+}
 
 #define __copy_in_user(to, from, size)	__copy_user((to), (from), (size))
 
diff --git a/arch/m32r/include/asm/uaccess.h b/arch/m32r/include/asm/uaccess.h
index cac7014daef3..6f8982157a75 100644
--- a/arch/m32r/include/asm/uaccess.h
+++ b/arch/m32r/include/asm/uaccess.h
@@ -219,7 +219,7 @@ extern int fixup_exception(struct pt_regs *regs);
 #define __get_user_nocheck(x, ptr, size)				\
 ({									\
 	long __gu_err = 0;						\
-	unsigned long __gu_val;						\
+	unsigned long __gu_val = 0;					\
 	might_fault();							\
 	__get_user_size(__gu_val, (ptr), (size), __gu_err);		\
 	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
diff --git a/arch/m68k/kernel/signal.c b/arch/m68k/kernel/signal.c
index 2dcee3a88867..9202f82dfce6 100644
--- a/arch/m68k/kernel/signal.c
+++ b/arch/m68k/kernel/signal.c
@@ -213,7 +213,6 @@ static inline int frame_extra_sizes(int f)
 
 static inline void adjustformat(struct pt_regs *regs)
 {
-	((struct switch_stack *)regs - 1)->a5 = current->mm->start_data;
 	/*
 	 * set format byte to make stack appear modulo 4, which it will
 	 * be when doing the rte
diff --git a/arch/metag/include/asm/uaccess.h b/arch/metag/include/asm/uaccess.h
index 8282cbce7e39..273e61225c27 100644
--- a/arch/metag/include/asm/uaccess.h
+++ b/arch/metag/include/asm/uaccess.h
@@ -204,8 +204,9 @@ extern unsigned long __must_check __copy_user_zeroing(void *to,
204static inline unsigned long 204static inline unsigned long
205copy_from_user(void *to, const void __user *from, unsigned long n) 205copy_from_user(void *to, const void __user *from, unsigned long n)
206{ 206{
207 if (access_ok(VERIFY_READ, from, n)) 207 if (likely(access_ok(VERIFY_READ, from, n)))
208 return __copy_user_zeroing(to, from, n); 208 return __copy_user_zeroing(to, from, n);
209 memset(to, 0, n);
209 return n; 210 return n;
210} 211}
211 212
diff --git a/arch/metag/mm/init.c b/arch/metag/mm/init.c
index 11fa51c89617..c0ec116b3993 100644
--- a/arch/metag/mm/init.c
+++ b/arch/metag/mm/init.c
@@ -390,7 +390,6 @@ void __init mem_init(void)
390 390
391 free_all_bootmem(); 391 free_all_bootmem();
392 mem_init_print_info(NULL); 392 mem_init_print_info(NULL);
393 show_mem(0);
394} 393}
395 394
396void free_initmem(void) 395void free_initmem(void)
diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h
index 331b0d35f89c..826676778094 100644
--- a/arch/microblaze/include/asm/uaccess.h
+++ b/arch/microblaze/include/asm/uaccess.h
@@ -227,7 +227,7 @@ extern long __user_bad(void);
227 227
228#define __get_user(x, ptr) \ 228#define __get_user(x, ptr) \
229({ \ 229({ \
230 unsigned long __gu_val; \ 230 unsigned long __gu_val = 0; \
231 /*unsigned long __gu_ptr = (unsigned long)(ptr);*/ \ 231 /*unsigned long __gu_ptr = (unsigned long)(ptr);*/ \
232 long __gu_err; \ 232 long __gu_err; \
233 switch (sizeof(*(ptr))) { \ 233 switch (sizeof(*(ptr))) { \
@@ -373,10 +373,13 @@ extern long __user_bad(void);
373static inline long copy_from_user(void *to, 373static inline long copy_from_user(void *to,
374 const void __user *from, unsigned long n) 374 const void __user *from, unsigned long n)
375{ 375{
376 unsigned long res = n;
376 might_fault(); 377 might_fault();
377 if (access_ok(VERIFY_READ, from, n)) 378 if (likely(access_ok(VERIFY_READ, from, n)))
378 return __copy_from_user(to, from, n); 379 res = __copy_from_user(to, from, n);
379 return n; 380 if (unlikely(res))
381 memset(to + (n - res), 0, res);
382 return res;
380} 383}
381 384
382#define __copy_to_user(to, from, n) \ 385#define __copy_to_user(to, from, n) \
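
The metag and microblaze hunks above (and the mn10300, nios2 and openrisc ones below) all add the same scrub to copy_from_user(): zero whatever part of the kernel buffer the copy did not fill, either wholesale with memset(to, 0, n) on a failed access_ok(), or just the tail with memset(to + (n - res), 0, res) after a partial fault. Without it, callers that copy the buffer back out to user space leak stale kernel stack or heap bytes. A deterministic userspace analogue (all names hypothetical):

	#include <stdio.h>
	#include <string.h>

	static char reused_slot[16];	/* stand-in for a reused kernel stack slot */

	static unsigned long copy_in_broken(void *to, unsigned long n)
	{
		(void)to;
		return n;		/* simulated fault: 'to' left untouched */
	}

	static unsigned long copy_in_fixed(void *to, unsigned long n)
	{
		memset(to, 0, n);	/* the fix: scrub the uncopied bytes */
		return n;
	}

	int main(void)
	{
		strcpy(reused_slot, "stale secret!");	/* left behind by earlier use */

		if (copy_in_broken(reused_slot, sizeof(reused_slot)))
			printf("caller sees:   \"%s\"\n", reused_slot);	/* leaks */
		if (copy_in_fixed(reused_slot, sizeof(reused_slot)))
			printf("after the fix: \"%s\"\n", reused_slot);	/* empty */
		return 0;
	}
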
diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
index ea0cd9773914..5f987598054f 100644
--- a/arch/mips/include/asm/page.h
+++ b/arch/mips/include/asm/page.h
@@ -164,7 +164,7 @@ typedef struct { unsigned long pgprot; } pgprot_t;
164 */ 164 */
165static inline unsigned long ___pa(unsigned long x) 165static inline unsigned long ___pa(unsigned long x)
166{ 166{
167 if (config_enabled(CONFIG_64BIT)) { 167 if (IS_ENABLED(CONFIG_64BIT)) {
168 /* 168 /*
169 * For MIPS64 the virtual address may either be in one of 169 * For MIPS64 the virtual address may either be in one of
170 * the compatibility segments ckseg0 or ckseg1, or it may 170 * the compatibility segments ckseg0 or ckseg1, or it may
@@ -173,7 +173,7 @@ static inline unsigned long ___pa(unsigned long x)
173 return x < CKSEG0 ? XPHYSADDR(x) : CPHYSADDR(x); 173 return x < CKSEG0 ? XPHYSADDR(x) : CPHYSADDR(x);
174 } 174 }
175 175
176 if (!config_enabled(CONFIG_EVA)) { 176 if (!IS_ENABLED(CONFIG_EVA)) {
177 /* 177 /*
178 * We're using the standard MIPS32 legacy memory map, ie. 178 * We're using the standard MIPS32 legacy memory map, ie.
179 * the address x is going to be in kseg0 or kseg1. We can 179 * the address x is going to be in kseg0 or kseg1. We can
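
config_enabled() was on its way out in this cycle; IS_ENABLED(CONFIG_FOO) is the canonical spelling and also returns true for modular (=m) options. Either way the test folds to a compile-time constant, so the dead branch is discarded as with #ifdef, but both branches still get parsed and type-checked. The idiom in miniature (phys32()/phys64() are hypothetical helpers):

	static unsigned long pa_sketch(unsigned long x)
	{
		if (IS_ENABLED(CONFIG_64BIT))	/* folds to 0 or 1 */
			return phys64(x);	/* compiled out on 32-bit,
						   but still type-checked */
		return phys32(x);
	}
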
diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
index 11b965f98d95..21a2aaba20d5 100644
--- a/arch/mips/include/asm/uaccess.h
+++ b/arch/mips/include/asm/uaccess.h
@@ -14,6 +14,7 @@
14#include <linux/kernel.h> 14#include <linux/kernel.h>
15#include <linux/errno.h> 15#include <linux/errno.h>
16#include <linux/thread_info.h> 16#include <linux/thread_info.h>
17#include <linux/string.h>
17#include <asm/asm-eva.h> 18#include <asm/asm-eva.h>
18 19
19/* 20/*
@@ -1170,6 +1171,8 @@ extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);
1170 __cu_len = __invoke_copy_from_user(__cu_to, \ 1171 __cu_len = __invoke_copy_from_user(__cu_to, \
1171 __cu_from, \ 1172 __cu_from, \
1172 __cu_len); \ 1173 __cu_len); \
1174 } else { \
1175 memset(__cu_to, 0, __cu_len); \
1173 } \ 1176 } \
1174 } \ 1177 } \
1175 __cu_len; \ 1178 __cu_len; \
diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
index 6eb52b9c9818..e788515f766b 100644
--- a/arch/mips/kvm/emulate.c
+++ b/arch/mips/kvm/emulate.c
@@ -1642,8 +1642,14 @@ enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst,
1642 1642
1643 preempt_disable(); 1643 preempt_disable();
1644 if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) { 1644 if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {
1645 if (kvm_mips_host_tlb_lookup(vcpu, va) < 0) 1645 if (kvm_mips_host_tlb_lookup(vcpu, va) < 0 &&
1646 kvm_mips_handle_kseg0_tlb_fault(va, vcpu); 1646 kvm_mips_handle_kseg0_tlb_fault(va, vcpu)) {
1647 kvm_err("%s: handling mapped kseg0 tlb fault for %lx, vcpu: %p, ASID: %#lx\n",
1648 __func__, va, vcpu, read_c0_entryhi());
1649 er = EMULATE_FAIL;
1650 preempt_enable();
1651 goto done;
1652 }
1647 } else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) || 1653 } else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) ||
1648 KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) { 1654 KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
1649 int index; 1655 int index;
@@ -1680,12 +1686,18 @@ enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst,
1680 run, vcpu); 1686 run, vcpu);
1681 preempt_enable(); 1687 preempt_enable();
1682 goto dont_update_pc; 1688 goto dont_update_pc;
1683 } else { 1689 }
1684 /* 1690 /*
1685 * We fault an entry from the guest tlb to the 1691 * We fault an entry from the guest tlb to the
1686 * shadow host TLB 1692 * shadow host TLB
1687 */ 1693 */
1688 kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb); 1694 if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb)) {
1695 kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n",
1696 __func__, va, index, vcpu,
1697 read_c0_entryhi());
1698 er = EMULATE_FAIL;
1699 preempt_enable();
1700 goto done;
1689 } 1701 }
1690 } 1702 }
1691 } else { 1703 } else {
@@ -2659,7 +2671,12 @@ enum emulation_result kvm_mips_handle_tlbmiss(u32 cause,
2659 * OK we have a Guest TLB entry, now inject it into the 2671 * OK we have a Guest TLB entry, now inject it into the
2660 * shadow host TLB 2672 * shadow host TLB
2661 */ 2673 */
2662 kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb); 2674 if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb)) {
2675 kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n",
2676 __func__, va, index, vcpu,
2677 read_c0_entryhi());
2678 er = EMULATE_FAIL;
2679 }
2663 } 2680 }
2664 } 2681 }
2665 2682
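
All three emulate.c hunks share one shape: kvm_mips_handle_kseg0_tlb_fault() and kvm_mips_handle_mapped_seg_tlb_fault() used to be called purely for their side effect, so a failed mapping silently let the guest keep running on a missing TLB entry. The fix propagates the failure as EMULATE_FAIL, taking care to undo the earlier preempt_disable() on the way out. Distilled (a sketch, not verbatim):

	if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb)) {
		kvm_err("%s: mapped seg tlb fault failed\n", __func__);
		er = EMULATE_FAIL;	/* surface the failure... */
		preempt_enable();	/* ...and unwind the success path */
		goto done;
	}
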
diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
index 57319ee57c4f..121008c0fcc9 100644
--- a/arch/mips/kvm/mmu.c
+++ b/arch/mips/kvm/mmu.c
@@ -40,7 +40,7 @@ static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
40 srcu_idx = srcu_read_lock(&kvm->srcu); 40 srcu_idx = srcu_read_lock(&kvm->srcu);
41 pfn = gfn_to_pfn(kvm, gfn); 41 pfn = gfn_to_pfn(kvm, gfn);
42 42
43 if (is_error_pfn(pfn)) { 43 if (is_error_noslot_pfn(pfn)) {
44 kvm_err("Couldn't get pfn for gfn %#llx!\n", gfn); 44 kvm_err("Couldn't get pfn for gfn %#llx!\n", gfn);
45 err = -EFAULT; 45 err = -EFAULT;
46 goto out; 46 goto out;
@@ -99,7 +99,7 @@ int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
99 } 99 }
100 100
101 gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT); 101 gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
102 if (gfn >= kvm->arch.guest_pmap_npages) { 102 if ((gfn | 1) >= kvm->arch.guest_pmap_npages) {
103 kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__, 103 kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
104 gfn, badvaddr); 104 gfn, badvaddr);
105 kvm_mips_dump_host_tlbs(); 105 kvm_mips_dump_host_tlbs();
@@ -138,35 +138,49 @@ int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
138 unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0; 138 unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
139 struct kvm *kvm = vcpu->kvm; 139 struct kvm *kvm = vcpu->kvm;
140 kvm_pfn_t pfn0, pfn1; 140 kvm_pfn_t pfn0, pfn1;
141 gfn_t gfn0, gfn1;
142 long tlb_lo[2];
141 int ret; 143 int ret;
142 144
143 if ((tlb->tlb_hi & VPN2_MASK) == 0) { 145 tlb_lo[0] = tlb->tlb_lo[0];
144 pfn0 = 0; 146 tlb_lo[1] = tlb->tlb_lo[1];
145 pfn1 = 0; 147
146 } else { 148 /*
147 if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo[0]) 149 * The commpage address must not be mapped to anything else if the guest
148 >> PAGE_SHIFT) < 0) 150 * TLB contains entries nearby, or commpage accesses will break.
149 return -1; 151 */
150 152 if (!((tlb->tlb_hi ^ KVM_GUEST_COMMPAGE_ADDR) &
151 if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo[1]) 153 VPN2_MASK & (PAGE_MASK << 1)))
152 >> PAGE_SHIFT) < 0) 154 tlb_lo[(KVM_GUEST_COMMPAGE_ADDR >> PAGE_SHIFT) & 1] = 0;
153 return -1; 155
154 156 gfn0 = mips3_tlbpfn_to_paddr(tlb_lo[0]) >> PAGE_SHIFT;
155 pfn0 = kvm->arch.guest_pmap[ 157 gfn1 = mips3_tlbpfn_to_paddr(tlb_lo[1]) >> PAGE_SHIFT;
156 mips3_tlbpfn_to_paddr(tlb->tlb_lo[0]) >> PAGE_SHIFT]; 158 if (gfn0 >= kvm->arch.guest_pmap_npages ||
157 pfn1 = kvm->arch.guest_pmap[ 159 gfn1 >= kvm->arch.guest_pmap_npages) {
158 mips3_tlbpfn_to_paddr(tlb->tlb_lo[1]) >> PAGE_SHIFT]; 160 kvm_err("%s: Invalid gfn: [%#llx, %#llx], EHi: %#lx\n",
161 __func__, gfn0, gfn1, tlb->tlb_hi);
162 kvm_mips_dump_guest_tlbs(vcpu);
163 return -1;
159 } 164 }
160 165
166 if (kvm_mips_map_page(kvm, gfn0) < 0)
167 return -1;
168
169 if (kvm_mips_map_page(kvm, gfn1) < 0)
170 return -1;
171
172 pfn0 = kvm->arch.guest_pmap[gfn0];
173 pfn1 = kvm->arch.guest_pmap[gfn1];
174
161 /* Get attributes from the Guest TLB */ 175 /* Get attributes from the Guest TLB */
162 entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | 176 entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) |
163 ((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) | 177 ((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
164 (tlb->tlb_lo[0] & ENTRYLO_D) | 178 (tlb_lo[0] & ENTRYLO_D) |
165 (tlb->tlb_lo[0] & ENTRYLO_V); 179 (tlb_lo[0] & ENTRYLO_V);
166 entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | 180 entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) |
167 ((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) | 181 ((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
168 (tlb->tlb_lo[1] & ENTRYLO_D) | 182 (tlb_lo[1] & ENTRYLO_D) |
169 (tlb->tlb_lo[1] & ENTRYLO_V); 183 (tlb_lo[1] & ENTRYLO_V);
170 184
171 kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc, 185 kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
172 tlb->tlb_lo[0], tlb->tlb_lo[1]); 186 tlb->tlb_lo[0], tlb->tlb_lo[1]);
@@ -354,9 +368,15 @@ u32 kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu)
354 local_irq_restore(flags); 368 local_irq_restore(flags);
355 return KVM_INVALID_INST; 369 return KVM_INVALID_INST;
356 } 370 }
357 kvm_mips_handle_mapped_seg_tlb_fault(vcpu, 371 if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
358 &vcpu->arch. 372 &vcpu->arch.guest_tlb[index])) {
359 guest_tlb[index]); 373 kvm_err("%s: handling mapped seg tlb fault failed for %p, index: %u, vcpu: %p, ASID: %#lx\n",
374 __func__, opc, index, vcpu,
375 read_c0_entryhi());
376 kvm_mips_dump_guest_tlbs(vcpu);
377 local_irq_restore(flags);
378 return KVM_INVALID_INST;
379 }
360 inst = *(opc); 380 inst = *(opc);
361 } 381 }
362 local_irq_restore(flags); 382 local_irq_restore(flags);
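
Two details of the mmu.c hunks are worth spelling out. First, a MIPS TLB entry maps an even/odd pair of virtual pages, which is why the kseg0 bounds check above reads (gfn | 1) >= guest_pmap_npages: validating the odd member of the pair validates both gfns with a single compare. Second, tlb_lo[] is copied into locals before the commpage masking, so the guest's own TLB entry is never modified. The bounds trick, demonstrated:

	#include <assert.h>

	/* in-bounds iff both pages of the even/odd pair exist */
	static int pair_in_bounds(unsigned long gfn, unsigned long npages)
	{
		return (gfn | 1) < npages;
	}

	int main(void)
	{
		assert(pair_in_bounds(4, 8));	/* pair {4,5} fits */
		assert(pair_in_bounds(5, 8));	/* same pair, odd member */
		assert(!pair_in_bounds(6, 7));	/* gfn 6 < 7, but its partner
						   page 7 does not exist */
		return 0;
	}
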
diff --git a/arch/mn10300/include/asm/uaccess.h b/arch/mn10300/include/asm/uaccess.h
index 20f7bf6de384..d012e877a95a 100644
--- a/arch/mn10300/include/asm/uaccess.h
+++ b/arch/mn10300/include/asm/uaccess.h
@@ -166,6 +166,7 @@ struct __large_struct { unsigned long buf[100]; };
166 "2:\n" \ 166 "2:\n" \
167 " .section .fixup,\"ax\"\n" \ 167 " .section .fixup,\"ax\"\n" \
168 "3:\n\t" \ 168 "3:\n\t" \
169 " mov 0,%1\n" \
169 " mov %3,%0\n" \ 170 " mov %3,%0\n" \
170 " jmp 2b\n" \ 171 " jmp 2b\n" \
171 " .previous\n" \ 172 " .previous\n" \
diff --git a/arch/mn10300/lib/usercopy.c b/arch/mn10300/lib/usercopy.c
index 7826e6c364e7..ce8899e5e171 100644
--- a/arch/mn10300/lib/usercopy.c
+++ b/arch/mn10300/lib/usercopy.c
@@ -9,7 +9,7 @@
9 * as published by the Free Software Foundation; either version 9 * as published by the Free Software Foundation; either version
10 * 2 of the Licence, or (at your option) any later version. 10 * 2 of the Licence, or (at your option) any later version.
11 */ 11 */
12#include <asm/uaccess.h> 12#include <linux/uaccess.h>
13 13
14unsigned long 14unsigned long
15__generic_copy_to_user(void *to, const void *from, unsigned long n) 15__generic_copy_to_user(void *to, const void *from, unsigned long n)
@@ -24,6 +24,8 @@ __generic_copy_from_user(void *to, const void *from, unsigned long n)
24{ 24{
25 if (access_ok(VERIFY_READ, from, n)) 25 if (access_ok(VERIFY_READ, from, n))
26 __copy_user_zeroing(to, from, n); 26 __copy_user_zeroing(to, from, n);
27 else
28 memset(to, 0, n);
27 return n; 29 return n;
28} 30}
29 31
diff --git a/arch/nios2/include/asm/uaccess.h b/arch/nios2/include/asm/uaccess.h
index caa51ff85a3c..0ab82324c817 100644
--- a/arch/nios2/include/asm/uaccess.h
+++ b/arch/nios2/include/asm/uaccess.h
@@ -102,9 +102,12 @@ extern long __copy_to_user(void __user *to, const void *from, unsigned long n);
102static inline long copy_from_user(void *to, const void __user *from, 102static inline long copy_from_user(void *to, const void __user *from,
103 unsigned long n) 103 unsigned long n)
104{ 104{
105 if (!access_ok(VERIFY_READ, from, n)) 105 unsigned long res = n;
106 return n; 106 if (access_ok(VERIFY_READ, from, n))
107 return __copy_from_user(to, from, n); 107 res = __copy_from_user(to, from, n);
108 if (unlikely(res))
109 memset(to + (n - res), 0, res);
110 return res;
108} 111}
109 112
110static inline long copy_to_user(void __user *to, const void *from, 113static inline long copy_to_user(void __user *to, const void *from,
@@ -139,7 +142,7 @@ extern long strnlen_user(const char __user *s, long n);
139 142
140#define __get_user_unknown(val, size, ptr, err) do { \ 143#define __get_user_unknown(val, size, ptr, err) do { \
141 err = 0; \ 144 err = 0; \
142 if (copy_from_user(&(val), ptr, size)) { \ 145 if (__copy_from_user(&(val), ptr, size)) { \
143 err = -EFAULT; \ 146 err = -EFAULT; \
144 } \ 147 } \
145 } while (0) 148 } while (0)
@@ -166,7 +169,7 @@ do { \
166 ({ \ 169 ({ \
167 long __gu_err = -EFAULT; \ 170 long __gu_err = -EFAULT; \
168 const __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \ 171 const __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \
169 unsigned long __gu_val; \ 172 unsigned long __gu_val = 0; \
170 __get_user_common(__gu_val, sizeof(*(ptr)), __gu_ptr, __gu_err);\ 173 __get_user_common(__gu_val, sizeof(*(ptr)), __gu_ptr, __gu_err);\
171 (x) = (__force __typeof__(x))__gu_val; \ 174 (x) = (__force __typeof__(x))__gu_val; \
172 __gu_err; \ 175 __gu_err; \
diff --git a/arch/openrisc/include/asm/uaccess.h b/arch/openrisc/include/asm/uaccess.h
index a6bd07ca3d6c..5cc6b4f1b795 100644
--- a/arch/openrisc/include/asm/uaccess.h
+++ b/arch/openrisc/include/asm/uaccess.h
@@ -273,28 +273,20 @@ __copy_tofrom_user(void *to, const void *from, unsigned long size);
273static inline unsigned long 273static inline unsigned long
274copy_from_user(void *to, const void *from, unsigned long n) 274copy_from_user(void *to, const void *from, unsigned long n)
275{ 275{
276 unsigned long over; 276 unsigned long res = n;
277 277
278 if (access_ok(VERIFY_READ, from, n)) 278 if (likely(access_ok(VERIFY_READ, from, n)))
279 return __copy_tofrom_user(to, from, n); 279 res = __copy_tofrom_user(to, from, n);
280 if ((unsigned long)from < TASK_SIZE) { 280 if (unlikely(res))
281 over = (unsigned long)from + n - TASK_SIZE; 281 memset(to + (n - res), 0, res);
282 return __copy_tofrom_user(to, from, n - over) + over; 282 return res;
283 }
284 return n;
285} 283}
286 284
287static inline unsigned long 285static inline unsigned long
288copy_to_user(void *to, const void *from, unsigned long n) 286copy_to_user(void *to, const void *from, unsigned long n)
289{ 287{
290 unsigned long over; 288 if (likely(access_ok(VERIFY_WRITE, to, n)))
291 289 n = __copy_tofrom_user(to, from, n);
292 if (access_ok(VERIFY_WRITE, to, n))
293 return __copy_tofrom_user(to, from, n);
294 if ((unsigned long)to < TASK_SIZE) {
295 over = (unsigned long)to + n - TASK_SIZE;
296 return __copy_tofrom_user(to, from, n - over) + over;
297 }
298 return n; 290 return n;
299} 291}
300 292
@@ -303,13 +295,8 @@ extern unsigned long __clear_user(void *addr, unsigned long size);
303static inline __must_check unsigned long 295static inline __must_check unsigned long
304clear_user(void *addr, unsigned long size) 296clear_user(void *addr, unsigned long size)
305{ 297{
306 298 if (likely(access_ok(VERIFY_WRITE, addr, size)))
307 if (access_ok(VERIFY_WRITE, addr, size)) 299 size = __clear_user(addr, size);
308 return __clear_user(addr, size);
309 if ((unsigned long)addr < TASK_SIZE) {
310 unsigned long over = (unsigned long)addr + size - TASK_SIZE;
311 return __clear_user(addr, size - over) + over;
312 }
313 return size; 300 return size;
314} 301}
315 302
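
The deleted TASK_SIZE 'over' arithmetic tried to salvage ranges straddling the user/kernel boundary: copy the in-bounds prefix, report the remainder uncopied. Besides second-guessing access_ok(), on the copy_from_user() side it returned a short count while leaving the uncopied tail of the kernel buffer untouched, which is exactly the leak this series is stamping out. The contrast, condensed from the hunk above:

	/* removed: partial salvage of a straddling range */
	if ((unsigned long)from < TASK_SIZE) {
		over = (unsigned long)from + n - TASK_SIZE;
		/* short count returned, but to[n - over..n) never scrubbed */
		return __copy_tofrom_user(to, from, n - over) + over;
	}

	/* replacement: all-or-nothing, with the uncopied tail zeroed */
	if (likely(access_ok(VERIFY_READ, from, n)))
		res = __copy_tofrom_user(to, from, n);
	if (unlikely(res))
		memset(to + (n - res), 0, res);
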
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index cd8778103165..af12c2db9bb8 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -1,6 +1,5 @@
1config PARISC 1config PARISC
2 def_bool y 2 def_bool y
3 select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
4 select ARCH_MIGHT_HAVE_PC_PARPORT 3 select ARCH_MIGHT_HAVE_PC_PARPORT
5 select HAVE_IDE 4 select HAVE_IDE
6 select HAVE_OPROFILE 5 select HAVE_OPROFILE
diff --git a/arch/parisc/configs/c8000_defconfig b/arch/parisc/configs/c8000_defconfig
index 1a8f6f95689e..f6a4c016304b 100644
--- a/arch/parisc/configs/c8000_defconfig
+++ b/arch/parisc/configs/c8000_defconfig
@@ -245,7 +245,6 @@ CONFIG_DEBUG_RT_MUTEXES=y
245CONFIG_PROVE_RCU_DELAY=y 245CONFIG_PROVE_RCU_DELAY=y
246CONFIG_DEBUG_BLOCK_EXT_DEVT=y 246CONFIG_DEBUG_BLOCK_EXT_DEVT=y
247CONFIG_LATENCYTOP=y 247CONFIG_LATENCYTOP=y
248CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y
249CONFIG_KEYS=y 248CONFIG_KEYS=y
250# CONFIG_CRYPTO_HW is not set 249# CONFIG_CRYPTO_HW is not set
251CONFIG_FONTS=y 250CONFIG_FONTS=y
diff --git a/arch/parisc/configs/generic-64bit_defconfig b/arch/parisc/configs/generic-64bit_defconfig
index 7e0792658952..c564e6e1fa23 100644
--- a/arch/parisc/configs/generic-64bit_defconfig
+++ b/arch/parisc/configs/generic-64bit_defconfig
@@ -291,7 +291,6 @@ CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
291CONFIG_BOOTPARAM_HUNG_TASK_PANIC=y 291CONFIG_BOOTPARAM_HUNG_TASK_PANIC=y
292# CONFIG_SCHED_DEBUG is not set 292# CONFIG_SCHED_DEBUG is not set
293CONFIG_TIMER_STATS=y 293CONFIG_TIMER_STATS=y
294CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y
295CONFIG_CRYPTO_MANAGER=y 294CONFIG_CRYPTO_MANAGER=y
296CONFIG_CRYPTO_ECB=m 295CONFIG_CRYPTO_ECB=m
297CONFIG_CRYPTO_PCBC=m 296CONFIG_CRYPTO_PCBC=m
diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
index 0f59fd9ca205..482847865dac 100644
--- a/arch/parisc/include/asm/uaccess.h
+++ b/arch/parisc/include/asm/uaccess.h
@@ -10,6 +10,7 @@
10#include <asm-generic/uaccess-unaligned.h> 10#include <asm-generic/uaccess-unaligned.h>
11 11
12#include <linux/bug.h> 12#include <linux/bug.h>
13#include <linux/string.h>
13 14
14#define VERIFY_READ 0 15#define VERIFY_READ 0
15#define VERIFY_WRITE 1 16#define VERIFY_WRITE 1
@@ -208,26 +209,30 @@ unsigned long copy_in_user(void __user *dst, const void __user *src, unsigned lo
208#define __copy_to_user_inatomic __copy_to_user 209#define __copy_to_user_inatomic __copy_to_user
209#define __copy_from_user_inatomic __copy_from_user 210#define __copy_from_user_inatomic __copy_from_user
210 211
211extern void copy_from_user_overflow(void) 212extern void __compiletime_error("usercopy buffer size is too small")
212#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS 213__bad_copy_user(void);
213 __compiletime_error("copy_from_user() buffer size is not provably correct") 214
214#else 215static inline void copy_user_overflow(int size, unsigned long count)
215 __compiletime_warning("copy_from_user() buffer size is not provably correct") 216{
216#endif 217 WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
217; 218}
218 219
219static inline unsigned long __must_check copy_from_user(void *to, 220static inline unsigned long __must_check copy_from_user(void *to,
220 const void __user *from, 221 const void __user *from,
221 unsigned long n) 222 unsigned long n)
222{ 223{
223 int sz = __compiletime_object_size(to); 224 int sz = __compiletime_object_size(to);
224 int ret = -EFAULT; 225 unsigned long ret = n;
225 226
226 if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n)) 227 if (likely(sz == -1 || sz >= n))
227 ret = __copy_from_user(to, from, n); 228 ret = __copy_from_user(to, from, n);
228 else 229 else if (!__builtin_constant_p(n))
229 copy_from_user_overflow(); 230 copy_user_overflow(sz, n);
231 else
232 __bad_copy_user();
230 233
234 if (unlikely(ret))
235 memset(to + (n - ret), 0, ret);
231 return ret; 236 return ret;
232} 237}
233 238
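
The parisc rework splits the overflow diagnostics in two. If the kernel buffer is provably too small for a compile-time-constant n, __bad_copy_user(), declared but never defined and tagged __compiletime_error(), breaks the build; if n is only known at runtime, copy_user_overflow() WARNs, the copy is skipped, and the destination is scrubbed. __compiletime_object_size() is a thin wrapper over GCC's __builtin_object_size(), which you can poke at from userspace (build with -O1 or higher; the object-size pass needs optimization):

	#include <stdio.h>
	#include <stdlib.h>

	int main(int argc, char **argv)
	{
		char stack_buf[8];
		char *heap_buf = malloc((size_t)argc * 8);	/* size not constant */
		(void)argv;

		/* best static upper bound on the object's size, or (size_t)-1 */
		printf("stack: %ld\n", (long)__builtin_object_size(stack_buf, 0)); /* 8 */
		printf("heap:  %ld\n", (long)__builtin_object_size(heap_buf, 0));  /* typically -1 */

		free(heap_buf);
		return 0;
	}
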
diff --git a/arch/parisc/include/uapi/asm/errno.h b/arch/parisc/include/uapi/asm/errno.h
index c0ae62520d15..274d5bc6ecce 100644
--- a/arch/parisc/include/uapi/asm/errno.h
+++ b/arch/parisc/include/uapi/asm/errno.h
@@ -97,10 +97,10 @@
97#define ENOTCONN 235 /* Transport endpoint is not connected */ 97#define ENOTCONN 235 /* Transport endpoint is not connected */
98#define ESHUTDOWN 236 /* Cannot send after transport endpoint shutdown */ 98#define ESHUTDOWN 236 /* Cannot send after transport endpoint shutdown */
99#define ETOOMANYREFS 237 /* Too many references: cannot splice */ 99#define ETOOMANYREFS 237 /* Too many references: cannot splice */
100#define EREFUSED ECONNREFUSED /* for HP's NFS apparently */
101#define ETIMEDOUT 238 /* Connection timed out */ 100#define ETIMEDOUT 238 /* Connection timed out */
102#define ECONNREFUSED 239 /* Connection refused */ 101#define ECONNREFUSED 239 /* Connection refused */
103#define EREMOTERELEASE 240 /* Remote peer released connection */ 102#define EREFUSED ECONNREFUSED /* for HP's NFS apparently */
103#define EREMOTERELEASE 240 /* Remote peer released connection */
104#define EHOSTDOWN 241 /* Host is down */ 104#define EHOSTDOWN 241 /* Host is down */
105#define EHOSTUNREACH 242 /* No route to host */ 105#define EHOSTUNREACH 242 /* No route to host */
106 106
diff --git a/arch/parisc/kernel/processor.c b/arch/parisc/kernel/processor.c
index 5adc339eb7c8..0c2a94a0f751 100644
--- a/arch/parisc/kernel/processor.c
+++ b/arch/parisc/kernel/processor.c
@@ -51,8 +51,6 @@ EXPORT_SYMBOL(_parisc_requires_coherency);
51 51
52DEFINE_PER_CPU(struct cpuinfo_parisc, cpu_data); 52DEFINE_PER_CPU(struct cpuinfo_parisc, cpu_data);
53 53
54extern int update_cr16_clocksource(void); /* from time.c */
55
56/* 54/*
57** PARISC CPU driver - claim "device" and initialize CPU data structures. 55** PARISC CPU driver - claim "device" and initialize CPU data structures.
58** 56**
@@ -228,12 +226,6 @@ static int processor_probe(struct parisc_device *dev)
228 } 226 }
229#endif 227#endif
230 228
231 /* If we've registered more than one cpu,
232 * we'll use the jiffies clocksource since cr16
233 * is not synchronized between CPUs.
234 */
235 update_cr16_clocksource();
236
237 return 0; 229 return 0;
238} 230}
239 231
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
index 505cf1ac5af2..4b0b963d52a7 100644
--- a/arch/parisc/kernel/time.c
+++ b/arch/parisc/kernel/time.c
@@ -221,18 +221,6 @@ static struct clocksource clocksource_cr16 = {
221 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 221 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
222}; 222};
223 223
224int update_cr16_clocksource(void)
225{
226 /* since the cr16 cycle counters are not synchronized across CPUs,
227 we'll check if we should switch to a safe clocksource: */
228 if (clocksource_cr16.rating != 0 && num_online_cpus() > 1) {
229 clocksource_change_rating(&clocksource_cr16, 0);
230 return 1;
231 }
232
233 return 0;
234}
235
236void __init start_cpu_itimer(void) 224void __init start_cpu_itimer(void)
237{ 225{
238 unsigned int cpu = smp_processor_id(); 226 unsigned int cpu = smp_processor_id();
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index ec4047e170a0..927d2ab2ce08 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -166,6 +166,7 @@ config PPC
166 select HAVE_LIVEPATCH if HAVE_DYNAMIC_FTRACE_WITH_REGS 166 select HAVE_LIVEPATCH if HAVE_DYNAMIC_FTRACE_WITH_REGS
167 select GENERIC_CPU_AUTOPROBE 167 select GENERIC_CPU_AUTOPROBE
168 select HAVE_VIRT_CPU_ACCOUNTING 168 select HAVE_VIRT_CPU_ACCOUNTING
169 select HAVE_ARCH_HARDENED_USERCOPY
169 170
170config GENERIC_CSUM 171config GENERIC_CSUM
171 def_bool CPU_LITTLE_ENDIAN 172 def_bool CPU_LITTLE_ENDIAN
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index ca254546cd05..1934707bf321 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -66,29 +66,28 @@ endif
66UTS_MACHINE := $(OLDARCH) 66UTS_MACHINE := $(OLDARCH)
67 67
68ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y) 68ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y)
69override CC += -mlittle-endian
70ifneq ($(cc-name),clang)
71override CC += -mno-strict-align
72endif
73override AS += -mlittle-endian
74override LD += -EL 69override LD += -EL
75override CROSS32CC += -mlittle-endian
76override CROSS32AS += -mlittle-endian 70override CROSS32AS += -mlittle-endian
77LDEMULATION := lppc 71LDEMULATION := lppc
78GNUTARGET := powerpcle 72GNUTARGET := powerpcle
79MULTIPLEWORD := -mno-multiple 73MULTIPLEWORD := -mno-multiple
80KBUILD_CFLAGS_MODULE += $(call cc-option,-mno-save-toc-indirect) 74KBUILD_CFLAGS_MODULE += $(call cc-option,-mno-save-toc-indirect)
81else 75else
82ifeq ($(call cc-option-yn,-mbig-endian),y)
83override CC += -mbig-endian
84override AS += -mbig-endian
85endif
86override LD += -EB 76override LD += -EB
87LDEMULATION := ppc 77LDEMULATION := ppc
88GNUTARGET := powerpc 78GNUTARGET := powerpc
89MULTIPLEWORD := -mmultiple 79MULTIPLEWORD := -mmultiple
90endif 80endif
91 81
82cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mbig-endian)
83cflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mlittle-endian
84ifneq ($(cc-name),clang)
85 cflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mno-strict-align
86endif
87
88aflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mbig-endian)
89aflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mlittle-endian
90
92ifeq ($(HAS_BIARCH),y) 91ifeq ($(HAS_BIARCH),y)
93override AS += -a$(CONFIG_WORD_SIZE) 92override AS += -a$(CONFIG_WORD_SIZE)
94override LD += -m elf$(CONFIG_WORD_SIZE)$(LDEMULATION) 93override LD += -m elf$(CONFIG_WORD_SIZE)$(LDEMULATION)
@@ -232,6 +231,9 @@ cpu-as-$(CONFIG_E200) += -Wa,-me200
232KBUILD_AFLAGS += $(cpu-as-y) 231KBUILD_AFLAGS += $(cpu-as-y)
233KBUILD_CFLAGS += $(cpu-as-y) 232KBUILD_CFLAGS += $(cpu-as-y)
234 233
234KBUILD_AFLAGS += $(aflags-y)
235KBUILD_CFLAGS += $(cflags-y)
236
235head-y := arch/powerpc/kernel/head_$(CONFIG_WORD_SIZE).o 237head-y := arch/powerpc/kernel/head_$(CONFIG_WORD_SIZE).o
236head-$(CONFIG_8xx) := arch/powerpc/kernel/head_8xx.o 238head-$(CONFIG_8xx) := arch/powerpc/kernel/head_8xx.o
237head-$(CONFIG_40x) := arch/powerpc/kernel/head_40x.o 239head-$(CONFIG_40x) := arch/powerpc/kernel/head_40x.o
diff --git a/arch/powerpc/crypto/crc32c-vpmsum_glue.c b/arch/powerpc/crypto/crc32c-vpmsum_glue.c
index bfe3d37a24ef..9fa046d56eba 100644
--- a/arch/powerpc/crypto/crc32c-vpmsum_glue.c
+++ b/arch/powerpc/crypto/crc32c-vpmsum_glue.c
@@ -4,6 +4,7 @@
4#include <linux/module.h> 4#include <linux/module.h>
5#include <linux/string.h> 5#include <linux/string.h>
6#include <linux/kernel.h> 6#include <linux/kernel.h>
7#include <linux/cpufeature.h>
7#include <asm/switch_to.h> 8#include <asm/switch_to.h>
8 9
9#define CHKSUM_BLOCK_SIZE 1 10#define CHKSUM_BLOCK_SIZE 1
@@ -157,7 +158,7 @@ static void __exit crc32c_vpmsum_mod_fini(void)
157 crypto_unregister_shash(&alg); 158 crypto_unregister_shash(&alg);
158} 159}
159 160
160module_init(crc32c_vpmsum_mod_init); 161module_cpu_feature_match(PPC_MODULE_FEATURE_VEC_CRYPTO, crc32c_vpmsum_mod_init);
161module_exit(crc32c_vpmsum_mod_fini); 162module_exit(crc32c_vpmsum_mod_fini);
162 163
163MODULE_AUTHOR("Anton Blanchard <anton@samba.org>"); 164MODULE_AUTHOR("Anton Blanchard <anton@samba.org>");
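
module_cpu_feature_match() buys two things over plain module_init(): the init routine only runs when the CPU advertises the named feature (here POWER8's vector crypto facility), and the module gains a 'cpu:...' modalias so udev auto-loads it only on matching hardware. Condensed:

	#include <linux/cpufeature.h>

	/* before: registered the shash unconditionally, even on CPUs
	 * that cannot execute the vpmsum instructions:
	 *
	 *	module_init(crc32c_vpmsum_mod_init);
	 *
	 * after: gated on the feature bit, plus an alias for autoloading */
	module_cpu_feature_match(PPC_MODULE_FEATURE_VEC_CRYPTO, crc32c_vpmsum_mod_init);
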
diff --git a/arch/powerpc/include/asm/cpu_has_feature.h b/arch/powerpc/include/asm/cpu_has_feature.h
index 2ef55f8968a2..b312b152461b 100644
--- a/arch/powerpc/include/asm/cpu_has_feature.h
+++ b/arch/powerpc/include/asm/cpu_has_feature.h
@@ -15,7 +15,7 @@ static inline bool early_cpu_has_feature(unsigned long feature)
15#ifdef CONFIG_JUMP_LABEL_FEATURE_CHECKS 15#ifdef CONFIG_JUMP_LABEL_FEATURE_CHECKS
16#include <linux/jump_label.h> 16#include <linux/jump_label.h>
17 17
18#define NUM_CPU_FTR_KEYS 64 18#define NUM_CPU_FTR_KEYS BITS_PER_LONG
19 19
20extern struct static_key_true cpu_feature_keys[NUM_CPU_FTR_KEYS]; 20extern struct static_key_true cpu_feature_keys[NUM_CPU_FTR_KEYS];
21 21
diff --git a/arch/powerpc/include/asm/cpuidle.h b/arch/powerpc/include/asm/cpuidle.h
index 3d7fc06532a1..01b8a13f0224 100644
--- a/arch/powerpc/include/asm/cpuidle.h
+++ b/arch/powerpc/include/asm/cpuidle.h
@@ -19,4 +19,17 @@ extern u64 pnv_first_deep_stop_state;
19 19
20#endif 20#endif
21 21
22/* Idle state entry routines */
23#ifdef CONFIG_PPC_P7_NAP
24#define IDLE_STATE_ENTER_SEQ(IDLE_INST) \
25 /* Magic NAP/SLEEP/WINKLE mode enter sequence */ \
26 std r0,0(r1); \
27 ptesync; \
28 ld r0,0(r1); \
291: cmp cr0,r0,r0; \
30 bne 1b; \
31 IDLE_INST; \
32 b .
33#endif /* CONFIG_PPC_P7_NAP */
34
22#endif 35#endif
diff --git a/arch/powerpc/include/asm/cputhreads.h b/arch/powerpc/include/asm/cputhreads.h
index 666bef4ebfae..9377bdf42eb8 100644
--- a/arch/powerpc/include/asm/cputhreads.h
+++ b/arch/powerpc/include/asm/cputhreads.h
@@ -3,6 +3,7 @@
3 3
4#ifndef __ASSEMBLY__ 4#ifndef __ASSEMBLY__
5#include <linux/cpumask.h> 5#include <linux/cpumask.h>
6#include <asm/cpu_has_feature.h>
6 7
7/* 8/*
8 * Mapping of threads to cores 9 * Mapping of threads to cores
diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h
index 57fec8ac7b92..ddf54f5bbdd1 100644
--- a/arch/powerpc/include/asm/feature-fixups.h
+++ b/arch/powerpc/include/asm/feature-fixups.h
@@ -186,6 +186,7 @@ label##3: \
186 186
187#ifndef __ASSEMBLY__ 187#ifndef __ASSEMBLY__
188void apply_feature_fixups(void); 188void apply_feature_fixups(void);
189void setup_feature_keys(void);
189#endif 190#endif
190 191
191#endif /* __ASM_POWERPC_FEATURE_FIXUPS_H */ 192#endif /* __ASM_POWERPC_FEATURE_FIXUPS_H */
diff --git a/arch/powerpc/include/asm/hmi.h b/arch/powerpc/include/asm/hmi.h
index 88b4901ac4ee..85b7a1a21e22 100644
--- a/arch/powerpc/include/asm/hmi.h
+++ b/arch/powerpc/include/asm/hmi.h
@@ -21,7 +21,7 @@
21#ifndef __ASM_PPC64_HMI_H__ 21#ifndef __ASM_PPC64_HMI_H__
22#define __ASM_PPC64_HMI_H__ 22#define __ASM_PPC64_HMI_H__
23 23
24#ifdef CONFIG_PPC_BOOK3S_64 24#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
25 25
26#define CORE_TB_RESYNC_REQ_BIT 63 26#define CORE_TB_RESYNC_REQ_BIT 63
27#define MAX_SUBCORE_PER_CORE 4 27#define MAX_SUBCORE_PER_CORE 4
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index 148303e7771f..6a6792bb39fb 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -183,11 +183,6 @@ struct paca_struct {
183 */ 183 */
184 u16 in_mce; 184 u16 in_mce;
185 u8 hmi_event_available; /* HMI event is available */ 185 u8 hmi_event_available; /* HMI event is available */
186 /*
187 * Bitmap for sibling subcore status. See kvm/book3s_hv_ras.c for
188 * more details
189 */
190 struct sibling_subcore_state *sibling_subcore_state;
191#endif 186#endif
192 187
193 /* Stuff for accurate time accounting */ 188 /* Stuff for accurate time accounting */
@@ -202,6 +197,13 @@ struct paca_struct {
202 struct kvmppc_book3s_shadow_vcpu shadow_vcpu; 197 struct kvmppc_book3s_shadow_vcpu shadow_vcpu;
203#endif 198#endif
204 struct kvmppc_host_state kvm_hstate; 199 struct kvmppc_host_state kvm_hstate;
200#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
201 /*
202 * Bitmap for sibling subcore status. See kvm/book3s_hv_ras.c for
203 * more details
204 */
205 struct sibling_subcore_state *sibling_subcore_state;
206#endif
205#endif 207#endif
206}; 208};
207 209
diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h
index b5e88e4a171a..c0309c59bed8 100644
--- a/arch/powerpc/include/asm/pci-bridge.h
+++ b/arch/powerpc/include/asm/pci-bridge.h
@@ -301,6 +301,7 @@ extern void pci_process_bridge_OF_ranges(struct pci_controller *hose,
301/* Allocate & free a PCI host bridge structure */ 301/* Allocate & free a PCI host bridge structure */
302extern struct pci_controller *pcibios_alloc_controller(struct device_node *dev); 302extern struct pci_controller *pcibios_alloc_controller(struct device_node *dev);
303extern void pcibios_free_controller(struct pci_controller *phb); 303extern void pcibios_free_controller(struct pci_controller *phb);
304extern void pcibios_free_controller_deferred(struct pci_host_bridge *bridge);
304 305
305#ifdef CONFIG_PCI 306#ifdef CONFIG_PCI
306extern int pcibios_vaddr_is_ioport(void __iomem *address); 307extern int pcibios_vaddr_is_ioport(void __iomem *address);
diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h
index 0a74ebe934e1..17c8380673a6 100644
--- a/arch/powerpc/include/asm/switch_to.h
+++ b/arch/powerpc/include/asm/switch_to.h
@@ -75,14 +75,6 @@ static inline void disable_kernel_spe(void)
75static inline void __giveup_spe(struct task_struct *t) { } 75static inline void __giveup_spe(struct task_struct *t) { }
76#endif 76#endif
77 77
78#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
79extern void flush_tmregs_to_thread(struct task_struct *);
80#else
81static inline void flush_tmregs_to_thread(struct task_struct *t)
82{
83}
84#endif
85
86static inline void clear_task_ebb(struct task_struct *t) 78static inline void clear_task_ebb(struct task_struct *t)
87{ 79{
88#ifdef CONFIG_PPC_BOOK3S_64 80#ifdef CONFIG_PPC_BOOK3S_64
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index b7c20f0b8fbe..c266227fdd5b 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -308,29 +308,20 @@ extern unsigned long __copy_tofrom_user(void __user *to,
308static inline unsigned long copy_from_user(void *to, 308static inline unsigned long copy_from_user(void *to,
309 const void __user *from, unsigned long n) 309 const void __user *from, unsigned long n)
310{ 310{
311 unsigned long over; 311 if (likely(access_ok(VERIFY_READ, from, n))) {
312 312 check_object_size(to, n, false);
313 if (access_ok(VERIFY_READ, from, n))
314 return __copy_tofrom_user((__force void __user *)to, from, n); 313 return __copy_tofrom_user((__force void __user *)to, from, n);
315 if ((unsigned long)from < TASK_SIZE) {
316 over = (unsigned long)from + n - TASK_SIZE;
317 return __copy_tofrom_user((__force void __user *)to, from,
318 n - over) + over;
319 } 314 }
315 memset(to, 0, n);
320 return n; 316 return n;
321} 317}
322 318
323static inline unsigned long copy_to_user(void __user *to, 319static inline unsigned long copy_to_user(void __user *to,
324 const void *from, unsigned long n) 320 const void *from, unsigned long n)
325{ 321{
326 unsigned long over; 322 if (access_ok(VERIFY_WRITE, to, n)) {
327 323 check_object_size(from, n, true);
328 if (access_ok(VERIFY_WRITE, to, n))
329 return __copy_tofrom_user(to, (__force void __user *)from, n); 324 return __copy_tofrom_user(to, (__force void __user *)from, n);
330 if ((unsigned long)to < TASK_SIZE) {
331 over = (unsigned long)to + n - TASK_SIZE;
332 return __copy_tofrom_user(to, (__force void __user *)from,
333 n - over) + over;
334 } 325 }
335 return n; 326 return n;
336} 327}
@@ -372,6 +363,9 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
372 if (ret == 0) 363 if (ret == 0)
373 return 0; 364 return 0;
374 } 365 }
366
367 check_object_size(to, n, false);
368
375 return __copy_tofrom_user((__force void __user *)to, from, n); 369 return __copy_tofrom_user((__force void __user *)to, from, n);
376} 370}
377 371
@@ -398,6 +392,9 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
398 if (ret == 0) 392 if (ret == 0)
399 return 0; 393 return 0;
400 } 394 }
395
396 check_object_size(from, n, true);
397
401 return __copy_tofrom_user(to, (__force const void __user *)from, n); 398 return __copy_tofrom_user(to, (__force const void __user *)from, n);
402} 399}
403 400
@@ -422,10 +419,6 @@ static inline unsigned long clear_user(void __user *addr, unsigned long size)
422 might_fault(); 419 might_fault();
423 if (likely(access_ok(VERIFY_WRITE, addr, size))) 420 if (likely(access_ok(VERIFY_WRITE, addr, size)))
424 return __clear_user(addr, size); 421 return __clear_user(addr, size);
425 if ((unsigned long)addr < TASK_SIZE) {
426 unsigned long over = (unsigned long)addr + size - TASK_SIZE;
427 return __clear_user(addr, size - over) + over;
428 }
429 return size; 422 return size;
430} 423}
431 424
diff --git a/arch/powerpc/include/asm/xics.h b/arch/powerpc/include/asm/xics.h
index f5f729c11578..f0b238516e9b 100644
--- a/arch/powerpc/include/asm/xics.h
+++ b/arch/powerpc/include/asm/xics.h
@@ -159,6 +159,8 @@ extern void xics_teardown_cpu(void);
159extern void xics_kexec_teardown_cpu(int secondary); 159extern void xics_kexec_teardown_cpu(int secondary);
160extern void xics_migrate_irqs_away(void); 160extern void xics_migrate_irqs_away(void);
161extern void icp_native_eoi(struct irq_data *d); 161extern void icp_native_eoi(struct irq_data *d);
162extern int xics_set_irq_type(struct irq_data *d, unsigned int flow_type);
163extern int xics_retrigger(struct irq_data *data);
162#ifdef CONFIG_SMP 164#ifdef CONFIG_SMP
163extern int xics_get_irq_server(unsigned int virq, const struct cpumask *cpumask, 165extern int xics_get_irq_server(unsigned int virq, const struct cpumask *cpumask,
164 unsigned int strict_check); 166 unsigned int strict_check);
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index b2027a5cf508..fe4c075bcf50 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -41,7 +41,7 @@ obj-$(CONFIG_VDSO32) += vdso32/
41obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o 41obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
42obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_ppc970.o cpu_setup_pa6t.o 42obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_ppc970.o cpu_setup_pa6t.o
43obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_power.o 43obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_power.o
44obj-$(CONFIG_PPC_BOOK3S_64) += mce.o mce_power.o hmi.o 44obj-$(CONFIG_PPC_BOOK3S_64) += mce.o mce_power.o
45obj-$(CONFIG_PPC_BOOK3E_64) += exceptions-64e.o idle_book3e.o 45obj-$(CONFIG_PPC_BOOK3E_64) += exceptions-64e.o idle_book3e.o
46obj-$(CONFIG_PPC64) += vdso64/ 46obj-$(CONFIG_PPC64) += vdso64/
47obj-$(CONFIG_ALTIVEC) += vecemu.o 47obj-$(CONFIG_ALTIVEC) += vecemu.o
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index c9bc78e9c610..7429556eb8df 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -168,10 +168,10 @@ static size_t eeh_dump_dev_log(struct eeh_dev *edev, char *buf, size_t len)
168 int n = 0, l = 0; 168 int n = 0, l = 0;
169 char buffer[128]; 169 char buffer[128];
170 170
171 n += scnprintf(buf+n, len-n, "%04x:%02x:%02x:%01x\n", 171 n += scnprintf(buf+n, len-n, "%04x:%02x:%02x.%01x\n",
172 edev->phb->global_number, pdn->busno, 172 edev->phb->global_number, pdn->busno,
173 PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn)); 173 PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn));
174 pr_warn("EEH: of node=%04x:%02x:%02x:%01x\n", 174 pr_warn("EEH: of node=%04x:%02x:%02x.%01x\n",
175 edev->phb->global_number, pdn->busno, 175 edev->phb->global_number, pdn->busno,
176 PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn)); 176 PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn));
177 177
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 6b8bc0dd09d4..5afd03e5e8b8 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -368,13 +368,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
368tabort_syscall: 368tabort_syscall:
369 /* Firstly we need to enable TM in the kernel */ 369 /* Firstly we need to enable TM in the kernel */
370 mfmsr r10 370 mfmsr r10
371 li r13, 1 371 li r9, 1
372 rldimi r10, r13, MSR_TM_LG, 63-MSR_TM_LG 372 rldimi r10, r9, MSR_TM_LG, 63-MSR_TM_LG
373 mtmsrd r10, 0 373 mtmsrd r10, 0
374 374
375 /* tabort, this dooms the transaction, nothing else */ 375 /* tabort, this dooms the transaction, nothing else */
376 li r13, (TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT) 376 li r9, (TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT)
377 TABORT(R13) 377 TABORT(R9)
378 378
379 /* 379 /*
380 * Return directly to userspace. We have corrupted user register state, 380 * Return directly to userspace. We have corrupted user register state,
@@ -382,8 +382,8 @@ tabort_syscall:
382 * resume after the tbegin of the aborted transaction with the 382 * resume after the tbegin of the aborted transaction with the
383 * checkpointed register state. 383 * checkpointed register state.
384 */ 384 */
385 li r13, MSR_RI 385 li r9, MSR_RI
386 andc r10, r10, r13 386 andc r10, r10, r9
387 mtmsrd r10, 1 387 mtmsrd r10, 1
388 mtspr SPRN_SRR0, r11 388 mtspr SPRN_SRR0, r11
389 mtspr SPRN_SRR1, r12 389 mtspr SPRN_SRR1, r12
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 41091fdf9bd8..bffec73dbffc 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -144,29 +144,14 @@ machine_check_pSeries_1:
144 * vector 144 * vector
145 */ 145 */
146 SET_SCRATCH0(r13) /* save r13 */ 146 SET_SCRATCH0(r13) /* save r13 */
147#ifdef CONFIG_PPC_P7_NAP 147 /*
148BEGIN_FTR_SECTION 148 * Running native on arch 2.06 or later, we may wakeup from winkle
149 /* Running native on arch 2.06 or later, check if we are 149 * inside machine check. If yes, then last bit of HSPGR0 would be set
150 * waking up from nap. We only handle no state loss and 150 * to 1. Hence clear it unconditionally.
151 * supervisor state loss. We do -not- handle hypervisor
152 * state loss at this time.
153 */ 151 */
154 mfspr r13,SPRN_SRR1 152 GET_PACA(r13)
155 rlwinm. r13,r13,47-31,30,31 153 clrrdi r13,r13,1
156 OPT_GET_SPR(r13, SPRN_CFAR, CPU_FTR_CFAR) 154 SET_PACA(r13)
157 beq 9f
158
159 mfspr r13,SPRN_SRR1
160 rlwinm. r13,r13,47-31,30,31
161 /* waking up from powersave (nap) state */
162 cmpwi cr1,r13,2
163 /* Total loss of HV state is fatal. let's just stay stuck here */
164 OPT_GET_SPR(r13, SPRN_CFAR, CPU_FTR_CFAR)
165 bgt cr1,.
1669:
167 OPT_SET_SPR(r13, SPRN_CFAR, CPU_FTR_CFAR)
168END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
169#endif /* CONFIG_PPC_P7_NAP */
170 EXCEPTION_PROLOG_0(PACA_EXMC) 155 EXCEPTION_PROLOG_0(PACA_EXMC)
171BEGIN_FTR_SECTION 156BEGIN_FTR_SECTION
172 b machine_check_powernv_early 157 b machine_check_powernv_early
@@ -500,7 +485,23 @@ machine_check_fwnmi:
500 EXCEPTION_PROLOG_0(PACA_EXMC) 485 EXCEPTION_PROLOG_0(PACA_EXMC)
501machine_check_pSeries_0: 486machine_check_pSeries_0:
502 EXCEPTION_PROLOG_1(PACA_EXMC, KVMTEST, 0x200) 487 EXCEPTION_PROLOG_1(PACA_EXMC, KVMTEST, 0x200)
503 EXCEPTION_PROLOG_PSERIES_1(machine_check_common, EXC_STD) 488 /*
489 * The following is essentially EXCEPTION_PROLOG_PSERIES_1 with the
490 * difference that MSR_RI is not enabled, because PACA_EXMC is being
491 * used, so nested machine check corrupts it. machine_check_common
492 * enables MSR_RI.
493 */
494 ld r12,PACAKBASE(r13)
495 ld r10,PACAKMSR(r13)
496 xori r10,r10,MSR_RI
497 mfspr r11,SPRN_SRR0
498 LOAD_HANDLER(r12, machine_check_common)
499 mtspr SPRN_SRR0,r12
500 mfspr r12,SPRN_SRR1
501 mtspr SPRN_SRR1,r10
502 rfid
503 b . /* prevent speculative execution */
504
504 KVM_HANDLER_SKIP(PACA_EXMC, EXC_STD, 0x200) 505 KVM_HANDLER_SKIP(PACA_EXMC, EXC_STD, 0x200)
505 KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x300) 506 KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x300)
506 KVM_HANDLER_SKIP(PACA_EXSLB, EXC_STD, 0x380) 507 KVM_HANDLER_SKIP(PACA_EXSLB, EXC_STD, 0x380)
@@ -984,14 +985,17 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
984machine_check_common: 985machine_check_common:
985 986
986 mfspr r10,SPRN_DAR 987 mfspr r10,SPRN_DAR
987 std r10,PACA_EXGEN+EX_DAR(r13) 988 std r10,PACA_EXMC+EX_DAR(r13)
988 mfspr r10,SPRN_DSISR 989 mfspr r10,SPRN_DSISR
989 stw r10,PACA_EXGEN+EX_DSISR(r13) 990 stw r10,PACA_EXMC+EX_DSISR(r13)
990 EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC) 991 EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
991 FINISH_NAP 992 FINISH_NAP
992 RECONCILE_IRQ_STATE(r10, r11) 993 RECONCILE_IRQ_STATE(r10, r11)
993 ld r3,PACA_EXGEN+EX_DAR(r13) 994 ld r3,PACA_EXMC+EX_DAR(r13)
994 lwz r4,PACA_EXGEN+EX_DSISR(r13) 995 lwz r4,PACA_EXMC+EX_DSISR(r13)
996 /* Enable MSR_RI when finished with PACA_EXMC */
997 li r10,MSR_RI
998 mtmsrd r10,1
995 std r3,_DAR(r1) 999 std r3,_DAR(r1)
996 std r4,_DSISR(r1) 1000 std r4,_DSISR(r1)
997 bl save_nvgprs 1001 bl save_nvgprs
@@ -1273,25 +1277,51 @@ machine_check_handle_early:
1273 * Check if thread was in power saving mode. We come here when any 1277 * Check if thread was in power saving mode. We come here when any
1274 * of the following is true: 1278 * of the following is true:
1275 * a. thread wasn't in power saving mode 1279 * a. thread wasn't in power saving mode
1276 * b. thread was in power saving mode with no state loss or 1280 * b. thread was in power saving mode with no state loss,
1277 * supervisor state loss 1281 * supervisor state loss or hypervisor state loss.
1278 * 1282 *
1279 * Go back to nap again if (b) is true. 1283 * Go back to nap/sleep/winkle mode again if (b) is true.
1280 */ 1284 */
1281 rlwinm. r11,r12,47-31,30,31 /* Was it in power saving mode? */ 1285 rlwinm. r11,r12,47-31,30,31 /* Was it in power saving mode? */
1282 beq 4f /* No, it wasn't */ 1286 beq 4f /* No, it wasn't */
1283 /* Thread was in power saving mode. Go back to nap again. */ 1287 /* Thread was in power saving mode. Go back to nap again. */
1284 cmpwi r11,2 1288 cmpwi r11,2
1285 bne 3f 1289 blt 3f
1286 /* Supervisor state loss */ 1290 /* Supervisor/Hypervisor state loss */
1287 li r0,1 1291 li r0,1
1288 stb r0,PACA_NAPSTATELOST(r13) 1292 stb r0,PACA_NAPSTATELOST(r13)
12893: bl machine_check_queue_event 12933: bl machine_check_queue_event
1290 MACHINE_CHECK_HANDLER_WINDUP 1294 MACHINE_CHECK_HANDLER_WINDUP
1291 GET_PACA(r13) 1295 GET_PACA(r13)
1292 ld r1,PACAR1(r13) 1296 ld r1,PACAR1(r13)
1293 li r3,PNV_THREAD_NAP 1297 /*
1294 b pnv_enter_arch207_idle_mode 1298 * Check what idle state this CPU was in and go back to the same mode
1299 * again.
1300 */
1301 lbz r3,PACA_THREAD_IDLE_STATE(r13)
1302 cmpwi r3,PNV_THREAD_NAP
1303 bgt 10f
1304 IDLE_STATE_ENTER_SEQ(PPC_NAP)
1305 /* No return */
130610:
1307 cmpwi r3,PNV_THREAD_SLEEP
1308 bgt 2f
1309 IDLE_STATE_ENTER_SEQ(PPC_SLEEP)
1310 /* No return */
1311
13122:
1313 /*
1314 * Go back to winkle. Please note that this thread was woken up in
1315 * machine check from winkle and have not restored the per-subcore
1316 * state. Hence before going back to winkle, set last bit of HSPGR0
1317 * to 1. This will make sure that if this thread gets woken up
1318 * again at reset vector 0x100 then it will get chance to restore
1319 * the subcore state.
1320 */
1321 ori r13,r13,1
1322 SET_PACA(r13)
1323 IDLE_STATE_ENTER_SEQ(PPC_WINKLE)
1324 /* No return */
12954: 13254:
1296#endif 1326#endif
1297 /* 1327 /*
diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S
index ba79d15f4ddd..bd739fed26e3 100644
--- a/arch/powerpc/kernel/idle_book3s.S
+++ b/arch/powerpc/kernel/idle_book3s.S
@@ -44,18 +44,6 @@
44 PSSCR_PSLL_MASK | PSSCR_TR_MASK | \ 44 PSSCR_PSLL_MASK | PSSCR_TR_MASK | \
45 PSSCR_MTL_MASK 45 PSSCR_MTL_MASK
46 46
47/* Idle state entry routines */
48
49#define IDLE_STATE_ENTER_SEQ(IDLE_INST) \
50 /* Magic NAP/SLEEP/WINKLE mode enter sequence */ \
51 std r0,0(r1); \
52 ptesync; \
53 ld r0,0(r1); \
541: cmp cr0,r0,r0; \
55 bne 1b; \
56 IDLE_INST; \
57 b .
58
59 .text 47 .text
60 48
61/* 49/*
@@ -363,8 +351,8 @@ _GLOBAL(power9_idle_stop)
363 * cr3 - set to gt if waking up with partial/complete hypervisor state loss 351 * cr3 - set to gt if waking up with partial/complete hypervisor state loss
364 */ 352 */
365_GLOBAL(pnv_restore_hyp_resource) 353_GLOBAL(pnv_restore_hyp_resource)
366 ld r2,PACATOC(r13);
367BEGIN_FTR_SECTION 354BEGIN_FTR_SECTION
355 ld r2,PACATOC(r13);
368 /* 356 /*
369 * POWER ISA 3. Use PSSCR to determine if we 357 * POWER ISA 3. Use PSSCR to determine if we
370 * are waking up from deep idle state 358 * are waking up from deep idle state
@@ -395,6 +383,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
395 */ 383 */
396 clrldi r5,r13,63 384 clrldi r5,r13,63
397 clrrdi r13,r13,1 385 clrrdi r13,r13,1
386
387 /* Now that we are sure r13 is corrected, load TOC */
388 ld r2,PACATOC(r13);
398 cmpwi cr4,r5,1 389 cmpwi cr4,r5,1
399 mtspr SPRN_HSPRG0,r13 390 mtspr SPRN_HSPRG0,r13
400 391
@@ -420,7 +411,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
420 * 411 *
421 * r13 - PACA 412 * r13 - PACA
422 * cr3 - gt if waking up with partial/complete hypervisor state loss 413 * cr3 - gt if waking up with partial/complete hypervisor state loss
423 * cr4 - eq if waking up from complete hypervisor state loss. 414 * cr4 - gt or eq if waking up from complete hypervisor state loss.
424 */ 415 */
425_GLOBAL(pnv_wakeup_tb_loss) 416_GLOBAL(pnv_wakeup_tb_loss)
426 ld r1,PACAR1(r13) 417 ld r1,PACAR1(r13)
@@ -462,7 +453,7 @@ lwarx_loop2:
462 * At this stage 453 * At this stage
463 * cr2 - eq if first thread to wakeup in core 454 * cr2 - eq if first thread to wakeup in core
464 * cr3- gt if waking up with partial/complete hypervisor state loss 455 * cr3- gt if waking up with partial/complete hypervisor state loss
465 * cr4 - eq if waking up from complete hypervisor state loss. 456 * cr4 - gt or eq if waking up from complete hypervisor state loss.
466 */ 457 */
467 458
468 ori r15,r15,PNV_CORE_IDLE_LOCK_BIT 459 ori r15,r15,PNV_CORE_IDLE_LOCK_BIT
@@ -490,7 +481,7 @@ first_thread_in_subcore:
490 * If waking up from sleep, subcore state is not lost. Hence 481 * If waking up from sleep, subcore state is not lost. Hence
491 * skip subcore state restore 482 * skip subcore state restore
492 */ 483 */
493 bne cr4,subcore_state_restored 484 blt cr4,subcore_state_restored
494 485
495 /* Restore per-subcore state */ 486 /* Restore per-subcore state */
496 ld r4,_SDR1(r1) 487 ld r4,_SDR1(r1)
@@ -535,7 +526,7 @@ timebase_resync:
535 * If waking up from sleep, per core state is not lost, skip to 526 * If waking up from sleep, per core state is not lost, skip to
536 * clear_lock. 527 * clear_lock.
537 */ 528 */
538 bne cr4,clear_lock 529 blt cr4,clear_lock
539 530
540 /* 531 /*
541 * First thread in the core to wake up and it's waking up with 532 * First thread in the core to wake up and it's waking up with
@@ -566,7 +557,7 @@ common_exit:
566 * If waking up from sleep, hypervisor state is not lost. Hence 557 * If waking up from sleep, hypervisor state is not lost. Hence
567 * skip hypervisor state restore. 558 * skip hypervisor state restore.
568 */ 559 */
569 bne cr4,hypervisor_state_restored 560 blt cr4,hypervisor_state_restored
570 561
571 /* Waking up from winkle */ 562 /* Waking up from winkle */
572 563
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index 3ed8ec09b5c9..e785cc9e1ecd 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -29,7 +29,7 @@
29#include <linux/kprobes.h> 29#include <linux/kprobes.h>
30#include <linux/ptrace.h> 30#include <linux/ptrace.h>
31#include <linux/preempt.h> 31#include <linux/preempt.h>
32#include <linux/module.h> 32#include <linux/extable.h>
33#include <linux/kdebug.h> 33#include <linux/kdebug.h>
34#include <linux/slab.h> 34#include <linux/slab.h>
35#include <asm/code-patching.h> 35#include <asm/code-patching.h>
diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c
index ef267fd9dd22..5e7ece0fda9f 100644
--- a/arch/powerpc/kernel/mce.c
+++ b/arch/powerpc/kernel/mce.c
@@ -92,7 +92,8 @@ void save_mce_event(struct pt_regs *regs, long handled,
92 mce->in_use = 1; 92 mce->in_use = 1;
93 93
94 mce->initiator = MCE_INITIATOR_CPU; 94 mce->initiator = MCE_INITIATOR_CPU;
95 if (handled) 95 /* Mark it recovered if we have handled it and MSR(RI=1). */
96 if (handled && (regs->msr & MSR_RI))
96 mce->disposition = MCE_DISPOSITION_RECOVERED; 97 mce->disposition = MCE_DISPOSITION_RECOVERED;
97 else 98 else
98 mce->disposition = MCE_DISPOSITION_NOT_RECOVERED; 99 mce->disposition = MCE_DISPOSITION_NOT_RECOVERED;
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index a5c0153ede37..e58908066b0e 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -78,6 +78,7 @@ EXPORT_SYMBOL(get_pci_dma_ops);
78static int get_phb_number(struct device_node *dn) 78static int get_phb_number(struct device_node *dn)
79{ 79{
80 int ret, phb_id = -1; 80 int ret, phb_id = -1;
81 u32 prop_32;
81 u64 prop; 82 u64 prop;
82 83
83 /* 84 /*
@@ -86,8 +87,10 @@ static int get_phb_number(struct device_node *dn)
86 * reading "ibm,opal-phbid", only present in OPAL environment. 87 * reading "ibm,opal-phbid", only present in OPAL environment.
87 */ 88 */
88 ret = of_property_read_u64(dn, "ibm,opal-phbid", &prop); 89 ret = of_property_read_u64(dn, "ibm,opal-phbid", &prop);
89 if (ret) 90 if (ret) {
90 ret = of_property_read_u32_index(dn, "reg", 1, (u32 *)&prop); 91 ret = of_property_read_u32_index(dn, "reg", 1, &prop_32);
92 prop = prop_32;
93 }
91 94
92 if (!ret) 95 if (!ret)
93 phb_id = (int)(prop & (MAX_PHBS - 1)); 96 phb_id = (int)(prop & (MAX_PHBS - 1));
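
The prop_32 change fixes an endianness bug, not a style nit: reading a u32 through (u32 *)&prop stores into the high-order half of the u64 on big-endian ppc64, leaving the PHB id shifted up by 32 bits (it happened to work on little-endian). Reading into a real u32 and widening is layout-independent:

	u32 prop_32;
	u64 prop;

	/* broken on BE: writes the top 4 bytes of 'prop'
	 *	of_property_read_u32_index(dn, "reg", 1, (u32 *)&prop);
	 * fixed: read the natural type, then zero-extend */
	ret = of_property_read_u32_index(dn, "reg", 1, &prop_32);
	prop = prop_32;
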
@@ -151,6 +154,42 @@ void pcibios_free_controller(struct pci_controller *phb)
151EXPORT_SYMBOL_GPL(pcibios_free_controller); 154EXPORT_SYMBOL_GPL(pcibios_free_controller);
152 155
153/* 156/*
157 * This function is used to call pcibios_free_controller()
158 * in a deferred manner: a callback from the PCI subsystem.
159 *
160 * _*DO NOT*_ call pcibios_free_controller() explicitly if
161 * this is used (or it may access an invalid *phb pointer).
162 *
163 * The callback occurs when all references to the root bus
164 * are dropped (e.g., child buses/devices and their users).
165 *
166 * It's called as .release_fn() of 'struct pci_host_bridge'
167 * which is associated with the 'struct pci_controller.bus'
168 * (root bus) - it expects .release_data to hold a pointer
169 * to 'struct pci_controller'.
170 *
171 * In order to use it, register .release_fn()/release_data
172 * like this:
173 *
174 * pci_set_host_bridge_release(bridge,
175 * pcibios_free_controller_deferred
176 * (void *) phb);
177 *
178 * e.g. in the pcibios_root_bridge_prepare() callback from
179 * pci_create_root_bus().
180 */
181void pcibios_free_controller_deferred(struct pci_host_bridge *bridge)
182{
183 struct pci_controller *phb = (struct pci_controller *)
184 bridge->release_data;
185
186 pr_debug("domain %d, dynamic %d\n", phb->global_number, phb->is_dynamic);
187
188 pcibios_free_controller(phb);
189}
190EXPORT_SYMBOL_GPL(pcibios_free_controller_deferred);
191
192/*
154 * The function is used to return the minimal alignment 193 * The function is used to return the minimal alignment
155 * for memory or I/O windows of the associated P2P bridge. 194 * for memory or I/O windows of the associated P2P bridge.
156 * By default, 4KiB alignment for I/O windows and 1MiB for 195 * By default, 4KiB alignment for I/O windows and 1MiB for
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 58ccf86415b4..9ee2623e0f67 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1074,26 +1074,6 @@ static inline void restore_sprs(struct thread_struct *old_thread,
1074#endif 1074#endif
1075} 1075}
1076 1076
1077#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1078void flush_tmregs_to_thread(struct task_struct *tsk)
1079{
1080 /*
1081 * Process self tracing is not yet supported through
1082 * ptrace interface. Ptrace generic code should have
1083 * prevented this from happening in the first place.
1084 * Warn once here with the message, if some how it
1085 * is attempted.
1086 */
1087 WARN_ONCE(tsk == current,
1088 "Not expecting ptrace on self: TM regs may be incorrect\n");
1089
1090 /*
1091 * If task is not current, it should have been flushed
1092 * already to its thread_struct during __switch_to().
1093 */
1094}
1095#endif
1096
1097struct task_struct *__switch_to(struct task_struct *prev, 1077struct task_struct *__switch_to(struct task_struct *prev,
1098 struct task_struct *new) 1078 struct task_struct *new)
1099{ 1079{
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 6ee4b72cda42..d3eff99e938c 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -695,7 +695,7 @@ unsigned char ibm_architecture_vec[] = {
695 OV4_MIN_ENT_CAP, /* minimum VP entitled capacity */ 695 OV4_MIN_ENT_CAP, /* minimum VP entitled capacity */
696 696
697 /* option vector 5: PAPR/OF options */ 697 /* option vector 5: PAPR/OF options */
698 VECTOR_LENGTH(18), /* length */ 698 VECTOR_LENGTH(21), /* length */
699 0, /* don't ignore, don't halt */ 699 0, /* don't ignore, don't halt */
700 OV5_FEAT(OV5_LPAR) | OV5_FEAT(OV5_SPLPAR) | OV5_FEAT(OV5_LARGE_PAGES) | 700 OV5_FEAT(OV5_LPAR) | OV5_FEAT(OV5_SPLPAR) | OV5_FEAT(OV5_LARGE_PAGES) |
701 OV5_FEAT(OV5_DRCONF_MEMORY) | OV5_FEAT(OV5_DONATE_DEDICATE_CPU) | 701 OV5_FEAT(OV5_DRCONF_MEMORY) | OV5_FEAT(OV5_DONATE_DEDICATE_CPU) |
@@ -726,8 +726,11 @@ unsigned char ibm_architecture_vec[] = {
726 0, 726 0,
727 0, 727 0,
728 OV5_FEAT(OV5_PFO_HW_RNG) | OV5_FEAT(OV5_PFO_HW_ENCR) | 728 OV5_FEAT(OV5_PFO_HW_RNG) | OV5_FEAT(OV5_PFO_HW_ENCR) |
729 OV5_FEAT(OV5_PFO_HW_842), 729 OV5_FEAT(OV5_PFO_HW_842), /* Byte 17 */
730 OV5_FEAT(OV5_SUB_PROCESSORS), 730 0, /* Byte 18 */
731 0, /* Byte 19 */
732 0, /* Byte 20 */
733 OV5_FEAT(OV5_SUB_PROCESSORS), /* Byte 21 */
731 734
732 /* option vector 6: IBM PAPR hints */ 735 /* option vector 6: IBM PAPR hints */
733 VECTOR_LENGTH(3), /* length */ 736 VECTOR_LENGTH(3), /* length */
@@ -2940,7 +2943,7 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
2940 2943
2941 /* Don't print anything after quiesce under OPAL, it crashes OFW */ 2944 /* Don't print anything after quiesce under OPAL, it crashes OFW */
2942 if (of_platform != PLATFORM_OPAL) { 2945 if (of_platform != PLATFORM_OPAL) {
2943 prom_printf("Booting Linux via __start() ...\n"); 2946 prom_printf("Booting Linux via __start() @ 0x%lx ...\n", kbase);
2944 prom_debug("->dt_header_start=0x%x\n", hdr); 2947 prom_debug("->dt_header_start=0x%x\n", hdr);
2945 } 2948 }
2946 2949
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 4f3c5756cc09..bf91658a8a40 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -38,6 +38,7 @@
38#include <asm/page.h> 38#include <asm/page.h>
39#include <asm/pgtable.h> 39#include <asm/pgtable.h>
40#include <asm/switch_to.h> 40#include <asm/switch_to.h>
41#include <asm/tm.h>
41 42
42#define CREATE_TRACE_POINTS 43#define CREATE_TRACE_POINTS
43#include <trace/events/syscalls.h> 44#include <trace/events/syscalls.h>
@@ -118,6 +119,24 @@ static const struct pt_regs_offset regoffset_table[] = {
118 REG_OFFSET_END, 119 REG_OFFSET_END,
119}; 120};
120 121
122#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
123static void flush_tmregs_to_thread(struct task_struct *tsk)
124{
125 /*
126 * If task is not current, it will have been flushed already to
 127 * its thread_struct during __switch_to().
128 *
129 * A reclaim flushes ALL the state.
130 */
131
132 if (tsk == current && MSR_TM_SUSPENDED(mfmsr()))
133 tm_reclaim_current(TM_CAUSE_SIGNAL);
134
135}
136#else
137static inline void flush_tmregs_to_thread(struct task_struct *tsk) { }
138#endif
139
121/** 140/**
122 * regs_query_register_offset() - query register offset from its name 141 * regs_query_register_offset() - query register offset from its name
123 * @name: the name of a register 142 * @name: the name of a register
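For context, the helper above is intended to run at the top of the ptrace register-access paths, before any checkpointed state is copied out of thread_struct. A hedged sketch of such a caller (the function name is illustrative, not the exact kernel entry point):

/* Sketch: make sure TM state is in thread_struct before reading it.
 * If the tracee is current with a suspended transaction, the helper
 * above reclaims it; otherwise __switch_to() already did the flush. */
static int example_tm_regset_get(struct task_struct *target)
{
        flush_tmregs_to_thread(target); /* reclaim suspended TM state */
        flush_fp_to_thread(target);     /* likewise for live FP state */

        /* ... copy checkpointed registers from target->thread ... */
        return 0;
}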
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index c3e861df4b20..24ec3ea4b3a2 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -93,15 +93,16 @@ notrace unsigned long __init early_init(unsigned long dt_ptr)
93 * and we are running with enough of the MMU enabled to have our 93 * and we are running with enough of the MMU enabled to have our
94 * proper kernel virtual addresses 94 * proper kernel virtual addresses
95 * 95 *
 96 * Find out what kind of machine we're on and save any data we need 96 * We do the initial parsing of the flat device-tree and prepare
97 * from the early boot process (devtree is copied on pmac by prom_init()). 97 * for the MMU to be fully initialized.
98 * This is called very early on the boot process, after a minimal
99 * MMU environment has been set up but before MMU_init is called.
100 */ 98 */
101extern unsigned int memset_nocache_branch; /* Insn to be replaced by NOP */ 99extern unsigned int memset_nocache_branch; /* Insn to be replaced by NOP */
102 100
103notrace void __init machine_init(u64 dt_ptr) 101notrace void __init machine_init(u64 dt_ptr)
104{ 102{
103 /* Configure static keys first, now that we're relocated. */
104 setup_feature_keys();
105
105 /* Enable early debugging if any specified (see udbg.h) */ 106 /* Enable early debugging if any specified (see udbg.h) */
106 udbg_early_init(); 107 udbg_early_init();
107 108
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index eafb9a79e011..7ac8e6eaab5b 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -300,6 +300,7 @@ void __init early_setup(unsigned long dt_ptr)
300 300
301 /* Apply all the dynamic patching */ 301 /* Apply all the dynamic patching */
302 apply_feature_fixups(); 302 apply_feature_fixups();
303 setup_feature_keys();
303 304
304 /* Initialize the hash table or TLB handling */ 305 /* Initialize the hash table or TLB handling */
305 early_init_mmu(); 306 early_init_mmu();
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index b6aa378aff63..a7daf749b97f 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -1226,7 +1226,21 @@ long sys_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
1226 (regs->gpr[1] + __SIGNAL_FRAMESIZE + 16); 1226 (regs->gpr[1] + __SIGNAL_FRAMESIZE + 16);
1227 if (!access_ok(VERIFY_READ, rt_sf, sizeof(*rt_sf))) 1227 if (!access_ok(VERIFY_READ, rt_sf, sizeof(*rt_sf)))
1228 goto bad; 1228 goto bad;
1229
1229#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 1230#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1231 /*
1232 * If there is a transactional state then throw it away.
1233 * The purpose of a sigreturn is to destroy all traces of the
1234 * signal frame, this includes any transactional state created
 1235 * within it. We only check for suspended, as we can never be
 1236 * active in the kernel; if we somehow are, there is nothing better to
1237 * do than go ahead and Bad Thing later.
1238 * The cause is not important as there will never be a
1239 * recheckpoint so it's not user visible.
1240 */
1241 if (MSR_TM_SUSPENDED(mfmsr()))
1242 tm_reclaim_current(0);
1243
1230 if (__get_user(tmp, &rt_sf->uc.uc_link)) 1244 if (__get_user(tmp, &rt_sf->uc.uc_link))
1231 goto bad; 1245 goto bad;
1232 uc_transact = (struct ucontext __user *)(uintptr_t)tmp; 1246 uc_transact = (struct ucontext __user *)(uintptr_t)tmp;
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index 7e49984d4331..70409bb90a95 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -676,7 +676,21 @@ int sys_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
676 if (__copy_from_user(&set, &uc->uc_sigmask, sizeof(set))) 676 if (__copy_from_user(&set, &uc->uc_sigmask, sizeof(set)))
677 goto badframe; 677 goto badframe;
678 set_current_blocked(&set); 678 set_current_blocked(&set);
679
679#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 680#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
681 /*
682 * If there is a transactional state then throw it away.
683 * The purpose of a sigreturn is to destroy all traces of the
684 * signal frame, this includes any transactional state created
 685 * within it. We only check for suspended, as we can never be
 686 * active in the kernel; if we somehow are, there is nothing better to
687 * do than go ahead and Bad Thing later.
688 * The cause is not important as there will never be a
689 * recheckpoint so it's not user visible.
690 */
691 if (MSR_TM_SUSPENDED(mfmsr()))
692 tm_reclaim_current(0);
693
680 if (__get_user(msr, &uc->uc_mcontext.gp_regs[PT_MSR])) 694 if (__get_user(msr, &uc->uc_mcontext.gp_regs[PT_MSR]))
681 goto badframe; 695 goto badframe;
682 if (MSR_TM_ACTIVE(msr)) { 696 if (MSR_TM_ACTIVE(msr)) {
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 25a39052bf6b..9c6f3fd58059 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -830,7 +830,7 @@ int __cpu_disable(void)
830 830
831 /* Update sibling maps */ 831 /* Update sibling maps */
832 base = cpu_first_thread_sibling(cpu); 832 base = cpu_first_thread_sibling(cpu);
833 for (i = 0; i < threads_per_core; i++) { 833 for (i = 0; i < threads_per_core && base + i < nr_cpu_ids; i++) {
834 cpumask_clear_cpu(cpu, cpu_sibling_mask(base + i)); 834 cpumask_clear_cpu(cpu, cpu_sibling_mask(base + i));
835 cpumask_clear_cpu(base + i, cpu_sibling_mask(cpu)); 835 cpumask_clear_cpu(base + i, cpu_sibling_mask(cpu));
836 cpumask_clear_cpu(cpu, cpu_core_mask(base + i)); 836 cpumask_clear_cpu(cpu, cpu_core_mask(base + i));
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 2cb589264cb7..62859ebe0062 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -25,7 +25,8 @@
25#include <linux/user.h> 25#include <linux/user.h>
26#include <linux/interrupt.h> 26#include <linux/interrupt.h>
27#include <linux/init.h> 27#include <linux/init.h>
28#include <linux/module.h> 28#include <linux/extable.h>
29#include <linux/module.h> /* print_modules */
29#include <linux/prctl.h> 30#include <linux/prctl.h>
30#include <linux/delay.h> 31#include <linux/delay.h>
31#include <linux/kprobes.h> 32#include <linux/kprobes.h>
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index 6767605ea8da..4111d30badfa 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -22,6 +22,7 @@
22#include <linux/security.h> 22#include <linux/security.h>
23#include <linux/memblock.h> 23#include <linux/memblock.h>
24 24
25#include <asm/cpu_has_feature.h>
25#include <asm/pgtable.h> 26#include <asm/pgtable.h>
26#include <asm/processor.h> 27#include <asm/processor.h>
27#include <asm/mmu.h> 28#include <asm/mmu.h>
diff --git a/arch/powerpc/kernel/vdso32/Makefile b/arch/powerpc/kernel/vdso32/Makefile
index cbabd143acae..78a7449bf489 100644
--- a/arch/powerpc/kernel/vdso32/Makefile
+++ b/arch/powerpc/kernel/vdso32/Makefile
@@ -30,7 +30,7 @@ CPPFLAGS_vdso32.lds += -P -C -Upowerpc
30$(obj)/vdso32_wrapper.o : $(obj)/vdso32.so 30$(obj)/vdso32_wrapper.o : $(obj)/vdso32.so
31 31
32# link rule for the .so file, .lds has to be first 32# link rule for the .so file, .lds has to be first
33$(obj)/vdso32.so.dbg: $(src)/vdso32.lds $(obj-vdso32) 33$(obj)/vdso32.so.dbg: $(src)/vdso32.lds $(obj-vdso32) FORCE
34 $(call if_changed,vdso32ld) 34 $(call if_changed,vdso32ld)
35 35
36# strip rule for the .so file 36# strip rule for the .so file
@@ -39,12 +39,12 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE
39 $(call if_changed,objcopy) 39 $(call if_changed,objcopy)
40 40
41# assembly rules for the .S files 41# assembly rules for the .S files
42$(obj-vdso32): %.o: %.S 42$(obj-vdso32): %.o: %.S FORCE
43 $(call if_changed_dep,vdso32as) 43 $(call if_changed_dep,vdso32as)
44 44
45# actual build commands 45# actual build commands
46quiet_cmd_vdso32ld = VDSO32L $@ 46quiet_cmd_vdso32ld = VDSO32L $@
47 cmd_vdso32ld = $(CROSS32CC) $(c_flags) -Wl,-T $^ -o $@ 47 cmd_vdso32ld = $(CROSS32CC) $(c_flags) -o $@ -Wl,-T$(filter %.lds,$^) $(filter %.o,$^)
48quiet_cmd_vdso32as = VDSO32A $@ 48quiet_cmd_vdso32as = VDSO32A $@
49 cmd_vdso32as = $(CROSS32CC) $(a_flags) -c -o $@ $< 49 cmd_vdso32as = $(CROSS32CC) $(a_flags) -c -o $@ $<
50 50
diff --git a/arch/powerpc/kernel/vdso64/Makefile b/arch/powerpc/kernel/vdso64/Makefile
index c710802b8fb6..366ae09b14c1 100644
--- a/arch/powerpc/kernel/vdso64/Makefile
+++ b/arch/powerpc/kernel/vdso64/Makefile
@@ -23,7 +23,7 @@ CPPFLAGS_vdso64.lds += -P -C -U$(ARCH)
23$(obj)/vdso64_wrapper.o : $(obj)/vdso64.so 23$(obj)/vdso64_wrapper.o : $(obj)/vdso64.so
24 24
25# link rule for the .so file, .lds has to be first 25# link rule for the .so file, .lds has to be first
26$(obj)/vdso64.so.dbg: $(src)/vdso64.lds $(obj-vdso64) 26$(obj)/vdso64.so.dbg: $(src)/vdso64.lds $(obj-vdso64) FORCE
27 $(call if_changed,vdso64ld) 27 $(call if_changed,vdso64ld)
28 28
29# strip rule for the .so file 29# strip rule for the .so file
@@ -32,12 +32,12 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE
32 $(call if_changed,objcopy) 32 $(call if_changed,objcopy)
33 33
34# assembly rules for the .S files 34# assembly rules for the .S files
35$(obj-vdso64): %.o: %.S 35$(obj-vdso64): %.o: %.S FORCE
36 $(call if_changed_dep,vdso64as) 36 $(call if_changed_dep,vdso64as)
37 37
38# actual build commands 38# actual build commands
39quiet_cmd_vdso64ld = VDSO64L $@ 39quiet_cmd_vdso64ld = VDSO64L $@
40 cmd_vdso64ld = $(CC) $(c_flags) -Wl,-T $^ -o $@ 40 cmd_vdso64ld = $(CC) $(c_flags) -o $@ -Wl,-T$(filter %.lds,$^) $(filter %.o,$^)
41quiet_cmd_vdso64as = VDSO64A $@ 41quiet_cmd_vdso64as = VDSO64A $@
42 cmd_vdso64as = $(CC) $(a_flags) -c -o $@ $< 42 cmd_vdso64as = $(CC) $(a_flags) -c -o $@ $<
43 43
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index 1f9e5529e692..855d4b95d752 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -78,6 +78,7 @@ kvm-book3s_64-builtin-xics-objs-$(CONFIG_KVM_XICS) := \
78 78
79ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE 79ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
80kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) += \ 80kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) += \
81 book3s_hv_hmi.o \
81 book3s_hv_rmhandlers.o \ 82 book3s_hv_rmhandlers.o \
82 book3s_hv_rm_mmu.o \ 83 book3s_hv_rm_mmu.o \
83 book3s_hv_ras.o \ 84 book3s_hv_ras.o \
diff --git a/arch/powerpc/kernel/hmi.c b/arch/powerpc/kvm/book3s_hv_hmi.c
index e3f738eb1cac..e3f738eb1cac 100644
--- a/arch/powerpc/kernel/hmi.c
+++ b/arch/powerpc/kvm/book3s_hv_hmi.c
diff --git a/arch/powerpc/kvm/book3s_xics.c b/arch/powerpc/kvm/book3s_xics.c
index a75ba38a2d81..05aa11399a78 100644
--- a/arch/powerpc/kvm/book3s_xics.c
+++ b/arch/powerpc/kvm/book3s_xics.c
@@ -1329,20 +1329,16 @@ static int kvmppc_xics_create(struct kvm_device *dev, u32 type)
1329 xics->kvm = kvm; 1329 xics->kvm = kvm;
1330 1330
1331 /* Already there ? */ 1331 /* Already there ? */
1332 mutex_lock(&kvm->lock);
1333 if (kvm->arch.xics) 1332 if (kvm->arch.xics)
1334 ret = -EEXIST; 1333 ret = -EEXIST;
1335 else 1334 else
1336 kvm->arch.xics = xics; 1335 kvm->arch.xics = xics;
1337 mutex_unlock(&kvm->lock);
1338 1336
1339 if (ret) { 1337 if (ret) {
1340 kfree(xics); 1338 kfree(xics);
1341 return ret; 1339 return ret;
1342 } 1340 }
1343 1341
1344 xics_debugfs_init(xics);
1345
1346#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE 1342#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
1347 if (cpu_has_feature(CPU_FTR_ARCH_206)) { 1343 if (cpu_has_feature(CPU_FTR_ARCH_206)) {
1348 /* Enable real mode support */ 1344 /* Enable real mode support */
@@ -1354,9 +1350,17 @@ static int kvmppc_xics_create(struct kvm_device *dev, u32 type)
1354 return 0; 1350 return 0;
1355} 1351}
1356 1352
1353static void kvmppc_xics_init(struct kvm_device *dev)
1354{
1355 struct kvmppc_xics *xics = (struct kvmppc_xics *)dev->private;
1356
1357 xics_debugfs_init(xics);
1358}
1359
1357struct kvm_device_ops kvm_xics_ops = { 1360struct kvm_device_ops kvm_xics_ops = {
1358 .name = "kvm-xics", 1361 .name = "kvm-xics",
1359 .create = kvmppc_xics_create, 1362 .create = kvmppc_xics_create,
1363 .init = kvmppc_xics_init,
1360 .destroy = kvmppc_xics_free, 1364 .destroy = kvmppc_xics_free,
1361 .set_attr = xics_set_attr, 1365 .set_attr = xics_set_attr,
1362 .get_attr = xics_get_attr, 1366 .get_attr = xics_get_attr,
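The create/init split matters for locking: the KVM device-creation ioctl already holds kvm->lock around .create (which is why the extra mutex_lock pair above is dropped), while .init runs once the device is fully constructed. A sketch of the resulting contract:

/* Sketch: keep .create minimal since kvm->lock is held by the caller;
 * anything that only needs the finished device (debugfs here) moves to
 * .init, which the core calls after creation succeeds. */
struct kvm_device_ops example_xics_ops = {
        .name    = "kvm-xics",
        .create  = kvmppc_xics_create,  /* runs under kvm->lock */
        .init    = kvmppc_xics_init,    /* safe spot for debugfs setup */
        .destroy = kvmppc_xics_free,
};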
diff --git a/arch/powerpc/lib/checksum_32.S b/arch/powerpc/lib/checksum_32.S
index d90870a66b60..aa8214f30c92 100644
--- a/arch/powerpc/lib/checksum_32.S
+++ b/arch/powerpc/lib/checksum_32.S
@@ -127,17 +127,19 @@ _GLOBAL(csum_partial_copy_generic)
127 stw r7,12(r1) 127 stw r7,12(r1)
128 stw r8,8(r1) 128 stw r8,8(r1)
129 129
130 andi. r0,r4,1 /* is destination address even ? */
131 cmplwi cr7,r0,0
132 addic r12,r6,0 130 addic r12,r6,0
133 addi r6,r4,-4 131 addi r6,r4,-4
134 neg r0,r4 132 neg r0,r4
135 addi r4,r3,-4 133 addi r4,r3,-4
136 andi. r0,r0,CACHELINE_MASK /* # bytes to start of cache line */ 134 andi. r0,r0,CACHELINE_MASK /* # bytes to start of cache line */
135 crset 4*cr7+eq
137 beq 58f 136 beq 58f
138 137
139 cmplw 0,r5,r0 /* is this more than total to do? */ 138 cmplw 0,r5,r0 /* is this more than total to do? */
140 blt 63f /* if not much to do */ 139 blt 63f /* if not much to do */
140 rlwinm r7,r6,3,0x8
141 rlwnm r12,r12,r7,0,31 /* odd destination address: rotate one byte */
142 cmplwi cr7,r7,0 /* is destination address even ? */
141 andi. r8,r0,3 /* get it word-aligned first */ 143 andi. r8,r0,3 /* get it word-aligned first */
142 mtctr r8 144 mtctr r8
143 beq+ 61f 145 beq+ 61f
@@ -237,7 +239,7 @@ _GLOBAL(csum_partial_copy_generic)
23766: addze r3,r12 23966: addze r3,r12
238 addi r1,r1,16 240 addi r1,r1,16
239 beqlr+ cr7 241 beqlr+ cr7
240 rlwinm r3,r3,8,0,31 /* swap bytes for odd destination */ 242 rlwinm r3,r3,8,0,31 /* odd destination address: rotate one byte */
241 blr 243 blr
242 244
243/* read fault */ 245/* read fault */
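The single rotate at the end works because the 16-bit ones'-complement sum commutes with byte swapping: accumulating the same bytes one position later produces the byte-swapped checksum. A self-contained demonstration in plain C (not kernel code):

#include <stdint.h>
#include <stdio.h>

/* Fold a 32-bit accumulator into a 16-bit ones'-complement sum. */
static uint16_t fold(uint32_t sum)
{
        while (sum >> 16)
                sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)sum;
}

int main(void)
{
        /* Bytes 12 34 56 78 summed at an even vs. an odd alignment:
         * the results differ by exactly one byte swap, which is what
         * the rlwinm at label 66 undoes for odd destinations. */
        uint16_t even = fold(0x1234 + 0x5678);
        uint16_t odd  = fold(0x0012 + 0x3456 + 0x7800);

        printf("even=%04x swapped(odd)=%04x\n",
               even, (uint16_t)((odd >> 8) | (odd << 8)));
        return 0;
}

Both values print as 68ac.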
diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
index 74145f02ad41..043415f0bdb1 100644
--- a/arch/powerpc/lib/feature-fixups.c
+++ b/arch/powerpc/lib/feature-fixups.c
@@ -188,7 +188,10 @@ void __init apply_feature_fixups(void)
188 &__start___fw_ftr_fixup, &__stop___fw_ftr_fixup); 188 &__start___fw_ftr_fixup, &__stop___fw_ftr_fixup);
189#endif 189#endif
190 do_final_fixups(); 190 do_final_fixups();
191}
191 192
193void __init setup_feature_keys(void)
194{
192 /* 195 /*
193 * Initialise jump label. This causes all the cpu/mmu_has_feature() 196 * Initialise jump label. This causes all the cpu/mmu_has_feature()
194 * checks to take on their correct polarity based on the current set of 197 * checks to take on their correct polarity based on the current set of
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index a4db22f65021..bb1ffc559f38 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -26,7 +26,7 @@
26#include <linux/mm.h> 26#include <linux/mm.h>
27#include <linux/interrupt.h> 27#include <linux/interrupt.h>
28#include <linux/highmem.h> 28#include <linux/highmem.h>
29#include <linux/module.h> 29#include <linux/extable.h>
30#include <linux/kprobes.h> 30#include <linux/kprobes.h>
31#include <linux/kdebug.h> 31#include <linux/kdebug.h>
32#include <linux/perf_event.h> 32#include <linux/perf_event.h>
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index dfdb90cb4403..9f1983404e1a 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -113,7 +113,12 @@ BEGIN_FTR_SECTION
113END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT) 113END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
114 b slb_finish_load_1T 114 b slb_finish_load_1T
115 115
1160: 1160: /*
117 * For userspace addresses, make sure this is region 0.
118 */
119 cmpdi r9, 0
120 bne 8f
121
117 /* when using slices, we extract the psize off the slice bitmaps 122 /* when using slices, we extract the psize off the slice bitmaps
118 * and then we need to get the sllp encoding off the mmu_psize_defs 123 * and then we need to get the sllp encoding off the mmu_psize_defs
119 * array. 124 * array.
diff --git a/arch/powerpc/platforms/512x/mpc512x_lpbfifo.c b/arch/powerpc/platforms/512x/mpc512x_lpbfifo.c
index 8eb82b043dd8..d93dd4acf40b 100644
--- a/arch/powerpc/platforms/512x/mpc512x_lpbfifo.c
+++ b/arch/powerpc/platforms/512x/mpc512x_lpbfifo.c
@@ -528,7 +528,6 @@ static struct platform_driver mpc512x_lpbfifo_driver = {
528 .remove = mpc512x_lpbfifo_remove, 528 .remove = mpc512x_lpbfifo_remove,
529 .driver = { 529 .driver = {
530 .name = DRV_NAME, 530 .name = DRV_NAME,
531 .owner = THIS_MODULE,
532 .of_match_table = mpc512x_lpbfifo_match, 531 .of_match_table = mpc512x_lpbfifo_match,
533 }, 532 },
534}; 533};
diff --git a/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c b/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c
index dbcd0303afed..63c5ab6489c9 100644
--- a/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c
+++ b/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c
@@ -222,7 +222,6 @@ static const struct of_device_id mcu_of_match_table[] = {
222static struct i2c_driver mcu_driver = { 222static struct i2c_driver mcu_driver = {
223 .driver = { 223 .driver = {
224 .name = "mcu-mpc8349emitx", 224 .name = "mcu-mpc8349emitx",
225 .owner = THIS_MODULE,
226 .of_match_table = mcu_of_match_table, 225 .of_match_table = mcu_of_match_table,
227 }, 226 },
228 .probe = mcu_probe, 227 .probe = mcu_probe,
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index 5be15cff758d..2975754c65ea 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -496,8 +496,10 @@ spufs_mkgang(struct inode *dir, struct dentry *dentry, umode_t mode)
496 gang = alloc_spu_gang(); 496 gang = alloc_spu_gang();
497 SPUFS_I(inode)->i_ctx = NULL; 497 SPUFS_I(inode)->i_ctx = NULL;
498 SPUFS_I(inode)->i_gang = gang; 498 SPUFS_I(inode)->i_gang = gang;
499 if (!gang) 499 if (!gang) {
500 ret = -ENOMEM;
500 goto out_iput; 501 goto out_iput;
502 }
501 503
502 inode->i_op = &simple_dir_inode_operations; 504 inode->i_op = &simple_dir_inode_operations;
503 inode->i_fop = &simple_dir_operations; 505 inode->i_fop = &simple_dir_operations;
diff --git a/arch/powerpc/platforms/embedded6xx/holly.c b/arch/powerpc/platforms/embedded6xx/holly.c
index dafba1057a47..dfd310031549 100644
--- a/arch/powerpc/platforms/embedded6xx/holly.c
+++ b/arch/powerpc/platforms/embedded6xx/holly.c
@@ -26,7 +26,7 @@
26#include <linux/tty.h> 26#include <linux/tty.h>
27#include <linux/serial_core.h> 27#include <linux/serial_core.h>
28#include <linux/of_platform.h> 28#include <linux/of_platform.h>
29#include <linux/module.h> 29#include <linux/extable.h>
30 30
31#include <asm/time.h> 31#include <asm/time.h>
32#include <asm/machdep.h> 32#include <asm/machdep.h>
diff --git a/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c b/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
index 80804f9916ee..f97bab8e37a2 100644
--- a/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
+++ b/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
@@ -23,7 +23,7 @@
23#include <linux/pci.h> 23#include <linux/pci.h>
24#include <linux/kdev_t.h> 24#include <linux/kdev_t.h>
25#include <linux/console.h> 25#include <linux/console.h>
26#include <linux/module.h> 26#include <linux/extable.h>
27#include <linux/delay.h> 27#include <linux/delay.h>
28#include <linux/irq.h> 28#include <linux/irq.h>
29#include <linux/seq_file.h> 29#include <linux/seq_file.h>
diff --git a/arch/powerpc/platforms/pasemi/iommu.c b/arch/powerpc/platforms/pasemi/iommu.c
index 309d9ccccd50..c61667e8bb06 100644
--- a/arch/powerpc/platforms/pasemi/iommu.c
+++ b/arch/powerpc/platforms/pasemi/iommu.c
@@ -187,6 +187,11 @@ static void pci_dma_dev_setup_pasemi(struct pci_dev *dev)
187 if (dev->vendor == 0x1959 && dev->device == 0xa007 && 187 if (dev->vendor == 0x1959 && dev->device == 0xa007 &&
188 !firmware_has_feature(FW_FEATURE_LPAR)) { 188 !firmware_has_feature(FW_FEATURE_LPAR)) {
189 dev->dev.archdata.dma_ops = &dma_direct_ops; 189 dev->dev.archdata.dma_ops = &dma_direct_ops;
190 /*
191 * Set the coherent DMA mask to prevent the iommu
 192 * from being used unnecessarily
193 */
194 dev->dev.coherent_dma_mask = DMA_BIT_MASK(44);
190 return; 195 return;
191 } 196 }
192#endif 197#endif
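Here the platform code assigns coherent_dma_mask directly because this runs before any driver is bound; a bound driver would go through the DMA API for the same effect. A hedged sketch of the driver-side equivalent:

/* Sketch: dma_set_coherent_mask() validates the requested mask against
 * what the platform can do before committing it. */
if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(44)))
        dev_warn(&pdev->dev, "44-bit coherent DMA mask rejected\n");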
diff --git a/arch/powerpc/platforms/powernv/opal-dump.c b/arch/powerpc/platforms/powernv/opal-dump.c
index 2ee96431f736..4c827826c05e 100644
--- a/arch/powerpc/platforms/powernv/opal-dump.c
+++ b/arch/powerpc/platforms/powernv/opal-dump.c
@@ -370,6 +370,7 @@ static irqreturn_t process_dump(int irq, void *data)
370 uint32_t dump_id, dump_size, dump_type; 370 uint32_t dump_id, dump_size, dump_type;
371 struct dump_obj *dump; 371 struct dump_obj *dump;
372 char name[22]; 372 char name[22];
373 struct kobject *kobj;
373 374
374 rc = dump_read_info(&dump_id, &dump_size, &dump_type); 375 rc = dump_read_info(&dump_id, &dump_size, &dump_type);
375 if (rc != OPAL_SUCCESS) 376 if (rc != OPAL_SUCCESS)
@@ -381,8 +382,12 @@ static irqreturn_t process_dump(int irq, void *data)
381 * that gracefully and not create two conflicting 382 * that gracefully and not create two conflicting
382 * entries. 383 * entries.
383 */ 384 */
384 if (kset_find_obj(dump_kset, name)) 385 kobj = kset_find_obj(dump_kset, name);
386 if (kobj) {
387 /* Drop reference added by kset_find_obj() */
388 kobject_put(kobj);
385 return 0; 389 return 0;
390 }
386 391
387 dump = create_dump_obj(dump_id, dump_size, dump_type); 392 dump = create_dump_obj(dump_id, dump_size, dump_type);
388 if (!dump) 393 if (!dump)
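The rule being fixed applies to every kset_find_obj() caller (the elog fix below is identical): the lookup returns its result with an extra reference that the caller owns, even when the caller only wanted an existence check. A minimal usage sketch (the kset and name are placeholders):

struct kobject *kobj = kset_find_obj(example_kset, name);
if (kobj) {
        kobject_put(kobj);      /* balance the reference the lookup took */
        return 0;               /* entry already exists, nothing to do */
}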
diff --git a/arch/powerpc/platforms/powernv/opal-elog.c b/arch/powerpc/platforms/powernv/opal-elog.c
index 37f959bf392e..f2344cbd2f46 100644
--- a/arch/powerpc/platforms/powernv/opal-elog.c
+++ b/arch/powerpc/platforms/powernv/opal-elog.c
@@ -247,6 +247,7 @@ static irqreturn_t elog_event(int irq, void *data)
247 uint64_t elog_type; 247 uint64_t elog_type;
248 int rc; 248 int rc;
249 char name[2+16+1]; 249 char name[2+16+1];
250 struct kobject *kobj;
250 251
251 rc = opal_get_elog_size(&id, &size, &type); 252 rc = opal_get_elog_size(&id, &size, &type);
252 if (rc != OPAL_SUCCESS) { 253 if (rc != OPAL_SUCCESS) {
@@ -269,8 +270,12 @@ static irqreturn_t elog_event(int irq, void *data)
269 * that gracefully and not create two conflicting 270 * that gracefully and not create two conflicting
270 * entries. 271 * entries.
271 */ 272 */
272 if (kset_find_obj(elog_kset, name)) 273 kobj = kset_find_obj(elog_kset, name);
274 if (kobj) {
275 /* Drop reference added by kset_find_obj() */
276 kobject_put(kobj);
273 return IRQ_HANDLED; 277 return IRQ_HANDLED;
278 }
274 279
275 create_elog_obj(log_id, elog_size, elog_type); 280 create_elog_obj(log_id, elog_size, elog_type);
276 281
diff --git a/arch/powerpc/platforms/powernv/opal-irqchip.c b/arch/powerpc/platforms/powernv/opal-irqchip.c
index e505223b4ec5..ed8bba68a162 100644
--- a/arch/powerpc/platforms/powernv/opal-irqchip.c
+++ b/arch/powerpc/platforms/powernv/opal-irqchip.c
@@ -228,7 +228,8 @@ int __init opal_event_init(void)
228 } 228 }
229 229
230 /* Install interrupt handler */ 230 /* Install interrupt handler */
231 rc = request_irq(virq, opal_interrupt, 0, "opal", NULL); 231 rc = request_irq(virq, opal_interrupt, IRQF_TRIGGER_LOW,
232 "opal", NULL);
232 if (rc) { 233 if (rc) {
233 irq_dispose_mapping(virq); 234 irq_dispose_mapping(virq);
234 pr_warn("Error %d requesting irq %d (0x%x)\n", 235 pr_warn("Error %d requesting irq %d (0x%x)\n",
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index 8b4fc68cebcb..6c9a65b52e63 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -399,6 +399,7 @@ static int opal_recover_mce(struct pt_regs *regs,
399 399
400 if (!(regs->msr & MSR_RI)) { 400 if (!(regs->msr & MSR_RI)) {
401 /* If MSR_RI isn't set, we cannot recover */ 401 /* If MSR_RI isn't set, we cannot recover */
402 pr_err("Machine check interrupt unrecoverable: MSR(RI=0)\n");
402 recovered = 0; 403 recovered = 0;
403 } else if (evt->disposition == MCE_DISPOSITION_RECOVERED) { 404 } else if (evt->disposition == MCE_DISPOSITION_RECOVERED) {
404 /* Platform corrected itself */ 405 /* Platform corrected itself */
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 6b9528307f62..bc0c91e84ca0 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -111,10 +111,17 @@ static int __init iommu_setup(char *str)
111} 111}
112early_param("iommu", iommu_setup); 112early_param("iommu", iommu_setup);
113 113
114static inline bool pnv_pci_is_mem_pref_64(unsigned long flags) 114static inline bool pnv_pci_is_m64(struct pnv_phb *phb, struct resource *r)
115{ 115{
116 return ((flags & (IORESOURCE_MEM_64 | IORESOURCE_PREFETCH)) == 116 /*
117 (IORESOURCE_MEM_64 | IORESOURCE_PREFETCH)); 117 * WARNING: We cannot rely on the resource flags. The Linux PCI
118 * allocation code sometimes decides to put a 64-bit prefetchable
119 * BAR in the 32-bit window, so we have to compare the addresses.
120 *
121 * For simplicity we only test resource start.
122 */
123 return (r->start >= phb->ioda.m64_base &&
124 r->start < (phb->ioda.m64_base + phb->ioda.m64_size));
118} 125}
119 126
120static struct pnv_ioda_pe *pnv_ioda_init_pe(struct pnv_phb *phb, int pe_no) 127static struct pnv_ioda_pe *pnv_ioda_init_pe(struct pnv_phb *phb, int pe_no)
@@ -142,7 +149,7 @@ static void pnv_ioda_reserve_pe(struct pnv_phb *phb, int pe_no)
142 149
143static struct pnv_ioda_pe *pnv_ioda_alloc_pe(struct pnv_phb *phb) 150static struct pnv_ioda_pe *pnv_ioda_alloc_pe(struct pnv_phb *phb)
144{ 151{
145 unsigned long pe = phb->ioda.total_pe_num - 1; 152 long pe;
146 153
147 for (pe = phb->ioda.total_pe_num - 1; pe >= 0; pe--) { 154 for (pe = phb->ioda.total_pe_num - 1; pe >= 0; pe--) {
148 if (!test_and_set_bit(pe, phb->ioda.pe_alloc)) 155 if (!test_and_set_bit(pe, phb->ioda.pe_alloc))
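The type change above fixes a classic wraparound hang: for an unsigned index, pe >= 0 is always true, so a failing allocation scan would never terminate. A standalone illustration:

#include <stdio.h>

int main(void)
{
        unsigned long pe;
        int guard = 0;

        /* "pe >= 0" is a tautology for unsigned types (compilers warn
         * about it); without the guard this loop wraps to ULONG_MAX and
         * spins forever, which is the bug the signed index avoids. */
        for (pe = 2; pe >= 0 && guard < 10; pe--, guard++)
                ;
        printf("unsigned loop stopped only by the guard (%d steps)\n", guard);

        for (long spe = 2; spe >= 0; spe--)     /* terminates after 3 steps */
                ;
        return 0;
}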
@@ -155,11 +162,12 @@ static struct pnv_ioda_pe *pnv_ioda_alloc_pe(struct pnv_phb *phb)
155static void pnv_ioda_free_pe(struct pnv_ioda_pe *pe) 162static void pnv_ioda_free_pe(struct pnv_ioda_pe *pe)
156{ 163{
157 struct pnv_phb *phb = pe->phb; 164 struct pnv_phb *phb = pe->phb;
165 unsigned int pe_num = pe->pe_number;
158 166
159 WARN_ON(pe->pdev); 167 WARN_ON(pe->pdev);
160 168
161 memset(pe, 0, sizeof(struct pnv_ioda_pe)); 169 memset(pe, 0, sizeof(struct pnv_ioda_pe));
162 clear_bit(pe->pe_number, phb->ioda.pe_alloc); 170 clear_bit(pe_num, phb->ioda.pe_alloc);
163} 171}
164 172
165/* The default M64 BAR is shared by all PEs */ 173/* The default M64 BAR is shared by all PEs */
@@ -229,7 +237,7 @@ static void pnv_ioda_reserve_dev_m64_pe(struct pci_dev *pdev,
229 sgsz = phb->ioda.m64_segsize; 237 sgsz = phb->ioda.m64_segsize;
230 for (i = 0; i <= PCI_ROM_RESOURCE; i++) { 238 for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
231 r = &pdev->resource[i]; 239 r = &pdev->resource[i];
232 if (!r->parent || !pnv_pci_is_mem_pref_64(r->flags)) 240 if (!r->parent || !pnv_pci_is_m64(phb, r))
233 continue; 241 continue;
234 242
235 start = _ALIGN_DOWN(r->start - base, sgsz); 243 start = _ALIGN_DOWN(r->start - base, sgsz);
@@ -1877,7 +1885,7 @@ static void pnv_pci_phb3_tce_invalidate(struct pnv_ioda_pe *pe, bool rm,
1877 unsigned shift, unsigned long index, 1885 unsigned shift, unsigned long index,
1878 unsigned long npages) 1886 unsigned long npages)
1879{ 1887{
1880 __be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb, false); 1888 __be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb, rm);
1881 unsigned long start, end, inc; 1889 unsigned long start, end, inc;
1882 1890
1883 /* We'll invalidate DMA address in PE scope */ 1891 /* We'll invalidate DMA address in PE scope */
@@ -2209,7 +2217,7 @@ static long pnv_pci_ioda2_set_window(struct iommu_table_group *table_group,
2209 2217
2210 pnv_pci_link_table_and_group(phb->hose->node, num, 2218 pnv_pci_link_table_and_group(phb->hose->node, num,
2211 tbl, &pe->table_group); 2219 tbl, &pe->table_group);
2212 pnv_pci_phb3_tce_invalidate_pe(pe); 2220 pnv_pci_ioda2_tce_invalidate_pe(pe);
2213 2221
2214 return 0; 2222 return 0;
2215} 2223}
@@ -2347,7 +2355,7 @@ static long pnv_pci_ioda2_unset_window(struct iommu_table_group *table_group,
2347 if (ret) 2355 if (ret)
2348 pe_warn(pe, "Unmapping failed, ret = %ld\n", ret); 2356 pe_warn(pe, "Unmapping failed, ret = %ld\n", ret);
2349 else 2357 else
2350 pnv_pci_phb3_tce_invalidate_pe(pe); 2358 pnv_pci_ioda2_tce_invalidate_pe(pe);
2351 2359
2352 pnv_pci_unlink_table_and_group(table_group->tables[num], table_group); 2360 pnv_pci_unlink_table_and_group(table_group->tables[num], table_group);
2353 2361
@@ -2863,7 +2871,7 @@ static void pnv_pci_ioda_fixup_iov_resources(struct pci_dev *pdev)
2863 res = &pdev->resource[i + PCI_IOV_RESOURCES]; 2871 res = &pdev->resource[i + PCI_IOV_RESOURCES];
2864 if (!res->flags || res->parent) 2872 if (!res->flags || res->parent)
2865 continue; 2873 continue;
2866 if (!pnv_pci_is_mem_pref_64(res->flags)) { 2874 if (!pnv_pci_is_m64(phb, res)) {
2867 dev_warn(&pdev->dev, "Don't support SR-IOV with" 2875 dev_warn(&pdev->dev, "Don't support SR-IOV with"
2868 " non M64 VF BAR%d: %pR. \n", 2876 " non M64 VF BAR%d: %pR. \n",
2869 i, res); 2877 i, res);
@@ -2958,7 +2966,7 @@ static void pnv_ioda_setup_pe_res(struct pnv_ioda_pe *pe,
2958 index++; 2966 index++;
2959 } 2967 }
2960 } else if ((res->flags & IORESOURCE_MEM) && 2968 } else if ((res->flags & IORESOURCE_MEM) &&
2961 !pnv_pci_is_mem_pref_64(res->flags)) { 2969 !pnv_pci_is_m64(phb, res)) {
2962 region.start = res->start - 2970 region.start = res->start -
2963 phb->hose->mem_offset[0] - 2971 phb->hose->mem_offset[0] -
2964 phb->ioda.m32_pci_base; 2972 phb->ioda.m32_pci_base;
@@ -3083,9 +3091,12 @@ static resource_size_t pnv_pci_window_alignment(struct pci_bus *bus,
3083 bridge = bridge->bus->self; 3091 bridge = bridge->bus->self;
3084 } 3092 }
3085 3093
3086 /* We fail back to M32 if M64 isn't supported */ 3094 /*
3087 if (phb->ioda.m64_segsize && 3095 * We fall back to M32 if M64 isn't supported. We enforce the M64
 3088 pnv_pci_is_mem_pref_64(type)) 3096 * alignment for any 64-bit resource, since PCIe doesn't care and
3097 * bridges only do 64-bit prefetchable anyway.
3098 */
3099 if (phb->ioda.m64_segsize && (type & IORESOURCE_MEM_64))
3089 return phb->ioda.m64_segsize; 3100 return phb->ioda.m64_segsize;
3090 if (type & IORESOURCE_MEM) 3101 if (type & IORESOURCE_MEM)
3091 return phb->ioda.m32_segsize; 3102 return phb->ioda.m32_segsize;
@@ -3125,7 +3136,7 @@ static void pnv_pci_fixup_bridge_resources(struct pci_bus *bus,
3125 w = NULL; 3136 w = NULL;
3126 if (r->flags & type & IORESOURCE_IO) 3137 if (r->flags & type & IORESOURCE_IO)
3127 w = &hose->io_resource; 3138 w = &hose->io_resource;
3128 else if (pnv_pci_is_mem_pref_64(r->flags) && 3139 else if (pnv_pci_is_m64(phb, r) &&
3129 (type & IORESOURCE_PREFETCH) && 3140 (type & IORESOURCE_PREFETCH) &&
3130 phb->ioda.m64_segsize) 3141 phb->ioda.m64_segsize)
3131 w = &hose->mem_resources[1]; 3142 w = &hose->mem_resources[1];
@@ -3392,12 +3403,6 @@ static void pnv_ioda_release_pe(struct pnv_ioda_pe *pe)
3392 struct pnv_phb *phb = pe->phb; 3403 struct pnv_phb *phb = pe->phb;
3393 struct pnv_ioda_pe *slave, *tmp; 3404 struct pnv_ioda_pe *slave, *tmp;
3394 3405
3395 /* Release slave PEs in compound PE */
3396 if (pe->flags & PNV_IODA_PE_MASTER) {
3397 list_for_each_entry_safe(slave, tmp, &pe->slaves, list)
3398 pnv_ioda_release_pe(slave);
3399 }
3400
3401 list_del(&pe->list); 3406 list_del(&pe->list);
3402 switch (phb->type) { 3407 switch (phb->type) {
3403 case PNV_PHB_IODA1: 3408 case PNV_PHB_IODA1:
@@ -3412,7 +3417,26 @@ static void pnv_ioda_release_pe(struct pnv_ioda_pe *pe)
3412 3417
3413 pnv_ioda_release_pe_seg(pe); 3418 pnv_ioda_release_pe_seg(pe);
3414 pnv_ioda_deconfigure_pe(pe->phb, pe); 3419 pnv_ioda_deconfigure_pe(pe->phb, pe);
3415 pnv_ioda_free_pe(pe); 3420
3421 /* Release slave PEs in the compound PE */
3422 if (pe->flags & PNV_IODA_PE_MASTER) {
3423 list_for_each_entry_safe(slave, tmp, &pe->slaves, list) {
3424 list_del(&slave->list);
3425 pnv_ioda_free_pe(slave);
3426 }
3427 }
3428
3429 /*
 3430 * The PE for the root bus can be removed because of hotplug in EEH
 3431 * recovery for a fenced PHB error. We need to mark the PE dead so
 3432 * that it can be populated again on the PCI hot add path. The PE
 3433 * shouldn't be destroyed as it's a globally reserved resource.
3434 */
3435 if (phb->ioda.root_pe_populated &&
3436 phb->ioda.root_pe_idx == pe->pe_number)
3437 phb->ioda.root_pe_populated = false;
3438 else
3439 pnv_ioda_free_pe(pe);
3416} 3440}
3417 3441
3418static void pnv_pci_release_device(struct pci_dev *pdev) 3442static void pnv_pci_release_device(struct pci_dev *pdev)
@@ -3428,7 +3452,17 @@ static void pnv_pci_release_device(struct pci_dev *pdev)
3428 if (!pdn || pdn->pe_number == IODA_INVALID_PE) 3452 if (!pdn || pdn->pe_number == IODA_INVALID_PE)
3429 return; 3453 return;
3430 3454
3455 /*
3456 * PCI hotplug can happen as part of EEH error recovery. The @pdn
3457 * isn't removed and added afterwards in this scenario. We should
3458 * set the PE number in @pdn to an invalid one. Otherwise, the PE's
3459 * device count is decreased on removing devices while failing to
 3460 * be increased on adding devices. This leads to an unbalanced PE device
 3461 * count and eventually breaks the normal PCI hotplug path.
3462 */
3431 pe = &phb->ioda.pe_array[pdn->pe_number]; 3463 pe = &phb->ioda.pe_array[pdn->pe_number];
3464 pdn->pe_number = IODA_INVALID_PE;
3465
3432 WARN_ON(--pe->device_count < 0); 3466 WARN_ON(--pe->device_count < 0);
3433 if (pe->device_count == 0) 3467 if (pe->device_count == 0)
3434 pnv_ioda_release_pe(pe); 3468 pnv_ioda_release_pe(pe);
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index 43f7beb2902d..76ec104e88be 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -320,19 +320,6 @@ static int dlpar_remove_device_tree_lmb(struct of_drconf_cell *lmb)
320 return dlpar_update_device_tree_lmb(lmb); 320 return dlpar_update_device_tree_lmb(lmb);
321} 321}
322 322
323static struct memory_block *lmb_to_memblock(struct of_drconf_cell *lmb)
324{
325 unsigned long section_nr;
326 struct mem_section *mem_sect;
327 struct memory_block *mem_block;
328
329 section_nr = pfn_to_section_nr(PFN_DOWN(lmb->base_addr));
330 mem_sect = __nr_to_section(section_nr);
331
332 mem_block = find_memory_block(mem_sect);
333 return mem_block;
334}
335
336#ifdef CONFIG_MEMORY_HOTREMOVE 323#ifdef CONFIG_MEMORY_HOTREMOVE
337static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size) 324static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size)
338{ 325{
@@ -420,6 +407,19 @@ static bool lmb_is_removable(struct of_drconf_cell *lmb)
420 407
421static int dlpar_add_lmb(struct of_drconf_cell *); 408static int dlpar_add_lmb(struct of_drconf_cell *);
422 409
410static struct memory_block *lmb_to_memblock(struct of_drconf_cell *lmb)
411{
412 unsigned long section_nr;
413 struct mem_section *mem_sect;
414 struct memory_block *mem_block;
415
416 section_nr = pfn_to_section_nr(PFN_DOWN(lmb->base_addr));
417 mem_sect = __nr_to_section(section_nr);
418
419 mem_block = find_memory_block(mem_sect);
420 return mem_block;
421}
422
423static int dlpar_remove_lmb(struct of_drconf_cell *lmb) 423static int dlpar_remove_lmb(struct of_drconf_cell *lmb)
424{ 424{
425 struct memory_block *mem_block; 425 struct memory_block *mem_block;
diff --git a/arch/powerpc/platforms/pseries/pci.c b/arch/powerpc/platforms/pseries/pci.c
index fe16a50700de..09eba5a9929a 100644
--- a/arch/powerpc/platforms/pseries/pci.c
+++ b/arch/powerpc/platforms/pseries/pci.c
@@ -119,6 +119,10 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge)
119 119
120 bus = bridge->bus; 120 bus = bridge->bus;
121 121
122 /* Rely on the pcibios_free_controller_deferred() callback. */
123 pci_set_host_bridge_release(bridge, pcibios_free_controller_deferred,
124 (void *) pci_bus_to_host(bus));
125
122 dn = pcibios_get_phb_of_node(bus); 126 dn = pcibios_get_phb_of_node(bus);
123 if (!dn) 127 if (!dn)
124 return 0; 128 return 0;
diff --git a/arch/powerpc/platforms/pseries/pci_dlpar.c b/arch/powerpc/platforms/pseries/pci_dlpar.c
index 906dbaa97fe2..547fd13e4f8e 100644
--- a/arch/powerpc/platforms/pseries/pci_dlpar.c
+++ b/arch/powerpc/platforms/pseries/pci_dlpar.c
@@ -106,8 +106,11 @@ int remove_phb_dynamic(struct pci_controller *phb)
106 release_resource(res); 106 release_resource(res);
107 } 107 }
108 108
109 /* Free pci_controller data structure */ 109 /*
110 pcibios_free_controller(phb); 110 * The pci_controller data structure is freed by
111 * the pcibios_free_controller_deferred() callback;
112 * see pseries_root_bridge_prepare().
113 */
111 114
112 return 0; 115 return 0;
113} 116}
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 4ffcaa6f8670..a39d20e8623d 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -41,7 +41,6 @@
41#include <linux/root_dev.h> 41#include <linux/root_dev.h>
42#include <linux/of.h> 42#include <linux/of.h>
43#include <linux/of_pci.h> 43#include <linux/of_pci.h>
44#include <linux/kexec.h>
45 44
46#include <asm/mmu.h> 45#include <asm/mmu.h>
47#include <asm/processor.h> 46#include <asm/processor.h>
@@ -66,6 +65,7 @@
66#include <asm/eeh.h> 65#include <asm/eeh.h>
67#include <asm/reg.h> 66#include <asm/reg.h>
68#include <asm/plpar_wrappers.h> 67#include <asm/plpar_wrappers.h>
68#include <asm/kexec.h>
69 69
70#include "pseries.h" 70#include "pseries.h"
71 71
diff --git a/arch/powerpc/sysdev/cpm1.c b/arch/powerpc/sysdev/cpm1.c
index 6c110994d902..81d49476c47e 100644
--- a/arch/powerpc/sysdev/cpm1.c
+++ b/arch/powerpc/sysdev/cpm1.c
@@ -534,7 +534,8 @@ struct cpm1_gpio16_chip {
534 534
535static void cpm1_gpio16_save_regs(struct of_mm_gpio_chip *mm_gc) 535static void cpm1_gpio16_save_regs(struct of_mm_gpio_chip *mm_gc)
536{ 536{
537 struct cpm1_gpio16_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc); 537 struct cpm1_gpio16_chip *cpm1_gc =
538 container_of(mm_gc, struct cpm1_gpio16_chip, mm_gc);
538 struct cpm_ioport16 __iomem *iop = mm_gc->regs; 539 struct cpm_ioport16 __iomem *iop = mm_gc->regs;
539 540
540 cpm1_gc->cpdata = in_be16(&iop->dat); 541 cpm1_gc->cpdata = in_be16(&iop->dat);
@@ -649,7 +650,8 @@ struct cpm1_gpio32_chip {
649 650
650static void cpm1_gpio32_save_regs(struct of_mm_gpio_chip *mm_gc) 651static void cpm1_gpio32_save_regs(struct of_mm_gpio_chip *mm_gc)
651{ 652{
652 struct cpm1_gpio32_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc); 653 struct cpm1_gpio32_chip *cpm1_gc =
654 container_of(mm_gc, struct cpm1_gpio32_chip, mm_gc);
653 struct cpm_ioport32b __iomem *iop = mm_gc->regs; 655 struct cpm_ioport32b __iomem *iop = mm_gc->regs;
654 656
655 cpm1_gc->cpdata = in_be32(&iop->dat); 657 cpm1_gc->cpdata = in_be32(&iop->dat);
diff --git a/arch/powerpc/sysdev/cpm_common.c b/arch/powerpc/sysdev/cpm_common.c
index 911456d17713..947f42007734 100644
--- a/arch/powerpc/sysdev/cpm_common.c
+++ b/arch/powerpc/sysdev/cpm_common.c
@@ -94,7 +94,8 @@ struct cpm2_gpio32_chip {
94 94
95static void cpm2_gpio32_save_regs(struct of_mm_gpio_chip *mm_gc) 95static void cpm2_gpio32_save_regs(struct of_mm_gpio_chip *mm_gc)
96{ 96{
97 struct cpm2_gpio32_chip *cpm2_gc = gpiochip_get_data(&mm_gc->gc); 97 struct cpm2_gpio32_chip *cpm2_gc =
98 container_of(mm_gc, struct cpm2_gpio32_chip, mm_gc);
98 struct cpm2_ioports __iomem *iop = mm_gc->regs; 99 struct cpm2_ioports __iomem *iop = mm_gc->regs;
99 100
100 cpm2_gc->cpdata = in_be32(&iop->dat); 101 cpm2_gc->cpdata = in_be32(&iop->dat);
diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c
index 68e7c0dd2e45..3cc7cace194a 100644
--- a/arch/powerpc/sysdev/fsl_rio.c
+++ b/arch/powerpc/sysdev/fsl_rio.c
@@ -23,7 +23,7 @@
23 */ 23 */
24 24
25#include <linux/init.h> 25#include <linux/init.h>
26#include <linux/module.h> 26#include <linux/extable.h>
27#include <linux/types.h> 27#include <linux/types.h>
28#include <linux/dma-mapping.h> 28#include <linux/dma-mapping.h>
29#include <linux/interrupt.h> 29#include <linux/interrupt.h>
diff --git a/arch/powerpc/sysdev/xics/Kconfig b/arch/powerpc/sysdev/xics/Kconfig
index 0031eda320c3..385e7aa9e273 100644
--- a/arch/powerpc/sysdev/xics/Kconfig
+++ b/arch/powerpc/sysdev/xics/Kconfig
@@ -1,6 +1,7 @@
1config PPC_XICS 1config PPC_XICS
2 def_bool n 2 def_bool n
3 select PPC_SMP_MUXED_IPI 3 select PPC_SMP_MUXED_IPI
4 select HARDIRQS_SW_RESEND
4 5
5config PPC_ICP_NATIVE 6config PPC_ICP_NATIVE
6 def_bool n 7 def_bool n
diff --git a/arch/powerpc/sysdev/xics/icp-opal.c b/arch/powerpc/sysdev/xics/icp-opal.c
index 57d72f10a97f..9114243fa1b5 100644
--- a/arch/powerpc/sysdev/xics/icp-opal.c
+++ b/arch/powerpc/sysdev/xics/icp-opal.c
@@ -23,10 +23,10 @@
23 23
24static void icp_opal_teardown_cpu(void) 24static void icp_opal_teardown_cpu(void)
25{ 25{
26 int cpu = smp_processor_id(); 26 int hw_cpu = hard_smp_processor_id();
27 27
28 /* Clear any pending IPI */ 28 /* Clear any pending IPI */
29 opal_int_set_mfrr(cpu, 0xff); 29 opal_int_set_mfrr(hw_cpu, 0xff);
30} 30}
31 31
32static void icp_opal_flush_ipi(void) 32static void icp_opal_flush_ipi(void)
@@ -101,14 +101,16 @@ static void icp_opal_eoi(struct irq_data *d)
101 101
102static void icp_opal_cause_ipi(int cpu, unsigned long data) 102static void icp_opal_cause_ipi(int cpu, unsigned long data)
103{ 103{
104 opal_int_set_mfrr(cpu, IPI_PRIORITY); 104 int hw_cpu = get_hard_smp_processor_id(cpu);
105
106 opal_int_set_mfrr(hw_cpu, IPI_PRIORITY);
105} 107}
106 108
107static irqreturn_t icp_opal_ipi_action(int irq, void *dev_id) 109static irqreturn_t icp_opal_ipi_action(int irq, void *dev_id)
108{ 110{
109 int cpu = smp_processor_id(); 111 int hw_cpu = hard_smp_processor_id();
110 112
111 opal_int_set_mfrr(cpu, 0xff); 113 opal_int_set_mfrr(hw_cpu, 0xff);
112 114
113 return smp_ipi_demux(); 115 return smp_ipi_demux();
114} 116}
diff --git a/arch/powerpc/sysdev/xics/ics-opal.c b/arch/powerpc/sysdev/xics/ics-opal.c
index 27c936c080a6..1c6bf4b66f56 100644
--- a/arch/powerpc/sysdev/xics/ics-opal.c
+++ b/arch/powerpc/sysdev/xics/ics-opal.c
@@ -156,7 +156,9 @@ static struct irq_chip ics_opal_irq_chip = {
156 .irq_mask = ics_opal_mask_irq, 156 .irq_mask = ics_opal_mask_irq,
157 .irq_unmask = ics_opal_unmask_irq, 157 .irq_unmask = ics_opal_unmask_irq,
158 .irq_eoi = NULL, /* Patched at init time */ 158 .irq_eoi = NULL, /* Patched at init time */
159 .irq_set_affinity = ics_opal_set_affinity 159 .irq_set_affinity = ics_opal_set_affinity,
160 .irq_set_type = xics_set_irq_type,
161 .irq_retrigger = xics_retrigger,
160}; 162};
161 163
162static int ics_opal_map(struct ics *ics, unsigned int virq); 164static int ics_opal_map(struct ics *ics, unsigned int virq);
diff --git a/arch/powerpc/sysdev/xics/ics-rtas.c b/arch/powerpc/sysdev/xics/ics-rtas.c
index 3854dd41558d..78ee5c778ef8 100644
--- a/arch/powerpc/sysdev/xics/ics-rtas.c
+++ b/arch/powerpc/sysdev/xics/ics-rtas.c
@@ -163,7 +163,9 @@ static struct irq_chip ics_rtas_irq_chip = {
163 .irq_mask = ics_rtas_mask_irq, 163 .irq_mask = ics_rtas_mask_irq,
164 .irq_unmask = ics_rtas_unmask_irq, 164 .irq_unmask = ics_rtas_unmask_irq,
165 .irq_eoi = NULL, /* Patched at init time */ 165 .irq_eoi = NULL, /* Patched at init time */
166 .irq_set_affinity = ics_rtas_set_affinity 166 .irq_set_affinity = ics_rtas_set_affinity,
167 .irq_set_type = xics_set_irq_type,
168 .irq_retrigger = xics_retrigger,
167}; 169};
168 170
169static int ics_rtas_map(struct ics *ics, unsigned int virq) 171static int ics_rtas_map(struct ics *ics, unsigned int virq)
diff --git a/arch/powerpc/sysdev/xics/xics-common.c b/arch/powerpc/sysdev/xics/xics-common.c
index a795a5f0301c..9d530f479588 100644
--- a/arch/powerpc/sysdev/xics/xics-common.c
+++ b/arch/powerpc/sysdev/xics/xics-common.c
@@ -328,8 +328,12 @@ static int xics_host_map(struct irq_domain *h, unsigned int virq,
328 328
329 pr_devel("xics: map virq %d, hwirq 0x%lx\n", virq, hw); 329 pr_devel("xics: map virq %d, hwirq 0x%lx\n", virq, hw);
330 330
331 /* They aren't all level sensitive but we just don't really know */ 331 /*
332 irq_set_status_flags(virq, IRQ_LEVEL); 332 * Mark interrupts as edge sensitive by default so that resend
333 * actually works. The device-tree parsing will turn the LSIs
334 * back to level.
335 */
336 irq_clear_status_flags(virq, IRQ_LEVEL);
333 337
334 /* Don't call into ICS for IPIs */ 338 /* Don't call into ICS for IPIs */
335 if (hw == XICS_IPI) { 339 if (hw == XICS_IPI) {
@@ -351,13 +355,54 @@ static int xics_host_xlate(struct irq_domain *h, struct device_node *ct,
351 irq_hw_number_t *out_hwirq, unsigned int *out_flags) 355 irq_hw_number_t *out_hwirq, unsigned int *out_flags)
352 356
353{ 357{
354 /* Current xics implementation translates everything
355 * to level. It is not technically right for MSIs but this
356 * is irrelevant at this point. We might get smarter in the future
357 */
358 *out_hwirq = intspec[0]; 358 *out_hwirq = intspec[0];
359 *out_flags = IRQ_TYPE_LEVEL_LOW;
360 359
360 /*
 361 * If intsize is at least 2, we look for the type in the second cell;
 362 * we assume its LSB indicates a level interrupt.
363 */
364 if (intsize > 1) {
365 if (intspec[1] & 1)
366 *out_flags = IRQ_TYPE_LEVEL_LOW;
367 else
368 *out_flags = IRQ_TYPE_EDGE_RISING;
369 } else
370 *out_flags = IRQ_TYPE_LEVEL_LOW;
371
372 return 0;
373}
374
375int xics_set_irq_type(struct irq_data *d, unsigned int flow_type)
376{
377 /*
 378 * We only support these. This really has no effect other than setting
 379 * the corresponding descriptor bits, mind you, but those will in turn
380 * affect the resend function when re-enabling an edge interrupt.
381 *
 382 * We set the default to edge as explained in map().
383 */
384 if (flow_type == IRQ_TYPE_DEFAULT || flow_type == IRQ_TYPE_NONE)
385 flow_type = IRQ_TYPE_EDGE_RISING;
386
387 if (flow_type != IRQ_TYPE_EDGE_RISING &&
388 flow_type != IRQ_TYPE_LEVEL_LOW)
389 return -EINVAL;
390
391 irqd_set_trigger_type(d, flow_type);
392
393 return IRQ_SET_MASK_OK_NOCOPY;
394}
395
396int xics_retrigger(struct irq_data *data)
397{
398 /*
399 * We need to push a dummy CPPR when retriggering, since the subsequent
400 * EOI will try to pop it. Passing 0 works, as the function hard codes
401 * the priority value anyway.
402 */
403 xics_push_cppr(0);
404
405 /* Tell the core to do a soft retrigger */
361 return 0; 406 return 0;
362} 407}
363 408
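The new two-cell decode is compact enough to restate on its own; a sketch of the rule the xlate hook now implements (flag constants from linux/irq.h):

/* Sketch: cell 1 is the interrupt number; if a second cell exists, its
 * LSB selects level-low (1) versus edge-rising (0). One-cell
 * specifiers keep the historical level-low default. */
static unsigned int xics_decode_flags(const u32 *intspec, unsigned int intsize)
{
        if (intsize > 1)
                return (intspec[1] & 1) ? IRQ_TYPE_LEVEL_LOW
                                        : IRQ_TYPE_EDGE_RISING;
        return IRQ_TYPE_LEVEL_LOW;
}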
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 9e607bf2d640..c109f073d454 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -68,7 +68,6 @@ config DEBUG_RODATA
68config S390 68config S390
69 def_bool y 69 def_bool y
70 select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE 70 select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
71 select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
72 select ARCH_HAS_DEVMEM_IS_ALLOWED 71 select ARCH_HAS_DEVMEM_IS_ALLOWED
73 select ARCH_HAS_ELF_RANDOMIZE 72 select ARCH_HAS_ELF_RANDOMIZE
74 select ARCH_HAS_GCOV_PROFILE_ALL 73 select ARCH_HAS_GCOV_PROFILE_ALL
@@ -123,6 +122,7 @@ config S390
123 select HAVE_ALIGNED_STRUCT_PAGE if SLUB 122 select HAVE_ALIGNED_STRUCT_PAGE if SLUB
124 select HAVE_ARCH_AUDITSYSCALL 123 select HAVE_ARCH_AUDITSYSCALL
125 select HAVE_ARCH_EARLY_PFN_TO_NID 124 select HAVE_ARCH_EARLY_PFN_TO_NID
125 select HAVE_ARCH_HARDENED_USERCOPY
126 select HAVE_ARCH_JUMP_LABEL 126 select HAVE_ARCH_JUMP_LABEL
127 select CPU_NO_EFFICIENT_FFS if !HAVE_MARCH_Z9_109_FEATURES 127 select CPU_NO_EFFICIENT_FFS if !HAVE_MARCH_Z9_109_FEATURES
128 select HAVE_ARCH_SECCOMP_FILTER 128 select HAVE_ARCH_SECCOMP_FILTER
@@ -871,4 +871,17 @@ config S390_GUEST
871 Select this option if you want to run the kernel as a guest under 871 Select this option if you want to run the kernel as a guest under
872 the KVM hypervisor. 872 the KVM hypervisor.
873 873
874config S390_GUEST_OLD_TRANSPORT
875 def_bool y
876 prompt "Guest support for old s390 virtio transport (DEPRECATED)"
877 depends on S390_GUEST
878 help
879 Enable this option to add support for the old s390-virtio
880 transport (i.e. virtio devices NOT based on virtio-ccw). This
 881 type of virtio device is only available on the experimental
882 kuli userspace or with old (< 2.6) qemu. If you are running
883 with a modern version of qemu (which supports virtio-ccw since
884 1.4 and uses it by default since version 2.4), you probably won't
885 need this.
886
874endmenu 887endmenu
diff --git a/arch/s390/boot/compressed/head.S b/arch/s390/boot/compressed/head.S
index f86a4eef28a9..28c4f96a2d9c 100644
--- a/arch/s390/boot/compressed/head.S
+++ b/arch/s390/boot/compressed/head.S
@@ -21,16 +21,21 @@ ENTRY(startup_continue)
21 lg %r15,.Lstack-.LPG1(%r13) 21 lg %r15,.Lstack-.LPG1(%r13)
22 aghi %r15,-160 22 aghi %r15,-160
23 brasl %r14,decompress_kernel 23 brasl %r14,decompress_kernel
24 # setup registers for memory mover & branch to target 24 # Set up registers for memory mover. We move the decompressed image to
25 # 0x11000, starting at offset 0x11000 in the decompressed image so
26 # that code living at 0x11000 in the image will end up at 0x11000 in
27 # memory.
25 lgr %r4,%r2 28 lgr %r4,%r2
26 lg %r2,.Loffset-.LPG1(%r13) 29 lg %r2,.Loffset-.LPG1(%r13)
27 la %r4,0(%r2,%r4) 30 la %r4,0(%r2,%r4)
28 lg %r3,.Lmvsize-.LPG1(%r13) 31 lg %r3,.Lmvsize-.LPG1(%r13)
29 lgr %r5,%r3 32 lgr %r5,%r3
30 # move the memory mover someplace safe 33 # Move the memory mover someplace safe so it doesn't overwrite itself.
31 la %r1,0x200 34 la %r1,0x200
32 mvc 0(mover_end-mover,%r1),mover-.LPG1(%r13) 35 mvc 0(mover_end-mover,%r1),mover-.LPG1(%r13)
33 # decompress image is started at 0x11000 36 # When the memory mover is done we pass control to
37 # arch/s390/kernel/head64.S:startup_continue which lives at 0x11000 in
38 # the decompressed image.
34 lgr %r6,%r2 39 lgr %r6,%r2
35 br %r1 40 br %r1
36mover: 41mover:
diff --git a/arch/s390/configs/default_defconfig b/arch/s390/configs/default_defconfig
index 889ea3450210..412b1bd21029 100644
--- a/arch/s390/configs/default_defconfig
+++ b/arch/s390/configs/default_defconfig
@@ -602,7 +602,6 @@ CONFIG_FAIL_FUTEX=y
602CONFIG_FAULT_INJECTION_DEBUG_FS=y 602CONFIG_FAULT_INJECTION_DEBUG_FS=y
603CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y 603CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
604CONFIG_LATENCYTOP=y 604CONFIG_LATENCYTOP=y
605CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y
606CONFIG_IRQSOFF_TRACER=y 605CONFIG_IRQSOFF_TRACER=y
607CONFIG_PREEMPT_TRACER=y 606CONFIG_PREEMPT_TRACER=y
608CONFIG_SCHED_TRACER=y 607CONFIG_SCHED_TRACER=y
@@ -678,7 +677,7 @@ CONFIG_CRYPTO_SHA512_S390=m
678CONFIG_CRYPTO_DES_S390=m 677CONFIG_CRYPTO_DES_S390=m
679CONFIG_CRYPTO_AES_S390=m 678CONFIG_CRYPTO_AES_S390=m
680CONFIG_CRYPTO_GHASH_S390=m 679CONFIG_CRYPTO_GHASH_S390=m
681CONFIG_CRYPTO_CRC32_S390=m 680CONFIG_CRYPTO_CRC32_S390=y
682CONFIG_ASYMMETRIC_KEY_TYPE=y 681CONFIG_ASYMMETRIC_KEY_TYPE=y
683CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m 682CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
684CONFIG_X509_CERTIFICATE_PARSER=m 683CONFIG_X509_CERTIFICATE_PARSER=m
diff --git a/arch/s390/configs/gcov_defconfig b/arch/s390/configs/gcov_defconfig
index 1bcfd764910a..bec279eb4b93 100644
--- a/arch/s390/configs/gcov_defconfig
+++ b/arch/s390/configs/gcov_defconfig
@@ -552,7 +552,6 @@ CONFIG_NOTIFIER_ERROR_INJECTION=m
552CONFIG_CPU_NOTIFIER_ERROR_INJECT=m 552CONFIG_CPU_NOTIFIER_ERROR_INJECT=m
553CONFIG_PM_NOTIFIER_ERROR_INJECT=m 553CONFIG_PM_NOTIFIER_ERROR_INJECT=m
554CONFIG_LATENCYTOP=y 554CONFIG_LATENCYTOP=y
555CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y
556CONFIG_BLK_DEV_IO_TRACE=y 555CONFIG_BLK_DEV_IO_TRACE=y
557# CONFIG_KPROBE_EVENT is not set 556# CONFIG_KPROBE_EVENT is not set
558CONFIG_TRACE_ENUM_MAP_FILE=y 557CONFIG_TRACE_ENUM_MAP_FILE=y
@@ -616,7 +615,7 @@ CONFIG_CRYPTO_SHA512_S390=m
616CONFIG_CRYPTO_DES_S390=m 615CONFIG_CRYPTO_DES_S390=m
617CONFIG_CRYPTO_AES_S390=m 616CONFIG_CRYPTO_AES_S390=m
618CONFIG_CRYPTO_GHASH_S390=m 617CONFIG_CRYPTO_GHASH_S390=m
619CONFIG_CRYPTO_CRC32_S390=m 618CONFIG_CRYPTO_CRC32_S390=y
620CONFIG_ASYMMETRIC_KEY_TYPE=y 619CONFIG_ASYMMETRIC_KEY_TYPE=y
621CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m 620CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
622CONFIG_X509_CERTIFICATE_PARSER=m 621CONFIG_X509_CERTIFICATE_PARSER=m
diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig
index 13ff090139c8..1751446a5bbb 100644
--- a/arch/s390/configs/performance_defconfig
+++ b/arch/s390/configs/performance_defconfig
@@ -549,7 +549,6 @@ CONFIG_TIMER_STATS=y
549CONFIG_RCU_TORTURE_TEST=m 549CONFIG_RCU_TORTURE_TEST=m
550CONFIG_RCU_CPU_STALL_TIMEOUT=60 550CONFIG_RCU_CPU_STALL_TIMEOUT=60
551CONFIG_LATENCYTOP=y 551CONFIG_LATENCYTOP=y
552CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y
553CONFIG_SCHED_TRACER=y 552CONFIG_SCHED_TRACER=y
554CONFIG_FTRACE_SYSCALLS=y 553CONFIG_FTRACE_SYSCALLS=y
555CONFIG_STACK_TRACER=y 554CONFIG_STACK_TRACER=y
@@ -615,7 +614,7 @@ CONFIG_CRYPTO_SHA512_S390=m
 CONFIG_CRYPTO_DES_S390=m
 CONFIG_CRYPTO_AES_S390=m
 CONFIG_CRYPTO_GHASH_S390=m
-CONFIG_CRYPTO_CRC32_S390=m
+CONFIG_CRYPTO_CRC32_S390=y
 CONFIG_ASYMMETRIC_KEY_TYPE=y
 CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
 CONFIG_X509_CERTIFICATE_PARSER=m
diff --git a/arch/s390/crypto/crc32-vx.c b/arch/s390/crypto/crc32-vx.c
index 577ae1d4ae89..2bad9d837029 100644
--- a/arch/s390/crypto/crc32-vx.c
+++ b/arch/s390/crypto/crc32-vx.c
@@ -51,6 +51,9 @@ u32 crc32c_le_vgfm_16(u32 crc, unsigned char const *buf, size_t size);
 	struct kernel_fpu vxstate;					\
 	unsigned long prealign, aligned, remaining;			\
 									\
+	if (datalen < VX_MIN_LEN + VX_ALIGN_MASK)			\
+		return ___crc32_sw(crc, data, datalen);			\
+									\
 	if ((unsigned long)data & VX_ALIGN_MASK) {			\
 		prealign = VX_ALIGNMENT -				\
 			    ((unsigned long)data & VX_ALIGN_MASK);	\
@@ -59,9 +62,6 @@ u32 crc32c_le_vgfm_16(u32 crc, unsigned char const *buf, size_t size);
 		data = (void *)((unsigned long)data + prealign);	\
 	}								\
 									\
-	if (datalen < VX_MIN_LEN)					\
-		return ___crc32_sw(crc, data, datalen);			\
-									\
 	aligned = datalen & ~VX_ALIGN_MASK;				\
 	remaining = datalen & VX_ALIGN_MASK;				\
 									\
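Why the length check moves: in the old order the prealign step ran first and subtracted up to VX_ALIGN_MASK bytes from the unsigned datalen before any size test, so a very short unaligned buffer could underflow datalen; hoisting the check and widening the bound to VX_MIN_LEN + VX_ALIGN_MASK also guarantees that at least VX_MIN_LEN bytes remain for the vector unit after alignment. A standalone sketch of the fixed dispatch, with the VX_* constants and both CRC routines as stand-ins for the kernel's macros:

    #include <stddef.h>
    #include <stdint.h>

    #define VX_ALIGNMENT   16UL
    #define VX_ALIGN_MASK  (VX_ALIGNMENT - 1)
    #define VX_MIN_LEN     64UL

    uint32_t crc32_sw(uint32_t crc, const unsigned char *buf, size_t len); /* byte-wise fallback */
    uint32_t crc32_vx(uint32_t crc, const unsigned char *buf, size_t len); /* vector path, aligned input */

    uint32_t crc32_dispatch(uint32_t crc, const unsigned char *data, size_t datalen)
    {
        size_t prealign, aligned, remaining;

        /* Checked before any prealign bytes are consumed: no unsigned
         * underflow, and the aligned middle part stays >= VX_MIN_LEN. */
        if (datalen < VX_MIN_LEN + VX_ALIGN_MASK)
            return crc32_sw(crc, data, datalen);

        if ((uintptr_t)data & VX_ALIGN_MASK) {
            prealign = VX_ALIGNMENT - ((uintptr_t)data & VX_ALIGN_MASK);
            datalen -= prealign;
            crc = crc32_sw(crc, data, prealign);
            data += prealign;
        }

        aligned = datalen & ~VX_ALIGN_MASK;
        remaining = datalen & VX_ALIGN_MASK;

        crc = crc32_vx(crc, data, aligned);
        if (remaining)
            crc = crc32_sw(crc, data + aligned, remaining);
        return crc;
    }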
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index ccccebeeaaf6..2d40ef0a6295 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -172,7 +172,6 @@ CONFIG_DEBUG_NOTIFIERS=y
 CONFIG_RCU_CPU_STALL_TIMEOUT=60
 CONFIG_RCU_TRACE=y
 CONFIG_LATENCYTOP=y
-CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y
 CONFIG_SCHED_TRACER=y
 CONFIG_FTRACE_SYSCALLS=y
 CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y
@@ -234,7 +233,7 @@ CONFIG_CRYPTO_SHA256_S390=m
 CONFIG_CRYPTO_SHA512_S390=m
 CONFIG_CRYPTO_DES_S390=m
 CONFIG_CRYPTO_AES_S390=m
-CONFIG_CRYPTO_CRC32_S390=m
+CONFIG_CRYPTO_CRC32_S390=y
 CONFIG_CRC7=m
 # CONFIG_XZ_DEC_X86 is not set
 # CONFIG_XZ_DEC_POWERPC is not set
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index 9b49cf1daa8f..52d7c8709279 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -266,28 +266,28 @@ int __put_user_bad(void) __attribute__((noreturn));
 	__chk_user_ptr(ptr);					\
 	switch (sizeof(*(ptr))) {				\
 	case 1: {						\
-		unsigned char __x;				\
+		unsigned char __x = 0;				\
 		__gu_err = __get_user_fn(&__x, ptr,		\
 					 sizeof(*(ptr)));	\
 		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
 		break;						\
 	};							\
 	case 2: {						\
-		unsigned short __x;				\
+		unsigned short __x = 0;				\
 		__gu_err = __get_user_fn(&__x, ptr,		\
 					 sizeof(*(ptr)));	\
 		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
 		break;						\
 	};							\
 	case 4: {						\
-		unsigned int __x;				\
+		unsigned int __x = 0;				\
 		__gu_err = __get_user_fn(&__x, ptr,		\
 					 sizeof(*(ptr)));	\
 		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
 		break;						\
 	};							\
 	case 8: {						\
-		unsigned long long __x;				\
+		unsigned long long __x = 0;			\
 		__gu_err = __get_user_fn(&__x, ptr,		\
 					 sizeof(*(ptr)));	\
 		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
@@ -311,6 +311,14 @@ int __get_user_bad(void) __attribute__((noreturn));
 #define __put_user_unaligned __put_user
 #define __get_user_unaligned __get_user
 
+extern void __compiletime_error("usercopy buffer size is too small")
+__bad_copy_user(void);
+
+static inline void copy_user_overflow(int size, unsigned long count)
+{
+	WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
+}
+
 /**
  * copy_to_user: - Copy a block of data into user space.
  * @to:   Destination address, in user space.
@@ -332,12 +340,6 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
 	return __copy_to_user(to, from, n);
 }
 
-void copy_from_user_overflow(void)
-#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
-__compiletime_warning("copy_from_user() buffer size is not provably correct")
-#endif
-;
-
 /**
  * copy_from_user: - Copy a block of data from user space.
  * @to:   Destination address, in kernel space.
@@ -362,7 +364,10 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
 
 	might_fault();
 	if (unlikely(sz != -1 && sz < n)) {
-		copy_from_user_overflow();
+		if (!__builtin_constant_p(n))
+			copy_user_overflow(sz, n);
+		else
+			__bad_copy_user();
 		return n;
 	}
 	return __copy_from_user(to, from, n);
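The replacement splits overflow reporting by whether the requested size is a compile-time constant: a constant size that exceeds the destination object trips __bad_copy_user() and fails the build, while a runtime size falls through to a WARN. A rough user-space model of the same split (the GCC error attribute stands in for __compiletime_error(), and optimization is required so the provably-dead branch is discarded):

    #include <stdio.h>

    /* Build fails if a call to this survives dead-code elimination. */
    extern void bad_copy(void) __attribute__((error("usercopy buffer size is too small")));

    static void copy_overflow(int size, unsigned long count)
    {
        fprintf(stderr, "Buffer overflow detected (%d < %lu)!\n", size, count);
    }

    /* 'sz' models __builtin_object_size() of the destination; -1 = unknown. */
    static unsigned long checked_copy(char *to, long sz, const char *from, unsigned long n)
    {
        if (sz != -1 && (unsigned long)sz < n) {
            if (__builtin_constant_p(n))
                bad_copy();                 /* reported at build time */
            else
                copy_overflow((int)sz, n);  /* reported at run time */
            return n;                       /* nothing copied */
        }
        /* the real code would perform the user copy here */
        return 0;
    }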
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index 56e4d8234ef2..4431905f8cfa 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -309,7 +309,9 @@ ENTRY(startup_kdump)
 	l	%r15,.Lstack-.LPG0(%r13)
 	ahi	%r15,-STACK_FRAME_OVERHEAD
 	brasl	%r14,verify_facilities
-	/* Continue with startup code in head64.S */
+# For uncompressed images, continue in
+# arch/s390/kernel/head64.S. For compressed images, continue in
+# arch/s390/boot/compressed/head.S.
 	jg	startup_continue
 
 .Lstack:
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index ba5f456edaa9..7f7ba5f23f13 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -204,11 +204,9 @@ static void __init conmode_default(void)
 #endif
 		}
 	} else if (MACHINE_IS_KVM) {
-		if (sclp.has_vt220 &&
-		    config_enabled(CONFIG_SCLP_VT220_CONSOLE))
+		if (sclp.has_vt220 && IS_ENABLED(CONFIG_SCLP_VT220_CONSOLE))
 			SET_CONSOLE_VT220;
-		else if (sclp.has_linemode &&
-			 config_enabled(CONFIG_SCLP_CONSOLE))
+		else if (sclp.has_linemode && IS_ENABLED(CONFIG_SCLP_CONSOLE))
 			SET_CONSOLE_SCLP;
 		else
 			SET_CONSOLE_HVC;
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 3f3ae4865d57..607ec91966c7 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -1672,6 +1672,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 				    KVM_SYNC_CRS |
 				    KVM_SYNC_ARCH0 |
 				    KVM_SYNC_PFAULT;
+	kvm_s390_set_prefix(vcpu, 0);
 	if (test_kvm_facility(vcpu->kvm, 64))
 		vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
 	/* fprs can be synchronized via vrs, even if the guest has no vx. With
@@ -2230,9 +2231,10 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 		return -EINVAL;
 	current->thread.fpu.fpc = fpu->fpc;
 	if (MACHINE_HAS_VX)
-		convert_fp_to_vx(current->thread.fpu.vxrs, (freg_t *)fpu->fprs);
+		convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs,
+				 (freg_t *) fpu->fprs);
 	else
-		memcpy(current->thread.fpu.fprs, &fpu->fprs, sizeof(fpu->fprs));
+		memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs));
 	return 0;
 }
 
@@ -2241,9 +2243,10 @@ int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 	/* make sure we have the latest values */
 	save_fpu_regs();
 	if (MACHINE_HAS_VX)
-		convert_vx_to_fp((freg_t *)fpu->fprs, current->thread.fpu.vxrs);
+		convert_vx_to_fp((freg_t *) fpu->fprs,
+				 (__vector128 *) vcpu->run->s.regs.vrs);
 	else
-		memcpy(fpu->fprs, current->thread.fpu.fprs, sizeof(fpu->fprs));
+		memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs));
 	fpu->fpc = current->thread.fpu.fpc;
 	return 0;
 }
@@ -2361,8 +2364,10 @@ retry:
 		rc = gmap_mprotect_notify(vcpu->arch.gmap,
 					  kvm_s390_get_prefix(vcpu),
 					  PAGE_SIZE * 2, PROT_WRITE);
-		if (rc)
+		if (rc) {
+			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
 			return rc;
+		}
 		goto retry;
 	}
 
diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
index c106488b4137..d8673e243f13 100644
--- a/arch/s390/kvm/vsie.c
+++ b/arch/s390/kvm/vsie.c
@@ -584,7 +584,7 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 		/* Validity 0x0044 will be checked by SIE */
 		if (rc)
 			goto unpin;
-		scb_s->gvrd = hpa;
+		scb_s->riccbd = hpa;
 	}
 	return 0;
 unpin:
diff --git a/arch/s390/lib/string.c b/arch/s390/lib/string.c
index e390bbb16443..48352bffbc92 100644
--- a/arch/s390/lib/string.c
+++ b/arch/s390/lib/string.c
@@ -237,11 +237,10 @@ char * strrchr(const char * s, int c)
 EXPORT_SYMBOL(strrchr);
 
 static inline int clcle(const char *s1, unsigned long l1,
-			const char *s2, unsigned long l2,
-			int *diff)
+			const char *s2, unsigned long l2)
 {
 	register unsigned long r2 asm("2") = (unsigned long) s1;
-	register unsigned long r3 asm("3") = (unsigned long) l2;
+	register unsigned long r3 asm("3") = (unsigned long) l1;
 	register unsigned long r4 asm("4") = (unsigned long) s2;
 	register unsigned long r5 asm("5") = (unsigned long) l2;
 	int cc;
@@ -252,7 +251,6 @@ static inline int clcle(const char *s1, unsigned long l1,
252 " srl %0,28" 251 " srl %0,28"
253 : "=&d" (cc), "+a" (r2), "+a" (r3), 252 : "=&d" (cc), "+a" (r2), "+a" (r3),
254 "+a" (r4), "+a" (r5) : : "cc"); 253 "+a" (r4), "+a" (r5) : : "cc");
255 *diff = *(char *)r2 - *(char *)r4;
256 return cc; 254 return cc;
257} 255}
258 256
@@ -270,9 +268,9 @@ char * strstr(const char * s1,const char * s2)
 		return (char *) s1;
 	l1 = __strend(s1) - s1;
 	while (l1-- >= l2) {
-		int cc, dummy;
+		int cc;
 
-		cc = clcle(s1, l1, s2, l2, &dummy);
+		cc = clcle(s1, l2, s2, l2);
 		if (!cc)
 			return (char *) s1;
 		s1++;
@@ -313,11 +311,11 @@ EXPORT_SYMBOL(memchr);
  */
 int memcmp(const void *cs, const void *ct, size_t n)
 {
-	int ret, diff;
+	int ret;
 
-	ret = clcle(cs, n, ct, n, &diff);
+	ret = clcle(cs, n, ct, n);
 	if (ret)
-		ret = diff;
+		ret = ret == 1 ? -1 : 1;
 	return ret;
 }
 EXPORT_SYMBOL(memcmp);
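Two clcle() caller bugs are fixed at once here: strstr() was passing the remaining haystack length l1 instead of the needle length l2 for the first operand, and memcmp() derived its sign by dereferencing the post-CLCLE operand registers, which point past the mismatch once the instruction completes. The new code relies only on the condition code, which CLCLE sets to 0 for equal operands, 1 when the first operand is low, and 2 when it is high; a minimal sketch of the mapping now done inline in memcmp():

    /* CLCLE condition code -> memcmp()-style result. */
    static int cc_to_result(int cc)
    {
        if (cc == 0)
            return 0;               /* operands compare equal */
        return cc == 1 ? -1 : 1;    /* 1: first low, 2: first high */
    }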
diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c
index d96596128e9f..f481fcde067b 100644
--- a/arch/s390/lib/uaccess.c
+++ b/arch/s390/lib/uaccess.c
@@ -104,6 +104,7 @@ static inline unsigned long copy_from_user_mvcp(void *x, const void __user *ptr,
 
 unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
 {
+	check_object_size(to, n, false);
 	if (static_branch_likely(&have_mvcos))
 		return copy_from_user_mvcos(to, from, n);
 	return copy_from_user_mvcp(to, from, n);
@@ -177,6 +178,7 @@ static inline unsigned long copy_to_user_mvcs(void __user *ptr, const void *x,
 
 unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
+	check_object_size(from, n, true);
 	if (static_branch_likely(&have_mvcos))
 		return copy_to_user_mvcos(to, from, n);
 	return copy_to_user_mvcs(to, from, n);
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index 7104ffb5a67f..af7cf28cf97e 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -252,6 +252,8 @@ static int change_page_attr(unsigned long addr, unsigned long end,
 	int rc = -EINVAL;
 	pgd_t *pgdp;
 
+	if (addr == end)
+		return 0;
 	if (end >= MODULES_END)
 		return -EINVAL;
 	mutex_lock(&cpa_mutex);
diff --git a/arch/score/include/asm/uaccess.h b/arch/score/include/asm/uaccess.h
index 20a3591225cc..01aec8ccde83 100644
--- a/arch/score/include/asm/uaccess.h
+++ b/arch/score/include/asm/uaccess.h
@@ -163,7 +163,7 @@ do { \
 		__get_user_asm(val, "lw", ptr);			\
 		break;						\
 	case 8:							\
-		if ((copy_from_user((void *)&val, ptr, 8)) == 0)\
+		if (__copy_from_user((void *)&val, ptr, 8) == 0)\
 			__gu_err = 0;				\
 		else						\
 			__gu_err = -EFAULT;			\
@@ -188,6 +188,8 @@ do { \
 								\
 	if (likely(access_ok(VERIFY_READ, __gu_ptr, size)))	\
 		__get_user_common((x), size, __gu_ptr);		\
+	else							\
+		(x) = 0;					\
 								\
 	__gu_err;						\
 })
@@ -201,6 +203,7 @@ do { \
201 "2:\n" \ 203 "2:\n" \
202 ".section .fixup,\"ax\"\n" \ 204 ".section .fixup,\"ax\"\n" \
203 "3:li %0, %4\n" \ 205 "3:li %0, %4\n" \
206 "li %1, 0\n" \
204 "j 2b\n" \ 207 "j 2b\n" \
205 ".previous\n" \ 208 ".previous\n" \
206 ".section __ex_table,\"a\"\n" \ 209 ".section __ex_table,\"a\"\n" \
@@ -298,35 +301,34 @@ extern int __copy_tofrom_user(void *to, const void *from, unsigned long len);
 static inline unsigned long
 copy_from_user(void *to, const void *from, unsigned long len)
 {
-	unsigned long over;
+	unsigned long res = len;
 
-	if (access_ok(VERIFY_READ, from, len))
-		return __copy_tofrom_user(to, from, len);
+	if (likely(access_ok(VERIFY_READ, from, len)))
+		res = __copy_tofrom_user(to, from, len);
 
-	if ((unsigned long)from < TASK_SIZE) {
-		over = (unsigned long)from + len - TASK_SIZE;
-		return __copy_tofrom_user(to, from, len - over) + over;
-	}
-	return len;
+	if (unlikely(res))
+		memset(to + (len - res), 0, res);
+
+	return res;
 }
 
 static inline unsigned long
 copy_to_user(void *to, const void *from, unsigned long len)
 {
-	unsigned long over;
-
-	if (access_ok(VERIFY_WRITE, to, len))
-		return __copy_tofrom_user(to, from, len);
+	if (likely(access_ok(VERIFY_WRITE, to, len)))
+		len = __copy_tofrom_user(to, from, len);
 
-	if ((unsigned long)to < TASK_SIZE) {
-		over = (unsigned long)to + len - TASK_SIZE;
-		return __copy_tofrom_user(to, from, len - over) + over;
-	}
 	return len;
 }
 
-#define __copy_from_user(to, from, len)		\
-	__copy_tofrom_user((to), (from), (len))
+static inline unsigned long
+__copy_from_user(void *to, const void *from, unsigned long len)
+{
+	unsigned long left = __copy_tofrom_user(to, from, len);
+	if (unlikely(left))
+		memset(to + (len - left), 0, left);
+	return left;
+}
 
 #define __copy_to_user(to, from, len)		\
 	__copy_tofrom_user((to), (from), (len))
@@ -340,17 +342,17 @@ __copy_to_user_inatomic(void *to, const void *from, unsigned long len)
 static inline unsigned long
 __copy_from_user_inatomic(void *to, const void *from, unsigned long len)
 {
-	return __copy_from_user(to, from, len);
+	return __copy_tofrom_user(to, from, len);
 }
 
-#define __copy_in_user(to, from, len)	__copy_from_user(to, from, len)
+#define __copy_in_user(to, from, len)	__copy_tofrom_user(to, from, len)
 
 static inline unsigned long
 copy_in_user(void *to, const void *from, unsigned long len)
 {
 	if (access_ok(VERIFY_READ, from, len) &&
 	    access_ok(VERFITY_WRITE, to, len))
-		return copy_from_user(to, from, len);
+		return __copy_tofrom_user(to, from, len);
 }
 
 /*
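All of the copy_from_user() changes in this series follow one shape: when the copy faults or the access check refuses it, the uncopied tail of the kernel destination must be zeroed so stale kernel memory cannot leak back to the caller (the old score fallback instead copied just the in-range prefix and left the tail untouched). The same pattern repeats in the sh and sparc hunks below; a generic sketch, with raw_copy() standing in for the arch primitive that returns the number of bytes left uncopied:

    #include <string.h>

    extern unsigned long raw_copy(void *to, const void *from, unsigned long len);

    static unsigned long safe_copy_from_user(void *to, const void *from,
                                             unsigned long len, int ok)
    {
        unsigned long res = len;        /* assume nothing copied */

        if (ok)                         /* access_ok() passed */
            res = raw_copy(to, from, len);

        if (res)                        /* faulted: res bytes missing */
            memset((char *)to + (len - res), 0, res);

        return res;
    }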
diff --git a/arch/sh/include/asm/uaccess.h b/arch/sh/include/asm/uaccess.h
index a49635c51266..92ade79ac427 100644
--- a/arch/sh/include/asm/uaccess.h
+++ b/arch/sh/include/asm/uaccess.h
@@ -151,7 +151,10 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
 	__kernel_size_t __copy_size = (__kernel_size_t) n;
 
 	if (__copy_size && __access_ok(__copy_from, __copy_size))
-		return __copy_user(to, from, __copy_size);
+		__copy_size = __copy_user(to, from, __copy_size);
+
+	if (unlikely(__copy_size))
+		memset(to + (n - __copy_size), 0, __copy_size);
 
 	return __copy_size;
 }
diff --git a/arch/sh/include/asm/uaccess_64.h b/arch/sh/include/asm/uaccess_64.h
index c01376c76b86..ca5073dd4596 100644
--- a/arch/sh/include/asm/uaccess_64.h
+++ b/arch/sh/include/asm/uaccess_64.h
@@ -24,6 +24,7 @@
 #define __get_user_size(x,ptr,size,retval)			\
 do {								\
 	retval = 0;						\
+	x = 0;							\
 	switch (size) {						\
 	case 1:							\
 		retval = __get_user_asm_b((void *)&x,		\
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 546293d9e6c5..59b09600dd32 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -43,6 +43,7 @@ config SPARC
 	select OLD_SIGSUSPEND
 	select ARCH_HAS_SG_CHAIN
 	select CPU_NO_EFFICIENT_FFS
+	select HAVE_ARCH_HARDENED_USERCOPY
 
 config SPARC32
 	def_bool !64BIT
diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
index 57aca2792d29..ea55f86d7ccd 100644
--- a/arch/sparc/include/asm/uaccess_32.h
+++ b/arch/sparc/include/asm/uaccess_32.h
@@ -248,23 +248,28 @@ unsigned long __copy_user(void __user *to, const void __user *from, unsigned lon
 
 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
 {
-	if (n && __access_ok((unsigned long) to, n))
+	if (n && __access_ok((unsigned long) to, n)) {
+		check_object_size(from, n, true);
 		return __copy_user(to, (__force void __user *) from, n);
-	else
+	} else
 		return n;
 }
 
 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
+	check_object_size(from, n, true);
 	return __copy_user(to, (__force void __user *) from, n);
 }
 
 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-	if (n && __access_ok((unsigned long) from, n))
+	if (n && __access_ok((unsigned long) from, n)) {
+		check_object_size(to, n, false);
 		return __copy_user((__force void __user *) to, from, n);
-	else
+	} else {
+		memset(to, 0, n);
 		return n;
+	}
 }
 
 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
index e9a51d64974d..37a315d0ddd4 100644
--- a/arch/sparc/include/asm/uaccess_64.h
+++ b/arch/sparc/include/asm/uaccess_64.h
@@ -210,8 +210,11 @@ unsigned long copy_from_user_fixup(void *to, const void __user *from,
 static inline unsigned long __must_check
 copy_from_user(void *to, const void __user *from, unsigned long size)
 {
-	unsigned long ret = ___copy_from_user(to, from, size);
+	unsigned long ret;
 
+	check_object_size(to, size, false);
+
+	ret = ___copy_from_user(to, from, size);
 	if (unlikely(ret))
 		ret = copy_from_user_fixup(to, from, size);
 
@@ -227,8 +230,11 @@ unsigned long copy_to_user_fixup(void __user *to, const void *from,
 static inline unsigned long __must_check
 copy_to_user(void __user *to, const void *from, unsigned long size)
 {
-	unsigned long ret = ___copy_to_user(to, from, size);
+	unsigned long ret;
+
+	check_object_size(from, size, true);
 
+	ret = ___copy_to_user(to, from, size);
 	if (unlikely(ret))
 		ret = copy_to_user_fixup(to, from, size);
 	return ret;
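check_object_size() is the entry point of the HAVE_ARCH_HARDENED_USERCOPY feature being wired up for sparc here (and for s390 and x86 elsewhere in this merge): it vets the kernel-side buffer before the low-level copy runs, rejecting copies that straddle a slab object, the stack, or other disallowed regions. Roughly how the generic hook is shaped in this kernel (modeled on include/linux/thread_info.h; it compiles away when the option is off, and skips constant sizes, which the compile-time checks above already cover):

    #ifdef CONFIG_HARDENED_USERCOPY
    extern void __check_object_size(const void *ptr, unsigned long n,
                                    bool to_user);

    static __always_inline void check_object_size(const void *ptr, unsigned long n,
                                                  bool to_user)
    {
        /* Constant sizes are validated at compile time instead. */
        if (!__builtin_constant_p(n))
            __check_object_size(ptr, n, to_user);
    }
    #else
    static inline void check_object_size(const void *ptr, unsigned long n,
                                         bool to_user)
    { }
    #endif /* CONFIG_HARDENED_USERCOPY */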
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index 4820a02838ac..78da75b670bc 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
@@ -4,7 +4,6 @@
 config TILE
 	def_bool y
 	select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
-	select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
 	select ARCH_HAS_DEVMEM_IS_ALLOWED
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG
 	select ARCH_WANT_FRAME_POINTERS
diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
index 0a9c4265763b..a77369e91e54 100644
--- a/arch/tile/include/asm/uaccess.h
+++ b/arch/tile/include/asm/uaccess.h
@@ -416,14 +416,13 @@ _copy_from_user(void *to, const void __user *from, unsigned long n)
 	return n;
 }
 
-#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
-/*
- * There are still unprovable places in the generic code as of 2.6.34, so this
- * option is not really compatible with -Werror, which is more useful in
- * general.
- */
-extern void copy_from_user_overflow(void)
-	__compiletime_warning("copy_from_user() size is not provably correct");
+extern void __compiletime_error("usercopy buffer size is too small")
+__bad_copy_user(void);
+
+static inline void copy_user_overflow(int size, unsigned long count)
+{
+	WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
+}
 
 static inline unsigned long __must_check copy_from_user(void *to,
 					  const void __user *from,
@@ -433,14 +432,13 @@ static inline unsigned long __must_check copy_from_user(void *to,
 
 	if (likely(sz == -1 || sz >= n))
 		n = _copy_from_user(to, from, n);
+	else if (!__builtin_constant_p(n))
+		copy_user_overflow(sz, n);
 	else
-		copy_from_user_overflow();
+		__bad_copy_user();
 
 	return n;
 }
-#else
-#define copy_from_user _copy_from_user
-#endif
 
 #ifdef __tilegx__
 /**
diff --git a/arch/um/include/asm/common.lds.S b/arch/um/include/asm/common.lds.S
index 1dd5bd8a8c59..133055311dce 100644
--- a/arch/um/include/asm/common.lds.S
+++ b/arch/um/include/asm/common.lds.S
@@ -81,7 +81,7 @@
   .altinstr_replacement : { *(.altinstr_replacement) }
   /* .exit.text is discard at runtime, not link time, to deal with references
      from .altinstructions and .eh_frame */
-  .exit.text : { *(.exit.text) }
+  .exit.text : { EXIT_TEXT }
   .exit.data : { *(.exit.data) }
 
   .preinit_array : {
diff --git a/arch/um/kernel/skas/syscall.c b/arch/um/kernel/skas/syscall.c
index ef4b8f949b51..b783ac87d98a 100644
--- a/arch/um/kernel/skas/syscall.c
+++ b/arch/um/kernel/skas/syscall.c
@@ -21,21 +21,17 @@ void handle_syscall(struct uml_pt_regs *r)
 	PT_REGS_SET_SYSCALL_RETURN(regs, -ENOSYS);
 
 	if (syscall_trace_enter(regs))
-		return;
+		goto out;
 
 	/* Do the seccomp check after ptrace; failures should be fast. */
 	if (secure_computing(NULL) == -1)
-		return;
+		goto out;
 
-	/* Update the syscall number after orig_ax has potentially been updated
-	 * with ptrace.
-	 */
-	UPT_SYSCALL_NR(r) = PT_SYSCALL_NR(r->gp);
 	syscall = UPT_SYSCALL_NR(r);
-
 	if (syscall >= 0 && syscall <= __NR_syscall_max)
 		PT_REGS_SET_SYSCALL_RETURN(regs,
 				EXECUTE_SYSCALL(syscall, regs));
 
+out:
 	syscall_trace_leave(regs);
 }
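The point of the two goto conversions is pairing: syscall_trace_enter() tells the tracer a syscall began, so every exit path, including the ptrace and seccomp bail-outs, must still reach syscall_trace_leave(). A minimal sketch of the resulting control flow, with all helpers as stand-ins for the UML functions above:

    struct pt_regs;

    extern void set_return(struct pt_regs *regs, long val);  /* stand-in */
    extern int  trace_enter(struct pt_regs *regs);           /* stand-in */
    extern int  seccomp_reject(struct pt_regs *regs);        /* stand-in */
    extern void dispatch(struct pt_regs *regs);              /* stand-in */
    extern void trace_leave(struct pt_regs *regs);           /* stand-in */

    void handle_syscall_shape(struct pt_regs *regs)
    {
        set_return(regs, -38 /* -ENOSYS */);

        if (trace_enter(regs))
            goto out;       /* tracer changed or denied the call */

        if (seccomp_reject(regs))
            goto out;       /* seccomp filtered it */

        dispatch(regs);     /* run the actual syscall */
    out:
        trace_leave(regs);  /* always paired with trace_enter() */
    }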
diff --git a/arch/unicore32/include/asm/mmu_context.h b/arch/unicore32/include/asm/mmu_context.h
index e35632ef23c7..62dfc644c908 100644
--- a/arch/unicore32/include/asm/mmu_context.h
+++ b/arch/unicore32/include/asm/mmu_context.h
@@ -98,7 +98,7 @@ static inline void arch_bprm_mm_init(struct mm_struct *mm,
 }
 
 static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
-		bool write, bool foreign)
+		bool write, bool execute, bool foreign)
 {
 	/* by default, allow everything */
 	return true;
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 5c6e7471b732..2a1f0ce7c59a 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -24,7 +24,6 @@ config X86
 	select ARCH_DISCARD_MEMBLOCK
 	select ARCH_HAS_ACPI_TABLE_UPGRADE	if ACPI
 	select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
-	select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
 	select ARCH_HAS_DEVMEM_IS_ALLOWED
 	select ARCH_HAS_ELF_RANDOMIZE
 	select ARCH_HAS_FAST_MULTIPLIER
@@ -80,6 +79,7 @@ config X86
 	select HAVE_ALIGNED_STRUCT_PAGE		if SLUB
 	select HAVE_AOUT			if X86_32
 	select HAVE_ARCH_AUDITSYSCALL
+	select HAVE_ARCH_HARDENED_USERCOPY
 	select HAVE_ARCH_HUGE_VMAP		if X86_64 || X86_PAE
 	select HAVE_ARCH_JUMP_LABEL
 	select HAVE_ARCH_KASAN			if X86_64 && SPARSEMEM_VMEMMAP
@@ -91,6 +91,7 @@ config X86
 	select HAVE_ARCH_SOFT_DIRTY		if X86_64
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_ARCH_TRANSPARENT_HUGEPAGE
+	select HAVE_ARCH_WITHIN_STACK_FRAMES
 	select HAVE_EBPF_JIT			if X86_64
 	select HAVE_CC_STACKPROTECTOR
 	select HAVE_CMPXCHG_DOUBLE
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
index ff574dad95cc..94dd4a31f5b3 100644
--- a/arch/x86/boot/compressed/eboot.c
+++ b/arch/x86/boot/compressed/eboot.c
@@ -1004,79 +1004,87 @@ static efi_status_t alloc_e820ext(u32 nr_desc, struct setup_data **e820ext,
 	return status;
 }
 
-static efi_status_t exit_boot(struct boot_params *boot_params,
-			      void *handle, bool is64)
-{
-	struct efi_info *efi = &boot_params->efi_info;
-	unsigned long map_sz, key, desc_size;
-	efi_memory_desc_t *mem_map;
+struct exit_boot_struct {
+	struct boot_params *boot_params;
+	struct efi_info *efi;
 	struct setup_data *e820ext;
-	const char *signature;
 	__u32 e820ext_size;
-	__u32 nr_desc, prev_nr_desc;
-	efi_status_t status;
-	__u32 desc_version;
-	bool called_exit = false;
-	u8 nr_entries;
-	int i;
-
-	nr_desc = 0;
-	e820ext = NULL;
-	e820ext_size = 0;
-
-get_map:
-	status = efi_get_memory_map(sys_table, &mem_map, &map_sz, &desc_size,
-				    &desc_version, &key);
-
-	if (status != EFI_SUCCESS)
-		return status;
-
-	prev_nr_desc = nr_desc;
-	nr_desc = map_sz / desc_size;
-	if (nr_desc > prev_nr_desc &&
-	    nr_desc > ARRAY_SIZE(boot_params->e820_map)) {
-		u32 nr_e820ext = nr_desc - ARRAY_SIZE(boot_params->e820_map);
-
-		status = alloc_e820ext(nr_e820ext, &e820ext, &e820ext_size);
-		if (status != EFI_SUCCESS)
-			goto free_mem_map;
+	bool is64;
+};
 
-		efi_call_early(free_pool, mem_map);
-		goto get_map; /* Allocated memory, get map again */
+static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
+				   struct efi_boot_memmap *map,
+				   void *priv)
+{
+	static bool first = true;
+	const char *signature;
+	__u32 nr_desc;
+	efi_status_t status;
+	struct exit_boot_struct *p = priv;
+
+	if (first) {
+		nr_desc = *map->buff_size / *map->desc_size;
+		if (nr_desc > ARRAY_SIZE(p->boot_params->e820_map)) {
+			u32 nr_e820ext = nr_desc -
+				ARRAY_SIZE(p->boot_params->e820_map);
+
+			status = alloc_e820ext(nr_e820ext, &p->e820ext,
+					       &p->e820ext_size);
+			if (status != EFI_SUCCESS)
+				return status;
+		}
+		first = false;
 	}
 
-	signature = is64 ? EFI64_LOADER_SIGNATURE : EFI32_LOADER_SIGNATURE;
-	memcpy(&efi->efi_loader_signature, signature, sizeof(__u32));
+	signature = p->is64 ? EFI64_LOADER_SIGNATURE : EFI32_LOADER_SIGNATURE;
+	memcpy(&p->efi->efi_loader_signature, signature, sizeof(__u32));
 
-	efi->efi_systab = (unsigned long)sys_table;
-	efi->efi_memdesc_size = desc_size;
-	efi->efi_memdesc_version = desc_version;
-	efi->efi_memmap = (unsigned long)mem_map;
-	efi->efi_memmap_size = map_sz;
+	p->efi->efi_systab = (unsigned long)sys_table_arg;
+	p->efi->efi_memdesc_size = *map->desc_size;
+	p->efi->efi_memdesc_version = *map->desc_ver;
+	p->efi->efi_memmap = (unsigned long)*map->map;
+	p->efi->efi_memmap_size = *map->map_size;
 
 #ifdef CONFIG_X86_64
-	efi->efi_systab_hi = (unsigned long)sys_table >> 32;
-	efi->efi_memmap_hi = (unsigned long)mem_map >> 32;
+	p->efi->efi_systab_hi = (unsigned long)sys_table_arg >> 32;
+	p->efi->efi_memmap_hi = (unsigned long)*map->map >> 32;
 #endif
 
+	return EFI_SUCCESS;
+}
+
+static efi_status_t exit_boot(struct boot_params *boot_params,
+			      void *handle, bool is64)
+{
+	unsigned long map_sz, key, desc_size, buff_size;
+	efi_memory_desc_t *mem_map;
+	struct setup_data *e820ext;
+	__u32 e820ext_size;
+	efi_status_t status;
+	__u32 desc_version;
+	struct efi_boot_memmap map;
+	struct exit_boot_struct priv;
+
+	map.map = &mem_map;
+	map.map_size = &map_sz;
+	map.desc_size = &desc_size;
+	map.desc_ver = &desc_version;
+	map.key_ptr = &key;
+	map.buff_size = &buff_size;
+	priv.boot_params = boot_params;
+	priv.efi = &boot_params->efi_info;
+	priv.e820ext = NULL;
+	priv.e820ext_size = 0;
+	priv.is64 = is64;
+
 	/* Might as well exit boot services now */
-	status = efi_call_early(exit_boot_services, handle, key);
-	if (status != EFI_SUCCESS) {
-		/*
-		 * ExitBootServices() will fail if any of the event
-		 * handlers change the memory map. In which case, we
-		 * must be prepared to retry, but only once so that
-		 * we're guaranteed to exit on repeated failures instead
-		 * of spinning forever.
-		 */
-		if (called_exit)
-			goto free_mem_map;
-
-		called_exit = true;
-		efi_call_early(free_pool, mem_map);
-		goto get_map;
-	}
+	status = efi_exit_boot_services(sys_table, handle, &map, &priv,
+					exit_boot_func);
+	if (status != EFI_SUCCESS)
+		return status;
 
+	e820ext = priv.e820ext;
+	e820ext_size = priv.e820ext_size;
 	/* Historic? */
 	boot_params->alt_mem_k = 32 * 1024;
 
@@ -1085,10 +1093,6 @@ get_map:
 		return status;
 
 	return EFI_SUCCESS;
-
-free_mem_map:
-	efi_call_early(free_pool, mem_map);
-	return status;
 }
 
 /*
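The refactor replaces the stub's open-coded get-map/ExitBootServices/retry loop with the shared libstub helper efi_exit_boot_services(), passing caller state through a priv struct and a callback (exit_boot_func) that records the final memory map into boot_params. The contract, sketched with stand-in types and names (the real helper may re-read the map and invoke the callback again when the firmware's map key has gone stale):

    struct memmap { void *map; unsigned long size, desc_size, key; };

    typedef int (*exit_boot_cb)(void *sys_table, struct memmap *map, void *priv);

    extern int read_memory_map(struct memmap *map);              /* stand-in */
    extern int fw_exit_boot_services(void *handle, unsigned long key);

    int exit_boot_services_sketch(void *sys_table, void *handle,
                                  struct memmap *map, void *priv,
                                  exit_boot_cb record)
    {
        int status, tries = 0;

        do {
            status = read_memory_map(map);
            if (status)
                return status;

            status = record(sys_table, map, priv); /* caller snapshots the map */
            if (status)
                return status;

            status = fw_exit_boot_services(handle, map->key);
        } while (status && ++tries < 2);           /* stale map key: retry once */

        return status;
    }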
diff --git a/arch/x86/configs/tiny.config b/arch/x86/configs/tiny.config
index 4e2ecfa23c15..4b429df40d7a 100644
--- a/arch/x86/configs/tiny.config
+++ b/arch/x86/configs/tiny.config
@@ -1 +1,3 @@
 CONFIG_NOHIGHMEM=y
+# CONFIG_HIGHMEM4G is not set
+# CONFIG_HIGHMEM64G is not set
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb.c b/arch/x86/crypto/sha256-mb/sha256_mb.c
index 89fa85e8b10c..6f97fb33ae21 100644
--- a/arch/x86/crypto/sha256-mb/sha256_mb.c
+++ b/arch/x86/crypto/sha256-mb/sha256_mb.c
@@ -485,10 +485,10 @@ static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
 
 		req = cast_mcryptd_ctx_to_req(req_ctx);
 		if (irqs_disabled())
-			rctx->complete(&req->base, ret);
+			req_ctx->complete(&req->base, ret);
 		else {
 			local_bh_disable();
-			rctx->complete(&req->base, ret);
+			req_ctx->complete(&req->base, ret);
 			local_bh_enable();
 		}
 	}
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
index b691da981cd9..a78a0694ddef 100644
--- a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
+++ b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
@@ -265,13 +265,14 @@ ENTRY(sha256_mb_mgr_get_comp_job_avx2)
 	vpinsrd	$1, _args_digest+1*32(state, idx, 4), %xmm0, %xmm0
 	vpinsrd	$2, _args_digest+2*32(state, idx, 4), %xmm0, %xmm0
 	vpinsrd	$3, _args_digest+3*32(state, idx, 4), %xmm0, %xmm0
-	movl	_args_digest+4*32(state, idx, 4), tmp2_w
+	vmovd	_args_digest(state , idx, 4) , %xmm0
 	vpinsrd	$1, _args_digest+5*32(state, idx, 4), %xmm1, %xmm1
 	vpinsrd	$2, _args_digest+6*32(state, idx, 4), %xmm1, %xmm1
 	vpinsrd	$3, _args_digest+7*32(state, idx, 4), %xmm1, %xmm1
 
 	vmovdqu	%xmm0, _result_digest(job_rax)
-	movl	tmp2_w, _result_digest+1*16(job_rax)
+	offset = (_result_digest + 1*16)
+	vmovdqu	%xmm1, offset(job_rax)
 
 	pop	%rbx
 
diff --git a/arch/x86/crypto/sha512-mb/sha512_mb.c b/arch/x86/crypto/sha512-mb/sha512_mb.c
index f4cf5b78fd36..d210174a52b0 100644
--- a/arch/x86/crypto/sha512-mb/sha512_mb.c
+++ b/arch/x86/crypto/sha512-mb/sha512_mb.c
@@ -497,10 +497,10 @@ static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
 
 		req = cast_mcryptd_ctx_to_req(req_ctx);
 		if (irqs_disabled())
-			rctx->complete(&req->base, ret);
+			req_ctx->complete(&req->base, ret);
 		else {
 			local_bh_disable();
-			rctx->complete(&req->base, ret);
+			req_ctx->complete(&req->base, ret);
 			local_bh_enable();
 		}
 	}
diff --git a/arch/x86/entry/Makefile b/arch/x86/entry/Makefile
index fe91c25092da..77f28ce9c646 100644
--- a/arch/x86/entry/Makefile
+++ b/arch/x86/entry/Makefile
@@ -5,6 +5,8 @@
 OBJECT_FILES_NON_STANDARD_entry_$(BITS).o   := y
 OBJECT_FILES_NON_STANDARD_entry_64_compat.o := y
 
+CFLAGS_syscall_64.o		+= -Wno-override-init
+CFLAGS_syscall_32.o		+= -Wno-override-init
 obj-y				:= entry_$(BITS).o thunk_$(BITS).o syscall_$(BITS).o
 obj-y				+= common.o
 
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index b846875aeea6..d172c619c449 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -288,11 +288,15 @@ return_from_SYSCALL_64:
 	jne	opportunistic_sysret_failed
 
 	/*
-	 * SYSRET can't restore RF.  SYSRET can restore TF, but unlike IRET,
-	 * restoring TF results in a trap from userspace immediately after
-	 * SYSRET.  This would cause an infinite loop whenever #DB happens
-	 * with register state that satisfies the opportunistic SYSRET
-	 * conditions.  For example, single-stepping this user code:
+	 * SYSCALL clears RF when it saves RFLAGS in R11 and SYSRET cannot
+	 * restore RF properly. If the slowpath sets it for whatever reason, we
+	 * need to restore it correctly.
+	 *
+	 * SYSRET can restore TF, but unlike IRET, restoring TF results in a
+	 * trap from userspace immediately after SYSRET.  This would cause an
+	 * infinite loop whenever #DB happens with register state that satisfies
+	 * the opportunistic SYSRET conditions.  For example, single-stepping
+	 * this user code:
 	 *
 	 *           movq	$stuck_here, %rcx
 	 *           pushfq
@@ -601,9 +605,20 @@ apicinterrupt3 \num trace(\sym) smp_trace(\sym)
 .endm
 #endif
 
+/* Make sure APIC interrupt handlers end up in the irqentry section: */
+#if defined(CONFIG_FUNCTION_GRAPH_TRACER) || defined(CONFIG_KASAN)
+# define PUSH_SECTION_IRQENTRY	.pushsection .irqentry.text, "ax"
+# define POP_SECTION_IRQENTRY	.popsection
+#else
+# define PUSH_SECTION_IRQENTRY
+# define POP_SECTION_IRQENTRY
+#endif
+
 .macro apicinterrupt num sym do_sym
+PUSH_SECTION_IRQENTRY
 apicinterrupt3 \num \sym \do_sym
 trace_apicinterrupt \num \sym
+POP_SECTION_IRQENTRY
 .endm
 
 #ifdef CONFIG_SMP
diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
index e07a22bb9308..f5f4b3fbbbc2 100644
--- a/arch/x86/events/amd/core.c
+++ b/arch/x86/events/amd/core.c
@@ -119,8 +119,8 @@ static const u64 amd_perfmon_event_map[PERF_COUNT_HW_MAX] =
 {
 	[PERF_COUNT_HW_CPU_CYCLES]		= 0x0076,
 	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
-	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x0080,
-	[PERF_COUNT_HW_CACHE_MISSES]		= 0x0081,
+	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x077d,
+	[PERF_COUNT_HW_CACHE_MISSES]		= 0x077e,
 	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c2,
 	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c3,
 	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= 0x00d0, /* "Decoder empty" event */
diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c
index e6131d4454e6..65577f081d07 100644
--- a/arch/x86/events/amd/uncore.c
+++ b/arch/x86/events/amd/uncore.c
@@ -29,6 +29,8 @@
 
 #define COUNTER_SHIFT		16
 
+static HLIST_HEAD(uncore_unused_list);
+
 struct amd_uncore {
 	int id;
 	int refcnt;
@@ -39,7 +41,7 @@ struct amd_uncore {
 	cpumask_t *active_mask;
 	struct pmu *pmu;
 	struct perf_event *events[MAX_COUNTERS];
-	struct amd_uncore *free_when_cpu_online;
+	struct hlist_node node;
 };
 
 static struct amd_uncore * __percpu *amd_uncore_nb;
@@ -306,6 +308,7 @@ static int amd_uncore_cpu_up_prepare(unsigned int cpu)
 		uncore_nb->msr_base = MSR_F15H_NB_PERF_CTL;
 		uncore_nb->active_mask = &amd_nb_active_mask;
 		uncore_nb->pmu = &amd_nb_pmu;
+		uncore_nb->id = -1;
 		*per_cpu_ptr(amd_uncore_nb, cpu) = uncore_nb;
 	}
 
@@ -319,6 +322,7 @@ static int amd_uncore_cpu_up_prepare(unsigned int cpu)
 		uncore_l2->msr_base = MSR_F16H_L2I_PERF_CTL;
 		uncore_l2->active_mask = &amd_l2_active_mask;
 		uncore_l2->pmu = &amd_l2_pmu;
+		uncore_l2->id = -1;
 		*per_cpu_ptr(amd_uncore_l2, cpu) = uncore_l2;
 	}
 
@@ -348,7 +352,7 @@ amd_uncore_find_online_sibling(struct amd_uncore *this,
 			continue;
 
 		if (this->id == that->id) {
-			that->free_when_cpu_online = this;
+			hlist_add_head(&this->node, &uncore_unused_list);
 			this = that;
 			break;
 		}
@@ -388,13 +392,23 @@ static int amd_uncore_cpu_starting(unsigned int cpu)
 	return 0;
 }
 
+static void uncore_clean_online(void)
+{
+	struct amd_uncore *uncore;
+	struct hlist_node *n;
+
+	hlist_for_each_entry_safe(uncore, n, &uncore_unused_list, node) {
+		hlist_del(&uncore->node);
+		kfree(uncore);
+	}
+}
+
 static void uncore_online(unsigned int cpu,
 			  struct amd_uncore * __percpu *uncores)
 {
 	struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);
 
-	kfree(uncore->free_when_cpu_online);
-	uncore->free_when_cpu_online = NULL;
+	uncore_clean_online();
 
 	if (cpu == uncore->cpu)
 		cpumask_set_cpu(cpu, uncore->active_mask);
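The old scheme kept at most one to-be-freed sibling per uncore (free_when_cpu_online), which could be overwritten and leak when several CPUs of one node prepared uncores before any came online; parking every loser of the sibling merge on a list makes the reaping unconditional. The idea, reduced to plain C with a singly linked list standing in for the kernel hlist:

    #include <stdlib.h>

    struct uncore { int id; struct uncore *next; };
    static struct uncore *unused_list;

    static void park_unused(struct uncore *u)
    {
        u->next = unused_list;      /* hlist_add_head() in the patch */
        unused_list = u;
    }

    static void reap_unused(void)   /* uncore_clean_online() in the patch */
    {
        while (unused_list) {
            struct uncore *u = unused_list;
            unused_list = u->next;
            free(u);
        }
    }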
diff --git a/arch/x86/events/intel/bts.c b/arch/x86/events/intel/bts.c
index 0a6e393a2e62..bdcd6510992c 100644
--- a/arch/x86/events/intel/bts.c
+++ b/arch/x86/events/intel/bts.c
@@ -31,7 +31,17 @@
 struct bts_ctx {
 	struct perf_output_handle	handle;
 	struct debug_store		ds_back;
-	int				started;
+	int				state;
+};
+
+/* BTS context states: */
+enum {
+	/* no ongoing AUX transactions */
+	BTS_STATE_STOPPED = 0,
+	/* AUX transaction is on, BTS tracing is disabled */
+	BTS_STATE_INACTIVE,
+	/* AUX transaction is on, BTS tracing is running */
+	BTS_STATE_ACTIVE,
 };
 
 static DEFINE_PER_CPU(struct bts_ctx, bts_ctx);
@@ -204,6 +214,15 @@ static void bts_update(struct bts_ctx *bts)
 static int
 bts_buffer_reset(struct bts_buffer *buf, struct perf_output_handle *handle);
 
+/*
+ * Ordering PMU callbacks wrt themselves and the PMI is done by means
+ * of bts::state, which:
+ *  - is set when bts::handle::event is valid, that is, between
+ *    perf_aux_output_begin() and perf_aux_output_end();
+ *  - is zero otherwise;
+ *  - is ordered against bts::handle::event with a compiler barrier.
+ */
+
 static void __bts_event_start(struct perf_event *event)
 {
 	struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
@@ -221,10 +240,13 @@ static void __bts_event_start(struct perf_event *event)
 
 	/*
 	 * local barrier to make sure that ds configuration made it
-	 * before we enable BTS
+	 * before we enable BTS and bts::state goes ACTIVE
 	 */
 	wmb();
 
+	/* INACTIVE/STOPPED -> ACTIVE */
+	WRITE_ONCE(bts->state, BTS_STATE_ACTIVE);
+
 	intel_pmu_enable_bts(config);
 
 }
@@ -251,9 +273,6 @@ static void bts_event_start(struct perf_event *event, int flags)
 
 	__bts_event_start(event);
 
-	/* PMI handler: this counter is running and likely generating PMIs */
-	ACCESS_ONCE(bts->started) = 1;
-
 	return;
 
 fail_end_stop:
@@ -263,30 +282,34 @@ fail_stop:
 	event->hw.state = PERF_HES_STOPPED;
 }
 
-static void __bts_event_stop(struct perf_event *event)
+static void __bts_event_stop(struct perf_event *event, int state)
 {
+	struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
+
+	/* ACTIVE -> INACTIVE(PMI)/STOPPED(->stop()) */
+	WRITE_ONCE(bts->state, state);
+
 	/*
 	 * No extra synchronization is mandated by the documentation to have
 	 * BTS data stores globally visible.
 	 */
 	intel_pmu_disable_bts();
-
-	if (event->hw.state & PERF_HES_STOPPED)
-		return;
-
-	ACCESS_ONCE(event->hw.state) |= PERF_HES_STOPPED;
 }
 
 static void bts_event_stop(struct perf_event *event, int flags)
 {
 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
-	struct bts_buffer *buf = perf_get_aux(&bts->handle);
+	struct bts_buffer *buf = NULL;
+	int state = READ_ONCE(bts->state);
 
-	/* PMI handler: don't restart this counter */
-	ACCESS_ONCE(bts->started) = 0;
+	if (state == BTS_STATE_ACTIVE)
+		__bts_event_stop(event, BTS_STATE_STOPPED);
 
-	__bts_event_stop(event);
+	if (state != BTS_STATE_STOPPED)
+		buf = perf_get_aux(&bts->handle);
+
+	event->hw.state |= PERF_HES_STOPPED;
 
 	if (flags & PERF_EF_UPDATE) {
 		bts_update(bts);
@@ -296,6 +319,7 @@ static void bts_event_stop(struct perf_event *event, int flags)
 			bts->handle.head =
 				local_xchg(&buf->data_size,
 					   buf->nr_pages << PAGE_SHIFT);
+
 			perf_aux_output_end(&bts->handle, local_xchg(&buf->data_size, 0),
 					    !!local_xchg(&buf->lost, 0));
 		}
@@ -310,8 +334,20 @@ static void bts_event_stop(struct perf_event *event, int flags)
 void intel_bts_enable_local(void)
 {
 	struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
+	int state = READ_ONCE(bts->state);
+
+	/*
+	 * Here we transition from INACTIVE to ACTIVE;
+	 * if we instead are STOPPED from the interrupt handler,
+	 * stay that way. Can't be ACTIVE here though.
+	 */
+	if (WARN_ON_ONCE(state == BTS_STATE_ACTIVE))
+		return;
+
+	if (state == BTS_STATE_STOPPED)
+		return;
 
-	if (bts->handle.event && bts->started)
+	if (bts->handle.event)
 		__bts_event_start(bts->handle.event);
 }
 
@@ -319,8 +355,15 @@ void intel_bts_disable_local(void)
 {
 	struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
 
+	/*
+	 * Here we transition from ACTIVE to INACTIVE;
+	 * do nothing for STOPPED or INACTIVE.
+	 */
+	if (READ_ONCE(bts->state) != BTS_STATE_ACTIVE)
+		return;
+
 	if (bts->handle.event)
-		__bts_event_stop(bts->handle.event);
+		__bts_event_stop(bts->handle.event, BTS_STATE_INACTIVE);
 }
 
 static int
@@ -335,8 +378,6 @@ bts_buffer_reset(struct bts_buffer *buf, struct perf_output_handle *handle)
 		return 0;
 
 	head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1);
-	if (WARN_ON_ONCE(head != local_read(&buf->head)))
-		return -EINVAL;
 
 	phys = &buf->buf[buf->cur_buf];
 	space = phys->offset + phys->displacement + phys->size - head;
@@ -403,22 +444,37 @@ bts_buffer_reset(struct bts_buffer *buf, struct perf_output_handle *handle)
 
 int intel_bts_interrupt(void)
 {
+	struct debug_store *ds = this_cpu_ptr(&cpu_hw_events)->ds;
 	struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
 	struct perf_event *event = bts->handle.event;
 	struct bts_buffer *buf;
 	s64 old_head;
-	int err;
+	int err = -ENOSPC, handled = 0;
 
-	if (!event || !bts->started)
-		return 0;
+	/*
+	 * The only surefire way of knowing if this NMI is ours is by checking
+	 * the write ptr against the PMI threshold.
+	 */
+	if (ds->bts_index >= ds->bts_interrupt_threshold)
+		handled = 1;
+
+	/*
+	 * this is wrapped in intel_bts_enable_local/intel_bts_disable_local,
+	 * so we can only be INACTIVE or STOPPED
+	 */
+	if (READ_ONCE(bts->state) == BTS_STATE_STOPPED)
+		return handled;
 
 	buf = perf_get_aux(&bts->handle);
+	if (!buf)
+		return handled;
+
 	/*
 	 * Skip snapshot counters: they don't use the interrupt, but
 	 * there's no other way of telling, because the pointer will
 	 * keep moving
 	 */
-	if (!buf || buf->snapshot)
+	if (buf->snapshot)
 		return 0;
 
 	old_head = local_read(&buf->head);
@@ -426,18 +482,27 @@ int intel_bts_interrupt(void)
 
 	/* no new data */
 	if (old_head == local_read(&buf->head))
-		return 0;
+		return handled;
 
 	perf_aux_output_end(&bts->handle, local_xchg(&buf->data_size, 0),
 			    !!local_xchg(&buf->lost, 0));
 
 	buf = perf_aux_output_begin(&bts->handle, event);
-	if (!buf)
-		return 1;
+	if (buf)
+		err = bts_buffer_reset(buf, &bts->handle);
+
+	if (err) {
+		WRITE_ONCE(bts->state, BTS_STATE_STOPPED);
 
-	err = bts_buffer_reset(buf, &bts->handle);
-	if (err)
-		perf_aux_output_end(&bts->handle, 0, false);
+		if (buf) {
+			/*
+			 * BTS_STATE_STOPPED should be visible before
+			 * cleared handle::event
+			 */
+			barrier();
+			perf_aux_output_end(&bts->handle, 0, false);
+		}
+	}
 
 	return 1;
 }
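The driver's implicit started flag becomes an explicit three-state machine so the NMI handler, the PMI enable/disable hooks, and ->stop() can coordinate without locks; every transition goes through WRITE_ONCE()/READ_ONCE(), with a compiler barrier ordering state against handle::event. A summary of the transitions as they appear in the hunks above:

    /* BTS state machine introduced by this patch:
     *
     *   STOPPED  -- bts_event_start() --------------------> ACTIVE
     *   ACTIVE   -- intel_bts_disable_local() (PMI entry) -> INACTIVE
     *   INACTIVE -- intel_bts_enable_local() (PMI exit) ---> ACTIVE
     *   ACTIVE   -- bts_event_stop() ----------------------> STOPPED
     *   INACTIVE -- buffer error in intel_bts_interrupt() -> STOPPED
     */
    enum bts_state { BTS_STATE_STOPPED = 0, BTS_STATE_INACTIVE, BTS_STATE_ACTIVE };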
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 2cbde2f449aa..4c9a79b9cd69 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -1730,9 +1730,11 @@ static __initconst const u64 knl_hw_cache_extra_regs
  * disabled state if called consecutively.
  *
  * During consecutive calls, the same disable value will be written to related
- * registers, so the PMU state remains unchanged. hw.state in
- * intel_bts_disable_local will remain PERF_HES_STOPPED too in consecutive
- * calls.
+ * registers, so the PMU state remains unchanged.
+ *
+ * intel_bts events don't coexist with intel PMU's BTS events because of
+ * x86_add_exclusive(x86_lbr_exclusive_lbr); there's no need to keep them
+ * disabled around intel PMU's event batching etc, only inside the PMI handler.
  */
 static void __intel_pmu_disable_all(void)
 {
@@ -1742,8 +1744,6 @@ static void __intel_pmu_disable_all(void)
 
 	if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
 		intel_pmu_disable_bts();
-	else
-		intel_bts_disable_local();
 
 	intel_pmu_pebs_disable_all();
 }
@@ -1771,8 +1771,7 @@ static void __intel_pmu_enable_all(int added, bool pmi)
1771 return; 1771 return;
1772 1772
1773 intel_pmu_enable_bts(event->hw.config); 1773 intel_pmu_enable_bts(event->hw.config);
1774 } else 1774 }
1775 intel_bts_enable_local();
1776} 1775}
1777 1776
1778static void intel_pmu_enable_all(int added) 1777static void intel_pmu_enable_all(int added)
@@ -2073,6 +2072,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
2073 */ 2072 */
2074 if (!x86_pmu.late_ack) 2073 if (!x86_pmu.late_ack)
2075 apic_write(APIC_LVTPC, APIC_DM_NMI); 2074 apic_write(APIC_LVTPC, APIC_DM_NMI);
2075 intel_bts_disable_local();
2076 __intel_pmu_disable_all(); 2076 __intel_pmu_disable_all();
2077 handled = intel_pmu_drain_bts_buffer(); 2077 handled = intel_pmu_drain_bts_buffer();
2078 handled += intel_bts_interrupt(); 2078 handled += intel_bts_interrupt();
@@ -2172,6 +2172,7 @@ done:
2172 /* Only restore PMU state when it's active. See x86_pmu_disable(). */ 2172 /* Only restore PMU state when it's active. See x86_pmu_disable(). */
2173 if (cpuc->enabled) 2173 if (cpuc->enabled)
2174 __intel_pmu_enable_all(0, true); 2174 __intel_pmu_enable_all(0, true);
2175 intel_bts_enable_local();
2175 2176
2176 /* 2177 /*
2177 * Only unmask the NMI after the overflow counters 2178 * Only unmask the NMI after the overflow counters
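
The core.c hunks move the BTS local disable/enable out of the generic __intel_pmu_disable_all()/__intel_pmu_enable_all() paths and bracket only the PMI handler with them. A hedged sketch of the resulting handler shape, with the overflow processing elided; the call order is the point, not the body:

static int pmi_handler_shape(void)
{
	int handled = 0;

	intel_bts_disable_local();      /* quiesce BTS for the handler only */
	__intel_pmu_disable_all();

	handled += intel_pmu_drain_bts_buffer();
	handled += intel_bts_interrupt();
	/* ... counter overflow processing ... */

	__intel_pmu_enable_all(0, true);
	intel_bts_enable_local();       /* re-arm BTS on the way out */
	return handled;
}
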
diff --git a/arch/x86/events/intel/cqm.c b/arch/x86/events/intel/cqm.c
index 783c49ddef29..8f82b02934fa 100644
--- a/arch/x86/events/intel/cqm.c
+++ b/arch/x86/events/intel/cqm.c
@@ -458,6 +458,11 @@ static void __intel_cqm_event_count(void *info);
458static void init_mbm_sample(u32 rmid, u32 evt_type); 458static void init_mbm_sample(u32 rmid, u32 evt_type);
459static void __intel_mbm_event_count(void *info); 459static void __intel_mbm_event_count(void *info);
460 460
461static bool is_cqm_event(int e)
462{
463 return (e == QOS_L3_OCCUP_EVENT_ID);
464}
465
461static bool is_mbm_event(int e) 466static bool is_mbm_event(int e)
462{ 467{
463 return (e >= QOS_MBM_TOTAL_EVENT_ID && e <= QOS_MBM_LOCAL_EVENT_ID); 468 return (e >= QOS_MBM_TOTAL_EVENT_ID && e <= QOS_MBM_LOCAL_EVENT_ID);
@@ -1366,6 +1371,10 @@ static int intel_cqm_event_init(struct perf_event *event)
1366 (event->attr.config > QOS_MBM_LOCAL_EVENT_ID)) 1371 (event->attr.config > QOS_MBM_LOCAL_EVENT_ID))
1367 return -EINVAL; 1372 return -EINVAL;
1368 1373
1374 if ((is_cqm_event(event->attr.config) && !cqm_enabled) ||
1375 (is_mbm_event(event->attr.config) && !mbm_enabled))
1376 return -EINVAL;
1377
1369 /* unsupported modes and filters */ 1378 /* unsupported modes and filters */
1370 if (event->attr.exclude_user || 1379 if (event->attr.exclude_user ||
1371 event->attr.exclude_kernel || 1380 event->attr.exclude_kernel ||
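
The new is_cqm_event() helper mirrors is_mbm_event(), and event init now rejects an event whose feature is not available on the system. A compact runnable model of the gate; the event IDs follow the driver's values, and cqm_enabled/mbm_enabled stand in for the driver's globals:

#include <stdbool.h>

#define QOS_L3_OCCUP_EVENT_ID  0x01
#define QOS_MBM_TOTAL_EVENT_ID 0x02
#define QOS_MBM_LOCAL_EVENT_ID 0x03

static bool event_config_supported(int cfg, bool cqm_enabled, bool mbm_enabled)
{
	bool cqm = (cfg == QOS_L3_OCCUP_EVENT_ID);
	bool mbm = (cfg >= QOS_MBM_TOTAL_EVENT_ID &&
		    cfg <= QOS_MBM_LOCAL_EVENT_ID);

	/* the driver returns -EINVAL where this returns false */
	return !((cqm && !cqm_enabled) || (mbm && !mbm_enabled));
}
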
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index 7ce9f3f669e6..9b983a474253 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -1274,18 +1274,18 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
1274 struct pebs_record_nhm *p = at; 1274 struct pebs_record_nhm *p = at;
1275 u64 pebs_status; 1275 u64 pebs_status;
1276 1276
1277 /* PEBS v3 has accurate status bits */ 1277 pebs_status = p->status & cpuc->pebs_enabled;
1278 pebs_status &= (1ULL << x86_pmu.max_pebs_events) - 1;
1279
1280 /* PEBS v3 has more accurate status bits */
1278 if (x86_pmu.intel_cap.pebs_format >= 3) { 1281 if (x86_pmu.intel_cap.pebs_format >= 3) {
1279 for_each_set_bit(bit, (unsigned long *)&p->status, 1282 for_each_set_bit(bit, (unsigned long *)&pebs_status,
1280 MAX_PEBS_EVENTS) 1283 x86_pmu.max_pebs_events)
1281 counts[bit]++; 1284 counts[bit]++;
1282 1285
1283 continue; 1286 continue;
1284 } 1287 }
1285 1288
1286 pebs_status = p->status & cpuc->pebs_enabled;
1287 pebs_status &= (1ULL << x86_pmu.max_pebs_events) - 1;
1288
1289 /* 1289 /*
1290 * On some CPUs the PEBS status can be zero when PEBS is 1290 * On some CPUs the PEBS status can be zero when PEBS is
1291 * racing with clearing of GLOBAL_STATUS. 1291 * racing with clearing of GLOBAL_STATUS.
@@ -1333,8 +1333,11 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
1333 continue; 1333 continue;
1334 1334
1335 event = cpuc->events[bit]; 1335 event = cpuc->events[bit];
1336 WARN_ON_ONCE(!event); 1336 if (WARN_ON_ONCE(!event))
1337 WARN_ON_ONCE(!event->attr.precise_ip); 1337 continue;
1338
1339 if (WARN_ON_ONCE(!event->attr.precise_ip))
1340 continue;
1338 1341
1339 /* log dropped samples number */ 1342 /* log dropped samples number */
1340 if (error[bit]) 1343 if (error[bit])
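
The drain loop now masks p->status with the enabled-PEBS counters before walking set bits, on both the v3 and pre-v3 paths, so stale status bits for counters without PEBS enabled can no longer be counted. A runnable model of the shared masking step:

#include <stdint.h>

static void count_pebs_hits(uint64_t status, uint64_t pebs_enabled,
			    unsigned int max_pebs_events, unsigned int *counts)
{
	/* keep only bits for counters that actually have PEBS enabled */
	uint64_t pebs_status = status & pebs_enabled;

	pebs_status &= (1ULL << max_pebs_events) - 1;

	for (unsigned int bit = 0; bit < max_pebs_events; bit++)
		if (pebs_status & (1ULL << bit))
			counts[bit]++;
}
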
diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c
index 04bb5fb5a8d7..861a7d9cb60f 100644
--- a/arch/x86/events/intel/pt.c
+++ b/arch/x86/events/intel/pt.c
@@ -1074,6 +1074,11 @@ static void pt_addr_filters_fini(struct perf_event *event)
1074 event->hw.addr_filters = NULL; 1074 event->hw.addr_filters = NULL;
1075} 1075}
1076 1076
1077static inline bool valid_kernel_ip(unsigned long ip)
1078{
1079 return virt_addr_valid(ip) && kernel_ip(ip);
1080}
1081
1077static int pt_event_addr_filters_validate(struct list_head *filters) 1082static int pt_event_addr_filters_validate(struct list_head *filters)
1078{ 1083{
1079 struct perf_addr_filter *filter; 1084 struct perf_addr_filter *filter;
@@ -1081,11 +1086,16 @@ static int pt_event_addr_filters_validate(struct list_head *filters)
1081 1086
1082 list_for_each_entry(filter, filters, entry) { 1087 list_for_each_entry(filter, filters, entry) {
1083 /* PT doesn't support single address triggers */ 1088 /* PT doesn't support single address triggers */
1084 if (!filter->range) 1089 if (!filter->range || !filter->size)
1085 return -EOPNOTSUPP; 1090 return -EOPNOTSUPP;
1086 1091
1087 if (!filter->inode && !kernel_ip(filter->offset)) 1092 if (!filter->inode) {
1088 return -EINVAL; 1093 if (!valid_kernel_ip(filter->offset))
1094 return -EINVAL;
1095
1096 if (!valid_kernel_ip(filter->offset + filter->size))
1097 return -EINVAL;
1098 }
1089 1099
1090 if (++range > pt_cap_get(PT_CAP_num_address_ranges)) 1100 if (++range > pt_cap_get(PT_CAP_num_address_ranges))
1091 return -EOPNOTSUPP; 1101 return -EOPNOTSUPP;
@@ -1111,7 +1121,7 @@ static void pt_event_addr_filters_sync(struct perf_event *event)
1111 } else { 1121 } else {
1112 /* apply the offset */ 1122 /* apply the offset */
1113 msr_a = filter->offset + offs[range]; 1123 msr_a = filter->offset + offs[range];
1114 msr_b = filter->size + msr_a; 1124 msr_b = filter->size + msr_a - 1;
1115 } 1125 }
1116 1126
1117 filters->filter[range].msr_a = msr_a; 1127 filters->filter[range].msr_a = msr_a;
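
Two fixes work together here: validation now rejects zero-sized ranges and checks both endpoints with valid_kernel_ip(), and the programmed end address becomes inclusive (msr_b = base + size - 1) rather than one past the range. A small runnable check of the inclusive bound, with illustrative values:

#include <stdio.h>

int main(void)
{
	unsigned long offset = 0x1000, size = 0x200;
	unsigned long msr_a = offset;
	unsigned long msr_b = offset + size - 1;  /* last byte inside: 0x11ff */

	printf("range [%#lx, %#lx] covers %lu bytes\n",
	       msr_a, msr_b, msr_b - msr_a + 1);
	return 0;
}
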
diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c
index 97a69dbba649..9d35ec0cb8fc 100644
--- a/arch/x86/events/intel/uncore_snb.c
+++ b/arch/x86/events/intel/uncore_snb.c
@@ -100,6 +100,12 @@ static void snb_uncore_msr_init_box(struct intel_uncore_box *box)
100 } 100 }
101} 101}
102 102
103static void snb_uncore_msr_enable_box(struct intel_uncore_box *box)
104{
105 wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
106 SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
107}
108
103static void snb_uncore_msr_exit_box(struct intel_uncore_box *box) 109static void snb_uncore_msr_exit_box(struct intel_uncore_box *box)
104{ 110{
105 if (box->pmu->pmu_idx == 0) 111 if (box->pmu->pmu_idx == 0)
@@ -127,6 +133,7 @@ static struct attribute_group snb_uncore_format_group = {
127 133
128static struct intel_uncore_ops snb_uncore_msr_ops = { 134static struct intel_uncore_ops snb_uncore_msr_ops = {
129 .init_box = snb_uncore_msr_init_box, 135 .init_box = snb_uncore_msr_init_box,
136 .enable_box = snb_uncore_msr_enable_box,
130 .exit_box = snb_uncore_msr_exit_box, 137 .exit_box = snb_uncore_msr_exit_box,
131 .disable_event = snb_uncore_msr_disable_event, 138 .disable_event = snb_uncore_msr_disable_event,
132 .enable_event = snb_uncore_msr_enable_event, 139 .enable_event = snb_uncore_msr_enable_event,
@@ -192,6 +199,12 @@ static void skl_uncore_msr_init_box(struct intel_uncore_box *box)
192 } 199 }
193} 200}
194 201
202static void skl_uncore_msr_enable_box(struct intel_uncore_box *box)
203{
204 wrmsrl(SKL_UNC_PERF_GLOBAL_CTL,
205 SNB_UNC_GLOBAL_CTL_EN | SKL_UNC_GLOBAL_CTL_CORE_ALL);
206}
207
195static void skl_uncore_msr_exit_box(struct intel_uncore_box *box) 208static void skl_uncore_msr_exit_box(struct intel_uncore_box *box)
196{ 209{
197 if (box->pmu->pmu_idx == 0) 210 if (box->pmu->pmu_idx == 0)
@@ -200,6 +213,7 @@ static void skl_uncore_msr_exit_box(struct intel_uncore_box *box)
200 213
201static struct intel_uncore_ops skl_uncore_msr_ops = { 214static struct intel_uncore_ops skl_uncore_msr_ops = {
202 .init_box = skl_uncore_msr_init_box, 215 .init_box = skl_uncore_msr_init_box,
216 .enable_box = skl_uncore_msr_enable_box,
203 .exit_box = skl_uncore_msr_exit_box, 217 .exit_box = skl_uncore_msr_exit_box,
204 .disable_event = snb_uncore_msr_disable_event, 218 .disable_event = snb_uncore_msr_disable_event,
205 .enable_event = snb_uncore_msr_enable_event, 219 .enable_event = snb_uncore_msr_enable_event,
diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
index 824e54086e07..8aee83bcf71f 100644
--- a/arch/x86/events/intel/uncore_snbep.c
+++ b/arch/x86/events/intel/uncore_snbep.c
@@ -2626,7 +2626,7 @@ void hswep_uncore_cpu_init(void)
2626 2626
2627static struct intel_uncore_type hswep_uncore_ha = { 2627static struct intel_uncore_type hswep_uncore_ha = {
2628 .name = "ha", 2628 .name = "ha",
2629 .num_counters = 5, 2629 .num_counters = 4,
2630 .num_boxes = 2, 2630 .num_boxes = 2,
2631 .perf_ctr_bits = 48, 2631 .perf_ctr_bits = 48,
2632 SNBEP_UNCORE_PCI_COMMON_INIT(), 2632 SNBEP_UNCORE_PCI_COMMON_INIT(),
@@ -2645,7 +2645,7 @@ static struct uncore_event_desc hswep_uncore_imc_events[] = {
2645 2645
2646static struct intel_uncore_type hswep_uncore_imc = { 2646static struct intel_uncore_type hswep_uncore_imc = {
2647 .name = "imc", 2647 .name = "imc",
2648 .num_counters = 5, 2648 .num_counters = 4,
2649 .num_boxes = 8, 2649 .num_boxes = 8,
2650 .perf_ctr_bits = 48, 2650 .perf_ctr_bits = 48,
2651 .fixed_ctr_bits = 48, 2651 .fixed_ctr_bits = 48,
@@ -2691,7 +2691,7 @@ static struct intel_uncore_type hswep_uncore_irp = {
2691 2691
2692static struct intel_uncore_type hswep_uncore_qpi = { 2692static struct intel_uncore_type hswep_uncore_qpi = {
2693 .name = "qpi", 2693 .name = "qpi",
2694 .num_counters = 5, 2694 .num_counters = 4,
2695 .num_boxes = 3, 2695 .num_boxes = 3,
2696 .perf_ctr_bits = 48, 2696 .perf_ctr_bits = 48,
2697 .perf_ctr = SNBEP_PCI_PMON_CTR0, 2697 .perf_ctr = SNBEP_PCI_PMON_CTR0,
@@ -2773,7 +2773,7 @@ static struct event_constraint hswep_uncore_r3qpi_constraints[] = {
2773 2773
2774static struct intel_uncore_type hswep_uncore_r3qpi = { 2774static struct intel_uncore_type hswep_uncore_r3qpi = {
2775 .name = "r3qpi", 2775 .name = "r3qpi",
2776 .num_counters = 4, 2776 .num_counters = 3,
2777 .num_boxes = 3, 2777 .num_boxes = 3,
2778 .perf_ctr_bits = 44, 2778 .perf_ctr_bits = 44,
2779 .constraints = hswep_uncore_r3qpi_constraints, 2779 .constraints = hswep_uncore_r3qpi_constraints,
@@ -2972,7 +2972,7 @@ static struct intel_uncore_type bdx_uncore_ha = {
2972 2972
2973static struct intel_uncore_type bdx_uncore_imc = { 2973static struct intel_uncore_type bdx_uncore_imc = {
2974 .name = "imc", 2974 .name = "imc",
2975 .num_counters = 5, 2975 .num_counters = 4,
2976 .num_boxes = 8, 2976 .num_boxes = 8,
2977 .perf_ctr_bits = 48, 2977 .perf_ctr_bits = 48,
2978 .fixed_ctr_bits = 48, 2978 .fixed_ctr_bits = 48,
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index f5befd4945f2..124357773ffa 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -135,6 +135,7 @@ extern void init_apic_mappings(void);
135void register_lapic_address(unsigned long address); 135void register_lapic_address(unsigned long address);
136extern void setup_boot_APIC_clock(void); 136extern void setup_boot_APIC_clock(void);
137extern void setup_secondary_APIC_clock(void); 137extern void setup_secondary_APIC_clock(void);
138extern void lapic_update_tsc_freq(void);
138extern int APIC_init_uniprocessor(void); 139extern int APIC_init_uniprocessor(void);
139 140
140#ifdef CONFIG_X86_64 141#ifdef CONFIG_X86_64
@@ -170,6 +171,7 @@ static inline void init_apic_mappings(void) { }
170static inline void disable_local_APIC(void) { } 171static inline void disable_local_APIC(void) { }
171# define setup_boot_APIC_clock x86_init_noop 172# define setup_boot_APIC_clock x86_init_noop
172# define setup_secondary_APIC_clock x86_init_noop 173# define setup_secondary_APIC_clock x86_init_noop
174static inline void lapic_update_tsc_freq(void) { }
173#endif /* !CONFIG_X86_LOCAL_APIC */ 175#endif /* !CONFIG_X86_LOCAL_APIC */
174 176
175#ifdef CONFIG_X86_X2APIC 177#ifdef CONFIG_X86_X2APIC
diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h
index 7178043b0e1d..59405a248fc2 100644
--- a/arch/x86/include/asm/hardirq.h
+++ b/arch/x86/include/asm/hardirq.h
@@ -22,10 +22,6 @@ typedef struct {
22#ifdef CONFIG_SMP 22#ifdef CONFIG_SMP
23 unsigned int irq_resched_count; 23 unsigned int irq_resched_count;
24 unsigned int irq_call_count; 24 unsigned int irq_call_count;
25 /*
26 * irq_tlb_count is double-counted in irq_call_count, so it must be
27 * subtracted from irq_call_count when displaying irq_call_count
28 */
29 unsigned int irq_tlb_count; 25 unsigned int irq_tlb_count;
30#endif 26#endif
31#ifdef CONFIG_X86_THERMAL_VECTOR 27#ifdef CONFIG_X86_THERMAL_VECTOR
diff --git a/arch/x86/include/asm/init.h b/arch/x86/include/asm/init.h
index 223042086f4e..737da62bfeb0 100644
--- a/arch/x86/include/asm/init.h
+++ b/arch/x86/include/asm/init.h
@@ -5,10 +5,10 @@ struct x86_mapping_info {
5 void *(*alloc_pgt_page)(void *); /* allocate buf for page table */ 5 void *(*alloc_pgt_page)(void *); /* allocate buf for page table */
6 void *context; /* context for alloc_pgt_page */ 6 void *context; /* context for alloc_pgt_page */
7 unsigned long pmd_flag; /* page flag for PMD entry */ 7 unsigned long pmd_flag; /* page flag for PMD entry */
8 bool kernel_mapping; /* kernel mapping or ident mapping */ 8 unsigned long offset; /* ident mapping offset */
9}; 9};
10 10
11int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page, 11int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
12 unsigned long addr, unsigned long end); 12 unsigned long pstart, unsigned long pend);
13 13
14#endif /* _ASM_X86_INIT_H */ 14#endif /* _ASM_X86_INIT_H */
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index 7e8ec7ae10fa..1cc82ece9ac1 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -145,7 +145,7 @@ static inline int pgd_large(pgd_t pgd) { return 0; }
145 * 145 *
146 * | ... | 11| 10| 9|8|7|6|5| 4| 3|2|1|0| <- bit number 146 * | ... | 11| 10| 9|8|7|6|5| 4| 3|2|1|0| <- bit number
147 * | ... |SW3|SW2|SW1|G|L|D|A|CD|WT|U|W|P| <- bit names 147 * | ... |SW3|SW2|SW1|G|L|D|A|CD|WT|U|W|P| <- bit names
148 * | OFFSET (14->63) | TYPE (10-13) |0|X|X|X| X| X|X|X|0| <- swp entry 148 * | OFFSET (14->63) | TYPE (9-13) |0|X|X|X| X| X|X|X|0| <- swp entry
149 * 149 *
150 * G (8) is aliased and used as a PROT_NONE indicator for 150 * G (8) is aliased and used as a PROT_NONE indicator for
151 * !present ptes. We need to start storing swap entries above 151 * !present ptes. We need to start storing swap entries above
@@ -156,7 +156,7 @@ static inline int pgd_large(pgd_t pgd) { return 0; }
156#define SWP_TYPE_FIRST_BIT (_PAGE_BIT_PROTNONE + 1) 156#define SWP_TYPE_FIRST_BIT (_PAGE_BIT_PROTNONE + 1)
157#define SWP_TYPE_BITS 5 157#define SWP_TYPE_BITS 5
158/* Place the offset above the type: */ 158/* Place the offset above the type: */
159#define SWP_OFFSET_FIRST_BIT (SWP_TYPE_FIRST_BIT + SWP_TYPE_BITS + 1) 159#define SWP_OFFSET_FIRST_BIT (SWP_TYPE_FIRST_BIT + SWP_TYPE_BITS)
160 160
161#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS) 161#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS)
162 162
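
The removed "+ 1" was an off-by-one: with five type bits starting at bit 9, the offset field must begin immediately above them at bit 14, exactly as the updated layout comment says. A runnable check of the arithmetic:

#include <stdio.h>

#define _PAGE_BIT_PROTNONE   8
#define SWP_TYPE_FIRST_BIT   (_PAGE_BIT_PROTNONE + 1)             /* 9 */
#define SWP_TYPE_BITS        5                                    /* bits 9..13 */
#define SWP_OFFSET_FIRST_BIT (SWP_TYPE_FIRST_BIT + SWP_TYPE_BITS) /* 14 */

int main(void)
{
	printf("type: bits %d..%d, offset starts at bit %d\n",
	       SWP_TYPE_FIRST_BIT, SWP_TYPE_FIRST_BIT + SWP_TYPE_BITS - 1,
	       SWP_OFFSET_FIRST_BIT);
	return 0;
}
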
diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
index 9c6b890d5e7a..b2988c0ed829 100644
--- a/arch/x86/include/asm/realmode.h
+++ b/arch/x86/include/asm/realmode.h
@@ -58,7 +58,15 @@ extern unsigned char boot_gdt[];
58extern unsigned char secondary_startup_64[]; 58extern unsigned char secondary_startup_64[];
59#endif 59#endif
60 60
61static inline size_t real_mode_size_needed(void)
62{
63 if (real_mode_header)
64 return 0; /* already allocated. */
65
66 return ALIGN(real_mode_blob_end - real_mode_blob, PAGE_SIZE);
67}
68
69void set_real_mode_mem(phys_addr_t mem, size_t size);
61void reserve_real_mode(void); 70void reserve_real_mode(void);
62void setup_real_mode(void);
63 71
64#endif /* _ARCH_X86_REALMODE_H */ 72#endif /* _ARCH_X86_REALMODE_H */
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 84b59846154a..8b7c8d8e0852 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -176,6 +176,50 @@ static inline unsigned long current_stack_pointer(void)
176 return sp; 176 return sp;
177} 177}
178 178
179/*
180 * Walks up the stack frames to make sure that the specified object is
181 * entirely contained by a single stack frame.
182 *
183 * Returns:
184 * 1 if within a frame
185 * -1 if placed across a frame boundary (or outside stack)
186 * 0 unable to determine (no frame pointers, etc)
187 */
188static inline int arch_within_stack_frames(const void * const stack,
189 const void * const stackend,
190 const void *obj, unsigned long len)
191{
192#if defined(CONFIG_FRAME_POINTER)
193 const void *frame = NULL;
194 const void *oldframe;
195
196 oldframe = __builtin_frame_address(1);
197 if (oldframe)
198 frame = __builtin_frame_address(2);
199 /*
200 * low ----------------------------------------------> high
201 * [saved bp][saved ip][args][local vars][saved bp][saved ip]
202 * ^----------------^
203 * allow copies only within here
204 */
205 while (stack <= frame && frame < stackend) {
206 /*
207 * If obj + len extends past the last frame, this
208 * check won't pass and the next frame will be 0,
209 * causing us to bail out and correctly report
210 * the copy as invalid.
211 */
212 if (obj + len <= frame)
213 return obj >= oldframe + 2 * sizeof(void *) ? 1 : -1;
214 oldframe = frame;
215 frame = *(const void * const *)frame;
216 }
217 return -1;
218#else
219 return 0;
220#endif
221}
222
179#else /* !__ASSEMBLY__ */ 223#else /* !__ASSEMBLY__ */
180 224
181#ifdef CONFIG_X86_64 225#ifdef CONFIG_X86_64
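
arch_within_stack_frames() walks the saved frame pointers and accepts a copy only if the object sits wholly inside one frame, above the caller's saved bp/return-address pair. A simplified runnable model of the per-frame containment test (the kernel version additionally distinguishes "spans a boundary" from "cannot tell"):

#include <stddef.h>

static int object_within_frame(const char *oldframe, const char *frame,
			       const char *obj, size_t len)
{
	if (obj + len > frame)
		return 0;	/* spills past the current frame */
	/* must clear the caller's saved bp and return address */
	return obj >= oldframe + 2 * sizeof(void *);
}
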
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 4e5be94e079a..6fa85944af83 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -135,7 +135,14 @@ static inline void cr4_set_bits_and_update_boot(unsigned long mask)
135 135
136static inline void __native_flush_tlb(void) 136static inline void __native_flush_tlb(void)
137{ 137{
138 /*
139 * If current->mm == NULL then we borrow a mm which may change during a
140 * task switch and therefore we must not be preempted while we write CR3
141 * back:
142 */
143 preempt_disable();
138 native_write_cr3(native_read_cr3()); 144 native_write_cr3(native_read_cr3());
145 preempt_enable();
139} 146}
140 147
141static inline void __native_flush_tlb_global_irq_disabled(void) 148static inline void __native_flush_tlb_global_irq_disabled(void)
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index c03bfb68c503..2131c4ce7d8a 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -433,7 +433,11 @@ do { \
433#define __get_user_asm_ex(x, addr, itype, rtype, ltype) \ 433#define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
434 asm volatile("1: mov"itype" %1,%"rtype"0\n" \ 434 asm volatile("1: mov"itype" %1,%"rtype"0\n" \
435 "2:\n" \ 435 "2:\n" \
436 _ASM_EXTABLE_EX(1b, 2b) \ 436 ".section .fixup,\"ax\"\n" \
437 "3:xor"itype" %"rtype"0,%"rtype"0\n" \
438 " jmp 2b\n" \
439 ".previous\n" \
440 _ASM_EXTABLE_EX(1b, 3b) \
437 : ltype(x) : "m" (__m(addr))) 441 : ltype(x) : "m" (__m(addr)))
438 442
439#define __put_user_nocheck(x, ptr, size) \ 443#define __put_user_nocheck(x, ptr, size) \
@@ -697,44 +701,15 @@ unsigned long __must_check _copy_from_user(void *to, const void __user *from,
697unsigned long __must_check _copy_to_user(void __user *to, const void *from, 701unsigned long __must_check _copy_to_user(void __user *to, const void *from,
698 unsigned n); 702 unsigned n);
699 703
700#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS 704extern void __compiletime_error("usercopy buffer size is too small")
701# define copy_user_diag __compiletime_error 705__bad_copy_user(void);
702#else
703# define copy_user_diag __compiletime_warning
704#endif
705
706extern void copy_user_diag("copy_from_user() buffer size is too small")
707copy_from_user_overflow(void);
708extern void copy_user_diag("copy_to_user() buffer size is too small")
709copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
710
711#undef copy_user_diag
712
713#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
714
715extern void
716__compiletime_warning("copy_from_user() buffer size is not provably correct")
717__copy_from_user_overflow(void) __asm__("copy_from_user_overflow");
718#define __copy_from_user_overflow(size, count) __copy_from_user_overflow()
719
720extern void
721__compiletime_warning("copy_to_user() buffer size is not provably correct")
722__copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
723#define __copy_to_user_overflow(size, count) __copy_to_user_overflow()
724
725#else
726 706
727static inline void 707static inline void copy_user_overflow(int size, unsigned long count)
728__copy_from_user_overflow(int size, unsigned long count)
729{ 708{
730 WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count); 709 WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
731} 710}
732 711
733#define __copy_to_user_overflow __copy_from_user_overflow 712static __always_inline unsigned long __must_check
734
735#endif
736
737static inline unsigned long __must_check
738copy_from_user(void *to, const void __user *from, unsigned long n) 713copy_from_user(void *to, const void __user *from, unsigned long n)
739{ 714{
740 int sz = __compiletime_object_size(to); 715 int sz = __compiletime_object_size(to);
@@ -743,35 +718,18 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
743 718
744 kasan_check_write(to, n); 719 kasan_check_write(to, n);
745 720
746 /* 721 if (likely(sz < 0 || sz >= n)) {
747 * While we would like to have the compiler do the checking for us 722 check_object_size(to, n, false);
748 * even in the non-constant size case, any false positives there are
749 * a problem (especially when DEBUG_STRICT_USER_COPY_CHECKS, but even
750 * without - the [hopefully] dangerous looking nature of the warning
 751 * would make people go look at the respective call sites over and
752 * over again just to find that there's no problem).
753 *
754 * And there are cases where it's just not realistic for the compiler
755 * to prove the count to be in range. For example when multiple call
756 * sites of a helper function - perhaps in different source files -
757 * all doing proper range checking, yet the helper function not doing
758 * so again.
759 *
760 * Therefore limit the compile time checking to the constant size
761 * case, and do only runtime checking for non-constant sizes.
762 */
763
764 if (likely(sz < 0 || sz >= n))
765 n = _copy_from_user(to, from, n); 723 n = _copy_from_user(to, from, n);
766 else if(__builtin_constant_p(n)) 724 } else if (!__builtin_constant_p(n))
767 copy_from_user_overflow(); 725 copy_user_overflow(sz, n);
768 else 726 else
769 __copy_from_user_overflow(sz, n); 727 __bad_copy_user();
770 728
771 return n; 729 return n;
772} 730}
773 731
774static inline unsigned long __must_check 732static __always_inline unsigned long __must_check
775copy_to_user(void __user *to, const void *from, unsigned long n) 733copy_to_user(void __user *to, const void *from, unsigned long n)
776{ 734{
777 int sz = __compiletime_object_size(from); 735 int sz = __compiletime_object_size(from);
@@ -780,20 +738,17 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
780 738
781 might_fault(); 739 might_fault();
782 740
783 /* See the comment in copy_from_user() above. */ 741 if (likely(sz < 0 || sz >= n)) {
784 if (likely(sz < 0 || sz >= n)) 742 check_object_size(from, n, true);
785 n = _copy_to_user(to, from, n); 743 n = _copy_to_user(to, from, n);
786 else if(__builtin_constant_p(n)) 744 } else if (!__builtin_constant_p(n))
787 copy_to_user_overflow(); 745 copy_user_overflow(sz, n);
788 else 746 else
789 __copy_to_user_overflow(sz, n); 747 __bad_copy_user();
790 748
791 return n; 749 return n;
792} 750}
793 751
794#undef __copy_from_user_overflow
795#undef __copy_to_user_overflow
796
797/* 752/*
798 * We rely on the nested NMI work to allow atomic faults from the NMI path; the 753 * We rely on the nested NMI work to allow atomic faults from the NMI path; the
799 * nested NMI paths are careful to preserve CR2. 754 * nested NMI paths are careful to preserve CR2.
@@ -812,21 +767,21 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
812#define user_access_begin() __uaccess_begin() 767#define user_access_begin() __uaccess_begin()
813#define user_access_end() __uaccess_end() 768#define user_access_end() __uaccess_end()
814 769
815#define unsafe_put_user(x, ptr) \ 770#define unsafe_put_user(x, ptr, err_label) \
816({ \ 771do { \
817 int __pu_err; \ 772 int __pu_err; \
818 __put_user_size((x), (ptr), sizeof(*(ptr)), __pu_err, -EFAULT); \ 773 __put_user_size((x), (ptr), sizeof(*(ptr)), __pu_err, -EFAULT); \
819 __builtin_expect(__pu_err, 0); \ 774 if (unlikely(__pu_err)) goto err_label; \
820}) 775} while (0)
821 776
822#define unsafe_get_user(x, ptr) \ 777#define unsafe_get_user(x, ptr, err_label) \
823({ \ 778do { \
824 int __gu_err; \ 779 int __gu_err; \
825 unsigned long __gu_val; \ 780 unsigned long __gu_val; \
826 __get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err, -EFAULT); \ 781 __get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err, -EFAULT); \
827 (x) = (__force __typeof__(*(ptr)))__gu_val; \ 782 (x) = (__force __typeof__(*(ptr)))__gu_val; \
828 __builtin_expect(__gu_err, 0); \ 783 if (unlikely(__gu_err)) goto err_label; \
829}) 784} while (0)
830 785
831#endif /* _ASM_X86_UACCESS_H */ 786#endif /* _ASM_X86_UACCESS_H */
832 787
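
unsafe_get_user()/unsafe_put_user() switch from returning an error code to jumping to a caller-supplied label, which lets a series of accesses share one fixup path and a single user_access_end(). A hedged usage sketch under the new convention; uptr is a hypothetical pointer already validated with access_ok():

static int increment_user_counter(unsigned int __user *uptr)
{
	unsigned int val;

	user_access_begin();
	unsafe_get_user(val, uptr, efault);
	unsafe_put_user(val + 1, uptr, efault);
	user_access_end();
	return 0;

efault:
	user_access_end();
	return -EFAULT;
}
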
diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
index 4b32da24faaf..7d3bdd1ed697 100644
--- a/arch/x86/include/asm/uaccess_32.h
+++ b/arch/x86/include/asm/uaccess_32.h
@@ -37,6 +37,7 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
37static __always_inline unsigned long __must_check 37static __always_inline unsigned long __must_check
38__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n) 38__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
39{ 39{
40 check_object_size(from, n, true);
40 return __copy_to_user_ll(to, from, n); 41 return __copy_to_user_ll(to, from, n);
41} 42}
42 43
@@ -95,6 +96,7 @@ static __always_inline unsigned long
95__copy_from_user(void *to, const void __user *from, unsigned long n) 96__copy_from_user(void *to, const void __user *from, unsigned long n)
96{ 97{
97 might_fault(); 98 might_fault();
99 check_object_size(to, n, false);
98 if (__builtin_constant_p(n)) { 100 if (__builtin_constant_p(n)) {
99 unsigned long ret; 101 unsigned long ret;
100 102
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index 2eac2aa3e37f..673059a109fe 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -54,6 +54,7 @@ int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
54{ 54{
55 int ret = 0; 55 int ret = 0;
56 56
57 check_object_size(dst, size, false);
57 if (!__builtin_constant_p(size)) 58 if (!__builtin_constant_p(size))
58 return copy_user_generic(dst, (__force void *)src, size); 59 return copy_user_generic(dst, (__force void *)src, size);
59 switch (size) { 60 switch (size) {
@@ -119,6 +120,7 @@ int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
119{ 120{
120 int ret = 0; 121 int ret = 0;
121 122
123 check_object_size(src, size, true);
122 if (!__builtin_constant_p(size)) 124 if (!__builtin_constant_p(size))
123 return copy_user_generic((__force void *)dst, src, size); 125 return copy_user_generic((__force void *)dst, src, size);
124 switch (size) { 126 switch (size) {
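
Across uaccess.h, uaccess_32.h and uaccess_64.h the copy helpers now funnel provable constant-size overflows into __bad_copy_user() (a build failure), warn at runtime for non-constant sizes, and pass accepted copies through check_object_size() for hardened-usercopy checking. A runnable model of the dispatch in copy_from_user(); sz is the compile-time-known object size (negative when unknown):

enum copy_check { DO_COPY, RUNTIME_WARN, BUILD_ERROR };

static enum copy_check classify_copy(long sz, unsigned long n, int n_is_const)
{
	if (sz < 0 || (unsigned long)sz >= n)
		return DO_COPY;	/* also runs check_object_size() */
	return n_is_const ? BUILD_ERROR : RUNTIME_WARN;
}
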
diff --git a/arch/x86/include/asm/uv/bios.h b/arch/x86/include/asm/uv/bios.h
index c852590254d5..e652a7cc6186 100644
--- a/arch/x86/include/asm/uv/bios.h
+++ b/arch/x86/include/asm/uv/bios.h
@@ -79,7 +79,7 @@ struct uv_gam_range_entry {
79 u16 nasid; /* HNasid */ 79 u16 nasid; /* HNasid */
80 u16 sockid; /* Socket ID, high bits of APIC ID */ 80 u16 sockid; /* Socket ID, high bits of APIC ID */
81 u16 pnode; /* Index to MMR and GRU spaces */ 81 u16 pnode; /* Index to MMR and GRU spaces */
82 u32 pxm; /* ACPI proximity domain number */ 82 u32 unused2;
83 u32 limit; /* PA bits 56:26 (UV_GAM_RANGE_SHFT) */ 83 u32 limit; /* PA bits 56:26 (UV_GAM_RANGE_SHFT) */
84}; 84};
85 85
@@ -88,7 +88,8 @@ struct uv_gam_range_entry {
88#define UV_SYSTAB_VERSION_UV4 0x400 /* UV4 BIOS base version */ 88#define UV_SYSTAB_VERSION_UV4 0x400 /* UV4 BIOS base version */
89#define UV_SYSTAB_VERSION_UV4_1 0x401 /* + gpa_shift */ 89#define UV_SYSTAB_VERSION_UV4_1 0x401 /* + gpa_shift */
90#define UV_SYSTAB_VERSION_UV4_2 0x402 /* + TYPE_NVRAM/WINDOW/MBOX */ 90#define UV_SYSTAB_VERSION_UV4_2 0x402 /* + TYPE_NVRAM/WINDOW/MBOX */
91#define UV_SYSTAB_VERSION_UV4_LATEST UV_SYSTAB_VERSION_UV4_2 91#define UV_SYSTAB_VERSION_UV4_3 0x403 /* - GAM Range PXM Value */
92#define UV_SYSTAB_VERSION_UV4_LATEST UV_SYSTAB_VERSION_UV4_3
92 93
93#define UV_SYSTAB_TYPE_UNUSED 0 /* End of table (offset == 0) */ 94#define UV_SYSTAB_TYPE_UNUSED 0 /* End of table (offset == 0) */
94#define UV_SYSTAB_TYPE_GAM_PARAMS 1 /* GAM PARAM conversions */ 95#define UV_SYSTAB_TYPE_GAM_PARAMS 1 /* GAM PARAM conversions */
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 20abd912f0e4..f3e9b2df4b16 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -313,7 +313,7 @@ int lapic_get_maxlvt(void)
313 313
314/* Clock divisor */ 314/* Clock divisor */
315#define APIC_DIVISOR 16 315#define APIC_DIVISOR 16
316#define TSC_DIVISOR 32 316#define TSC_DIVISOR 8
317 317
318/* 318/*
319 * This function sets up the local APIC timer, with a timeout of 319 * This function sets up the local APIC timer, with a timeout of
@@ -565,13 +565,37 @@ static void setup_APIC_timer(void)
565 CLOCK_EVT_FEAT_DUMMY); 565 CLOCK_EVT_FEAT_DUMMY);
566 levt->set_next_event = lapic_next_deadline; 566 levt->set_next_event = lapic_next_deadline;
567 clockevents_config_and_register(levt, 567 clockevents_config_and_register(levt,
568 (tsc_khz / TSC_DIVISOR) * 1000, 568 tsc_khz * (1000 / TSC_DIVISOR),
569 0xF, ~0UL); 569 0xF, ~0UL);
570 } else 570 } else
571 clockevents_register_device(levt); 571 clockevents_register_device(levt);
572} 572}
573 573
574/* 574/*
575 * Install the updated TSC frequency from recalibration at the TSC
576 * deadline clockevent devices.
577 */
578static void __lapic_update_tsc_freq(void *info)
579{
580 struct clock_event_device *levt = this_cpu_ptr(&lapic_events);
581
582 if (!this_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER))
583 return;
584
585 clockevents_update_freq(levt, tsc_khz * (1000 / TSC_DIVISOR));
586}
587
588void lapic_update_tsc_freq(void)
589{
590 /*
591 * The clockevent device's ->mult and ->shift can both be
592 * changed. In order to avoid races, schedule the frequency
593 * update code on each CPU.
594 */
595 on_each_cpu(__lapic_update_tsc_freq, NULL, 0);
596}
597
598/*
 575 * In this function we calibrate APIC bus clocks to the external timer. 599
576 * 600 *
577 * We want to do the calibration only once since we want to have local timer 601 * We want to do the calibration only once since we want to have local timer
@@ -1599,6 +1623,9 @@ void __init enable_IR_x2apic(void)
1599 unsigned long flags; 1623 unsigned long flags;
1600 int ret, ir_stat; 1624 int ret, ir_stat;
1601 1625
1626 if (skip_ioapic_setup)
1627 return;
1628
1602 ir_stat = irq_remapping_prepare(); 1629 ir_stat = irq_remapping_prepare();
1603 if (ir_stat < 0 && !x2apic_supported()) 1630 if (ir_stat < 0 && !x2apic_supported())
1604 return; 1631 return;
@@ -2066,7 +2093,6 @@ int generic_processor_info(int apicid, int version)
2066 return -EINVAL; 2093 return -EINVAL;
2067 } 2094 }
2068 2095
2069 num_processors++;
2070 if (apicid == boot_cpu_physical_apicid) { 2096 if (apicid == boot_cpu_physical_apicid) {
2071 /* 2097 /*
2072 * x86_bios_cpu_apicid is required to have processors listed 2098 * x86_bios_cpu_apicid is required to have processors listed
@@ -2089,10 +2115,13 @@ int generic_processor_info(int apicid, int version)
2089 2115
2090 pr_warning("APIC: Package limit reached. Processor %d/0x%x ignored.\n", 2116 pr_warning("APIC: Package limit reached. Processor %d/0x%x ignored.\n",
2091 thiscpu, apicid); 2117 thiscpu, apicid);
2118
2092 disabled_cpus++; 2119 disabled_cpus++;
2093 return -ENOSPC; 2120 return -ENOSPC;
2094 } 2121 }
2095 2122
2123 num_processors++;
2124
2096 /* 2125 /*
2097 * Validate version 2126 * Validate version
2098 */ 2127 */
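
Two related changes here: TSC_DIVISOR drops from 32 to 8 and the frequency expression becomes tsc_khz * (1000 / TSC_DIVISOR), which is exact because 8 divides 1000, whereas dividing tsc_khz first truncates; and lapic_update_tsc_freq() pushes a recalibrated frequency to every CPU's clockevent via on_each_cpu() to avoid racing ->mult/->shift updates. A runnable check of the precision point, with an illustrative tsc_khz:

#include <stdio.h>

int main(void)
{
	unsigned long tsc_khz = 2934999;

	/* old: truncates the kHz value before scaling (divisor 32) */
	unsigned long old_hz = (tsc_khz / 32) * 1000;	/* 91718000 */
	/* new: 1000/8 = 125 is an integer, so no truncation (divisor 8) */
	unsigned long new_hz = tsc_khz * (1000 / 8);	/* 366874875 */

	printf("old %lu Hz (lost %lu), new %lu Hz (exact)\n",
	       old_hz, (tsc_khz * 1000 / 32) - old_hz, new_hz);
	return 0;
}
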
diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
index 6368fa69d2af..54f35d988025 100644
--- a/arch/x86/kernel/apic/x2apic_cluster.c
+++ b/arch/x86/kernel/apic/x2apic_cluster.c
@@ -155,7 +155,7 @@ static void init_x2apic_ldr(void)
155/* 155/*
156 * At CPU state changes, update the x2apic cluster sibling info. 156 * At CPU state changes, update the x2apic cluster sibling info.
157 */ 157 */
158int x2apic_prepare_cpu(unsigned int cpu) 158static int x2apic_prepare_cpu(unsigned int cpu)
159{ 159{
160 if (!zalloc_cpumask_var(&per_cpu(cpus_in_cluster, cpu), GFP_KERNEL)) 160 if (!zalloc_cpumask_var(&per_cpu(cpus_in_cluster, cpu), GFP_KERNEL))
161 return -ENOMEM; 161 return -ENOMEM;
@@ -168,7 +168,7 @@ int x2apic_prepare_cpu(unsigned int cpu)
168 return 0; 168 return 0;
169} 169}
170 170
171int x2apic_dead_cpu(unsigned int this_cpu) 171static int x2apic_dead_cpu(unsigned int this_cpu)
172{ 172{
173 int cpu; 173 int cpu;
174 174
@@ -186,13 +186,18 @@ int x2apic_dead_cpu(unsigned int this_cpu)
186static int x2apic_cluster_probe(void) 186static int x2apic_cluster_probe(void)
187{ 187{
188 int cpu = smp_processor_id(); 188 int cpu = smp_processor_id();
189 int ret;
189 190
190 if (!x2apic_mode) 191 if (!x2apic_mode)
191 return 0; 192 return 0;
192 193
194 ret = cpuhp_setup_state(CPUHP_X2APIC_PREPARE, "X2APIC_PREPARE",
195 x2apic_prepare_cpu, x2apic_dead_cpu);
196 if (ret < 0) {
197 pr_err("Failed to register X2APIC_PREPARE\n");
198 return 0;
199 }
193 cpumask_set_cpu(cpu, per_cpu(cpus_in_cluster, cpu)); 200 cpumask_set_cpu(cpu, per_cpu(cpus_in_cluster, cpu));
194 cpuhp_setup_state(CPUHP_X2APIC_PREPARE, "X2APIC_PREPARE",
195 x2apic_prepare_cpu, x2apic_dead_cpu);
196 return 1; 201 return 1;
197} 202}
198 203
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 09b59adaea3f..cb0673c1e940 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -223,6 +223,11 @@ static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
223 if (strncmp(oem_id, "SGI", 3) != 0) 223 if (strncmp(oem_id, "SGI", 3) != 0)
224 return 0; 224 return 0;
225 225
226 if (numa_off) {
227 pr_err("UV: NUMA is off, disabling UV support\n");
228 return 0;
229 }
230
226 /* Setup early hub type field in uv_hub_info for Node 0 */ 231 /* Setup early hub type field in uv_hub_info for Node 0 */
227 uv_cpu_info->p_uv_hub_info = &uv_hub_info_node0; 232 uv_cpu_info->p_uv_hub_info = &uv_hub_info_node0;
228 233
@@ -325,7 +330,7 @@ static __init void build_uv_gr_table(void)
325 struct uv_gam_range_entry *gre = uv_gre_table; 330 struct uv_gam_range_entry *gre = uv_gre_table;
326 struct uv_gam_range_s *grt; 331 struct uv_gam_range_s *grt;
327 unsigned long last_limit = 0, ram_limit = 0; 332 unsigned long last_limit = 0, ram_limit = 0;
328 int bytes, i, sid, lsid = -1; 333 int bytes, i, sid, lsid = -1, indx = 0, lindx = -1;
329 334
330 if (!gre) 335 if (!gre)
331 return; 336 return;
@@ -356,11 +361,12 @@ static __init void build_uv_gr_table(void)
356 } 361 }
357 sid = gre->sockid - _min_socket; 362 sid = gre->sockid - _min_socket;
358 if (lsid < sid) { /* new range */ 363 if (lsid < sid) { /* new range */
359 grt = &_gr_table[sid]; 364 grt = &_gr_table[indx];
360 grt->base = lsid; 365 grt->base = lindx;
361 grt->nasid = gre->nasid; 366 grt->nasid = gre->nasid;
362 grt->limit = last_limit = gre->limit; 367 grt->limit = last_limit = gre->limit;
363 lsid = sid; 368 lsid = sid;
369 lindx = indx++;
364 continue; 370 continue;
365 } 371 }
366 if (lsid == sid && !ram_limit) { /* update range */ 372 if (lsid == sid && !ram_limit) { /* update range */
@@ -371,7 +377,7 @@ static __init void build_uv_gr_table(void)
371 } 377 }
372 if (!ram_limit) { /* non-contiguous ram range */ 378 if (!ram_limit) { /* non-contiguous ram range */
373 grt++; 379 grt++;
374 grt->base = sid - 1; 380 grt->base = lindx;
375 grt->nasid = gre->nasid; 381 grt->nasid = gre->nasid;
376 grt->limit = last_limit = gre->limit; 382 grt->limit = last_limit = gre->limit;
377 continue; 383 continue;
@@ -1155,19 +1161,18 @@ static void __init decode_gam_rng_tbl(unsigned long ptr)
1155 for (; gre->type != UV_GAM_RANGE_TYPE_UNUSED; gre++) { 1161 for (; gre->type != UV_GAM_RANGE_TYPE_UNUSED; gre++) {
1156 if (!index) { 1162 if (!index) {
1157 pr_info("UV: GAM Range Table...\n"); 1163 pr_info("UV: GAM Range Table...\n");
1158 pr_info("UV: # %20s %14s %5s %4s %5s %3s %2s %3s\n", 1164 pr_info("UV: # %20s %14s %5s %4s %5s %3s %2s\n",
1159 "Range", "", "Size", "Type", "NASID", 1165 "Range", "", "Size", "Type", "NASID",
1160 "SID", "PN", "PXM"); 1166 "SID", "PN");
1161 } 1167 }
1162 pr_info( 1168 pr_info(
1163 "UV: %2d: 0x%014lx-0x%014lx %5luG %3d %04x %02x %02x %3d\n", 1169 "UV: %2d: 0x%014lx-0x%014lx %5luG %3d %04x %02x %02x\n",
1164 index++, 1170 index++,
1165 (unsigned long)lgre << UV_GAM_RANGE_SHFT, 1171 (unsigned long)lgre << UV_GAM_RANGE_SHFT,
1166 (unsigned long)gre->limit << UV_GAM_RANGE_SHFT, 1172 (unsigned long)gre->limit << UV_GAM_RANGE_SHFT,
1167 ((unsigned long)(gre->limit - lgre)) >> 1173 ((unsigned long)(gre->limit - lgre)) >>
1168 (30 - UV_GAM_RANGE_SHFT), /* 64M -> 1G */ 1174 (30 - UV_GAM_RANGE_SHFT), /* 64M -> 1G */
1169 gre->type, gre->nasid, gre->sockid, 1175 gre->type, gre->nasid, gre->sockid, gre->pnode);
1170 gre->pnode, gre->pxm);
1171 1176
1172 lgre = gre->limit; 1177 lgre = gre->limit;
1173 if (sock_min > gre->sockid) 1178 if (sock_min > gre->sockid)
@@ -1286,7 +1291,7 @@ static void __init build_socket_tables(void)
1286 _pnode_to_socket[i] = SOCK_EMPTY; 1291 _pnode_to_socket[i] = SOCK_EMPTY;
1287 1292
1288 /* fill in pnode/node/addr conversion list values */ 1293 /* fill in pnode/node/addr conversion list values */
1289 pr_info("UV: GAM Building socket/pnode/pxm conversion tables\n"); 1294 pr_info("UV: GAM Building socket/pnode conversion tables\n");
1290 for (; gre->type != UV_GAM_RANGE_TYPE_UNUSED; gre++) { 1295 for (; gre->type != UV_GAM_RANGE_TYPE_UNUSED; gre++) {
1291 if (gre->type == UV_GAM_RANGE_TYPE_HOLE) 1296 if (gre->type == UV_GAM_RANGE_TYPE_HOLE)
1292 continue; 1297 continue;
@@ -1294,20 +1299,18 @@ static void __init build_socket_tables(void)
1294 if (_socket_to_pnode[i] != SOCK_EMPTY) 1299 if (_socket_to_pnode[i] != SOCK_EMPTY)
1295 continue; /* duplicate */ 1300 continue; /* duplicate */
1296 _socket_to_pnode[i] = gre->pnode; 1301 _socket_to_pnode[i] = gre->pnode;
1297 _socket_to_node[i] = gre->pxm;
1298 1302
1299 i = gre->pnode - minpnode; 1303 i = gre->pnode - minpnode;
1300 _pnode_to_socket[i] = gre->sockid; 1304 _pnode_to_socket[i] = gre->sockid;
1301 1305
1302 pr_info( 1306 pr_info(
1303 "UV: sid:%02x type:%d nasid:%04x pn:%02x pxm:%2d pn2s:%2x\n", 1307 "UV: sid:%02x type:%d nasid:%04x pn:%02x pn2s:%2x\n",
1304 gre->sockid, gre->type, gre->nasid, 1308 gre->sockid, gre->type, gre->nasid,
1305 _socket_to_pnode[gre->sockid - minsock], 1309 _socket_to_pnode[gre->sockid - minsock],
1306 _socket_to_node[gre->sockid - minsock],
1307 _pnode_to_socket[gre->pnode - minpnode]); 1310 _pnode_to_socket[gre->pnode - minpnode]);
1308 } 1311 }
1309 1312
1310 /* check socket -> node values */ 1313 /* Set socket -> node values */
1311 lnid = -1; 1314 lnid = -1;
1312 for_each_present_cpu(cpu) { 1315 for_each_present_cpu(cpu) {
1313 int nid = cpu_to_node(cpu); 1316 int nid = cpu_to_node(cpu);
@@ -1318,14 +1321,9 @@ static void __init build_socket_tables(void)
1318 lnid = nid; 1321 lnid = nid;
1319 apicid = per_cpu(x86_cpu_to_apicid, cpu); 1322 apicid = per_cpu(x86_cpu_to_apicid, cpu);
1320 sockid = apicid >> uv_cpuid.socketid_shift; 1323 sockid = apicid >> uv_cpuid.socketid_shift;
1321 i = sockid - minsock; 1324 _socket_to_node[sockid - minsock] = nid;
1322 1325 pr_info("UV: sid:%02x: apicid:%04x node:%2d\n",
1323 if (nid != _socket_to_node[i]) { 1326 sockid, apicid, nid);
1324 pr_warn(
1325 "UV: %02x: type:%d socket:%02x PXM:%02x != node:%2d\n",
1326 i, sockid, gre->type, _socket_to_node[i], nid);
1327 _socket_to_node[i] = nid;
1328 }
1329 } 1327 }
1330 1328
1331 /* Setup physical blade to pnode translation from GAM Range Table */ 1329 /* Setup physical blade to pnode translation from GAM Range Table */
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index f5c69d8974e1..b81fe2d63e15 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -669,6 +669,17 @@ static void init_amd_gh(struct cpuinfo_x86 *c)
669 set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH); 669 set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
670} 670}
671 671
672#define MSR_AMD64_DE_CFG 0xC0011029
673
674static void init_amd_ln(struct cpuinfo_x86 *c)
675{
676 /*
677 * Apply erratum 665 fix unconditionally so machines without a BIOS
678 * fix work.
679 */
680 msr_set_bit(MSR_AMD64_DE_CFG, 31);
681}
682
672static void init_amd_bd(struct cpuinfo_x86 *c) 683static void init_amd_bd(struct cpuinfo_x86 *c)
673{ 684{
674 u64 value; 685 u64 value;
@@ -726,6 +737,7 @@ static void init_amd(struct cpuinfo_x86 *c)
726 case 6: init_amd_k7(c); break; 737 case 6: init_amd_k7(c); break;
727 case 0xf: init_amd_k8(c); break; 738 case 0xf: init_amd_k8(c); break;
728 case 0x10: init_amd_gh(c); break; 739 case 0x10: init_amd_gh(c); break;
740 case 0x12: init_amd_ln(c); break;
729 case 0x15: init_amd_bd(c); break; 741 case 0x15: init_amd_bd(c); break;
730 } 742 }
731 743
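
The family-0x12 (Llano) path applies the erratum 665 workaround by setting bit 31 of MSR 0xC0011029 unconditionally, so systems whose BIOS lacks the fix are covered too. A runnable model of the read-modify-write that msr_set_bit() performs (error handling for the actual rdmsr/wrmsr omitted):

#include <stdint.h>

static int set_msr_bit(uint64_t *msr_value, unsigned int bit)
{
	uint64_t mask = 1ULL << bit;	/* bit 31 of MSR_AMD64_DE_CFG here */

	if (*msr_value & mask)
		return 0;		/* already set: no write needed */
	*msr_value |= mask;		/* caller would wrmsr the result */
	return 1;
}
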
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index 27a0228c9cae..620ab06bcf45 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -54,6 +54,7 @@ static LIST_HEAD(pcache);
54 */ 54 */
55static u8 *container; 55static u8 *container;
56static size_t container_size; 56static size_t container_size;
57static bool ucode_builtin;
57 58
58static u32 ucode_new_rev; 59static u32 ucode_new_rev;
59static u8 amd_ucode_patch[PATCH_MAX_SIZE]; 60static u8 amd_ucode_patch[PATCH_MAX_SIZE];
@@ -281,18 +282,22 @@ static bool __init load_builtin_amd_microcode(struct cpio_data *cp,
281void __init load_ucode_amd_bsp(unsigned int family) 282void __init load_ucode_amd_bsp(unsigned int family)
282{ 283{
283 struct cpio_data cp; 284 struct cpio_data cp;
285 bool *builtin;
284 void **data; 286 void **data;
285 size_t *size; 287 size_t *size;
286 288
287#ifdef CONFIG_X86_32 289#ifdef CONFIG_X86_32
288 data = (void **)__pa_nodebug(&ucode_cpio.data); 290 data = (void **)__pa_nodebug(&ucode_cpio.data);
289 size = (size_t *)__pa_nodebug(&ucode_cpio.size); 291 size = (size_t *)__pa_nodebug(&ucode_cpio.size);
292 builtin = (bool *)__pa_nodebug(&ucode_builtin);
290#else 293#else
291 data = &ucode_cpio.data; 294 data = &ucode_cpio.data;
292 size = &ucode_cpio.size; 295 size = &ucode_cpio.size;
296 builtin = &ucode_builtin;
293#endif 297#endif
294 298
295 if (!load_builtin_amd_microcode(&cp, family)) 299 *builtin = load_builtin_amd_microcode(&cp, family);
300 if (!*builtin)
296 cp = find_ucode_in_initrd(); 301 cp = find_ucode_in_initrd();
297 302
298 if (!(cp.data && cp.size)) 303 if (!(cp.data && cp.size))
@@ -355,6 +360,7 @@ void load_ucode_amd_ap(void)
355 unsigned int cpu = smp_processor_id(); 360 unsigned int cpu = smp_processor_id();
356 struct equiv_cpu_entry *eq; 361 struct equiv_cpu_entry *eq;
357 struct microcode_amd *mc; 362 struct microcode_amd *mc;
363 u8 *cont = container;
358 u32 rev, eax; 364 u32 rev, eax;
359 u16 eq_id; 365 u16 eq_id;
360 366
@@ -371,8 +377,12 @@ void load_ucode_amd_ap(void)
371 if (check_current_patch_level(&rev, false)) 377 if (check_current_patch_level(&rev, false))
372 return; 378 return;
373 379
380 /* Add CONFIG_RANDOMIZE_MEMORY offset. */
381 if (!ucode_builtin)
382 cont += PAGE_OFFSET - __PAGE_OFFSET_BASE;
383
374 eax = cpuid_eax(0x00000001); 384 eax = cpuid_eax(0x00000001);
375 eq = (struct equiv_cpu_entry *)(container + CONTAINER_HDR_SZ); 385 eq = (struct equiv_cpu_entry *)(cont + CONTAINER_HDR_SZ);
376 386
377 eq_id = find_equiv_id(eq, eax); 387 eq_id = find_equiv_id(eq, eax);
378 if (!eq_id) 388 if (!eq_id)
@@ -434,6 +444,10 @@ int __init save_microcode_in_initrd_amd(void)
434 else 444 else
435 container = cont_va; 445 container = cont_va;
436 446
447 /* Add CONFIG_RANDOMIZE_MEMORY offset. */
448 if (!ucode_builtin)
449 container += PAGE_OFFSET - __PAGE_OFFSET_BASE;
450
437 eax = cpuid_eax(0x00000001); 451 eax = cpuid_eax(0x00000001);
438 eax = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff); 452 eax = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
439 453
diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
index 680049aa4593..01567aa87503 100644
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -866,105 +866,17 @@ const void *get_xsave_field_ptr(int xsave_state)
866 return get_xsave_addr(&fpu->state.xsave, xsave_state); 866 return get_xsave_addr(&fpu->state.xsave, xsave_state);
867} 867}
868 868
869
870/*
871 * Set xfeatures (aka XSTATE_BV) bit for a feature that we want
872 * to take out of its "init state". This will ensure that an
873 * XRSTOR actually restores the state.
874 */
875static void fpu__xfeature_set_non_init(struct xregs_state *xsave,
876 int xstate_feature_mask)
877{
878 xsave->header.xfeatures |= xstate_feature_mask;
879}
880
881/*
882 * This function is safe to call whether the FPU is in use or not.
883 *
884 * Note that this only works on the current task.
885 *
886 * Inputs:
887 * @xsave_state: state which is defined in xsave.h (e.g. XFEATURE_MASK_FP,
888 * XFEATURE_MASK_SSE, etc...)
889 * @xsave_state_ptr: a pointer to a copy of the state that you would
890 * like written in to the current task's FPU xsave state. This pointer
891 * must not be located in the current tasks's xsave area.
892 * Output:
893 * address of the state in the xsave area or NULL if the state
894 * is not present or is in its 'init state'.
895 */
896static void fpu__xfeature_set_state(int xstate_feature_mask,
897 void *xstate_feature_src, size_t len)
898{
899 struct xregs_state *xsave = &current->thread.fpu.state.xsave;
900 struct fpu *fpu = &current->thread.fpu;
901 void *dst;
902
903 if (!boot_cpu_has(X86_FEATURE_XSAVE)) {
904 WARN_ONCE(1, "%s() attempted with no xsave support", __func__);
905 return;
906 }
907
908 /*
909 * Tell the FPU code that we need the FPU state to be in
910 * 'fpu' (not in the registers), and that we need it to
911 * be stable while we write to it.
912 */
913 fpu__current_fpstate_write_begin();
914
915 /*
916 * This method *WILL* *NOT* work for compact-format
917 * buffers. If the 'xstate_feature_mask' is unset in
918 * xcomp_bv then we may need to move other feature state
919 * "up" in the buffer.
920 */
921 if (xsave->header.xcomp_bv & xstate_feature_mask) {
922 WARN_ON_ONCE(1);
923 goto out;
924 }
925
926 /* find the location in the xsave buffer of the desired state */
927 dst = __raw_xsave_addr(&fpu->state.xsave, xstate_feature_mask);
928
929 /*
930 * Make sure that the pointer being passed in did not
931 * come from the xsave buffer itself.
932 */
933 WARN_ONCE(xstate_feature_src == dst, "set from xsave buffer itself");
934
935 /* put the caller-provided data in the location */
936 memcpy(dst, xstate_feature_src, len);
937
938 /*
939 * Mark the xfeature so that the CPU knows there is state
940 * in the buffer now.
941 */
942 fpu__xfeature_set_non_init(xsave, xstate_feature_mask);
943out:
944 /*
 945 * We are done writing to the 'fpu'. Re-enable preemption
946 * and (possibly) move the fpstate back in to the fpregs.
947 */
948 fpu__current_fpstate_write_end();
949}
950
951#define NR_VALID_PKRU_BITS (CONFIG_NR_PROTECTION_KEYS * 2) 869#define NR_VALID_PKRU_BITS (CONFIG_NR_PROTECTION_KEYS * 2)
952#define PKRU_VALID_MASK (NR_VALID_PKRU_BITS - 1) 870#define PKRU_VALID_MASK (NR_VALID_PKRU_BITS - 1)
953 871
954/* 872/*
955 * This will go out and modify the XSAVE buffer so that PKRU is 873 * This will go out and modify PKRU register to set the access
956 * set to a particular state for access to 'pkey'. 874 * rights for @pkey to @init_val.
957 *
958 * PKRU state does affect kernel access to user memory. We do
 959 * not modify PKRU *itself* here, only the XSAVE state that will
960 * be restored in to PKRU when we return back to userspace.
961 */ 875 */
962int arch_set_user_pkey_access(struct task_struct *tsk, int pkey, 876int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
963 unsigned long init_val) 877 unsigned long init_val)
964{ 878{
965 struct xregs_state *xsave = &tsk->thread.fpu.state.xsave; 879 u32 old_pkru;
966 struct pkru_state *old_pkru_state;
967 struct pkru_state new_pkru_state;
968 int pkey_shift = (pkey * PKRU_BITS_PER_PKEY); 880 int pkey_shift = (pkey * PKRU_BITS_PER_PKEY);
969 u32 new_pkru_bits = 0; 881 u32 new_pkru_bits = 0;
970 882
@@ -974,6 +886,15 @@ int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
974 */ 886 */
975 if (!boot_cpu_has(X86_FEATURE_OSPKE)) 887 if (!boot_cpu_has(X86_FEATURE_OSPKE))
976 return -EINVAL; 888 return -EINVAL;
889 /*
890 * For most XSAVE components, this would be an arduous task:
 891 * bringing fpstate up to date with fpregs, updating fpstate,
892 * then re-populating fpregs. But, for components that are
893 * never lazily managed, we can just access the fpregs
894 * directly. PKRU is never managed lazily, so we can just
895 * manipulate it directly. Make sure it stays that way.
896 */
897 WARN_ON_ONCE(!use_eager_fpu());
977 898
978 /* Set the bits we need in PKRU: */ 899 /* Set the bits we need in PKRU: */
979 if (init_val & PKEY_DISABLE_ACCESS) 900 if (init_val & PKEY_DISABLE_ACCESS)
@@ -984,37 +905,12 @@ int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
984 /* Shift the bits in to the correct place in PKRU for pkey: */ 905 /* Shift the bits in to the correct place in PKRU for pkey: */
985 new_pkru_bits <<= pkey_shift; 906 new_pkru_bits <<= pkey_shift;
986 907
987 /* Locate old copy of the state in the xsave buffer: */ 908 /* Get old PKRU and mask off any old bits in place: */
988 old_pkru_state = get_xsave_addr(xsave, XFEATURE_MASK_PKRU); 909 old_pkru = read_pkru();
989 910 old_pkru &= ~((PKRU_AD_BIT|PKRU_WD_BIT) << pkey_shift);
990 /*
991 * When state is not in the buffer, it is in the init
992 * state, set it manually. Otherwise, copy out the old
993 * state.
994 */
995 if (!old_pkru_state)
996 new_pkru_state.pkru = 0;
997 else
998 new_pkru_state.pkru = old_pkru_state->pkru;
999
1000 /* Mask off any old bits in place: */
1001 new_pkru_state.pkru &= ~((PKRU_AD_BIT|PKRU_WD_BIT) << pkey_shift);
1002
1003 /* Set the newly-requested bits: */
1004 new_pkru_state.pkru |= new_pkru_bits;
1005
1006 /*
1007 * We could theoretically live without zeroing pkru.pad.
1008 * The current XSAVE feature state definition says that
1009 * only bytes 0->3 are used. But we do not want to
1010 * chance leaking kernel stack out to userspace in case a
1011 * memcpy() of the whole xsave buffer was done.
1012 *
1013 * They're in the same cacheline anyway.
1014 */
1015 new_pkru_state.pad = 0;
1016 911
1017 fpu__xfeature_set_state(XFEATURE_MASK_PKRU, &new_pkru_state, sizeof(new_pkru_state)); 912 /* Write old part along with new part: */
913 write_pkru(old_pkru | new_pkru_bits);
1018 914
1019 return 0; 915 return 0;
1020} 916}
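
Since PKRU is never lazily managed, the rewrite updates the register directly instead of editing the xsave buffer: read PKRU, clear the two bits belonging to the key, then or in the new bits. A runnable model of that bit arithmetic, standing in for read_pkru()/write_pkru():

#include <stdint.h>
#include <stdio.h>

#define PKRU_BITS_PER_PKEY 2
#define PKRU_AD_BIT 0x1	/* access-disable */
#define PKRU_WD_BIT 0x2	/* write-disable */

static uint32_t pkru_set_key(uint32_t pkru, int pkey, uint32_t new_bits)
{
	int shift = pkey * PKRU_BITS_PER_PKEY;

	pkru &= ~((PKRU_AD_BIT | PKRU_WD_BIT) << shift);  /* clear old field */
	return pkru | (new_bits << shift);                /* install new one */
}

int main(void)
{
	/* disable writes, keep reads, for protection key 4 */
	printf("%#x\n", pkru_set_key(0, 4, PKRU_WD_BIT));  /* prints 0x200 */
	return 0;
}
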
diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
index 2dda0bc4576e..f16c55bfc090 100644
--- a/arch/x86/kernel/head32.c
+++ b/arch/x86/kernel/head32.c
@@ -25,8 +25,6 @@ static void __init i386_default_early_setup(void)
25 /* Initialize 32bit specific setup functions */ 25 /* Initialize 32bit specific setup functions */
26 x86_init.resources.reserve_resources = i386_reserve_resources; 26 x86_init.resources.reserve_resources = i386_reserve_resources;
27 x86_init.mpparse.setup_ioapic_ids = setup_ioapic_ids_from_mpc; 27 x86_init.mpparse.setup_ioapic_ids = setup_ioapic_ids_from_mpc;
28
29 reserve_bios_regions();
30} 28}
31 29
32asmlinkage __visible void __init i386_start_kernel(void) 30asmlinkage __visible void __init i386_start_kernel(void)
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 99d48e7d2974..54a2372f5dbb 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -183,7 +183,6 @@ void __init x86_64_start_reservations(char *real_mode_data)
183 copy_bootdata(__va(real_mode_data)); 183 copy_bootdata(__va(real_mode_data));
184 184
185 x86_early_init_platform_quirks(); 185 x86_early_init_platform_quirks();
186 reserve_bios_regions();
187 186
188 switch (boot_params.hdr.hardware_subarch) { 187 switch (boot_params.hdr.hardware_subarch) {
189 case X86_SUBARCH_INTEL_MID: 188 case X86_SUBARCH_INTEL_MID:
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index ed16e58658a4..c6dfd801df97 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -1242,7 +1242,7 @@ irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id)
1242 memset(&curr_time, 0, sizeof(struct rtc_time)); 1242 memset(&curr_time, 0, sizeof(struct rtc_time));
1243 1243
1244 if (hpet_rtc_flags & (RTC_UIE | RTC_AIE)) 1244 if (hpet_rtc_flags & (RTC_UIE | RTC_AIE))
1245 mc146818_set_time(&curr_time); 1245 mc146818_get_time(&curr_time);
1246 1246
1247 if (hpet_rtc_flags & RTC_UIE && 1247 if (hpet_rtc_flags & RTC_UIE &&
1248 curr_time.tm_sec != hpet_prev_update_sec) { 1248 curr_time.tm_sec != hpet_prev_update_sec) {
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 61521dc19c10..9f669fdd2010 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -102,8 +102,7 @@ int arch_show_interrupts(struct seq_file *p, int prec)
102 seq_puts(p, " Rescheduling interrupts\n"); 102 seq_puts(p, " Rescheduling interrupts\n");
103 seq_printf(p, "%*s: ", prec, "CAL"); 103 seq_printf(p, "%*s: ", prec, "CAL");
104 for_each_online_cpu(j) 104 for_each_online_cpu(j)
105 seq_printf(p, "%10u ", irq_stats(j)->irq_call_count - 105 seq_printf(p, "%10u ", irq_stats(j)->irq_call_count);
106 irq_stats(j)->irq_tlb_count);
107 seq_puts(p, " Function call interrupts\n"); 106 seq_puts(p, " Function call interrupts\n");
108 seq_printf(p, "%*s: ", prec, "TLB"); 107 seq_printf(p, "%*s: ", prec, "TLB");
109 for_each_online_cpu(j) 108 for_each_online_cpu(j)
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index 1d39bfbd26bb..3692249a70f1 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -289,6 +289,7 @@ void __init kvmclock_init(void)
289 put_cpu(); 289 put_cpu();
290 290
291 x86_platform.calibrate_tsc = kvm_get_tsc_khz; 291 x86_platform.calibrate_tsc = kvm_get_tsc_khz;
292 x86_platform.calibrate_cpu = kvm_get_tsc_khz;
292 x86_platform.get_wallclock = kvm_get_wallclock; 293 x86_platform.get_wallclock = kvm_get_wallclock;
293 x86_platform.set_wallclock = kvm_set_wallclock; 294 x86_platform.set_wallclock = kvm_set_wallclock;
294#ifdef CONFIG_X86_LOCAL_APIC 295#ifdef CONFIG_X86_LOCAL_APIC
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index ad5bc9578a73..1acfd76e3e26 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -56,12 +56,12 @@ asm (".pushsection .entry.text, \"ax\"\n"
56 ".popsection"); 56 ".popsection");
57 57
58/* identity function, which can be inlined */ 58/* identity function, which can be inlined */
59u32 _paravirt_ident_32(u32 x) 59u32 notrace _paravirt_ident_32(u32 x)
60{ 60{
61 return x; 61 return x;
62} 62}
63 63
64u64 _paravirt_ident_64(u64 x) 64u64 notrace _paravirt_ident_64(u64 x)
65{ 65{
66 return x; 66 return x;
67} 67}
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 991b77986d57..0fa60f5f5a16 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -936,8 +936,6 @@ void __init setup_arch(char **cmdline_p)
936 936
937 x86_init.oem.arch_setup(); 937 x86_init.oem.arch_setup();
938 938
939 kernel_randomize_memory();
940
941 iomem_resource.end = (1ULL << boot_cpu_data.x86_phys_bits) - 1; 939 iomem_resource.end = (1ULL << boot_cpu_data.x86_phys_bits) - 1;
942 setup_memory_map(); 940 setup_memory_map();
943 parse_setup_data(); 941 parse_setup_data();
@@ -1055,6 +1053,12 @@ void __init setup_arch(char **cmdline_p)
1055 1053
1056 max_possible_pfn = max_pfn; 1054 max_possible_pfn = max_pfn;
1057 1055
1056 /*
1057 * Define random base addresses for memory sections after max_pfn is
1058 * defined and before each memory section base is used.
1059 */
1060 kernel_randomize_memory();
1061
1058#ifdef CONFIG_X86_32 1062#ifdef CONFIG_X86_32
1059 /* max_low_pfn get updated here */ 1063 /* max_low_pfn get updated here */
1060 find_low_pfn_range(); 1064 find_low_pfn_range();
@@ -1097,6 +1101,8 @@ void __init setup_arch(char **cmdline_p)
1097 efi_find_mirror(); 1101 efi_find_mirror();
1098 } 1102 }
1099 1103
1104 reserve_bios_regions();
1105
1100 /* 1106 /*
1101 * The EFI specification says that boot service code won't be called 1107 * The EFI specification says that boot service code won't be called
1102 * after ExitBootServices(). This is, in fact, a lie. 1108 * after ExitBootServices(). This is, in fact, a lie.
@@ -1125,7 +1131,15 @@ void __init setup_arch(char **cmdline_p)
1125 1131
1126 early_trap_pf_init(); 1132 early_trap_pf_init();
1127 1133
1128 setup_real_mode(); 1134 /*
1135 * Update mmu_cr4_features (and, indirectly, trampoline_cr4_features)
1136 * with the current CR4 value. This may not be necessary, but
1137 * auditing all the early-boot CR4 manipulation would be needed to
1138 * rule it out.
1139 */
1140 if (boot_cpu_data.cpuid_level >= 0)
1141 /* A CPU has %cr4 if and only if it has CPUID. */
1142 mmu_cr4_features = __read_cr4();
1129 1143
1130 memblock_set_current_limit(get_max_mapped()); 1144 memblock_set_current_limit(get_max_mapped());
1131 1145
@@ -1174,13 +1188,6 @@ void __init setup_arch(char **cmdline_p)
1174 1188
1175 kasan_init(); 1189 kasan_init();
1176 1190
1177 if (boot_cpu_data.cpuid_level >= 0) {
1178 /* A CPU has %cr4 if and only if it has CPUID */
1179 mmu_cr4_features = __read_cr4();
1180 if (trampoline_cr4_features)
1181 *trampoline_cr4_features = mmu_cr4_features;
1182 }
1183
1184#ifdef CONFIG_X86_32 1191#ifdef CONFIG_X86_32
1185 /* sync back kernel address range */ 1192 /* sync back kernel address range */
1186 clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY, 1193 clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY,
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 2a6e84a30a54..4296beb8fdd3 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -100,10 +100,11 @@ EXPORT_PER_CPU_SYMBOL(cpu_info);
100/* Logical package management. We might want to allocate that dynamically */ 100/* Logical package management. We might want to allocate that dynamically */
101static int *physical_to_logical_pkg __read_mostly; 101static int *physical_to_logical_pkg __read_mostly;
102static unsigned long *physical_package_map __read_mostly; 102static unsigned long *physical_package_map __read_mostly;
103static unsigned long *logical_package_map __read_mostly;
104static unsigned int max_physical_pkg_id __read_mostly; 103static unsigned int max_physical_pkg_id __read_mostly;
105unsigned int __max_logical_packages __read_mostly; 104unsigned int __max_logical_packages __read_mostly;
106EXPORT_SYMBOL(__max_logical_packages); 105EXPORT_SYMBOL(__max_logical_packages);
106static unsigned int logical_packages __read_mostly;
107static bool logical_packages_frozen __read_mostly;
107 108
108/* Maximum number of SMT threads on any online core */ 109/* Maximum number of SMT threads on any online core */
109int __max_smt_threads __read_mostly; 110int __max_smt_threads __read_mostly;
@@ -277,14 +278,14 @@ int topology_update_package_map(unsigned int apicid, unsigned int cpu)
277 if (test_and_set_bit(pkg, physical_package_map)) 278 if (test_and_set_bit(pkg, physical_package_map))
278 goto found; 279 goto found;
279 280
280 new = find_first_zero_bit(logical_package_map, __max_logical_packages); 281 if (logical_packages_frozen) {
281 if (new >= __max_logical_packages) {
282 physical_to_logical_pkg[pkg] = -1; 282 physical_to_logical_pkg[pkg] = -1;
283 pr_warn("APIC(%x) Package %u exceeds logical package map\n", 283 pr_warn("APIC(%x) Package %u exceeds logical package max\n",
284 apicid, pkg); 284 apicid, pkg);
285 return -ENOSPC; 285 return -ENOSPC;
286 } 286 }
287 set_bit(new, logical_package_map); 287
288 new = logical_packages++;
288 pr_info("APIC(%x) Converting physical %u to logical package %u\n", 289 pr_info("APIC(%x) Converting physical %u to logical package %u\n",
289 apicid, pkg, new); 290 apicid, pkg, new);
290 physical_to_logical_pkg[pkg] = new; 291 physical_to_logical_pkg[pkg] = new;
@@ -341,6 +342,7 @@ static void __init smp_init_package_map(void)
341 } 342 }
342 343
343 __max_logical_packages = DIV_ROUND_UP(total_cpus, ncpus); 344 __max_logical_packages = DIV_ROUND_UP(total_cpus, ncpus);
345 logical_packages = 0;
344 346
345 /* 347 /*
346 * Possibly larger than what we need as the number of apic ids per 348 * Possibly larger than what we need as the number of apic ids per
@@ -352,10 +354,6 @@ static void __init smp_init_package_map(void)
352 memset(physical_to_logical_pkg, 0xff, size); 354 memset(physical_to_logical_pkg, 0xff, size);
353 size = BITS_TO_LONGS(max_physical_pkg_id) * sizeof(unsigned long); 355 size = BITS_TO_LONGS(max_physical_pkg_id) * sizeof(unsigned long);
354 physical_package_map = kzalloc(size, GFP_KERNEL); 356 physical_package_map = kzalloc(size, GFP_KERNEL);
355 size = BITS_TO_LONGS(__max_logical_packages) * sizeof(unsigned long);
356 logical_package_map = kzalloc(size, GFP_KERNEL);
357
358 pr_info("Max logical packages: %u\n", __max_logical_packages);
359 357
360 for_each_present_cpu(cpu) { 358 for_each_present_cpu(cpu) {
361 unsigned int apicid = apic->cpu_present_to_apicid(cpu); 359 unsigned int apicid = apic->cpu_present_to_apicid(cpu);
@@ -369,6 +367,15 @@ static void __init smp_init_package_map(void)
369 set_cpu_possible(cpu, false); 367 set_cpu_possible(cpu, false);
370 set_cpu_present(cpu, false); 368 set_cpu_present(cpu, false);
371 } 369 }
370
371 if (logical_packages > __max_logical_packages) {
372 pr_warn("Detected more packages (%u), then computed by BIOS data (%u).\n",
373 logical_packages, __max_logical_packages);
374 logical_packages_frozen = true;
375 __max_logical_packages = logical_packages;
376 }
377
378 pr_info("Max logical packages: %u\n", __max_logical_packages);
372} 379}
373 380
374void __init smp_store_boot_cpu_info(void) 381void __init smp_store_boot_cpu_info(void)
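The smpboot.c hunks above swap the bitmap-based logical package allocator for a plain counter that is frozen only once enumeration finds more packages than the BIOS data predicted. A toy userspace model of that scheme (illustrative names and values, not kernel code):

#include <stdio.h>
#include <stdbool.h>

/*
 * Logical package ids are handed out sequentially; if boot-time
 * enumeration finds more packages than the BIOS-derived estimate,
 * the maximum is raised once and then frozen so later (hotplug)
 * overflows fail instead of growing the map further.
 */
static unsigned int logical_packages;
static unsigned int max_logical_packages = 2;   /* assumed BIOS estimate */
static bool logical_packages_frozen;

static int assign_logical_pkg(unsigned int phys)
{
        if (logical_packages_frozen &&
            logical_packages >= max_logical_packages)
                return -1;                      /* -ENOSPC in the kernel */
        printf("physical %u -> logical %u\n", phys, logical_packages);
        return logical_packages++;
}

int main(void)
{
        for (unsigned int p = 0; p < 3; p++)
                assign_logical_pkg(p);

        if (logical_packages > max_logical_packages) {
                printf("more packages (%u) than computed by BIOS data (%u)\n",
                       logical_packages, max_logical_packages);
                max_logical_packages = logical_packages;
                logical_packages_frozen = true;
        }
        return 0;
}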
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 1ef87e887051..78b9cb5a26af 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -22,6 +22,7 @@
22#include <asm/nmi.h> 22#include <asm/nmi.h>
23#include <asm/x86_init.h> 23#include <asm/x86_init.h>
24#include <asm/geode.h> 24#include <asm/geode.h>
25#include <asm/apic.h>
25 26
26unsigned int __read_mostly cpu_khz; /* TSC clocks / usec, not used here */ 27unsigned int __read_mostly cpu_khz; /* TSC clocks / usec, not used here */
27EXPORT_SYMBOL(cpu_khz); 28EXPORT_SYMBOL(cpu_khz);
@@ -1249,6 +1250,9 @@ static void tsc_refine_calibration_work(struct work_struct *work)
1249 (unsigned long)tsc_khz / 1000, 1250 (unsigned long)tsc_khz / 1000,
1250 (unsigned long)tsc_khz % 1000); 1251 (unsigned long)tsc_khz % 1000);
1251 1252
1253 /* Inform the TSC deadline clockevent devices about the recalibration */
1254 lapic_update_tsc_freq();
1255
1252out: 1256out:
1253 if (boot_cpu_has(X86_FEATURE_ART)) 1257 if (boot_cpu_has(X86_FEATURE_ART))
1254 art_related_clocksource = &clocksource_tsc; 1258 art_related_clocksource = &clocksource_tsc;
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index 6c1ff31d99ff..495c776de4b4 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -357,20 +357,22 @@ static void riprel_analyze(struct arch_uprobe *auprobe, struct insn *insn)
357 *cursor &= 0xfe; 357 *cursor &= 0xfe;
358 } 358 }
359 /* 359 /*
360 * Similar treatment for VEX3 prefix. 360 * Similar treatment for VEX3/EVEX prefix.
361 * TODO: add XOP/EVEX treatment when insn decoder supports them 361 * TODO: add XOP treatment when the insn decoder supports it
362 */ 362 */
363 if (insn->vex_prefix.nbytes == 3) { 363 if (insn->vex_prefix.nbytes >= 3) {
364 /* 364 /*
365 * vex2: c5 rvvvvLpp (has no b bit) 365 * vex2: c5 rvvvvLpp (has no b bit)
366 * vex3/xop: c4/8f rxbmmmmm wvvvvLpp 366 * vex3/xop: c4/8f rxbmmmmm wvvvvLpp
367 * evex: 62 rxbR00mm wvvvv1pp zllBVaaa 367 * evex: 62 rxbR00mm wvvvv1pp zllBVaaa
368 * (evex will need setting of both b and x since 368 * Setting VEX3.b (setting because it has inverted meaning).
369 * in non-sib encoding evex.x is 4th bit of MODRM.rm) 369 * Setting EVEX.x since (in non-SIB encoding) EVEX.x
370 * Setting VEX3.b (setting because it has inverted meaning): 370 * is the 4th bit of MODRM.rm, and needs the same treatment.
371 * For VEX3-encoded insns, the VEX3.x value has no effect in
372 * non-SIB encoding, so the change is superfluous but harmless.
371 */ 373 */
372 cursor = auprobe->insn + insn_offset_vex_prefix(insn) + 1; 374 cursor = auprobe->insn + insn_offset_vex_prefix(insn) + 1;
373 *cursor |= 0x20; 375 *cursor |= 0x60;
374 } 376 }
375 377
376 /* 378 /*
@@ -415,12 +417,10 @@ static void riprel_analyze(struct arch_uprobe *auprobe, struct insn *insn)
415 417
416 reg = MODRM_REG(insn); /* Fetch modrm.reg */ 418 reg = MODRM_REG(insn); /* Fetch modrm.reg */
417 reg2 = 0xff; /* Fetch vex.vvvv */ 419 reg2 = 0xff; /* Fetch vex.vvvv */
418 if (insn->vex_prefix.nbytes == 2) 420 if (insn->vex_prefix.nbytes)
419 reg2 = insn->vex_prefix.bytes[1];
420 else if (insn->vex_prefix.nbytes == 3)
421 reg2 = insn->vex_prefix.bytes[2]; 421 reg2 = insn->vex_prefix.bytes[2];
422 /* 422 /*
423 * TODO: add XOP, EXEV vvvv reading. 423 * TODO: add XOP vvvv reading.
424 * 424 *
425 * vex.vvvv field is in bits 6-3, bits are inverted. 425 * vex.vvvv field is in bits 6-3, bits are inverted.
426 * But in 32-bit mode, high-order bit may be ignored. 426 * But in 32-bit mode, high-order bit may be ignored.
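The uprobes hunks above extend the RIP-relative rewrite to EVEX by OR-ing 0x60 instead of 0x20 into byte 1 of the prefix. A minimal sketch of that bit manipulation, assuming the usual VEX3/EVEX layout in which bits 7..5 of that byte hold ~R, ~X and ~B with inverted meaning:

#include <stdio.h>
#include <stdint.h>

/*
 * OR-ing 0x60 forces the encoded (inverted) X and B bits to 1, i.e.
 * the effective X and B extension bits to 0, matching the low scratch
 * register that riprel_analyze() substitutes for the operand.
 */
int main(void)
{
        uint8_t byte1 = 0x81;   /* example VEX3/EVEX payload byte */

        printf("before: %#04x  X=%d B=%d\n", byte1,
               !((byte1 >> 6) & 1), !((byte1 >> 5) & 1));
        byte1 |= 0x60;          /* same operation as "*cursor |= 0x60" */
        printf("after:  %#04x  X=%d B=%d\n", byte1,
               !((byte1 >> 6) & 1), !((byte1 >> 5) & 1));
        return 0;
}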
diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c
index 5f42d038fcb4..c7220ba94aa7 100644
--- a/arch/x86/kvm/ioapic.c
+++ b/arch/x86/kvm/ioapic.c
@@ -109,6 +109,7 @@ static void __rtc_irq_eoi_tracking_restore_one(struct kvm_vcpu *vcpu)
109{ 109{
110 bool new_val, old_val; 110 bool new_val, old_val;
111 struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic; 111 struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
112 struct dest_map *dest_map = &ioapic->rtc_status.dest_map;
112 union kvm_ioapic_redirect_entry *e; 113 union kvm_ioapic_redirect_entry *e;
113 114
114 e = &ioapic->redirtbl[RTC_GSI]; 115 e = &ioapic->redirtbl[RTC_GSI];
@@ -117,16 +118,17 @@ static void __rtc_irq_eoi_tracking_restore_one(struct kvm_vcpu *vcpu)
117 return; 118 return;
118 119
119 new_val = kvm_apic_pending_eoi(vcpu, e->fields.vector); 120 new_val = kvm_apic_pending_eoi(vcpu, e->fields.vector);
120 old_val = test_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map.map); 121 old_val = test_bit(vcpu->vcpu_id, dest_map->map);
121 122
122 if (new_val == old_val) 123 if (new_val == old_val)
123 return; 124 return;
124 125
125 if (new_val) { 126 if (new_val) {
126 __set_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map.map); 127 __set_bit(vcpu->vcpu_id, dest_map->map);
128 dest_map->vectors[vcpu->vcpu_id] = e->fields.vector;
127 ioapic->rtc_status.pending_eoi++; 129 ioapic->rtc_status.pending_eoi++;
128 } else { 130 } else {
129 __clear_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map.map); 131 __clear_bit(vcpu->vcpu_id, dest_map->map);
130 ioapic->rtc_status.pending_eoi--; 132 ioapic->rtc_status.pending_eoi--;
131 rtc_status_pending_eoi_check_valid(ioapic); 133 rtc_status_pending_eoi_check_valid(ioapic);
132 } 134 }
diff --git a/arch/x86/kvm/pmu_amd.c b/arch/x86/kvm/pmu_amd.c
index 39b91127ef07..cd944435dfbd 100644
--- a/arch/x86/kvm/pmu_amd.c
+++ b/arch/x86/kvm/pmu_amd.c
@@ -23,8 +23,8 @@
23static struct kvm_event_hw_type_mapping amd_event_mapping[] = { 23static struct kvm_event_hw_type_mapping amd_event_mapping[] = {
24 [0] = { 0x76, 0x00, PERF_COUNT_HW_CPU_CYCLES }, 24 [0] = { 0x76, 0x00, PERF_COUNT_HW_CPU_CYCLES },
25 [1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS }, 25 [1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
26 [2] = { 0x80, 0x00, PERF_COUNT_HW_CACHE_REFERENCES }, 26 [2] = { 0x7d, 0x07, PERF_COUNT_HW_CACHE_REFERENCES },
27 [3] = { 0x81, 0x00, PERF_COUNT_HW_CACHE_MISSES }, 27 [3] = { 0x7e, 0x07, PERF_COUNT_HW_CACHE_MISSES },
28 [4] = { 0xc2, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS }, 28 [4] = { 0xc2, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
29 [5] = { 0xc3, 0x00, PERF_COUNT_HW_BRANCH_MISSES }, 29 [5] = { 0xc3, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
30 [6] = { 0xd0, 0x00, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND }, 30 [6] = { 0xd0, 0x00, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index a45d8580f91e..5cede40e2552 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -422,6 +422,7 @@ struct nested_vmx {
422 struct list_head vmcs02_pool; 422 struct list_head vmcs02_pool;
423 int vmcs02_num; 423 int vmcs02_num;
424 u64 vmcs01_tsc_offset; 424 u64 vmcs01_tsc_offset;
425 bool change_vmcs01_virtual_x2apic_mode;
425 /* L2 must run next, and mustn't decide to exit to L1. */ 426 /* L2 must run next, and mustn't decide to exit to L1. */
426 bool nested_run_pending; 427 bool nested_run_pending;
427 /* 428 /*
@@ -435,6 +436,8 @@ struct nested_vmx {
435 bool pi_pending; 436 bool pi_pending;
436 u16 posted_intr_nv; 437 u16 posted_intr_nv;
437 438
439 unsigned long *msr_bitmap;
440
438 struct hrtimer preemption_timer; 441 struct hrtimer preemption_timer;
439 bool preemption_timer_expired; 442 bool preemption_timer_expired;
440 443
@@ -924,7 +927,6 @@ static unsigned long *vmx_msr_bitmap_legacy;
924static unsigned long *vmx_msr_bitmap_longmode; 927static unsigned long *vmx_msr_bitmap_longmode;
925static unsigned long *vmx_msr_bitmap_legacy_x2apic; 928static unsigned long *vmx_msr_bitmap_legacy_x2apic;
926static unsigned long *vmx_msr_bitmap_longmode_x2apic; 929static unsigned long *vmx_msr_bitmap_longmode_x2apic;
927static unsigned long *vmx_msr_bitmap_nested;
928static unsigned long *vmx_vmread_bitmap; 930static unsigned long *vmx_vmread_bitmap;
929static unsigned long *vmx_vmwrite_bitmap; 931static unsigned long *vmx_vmwrite_bitmap;
930 932
@@ -2198,6 +2200,12 @@ static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
2198 new.control) != old.control); 2200 new.control) != old.control);
2199} 2201}
2200 2202
2203static void decache_tsc_multiplier(struct vcpu_vmx *vmx)
2204{
2205 vmx->current_tsc_ratio = vmx->vcpu.arch.tsc_scaling_ratio;
2206 vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio);
2207}
2208
2201/* 2209/*
2202 * Switches to specified vcpu, until a matching vcpu_put(), but assumes 2210 * Switches to specified vcpu, until a matching vcpu_put(), but assumes
2203 * vcpu mutex is already taken. 2211 * vcpu mutex is already taken.
@@ -2256,10 +2264,8 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
2256 2264
2257 /* Setup TSC multiplier */ 2265 /* Setup TSC multiplier */
2258 if (kvm_has_tsc_control && 2266 if (kvm_has_tsc_control &&
2259 vmx->current_tsc_ratio != vcpu->arch.tsc_scaling_ratio) { 2267 vmx->current_tsc_ratio != vcpu->arch.tsc_scaling_ratio)
2260 vmx->current_tsc_ratio = vcpu->arch.tsc_scaling_ratio; 2268 decache_tsc_multiplier(vmx);
2261 vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio);
2262 }
2263 2269
2264 vmx_vcpu_pi_load(vcpu, cpu); 2270 vmx_vcpu_pi_load(vcpu, cpu);
2265 vmx->host_pkru = read_pkru(); 2271 vmx->host_pkru = read_pkru();
@@ -2508,7 +2514,7 @@ static void vmx_set_msr_bitmap(struct kvm_vcpu *vcpu)
2508 unsigned long *msr_bitmap; 2514 unsigned long *msr_bitmap;
2509 2515
2510 if (is_guest_mode(vcpu)) 2516 if (is_guest_mode(vcpu))
2511 msr_bitmap = vmx_msr_bitmap_nested; 2517 msr_bitmap = to_vmx(vcpu)->nested.msr_bitmap;
2512 else if (cpu_has_secondary_exec_ctrls() && 2518 else if (cpu_has_secondary_exec_ctrls() &&
2513 (vmcs_read32(SECONDARY_VM_EXEC_CONTROL) & 2519 (vmcs_read32(SECONDARY_VM_EXEC_CONTROL) &
2514 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE)) { 2520 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE)) {
@@ -6363,13 +6369,6 @@ static __init int hardware_setup(void)
6363 if (!vmx_msr_bitmap_longmode_x2apic) 6369 if (!vmx_msr_bitmap_longmode_x2apic)
6364 goto out4; 6370 goto out4;
6365 6371
6366 if (nested) {
6367 vmx_msr_bitmap_nested =
6368 (unsigned long *)__get_free_page(GFP_KERNEL);
6369 if (!vmx_msr_bitmap_nested)
6370 goto out5;
6371 }
6372
6373 vmx_vmread_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL); 6372 vmx_vmread_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL);
6374 if (!vmx_vmread_bitmap) 6373 if (!vmx_vmread_bitmap)
6375 goto out6; 6374 goto out6;
@@ -6392,8 +6391,6 @@ static __init int hardware_setup(void)
6392 6391
6393 memset(vmx_msr_bitmap_legacy, 0xff, PAGE_SIZE); 6392 memset(vmx_msr_bitmap_legacy, 0xff, PAGE_SIZE);
6394 memset(vmx_msr_bitmap_longmode, 0xff, PAGE_SIZE); 6393 memset(vmx_msr_bitmap_longmode, 0xff, PAGE_SIZE);
6395 if (nested)
6396 memset(vmx_msr_bitmap_nested, 0xff, PAGE_SIZE);
6397 6394
6398 if (setup_vmcs_config(&vmcs_config) < 0) { 6395 if (setup_vmcs_config(&vmcs_config) < 0) {
6399 r = -EIO; 6396 r = -EIO;
@@ -6529,9 +6526,6 @@ out8:
6529out7: 6526out7:
6530 free_page((unsigned long)vmx_vmread_bitmap); 6527 free_page((unsigned long)vmx_vmread_bitmap);
6531out6: 6528out6:
6532 if (nested)
6533 free_page((unsigned long)vmx_msr_bitmap_nested);
6534out5:
6535 free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic); 6529 free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic);
6536out4: 6530out4:
6537 free_page((unsigned long)vmx_msr_bitmap_longmode); 6531 free_page((unsigned long)vmx_msr_bitmap_longmode);
@@ -6557,8 +6551,6 @@ static __exit void hardware_unsetup(void)
6557 free_page((unsigned long)vmx_io_bitmap_a); 6551 free_page((unsigned long)vmx_io_bitmap_a);
6558 free_page((unsigned long)vmx_vmwrite_bitmap); 6552 free_page((unsigned long)vmx_vmwrite_bitmap);
6559 free_page((unsigned long)vmx_vmread_bitmap); 6553 free_page((unsigned long)vmx_vmread_bitmap);
6560 if (nested)
6561 free_page((unsigned long)vmx_msr_bitmap_nested);
6562 6554
6563 free_kvm_area(); 6555 free_kvm_area();
6564} 6556}
@@ -6995,16 +6987,21 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
6995 return 1; 6987 return 1;
6996 } 6988 }
6997 6989
6990 if (cpu_has_vmx_msr_bitmap()) {
6991 vmx->nested.msr_bitmap =
6992 (unsigned long *)__get_free_page(GFP_KERNEL);
6993 if (!vmx->nested.msr_bitmap)
6994 goto out_msr_bitmap;
6995 }
6996
6998 vmx->nested.cached_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL); 6997 vmx->nested.cached_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL);
6999 if (!vmx->nested.cached_vmcs12) 6998 if (!vmx->nested.cached_vmcs12)
7000 return -ENOMEM; 6999 goto out_cached_vmcs12;
7001 7000
7002 if (enable_shadow_vmcs) { 7001 if (enable_shadow_vmcs) {
7003 shadow_vmcs = alloc_vmcs(); 7002 shadow_vmcs = alloc_vmcs();
7004 if (!shadow_vmcs) { 7003 if (!shadow_vmcs)
7005 kfree(vmx->nested.cached_vmcs12); 7004 goto out_shadow_vmcs;
7006 return -ENOMEM;
7007 }
7008 /* mark vmcs as shadow */ 7005 /* mark vmcs as shadow */
7009 shadow_vmcs->revision_id |= (1u << 31); 7006 shadow_vmcs->revision_id |= (1u << 31);
7010 /* init shadow vmcs */ 7007 /* init shadow vmcs */
@@ -7024,6 +7021,15 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
7024 skip_emulated_instruction(vcpu); 7021 skip_emulated_instruction(vcpu);
7025 nested_vmx_succeed(vcpu); 7022 nested_vmx_succeed(vcpu);
7026 return 1; 7023 return 1;
7024
7025out_shadow_vmcs:
7026 kfree(vmx->nested.cached_vmcs12);
7027
7028out_cached_vmcs12:
7029 free_page((unsigned long)vmx->nested.msr_bitmap);
7030
7031out_msr_bitmap:
7032 return -ENOMEM;
7027} 7033}
7028 7034
7029/* 7035/*
@@ -7098,6 +7104,10 @@ static void free_nested(struct vcpu_vmx *vmx)
7098 vmx->nested.vmxon = false; 7104 vmx->nested.vmxon = false;
7099 free_vpid(vmx->nested.vpid02); 7105 free_vpid(vmx->nested.vpid02);
7100 nested_release_vmcs12(vmx); 7106 nested_release_vmcs12(vmx);
7107 if (vmx->nested.msr_bitmap) {
7108 free_page((unsigned long)vmx->nested.msr_bitmap);
7109 vmx->nested.msr_bitmap = NULL;
7110 }
7101 if (enable_shadow_vmcs) 7111 if (enable_shadow_vmcs)
7102 free_vmcs(vmx->nested.current_shadow_vmcs); 7112 free_vmcs(vmx->nested.current_shadow_vmcs);
7103 kfree(vmx->nested.cached_vmcs12); 7113 kfree(vmx->nested.cached_vmcs12);
@@ -8419,6 +8429,12 @@ static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
8419{ 8429{
8420 u32 sec_exec_control; 8430 u32 sec_exec_control;
8421 8431
8432 /* Postpone execution until vmcs01 is the current VMCS. */
8433 if (is_guest_mode(vcpu)) {
8434 to_vmx(vcpu)->nested.change_vmcs01_virtual_x2apic_mode = true;
8435 return;
8436 }
8437
8422 /* 8438 /*
8423 * There is no point in enabling virtualize x2apic without enabling 8439 * There is no point in enabling virtualize x2apic without enabling
8424 * apicv 8440 * apicv
@@ -9472,8 +9488,10 @@ static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu,
9472{ 9488{
9473 int msr; 9489 int msr;
9474 struct page *page; 9490 struct page *page;
9475 unsigned long *msr_bitmap; 9491 unsigned long *msr_bitmap_l1;
9492 unsigned long *msr_bitmap_l0 = to_vmx(vcpu)->nested.msr_bitmap;
9476 9493
9494 /* This shortcut is ok because we support only x2APIC MSRs so far. */
9477 if (!nested_cpu_has_virt_x2apic_mode(vmcs12)) 9495 if (!nested_cpu_has_virt_x2apic_mode(vmcs12))
9478 return false; 9496 return false;
9479 9497
@@ -9482,63 +9500,37 @@ static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu,
9482 WARN_ON(1); 9500 WARN_ON(1);
9483 return false; 9501 return false;
9484 } 9502 }
9485 msr_bitmap = (unsigned long *)kmap(page); 9503 msr_bitmap_l1 = (unsigned long *)kmap(page);
9486 if (!msr_bitmap) { 9504 if (!msr_bitmap_l1) {
9487 nested_release_page_clean(page); 9505 nested_release_page_clean(page);
9488 WARN_ON(1); 9506 WARN_ON(1);
9489 return false; 9507 return false;
9490 } 9508 }
9491 9509
9510 memset(msr_bitmap_l0, 0xff, PAGE_SIZE);
9511
9492 if (nested_cpu_has_virt_x2apic_mode(vmcs12)) { 9512 if (nested_cpu_has_virt_x2apic_mode(vmcs12)) {
9493 if (nested_cpu_has_apic_reg_virt(vmcs12)) 9513 if (nested_cpu_has_apic_reg_virt(vmcs12))
9494 for (msr = 0x800; msr <= 0x8ff; msr++) 9514 for (msr = 0x800; msr <= 0x8ff; msr++)
9495 nested_vmx_disable_intercept_for_msr( 9515 nested_vmx_disable_intercept_for_msr(
9496 msr_bitmap, 9516 msr_bitmap_l1, msr_bitmap_l0,
9497 vmx_msr_bitmap_nested,
9498 msr, MSR_TYPE_R); 9517 msr, MSR_TYPE_R);
9499 /* TPR is allowed */ 9518
9500 nested_vmx_disable_intercept_for_msr(msr_bitmap, 9519 nested_vmx_disable_intercept_for_msr(
9501 vmx_msr_bitmap_nested, 9520 msr_bitmap_l1, msr_bitmap_l0,
9502 APIC_BASE_MSR + (APIC_TASKPRI >> 4), 9521 APIC_BASE_MSR + (APIC_TASKPRI >> 4),
9503 MSR_TYPE_R | MSR_TYPE_W); 9522 MSR_TYPE_R | MSR_TYPE_W);
9523
9504 if (nested_cpu_has_vid(vmcs12)) { 9524 if (nested_cpu_has_vid(vmcs12)) {
9505 /* EOI and self-IPI are allowed */
9506 nested_vmx_disable_intercept_for_msr( 9525 nested_vmx_disable_intercept_for_msr(
9507 msr_bitmap, 9526 msr_bitmap_l1, msr_bitmap_l0,
9508 vmx_msr_bitmap_nested,
9509 APIC_BASE_MSR + (APIC_EOI >> 4), 9527 APIC_BASE_MSR + (APIC_EOI >> 4),
9510 MSR_TYPE_W); 9528 MSR_TYPE_W);
9511 nested_vmx_disable_intercept_for_msr( 9529 nested_vmx_disable_intercept_for_msr(
9512 msr_bitmap, 9530 msr_bitmap_l1, msr_bitmap_l0,
9513 vmx_msr_bitmap_nested,
9514 APIC_BASE_MSR + (APIC_SELF_IPI >> 4), 9531 APIC_BASE_MSR + (APIC_SELF_IPI >> 4),
9515 MSR_TYPE_W); 9532 MSR_TYPE_W);
9516 } 9533 }
9517 } else {
9518 /*
9519 * Enable reading intercept of all the x2apic
9520 * MSRs. We should not rely on vmcs12 to do any
9521 * optimizations here, it may have been modified
9522 * by L1.
9523 */
9524 for (msr = 0x800; msr <= 0x8ff; msr++)
9525 __vmx_enable_intercept_for_msr(
9526 vmx_msr_bitmap_nested,
9527 msr,
9528 MSR_TYPE_R);
9529
9530 __vmx_enable_intercept_for_msr(
9531 vmx_msr_bitmap_nested,
9532 APIC_BASE_MSR + (APIC_TASKPRI >> 4),
9533 MSR_TYPE_W);
9534 __vmx_enable_intercept_for_msr(
9535 vmx_msr_bitmap_nested,
9536 APIC_BASE_MSR + (APIC_EOI >> 4),
9537 MSR_TYPE_W);
9538 __vmx_enable_intercept_for_msr(
9539 vmx_msr_bitmap_nested,
9540 APIC_BASE_MSR + (APIC_SELF_IPI >> 4),
9541 MSR_TYPE_W);
9542 } 9534 }
9543 kunmap(page); 9535 kunmap(page);
9544 nested_release_page_clean(page); 9536 nested_release_page_clean(page);
@@ -9957,10 +9949,10 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
9957 } 9949 }
9958 9950
9959 if (cpu_has_vmx_msr_bitmap() && 9951 if (cpu_has_vmx_msr_bitmap() &&
9960 exec_control & CPU_BASED_USE_MSR_BITMAPS) { 9952 exec_control & CPU_BASED_USE_MSR_BITMAPS &&
9961 nested_vmx_merge_msr_bitmap(vcpu, vmcs12); 9953 nested_vmx_merge_msr_bitmap(vcpu, vmcs12))
9962 /* MSR_BITMAP will be set by following vmx_set_efer. */ 9954 ; /* MSR_BITMAP will be set by following vmx_set_efer. */
9963 } else 9955 else
9964 exec_control &= ~CPU_BASED_USE_MSR_BITMAPS; 9956 exec_control &= ~CPU_BASED_USE_MSR_BITMAPS;
9965 9957
9966 /* 9958 /*
@@ -10011,6 +10003,8 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
10011 vmx->nested.vmcs01_tsc_offset + vmcs12->tsc_offset); 10003 vmx->nested.vmcs01_tsc_offset + vmcs12->tsc_offset);
10012 else 10004 else
10013 vmcs_write64(TSC_OFFSET, vmx->nested.vmcs01_tsc_offset); 10005 vmcs_write64(TSC_OFFSET, vmx->nested.vmcs01_tsc_offset);
10006 if (kvm_has_tsc_control)
10007 decache_tsc_multiplier(vmx);
10014 10008
10015 if (enable_vpid) { 10009 if (enable_vpid) {
10016 /* 10010 /*
@@ -10767,6 +10761,14 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
10767 else 10761 else
10768 vmcs_set_bits(PIN_BASED_VM_EXEC_CONTROL, 10762 vmcs_set_bits(PIN_BASED_VM_EXEC_CONTROL,
10769 PIN_BASED_VMX_PREEMPTION_TIMER); 10763 PIN_BASED_VMX_PREEMPTION_TIMER);
10764 if (kvm_has_tsc_control)
10765 decache_tsc_multiplier(vmx);
10766
10767 if (vmx->nested.change_vmcs01_virtual_x2apic_mode) {
10768 vmx->nested.change_vmcs01_virtual_x2apic_mode = false;
10769 vmx_set_virtual_x2apic_mode(vcpu,
10770 vcpu->arch.apic_base & X2APIC_ENABLE);
10771 }
10770 10772
10771 /* This is needed for same reason as it was needed in prepare_vmcs02 */ 10773 /* This is needed for same reason as it was needed in prepare_vmcs02 */
10772 vmx->host_rsp = 0; 10774 vmx->host_rsp = 0;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 19f9f9e05c2a..699f8726539a 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2743,16 +2743,16 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
2743 if (tsc_delta < 0) 2743 if (tsc_delta < 0)
2744 mark_tsc_unstable("KVM discovered backwards TSC"); 2744 mark_tsc_unstable("KVM discovered backwards TSC");
2745 2745
2746 if (kvm_lapic_hv_timer_in_use(vcpu) &&
2747 kvm_x86_ops->set_hv_timer(vcpu,
2748 kvm_get_lapic_tscdeadline_msr(vcpu)))
2749 kvm_lapic_switch_to_sw_timer(vcpu);
2750 if (check_tsc_unstable()) { 2746 if (check_tsc_unstable()) {
2751 u64 offset = kvm_compute_tsc_offset(vcpu, 2747 u64 offset = kvm_compute_tsc_offset(vcpu,
2752 vcpu->arch.last_guest_tsc); 2748 vcpu->arch.last_guest_tsc);
2753 kvm_x86_ops->write_tsc_offset(vcpu, offset); 2749 kvm_x86_ops->write_tsc_offset(vcpu, offset);
2754 vcpu->arch.tsc_catchup = 1; 2750 vcpu->arch.tsc_catchup = 1;
2755 } 2751 }
2752 if (kvm_lapic_hv_timer_in_use(vcpu) &&
2753 kvm_x86_ops->set_hv_timer(vcpu,
2754 kvm_get_lapic_tscdeadline_msr(vcpu)))
2755 kvm_lapic_switch_to_sw_timer(vcpu);
2756 /* 2756 /*
2757 * On a host with synchronized TSC, there is no need to update 2757 * On a host with synchronized TSC, there is no need to update
2758 * kvmclock on vcpu->cpu migration 2758 * kvmclock on vcpu->cpu migration
diff --git a/arch/x86/lib/hweight.S b/arch/x86/lib/hweight.S
index 02de3d74d2c5..8a602a1e404a 100644
--- a/arch/x86/lib/hweight.S
+++ b/arch/x86/lib/hweight.S
@@ -35,6 +35,7 @@ ENDPROC(__sw_hweight32)
35 35
36ENTRY(__sw_hweight64) 36ENTRY(__sw_hweight64)
37#ifdef CONFIG_X86_64 37#ifdef CONFIG_X86_64
38 pushq %rdi
38 pushq %rdx 39 pushq %rdx
39 40
40 movq %rdi, %rdx # w -> t 41 movq %rdi, %rdx # w -> t
@@ -60,6 +61,7 @@ ENTRY(__sw_hweight64)
60 shrq $56, %rax # w = w_tmp >> 56 61 shrq $56, %rax # w = w_tmp >> 56
61 62
62 popq %rdx 63 popq %rdx
64 popq %rdi
63 ret 65 ret
64#else /* CONFIG_X86_32 */ 66#else /* CONFIG_X86_32 */
65 /* We're getting an u64 arg in (%eax,%edx): unsigned long hweight64(__u64 w) */ 67 /* We're getting an u64 arg in (%eax,%edx): unsigned long hweight64(__u64 w) */
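The hweight.S fix saves and restores %rdi because the __arch_hweight* call sites reach this routine through inline asm that marks only %rax as clobbered, so the software fallback must preserve every other register it touches. For reference, a portable C sketch of the same population-count bit trick the assembly implements (assumed equivalent, not the kernel source):

#include <stdio.h>
#include <stdint.h>

static unsigned int sw_hweight64(uint64_t w)
{
        w -= (w >> 1) & 0x5555555555555555ULL;          /* count bit pairs  */
        w  = (w & 0x3333333333333333ULL) +
             ((w >> 2) & 0x3333333333333333ULL);        /* sums per nibble  */
        w  = (w + (w >> 4)) & 0x0f0f0f0f0f0f0f0fULL;    /* sums per byte    */
        return (w * 0x0101010101010101ULL) >> 56;       /* total in top byte */
}

int main(void)
{
        printf("%u\n", sw_hweight64(0xdeadbeefULL));    /* prints 24 */
        return 0;
}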
diff --git a/arch/x86/lib/kaslr.c b/arch/x86/lib/kaslr.c
index f7dfeda83e5c..121f59c6ee54 100644
--- a/arch/x86/lib/kaslr.c
+++ b/arch/x86/lib/kaslr.c
@@ -19,7 +19,7 @@
19#include <asm/cpufeature.h> 19#include <asm/cpufeature.h>
20#include <asm/setup.h> 20#include <asm/setup.h>
21 21
22#define debug_putstr(v) early_printk(v) 22#define debug_putstr(v) early_printk("%s", v)
23#define has_cpuflag(f) boot_cpu_has(f) 23#define has_cpuflag(f) boot_cpu_has(f)
24#define get_boot_seed() kaslr_offset() 24#define get_boot_seed() kaslr_offset()
25#endif 25#endif
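The one-line lib/kaslr.c change routes the message through a "%s" format instead of passing it as the format string itself. A minimal userspace sketch of the bug class being avoided:

#include <stdio.h>

/*
 * Passing a message as the format argument lets any '%' inside it be
 * parsed as a conversion specifier, consuming varargs that were never
 * supplied.
 */
static void putstr_fixed(const char *v)
{
        printf("%s", v);        /* message is data, never a format */
}

int main(void)
{
        /* a debug string could legitimately contain a '%' */
        putstr_fixed("entropy pool 100% initialized\n");
        /* printf("entropy pool 100% initialized\n") would instead try
         * to parse "% i" as a (malformed) conversion: undefined behavior */
        return 0;
}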
diff --git a/arch/x86/mm/ident_map.c b/arch/x86/mm/ident_map.c
index ec21796ac5fd..4473cb4f8b90 100644
--- a/arch/x86/mm/ident_map.c
+++ b/arch/x86/mm/ident_map.c
@@ -3,15 +3,17 @@
3 * included by both the compressed kernel and the regular kernel. 3 * included by both the compressed kernel and the regular kernel.
4 */ 4 */
5 5
6static void ident_pmd_init(unsigned long pmd_flag, pmd_t *pmd_page, 6static void ident_pmd_init(struct x86_mapping_info *info, pmd_t *pmd_page,
7 unsigned long addr, unsigned long end) 7 unsigned long addr, unsigned long end)
8{ 8{
9 addr &= PMD_MASK; 9 addr &= PMD_MASK;
10 for (; addr < end; addr += PMD_SIZE) { 10 for (; addr < end; addr += PMD_SIZE) {
11 pmd_t *pmd = pmd_page + pmd_index(addr); 11 pmd_t *pmd = pmd_page + pmd_index(addr);
12 12
13 if (!pmd_present(*pmd)) 13 if (pmd_present(*pmd))
14 set_pmd(pmd, __pmd(addr | pmd_flag)); 14 continue;
15
16 set_pmd(pmd, __pmd((addr - info->offset) | info->pmd_flag));
15 } 17 }
16} 18}
17 19
@@ -30,13 +32,13 @@ static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
30 32
31 if (pud_present(*pud)) { 33 if (pud_present(*pud)) {
32 pmd = pmd_offset(pud, 0); 34 pmd = pmd_offset(pud, 0);
33 ident_pmd_init(info->pmd_flag, pmd, addr, next); 35 ident_pmd_init(info, pmd, addr, next);
34 continue; 36 continue;
35 } 37 }
36 pmd = (pmd_t *)info->alloc_pgt_page(info->context); 38 pmd = (pmd_t *)info->alloc_pgt_page(info->context);
37 if (!pmd) 39 if (!pmd)
38 return -ENOMEM; 40 return -ENOMEM;
39 ident_pmd_init(info->pmd_flag, pmd, addr, next); 41 ident_pmd_init(info, pmd, addr, next);
40 set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE)); 42 set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
41 } 43 }
42 44
@@ -44,14 +46,15 @@ static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
44} 46}
45 47
46int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page, 48int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
47 unsigned long addr, unsigned long end) 49 unsigned long pstart, unsigned long pend)
48{ 50{
51 unsigned long addr = pstart + info->offset;
52 unsigned long end = pend + info->offset;
49 unsigned long next; 53 unsigned long next;
50 int result; 54 int result;
51 int off = info->kernel_mapping ? pgd_index(__PAGE_OFFSET) : 0;
52 55
53 for (; addr < end; addr = next) { 56 for (; addr < end; addr = next) {
54 pgd_t *pgd = pgd_page + pgd_index(addr) + off; 57 pgd_t *pgd = pgd_page + pgd_index(addr);
55 pud_t *pud; 58 pud_t *pud;
56 59
57 next = (addr & PGDIR_MASK) + PGDIR_SIZE; 60 next = (addr & PGDIR_MASK) + PGDIR_SIZE;
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 620928903be3..d28a2d741f9e 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -122,8 +122,18 @@ __ref void *alloc_low_pages(unsigned int num)
122 return __va(pfn << PAGE_SHIFT); 122 return __va(pfn << PAGE_SHIFT);
123} 123}
124 124
125/* need 3 4k for initial PMD_SIZE, 3 4k for 0-ISA_END_ADDRESS */ 125/*
126#define INIT_PGT_BUF_SIZE (6 * PAGE_SIZE) 126 * By default need 3 4k for initial PMD_SIZE, 3 4k for 0-ISA_END_ADDRESS.
127 * With KASLR memory randomization, depending on the machine's e820
128 * memory map and the PUD alignment, we may need up to twice as many
129 * pages when KASLR memory randomization is enabled.
130 */
131#ifndef CONFIG_RANDOMIZE_MEMORY
132#define INIT_PGD_PAGE_COUNT 6
133#else
134#define INIT_PGD_PAGE_COUNT 12
135#endif
136#define INIT_PGT_BUF_SIZE (INIT_PGD_PAGE_COUNT * PAGE_SIZE)
127RESERVE_BRK(early_pgt_alloc, INIT_PGT_BUF_SIZE); 137RESERVE_BRK(early_pgt_alloc, INIT_PGT_BUF_SIZE);
128void __init early_alloc_pgt_buf(void) 138void __init early_alloc_pgt_buf(void)
129{ 139{
diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c
index 26dccd6c0df1..bda8d5eef04d 100644
--- a/arch/x86/mm/kaslr.c
+++ b/arch/x86/mm/kaslr.c
@@ -77,7 +77,7 @@ static inline unsigned long get_padding(struct kaslr_memory_region *region)
77 */ 77 */
78static inline bool kaslr_memory_enabled(void) 78static inline bool kaslr_memory_enabled(void)
79{ 79{
80 return kaslr_enabled() && !config_enabled(CONFIG_KASAN); 80 return kaslr_enabled() && !IS_ENABLED(CONFIG_KASAN);
81} 81}
82 82
83/* Initialize base and padding for each memory region randomized with KASLR */ 83/* Initialize base and padding for each memory region randomized with KASLR */
@@ -97,7 +97,7 @@ void __init kernel_randomize_memory(void)
97 * add padding if needed (especially for memory hotplug support). 97 * add padding if needed (especially for memory hotplug support).
98 */ 98 */
99 BUG_ON(kaslr_regions[0].base != &page_offset_base); 99 BUG_ON(kaslr_regions[0].base != &page_offset_base);
100 memory_tb = ((max_pfn << PAGE_SHIFT) >> TB_SHIFT) + 100 memory_tb = DIV_ROUND_UP(max_pfn << PAGE_SHIFT, 1UL << TB_SHIFT) +
101 CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING; 101 CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING;
102 102
103 /* Adapt physical memory region size based on available memory */ 103 /* Adapt physical memory region size based on available memory */
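The memory_tb change above matters on machines whose RAM is not a whole number of terabytes: the old shift truncated, undersizing the randomized region. A minimal sketch with a hypothetical 1.5 TB machine:

#include <stdio.h>

#define TB_SHIFT        40
/* same definition as the kernel's DIV_ROUND_UP() in kernel.h */
#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

int main(void)
{
        /* 1.5 TB of physical memory */
        unsigned long long bytes = 3ULL << (TB_SHIFT - 1);

        /* old expression: truncates to 1 TB, undersizing the region */
        printf("shifted:      %llu TB\n", bytes >> TB_SHIFT);
        /* fixed expression: rounds up to 2 TB, covering all of RAM */
        printf("DIV_ROUND_UP: %llu TB\n",
               DIV_ROUND_UP(bytes, 1ULL << TB_SHIFT));
        return 0;
}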
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index ecb1b69c1651..170cc4ff057b 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -927,9 +927,10 @@ int track_pfn_copy(struct vm_area_struct *vma)
927} 927}
928 928
929/* 929/*
930 * prot is passed in as a parameter for the new mapping. If the vma has a 930 * prot is passed in as a parameter for the new mapping. If the vma has
931 * linear pfn mapping for the entire range reserve the entire vma range with 931 * a linear pfn mapping for the entire range, or no vma is provided,
932 * single reserve_pfn_range call. 932 * reserve the entire pfn + size range with a single reserve_pfn_range
933 * call.
933 */ 934 */
934int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot, 935int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
935 unsigned long pfn, unsigned long addr, unsigned long size) 936 unsigned long pfn, unsigned long addr, unsigned long size)
@@ -938,11 +939,12 @@ int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
938 enum page_cache_mode pcm; 939 enum page_cache_mode pcm;
939 940
940 /* reserve the whole chunk starting from paddr */ 941 /* reserve the whole chunk starting from paddr */
941 if (addr == vma->vm_start && size == (vma->vm_end - vma->vm_start)) { 942 if (!vma || (addr == vma->vm_start
943 && size == (vma->vm_end - vma->vm_start))) {
942 int ret; 944 int ret;
943 945
944 ret = reserve_pfn_range(paddr, size, prot, 0); 946 ret = reserve_pfn_range(paddr, size, prot, 0);
945 if (!ret) 947 if (ret == 0 && vma)
946 vma->vm_flags |= VM_PAT; 948 vma->vm_flags |= VM_PAT;
947 return ret; 949 return ret;
948 } 950 }
@@ -997,7 +999,7 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
997 resource_size_t paddr; 999 resource_size_t paddr;
998 unsigned long prot; 1000 unsigned long prot;
999 1001
1000 if (!(vma->vm_flags & VM_PAT)) 1002 if (vma && !(vma->vm_flags & VM_PAT))
1001 return; 1003 return;
1002 1004
1003 /* free the chunk starting from pfn or the whole chunk */ 1005 /* free the chunk starting from pfn or the whole chunk */
@@ -1011,7 +1013,8 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
1011 size = vma->vm_end - vma->vm_start; 1013 size = vma->vm_end - vma->vm_start;
1012 } 1014 }
1013 free_pfn_range(paddr, size); 1015 free_pfn_range(paddr, size);
1014 vma->vm_flags &= ~VM_PAT; 1016 if (vma)
1017 vma->vm_flags &= ~VM_PAT;
1015} 1018}
1016 1019
1017/* 1020/*
diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
index 837ea36a837d..6d52b94f4bb9 100644
--- a/arch/x86/pci/fixup.c
+++ b/arch/x86/pci/fixup.c
@@ -553,15 +553,21 @@ static void twinhead_reserve_killing_zone(struct pci_dev *dev)
553DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x27B9, twinhead_reserve_killing_zone); 553DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x27B9, twinhead_reserve_killing_zone);
554 554
555/* 555/*
556 * Broadwell EP Home Agent BARs erroneously return non-zero values when read. 556 * Device [8086:2fc0]
557 * Erratum HSE43
558 * CONFIG_TDP_NOMINAL CSR Implemented at Incorrect Offset
559 * http://www.intel.com/content/www/us/en/processors/xeon/xeon-e5-v3-spec-update.html
557 * 560 *
558 * See http://www.intel.com/content/www/us/en/processors/xeon/xeon-e5-v4-spec-update.html 561 * Devices [8086:6f60,6fa0,6fc0]
559 * entry BDF2. 562 * Erratum BDF2
563 * PCI BARs in the Home Agent Will Return Non-Zero Values During Enumeration
564 * http://www.intel.com/content/www/us/en/processors/xeon/xeon-e5-v4-spec-update.html
560 */ 565 */
561static void pci_bdwep_bar(struct pci_dev *dev) 566static void pci_invalid_bar(struct pci_dev *dev)
562{ 567{
563 dev->non_compliant_bars = 1; 568 dev->non_compliant_bars = 1;
564} 569}
565DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6f60, pci_bdwep_bar); 570DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2fc0, pci_invalid_bar);
566DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fa0, pci_bdwep_bar); 571DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6f60, pci_invalid_bar);
567DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, pci_bdwep_bar); 572DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fa0, pci_invalid_bar);
573DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, pci_invalid_bar);
diff --git a/arch/x86/pci/vmd.c b/arch/x86/pci/vmd.c
index b814ca675131..7948be342ee9 100644
--- a/arch/x86/pci/vmd.c
+++ b/arch/x86/pci/vmd.c
@@ -41,6 +41,7 @@ static DEFINE_RAW_SPINLOCK(list_lock);
41 * @node: list item for parent traversal. 41 * @node: list item for parent traversal.
42 * @rcu: RCU callback item for freeing. 42 * @rcu: RCU callback item for freeing.
43 * @irq: back pointer to parent. 43 * @irq: back pointer to parent.
44 * @enabled: true if driver enabled IRQ
44 * @virq: the virtual IRQ value provided to the requesting driver. 45 * @virq: the virtual IRQ value provided to the requesting driver.
45 * 46 *
46 * Every MSI/MSI-X IRQ requested for a device in a VMD domain will be mapped to 47 * Every MSI/MSI-X IRQ requested for a device in a VMD domain will be mapped to
@@ -50,6 +51,7 @@ struct vmd_irq {
50 struct list_head node; 51 struct list_head node;
51 struct rcu_head rcu; 52 struct rcu_head rcu;
52 struct vmd_irq_list *irq; 53 struct vmd_irq_list *irq;
54 bool enabled;
53 unsigned int virq; 55 unsigned int virq;
54}; 56};
55 57
@@ -122,7 +124,9 @@ static void vmd_irq_enable(struct irq_data *data)
122 unsigned long flags; 124 unsigned long flags;
123 125
124 raw_spin_lock_irqsave(&list_lock, flags); 126 raw_spin_lock_irqsave(&list_lock, flags);
127 WARN_ON(vmdirq->enabled);
125 list_add_tail_rcu(&vmdirq->node, &vmdirq->irq->irq_list); 128 list_add_tail_rcu(&vmdirq->node, &vmdirq->irq->irq_list);
129 vmdirq->enabled = true;
126 raw_spin_unlock_irqrestore(&list_lock, flags); 130 raw_spin_unlock_irqrestore(&list_lock, flags);
127 131
128 data->chip->irq_unmask(data); 132 data->chip->irq_unmask(data);
@@ -136,8 +140,10 @@ static void vmd_irq_disable(struct irq_data *data)
136 data->chip->irq_mask(data); 140 data->chip->irq_mask(data);
137 141
138 raw_spin_lock_irqsave(&list_lock, flags); 142 raw_spin_lock_irqsave(&list_lock, flags);
139 list_del_rcu(&vmdirq->node); 143 if (vmdirq->enabled) {
140 INIT_LIST_HEAD_RCU(&vmdirq->node); 144 list_del_rcu(&vmdirq->node);
145 vmdirq->enabled = false;
146 }
141 raw_spin_unlock_irqrestore(&list_lock, flags); 147 raw_spin_unlock_irqrestore(&list_lock, flags);
142} 148}
143 149
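The vmd.c hunks add an enabled flag so a disable for an IRQ that was never enabled, or was already disabled, does not unlink a list entry twice. A userspace sketch of that guard pattern (toy names, not driver code):

#include <stdio.h>
#include <stdbool.h>

struct toy_irq {
        bool enabled;
};

static void toy_irq_enable(struct toy_irq *irq)
{
        /* the driver does WARN_ON(vmdirq->enabled) here */
        if (!irq->enabled) {
                /* list_add_tail_rcu() would go here */
                irq->enabled = true;
        }
}

static void toy_irq_disable(struct toy_irq *irq)
{
        if (irq->enabled) {
                /* list_del_rcu() would go here */
                irq->enabled = false;
        } else {
                printf("spurious disable ignored\n");
        }
}

int main(void)
{
        struct toy_irq irq = { .enabled = false };

        toy_irq_disable(&irq);  /* spurious: safely skipped */
        toy_irq_enable(&irq);
        toy_irq_disable(&irq);  /* balanced: state torn down exactly once */
        return 0;
}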
diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c
index 4480c06cade7..89d1146f5a6f 100644
--- a/arch/x86/platform/efi/quirks.c
+++ b/arch/x86/platform/efi/quirks.c
@@ -254,6 +254,7 @@ void __init efi_free_boot_services(void)
254 for_each_efi_memory_desc(md) { 254 for_each_efi_memory_desc(md) {
255 unsigned long long start = md->phys_addr; 255 unsigned long long start = md->phys_addr;
256 unsigned long long size = md->num_pages << EFI_PAGE_SHIFT; 256 unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
257 size_t rm_size;
257 258
258 if (md->type != EFI_BOOT_SERVICES_CODE && 259 if (md->type != EFI_BOOT_SERVICES_CODE &&
259 md->type != EFI_BOOT_SERVICES_DATA) 260 md->type != EFI_BOOT_SERVICES_DATA)
@@ -263,6 +264,26 @@ void __init efi_free_boot_services(void)
263 if (md->attribute & EFI_MEMORY_RUNTIME) 264 if (md->attribute & EFI_MEMORY_RUNTIME)
264 continue; 265 continue;
265 266
267 /*
268 * Nasty quirk: if all sub-1MB memory is used for boot
269 * services, we can get here without having allocated the
270 * real mode trampoline. It's too late to hand boot services
271 * memory back to the memblock allocator, so instead
272 * try to manually allocate the trampoline if needed.
273 *
274 * I've seen this on a Dell XPS 13 9350 with firmware
275 * 1.4.4 with SGX enabled booting Linux via Fedora 24's
276 * grub2-efi on a hard disk. (And no, I don't know why
277 * this happened, but Linux should still try to boot rather
278 * than panicking early.)
279 */
280 rm_size = real_mode_size_needed();
281 if (rm_size && (start + rm_size) < (1<<20) && size >= rm_size) {
282 set_real_mode_mem(start, rm_size);
283 start += rm_size;
284 size -= rm_size;
285 }
286
266 free_bootmem_late(start, size); 287 free_bootmem_late(start, size);
267 } 288 }
268 289
diff --git a/arch/x86/platform/uv/bios_uv.c b/arch/x86/platform/uv/bios_uv.c
index 66b2166ea4a1..23f2f3e41c7f 100644
--- a/arch/x86/platform/uv/bios_uv.c
+++ b/arch/x86/platform/uv/bios_uv.c
@@ -187,7 +187,8 @@ EXPORT_SYMBOL_GPL(uv_bios_set_legacy_vga_target);
187void uv_bios_init(void) 187void uv_bios_init(void)
188{ 188{
189 uv_systab = NULL; 189 uv_systab = NULL;
190 if ((efi.uv_systab == EFI_INVALID_TABLE_ADDR) || !efi.uv_systab) { 190 if ((efi.uv_systab == EFI_INVALID_TABLE_ADDR) ||
191 !efi.uv_systab || efi_runtime_disabled()) {
191 pr_crit("UV: UVsystab: missing\n"); 192 pr_crit("UV: UVsystab: missing\n");
192 return; 193 return;
193 } 194 }
@@ -199,12 +200,14 @@ void uv_bios_init(void)
199 return; 200 return;
200 } 201 }
201 202
203 /* Starting with UV4 the UV systab size is variable */
202 if (uv_systab->revision >= UV_SYSTAB_VERSION_UV4) { 204 if (uv_systab->revision >= UV_SYSTAB_VERSION_UV4) {
205 int size = uv_systab->size;
206
203 iounmap(uv_systab); 207 iounmap(uv_systab);
204 uv_systab = ioremap(efi.uv_systab, uv_systab->size); 208 uv_systab = ioremap(efi.uv_systab, size);
205 if (!uv_systab) { 209 if (!uv_systab) {
206 pr_err("UV: UVsystab: ioremap(%d) failed!\n", 210 pr_err("UV: UVsystab: ioremap(%d) failed!\n", size);
207 uv_systab->size);
208 return; 211 return;
209 } 212 }
210 } 213 }
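The bios_uv.c change copies uv_systab->size into a local before the iounmap(), since the old code dereferenced the mapping again after tearing it down. A userspace sketch of the same use-after-unmap pattern, with malloc/free standing in for ioremap/iounmap:

#include <stdio.h>
#include <stdlib.h>

struct systab {
        int size;
};

int main(void)
{
        struct systab *tab = malloc(sizeof(*tab));

        if (!tab)
                return 1;
        tab->size = 4096;

        int size = tab->size;   /* copy out while still mapped */
        free(tab);              /* iounmap() in the kernel */

        /* remap with the saved value; reading tab->size here would be
         * a use-after-free, the bug the patch removes */
        printf("remapping %d bytes\n", size);
        return 0;
}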
diff --git a/arch/x86/power/hibernate_64.c b/arch/x86/power/hibernate_64.c
index f0b5f2d402af..9634557a5444 100644
--- a/arch/x86/power/hibernate_64.c
+++ b/arch/x86/power/hibernate_64.c
@@ -87,7 +87,7 @@ static int set_up_temporary_mappings(void)
87 struct x86_mapping_info info = { 87 struct x86_mapping_info info = {
88 .alloc_pgt_page = alloc_pgt_page, 88 .alloc_pgt_page = alloc_pgt_page,
89 .pmd_flag = __PAGE_KERNEL_LARGE_EXEC, 89 .pmd_flag = __PAGE_KERNEL_LARGE_EXEC,
90 .kernel_mapping = true, 90 .offset = __PAGE_OFFSET,
91 }; 91 };
92 unsigned long mstart, mend; 92 unsigned long mstart, mend;
93 pgd_t *pgd; 93 pgd_t *pgd;
@@ -113,7 +113,7 @@ static int set_up_temporary_mappings(void)
113 return result; 113 return result;
114 } 114 }
115 115
116 temp_level4_pgt = (unsigned long)pgd - __PAGE_OFFSET; 116 temp_level4_pgt = __pa(pgd);
117 return 0; 117 return 0;
118} 118}
119 119
diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
index 705e3fffb4a1..5db706f14111 100644
--- a/arch/x86/realmode/init.c
+++ b/arch/x86/realmode/init.c
@@ -1,9 +1,11 @@
1#include <linux/io.h> 1#include <linux/io.h>
2#include <linux/slab.h>
2#include <linux/memblock.h> 3#include <linux/memblock.h>
3 4
4#include <asm/cacheflush.h> 5#include <asm/cacheflush.h>
5#include <asm/pgtable.h> 6#include <asm/pgtable.h>
6#include <asm/realmode.h> 7#include <asm/realmode.h>
8#include <asm/tlbflush.h>
7 9
8struct real_mode_header *real_mode_header; 10struct real_mode_header *real_mode_header;
9u32 *trampoline_cr4_features; 11u32 *trampoline_cr4_features;
@@ -11,25 +13,37 @@ u32 *trampoline_cr4_features;
11/* Hold the pgd entry used on booting additional CPUs */ 13/* Hold the pgd entry used on booting additional CPUs */
12pgd_t trampoline_pgd_entry; 14pgd_t trampoline_pgd_entry;
13 15
16void __init set_real_mode_mem(phys_addr_t mem, size_t size)
17{
18 void *base = __va(mem);
19
20 real_mode_header = (struct real_mode_header *) base;
21 printk(KERN_DEBUG "Base memory trampoline at [%p] %llx size %zu\n",
22 base, (unsigned long long)mem, size);
23}
24
14void __init reserve_real_mode(void) 25void __init reserve_real_mode(void)
15{ 26{
16 phys_addr_t mem; 27 phys_addr_t mem;
17 unsigned char *base; 28 size_t size = real_mode_size_needed();
18 size_t size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob); 29
30 if (!size)
31 return;
32
33 WARN_ON(slab_is_available());
19 34
20 /* Has to be under 1M so we can execute real-mode AP code. */ 35 /* Has to be under 1M so we can execute real-mode AP code. */
21 mem = memblock_find_in_range(0, 1<<20, size, PAGE_SIZE); 36 mem = memblock_find_in_range(0, 1<<20, size, PAGE_SIZE);
22 if (!mem) 37 if (!mem) {
23 panic("Cannot allocate trampoline\n"); 38 pr_info("No sub-1M memory is available for the trampoline\n");
39 return;
40 }
24 41
25 base = __va(mem);
26 memblock_reserve(mem, size); 42 memblock_reserve(mem, size);
27 real_mode_header = (struct real_mode_header *) base; 43 set_real_mode_mem(mem, size);
28 printk(KERN_DEBUG "Base memory trampoline at [%p] %llx size %zu\n",
29 base, (unsigned long long)mem, size);
30} 44}
31 45
32void __init setup_real_mode(void) 46static void __init setup_real_mode(void)
33{ 47{
34 u16 real_mode_seg; 48 u16 real_mode_seg;
35 const u32 *rel; 49 const u32 *rel;
@@ -84,7 +98,7 @@ void __init setup_real_mode(void)
84 98
85 trampoline_header->start = (u64) secondary_startup_64; 99 trampoline_header->start = (u64) secondary_startup_64;
86 trampoline_cr4_features = &trampoline_header->cr4; 100 trampoline_cr4_features = &trampoline_header->cr4;
87 *trampoline_cr4_features = __read_cr4(); 101 *trampoline_cr4_features = mmu_cr4_features;
88 102
89 trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd); 103 trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
90 trampoline_pgd[0] = trampoline_pgd_entry.pgd; 104 trampoline_pgd[0] = trampoline_pgd_entry.pgd;
@@ -100,7 +114,7 @@ void __init setup_real_mode(void)
100 * need to mark it executable at do_pre_smp_initcalls() at least, 114 * need to mark it executable at do_pre_smp_initcalls() at least,
101 * thus run it as a early_initcall(). 115 * thus run it as a early_initcall().
102 */ 116 */
103static int __init set_real_mode_permissions(void) 117static void __init set_real_mode_permissions(void)
104{ 118{
105 unsigned char *base = (unsigned char *) real_mode_header; 119 unsigned char *base = (unsigned char *) real_mode_header;
106 size_t size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob); 120 size_t size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob);
@@ -119,7 +133,16 @@ static int __init set_real_mode_permissions(void)
119 set_memory_nx((unsigned long) base, size >> PAGE_SHIFT); 133 set_memory_nx((unsigned long) base, size >> PAGE_SHIFT);
120 set_memory_ro((unsigned long) base, ro_size >> PAGE_SHIFT); 134 set_memory_ro((unsigned long) base, ro_size >> PAGE_SHIFT);
121 set_memory_x((unsigned long) text_start, text_size >> PAGE_SHIFT); 135 set_memory_x((unsigned long) text_start, text_size >> PAGE_SHIFT);
136}
137
138static int __init init_real_mode(void)
139{
140 if (!real_mode_header)
141 panic("Real mode trampoline was not allocated");
142
143 setup_real_mode();
144 set_real_mode_permissions();
122 145
123 return 0; 146 return 0;
124} 147}
125early_initcall(set_real_mode_permissions); 148early_initcall(init_real_mode);
diff --git a/arch/x86/um/ptrace_32.c b/arch/x86/um/ptrace_32.c
index ebd4dd6ef73b..a7ef7b131e25 100644
--- a/arch/x86/um/ptrace_32.c
+++ b/arch/x86/um/ptrace_32.c
@@ -84,7 +84,10 @@ int putreg(struct task_struct *child, int regno, unsigned long value)
84 case EAX: 84 case EAX:
85 case EIP: 85 case EIP:
86 case UESP: 86 case UESP:
87 break;
87 case ORIG_EAX: 88 case ORIG_EAX:
89 /* Update the syscall number. */
90 UPT_SYSCALL_NR(&child->thread.regs.regs) = value;
88 break; 91 break;
89 case FS: 92 case FS:
90 if (value && (value & 3) != 3) 93 if (value && (value & 3) != 3)
diff --git a/arch/x86/um/ptrace_64.c b/arch/x86/um/ptrace_64.c
index faab418876ce..0b5c184dd5b3 100644
--- a/arch/x86/um/ptrace_64.c
+++ b/arch/x86/um/ptrace_64.c
@@ -78,7 +78,11 @@ int putreg(struct task_struct *child, int regno, unsigned long value)
78 case RSI: 78 case RSI:
79 case RDI: 79 case RDI:
80 case RBP: 80 case RBP:
81 break;
82
81 case ORIG_RAX: 83 case ORIG_RAX:
84 /* Update the syscall number. */
85 UPT_SYSCALL_NR(&child->thread.regs.regs) = value;
82 break; 86 break;
83 87
84 case FS: 88 case FS:
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 8ffb089b19a5..b86ebb1a9a7f 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -118,7 +118,7 @@ DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
118DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info); 118DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);
119 119
120/* Linux <-> Xen vCPU id mapping */ 120/* Linux <-> Xen vCPU id mapping */
121DEFINE_PER_CPU(int, xen_vcpu_id) = -1; 121DEFINE_PER_CPU(uint32_t, xen_vcpu_id);
122EXPORT_PER_CPU_SYMBOL(xen_vcpu_id); 122EXPORT_PER_CPU_SYMBOL(xen_vcpu_id);
123 123
124enum xen_domain_type xen_domain_type = XEN_NATIVE; 124enum xen_domain_type xen_domain_type = XEN_NATIVE;
diff --git a/block/bio.c b/block/bio.c
index f39477538fef..aa7354088008 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -667,18 +667,19 @@ struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
667 bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector; 667 bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
668 bio->bi_iter.bi_size = bio_src->bi_iter.bi_size; 668 bio->bi_iter.bi_size = bio_src->bi_iter.bi_size;
669 669
670 if (bio_op(bio) == REQ_OP_DISCARD) 670 switch (bio_op(bio)) {
671 goto integrity_clone; 671 case REQ_OP_DISCARD:
672 672 case REQ_OP_SECURE_ERASE:
673 if (bio_op(bio) == REQ_OP_WRITE_SAME) { 673 break;
674 case REQ_OP_WRITE_SAME:
674 bio->bi_io_vec[bio->bi_vcnt++] = bio_src->bi_io_vec[0]; 675 bio->bi_io_vec[bio->bi_vcnt++] = bio_src->bi_io_vec[0];
675 goto integrity_clone; 676 break;
677 default:
678 bio_for_each_segment(bv, bio_src, iter)
679 bio->bi_io_vec[bio->bi_vcnt++] = bv;
680 break;
676 } 681 }
677 682
678 bio_for_each_segment(bv, bio_src, iter)
679 bio->bi_io_vec[bio->bi_vcnt++] = bv;
680
681integrity_clone:
682 if (bio_integrity(bio_src)) { 683 if (bio_integrity(bio_src)) {
683 int ret; 684 int ret;
684 685
@@ -1788,7 +1789,7 @@ struct bio *bio_split(struct bio *bio, int sectors,
1788 * Discards need a mutable bio_vec to accommodate the payload 1789 * Discards need a mutable bio_vec to accommodate the payload
1789 * required by the DSM TRIM and UNMAP commands. 1790 * required by the DSM TRIM and UNMAP commands.
1790 */ 1791 */
1791 if (bio_op(bio) == REQ_OP_DISCARD) 1792 if (bio_op(bio) == REQ_OP_DISCARD || bio_op(bio) == REQ_OP_SECURE_ERASE)
1792 split = bio_clone_bioset(bio, gfp, bs); 1793 split = bio_clone_bioset(bio, gfp, bs);
1793 else 1794 else
1794 split = bio_clone_fast(bio, gfp, bs); 1795 split = bio_clone_fast(bio, gfp, bs);
diff --git a/block/blk-core.c b/block/blk-core.c
index 999442ec4601..36c7ac328d8c 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -515,7 +515,9 @@ EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
515 515
516void blk_set_queue_dying(struct request_queue *q) 516void blk_set_queue_dying(struct request_queue *q)
517{ 517{
518 queue_flag_set_unlocked(QUEUE_FLAG_DYING, q); 518 spin_lock_irq(q->queue_lock);
519 queue_flag_set(QUEUE_FLAG_DYING, q);
520 spin_unlock_irq(q->queue_lock);
519 521
520 if (q->mq_ops) 522 if (q->mq_ops)
521 blk_mq_wake_waiters(q); 523 blk_mq_wake_waiters(q);
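The blk-core.c hunk moves the DYING flag update under the queue lock because the QUEUE_FLAG_* bits share one word, and an unlocked set is a read-modify-write that can race with other flag updates. A minimal sketch of the serialized form (illustrative names, a pthread mutex standing in for the queue spinlock):

#include <pthread.h>
#include <stdio.h>

static unsigned long queue_flags;
static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;

#define QUEUE_FLAG_DYING        (1UL << 0)

static void set_queue_dying(void)
{
        pthread_mutex_lock(&queue_lock);
        queue_flags |= QUEUE_FLAG_DYING;        /* RMW now serialized */
        pthread_mutex_unlock(&queue_lock);
}

int main(void)
{
        set_queue_dying();
        printf("queue_flags = %#lx\n", queue_flags);
        return 0;
}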
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 3eec75a9e91d..2642e5fc8b69 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -94,9 +94,31 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
94 bool do_split = true; 94 bool do_split = true;
95 struct bio *new = NULL; 95 struct bio *new = NULL;
96 const unsigned max_sectors = get_max_io_size(q, bio); 96 const unsigned max_sectors = get_max_io_size(q, bio);
97 unsigned bvecs = 0;
97 98
98 bio_for_each_segment(bv, bio, iter) { 99 bio_for_each_segment(bv, bio, iter) {
99 /* 100 /*
101 * With arbitrary bio size, the incoming bio may be very
102 * big. We have to split the bio into small bios so that
103 * each holds at most BIO_MAX_PAGES bvecs because
104 * bio_clone() can fail to allocate big bvecs.
105 *
106 * It would have been better to apply the limit per
107 * request queue in which bio_clone() is involved,
108 * instead of globally. The biggest blocker is the
109 * bio_clone() in bio bounce.
110 *
111 * If the bio is split for this reason, we should still be
112 * allowed to continue merging bios, but don't do that
113 * now, to keep the change simple.
114 *
115 * TODO: deal with bio bounce's bio_clone() gracefully
116 * and convert the global limit into per-queue limit.
117 */
118 if (bvecs++ >= BIO_MAX_PAGES)
119 goto split;
120
121 /*
100 * If the queue doesn't support SG gaps and adding this 122 * If the queue doesn't support SG gaps and adding this
101 * offset would create a gap, disallow it. 123 * offset would create a gap, disallow it.
102 */ 124 */
@@ -172,12 +194,18 @@ void blk_queue_split(struct request_queue *q, struct bio **bio,
172 struct bio *split, *res; 194 struct bio *split, *res;
173 unsigned nsegs; 195 unsigned nsegs;
174 196
175 if (bio_op(*bio) == REQ_OP_DISCARD) 197 switch (bio_op(*bio)) {
198 case REQ_OP_DISCARD:
199 case REQ_OP_SECURE_ERASE:
176 split = blk_bio_discard_split(q, *bio, bs, &nsegs); 200 split = blk_bio_discard_split(q, *bio, bs, &nsegs);
177 else if (bio_op(*bio) == REQ_OP_WRITE_SAME) 201 break;
202 case REQ_OP_WRITE_SAME:
178 split = blk_bio_write_same_split(q, *bio, bs, &nsegs); 203 split = blk_bio_write_same_split(q, *bio, bs, &nsegs);
179 else 204 break;
205 default:
180 split = blk_bio_segment_split(q, *bio, q->bio_split, &nsegs); 206 split = blk_bio_segment_split(q, *bio, q->bio_split, &nsegs);
207 break;
208 }
181 209
182 /* physical segments can be figured out during splitting */ 210 /* physical segments can be figured out during splitting */
183 res = split ? split : *bio; 211 res = split ? split : *bio;
@@ -213,7 +241,7 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
213 * This should probably be returning 0, but blk_add_request_payload() 241 * This should probably be returning 0, but blk_add_request_payload()
214 * (Christoph!!!!) 242 * (Christoph!!!!)
215 */ 243 */
216 if (bio_op(bio) == REQ_OP_DISCARD) 244 if (bio_op(bio) == REQ_OP_DISCARD || bio_op(bio) == REQ_OP_SECURE_ERASE)
217 return 1; 245 return 1;
218 246
219 if (bio_op(bio) == REQ_OP_WRITE_SAME) 247 if (bio_op(bio) == REQ_OP_WRITE_SAME)
@@ -385,7 +413,9 @@ static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
385 nsegs = 0; 413 nsegs = 0;
386 cluster = blk_queue_cluster(q); 414 cluster = blk_queue_cluster(q);
387 415
388 if (bio_op(bio) == REQ_OP_DISCARD) { 416 switch (bio_op(bio)) {
417 case REQ_OP_DISCARD:
418 case REQ_OP_SECURE_ERASE:
389 /* 419 /*
390 * This is a hack - drivers should be neither modifying the 420 * This is a hack - drivers should be neither modifying the
391 * biovec, nor relying on bi_vcnt - but because of 421 * biovec, nor relying on bi_vcnt - but because of
@@ -393,19 +423,16 @@ static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
393 * a payload we need to set up here (thank you Christoph) and 423 * a payload we need to set up here (thank you Christoph) and
394 * bi_vcnt is really the only way of telling if we need to. 424 * bi_vcnt is really the only way of telling if we need to.
395 */ 425 */
396 426 if (!bio->bi_vcnt)
397 if (bio->bi_vcnt) 427 return 0;
398 goto single_segment; 428 /* Fall through */
399 429 case REQ_OP_WRITE_SAME:
400 return 0;
401 }
402
403 if (bio_op(bio) == REQ_OP_WRITE_SAME) {
404single_segment:
405 *sg = sglist; 430 *sg = sglist;
406 bvec = bio_iovec(bio); 431 bvec = bio_iovec(bio);
407 sg_set_page(*sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset); 432 sg_set_page(*sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset);
408 return 1; 433 return 1;
434 default:
435 break;
409 } 436 }
410 437
411 for_each_bio(bio) 438 for_each_bio(bio)
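Both blk-merge.c conversions replace if/else chains on bio_op() with switch statements so REQ_OP_SECURE_ERASE can share the DISCARD handling, including an explicit fall-through into the WRITE_SAME single-segment path in __blk_bios_map_sg(). The shape of that dispatch, as a self-contained sketch (simplified op codes and handlers, not the kernel functions):

#include <stdio.h>

enum req_op { OP_READ, OP_WRITE, OP_DISCARD, OP_SECURE_ERASE, OP_WRITE_SAME };

static int split_discard(void)    { return 1; }
static int split_write_same(void) { return 2; }
static int split_segments(void)   { return 3; }

static int do_split(enum req_op op)
{
	switch (op) {
	case OP_DISCARD:
	case OP_SECURE_ERASE:	/* secure erase reuses the discard path */
		return split_discard();
	case OP_WRITE_SAME:
		return split_write_same();
	default:
		return split_segments();
	}
}

int main(void)
{
	printf("%d %d %d\n", do_split(OP_SECURE_ERASE),
	       do_split(OP_WRITE_SAME), do_split(OP_READ));
	return 0;
}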
diff --git a/block/blk-mq.c b/block/blk-mq.c
index e931a0e8e73d..13f5a6c1de76 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -793,11 +793,12 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
 	struct list_head *dptr;
 	int queued;
 
-	WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask));
-
 	if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
 		return;
 
+	WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask) &&
+		cpu_online(hctx->next_cpu));
+
 	hctx->run++;
 
 	/*
@@ -1036,10 +1037,11 @@ void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
 EXPORT_SYMBOL(blk_mq_delay_queue);
 
 static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
-					    struct blk_mq_ctx *ctx,
 					    struct request *rq,
 					    bool at_head)
 {
+	struct blk_mq_ctx *ctx = rq->mq_ctx;
+
 	trace_block_rq_insert(hctx->queue, rq);
 
 	if (at_head)
@@ -1053,20 +1055,16 @@ static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
 {
 	struct blk_mq_ctx *ctx = rq->mq_ctx;
 
-	__blk_mq_insert_req_list(hctx, ctx, rq, at_head);
+	__blk_mq_insert_req_list(hctx, rq, at_head);
 	blk_mq_hctx_mark_pending(hctx, ctx);
 }
 
 void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
 		bool async)
 {
+	struct blk_mq_ctx *ctx = rq->mq_ctx;
 	struct request_queue *q = rq->q;
 	struct blk_mq_hw_ctx *hctx;
-	struct blk_mq_ctx *ctx = rq->mq_ctx, *current_ctx;
-
-	current_ctx = blk_mq_get_ctx(q);
-	if (!cpu_online(ctx->cpu))
-		rq->mq_ctx = ctx = current_ctx;
 
 	hctx = q->mq_ops->map_queue(q, ctx->cpu);
 
@@ -1076,8 +1074,6 @@ void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
 
 	if (run_queue)
 		blk_mq_run_hw_queue(hctx, async);
-
-	blk_mq_put_ctx(current_ctx);
 }
 
 static void blk_mq_insert_requests(struct request_queue *q,
@@ -1088,14 +1084,9 @@ static void blk_mq_insert_requests(struct request_queue *q,
 
 {
 	struct blk_mq_hw_ctx *hctx;
-	struct blk_mq_ctx *current_ctx;
 
 	trace_block_unplug(q, depth, !from_schedule);
 
-	current_ctx = blk_mq_get_ctx(q);
-
-	if (!cpu_online(ctx->cpu))
-		ctx = current_ctx;
 	hctx = q->mq_ops->map_queue(q, ctx->cpu);
 
 	/*
@@ -1107,15 +1098,14 @@ static void blk_mq_insert_requests(struct request_queue *q,
 		struct request *rq;
 
 		rq = list_first_entry(list, struct request, queuelist);
+		BUG_ON(rq->mq_ctx != ctx);
 		list_del_init(&rq->queuelist);
-		rq->mq_ctx = ctx;
-		__blk_mq_insert_req_list(hctx, ctx, rq, false);
+		__blk_mq_insert_req_list(hctx, rq, false);
 	}
 	blk_mq_hctx_mark_pending(hctx, ctx);
 	spin_unlock(&ctx->lock);
 
 	blk_mq_run_hw_queue(hctx, from_schedule);
-	blk_mq_put_ctx(current_ctx);
 }
 
 static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b)
@@ -1630,16 +1620,17 @@ static int blk_mq_alloc_bitmap(struct blk_mq_ctxmap *bitmap, int node)
 	return 0;
 }
 
+/*
+ * 'cpu' is going away. splice any existing rq_list entries from this
+ * software queue to the hw queue dispatch list, and ensure that it
+ * gets run.
+ */
 static int blk_mq_hctx_cpu_offline(struct blk_mq_hw_ctx *hctx, int cpu)
 {
-	struct request_queue *q = hctx->queue;
 	struct blk_mq_ctx *ctx;
 	LIST_HEAD(tmp);
 
-	/*
-	 * Move ctx entries to new CPU, if this one is going away.
-	 */
-	ctx = __blk_mq_get_ctx(q, cpu);
+	ctx = __blk_mq_get_ctx(hctx->queue, cpu);
 
 	spin_lock(&ctx->lock);
 	if (!list_empty(&ctx->rq_list)) {
@@ -1651,24 +1642,11 @@ static int blk_mq_hctx_cpu_offline(struct blk_mq_hw_ctx *hctx, int cpu)
 	if (list_empty(&tmp))
 		return NOTIFY_OK;
 
-	ctx = blk_mq_get_ctx(q);
-	spin_lock(&ctx->lock);
-
-	while (!list_empty(&tmp)) {
-		struct request *rq;
-
-		rq = list_first_entry(&tmp, struct request, queuelist);
-		rq->mq_ctx = ctx;
-		list_move_tail(&rq->queuelist, &ctx->rq_list);
-	}
-
-	hctx = q->mq_ops->map_queue(q, ctx->cpu);
-	blk_mq_hctx_mark_pending(hctx, ctx);
-
-	spin_unlock(&ctx->lock);
+	spin_lock(&hctx->lock);
+	list_splice_tail_init(&tmp, &hctx->dispatch);
+	spin_unlock(&hctx->lock);
 
 	blk_mq_run_hw_queue(hctx, true);
-	blk_mq_put_ctx(ctx);
 	return NOTIFY_OK;
 }
 
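The reworked blk_mq_hctx_cpu_offline() no longer re-homes each request onto another software queue one at a time; it splices the whole rq_list onto the hardware queue's dispatch list under hctx->lock and kicks the queue. A userspace sketch of that splice pattern on a circular doubly linked list (hypothetical types; the kernel uses list_splice_tail_init()):

#include <stdio.h>

struct node { struct node *prev, *next; int val; };

static void list_init(struct node *h) { h->prev = h->next = h; }

static void list_add_tail(struct node *h, struct node *n)
{
	n->prev = h->prev; n->next = h;
	h->prev->next = n; h->prev = n;
}

static int list_empty(const struct node *h) { return h->next == h; }

/* Move everything from src onto the tail of dst, leaving src empty. */
static void list_splice_tail_init(struct node *src, struct node *dst)
{
	if (list_empty(src))
		return;
	src->next->prev = dst->prev;
	dst->prev->next = src->next;
	src->prev->next = dst;
	dst->prev = src->prev;
	list_init(src);
}

int main(void)
{
	struct node ctx_rq_list, hctx_dispatch, a = { .val = 1 }, b = { .val = 2 };
	struct node *p;

	list_init(&ctx_rq_list);
	list_init(&hctx_dispatch);
	list_add_tail(&ctx_rq_list, &a);
	list_add_tail(&ctx_rq_list, &b);

	/* CPU going away: drain the software queue into the dispatch list. */
	list_splice_tail_init(&ctx_rq_list, &hctx_dispatch);

	for (p = hctx_dispatch.next; p != &hctx_dispatch; p = p->next)
		printf("%d\n", p->val);
	return 0;
}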
diff --git a/block/elevator.c b/block/elevator.c
index 7096c22041e7..f7d973a56fd7 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -366,7 +366,7 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq)
 	list_for_each_prev(entry, &q->queue_head) {
 		struct request *pos = list_entry_rq(entry);
 
-		if ((req_op(rq) == REQ_OP_DISCARD) != (req_op(pos) == REQ_OP_DISCARD))
+		if (req_op(rq) != req_op(pos))
 			break;
 		if (rq_data_dir(rq) != rq_data_dir(pos))
 			break;
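The elevator change broadens the boundary test: instead of only separating DISCARDs from everything else, a request now stops sorting past its neighbor whenever the whole operation type differs, which also keeps operations such as secure erase and write-same apart. The two predicates side by side, as a stand-alone sketch (simplified op codes, not the kernel's):

#include <stdbool.h>
#include <stdio.h>

enum req_op { OP_READ, OP_WRITE, OP_DISCARD, OP_SECURE_ERASE };

/* Old test: only distinguished DISCARD vs everything else. */
static bool mismatch_old(enum req_op a, enum req_op b)
{
	return (a == OP_DISCARD) != (b == OP_DISCARD);
}

/* New test: any difference in operation type is a boundary. */
static bool mismatch_new(enum req_op a, enum req_op b)
{
	return a != b;
}

int main(void)
{
	/* SECURE_ERASE vs WRITE slipped through the old test. */
	printf("old: %d, new: %d\n",
	       mismatch_old(OP_SECURE_ERASE, OP_WRITE),
	       mismatch_new(OP_SECURE_ERASE, OP_WRITE));
	return 0;
}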
diff --git a/crypto/Kconfig b/crypto/Kconfig
index a9377bef25e3..84d71482bf08 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -439,7 +439,7 @@ config CRYPTO_CRC32C_INTEL
 
 config CRYPT_CRC32C_VPMSUM
 	tristate "CRC32c CRC algorithm (powerpc64)"
-	depends on PPC64
+	depends on PPC64 && ALTIVEC
 	select CRYPTO_HASH
 	select CRC32
 	help
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index cf8037a87b2d..0c654e59f215 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -631,9 +631,14 @@ static int cryptd_hash_export(struct ahash_request *req, void *out)
 
 static int cryptd_hash_import(struct ahash_request *req, const void *in)
 {
-	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+	struct shash_desc *desc = cryptd_shash_desc(req);
+
+	desc->tfm = ctx->child;
+	desc->flags = req->base.flags;
 
-	return crypto_shash_import(&rctx->desc, in);
+	return crypto_shash_import(desc, in);
 }
 
 static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
@@ -733,13 +738,14 @@ static void cryptd_aead_crypt(struct aead_request *req,
 	rctx = aead_request_ctx(req);
 	compl = rctx->complete;
 
+	tfm = crypto_aead_reqtfm(req);
+
 	if (unlikely(err == -EINPROGRESS))
 		goto out;
 	aead_request_set_tfm(req, child);
 	err = crypt( req );
 
 out:
-	tfm = crypto_aead_reqtfm(req);
 	ctx = crypto_aead_ctx(tfm);
 	refcnt = atomic_read(&ctx->refcnt);
 
diff --git a/crypto/sha3_generic.c b/crypto/sha3_generic.c
index 62264397a2d2..7e8ed96236ce 100644
--- a/crypto/sha3_generic.c
+++ b/crypto/sha3_generic.c
@@ -24,14 +24,14 @@
 #define ROTL64(x, y) (((x) << (y)) | ((x) >> (64 - (y))))
 
 static const u64 keccakf_rndc[24] = {
-	0x0000000000000001, 0x0000000000008082, 0x800000000000808a,
-	0x8000000080008000, 0x000000000000808b, 0x0000000080000001,
-	0x8000000080008081, 0x8000000000008009, 0x000000000000008a,
-	0x0000000000000088, 0x0000000080008009, 0x000000008000000a,
-	0x000000008000808b, 0x800000000000008b, 0x8000000000008089,
-	0x8000000000008003, 0x8000000000008002, 0x8000000000000080,
-	0x000000000000800a, 0x800000008000000a, 0x8000000080008081,
-	0x8000000000008080, 0x0000000080000001, 0x8000000080008008
+	0x0000000000000001ULL, 0x0000000000008082ULL, 0x800000000000808aULL,
+	0x8000000080008000ULL, 0x000000000000808bULL, 0x0000000080000001ULL,
+	0x8000000080008081ULL, 0x8000000000008009ULL, 0x000000000000008aULL,
+	0x0000000000000088ULL, 0x0000000080008009ULL, 0x000000008000000aULL,
+	0x000000008000808bULL, 0x800000000000008bULL, 0x8000000000008089ULL,
+	0x8000000000008003ULL, 0x8000000000008002ULL, 0x8000000000000080ULL,
+	0x000000000000800aULL, 0x800000008000000aULL, 0x8000000080008081ULL,
+	0x8000000000008080ULL, 0x0000000080000001ULL, 0x8000000080008008ULL
 };
 
 static const int keccakf_rotc[24] = {
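The ULL suffixes matter on 32-bit targets: an unsuffixed hexadecimal constant wider than unsigned long makes older compilers and sparse complain (typically "constant is so large that it is unsigned" or shift-width warnings on ILP32), while the suffix pins the type to unsigned long long everywhere. A two-line illustration (hypothetical constant; try building with and without -m32):

#include <stdio.h>

int main(void)
{
	/* The suffix makes the constant's type unambiguous on 32-bit ABIs. */
	unsigned long long rndc = 0x800000000000808aULL;

	printf("%llx\n", rndc);
	return 0;
}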
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index 8c234dd9b8bc..80cc7c089a15 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -1527,11 +1527,12 @@ static u32 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw)
 {
 	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];
 	u64 offset = nfit_blk->stat_offset + mmio->size * bw;
+	const u32 STATUS_MASK = 0x80000037;
 
 	if (mmio->num_lines)
 		offset = to_interleave_offset(offset, mmio);
 
-	return readl(mmio->addr.base + offset);
+	return readl(mmio->addr.base + offset) & STATUS_MASK;
 }
 
 static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw,
diff --git a/drivers/acpi/nfit/mce.c b/drivers/acpi/nfit/mce.c
index 4c745bf389fe..161f91539ae6 100644
--- a/drivers/acpi/nfit/mce.c
+++ b/drivers/acpi/nfit/mce.c
@@ -42,7 +42,7 @@ static int nfit_handle_mce(struct notifier_block *nb, unsigned long val,
 	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
 		struct acpi_nfit_system_address *spa = nfit_spa->spa;
 
-		if (nfit_spa_type(spa) == NFIT_SPA_PM)
+		if (nfit_spa_type(spa) != NFIT_SPA_PM)
 			continue;
 		/* find the spa that covers the mce addr */
 		if (spa->address > mce->addr)
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index ad9fc84a8601..e878fc799af7 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -2054,7 +2054,7 @@ int __init acpi_scan_init(void)
 
 static struct acpi_probe_entry *ape;
 static int acpi_probe_count;
-static DEFINE_SPINLOCK(acpi_probe_lock);
+static DEFINE_MUTEX(acpi_probe_mutex);
 
 static int __init acpi_match_madt(struct acpi_subtable_header *header,
 				  const unsigned long end)
@@ -2073,7 +2073,7 @@ int __init __acpi_probe_device_table(struct acpi_probe_entry *ap_head, int nr)
 	if (acpi_disabled)
 		return 0;
 
-	spin_lock(&acpi_probe_lock);
+	mutex_lock(&acpi_probe_mutex);
 	for (ape = ap_head; nr; ape++, nr--) {
 		if (ACPI_COMPARE_NAME(ACPI_SIG_MADT, ape->id)) {
 			acpi_probe_count = 0;
@@ -2086,7 +2086,7 @@ int __init __acpi_probe_device_table(struct acpi_probe_entry *ap_head, int nr)
 			count++;
 		}
 	}
-	spin_unlock(&acpi_probe_lock);
+	mutex_unlock(&acpi_probe_mutex);
 
 	return count;
 }
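Switching acpi_probe_lock from a spinlock to a mutex lets the probe-table walk call code that may sleep: sleeping while holding a spinlock is forbidden, whereas a mutex holder may block. A userspace analogue with pthreads (hypothetical worker; the point is only that blocking inside the critical section is legal under a sleeping lock):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t probe_mutex = PTHREAD_MUTEX_INITIALIZER;
static int probe_count;

static void probe_table(void)
{
	pthread_mutex_lock(&probe_mutex);
	/* A mutex holder may block; a spinning lock holder must not. */
	usleep(1000);		/* stand-in for a sleeping match function */
	probe_count++;
	pthread_mutex_unlock(&probe_mutex);
}

int main(void)
{
	probe_table();
	printf("probed: %d\n", probe_count);
	return 0;
}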
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 7461a587b39b..dcf2c724fd06 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -2524,7 +2524,7 @@ static int ahci_host_activate_multi_irqs(struct ata_host *host,
 
 		/* Do not receive interrupts sent by dummy ports */
 		if (!pp) {
-			disable_irq(irq + i);
+			disable_irq(irq);
 			continue;
 		}
 
diff --git a/drivers/ata/pata_ninja32.c b/drivers/ata/pata_ninja32.c
index 633aa2934a18..44f97ad3c88d 100644
--- a/drivers/ata/pata_ninja32.c
+++ b/drivers/ata/pata_ninja32.c
@@ -144,7 +144,7 @@ static int ninja32_init_one(struct pci_dev *dev, const struct pci_device_id *id)
 	ap->ioaddr.altstatus_addr = base + 0x1E;
 	ap->ioaddr.bmdma_addr = base;
 	ata_sff_std_ports(&ap->ioaddr);
-	ap->pflags = ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE;
+	ap->pflags |= ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE;
 
 	ninja32_program(base);
 	/* FIXME: Should we disable them at remove ? */
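The one-character pata_ninja32 fix is the classic '=' versus '|=' bug: assigning pflags outright clobbers whatever flags the core had already set, while OR-ing in the new bits preserves them. A stand-alone illustration (made-up flag values):

#include <stdio.h>

#define FLAG_CORE		0x01	/* set earlier by generic code */
#define FLAG_PIO32		0x02
#define FLAG_PIO32CHANGE	0x04

int main(void)
{
	unsigned int assigned = FLAG_CORE, ored = FLAG_CORE;

	assigned = FLAG_PIO32 | FLAG_PIO32CHANGE;	/* FLAG_CORE lost */
	ored |= FLAG_PIO32 | FLAG_PIO32CHANGE;		/* FLAG_CORE kept */

	printf("=: %#x  |=: %#x\n", assigned, ored);
	return 0;
}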
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index e097d355cc04..82a081ea4317 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -301,7 +301,7 @@ static int rpm_idle(struct device *dev, int rpmflags)
 	int (*callback)(struct device *);
 	int retval;
 
-	trace_rpm_idle(dev, rpmflags);
+	trace_rpm_idle_rcuidle(dev, rpmflags);
 	retval = rpm_check_suspend_allowed(dev);
 	if (retval < 0)
 		;	/* Conditions are wrong. */
@@ -337,7 +337,7 @@ static int rpm_idle(struct device *dev, int rpmflags)
 			dev->power.request_pending = true;
 			queue_work(pm_wq, &dev->power.work);
 		}
-		trace_rpm_return_int(dev, _THIS_IP_, 0);
+		trace_rpm_return_int_rcuidle(dev, _THIS_IP_, 0);
 		return 0;
 	}
 
@@ -352,7 +352,7 @@ static int rpm_idle(struct device *dev, int rpmflags)
 	wake_up_all(&dev->power.wait_queue);
 
  out:
-	trace_rpm_return_int(dev, _THIS_IP_, retval);
+	trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
 	return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
 }
 
@@ -419,7 +419,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
 	struct device *parent = NULL;
 	int retval;
 
-	trace_rpm_suspend(dev, rpmflags);
+	trace_rpm_suspend_rcuidle(dev, rpmflags);
 
  repeat:
 	retval = rpm_check_suspend_allowed(dev);
@@ -549,7 +549,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
 	}
 
  out:
-	trace_rpm_return_int(dev, _THIS_IP_, retval);
+	trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
 
 	return retval;
 
@@ -601,7 +601,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
 	struct device *parent = NULL;
 	int retval = 0;
 
-	trace_rpm_resume(dev, rpmflags);
+	trace_rpm_resume_rcuidle(dev, rpmflags);
 
  repeat:
 	if (dev->power.runtime_error)
@@ -764,7 +764,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
 		spin_lock_irq(&dev->power.lock);
 	}
 
-	trace_rpm_return_int(dev, _THIS_IP_, retval);
+	trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
 
 	return retval;
 }
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
index aa56af87d941..b11af3f2c1db 100644
--- a/drivers/base/regmap/regcache-rbtree.c
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -404,6 +404,7 @@ static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
 	unsigned int new_base_reg, new_top_reg;
 	unsigned int min, max;
 	unsigned int max_dist;
+	unsigned int dist, best_dist = UINT_MAX;
 
 	max_dist = map->reg_stride * sizeof(*rbnode_tmp) /
 		map->cache_word_size;
@@ -423,24 +424,41 @@ static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
 						 &base_reg, &top_reg);
 
 		if (base_reg <= max && top_reg >= min) {
-			new_base_reg = min(reg, base_reg);
-			new_top_reg = max(reg, top_reg);
-		} else {
-			if (max < base_reg)
-				node = node->rb_left;
+			if (reg < base_reg)
+				dist = base_reg - reg;
+			else if (reg > top_reg)
+				dist = reg - top_reg;
 			else
-				node = node->rb_right;
-
-			continue;
+				dist = 0;
+			if (dist < best_dist) {
+				rbnode = rbnode_tmp;
+				best_dist = dist;
+				new_base_reg = min(reg, base_reg);
+				new_top_reg = max(reg, top_reg);
+			}
 		}
 
-		ret = regcache_rbtree_insert_to_block(map, rbnode_tmp,
+		/*
+		 * Keep looking, we want to choose the closest block,
+		 * otherwise we might end up creating overlapping
+		 * blocks, which breaks the rbtree.
+		 */
+		if (reg < base_reg)
+			node = node->rb_left;
+		else if (reg > top_reg)
+			node = node->rb_right;
+		else
+			break;
+	}
+
+	if (rbnode) {
+		ret = regcache_rbtree_insert_to_block(map, rbnode,
 						      new_base_reg,
 						      new_top_reg, reg,
 						      value);
 		if (ret)
 			return ret;
-		rbtree_ctx->cached_rbnode = rbnode_tmp;
+		rbtree_ctx->cached_rbnode = rbnode;
 		return 0;
 	}
 
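Rather than inserting into the first rbtree block whose padded range touches the new register, the rewritten loop keeps scanning and remembers the candidate at the smallest distance, because growing a farther block could create overlapping register ranges and corrupt the tree. The selection logic in isolation (plain array instead of an rbtree, hypothetical ranges):

#include <limits.h>
#include <stdio.h>

struct block { unsigned int base, top; };

/* Pick the block closest to reg; distance 0 means reg is inside it. */
static int pick_closest(const struct block *b, int n, unsigned int reg)
{
	unsigned int dist, best_dist = UINT_MAX;
	int i, best = -1;

	for (i = 0; i < n; i++) {
		if (reg < b[i].base)
			dist = b[i].base - reg;
		else if (reg > b[i].top)
			dist = reg - b[i].top;
		else
			dist = 0;
		if (dist < best_dist) {
			best_dist = dist;
			best = i;
		}
	}
	return best;
}

int main(void)
{
	struct block blocks[] = { { 0, 10 }, { 40, 50 } };

	/* reg 38 is nearer the second block; extending the first would overlap. */
	printf("closest block: %d\n", pick_closest(blocks, 2, 38));
	return 0;
}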
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index df7ff7290821..4e582561e1e7 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -38,10 +38,11 @@ static int regcache_hw_init(struct regmap *map)
 
 	/* calculate the size of reg_defaults */
 	for (count = 0, i = 0; i < map->num_reg_defaults_raw; i++)
-		if (!regmap_volatile(map, i * map->reg_stride))
+		if (regmap_readable(map, i * map->reg_stride) &&
+		    !regmap_volatile(map, i * map->reg_stride))
 			count++;
 
-	/* all registers are volatile, so just bypass */
+	/* all registers are unreadable or volatile, so just bypass */
 	if (!count) {
 		map->cache_bypass = true;
 		return 0;
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 51fa7d66a393..25d26bb18970 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -1474,6 +1474,8 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
 		ret = map->bus->write(map->bus_context, buf, len);
 
 		kfree(buf);
+	} else if (ret != 0 && !map->cache_bypass && map->format.parse_val) {
+		regcache_drop_region(map, reg, reg + 1);
 	}
 
 	trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes);
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index b71a9c767009..e3d8e4ced4a2 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3706,22 +3706,21 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
 	if (UFDCS->rawcmd == 1)
 		UFDCS->rawcmd = 2;
 
-	if (mode & (FMODE_READ|FMODE_WRITE)) {
-		UDRS->last_checked = 0;
-		clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
-		check_disk_change(bdev);
-		if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags))
-			goto out;
-		if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags))
+	if (!(mode & FMODE_NDELAY)) {
+		if (mode & (FMODE_READ|FMODE_WRITE)) {
+			UDRS->last_checked = 0;
+			clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
+			check_disk_change(bdev);
+			if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags))
+				goto out;
+			if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags))
+				goto out;
+		}
+		res = -EROFS;
+		if ((mode & FMODE_WRITE) &&
+		    !test_bit(FD_DISK_WRITABLE_BIT, &UDRS->flags))
 			goto out;
 	}
-
-	res = -EROFS;
-
-	if ((mode & FMODE_WRITE) &&
-	    !test_bit(FD_DISK_WRITABLE_BIT, &UDRS->flags))
-		goto out;
-
 	mutex_unlock(&open_lock);
 	mutex_unlock(&floppy_mutex);
 	return 0;
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 1a04af6d2421..6c6519f6492a 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -3950,6 +3950,7 @@ static void rbd_dev_release(struct device *dev)
 	bool need_put = !!rbd_dev->opts;
 
 	ceph_oid_destroy(&rbd_dev->header_oid);
+	ceph_oloc_destroy(&rbd_dev->header_oloc);
 
 	rbd_put_client(rbd_dev->rbd_client);
 	rbd_spec_put(rbd_dev->spec);
@@ -5336,15 +5337,6 @@ static ssize_t do_rbd_add(struct bus_type *bus,
 	}
 	spec->pool_id = (u64)rc;
 
-	/* The ceph file layout needs to fit pool id in 32 bits */
-
-	if (spec->pool_id > (u64)U32_MAX) {
-		rbd_warn(NULL, "pool id too large (%llu > %u)",
-			 (unsigned long long)spec->pool_id, U32_MAX);
-		rc = -EIO;
-		goto err_out_client;
-	}
-
 	rbd_dev = rbd_dev_create(rbdc, spec, rbd_opts);
 	if (!rbd_dev) {
 		rc = -ENOMEM;
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 1523e05c46fc..93b1aaa5ba3b 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -391,22 +391,16 @@ static int init_vq(struct virtio_blk *vblk)
 		num_vqs = 1;
 
 	vblk->vqs = kmalloc(sizeof(*vblk->vqs) * num_vqs, GFP_KERNEL);
-	if (!vblk->vqs) {
-		err = -ENOMEM;
-		goto out;
-	}
+	if (!vblk->vqs)
+		return -ENOMEM;
 
 	names = kmalloc(sizeof(*names) * num_vqs, GFP_KERNEL);
-	if (!names)
-		goto err_names;
-
 	callbacks = kmalloc(sizeof(*callbacks) * num_vqs, GFP_KERNEL);
-	if (!callbacks)
-		goto err_callbacks;
-
 	vqs = kmalloc(sizeof(*vqs) * num_vqs, GFP_KERNEL);
-	if (!vqs)
-		goto err_vqs;
+	if (!names || !callbacks || !vqs) {
+		err = -ENOMEM;
+		goto out;
+	}
 
 	for (i = 0; i < num_vqs; i++) {
 		callbacks[i] = virtblk_done;
@@ -417,7 +411,7 @@ static int init_vq(struct virtio_blk *vblk)
 	/* Discover virtqueues and write information to configuration. */
 	err = vdev->config->find_vqs(vdev, num_vqs, vqs, callbacks, names);
 	if (err)
-		goto err_find_vqs;
+		goto out;
 
 	for (i = 0; i < num_vqs; i++) {
 		spin_lock_init(&vblk->vqs[i].lock);
@@ -425,16 +419,12 @@ static int init_vq(struct virtio_blk *vblk)
 	}
 	vblk->num_vqs = num_vqs;
 
-err_find_vqs:
+out:
 	kfree(vqs);
-err_vqs:
 	kfree(callbacks);
-err_callbacks:
 	kfree(names);
-err_names:
 	if (err)
 		kfree(vblk->vqs);
-out:
 	return err;
 }
 
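The init_vq() cleanup works because kfree(NULL) is a no-op, so the three temporary allocations can share one error check and a single 'out' label frees them all unconditionally; the same idiom works in userspace, where free(NULL) is likewise defined to do nothing. A sketch of the pattern (hypothetical buffer sizes):

#include <stdio.h>
#include <stdlib.h>

static int init_buffers(size_t n)
{
	int err = 0;
	char *names = malloc(n);
	char *callbacks = malloc(n);
	char *vqs = malloc(n);

	if (!names || !callbacks || !vqs) {
		err = -1;
		goto out;
	}

	/* ... use the three buffers ... */

out:
	/* free(NULL) is a no-op, so one label covers every failure mode. */
	free(vqs);
	free(callbacks);
	free(names);
	return err;
}

int main(void)
{
	printf("init: %d\n", init_buffers(16));
	return 0;
}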
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index be4fea6a5dd3..88ef6d4729b4 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -189,6 +189,8 @@ struct blkfront_info
 	struct mutex mutex;
 	struct xenbus_device *xbdev;
 	struct gendisk *gd;
+	u16 sector_size;
+	unsigned int physical_sector_size;
 	int vdevice;
 	blkif_vdev_t handle;
 	enum blkif_state connected;
@@ -910,9 +912,45 @@ static struct blk_mq_ops blkfront_mq_ops = {
 	.map_queue = blk_mq_map_queue,
 };
 
+static void blkif_set_queue_limits(struct blkfront_info *info)
+{
+	struct request_queue *rq = info->rq;
+	struct gendisk *gd = info->gd;
+	unsigned int segments = info->max_indirect_segments ? :
+				BLKIF_MAX_SEGMENTS_PER_REQUEST;
+
+	queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);
+
+	if (info->feature_discard) {
+		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, rq);
+		blk_queue_max_discard_sectors(rq, get_capacity(gd));
+		rq->limits.discard_granularity = info->discard_granularity;
+		rq->limits.discard_alignment = info->discard_alignment;
+		if (info->feature_secdiscard)
+			queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, rq);
+	}
+
+	/* Hard sector size and max sectors impersonate the equiv. hardware. */
+	blk_queue_logical_block_size(rq, info->sector_size);
+	blk_queue_physical_block_size(rq, info->physical_sector_size);
+	blk_queue_max_hw_sectors(rq, (segments * XEN_PAGE_SIZE) / 512);
+
+	/* Each segment in a request is up to an aligned page in size. */
+	blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
+	blk_queue_max_segment_size(rq, PAGE_SIZE);
+
+	/* Ensure a merged request will fit in a single I/O ring slot. */
+	blk_queue_max_segments(rq, segments / GRANTS_PER_PSEG);
+
+	/* Make sure buffer addresses are sector-aligned. */
+	blk_queue_dma_alignment(rq, 511);
+
+	/* Make sure we don't use bounce buffers. */
+	blk_queue_bounce_limit(rq, BLK_BOUNCE_ANY);
+}
+
 static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
-				unsigned int physical_sector_size,
-				unsigned int segments)
+				unsigned int physical_sector_size)
 {
 	struct request_queue *rq;
 	struct blkfront_info *info = gd->private_data;
@@ -944,36 +982,11 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
 	}
 
 	rq->queuedata = info;
-	queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);
-
-	if (info->feature_discard) {
-		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, rq);
-		blk_queue_max_discard_sectors(rq, get_capacity(gd));
-		rq->limits.discard_granularity = info->discard_granularity;
-		rq->limits.discard_alignment = info->discard_alignment;
-		if (info->feature_secdiscard)
-			queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, rq);
-	}
-
-	/* Hard sector size and max sectors impersonate the equiv. hardware. */
-	blk_queue_logical_block_size(rq, sector_size);
-	blk_queue_physical_block_size(rq, physical_sector_size);
-	blk_queue_max_hw_sectors(rq, (segments * XEN_PAGE_SIZE) / 512);
-
-	/* Each segment in a request is up to an aligned page in size. */
-	blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
-	blk_queue_max_segment_size(rq, PAGE_SIZE);
-
-	/* Ensure a merged request will fit in a single I/O ring slot. */
-	blk_queue_max_segments(rq, segments / GRANTS_PER_PSEG);
-
-	/* Make sure buffer addresses are sector-aligned. */
-	blk_queue_dma_alignment(rq, 511);
-
-	/* Make sure we don't use bounce buffers. */
-	blk_queue_bounce_limit(rq, BLK_BOUNCE_ANY);
-
-	gd->queue = rq;
+	info->rq = gd->queue = rq;
+	info->gd = gd;
+	info->sector_size = sector_size;
+	info->physical_sector_size = physical_sector_size;
+	blkif_set_queue_limits(info);
 
 	return 0;
 }
@@ -1136,16 +1149,11 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
 	gd->private_data = info;
 	set_capacity(gd, capacity);
 
-	if (xlvbd_init_blk_queue(gd, sector_size, physical_sector_size,
-				 info->max_indirect_segments ? :
-				 BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
+	if (xlvbd_init_blk_queue(gd, sector_size, physical_sector_size)) {
 		del_gendisk(gd);
 		goto release;
 	}
 
-	info->rq = gd->queue;
-	info->gd = gd;
-
 	xlvbd_flush(info);
 
 	if (vdisk_info & VDISK_READONLY)
@@ -1315,7 +1323,7 @@ free_shadow:
 			rinfo->ring_ref[i] = GRANT_INVALID_REF;
 		}
 	}
-	free_pages((unsigned long)rinfo->ring.sring, get_order(info->nr_ring_pages * PAGE_SIZE));
+	free_pages((unsigned long)rinfo->ring.sring, get_order(info->nr_ring_pages * XEN_PAGE_SIZE));
 	rinfo->ring.sring = NULL;
 
 	if (rinfo->irq)
@@ -2007,8 +2015,10 @@ static int blkif_recover(struct blkfront_info *info)
 	struct split_bio *split_bio;
 
 	blkfront_gather_backend_features(info);
+	/* Reset limits changed by blk_mq_update_nr_hw_queues(). */
+	blkif_set_queue_limits(info);
 	segs = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST;
-	blk_queue_max_segments(info->rq, segs);
+	blk_queue_max_segments(info->rq, segs / GRANTS_PER_PSEG);
 
 	for (r_index = 0; r_index < info->nr_rings; r_index++) {
 		struct blkfront_ring_info *rinfo = &info->rinfo[r_index];
@@ -2432,7 +2442,7 @@ static void blkfront_connect(struct blkfront_info *info)
 	if (err) {
 		xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
 				 info->xbdev->otherend);
-		return;
+		goto fail;
 	}
 
 	xenbus_switch_state(info->xbdev, XenbusStateConnected);
@@ -2445,6 +2455,11 @@ static void blkfront_connect(struct blkfront_info *info)
 	device_add_disk(&info->xbdev->dev, info->gd);
 
 	info->is_ready = 1;
+	return;
+
+fail:
+	blkif_free(info, 0);
+	return;
 }
 
 /**
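Pulling the queue-limit programming out into blkif_set_queue_limits(), driven entirely by state cached in blkfront_info, makes the setup idempotent and re-runnable: blkif_recover() can simply call it again after blk_mq_update_nr_hw_queues() has reset the limits. The general shape of a config-driven, re-appliable setup helper (hypothetical fields and apply function):

#include <stdio.h>

struct disk_info {
	unsigned int sector_size;
	unsigned int max_segments;
};

struct queue { unsigned int logical_block_size, max_segments; };

/* Everything derives from cached state, so calling this twice is harmless. */
static void set_queue_limits(struct queue *q, const struct disk_info *info)
{
	q->logical_block_size = info->sector_size;
	q->max_segments = info->max_segments;
}

int main(void)
{
	struct disk_info info = { .sector_size = 512, .max_segments = 32 };
	struct queue q = { 0 };

	set_queue_limits(&q, &info);	/* initial setup */
	q.max_segments = 128;		/* stand-in for a reset during recovery */
	set_queue_limits(&q, &info);	/* recovery path re-applies the limits */

	printf("%u %u\n", q.logical_block_size, q.max_segments);
	return 0;
}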
diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c
index 5755907f836f..ffa7c9dcbd7a 100644
--- a/drivers/bus/arm-cci.c
+++ b/drivers/bus/arm-cci.c
@@ -551,7 +551,7 @@ static struct attribute *cci5xx_pmu_event_attrs[] = {
 	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_wrq, 0xB),
 	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_cd_hs, 0xC),
 	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_rq_stall_addr_hazard, 0xD),
-	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snopp_rq_stall_tt_full, 0xE),
+	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_rq_stall_tt_full, 0xE),
 	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_rq_tzmp1_prot, 0xF),
 	NULL
 };
diff --git a/drivers/bus/arm-ccn.c b/drivers/bus/arm-ccn.c
index 97a9185af433..884c0305e290 100644
--- a/drivers/bus/arm-ccn.c
+++ b/drivers/bus/arm-ccn.c
@@ -187,6 +187,7 @@ struct arm_ccn {
 	struct arm_ccn_component *xp;
 
 	struct arm_ccn_dt dt;
+	int mn_id;
 };
 
 static DEFINE_MUTEX(arm_ccn_mutex);
@@ -212,6 +213,7 @@ static int arm_ccn_node_to_xp_port(int node)
 #define CCN_CONFIG_TYPE(_config)	(((_config) >> 8) & 0xff)
 #define CCN_CONFIG_EVENT(_config)	(((_config) >> 16) & 0xff)
 #define CCN_CONFIG_PORT(_config)	(((_config) >> 24) & 0x3)
+#define CCN_CONFIG_BUS(_config)	(((_config) >> 24) & 0x3)
 #define CCN_CONFIG_VC(_config)	(((_config) >> 26) & 0x7)
 #define CCN_CONFIG_DIR(_config)	(((_config) >> 29) & 0x1)
 #define CCN_CONFIG_MASK(_config)	(((_config) >> 30) & 0xf)
@@ -241,6 +243,7 @@ static CCN_FORMAT_ATTR(xp, "config:0-7");
 static CCN_FORMAT_ATTR(type, "config:8-15");
 static CCN_FORMAT_ATTR(event, "config:16-23");
 static CCN_FORMAT_ATTR(port, "config:24-25");
+static CCN_FORMAT_ATTR(bus, "config:24-25");
 static CCN_FORMAT_ATTR(vc, "config:26-28");
 static CCN_FORMAT_ATTR(dir, "config:29-29");
 static CCN_FORMAT_ATTR(mask, "config:30-33");
@@ -253,6 +256,7 @@ static struct attribute *arm_ccn_pmu_format_attrs[] = {
 	&arm_ccn_pmu_format_attr_type.attr.attr,
 	&arm_ccn_pmu_format_attr_event.attr.attr,
 	&arm_ccn_pmu_format_attr_port.attr.attr,
+	&arm_ccn_pmu_format_attr_bus.attr.attr,
 	&arm_ccn_pmu_format_attr_vc.attr.attr,
 	&arm_ccn_pmu_format_attr_dir.attr.attr,
 	&arm_ccn_pmu_format_attr_mask.attr.attr,
@@ -328,6 +332,7 @@ struct arm_ccn_pmu_event {
 static ssize_t arm_ccn_pmu_event_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
+	struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev));
 	struct arm_ccn_pmu_event *event = container_of(attr,
 			struct arm_ccn_pmu_event, attr);
 	ssize_t res;
@@ -349,10 +354,17 @@ static ssize_t arm_ccn_pmu_event_show(struct device *dev,
 		break;
 	case CCN_TYPE_XP:
 		res += snprintf(buf + res, PAGE_SIZE - res,
-				",xp=?,port=?,vc=?,dir=?");
+				",xp=?,vc=?");
 		if (event->event == CCN_EVENT_WATCHPOINT)
 			res += snprintf(buf + res, PAGE_SIZE - res,
-					",cmp_l=?,cmp_h=?,mask=?");
+					",port=?,dir=?,cmp_l=?,cmp_h=?,mask=?");
+		else
+			res += snprintf(buf + res, PAGE_SIZE - res,
+					",bus=?");
+
+		break;
+	case CCN_TYPE_MN:
+		res += snprintf(buf + res, PAGE_SIZE - res, ",node=%d", ccn->mn_id);
 		break;
 	default:
 		res += snprintf(buf + res, PAGE_SIZE - res, ",node=?");
@@ -383,9 +395,9 @@ static umode_t arm_ccn_pmu_events_is_visible(struct kobject *kobj,
 }
 
 static struct arm_ccn_pmu_event arm_ccn_pmu_events[] = {
-	CCN_EVENT_MN(eobarrier, "dir=0,vc=0,cmp_h=0x1c00", CCN_IDX_MASK_OPCODE),
-	CCN_EVENT_MN(ecbarrier, "dir=0,vc=0,cmp_h=0x1e00", CCN_IDX_MASK_OPCODE),
-	CCN_EVENT_MN(dvmop, "dir=0,vc=0,cmp_h=0x2800", CCN_IDX_MASK_OPCODE),
+	CCN_EVENT_MN(eobarrier, "dir=1,vc=0,cmp_h=0x1c00", CCN_IDX_MASK_OPCODE),
+	CCN_EVENT_MN(ecbarrier, "dir=1,vc=0,cmp_h=0x1e00", CCN_IDX_MASK_OPCODE),
+	CCN_EVENT_MN(dvmop, "dir=1,vc=0,cmp_h=0x2800", CCN_IDX_MASK_OPCODE),
 	CCN_EVENT_HNI(txdatflits, "dir=1,vc=3", CCN_IDX_MASK_ANY),
 	CCN_EVENT_HNI(rxdatflits, "dir=0,vc=3", CCN_IDX_MASK_ANY),
 	CCN_EVENT_HNI(txreqflits, "dir=1,vc=0", CCN_IDX_MASK_ANY),
@@ -733,9 +745,10 @@ static int arm_ccn_pmu_event_init(struct perf_event *event)
 
 	if (has_branch_stack(event) || event->attr.exclude_user ||
 			event->attr.exclude_kernel || event->attr.exclude_hv ||
-			event->attr.exclude_idle) {
+			event->attr.exclude_idle || event->attr.exclude_host ||
+			event->attr.exclude_guest) {
 		dev_warn(ccn->dev, "Can't exclude execution levels!\n");
-		return -EOPNOTSUPP;
+		return -EINVAL;
 	}
 
 	if (event->cpu < 0) {
@@ -759,6 +772,12 @@ static int arm_ccn_pmu_event_init(struct perf_event *event)
 
 	/* Validate node/xp vs topology */
 	switch (type) {
+	case CCN_TYPE_MN:
+		if (node_xp != ccn->mn_id) {
+			dev_warn(ccn->dev, "Invalid MN ID %d!\n", node_xp);
+			return -EINVAL;
+		}
+		break;
 	case CCN_TYPE_XP:
 		if (node_xp >= ccn->num_xps) {
 			dev_warn(ccn->dev, "Invalid XP ID %d!\n", node_xp);
@@ -886,6 +905,10 @@ static void arm_ccn_pmu_xp_dt_config(struct perf_event *event, int enable)
 	struct arm_ccn_component *xp;
 	u32 val, dt_cfg;
 
+	/* Nothing to do for cycle counter */
+	if (hw->idx == CCN_IDX_PMU_CYCLE_COUNTER)
+		return;
+
 	if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP)
 		xp = &ccn->xp[CCN_CONFIG_XP(event->attr.config)];
 	else
@@ -917,38 +940,17 @@ static void arm_ccn_pmu_event_start(struct perf_event *event, int flags)
 			arm_ccn_pmu_read_counter(ccn, hw->idx));
 	hw->state = 0;
 
-	/*
-	 * Pin the timer, so that the overflows are handled by the chosen
-	 * event->cpu (this is the same one as presented in "cpumask"
-	 * attribute).
-	 */
-	if (!ccn->irq)
-		hrtimer_start(&ccn->dt.hrtimer, arm_ccn_pmu_timer_period(),
-				HRTIMER_MODE_REL_PINNED);
-
 	/* Set the DT bus input, engaging the counter */
 	arm_ccn_pmu_xp_dt_config(event, 1);
 }
 
 static void arm_ccn_pmu_event_stop(struct perf_event *event, int flags)
 {
-	struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
 	struct hw_perf_event *hw = &event->hw;
-	u64 timeout;
 
 	/* Disable counting, setting the DT bus to pass-through mode */
 	arm_ccn_pmu_xp_dt_config(event, 0);
 
-	if (!ccn->irq)
-		hrtimer_cancel(&ccn->dt.hrtimer);
-
-	/* Let the DT bus drain */
-	timeout = arm_ccn_pmu_read_counter(ccn, CCN_IDX_PMU_CYCLE_COUNTER) +
-			ccn->num_xps;
-	while (arm_ccn_pmu_read_counter(ccn, CCN_IDX_PMU_CYCLE_COUNTER) <
-			timeout)
-		cpu_relax();
-
 	if (flags & PERF_EF_UPDATE)
 		arm_ccn_pmu_event_update(event);
 
@@ -988,7 +990,7 @@ static void arm_ccn_pmu_xp_watchpoint_config(struct perf_event *event)
 
 	/* Comparison values */
 	writel(cmp_l & 0xffffffff, source->base + CCN_XP_DT_CMP_VAL_L(wp));
-	writel((cmp_l >> 32) & 0xefffffff,
+	writel((cmp_l >> 32) & 0x7fffffff,
 			source->base + CCN_XP_DT_CMP_VAL_L(wp) + 4);
 	writel(cmp_h & 0xffffffff, source->base + CCN_XP_DT_CMP_VAL_H(wp));
 	writel((cmp_h >> 32) & 0x0fffffff,
@@ -996,7 +998,7 @@ static void arm_ccn_pmu_xp_watchpoint_config(struct perf_event *event)
 
 	/* Mask */
 	writel(mask_l & 0xffffffff, source->base + CCN_XP_DT_CMP_MASK_L(wp));
-	writel((mask_l >> 32) & 0xefffffff,
+	writel((mask_l >> 32) & 0x7fffffff,
 			source->base + CCN_XP_DT_CMP_MASK_L(wp) + 4);
 	writel(mask_h & 0xffffffff, source->base + CCN_XP_DT_CMP_MASK_H(wp));
 	writel((mask_h >> 32) & 0x0fffffff,
@@ -1014,7 +1016,7 @@ static void arm_ccn_pmu_xp_event_config(struct perf_event *event)
 	hw->event_base = CCN_XP_DT_CONFIG__DT_CFG__XP_PMU_EVENT(hw->config_base);
 
 	id = (CCN_CONFIG_VC(event->attr.config) << 4) |
-			(CCN_CONFIG_PORT(event->attr.config) << 3) |
+			(CCN_CONFIG_BUS(event->attr.config) << 3) |
 			(CCN_CONFIG_EVENT(event->attr.config) << 0);
 
 	val = readl(source->base + CCN_XP_PMU_EVENT_SEL);
@@ -1099,15 +1101,31 @@ static void arm_ccn_pmu_event_config(struct perf_event *event)
 	spin_unlock(&ccn->dt.config_lock);
 }
 
+static int arm_ccn_pmu_active_counters(struct arm_ccn *ccn)
+{
+	return bitmap_weight(ccn->dt.pmu_counters_mask,
+			     CCN_NUM_PMU_EVENT_COUNTERS + 1);
+}
+
 static int arm_ccn_pmu_event_add(struct perf_event *event, int flags)
 {
 	int err;
 	struct hw_perf_event *hw = &event->hw;
+	struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
 
 	err = arm_ccn_pmu_event_alloc(event);
 	if (err)
 		return err;
 
+	/*
+	 * Pin the timer, so that the overflows are handled by the chosen
+	 * event->cpu (this is the same one as presented in "cpumask"
+	 * attribute).
+	 */
+	if (!ccn->irq && arm_ccn_pmu_active_counters(ccn) == 1)
+		hrtimer_start(&ccn->dt.hrtimer, arm_ccn_pmu_timer_period(),
+			      HRTIMER_MODE_REL_PINNED);
+
 	arm_ccn_pmu_event_config(event);
 
 	hw->state = PERF_HES_STOPPED;
@@ -1120,9 +1138,14 @@ static int arm_ccn_pmu_event_add(struct perf_event *event, int flags)
 
 static void arm_ccn_pmu_event_del(struct perf_event *event, int flags)
 {
+	struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
+
 	arm_ccn_pmu_event_stop(event, PERF_EF_UPDATE);
 
 	arm_ccn_pmu_event_release(event);
+
+	if (!ccn->irq && arm_ccn_pmu_active_counters(ccn) == 0)
+		hrtimer_cancel(&ccn->dt.hrtimer);
 }
 
 static void arm_ccn_pmu_event_read(struct perf_event *event)
@@ -1130,6 +1153,24 @@ static void arm_ccn_pmu_event_read(struct perf_event *event)
 	arm_ccn_pmu_event_update(event);
 }
 
+static void arm_ccn_pmu_enable(struct pmu *pmu)
+{
+	struct arm_ccn *ccn = pmu_to_arm_ccn(pmu);
+
+	u32 val = readl(ccn->dt.base + CCN_DT_PMCR);
+	val |= CCN_DT_PMCR__PMU_EN;
+	writel(val, ccn->dt.base + CCN_DT_PMCR);
+}
+
+static void arm_ccn_pmu_disable(struct pmu *pmu)
+{
+	struct arm_ccn *ccn = pmu_to_arm_ccn(pmu);
+
+	u32 val = readl(ccn->dt.base + CCN_DT_PMCR);
+	val &= ~CCN_DT_PMCR__PMU_EN;
+	writel(val, ccn->dt.base + CCN_DT_PMCR);
+}
+
 static irqreturn_t arm_ccn_pmu_overflow_handler(struct arm_ccn_dt *dt)
 {
 	u32 pmovsr = readl(dt->base + CCN_DT_PMOVSR);
@@ -1252,6 +1293,8 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn)
 		.start = arm_ccn_pmu_event_start,
 		.stop = arm_ccn_pmu_event_stop,
 		.read = arm_ccn_pmu_event_read,
+		.pmu_enable = arm_ccn_pmu_enable,
+		.pmu_disable = arm_ccn_pmu_disable,
 	};
 
 	/* No overflow interrupt? Have to use a timer instead. */
@@ -1361,6 +1404,8 @@ static int arm_ccn_init_nodes(struct arm_ccn *ccn, int region,
 
 	switch (type) {
 	case CCN_TYPE_MN:
+		ccn->mn_id = id;
+		return 0;
 	case CCN_TYPE_DT:
 		return 0;
 	case CCN_TYPE_XP:
@@ -1471,8 +1516,9 @@ static int arm_ccn_probe(struct platform_device *pdev)
 	/* Can set 'disable' bits, so can acknowledge interrupts */
 	writel(CCN_MN_ERRINT_STATUS__PMU_EVENTS__ENABLE,
 	       ccn->base + CCN_MN_ERRINT_STATUS);
-	err = devm_request_irq(ccn->dev, irq, arm_ccn_irq_handler, 0,
-			       dev_name(ccn->dev), ccn);
+	err = devm_request_irq(ccn->dev, irq, arm_ccn_irq_handler,
+			       IRQF_NOBALANCING | IRQF_NO_THREAD,
+			       dev_name(ccn->dev), ccn);
 	if (err)
 		return err;
 
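The arm-ccn rework moves the polling hrtimer out of the per-event start/stop path: the timer is armed only when the first counter is added and cancelled when the last one is removed, with a bitmap weight serving as the active-counter count. The gating pattern on its own (a plain counter standing in for the bitmap, printfs standing in for the hrtimer calls):

#include <stdio.h>

static int active_counters;

static void timer_start(void)  { printf("timer start\n"); }
static void timer_cancel(void) { printf("timer cancel\n"); }

static void event_add(void)
{
	if (++active_counters == 1)	/* first user arms the timer */
		timer_start();
}

static void event_del(void)
{
	if (--active_counters == 0)	/* last user disarms it */
		timer_cancel();
}

int main(void)
{
	event_add();	/* starts the timer */
	event_add();	/* no-op for the timer */
	event_del();	/* still one user left */
	event_del();	/* cancels the timer */
	return 0;
}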
diff --git a/drivers/bus/vexpress-config.c b/drivers/bus/vexpress-config.c
index c3cb76b363c6..9efdf1de4035 100644
--- a/drivers/bus/vexpress-config.c
+++ b/drivers/bus/vexpress-config.c
@@ -178,6 +178,7 @@ static int vexpress_config_populate(struct device_node *node)
 
 	parent = class_find_device(vexpress_config_class, NULL, bridge,
 				   vexpress_config_node_match);
+	of_node_put(bridge);
 	if (WARN_ON(!parent))
 		return -ENODEV;
 
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 56ad5a5936a9..8c0770bf8881 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -244,7 +244,7 @@ config HW_RANDOM_TX4939
 
 config HW_RANDOM_MXC_RNGA
 	tristate "Freescale i.MX RNGA Random Number Generator"
-	depends on ARCH_HAS_RNGA
+	depends on SOC_IMX31
 	default HW_RANDOM
 	---help---
 	  This driver provides kernel-side support for the Random Number
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index 08c7e23ed535..0c75c3f1689f 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -957,7 +957,7 @@ int tpm2_auto_startup(struct tpm_chip *chip)
 		goto out;
 
 	rc = tpm2_do_selftest(chip);
-	if (rc != TPM2_RC_INITIALIZE) {
+	if (rc != 0 && rc != TPM2_RC_INITIALIZE) {
 		dev_err(&chip->dev, "TPM self test failed\n");
 		goto out;
 	}
@@ -974,7 +974,6 @@ int tpm2_auto_startup(struct tpm_chip *chip)
 		}
 	}
 
-	return rc;
 out:
 	if (rc > 0)
 		rc = -ENODEV;
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index d2406fe25533..5da47e26a012 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -165,6 +165,12 @@ struct ports_device {
 	 */
 	struct virtqueue *c_ivq, *c_ovq;
 
+	/*
+	 * A control packet buffer for guest->host requests, protected
+	 * by c_ovq_lock.
+	 */
+	struct virtio_console_control cpkt;
+
 	/* Array of per-port IO virtqueues */
 	struct virtqueue **in_vqs, **out_vqs;
 
@@ -560,28 +566,29 @@ static ssize_t __send_control_msg(struct ports_device *portdev, u32 port_id,
 				  unsigned int event, unsigned int value)
 {
 	struct scatterlist sg[1];
-	struct virtio_console_control cpkt;
 	struct virtqueue *vq;
 	unsigned int len;
 
 	if (!use_multiport(portdev))
 		return 0;
 
-	cpkt.id = cpu_to_virtio32(portdev->vdev, port_id);
-	cpkt.event = cpu_to_virtio16(portdev->vdev, event);
-	cpkt.value = cpu_to_virtio16(portdev->vdev, value);
-
 	vq = portdev->c_ovq;
 
-	sg_init_one(sg, &cpkt, sizeof(cpkt));
-
 	spin_lock(&portdev->c_ovq_lock);
-	if (virtqueue_add_outbuf(vq, sg, 1, &cpkt, GFP_ATOMIC) == 0) {
+
+	portdev->cpkt.id = cpu_to_virtio32(portdev->vdev, port_id);
+	portdev->cpkt.event = cpu_to_virtio16(portdev->vdev, event);
+	portdev->cpkt.value = cpu_to_virtio16(portdev->vdev, value);
+
+	sg_init_one(sg, &portdev->cpkt, sizeof(struct virtio_console_control));
+
+	if (virtqueue_add_outbuf(vq, sg, 1, &portdev->cpkt, GFP_ATOMIC) == 0) {
 		virtqueue_kick(vq);
 		while (!virtqueue_get_buf(vq, &len)
 			&& !virtqueue_is_broken(vq))
 			cpu_relax();
 	}
+
 	spin_unlock(&portdev->c_ovq_lock);
 	return 0;
 }
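__send_control_msg() used to build the control packet in a stack variable and hand its address to the virtqueue; moving the buffer into ports_device (serialized by c_ovq_lock) keeps it valid and DMA-able for as long as the device holds the pointer, which stack memory is not guaranteed to be, particularly with virtually mapped stacks. The ownership pattern, reduced to a sketch (hypothetical device struct; a printf stands in for the submit call that retains the pointer):

#include <pthread.h>
#include <stdio.h>
#include <string.h>

struct ctrl_pkt { unsigned int id, event, value; };

struct ports_device {
	pthread_mutex_t c_ovq_lock;
	struct ctrl_pkt cpkt;	/* long-lived buffer, guarded by the lock */
};

/* Stand-in for virtqueue_add_outbuf(): the device keeps this pointer. */
static void submit(const struct ctrl_pkt *pkt)
{
	printf("submitted id=%u event=%u value=%u\n",
	       pkt->id, pkt->event, pkt->value);
}

static void send_control_msg(struct ports_device *pd, unsigned int id,
			     unsigned int event, unsigned int value)
{
	pthread_mutex_lock(&pd->c_ovq_lock);
	pd->cpkt.id = id;		/* fill the device-owned buffer ... */
	pd->cpkt.event = event;
	pd->cpkt.value = value;
	submit(&pd->cpkt);		/* ... not a stack temporary */
	pthread_mutex_unlock(&pd->c_ovq_lock);
}

int main(void)
{
	struct ports_device pd;

	memset(&pd, 0, sizeof(pd));
	pthread_mutex_init(&pd.c_ovq_lock, NULL);
	send_control_msg(&pd, 1, 2, 3);
	return 0;
}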
diff --git a/drivers/clk/renesas/r8a7795-cpg-mssr.c b/drivers/clk/renesas/r8a7795-cpg-mssr.c
index d359c92e13a6..e38bf60c0ff4 100644
--- a/drivers/clk/renesas/r8a7795-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7795-cpg-mssr.c
@@ -69,6 +69,7 @@ static const struct cpg_core_clk r8a7795_core_clks[] __initconst = {
 	DEF_FIXED(".s1", CLK_S1, CLK_PLL1_DIV2, 3, 1),
 	DEF_FIXED(".s2", CLK_S2, CLK_PLL1_DIV2, 4, 1),
 	DEF_FIXED(".s3", CLK_S3, CLK_PLL1_DIV2, 6, 1),
+	DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL1_DIV2, 2, 1),
 
 	/* Core Clock Outputs */
 	DEF_FIXED("ztr", R8A7795_CLK_ZTR, CLK_PLL1_DIV2, 6, 1),
@@ -87,10 +88,10 @@ static const struct cpg_core_clk r8a7795_core_clks[] __initconst = {
 	DEF_FIXED("s3d2", R8A7795_CLK_S3D2, CLK_S3, 2, 1),
 	DEF_FIXED("s3d4", R8A7795_CLK_S3D4, CLK_S3, 4, 1),
 
-	DEF_GEN3_SD("sd0", R8A7795_CLK_SD0, CLK_PLL1_DIV2, 0x0074),
-	DEF_GEN3_SD("sd1", R8A7795_CLK_SD1, CLK_PLL1_DIV2, 0x0078),
-	DEF_GEN3_SD("sd2", R8A7795_CLK_SD2, CLK_PLL1_DIV2, 0x0268),
-	DEF_GEN3_SD("sd3", R8A7795_CLK_SD3, CLK_PLL1_DIV2, 0x026c),
+	DEF_GEN3_SD("sd0", R8A7795_CLK_SD0, CLK_SDSRC, 0x0074),
+	DEF_GEN3_SD("sd1", R8A7795_CLK_SD1, CLK_SDSRC, 0x0078),
+	DEF_GEN3_SD("sd2", R8A7795_CLK_SD2, CLK_SDSRC, 0x0268),
+	DEF_GEN3_SD("sd3", R8A7795_CLK_SD3, CLK_SDSRC, 0x026c),
 
 	DEF_FIXED("cl", R8A7795_CLK_CL, CLK_PLL1_DIV2, 48, 1),
 	DEF_FIXED("cp", R8A7795_CLK_CP, CLK_EXTAL, 2, 1),
diff --git a/drivers/clk/rockchip/clk-rk3399.c b/drivers/clk/rockchip/clk-rk3399.c
index c109d80e7a8a..cdfabeb9a034 100644
--- a/drivers/clk/rockchip/clk-rk3399.c
+++ b/drivers/clk/rockchip/clk-rk3399.c
@@ -833,9 +833,9 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = {
 
 	/* perihp */
 	GATE(0, "cpll_aclk_perihp_src", "cpll", CLK_IGNORE_UNUSED,
-			RK3399_CLKGATE_CON(5), 0, GFLAGS),
-	GATE(0, "gpll_aclk_perihp_src", "gpll", CLK_IGNORE_UNUSED,
 			RK3399_CLKGATE_CON(5), 1, GFLAGS),
+	GATE(0, "gpll_aclk_perihp_src", "gpll", CLK_IGNORE_UNUSED,
+			RK3399_CLKGATE_CON(5), 0, GFLAGS),
 	COMPOSITE(ACLK_PERIHP, "aclk_perihp", mux_aclk_perihp_p, CLK_IGNORE_UNUSED,
 			RK3399_CLKSEL_CON(14), 7, 1, MFLAGS, 0, 5, DFLAGS,
 			RK3399_CLKGATE_CON(5), 2, GFLAGS),
@@ -923,9 +923,9 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = {
 			RK3399_CLKGATE_CON(6), 14, GFLAGS),
 
 	GATE(0, "cpll_aclk_emmc_src", "cpll", CLK_IGNORE_UNUSED,
-			RK3399_CLKGATE_CON(6), 12, GFLAGS),
-	GATE(0, "gpll_aclk_emmc_src", "gpll", CLK_IGNORE_UNUSED,
 			RK3399_CLKGATE_CON(6), 13, GFLAGS),
+	GATE(0, "gpll_aclk_emmc_src", "gpll", CLK_IGNORE_UNUSED,
+			RK3399_CLKGATE_CON(6), 12, GFLAGS),
 	COMPOSITE_NOGATE(ACLK_EMMC, "aclk_emmc", mux_aclk_emmc_p, CLK_IGNORE_UNUSED,
 			RK3399_CLKSEL_CON(21), 7, 1, MFLAGS, 0, 5, DFLAGS),
 	GATE(ACLK_EMMC_CORE, "aclk_emmccore", "aclk_emmc", CLK_IGNORE_UNUSED,
@@ -1071,7 +1071,7 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = {
 	/* vio */
 	COMPOSITE(ACLK_VIO, "aclk_vio", mux_pll_src_cpll_gpll_ppll_p, CLK_IGNORE_UNUSED,
 			RK3399_CLKSEL_CON(42), 6, 2, MFLAGS, 0, 5, DFLAGS,
-			RK3399_CLKGATE_CON(11), 10, GFLAGS),
+			RK3399_CLKGATE_CON(11), 0, GFLAGS),
 	COMPOSITE_NOMUX(PCLK_VIO, "pclk_vio", "aclk_vio", 0,
 			RK3399_CLKSEL_CON(43), 0, 5, DFLAGS,
 			RK3399_CLKGATE_CON(11), 1, GFLAGS),
@@ -1484,6 +1484,7 @@ static const char *const rk3399_cru_critical_clocks[] __initconst = {
 	"hclk_perilp1",
 	"hclk_perilp1_noc",
 	"aclk_dmac0_perilp",
+	"aclk_emmc_noc",
 	"gpll_hclk_perilp1_src",
 	"gpll_aclk_perilp0_src",
 	"gpll_aclk_perihp_src",
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
index 9af359544110..267f99523fbe 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
@@ -783,14 +783,14 @@ static struct ccu_reset_map sun8i_h3_ccu_resets[] = {
 	[RST_BUS_I2S1]		=  { 0x2d0, BIT(13) },
 	[RST_BUS_I2S2]		=  { 0x2d0, BIT(14) },
 
-	[RST_BUS_I2C0]		=  { 0x2d4, BIT(0) },
-	[RST_BUS_I2C1]		=  { 0x2d4, BIT(1) },
-	[RST_BUS_I2C2]		=  { 0x2d4, BIT(2) },
-	[RST_BUS_UART0]		=  { 0x2d4, BIT(16) },
-	[RST_BUS_UART1]		=  { 0x2d4, BIT(17) },
-	[RST_BUS_UART2]		=  { 0x2d4, BIT(18) },
-	[RST_BUS_UART3]		=  { 0x2d4, BIT(19) },
-	[RST_BUS_SCR]		=  { 0x2d4, BIT(20) },
+	[RST_BUS_I2C0]		=  { 0x2d8, BIT(0) },
+	[RST_BUS_I2C1]		=  { 0x2d8, BIT(1) },
+	[RST_BUS_I2C2]		=  { 0x2d8, BIT(2) },
+	[RST_BUS_UART0]		=  { 0x2d8, BIT(16) },
+	[RST_BUS_UART1]		=  { 0x2d8, BIT(17) },
+	[RST_BUS_UART2]		=  { 0x2d8, BIT(18) },
+	[RST_BUS_UART3]		=  { 0x2d8, BIT(19) },
+	[RST_BUS_SCR]		=  { 0x2d8, BIT(20) },
 };
 
 static const struct sunxi_ccu_desc sun8i_h3_ccu_desc = {
diff --git a/drivers/clk/sunxi-ng/ccu_common.c b/drivers/clk/sunxi-ng/ccu_common.c
index fc17b5295e16..51d4bac97ab3 100644
--- a/drivers/clk/sunxi-ng/ccu_common.c
+++ b/drivers/clk/sunxi-ng/ccu_common.c
@@ -31,7 +31,7 @@ void ccu_helper_wait_for_lock(struct ccu_common *common, u32 lock)
 		return;
 
 	WARN_ON(readl_relaxed_poll_timeout(common->base + common->reg, reg,
-					   !(reg & lock), 100, 70000));
+					   reg & lock, 100, 70000));
 }
 
 int sunxi_ccu_probe(struct device_node *node, void __iomem *reg,
diff --git a/drivers/clk/sunxi-ng/ccu_nk.c b/drivers/clk/sunxi-ng/ccu_nk.c
index 4470ffc8cf0d..d6fafb397489 100644
--- a/drivers/clk/sunxi-ng/ccu_nk.c
+++ b/drivers/clk/sunxi-ng/ccu_nk.c
@@ -14,9 +14,9 @@
14#include "ccu_gate.h" 14#include "ccu_gate.h"
15#include "ccu_nk.h" 15#include "ccu_nk.h"
16 16
17void ccu_nk_find_best(unsigned long parent, unsigned long rate, 17static void ccu_nk_find_best(unsigned long parent, unsigned long rate,
18 unsigned int max_n, unsigned int max_k, 18 unsigned int max_n, unsigned int max_k,
19 unsigned int *n, unsigned int *k) 19 unsigned int *n, unsigned int *k)
20{ 20{
21 unsigned long best_rate = 0; 21 unsigned long best_rate = 0;
22 unsigned int best_k = 0, best_n = 0; 22 unsigned int best_k = 0, best_n = 0;
diff --git a/drivers/clk/sunxi/clk-a10-pll2.c b/drivers/clk/sunxi/clk-a10-pll2.c
index 0ee1f363e4be..d8eab90ae661 100644
--- a/drivers/clk/sunxi/clk-a10-pll2.c
+++ b/drivers/clk/sunxi/clk-a10-pll2.c
@@ -73,7 +73,7 @@ static void __init sun4i_pll2_setup(struct device_node *node,
 					  SUN4I_PLL2_PRE_DIV_WIDTH,
 					  CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
 					  &sun4i_a10_pll2_lock);
-	if (!prediv_clk) {
+	if (IS_ERR(prediv_clk)) {
 		pr_err("Couldn't register the prediv clock\n");
 		goto err_free_array;
 	}
@@ -106,7 +106,7 @@ static void __init sun4i_pll2_setup(struct device_node *node,
 				   &mult->hw, &clk_multiplier_ops,
 				   &gate->hw, &clk_gate_ops,
 				   CLK_SET_RATE_PARENT);
-	if (!base_clk) {
+	if (IS_ERR(base_clk)) {
 		pr_err("Couldn't register the base multiplier clock\n");
 		goto err_free_multiplier;
 	}
diff --git a/drivers/clk/sunxi/clk-sun8i-mbus.c b/drivers/clk/sunxi/clk-sun8i-mbus.c
index 411d3033a96e..b200ebf159ee 100644
--- a/drivers/clk/sunxi/clk-sun8i-mbus.c
+++ b/drivers/clk/sunxi/clk-sun8i-mbus.c
@@ -48,7 +48,7 @@ static void __init sun8i_a23_mbus_setup(struct device_node *node)
 		return;
 
 	reg = of_io_request_and_map(node, 0, of_node_full_name(node));
-	if (!reg) {
+	if (IS_ERR(reg)) {
 		pr_err("Could not get registers for sun8i-mbus-clk\n");
 		goto err_free_parents;
 	}
diff --git a/drivers/clk/tegra/clk-tegra114.c b/drivers/clk/tegra/clk-tegra114.c
index 64da7b79a6e4..933b5dd698b8 100644
--- a/drivers/clk/tegra/clk-tegra114.c
+++ b/drivers/clk/tegra/clk-tegra114.c
@@ -428,7 +428,7 @@ static struct tegra_clk_pll_params pll_d_params = {
 	.div_nmp = &pllp_nmp,
 	.freq_table = pll_d_freq_table,
 	.flags = TEGRA_PLL_HAS_CPCON | TEGRA_PLL_SET_LFCON |
-		 TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE,
+		 TEGRA_PLL_HAS_LOCK_ENABLE,
 };
 
 static struct tegra_clk_pll_params pll_d2_params = {
@@ -446,7 +446,7 @@ static struct tegra_clk_pll_params pll_d2_params = {
 	.div_nmp = &pllp_nmp,
 	.freq_table = pll_d_freq_table,
 	.flags = TEGRA_PLL_HAS_CPCON | TEGRA_PLL_SET_LFCON |
-		 TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE,
+		 TEGRA_PLL_HAS_LOCK_ENABLE,
 };
 
 static const struct pdiv_map pllu_p[] = {
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 28bce3f4f81d..57700541f951 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -8,6 +8,9 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
+
+#define pr_fmt(fmt)	"arm_arch_timer: " fmt
+
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/device.h>
@@ -370,16 +373,33 @@ static bool arch_timer_has_nonsecure_ppi(void)
 		arch_timer_ppi[PHYS_NONSECURE_PPI]);
 }
 
+static u32 check_ppi_trigger(int irq)
+{
+	u32 flags = irq_get_trigger_type(irq);
+
+	if (flags != IRQF_TRIGGER_HIGH && flags != IRQF_TRIGGER_LOW) {
+		pr_warn("WARNING: Invalid trigger for IRQ%d, assuming level low\n", irq);
+		pr_warn("WARNING: Please fix your firmware\n");
+		flags = IRQF_TRIGGER_LOW;
+	}
+
+	return flags;
+}
+
 static int arch_timer_starting_cpu(unsigned int cpu)
 {
 	struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);
+	u32 flags;
 
 	__arch_timer_setup(ARCH_CP15_TIMER, clk);
 
-	enable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], 0);
+	flags = check_ppi_trigger(arch_timer_ppi[arch_timer_uses_ppi]);
+	enable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], flags);
 
-	if (arch_timer_has_nonsecure_ppi())
-		enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], 0);
+	if (arch_timer_has_nonsecure_ppi()) {
+		flags = check_ppi_trigger(arch_timer_ppi[PHYS_NONSECURE_PPI]);
+		enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], flags);
+	}
 
 	arch_counter_set_user_access();
 	if (evtstrm_enable)
diff --git a/drivers/clocksource/bcm_kona_timer.c b/drivers/clocksource/bcm_kona_timer.c
index 7e3fd375a627..92f6e4deee74 100644
--- a/drivers/clocksource/bcm_kona_timer.c
+++ b/drivers/clocksource/bcm_kona_timer.c
@@ -66,10 +66,10 @@ static void kona_timer_disable_and_clear(void __iomem *base)
 
 }
 
-static void
+static int
 kona_timer_get_counter(void __iomem *timer_base, uint32_t *msw, uint32_t *lsw)
 {
-	int loop_limit = 4;
+	int loop_limit = 3;
 
 	/*
 	 * Read 64-bit free running counter
@@ -83,18 +83,19 @@ kona_timer_get_counter(void __iomem *timer_base, uint32_t *msw, uint32_t *lsw)
 	 * if new hi-word is equal to previously read hi-word then stop.
 	 */
 
-	while (--loop_limit) {
+	do {
 		*msw = readl(timer_base + KONA_GPTIMER_STCHI_OFFSET);
 		*lsw = readl(timer_base + KONA_GPTIMER_STCLO_OFFSET);
 		if (*msw == readl(timer_base + KONA_GPTIMER_STCHI_OFFSET))
 			break;
-	}
+	} while (--loop_limit);
 	if (!loop_limit) {
 		pr_err("bcm_kona_timer: getting counter failed.\n");
 		pr_err(" Timer will be impacted\n");
+		return -ETIMEDOUT;
 	}
 
-	return;
+	return 0;
 }
 
 static int kona_timer_set_next_event(unsigned long clc,
@@ -112,8 +113,11 @@ static int kona_timer_set_next_event(unsigned long clc,
 
 	uint32_t lsw, msw;
 	uint32_t reg;
+	int ret;
 
-	kona_timer_get_counter(timers.tmr_regs, &msw, &lsw);
+	ret = kona_timer_get_counter(timers.tmr_regs, &msw, &lsw);
+	if (ret)
+		return ret;
 
 	/* Load the "next" event tick value */
 	writel(lsw + clc, timers.tmr_regs + KONA_GPTIMER_STCM0_OFFSET);
diff --git a/drivers/clocksource/mips-gic-timer.c b/drivers/clocksource/mips-gic-timer.c
index d91e8725917c..b4b3ab5a11ad 100644
--- a/drivers/clocksource/mips-gic-timer.c
+++ b/drivers/clocksource/mips-gic-timer.c
@@ -164,7 +164,7 @@ void __init gic_clocksource_init(unsigned int frequency)
 	gic_start_count();
 }
 
-static void __init gic_clocksource_of_init(struct device_node *node)
+static int __init gic_clocksource_of_init(struct device_node *node)
 {
 	struct clk *clk;
 	int ret;
diff --git a/drivers/clocksource/pxa_timer.c b/drivers/clocksource/pxa_timer.c
index 937e10b84d58..3e1cb512f3ce 100644
--- a/drivers/clocksource/pxa_timer.c
+++ b/drivers/clocksource/pxa_timer.c
@@ -21,6 +21,8 @@
 #include <linux/of_irq.h>
 #include <linux/sched_clock.h>
 
+#include <clocksource/pxa.h>
+
 #include <asm/div64.h>
 
 #define OSMR0		0x00	/* OS Timer 0 Match Register */
diff --git a/drivers/clocksource/sun4i_timer.c b/drivers/clocksource/sun4i_timer.c
index 97669ee4df2a..c83452cacb41 100644
--- a/drivers/clocksource/sun4i_timer.c
+++ b/drivers/clocksource/sun4i_timer.c
@@ -123,12 +123,16 @@ static struct clock_event_device sun4i_clockevent = {
 	.set_next_event = sun4i_clkevt_next_event,
 };
 
+static void sun4i_timer_clear_interrupt(void)
+{
+	writel(TIMER_IRQ_EN(0), timer_base + TIMER_IRQ_ST_REG);
+}
 
 static irqreturn_t sun4i_timer_interrupt(int irq, void *dev_id)
 {
 	struct clock_event_device *evt = (struct clock_event_device *)dev_id;
 
-	writel(0x1, timer_base + TIMER_IRQ_ST_REG);
+	sun4i_timer_clear_interrupt();
 	evt->event_handler(evt);
 
 	return IRQ_HANDLED;
@@ -208,6 +212,9 @@ static int __init sun4i_timer_init(struct device_node *node)
 	/* Make sure timer is stopped before playing with interrupts */
 	sun4i_clkevt_time_stop(0);
 
+	/* clear timer0 interrupt */
+	sun4i_timer_clear_interrupt();
+
 	sun4i_clockevent.cpumask = cpu_possible_mask;
 	sun4i_clockevent.irq = irq;
 
diff --git a/drivers/clocksource/time-armada-370-xp.c b/drivers/clocksource/time-armada-370-xp.c
index 719b478d136e..3c39e6f45971 100644
--- a/drivers/clocksource/time-armada-370-xp.c
+++ b/drivers/clocksource/time-armada-370-xp.c
@@ -338,7 +338,6 @@ static int __init armada_xp_timer_init(struct device_node *np)
 	struct clk *clk = of_clk_get_by_name(np, "fixed");
 	int ret;
 
-	clk = of_clk_get(np, 0);
 	if (IS_ERR(clk)) {
 		pr_err("Failed to get clock");
 		return PTR_ERR(clk);
diff --git a/drivers/clocksource/time-pistachio.c b/drivers/clocksource/time-pistachio.c
index a7d9a08e4b0e..a8e6c7df853d 100644
--- a/drivers/clocksource/time-pistachio.c
+++ b/drivers/clocksource/time-pistachio.c
@@ -202,10 +202,10 @@ static int __init pistachio_clksrc_of_init(struct device_node *node)
 	rate = clk_get_rate(fast_clk);
 
 	/* Disable irq's for clocksource usage */
-	gpt_writel(&pcs_gpt.base, 0, TIMER_IRQ_MASK, 0);
-	gpt_writel(&pcs_gpt.base, 0, TIMER_IRQ_MASK, 1);
-	gpt_writel(&pcs_gpt.base, 0, TIMER_IRQ_MASK, 2);
-	gpt_writel(&pcs_gpt.base, 0, TIMER_IRQ_MASK, 3);
+	gpt_writel(pcs_gpt.base, 0, TIMER_IRQ_MASK, 0);
+	gpt_writel(pcs_gpt.base, 0, TIMER_IRQ_MASK, 1);
+	gpt_writel(pcs_gpt.base, 0, TIMER_IRQ_MASK, 2);
+	gpt_writel(pcs_gpt.base, 0, TIMER_IRQ_MASK, 3);
 
 	/* Enable timer block */
 	writel(TIMER_ME_GLOBAL, pcs_gpt.base);
diff --git a/drivers/clocksource/timer-atmel-pit.c b/drivers/clocksource/timer-atmel-pit.c
index 1ffac0cb0cb7..7f0f5b26d8c5 100644
--- a/drivers/clocksource/timer-atmel-pit.c
+++ b/drivers/clocksource/timer-atmel-pit.c
@@ -240,6 +240,7 @@ static int __init at91sam926x_pit_common_init(struct pit_data *data)
 static int __init at91sam926x_pit_dt_init(struct device_node *node)
 {
 	struct pit_data *data;
+	int ret;
 
 	data = kzalloc(sizeof(*data), GFP_KERNEL);
 	if (!data)
@@ -261,6 +262,12 @@ static int __init at91sam926x_pit_dt_init(struct device_node *node)
 		return PTR_ERR(data->mck);
 	}
 
+	ret = clk_prepare_enable(data->mck);
+	if (ret) {
+		pr_err("Unable to enable mck\n");
+		return ret;
+	}
+
 	/* Get the interrupts property */
 	data->irq = irq_of_parse_and_map(node, 0);
 	if (!data->irq) {
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index 0bb44d5b5df4..2ee40fd360ca 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -74,6 +74,8 @@ static const struct of_device_id machines[] __initconst = {
 	{ .compatible = "ti,omap5", },
 
 	{ .compatible = "xlnx,zynq-7000", },
+
+	{ }
 };
 
 static int __init cpufreq_dt_platdev_init(void)
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index 87796e0864e9..d3ffde806629 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -145,11 +145,30 @@ static struct powernv_pstate_info {
 /* Use following macros for conversions between pstate_id and index */
 static inline int idx_to_pstate(unsigned int i)
 {
+	if (unlikely(i >= powernv_pstate_info.nr_pstates)) {
+		pr_warn_once("index %u is out of bound\n", i);
+		return powernv_freqs[powernv_pstate_info.nominal].driver_data;
+	}
+
 	return powernv_freqs[i].driver_data;
 }
 
 static inline unsigned int pstate_to_idx(int pstate)
 {
+	int min = powernv_freqs[powernv_pstate_info.min].driver_data;
+	int max = powernv_freqs[powernv_pstate_info.max].driver_data;
+
+	if (min > 0) {
+		if (unlikely((pstate < max) || (pstate > min))) {
+			pr_warn_once("pstate %d is out of bound\n", pstate);
+			return powernv_pstate_info.nominal;
+		}
+	} else {
+		if (unlikely((pstate > max) || (pstate < min))) {
+			pr_warn_once("pstate %d is out of bound\n", pstate);
+			return powernv_pstate_info.nominal;
+		}
+	}
 	/*
 	 * abs() is deliberately used so that is works with
 	 * both monotonically increasing and decreasing
@@ -593,7 +612,7 @@ void gpstate_timer_handler(unsigned long data)
 	} else {
 		gpstate_idx = calc_global_pstate(gpstates->elapsed_time,
 						 gpstates->highest_lpstate_idx,
-						 freq_data.pstate_id);
+						 gpstates->last_lpstate_idx);
 	}
 
 	/*
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index ea8189f4b021..b3044219772c 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -441,6 +441,9 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
 							 OP_ALG_AAI_CTR_MOD128);
 	const bool is_rfc3686 = alg->caam.rfc3686;
 
+	if (!ctx->authsize)
+		return 0;
+
 	/* NULL encryption / decryption */
 	if (!ctx->enckeylen)
 		return aead_null_set_sh_desc(aead);
@@ -553,7 +556,10 @@ skip_enc:
 
 	/* Read and write assoclen bytes */
 	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
-	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+	if (alg->caam.geniv)
+		append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize);
+	else
+		append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
 
 	/* Skip assoc data */
 	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
@@ -562,6 +568,14 @@ skip_enc:
 	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
 			     KEY_VLF);
 
+	if (alg->caam.geniv) {
+		append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
+				LDST_SRCDST_BYTE_CONTEXT |
+				(ctx1_iv_off << LDST_OFFSET_SHIFT));
+		append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO |
+			    (ctx1_iv_off << MOVE_OFFSET_SHIFT) | ivsize);
+	}
+
 	/* Load Counter into CONTEXT1 reg */
 	if (is_rfc3686)
 		append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
@@ -614,7 +628,7 @@ skip_enc:
 		keys_fit_inline = true;
 
 	/* aead_givencrypt shared descriptor */
-	desc = ctx->sh_desc_givenc;
+	desc = ctx->sh_desc_enc;
 
 	/* Note: Context registers are saved. */
 	init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
@@ -645,13 +659,13 @@ copy_iv:
 	append_operation(desc, ctx->class2_alg_type |
 			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
 
-	/* ivsize + cryptlen = seqoutlen - authsize */
-	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
-
 	/* Read and write assoclen bytes */
 	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
 	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
 
+	/* ivsize + cryptlen = seqoutlen - authsize */
+	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
+
 	/* Skip assoc data */
 	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
 
@@ -697,7 +711,7 @@ copy_iv:
 	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
 					      desc_bytes(desc),
 					      DMA_TO_DEVICE);
-	if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
+	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
 		dev_err(jrdev, "unable to map shared descriptor\n");
 		return -ENOMEM;
 	}
@@ -2147,7 +2161,7 @@ static void init_authenc_job(struct aead_request *req,
 
 	init_aead_job(req, edesc, all_contig, encrypt);
 
-	if (ivsize && (is_rfc3686 || !(alg->caam.geniv && encrypt)))
+	if (ivsize && ((is_rfc3686 && encrypt) || !alg->caam.geniv))
 		append_load_as_imm(desc, req->iv, ivsize,
 				   LDST_CLASS_1_CCB |
 				   LDST_SRCDST_BYTE_CONTEXT |
@@ -2534,20 +2548,6 @@ static int aead_decrypt(struct aead_request *req)
 	return ret;
 }
 
-static int aead_givdecrypt(struct aead_request *req)
-{
-	struct crypto_aead *aead = crypto_aead_reqtfm(req);
-	unsigned int ivsize = crypto_aead_ivsize(aead);
-
-	if (req->cryptlen < ivsize)
-		return -EINVAL;
-
-	req->cryptlen -= ivsize;
-	req->assoclen += ivsize;
-
-	return aead_decrypt(req);
-}
-
 /*
  * allocate and map the ablkcipher extended descriptor for ablkcipher
  */
@@ -3207,7 +3207,7 @@ static struct caam_aead_alg driver_aeads[] = {
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
-			.decrypt = aead_givdecrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = AES_BLOCK_SIZE,
 			.maxauthsize = MD5_DIGEST_SIZE,
 		},
@@ -3253,7 +3253,7 @@ static struct caam_aead_alg driver_aeads[] = {
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
-			.decrypt = aead_givdecrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = AES_BLOCK_SIZE,
 			.maxauthsize = SHA1_DIGEST_SIZE,
 		},
@@ -3299,7 +3299,7 @@ static struct caam_aead_alg driver_aeads[] = {
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
-			.decrypt = aead_givdecrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = AES_BLOCK_SIZE,
 			.maxauthsize = SHA224_DIGEST_SIZE,
 		},
@@ -3345,7 +3345,7 @@ static struct caam_aead_alg driver_aeads[] = {
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
-			.decrypt = aead_givdecrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = AES_BLOCK_SIZE,
 			.maxauthsize = SHA256_DIGEST_SIZE,
 		},
@@ -3391,7 +3391,7 @@ static struct caam_aead_alg driver_aeads[] = {
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
-			.decrypt = aead_givdecrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = AES_BLOCK_SIZE,
 			.maxauthsize = SHA384_DIGEST_SIZE,
 		},
@@ -3437,7 +3437,7 @@ static struct caam_aead_alg driver_aeads[] = {
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
-			.decrypt = aead_givdecrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = AES_BLOCK_SIZE,
 			.maxauthsize = SHA512_DIGEST_SIZE,
 		},
@@ -3483,7 +3483,7 @@ static struct caam_aead_alg driver_aeads[] = {
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
-			.decrypt = aead_givdecrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = DES3_EDE_BLOCK_SIZE,
 			.maxauthsize = MD5_DIGEST_SIZE,
 		},
@@ -3531,7 +3531,7 @@ static struct caam_aead_alg driver_aeads[] = {
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
-			.decrypt = aead_givdecrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = DES3_EDE_BLOCK_SIZE,
 			.maxauthsize = SHA1_DIGEST_SIZE,
 		},
@@ -3579,7 +3579,7 @@ static struct caam_aead_alg driver_aeads[] = {
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
-			.decrypt = aead_givdecrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = DES3_EDE_BLOCK_SIZE,
 			.maxauthsize = SHA224_DIGEST_SIZE,
 		},
@@ -3627,7 +3627,7 @@ static struct caam_aead_alg driver_aeads[] = {
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
-			.decrypt = aead_givdecrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = DES3_EDE_BLOCK_SIZE,
 			.maxauthsize = SHA256_DIGEST_SIZE,
 		},
@@ -3675,7 +3675,7 @@ static struct caam_aead_alg driver_aeads[] = {
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
-			.decrypt = aead_givdecrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = DES3_EDE_BLOCK_SIZE,
 			.maxauthsize = SHA384_DIGEST_SIZE,
 		},
@@ -3723,7 +3723,7 @@ static struct caam_aead_alg driver_aeads[] = {
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
-			.decrypt = aead_givdecrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = DES3_EDE_BLOCK_SIZE,
 			.maxauthsize = SHA512_DIGEST_SIZE,
 		},
@@ -3769,7 +3769,7 @@ static struct caam_aead_alg driver_aeads[] = {
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
-			.decrypt = aead_givdecrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = DES_BLOCK_SIZE,
 			.maxauthsize = MD5_DIGEST_SIZE,
 		},
@@ -3815,7 +3815,7 @@ static struct caam_aead_alg driver_aeads[] = {
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
-			.decrypt = aead_givdecrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = DES_BLOCK_SIZE,
 			.maxauthsize = SHA1_DIGEST_SIZE,
 		},
@@ -3861,7 +3861,7 @@ static struct caam_aead_alg driver_aeads[] = {
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
-			.decrypt = aead_givdecrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = DES_BLOCK_SIZE,
 			.maxauthsize = SHA224_DIGEST_SIZE,
 		},
@@ -3907,7 +3907,7 @@ static struct caam_aead_alg driver_aeads[] = {
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
-			.decrypt = aead_givdecrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = DES_BLOCK_SIZE,
 			.maxauthsize = SHA256_DIGEST_SIZE,
 		},
@@ -3953,7 +3953,7 @@ static struct caam_aead_alg driver_aeads[] = {
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
-			.decrypt = aead_givdecrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = DES_BLOCK_SIZE,
 			.maxauthsize = SHA384_DIGEST_SIZE,
 		},
@@ -3999,7 +3999,7 @@ static struct caam_aead_alg driver_aeads[] = {
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
-			.decrypt = aead_givdecrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = DES_BLOCK_SIZE,
 			.maxauthsize = SHA512_DIGEST_SIZE,
 		},
@@ -4048,7 +4048,7 @@ static struct caam_aead_alg driver_aeads[] = {
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
-			.decrypt = aead_givdecrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = CTR_RFC3686_IV_SIZE,
 			.maxauthsize = MD5_DIGEST_SIZE,
 		},
@@ -4099,7 +4099,7 @@ static struct caam_aead_alg driver_aeads[] = {
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
-			.decrypt = aead_givdecrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = CTR_RFC3686_IV_SIZE,
 			.maxauthsize = SHA1_DIGEST_SIZE,
 		},
@@ -4150,7 +4150,7 @@ static struct caam_aead_alg driver_aeads[] = {
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
-			.decrypt = aead_givdecrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = CTR_RFC3686_IV_SIZE,
 			.maxauthsize = SHA224_DIGEST_SIZE,
 		},
@@ -4201,7 +4201,7 @@ static struct caam_aead_alg driver_aeads[] = {
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
-			.decrypt = aead_givdecrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = CTR_RFC3686_IV_SIZE,
 			.maxauthsize = SHA256_DIGEST_SIZE,
 		},
@@ -4252,7 +4252,7 @@ static struct caam_aead_alg driver_aeads[] = {
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
-			.decrypt = aead_givdecrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = CTR_RFC3686_IV_SIZE,
 			.maxauthsize = SHA384_DIGEST_SIZE,
 		},
@@ -4303,7 +4303,7 @@ static struct caam_aead_alg driver_aeads[] = {
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
-			.decrypt = aead_givdecrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = CTR_RFC3686_IV_SIZE,
 			.maxauthsize = SHA512_DIGEST_SIZE,
 		},
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index f1ecc8df8d41..36365b3efdfd 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -1898,6 +1898,7 @@ caam_hash_alloc(struct caam_hash_template *template,
 			 template->name);
 		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
 			 template->driver_name);
+		t_alg->ahash_alg.setkey = NULL;
 	}
 	alg->cra_module = THIS_MODULE;
 	alg->cra_init = caam_hash_cra_init;
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index 769148dbaeb3..20f35df8a01f 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -1260,8 +1260,8 @@ static struct crypto_alg qat_algs[] = { {
 			.setkey = qat_alg_ablkcipher_xts_setkey,
 			.decrypt = qat_alg_ablkcipher_decrypt,
 			.encrypt = qat_alg_ablkcipher_encrypt,
-			.min_keysize = AES_MIN_KEY_SIZE,
-			.max_keysize = AES_MAX_KEY_SIZE,
+			.min_keysize = 2 * AES_MIN_KEY_SIZE,
+			.max_keysize = 2 * AES_MAX_KEY_SIZE,
 			.ivsize = AES_BLOCK_SIZE,
 		},
 	},
diff --git a/drivers/crypto/vmx/aes_xts.c b/drivers/crypto/vmx/aes_xts.c
index cfb25413917c..24353ec336c5 100644
--- a/drivers/crypto/vmx/aes_xts.c
+++ b/drivers/crypto/vmx/aes_xts.c
@@ -129,8 +129,8 @@ static int p8_aes_xts_crypt(struct blkcipher_desc *desc,
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
 
-	iv = (u8 *)walk.iv;
 	ret = blkcipher_walk_virt(desc, &walk);
+	iv = walk.iv;
 	memset(tweak, 0, AES_BLOCK_SIZE);
 	aes_p8_encrypt(iv, tweak, &ctx->tweak_key);
 
diff --git a/drivers/dax/dax.c b/drivers/dax/dax.c
index 803f3953b341..29f600f2c447 100644
--- a/drivers/dax/dax.c
+++ b/drivers/dax/dax.c
@@ -459,7 +459,7 @@ static int __dax_dev_pmd_fault(struct dax_dev *dax_dev,
 	}
 
 	pgoff = linear_page_index(vma, pmd_addr);
-	phys = pgoff_to_phys(dax_dev, pgoff, PAGE_SIZE);
+	phys = pgoff_to_phys(dax_dev, pgoff, PMD_SIZE);
 	if (phys == -1) {
 		dev_dbg(dev, "%s: phys_to_pgoff(%#lx) failed\n", __func__,
 				pgoff);
diff --git a/drivers/dax/pmem.c b/drivers/dax/pmem.c
index dfb168568af1..1f01e98c83c7 100644
--- a/drivers/dax/pmem.c
+++ b/drivers/dax/pmem.c
@@ -116,6 +116,9 @@ static int dax_pmem_probe(struct device *dev)
 	if (rc)
 		return rc;
 
+	/* adjust the dax_region resource to the start of data */
+	res.start += le64_to_cpu(pfn_sb->dataoff);
+
 	nd_region = to_nd_region(dev->parent);
 	dax_region = alloc_dax_region(dev, nd_region->id, &res,
 			le32_to_cpu(pfn_sb->align), addr, PFN_DEV|PFN_MAP);
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index e434ffe7bc5c..832cbd647145 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -2067,7 +2067,7 @@ err_dma_unregister:
 err_clk_disable:
 	clk_disable_unprepare(atxdmac->clk);
 err_free_irq:
-	free_irq(atxdmac->irq, atxdmac->dma.dev);
+	free_irq(atxdmac->irq, atxdmac);
 	return ret;
 }
 
@@ -2081,7 +2081,7 @@ static int at_xdmac_remove(struct platform_device *pdev)
 	dma_async_device_unregister(&atxdmac->dma);
 	clk_disable_unprepare(atxdmac->clk);
 
-	free_irq(atxdmac->irq, atxdmac->dma.dev);
+	free_irq(atxdmac->irq, atxdmac);
 
 	for (i = 0; i < atxdmac->dma.chancnt; i++) {
 		struct at_xdmac_chan *atchan = &atxdmac->chan[i];
diff --git a/drivers/dma/fsl_raid.c b/drivers/dma/fsl_raid.c
index aad167eaaee8..de2a2a2b1d75 100644
--- a/drivers/dma/fsl_raid.c
+++ b/drivers/dma/fsl_raid.c
@@ -836,6 +836,7 @@ static int fsl_re_probe(struct platform_device *ofdev)
 	rc = of_property_read_u32(np, "reg", &off);
 	if (rc) {
 		dev_err(dev, "Reg property not found in JQ node\n");
+		of_node_put(np);
 		return -ENODEV;
 	}
 	/* Find out the Job Rings present under each JQ */
diff --git a/drivers/dma/img-mdc-dma.c b/drivers/dma/img-mdc-dma.c
index a4c53be482cf..624f1e1e9c55 100644
--- a/drivers/dma/img-mdc-dma.c
+++ b/drivers/dma/img-mdc-dma.c
@@ -861,7 +861,6 @@ static int mdc_dma_probe(struct platform_device *pdev)
 {
 	struct mdc_dma *mdma;
 	struct resource *res;
-	const struct of_device_id *match;
 	unsigned int i;
 	u32 val;
 	int ret;
@@ -871,8 +870,7 @@ static int mdc_dma_probe(struct platform_device *pdev)
 		return -ENOMEM;
 	platform_set_drvdata(pdev, mdma);
 
-	match = of_match_device(mdc_dma_of_match, &pdev->dev);
-	mdma->soc = match->data;
+	mdma->soc = of_device_get_match_data(&pdev->dev);
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	mdma->regs = devm_ioremap_resource(&pdev->dev, res);
diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
index dc7850a422b8..3f56f9ca4482 100644
--- a/drivers/dma/pxa_dma.c
+++ b/drivers/dma/pxa_dma.c
@@ -638,7 +638,7 @@ static bool pxad_try_hotchain(struct virt_dma_chan *vc,
 		vd_last_issued = list_entry(vc->desc_issued.prev,
 					    struct virt_dma_desc, node);
 		pxad_desc_chain(vd_last_issued, vd);
-		if (is_chan_running(chan) || is_desc_completed(vd_last_issued))
+		if (is_chan_running(chan) || is_desc_completed(vd))
 			return true;
 	}
 
@@ -671,6 +671,7 @@ static irqreturn_t pxad_chan_handler(int irq, void *dev_id)
 	struct virt_dma_desc *vd, *tmp;
 	unsigned int dcsr;
 	unsigned long flags;
+	bool vd_completed;
 	dma_cookie_t last_started = 0;
 
 	BUG_ON(!chan);
@@ -681,15 +682,17 @@ static irqreturn_t pxad_chan_handler(int irq, void *dev_id)
 
 	spin_lock_irqsave(&chan->vc.lock, flags);
 	list_for_each_entry_safe(vd, tmp, &chan->vc.desc_issued, node) {
+		vd_completed = is_desc_completed(vd);
 		dev_dbg(&chan->vc.chan.dev->device,
-			"%s(): checking txd %p[%x]: completed=%d\n",
-			__func__, vd, vd->tx.cookie, is_desc_completed(vd));
+			"%s(): checking txd %p[%x]: completed=%d dcsr=0x%x\n",
+			__func__, vd, vd->tx.cookie, vd_completed,
+			dcsr);
 		last_started = vd->tx.cookie;
 		if (to_pxad_sw_desc(vd)->cyclic) {
 			vchan_cyclic_callback(vd);
 			break;
 		}
-		if (is_desc_completed(vd)) {
+		if (vd_completed) {
 			list_del(&vd->node);
 			vchan_cookie_complete(vd);
 		} else {
diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c
index 749f1bd5d65d..06ecdc38cee0 100644
--- a/drivers/dma/sh/usb-dmac.c
+++ b/drivers/dma/sh/usb-dmac.c
@@ -600,27 +600,30 @@ static irqreturn_t usb_dmac_isr_channel(int irq, void *dev)
 {
 	struct usb_dmac_chan *chan = dev;
 	irqreturn_t ret = IRQ_NONE;
-	u32 mask = USB_DMACHCR_TE;
-	u32 check_bits = USB_DMACHCR_TE | USB_DMACHCR_SP;
+	u32 mask = 0;
 	u32 chcr;
+	bool xfer_end = false;
 
 	spin_lock(&chan->vc.lock);
 
 	chcr = usb_dmac_chan_read(chan, USB_DMACHCR);
-	if (chcr & check_bits)
-		mask |= USB_DMACHCR_DE | check_bits;
+	if (chcr & (USB_DMACHCR_TE | USB_DMACHCR_SP)) {
+		mask |= USB_DMACHCR_DE | USB_DMACHCR_TE | USB_DMACHCR_SP;
+		if (chcr & USB_DMACHCR_DE)
+			xfer_end = true;
+		ret |= IRQ_HANDLED;
+	}
 	if (chcr & USB_DMACHCR_NULL) {
 		/* An interruption of TE will happen after we set FTE */
 		mask |= USB_DMACHCR_NULL;
 		chcr |= USB_DMACHCR_FTE;
 		ret |= IRQ_HANDLED;
 	}
-	usb_dmac_chan_write(chan, USB_DMACHCR, chcr & ~mask);
+	if (mask)
+		usb_dmac_chan_write(chan, USB_DMACHCR, chcr & ~mask);
 
-	if (chcr & check_bits) {
+	if (xfer_end)
 		usb_dmac_isr_transfer_end(chan);
-		ret |= IRQ_HANDLED;
-	}
 
 	spin_unlock(&chan->vc.lock);
 
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index d0c1dab9b435..dff1a4a6dc1b 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -251,6 +251,14 @@ config EDAC_SBRIDGE
 	  Support for error detection and correction the Intel
 	  Sandy Bridge, Ivy Bridge and Haswell Integrated Memory Controllers.
 
+config EDAC_SKX
+	tristate "Intel Skylake server Integrated MC"
+	depends on EDAC_MM_EDAC && PCI && X86_64 && X86_MCE_INTEL
+	depends on PCI_MMCONFIG
+	help
+	  Support for error detection and correction the Intel
+	  Skylake server Integrated Memory Controllers.
+
 config EDAC_MPC85XX
 	tristate "Freescale MPC83xx / MPC85xx"
 	depends on EDAC_MM_EDAC && FSL_SOC
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index f9e4a3e0e6e9..986049925b08 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -31,6 +31,7 @@ obj-$(CONFIG_EDAC_I5400) += i5400_edac.o
 obj-$(CONFIG_EDAC_I7300)		+= i7300_edac.o
 obj-$(CONFIG_EDAC_I7CORE)		+= i7core_edac.o
 obj-$(CONFIG_EDAC_SBRIDGE)		+= sb_edac.o
+obj-$(CONFIG_EDAC_SKX)			+= skx_edac.o
 obj-$(CONFIG_EDAC_E7XXX)		+= e7xxx_edac.o
 obj-$(CONFIG_EDAC_E752X)		+= e752x_edac.o
 obj-$(CONFIG_EDAC_I82443BXGX)		+= i82443bxgx_edac.o
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index 4fb2eb7c800d..ce0067b7a2f6 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -552,9 +552,9 @@ static const struct pci_id_table pci_dev_descr_haswell_table[] = {
 /* Knight's Landing Support */
 /*
  * KNL's memory channels are swizzled between memory controllers.
- * MC0 is mapped to CH3,5,6 and MC1 is mapped to CH0,1,2
+ * MC0 is mapped to CH3,4,5 and MC1 is mapped to CH0,1,2
  */
-#define knl_channel_remap(channel) ((channel + 3) % 6)
+#define knl_channel_remap(mc, chan) ((mc) ? (chan) : (chan) + 3)
 
 /* Memory controller, TAD tables, error injection - 2-8-0, 2-9-0 (2 of these) */
 #define PCI_DEVICE_ID_INTEL_KNL_IMC_MC       0x7840
@@ -1286,7 +1286,7 @@ static u32 knl_get_mc_route(int entry, u32 reg)
 	mc = GET_BITFIELD(reg, entry*3, (entry*3)+2);
 	chan = GET_BITFIELD(reg, (entry*2) + 18, (entry*2) + 18 + 1);
 
-	return knl_channel_remap(mc*3 + chan);
+	return knl_channel_remap(mc, chan);
 }
 
 /*
@@ -2997,8 +2997,15 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
 	} else {
 		char A = *("A");
 
-		channel = knl_channel_remap(channel);
+		/*
+		 * Reported channel is in range 0-2, so we can't map it
+		 * back to mc. To figure out mc we check machine check
+		 * bank register that reported this error.
+		 * bank15 means mc0 and bank16 means mc1.
+		 */
+		channel = knl_channel_remap(m->bank == 16, channel);
 		channel_mask = 1 << channel;
+
 		snprintf(msg, sizeof(msg),
 			 "%s%s err_code:%04x:%04x channel:%d (DIMM_%c)",
 			 overflow ? " OVERFLOW" : "",
diff --git a/drivers/edac/skx_edac.c b/drivers/edac/skx_edac.c
new file mode 100644
index 000000000000..0ff4878c2aa1
--- /dev/null
+++ b/drivers/edac/skx_edac.c
@@ -0,0 +1,1121 @@
1/*
2 * EDAC driver for Intel(R) Xeon(R) Skylake processors
3 * Copyright (c) 2016, Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 */
14
15#include <linux/module.h>
16#include <linux/init.h>
17#include <linux/pci.h>
18#include <linux/pci_ids.h>
19#include <linux/slab.h>
20#include <linux/delay.h>
21#include <linux/edac.h>
22#include <linux/mmzone.h>
23#include <linux/smp.h>
24#include <linux/bitmap.h>
25#include <linux/math64.h>
26#include <linux/mod_devicetable.h>
27#include <asm/cpu_device_id.h>
28#include <asm/processor.h>
29#include <asm/mce.h>
30
31#include "edac_core.h"
32
33#define SKX_REVISION " Ver: 1.0 "
34
35/*
36 * Debug macros
37 */
38#define skx_printk(level, fmt, arg...) \
39 edac_printk(level, "skx", fmt, ##arg)
40
41#define skx_mc_printk(mci, level, fmt, arg...) \
42 edac_mc_chipset_printk(mci, level, "skx", fmt, ##arg)
43
44/*
45 * Get a bit field at register value <v>, from bit <lo> to bit <hi>
46 */
47#define GET_BITFIELD(v, lo, hi) \
48 (((v) & GENMASK_ULL((hi), (lo))) >> (lo))
49
50static LIST_HEAD(skx_edac_list);
51
52static u64 skx_tolm, skx_tohm;
53
54#define NUM_IMC 2 /* memory controllers per socket */
55#define NUM_CHANNELS 3 /* channels per memory controller */
56#define NUM_DIMMS 2 /* Max DIMMS per channel */
57
58#define MASK26 0x3FFFFFF /* Mask for 2^26 */
59#define MASK29 0x1FFFFFFF /* Mask for 2^29 */
60
61/*
62 * Each cpu socket contains some pci devices that provide global
63 * information, and also some that are local to each of the two
64 * memory controllers on the die.
65 */
66struct skx_dev {
67 struct list_head list;
68 u8 bus[4];
69 struct pci_dev *sad_all;
70 struct pci_dev *util_all;
71 u32 mcroute;
72 struct skx_imc {
73 struct mem_ctl_info *mci;
74 u8 mc; /* system wide mc# */
75 u8 lmc; /* socket relative mc# */
76 u8 src_id, node_id;
77 struct skx_channel {
78 struct pci_dev *cdev;
79 struct skx_dimm {
80 u8 close_pg;
81 u8 bank_xor_enable;
82 u8 fine_grain_bank;
83 u8 rowbits;
84 u8 colbits;
85 } dimms[NUM_DIMMS];
86 } chan[NUM_CHANNELS];
87 } imc[NUM_IMC];
88};
89static int skx_num_sockets;
90
91struct skx_pvt {
92 struct skx_imc *imc;
93};
94
95struct decoded_addr {
96 struct skx_dev *dev;
97 u64 addr;
98 int socket;
99 int imc;
100 int channel;
101 u64 chan_addr;
102 int sktways;
103 int chanways;
104 int dimm;
105 int rank;
106 int channel_rank;
107 u64 rank_address;
108 int row;
109 int column;
110 int bank_address;
111 int bank_group;
112};
113
114static struct skx_dev *get_skx_dev(u8 bus, u8 idx)
115{
116 struct skx_dev *d;
117
118 list_for_each_entry(d, &skx_edac_list, list) {
119 if (d->bus[idx] == bus)
120 return d;
121 }
122
123 return NULL;
124}
125
126enum munittype {
127 CHAN0, CHAN1, CHAN2, SAD_ALL, UTIL_ALL, SAD
128};
129
130struct munit {
131 u16 did;
132 u16 devfn[NUM_IMC];
133 u8 busidx;
134 u8 per_socket;
135 enum munittype mtype;
136};
137
138/*
139 * List of PCI device ids that we need together with some device
140 * number and function numbers to tell which memory controller the
141 * device belongs to.
142 */
143static const struct munit skx_all_munits[] = {
144 { 0x2054, { }, 1, 1, SAD_ALL },
145 { 0x2055, { }, 1, 1, UTIL_ALL },
146 { 0x2040, { PCI_DEVFN(10, 0), PCI_DEVFN(12, 0) }, 2, 2, CHAN0 },
147 { 0x2044, { PCI_DEVFN(10, 4), PCI_DEVFN(12, 4) }, 2, 2, CHAN1 },
148 { 0x2048, { PCI_DEVFN(11, 0), PCI_DEVFN(13, 0) }, 2, 2, CHAN2 },
149 { 0x208e, { }, 1, 0, SAD },
150 { }
151};
152
153/*
154 * We use the per-socket device 0x2016 to count how many sockets are present,
155 * and to determine which PCI buses are associated with each socket. Allocate
156 * and build the full list of all the skx_dev structures that we need here.
157 */
158static int get_all_bus_mappings(void)
159{
160 struct pci_dev *pdev, *prev;
161 struct skx_dev *d;
162 u32 reg;
163 int ndev = 0;
164
165 prev = NULL;
166 for (;;) {
167 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x2016, prev);
168 if (!pdev)
169 break;
170 ndev++;
171 d = kzalloc(sizeof(*d), GFP_KERNEL);
172 if (!d) {
173 pci_dev_put(pdev);
174 return -ENOMEM;
175 }
176 pci_read_config_dword(pdev, 0xCC, &reg);
177 d->bus[0] = GET_BITFIELD(reg, 0, 7);
178 d->bus[1] = GET_BITFIELD(reg, 8, 15);
179 d->bus[2] = GET_BITFIELD(reg, 16, 23);
180 d->bus[3] = GET_BITFIELD(reg, 24, 31);
181 edac_dbg(2, "busses: %x, %x, %x, %x\n",
182 d->bus[0], d->bus[1], d->bus[2], d->bus[3]);
183 list_add_tail(&d->list, &skx_edac_list);
184 skx_num_sockets++;
185 prev = pdev;
186 }
187
188 return ndev;
189}
190
191static int get_all_munits(const struct munit *m)
192{
193 struct pci_dev *pdev, *prev;
194 struct skx_dev *d;
195 u32 reg;
196 int i = 0, ndev = 0;
197
198 prev = NULL;
199 for (;;) {
200 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, m->did, prev);
201 if (!pdev)
202 break;
203 ndev++;
204 if (m->per_socket == NUM_IMC) {
205 for (i = 0; i < NUM_IMC; i++)
206 if (m->devfn[i] == pdev->devfn)
207 break;
208 if (i == NUM_IMC)
209 goto fail;
210 }
211 d = get_skx_dev(pdev->bus->number, m->busidx);
212 if (!d)
213 goto fail;
214
215 /* Be sure that the device is enabled */
216 if (unlikely(pci_enable_device(pdev) < 0)) {
217 skx_printk(KERN_ERR,
218 "Couldn't enable %04x:%04x\n", PCI_VENDOR_ID_INTEL, m->did);
219 goto fail;
220 }
221
222 switch (m->mtype) {
223 case CHAN0: case CHAN1: case CHAN2:
224 pci_dev_get(pdev);
225 d->imc[i].chan[m->mtype].cdev = pdev;
226 break;
227 case SAD_ALL:
228 pci_dev_get(pdev);
229 d->sad_all = pdev;
230 break;
231 case UTIL_ALL:
232 pci_dev_get(pdev);
233 d->util_all = pdev;
234 break;
235 case SAD:
236 /*
237 * There is one of these devices per core, including cores
238 * that don't exist on this SKU. Ignore any that
239 * read a route table of zero, and make sure all the
240 * non-zero values match.
241 */
242 pci_read_config_dword(pdev, 0xB4, &reg);
243 if (reg != 0) {
244 if (d->mcroute == 0)
245 d->mcroute = reg;
246 else if (d->mcroute != reg) {
247 skx_printk(KERN_ERR,
248 "mcroute mismatch\n");
249 goto fail;
250 }
251 }
252 ndev--;
253 break;
254 }
255
256 prev = pdev;
257 }
258
259 return ndev;
260fail:
261 pci_dev_put(pdev);
262 return -ENODEV;
263}
264
265const struct x86_cpu_id skx_cpuids[] = {
266 { X86_VENDOR_INTEL, 6, 0x55, 0, 0 }, /* Skylake */
267 { }
268};
269MODULE_DEVICE_TABLE(x86cpu, skx_cpuids);
270
271static u8 get_src_id(struct skx_dev *d)
272{
273 u32 reg;
274
275 pci_read_config_dword(d->util_all, 0xF0, &reg);
276
277 return GET_BITFIELD(reg, 12, 14);
278}
279
280static u8 skx_get_node_id(struct skx_dev *d)
281{
282 u32 reg;
283
284 pci_read_config_dword(d->util_all, 0xF4, &reg);
285
286 return GET_BITFIELD(reg, 0, 2);
287}
288
289static int get_dimm_attr(u32 reg, int lobit, int hibit, int add, int minval,
290 int maxval, char *name)
291{
292 u32 val = GET_BITFIELD(reg, lobit, hibit);
293
294 if (val < minval || val > maxval) {
295 edac_dbg(2, "bad %s = %d (raw=%x)\n", name, val, reg);
296 return -EINVAL;
297 }
298 return val + add;
299}
300
301#define IS_DIMM_PRESENT(mtr) GET_BITFIELD((mtr), 15, 15)
302
303#define numrank(reg) get_dimm_attr((reg), 12, 13, 0, 1, 2, "ranks")
304#define numrow(reg) get_dimm_attr((reg), 2, 4, 12, 1, 6, "rows")
305#define numcol(reg) get_dimm_attr((reg), 0, 1, 10, 0, 2, "cols")
306
307static int get_width(u32 mtr)
308{
309 switch (GET_BITFIELD(mtr, 8, 9)) {
310 case 0:
311 return DEV_X4;
312 case 1:
313 return DEV_X8;
314 case 2:
315 return DEV_X16;
316 }
317 return DEV_UNKNOWN;
318}
319
320static int skx_get_hi_lo(void)
321{
322 struct pci_dev *pdev;
323 u32 reg;
324
325 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x2034, NULL);
326 if (!pdev) {
327 edac_dbg(0, "Can't get tolm/tohm\n");
328 return -ENODEV;
329 }
330
331 pci_read_config_dword(pdev, 0xD0, &reg);
332 skx_tolm = reg;
333 pci_read_config_dword(pdev, 0xD4, &reg);
334 skx_tohm = reg;
335 pci_read_config_dword(pdev, 0xD8, &reg);
336 skx_tohm |= (u64)reg << 32;
337
338 pci_dev_put(pdev);
339 edac_dbg(2, "tolm=%llx tohm=%llx\n", skx_tolm, skx_tohm);
340
341 return 0;
342}
343
344static int get_dimm_info(u32 mtr, u32 amap, struct dimm_info *dimm,
345 struct skx_imc *imc, int chan, int dimmno)
346{
347 int banks = 16, ranks, rows, cols, npages;
348 u64 size;
349
350 if (!IS_DIMM_PRESENT(mtr))
351 return 0;
352 ranks = numrank(mtr);
353 rows = numrow(mtr);
354 cols = numcol(mtr);
355
356 /*
357 * Compute size in 8-byte (2^3) words, then shift to MiB (2^20)
358 */
359 size = ((1ull << (rows + cols + ranks)) * banks) >> (20 - 3);
360 npages = MiB_TO_PAGES(size);
361
362 edac_dbg(0, "mc#%d: channel %d, dimm %d, %lld Mb (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n",
363 imc->mc, chan, dimmno, size, npages,
364 banks, ranks, rows, cols);
365
366 imc->chan[chan].dimms[dimmno].close_pg = GET_BITFIELD(mtr, 0, 0);
367 imc->chan[chan].dimms[dimmno].bank_xor_enable = GET_BITFIELD(mtr, 9, 9);
368 imc->chan[chan].dimms[dimmno].fine_grain_bank = GET_BITFIELD(amap, 0, 0);
369 imc->chan[chan].dimms[dimmno].rowbits = rows;
370 imc->chan[chan].dimms[dimmno].colbits = cols;
371
372 dimm->nr_pages = npages;
373 dimm->grain = 32;
374 dimm->dtype = get_width(mtr);
375 dimm->mtype = MEM_DDR4;
376 dimm->edac_mode = EDAC_SECDED; /* likely better than this */
377 snprintf(dimm->label, sizeof(dimm->label), "CPU_SrcID#%u_MC#%u_Chan#%u_DIMM#%u",
378 imc->src_id, imc->lmc, chan, dimmno);
379
380 return 1;
381}
382
383#define SKX_GET_MTMTR(dev, reg) \
384 pci_read_config_dword((dev), 0x87c, &reg)
385
386static bool skx_check_ecc(struct pci_dev *pdev)
387{
388 u32 mtmtr;
389
390 SKX_GET_MTMTR(pdev, mtmtr);
391
392 return !!GET_BITFIELD(mtmtr, 2, 2);
393}
394
395static int skx_get_dimm_config(struct mem_ctl_info *mci)
396{
397 struct skx_pvt *pvt = mci->pvt_info;
398 struct skx_imc *imc = pvt->imc;
399 struct dimm_info *dimm;
400 int i, j;
401 u32 mtr, amap;
402 int ndimms;
403
404 for (i = 0; i < NUM_CHANNELS; i++) {
405 ndimms = 0;
406 pci_read_config_dword(imc->chan[i].cdev, 0x8C, &amap);
407 for (j = 0; j < NUM_DIMMS; j++) {
408 dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms,
409 mci->n_layers, i, j, 0);
410 pci_read_config_dword(imc->chan[i].cdev,
411 0x80 + 4*j, &mtr);
412 ndimms += get_dimm_info(mtr, amap, dimm, imc, i, j);
413 }
414 if (ndimms && !skx_check_ecc(imc->chan[0].cdev)) {
415 skx_printk(KERN_ERR, "ECC is disabled on imc %d\n", imc->mc);
416 return -ENODEV;
417 }
418 }
419
420 return 0;
421}
422
423static void skx_unregister_mci(struct skx_imc *imc)
424{
425 struct mem_ctl_info *mci = imc->mci;
426
427 if (!mci)
428 return;
429
430 edac_dbg(0, "MC%d: mci = %p\n", imc->mc, mci);
431
432 /* Remove MC sysfs nodes */
433 edac_mc_del_mc(mci->pdev);
434
435 edac_dbg(1, "%s: free mci struct\n", mci->ctl_name);
436 kfree(mci->ctl_name);
437 edac_mc_free(mci);
438}
439
440static int skx_register_mci(struct skx_imc *imc)
441{
442 struct mem_ctl_info *mci;
443 struct edac_mc_layer layers[2];
444 struct pci_dev *pdev = imc->chan[0].cdev;
445 struct skx_pvt *pvt;
446 int rc;
447
448 /* allocate a new MC control structure */
449 layers[0].type = EDAC_MC_LAYER_CHANNEL;
450 layers[0].size = NUM_CHANNELS;
451 layers[0].is_virt_csrow = false;
452 layers[1].type = EDAC_MC_LAYER_SLOT;
453 layers[1].size = NUM_DIMMS;
454 layers[1].is_virt_csrow = true;
455 mci = edac_mc_alloc(imc->mc, ARRAY_SIZE(layers), layers,
456 sizeof(struct skx_pvt));
457
458 if (unlikely(!mci))
459 return -ENOMEM;
460
461 edac_dbg(0, "MC#%d: mci = %p\n", imc->mc, mci);
462
463 /* Associate skx_dev and mci for future usage */
464 imc->mci = mci;
465 pvt = mci->pvt_info;
466 pvt->imc = imc;
467
468 mci->ctl_name = kasprintf(GFP_KERNEL, "Skylake Socket#%d IMC#%d",
469 imc->node_id, imc->lmc);
470 mci->mtype_cap = MEM_FLAG_DDR4;
471 mci->edac_ctl_cap = EDAC_FLAG_NONE;
472 mci->edac_cap = EDAC_FLAG_NONE;
473 mci->mod_name = "skx_edac.c";
474 mci->dev_name = pci_name(imc->chan[0].cdev);
475 mci->mod_ver = SKX_REVISION;
476 mci->ctl_page_to_phys = NULL;
477
478 rc = skx_get_dimm_config(mci);
479 if (rc < 0)
480 goto fail;
481
482 /* record ptr to the generic device */
483 mci->pdev = &pdev->dev;
484
485 /* add this new MC control structure to EDAC's list of MCs */
486 if (unlikely(edac_mc_add_mc(mci))) {
487 edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
488 rc = -EINVAL;
489 goto fail;
490 }
491
492 return 0;
493
494fail:
495 kfree(mci->ctl_name);
496 edac_mc_free(mci);
497 imc->mci = NULL;
498 return rc;
499}
500
501#define SKX_MAX_SAD 24
502
503#define SKX_GET_SAD(d, i, reg) \
504 pci_read_config_dword((d)->sad_all, 0x60 + 8 * (i), &reg)
505#define SKX_GET_ILV(d, i, reg) \
506 pci_read_config_dword((d)->sad_all, 0x64 + 8 * (i), &reg)
507
508#define SKX_SAD_MOD3MODE(sad) GET_BITFIELD((sad), 30, 31)
509#define SKX_SAD_MOD3(sad) GET_BITFIELD((sad), 27, 27)
510#define SKX_SAD_LIMIT(sad) (((u64)GET_BITFIELD((sad), 7, 26) << 26) | MASK26)
511#define SKX_SAD_MOD3ASMOD2(sad) GET_BITFIELD((sad), 5, 6)
512#define SKX_SAD_ATTR(sad) GET_BITFIELD((sad), 3, 4)
513#define SKX_SAD_INTERLEAVE(sad) GET_BITFIELD((sad), 1, 2)
514#define SKX_SAD_ENABLE(sad) GET_BITFIELD((sad), 0, 0)
515
516#define SKX_ILV_REMOTE(tgt) (((tgt) & 8) == 0)
517#define SKX_ILV_TARGET(tgt) ((tgt) & 7)
518
519static bool skx_sad_decode(struct decoded_addr *res)
520{
521 struct skx_dev *d = list_first_entry(&skx_edac_list, typeof(*d), list);
522 u64 addr = res->addr;
523 int i, idx, tgt, lchan, shift;
524 u32 sad, ilv;
525 u64 limit, prev_limit;
526 int remote = 0;
527
528 /* Simple sanity check for I/O space or out of range */
529 if (addr >= skx_tohm || (addr >= skx_tolm && addr < BIT_ULL(32))) {
530 edac_dbg(0, "Address %llx out of range\n", addr);
531 return false;
532 }
533
534restart:
535 prev_limit = 0;
536 for (i = 0; i < SKX_MAX_SAD; i++) {
537 SKX_GET_SAD(d, i, sad);
538 limit = SKX_SAD_LIMIT(sad);
539 if (SKX_SAD_ENABLE(sad)) {
540 if (addr >= prev_limit && addr <= limit)
541 goto sad_found;
542 }
543 prev_limit = limit + 1;
544 }
545 edac_dbg(0, "No SAD entry for %llx\n", addr);
546 return false;
547
548sad_found:
549 SKX_GET_ILV(d, i, ilv);
550
551 switch (SKX_SAD_INTERLEAVE(sad)) {
552 case 0:
553 idx = GET_BITFIELD(addr, 6, 8);
554 break;
555 case 1:
556 idx = GET_BITFIELD(addr, 8, 10);
557 break;
558 case 2:
559 idx = GET_BITFIELD(addr, 12, 14);
560 break;
561 case 3:
562 idx = GET_BITFIELD(addr, 30, 32);
563 break;
564 }
565
566 tgt = GET_BITFIELD(ilv, 4 * idx, 4 * idx + 3);
567
568 /* If it points to another node, find it and start over */
569 if (SKX_ILV_REMOTE(tgt)) {
570 if (remote) {
571 edac_dbg(0, "Double remote!\n");
572 return false;
573 }
574 remote = 1;
575 list_for_each_entry(d, &skx_edac_list, list) {
576 if (d->imc[0].src_id == SKX_ILV_TARGET(tgt))
577 goto restart;
578 }
579 edac_dbg(0, "Can't find node %d\n", SKX_ILV_TARGET(tgt));
580 return false;
581 }
582
583 if (SKX_SAD_MOD3(sad) == 0)
584 lchan = SKX_ILV_TARGET(tgt);
585 else {
586 switch (SKX_SAD_MOD3MODE(sad)) {
587 case 0:
588 shift = 6;
589 break;
590 case 1:
591 shift = 8;
592 break;
593 case 2:
594 shift = 12;
595 break;
596 default:
597 edac_dbg(0, "illegal mod3mode\n");
598 return false;
599 }
600 switch (SKX_SAD_MOD3ASMOD2(sad)) {
601 case 0:
602 lchan = (addr >> shift) % 3;
603 break;
604 case 1:
605 lchan = (addr >> shift) % 2;
606 break;
607 case 2:
608 lchan = (addr >> shift) % 2;
609 lchan = (lchan << 1) | ~lchan;
610 break;
611 case 3:
612 lchan = ((addr >> shift) % 2) << 1;
613 break;
614 }
615 lchan = (lchan << 1) | (SKX_ILV_TARGET(tgt) & 1);
616 }
617
618 res->dev = d;
619 res->socket = d->imc[0].src_id;
620 res->imc = GET_BITFIELD(d->mcroute, lchan * 3, lchan * 3 + 2);
621 res->channel = GET_BITFIELD(d->mcroute, lchan * 2 + 18, lchan * 2 + 19);
622
623 edac_dbg(2, "%llx: socket=%d imc=%d channel=%d\n",
624 res->addr, res->socket, res->imc, res->channel);
625 return true;
626}
627
628#define SKX_MAX_TAD 8
629
630#define SKX_GET_TADBASE(d, mc, i, reg) \
631 pci_read_config_dword((d)->imc[mc].chan[0].cdev, 0x850 + 4 * (i), &reg)
632#define SKX_GET_TADWAYNESS(d, mc, i, reg) \
633 pci_read_config_dword((d)->imc[mc].chan[0].cdev, 0x880 + 4 * (i), &reg)
634#define SKX_GET_TADCHNILVOFFSET(d, mc, ch, i, reg) \
635 pci_read_config_dword((d)->imc[mc].chan[ch].cdev, 0x90 + 4 * (i), &reg)
636
637#define SKX_TAD_BASE(b) ((u64)GET_BITFIELD((b), 12, 31) << 26)
638#define SKX_TAD_SKT_GRAN(b) GET_BITFIELD((b), 4, 5)
639#define SKX_TAD_CHN_GRAN(b) GET_BITFIELD((b), 6, 7)
640#define SKX_TAD_LIMIT(b) (((u64)GET_BITFIELD((b), 12, 31) << 26) | MASK26)
641#define SKX_TAD_OFFSET(b) ((u64)GET_BITFIELD((b), 4, 23) << 26)
642#define SKX_TAD_SKTWAYS(b) (1 << GET_BITFIELD((b), 10, 11))
643#define SKX_TAD_CHNWAYS(b) (GET_BITFIELD((b), 8, 9) + 1)
644
645/* which bit is used for both socket and channel interleave */
646static int skx_granularity[] = { 6, 8, 12, 30 };
647
648static u64 skx_do_interleave(u64 addr, int shift, int ways, u64 lowbits)
649{
650 addr >>= shift;
651 addr /= ways;
652 addr <<= shift;
653
654 return addr | (lowbits & ((1ull << shift) - 1));
655}
656
657static bool skx_tad_decode(struct decoded_addr *res)
658{
659 int i;
660 u32 base, wayness, chnilvoffset;
661 int skt_interleave_bit, chn_interleave_bit;
662 u64 channel_addr;
663
664 for (i = 0; i < SKX_MAX_TAD; i++) {
665 SKX_GET_TADBASE(res->dev, res->imc, i, base);
666 SKX_GET_TADWAYNESS(res->dev, res->imc, i, wayness);
667 if (SKX_TAD_BASE(base) <= res->addr && res->addr <= SKX_TAD_LIMIT(wayness))
668 goto tad_found;
669 }
670 edac_dbg(0, "No TAD entry for %llx\n", res->addr);
671 return false;
672
673tad_found:
674 res->sktways = SKX_TAD_SKTWAYS(wayness);
675 res->chanways = SKX_TAD_CHNWAYS(wayness);
676 skt_interleave_bit = skx_granularity[SKX_TAD_SKT_GRAN(base)];
677 chn_interleave_bit = skx_granularity[SKX_TAD_CHN_GRAN(base)];
678
679 SKX_GET_TADCHNILVOFFSET(res->dev, res->imc, res->channel, i, chnilvoffset);
680 channel_addr = res->addr - SKX_TAD_OFFSET(chnilvoffset);
681
682 if (res->chanways == 3 && skt_interleave_bit > chn_interleave_bit) {
683 /* Must handle channel first, then socket */
684 channel_addr = skx_do_interleave(channel_addr, chn_interleave_bit,
685 res->chanways, channel_addr);
686 channel_addr = skx_do_interleave(channel_addr, skt_interleave_bit,
687 res->sktways, channel_addr);
688 } else {
689 /* Handle socket then channel. Preserve low bits from original address */
690 channel_addr = skx_do_interleave(channel_addr, skt_interleave_bit,
691 res->sktways, res->addr);
692 channel_addr = skx_do_interleave(channel_addr, chn_interleave_bit,
693 res->chanways, res->addr);
694 }
695
696 res->chan_addr = channel_addr;
697
698 edac_dbg(2, "%llx: chan_addr=%llx sktways=%d chanways=%d\n",
699 res->addr, res->chan_addr, res->sktways, res->chanways);
700 return true;
701}
702
703#define SKX_MAX_RIR 4
704
705#define SKX_GET_RIRWAYNESS(d, mc, ch, i, reg) \
706 pci_read_config_dword((d)->imc[mc].chan[ch].cdev, \
707 0x108 + 4 * (i), &reg)
708#define SKX_GET_RIRILV(d, mc, ch, idx, i, reg) \
709 pci_read_config_dword((d)->imc[mc].chan[ch].cdev, \
710 0x120 + 16 * idx + 4 * (i), &reg)
711
712#define SKX_RIR_VALID(b) GET_BITFIELD((b), 31, 31)
713#define SKX_RIR_LIMIT(b) (((u64)GET_BITFIELD((b), 1, 11) << 29) | MASK29)
714#define SKX_RIR_WAYS(b) (1 << GET_BITFIELD((b), 28, 29))
715#define SKX_RIR_CHAN_RANK(b) GET_BITFIELD((b), 16, 19)
716#define SKX_RIR_OFFSET(b) ((u64)(GET_BITFIELD((b), 2, 15) << 26))
717
718static bool skx_rir_decode(struct decoded_addr *res)
719{
720 int i, idx, chan_rank;
721 int shift;
722 u32 rirway, rirlv;
723 u64 rank_addr, prev_limit = 0, limit;
724
725 if (res->dev->imc[res->imc].chan[res->channel].dimms[0].close_pg)
726 shift = 6;
727 else
728 shift = 13;
729
730 for (i = 0; i < SKX_MAX_RIR; i++) {
731 SKX_GET_RIRWAYNESS(res->dev, res->imc, res->channel, i, rirway);
732 limit = SKX_RIR_LIMIT(rirway);
733 if (SKX_RIR_VALID(rirway)) {
734 if (prev_limit <= res->chan_addr &&
735 res->chan_addr <= limit)
736 goto rir_found;
737 }
738 prev_limit = limit;
739 }
740 edac_dbg(0, "No RIR entry for %llx\n", res->addr);
741 return false;
742
743rir_found:
744 rank_addr = res->chan_addr >> shift;
745 rank_addr /= SKX_RIR_WAYS(rirway);
746 rank_addr <<= shift;
747 rank_addr |= res->chan_addr & GENMASK_ULL(shift - 1, 0);
748
749 res->rank_address = rank_addr;
750 idx = (res->chan_addr >> shift) % SKX_RIR_WAYS(rirway);
751
752 SKX_GET_RIRILV(res->dev, res->imc, res->channel, idx, i, rirlv);
753 res->rank_address = rank_addr - SKX_RIR_OFFSET(rirlv);
754 chan_rank = SKX_RIR_CHAN_RANK(rirlv);
755 res->channel_rank = chan_rank;
756 res->dimm = chan_rank / 4;
757 res->rank = chan_rank % 4;
758
759 edac_dbg(2, "%llx: dimm=%d rank=%d chan_rank=%d rank_addr=%llx\n",
760 res->addr, res->dimm, res->rank,
761 res->channel_rank, res->rank_address);
762 return true;
763}
764
765static u8 skx_close_row[] = {
766 15, 16, 17, 18, 20, 21, 22, 28, 10, 11, 12, 13, 29, 30, 31, 32, 33
767};
768static u8 skx_close_column[] = {
769 3, 4, 5, 14, 19, 23, 24, 25, 26, 27
770};
771static u8 skx_open_row[] = {
772 14, 15, 16, 20, 28, 21, 22, 23, 24, 25, 26, 27, 29, 30, 31, 32, 33
773};
774static u8 skx_open_column[] = {
775 3, 4, 5, 6, 7, 8, 9, 10, 11, 12
776};
777static u8 skx_open_fine_column[] = {
778 3, 4, 5, 7, 8, 9, 10, 11, 12, 13
779};
780
781static int skx_bits(u64 addr, int nbits, u8 *bits)
782{
783 int i, res = 0;
784
785 for (i = 0; i < nbits; i++)
786 res |= ((addr >> bits[i]) & 1) << i;
787 return res;
788}
789
790static int skx_bank_bits(u64 addr, int b0, int b1, int do_xor, int x0, int x1)
791{
792 int ret = GET_BITFIELD(addr, b0, b0) | (GET_BITFIELD(addr, b1, b1) << 1);
793
794 if (do_xor)
795 ret ^= GET_BITFIELD(addr, x0, x0) | (GET_BITFIELD(addr, x1, x1) << 1);
796
797 return ret;
798}
799
800static bool skx_mad_decode(struct decoded_addr *r)
801{
802 struct skx_dimm *dimm = &r->dev->imc[r->imc].chan[r->channel].dimms[r->dimm];
803 int bg0 = dimm->fine_grain_bank ? 6 : 13;
804
805 if (dimm->close_pg) {
806 r->row = skx_bits(r->rank_address, dimm->rowbits, skx_close_row);
807 r->column = skx_bits(r->rank_address, dimm->colbits, skx_close_column);
808 r->column |= 0x400; /* C10 is autoprecharge, always set */
809 r->bank_address = skx_bank_bits(r->rank_address, 8, 9, dimm->bank_xor_enable, 22, 28);
810 r->bank_group = skx_bank_bits(r->rank_address, 6, 7, dimm->bank_xor_enable, 20, 21);
811 } else {
812 r->row = skx_bits(r->rank_address, dimm->rowbits, skx_open_row);
813 if (dimm->fine_grain_bank)
814 r->column = skx_bits(r->rank_address, dimm->colbits, skx_open_fine_column);
815 else
816 r->column = skx_bits(r->rank_address, dimm->colbits, skx_open_column);
817 r->bank_address = skx_bank_bits(r->rank_address, 18, 19, dimm->bank_xor_enable, 22, 23);
818 r->bank_group = skx_bank_bits(r->rank_address, bg0, 17, dimm->bank_xor_enable, 20, 21);
819 }
820 r->row &= (1u << dimm->rowbits) - 1;
821
822 edac_dbg(2, "%llx: row=%x col=%x bank_addr=%d bank_group=%d\n",
823 r->addr, r->row, r->column, r->bank_address,
824 r->bank_group);
825 return true;
826}
827
828static bool skx_decode(struct decoded_addr *res)
829{
830
831 return skx_sad_decode(res) && skx_tad_decode(res) &&
832 skx_rir_decode(res) && skx_mad_decode(res);
833}
834
835#ifdef CONFIG_EDAC_DEBUG
836/*
837 * Debug feature. Make /sys/kernel/debug/skx_edac_test/addr.
838 * Write an address to this file to exercise the address decode
839 * logic in this driver.
840 */
841static struct dentry *skx_test;
842static u64 skx_fake_addr;
843
844static int debugfs_u64_set(void *data, u64 val)
845{
846 struct decoded_addr res;
847
848 res.addr = val;
849 skx_decode(&res);
850
851 return 0;
852}
853
854DEFINE_SIMPLE_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n");
855
856static struct dentry *mydebugfs_create(const char *name, umode_t mode,
857 struct dentry *parent, u64 *value)
858{
859 return debugfs_create_file(name, mode, parent, value, &fops_u64_wo);
860}
861
862static void setup_skx_debug(void)
863{
864 skx_test = debugfs_create_dir("skx_edac_test", NULL);
865 mydebugfs_create("addr", S_IWUSR, skx_test, &skx_fake_addr);
866}
867
868static void teardown_skx_debug(void)
869{
870 debugfs_remove_recursive(skx_test);
871}
872#else
873static void setup_skx_debug(void)
874{
875}
876
877static void teardown_skx_debug(void)
878{
879}
880#endif /*CONFIG_EDAC_DEBUG*/
881
882static void skx_mce_output_error(struct mem_ctl_info *mci,
883 const struct mce *m,
884 struct decoded_addr *res)
885{
886 enum hw_event_mc_err_type tp_event;
887 char *type, *optype, msg[256];
888 bool ripv = GET_BITFIELD(m->mcgstatus, 0, 0);
889 bool overflow = GET_BITFIELD(m->status, 62, 62);
890 bool uncorrected_error = GET_BITFIELD(m->status, 61, 61);
891 bool recoverable;
892 u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52);
893 u32 mscod = GET_BITFIELD(m->status, 16, 31);
894 u32 errcode = GET_BITFIELD(m->status, 0, 15);
895 u32 optypenum = GET_BITFIELD(m->status, 4, 6);
896
897 recoverable = GET_BITFIELD(m->status, 56, 56);
898
899 if (uncorrected_error) {
900 if (ripv) {
901 type = "FATAL";
902 tp_event = HW_EVENT_ERR_FATAL;
903 } else {
904 type = "NON_FATAL";
905 tp_event = HW_EVENT_ERR_UNCORRECTED;
906 }
907 } else {
908 type = "CORRECTED";
909 tp_event = HW_EVENT_ERR_CORRECTED;
910 }
911
912 /*
913 * According to Table 15-9 of the Intel Architecture spec vol 3A,
914 * memory errors should fit in this mask:
915 * 000f 0000 1mmm cccc (binary)
916 * where:
917 * f = Correction Report Filtering Bit. If 1, subsequent errors
918 * won't be shown
919 * mmm = error type
920 * cccc = channel
921 * If the mask doesn't match, report an error to the parsing logic
922 */
923 if (!((errcode & 0xef80) == 0x80)) {
924 optype = "Can't parse: it is not a mem";
925 } else {
926 switch (optypenum) {
927 case 0:
928 optype = "generic undef request error";
929 break;
930 case 1:
931 optype = "memory read error";
932 break;
933 case 2:
934 optype = "memory write error";
935 break;
936 case 3:
937 optype = "addr/cmd error";
938 break;
939 case 4:
940 optype = "memory scrubbing error";
941 break;
942 default:
943 optype = "reserved";
944 break;
945 }
946 }
947
948 snprintf(msg, sizeof(msg),
949 "%s%s err_code:%04x:%04x socket:%d imc:%d rank:%d bg:%d ba:%d row:%x col:%x",
950 overflow ? " OVERFLOW" : "",
951 (uncorrected_error && recoverable) ? " recoverable" : "",
952 mscod, errcode,
953 res->socket, res->imc, res->rank,
954 res->bank_group, res->bank_address, res->row, res->column);
955
956 edac_dbg(0, "%s\n", msg);
957
958 /* Call the helper to output message */
959 edac_mc_handle_error(tp_event, mci, core_err_cnt,
960 m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0,
961 res->channel, res->dimm, -1,
962 optype, msg);
963}
964
965static int skx_mce_check_error(struct notifier_block *nb, unsigned long val,
966 void *data)
967{
968 struct mce *mce = (struct mce *)data;
969 struct decoded_addr res;
970 struct mem_ctl_info *mci;
971 char *type;
972
973 if (get_edac_report_status() == EDAC_REPORTING_DISABLED)
974 return NOTIFY_DONE;
975
976 /* ignore unless this is memory related with an address */
977 if ((mce->status & 0xefff) >> 7 != 1 || !(mce->status & MCI_STATUS_ADDRV))
978 return NOTIFY_DONE;
979
980 res.addr = mce->addr;
981 if (!skx_decode(&res))
982 return NOTIFY_DONE;
983 mci = res.dev->imc[res.imc].mci;
984
985 if (mce->mcgstatus & MCG_STATUS_MCIP)
986 type = "Exception";
987 else
988 type = "Event";
989
990 skx_mc_printk(mci, KERN_DEBUG, "HANDLING MCE MEMORY ERROR\n");
991
992 skx_mc_printk(mci, KERN_DEBUG, "CPU %d: Machine Check %s: %Lx "
993 "Bank %d: %016Lx\n", mce->extcpu, type,
994 mce->mcgstatus, mce->bank, mce->status);
995 skx_mc_printk(mci, KERN_DEBUG, "TSC %llx ", mce->tsc);
996 skx_mc_printk(mci, KERN_DEBUG, "ADDR %llx ", mce->addr);
997 skx_mc_printk(mci, KERN_DEBUG, "MISC %llx ", mce->misc);
998
999 skx_mc_printk(mci, KERN_DEBUG, "PROCESSOR %u:%x TIME %llu SOCKET "
1000 "%u APIC %x\n", mce->cpuvendor, mce->cpuid,
1001 mce->time, mce->socketid, mce->apicid);
1002
1003 skx_mce_output_error(mci, mce, &res);
1004
1005 return NOTIFY_DONE;
1006}
1007
1008static struct notifier_block skx_mce_dec = {
1009 .notifier_call = skx_mce_check_error,
1010};
1011
1012static void skx_remove(void)
1013{
1014 int i, j;
1015 struct skx_dev *d, *tmp;
1016
1017 edac_dbg(0, "\n");
1018
1019 list_for_each_entry_safe(d, tmp, &skx_edac_list, list) {
1020 list_del(&d->list);
1021 for (i = 0; i < NUM_IMC; i++) {
1022 skx_unregister_mci(&d->imc[i]);
1023 for (j = 0; j < NUM_CHANNELS; j++)
1024 pci_dev_put(d->imc[i].chan[j].cdev);
1025 }
1026 pci_dev_put(d->util_all);
1027 pci_dev_put(d->sad_all);
1028
1029 kfree(d);
1030 }
1031}
1032
1033/*
1034 * skx_init:
1035 * make sure we are running on the correct cpu model
1036 * search for all the devices we need
1037 * check which DIMMs are present.
1038 */
1039int __init skx_init(void)
1040{
1041 const struct x86_cpu_id *id;
1042 const struct munit *m;
1043 int rc = 0, i;
1044 u8 mc = 0, src_id, node_id;
1045 struct skx_dev *d;
1046
1047 edac_dbg(2, "\n");
1048
1049 id = x86_match_cpu(skx_cpuids);
1050 if (!id)
1051 return -ENODEV;
1052
1053 rc = skx_get_hi_lo();
1054 if (rc)
1055 return rc;
1056
1057 rc = get_all_bus_mappings();
1058 if (rc < 0)
1059 goto fail;
1060 if (rc == 0) {
1061 edac_dbg(2, "No memory controllers found\n");
1062 return -ENODEV;
1063 }
1064
1065 for (m = skx_all_munits; m->did; m++) {
1066 rc = get_all_munits(m);
1067 if (rc < 0)
1068 goto fail;
1069 if (rc != m->per_socket * skx_num_sockets) {
1070 edac_dbg(2, "Expected %d, got %d of %x\n",
1071 m->per_socket * skx_num_sockets, rc, m->did);
1072 rc = -ENODEV;
1073 goto fail;
1074 }
1075 }
1076
1077 list_for_each_entry(d, &skx_edac_list, list) {
1078 src_id = get_src_id(d);
1079 node_id = skx_get_node_id(d);
1080 edac_dbg(2, "src_id=%d node_id=%d\n", src_id, node_id);
1081 for (i = 0; i < NUM_IMC; i++) {
1082 d->imc[i].mc = mc++;
1083 d->imc[i].lmc = i;
1084 d->imc[i].src_id = src_id;
1085 d->imc[i].node_id = node_id;
1086 rc = skx_register_mci(&d->imc[i]);
1087 if (rc < 0)
1088 goto fail;
1089 }
1090 }
1091
1092 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
1093 opstate_init();
1094
1095 setup_skx_debug();
1096
1097 mce_register_decode_chain(&skx_mce_dec);
1098
1099 return 0;
1100fail:
1101 skx_remove();
1102 return rc;
1103}
1104
1105static void __exit skx_exit(void)
1106{
1107 edac_dbg(2, "\n");
1108 mce_unregister_decode_chain(&skx_mce_dec);
1109 skx_remove();
1110 teardown_skx_debug();
1111}
1112
1113module_init(skx_init);
1114module_exit(skx_exit);
1115
1116module_param(edac_op_state, int, 0444);
1117MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
1118
1119MODULE_LICENSE("GPL v2");
1120MODULE_AUTHOR("Tony Luck");
1121MODULE_DESCRIPTION("MC Driver for Intel Skylake server processors");
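
The decode path above leans on two small bit-manipulation idioms: GET_BITFIELD() pulls a
contiguous field out of a register value, and skx_bits() gathers scattered rank-address bits
(per tables such as skx_close_row) into a dense row or column number. A minimal, userspace-only
sketch of both, assuming nothing beyond the C standard library (the values are made up purely
for illustration and are not taken from the driver):

	#include <stdio.h>
	#include <stdint.h>

	/* keep bits l..h of a 64-bit value, then shift them down */
	#define GENMASK_ULL(h, l) \
		(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))
	#define GET_BITFIELD(v, lo, hi) \
		(((v) & GENMASK_ULL((hi), (lo))) >> (lo))

	/* gather scattered address bits into a dense result, as skx_bits() does */
	static int skx_bits(uint64_t addr, int nbits, const uint8_t *bits)
	{
		int i, res = 0;

		for (i = 0; i < nbits; i++)
			res |= ((addr >> bits[i]) & 1) << i;
		return res;
	}

	int main(void)
	{
		/* hypothetical subset of the close-page row-bit table */
		static const uint8_t close_row[] = { 15, 16, 17, 18, 20 };
		uint64_t rank_addr = 0x132800;	/* arbitrary example address */

		printf("field [12:31] = %llx\n",
		       (unsigned long long)GET_BITFIELD(rank_addr, 12, 31));
		printf("row bits      = %x\n", skx_bits(rank_addr, 5, close_row));
		return 0;
	}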
diff --git a/drivers/firmware/arm_scpi.c b/drivers/firmware/arm_scpi.c
index 438893762076..ce2bc2a38101 100644
--- a/drivers/firmware/arm_scpi.c
+++ b/drivers/firmware/arm_scpi.c
@@ -709,9 +709,10 @@ static int scpi_probe(struct platform_device *pdev)
 		struct mbox_client *cl = &pchan->cl;
 		struct device_node *shmem = of_parse_phandle(np, "shmem", idx);
 
-		if (of_address_to_resource(shmem, 0, &res)) {
+		ret = of_address_to_resource(shmem, 0, &res);
+		of_node_put(shmem);
+		if (ret) {
 			dev_err(dev, "failed to get SCPI payload mem resource\n");
-			ret = -EINVAL;
 			goto err;
 		}
 
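
Besides fixing the lost return value, the hunk illustrates a general devicetree refcounting
rule: of_parse_phandle() returns the node with its refcount raised, so the caller owes an
of_node_put() on every path once the node is no longer needed. A hedged sketch of the
resulting pattern, under the same assumptions as the patch:

	struct device_node *shmem = of_parse_phandle(np, "shmem", idx);
	int ret = of_address_to_resource(shmem, 0, &res);

	of_node_put(shmem);	/* drop the reference taken by of_parse_phandle() */
	if (ret)
		return ret;	/* resource lookup failed */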
diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c
index 94a58a082b99..44c01390d035 100644
--- a/drivers/firmware/dmi-id.c
+++ b/drivers/firmware/dmi-id.c
@@ -229,14 +229,14 @@ static int __init dmi_id_init(void)
 
 	ret = device_register(dmi_dev);
 	if (ret)
-		goto fail_free_dmi_dev;
+		goto fail_put_dmi_dev;
 
 	return 0;
 
-fail_free_dmi_dev:
-	kfree(dmi_dev);
-fail_class_unregister:
+fail_put_dmi_dev:
+	put_device(dmi_dev);
 
+fail_class_unregister:
 	class_unregister(&dmi_class);
 
 	return ret;
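
The switch from kfree() to put_device() follows the driver-core ownership rule: once a
struct device has been passed to device_register(), even unsuccessfully, its lifetime is
managed by its refcount, so the error path must drop the reference and let the release()
callback free the memory. A minimal sketch of the pattern, assuming a device with a proper
release() method:

	ret = device_register(dev);
	if (ret) {
		put_device(dev);	/* release() frees dev; kfree() here would be a bug */
		return ret;
	}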
diff --git a/drivers/firmware/efi/capsule-loader.c b/drivers/firmware/efi/capsule-loader.c
index c99c24bc79b0..9ae6c116c474 100644
--- a/drivers/firmware/efi/capsule-loader.c
+++ b/drivers/firmware/efi/capsule-loader.c
@@ -16,6 +16,7 @@
 #include <linux/slab.h>
 #include <linux/mutex.h>
 #include <linux/efi.h>
+#include <linux/vmalloc.h>
 
 #define NO_FURTHER_WRITE_ACTION -1
 
@@ -108,14 +109,15 @@ static ssize_t efi_capsule_submit_update(struct capsule_info *cap_info)
 	int ret;
 	void *cap_hdr_temp;
 
-	cap_hdr_temp = kmap(cap_info->pages[0]);
+	cap_hdr_temp = vmap(cap_info->pages, cap_info->index,
+			    VM_MAP, PAGE_KERNEL);
 	if (!cap_hdr_temp) {
-		pr_debug("%s: kmap() failed\n", __func__);
+		pr_debug("%s: vmap() failed\n", __func__);
 		return -EFAULT;
 	}
 
 	ret = efi_capsule_update(cap_hdr_temp, cap_info->pages);
-	kunmap(cap_info->pages[0]);
+	vunmap(cap_hdr_temp);
 	if (ret) {
 		pr_err("%s: efi_capsule_update() failed\n", __func__);
 		return ret;
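
The kmap()-to-vmap() change matters because, as the capsule.c comment updated below now
spells out, the firmware may consume the capsule immediately through this mapping, so the
entire capsule, not just pages[0], has to be virtually contiguous. A hedged sketch of the
new mapping discipline:

	void *hdr = vmap(cap_info->pages, cap_info->index, VM_MAP, PAGE_KERNEL);

	if (!hdr)
		return -EFAULT;		/* mirrors the patch's error code */
	ret = efi_capsule_update(hdr, cap_info->pages);
	vunmap(hdr);			/* one vunmap() for the whole range */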
diff --git a/drivers/firmware/efi/capsule.c b/drivers/firmware/efi/capsule.c
index 53b9fd2293ee..6eedff45e6d7 100644
--- a/drivers/firmware/efi/capsule.c
+++ b/drivers/firmware/efi/capsule.c
@@ -190,9 +190,9 @@ efi_capsule_update_locked(efi_capsule_header_t *capsule,
  * map the capsule described by @capsule with its data in @pages and
  * send it to the firmware via the UpdateCapsule() runtime service.
  *
- * @capsule must be a virtual mapping of the first page in @pages
- * (@pages[0]) in the kernel address space. That is, a
- * capsule_header_t that describes the entire contents of the capsule
+ * @capsule must be a virtual mapping of the complete capsule update in the
+ * kernel address space, as the capsule can be consumed immediately.
+ * A capsule_header_t that describes the entire contents of the capsule
  * must be at the start of the first data page.
  *
  * Even though this function will validate that the firmware supports
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 5a2631af7410..7dd2e2d37231 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -657,9 +657,12 @@ static int __init fdt_find_uefi_params(unsigned long node, const char *uname,
 	}
 
 	if (subnode) {
-		node = of_get_flat_dt_subnode_by_name(node, subnode);
-		if (node < 0)
+		int err = of_get_flat_dt_subnode_by_name(node, subnode);
+
+		if (err < 0)
 			return 0;
+
+		node = err;
 	}
 
 	return __find_uefi_params(node, info, dt_params[i].params);
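
The bug being fixed here is a classic signed/unsigned trap: 'node' is an unsigned long
parameter, so a negative errno returned by of_get_flat_dt_subnode_by_name() wraps to a huge
positive value and 'node < 0' can never fire. Routing the result through a signed temporary,
as the patch does, makes the check meaningful again:

	int err = of_get_flat_dt_subnode_by_name(node, subnode);

	if (err < 0)		/* err is signed, so this test actually works */
		return 0;
	node = err;		/* non-negative offset, safe to store back */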
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
index 3bd127f95315..aded10662020 100644
--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -41,6 +41,8 @@ static unsigned long __chunk_size = EFI_READ_CHUNK_SIZE;
 #define EFI_ALLOC_ALIGN EFI_PAGE_SIZE
 #endif
 
+#define EFI_MMAP_NR_SLACK_SLOTS	8
+
 struct file_info {
 	efi_file_handle_t *handle;
 	u64 size;
@@ -63,49 +65,62 @@ void efi_printk(efi_system_table_t *sys_table_arg, char *str)
 	}
 }
 
+static inline bool mmap_has_headroom(unsigned long buff_size,
+				     unsigned long map_size,
+				     unsigned long desc_size)
+{
+	unsigned long slack = buff_size - map_size;
+
+	return slack / desc_size >= EFI_MMAP_NR_SLACK_SLOTS;
+}
+
 efi_status_t efi_get_memory_map(efi_system_table_t *sys_table_arg,
-				efi_memory_desc_t **map,
-				unsigned long *map_size,
-				unsigned long *desc_size,
-				u32 *desc_ver,
-				unsigned long *key_ptr)
+				struct efi_boot_memmap *map)
 {
 	efi_memory_desc_t *m = NULL;
 	efi_status_t status;
 	unsigned long key;
 	u32 desc_version;
 
-	*map_size = sizeof(*m) * 32;
+	*map->desc_size = sizeof(*m);
+	*map->map_size = *map->desc_size * 32;
+	*map->buff_size = *map->map_size;
 again:
-	/*
-	 * Add an additional efi_memory_desc_t because we're doing an
-	 * allocation which may be in a new descriptor region.
-	 */
-	*map_size += sizeof(*m);
 	status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
-				*map_size, (void **)&m);
+				*map->map_size, (void **)&m);
 	if (status != EFI_SUCCESS)
 		goto fail;
 
-	*desc_size = 0;
+	*map->desc_size = 0;
 	key = 0;
-	status = efi_call_early(get_memory_map, map_size, m,
-				&key, desc_size, &desc_version);
-	if (status == EFI_BUFFER_TOO_SMALL) {
+	status = efi_call_early(get_memory_map, map->map_size, m,
+				&key, map->desc_size, &desc_version);
+	if (status == EFI_BUFFER_TOO_SMALL ||
+	    !mmap_has_headroom(*map->buff_size, *map->map_size,
+			       *map->desc_size)) {
 		efi_call_early(free_pool, m);
+		/*
+		 * Make sure there are a few entries of headroom so that the
+		 * buffer can be reused for a new map after allocations are
+		 * no longer permitted. It's unlikely that the map will grow to
+		 * exceed this headroom once we are ready to trigger
+		 * ExitBootServices().
+		 */
+		*map->map_size += *map->desc_size * EFI_MMAP_NR_SLACK_SLOTS;
+		*map->buff_size = *map->map_size;
 		goto again;
 	}
 
 	if (status != EFI_SUCCESS)
 		efi_call_early(free_pool, m);
 
-	if (key_ptr && status == EFI_SUCCESS)
-		*key_ptr = key;
-	if (desc_ver && status == EFI_SUCCESS)
-		*desc_ver = desc_version;
+	if (map->key_ptr && status == EFI_SUCCESS)
+		*map->key_ptr = key;
+	if (map->desc_ver && status == EFI_SUCCESS)
+		*map->desc_ver = desc_version;
 
 fail:
-	*map = m;
+	*map->map = m;
 	return status;
 }
 
@@ -113,13 +128,20 @@ fail:
 unsigned long get_dram_base(efi_system_table_t *sys_table_arg)
 {
 	efi_status_t status;
-	unsigned long map_size;
+	unsigned long map_size, buff_size;
 	unsigned long membase = EFI_ERROR;
 	struct efi_memory_map map;
 	efi_memory_desc_t *md;
+	struct efi_boot_memmap boot_map;
 
-	status = efi_get_memory_map(sys_table_arg, (efi_memory_desc_t **)&map.map,
-				    &map_size, &map.desc_size, NULL, NULL);
+	boot_map.map = (efi_memory_desc_t **)&map.map;
+	boot_map.map_size = &map_size;
+	boot_map.desc_size = &map.desc_size;
+	boot_map.desc_ver = NULL;
+	boot_map.key_ptr = NULL;
+	boot_map.buff_size = &buff_size;
+
+	status = efi_get_memory_map(sys_table_arg, &boot_map);
 	if (status != EFI_SUCCESS)
 		return membase;
 
@@ -144,15 +166,22 @@ efi_status_t efi_high_alloc(efi_system_table_t *sys_table_arg,
 			    unsigned long size, unsigned long align,
 			    unsigned long *addr, unsigned long max)
 {
-	unsigned long map_size, desc_size;
+	unsigned long map_size, desc_size, buff_size;
 	efi_memory_desc_t *map;
 	efi_status_t status;
 	unsigned long nr_pages;
 	u64 max_addr = 0;
 	int i;
+	struct efi_boot_memmap boot_map;
+
+	boot_map.map = &map;
+	boot_map.map_size = &map_size;
+	boot_map.desc_size = &desc_size;
+	boot_map.desc_ver = NULL;
+	boot_map.key_ptr = NULL;
+	boot_map.buff_size = &buff_size;
 
-	status = efi_get_memory_map(sys_table_arg, &map, &map_size, &desc_size,
-				    NULL, NULL);
+	status = efi_get_memory_map(sys_table_arg, &boot_map);
 	if (status != EFI_SUCCESS)
 		goto fail;
 
@@ -230,14 +259,21 @@ efi_status_t efi_low_alloc(efi_system_table_t *sys_table_arg,
 			   unsigned long size, unsigned long align,
 			   unsigned long *addr)
 {
-	unsigned long map_size, desc_size;
+	unsigned long map_size, desc_size, buff_size;
 	efi_memory_desc_t *map;
 	efi_status_t status;
 	unsigned long nr_pages;
 	int i;
+	struct efi_boot_memmap boot_map;
 
-	status = efi_get_memory_map(sys_table_arg, &map, &map_size, &desc_size,
-				    NULL, NULL);
+	boot_map.map = &map;
+	boot_map.map_size = &map_size;
+	boot_map.desc_size = &desc_size;
+	boot_map.desc_ver = NULL;
+	boot_map.key_ptr = NULL;
+	boot_map.buff_size = &buff_size;
+
+	status = efi_get_memory_map(sys_table_arg, &boot_map);
 	if (status != EFI_SUCCESS)
 		goto fail;
 
@@ -704,3 +740,76 @@ char *efi_convert_cmdline(efi_system_table_t *sys_table_arg,
 	*cmd_line_len = options_bytes;
 	return (char *)cmdline_addr;
 }
+
+/*
+ * Handle calling ExitBootServices according to the requirements set out by the
+ * spec. Obtains the current memory map, and returns that info after calling
+ * ExitBootServices. The client must specify a function to perform any
+ * processing of the memory map data prior to ExitBootServices. A client
+ * specific structure may be passed to the function via priv. The client
+ * function may be called multiple times.
+ */
+efi_status_t efi_exit_boot_services(efi_system_table_t *sys_table_arg,
+				    void *handle,
+				    struct efi_boot_memmap *map,
+				    void *priv,
+				    efi_exit_boot_map_processing priv_func)
+{
+	efi_status_t status;
+
+	status = efi_get_memory_map(sys_table_arg, map);
+
+	if (status != EFI_SUCCESS)
+		goto fail;
+
+	status = priv_func(sys_table_arg, map, priv);
+	if (status != EFI_SUCCESS)
+		goto free_map;
+
+	status = efi_call_early(exit_boot_services, handle, *map->key_ptr);
+
+	if (status == EFI_INVALID_PARAMETER) {
+		/*
+		 * The memory map changed between efi_get_memory_map() and
+		 * exit_boot_services(). Per the UEFI Spec v2.6, Section 6.4:
+		 * EFI_BOOT_SERVICES.ExitBootServices we need to get the
+		 * updated map, and try again. The spec implies one retry
+		 * should be sufficient, which is confirmed against the EDK2
+		 * implementation. Per the spec, we can only invoke
+		 * get_memory_map() and exit_boot_services() - we cannot alloc
+		 * so efi_get_memory_map() cannot be used, and we must reuse
+		 * the buffer. For all practical purposes, the headroom in the
+		 * buffer should account for any changes in the map so the call
+		 * to get_memory_map() is expected to succeed here.
+		 */
+		*map->map_size = *map->buff_size;
+		status = efi_call_early(get_memory_map,
+					map->map_size,
+					*map->map,
+					map->key_ptr,
+					map->desc_size,
+					map->desc_ver);
+
+		/* exit_boot_services() was called, thus cannot free */
+		if (status != EFI_SUCCESS)
+			goto fail;
+
+		status = priv_func(sys_table_arg, map, priv);
+		/* exit_boot_services() was called, thus cannot free */
+		if (status != EFI_SUCCESS)
+			goto fail;
+
+		status = efi_call_early(exit_boot_services, handle, *map->key_ptr);
+	}
+
+	/* exit_boot_services() was called, thus cannot free */
+	if (status != EFI_SUCCESS)
+		goto fail;
+
+	return EFI_SUCCESS;
+
+free_map:
+	efi_call_early(free_pool, *map->map);
+fail:
+	return status;
+}
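
For callers, the new contract is: fill a struct efi_boot_memmap with pointers to
caller-owned variables, then let efi_exit_boot_services() run the get_memory_map() /
ExitBootServices() retry dance, invoking priv_func on each candidate map. A hedged sketch
of a client (the struct fields come from the patch; the surrounding names and the
processing function are illustrative, and the fdt.c conversion below is the real example):

	unsigned long map_size, desc_size, buff_size, mmap_key;
	efi_memory_desc_t *memory_map;
	u32 desc_ver;
	struct efi_boot_memmap map;

	map.map = &memory_map;
	map.map_size = &map_size;
	map.desc_size = &desc_size;
	map.desc_ver = &desc_ver;
	map.key_ptr = &mmap_key;	/* must be valid: the helper reads *key_ptr */
	map.buff_size = &buff_size;

	status = efi_exit_boot_services(sys_table, handle, &map, &my_priv,
					my_map_processing_func);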
diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c
index e58abfa953cc..a6a93116a8f0 100644
--- a/drivers/firmware/efi/libstub/fdt.c
+++ b/drivers/firmware/efi/libstub/fdt.c
@@ -152,6 +152,27 @@ fdt_set_fail:
 #define EFI_FDT_ALIGN EFI_PAGE_SIZE
 #endif
 
+struct exit_boot_struct {
+	efi_memory_desc_t *runtime_map;
+	int *runtime_entry_count;
+};
+
+static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
+				   struct efi_boot_memmap *map,
+				   void *priv)
+{
+	struct exit_boot_struct *p = priv;
+	/*
+	 * Update the memory map with virtual addresses. The function will also
+	 * populate @runtime_map with copies of just the EFI_MEMORY_RUNTIME
+	 * entries so that we can pass it straight to SetVirtualAddressMap()
+	 */
+	efi_get_virtmap(*map->map, *map->map_size, *map->desc_size,
+			p->runtime_map, p->runtime_entry_count);
+
+	return EFI_SUCCESS;
+}
+
 /*
  * Allocate memory for a new FDT, then add EFI, commandline, and
  * initrd related fields to the FDT. This routine increases the
@@ -175,13 +196,22 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
 				   unsigned long fdt_addr,
 				   unsigned long fdt_size)
 {
-	unsigned long map_size, desc_size;
+	unsigned long map_size, desc_size, buff_size;
 	u32 desc_ver;
 	unsigned long mmap_key;
 	efi_memory_desc_t *memory_map, *runtime_map;
 	unsigned long new_fdt_size;
 	efi_status_t status;
 	int runtime_entry_count = 0;
+	struct efi_boot_memmap map;
+	struct exit_boot_struct priv;
+
+	map.map = &runtime_map;
+	map.map_size = &map_size;
+	map.desc_size = &desc_size;
+	map.desc_ver = &desc_ver;
+	map.key_ptr = &mmap_key;
+	map.buff_size = &buff_size;
 
 	/*
 	 * Get a copy of the current memory map that we will use to prepare
@@ -189,8 +219,7 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
 	 * subsequent allocations adding entries, since they could not affect
 	 * the number of EFI_MEMORY_RUNTIME regions.
 	 */
-	status = efi_get_memory_map(sys_table, &runtime_map, &map_size,
-				    &desc_size, &desc_ver, &mmap_key);
+	status = efi_get_memory_map(sys_table, &map);
 	if (status != EFI_SUCCESS) {
 		pr_efi_err(sys_table, "Unable to retrieve UEFI memory map.\n");
 		return status;
@@ -199,6 +228,7 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
 	pr_efi(sys_table,
 	       "Exiting boot services and installing virtual address map...\n");
 
+	map.map = &memory_map;
 	/*
 	 * Estimate size of new FDT, and allocate memory for it. We
 	 * will allocate a bigger buffer if this ends up being too
@@ -218,8 +248,7 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
 	 * we can get the memory map key needed for
 	 * exit_boot_services().
 	 */
-	status = efi_get_memory_map(sys_table, &memory_map, &map_size,
-				    &desc_size, &desc_ver, &mmap_key);
+	status = efi_get_memory_map(sys_table, &map);
 	if (status != EFI_SUCCESS)
 		goto fail_free_new_fdt;
 
@@ -250,16 +279,11 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
 		}
 	}
 
-	/*
-	 * Update the memory map with virtual addresses. The function will also
-	 * populate @runtime_map with copies of just the EFI_MEMORY_RUNTIME
-	 * entries so that we can pass it straight into SetVirtualAddressMap()
-	 */
-	efi_get_virtmap(memory_map, map_size, desc_size, runtime_map,
-			&runtime_entry_count);
-
-	/* Now we are ready to exit_boot_services.*/
-	status = sys_table->boottime->exit_boot_services(handle, mmap_key);
+	sys_table->boottime->free_pool(memory_map);
+	priv.runtime_map = runtime_map;
+	priv.runtime_entry_count = &runtime_entry_count;
+	status = efi_exit_boot_services(sys_table, handle, &map, &priv,
+					exit_boot_func);
 
 	if (status == EFI_SUCCESS) {
 		efi_set_virtual_address_map_t *svam;
diff --git a/drivers/firmware/efi/libstub/random.c b/drivers/firmware/efi/libstub/random.c
index 53f6d3fe6d86..0c9f58c5ba50 100644
--- a/drivers/firmware/efi/libstub/random.c
+++ b/drivers/firmware/efi/libstub/random.c
@@ -73,12 +73,20 @@ efi_status_t efi_random_alloc(efi_system_table_t *sys_table_arg,
 			      unsigned long random_seed)
 {
 	unsigned long map_size, desc_size, total_slots = 0, target_slot;
+	unsigned long buff_size;
 	efi_status_t status;
 	efi_memory_desc_t *memory_map;
 	int map_offset;
+	struct efi_boot_memmap map;
 
-	status = efi_get_memory_map(sys_table_arg, &memory_map, &map_size,
-				    &desc_size, NULL, NULL);
+	map.map = &memory_map;
+	map.map_size = &map_size;
+	map.desc_size = &desc_size;
+	map.desc_ver = NULL;
+	map.key_ptr = NULL;
+	map.buff_size = &buff_size;
+
+	status = efi_get_memory_map(sys_table_arg, &map);
 	if (status != EFI_SUCCESS)
 		return status;
 
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 98dd47a30fc7..24caedb00a7a 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -50,6 +50,7 @@ config GPIO_DEVRES
 config OF_GPIO
 	def_bool y
 	depends on OF
+	depends on HAS_IOMEM
 
 config GPIO_ACPI
 	def_bool y
@@ -188,7 +189,7 @@ config GPIO_EP93XX
 config GPIO_ETRAXFS
 	bool "Axis ETRAX FS General I/O"
 	depends on CRIS || COMPILE_TEST
-	depends on OF
+	depends on OF_GPIO
 	select GPIO_GENERIC
 	select GPIOLIB_IRQCHIP
 	help
@@ -214,7 +215,7 @@ config GPIO_GENERIC_PLATFORM
 
 config GPIO_GRGPIO
 	tristate "Aeroflex Gaisler GRGPIO support"
-	depends on OF
+	depends on OF_GPIO
 	select GPIO_GENERIC
 	select IRQ_DOMAIN
 	help
@@ -312,7 +313,7 @@ config GPIO_MPC8XXX
 config GPIO_MVEBU
 	def_bool y
 	depends on PLAT_ORION
-	depends on OF
+	depends on OF_GPIO
 	select GENERIC_IRQ_CHIP
 
 config GPIO_MXC
@@ -405,7 +406,7 @@ config GPIO_TEGRA
 	bool "NVIDIA Tegra GPIO support"
 	default ARCH_TEGRA
 	depends on ARCH_TEGRA || COMPILE_TEST
-	depends on OF
+	depends on OF_GPIO
 	help
 	  Say yes here to support GPIO pins on NVIDIA Tegra SoCs.
 
@@ -1099,7 +1100,7 @@ menu "SPI GPIO expanders"
 
 config GPIO_74X164
 	tristate "74x164 serial-in/parallel-out 8-bits shift register"
-	depends on OF
+	depends on OF_GPIO
 	help
 	  Driver for 74x164 compatible serial-in/parallel-out 8-outputs
 	  shift registers. This driver can be used to provide access
@@ -1130,6 +1131,7 @@ menu "SPI or I2C GPIO expanders"
 
 config GPIO_MCP23S08
 	tristate "Microchip MCP23xxx I/O expander"
+	depends on OF_GPIO
 	select GPIOLIB_IRQCHIP
 	help
 	  SPI/I2C driver for Microchip MCP23S08/MCP23S17/MCP23008/MCP23017
diff --git a/drivers/gpio/gpio-max730x.c b/drivers/gpio/gpio-max730x.c
index 08807368f007..946d09195598 100644
--- a/drivers/gpio/gpio-max730x.c
+++ b/drivers/gpio/gpio-max730x.c
@@ -192,6 +192,10 @@ int __max730x_probe(struct max7301 *ts)
 	ts->chip.parent = dev;
 	ts->chip.owner = THIS_MODULE;
 
+	ret = gpiochip_add_data(&ts->chip, ts);
+	if (ret)
+		goto exit_destroy;
+
 	/*
 	 * initialize pullups according to platform data and cache the
 	 * register values for later use.
@@ -213,10 +217,6 @@ int __max730x_probe(struct max7301 *ts)
 		}
 	}
 
-	ret = gpiochip_add_data(&ts->chip, ts);
-	if (ret)
-		goto exit_destroy;
-
 	return ret;
 
 exit_destroy:
diff --git a/drivers/gpio/gpio-mcp23s08.c b/drivers/gpio/gpio-mcp23s08.c
index ac22efc1840e..99d37b56c258 100644
--- a/drivers/gpio/gpio-mcp23s08.c
+++ b/drivers/gpio/gpio-mcp23s08.c
@@ -564,7 +564,7 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
 	mcp->chip.direction_output = mcp23s08_direction_output;
 	mcp->chip.set = mcp23s08_set;
 	mcp->chip.dbg_show = mcp23s08_dbg_show;
-#ifdef CONFIG_OF
+#ifdef CONFIG_OF_GPIO
 	mcp->chip.of_gpio_n_cells = 2;
 	mcp->chip.of_node = dev->of_node;
 #endif
diff --git a/drivers/gpio/gpio-sa1100.c b/drivers/gpio/gpio-sa1100.c
index 0c99e8fb9af3..8d8ee0ebf14c 100644
--- a/drivers/gpio/gpio-sa1100.c
+++ b/drivers/gpio/gpio-sa1100.c
@@ -155,7 +155,7 @@ static int sa1100_gpio_irqdomain_map(struct irq_domain *d,
 {
 	irq_set_chip_and_handler(irq, &sa1100_gpio_irq_chip,
 				 handle_edge_irq);
-	irq_set_noprobe(irq);
+	irq_set_probe(irq);
 
 	return 0;
 }
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 75e7b3919ea7..a28feb3edf33 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -16,7 +16,6 @@
 #include <linux/errno.h>
 #include <linux/module.h>
 #include <linux/io.h>
-#include <linux/io-mapping.h>
 #include <linux/gpio/consumer.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 8ebc5f1eb4c0..700c56baf2de 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -426,6 +426,8 @@ struct amdgpu_mman {
 
 	/* custom LRU management */
 	struct amdgpu_mman_lru			log2_size[AMDGPU_TTM_LRU_SIZE];
+	/* guard for log2_size array, don't add anything in between */
+	struct amdgpu_mman_lru			guard;
 };
 
 int amdgpu_copy_buffer(struct amdgpu_ring *ring,
@@ -646,9 +648,9 @@ int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev);
 void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev);
 int amdgpu_gart_init(struct amdgpu_device *adev);
 void amdgpu_gart_fini(struct amdgpu_device *adev);
-void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset,
+void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
 			int pages);
-int amdgpu_gart_bind(struct amdgpu_device *adev, unsigned offset,
+int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
 		     int pages, struct page **pagelist,
 		     dma_addr_t *dma_addr, uint32_t flags);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index 983175363b06..fe872b82e619 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -321,6 +321,19 @@ bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device *
 			(le16_to_cpu(path->usConnObjectId) &
 			 OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
 
+			/* Skip TV/CV support */
+			if ((le16_to_cpu(path->usDeviceTag) ==
+			     ATOM_DEVICE_TV1_SUPPORT) ||
+			    (le16_to_cpu(path->usDeviceTag) ==
+			     ATOM_DEVICE_CV_SUPPORT))
+				continue;
+
+			if (con_obj_id >= ARRAY_SIZE(object_connector_convert)) {
+				DRM_ERROR("invalid con_obj_id %d for device tag 0x%04x\n",
+					  con_obj_id, le16_to_cpu(path->usDeviceTag));
+				continue;
+			}
+
 			connector_type =
 				object_connector_convert[con_obj_id];
 			connector_object_id = con_obj_id;
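
The second added check is the important one: `con_obj_id` is decoded from the BIOS object table, i.e. untrusted input, and was previously used to index `object_connector_convert[]` unchecked. Validating an index against the table size before the lookup is the standard defensive pattern; reduced to a stand-alone sketch with a hypothetical table:

    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    static const int object_connector_convert[] = { 0, 1, 2, 3 };

    /* Reject a firmware-supplied index before using it as a subscript;
     * skipping the entry is safer than reading past the table's end. */
    static int lookup_connector(unsigned int con_obj_id)
    {
            if (con_obj_id >= ARRAY_SIZE(object_connector_convert)) {
                    fprintf(stderr, "invalid con_obj_id %u\n", con_obj_id);
                    return -1;
            }
            return object_connector_convert[con_obj_id];
    }
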
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index 49de92600074..10b5ddf2c588 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -200,16 +200,7 @@ static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
 	atpx->is_hybrid = false;
 	if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) {
 		printk("ATPX Hybrid Graphics\n");
-#if 1
-		/* This is a temporary hack until the D3 cold support
-		 * makes it upstream. The ATPX power_control method seems
-		 * to still work on even if the system should be using
-		 * the new standardized hybrid D3 cold ACPI interface.
-		 */
-		atpx->functions.power_cntl = true;
-#else
 		atpx->functions.power_cntl = false;
-#endif
 		atpx->is_hybrid = true;
 	}
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index 921bce2df0b0..0feea347f680 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -221,7 +221,7 @@ void amdgpu_gart_table_vram_free(struct amdgpu_device *adev)
  * Unbinds the requested pages from the gart page table and
  * replaces them with the dummy page (all asics).
  */
-void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset,
+void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
 			int pages)
 {
 	unsigned t;
@@ -268,7 +268,7 @@ void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset,
  * (all asics).
  * Returns 0 for success, -EINVAL for failure.
  */
-int amdgpu_gart_bind(struct amdgpu_device *adev, unsigned offset,
+int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
 		    int pages, struct page **pagelist, dma_addr_t *dma_addr,
 		    uint32_t flags)
 {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index a31d7ef3032c..ec1282af2479 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -280,7 +280,7 @@ void amdgpu_ib_pool_fini(struct amdgpu_device *adev)
 int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
 {
 	unsigned i;
-	int r;
+	int r, ret = 0;
 
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
 		struct amdgpu_ring *ring = adev->rings[i];
@@ -301,10 +301,11 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
 			} else {
 				/* still not good, but we can live with it */
 				DRM_ERROR("amdgpu: failed testing IB on ring %d (%d).\n", i, r);
+				ret = r;
 			}
 		}
 	}
-	return 0;
+	return ret;
 }
 
 /*
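
The rework lets the loop keep testing the remaining rings after one IB test fails while still reporting the failure to the caller; previously the function always returned 0, so callers never learned a ring was bad. The keep-going-but-remember idiom in isolation (hypothetical test harness):

    /* Run every test even when one fails, and return the last non-zero
     * status instead of succeeding unconditionally. */
    static int run_all_tests(int (*tests[])(void), int n)
    {
            int i, r, ret = 0;

            for (i = 0; i < n; i++) {
                    r = tests[i]();
                    if (r)
                            ret = r;        /* remember, keep going */
            }
            return ret;
    }
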
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index ff63b88b0ffa..5cc7052e391d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -305,7 +305,7 @@ static ssize_t amdgpu_get_pp_table(struct device *dev,
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = ddev->dev_private;
 	char *table = NULL;
-	int size, i;
+	int size;
 
 	if (adev->pp_enabled)
 		size = amdgpu_dpm_get_pp_table(adev, &table);
@@ -315,10 +315,7 @@ static ssize_t amdgpu_get_pp_table(struct device *dev,
 	if (size >= PAGE_SIZE)
 		size = PAGE_SIZE - 1;
 
-	for (i = 0; i < size; i++) {
-		sprintf(buf + i, "%02x", table[i]);
-	}
-	sprintf(buf + i, "\n");
+	memcpy(buf, table, size);
 
 	return size;
 }
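
Two problems go away here. Userspace expects the raw binary table from this sysfs file, and the removed loop also corrupted its own output: `sprintf(buf + i, "%02x", table[i])` advances the destination by one byte per iteration while writing two hex digits plus a terminating NUL, so each iteration overwrote most of the previous one. Had a hex dump actually been the goal, the offset must grow by two per input byte, roughly:

    #include <stdio.h>
    #include <stddef.h>

    /* A hex dump that does not overlap itself: two output characters
     * per input byte, so the destination advances by i * 2, not i. */
    static size_t hex_dump(char *buf, const unsigned char *table, size_t size)
    {
            size_t i;

            for (i = 0; i < size; i++)
                    sprintf(buf + i * 2, "%02x", table[i]);
            return i * 2;
    }
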
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index b7742e62972a..716f2afeb6a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -251,8 +251,8 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
 
 	adev = amdgpu_get_adev(bo->bdev);
 	ring = adev->mman.buffer_funcs_ring;
-	old_start = old_mem->start << PAGE_SHIFT;
-	new_start = new_mem->start << PAGE_SHIFT;
+	old_start = (u64)old_mem->start << PAGE_SHIFT;
+	new_start = (u64)new_mem->start << PAGE_SHIFT;
 
 	switch (old_mem->mem_type) {
 	case TTM_PL_VRAM:
@@ -335,7 +335,7 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo,
 	if (unlikely(r)) {
 		goto out_cleanup;
 	}
-	r = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem);
+	r = ttm_bo_move_ttm(bo, true, interruptible, no_wait_gpu, new_mem);
 out_cleanup:
 	ttm_bo_mem_put(bo, &tmp_mem);
 	return r;
@@ -368,7 +368,7 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo,
 	if (unlikely(r)) {
 		return r;
 	}
-	r = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem);
+	r = ttm_bo_move_ttm(bo, true, interruptible, no_wait_gpu, &tmp_mem);
 	if (unlikely(r)) {
 		goto out_cleanup;
 	}
@@ -950,6 +950,8 @@ static struct list_head *amdgpu_ttm_lru_tail(struct ttm_buffer_object *tbo)
 	struct list_head *res = lru->lru[tbo->mem.mem_type];
 
 	lru->lru[tbo->mem.mem_type] = &tbo->lru;
+	while ((++lru)->lru[tbo->mem.mem_type] == res)
+		lru->lru[tbo->mem.mem_type] = &tbo->lru;
 
 	return res;
 }
@@ -960,6 +962,8 @@ static struct list_head *amdgpu_ttm_swap_lru_tail(struct ttm_buffer_object *tbo)
 	struct list_head *res = lru->swap_lru;
 
 	lru->swap_lru = &tbo->swap;
+	while ((++lru)->swap_lru == res)
+		lru->swap_lru = &tbo->swap;
 
 	return res;
 }
@@ -1011,6 +1015,10 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
 		lru->swap_lru = &adev->mman.bdev.glob->swap_lru;
 	}
 
+	for (j = 0; j < TTM_NUM_MEM_TYPES; ++j)
+		adev->mman.guard.lru[j] = NULL;
+	adev->mman.guard.swap_lru = NULL;
+
 	adev->mman.initialized = true;
 	r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM,
 			   adev->mc.real_vram_size >> PAGE_SHIFT);
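
The `(u64)` casts in the first hunk fix a classic truncation: `mem->start` is a page index, and shifting it by `PAGE_SHIFT` in 32-bit arithmetic wraps for placements above 4 GiB, so the widening has to happen before the shift. In isolation:

    #include <stdint.h>

    #define PAGE_SHIFT 12

    /* Widen before shifting: (uint64_t)(page << 12) has already lost the
     * high bits by the time the cast runs; (uint64_t)page << 12 has not. */
    static uint64_t page_to_byte_offset(uint32_t page)
    {
            return (uint64_t)page << PAGE_SHIFT;
    }
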
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index b11f4e8868d7..4aa993d19018 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -1187,7 +1187,8 @@ int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 		r = 0;
 	}
 
-error:
 	fence_put(fence);
+
+error:
 	return r;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 8e642fc48df4..80120fa4092c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1535,7 +1535,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 	r = amd_sched_entity_init(&ring->sched, &vm->entity,
 				  rq, amdgpu_sched_jobs);
 	if (r)
-		return r;
+		goto err;
 
 	vm->page_directory_fence = NULL;
 
@@ -1565,6 +1565,9 @@ error_free_page_directory:
 error_free_sched_entity:
 	amd_sched_entity_fini(&ring->sched, &vm->entity);
 
+err:
+	drm_free_large(vm->page_tables);
+
 	return r;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index e2f0e5d58d5c..a5c94b482459 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -5779,6 +5779,7 @@ static int ci_dpm_init_microcode(struct amdgpu_device *adev)
 		break;
 	case CHIP_KAVERI:
 	case CHIP_KABINI:
+	case CHIP_MULLINS:
 	default: BUG();
 	}
 
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index ee6466912497..77fdd9911c3c 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -52,6 +52,7 @@ static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev);
 static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev);
 static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev);
 static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev);
+static int cik_sdma_soft_reset(void *handle);
 
 MODULE_FIRMWARE("radeon/bonaire_sdma.bin");
 MODULE_FIRMWARE("radeon/bonaire_sdma1.bin");
@@ -1037,6 +1038,8 @@ static int cik_sdma_resume(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	cik_sdma_soft_reset(handle);
+
 	return cik_sdma_hw_init(adev);
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index d869d058ef24..425413fcaf02 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -2755,8 +2755,7 @@ static int gfx_v7_0_cp_compute_resume(struct amdgpu_device *adev)
 	u64 wb_gpu_addr;
 	u32 *buf;
 	struct bonaire_mqd *mqd;
-
-	gfx_v7_0_cp_compute_enable(adev, true);
+	struct amdgpu_ring *ring;
 
 	/* fix up chicken bits */
 	tmp = RREG32(mmCP_CPF_DEBUG);
@@ -2791,7 +2790,7 @@ static int gfx_v7_0_cp_compute_resume(struct amdgpu_device *adev)
 
 	/* init the queues.  Just two for now. */
 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
-		struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
+		ring = &adev->gfx.compute_ring[i];
 
 		if (ring->mqd_obj == NULL) {
 			r = amdgpu_bo_create(adev,
@@ -2970,6 +2969,13 @@ static int gfx_v7_0_cp_compute_resume(struct amdgpu_device *adev)
 		amdgpu_bo_unreserve(ring->mqd_obj);
 
 		ring->ready = true;
+	}
+
+	gfx_v7_0_cp_compute_enable(adev, true);
+
+	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+		ring = &adev->gfx.compute_ring[i];
+
 		r = amdgpu_ring_test_ring(ring);
 		if (r)
 			ring->ready = false;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index bff8668e9e6d..b8184617ca25 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -270,7 +270,8 @@ static const u32 tonga_mgcg_cgcg_init[] =
 
 static const u32 golden_settings_polaris11_a11[] =
 {
-	mmCB_HW_CONTROL, 0xfffdf3cf, 0x00006208,
+	mmCB_HW_CONTROL, 0x0000f3cf, 0x00007208,
+	mmCB_HW_CONTROL_2, 0x0f000000, 0x0f000000,
 	mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
 	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
 	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
@@ -279,7 +280,7 @@ static const u32 golden_settings_polaris11_a11[] =
 	mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x00000000,
 	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c,
 	mmRLC_CGCG_CGLS_CTRL_3D, 0xffffffff, 0x0001003c,
-	mmSQ_CONFIG, 0x07f80000, 0x07180000,
+	mmSQ_CONFIG, 0x07f80000, 0x01180000,
 	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
 	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
 	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f3,
@@ -301,8 +302,8 @@ static const u32 polaris11_golden_common_all[] =
 static const u32 golden_settings_polaris10_a11[] =
 {
 	mmATC_MISC_CG, 0x000c0fc0, 0x000c0200,
-	mmCB_HW_CONTROL, 0xfffdf3cf, 0x00007208,
-	mmCB_HW_CONTROL_2, 0, 0x0f000000,
+	mmCB_HW_CONTROL, 0x0001f3cf, 0x00007208,
+	mmCB_HW_CONTROL_2, 0x0f000000, 0x0f000000,
 	mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
 	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
 	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
@@ -409,6 +410,7 @@ static const u32 golden_settings_iceland_a11[] =
 	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
 	mmPA_SC_RASTER_CONFIG, 0x3f3fffff, 0x00000002,
 	mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x00000000,
+	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0000003c,
 	mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
 	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
 	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
@@ -505,8 +507,10 @@ static const u32 cz_golden_settings_a11[] =
 	mmGB_GPU_ID, 0x0000000f, 0x00000000,
 	mmPA_SC_ENHANCE, 0xffffffff, 0x00000001,
 	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
+	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0000003c,
 	mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
 	mmTA_CNTL_AUX, 0x000f000f, 0x00010000,
+	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
 	mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
 	mmTCP_ADDR_CONFIG, 0x0000000f, 0x000000f3,
 	mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00001302
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index d24a82bd0c7a..0b0f08641eed 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -144,6 +144,7 @@ static int gmc_v7_0_init_microcode(struct amdgpu_device *adev)
 		break;
 	case CHIP_KAVERI:
 	case CHIP_KABINI:
+	case CHIP_MULLINS:
 		return 0;
 	default: BUG();
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 717359d3ba8c..2aee2c6f3cd5 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -103,6 +103,11 @@ static const u32 stoney_mgcg_cgcg_init[] =
 	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
 };
 
+static const u32 golden_settings_stoney_common[] =
+{
+	mmMC_HUB_RDREQ_UVD, MC_HUB_RDREQ_UVD__PRESCALE_MASK, 0x00000004,
+	mmMC_RD_GRP_OTH, MC_RD_GRP_OTH__UVD_MASK, 0x00600000
+};
 
 static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
 {
@@ -142,6 +147,9 @@ static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
 		amdgpu_program_register_sequence(adev,
 						 stoney_mgcg_cgcg_init,
 						 (const u32)ARRAY_SIZE(stoney_mgcg_cgcg_init));
+		amdgpu_program_register_sequence(adev,
+						 golden_settings_stoney_common,
+						 (const u32)ARRAY_SIZE(golden_settings_stoney_common));
 		break;
 	default:
 		break;
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index 1351c7e834a2..a64715d90503 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -714,7 +714,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 		DRM_ERROR("amdgpu: IB test timed out\n");
 		r = -ETIMEDOUT;
 		goto err1;
-	} else if (r) {
+	} else if (r < 0) {
 		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
 		goto err1;
 	}
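
`fence_wait_timeout()`-style waiters return the remaining timeout (positive) on success, zero on timeout and a negative errno on error, so the old `else if (r)` branch treated a perfectly successful wait as a failure. Only `r < 0` is an error; the convention, sketched:

    /* Classify a fence_wait_timeout()-style result: > 0 means signalled
     * with that much timeout to spare, 0 means timed out, < 0 an error. */
    static const char *classify_wait(long r)
    {
            if (r > 0)
                    return "signalled";
            if (r == 0)
                    return "timed out";
            return "error";
    }
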
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
index e621eba63126..a7d3cb3fead0 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
@@ -184,7 +184,7 @@ u32 __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
 					sizeof(u32)) + inx;
 
 	pr_debug("kfd: get kernel queue doorbell\n"
-		 "     doorbell offset   == 0x%08d\n"
+		 "     doorbell offset   == 0x%08X\n"
 		 "     kernel address    == 0x%08lX\n",
 		 *doorbell_off, (uintptr_t)(kfd->doorbell_kernel_ptr + inx));
 
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index ef312bb75fda..963a24d46a93 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -405,7 +405,7 @@ void amd_sched_job_recovery(struct amd_gpu_scheduler *sched)
 	spin_lock(&sched->job_list_lock);
 	s_job = list_first_entry_or_null(&sched->ring_mirror_list,
 					 struct amd_sched_job, node);
-	if (s_job)
+	if (s_job && sched->timeout != MAX_SCHEDULE_TIMEOUT)
 		schedule_delayed_work(&s_job->work_tdr, sched->timeout);
 
 	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
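
`MAX_SCHEDULE_TIMEOUT` is the kernel's "no timeout" sentinel, so arming the timeout-detection work with it would queue a handler that is never supposed to fire; the added condition presumably mirrors the check used when jobs are first pushed to the scheduler. Reduced sketch:

    #include <limits.h>

    #define MAX_SCHEDULE_TIMEOUT LONG_MAX

    /* Only arm a recovery timer when a finite timeout is configured. */
    static void maybe_arm_tdr(long timeout, void (*arm)(long))
    {
            if (timeout != MAX_SCHEDULE_TIMEOUT)
                    arm(timeout);
    }
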
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
index a978381ef95b..9b17a66cf0e1 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
@@ -387,7 +387,7 @@ void atmel_hlcdc_crtc_irq(struct drm_crtc *c)
 	atmel_hlcdc_crtc_finish_page_flip(drm_crtc_to_atmel_hlcdc_crtc(c));
 }
 
-void atmel_hlcdc_crtc_reset(struct drm_crtc *crtc)
+static void atmel_hlcdc_crtc_reset(struct drm_crtc *crtc)
 {
 	struct atmel_hlcdc_crtc_state *state;
 
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
index 016c191221f3..52c527f6642a 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
@@ -320,19 +320,19 @@ atmel_hlcdc_plane_update_pos_and_size(struct atmel_hlcdc_plane *plane,
 		u32 *coeff_tab = heo_upscaling_ycoef;
 		u32 max_memsize;
 
-		if (state->crtc_w < state->src_w)
+		if (state->crtc_h < state->src_h)
 			coeff_tab = heo_downscaling_ycoef;
 		for (i = 0; i < ARRAY_SIZE(heo_upscaling_ycoef); i++)
 			atmel_hlcdc_layer_update_cfg(&plane->layer,
 						     33 + i,
 						     0xffffffff,
 						     coeff_tab[i]);
-		factor = ((8 * 256 * state->src_w) - (256 * 4)) /
-			 state->crtc_w;
+		factor = ((8 * 256 * state->src_h) - (256 * 4)) /
+			 state->crtc_h;
 		factor++;
-		max_memsize = ((factor * state->crtc_w) + (256 * 4)) /
+		max_memsize = ((factor * state->crtc_h) + (256 * 4)) /
 			      2048;
-		if (max_memsize > state->src_w)
+		if (max_memsize > state->src_h)
 			factor--;
 		factor_reg |= (factor << 16) | 0x80000000;
 	}
diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c
index 80446e2d3ab6..76bcb43e7c06 100644
--- a/drivers/gpu/drm/cirrus/cirrus_main.c
+++ b/drivers/gpu/drm/cirrus/cirrus_main.c
@@ -185,14 +185,23 @@ int cirrus_driver_load(struct drm_device *dev, unsigned long flags)
 		goto out;
 	}
 
+	/*
+	 * cirrus_modeset_init() is initializing/registering the emulated fbdev
+	 * and DRM internals can access/test some of the fields in
+	 * mode_config->funcs as part of the fbdev registration process.
+	 * Make sure dev->mode_config.funcs is properly set to avoid
+	 * dereferencing a NULL pointer.
+	 * FIXME: mode_config.funcs assignment should probably be done in
+	 * cirrus_modeset_init() (that's a common pattern seen in other DRM
+	 * drivers).
+	 */
+	dev->mode_config.funcs = &cirrus_mode_funcs;
 	r = cirrus_modeset_init(cdev);
 	if (r) {
 		dev_err(&dev->pdev->dev, "Fatal error during modeset init: %d\n", r);
 		goto out;
 	}
 
-	dev->mode_config.funcs = (void *)&cirrus_mode_funcs;
-
 	return 0;
 out:
 	cirrus_driver_unload(dev);
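
Moving the assignment ahead of `cirrus_modeset_init()` closes the window described in the new comment, in which fbdev registration could dereference `mode_config.funcs` while it was still NULL; dropping the `(void *)` cast also stops hiding any type mismatch. The publish-after-init rule in miniature (all names hypothetical):

    struct ops { void (*hook)(void); };

    struct dev_state { const struct ops *funcs; };

    /* Set every field the callee may dereference before calling into it. */
    static int driver_load(struct dev_state *st, const struct ops *ops,
                           int (*modeset_init)(struct dev_state *))
    {
            st->funcs = ops;                /* publish first ... */
            return modeset_init(st);        /* ... then register */
    }
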
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index fa3930757972..2a3ded44cf2a 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -475,7 +475,7 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
 							val,
 							-1,
 							&replaced);
-		state->color_mgmt_changed = replaced;
+		state->color_mgmt_changed |= replaced;
 		return ret;
 	} else if (property == config->ctm_property) {
 		ret = drm_atomic_replace_property_blob_from_id(crtc,
@@ -483,7 +483,7 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
 							val,
 							sizeof(struct drm_color_ctm),
 							&replaced);
-		state->color_mgmt_changed = replaced;
+		state->color_mgmt_changed |= replaced;
 		return ret;
 	} else if (property == config->gamma_lut_property) {
 		ret = drm_atomic_replace_property_blob_from_id(crtc,
@@ -491,7 +491,7 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
 							val,
 							-1,
 							&replaced);
-		state->color_mgmt_changed = replaced;
+		state->color_mgmt_changed |= replaced;
 		return ret;
 	} else if (crtc->funcs->atomic_set_property)
 		return crtc->funcs->atomic_set_property(crtc, state, property, val);
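
The switch to `|=` matters because `color_mgmt_changed` can already be true when one of these properties is written; plain assignment let a later update that replaced nothing clear a flag set by an earlier one. The accumulation idiom in isolation:

    #include <stdbool.h>

    /* Accumulate "did anything change" across several updates; with '='
     * the last no-op update would erase earlier hits. */
    static bool any_replaced(const bool *replaced, int n)
    {
            bool changed = false;
            int i;

            for (i = 0; i < n; i++)
                    changed |= replaced[i];
            return changed;
    }
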
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index f1d9f0569d7f..ddebe54cd5ca 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -1121,16 +1121,14 @@ static int drm_connector_register_all(struct drm_device *dev)
 	struct drm_connector *connector;
 	int ret;
 
-	mutex_lock(&dev->mode_config.mutex);
-
-	drm_for_each_connector(connector, dev) {
+	/* FIXME: taking the mode config mutex ends up in a clash with
+	 * fbcon/backlight registration */
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		ret = drm_connector_register(connector);
 		if (ret)
 			goto err;
 	}
 
-	mutex_unlock(&dev->mode_config.mutex);
-
 	return 0;
 
 err:
@@ -5406,6 +5404,9 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
 	struct drm_pending_vblank_event *e = NULL;
 	int ret = -EINVAL;
 
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
 	if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS ||
 	    page_flip->reserved != 0)
 		return -EINVAL;
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 7df26d4b7ad8..637a0aa4d3a0 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -74,6 +74,8 @@
 #define EDID_QUIRK_FORCE_8BPC			(1 << 8)
 /* Force 12bpc */
 #define EDID_QUIRK_FORCE_12BPC			(1 << 9)
+/* Force 6bpc */
+#define EDID_QUIRK_FORCE_6BPC			(1 << 10)
 
 struct detailed_mode_closure {
 	struct drm_connector *connector;
@@ -100,6 +102,9 @@ static struct edid_quirk {
 	/* Unknown Acer */
 	{ "ACR", 2423, EDID_QUIRK_FIRST_DETAILED_PREFERRED },
 
+	/* AEO model 0 reports 8 bpc, but is a 6 bpc panel */
+	{ "AEO", 0, EDID_QUIRK_FORCE_6BPC },
+
 	/* Belinea 10 15 55 */
 	{ "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 },
 	{ "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 },
@@ -3862,6 +3867,20 @@ static void drm_add_display_info(struct edid *edid,
 	/* HDMI deep color modes supported? Assign to info, if so */
 	drm_assign_hdmi_deep_color_info(edid, info, connector);
 
+	/*
+	 * Digital sink with "DFP 1.x compliant TMDS" according to EDID 1.3?
+	 *
+	 * For such displays, the DFP spec 1.0, section 3.10 "EDID support"
+	 * tells us to assume 8 bpc color depth if the EDID doesn't have
+	 * extensions which tell otherwise.
+	 */
+	if ((info->bpc == 0) && (edid->revision < 4) &&
+	    (edid->input & DRM_EDID_DIGITAL_TYPE_DVI)) {
+		info->bpc = 8;
+		DRM_DEBUG("%s: Assigning DFP sink color depth as %d bpc.\n",
+			  connector->name, info->bpc);
+	}
+
 	/* Only defined for 1.4 with digital displays */
 	if (edid->revision < 4)
 		return;
@@ -4082,6 +4101,9 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
 
 	drm_add_display_info(edid, &connector->display_info, connector);
 
+	if (quirks & EDID_QUIRK_FORCE_6BPC)
+		connector->display_info.bpc = 6;
+
 	if (quirks & EDID_QUIRK_FORCE_8BPC)
 		connector->display_info.bpc = 8;
 
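
Note the ordering in the last hunk: the 6 bpc quirk is applied after `drm_add_display_info()` has parsed the EDID, so it overrides the panel's wrong self-reported depth, and the stronger 8 bpc (and, presumably, 12 bpc) overrides still follow it. As a reduced sketch using the bit values from the first hunk:

    #define EDID_QUIRK_FORCE_8BPC   (1 << 8)
    #define EDID_QUIRK_FORCE_12BPC  (1 << 9)
    #define EDID_QUIRK_FORCE_6BPC   (1 << 10)

    /* Apply overrides from weakest to strongest so a later match wins. */
    static int apply_bpc_quirks(unsigned int quirks, int bpc)
    {
            if (quirks & EDID_QUIRK_FORCE_6BPC)
                    bpc = 6;
            if (quirks & EDID_QUIRK_FORCE_8BPC)
                    bpc = 8;
            if (quirks & EDID_QUIRK_FORCE_12BPC)
                    bpc = 12;
            return bpc;
    }
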
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index ce54e985d91b..0a06f9120b5a 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -464,7 +464,7 @@ static bool drm_fb_helper_is_bound(struct drm_fb_helper *fb_helper)
 
 	/* Sometimes user space wants everything disabled, so don't steal the
 	 * display if there's a master. */
-	if (lockless_dereference(dev->master))
+	if (READ_ONCE(dev->master))
 		return false;
 
 	drm_for_each_crtc(crtc, dev) {
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index 87ef34150d46..b382cf505262 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -1333,8 +1333,6 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
 	if (ret < 0)
 		return ret;
 
-	mutex_lock(&gpu->lock);
-
 	/*
 	 * TODO
 	 *
@@ -1348,16 +1346,18 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
 	if (unlikely(event == ~0U)) {
 		DRM_ERROR("no free event\n");
 		ret = -EBUSY;
-		goto out_unlock;
+		goto out_pm_put;
 	}
 
 	fence = etnaviv_gpu_fence_alloc(gpu);
 	if (!fence) {
 		event_free(gpu, event);
 		ret = -ENOMEM;
-		goto out_unlock;
+		goto out_pm_put;
 	}
 
+	mutex_lock(&gpu->lock);
+
 	gpu->event[event].fence = fence;
 	submit->fence = fence->seqno;
 	gpu->active_fence = submit->fence;
@@ -1395,9 +1395,9 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
 	hangcheck_timer_reset(gpu);
 	ret = 0;
 
-out_unlock:
 	mutex_unlock(&gpu->lock);
 
+out_pm_put:
 	etnaviv_gpu_pm_put(gpu);
 
 	return ret;
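
The reordering shrinks the critical section: the event and fence acquisitions can fail, so they now happen before `gpu->lock` is taken, and the unwind label is renamed to say what the path actually releases (`out_pm_put` drops only the power reference; the success path unlocks the mutex inline). The shape of the flow, sketched over an opaque context with hypothetical helpers:

    struct ctx;

    extern int  get_event(struct ctx *c);           /* may fail, lock-free */
    extern void lock(struct ctx *c), unlock(struct ctx *c);
    extern void queue_work(struct ctx *c);
    extern void pm_put(struct ctx *c);

    /* Acquire fallible resources first, hold the lock only around the
     * state that really needs it, and unwind exactly what is held. */
    static int submit(struct ctx *c)
    {
            int ret = get_event(c);

            if (ret < 0)
                    goto out_pm_put;

            lock(c);
            queue_work(c);
            unlock(c);
            ret = 0;

    out_pm_put:
            pm_put(c);
            return ret;
    }
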
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 95ddd56b89f0..5de36d8dcc68 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1281,6 +1281,11 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	intel_runtime_pm_enable(dev_priv);
 
+	/* Everything is in place, we can now relax! */
+	DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
+		 driver.name, driver.major, driver.minor, driver.patchlevel,
+		 driver.date, pci_name(pdev), dev_priv->drm.primary->index);
+
 	intel_runtime_pm_put(dev_priv);
 
 	return 0;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 21f939074abc..f68c78918d63 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -882,11 +882,12 @@ struct i915_gem_context {
 
 	struct i915_ctx_hang_stats hang_stats;
 
-	/* Unique identifier for this context, used by the hw for tracking */
 	unsigned long flags;
 #define CONTEXT_NO_ZEROMAP		BIT(0)
 #define CONTEXT_NO_ERROR_CAPTURE	BIT(1)
-	unsigned hw_id;
+
+	/* Unique identifier for this context, used by the hw for tracking */
+	unsigned int hw_id;
 	u32 user_handle;
 
 	u32 ggtt_alignment;
@@ -1854,6 +1855,7 @@ struct drm_i915_private {
 	enum modeset_restore modeset_restore;
 	struct mutex modeset_restore_lock;
 	struct drm_atomic_state *modeset_restore_state;
+	struct drm_modeset_acquire_ctx reset_ctx;
 
 	struct list_head vm_list; /* Global list of all address spaces */
 	struct i915_ggtt ggtt; /* VM representing the global address space */
@@ -1962,6 +1964,13 @@ struct drm_i915_private {
 	struct i915_suspend_saved_registers regfile;
 	struct vlv_s0ix_state vlv_s0ix_state;
 
+	enum {
+		I915_SKL_SAGV_UNKNOWN = 0,
+		I915_SKL_SAGV_DISABLED,
+		I915_SKL_SAGV_ENABLED,
+		I915_SKL_SAGV_NOT_CONTROLLED
+	} skl_sagv_status;
+
 	struct {
 		/*
 		 * Raw watermark latency values:
@@ -3590,6 +3599,7 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
 /* belongs in i915_gem_gtt.h */
 static inline void i915_gem_chipset_flush(struct drm_i915_private *dev_priv)
 {
+	wmb();
 	if (INTEL_GEN(dev_priv) < 6)
 		intel_gtt_chipset_flush();
 }
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 11681501d7b1..a77ce9983f69 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -879,9 +879,12 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
 		ret = i915_gem_shmem_pread(dev, obj, args, file);
 
 	/* pread for non shmem backed objects */
-	if (ret == -EFAULT || ret == -ENODEV)
+	if (ret == -EFAULT || ret == -ENODEV) {
+		intel_runtime_pm_get(to_i915(dev));
 		ret = i915_gem_gtt_pread(dev, obj, args->size,
 					args->offset, args->data_ptr);
+		intel_runtime_pm_put(to_i915(dev));
+	}
 
 out:
 	drm_gem_object_unreference(&obj->base);
@@ -1306,7 +1309,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 	 * textures). Fallback to the shmem path in that case. */
 	}
 
-	if (ret == -EFAULT) {
+	if (ret == -EFAULT || ret == -ENOSPC) {
 		if (obj->phys_handle)
 			ret = i915_gem_phys_pwrite(obj, args, file);
 		else if (i915_gem_object_has_struct_page(obj))
@@ -3169,6 +3172,8 @@ static void i915_gem_reset_engine_cleanup(struct intel_engine_cs *engine)
 	}
 
 	intel_ring_init_seqno(engine, engine->last_submitted_seqno);
+
+	engine->i915->gt.active_engines &= ~intel_engine_flag(engine);
 }
 
 void i915_gem_reset(struct drm_device *dev)
@@ -3186,6 +3191,7 @@ void i915_gem_reset(struct drm_device *dev)
 
 	for_each_engine(engine, dev_priv)
 		i915_gem_reset_engine_cleanup(engine);
+	mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0);
 
 	i915_gem_context_reset(dev);
 
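
The pread fallback goes through the GTT and therefore touches the hardware, so it must hold a runtime-PM reference for exactly that window; the shmem fast path above it stays PM-free. The bracketing pattern, reduced to hypothetical helpers:

    struct dev_priv;

    extern void runtime_pm_get(struct dev_priv *p);
    extern void runtime_pm_put(struct dev_priv *p);
    extern int  gtt_read(struct dev_priv *p, void *buf, unsigned long len);

    /* Hold the PM reference only across the hardware access. */
    static int pread_fallback(struct dev_priv *p, void *buf, unsigned long len)
    {
            int ret;

            runtime_pm_get(p);
            ret = gtt_read(p, buf, len);
            runtime_pm_put(p);
            return ret;
    }
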
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 1978633e7549..b35e5b6475b2 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -943,8 +943,6 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
 {
 	const unsigned other_rings = ~intel_engine_flag(req->engine);
 	struct i915_vma *vma;
-	uint32_t flush_domains = 0;
-	bool flush_chipset = false;
 	int ret;
 
 	list_for_each_entry(vma, vmas, exec_list) {
@@ -957,16 +955,11 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
 		}
 
 		if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
-			flush_chipset |= i915_gem_clflush_object(obj, false);
-
-		flush_domains |= obj->base.write_domain;
+			i915_gem_clflush_object(obj, false);
 	}
 
-	if (flush_chipset)
-		i915_gem_chipset_flush(req->engine->i915);
-
-	if (flush_domains & I915_GEM_DOMAIN_GTT)
-		wmb();
+	/* Unconditionally flush any chipset caches (for streaming writes). */
+	i915_gem_chipset_flush(req->engine->i915);
 
 	/* Unconditionally invalidate gpu caches and ensure that we do flush
 	 * any residual writes from the previous batch.
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 10f1e32767e6..f38ceffd82c3 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -122,8 +122,11 @@ int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
 		has_full_48bit_ppgtt =
 			IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9;
 
-	if (intel_vgpu_active(dev_priv))
-		has_full_ppgtt = false; /* emulation is too hard */
+	if (intel_vgpu_active(dev_priv)) {
+		/* emulation is too hard */
+		has_full_ppgtt = false;
+		has_full_48bit_ppgtt = false;
+	}
 
 	if (!has_aliasing_ppgtt)
 		return 0;
@@ -158,7 +161,7 @@ int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
 		return 0;
 	}
 
-	if (INTEL_GEN(dev_priv) >= 8 && i915.enable_execlists)
+	if (INTEL_GEN(dev_priv) >= 8 && i915.enable_execlists && has_full_ppgtt)
 		return has_full_48bit_ppgtt ? 3 : 2;
 	else
 		return has_aliasing_ppgtt ? 1 : 0;
@@ -2873,6 +2876,7 @@ void i915_ggtt_cleanup_hw(struct drm_device *dev)
 		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
 
 		ppgtt->base.cleanup(&ppgtt->base);
+		kfree(ppgtt);
 	}
 
 	i915_gem_cleanup_stolen(dev);
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index ce14fe09d962..bf2cad3f9e1f 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -1536,6 +1536,7 @@ enum skl_disp_power_wells {
 #define   BALANCE_LEG_MASK(port)	(7<<(8+3*(port)))
 /* Balance leg disable bits */
 #define BALANCE_LEG_DISABLE_SHIFT	23
+#define   BALANCE_LEG_DISABLE(port)	(1 << (23 + (port)))
 
 /*
  * Fence registers
@@ -7144,6 +7145,15 @@ enum {
 
 #define GEN6_PCODE_MAILBOX			_MMIO(0x138124)
 #define   GEN6_PCODE_READY			(1<<31)
+#define   GEN6_PCODE_ERROR_MASK			0xFF
+#define     GEN6_PCODE_SUCCESS			0x0
+#define     GEN6_PCODE_ILLEGAL_CMD		0x1
+#define     GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE	0x2
+#define     GEN6_PCODE_TIMEOUT			0x3
+#define     GEN6_PCODE_UNIMPLEMENTED_CMD	0xFF
+#define     GEN7_PCODE_TIMEOUT			0x2
+#define     GEN7_PCODE_ILLEGAL_DATA		0x3
+#define     GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE	0x10
 #define   GEN6_PCODE_WRITE_RC6VIDS		0x4
 #define   GEN6_PCODE_READ_RC6VIDS		0x5
 #define     GEN6_ENCODE_RC6_VID(mv)		(((mv) - 245) / 5)
@@ -7165,6 +7175,10 @@ enum {
 #define   HSW_PCODE_DE_WRITE_FREQ_REQ		0x17
 #define   DISPLAY_IPS_CONTROL			0x19
 #define   HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL	0x1A
+#define   GEN9_PCODE_SAGV_CONTROL		0x21
+#define     GEN9_SAGV_DISABLE			0x0
+#define     GEN9_SAGV_IS_DISABLED		0x1
+#define     GEN9_SAGV_ENABLE			0x3
 #define GEN6_PCODE_DATA				_MMIO(0x138128)
 #define   GEN6_PCODE_FREQ_IA_RATIO_SHIFT	8
 #define   GEN6_PCODE_FREQ_RING_RATIO_SHIFT	16
diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
index f6acb5a0e701..b81cfb3b22ec 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.c
+++ b/drivers/gpu/drm/i915/i915_vgpu.c
@@ -65,9 +65,6 @@ void i915_check_vgpu(struct drm_i915_private *dev_priv)
 
 	BUILD_BUG_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
 
-	if (!IS_HASWELL(dev_priv))
-		return;
-
 	magic = __raw_i915_read64(dev_priv, vgtif_reg(magic));
 	if (magic != VGT_MAGIC)
 		return;
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index 6700a7be7f78..d32f586f9c05 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -600,6 +600,8 @@ static void i915_audio_component_codec_wake_override(struct device *dev,
 	if (!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv))
 		return;
 
+	i915_audio_component_get_power(dev);
+
 	/*
 	 * Enable/disable generating the codec wake signal, overriding the
 	 * internal logic to generate the codec wake to controller.
@@ -615,6 +617,8 @@ static void i915_audio_component_codec_wake_override(struct device *dev,
 		I915_WRITE(HSW_AUD_CHICKENBIT, tmp);
 		usleep_range(1000, 1500);
 	}
+
+	i915_audio_component_put_power(dev);
 }
 
 /* Get CDCLK in kHz */
@@ -648,6 +652,7 @@ static int i915_audio_component_sync_audio_rate(struct device *dev,
 	    !IS_HASWELL(dev_priv))
 		return 0;
 
+	i915_audio_component_get_power(dev);
 	mutex_lock(&dev_priv->av_mutex);
 	/* 1. get the pipe */
 	intel_encoder = dev_priv->dig_port_map[port];
@@ -698,6 +703,7 @@ static int i915_audio_component_sync_audio_rate(struct device *dev,
 
  unlock:
 	mutex_unlock(&dev_priv->av_mutex);
+	i915_audio_component_put_power(dev);
 	return err;
 }
 
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index 3edb9580928e..c3b33a10c15c 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -41,15 +41,15 @@
  * be moved to FW_FAILED.
  */
 
-#define I915_CSR_KBL "i915/kbl_dmc_ver1.bin"
+#define I915_CSR_KBL "i915/kbl_dmc_ver1_01.bin"
 MODULE_FIRMWARE(I915_CSR_KBL);
 #define KBL_CSR_VERSION_REQUIRED	CSR_VERSION(1, 1)
 
-#define I915_CSR_SKL "i915/skl_dmc_ver1.bin"
+#define I915_CSR_SKL "i915/skl_dmc_ver1_26.bin"
 MODULE_FIRMWARE(I915_CSR_SKL);
-#define SKL_CSR_VERSION_REQUIRED	CSR_VERSION(1, 23)
+#define SKL_CSR_VERSION_REQUIRED	CSR_VERSION(1, 26)
 
-#define I915_CSR_BXT "i915/bxt_dmc_ver1.bin"
+#define I915_CSR_BXT "i915/bxt_dmc_ver1_07.bin"
 MODULE_FIRMWARE(I915_CSR_BXT);
 #define BXT_CSR_VERSION_REQUIRED	CSR_VERSION(1, 7)
 
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index dd1d6fe12297..1a7efac65fd5 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -145,7 +145,7 @@ static const struct ddi_buf_trans skl_ddi_translations_dp[] = {
 static const struct ddi_buf_trans skl_u_ddi_translations_dp[] = {
 	{ 0x0000201B, 0x000000A2, 0x0 },
 	{ 0x00005012, 0x00000088, 0x0 },
-	{ 0x80007011, 0x000000CD, 0x0 },
+	{ 0x80007011, 0x000000CD, 0x1 },
 	{ 0x80009010, 0x000000C0, 0x1 },
 	{ 0x0000201B, 0x0000009D, 0x0 },
 	{ 0x80005012, 0x000000C0, 0x1 },
@@ -158,7 +158,7 @@ static const struct ddi_buf_trans skl_u_ddi_translations_dp[] = {
 static const struct ddi_buf_trans skl_y_ddi_translations_dp[] = {
 	{ 0x00000018, 0x000000A2, 0x0 },
 	{ 0x00005012, 0x00000088, 0x0 },
-	{ 0x80007011, 0x000000CD, 0x0 },
+	{ 0x80007011, 0x000000CD, 0x3 },
 	{ 0x80009010, 0x000000C0, 0x3 },
 	{ 0x00000018, 0x0000009D, 0x0 },
 	{ 0x80005012, 0x000000C0, 0x3 },
@@ -388,6 +388,40 @@ skl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries)
 	}
 }
 
+static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port port)
+{
+	int n_hdmi_entries;
+	int hdmi_level;
+	int hdmi_default_entry;
+
+	hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
+
+	if (IS_BROXTON(dev_priv))
+		return hdmi_level;
+
+	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+		skl_get_buf_trans_hdmi(dev_priv, &n_hdmi_entries);
+		hdmi_default_entry = 8;
+	} else if (IS_BROADWELL(dev_priv)) {
+		n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
+		hdmi_default_entry = 7;
+	} else if (IS_HASWELL(dev_priv)) {
+		n_hdmi_entries = ARRAY_SIZE(hsw_ddi_translations_hdmi);
+		hdmi_default_entry = 6;
+	} else {
+		WARN(1, "ddi translation table missing\n");
+		n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
+		hdmi_default_entry = 7;
+	}
+
+	/* Choose a good default if VBT is badly populated */
+	if (hdmi_level == HDMI_LEVEL_SHIFT_UNKNOWN ||
+	    hdmi_level >= n_hdmi_entries)
+		hdmi_level = hdmi_default_entry;
+
+	return hdmi_level;
+}
+
 /*
  * Starting with Haswell, DDI port buffers must be programmed with correct
  * values in advance. The buffer values are different for FDI and DP modes,
@@ -399,7 +433,7 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	u32 iboost_bit = 0;
-	int i, n_hdmi_entries, n_dp_entries, n_edp_entries, hdmi_default_entry,
+	int i, n_hdmi_entries, n_dp_entries, n_edp_entries,
 	    size;
 	int hdmi_level;
 	enum port port;
@@ -410,7 +444,7 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
 	const struct ddi_buf_trans *ddi_translations;
 
 	port = intel_ddi_get_encoder_port(encoder);
-	hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
+	hdmi_level = intel_ddi_hdmi_level(dev_priv, port);
 
 	if (IS_BROXTON(dev_priv)) {
 		if (encoder->type != INTEL_OUTPUT_HDMI)
@@ -430,7 +464,6 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
 			skl_get_buf_trans_edp(dev_priv, &n_edp_entries);
 		ddi_translations_hdmi =
 				skl_get_buf_trans_hdmi(dev_priv, &n_hdmi_entries);
-		hdmi_default_entry = 8;
 		/* If we're boosting the current, set bit 31 of trans1 */
 		if (dev_priv->vbt.ddi_port_info[port].hdmi_boost_level ||
 		    dev_priv->vbt.ddi_port_info[port].dp_boost_level)
@@ -456,7 +489,6 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
 
 		n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
 		n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
-		hdmi_default_entry = 7;
 	} else if (IS_HASWELL(dev_priv)) {
 		ddi_translations_fdi = hsw_ddi_translations_fdi;
 		ddi_translations_dp = hsw_ddi_translations_dp;
@@ -464,7 +496,6 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
 		ddi_translations_hdmi = hsw_ddi_translations_hdmi;
 		n_dp_entries = n_edp_entries = ARRAY_SIZE(hsw_ddi_translations_dp);
 		n_hdmi_entries = ARRAY_SIZE(hsw_ddi_translations_hdmi);
-		hdmi_default_entry = 6;
 	} else {
 		WARN(1, "ddi translation table missing\n");
 		ddi_translations_edp = bdw_ddi_translations_dp;
@@ -474,7 +505,6 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
 		n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
 		n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
 		n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
-		hdmi_default_entry = 7;
 	}
 
 	switch (encoder->type) {
@@ -505,11 +535,6 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
 	if (encoder->type != INTEL_OUTPUT_HDMI)
 		return;
 
-	/* Choose a good default if VBT is badly populated */
-	if (hdmi_level == HDMI_LEVEL_SHIFT_UNKNOWN ||
-	    hdmi_level >= n_hdmi_entries)
-		hdmi_level = hdmi_default_entry;
-
 	/* Entry 9 is for HDMI: */
 	I915_WRITE(DDI_BUF_TRANS_LO(port, i),
 		   ddi_translations_hdmi[hdmi_level].trans1 | iboost_bit);
@@ -1379,14 +1404,30 @@ void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc)
 					TRANS_CLK_SEL_DISABLED);
 }
 
-static void skl_ddi_set_iboost(struct drm_i915_private *dev_priv,
-			       u32 level, enum port port, int type)
+static void _skl_ddi_set_iboost(struct drm_i915_private *dev_priv,
+				enum port port, uint8_t iboost)
 {
+	u32 tmp;
+
+	tmp = I915_READ(DISPIO_CR_TX_BMU_CR0);
+	tmp &= ~(BALANCE_LEG_MASK(port) | BALANCE_LEG_DISABLE(port));
+	if (iboost)
+		tmp |= iboost << BALANCE_LEG_SHIFT(port);
+	else
+		tmp |= BALANCE_LEG_DISABLE(port);
+	I915_WRITE(DISPIO_CR_TX_BMU_CR0, tmp);
+}
+
+static void skl_ddi_set_iboost(struct intel_encoder *encoder, u32 level)
+{
+	struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
+	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
+	enum port port = intel_dig_port->port;
+	int type = encoder->type;
 	const struct ddi_buf_trans *ddi_translations;
 	uint8_t iboost;
 	uint8_t dp_iboost, hdmi_iboost;
 	int n_entries;
-	u32 reg;
 
 	/* VBT may override standard boost values */
 	dp_iboost = dev_priv->vbt.ddi_port_info[port].dp_boost_level;
@@ -1428,16 +1469,10 @@ static void skl_ddi_set_iboost(struct drm_i915_private *dev_priv,
 		return;
 	}
 
-	reg = I915_READ(DISPIO_CR_TX_BMU_CR0);
-	reg &= ~BALANCE_LEG_MASK(port);
-	reg &= ~(1 << (BALANCE_LEG_DISABLE_SHIFT + port));
-
-	if (iboost)
-		reg |= iboost << BALANCE_LEG_SHIFT(port);
-	else
-		reg |= 1 << (BALANCE_LEG_DISABLE_SHIFT + port);
+	_skl_ddi_set_iboost(dev_priv, port, iboost);
 
-	I915_WRITE(DISPIO_CR_TX_BMU_CR0, reg);
+	if (port == PORT_A && intel_dig_port->max_lanes == 4)
+		_skl_ddi_set_iboost(dev_priv, PORT_E, iboost);
 }
 
 static void bxt_ddi_vswing_sequence(struct drm_i915_private *dev_priv,
@@ -1568,7 +1603,7 @@ uint32_t ddi_signal_levels(struct intel_dp *intel_dp)
1568 level = translate_signal_level(signal_levels); 1603 level = translate_signal_level(signal_levels);
1569 1604
1570 if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) 1605 if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
1571 skl_ddi_set_iboost(dev_priv, level, port, encoder->type); 1606 skl_ddi_set_iboost(encoder, level);
1572 else if (IS_BROXTON(dev_priv)) 1607 else if (IS_BROXTON(dev_priv))
1573 bxt_ddi_vswing_sequence(dev_priv, level, port, encoder->type); 1608 bxt_ddi_vswing_sequence(dev_priv, level, port, encoder->type);
1574 1609
@@ -1637,6 +1672,10 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
1637 intel_dp_stop_link_train(intel_dp); 1672 intel_dp_stop_link_train(intel_dp);
1638 } else if (type == INTEL_OUTPUT_HDMI) { 1673 } else if (type == INTEL_OUTPUT_HDMI) {
1639 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); 1674 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
1675 int level = intel_ddi_hdmi_level(dev_priv, port);
1676
1677 if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
1678 skl_ddi_set_iboost(intel_encoder, level);
1640 1679
1641 intel_hdmi->set_infoframes(encoder, 1680 intel_hdmi->set_infoframes(encoder,
1642 crtc->config->has_hdmi_sink, 1681 crtc->config->has_hdmi_sink,
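The intel_ddi.c hunks above centralize the DISPIO_CR_TX_BMU_CR0 programming in _skl_ddi_set_iboost(): clear the port's balance-leg value field together with its disable bit, then set exactly one of the two. A minimal sketch of that per-port read-modify-write pattern, with hypothetical MY_* mask/shift macros standing in for the real BALANCE_LEG_* register definitions:

/* assumed layout: a 3-bit boost value per port plus one disable bit */
#define MY_LEG_SHIFT(port)   (8 + 3 * (port))              /* hypothetical */
#define MY_LEG_MASK(port)    (0x7 << MY_LEG_SHIFT(port))   /* hypothetical */
#define MY_LEG_DISABLE(port) (1 << (16 + (port)))          /* hypothetical */

static u32 set_port_iboost(u32 reg, int port, u8 iboost)
{
	/* wipe both the value field and the disable bit for this port */
	reg &= ~(MY_LEG_MASK(port) | MY_LEG_DISABLE(port));
	if (iboost)
		reg |= (u32)iboost << MY_LEG_SHIFT(port);
	else
		reg |= MY_LEG_DISABLE(port);
	return reg;
}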
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index c457eed76f1f..175595fc3e45 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -3093,40 +3093,110 @@ static void intel_update_primary_planes(struct drm_device *dev)
3093 3093
3094 for_each_crtc(dev, crtc) { 3094 for_each_crtc(dev, crtc) {
3095 struct intel_plane *plane = to_intel_plane(crtc->primary); 3095 struct intel_plane *plane = to_intel_plane(crtc->primary);
3096 struct intel_plane_state *plane_state; 3096 struct intel_plane_state *plane_state =
3097 3097 to_intel_plane_state(plane->base.state);
3098 drm_modeset_lock_crtc(crtc, &plane->base);
3099 plane_state = to_intel_plane_state(plane->base.state);
3100 3098
3101 if (plane_state->visible) 3099 if (plane_state->visible)
3102 plane->update_plane(&plane->base, 3100 plane->update_plane(&plane->base,
3103 to_intel_crtc_state(crtc->state), 3101 to_intel_crtc_state(crtc->state),
3104 plane_state); 3102 plane_state);
3103 }
3104}
3105
3106static int
3107__intel_display_resume(struct drm_device *dev,
3108 struct drm_atomic_state *state)
3109{
3110 struct drm_crtc_state *crtc_state;
3111 struct drm_crtc *crtc;
3112 int i, ret;
3113
3114 intel_modeset_setup_hw_state(dev);
3115 i915_redisable_vga(dev);
3116
3117 if (!state)
3118 return 0;
3105 3119
3106 drm_modeset_unlock_crtc(crtc); 3120 for_each_crtc_in_state(state, crtc, crtc_state, i) {
3121 /*
3122 * Force recalculation even if we restore
3123 * current state. With fast modeset this may not result
3124 * in a modeset when the state is compatible.
3125 */
3126 crtc_state->mode_changed = true;
3107 } 3127 }
3128
3129 /* ignore any reset values/BIOS leftovers in the WM registers */
3130 to_intel_atomic_state(state)->skip_intermediate_wm = true;
3131
3132 ret = drm_atomic_commit(state);
3133
3134 WARN_ON(ret == -EDEADLK);
3135 return ret;
3108} 3136}
3109 3137
3110void intel_prepare_reset(struct drm_i915_private *dev_priv) 3138void intel_prepare_reset(struct drm_i915_private *dev_priv)
3111{ 3139{
3140 struct drm_device *dev = &dev_priv->drm;
3141 struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
3142 struct drm_atomic_state *state;
3143 int ret;
3144
3112 /* no reset support for gen2 */ 3145 /* no reset support for gen2 */
3113 if (IS_GEN2(dev_priv)) 3146 if (IS_GEN2(dev_priv))
3114 return; 3147 return;
3115 3148
3116 /* reset doesn't touch the display */ 3149 /*
3150 * Need mode_config.mutex so that we don't
3151 * trample ongoing ->detect() and whatnot.
3152 */
3153 mutex_lock(&dev->mode_config.mutex);
3154 drm_modeset_acquire_init(ctx, 0);
3155 while (1) {
3156 ret = drm_modeset_lock_all_ctx(dev, ctx);
3157 if (ret != -EDEADLK)
3158 break;
3159
3160 drm_modeset_backoff(ctx);
3161 }
3162
 3163	/* reset doesn't touch the display, but flips might get nuked anyway. */
3117 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) 3164 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
3118 return; 3165 return;
3119 3166
3120 drm_modeset_lock_all(&dev_priv->drm);
3121 /* 3167 /*
3122 * Disabling the crtcs gracefully seems nicer. Also the 3168 * Disabling the crtcs gracefully seems nicer. Also the
3123 * g33 docs say we should at least disable all the planes. 3169 * g33 docs say we should at least disable all the planes.
3124 */ 3170 */
3125 intel_display_suspend(&dev_priv->drm); 3171 state = drm_atomic_helper_duplicate_state(dev, ctx);
3172 if (IS_ERR(state)) {
3173 ret = PTR_ERR(state);
3174 state = NULL;
3175 DRM_ERROR("Duplicating state failed with %i\n", ret);
3176 goto err;
3177 }
3178
3179 ret = drm_atomic_helper_disable_all(dev, ctx);
3180 if (ret) {
3181 DRM_ERROR("Suspending crtc's failed with %i\n", ret);
3182 goto err;
3183 }
3184
3185 dev_priv->modeset_restore_state = state;
3186 state->acquire_ctx = ctx;
3187 return;
3188
3189err:
3190 drm_atomic_state_free(state);
3126} 3191}
3127 3192
3128void intel_finish_reset(struct drm_i915_private *dev_priv) 3193void intel_finish_reset(struct drm_i915_private *dev_priv)
3129{ 3194{
3195 struct drm_device *dev = &dev_priv->drm;
3196 struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
3197 struct drm_atomic_state *state = dev_priv->modeset_restore_state;
3198 int ret;
3199
3130 /* 3200 /*
3131 * Flips in the rings will be nuked by the reset, 3201 * Flips in the rings will be nuked by the reset,
3132 * so complete all pending flips so that user space 3202 * so complete all pending flips so that user space
@@ -3138,6 +3208,8 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
3138 if (IS_GEN2(dev_priv)) 3208 if (IS_GEN2(dev_priv))
3139 return; 3209 return;
3140 3210
3211 dev_priv->modeset_restore_state = NULL;
3212
3141 /* reset doesn't touch the display */ 3213 /* reset doesn't touch the display */
3142 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) { 3214 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) {
3143 /* 3215 /*
@@ -3149,29 +3221,32 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
3149 * FIXME: Atomic will make this obsolete since we won't schedule 3221 * FIXME: Atomic will make this obsolete since we won't schedule
3150 * CS-based flips (which might get lost in gpu resets) any more. 3222 * CS-based flips (which might get lost in gpu resets) any more.
3151 */ 3223 */
3152 intel_update_primary_planes(&dev_priv->drm); 3224 intel_update_primary_planes(dev);
3153 return; 3225 } else {
3154 } 3226 /*
3155 3227 * The display has been reset as well,
3156 /* 3228 * so need a full re-initialization.
3157 * The display has been reset as well, 3229 */
3158 * so need a full re-initialization. 3230 intel_runtime_pm_disable_interrupts(dev_priv);
3159 */ 3231 intel_runtime_pm_enable_interrupts(dev_priv);
3160 intel_runtime_pm_disable_interrupts(dev_priv);
3161 intel_runtime_pm_enable_interrupts(dev_priv);
3162 3232
3163 intel_modeset_init_hw(&dev_priv->drm); 3233 intel_modeset_init_hw(dev);
3164 3234
3165 spin_lock_irq(&dev_priv->irq_lock); 3235 spin_lock_irq(&dev_priv->irq_lock);
3166 if (dev_priv->display.hpd_irq_setup) 3236 if (dev_priv->display.hpd_irq_setup)
3167 dev_priv->display.hpd_irq_setup(dev_priv); 3237 dev_priv->display.hpd_irq_setup(dev_priv);
3168 spin_unlock_irq(&dev_priv->irq_lock); 3238 spin_unlock_irq(&dev_priv->irq_lock);
3169 3239
3170 intel_display_resume(&dev_priv->drm); 3240 ret = __intel_display_resume(dev, state);
3241 if (ret)
3242 DRM_ERROR("Restoring old state failed with %i\n", ret);
3171 3243
3172 intel_hpd_init(dev_priv); 3244 intel_hpd_init(dev_priv);
3245 }
3173 3246
3174 drm_modeset_unlock_all(&dev_priv->drm); 3247 drm_modeset_drop_locks(ctx);
3248 drm_modeset_acquire_fini(ctx);
3249 mutex_unlock(&dev->mode_config.mutex);
3175} 3250}
3176 3251
3177static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc) 3252static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
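Both intel_prepare_reset() and the reworked intel_display_resume() above use the standard DRM acquire-context dance: try to take every modeset lock, and on -EDEADLK drop them all, back off, and retry. Distilled from the code above into a stand-alone sketch (error paths elided):

static void lock_all_modeset_locks(struct drm_device *dev,
				   struct drm_modeset_acquire_ctx *ctx)
{
	int ret;

	drm_modeset_acquire_init(ctx, 0);
	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, ctx);
		if (ret != -EDEADLK)
			break;
		/* another thread won the lock ordering; drop and retry */
		drm_modeset_backoff(ctx);
	}
	/* pair with drm_modeset_drop_locks()/drm_modeset_acquire_fini() */
}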
@@ -5691,15 +5766,7 @@ static bool skl_cdclk_pcu_ready(struct drm_i915_private *dev_priv)
5691 5766
5692static bool skl_cdclk_wait_for_pcu_ready(struct drm_i915_private *dev_priv) 5767static bool skl_cdclk_wait_for_pcu_ready(struct drm_i915_private *dev_priv)
5693{ 5768{
5694 unsigned int i; 5769 return _wait_for(skl_cdclk_pcu_ready(dev_priv), 3000, 10) == 0;
5695
5696 for (i = 0; i < 15; i++) {
5697 if (skl_cdclk_pcu_ready(dev_priv))
5698 return true;
5699 udelay(10);
5700 }
5701
5702 return false;
5703} 5770}
5704 5771
5705static void skl_set_cdclk(struct drm_i915_private *dev_priv, int cdclk, int vco) 5772static void skl_set_cdclk(struct drm_i915_private *dev_priv, int cdclk, int vco)
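The skl_cdclk_wait_for_pcu_ready() change swaps an open-coded 150 µs poll (15 iterations, 10 µs apart) for i915's _wait_for(COND, timeout_us, poll_us) macro with a much longer 3 ms budget; _wait_for() evaluates to 0 once COND becomes true and -ETIMEDOUT otherwise. Roughly equivalent open-coded behaviour, as a sketch:

static bool wait_for_pcu_ready(struct drm_i915_private *dev_priv)
{
	unsigned long timeout = jiffies + usecs_to_jiffies(3000) + 1;

	for (;;) {
		if (skl_cdclk_pcu_ready(dev_priv))
			return true;		/* _wait_for() would return 0 */
		if (time_after(jiffies, timeout))
			return false;		/* ... or -ETIMEDOUT */
		usleep_range(10, 20);		/* the 10 µs poll interval */
	}
}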
@@ -12114,21 +12181,11 @@ connected_sink_compute_bpp(struct intel_connector *connector,
12114 pipe_config->pipe_bpp = connector->base.display_info.bpc*3; 12181 pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
12115 } 12182 }
12116 12183
12117 /* Clamp bpp to default limit on screens without EDID 1.4 */ 12184 /* Clamp bpp to 8 on screens without EDID 1.4 */
12118 if (connector->base.display_info.bpc == 0) { 12185 if (connector->base.display_info.bpc == 0 && bpp > 24) {
12119 int type = connector->base.connector_type; 12186 DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
12120 int clamp_bpp = 24; 12187 bpp);
12121 12188 pipe_config->pipe_bpp = 24;
12122 /* Fall back to 18 bpp when DP sink capability is unknown. */
12123 if (type == DRM_MODE_CONNECTOR_DisplayPort ||
12124 type == DRM_MODE_CONNECTOR_eDP)
12125 clamp_bpp = 18;
12126
12127 if (bpp > clamp_bpp) {
12128 DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of %d\n",
12129 bpp, clamp_bpp);
12130 pipe_config->pipe_bpp = clamp_bpp;
12131 }
12132 } 12189 }
12133} 12190}
12134 12191
@@ -13702,6 +13759,13 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
13702 intel_state->cdclk_pll_vco != dev_priv->cdclk_pll.vco)) 13759 intel_state->cdclk_pll_vco != dev_priv->cdclk_pll.vco))
13703 dev_priv->display.modeset_commit_cdclk(state); 13760 dev_priv->display.modeset_commit_cdclk(state);
13704 13761
13762 /*
13763 * SKL workaround: bspec recommends we disable the SAGV when we
 13764	 * have more than one pipe enabled
13765 */
13766 if (IS_SKYLAKE(dev_priv) && !skl_can_enable_sagv(state))
13767 skl_disable_sagv(dev_priv);
13768
13705 intel_modeset_verify_disabled(dev); 13769 intel_modeset_verify_disabled(dev);
13706 } 13770 }
13707 13771
@@ -13775,6 +13839,10 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
13775 intel_modeset_verify_crtc(crtc, old_crtc_state, crtc->state); 13839 intel_modeset_verify_crtc(crtc, old_crtc_state, crtc->state);
13776 } 13840 }
13777 13841
13842 if (IS_SKYLAKE(dev_priv) && intel_state->modeset &&
13843 skl_can_enable_sagv(state))
13844 skl_enable_sagv(dev_priv);
13845
13778 drm_atomic_helper_commit_hw_done(state); 13846 drm_atomic_helper_commit_hw_done(state);
13779 13847
13780 if (intel_state->modeset) 13848 if (intel_state->modeset)
@@ -16174,9 +16242,10 @@ void intel_display_resume(struct drm_device *dev)
16174 struct drm_atomic_state *state = dev_priv->modeset_restore_state; 16242 struct drm_atomic_state *state = dev_priv->modeset_restore_state;
16175 struct drm_modeset_acquire_ctx ctx; 16243 struct drm_modeset_acquire_ctx ctx;
16176 int ret; 16244 int ret;
16177 bool setup = false;
16178 16245
16179 dev_priv->modeset_restore_state = NULL; 16246 dev_priv->modeset_restore_state = NULL;
16247 if (state)
16248 state->acquire_ctx = &ctx;
16180 16249
16181 /* 16250 /*
16182	 * This is a kludge because with real atomic modeset mode_config.mutex	16251
@@ -16187,43 +16256,17 @@ void intel_display_resume(struct drm_device *dev)
16187 mutex_lock(&dev->mode_config.mutex); 16256 mutex_lock(&dev->mode_config.mutex);
16188 drm_modeset_acquire_init(&ctx, 0); 16257 drm_modeset_acquire_init(&ctx, 0);
16189 16258
16190retry: 16259 while (1) {
16191 ret = drm_modeset_lock_all_ctx(dev, &ctx); 16260 ret = drm_modeset_lock_all_ctx(dev, &ctx);
16192 16261 if (ret != -EDEADLK)
16193 if (ret == 0 && !setup) { 16262 break;
16194 setup = true;
16195
16196 intel_modeset_setup_hw_state(dev);
16197 i915_redisable_vga(dev);
16198 }
16199
16200 if (ret == 0 && state) {
16201 struct drm_crtc_state *crtc_state;
16202 struct drm_crtc *crtc;
16203 int i;
16204
16205 state->acquire_ctx = &ctx;
16206
16207 /* ignore any reset values/BIOS leftovers in the WM registers */
16208 to_intel_atomic_state(state)->skip_intermediate_wm = true;
16209
16210 for_each_crtc_in_state(state, crtc, crtc_state, i) {
16211 /*
16212 * Force recalculation even if we restore
16213 * current state. With fast modeset this may not result
16214 * in a modeset when the state is compatible.
16215 */
16216 crtc_state->mode_changed = true;
16217 }
16218
16219 ret = drm_atomic_commit(state);
16220 }
16221 16263
16222 if (ret == -EDEADLK) {
16223 drm_modeset_backoff(&ctx); 16264 drm_modeset_backoff(&ctx);
16224 goto retry;
16225 } 16265 }
16226 16266
16267 if (!ret)
16268 ret = __intel_display_resume(dev, state);
16269
16227 drm_modeset_drop_locks(&ctx); 16270 drm_modeset_drop_locks(&ctx);
16228 drm_modeset_acquire_fini(&ctx); 16271 drm_modeset_acquire_fini(&ctx);
16229 mutex_unlock(&dev->mode_config.mutex); 16272 mutex_unlock(&dev->mode_config.mutex);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index cc937a19b1ba..ff399b9a5c1f 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -1716,6 +1716,9 @@ void ilk_wm_get_hw_state(struct drm_device *dev);
1716void skl_wm_get_hw_state(struct drm_device *dev); 1716void skl_wm_get_hw_state(struct drm_device *dev);
1717void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv, 1717void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
1718 struct skl_ddb_allocation *ddb /* out */); 1718 struct skl_ddb_allocation *ddb /* out */);
1719bool skl_can_enable_sagv(struct drm_atomic_state *state);
1720int skl_enable_sagv(struct drm_i915_private *dev_priv);
1721int skl_disable_sagv(struct drm_i915_private *dev_priv);
1719uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config); 1722uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config);
1720bool ilk_disable_lp_wm(struct drm_device *dev); 1723bool ilk_disable_lp_wm(struct drm_device *dev);
1721int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6); 1724int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6);
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 47bdf9dad0d3..b9e5a63a7c9e 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -554,7 +554,6 @@ void intel_dvo_init(struct drm_device *dev)
554 return; 554 return;
555 } 555 }
556 556
557 drm_encoder_cleanup(&intel_encoder->base);
558 kfree(intel_dvo); 557 kfree(intel_dvo);
559 kfree(intel_connector); 558 kfree(intel_connector);
560} 559}
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index 6a7ad3ed1463..3836a1c79714 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -1230,12 +1230,29 @@ static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv)
1230 if (i915.enable_fbc >= 0) 1230 if (i915.enable_fbc >= 0)
1231 return !!i915.enable_fbc; 1231 return !!i915.enable_fbc;
1232 1232
1233 if (!HAS_FBC(dev_priv))
1234 return 0;
1235
1233 if (IS_BROADWELL(dev_priv)) 1236 if (IS_BROADWELL(dev_priv))
1234 return 1; 1237 return 1;
1235 1238
1236 return 0; 1239 return 0;
1237} 1240}
1238 1241
1242static bool need_fbc_vtd_wa(struct drm_i915_private *dev_priv)
1243{
1244#ifdef CONFIG_INTEL_IOMMU
1245 /* WaFbcTurnOffFbcWhenHyperVisorIsUsed:skl,bxt */
1246 if (intel_iommu_gfx_mapped &&
1247 (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))) {
1248 DRM_INFO("Disabling framebuffer compression (FBC) to prevent screen flicker with VT-d enabled\n");
1249 return true;
1250 }
1251#endif
1252
1253 return false;
1254}
1255
1239/** 1256/**
1240 * intel_fbc_init - Initialize FBC 1257 * intel_fbc_init - Initialize FBC
1241 * @dev_priv: the i915 device 1258 * @dev_priv: the i915 device
@@ -1253,6 +1270,9 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
1253 fbc->active = false; 1270 fbc->active = false;
1254 fbc->work.scheduled = false; 1271 fbc->work.scheduled = false;
1255 1272
1273 if (need_fbc_vtd_wa(dev_priv))
1274 mkwrite_device_info(dev_priv)->has_fbc = false;
1275
1256 i915.enable_fbc = intel_sanitize_fbc_option(dev_priv); 1276 i915.enable_fbc = intel_sanitize_fbc_option(dev_priv);
1257 DRM_DEBUG_KMS("Sanitized enable_fbc value: %d\n", i915.enable_fbc); 1277 DRM_DEBUG_KMS("Sanitized enable_fbc value: %d\n", i915.enable_fbc);
1258 1278
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 86b00c6db1a6..3e3632c18733 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -782,7 +782,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
782 struct intel_fbdev *ifbdev = dev_priv->fbdev; 782 struct intel_fbdev *ifbdev = dev_priv->fbdev;
783 struct fb_info *info; 783 struct fb_info *info;
784 784
785 if (!ifbdev) 785 if (!ifbdev || !ifbdev->fb)
786 return; 786 return;
787 787
788 info = ifbdev->helper.fbdev; 788 info = ifbdev->helper.fbdev;
@@ -827,31 +827,28 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
827 827
828void intel_fbdev_output_poll_changed(struct drm_device *dev) 828void intel_fbdev_output_poll_changed(struct drm_device *dev)
829{ 829{
830 struct drm_i915_private *dev_priv = to_i915(dev); 830 struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
831 if (dev_priv->fbdev) 831
832 drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper); 832 if (ifbdev && ifbdev->fb)
833 drm_fb_helper_hotplug_event(&ifbdev->helper);
833} 834}
834 835
835void intel_fbdev_restore_mode(struct drm_device *dev) 836void intel_fbdev_restore_mode(struct drm_device *dev)
836{ 837{
837 int ret; 838 struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
838 struct drm_i915_private *dev_priv = to_i915(dev);
839 struct intel_fbdev *ifbdev = dev_priv->fbdev;
840 struct drm_fb_helper *fb_helper;
841 839
842 if (!ifbdev) 840 if (!ifbdev)
843 return; 841 return;
844 842
845 intel_fbdev_sync(ifbdev); 843 intel_fbdev_sync(ifbdev);
844 if (!ifbdev->fb)
845 return;
846 846
847 fb_helper = &ifbdev->helper; 847 if (drm_fb_helper_restore_fbdev_mode_unlocked(&ifbdev->helper)) {
848
849 ret = drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper);
850 if (ret) {
851 DRM_DEBUG("failed to restore crtc mode\n"); 848 DRM_DEBUG("failed to restore crtc mode\n");
852 } else { 849 } else {
853 mutex_lock(&fb_helper->dev->struct_mutex); 850 mutex_lock(&dev->struct_mutex);
854 intel_fb_obj_invalidate(ifbdev->fb->obj, ORIGIN_GTT); 851 intel_fb_obj_invalidate(ifbdev->fb->obj, ORIGIN_GTT);
855 mutex_unlock(&fb_helper->dev->struct_mutex); 852 mutex_unlock(&dev->struct_mutex);
856 } 853 }
857} 854}
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index adca262d591a..7acbbbf97833 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -1047,6 +1047,23 @@ err_out:
1047 return err; 1047 return err;
1048} 1048}
1049 1049
1050static int intel_use_opregion_panel_type_callback(const struct dmi_system_id *id)
1051{
1052 DRM_INFO("Using panel type from OpRegion on %s\n", id->ident);
1053 return 1;
1054}
1055
1056static const struct dmi_system_id intel_use_opregion_panel_type[] = {
1057 {
1058 .callback = intel_use_opregion_panel_type_callback,
1059 .ident = "Conrac GmbH IX45GM2",
1060 .matches = {DMI_MATCH(DMI_SYS_VENDOR, "Conrac GmbH"),
1061 DMI_MATCH(DMI_PRODUCT_NAME, "IX45GM2"),
1062 },
1063 },
1064 { }
1065};
1066
1050int 1067int
1051intel_opregion_get_panel_type(struct drm_i915_private *dev_priv) 1068intel_opregion_get_panel_type(struct drm_i915_private *dev_priv)
1052{ 1069{
@@ -1073,6 +1090,16 @@ intel_opregion_get_panel_type(struct drm_i915_private *dev_priv)
1073 } 1090 }
1074 1091
1075 /* 1092 /*
 1093	 * So far we know that some machines must use it, others must not use it.
1094 * There doesn't seem to be any way to determine which way to go, except
1095 * via a quirk list :(
1096 */
1097 if (!dmi_check_system(intel_use_opregion_panel_type)) {
1098 DRM_DEBUG_KMS("Ignoring OpRegion panel type (%d)\n", ret - 1);
1099 return -ENODEV;
1100 }
1101
1102 /*
1076 * FIXME On Dell XPS 13 9350 the OpRegion panel type (0) gives us 1103 * FIXME On Dell XPS 13 9350 the OpRegion panel type (0) gives us
1077 * low vswing for eDP, whereas the VBT panel type (2) gives us normal 1104 * low vswing for eDP, whereas the VBT panel type (2) gives us normal
1078 * vswing instead. Low vswing results in some display flickers, so 1105 * vswing instead. Low vswing results in some display flickers, so
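dmi_check_system() walks the quirk table added above, compares each DMI_MATCH() field against the firmware's DMI strings, runs the callback of every matching entry, and returns the number of matches, so trusting the OpRegion panel type is opt-in per machine. Extending the list is one more array entry before the zeroed terminator; the vendor/product strings here are placeholders, not a real machine:

static const struct dmi_system_id more_panel_type_quirks[] = {
	{
		.callback = intel_use_opregion_panel_type_callback,
		.ident = "Example Vendor ExampleBook",
		.matches = {DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
			    DMI_MATCH(DMI_PRODUCT_NAME, "ExampleBook"),
		},
	},
	{ }	/* zeroed sentinel terminates the table */
};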
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index f4f3fcc8b3be..2d2481392824 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2852,6 +2852,7 @@ bool ilk_disable_lp_wm(struct drm_device *dev)
2852 2852
2853#define SKL_DDB_SIZE 896 /* in blocks */ 2853#define SKL_DDB_SIZE 896 /* in blocks */
2854#define BXT_DDB_SIZE 512 2854#define BXT_DDB_SIZE 512
2855#define SKL_SAGV_BLOCK_TIME 30 /* µs */
2855 2856
2856/* 2857/*
2857 * Return the index of a plane in the SKL DDB and wm result arrays. Primary 2858 * Return the index of a plane in the SKL DDB and wm result arrays. Primary
@@ -2875,6 +2876,153 @@ skl_wm_plane_id(const struct intel_plane *plane)
2875 } 2876 }
2876} 2877}
2877 2878
2879/*
2880 * SAGV dynamically adjusts the system agent voltage and clock frequencies
2881 * depending on power and performance requirements. The display engine access
2882 * to system memory is blocked during the adjustment time. Because of the
2883 * blocking time, having this enabled can cause full system hangs and/or pipe
2884 * underruns if we don't meet all of the following requirements:
2885 *
2886 * - <= 1 pipe enabled
2887 * - All planes can enable watermarks for latencies >= SAGV engine block time
2888 * - We're not using an interlaced display configuration
2889 */
2890int
2891skl_enable_sagv(struct drm_i915_private *dev_priv)
2892{
2893 int ret;
2894
2895 if (dev_priv->skl_sagv_status == I915_SKL_SAGV_NOT_CONTROLLED ||
2896 dev_priv->skl_sagv_status == I915_SKL_SAGV_ENABLED)
2897 return 0;
2898
2899 DRM_DEBUG_KMS("Enabling the SAGV\n");
2900 mutex_lock(&dev_priv->rps.hw_lock);
2901
2902 ret = sandybridge_pcode_write(dev_priv, GEN9_PCODE_SAGV_CONTROL,
2903 GEN9_SAGV_ENABLE);
2904
2905 /* We don't need to wait for the SAGV when enabling */
2906 mutex_unlock(&dev_priv->rps.hw_lock);
2907
2908 /*
2909 * Some skl systems, pre-release machines in particular,
2910 * don't actually have an SAGV.
2911 */
2912 if (ret == -ENXIO) {
2913 DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
2914 dev_priv->skl_sagv_status = I915_SKL_SAGV_NOT_CONTROLLED;
2915 return 0;
2916 } else if (ret < 0) {
2917 DRM_ERROR("Failed to enable the SAGV\n");
2918 return ret;
2919 }
2920
2921 dev_priv->skl_sagv_status = I915_SKL_SAGV_ENABLED;
2922 return 0;
2923}
2924
2925static int
2926skl_do_sagv_disable(struct drm_i915_private *dev_priv)
2927{
2928 int ret;
2929 uint32_t temp = GEN9_SAGV_DISABLE;
2930
2931 ret = sandybridge_pcode_read(dev_priv, GEN9_PCODE_SAGV_CONTROL,
2932 &temp);
2933 if (ret)
2934 return ret;
2935 else
2936 return temp & GEN9_SAGV_IS_DISABLED;
2937}
2938
2939int
2940skl_disable_sagv(struct drm_i915_private *dev_priv)
2941{
2942 int ret, result;
2943
2944 if (dev_priv->skl_sagv_status == I915_SKL_SAGV_NOT_CONTROLLED ||
2945 dev_priv->skl_sagv_status == I915_SKL_SAGV_DISABLED)
2946 return 0;
2947
2948 DRM_DEBUG_KMS("Disabling the SAGV\n");
2949 mutex_lock(&dev_priv->rps.hw_lock);
2950
2951 /* bspec says to keep retrying for at least 1 ms */
2952 ret = wait_for(result = skl_do_sagv_disable(dev_priv), 1);
2953 mutex_unlock(&dev_priv->rps.hw_lock);
2954
2955 if (ret == -ETIMEDOUT) {
2956 DRM_ERROR("Request to disable SAGV timed out\n");
2957 return -ETIMEDOUT;
2958 }
2959
2960 /*
2961 * Some skl systems, pre-release machines in particular,
2962 * don't actually have an SAGV.
2963 */
2964 if (result == -ENXIO) {
2965 DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
2966 dev_priv->skl_sagv_status = I915_SKL_SAGV_NOT_CONTROLLED;
2967 return 0;
2968 } else if (result < 0) {
2969 DRM_ERROR("Failed to disable the SAGV\n");
2970 return result;
2971 }
2972
2973 dev_priv->skl_sagv_status = I915_SKL_SAGV_DISABLED;
2974 return 0;
2975}
2976
2977bool skl_can_enable_sagv(struct drm_atomic_state *state)
2978{
2979 struct drm_device *dev = state->dev;
2980 struct drm_i915_private *dev_priv = to_i915(dev);
2981 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
2982 struct drm_crtc *crtc;
2983 enum pipe pipe;
2984 int level, plane;
2985
2986 /*
2987 * SKL workaround: bspec recommends we disable the SAGV when we have
 2988	 * more than one pipe enabled
2989 *
2990 * If there are no active CRTCs, no additional checks need be performed
2991 */
2992 if (hweight32(intel_state->active_crtcs) == 0)
2993 return true;
2994 else if (hweight32(intel_state->active_crtcs) > 1)
2995 return false;
2996
2997 /* Since we're now guaranteed to only have one active CRTC... */
2998 pipe = ffs(intel_state->active_crtcs) - 1;
2999 crtc = dev_priv->pipe_to_crtc_mapping[pipe];
3000
3001 if (crtc->state->mode.flags & DRM_MODE_FLAG_INTERLACE)
3002 return false;
3003
3004 for_each_plane(dev_priv, pipe, plane) {
3005 /* Skip this plane if it's not enabled */
3006 if (intel_state->wm_results.plane[pipe][plane][0] == 0)
3007 continue;
3008
3009 /* Find the highest enabled wm level for this plane */
3010 for (level = ilk_wm_max_level(dev);
3011 intel_state->wm_results.plane[pipe][plane][level] == 0; --level)
3012 { }
3013
3014 /*
3015 * If any of the planes on this pipe don't enable wm levels
 3016	 * that incur memory latencies higher than 30µs we can't enable
3017 * the SAGV
3018 */
3019 if (dev_priv->wm.skl_latency[level] < SKL_SAGV_BLOCK_TIME)
3020 return false;
3021 }
3022
3023 return true;
3024}
3025
2878static void 3026static void
2879skl_ddb_get_pipe_allocation_limits(struct drm_device *dev, 3027skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
2880 const struct intel_crtc_state *cstate, 3028 const struct intel_crtc_state *cstate,
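Worth noting in skl_disable_sagv() above is the embedded assignment in wait_for(result = skl_do_sagv_disable(dev_priv), 1): wait_for(COND, ms) polls COND until it is true or the timeout expires, and stashing the helper's last return value lets the caller tell a timeout apart from a pcode error afterwards. The shape of the idiom, with a hypothetical hw_op() helper that returns a negative errno or a nonzero "done" flag:

static int poll_with_status(struct drm_i915_private *dev_priv)
{
	int ret, result;

	/* retry hw_op() for up to 1 ms, keeping its last return value */
	ret = wait_for(result = hw_op(dev_priv), 1);
	if (ret == -ETIMEDOUT)
		return ret;	/* hw_op() never reported completion */
	if (result < 0)
		return result;	/* hw_op() itself failed, e.g. -ENXIO */
	return 0;
}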
@@ -3107,8 +3255,6 @@ skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate)
3107 total_data_rate += intel_cstate->wm.skl.plane_y_data_rate[id]; 3255 total_data_rate += intel_cstate->wm.skl.plane_y_data_rate[id];
3108 } 3256 }
3109 3257
3110 WARN_ON(cstate->plane_mask && total_data_rate == 0);
3111
3112 return total_data_rate; 3258 return total_data_rate;
3113} 3259}
3114 3260
@@ -3344,6 +3490,8 @@ static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
3344 plane_bytes_per_line *= 4; 3490 plane_bytes_per_line *= 4;
3345 plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512); 3491 plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
3346 plane_blocks_per_line /= 4; 3492 plane_blocks_per_line /= 4;
3493 } else if (tiling == DRM_FORMAT_MOD_NONE) {
3494 plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512) + 1;
3347 } else { 3495 } else {
3348 plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512); 3496 plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
3349 } 3497 }
@@ -3910,9 +4058,24 @@ skl_compute_ddb(struct drm_atomic_state *state)
3910 * pretend that all pipes switched active status so that we'll 4058 * pretend that all pipes switched active status so that we'll
3911 * ensure a full DDB recompute. 4059 * ensure a full DDB recompute.
3912 */ 4060 */
3913 if (dev_priv->wm.distrust_bios_wm) 4061 if (dev_priv->wm.distrust_bios_wm) {
4062 ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
4063 state->acquire_ctx);
4064 if (ret)
4065 return ret;
4066
3914 intel_state->active_pipe_changes = ~0; 4067 intel_state->active_pipe_changes = ~0;
3915 4068
4069 /*
 4070	 * We usually only initialize intel_state->active_crtcs if
 4071	 * we're doing a modeset; make sure this field is always
4072 * initialized during the sanitization process that happens
4073 * on the first commit too.
4074 */
4075 if (!intel_state->modeset)
4076 intel_state->active_crtcs = dev_priv->active_crtcs;
4077 }
4078
3916 /* 4079 /*
3917 * If the modeset changes which CRTC's are active, we need to 4080 * If the modeset changes which CRTC's are active, we need to
3918 * recompute the DDB allocation for *all* active pipes, even 4081 * recompute the DDB allocation for *all* active pipes, even
@@ -3941,11 +4104,33 @@ skl_compute_ddb(struct drm_atomic_state *state)
3941 ret = skl_allocate_pipe_ddb(cstate, ddb); 4104 ret = skl_allocate_pipe_ddb(cstate, ddb);
3942 if (ret) 4105 if (ret)
3943 return ret; 4106 return ret;
4107
4108 ret = drm_atomic_add_affected_planes(state, &intel_crtc->base);
4109 if (ret)
4110 return ret;
3944 } 4111 }
3945 4112
3946 return 0; 4113 return 0;
3947} 4114}
3948 4115
4116static void
4117skl_copy_wm_for_pipe(struct skl_wm_values *dst,
4118 struct skl_wm_values *src,
4119 enum pipe pipe)
4120{
4121 dst->wm_linetime[pipe] = src->wm_linetime[pipe];
4122 memcpy(dst->plane[pipe], src->plane[pipe],
4123 sizeof(dst->plane[pipe]));
4124 memcpy(dst->plane_trans[pipe], src->plane_trans[pipe],
4125 sizeof(dst->plane_trans[pipe]));
4126
4127 dst->ddb.pipe[pipe] = src->ddb.pipe[pipe];
4128 memcpy(dst->ddb.y_plane[pipe], src->ddb.y_plane[pipe],
4129 sizeof(dst->ddb.y_plane[pipe]));
4130 memcpy(dst->ddb.plane[pipe], src->ddb.plane[pipe],
4131 sizeof(dst->ddb.plane[pipe]));
4132}
4133
3949static int 4134static int
3950skl_compute_wm(struct drm_atomic_state *state) 4135skl_compute_wm(struct drm_atomic_state *state)
3951{ 4136{
@@ -4018,8 +4203,10 @@ static void skl_update_wm(struct drm_crtc *crtc)
4018 struct drm_device *dev = crtc->dev; 4203 struct drm_device *dev = crtc->dev;
4019 struct drm_i915_private *dev_priv = to_i915(dev); 4204 struct drm_i915_private *dev_priv = to_i915(dev);
4020 struct skl_wm_values *results = &dev_priv->wm.skl_results; 4205 struct skl_wm_values *results = &dev_priv->wm.skl_results;
4206 struct skl_wm_values *hw_vals = &dev_priv->wm.skl_hw;
4021 struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state); 4207 struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
4022 struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal; 4208 struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal;
4209 int pipe;
4023 4210
4024 if ((results->dirty_pipes & drm_crtc_mask(crtc)) == 0) 4211 if ((results->dirty_pipes & drm_crtc_mask(crtc)) == 0)
4025 return; 4212 return;
@@ -4031,8 +4218,12 @@ static void skl_update_wm(struct drm_crtc *crtc)
4031 skl_write_wm_values(dev_priv, results); 4218 skl_write_wm_values(dev_priv, results);
4032 skl_flush_wm_values(dev_priv, results); 4219 skl_flush_wm_values(dev_priv, results);
4033 4220
4034 /* store the new configuration */ 4221 /*
4035 dev_priv->wm.skl_hw = *results; 4222 * Store the new configuration (but only for the pipes that have
4223 * changed; the other values weren't recomputed).
4224 */
4225 for_each_pipe_masked(dev_priv, pipe, results->dirty_pipes)
4226 skl_copy_wm_for_pipe(hw_vals, results, pipe);
4036 4227
4037 mutex_unlock(&dev_priv->wm.wm_mutex); 4228 mutex_unlock(&dev_priv->wm.wm_mutex);
4038} 4229}
@@ -4892,7 +5083,8 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv)
4892 else 5083 else
4893 gen6_set_rps(dev_priv, dev_priv->rps.idle_freq); 5084 gen6_set_rps(dev_priv, dev_priv->rps.idle_freq);
4894 dev_priv->rps.last_adj = 0; 5085 dev_priv->rps.last_adj = 0;
4895 I915_WRITE(GEN6_PMINTRMSK, 0xffffffff); 5086 I915_WRITE(GEN6_PMINTRMSK,
5087 gen6_sanitize_rps_pm_mask(dev_priv, ~0));
4896 } 5088 }
4897 mutex_unlock(&dev_priv->rps.hw_lock); 5089 mutex_unlock(&dev_priv->rps.hw_lock);
4898 5090
@@ -6573,9 +6765,7 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
6573 6765
6574void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv) 6766void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
6575{ 6767{
6576 if (IS_CHERRYVIEW(dev_priv)) 6768 if (IS_VALLEYVIEW(dev_priv))
6577 return;
6578 else if (IS_VALLEYVIEW(dev_priv))
6579 valleyview_cleanup_gt_powersave(dev_priv); 6769 valleyview_cleanup_gt_powersave(dev_priv);
6580 6770
6581 if (!i915.enable_rc6) 6771 if (!i915.enable_rc6)
@@ -7657,8 +7847,54 @@ void intel_init_pm(struct drm_device *dev)
7657 } 7847 }
7658} 7848}
7659 7849
7850static inline int gen6_check_mailbox_status(struct drm_i915_private *dev_priv)
7851{
7852 uint32_t flags =
7853 I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_ERROR_MASK;
7854
7855 switch (flags) {
7856 case GEN6_PCODE_SUCCESS:
7857 return 0;
7858 case GEN6_PCODE_UNIMPLEMENTED_CMD:
7859 case GEN6_PCODE_ILLEGAL_CMD:
7860 return -ENXIO;
7861 case GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
7862 case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
7863 return -EOVERFLOW;
7864 case GEN6_PCODE_TIMEOUT:
7865 return -ETIMEDOUT;
7866 default:
 7867		MISSING_CASE(flags);
7868 return 0;
7869 }
7870}
7871
7872static inline int gen7_check_mailbox_status(struct drm_i915_private *dev_priv)
7873{
7874 uint32_t flags =
7875 I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_ERROR_MASK;
7876
7877 switch (flags) {
7878 case GEN6_PCODE_SUCCESS:
7879 return 0;
7880 case GEN6_PCODE_ILLEGAL_CMD:
7881 return -ENXIO;
7882 case GEN7_PCODE_TIMEOUT:
7883 return -ETIMEDOUT;
7884 case GEN7_PCODE_ILLEGAL_DATA:
7885 return -EINVAL;
7886 case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
7887 return -EOVERFLOW;
7888 default:
7889 MISSING_CASE(flags);
7890 return 0;
7891 }
7892}
7893
7660int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val) 7894int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val)
7661{ 7895{
7896 int status;
7897
7662 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 7898 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
7663 7899
7664 /* GEN6_PCODE_* are outside of the forcewake domain, we can 7900 /* GEN6_PCODE_* are outside of the forcewake domain, we can
@@ -7685,12 +7921,25 @@ int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val
7685 *val = I915_READ_FW(GEN6_PCODE_DATA); 7921 *val = I915_READ_FW(GEN6_PCODE_DATA);
7686 I915_WRITE_FW(GEN6_PCODE_DATA, 0); 7922 I915_WRITE_FW(GEN6_PCODE_DATA, 0);
7687 7923
7924 if (INTEL_GEN(dev_priv) > 6)
7925 status = gen7_check_mailbox_status(dev_priv);
7926 else
7927 status = gen6_check_mailbox_status(dev_priv);
7928
7929 if (status) {
7930 DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed: %d\n",
7931 status);
7932 return status;
7933 }
7934
7688 return 0; 7935 return 0;
7689} 7936}
7690 7937
7691int sandybridge_pcode_write(struct drm_i915_private *dev_priv, 7938int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
7692 u32 mbox, u32 val) 7939 u32 mbox, u32 val)
7693{ 7940{
7941 int status;
7942
7694 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 7943 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
7695 7944
7696 /* GEN6_PCODE_* are outside of the forcewake domain, we can 7945 /* GEN6_PCODE_* are outside of the forcewake domain, we can
@@ -7715,6 +7964,17 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
7715 7964
7716 I915_WRITE_FW(GEN6_PCODE_DATA, 0); 7965 I915_WRITE_FW(GEN6_PCODE_DATA, 0);
7717 7966
7967 if (INTEL_GEN(dev_priv) > 6)
7968 status = gen7_check_mailbox_status(dev_priv);
7969 else
7970 status = gen6_check_mailbox_status(dev_priv);
7971
7972 if (status) {
7973 DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed: %d\n",
7974 status);
7975 return status;
7976 }
7977
7718 return 0; 7978 return 0;
7719} 7979}
7720 7980
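With the mailbox error flags decoded, sandybridge_pcode_read() and _write() now propagate real errnos instead of silently returning 0, so callers such as the SAGV code can treat "command not implemented on this machine" (-ENXIO) differently from a genuine failure. A hedged usage sketch (the choice of mailbox is illustrative only, and rps.hw_lock must be held around the call as the WARN_ON in the helpers demands):

static int example_pcode_query(struct drm_i915_private *dev_priv)
{
	u32 val = 0;
	int ret;

	mutex_lock(&dev_priv->rps.hw_lock);
	ret = sandybridge_pcode_read(dev_priv, GEN9_PCODE_SAGV_CONTROL, &val);
	mutex_unlock(&dev_priv->rps.hw_lock);

	if (ret == -ENXIO)	/* pcode command doesn't exist on this machine */
		return 0;
	if (ret)		/* timeout or other mailbox failure */
		return ret;

	return val != 0;
}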
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index 2b0d1baf15b3..cf171b4b8c67 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -255,14 +255,14 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp)
255 struct drm_i915_private *dev_priv = to_i915(dev); 255 struct drm_i915_private *dev_priv = to_i915(dev);
256 256
257 uint32_t max_sleep_time = 0x1f; 257 uint32_t max_sleep_time = 0x1f;
258 /* Lately it was identified that depending on panel idle frame count 258 /*
259 * calculated at HW can be off by 1. So let's use what came 259 * Let's respect VBT in case VBT asks a higher idle_frame value.
260 * from VBT + 1. 260 * Let's use 6 as the minimum to cover all known cases including
261 * There are also other cases where panel demands at least 4 261 * the off-by-one issue that HW has in some cases. Also there are
262 * but VBT is not being set. To cover these 2 cases lets use 262 * cases where sink should be able to train
263 * at least 5 when VBT isn't set to be on the safest side. 263 * with the 5 or 6 idle patterns.
264 */ 264 */
265 uint32_t idle_frames = dev_priv->vbt.psr.idle_frames + 1; 265 uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
266 uint32_t val = EDP_PSR_ENABLE; 266 uint32_t val = EDP_PSR_ENABLE;
267 267
268 val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT; 268 val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index cca7792f26d5..1d3161bbea24 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1178,8 +1178,8 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
1178 I915_WRITE(GEN8_L3SQCREG1, L3_GENERAL_PRIO_CREDITS(62) | 1178 I915_WRITE(GEN8_L3SQCREG1, L3_GENERAL_PRIO_CREDITS(62) |
1179 L3_HIGH_PRIO_CREDITS(2)); 1179 L3_HIGH_PRIO_CREDITS(2));
1180 1180
1181 /* WaInsertDummyPushConstPs:bxt */ 1181 /* WaToEnableHwFixForPushConstHWBug:bxt */
1182 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0)) 1182 if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
1183 WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, 1183 WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
1184 GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); 1184 GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
1185 1185
@@ -1222,8 +1222,8 @@ static int kbl_init_workarounds(struct intel_engine_cs *engine)
1222 I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) | 1222 I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
1223 GEN8_LQSC_RO_PERF_DIS); 1223 GEN8_LQSC_RO_PERF_DIS);
1224 1224
1225 /* WaInsertDummyPushConstPs:kbl */ 1225 /* WaToEnableHwFixForPushConstHWBug:kbl */
1226 if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0)) 1226 if (IS_KBL_REVID(dev_priv, KBL_REVID_C0, REVID_FOREVER))
1227 WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, 1227 WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
1228 GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); 1228 GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
1229 1229
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index 9f7dafce3a4c..7bf90e9e6139 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -171,10 +171,34 @@ static void imx_drm_output_poll_changed(struct drm_device *drm)
171 drm_fbdev_cma_hotplug_event(imxdrm->fbhelper); 171 drm_fbdev_cma_hotplug_event(imxdrm->fbhelper);
172} 172}
173 173
174static int imx_drm_atomic_check(struct drm_device *dev,
175 struct drm_atomic_state *state)
176{
177 int ret;
178
179 ret = drm_atomic_helper_check_modeset(dev, state);
180 if (ret)
181 return ret;
182
183 ret = drm_atomic_helper_check_planes(dev, state);
184 if (ret)
185 return ret;
186
187 /*
188 * Check modeset again in case crtc_state->mode_changed is
189 * updated in plane's ->atomic_check callback.
190 */
191 ret = drm_atomic_helper_check_modeset(dev, state);
192 if (ret)
193 return ret;
194
195 return ret;
196}
197
174static const struct drm_mode_config_funcs imx_drm_mode_config_funcs = { 198static const struct drm_mode_config_funcs imx_drm_mode_config_funcs = {
175 .fb_create = drm_fb_cma_create, 199 .fb_create = drm_fb_cma_create,
176 .output_poll_changed = imx_drm_output_poll_changed, 200 .output_poll_changed = imx_drm_output_poll_changed,
177 .atomic_check = drm_atomic_helper_check, 201 .atomic_check = imx_drm_atomic_check,
178 .atomic_commit = drm_atomic_helper_commit, 202 .atomic_commit = drm_atomic_helper_commit,
179}; 203};
180 204
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index 08e188bc10fc..462056e4b9e4 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -76,6 +76,8 @@ static void ipu_crtc_disable(struct drm_crtc *crtc)
76 crtc->state->event = NULL; 76 crtc->state->event = NULL;
77 } 77 }
78 spin_unlock_irq(&crtc->dev->event_lock); 78 spin_unlock_irq(&crtc->dev->event_lock);
79
80 drm_crtc_vblank_off(crtc);
79} 81}
80 82
81static void imx_drm_crtc_reset(struct drm_crtc *crtc) 83static void imx_drm_crtc_reset(struct drm_crtc *crtc)
@@ -175,6 +177,8 @@ static int ipu_crtc_atomic_check(struct drm_crtc *crtc,
175static void ipu_crtc_atomic_begin(struct drm_crtc *crtc, 177static void ipu_crtc_atomic_begin(struct drm_crtc *crtc,
176 struct drm_crtc_state *old_crtc_state) 178 struct drm_crtc_state *old_crtc_state)
177{ 179{
180 drm_crtc_vblank_on(crtc);
181
178 spin_lock_irq(&crtc->dev->event_lock); 182 spin_lock_irq(&crtc->dev->event_lock);
179 if (crtc->state->event) { 183 if (crtc->state->event) {
180 WARN_ON(drm_crtc_vblank_get(crtc)); 184 WARN_ON(drm_crtc_vblank_get(crtc));
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index 4ad67d015ec7..29423e757d36 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -319,13 +319,14 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
319 return -EINVAL; 319 return -EINVAL;
320 320
321 /* 321 /*
322 * since we cannot touch active IDMAC channels, we do not support 322 * We support resizing active plane or changing its format by
323 * resizing the enabled plane or changing its format 323 * forcing CRTC mode change and disabling-enabling plane in plane's
324 * ->atomic_update callback.
324 */ 325 */
325 if (old_fb && (state->src_w != old_state->src_w || 326 if (old_fb && (state->src_w != old_state->src_w ||
326 state->src_h != old_state->src_h || 327 state->src_h != old_state->src_h ||
327 fb->pixel_format != old_fb->pixel_format)) 328 fb->pixel_format != old_fb->pixel_format))
328 return -EINVAL; 329 crtc_state->mode_changed = true;
329 330
330 eba = drm_plane_state_to_eba(state); 331 eba = drm_plane_state_to_eba(state);
331 332
@@ -336,7 +337,7 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
336 return -EINVAL; 337 return -EINVAL;
337 338
338 if (old_fb && fb->pitches[0] != old_fb->pitches[0]) 339 if (old_fb && fb->pitches[0] != old_fb->pitches[0])
339 return -EINVAL; 340 crtc_state->mode_changed = true;
340 341
341 switch (fb->pixel_format) { 342 switch (fb->pixel_format) {
342 case DRM_FORMAT_YUV420: 343 case DRM_FORMAT_YUV420:
@@ -372,7 +373,7 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
372 return -EINVAL; 373 return -EINVAL;
373 374
374 if (old_fb && old_fb->pitches[1] != fb->pitches[1]) 375 if (old_fb && old_fb->pitches[1] != fb->pitches[1])
375 return -EINVAL; 376 crtc_state->mode_changed = true;
376 } 377 }
377 378
378 return 0; 379 return 0;
@@ -392,8 +393,14 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
392 enum ipu_color_space ics; 393 enum ipu_color_space ics;
393 394
394 if (old_state->fb) { 395 if (old_state->fb) {
395 ipu_plane_atomic_set_base(ipu_plane, old_state); 396 struct drm_crtc_state *crtc_state = state->crtc->state;
396 return; 397
398 if (!crtc_state->mode_changed) {
399 ipu_plane_atomic_set_base(ipu_plane, old_state);
400 return;
401 }
402
403 ipu_disable_plane(plane);
397 } 404 }
398 405
399 switch (ipu_plane->dp_flow) { 406 switch (ipu_plane->dp_flow) {
diff --git a/drivers/gpu/drm/mediatek/Kconfig b/drivers/gpu/drm/mediatek/Kconfig
index 23ac8041c562..294de4549922 100644
--- a/drivers/gpu/drm/mediatek/Kconfig
+++ b/drivers/gpu/drm/mediatek/Kconfig
@@ -2,6 +2,9 @@ config DRM_MEDIATEK
2 tristate "DRM Support for Mediatek SoCs" 2 tristate "DRM Support for Mediatek SoCs"
3 depends on DRM 3 depends on DRM
4 depends on ARCH_MEDIATEK || (ARM && COMPILE_TEST) 4 depends on ARCH_MEDIATEK || (ARM && COMPILE_TEST)
5 depends on COMMON_CLK
6 depends on HAVE_ARM_SMCCC
7 depends on OF
5 select DRM_GEM_CMA_HELPER 8 select DRM_GEM_CMA_HELPER
6 select DRM_KMS_HELPER 9 select DRM_KMS_HELPER
7 select DRM_MIPI_DSI 10 select DRM_MIPI_DSI
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index b4bc7f1ef717..d0da52f2a806 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -157,6 +157,12 @@ struct msm_drm_private {
157 struct shrinker shrinker; 157 struct shrinker shrinker;
158 158
159 struct msm_vblank_ctrl vblank_ctrl; 159 struct msm_vblank_ctrl vblank_ctrl;
160
161 /* task holding struct_mutex.. currently only used in submit path
162 * to detect and reject faults from copy_from_user() for submit
163 * ioctl.
164 */
165 struct task_struct *struct_mutex_task;
160}; 166};
161 167
162struct msm_format { 168struct msm_format {
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 6cd4af443139..85f3047e05ae 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -196,11 +196,20 @@ int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
196{ 196{
197 struct drm_gem_object *obj = vma->vm_private_data; 197 struct drm_gem_object *obj = vma->vm_private_data;
198 struct drm_device *dev = obj->dev; 198 struct drm_device *dev = obj->dev;
199 struct msm_drm_private *priv = dev->dev_private;
199 struct page **pages; 200 struct page **pages;
200 unsigned long pfn; 201 unsigned long pfn;
201 pgoff_t pgoff; 202 pgoff_t pgoff;
202 int ret; 203 int ret;
203 204
205 /* This should only happen if userspace tries to pass a mmap'd
206 * but unfaulted gem bo vaddr into submit ioctl, triggering
207 * a page fault while struct_mutex is already held. This is
208 * not a valid use-case so just bail.
209 */
210 if (priv->struct_mutex_task == current)
211 return VM_FAULT_SIGBUS;
212
204 /* Make sure we don't parallel update on a fault, nor move or remove 213 /* Make sure we don't parallel update on a fault, nor move or remove
205 * something from beneath our feet 214 * something from beneath our feet
206 */ 215 */
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 9766f9ae4b7d..880d6a9af7c8 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -64,6 +64,14 @@ void msm_gem_submit_free(struct msm_gem_submit *submit)
64 kfree(submit); 64 kfree(submit);
65} 65}
66 66
67static inline unsigned long __must_check
68copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
69{
70 if (access_ok(VERIFY_READ, from, n))
71 return __copy_from_user_inatomic(to, from, n);
72 return -EFAULT;
73}
74
67static int submit_lookup_objects(struct msm_gem_submit *submit, 75static int submit_lookup_objects(struct msm_gem_submit *submit,
68 struct drm_msm_gem_submit *args, struct drm_file *file) 76 struct drm_msm_gem_submit *args, struct drm_file *file)
69{ 77{
@@ -71,6 +79,7 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
71 int ret = 0; 79 int ret = 0;
72 80
73 spin_lock(&file->table_lock); 81 spin_lock(&file->table_lock);
82 pagefault_disable();
74 83
75 for (i = 0; i < args->nr_bos; i++) { 84 for (i = 0; i < args->nr_bos; i++) {
76 struct drm_msm_gem_submit_bo submit_bo; 85 struct drm_msm_gem_submit_bo submit_bo;
@@ -84,10 +93,15 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
84 */ 93 */
85 submit->bos[i].flags = 0; 94 submit->bos[i].flags = 0;
86 95
87 ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo)); 96 ret = copy_from_user_inatomic(&submit_bo, userptr, sizeof(submit_bo));
88 if (ret) { 97 if (unlikely(ret)) {
89 ret = -EFAULT; 98 pagefault_enable();
90 goto out_unlock; 99 spin_unlock(&file->table_lock);
100 ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo));
101 if (ret)
102 goto out;
103 spin_lock(&file->table_lock);
104 pagefault_disable();
91 } 105 }
92 106
93 if (submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) { 107 if (submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) {
@@ -127,9 +141,12 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
127 } 141 }
128 142
129out_unlock: 143out_unlock:
130 submit->nr_bos = i; 144 pagefault_enable();
131 spin_unlock(&file->table_lock); 145 spin_unlock(&file->table_lock);
132 146
147out:
148 submit->nr_bos = i;
149
133 return ret; 150 return ret;
134} 151}
135 152
@@ -377,6 +394,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
377 if (ret) 394 if (ret)
378 return ret; 395 return ret;
379 396
397 priv->struct_mutex_task = current;
398
380 submit = submit_create(dev, gpu, args->nr_bos, args->nr_cmds); 399 submit = submit_create(dev, gpu, args->nr_bos, args->nr_cmds);
381 if (!submit) { 400 if (!submit) {
382 ret = -ENOMEM; 401 ret = -ENOMEM;
@@ -468,6 +487,7 @@ out:
468 if (ret) 487 if (ret)
469 msm_gem_submit_free(submit); 488 msm_gem_submit_free(submit);
470out_unlock: 489out_unlock:
490 priv->struct_mutex_task = NULL;
471 mutex_unlock(&dev->struct_mutex); 491 mutex_unlock(&dev->struct_mutex);
472 return ret; 492 return ret;
473} 493}
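The submit_lookup_objects() rework above is the classic fast-path/slow-path copy pattern: with the spinlock held and page faults disabled, attempt the non-faulting copy; only when that fails drop the lock, perform a normal (possibly sleeping) copy_from_user(), and re-acquire the lock before continuing. The skeleton of the pattern, using the non-faulting helper added above and a hypothetical struct item standing in for the msm-specific submit_bo:

static int copy_array_locked(spinlock_t *lock, struct item *dst,
			     const struct item __user *src, unsigned int n)
{
	unsigned int i;

	spin_lock(lock);
	pagefault_disable();
	for (i = 0; i < n; i++) {
		if (copy_from_user_inatomic(&dst[i], &src[i], sizeof(*dst))) {
			/* slow path: faults allowed, lock must be dropped */
			pagefault_enable();
			spin_unlock(lock);
			if (copy_from_user(&dst[i], &src[i], sizeof(*dst)))
				return -EFAULT;	/* genuinely bad pointer */
			spin_lock(lock);
			pagefault_disable();
		}
		/* ... validate/use dst[i] under the lock ... */
	}
	pagefault_enable();
	spin_unlock(lock);
	return 0;
}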
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index f2ad17aa33f0..dc57b628e074 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -225,6 +225,17 @@ static bool nouveau_pr3_present(struct pci_dev *pdev)
225 if (!parent_pdev) 225 if (!parent_pdev)
226 return false; 226 return false;
227 227
228 if (!parent_pdev->bridge_d3) {
229 /*
230 * Parent PCI bridge is currently not power managed.
231 * Since userspace can change these afterwards to be on
232 * the safe side we stick with _DSM and prevent usage of
233 * _PR3 from the bridge.
234 */
235 pci_d3cold_disable(pdev);
236 return false;
237 }
238
228 parent_adev = ACPI_COMPANION(&parent_pdev->dev); 239 parent_adev = ACPI_COMPANION(&parent_pdev->dev);
229 if (!parent_adev) 240 if (!parent_adev)
230 return false; 241 return false;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 528bdeffb339..6190035edfea 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1151,7 +1151,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
1151 if (ret) 1151 if (ret)
1152 goto out; 1152 goto out;
1153 1153
1154 ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem); 1154 ret = ttm_bo_move_ttm(bo, true, intr, no_wait_gpu, new_mem);
1155out: 1155out:
1156 ttm_bo_mem_put(bo, &tmp_mem); 1156 ttm_bo_mem_put(bo, &tmp_mem);
1157 return ret; 1157 return ret;
@@ -1179,7 +1179,7 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
1179 if (ret) 1179 if (ret)
1180 return ret; 1180 return ret;
1181 1181
1182 ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem); 1182 ret = ttm_bo_move_ttm(bo, true, intr, no_wait_gpu, &tmp_mem);
1183 if (ret) 1183 if (ret)
1184 goto out; 1184 goto out;
1185 1185
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c
index df2657051afd..28c1423049c5 100644
--- a/drivers/gpu/drm/qxl/qxl_fb.c
+++ b/drivers/gpu/drm/qxl/qxl_fb.c
@@ -73,10 +73,12 @@ static void qxl_fb_image_init(struct qxl_fb_image *qxl_fb_image,
73 } 73 }
74} 74}
75 75
76#ifdef CONFIG_DRM_FBDEV_EMULATION
76static struct fb_deferred_io qxl_defio = { 77static struct fb_deferred_io qxl_defio = {
77 .delay = QXL_DIRTY_DELAY, 78 .delay = QXL_DIRTY_DELAY,
78 .deferred_io = drm_fb_helper_deferred_io, 79 .deferred_io = drm_fb_helper_deferred_io,
79}; 80};
81#endif
80 82
81static struct fb_ops qxlfb_ops = { 83static struct fb_ops qxlfb_ops = {
82 .owner = THIS_MODULE, 84 .owner = THIS_MODULE,
@@ -313,8 +315,10 @@ static int qxlfb_create(struct qxl_fbdev *qfbdev,
313 goto out_destroy_fbi; 315 goto out_destroy_fbi;
314 } 316 }
315 317
318#ifdef CONFIG_DRM_FBDEV_EMULATION
316 info->fbdefio = &qxl_defio; 319 info->fbdefio = &qxl_defio;
317 fb_deferred_io_init(info); 320 fb_deferred_io_init(info);
321#endif
318 322
319 qdev->fbdev_info = info; 323 qdev->fbdev_info = info;
320 qdev->fbdev_qfb = &qfbdev->qfb; 324 qdev->fbdev_qfb = &qfbdev->qfb;
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index a97abc8af657..1dcf39084555 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -627,7 +627,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
627 if (radeon_crtc->ss.refdiv) { 627 if (radeon_crtc->ss.refdiv) {
628 radeon_crtc->pll_flags |= RADEON_PLL_USE_REF_DIV; 628 radeon_crtc->pll_flags |= RADEON_PLL_USE_REF_DIV;
629 radeon_crtc->pll_reference_div = radeon_crtc->ss.refdiv; 629 radeon_crtc->pll_reference_div = radeon_crtc->ss.refdiv;
630 if (rdev->family >= CHIP_RV770) 630 if (ASIC_IS_AVIVO(rdev) &&
631 rdev->family != CHIP_RS780 &&
632 rdev->family != CHIP_RS880)
631 radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV; 633 radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
632 } 634 }
633 } 635 }
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index 6de342861202..ddef0d494084 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -198,16 +198,7 @@ static int radeon_atpx_validate(struct radeon_atpx *atpx)
198 atpx->is_hybrid = false; 198 atpx->is_hybrid = false;
199 if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) { 199 if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) {
200 printk("ATPX Hybrid Graphics\n"); 200 printk("ATPX Hybrid Graphics\n");
201#if 1
202 /* This is a temporary hack until the D3 cold support
203 * makes it upstream. The ATPX power_control method seems
204 * to still work on even if the system should be using
205 * the new standardized hybrid D3 cold ACPI interface.
206 */
207 atpx->functions.power_cntl = true;
208#else
209 atpx->functions.power_cntl = false; 201 atpx->functions.power_cntl = false;
210#endif
211 atpx->is_hybrid = true; 202 atpx->is_hybrid = true;
212 } 203 }
213 204
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index ffdad81ef964..c2e0a1ccdfbc 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -263,8 +263,8 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
263 263
264 rdev = radeon_get_rdev(bo->bdev); 264 rdev = radeon_get_rdev(bo->bdev);
265 ridx = radeon_copy_ring_index(rdev); 265 ridx = radeon_copy_ring_index(rdev);
266 old_start = old_mem->start << PAGE_SHIFT; 266 old_start = (u64)old_mem->start << PAGE_SHIFT;
267 new_start = new_mem->start << PAGE_SHIFT; 267 new_start = (u64)new_mem->start << PAGE_SHIFT;
268 268
269 switch (old_mem->mem_type) { 269 switch (old_mem->mem_type) {
270 case TTM_PL_VRAM: 270 case TTM_PL_VRAM:
@@ -346,7 +346,7 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
346 if (unlikely(r)) { 346 if (unlikely(r)) {
347 goto out_cleanup; 347 goto out_cleanup;
348 } 348 }
349 r = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem); 349 r = ttm_bo_move_ttm(bo, true, interruptible, no_wait_gpu, new_mem);
350out_cleanup: 350out_cleanup:
351 ttm_bo_mem_put(bo, &tmp_mem); 351 ttm_bo_mem_put(bo, &tmp_mem);
352 return r; 352 return r;
@@ -379,7 +379,7 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
379 if (unlikely(r)) { 379 if (unlikely(r)) {
380 return r; 380 return r;
381 } 381 }
382 r = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem); 382 r = ttm_bo_move_ttm(bo, true, interruptible, no_wait_gpu, &tmp_mem);
383 if (unlikely(r)) { 383 if (unlikely(r)) {
384 goto out_cleanup; 384 goto out_cleanup;
385 } 385 }
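The (u64) casts in radeon_move_blit() fix a classic C width bug: mem->start is an unsigned long page number, so on 32-bit kernels "start << PAGE_SHIFT" is evaluated in 32 bits and silently wraps for buffers placed above 4 GiB. Widening the operand before the shift keeps every bit; a small illustration (values chosen to overflow 32 bits):

unsigned long start = 0x100000;		/* page 1M, PAGE_SHIFT == 12 */
u64 bad  = start << PAGE_SHIFT;		/* wraps to 0 on 32-bit */
u64 good = (u64)start << PAGE_SHIFT;	/* 0x100000000 on every arch */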
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c b/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c
index 4de3ff0dbebd..e03004f4588d 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c
@@ -125,6 +125,7 @@ int rcar_du_hdmienc_init(struct rcar_du_device *rcdu,
 
 	/* Link drm_bridge to encoder */
 	bridge->encoder = encoder;
+	encoder->bridge = bridge;
 
 	ret = drm_bridge_attach(rcdu->ddev, bridge);
 	if (ret) {
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
index 3d228ad90e0f..3dea1216bafd 100644
--- a/drivers/gpu/drm/tegra/dsi.c
+++ b/drivers/gpu/drm/tegra/dsi.c
@@ -840,6 +840,21 @@ static const struct drm_encoder_funcs tegra_dsi_encoder_funcs = {
 	.destroy = tegra_output_encoder_destroy,
 };
 
+static void tegra_dsi_unprepare(struct tegra_dsi *dsi)
+{
+	int err;
+
+	if (dsi->slave)
+		tegra_dsi_unprepare(dsi->slave);
+
+	err = tegra_mipi_disable(dsi->mipi);
+	if (err < 0)
+		dev_err(dsi->dev, "failed to disable MIPI calibration: %d\n",
+			err);
+
+	pm_runtime_put(dsi->dev);
+}
+
 static void tegra_dsi_encoder_disable(struct drm_encoder *encoder)
 {
 	struct tegra_output *output = encoder_to_output(encoder);
@@ -876,7 +891,26 @@ static void tegra_dsi_encoder_disable(struct drm_encoder *encoder)
 
 	tegra_dsi_disable(dsi);
 
-	pm_runtime_put(dsi->dev);
+	tegra_dsi_unprepare(dsi);
+}
+
+static void tegra_dsi_prepare(struct tegra_dsi *dsi)
+{
+	int err;
+
+	pm_runtime_get_sync(dsi->dev);
+
+	err = tegra_mipi_enable(dsi->mipi);
+	if (err < 0)
+		dev_err(dsi->dev, "failed to enable MIPI calibration: %d\n",
+			err);
+
+	err = tegra_dsi_pad_calibrate(dsi);
+	if (err < 0)
+		dev_err(dsi->dev, "MIPI calibration failed: %d\n", err);
+
+	if (dsi->slave)
+		tegra_dsi_prepare(dsi->slave);
 }
 
 static void tegra_dsi_encoder_enable(struct drm_encoder *encoder)
@@ -887,13 +921,8 @@ static void tegra_dsi_encoder_enable(struct drm_encoder *encoder)
 	struct tegra_dsi *dsi = to_dsi(output);
 	struct tegra_dsi_state *state;
 	u32 value;
-	int err;
-
-	pm_runtime_get_sync(dsi->dev);
 
-	err = tegra_dsi_pad_calibrate(dsi);
-	if (err < 0)
-		dev_err(dsi->dev, "MIPI calibration failed: %d\n", err);
+	tegra_dsi_prepare(dsi);
 
 	state = tegra_dsi_get_state(dsi);
 
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 4054d804fe06..42c074a9c955 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -354,7 +354,8 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 
 	if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
 	    !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
-		ret = ttm_bo_move_ttm(bo, evict, no_wait_gpu, mem);
+		ret = ttm_bo_move_ttm(bo, evict, interruptible, no_wait_gpu,
+				      mem);
 	else if (bdev->driver->move)
 		ret = bdev->driver->move(bo, evict, interruptible,
 					 no_wait_gpu, mem);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 2df602a35f92..f157a9efd220 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -45,7 +45,7 @@ void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
 }
 
 int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
-		    bool evict,
+		    bool evict, bool interruptible,
 		    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
 {
 	struct ttm_tt *ttm = bo->ttm;
@@ -53,6 +53,14 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
 	int ret;
 
 	if (old_mem->mem_type != TTM_PL_SYSTEM) {
+		ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);
+
+		if (unlikely(ret != 0)) {
+			if (ret != -ERESTARTSYS)
+				pr_err("Failed to expire sync object before unbinding TTM\n");
+			return ret;
+		}
+
 		ttm_tt_unbind(ttm);
 		ttm_bo_free_old_node(bo);
 		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
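The extra ttm_bo_wait() makes ttm_bo_move_ttm() flush outstanding GPU work before the backing pages are unbound, which is why the function grows an interruptible flag that every caller (radeon above, and ttm_bo_handle_move_mem()) now threads through. A sketch of the error convention, assuming the 4.8-era TTM signatures (kernel fragment, not buildable standalone):

	static int example_wait_then_unbind(struct ttm_buffer_object *bo,
					    bool interruptible, bool no_wait_gpu)
	{
		int ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);

		if (unlikely(ret != 0)) {
			/* -ERESTARTSYS means a signal arrived; the syscall is
			 * restarted transparently, so don't log it as an error. */
			if (ret != -ERESTARTSYS)
				pr_err("wait before unbind failed: %d\n", ret);
			return ret;
		}

		ttm_tt_unbind(bo->ttm);	/* safe: GPU no longer uses the pages */
		return 0;
	}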
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index d5df555aeba0..9688bfa92ccd 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -203,6 +203,7 @@ static int udl_fb_open(struct fb_info *info, int user)
 
 	ufbdev->fb_count++;
 
+#ifdef CONFIG_DRM_FBDEV_EMULATION
 	if (fb_defio && (info->fbdefio == NULL)) {
 		/* enable defio at last moment if not disabled by client */
 
@@ -218,6 +219,7 @@ static int udl_fb_open(struct fb_info *info, int user)
 		info->fbdefio = fbdefio;
 		fb_deferred_io_init(info);
 	}
+#endif
 
 	pr_notice("open /dev/fb%d user=%d fb_info=%p count=%d\n",
 		  info->node, user, info, ufbdev->fb_count);
@@ -235,12 +237,14 @@ static int udl_fb_release(struct fb_info *info, int user)
 
 	ufbdev->fb_count--;
 
+#ifdef CONFIG_DRM_FBDEV_EMULATION
 	if ((ufbdev->fb_count == 0) && (info->fbdefio)) {
 		fb_deferred_io_cleanup(info);
 		kfree(info->fbdefio);
 		info->fbdefio = NULL;
 		info->fbops->fb_mmap = udl_fb_mmap;
 	}
+#endif
 
 	pr_warn("released /dev/fb%d user=%d count=%d\n",
 		info->node, user, ufbdev->fb_count);
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index 59adcf8532dd..3f6704cf6608 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -144,7 +144,7 @@ static struct list_head *vc4_get_cache_list_for_size(struct drm_device *dev,
 	return &vc4->bo_cache.size_list[page_index];
 }
 
-void vc4_bo_cache_purge(struct drm_device *dev)
+static void vc4_bo_cache_purge(struct drm_device *dev)
 {
 	struct vc4_dev *vc4 = to_vc4_dev(dev);
 
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 8b42d31a7f0e..9ecef9385491 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -57,21 +57,21 @@ static int vc4_get_param_ioctl(struct drm_device *dev, void *data,
 	switch (args->param) {
 	case DRM_VC4_PARAM_V3D_IDENT0:
 		ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
-		if (ret)
+		if (ret < 0)
 			return ret;
 		args->value = V3D_READ(V3D_IDENT0);
 		pm_runtime_put(&vc4->v3d->pdev->dev);
 		break;
 	case DRM_VC4_PARAM_V3D_IDENT1:
 		ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
-		if (ret)
+		if (ret < 0)
 			return ret;
 		args->value = V3D_READ(V3D_IDENT1);
 		pm_runtime_put(&vc4->v3d->pdev->dev);
 		break;
 	case DRM_VC4_PARAM_V3D_IDENT2:
 		ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
-		if (ret)
+		if (ret < 0)
 			return ret;
 		args->value = V3D_READ(V3D_IDENT2);
 		pm_runtime_put(&vc4->v3d->pdev->dev);
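The "if (ret < 0)" change matters because pm_runtime_get_sync() returns 1 when the device was already active and 0 when it had to be resumed; only negative values are errors, so "if (ret)" wrongly failed the ioctl on the already-active case. Sketch of the canonical check (kernel fragment, not the patch's exact code):

	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		/* the usage count is bumped even on failure */
		pm_runtime_put_noidle(dev);
		return ret;
	}
	/* ... touch the hardware ... */
	pm_runtime_put(dev);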
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 489e3de0c050..428e24919ef1 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -321,6 +321,15 @@ vc4_first_render_job(struct vc4_dev *vc4)
 			       struct vc4_exec_info, head);
 }
 
+static inline struct vc4_exec_info *
+vc4_last_render_job(struct vc4_dev *vc4)
+{
+	if (list_empty(&vc4->render_job_list))
+		return NULL;
+	return list_last_entry(&vc4->render_job_list,
+			       struct vc4_exec_info, head);
+}
+
 /**
  * struct vc4_texture_sample_info - saves the offsets into the UBO for texture
  * setup parameters.
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index 6155e8aca1c6..b262c5c26f10 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -534,8 +534,8 @@ vc4_cl_lookup_bos(struct drm_device *dev,
 		return -EINVAL;
 	}
 
-	exec->bo = kcalloc(exec->bo_count, sizeof(struct drm_gem_cma_object *),
-			   GFP_KERNEL);
+	exec->bo = drm_calloc_large(exec->bo_count,
+				    sizeof(struct drm_gem_cma_object *));
 	if (!exec->bo) {
 		DRM_ERROR("Failed to allocate validated BO pointers\n");
 		return -ENOMEM;
@@ -572,8 +572,8 @@ vc4_cl_lookup_bos(struct drm_device *dev,
 	spin_unlock(&file_priv->table_lock);
 
 fail:
-	kfree(handles);
-	return 0;
+	drm_free_large(handles);
+	return ret;
 }
 
 static int
@@ -608,7 +608,7 @@ vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
 	 * read the contents back for validation, and I think the
 	 * bo->vaddr is uncached access.
 	 */
-	temp = kmalloc(temp_size, GFP_KERNEL);
+	temp = drm_malloc_ab(temp_size, 1);
 	if (!temp) {
 		DRM_ERROR("Failed to allocate storage for copying "
 			  "in bin/render CLs.\n");
@@ -675,7 +675,7 @@ vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
 	ret = vc4_validate_shader_recs(dev, exec);
 
 fail:
-	kfree(temp);
+	drm_free_large(temp);
 	return ret;
 }
 
@@ -688,7 +688,7 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
 	if (exec->bo) {
 		for (i = 0; i < exec->bo_count; i++)
 			drm_gem_object_unreference_unlocked(&exec->bo[i]->base);
-		kfree(exec->bo);
+		drm_free_large(exec->bo);
 	}
 
 	while (!list_empty(&exec->unref_list)) {
@@ -942,8 +942,8 @@ vc4_gem_destroy(struct drm_device *dev)
 		vc4->overflow_mem = NULL;
 	}
 
-	vc4_bo_cache_destroy(dev);
-
 	if (vc4->hang_state)
 		vc4_free_hang_state(dev, vc4->hang_state);
+
+	vc4_bo_cache_destroy(dev);
 }
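exec->bo_count comes straight from the submit ioctl, so userspace can request an arbitrarily large pointer array; kcalloc() is limited to physically contiguous memory and can fail (or warn) on big sizes. The drm_calloc_large()/drm_malloc_ab()/drm_free_large() helpers from this era's <drm/drm_mem_util.h> fall back to vmalloc for large requests, and allocation and free must then be paired. A hedged sketch of the pairing (fragment):

	bo = drm_calloc_large(count, sizeof(*bo));	/* kmalloc, or vmalloc if large */
	if (!bo)
		return -ENOMEM;

	/* ... fill and use bo[0..count-1] ... */

	drm_free_large(bo);	/* kvfree() underneath: frees either kind */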
diff --git a/drivers/gpu/drm/vc4/vc4_irq.c b/drivers/gpu/drm/vc4/vc4_irq.c
index b0104a346a74..094bc6a475c1 100644
--- a/drivers/gpu/drm/vc4/vc4_irq.c
+++ b/drivers/gpu/drm/vc4/vc4_irq.c
@@ -83,8 +83,10 @@ vc4_overflow_mem_work(struct work_struct *work)
 
 	spin_lock_irqsave(&vc4->job_lock, irqflags);
 	current_exec = vc4_first_bin_job(vc4);
+	if (!current_exec)
+		current_exec = vc4_last_render_job(vc4);
 	if (current_exec) {
-		vc4->overflow_mem->seqno = vc4->finished_seqno + 1;
+		vc4->overflow_mem->seqno = current_exec->seqno;
 		list_add_tail(&vc4->overflow_mem->unref_head,
 			      &current_exec->unref_list);
 		vc4->overflow_mem = NULL;
diff --git a/drivers/gpu/drm/vc4/vc4_validate_shaders.c b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
index 46527e989ce3..2543cf5b8b51 100644
--- a/drivers/gpu/drm/vc4/vc4_validate_shaders.c
+++ b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
@@ -309,8 +309,14 @@ validate_uniform_address_write(struct vc4_validated_shader_info *validated_shader,
 	 * of uniforms on each side.  However, this scheme is easy to
 	 * validate so it's all we allow for now.
 	 */
-
-	if (QPU_GET_FIELD(inst, QPU_SIG) != QPU_SIG_NONE) {
+	switch (QPU_GET_FIELD(inst, QPU_SIG)) {
+	case QPU_SIG_NONE:
+	case QPU_SIG_SCOREBOARD_UNLOCK:
+	case QPU_SIG_COLOR_LOAD:
+	case QPU_SIG_LOAD_TMU0:
+	case QPU_SIG_LOAD_TMU1:
+		break;
+	default:
 		DRM_ERROR("uniforms address change must be "
 			  "normal math\n");
 		return false;
diff --git a/drivers/gpu/host1x/mipi.c b/drivers/gpu/host1x/mipi.c
index 52a6fd224127..e00809d996a2 100644
--- a/drivers/gpu/host1x/mipi.c
+++ b/drivers/gpu/host1x/mipi.c
@@ -242,20 +242,6 @@ struct tegra_mipi_device *tegra_mipi_request(struct device *device)
 	dev->pads = args.args[0];
 	dev->device = device;
 
-	mutex_lock(&dev->mipi->lock);
-
-	if (dev->mipi->usage_count++ == 0) {
-		err = tegra_mipi_power_up(dev->mipi);
-		if (err < 0) {
-			dev_err(dev->mipi->dev,
-				"failed to power up MIPI bricks: %d\n",
-				err);
-			return ERR_PTR(err);
-		}
-	}
-
-	mutex_unlock(&dev->mipi->lock);
-
 	return dev;
 
 put:
@@ -270,29 +256,42 @@ EXPORT_SYMBOL(tegra_mipi_request);
 
 void tegra_mipi_free(struct tegra_mipi_device *device)
 {
-	int err;
+	platform_device_put(device->pdev);
+	kfree(device);
+}
+EXPORT_SYMBOL(tegra_mipi_free);
 
-	mutex_lock(&device->mipi->lock);
+int tegra_mipi_enable(struct tegra_mipi_device *dev)
+{
+	int err = 0;
 
-	if (--device->mipi->usage_count == 0) {
-		err = tegra_mipi_power_down(device->mipi);
-		if (err < 0) {
-			/*
-			 * Not much that can be done here, so an error message
-			 * will have to do.
-			 */
-			dev_err(device->mipi->dev,
-				"failed to power down MIPI bricks: %d\n",
-				err);
-		}
-	}
+	mutex_lock(&dev->mipi->lock);
 
-	mutex_unlock(&device->mipi->lock);
+	if (dev->mipi->usage_count++ == 0)
+		err = tegra_mipi_power_up(dev->mipi);
+
+	mutex_unlock(&dev->mipi->lock);
+
+	return err;
 
-	platform_device_put(device->pdev);
-	kfree(device);
 }
-EXPORT_SYMBOL(tegra_mipi_free);
+EXPORT_SYMBOL(tegra_mipi_enable);
+
+int tegra_mipi_disable(struct tegra_mipi_device *dev)
+{
+	int err = 0;
+
+	mutex_lock(&dev->mipi->lock);
+
+	if (--dev->mipi->usage_count == 0)
+		err = tegra_mipi_power_down(dev->mipi);
+
+	mutex_unlock(&dev->mipi->lock);
+
+	return err;
+
+}
+EXPORT_SYMBOL(tegra_mipi_disable);
 
 static int tegra_mipi_wait(struct tegra_mipi *mipi)
 {
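Moving power-up out of tegra_mipi_request() into explicit enable/disable calls lets the DSI driver (see tegra_dsi_prepare()/tegra_dsi_unprepare() earlier in this series) decide when the calibration bricks are powered, while the usage count keeps multiple consumers balanced. The generic shape of that pattern, with power_up()/power_down() as hypothetical stand-ins for the real callbacks:

	/* Illustrative only: power_up()/power_down() are made-up helpers. */
	static DEFINE_MUTEX(res_lock);
	static int res_usage_count;

	int resource_enable(void)
	{
		int err = 0;

		mutex_lock(&res_lock);
		if (res_usage_count++ == 0)	/* first user powers the block up */
			err = power_up();
		mutex_unlock(&res_lock);

		return err;
	}

	int resource_disable(void)
	{
		int err = 0;

		mutex_lock(&res_lock);
		if (--res_usage_count == 0)	/* last user powers it down */
			err = power_down();
		mutex_unlock(&res_lock);

		return err;
	}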
diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
index 730d84028260..4667012b46b7 100644
--- a/drivers/hwmon/it87.c
+++ b/drivers/hwmon/it87.c
@@ -491,7 +491,7 @@ struct it87_sio_data {
 struct it87_data {
 	const struct attribute_group *groups[7];
 	enum chips type;
-	u16 features;
+	u32 features;
 	u8 peci_mask;
 	u8 old_peci_mask;
 
@@ -2015,6 +2015,7 @@ static struct attribute *it87_attributes_in[] = {
 	&sensor_dev_attr_in10_input.dev_attr.attr,	/* 41 */
 	&sensor_dev_attr_in11_input.dev_attr.attr,	/* 41 */
 	&sensor_dev_attr_in12_input.dev_attr.attr,	/* 41 */
+	NULL
 };
 
 static const struct attribute_group it87_group_in = {
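The it87 fix adds the NULL sentinel that sysfs requires at the end of every attribute array; without it the core walks past the end of it87_attributes_in. Minimal sketch (dev_attr_foo/dev_attr_bar are assumed to be defined elsewhere with DEVICE_ATTR_RO() or similar):

	static struct attribute *example_attrs[] = {
		&dev_attr_foo.attr,
		&dev_attr_bar.attr,
		NULL	/* mandatory terminator: sysfs iterates until NULL */
	};

	static const struct attribute_group example_group = {
		.attrs = example_attrs,
	};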
diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c
index f23372669f77..1bb97f658b47 100644
--- a/drivers/i2c/busses/i2c-at91.c
+++ b/drivers/i2c/busses/i2c-at91.c
@@ -38,6 +38,7 @@
 #define AT91_I2C_TIMEOUT	msecs_to_jiffies(100)	/* transfer timeout */
 #define AT91_I2C_DMA_THRESHOLD	8			/* enable DMA if transfer size is bigger than this threshold */
 #define AUTOSUSPEND_TIMEOUT		2000
+#define AT91_I2C_MAX_ALT_CMD_DATA_SIZE	256
 
 /* AT91 TWI register definitions */
 #define	AT91_TWI_CR		0x0000	/* Control Register */
@@ -141,6 +142,7 @@ struct at91_twi_dev {
 	unsigned twi_cwgr_reg;
 	struct at91_twi_pdata *pdata;
 	bool use_dma;
+	bool use_alt_cmd;
 	bool recv_len_abort;
 	u32 fifo_size;
 	struct at91_twi_dma dma;
@@ -269,7 +271,7 @@ static void at91_twi_write_next_byte(struct at91_twi_dev *dev)
 
 	/* send stop when last byte has been written */
 	if (--dev->buf_len == 0)
-		if (!dev->pdata->has_alt_cmd)
+		if (!dev->use_alt_cmd)
 			at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);
 
 	dev_dbg(dev->dev, "wrote 0x%x, to go %d\n", *dev->buf, dev->buf_len);
@@ -292,7 +294,7 @@ static void at91_twi_write_data_dma_callback(void *data)
 	 * we just have to enable TXCOMP one.
 	 */
 	at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP);
-	if (!dev->pdata->has_alt_cmd)
+	if (!dev->use_alt_cmd)
 		at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);
 }
 
@@ -410,7 +412,7 @@ static void at91_twi_read_next_byte(struct at91_twi_dev *dev)
 	}
 
 	/* send stop if second but last byte has been read */
-	if (!dev->pdata->has_alt_cmd && dev->buf_len == 1)
+	if (!dev->use_alt_cmd && dev->buf_len == 1)
 		at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);
 
 	dev_dbg(dev->dev, "read 0x%x, to go %d\n", *dev->buf, dev->buf_len);
@@ -426,7 +428,7 @@ static void at91_twi_read_data_dma_callback(void *data)
 	dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg[0]),
 			 dev->buf_len, DMA_FROM_DEVICE);
 
-	if (!dev->pdata->has_alt_cmd) {
+	if (!dev->use_alt_cmd) {
 		/* The last two bytes have to be read without using dma */
 		dev->buf += dev->buf_len - 2;
 		dev->buf_len = 2;
@@ -443,7 +445,7 @@ static void at91_twi_read_data_dma(struct at91_twi_dev *dev)
 	struct dma_chan *chan_rx = dma->chan_rx;
 	size_t buf_len;
 
-	buf_len = (dev->pdata->has_alt_cmd) ? dev->buf_len : dev->buf_len - 2;
+	buf_len = (dev->use_alt_cmd) ? dev->buf_len : dev->buf_len - 2;
 	dma->direction = DMA_FROM_DEVICE;
 
 	/* Keep in mind that we won't use dma to read the last two bytes */
@@ -651,7 +653,7 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev)
 	unsigned start_flags = AT91_TWI_START;
 
 	/* if only one byte is to be read, immediately stop transfer */
-	if (!has_alt_cmd && dev->buf_len <= 1 &&
+	if (!dev->use_alt_cmd && dev->buf_len <= 1 &&
 	    !(dev->msg->flags & I2C_M_RECV_LEN))
 		start_flags |= AT91_TWI_STOP;
 	at91_twi_write(dev, AT91_TWI_CR, start_flags);
@@ -745,7 +747,7 @@ static int at91_twi_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num)
 	int ret;
 	unsigned int_addr_flag = 0;
 	struct i2c_msg *m_start = msg;
-	bool is_read, use_alt_cmd = false;
+	bool is_read;
 
 	dev_dbg(&adap->dev, "at91_xfer: processing %d messages:\n", num);
 
@@ -768,14 +770,16 @@ static int at91_twi_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num)
 		at91_twi_write(dev, AT91_TWI_IADR, internal_address);
 	}
 
+	dev->use_alt_cmd = false;
 	is_read = (m_start->flags & I2C_M_RD);
 	if (dev->pdata->has_alt_cmd) {
-		if (m_start->len > 0) {
+		if (m_start->len > 0 &&
+		    m_start->len < AT91_I2C_MAX_ALT_CMD_DATA_SIZE) {
 			at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_ACMEN);
 			at91_twi_write(dev, AT91_TWI_ACR,
 				       AT91_TWI_ACR_DATAL(m_start->len) |
 				       ((is_read) ? AT91_TWI_ACR_DIR : 0));
-			use_alt_cmd = true;
+			dev->use_alt_cmd = true;
 		} else {
 			at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_ACMDIS);
 		}
@@ -784,7 +788,7 @@ static int at91_twi_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num)
 	at91_twi_write(dev, AT91_TWI_MMR,
 		       (m_start->addr << 16) |
 		       int_addr_flag |
-		       ((!use_alt_cmd && is_read) ? AT91_TWI_MREAD : 0));
+		       ((!dev->use_alt_cmd && is_read) ? AT91_TWI_MREAD : 0));
 
 	dev->buf_len = m_start->len;
 	dev->buf = m_start->buf;
diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c
index 19c843828fe2..95f7cac76f89 100644
--- a/drivers/i2c/busses/i2c-bcm-iproc.c
+++ b/drivers/i2c/busses/i2c-bcm-iproc.c
@@ -158,7 +158,7 @@ static irqreturn_t bcm_iproc_i2c_isr(int irq, void *data)
 
 	if (status & BIT(IS_M_START_BUSY_SHIFT)) {
 		iproc_i2c->xfer_is_done = 1;
-		complete_all(&iproc_i2c->done);
+		complete(&iproc_i2c->done);
 	}
 
 	writel(status, iproc_i2c->base + IS_OFFSET);
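This and the following i2c fixes (bcm-kona, brcmstb, meson) swap complete_all() for complete(). complete_all() leaves the completion permanently "done", so the next transfer's wait would return immediately unless the driver remembers to reinit_completion() first; complete() hands exactly one done token to one waiter. Sketch of the per-transfer pairing, with start_transfer() as a hypothetical stand-in:

	/* Fragment: one completion reused for every transfer. */
	reinit_completion(&dev->done);		/* arm for this transfer */
	start_transfer(dev);			/* hypothetical: kick the hardware */

	if (!wait_for_completion_timeout(&dev->done, msecs_to_jiffies(100)))
		return -ETIMEDOUT;

	/* ISR side, paired with the wait above:
	 *	complete(&dev->done);		wakes exactly one waiter
	 */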
diff --git a/drivers/i2c/busses/i2c-bcm-kona.c b/drivers/i2c/busses/i2c-bcm-kona.c
index ac9f47679c3a..258cb9a40ab3 100644
--- a/drivers/i2c/busses/i2c-bcm-kona.c
+++ b/drivers/i2c/busses/i2c-bcm-kona.c
@@ -229,7 +229,7 @@ static irqreturn_t bcm_kona_i2c_isr(int irq, void *devid)
 			  dev->base + TXFCR_OFFSET);
 
 	writel(status & ~ISR_RESERVED_MASK, dev->base + ISR_OFFSET);
-	complete_all(&dev->done);
+	complete(&dev->done);
 
 	return IRQ_HANDLED;
 }
@@ -643,7 +643,7 @@ static int bcm_kona_i2c_xfer(struct i2c_adapter *adapter,
 		if (rc < 0) {
 			dev_err(dev->device,
 				"restart cmd failed rc = %d\n", rc);
-				goto xfer_send_stop;
+			goto xfer_send_stop;
 		}
 	}
 
diff --git a/drivers/i2c/busses/i2c-brcmstb.c b/drivers/i2c/busses/i2c-brcmstb.c
index 3f5a4d71d3bf..385b57bfcb38 100644
--- a/drivers/i2c/busses/i2c-brcmstb.c
+++ b/drivers/i2c/busses/i2c-brcmstb.c
@@ -228,7 +228,7 @@ static irqreturn_t brcmstb_i2c_isr(int irq, void *devid)
 		return IRQ_NONE;
 
 	brcmstb_i2c_enable_disable_irq(dev, INT_DISABLE);
-	complete_all(&dev->done);
+	complete(&dev->done);
 
 	dev_dbg(dev->device, "isr handled");
 	return IRQ_HANDLED;
diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
index 90bbd9f9dd8f..3c16a2f7c673 100644
--- a/drivers/i2c/busses/i2c-cadence.c
+++ b/drivers/i2c/busses/i2c-cadence.c
@@ -767,7 +767,7 @@ static int cdns_i2c_setclk(unsigned long clk_in, struct cdns_i2c *id)
  * depending on the scaling direction.
  *
  * Return:	NOTIFY_STOP if the rate change should be aborted, NOTIFY_OK
- *		to acknowedge the change, NOTIFY_DONE if the notification is
+ *		to acknowledge the change, NOTIFY_DONE if the notification is
  *		considered irrelevant.
  */
 static int cdns_i2c_clk_notifier_cb(struct notifier_block *nb, unsigned long
diff --git a/drivers/i2c/busses/i2c-cros-ec-tunnel.c b/drivers/i2c/busses/i2c-cros-ec-tunnel.c
index a0d95ff682ae..2d5ff86398d0 100644
--- a/drivers/i2c/busses/i2c-cros-ec-tunnel.c
+++ b/drivers/i2c/busses/i2c-cros-ec-tunnel.c
@@ -215,7 +215,7 @@ static int ec_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg i2c_msgs[],
 	msg->outsize = request_len;
 	msg->insize = response_len;
 
-	result = cros_ec_cmd_xfer(bus->ec, msg);
+	result = cros_ec_cmd_xfer_status(bus->ec, msg);
 	if (result < 0) {
 		dev_err(dev, "Error transferring EC i2c message %d\n", result);
 		goto exit;
diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
index c6922b806fb7..fcd973d5131e 100644
--- a/drivers/i2c/busses/i2c-designware-core.c
+++ b/drivers/i2c/busses/i2c-designware-core.c
@@ -367,13 +367,17 @@ int i2c_dw_init(struct dw_i2c_dev *dev)
 	dev_dbg(dev->dev, "Fast-mode HCNT:LCNT = %d:%d\n", hcnt, lcnt);
 
 	/* Configure SDA Hold Time if required */
-	if (dev->sda_hold_time) {
-		reg = dw_readl(dev, DW_IC_COMP_VERSION);
-		if (reg >= DW_IC_SDA_HOLD_MIN_VERS)
+	reg = dw_readl(dev, DW_IC_COMP_VERSION);
+	if (reg >= DW_IC_SDA_HOLD_MIN_VERS) {
+		if (dev->sda_hold_time) {
 			dw_writel(dev, dev->sda_hold_time, DW_IC_SDA_HOLD);
-		else
-			dev_warn(dev->dev,
-				"Hardware too old to adjust SDA hold time.");
+		} else {
+			/* Keep previous hold time setting if no one set it */
+			dev->sda_hold_time = dw_readl(dev, DW_IC_SDA_HOLD);
+		}
+	} else {
+		dev_warn(dev->dev,
+			 "Hardware too old to adjust SDA hold time.\n");
 	}
 
 	/* Configure Tx/Rx FIFO threshold levels */
diff --git a/drivers/i2c/busses/i2c-meson.c b/drivers/i2c/busses/i2c-meson.c
index 71d3929adf54..76e28980904f 100644
--- a/drivers/i2c/busses/i2c-meson.c
+++ b/drivers/i2c/busses/i2c-meson.c
@@ -211,7 +211,7 @@ static void meson_i2c_stop(struct meson_i2c *i2c)
 		meson_i2c_add_token(i2c, TOKEN_STOP);
 	} else {
 		i2c->state = STATE_IDLE;
-		complete_all(&i2c->done);
+		complete(&i2c->done);
 	}
 }
 
@@ -238,7 +238,7 @@ static irqreturn_t meson_i2c_irq(int irqno, void *dev_id)
 		dev_dbg(i2c->dev, "error bit set\n");
 		i2c->error = -ENXIO;
 		i2c->state = STATE_IDLE;
-		complete_all(&i2c->done);
+		complete(&i2c->done);
 		goto out;
 	}
 
@@ -269,7 +269,7 @@ static irqreturn_t meson_i2c_irq(int irqno, void *dev_id)
 		break;
 	case STATE_STOP:
 		i2c->state = STATE_IDLE;
-		complete_all(&i2c->done);
+		complete(&i2c->done);
 		break;
 	case STATE_IDLE:
 		break;
diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c
index dfa7a4b4a91d..ac88a524143e 100644
--- a/drivers/i2c/busses/i2c-ocores.c
+++ b/drivers/i2c/busses/i2c-ocores.c
@@ -379,6 +379,7 @@ static int ocores_i2c_of_probe(struct platform_device *pdev,
 	if (!clock_frequency_present) {
 		dev_err(&pdev->dev,
 			"Missing required parameter 'opencores,ip-clock-frequency'\n");
+		clk_disable_unprepare(i2c->clk);
 		return -ENODEV;
 	}
 	i2c->ip_clock_khz = clock_frequency / 1000;
@@ -467,20 +468,21 @@ static int ocores_i2c_probe(struct platform_device *pdev)
 	default:
 		dev_err(&pdev->dev, "Unsupported I/O width (%d)\n",
 			i2c->reg_io_width);
-		return -EINVAL;
+		ret = -EINVAL;
+		goto err_clk;
 	}
 	}
 
 	ret = ocores_init(&pdev->dev, i2c);
 	if (ret)
-		return ret;
+		goto err_clk;
 
 	init_waitqueue_head(&i2c->wait);
 	ret = devm_request_irq(&pdev->dev, irq, ocores_isr, 0,
 			       pdev->name, i2c);
 	if (ret) {
 		dev_err(&pdev->dev, "Cannot claim IRQ\n");
-		return ret;
+		goto err_clk;
 	}
 
 	/* hook up driver to tree */
@@ -494,7 +496,7 @@ static int ocores_i2c_probe(struct platform_device *pdev)
 	ret = i2c_add_adapter(&i2c->adap);
 	if (ret) {
 		dev_err(&pdev->dev, "Failed to add adapter\n");
-		return ret;
+		goto err_clk;
 	}
 
 	/* add in known devices to the bus */
@@ -504,6 +506,10 @@ static int ocores_i2c_probe(struct platform_device *pdev)
 	}
 
 	return 0;
+
+err_clk:
+	clk_disable_unprepare(i2c->clk);
+	return ret;
 }
 
 static int ocores_i2c_remove(struct platform_device *pdev)
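ocores_i2c_probe() enables the clock early, so every failure after that point must unprepare it; funnelling all of them through one err_clk label keeps the unwind in a single place. The shape of the pattern, with clk and setup_hw() as placeholders:

	static int example_probe(struct clk *clk)
	{
		int ret;

		ret = clk_prepare_enable(clk);	/* resource acquired first */
		if (ret)
			return ret;		/* nothing to undo yet */

		ret = setup_hw();		/* hypothetical follow-up step */
		if (ret)
			goto err_clk;		/* all later failures unwind here */

		return 0;

	err_clk:
		clk_disable_unprepare(clk);
		return ret;
	}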
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index 52407f3c9e1c..9bd849dacee8 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -378,7 +378,7 @@ static void rcar_i2c_dma(struct rcar_i2c_priv *priv)
 	}
 
 	dma_addr = dma_map_single(chan->device->dev, buf, len, dir);
-	if (dma_mapping_error(dev, dma_addr)) {
+	if (dma_mapping_error(chan->device->dev, dma_addr)) {
 		dev_dbg(dev, "dma map failed, using PIO\n");
 		return;
 	}
diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c
index 2bc8b01153d6..5c5b7cada8be 100644
--- a/drivers/i2c/busses/i2c-rk3x.c
+++ b/drivers/i2c/busses/i2c-rk3x.c
@@ -918,7 +918,7 @@ static void rk3x_i2c_adapt_div(struct rk3x_i2c *i2c, unsigned long clk_rate)
  * Code adapted from i2c-cadence.c.
  *
  * Return:	NOTIFY_STOP if the rate change should be aborted, NOTIFY_OK
- *		to acknowedge the change, NOTIFY_DONE if the notification is
+ *		to acknowledge the change, NOTIFY_DONE if the notification is
  *		considered irrelevant.
  */
 static int rk3x_i2c_clk_notifier_cb(struct notifier_block *nb, unsigned long
@@ -1111,6 +1111,15 @@ static int rk3x_i2c_xfer(struct i2c_adapter *adap,
 	return ret < 0 ? ret : num;
 }
 
+static __maybe_unused int rk3x_i2c_resume(struct device *dev)
+{
+	struct rk3x_i2c *i2c = dev_get_drvdata(dev);
+
+	rk3x_i2c_adapt_div(i2c, clk_get_rate(i2c->clk));
+
+	return 0;
+}
+
 static u32 rk3x_i2c_func(struct i2c_adapter *adap)
 {
 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_PROTOCOL_MANGLING;
@@ -1334,12 +1343,15 @@ static int rk3x_i2c_remove(struct platform_device *pdev)
 	return 0;
 }
 
+static SIMPLE_DEV_PM_OPS(rk3x_i2c_pm_ops, NULL, rk3x_i2c_resume);
+
 static struct platform_driver rk3x_i2c_driver = {
 	.probe   = rk3x_i2c_probe,
 	.remove  = rk3x_i2c_remove,
 	.driver  = {
 		.name  = "rk3x-i2c",
 		.of_match_table = rk3x_i2c_match,
+		.pm = &rk3x_i2c_pm_ops,
 	},
 };
 
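The rk3x divider is derived from the input clock and lost across suspend, hence the resume-only hook. SIMPLE_DEV_PM_OPS() fills in system-sleep callbacks only when CONFIG_PM_SLEEP is enabled, and __maybe_unused silences the unused-function warning when it is not, avoiding #ifdef blocks. Sketch (foo_* names are placeholders):

	static __maybe_unused int foo_resume(struct device *dev)
	{
		/* reprogram whatever state the hardware lost during suspend */
		return 0;
	}

	/* NULL suspend hook: nothing to save on the way down */
	static SIMPLE_DEV_PM_OPS(foo_pm_ops, NULL, foo_resume);

	static struct platform_driver foo_driver = {
		.driver = {
			.name = "foo",
			.pm = &foo_pm_ops,
		},
	};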
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
index 6fb3e2645992..05b1eeab9cf5 100644
--- a/drivers/i2c/busses/i2c-sh_mobile.c
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -610,7 +610,7 @@ static void sh_mobile_i2c_xfer_dma(struct sh_mobile_i2c_data *pd)
 		return;
 
 	dma_addr = dma_map_single(chan->device->dev, pd->msg->buf, pd->msg->len, dir);
-	if (dma_mapping_error(pd->dev, dma_addr)) {
+	if (dma_mapping_error(chan->device->dev, dma_addr)) {
 		dev_dbg(pd->dev, "dma map failed, using PIO\n");
 		return;
 	}
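Both the rcar and sh_mobile fixes correct the same mistake: the buffer was mapped against the DMA engine's device, but the platform device was asked whether the mapping failed. The two can have different DMA ops and masks, so the answer may be wrong. The rule, as a fragment:

	/* The device passed to dma_mapping_error() must be the one that did
	 * the mapping -- here the DMA engine's, not the I2C controller's. */
	dma_addr = dma_map_single(chan->device->dev, buf, len, dir);
	if (dma_mapping_error(chan->device->dev, dma_addr)) {
		/* fall back to PIO, as these drivers do */
	}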
diff --git a/drivers/i2c/muxes/i2c-demux-pinctrl.c b/drivers/i2c/muxes/i2c-demux-pinctrl.c
index 8de073aed001..b3893f6282ba 100644
--- a/drivers/i2c/muxes/i2c-demux-pinctrl.c
+++ b/drivers/i2c/muxes/i2c-demux-pinctrl.c
@@ -37,8 +37,6 @@ struct i2c_demux_pinctrl_priv {
 	struct i2c_demux_pinctrl_chan chan[];
 };
 
-static struct property status_okay = { .name = "status", .length = 3, .value = "ok" };
-
 static int i2c_demux_master_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
 {
 	struct i2c_demux_pinctrl_priv *priv = adap->algo_data;
@@ -68,7 +66,7 @@ static int i2c_demux_activate_master(struct i2c_demux_pinctrl_priv *priv, u32 new_chan)
 	adap = of_find_i2c_adapter_by_node(priv->chan[new_chan].parent_np);
 	if (!adap) {
 		ret = -ENODEV;
-		goto err;
+		goto err_with_revert;
 	}
 
 	p = devm_pinctrl_get_select(adap->dev.parent, priv->bus_name);
@@ -103,8 +101,11 @@ static int i2c_demux_activate_master(struct i2c_demux_pinctrl_priv *priv, u32 new_chan)
 
  err_with_put:
 	i2c_put_adapter(adap);
+ err_with_revert:
+	of_changeset_revert(&priv->chan[new_chan].chgset);
  err:
 	dev_err(priv->dev, "failed to setup demux-adapter %d (%d)\n", new_chan, ret);
+	priv->cur_chan = -EINVAL;
 	return ret;
 }
 
@@ -190,6 +191,7 @@ static int i2c_demux_pinctrl_probe(struct platform_device *pdev)
 {
 	struct device_node *np = pdev->dev.of_node;
 	struct i2c_demux_pinctrl_priv *priv;
+	struct property *props;
 	int num_chan, i, j, err;
 
 	num_chan = of_count_phandle_with_args(np, "i2c-parent", NULL);
@@ -200,7 +202,10 @@ static int i2c_demux_pinctrl_probe(struct platform_device *pdev)
 
 	priv = devm_kzalloc(&pdev->dev, sizeof(*priv)
 			   + num_chan * sizeof(struct i2c_demux_pinctrl_chan), GFP_KERNEL);
-	if (!priv)
+
+	props = devm_kcalloc(&pdev->dev, num_chan, sizeof(*props), GFP_KERNEL);
+
+	if (!priv || !props)
 		return -ENOMEM;
 
 	err = of_property_read_string(np, "i2c-bus-name", &priv->bus_name);
@@ -218,8 +223,12 @@ static int i2c_demux_pinctrl_probe(struct platform_device *pdev)
 		}
 		priv->chan[i].parent_np = adap_np;
 
+		props[i].name = devm_kstrdup(&pdev->dev, "status", GFP_KERNEL);
+		props[i].value = devm_kstrdup(&pdev->dev, "ok", GFP_KERNEL);
+		props[i].length = 3;
+
 		of_changeset_init(&priv->chan[i].chgset);
-		of_changeset_update_property(&priv->chan[i].chgset, adap_np, &status_okay);
+		of_changeset_update_property(&priv->chan[i].chgset, adap_np, &props[i]);
 	}
 
 	priv->num_chan = num_chan;
diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig
index 89d78208de3f..78f148ea9d9f 100644
--- a/drivers/iio/accel/Kconfig
+++ b/drivers/iio/accel/Kconfig
@@ -20,6 +20,8 @@ config BMA180
 config BMA220
 	tristate "Bosch BMA220 3-Axis Accelerometer Driver"
 	depends on SPI
+	select IIO_BUFFER
+	select IIO_TRIGGERED_BUFFER
 	help
 	  Say yes here to add support for the Bosch BMA220 triaxial
 	  acceleration sensor.
@@ -234,7 +236,8 @@ config STK8312
 config STK8BA50
 	tristate "Sensortek STK8BA50 3-Axis Accelerometer Driver"
 	depends on I2C
-	depends on IIO_TRIGGER
+	select IIO_BUFFER
+	select IIO_TRIGGERED_BUFFER
 	help
 	  Say yes here to get support for the Sensortek STK8BA50 3-axis
 	  accelerometer.
diff --git a/drivers/iio/accel/bma220_spi.c b/drivers/iio/accel/bma220_spi.c
index 1098d10df8e8..5099f295dd37 100644
--- a/drivers/iio/accel/bma220_spi.c
+++ b/drivers/iio/accel/bma220_spi.c
@@ -253,7 +253,7 @@ static int bma220_probe(struct spi_device *spi)
 	if (ret < 0)
 		return ret;
 
-	ret = iio_triggered_buffer_setup(indio_dev, NULL,
+	ret = iio_triggered_buffer_setup(indio_dev, iio_pollfunc_store_time,
 					 bma220_trigger_handler, NULL);
 	if (ret < 0) {
 		dev_err(&spi->dev, "iio triggered buffer setup failed\n");
diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c
index bf17aae66145..59b380dbf27f 100644
--- a/drivers/iio/accel/bmc150-accel-core.c
+++ b/drivers/iio/accel/bmc150-accel-core.c
@@ -67,6 +67,9 @@
 #define BMC150_ACCEL_REG_PMU_BW		0x10
 #define BMC150_ACCEL_DEF_BW			125
 
+#define BMC150_ACCEL_REG_RESET			0x14
+#define BMC150_ACCEL_RESET_VAL			0xB6
+
 #define BMC150_ACCEL_REG_INT_MAP_0		0x19
 #define BMC150_ACCEL_INT_MAP_0_BIT_SLOPE	BIT(2)
 
@@ -1497,6 +1500,14 @@ static int bmc150_accel_chip_init(struct bmc150_accel_data *data)
 	int ret, i;
 	unsigned int val;
 
+	/*
+	 * Reset chip to get it in a known good state. A delay of 1.8ms after
+	 * reset is required according to the data sheets of supported chips.
+	 */
+	regmap_write(data->regmap, BMC150_ACCEL_REG_RESET,
+		     BMC150_ACCEL_RESET_VAL);
+	usleep_range(1800, 2500);
+
 	ret = regmap_read(data->regmap, BMC150_ACCEL_REG_CHIP_ID, &val);
 	if (ret < 0) {
 		dev_err(dev, "Error: Reading chip id\n");
diff --git a/drivers/iio/accel/kxsd9.c b/drivers/iio/accel/kxsd9.c
index 3a9f106787d2..9d72d4bcf5e9 100644
--- a/drivers/iio/accel/kxsd9.c
+++ b/drivers/iio/accel/kxsd9.c
@@ -160,11 +160,13 @@ static int kxsd9_read_raw(struct iio_dev *indio_dev,
 		if (ret < 0)
 			goto error_ret;
 		*val = ret;
+		ret = IIO_VAL_INT;
 		break;
 	case IIO_CHAN_INFO_SCALE:
 		ret = spi_w8r8(st->us, KXSD9_READ(KXSD9_REG_CTRL_C));
 		if (ret < 0)
 			goto error_ret;
+		*val = 0;
 		*val2 = kxsd9_micro_scales[ret & KXSD9_FS_MASK];
 		ret = IIO_VAL_INT_PLUS_MICRO;
 		break;
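In IIO, read_raw()'s return value tells the core how to interpret *val/*val2: IIO_VAL_INT consumes only *val, while IIO_VAL_INT_PLUS_MICRO consumes both as an integer plus microparts. The kxsd9 fix supplies the missing return code for the raw case and zeroes the stale integer part for the scale case. Fragment of the convention (the numbers are made up):

	case IIO_CHAN_INFO_RAW:
		*val = raw_reading;		/* integer result */
		return IIO_VAL_INT;		/* core reads *val only */
	case IIO_CHAN_INFO_SCALE:
		*val = 0;			/* integer part must be set too */
		*val2 = 9582;			/* micro part: 0.009582 units */
		return IIO_VAL_INT_PLUS_MICRO;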
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 1de31bdd4ce4..767577298ee3 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -389,6 +389,7 @@ config QCOM_SPMI_VADC
 config ROCKCHIP_SARADC
 	tristate "Rockchip SARADC driver"
 	depends on ARCH_ROCKCHIP || (ARM && COMPILE_TEST)
+	depends on RESET_CONTROLLER
 	help
 	  Say yes here to build support for the SARADC found in SoCs from
 	  Rockchip.
diff --git a/drivers/iio/adc/ad799x.c b/drivers/iio/adc/ad799x.c
index b6163764489c..9704090b7908 100644
--- a/drivers/iio/adc/ad799x.c
+++ b/drivers/iio/adc/ad799x.c
@@ -527,6 +527,7 @@ static struct attribute_group ad799x_event_attrs_group = {
 static const struct iio_info ad7991_info = {
 	.read_raw = &ad799x_read_raw,
 	.driver_module = THIS_MODULE,
+	.update_scan_mode = ad799x_update_scan_mode,
 };
 
 static const struct iio_info ad7993_4_7_8_noirq_info = {
diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c
index 52430ba171f3..0438c68015e8 100644
--- a/drivers/iio/adc/at91_adc.c
+++ b/drivers/iio/adc/at91_adc.c
@@ -381,8 +381,8 @@ static irqreturn_t at91_adc_rl_interrupt(int irq, void *private)
 		st->ts_bufferedmeasure = false;
 		input_report_key(st->ts_input, BTN_TOUCH, 0);
 		input_sync(st->ts_input);
-	} else if (status & AT91_ADC_EOC(3)) {
-		/* Conversion finished */
+	} else if (status & AT91_ADC_EOC(3) && st->ts_input) {
+		/* Conversion finished and we've a touchscreen */
 		if (st->ts_bufferedmeasure) {
 			/*
 			 * Last measurement is always discarded, since it can
diff --git a/drivers/iio/adc/rockchip_saradc.c b/drivers/iio/adc/rockchip_saradc.c
index f9ad6c2d6821..85d701291654 100644
--- a/drivers/iio/adc/rockchip_saradc.c
+++ b/drivers/iio/adc/rockchip_saradc.c
@@ -21,6 +21,8 @@
 #include <linux/of_device.h>
 #include <linux/clk.h>
 #include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/reset.h>
 #include <linux/regulator/consumer.h>
 #include <linux/iio/iio.h>
 
@@ -53,6 +55,7 @@ struct rockchip_saradc {
 	struct clk		*clk;
 	struct completion	completion;
 	struct regulator	*vref;
+	struct reset_control	*reset;
 	const struct rockchip_saradc_data *data;
 	u16			last_val;
 };
@@ -190,6 +193,16 @@ static const struct of_device_id rockchip_saradc_match[] = {
 };
 MODULE_DEVICE_TABLE(of, rockchip_saradc_match);
 
+/**
+ * Reset SARADC Controller.
+ */
+static void rockchip_saradc_reset_controller(struct reset_control *reset)
+{
+	reset_control_assert(reset);
+	usleep_range(10, 20);
+	reset_control_deassert(reset);
+}
+
 static int rockchip_saradc_probe(struct platform_device *pdev)
 {
 	struct rockchip_saradc *info = NULL;
@@ -218,6 +231,20 @@ static int rockchip_saradc_probe(struct platform_device *pdev)
 	if (IS_ERR(info->regs))
 		return PTR_ERR(info->regs);
 
+	/*
+	 * The reset should be an optional property, as it should work
+	 * with old devicetrees as well
+	 */
+	info->reset = devm_reset_control_get(&pdev->dev, "saradc-apb");
+	if (IS_ERR(info->reset)) {
+		ret = PTR_ERR(info->reset);
+		if (ret != -ENOENT)
+			return ret;
+
+		dev_dbg(&pdev->dev, "no reset control found\n");
+		info->reset = NULL;
+	}
+
 	init_completion(&info->completion);
 
 	irq = platform_get_irq(pdev, 0);
@@ -252,6 +279,9 @@ static int rockchip_saradc_probe(struct platform_device *pdev)
 		return PTR_ERR(info->vref);
 	}
 
+	if (info->reset)
+		rockchip_saradc_reset_controller(info->reset);
+
 	/*
 	 * Use a default value for the converter clock.
 	 * This may become user-configurable in the future.
diff --git a/drivers/iio/adc/ti-ads1015.c b/drivers/iio/adc/ti-ads1015.c
index 1ef398770a1f..066abaf80201 100644
--- a/drivers/iio/adc/ti-ads1015.c
+++ b/drivers/iio/adc/ti-ads1015.c
@@ -489,7 +489,8 @@ static struct iio_info ads1115_info = {
 #ifdef CONFIG_OF
 static int ads1015_get_channels_config_of(struct i2c_client *client)
 {
-	struct ads1015_data *data = i2c_get_clientdata(client);
+	struct iio_dev *indio_dev = i2c_get_clientdata(client);
+	struct ads1015_data *data = iio_priv(indio_dev);
 	struct device_node *node;
 
 	if (!client->dev.of_node ||
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
index 8a368756881b..c3cfacca2541 100644
--- a/drivers/iio/adc/ti_am335x_adc.c
+++ b/drivers/iio/adc/ti_am335x_adc.c
@@ -32,6 +32,7 @@
 
 struct tiadc_device {
 	struct ti_tscadc_dev *mfd_tscadc;
+	struct mutex fifo1_lock; /* to protect fifo access */
 	int channels;
 	u8 channel_line[8];
 	u8 channel_step[8];
@@ -359,6 +360,7 @@ static int tiadc_read_raw(struct iio_dev *indio_dev,
 		       int *val, int *val2, long mask)
 {
 	struct tiadc_device *adc_dev = iio_priv(indio_dev);
+	int ret = IIO_VAL_INT;
 	int i, map_val;
 	unsigned int fifo1count, read, stepid;
 	bool found = false;
@@ -372,13 +374,14 @@ static int tiadc_read_raw(struct iio_dev *indio_dev,
 	if (!step_en)
 		return -EINVAL;
 
+	mutex_lock(&adc_dev->fifo1_lock);
 	fifo1count = tiadc_readl(adc_dev, REG_FIFO1CNT);
 	while (fifo1count--)
 		tiadc_readl(adc_dev, REG_FIFO1);
 
 	am335x_tsc_se_set_once(adc_dev->mfd_tscadc, step_en);
 
-	timeout = jiffies + usecs_to_jiffies
+	timeout = jiffies + msecs_to_jiffies
 				(IDLE_TIMEOUT * adc_dev->channels);
 	/* Wait for Fifo threshold interrupt */
 	while (1) {
@@ -388,7 +391,8 @@ static int tiadc_read_raw(struct iio_dev *indio_dev,
 
 		if (time_after(jiffies, timeout)) {
 			am335x_tsc_se_adc_done(adc_dev->mfd_tscadc);
-			return -EAGAIN;
+			ret = -EAGAIN;
+			goto err_unlock;
 		}
 	}
 	map_val = adc_dev->channel_step[chan->scan_index];
@@ -414,8 +418,11 @@ static int tiadc_read_raw(struct iio_dev *indio_dev,
 	am335x_tsc_se_adc_done(adc_dev->mfd_tscadc);
 
 	if (found == false)
-		return -EBUSY;
-	return IIO_VAL_INT;
+		ret = -EBUSY;
+
+err_unlock:
+	mutex_unlock(&adc_dev->fifo1_lock);
+	return ret;
 }
 
 static const struct iio_info tiadc_info = {
@@ -483,6 +490,7 @@ static int tiadc_probe(struct platform_device *pdev)
 
 	tiadc_step_config(indio_dev);
 	tiadc_writel(adc_dev, REG_FIFO1THR, FIFO1_THRESHOLD);
+	mutex_init(&adc_dev->fifo1_lock);
 
 	err = tiadc_channel_init(indio_dev, adc_dev->channels);
 	if (err < 0)
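Two independent fixes land in tiadc_read_raw(): a mutex serialising FIFO1 access between concurrent readers, and a timeout that was being built with usecs_to_jiffies() although IDLE_TIMEOUT is, by this patch's reading, a millisecond value, making the deadline roughly a thousand times too short. The polling-deadline idiom, with hw_ready() as a hypothetical placeholder:

	/* Fragment: jiffies-based deadline; conversion must match the unit. */
	unsigned long timeout = jiffies + msecs_to_jiffies(IDLE_TIMEOUT * nchannels);

	while (!hw_ready()) {			/* hypothetical readiness check */
		if (time_after(jiffies, timeout))
			return -EAGAIN;
		cpu_relax();
	}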
diff --git a/drivers/iio/chemical/atlas-ph-sensor.c b/drivers/iio/chemical/atlas-ph-sensor.c
index ae038a59d256..407f141a1eee 100644
--- a/drivers/iio/chemical/atlas-ph-sensor.c
+++ b/drivers/iio/chemical/atlas-ph-sensor.c
@@ -434,7 +434,7 @@ static int atlas_read_raw(struct iio_dev *indio_dev,
 		break;
 	case IIO_ELECTRICALCONDUCTIVITY:
 		*val = 1; /* 0.00001 */
-		*val = 100000;
+		*val2 = 100000;
 		break;
 	case IIO_CONCENTRATION:
 		*val = 0; /* 0.000000001 */
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
index e81f434760f4..dc33c1dd5191 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
@@ -56,8 +56,8 @@ static struct {
 	{HID_USAGE_SENSOR_ALS, 0, 1, 0},
 	{HID_USAGE_SENSOR_ALS, HID_USAGE_SENSOR_UNITS_LUX, 1, 0},
 
-	{HID_USAGE_SENSOR_PRESSURE, 0, 100000, 0},
-	{HID_USAGE_SENSOR_PRESSURE, HID_USAGE_SENSOR_UNITS_PASCAL, 1, 0},
+	{HID_USAGE_SENSOR_PRESSURE, 0, 100, 0},
+	{HID_USAGE_SENSOR_PRESSURE, HID_USAGE_SENSOR_UNITS_PASCAL, 0, 1000},
 };
 
 static int pow_10(unsigned power)
diff --git a/drivers/iio/dac/stx104.c b/drivers/iio/dac/stx104.c
index 792a97164cb2..bebbd00304ce 100644
--- a/drivers/iio/dac/stx104.c
+++ b/drivers/iio/dac/stx104.c
@@ -65,6 +65,16 @@ struct stx104_gpio {
 	unsigned int out_state;
 };
 
+/**
+ * struct stx104_dev - STX104 device private data structure
+ * @indio_dev: IIO device
+ * @chip: instance of the gpio_chip
+ */
+struct stx104_dev {
+	struct iio_dev *indio_dev;
+	struct gpio_chip *chip;
+};
+
 static int stx104_read_raw(struct iio_dev *indio_dev,
 	struct iio_chan_spec const *chan, int *val, int *val2, long mask)
 {
@@ -107,6 +117,7 @@ static const struct iio_chan_spec stx104_channels[STX104_NUM_CHAN] = {
 static int stx104_gpio_get_direction(struct gpio_chip *chip,
 	unsigned int offset)
 {
+	/* GPIO 0-3 are input only, while the rest are output only */
 	if (offset < 4)
 		return 1;
 
@@ -169,6 +180,7 @@ static int stx104_probe(struct device *dev, unsigned int id)
 	struct iio_dev *indio_dev;
 	struct stx104_iio *priv;
 	struct stx104_gpio *stx104gpio;
+	struct stx104_dev *stx104dev;
 	int err;
 
 	indio_dev = devm_iio_device_alloc(dev, sizeof(*priv));
@@ -179,6 +191,10 @@ static int stx104_probe(struct device *dev, unsigned int id)
 	if (!stx104gpio)
 		return -ENOMEM;
 
+	stx104dev = devm_kzalloc(dev, sizeof(*stx104dev), GFP_KERNEL);
+	if (!stx104dev)
+		return -ENOMEM;
+
 	if (!devm_request_region(dev, base[id], STX104_EXTENT,
 		dev_name(dev))) {
 		dev_err(dev, "Unable to lock port addresses (0x%X-0x%X)\n",
@@ -199,12 +215,6 @@ static int stx104_probe(struct device *dev, unsigned int id)
 	outw(0, base[id] + 4);
 	outw(0, base[id] + 6);
 
-	err = devm_iio_device_register(dev, indio_dev);
-	if (err) {
-		dev_err(dev, "IIO device registering failed (%d)\n", err);
-		return err;
-	}
-
 	stx104gpio->chip.label = dev_name(dev);
 	stx104gpio->chip.parent = dev;
 	stx104gpio->chip.owner = THIS_MODULE;
@@ -220,7 +230,9 @@ static int stx104_probe(struct device *dev, unsigned int id)
 
 	spin_lock_init(&stx104gpio->lock);
 
-	dev_set_drvdata(dev, stx104gpio);
+	stx104dev->indio_dev = indio_dev;
+	stx104dev->chip = &stx104gpio->chip;
+	dev_set_drvdata(dev, stx104dev);
 
 	err = gpiochip_add_data(&stx104gpio->chip, stx104gpio);
 	if (err) {
@@ -228,14 +240,22 @@ static int stx104_probe(struct device *dev, unsigned int id)
 		return err;
 	}
 
+	err = iio_device_register(indio_dev);
+	if (err) {
+		dev_err(dev, "IIO device registering failed (%d)\n", err);
+		gpiochip_remove(&stx104gpio->chip);
+		return err;
+	}
+
 	return 0;
 }
 
 static int stx104_remove(struct device *dev, unsigned int id)
 {
-	struct stx104_gpio *const stx104gpio = dev_get_drvdata(dev);
+	struct stx104_dev *const stx104dev = dev_get_drvdata(dev);
 
-	gpiochip_remove(&stx104gpio->chip);
+	iio_device_unregister(stx104dev->indio_dev);
+	gpiochip_remove(stx104dev->chip);
 
 	return 0;
 }
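The point of this change is teardown ordering: a devm-registered IIO device would outlive the gpiochip removed in stx104_remove(). Registering the IIO device last, by hand, makes remove() the exact mirror of probe(). A sketch of the rule under that assumption (helper names are illustrative stubs, not from the driver):

#include <linux/device.h>

static int register_gpio_side(struct device *dev) { return 0; }	/* stub */
static void unregister_gpio_side(struct device *dev) { }	/* stub */
static int register_iio_side(struct device *dev) { return 0; }	/* stub */
static void unregister_iio_side(struct device *dev) { }		/* stub */

static int example_probe(struct device *dev)
{
	int err;

	err = register_gpio_side(dev);		/* step 1 */
	if (err)
		return err;

	err = register_iio_side(dev);		/* step 2 */
	if (err) {
		unregister_gpio_side(dev);	/* unwind step 1 */
		return err;
	}
	return 0;
}

static int example_remove(struct device *dev)
{
	unregister_iio_side(dev);		/* reverse of step 2 */
	unregister_gpio_side(dev);		/* reverse of step 1 */
	return 0;
}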
diff --git a/drivers/iio/humidity/Kconfig b/drivers/iio/humidity/Kconfig
index 738a86d9e4a9..d04124345992 100644
--- a/drivers/iio/humidity/Kconfig
+++ b/drivers/iio/humidity/Kconfig
@@ -6,6 +6,8 @@ menu "Humidity sensors"
 config AM2315
 	tristate "Aosong AM2315 relative humidity and temperature sensor"
 	depends on I2C
+	select IIO_BUFFER
+	select IIO_TRIGGERED_BUFFER
 	help
 	  If you say yes here you get support for the Aosong AM2315
 	  relative humidity and ambient temperature sensor.
diff --git a/drivers/iio/humidity/am2315.c b/drivers/iio/humidity/am2315.c
index 3e200f69e886..ff96b6d0fdae 100644
--- a/drivers/iio/humidity/am2315.c
+++ b/drivers/iio/humidity/am2315.c
@@ -244,7 +244,7 @@ static int am2315_probe(struct i2c_client *client,
 	indio_dev->channels = am2315_channels;
 	indio_dev->num_channels = ARRAY_SIZE(am2315_channels);
 
-	ret = iio_triggered_buffer_setup(indio_dev, NULL,
+	ret = iio_triggered_buffer_setup(indio_dev, iio_pollfunc_store_time,
 					 am2315_trigger_handler, NULL);
 	if (ret < 0) {
 		dev_err(&client->dev, "iio triggered buffer setup failed\n");
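With a NULL top half, nothing records the sample timestamp when the trigger fires; iio_pollfunc_store_time is the stock hard-IRQ top half that stamps it before the threaded handler runs (the as3935 change further down is the same fix). A sketch of the wiring, with an illustrative handler name:

#include <linux/iio/iio.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>

static irqreturn_t my_trigger_handler(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *indio_dev = pf->indio_dev;

	/* ... read the sample, push it with pf->timestamp ... */
	iio_trigger_notify_done(indio_dev->trig);
	return IRQ_HANDLED;
}

static int my_setup(struct iio_dev *indio_dev)
{
	/* top half stores the timestamp, bottom half does the I/O */
	return iio_triggered_buffer_setup(indio_dev, iio_pollfunc_store_time,
					  my_trigger_handler, NULL);
}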
diff --git a/drivers/iio/humidity/hdc100x.c b/drivers/iio/humidity/hdc100x.c
index a03832a5fc95..e0c9c70c2a4a 100644
--- a/drivers/iio/humidity/hdc100x.c
+++ b/drivers/iio/humidity/hdc100x.c
@@ -142,7 +142,7 @@ static int hdc100x_get_measurement(struct hdc100x_data *data,
 	struct i2c_client *client = data->client;
 	int delay = data->adc_int_us[chan->address];
 	int ret;
-	int val;
+	__be16 val;
 
 	/* start measurement */
 	ret = i2c_smbus_write_byte(client, chan->address);
@@ -154,26 +154,13 @@ static int hdc100x_get_measurement(struct hdc100x_data *data,
 	/* wait for integration time to pass */
 	usleep_range(delay, delay + 1000);
 
-	/*
-	 * i2c_smbus_read_word_data cannot() be used here due to the command
-	 * value not being understood and causes NAKs preventing any reading
-	 * from being accessed.
-	 */
-	ret = i2c_smbus_read_byte(client);
+	/* read measurement */
+	ret = i2c_master_recv(data->client, (char *)&val, sizeof(val));
 	if (ret < 0) {
-		dev_err(&client->dev, "cannot read high byte measurement");
+		dev_err(&client->dev, "cannot read sensor data\n");
 		return ret;
 	}
-	val = ret << 8;
-
-	ret = i2c_smbus_read_byte(client);
-	if (ret < 0) {
-		dev_err(&client->dev, "cannot read low byte measurement");
-		return ret;
-	}
-	val |= ret;
-
-	return val;
+	return be16_to_cpu(val);
 }
 
 static int hdc100x_get_heater_status(struct hdc100x_data *data)
@@ -272,8 +259,8 @@ static int hdc100x_probe(struct i2c_client *client,
 	struct iio_dev *indio_dev;
 	struct hdc100x_data *data;
 
-	if (!i2c_check_functionality(client->adapter,
-				I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_BYTE))
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WORD_DATA |
+				     I2C_FUNC_SMBUS_BYTE | I2C_FUNC_I2C))
 		return -EOPNOTSUPP;
 
 	indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
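The sensor streams the 16-bit result MSB first, so one raw i2c_master_recv() into a __be16 plus be16_to_cpu() replaces the two byte reads; the added I2C_FUNC_I2C check guards the new raw transfer. The pattern in isolation ('client' assumed to be a bound i2c_client):

#include <linux/i2c.h>
#include <asm/byteorder.h>

static int read_be16_sample(struct i2c_client *client)
{
	__be16 raw;
	int ret;

	ret = i2c_master_recv(client, (char *)&raw, sizeof(raw));
	if (ret < 0)
		return ret;

	/* swap on little-endian hosts: bytes 0x12 0x34 -> 0x1234 */
	return be16_to_cpu(raw);
}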
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index 90462fcf5436..158aaf44dd95 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -107,9 +107,10 @@ ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
 {
 	struct iio_dev *indio_dev = filp->private_data;
 	struct iio_buffer *rb = indio_dev->buffer;
+	DEFINE_WAIT_FUNC(wait, woken_wake_function);
 	size_t datum_size;
 	size_t to_wait;
-	int ret;
+	int ret = 0;
 
 	if (!indio_dev->info)
 		return -ENODEV;
@@ -131,19 +132,29 @@ ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
 	else
 		to_wait = min_t(size_t, n / datum_size, rb->watermark);
 
+	add_wait_queue(&rb->pollq, &wait);
 	do {
-		ret = wait_event_interruptible(rb->pollq,
-		      iio_buffer_ready(indio_dev, rb, to_wait, n / datum_size));
-		if (ret)
-			return ret;
+		if (!indio_dev->info) {
+			ret = -ENODEV;
+			break;
+		}
 
-		if (!indio_dev->info)
-			return -ENODEV;
+		if (!iio_buffer_ready(indio_dev, rb, to_wait, n / datum_size)) {
+			if (signal_pending(current)) {
+				ret = -ERESTARTSYS;
+				break;
+			}
+
+			wait_woken(&wait, TASK_INTERRUPTIBLE,
+				   MAX_SCHEDULE_TIMEOUT);
+			continue;
+		}
 
 		ret = rb->access->read_first_n(rb, n, buf);
 		if (ret == 0 && (filp->f_flags & O_NONBLOCK))
 			ret = -EAGAIN;
 	} while (ret == 0);
+	remove_wait_queue(&rb->pollq, &wait);
 
 	return ret;
 }
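The rewrite above switches to the wait_woken() idiom: the task stays on the wait queue for the whole loop, so a wake_up() landing between the readiness check and the sleep simply makes the next wait_woken() return instead of being lost. The skeleton of the pattern ('condition' is a stand-in for the real readiness test):

#include <linux/wait.h>
#include <linux/sched.h>

static bool condition(void) { return true; }	/* illustrative stub */

static int wait_for_condition(wait_queue_head_t *wq)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	int ret = 0;

	add_wait_queue(wq, &wait);
	while (!condition()) {
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		/* a wakeup since the last check makes this return at once */
		wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
	}
	remove_wait_queue(wq, &wait);
	return ret;
}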
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index f914d5d140e4..d2b889918c3e 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -613,9 +613,8 @@ ssize_t iio_format_value(char *buf, unsigned int type, int size, int *vals)
 		return sprintf(buf, "%d.%09u\n", vals[0], vals[1]);
 	case IIO_VAL_FRACTIONAL:
 		tmp = div_s64((s64)vals[0] * 1000000000LL, vals[1]);
-		vals[1] = do_div(tmp, 1000000000LL);
-		vals[0] = tmp;
-		return sprintf(buf, "%d.%09u\n", vals[0], vals[1]);
+		vals[0] = (int)div_s64_rem(tmp, 1000000000, &vals[1]);
+		return sprintf(buf, "%d.%09u\n", vals[0], abs(vals[1]));
 	case IIO_VAL_FRACTIONAL_LOG2:
 		tmp = (s64)vals[0] * 1000000000LL >> vals[1];
 		vals[1] = do_div(tmp, 1000000000LL);
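do_div() is an unsigned operation, so feeding it a negative numerator corrupts the result; div_s64_rem() is the signed counterpart, truncating toward zero so quotient and remainder keep their signs. A worked example for the fraction -3/2:

#include <linux/math64.h>
#include <linux/kernel.h>

static void example(char *buf)
{
	s64 tmp = div_s64((s64)-3 * 1000000000LL, 2);	/* -1500000000 */
	s32 rem;
	s32 integer = (s32)div_s64_rem(tmp, 1000000000, &rem);

	/* integer == -1, rem == -500000000 */
	sprintf(buf, "%d.%09u\n", integer, abs(rem));	/* "-1.500000000" */
}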
diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
index 7c566f516572..3574945183fe 100644
--- a/drivers/iio/light/Kconfig
+++ b/drivers/iio/light/Kconfig
@@ -76,7 +76,6 @@ config BH1750
 config BH1780
 	tristate "ROHM BH1780 ambient light sensor"
 	depends on I2C
-	depends on !SENSORS_BH1780
 	help
 	  Say Y here to build support for the ROHM BH1780GLI ambient
 	  light sensor.
@@ -238,6 +237,8 @@ config MAX44000
 	tristate "MAX44000 Ambient and Infrared Proximity Sensor"
 	depends on I2C
 	select REGMAP_I2C
+	select IIO_BUFFER
+	select IIO_TRIGGERED_BUFFER
 	help
 	  Say Y here if you want to build support for Maxim Integrated's
 	  MAX44000 ambient and infrared proximity sensor device.
diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c
index 6943688e66df..e5a533cbd53f 100644
--- a/drivers/iio/pressure/bmp280-core.c
+++ b/drivers/iio/pressure/bmp280-core.c
@@ -970,7 +970,7 @@ int bmp280_common_probe(struct device *dev,
 	data->vdda = devm_regulator_get(dev, "vdda");
 	if (IS_ERR(data->vdda)) {
 		dev_err(dev, "failed to get VDDA regulator\n");
-		ret = PTR_ERR(data->vddd);
+		ret = PTR_ERR(data->vdda);
 		goto out_disable_vddd;
 	}
 	ret = regulator_enable(data->vdda);
@@ -1079,7 +1079,8 @@ EXPORT_SYMBOL(bmp280_common_remove);
 #ifdef CONFIG_PM
 static int bmp280_runtime_suspend(struct device *dev)
 {
-	struct bmp280_data *data = dev_get_drvdata(dev);
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct bmp280_data *data = iio_priv(indio_dev);
 	int ret;
 
 	ret = regulator_disable(data->vdda);
@@ -1090,7 +1091,8 @@ static int bmp280_runtime_suspend(struct device *dev)
 
 static int bmp280_runtime_resume(struct device *dev)
 {
-	struct bmp280_data *data = dev_get_drvdata(dev);
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct bmp280_data *data = iio_priv(indio_dev);
 	int ret;
 
 	ret = regulator_enable(data->vddd);
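The PM fix rests on the drvdata convention: probe stores the iio_dev, not the private struct, so every consumer must go through iio_priv() to reach the driver data. A minimal sketch ('struct my_data' is illustrative):

#include <linux/iio/iio.h>

struct my_data { int foo; };

static struct my_data *data_from_dev(struct device *dev)
{
	/* drvdata holds the iio_dev; iio_priv() yields the driver data */
	struct iio_dev *indio_dev = dev_get_drvdata(dev);

	return iio_priv(indio_dev);
}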
diff --git a/drivers/iio/proximity/as3935.c b/drivers/iio/proximity/as3935.c
index 2e3a70e1b245..5656deb17261 100644
--- a/drivers/iio/proximity/as3935.c
+++ b/drivers/iio/proximity/as3935.c
@@ -397,7 +397,7 @@ static int as3935_probe(struct spi_device *spi)
 		return ret;
 	}
 
-	ret = iio_triggered_buffer_setup(indio_dev, NULL,
+	ret = iio_triggered_buffer_setup(indio_dev, iio_pollfunc_store_time,
 					 &as3935_trigger_handler, NULL);
 
 	if (ret) {
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index e6dfa1bd3def..5f65a78b27c9 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -2462,18 +2462,24 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
 
 	if (addr->dev_addr.bound_dev_if) {
 		ndev = dev_get_by_index(&init_net, addr->dev_addr.bound_dev_if);
-		if (!ndev)
-			return -ENODEV;
+		if (!ndev) {
+			ret = -ENODEV;
+			goto err2;
+		}
 
 		if (ndev->flags & IFF_LOOPBACK) {
 			dev_put(ndev);
-			if (!id_priv->id.device->get_netdev)
-				return -EOPNOTSUPP;
+			if (!id_priv->id.device->get_netdev) {
+				ret = -EOPNOTSUPP;
+				goto err2;
+			}
 
 			ndev = id_priv->id.device->get_netdev(id_priv->id.device,
 							      id_priv->id.port_num);
-			if (!ndev)
-				return -ENODEV;
+			if (!ndev) {
+				ret = -ENODEV;
+				goto err2;
+			}
 		}
 
 		route->path_rec->net = &init_net;
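The early returns above leaked state allocated earlier in the function; routing every failure through one label keeps the unwind in a single place. The pattern in miniature (names illustrative, not from cma.c):

#include <linux/slab.h>

static bool do_step(void *a, void *b) { return a && b; }	/* stub */

static int example_setup(void)
{
	void *a, *b;
	int ret;

	a = kzalloc(64, GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	b = kzalloc(64, GFP_KERNEL);
	if (!b) {
		ret = -ENOMEM;
		goto err_free_a;
	}

	if (!do_step(a, b)) {
		ret = -ENODEV;
		goto err_free_b;	/* both allocations unwound below */
	}
	return 0;

err_free_b:
	kfree(b);
err_free_a:
	kfree(a);
	return ret;
}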
diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c
index 3a3c5d73bbfc..51c79b2fb0b8 100644
--- a/drivers/infiniband/core/multicast.c
+++ b/drivers/infiniband/core/multicast.c
@@ -106,7 +106,6 @@ struct mcast_group {
 	atomic_t refcount;
 	enum mcast_group_state state;
 	struct ib_sa_query *query;
-	int query_id;
 	u16 pkey_index;
 	u8 leave_state;
 	int retries;
@@ -340,11 +339,7 @@ static int send_join(struct mcast_group *group, struct mcast_member *member)
 				   member->multicast.comp_mask,
 				   3000, GFP_KERNEL, join_handler, group,
 				   &group->query);
-	if (ret >= 0) {
-		group->query_id = ret;
-		ret = 0;
-	}
-	return ret;
+	return (ret > 0) ? 0 : ret;
 }
 
 static int send_leave(struct mcast_group *group, u8 leave_state)
@@ -364,11 +359,7 @@ static int send_leave(struct mcast_group *group, u8 leave_state)
 				   IB_SA_MCMEMBER_REC_JOIN_STATE,
 				   3000, GFP_KERNEL, leave_handler,
 				   group, &group->query);
-	if (ret >= 0) {
-		group->query_id = ret;
-		ret = 0;
-	}
-	return ret;
+	return (ret > 0) ? 0 : ret;
 }
 
 static void join_group(struct mcast_group *group, struct mcast_member *member,
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 3aca7f6171b4..80f988984f44 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -333,6 +333,8 @@ static void remove_ep_tid(struct c4iw_ep *ep)
 
 	spin_lock_irqsave(&ep->com.dev->lock, flags);
 	_remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid, 0);
+	if (idr_is_empty(&ep->com.dev->hwtid_idr))
+		wake_up(&ep->com.dev->wait);
 	spin_unlock_irqrestore(&ep->com.dev->lock, flags);
 }
 
@@ -1827,8 +1829,12 @@ static int process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
 				(ep->mpa_pkt + sizeof(*mpa));
 			ep->ird = ntohs(mpa_v2_params->ird) &
 				MPA_V2_IRD_ORD_MASK;
+			ep->ird = min_t(u32, ep->ird,
+					cur_max_read_depth(ep->com.dev));
 			ep->ord = ntohs(mpa_v2_params->ord) &
 				MPA_V2_IRD_ORD_MASK;
+			ep->ord = min_t(u32, ep->ord,
+					cur_max_read_depth(ep->com.dev));
 			PDBG("%s initiator ird %u ord %u\n", __func__, ep->ird,
 			     ep->ord);
 			if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL)
@@ -2113,8 +2119,10 @@ static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
 	}
 	ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
 				n, pdev, rt_tos2priority(tos));
-	if (!ep->l2t)
+	if (!ep->l2t) {
+		dev_put(pdev);
 		goto out;
+	}
 	ep->mtu = pdev->mtu;
 	ep->tx_chan = cxgb4_port_chan(pdev);
 	ep->smac_idx = cxgb4_tp_smt_idx(adapter_type,
@@ -3136,7 +3144,7 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
 		if (conn_param->ord > ep->ird) {
 			if (RELAXED_IRD_NEGOTIATION) {
-				ep->ord = ep->ird;
+				conn_param->ord = ep->ird;
 			} else {
 				ep->ird = conn_param->ird;
 				ep->ord = conn_param->ord;
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index 812ab7278b8e..ac926c942fee 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -1016,15 +1016,15 @@ int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
 int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
 {
 	struct c4iw_cq *chp;
-	int ret;
+	int ret = 0;
 	unsigned long flag;
 
 	chp = to_c4iw_cq(ibcq);
 	spin_lock_irqsave(&chp->lock, flag);
-	ret = t4_arm_cq(&chp->cq,
+	t4_arm_cq(&chp->cq,
 		  (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED);
+	if (flags & IB_CQ_REPORT_MISSED_EVENTS)
+		ret = t4_cq_notempty(&chp->cq);
 	spin_unlock_irqrestore(&chp->lock, flag);
-	if (ret && !(flags & IB_CQ_REPORT_MISSED_EVENTS))
-		ret = 0;
 	return ret;
 }
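This implements the verbs contract: with IB_CQ_REPORT_MISSED_EVENTS, ib_req_notify_cq() may return a positive value when completions are already pending, telling the caller to poll again rather than sleep. A hedged caller-side sketch of that contract:

#include <rdma/ib_verbs.h>

static void rearm_and_drain(struct ib_cq *cq, struct ib_wc *wc)
{
	/* >0 means "completions may be queued, poll before sleeping" */
	while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
				    IB_CQ_REPORT_MISSED_EVENTS) > 0) {
		while (ib_poll_cq(cq, 1, wc) > 0)
			; /* consume completions we would have missed */
	}
}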
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index 071d7332ec06..3c4b2126e0d1 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -872,9 +872,13 @@ static void c4iw_rdev_close(struct c4iw_rdev *rdev)
 static void c4iw_dealloc(struct uld_ctx *ctx)
 {
 	c4iw_rdev_close(&ctx->dev->rdev);
+	WARN_ON_ONCE(!idr_is_empty(&ctx->dev->cqidr));
 	idr_destroy(&ctx->dev->cqidr);
+	WARN_ON_ONCE(!idr_is_empty(&ctx->dev->qpidr));
 	idr_destroy(&ctx->dev->qpidr);
+	WARN_ON_ONCE(!idr_is_empty(&ctx->dev->mmidr));
 	idr_destroy(&ctx->dev->mmidr);
+	wait_event(ctx->dev->wait, idr_is_empty(&ctx->dev->hwtid_idr));
 	idr_destroy(&ctx->dev->hwtid_idr);
 	idr_destroy(&ctx->dev->stid_idr);
 	idr_destroy(&ctx->dev->atid_idr);
@@ -992,6 +996,7 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
 	mutex_init(&devp->rdev.stats.lock);
 	mutex_init(&devp->db_mutex);
 	INIT_LIST_HEAD(&devp->db_fc_list);
+	init_waitqueue_head(&devp->wait);
 	devp->avail_ird = devp->rdev.lldi.max_ird_adapter;
 
 	if (c4iw_debugfs_root) {
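The wait_event() here pairs with the wake_up() added in remove_ep_tid() above: teardown sleeps until the last hardware TID is released. The handshake in miniature ('nr_objects' stands in for the hwtid idr; the real code checks under the device lock):

#include <linux/wait.h>
#include <linux/atomic.h>

static DECLARE_WAIT_QUEUE_HEAD(waitq);
static atomic_t nr_objects = ATOMIC_INIT(0);

static void release_object(void)		/* cf. remove_ep_tid() */
{
	if (atomic_dec_and_test(&nr_objects))
		wake_up(&waitq);
}

static void teardown_wait(void)			/* cf. c4iw_dealloc() */
{
	wait_event(waitq, atomic_read(&nr_objects) == 0);
}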
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index aa47e0ae80bc..4b83b84f7ddf 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -263,6 +263,7 @@ struct c4iw_dev {
 	struct idr stid_idr;
 	struct list_head db_fc_list;
 	u32 avail_ird;
+	wait_queue_head_t wait;
 };
 
 static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev)
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index edb1172b6f54..690435229be7 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -683,7 +683,7 @@ static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr,
 	return 0;
 }
 
-void _free_qp(struct kref *kref)
+static void _free_qp(struct kref *kref)
 {
 	struct c4iw_qp *qhp;
 
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index 6126bbe36095..02173f4315fa 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -634,6 +634,11 @@ static inline int t4_valid_cqe(struct t4_cq *cq, struct t4_cqe *cqe)
 	return (CQE_GENBIT(cqe) == cq->gen);
 }
 
+static inline int t4_cq_notempty(struct t4_cq *cq)
+{
+	return cq->sw_in_use || t4_valid_cqe(cq, &cq->queue[cq->cidx]);
+}
+
 static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
 {
 	int ret;
diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c
index 79575ee873f2..0566393e5aba 100644
--- a/drivers/infiniband/hw/hfi1/affinity.c
+++ b/drivers/infiniband/hw/hfi1/affinity.c
@@ -47,7 +47,6 @@
 #include <linux/topology.h>
 #include <linux/cpumask.h>
 #include <linux/module.h>
-#include <linux/cpumask.h>
 
 #include "hfi.h"
 #include "affinity.h"
@@ -682,7 +681,7 @@ int hfi1_set_sdma_affinity(struct hfi1_devdata *dd, const char *buf,
 			   size_t count)
 {
 	struct hfi1_affinity_node *entry;
-	struct cpumask mask;
+	cpumask_var_t mask;
 	int ret, i;
 
 	spin_lock(&node_affinity.lock);
@@ -692,19 +691,24 @@ int hfi1_set_sdma_affinity(struct hfi1_devdata *dd, const char *buf,
 	if (!entry)
 		return -EINVAL;
 
-	ret = cpulist_parse(buf, &mask);
+	ret = zalloc_cpumask_var(&mask, GFP_KERNEL);
+	if (!ret)
+		return -ENOMEM;
+
+	ret = cpulist_parse(buf, mask);
 	if (ret)
-		return ret;
+		goto out;
 
-	if (!cpumask_subset(&mask, cpu_online_mask) || cpumask_empty(&mask)) {
+	if (!cpumask_subset(mask, cpu_online_mask) || cpumask_empty(mask)) {
 		dd_dev_warn(dd, "Invalid CPU mask\n");
-		return -EINVAL;
+		ret = -EINVAL;
+		goto out;
 	}
 
 	mutex_lock(&sdma_affinity_mutex);
 	/* reset the SDMA interrupt affinity details */
 	init_cpu_mask_set(&entry->def_intr);
-	cpumask_copy(&entry->def_intr.mask, &mask);
+	cpumask_copy(&entry->def_intr.mask, mask);
 	/*
 	 * Reassign the affinity for each SDMA interrupt.
 	 */
@@ -720,8 +724,9 @@ int hfi1_set_sdma_affinity(struct hfi1_devdata *dd, const char *buf,
 		if (ret)
 			break;
 	}
-
 	mutex_unlock(&sdma_affinity_mutex);
+out:
+	free_cpumask_var(mask);
 	return ret ? ret : strnlen(buf, PAGE_SIZE);
 }
 
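With CONFIG_CPUMASK_OFFSTACK a struct cpumask spans NR_CPUS bits and can be far too large for the kernel stack, which is why the mask moves to a heap-backed cpumask_var_t. The idiom in isolation:

#include <linux/cpumask.h>
#include <linux/slab.h>

static int count_online_in(const char *list)
{
	cpumask_var_t mask;
	int ret;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	ret = cpulist_parse(list, mask);	/* e.g. "0-3,8" */
	if (!ret)
		ret = cpumask_weight(mask);

	free_cpumask_var(mask);			/* always paired */
	return ret;
}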
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index b32638d58ae8..cc38004cea42 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -9490,6 +9490,78 @@ static void init_lcb(struct hfi1_devdata *dd)
 	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x00);
 }
 
+/*
+ * Perform a test read on the QSFP.  Return 0 on success, -ERRNO
+ * on error.
+ */
+static int test_qsfp_read(struct hfi1_pportdata *ppd)
+{
+	int ret;
+	u8 status;
+
+	/* report success if not a QSFP */
+	if (ppd->port_type != PORT_TYPE_QSFP)
+		return 0;
+
+	/* read byte 2, the status byte */
+	ret = one_qsfp_read(ppd, ppd->dd->hfi1_id, 2, &status, 1);
+	if (ret < 0)
+		return ret;
+	if (ret != 1)
+		return -EIO;
+
+	return 0; /* success */
+}
+
+/*
+ * Values for QSFP retry.
+ *
+ * Give up after 10s (20 x 500ms).  The overall timeout was empirically
+ * arrived at from experience on a large cluster.
+ */
+#define MAX_QSFP_RETRIES 20
+#define QSFP_RETRY_WAIT 500 /* msec */
+
+/*
+ * Try a QSFP read.  If it fails, schedule a retry for later.
+ * Called on first link activation after driver load.
+ */
+static void try_start_link(struct hfi1_pportdata *ppd)
+{
+	if (test_qsfp_read(ppd)) {
+		/* read failed */
+		if (ppd->qsfp_retry_count >= MAX_QSFP_RETRIES) {
+			dd_dev_err(ppd->dd, "QSFP not responding, giving up\n");
+			return;
+		}
+		dd_dev_info(ppd->dd,
+			    "QSFP not responding, waiting and retrying %d\n",
+			    (int)ppd->qsfp_retry_count);
+		ppd->qsfp_retry_count++;
+		queue_delayed_work(ppd->hfi1_wq, &ppd->start_link_work,
+				   msecs_to_jiffies(QSFP_RETRY_WAIT));
+		return;
+	}
+	ppd->qsfp_retry_count = 0;
+
+	/*
+	 * Tune the SerDes to a ballpark setting for optimal signal and bit
+	 * error rate.  Needs to be done before starting the link.
+	 */
+	tune_serdes(ppd);
+	start_link(ppd);
+}
+
+/*
+ * Workqueue function to start the link after a delay.
+ */
+void handle_start_link(struct work_struct *work)
+{
+	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
+						  start_link_work.work);
+	try_start_link(ppd);
+}
+
 int bringup_serdes(struct hfi1_pportdata *ppd)
 {
 	struct hfi1_devdata *dd = ppd->dd;
@@ -9525,14 +9597,8 @@ int bringup_serdes(struct hfi1_pportdata *ppd)
 		set_qsfp_int_n(ppd, 1);
 	}
 
-	/*
-	 * Tune the SerDes to a ballpark setting for
-	 * optimal signal and bit error rate
-	 * Needs to be done before starting the link
-	 */
-	tune_serdes(ppd);
-
-	return start_link(ppd);
+	try_start_link(ppd);
+	return 0;
 }
 
 void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
@@ -9549,6 +9615,10 @@ void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
 	ppd->driver_link_ready = 0;
 	ppd->link_enabled = 0;
 
+	ppd->qsfp_retry_count = MAX_QSFP_RETRIES; /* prevent more retries */
+	flush_delayed_work(&ppd->start_link_work);
+	cancel_delayed_work_sync(&ppd->start_link_work);
+
 	ppd->offline_disabled_reason =
 		HFI1_ODR_MASK(OPA_LINKDOWN_REASON_SMA_DISABLED);
 	set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SMA_DISABLED, 0,
@@ -12865,7 +12935,7 @@ fail:
  */
 static int set_up_context_variables(struct hfi1_devdata *dd)
 {
-	int num_kernel_contexts;
+	unsigned long num_kernel_contexts;
 	int total_contexts;
 	int ret;
 	unsigned ngroups;
@@ -12894,9 +12964,9 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
 	 */
 	if (num_kernel_contexts > (dd->chip_send_contexts - num_vls - 1)) {
 		dd_dev_err(dd,
-			   "Reducing # kernel rcv contexts to: %d, from %d\n",
+			   "Reducing # kernel rcv contexts to: %d, from %lu\n",
 			   (int)(dd->chip_send_contexts - num_vls - 1),
-			   (int)num_kernel_contexts);
+			   num_kernel_contexts);
 		num_kernel_contexts = dd->chip_send_contexts - num_vls - 1;
 	}
 	/*
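The hfi1_quiet_serdes() additions show the standard way to stop a self-rearming delayed work: first prevent re-queueing, then flush a running instance, then cancel a still-pending one. A sketch of that sequence ('stop_requested' plays the role of qsfp_retry_count hitting its limit):

#include <linux/workqueue.h>

static struct delayed_work retry_work;
static bool stop_requested;

static void stop_retry_work(void)
{
	stop_requested = true;			/* no further self-queueing */
	flush_delayed_work(&retry_work);	/* let a running pass finish */
	cancel_delayed_work_sync(&retry_work);	/* drop a pending pass */
}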
diff --git a/drivers/infiniband/hw/hfi1/chip.h b/drivers/infiniband/hw/hfi1/chip.h
index ed11107c50fe..e29573769efc 100644
--- a/drivers/infiniband/hw/hfi1/chip.h
+++ b/drivers/infiniband/hw/hfi1/chip.h
@@ -706,6 +706,7 @@ void handle_link_up(struct work_struct *work);
 void handle_link_down(struct work_struct *work);
 void handle_link_downgrade(struct work_struct *work);
 void handle_link_bounce(struct work_struct *work);
+void handle_start_link(struct work_struct *work);
 void handle_sma_message(struct work_struct *work);
 void reset_qsfp(struct hfi1_pportdata *ppd);
 void qsfp_event(struct work_struct *work);
diff --git a/drivers/infiniband/hw/hfi1/debugfs.c b/drivers/infiniband/hw/hfi1/debugfs.c
index dbab9d9cc288..5e9be16f6cd3 100644
--- a/drivers/infiniband/hw/hfi1/debugfs.c
+++ b/drivers/infiniband/hw/hfi1/debugfs.c
@@ -59,6 +59,40 @@
 
 static struct dentry *hfi1_dbg_root;
 
+/* wrappers to enforce srcu in seq file */
+static ssize_t hfi1_seq_read(
+	struct file *file,
+	char __user *buf,
+	size_t size,
+	loff_t *ppos)
+{
+	struct dentry *d = file->f_path.dentry;
+	int srcu_idx;
+	ssize_t r;
+
+	r = debugfs_use_file_start(d, &srcu_idx);
+	if (likely(!r))
+		r = seq_read(file, buf, size, ppos);
+	debugfs_use_file_finish(srcu_idx);
+	return r;
+}
+
+static loff_t hfi1_seq_lseek(
+	struct file *file,
+	loff_t offset,
+	int whence)
+{
+	struct dentry *d = file->f_path.dentry;
+	int srcu_idx;
+	loff_t r;
+
+	r = debugfs_use_file_start(d, &srcu_idx);
+	if (likely(!r))
+		r = seq_lseek(file, offset, whence);
+	debugfs_use_file_finish(srcu_idx);
+	return r;
+}
+
 #define private2dd(file) (file_inode(file)->i_private)
 #define private2ppd(file) (file_inode(file)->i_private)
 
@@ -87,8 +121,8 @@ static int _##name##_open(struct inode *inode, struct file *s) \
 static const struct file_operations _##name##_file_ops = { \
 	.owner = THIS_MODULE, \
 	.open = _##name##_open, \
-	.read = seq_read, \
-	.llseek = seq_lseek, \
+	.read = hfi1_seq_read, \
+	.llseek = hfi1_seq_lseek, \
 	.release = seq_release \
 }
 
@@ -105,11 +139,9 @@ do { \
 	DEBUGFS_FILE_CREATE(#name, parent, data, &_##name##_file_ops, S_IRUGO)
 
 static void *_opcode_stats_seq_start(struct seq_file *s, loff_t *pos)
-__acquires(RCU)
 {
 	struct hfi1_opcode_stats_perctx *opstats;
 
-	rcu_read_lock();
 	if (*pos >= ARRAY_SIZE(opstats->stats))
 		return NULL;
 	return pos;
@@ -126,9 +158,7 @@ static void *_opcode_stats_seq_next(struct seq_file *s, void *v, loff_t *pos)
 }
 
 static void _opcode_stats_seq_stop(struct seq_file *s, void *v)
-__releases(RCU)
 {
-	rcu_read_unlock();
 }
 
 static int _opcode_stats_seq_show(struct seq_file *s, void *v)
@@ -223,28 +253,32 @@ DEBUGFS_SEQ_FILE_OPEN(ctx_stats)
 DEBUGFS_FILE_OPS(ctx_stats);
 
 static void *_qp_stats_seq_start(struct seq_file *s, loff_t *pos)
-__acquires(RCU)
+	__acquires(RCU)
 {
 	struct qp_iter *iter;
 	loff_t n = *pos;
 
-	rcu_read_lock();
 	iter = qp_iter_init(s->private);
+
+	/* stop calls rcu_read_unlock */
+	rcu_read_lock();
+
 	if (!iter)
 		return NULL;
 
-	while (n--) {
+	do {
 		if (qp_iter_next(iter)) {
 			kfree(iter);
 			return NULL;
 		}
-	}
+	} while (n--);
 
 	return iter;
 }
 
 static void *_qp_stats_seq_next(struct seq_file *s, void *iter_ptr,
 				loff_t *pos)
+	__must_hold(RCU)
 {
 	struct qp_iter *iter = iter_ptr;
 
@@ -259,7 +293,7 @@ static void *_qp_stats_seq_next(struct seq_file *s, void *iter_ptr,
 }
 
 static void _qp_stats_seq_stop(struct seq_file *s, void *iter_ptr)
-__releases(RCU)
+	__releases(RCU)
 {
 	rcu_read_unlock();
 }
@@ -281,12 +315,10 @@ DEBUGFS_SEQ_FILE_OPEN(qp_stats)
 DEBUGFS_FILE_OPS(qp_stats);
 
 static void *_sdes_seq_start(struct seq_file *s, loff_t *pos)
-__acquires(RCU)
 {
 	struct hfi1_ibdev *ibd;
 	struct hfi1_devdata *dd;
 
-	rcu_read_lock();
 	ibd = (struct hfi1_ibdev *)s->private;
 	dd = dd_from_dev(ibd);
 	if (!dd->per_sdma || *pos >= dd->num_sdma)
@@ -306,9 +338,7 @@ static void *_sdes_seq_next(struct seq_file *s, void *v, loff_t *pos)
 }
 
 static void _sdes_seq_stop(struct seq_file *s, void *v)
-__releases(RCU)
 {
-	rcu_read_unlock();
 }
 
 static int _sdes_seq_show(struct seq_file *s, void *v)
@@ -335,11 +365,9 @@ static ssize_t dev_counters_read(struct file *file, char __user *buf,
 	struct hfi1_devdata *dd;
 	ssize_t rval;
 
-	rcu_read_lock();
 	dd = private2dd(file);
 	avail = hfi1_read_cntrs(dd, NULL, &counters);
 	rval = simple_read_from_buffer(buf, count, ppos, counters, avail);
-	rcu_read_unlock();
 	return rval;
 }
 
@@ -352,11 +380,9 @@ static ssize_t dev_names_read(struct file *file, char __user *buf,
 	struct hfi1_devdata *dd;
 	ssize_t rval;
 
-	rcu_read_lock();
 	dd = private2dd(file);
 	avail = hfi1_read_cntrs(dd, &names, NULL);
 	rval = simple_read_from_buffer(buf, count, ppos, names, avail);
-	rcu_read_unlock();
 	return rval;
 }
 
@@ -379,11 +405,9 @@ static ssize_t portnames_read(struct file *file, char __user *buf,
 	struct hfi1_devdata *dd;
 	ssize_t rval;
 
-	rcu_read_lock();
 	dd = private2dd(file);
 	avail = hfi1_read_portcntrs(dd->pport, &names, NULL);
 	rval = simple_read_from_buffer(buf, count, ppos, names, avail);
-	rcu_read_unlock();
 	return rval;
 }
 
@@ -396,11 +420,9 @@ static ssize_t portcntrs_debugfs_read(struct file *file, char __user *buf,
 	struct hfi1_pportdata *ppd;
 	ssize_t rval;
 
-	rcu_read_lock();
 	ppd = private2ppd(file);
 	avail = hfi1_read_portcntrs(ppd, NULL, &counters);
 	rval = simple_read_from_buffer(buf, count, ppos, counters, avail);
-	rcu_read_unlock();
 	return rval;
 }
 
@@ -430,16 +452,13 @@ static ssize_t asic_flags_read(struct file *file, char __user *buf,
 	int used;
 	int i;
 
-	rcu_read_lock();
 	ppd = private2ppd(file);
 	dd = ppd->dd;
 	size = PAGE_SIZE;
 	used = 0;
 	tmp = kmalloc(size, GFP_KERNEL);
-	if (!tmp) {
-		rcu_read_unlock();
+	if (!tmp)
 		return -ENOMEM;
-	}
 
 	scratch0 = read_csr(dd, ASIC_CFG_SCRATCH);
 	used += scnprintf(tmp + used, size - used,
@@ -466,7 +485,6 @@ static ssize_t asic_flags_read(struct file *file, char __user *buf,
 	used += scnprintf(tmp + used, size - used, "Write bits to clear\n");
 
 	ret = simple_read_from_buffer(buf, count, ppos, tmp, used);
-	rcu_read_unlock();
 	kfree(tmp);
 	return ret;
 }
@@ -482,15 +500,12 @@ static ssize_t asic_flags_write(struct file *file, const char __user *buf,
 	u64 scratch0;
 	u64 clear;
 
-	rcu_read_lock();
 	ppd = private2ppd(file);
 	dd = ppd->dd;
 
 	buff = kmalloc(count + 1, GFP_KERNEL);
-	if (!buff) {
-		ret = -ENOMEM;
-		goto do_return;
-	}
+	if (!buff)
+		return -ENOMEM;
 
 	ret = copy_from_user(buff, buf, count);
 	if (ret > 0) {
@@ -523,8 +538,6 @@ static ssize_t asic_flags_write(struct file *file, const char __user *buf,
 
 do_free:
 	kfree(buff);
-do_return:
-	rcu_read_unlock();
 	return ret;
 }
 
@@ -538,18 +551,14 @@ static ssize_t qsfp_debugfs_dump(struct file *file, char __user *buf,
 	char *tmp;
 	int ret;
 
-	rcu_read_lock();
 	ppd = private2ppd(file);
 	tmp = kmalloc(PAGE_SIZE, GFP_KERNEL);
-	if (!tmp) {
-		rcu_read_unlock();
+	if (!tmp)
 		return -ENOMEM;
-	}
 
 	ret = qsfp_dump(ppd, tmp, PAGE_SIZE);
 	if (ret > 0)
 		ret = simple_read_from_buffer(buf, count, ppos, tmp, ret);
-	rcu_read_unlock();
 	kfree(tmp);
 	return ret;
 }
@@ -565,7 +574,6 @@ static ssize_t __i2c_debugfs_write(struct file *file, const char __user *buf,
 	int offset;
 	int total_written;
 
-	rcu_read_lock();
 	ppd = private2ppd(file);
 
 	/* byte offset format: [offsetSize][i2cAddr][offsetHigh][offsetLow] */
@@ -573,16 +581,12 @@ static ssize_t __i2c_debugfs_write(struct file *file, const char __user *buf,
 	offset = *ppos & 0xffff;
 
 	/* explicitly reject invalid address 0 to catch cp and cat */
-	if (i2c_addr == 0) {
-		ret = -EINVAL;
-		goto _return;
-	}
+	if (i2c_addr == 0)
+		return -EINVAL;
 
 	buff = kmalloc(count, GFP_KERNEL);
-	if (!buff) {
-		ret = -ENOMEM;
-		goto _return;
-	}
+	if (!buff)
+		return -ENOMEM;
 
 	ret = copy_from_user(buff, buf, count);
 	if (ret > 0) {
@@ -602,8 +606,6 @@ static ssize_t __i2c_debugfs_write(struct file *file, const char __user *buf,
 
 _free:
 	kfree(buff);
-_return:
-	rcu_read_unlock();
 	return ret;
 }
 
@@ -632,7 +634,6 @@ static ssize_t __i2c_debugfs_read(struct file *file, char __user *buf,
 	int offset;
 	int total_read;
 
-	rcu_read_lock();
 	ppd = private2ppd(file);
 
 	/* byte offset format: [offsetSize][i2cAddr][offsetHigh][offsetLow] */
@@ -640,16 +641,12 @@ static ssize_t __i2c_debugfs_read(struct file *file, char __user *buf,
 	offset = *ppos & 0xffff;
 
 	/* explicitly reject invalid address 0 to catch cp and cat */
-	if (i2c_addr == 0) {
-		ret = -EINVAL;
-		goto _return;
-	}
+	if (i2c_addr == 0)
		return -EINVAL;
 
 	buff = kmalloc(count, GFP_KERNEL);
-	if (!buff) {
-		ret = -ENOMEM;
-		goto _return;
-	}
+	if (!buff)
+		return -ENOMEM;
 
 	total_read = i2c_read(ppd, target, i2c_addr, offset, buff, count);
 	if (total_read < 0) {
@@ -669,8 +666,6 @@ static ssize_t __i2c_debugfs_read(struct file *file, char __user *buf,
 
 _free:
 	kfree(buff);
-_return:
-	rcu_read_unlock();
 	return ret;
 }
 
@@ -697,26 +692,20 @@ static ssize_t __qsfp_debugfs_write(struct file *file, const char __user *buf,
 	int ret;
 	int total_written;
 
-	rcu_read_lock();
-	if (*ppos + count > QSFP_PAGESIZE * 4) { /* base page + page00-page03 */
-		ret = -EINVAL;
-		goto _return;
-	}
+	if (*ppos + count > QSFP_PAGESIZE * 4) /* base page + page00-page03 */
+		return -EINVAL;
 
 	ppd = private2ppd(file);
 
 	buff = kmalloc(count, GFP_KERNEL);
-	if (!buff) {
-		ret = -ENOMEM;
-		goto _return;
-	}
+	if (!buff)
+		return -ENOMEM;
 
 	ret = copy_from_user(buff, buf, count);
 	if (ret > 0) {
 		ret = -EFAULT;
 		goto _free;
 	}
-
 	total_written = qsfp_write(ppd, target, *ppos, buff, count);
 	if (total_written < 0) {
 		ret = total_written;
@@ -729,8 +718,6 @@ static ssize_t __qsfp_debugfs_write(struct file *file, const char __user *buf,
 
 _free:
 	kfree(buff);
-_return:
-	rcu_read_unlock();
 	return ret;
 }
 
@@ -757,7 +744,6 @@ static ssize_t __qsfp_debugfs_read(struct file *file, char __user *buf,
 	int ret;
 	int total_read;
 
-	rcu_read_lock();
 	if (*ppos + count > QSFP_PAGESIZE * 4) { /* base page + page00-page03 */
 		ret = -EINVAL;
 		goto _return;
@@ -790,7 +776,6 @@ static ssize_t __qsfp_debugfs_read(struct file *file, char __user *buf,
 _free:
 	kfree(buff);
 _return:
-	rcu_read_unlock();
 	return ret;
 }
 
@@ -1006,7 +991,6 @@ void hfi1_dbg_ibdev_exit(struct hfi1_ibdev *ibd)
 	debugfs_remove_recursive(ibd->hfi1_ibdev_dbg);
 out:
 	ibd->hfi1_ibdev_dbg = NULL;
-	synchronize_rcu();
 }
 
 /*
@@ -1031,9 +1015,7 @@ static const char * const hfi1_statnames[] = {
 };
 
 static void *_driver_stats_names_seq_start(struct seq_file *s, loff_t *pos)
-__acquires(RCU)
 {
-	rcu_read_lock();
 	if (*pos >= ARRAY_SIZE(hfi1_statnames))
 		return NULL;
 	return pos;
@@ -1051,9 +1033,7 @@ static void *_driver_stats_names_seq_next(
 }
 
 static void _driver_stats_names_seq_stop(struct seq_file *s, void *v)
-__releases(RCU)
 {
-	rcu_read_unlock();
 }
 
 static int _driver_stats_names_seq_show(struct seq_file *s, void *v)
@@ -1069,9 +1049,7 @@ DEBUGFS_SEQ_FILE_OPEN(driver_stats_names)
 DEBUGFS_FILE_OPS(driver_stats_names);
 
 static void *_driver_stats_seq_start(struct seq_file *s, loff_t *pos)
-__acquires(RCU)
 {
-	rcu_read_lock();
 	if (*pos >= ARRAY_SIZE(hfi1_statnames))
 		return NULL;
 	return pos;
@@ -1086,9 +1064,7 @@ static void *_driver_stats_seq_next(struct seq_file *s, void *v, loff_t *pos)
 }
 
 static void _driver_stats_seq_stop(struct seq_file *s, void *v)
-__releases(RCU)
 {
-	rcu_read_unlock();
 }
 
 static u64 hfi1_sps_ints(void)
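The rule behind this sweep of removals: an RCU read-side critical section must not sleep, yet these handlers allocate with GFP_KERNEL and copy to/from user space, both of which can block; the new debugfs SRCU wrappers tolerate sleeping readers instead. The snippet below is the removed anti-pattern, shown only to make the constraint concrete:

#include <linux/rcupdate.h>
#include <linux/slab.h>

static void removed_anti_pattern(void)
{
	rcu_read_lock();
	kfree(kmalloc(64, GFP_KERNEL));	/* may sleep: invalid under RCU */
	rcu_read_unlock();
}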
diff --git a/drivers/infiniband/hw/hfi1/driver.c b/drivers/infiniband/hw/hfi1/driver.c
index 8246dc7d0573..303f10555729 100644
--- a/drivers/infiniband/hw/hfi1/driver.c
+++ b/drivers/infiniband/hw/hfi1/driver.c
@@ -888,14 +888,15 @@ void set_all_slowpath(struct hfi1_devdata *dd)
 }
 
 static inline int set_armed_to_active(struct hfi1_ctxtdata *rcd,
-				      struct hfi1_packet packet,
+				      struct hfi1_packet *packet,
 				      struct hfi1_devdata *dd)
 {
 	struct work_struct *lsaw = &rcd->ppd->linkstate_active_work;
-	struct hfi1_message_header *hdr = hfi1_get_msgheader(packet.rcd->dd,
-							     packet.rhf_addr);
+	struct hfi1_message_header *hdr = hfi1_get_msgheader(packet->rcd->dd,
+							     packet->rhf_addr);
+	u8 etype = rhf_rcv_type(packet->rhf);
 
-	if (hdr2sc(hdr, packet.rhf) != 0xf) {
+	if (etype == RHF_RCV_TYPE_IB && hdr2sc(hdr, packet->rhf) != 0xf) {
 		int hwstate = read_logical_state(dd);
 
 		if (hwstate != LSTATE_ACTIVE) {
@@ -979,7 +980,7 @@ int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread)
 			/* Auto activate link on non-SC15 packet receive */
 			if (unlikely(rcd->ppd->host_link_state ==
 				     HLS_UP_ARMED) &&
-			    set_armed_to_active(rcd, packet, dd))
+			    set_armed_to_active(rcd, &packet, dd))
 				goto bail;
 			last = process_rcv_packet(&packet, thread);
 		}
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index 1ecbec192358..7e03ccd2554d 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -183,6 +183,7 @@ static int hfi1_file_open(struct inode *inode, struct file *fp)
 	if (fd) {
 		fd->rec_cpu_num = -1; /* no cpu affinity by default */
 		fd->mm = current->mm;
+		atomic_inc(&fd->mm->mm_count);
 	}
 
 	fp->private_data = fd;
@@ -222,7 +223,7 @@ static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
 		ret = assign_ctxt(fp, &uinfo);
 		if (ret < 0)
 			return ret;
-		setup_ctxt(fp);
+		ret = setup_ctxt(fp);
 		if (ret)
 			return ret;
 		ret = user_init(fp);
@@ -779,6 +780,7 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
 	mutex_unlock(&hfi1_mutex);
 	hfi1_free_ctxtdata(dd, uctxt);
 done:
+	mmdrop(fdata->mm);
 	kobject_put(&dd->kobj);
 	kfree(fdata);
 	return 0;
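Storing a raw current->mm without a reference lets the mm_struct vanish while the file stays open; bumping mm_count pins the structure (not the address-space pages) until mmdrop(). Later kernels spell this pair mmgrab()/mmdrop(). The lifetime rule in isolation:

#include <linux/sched.h>
#include <linux/mm_types.h>

struct holder { struct mm_struct *mm; };

static void holder_init(struct holder *h)
{
	h->mm = current->mm;
	atomic_inc(&h->mm->mm_count);	/* keep mm_struct alive */
}

static void holder_release(struct holder *h)
{
	mmdrop(h->mm);			/* drop the pinning reference */
}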
diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
index 1000e0fd96d9..325ec211370f 100644
--- a/drivers/infiniband/hw/hfi1/hfi.h
+++ b/drivers/infiniband/hw/hfi1/hfi.h
@@ -605,6 +605,7 @@ struct hfi1_pportdata {
 	struct work_struct freeze_work;
 	struct work_struct link_downgrade_work;
 	struct work_struct link_bounce_work;
+	struct delayed_work start_link_work;
 	/* host link state variables */
 	struct mutex hls_lock;
 	u32 host_link_state;
@@ -659,6 +660,7 @@ struct hfi1_pportdata {
 	u8 linkinit_reason;
 	u8 local_tx_rate; /* rate given to 8051 firmware */
 	u8 last_pstate; /* info only */
+	u8 qsfp_retry_count;
 
 	/* placeholders for IB MAD packet settings */
 	u8 overrun_threshold;
@@ -1272,9 +1274,26 @@ static inline int hdr2sc(struct hfi1_message_header *hdr, u64 rhf)
 	       ((!!(rhf_dc_info(rhf))) << 4);
 }
 
+#define HFI1_JKEY_WIDTH 16
+#define HFI1_JKEY_MASK (BIT(16) - 1)
+#define HFI1_ADMIN_JKEY_RANGE 32
+
+/*
+ * J_KEYs are split and allocated in the following groups:
+ * 0 - 31 - users with administrator privileges
+ * 32 - 63 - kernel protocols using KDETH packets
+ * 64 - 65535 - all other users using KDETH packets
+ */
 static inline u16 generate_jkey(kuid_t uid)
 {
-	return from_kuid(current_user_ns(), uid) & 0xffff;
+	u16 jkey = from_kuid(current_user_ns(), uid) & HFI1_JKEY_MASK;
+
+	if (capable(CAP_SYS_ADMIN))
+		jkey &= HFI1_ADMIN_JKEY_RANGE - 1;
+	else if (jkey < 64)
+		jkey |= BIT(HFI1_JKEY_WIDTH - 1);
+
+	return jkey;
 }
 
 /*
@@ -1656,7 +1675,6 @@ struct cc_state *get_cc_state_protected(struct hfi1_pportdata *ppd)
 struct hfi1_devdata *hfi1_init_dd(struct pci_dev *,
 				  const struct pci_device_id *);
 void hfi1_free_devdata(struct hfi1_devdata *);
-void cc_state_reclaim(struct rcu_head *rcu);
 struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra);
 
 /* LED beaconing functions */
@@ -1788,7 +1806,7 @@ extern unsigned int hfi1_max_mtu;
 extern unsigned int hfi1_cu;
 extern unsigned int user_credit_return_threshold;
 extern int num_user_contexts;
-extern unsigned n_krcvqs;
+extern unsigned long n_krcvqs;
 extern uint krcvqs[];
 extern int krcvqsset;
 extern uint kdeth_qp;
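Worked examples of the J_KEY partitioning in generate_jkey() above (values only, not code from the driver):

/*
 *   admin task, any uid:   jkey &= 31           -> lands in 0..31
 *   non-admin, uid 50:     50 | BIT(15)         -> 0x8032 (32818)
 *   non-admin, uid 1000:   1000 (already >= 64) -> 1000
 *
 * A non-privileged user can therefore never produce a key in the
 * administrator range 0-31 or the kernel range 32-63.
 */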
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index a358d23ecd54..384b43d2fd49 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -94,7 +94,7 @@ module_param_array(krcvqs, uint, &krcvqsset, S_IRUGO);
94MODULE_PARM_DESC(krcvqs, "Array of the number of non-control kernel receive queues by VL"); 94MODULE_PARM_DESC(krcvqs, "Array of the number of non-control kernel receive queues by VL");
95 95
96/* computed based on above array */ 96/* computed based on above array */
97unsigned n_krcvqs; 97unsigned long n_krcvqs;
98 98
99static unsigned hfi1_rcvarr_split = 25; 99static unsigned hfi1_rcvarr_split = 25;
100module_param_named(rcvarr_split, hfi1_rcvarr_split, uint, S_IRUGO); 100module_param_named(rcvarr_split, hfi1_rcvarr_split, uint, S_IRUGO);
@@ -500,6 +500,7 @@ void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
500 INIT_WORK(&ppd->link_downgrade_work, handle_link_downgrade); 500 INIT_WORK(&ppd->link_downgrade_work, handle_link_downgrade);
501 INIT_WORK(&ppd->sma_message_work, handle_sma_message); 501 INIT_WORK(&ppd->sma_message_work, handle_sma_message);
502 INIT_WORK(&ppd->link_bounce_work, handle_link_bounce); 502 INIT_WORK(&ppd->link_bounce_work, handle_link_bounce);
503 INIT_DELAYED_WORK(&ppd->start_link_work, handle_start_link);
503 INIT_WORK(&ppd->linkstate_active_work, receive_interrupt_work); 504 INIT_WORK(&ppd->linkstate_active_work, receive_interrupt_work);
504 INIT_WORK(&ppd->qsfp_info.qsfp_work, qsfp_event); 505 INIT_WORK(&ppd->qsfp_info.qsfp_work, qsfp_event);
505 506
@@ -1333,7 +1334,7 @@ static void cleanup_device_data(struct hfi1_devdata *dd)
1333 spin_unlock(&ppd->cc_state_lock); 1334 spin_unlock(&ppd->cc_state_lock);
1334 1335
1335 if (cc_state) 1336 if (cc_state)
1336 call_rcu(&cc_state->rcu, cc_state_reclaim); 1337 kfree_rcu(cc_state, rcu);
1337 } 1338 }
1338 1339
1339 free_credit_return(dd); 1340 free_credit_return(dd);
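
The init.c hunk above is one half of a two-file cleanup (the other half removes cc_state_reclaim() from mad.c further down): when an RCU callback does nothing but kfree() the enclosing object, kfree_rcu() expresses that directly. The pattern, restated from the patch (kernel-side code, not compilable standalone):

/* before: a callback whose only job is to free the object */
static void cc_state_reclaim(struct rcu_head *rcu)
{
	struct cc_state *cc_state = container_of(rcu, struct cc_state, rcu);

	kfree(cc_state);
}
	call_rcu(&cc_state->rcu, cc_state_reclaim);

/* after: the same grace-period-deferred free, no callback needed */
	kfree_rcu(cc_state, rcu);
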
diff --git a/drivers/infiniband/hw/hfi1/mad.c b/drivers/infiniband/hw/hfi1/mad.c
index 1263abe01999..7ffc14f21523 100644
--- a/drivers/infiniband/hw/hfi1/mad.c
+++ b/drivers/infiniband/hw/hfi1/mad.c
@@ -1819,6 +1819,11 @@ static int __subn_get_opa_cable_info(struct opa_smp *smp, u32 am, u8 *data,
1819 u32 len = OPA_AM_CI_LEN(am) + 1; 1819 u32 len = OPA_AM_CI_LEN(am) + 1;
1820 int ret; 1820 int ret;
1821 1821
1822 if (dd->pport->port_type != PORT_TYPE_QSFP) {
1823 smp->status |= IB_SMP_INVALID_FIELD;
1824 return reply((struct ib_mad_hdr *)smp);
1825 }
1826
1822#define __CI_PAGE_SIZE BIT(7) /* 128 bytes */ 1827#define __CI_PAGE_SIZE BIT(7) /* 128 bytes */
1823#define __CI_PAGE_MASK ~(__CI_PAGE_SIZE - 1) 1828#define __CI_PAGE_MASK ~(__CI_PAGE_SIZE - 1)
1824#define __CI_PAGE_NUM(a) ((a) & __CI_PAGE_MASK) 1829#define __CI_PAGE_NUM(a) ((a) & __CI_PAGE_MASK)
@@ -2599,7 +2604,7 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp,
2599 u8 lq, num_vls; 2604 u8 lq, num_vls;
2600 u8 res_lli, res_ler; 2605 u8 res_lli, res_ler;
2601 u64 port_mask; 2606 u64 port_mask;
2602 unsigned long port_num; 2607 u8 port_num;
2603 unsigned long vl; 2608 unsigned long vl;
2604 u32 vl_select_mask; 2609 u32 vl_select_mask;
2605 int vfi; 2610 int vfi;
@@ -2633,9 +2638,9 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp,
2633 */ 2638 */
2634 port_mask = be64_to_cpu(req->port_select_mask[3]); 2639 port_mask = be64_to_cpu(req->port_select_mask[3]);
2635 port_num = find_first_bit((unsigned long *)&port_mask, 2640 port_num = find_first_bit((unsigned long *)&port_mask,
2636 sizeof(port_mask)); 2641 sizeof(port_mask) * 8);
2637 2642
2638 if ((u8)port_num != port) { 2643 if (port_num != port) {
2639 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD; 2644 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
2640 return reply((struct ib_mad_hdr *)pmp); 2645 return reply((struct ib_mad_hdr *)pmp);
2641 } 2646 }
@@ -2837,7 +2842,7 @@ static int pma_get_opa_porterrors(struct opa_pma_mad *pmp,
2837 */ 2842 */
2838 port_mask = be64_to_cpu(req->port_select_mask[3]); 2843 port_mask = be64_to_cpu(req->port_select_mask[3]);
2839 port_num = find_first_bit((unsigned long *)&port_mask, 2844 port_num = find_first_bit((unsigned long *)&port_mask,
2840 sizeof(port_mask)); 2845 sizeof(port_mask) * 8);
2841 2846
2842 if (port_num != port) { 2847 if (port_num != port) {
2843 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD; 2848 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
@@ -3010,7 +3015,7 @@ static int pma_get_opa_errorinfo(struct opa_pma_mad *pmp,
3010 */ 3015 */
3011 port_mask = be64_to_cpu(req->port_select_mask[3]); 3016 port_mask = be64_to_cpu(req->port_select_mask[3]);
3012 port_num = find_first_bit((unsigned long *)&port_mask, 3017 port_num = find_first_bit((unsigned long *)&port_mask,
3013 sizeof(port_mask)); 3018 sizeof(port_mask) * 8);
3014 3019
3015 if (port_num != port) { 3020 if (port_num != port) {
3016 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD; 3021 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
@@ -3247,7 +3252,7 @@ static int pma_set_opa_errorinfo(struct opa_pma_mad *pmp,
3247 */ 3252 */
3248 port_mask = be64_to_cpu(req->port_select_mask[3]); 3253 port_mask = be64_to_cpu(req->port_select_mask[3]);
3249 port_num = find_first_bit((unsigned long *)&port_mask, 3254 port_num = find_first_bit((unsigned long *)&port_mask,
3250 sizeof(port_mask)); 3255 sizeof(port_mask) * 8);
3251 3256
3252 if (port_num != port) { 3257 if (port_num != port) {
3253 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD; 3258 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
@@ -3398,7 +3403,7 @@ static void apply_cc_state(struct hfi1_pportdata *ppd)
3398 3403
3399 spin_unlock(&ppd->cc_state_lock); 3404 spin_unlock(&ppd->cc_state_lock);
3400 3405
3401 call_rcu(&old_cc_state->rcu, cc_state_reclaim); 3406 kfree_rcu(old_cc_state, rcu);
3402} 3407}
3403 3408
3404static int __subn_set_opa_cong_setting(struct opa_smp *smp, u32 am, u8 *data, 3409static int __subn_set_opa_cong_setting(struct opa_smp *smp, u32 am, u8 *data,
@@ -3553,13 +3558,6 @@ static int __subn_get_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data,
3553 return reply((struct ib_mad_hdr *)smp); 3558 return reply((struct ib_mad_hdr *)smp);
3554} 3559}
3555 3560
3556void cc_state_reclaim(struct rcu_head *rcu)
3557{
3558 struct cc_state *cc_state = container_of(rcu, struct cc_state, rcu);
3559
3560 kfree(cc_state);
3561}
3562
3563static int __subn_set_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data, 3561static int __subn_set_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data,
3564 struct ib_device *ibdev, u8 port, 3562 struct ib_device *ibdev, u8 port,
3565 u32 *resp_len) 3563 u32 *resp_len)
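
The four mad.c hunks above fix the same latent bug: find_first_bit() takes its size argument in bits, but the callers passed sizeof(port_mask), i.e. 8 bytes, so only ports 0-7 could ever be found (the mlx5 mem.c hunks below make the matching fix with BITS_PER_LONG). A runnable model of the semantics, assuming the usual find_first_bit() contract of returning size when no bit is set:

#include <stdint.h>
#include <stdio.h>

/* Minimal model of find_first_bit(): scans the first 'size' *bits*
 * and returns 'size' when none of them is set. */
static unsigned long model_find_first_bit(const unsigned long *addr,
					  unsigned long size)
{
	unsigned long i, bits = 8 * sizeof(unsigned long);

	for (i = 0; i < size; i++)
		if (addr[i / bits] & (1UL << (i % bits)))
			return i;
	return size;
}

int main(void)
{
	unsigned long port_mask = 1UL << 9;	/* port 9 selected */

	/* Old call: size in bytes (8) - the scan stops before bit 9. */
	printf("%lu\n", model_find_first_bit(&port_mask, sizeof(port_mask)));
	/* Fixed call: size in bits (64) - finds bit 9 as intended. */
	printf("%lu\n", model_find_first_bit(&port_mask, sizeof(port_mask) * 8));
	return 0;
}
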
diff --git a/drivers/infiniband/hw/hfi1/pio_copy.c b/drivers/infiniband/hw/hfi1/pio_copy.c
index 8c25e1b58849..3a1ef3056282 100644
--- a/drivers/infiniband/hw/hfi1/pio_copy.c
+++ b/drivers/infiniband/hw/hfi1/pio_copy.c
@@ -771,6 +771,9 @@ void seg_pio_copy_mid(struct pio_buf *pbuf, const void *from, size_t nbytes)
771 read_extra_bytes(pbuf, from, to_fill); 771 read_extra_bytes(pbuf, from, to_fill);
772 from += to_fill; 772 from += to_fill;
773 nbytes -= to_fill; 773 nbytes -= to_fill;
774 /* may not be enough valid bytes left to align */
775 if (extra > nbytes)
776 extra = nbytes;
774 777
775 /* ...now write carry */ 778 /* ...now write carry */
776 dest = pbuf->start + (pbuf->qw_written * sizeof(u64)); 779 dest = pbuf->start + (pbuf->qw_written * sizeof(u64));
@@ -798,6 +801,15 @@ void seg_pio_copy_mid(struct pio_buf *pbuf, const void *from, size_t nbytes)
798 read_low_bytes(pbuf, from, extra); 801 read_low_bytes(pbuf, from, extra);
799 from += extra; 802 from += extra;
800 nbytes -= extra; 803 nbytes -= extra;
804 /*
805 * If no bytes are left, return early - we are done.
806 * NOTE: This short-circuit is *required* because
807 * "extra" may have been reduced in size and "from"
808 * is not aligned, as required when leaving this
809 * if block.
810 */
811 if (nbytes == 0)
812 return;
801 } 813 }
802 814
803 /* at this point, from is QW aligned */ 815 /* at this point, from is QW aligned */
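
The pio_copy.c hunks above guard the mid-segment copy against a short request: once the carry is topped up, the bytes needed to qword-align the source may exceed what the caller supplied, so extra is clamped, and the routine must return as soon as nbytes reaches zero because from never attained the alignment the tail loop assumes. A schematic, runnable model of that bookkeeping (the sizes in main() are hypothetical):

#include <stddef.h>
#include <stdio.h>

static void model_mid_copy(size_t to_fill, size_t extra, size_t nbytes)
{
	if (to_fill > nbytes)
		to_fill = nbytes;
	nbytes -= to_fill;
	if (extra > nbytes)	/* may not be enough valid bytes to align */
		extra = nbytes;
	nbytes -= extra;
	if (nbytes == 0) {	/* the required short-circuit */
		printf("done before the aligned tail\n");
		return;
	}
	printf("aligned tail copies %zu bytes\n", nbytes);
}

int main(void)
{
	/* Without the clamp, nbytes -= extra would underflow here. */
	model_mid_copy(3, 5, 4);
	return 0;
}
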
diff --git a/drivers/infiniband/hw/hfi1/qp.c b/drivers/infiniband/hw/hfi1/qp.c
index a5aa3517e7d5..4e4d8317c281 100644
--- a/drivers/infiniband/hw/hfi1/qp.c
+++ b/drivers/infiniband/hw/hfi1/qp.c
@@ -656,10 +656,6 @@ struct qp_iter *qp_iter_init(struct hfi1_ibdev *dev)
656 656
657 iter->dev = dev; 657 iter->dev = dev;
658 iter->specials = dev->rdi.ibdev.phys_port_cnt * 2; 658 iter->specials = dev->rdi.ibdev.phys_port_cnt * 2;
659 if (qp_iter_next(iter)) {
660 kfree(iter);
661 return NULL;
662 }
663 659
664 return iter; 660 return iter;
665} 661}
diff --git a/drivers/infiniband/hw/hfi1/qsfp.c b/drivers/infiniband/hw/hfi1/qsfp.c
index a207717ade2a..4e95ad810847 100644
--- a/drivers/infiniband/hw/hfi1/qsfp.c
+++ b/drivers/infiniband/hw/hfi1/qsfp.c
@@ -706,8 +706,8 @@ int get_cable_info(struct hfi1_devdata *dd, u32 port_num, u32 addr, u32 len,
706 u8 *data) 706 u8 *data)
707{ 707{
708 struct hfi1_pportdata *ppd; 708 struct hfi1_pportdata *ppd;
709 u32 excess_len = 0; 709 u32 excess_len = len;
710 int ret = 0; 710 int ret = 0, offset = 0;
711 711
712 if (port_num > dd->num_pports || port_num < 1) { 712 if (port_num > dd->num_pports || port_num < 1) {
713 dd_dev_info(dd, "%s: Invalid port number %d\n", 713 dd_dev_info(dd, "%s: Invalid port number %d\n",
@@ -740,6 +740,34 @@ int get_cable_info(struct hfi1_devdata *dd, u32 port_num, u32 addr, u32 len,
740 } 740 }
741 741
742 memcpy(data, &ppd->qsfp_info.cache[addr], len); 742 memcpy(data, &ppd->qsfp_info.cache[addr], len);
743
744 if (addr <= QSFP_MONITOR_VAL_END &&
745 (addr + len) >= QSFP_MONITOR_VAL_START) {
746 /* Overlap with the dynamic channel monitor range */
747 if (addr < QSFP_MONITOR_VAL_START) {
748 if (addr + len <= QSFP_MONITOR_VAL_END)
749 len = addr + len - QSFP_MONITOR_VAL_START;
750 else
751 len = QSFP_MONITOR_RANGE;
752 offset = QSFP_MONITOR_VAL_START - addr;
753 addr = QSFP_MONITOR_VAL_START;
754 } else if (addr == QSFP_MONITOR_VAL_START) {
755 offset = 0;
756 if (addr + len > QSFP_MONITOR_VAL_END)
757 len = QSFP_MONITOR_RANGE;
758 } else {
759 offset = 0;
760 if (addr + len > QSFP_MONITOR_VAL_END)
761 len = QSFP_MONITOR_VAL_END - addr + 1;
762 }
763 /* Refresh the values of the dynamic monitors from the cable */
764 ret = one_qsfp_read(ppd, dd->hfi1_id, addr, data + offset, len);
765 if (ret != len) {
766 ret = -EAGAIN;
767 goto set_zeroes;
768 }
769 }
770
743 return 0; 771 return 0;
744 772
745set_zeroes: 773set_zeroes:
diff --git a/drivers/infiniband/hw/hfi1/qsfp.h b/drivers/infiniband/hw/hfi1/qsfp.h
index 69275ebd9597..36cf52359848 100644
--- a/drivers/infiniband/hw/hfi1/qsfp.h
+++ b/drivers/infiniband/hw/hfi1/qsfp.h
@@ -74,6 +74,9 @@
74/* Defined fields that Intel requires of qualified cables */ 74/* Defined fields that Intel requires of qualified cables */
75/* Byte 0 is Identifier, not checked */ 75/* Byte 0 is Identifier, not checked */
76/* Byte 1 is reserved "status MSB" */ 76/* Byte 1 is reserved "status MSB" */
77#define QSFP_MONITOR_VAL_START 22
78#define QSFP_MONITOR_VAL_END 81
79#define QSFP_MONITOR_RANGE (QSFP_MONITOR_VAL_END - QSFP_MONITOR_VAL_START + 1)
77#define QSFP_TX_CTRL_BYTE_OFFS 86 80#define QSFP_TX_CTRL_BYTE_OFFS 86
78#define QSFP_PWR_CTRL_BYTE_OFFS 93 81#define QSFP_PWR_CTRL_BYTE_OFFS 93
79#define QSFP_CDR_CTRL_BYTE_OFFS 98 82#define QSFP_CDR_CTRL_BYTE_OFFS 98
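
The qsfp.c and qsfp.h hunks above stop serving bytes 22-81 (the dynamically updated monitor values) straight from the cache: any read overlapping that window triggers a fresh one_qsfp_read() for just the overlapping slice. A runnable model of the clamping arithmetic; note that the kernel's addr == QSFP_MONITOR_VAL_START branch folds into the general case, since MON_END - MON_START + 1 equals MON_RANGE:

#include <stdint.h>
#include <stdio.h>

#define MON_START	22	/* QSFP_MONITOR_VAL_START */
#define MON_END		81	/* QSFP_MONITOR_VAL_END */
#define MON_RANGE	(MON_END - MON_START + 1)

/* For a cached read of [addr, addr+len), compute the slice of the
 * monitor window to refresh from the cable and where it lands in
 * the caller's buffer. */
static void model_monitor_window(uint32_t addr, uint32_t len)
{
	uint32_t off = 0;

	if (addr > MON_END || addr + len < MON_START) {
		printf("no overlap, cache only\n");
		return;
	}
	if (addr < MON_START) {
		len = (addr + len <= MON_END) ? addr + len - MON_START
					      : MON_RANGE;
		off = MON_START - addr;
		addr = MON_START;
	} else if (addr + len > MON_END) {
		len = MON_END - addr + 1;
	}
	printf("refresh %u bytes at byte %u -> buffer offset %u\n",
	       len, addr, off);
}

int main(void)
{
	model_monitor_window(0, 128);	/* refresh 60 bytes at 22, offset 22 */
	model_monitor_window(50, 10);	/* refresh 10 bytes at 50, offset 0 */
	return 0;
}
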
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
index 0ecf27903dc2..1694037d1eee 100644
--- a/drivers/infiniband/hw/hfi1/user_sdma.c
+++ b/drivers/infiniband/hw/hfi1/user_sdma.c
@@ -114,6 +114,8 @@ MODULE_PARM_DESC(sdma_comp_size, "Size of User SDMA completion ring. Default: 12
114#define KDETH_HCRC_LOWER_SHIFT 24 114#define KDETH_HCRC_LOWER_SHIFT 24
115#define KDETH_HCRC_LOWER_MASK 0xff 115#define KDETH_HCRC_LOWER_MASK 0xff
116 116
117#define AHG_KDETH_INTR_SHIFT 12
118
117#define PBC2LRH(x) ((((x) & 0xfff) << 2) - 4) 119#define PBC2LRH(x) ((((x) & 0xfff) << 2) - 4)
118#define LRH2PBC(x) ((((x) >> 2) + 1) & 0xfff) 120#define LRH2PBC(x) ((((x) >> 2) + 1) & 0xfff)
119 121
@@ -1480,7 +1482,8 @@ static int set_txreq_header_ahg(struct user_sdma_request *req,
1480 /* Clear KDETH.SH on last packet */ 1482 /* Clear KDETH.SH on last packet */
1481 if (unlikely(tx->flags & TXREQ_FLAGS_REQ_LAST_PKT)) { 1483 if (unlikely(tx->flags & TXREQ_FLAGS_REQ_LAST_PKT)) {
1482 val |= cpu_to_le16(KDETH_GET(hdr->kdeth.ver_tid_offset, 1484 val |= cpu_to_le16(KDETH_GET(hdr->kdeth.ver_tid_offset,
1483 INTR) >> 16); 1485 INTR) <<
1486 AHG_KDETH_INTR_SHIFT);
1484 val &= cpu_to_le16(~(1U << 13)); 1487 val &= cpu_to_le16(~(1U << 13));
1485 AHG_HEADER_SET(req->ahg, diff, 7, 16, 14, val); 1488 AHG_HEADER_SET(req->ahg, diff, 7, 16, 14, val);
1486 } else { 1489 } else {
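
The user_sdma.c hunk above repositions the KDETH INTR flag for the AHG header. Assuming KDETH_GET() returns the flag normalized to bit 0 (which is what the fix implies), the old right-shift by 16 always produced zero; the flag belongs at bit 12 of the 16-bit AHG value. A small sketch of the before/after arithmetic:

#include <stdint.h>
#include <stdio.h>

#define AHG_KDETH_INTR_SHIFT	12

int main(void)
{
	uint32_t intr = 1;	/* INTR flag as extracted by KDETH_GET() */

	uint16_t old_val = (uint16_t)(intr >> 16);		 /* always 0 */
	uint16_t new_val = (uint16_t)(intr << AHG_KDETH_INTR_SHIFT);

	printf("old: 0x%04x, new: 0x%04x\n", old_val, new_val);	/* 0x0000, 0x1000 */
	return 0;
}
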
diff --git a/drivers/infiniband/hw/i40iw/i40iw.h b/drivers/infiniband/hw/i40iw/i40iw.h
index b738acdb9b02..8ec09e470f84 100644
--- a/drivers/infiniband/hw/i40iw/i40iw.h
+++ b/drivers/infiniband/hw/i40iw/i40iw.h
@@ -232,7 +232,7 @@ struct i40iw_device {
232 struct i40e_client *client; 232 struct i40e_client *client;
233 struct i40iw_hw hw; 233 struct i40iw_hw hw;
234 struct i40iw_cm_core cm_core; 234 struct i40iw_cm_core cm_core;
235 unsigned long *mem_resources; 235 u8 *mem_resources;
236 unsigned long *allocated_qps; 236 unsigned long *allocated_qps;
237 unsigned long *allocated_cqs; 237 unsigned long *allocated_cqs;
238 unsigned long *allocated_mrs; 238 unsigned long *allocated_mrs;
@@ -435,8 +435,8 @@ static inline int i40iw_alloc_resource(struct i40iw_device *iwdev,
435 *next = resource_num + 1; 435 *next = resource_num + 1;
436 if (*next == max_resources) 436 if (*next == max_resources)
437 *next = 0; 437 *next = 0;
438 spin_unlock_irqrestore(&iwdev->resource_lock, flags);
439 *req_resource_num = resource_num; 438 *req_resource_num = resource_num;
439 spin_unlock_irqrestore(&iwdev->resource_lock, flags);
440 440
441 return 0; 441 return 0;
442} 442}
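
The i40iw.h hunk above is a lock-ordering fix rather than a cosmetic move: once resource_lock is dropped, a concurrent allocator can race with the caller, so the claimed resource number must be published through the out-parameter while the lock is still held. The corrected shape, condensed from the hunk:

	spin_lock_irqsave(&iwdev->resource_lock, flags);
	/* ... find and claim resource_num, update *next ... */
	*req_resource_num = resource_num;	/* publish while still locked */
	spin_unlock_irqrestore(&iwdev->resource_lock, flags);
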
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
index 5026dc79978a..7ca0638579c0 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
@@ -535,8 +535,8 @@ static struct i40iw_puda_buf *i40iw_form_cm_frame(struct i40iw_cm_node *cm_node,
535 buf += hdr_len; 535 buf += hdr_len;
536 } 536 }
537 537
538 if (pd_len) 538 if (pdata && pdata->addr)
539 memcpy(buf, pdata->addr, pd_len); 539 memcpy(buf, pdata->addr, pdata->size);
540 540
541 atomic_set(&sqbuf->refcount, 1); 541 atomic_set(&sqbuf->refcount, 1);
542 542
@@ -3347,26 +3347,6 @@ int i40iw_cm_disconn(struct i40iw_qp *iwqp)
3347} 3347}
3348 3348
3349/** 3349/**
3350 * i40iw_loopback_nop - Send a nop
3351 * @qp: associated hw qp
3352 */
3353static void i40iw_loopback_nop(struct i40iw_sc_qp *qp)
3354{
3355 u64 *wqe;
3356 u64 header;
3357
3358 wqe = qp->qp_uk.sq_base->elem;
3359 set_64bit_val(wqe, 0, 0);
3360 set_64bit_val(wqe, 8, 0);
3361 set_64bit_val(wqe, 16, 0);
3362
3363 header = LS_64(I40IWQP_OP_NOP, I40IWQPSQ_OPCODE) |
3364 LS_64(0, I40IWQPSQ_SIGCOMPL) |
3365 LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);
3366 set_64bit_val(wqe, 24, header);
3367}
3368
3369/**
3370 * i40iw_qp_disconnect - free qp and close cm 3350 * i40iw_qp_disconnect - free qp and close cm
3371 * @iwqp: associated qp for the connection 3351 * @iwqp: associated qp for the connection

3372 */ 3352 */
@@ -3638,7 +3618,7 @@ int i40iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
3638 } else { 3618 } else {
3639 if (iwqp->page) 3619 if (iwqp->page)
3640 iwqp->sc_qp.qp_uk.sq_base = kmap(iwqp->page); 3620 iwqp->sc_qp.qp_uk.sq_base = kmap(iwqp->page);
3641 i40iw_loopback_nop(&iwqp->sc_qp); 3621 dev->iw_priv_qp_ops->qp_send_lsmm(&iwqp->sc_qp, NULL, 0, 0);
3642 } 3622 }
3643 3623
3644 if (iwqp->page) 3624 if (iwqp->page)
diff --git a/drivers/infiniband/hw/i40iw/i40iw_hw.c b/drivers/infiniband/hw/i40iw/i40iw_hw.c
index 3ee0cad96bc6..0c92a40b3e86 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_hw.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_hw.c
@@ -265,6 +265,7 @@ void i40iw_next_iw_state(struct i40iw_qp *iwqp,
265 info.dont_send_fin = false; 265 info.dont_send_fin = false;
266 if (iwqp->sc_qp.term_flags && (state == I40IW_QP_STATE_ERROR)) 266 if (iwqp->sc_qp.term_flags && (state == I40IW_QP_STATE_ERROR))
267 info.reset_tcp_conn = true; 267 info.reset_tcp_conn = true;
268 iwqp->hw_iwarp_state = state;
268 i40iw_hw_modify_qp(iwqp->iwdev, iwqp, &info, 0); 269 i40iw_hw_modify_qp(iwqp->iwdev, iwqp, &info, 0);
269} 270}
270 271
diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
index 6e9081380a27..445e230d5ff8 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_main.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
@@ -100,7 +100,7 @@ static struct notifier_block i40iw_net_notifier = {
100 .notifier_call = i40iw_net_event 100 .notifier_call = i40iw_net_event
101}; 101};
102 102
103static int i40iw_notifiers_registered; 103static atomic_t i40iw_notifiers_registered;
104 104
105/** 105/**
106 * i40iw_find_i40e_handler - find a handler given a client info 106 * i40iw_find_i40e_handler - find a handler given a client info
@@ -1342,12 +1342,11 @@ exit:
1342 */ 1342 */
1343static void i40iw_register_notifiers(void) 1343static void i40iw_register_notifiers(void)
1344{ 1344{
1345 if (!i40iw_notifiers_registered) { 1345 if (atomic_inc_return(&i40iw_notifiers_registered) == 1) {
1346 register_inetaddr_notifier(&i40iw_inetaddr_notifier); 1346 register_inetaddr_notifier(&i40iw_inetaddr_notifier);
1347 register_inet6addr_notifier(&i40iw_inetaddr6_notifier); 1347 register_inet6addr_notifier(&i40iw_inetaddr6_notifier);
1348 register_netevent_notifier(&i40iw_net_notifier); 1348 register_netevent_notifier(&i40iw_net_notifier);
1349 } 1349 }
1350 i40iw_notifiers_registered++;
1351} 1350}
1352 1351
1353/** 1352/**
@@ -1429,8 +1428,7 @@ static void i40iw_deinit_device(struct i40iw_device *iwdev, bool reset, bool del
1429 i40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx); 1428 i40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx);
1430 /* fallthrough */ 1429 /* fallthrough */
1431 case INET_NOTIFIER: 1430 case INET_NOTIFIER:
1432 if (i40iw_notifiers_registered > 0) { 1431 if (!atomic_dec_return(&i40iw_notifiers_registered)) {
1433 i40iw_notifiers_registered--;
1434 unregister_netevent_notifier(&i40iw_net_notifier); 1432 unregister_netevent_notifier(&i40iw_net_notifier);
1435 unregister_inetaddr_notifier(&i40iw_inetaddr_notifier); 1433 unregister_inetaddr_notifier(&i40iw_inetaddr_notifier);
1436 unregister_inet6addr_notifier(&i40iw_inetaddr6_notifier); 1434 unregister_inet6addr_notifier(&i40iw_inetaddr6_notifier);
@@ -1558,6 +1556,10 @@ static int i40iw_open(struct i40e_info *ldev, struct i40e_client *client)
1558 enum i40iw_status_code status; 1556 enum i40iw_status_code status;
1559 struct i40iw_handler *hdl; 1557 struct i40iw_handler *hdl;
1560 1558
1559 hdl = i40iw_find_netdev(ldev->netdev);
1560 if (hdl)
1561 return 0;
1562
1561 hdl = kzalloc(sizeof(*hdl), GFP_KERNEL); 1563 hdl = kzalloc(sizeof(*hdl), GFP_KERNEL);
1562 if (!hdl) 1564 if (!hdl)
1563 return -ENOMEM; 1565 return -ENOMEM;
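
The i40iw_main.c hunks above convert the notifier bookkeeping from a plain int to an atomic_t, closing the check-then-increment race between concurrently probed devices. A runnable stdatomic model of the same transitions (atomic_inc_return(&x) == 1 in the patch corresponds to atomic_fetch_add(&x, 1) == 0 here):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int registered;

/* Only the 0 -> 1 transition registers ... */
static void model_register(void)
{
	if (atomic_fetch_add(&registered, 1) == 0)
		printf("register notifiers\n");
}

/* ... and only the 1 -> 0 transition unregisters. */
static void model_unregister(void)
{
	if (atomic_fetch_sub(&registered, 1) == 1)
		printf("unregister notifiers\n");
}

int main(void)
{
	model_register();	/* registers */
	model_register();	/* refcount only */
	model_unregister();	/* refcount only */
	model_unregister();	/* unregisters */
	return 0;
}
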
diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c
index 0e8db0a35141..6fd043b1d714 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_utils.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c
@@ -673,8 +673,11 @@ enum i40iw_status_code i40iw_free_virt_mem(struct i40iw_hw *hw,
673{ 673{
674 if (!mem) 674 if (!mem)
675 return I40IW_ERR_PARAM; 675 return I40IW_ERR_PARAM;
676 /*
677 * mem->va points to the parent of mem, so both mem and mem->va
678 * cannot be touched once mem->va is freed
679 */
676 kfree(mem->va); 680 kfree(mem->va);
677 mem->va = NULL;
678 return 0; 681 return 0;
679} 682}
680 683
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index 2360338877bf..6329c971c22f 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -794,7 +794,6 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
794 return &iwqp->ibqp; 794 return &iwqp->ibqp;
795error: 795error:
796 i40iw_free_qp_resources(iwdev, iwqp, qp_num); 796 i40iw_free_qp_resources(iwdev, iwqp, qp_num);
797 kfree(mem);
798 return ERR_PTR(err_code); 797 return ERR_PTR(err_code);
799} 798}
800 799
@@ -1926,8 +1925,7 @@ static int i40iw_dereg_mr(struct ib_mr *ib_mr)
1926 } 1925 }
1927 if (iwpbl->pbl_allocated) 1926 if (iwpbl->pbl_allocated)
1928 i40iw_free_pble(iwdev->pble_rsrc, palloc); 1927 i40iw_free_pble(iwdev->pble_rsrc, palloc);
1929 kfree(iwpbl->iwmr); 1928 kfree(iwmr);
1930 iwpbl->iwmr = NULL;
1931 return 0; 1929 return 0;
1932 } 1930 }
1933 1931
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index d6fc8a6e8c33..5df63dacaaa3 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -576,8 +576,8 @@ static int mlx4_ib_ipoib_csum_ok(__be16 status, __be16 checksum)
576 checksum == cpu_to_be16(0xffff); 576 checksum == cpu_to_be16(0xffff);
577} 577}
578 578
579static int use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct ib_wc *wc, 579static void use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct ib_wc *wc,
580 unsigned tail, struct mlx4_cqe *cqe, int is_eth) 580 unsigned tail, struct mlx4_cqe *cqe, int is_eth)
581{ 581{
582 struct mlx4_ib_proxy_sqp_hdr *hdr; 582 struct mlx4_ib_proxy_sqp_hdr *hdr;
583 583
@@ -600,8 +600,6 @@ static int use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct
600 wc->slid = be16_to_cpu(hdr->tun.slid_mac_47_32); 600 wc->slid = be16_to_cpu(hdr->tun.slid_mac_47_32);
601 wc->sl = (u8) (be16_to_cpu(hdr->tun.sl_vid) >> 12); 601 wc->sl = (u8) (be16_to_cpu(hdr->tun.sl_vid) >> 12);
602 } 602 }
603
604 return 0;
605} 603}
606 604
607static void mlx4_ib_qp_sw_comp(struct mlx4_ib_qp *qp, int num_entries, 605static void mlx4_ib_qp_sw_comp(struct mlx4_ib_qp *qp, int num_entries,
@@ -689,12 +687,6 @@ repoll:
689 is_error = (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == 687 is_error = (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
690 MLX4_CQE_OPCODE_ERROR; 688 MLX4_CQE_OPCODE_ERROR;
691 689
692 if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_OPCODE_NOP &&
693 is_send)) {
694 pr_warn("Completion for NOP opcode detected!\n");
695 return -EINVAL;
696 }
697
698 /* Resize CQ in progress */ 690 /* Resize CQ in progress */
699 if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_CQE_OPCODE_RESIZE)) { 691 if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_CQE_OPCODE_RESIZE)) {
700 if (cq->resize_buf) { 692 if (cq->resize_buf) {
@@ -720,12 +712,6 @@ repoll:
720 */ 712 */
721 mqp = __mlx4_qp_lookup(to_mdev(cq->ibcq.device)->dev, 713 mqp = __mlx4_qp_lookup(to_mdev(cq->ibcq.device)->dev,
722 be32_to_cpu(cqe->vlan_my_qpn)); 714 be32_to_cpu(cqe->vlan_my_qpn));
723 if (unlikely(!mqp)) {
724 pr_warn("CQ %06x with entry for unknown QPN %06x\n",
725 cq->mcq.cqn, be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK);
726 return -EINVAL;
727 }
728
729 *cur_qp = to_mibqp(mqp); 715 *cur_qp = to_mibqp(mqp);
730 } 716 }
731 717
@@ -738,11 +724,6 @@ repoll:
738 /* SRQ is also in the radix tree */ 724 /* SRQ is also in the radix tree */
739 msrq = mlx4_srq_lookup(to_mdev(cq->ibcq.device)->dev, 725 msrq = mlx4_srq_lookup(to_mdev(cq->ibcq.device)->dev,
740 srq_num); 726 srq_num);
741 if (unlikely(!msrq)) {
742 pr_warn("CQ %06x with entry for unknown SRQN %06x\n",
743 cq->mcq.cqn, srq_num);
744 return -EINVAL;
745 }
746 } 727 }
747 728
748 if (is_send) { 729 if (is_send) {
@@ -852,9 +833,11 @@ repoll:
852 if (mlx4_is_mfunc(to_mdev(cq->ibcq.device)->dev)) { 833 if (mlx4_is_mfunc(to_mdev(cq->ibcq.device)->dev)) {
853 if ((*cur_qp)->mlx4_ib_qp_type & 834 if ((*cur_qp)->mlx4_ib_qp_type &
854 (MLX4_IB_QPT_PROXY_SMI_OWNER | 835 (MLX4_IB_QPT_PROXY_SMI_OWNER |
855 MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI)) 836 MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI)) {
856 return use_tunnel_data(*cur_qp, cq, wc, tail, 837 use_tunnel_data(*cur_qp, cq, wc, tail, cqe,
857 cqe, is_eth); 838 is_eth);
839 return 0;
840 }
858 } 841 }
859 842
860 wc->slid = be16_to_cpu(cqe->rlid); 843 wc->slid = be16_to_cpu(cqe->rlid);
@@ -891,7 +874,6 @@ int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
891 struct mlx4_ib_qp *cur_qp = NULL; 874 struct mlx4_ib_qp *cur_qp = NULL;
892 unsigned long flags; 875 unsigned long flags;
893 int npolled; 876 int npolled;
894 int err = 0;
895 struct mlx4_ib_dev *mdev = to_mdev(cq->ibcq.device); 877 struct mlx4_ib_dev *mdev = to_mdev(cq->ibcq.device);
896 878
897 spin_lock_irqsave(&cq->lock, flags); 879 spin_lock_irqsave(&cq->lock, flags);
@@ -901,8 +883,7 @@ int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
901 } 883 }
902 884
903 for (npolled = 0; npolled < num_entries; ++npolled) { 885 for (npolled = 0; npolled < num_entries; ++npolled) {
904 err = mlx4_ib_poll_one(cq, &cur_qp, wc + npolled); 886 if (mlx4_ib_poll_one(cq, &cur_qp, wc + npolled))
905 if (err)
906 break; 887 break;
907 } 888 }
908 889
@@ -911,10 +892,7 @@ int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
911out: 892out:
912 spin_unlock_irqrestore(&cq->lock, flags); 893 spin_unlock_irqrestore(&cq->lock, flags);
913 894
914 if (err == 0 || err == -EAGAIN) 895 return npolled;
915 return npolled;
916 else
917 return err;
918} 896}
919 897
920int mlx4_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags) 898int mlx4_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
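
The mlx4 cq.c hunks above (and the matching mlx5 cq.c hunks below) change the CQ polling contract: a failed poll of one entry now simply ends the batch instead of propagating -EINVAL to the caller, so ib_poll_cq() always returns the count of completions actually polled. The resulting loop, condensed from the hunk:

	for (npolled = 0; npolled < num_entries; ++npolled)
		if (mlx4_ib_poll_one(cq, &cur_qp, wc + npolled))
			break;
	/* ... */
	return npolled;		/* a count, never an errno */
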
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 9c2e53d28f98..0f21c3a25552 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -1128,6 +1128,27 @@ void handle_port_mgmt_change_event(struct work_struct *work)
1128 1128
1129 /* Generate GUID changed event */ 1129 /* Generate GUID changed event */
1130 if (changed_attr & MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK) { 1130 if (changed_attr & MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK) {
1131 if (mlx4_is_master(dev->dev)) {
1132 union ib_gid gid;
1133 int err = 0;
1134
1135 if (!eqe->event.port_mgmt_change.params.port_info.gid_prefix)
1136 err = __mlx4_ib_query_gid(&dev->ib_dev, port, 0, &gid, 1);
1137 else
1138 gid.global.subnet_prefix =
1139 eqe->event.port_mgmt_change.params.port_info.gid_prefix;
1140 if (err) {
1141 pr_warn("Could not change QP1 subnet prefix for port %d: query_gid error (%d)\n",
1142 port, err);
1143 } else {
1144 pr_debug("Changing QP1 subnet prefix for port %d. old=0x%llx. new=0x%llx\n",
1145 port,
1146 (u64)atomic64_read(&dev->sriov.demux[port - 1].subnet_prefix),
1147 be64_to_cpu(gid.global.subnet_prefix));
1148 atomic64_set(&dev->sriov.demux[port - 1].subnet_prefix,
1149 be64_to_cpu(gid.global.subnet_prefix));
1150 }
1151 }
1131 mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE); 1152 mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE);
1132 /*if master, notify all slaves*/ 1153 /*if master, notify all slaves*/
1133 if (mlx4_is_master(dev->dev)) 1154 if (mlx4_is_master(dev->dev))
@@ -2202,6 +2223,8 @@ int mlx4_ib_init_sriov(struct mlx4_ib_dev *dev)
2202 if (err) 2223 if (err)
2203 goto demux_err; 2224 goto demux_err;
2204 dev->sriov.demux[i].guid_cache[0] = gid.global.interface_id; 2225 dev->sriov.demux[i].guid_cache[0] = gid.global.interface_id;
2226 atomic64_set(&dev->sriov.demux[i].subnet_prefix,
2227 be64_to_cpu(gid.global.subnet_prefix));
2205 err = alloc_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1, 2228 err = alloc_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1,
2206 &dev->sriov.sqps[i]); 2229 &dev->sriov.sqps[i]);
2207 if (err) 2230 if (err)
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 2af44c2de262..87ba9bca4181 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -2202,6 +2202,9 @@ static int mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev)
2202 bool per_port = !!(ibdev->dev->caps.flags2 & 2202 bool per_port = !!(ibdev->dev->caps.flags2 &
2203 MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT); 2203 MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT);
2204 2204
2205 if (mlx4_is_slave(ibdev->dev))
2206 return 0;
2207
2205 for (i = 0; i < MLX4_DIAG_COUNTERS_TYPES; i++) { 2208 for (i = 0; i < MLX4_DIAG_COUNTERS_TYPES; i++) {
2206 /* i == 1 means we are building port counters */ 2209 /* i == 1 means we are building port counters */
2207 if (i && !per_port) 2210 if (i && !per_port)
diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
index 8f7ad07915b0..097bfcc4ee99 100644
--- a/drivers/infiniband/hw/mlx4/mcg.c
+++ b/drivers/infiniband/hw/mlx4/mcg.c
@@ -489,7 +489,7 @@ static u8 get_leave_state(struct mcast_group *group)
489 if (!group->members[i]) 489 if (!group->members[i])
490 leave_state |= (1 << i); 490 leave_state |= (1 << i);
491 491
492 return leave_state & (group->rec.scope_join_state & 7); 492 return leave_state & (group->rec.scope_join_state & 0xf);
493} 493}
494 494
495static int join_group(struct mcast_group *group, int slave, u8 join_mask) 495static int join_group(struct mcast_group *group, int slave, u8 join_mask)
@@ -564,8 +564,8 @@ static void mlx4_ib_mcg_timeout_handler(struct work_struct *work)
564 } else 564 } else
565 mcg_warn_group(group, "DRIVER BUG\n"); 565 mcg_warn_group(group, "DRIVER BUG\n");
566 } else if (group->state == MCAST_LEAVE_SENT) { 566 } else if (group->state == MCAST_LEAVE_SENT) {
567 if (group->rec.scope_join_state & 7) 567 if (group->rec.scope_join_state & 0xf)
568 group->rec.scope_join_state &= 0xf8; 568 group->rec.scope_join_state &= 0xf0;
569 group->state = MCAST_IDLE; 569 group->state = MCAST_IDLE;
570 mutex_unlock(&group->lock); 570 mutex_unlock(&group->lock);
571 if (release_group(group, 1)) 571 if (release_group(group, 1))
@@ -605,7 +605,7 @@ static int handle_leave_req(struct mcast_group *group, u8 leave_mask,
605static int handle_join_req(struct mcast_group *group, u8 join_mask, 605static int handle_join_req(struct mcast_group *group, u8 join_mask,
606 struct mcast_req *req) 606 struct mcast_req *req)
607{ 607{
608 u8 group_join_state = group->rec.scope_join_state & 7; 608 u8 group_join_state = group->rec.scope_join_state & 0xf;
609 int ref = 0; 609 int ref = 0;
610 u16 status; 610 u16 status;
611 struct ib_sa_mcmember_data *sa_data = (struct ib_sa_mcmember_data *)req->sa_mad.data; 611 struct ib_sa_mcmember_data *sa_data = (struct ib_sa_mcmember_data *)req->sa_mad.data;
@@ -690,8 +690,8 @@ static void mlx4_ib_mcg_work_handler(struct work_struct *work)
690 u8 cur_join_state; 690 u8 cur_join_state;
691 691
692 resp_join_state = ((struct ib_sa_mcmember_data *) 692 resp_join_state = ((struct ib_sa_mcmember_data *)
693 group->response_sa_mad.data)->scope_join_state & 7; 693 group->response_sa_mad.data)->scope_join_state & 0xf;
694 cur_join_state = group->rec.scope_join_state & 7; 694 cur_join_state = group->rec.scope_join_state & 0xf;
695 695
696 if (method == IB_MGMT_METHOD_GET_RESP) { 696 if (method == IB_MGMT_METHOD_GET_RESP) {
697 /* successful join */ 697 /* successful join */
@@ -710,7 +710,7 @@ process_requests:
710 req = list_first_entry(&group->pending_list, struct mcast_req, 710 req = list_first_entry(&group->pending_list, struct mcast_req,
711 group_list); 711 group_list);
712 sa_data = (struct ib_sa_mcmember_data *)req->sa_mad.data; 712 sa_data = (struct ib_sa_mcmember_data *)req->sa_mad.data;
713 req_join_state = sa_data->scope_join_state & 0x7; 713 req_join_state = sa_data->scope_join_state & 0xf;
714 714
715 /* For a leave request, we will immediately answer the VF, and 715 /* For a leave request, we will immediately answer the VF, and
716 * update our internal counters. The actual leave will be sent 716 * update our internal counters. The actual leave will be sent
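
The mcg.c hunks above widen every JoinState mask from 0x7 to 0xf. JoinState occupies the low nibble of scope_join_state and is a four-bit field, so the old mask silently discarded bit 3 (as used for send-only full-member joins - an assumption worth flagging). A runnable illustration:

#include <stdio.h>

int main(void)
{
	/* scope in the high nibble, JoinState in the low nibble */
	unsigned char scope_join_state = 0x28;	/* scope 2, JoinState 0x8 */

	printf("0x%x\n", scope_join_state & 0x7);	/* 0x0: bit 3 lost */
	printf("0x%x\n", scope_join_state & 0xf);	/* 0x8: preserved */
	return 0;
}
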
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 7c5832ede4bd..686ab48ff644 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -448,7 +448,7 @@ struct mlx4_ib_demux_ctx {
448 struct workqueue_struct *wq; 448 struct workqueue_struct *wq;
449 struct workqueue_struct *ud_wq; 449 struct workqueue_struct *ud_wq;
450 spinlock_t ud_lock; 450 spinlock_t ud_lock;
451 __be64 subnet_prefix; 451 atomic64_t subnet_prefix;
452 __be64 guid_cache[128]; 452 __be64 guid_cache[128];
453 struct mlx4_ib_dev *dev; 453 struct mlx4_ib_dev *dev;
454 /* the following lock protects both mcg_table and mcg_mgid0_list */ 454 /* the following lock protects both mcg_table and mcg_mgid0_list */
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 768085f59566..7fb9629bd12b 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -2493,24 +2493,27 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_ud_wr *wr,
2493 sqp->ud_header.grh.flow_label = 2493 sqp->ud_header.grh.flow_label =
2494 ah->av.ib.sl_tclass_flowlabel & cpu_to_be32(0xfffff); 2494 ah->av.ib.sl_tclass_flowlabel & cpu_to_be32(0xfffff);
2495 sqp->ud_header.grh.hop_limit = ah->av.ib.hop_limit; 2495 sqp->ud_header.grh.hop_limit = ah->av.ib.hop_limit;
2496 if (is_eth) 2496 if (is_eth) {
2497 memcpy(sqp->ud_header.grh.source_gid.raw, sgid.raw, 16); 2497 memcpy(sqp->ud_header.grh.source_gid.raw, sgid.raw, 16);
2498 else { 2498 } else {
2499 if (mlx4_is_mfunc(to_mdev(ib_dev)->dev)) { 2499 if (mlx4_is_mfunc(to_mdev(ib_dev)->dev)) {
2500 /* When multi-function is enabled, the ib_core gid 2500 /* When multi-function is enabled, the ib_core gid
2501 * indexes don't necessarily match the hw ones, so 2501 * indexes don't necessarily match the hw ones, so
2502 * we must use our own cache */ 2502 * we must use our own cache
2503 sqp->ud_header.grh.source_gid.global.subnet_prefix = 2503 */
2504 to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1]. 2504 sqp->ud_header.grh.source_gid.global.subnet_prefix =
2505 subnet_prefix; 2505 cpu_to_be64(atomic64_read(&(to_mdev(ib_dev)->sriov.
2506 sqp->ud_header.grh.source_gid.global.interface_id = 2506 demux[sqp->qp.port - 1].
2507 to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1]. 2507 subnet_prefix)));
2508 guid_cache[ah->av.ib.gid_index]; 2508 sqp->ud_header.grh.source_gid.global.interface_id =
2509 } else 2509 to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
2510 ib_get_cached_gid(ib_dev, 2510 guid_cache[ah->av.ib.gid_index];
2511 be32_to_cpu(ah->av.ib.port_pd) >> 24, 2511 } else {
2512 ah->av.ib.gid_index, 2512 ib_get_cached_gid(ib_dev,
2513 &sqp->ud_header.grh.source_gid, NULL); 2513 be32_to_cpu(ah->av.ib.port_pd) >> 24,
2514 ah->av.ib.gid_index,
2515 &sqp->ud_header.grh.source_gid, NULL);
2516 }
2514 } 2517 }
2515 memcpy(sqp->ud_header.grh.destination_gid.raw, 2518 memcpy(sqp->ud_header.grh.destination_gid.raw,
2516 ah->av.ib.dgid, 16); 2519 ah->av.ib.dgid, 16);
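
The mlx4_ib.h and qp.c hunks above (together with the earlier mad.c hunks that seed and update the value) turn the cached subnet prefix into an atomic64_t: it is written from the port-management event handler and read on the QP1 send path with no shared lock, and a plain 64-bit field can tear on 32-bit hosts. A runnable model of the tear-free cell:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t subnet_prefix;

/* 64-bit loads and stores through the atomic are indivisible even on
 * 32-bit hosts, so a reader can never observe a half-updated prefix. */
int main(void)
{
	atomic_store(&subnet_prefix, 0xfe80000000000000ull);
	printf("0x%llx\n",
	       (unsigned long long)atomic_load(&subnet_prefix));
	return 0;
}
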
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 308a358e5b46..e4fac9292e4a 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -553,12 +553,6 @@ repoll:
553 * from the table. 553 * from the table.
554 */ 554 */
555 mqp = __mlx5_qp_lookup(dev->mdev, qpn); 555 mqp = __mlx5_qp_lookup(dev->mdev, qpn);
556 if (unlikely(!mqp)) {
557 mlx5_ib_warn(dev, "CQE@CQ %06x for unknown QPN %6x\n",
558 cq->mcq.cqn, qpn);
559 return -EINVAL;
560 }
561
562 *cur_qp = to_mibqp(mqp); 556 *cur_qp = to_mibqp(mqp);
563 } 557 }
564 558
@@ -619,13 +613,6 @@ repoll:
619 read_lock(&dev->mdev->priv.mkey_table.lock); 613 read_lock(&dev->mdev->priv.mkey_table.lock);
620 mmkey = __mlx5_mr_lookup(dev->mdev, 614 mmkey = __mlx5_mr_lookup(dev->mdev,
621 mlx5_base_mkey(be32_to_cpu(sig_err_cqe->mkey))); 615 mlx5_base_mkey(be32_to_cpu(sig_err_cqe->mkey)));
622 if (unlikely(!mmkey)) {
623 read_unlock(&dev->mdev->priv.mkey_table.lock);
624 mlx5_ib_warn(dev, "CQE@CQ %06x for unknown MR %6x\n",
625 cq->mcq.cqn, be32_to_cpu(sig_err_cqe->mkey));
626 return -EINVAL;
627 }
628
629 mr = to_mibmr(mmkey); 616 mr = to_mibmr(mmkey);
630 get_sig_err_item(sig_err_cqe, &mr->sig->err_item); 617 get_sig_err_item(sig_err_cqe, &mr->sig->err_item);
631 mr->sig->sig_err_exists = true; 618 mr->sig->sig_err_exists = true;
@@ -676,7 +663,6 @@ int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
676 unsigned long flags; 663 unsigned long flags;
677 int soft_polled = 0; 664 int soft_polled = 0;
678 int npolled; 665 int npolled;
679 int err = 0;
680 666
681 spin_lock_irqsave(&cq->lock, flags); 667 spin_lock_irqsave(&cq->lock, flags);
682 if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) { 668 if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
@@ -688,8 +674,7 @@ int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
688 soft_polled = poll_soft_wc(cq, num_entries, wc); 674 soft_polled = poll_soft_wc(cq, num_entries, wc);
689 675
690 for (npolled = 0; npolled < num_entries - soft_polled; npolled++) { 676 for (npolled = 0; npolled < num_entries - soft_polled; npolled++) {
691 err = mlx5_poll_one(cq, &cur_qp, wc + soft_polled + npolled); 677 if (mlx5_poll_one(cq, &cur_qp, wc + soft_polled + npolled))
692 if (err)
693 break; 678 break;
694 } 679 }
695 680
@@ -698,10 +683,7 @@ int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
698out: 683out:
699 spin_unlock_irqrestore(&cq->lock, flags); 684 spin_unlock_irqrestore(&cq->lock, flags);
700 685
701 if (err == 0 || err == -EAGAIN) 686 return soft_polled + npolled;
702 return soft_polled + npolled;
703 else
704 return err;
705} 687}
706 688
707int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags) 689int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index a84bb766fc62..e19537cf44ab 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -37,7 +37,6 @@
37#include <linux/pci.h> 37#include <linux/pci.h>
38#include <linux/dma-mapping.h> 38#include <linux/dma-mapping.h>
39#include <linux/slab.h> 39#include <linux/slab.h>
40#include <linux/io-mapping.h>
41#if defined(CONFIG_X86) 40#if defined(CONFIG_X86)
42#include <asm/pat.h> 41#include <asm/pat.h>
43#endif 42#endif
@@ -289,7 +288,9 @@ __be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num,
289 288
290static int mlx5_use_mad_ifc(struct mlx5_ib_dev *dev) 289static int mlx5_use_mad_ifc(struct mlx5_ib_dev *dev)
291{ 290{
292 return !MLX5_CAP_GEN(dev->mdev, ib_virt); 291 if (MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_IB)
292 return !MLX5_CAP_GEN(dev->mdev, ib_virt);
293 return 0;
293} 294}
294 295
295enum { 296enum {
@@ -1429,6 +1430,13 @@ static int parse_flow_attr(u32 *match_c, u32 *match_v,
1429 dmac_47_16), 1430 dmac_47_16),
1430 ib_spec->eth.val.dst_mac); 1431 ib_spec->eth.val.dst_mac);
1431 1432
1433 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
1434 smac_47_16),
1435 ib_spec->eth.mask.src_mac);
1436 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
1437 smac_47_16),
1438 ib_spec->eth.val.src_mac);
1439
1432 if (ib_spec->eth.mask.vlan_tag) { 1440 if (ib_spec->eth.mask.vlan_tag) {
1433 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, 1441 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
1434 vlan_tag, 1); 1442 vlan_tag, 1);
@@ -1850,6 +1858,7 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
1850 int domain) 1858 int domain)
1851{ 1859{
1852 struct mlx5_ib_dev *dev = to_mdev(qp->device); 1860 struct mlx5_ib_dev *dev = to_mdev(qp->device);
1861 struct mlx5_ib_qp *mqp = to_mqp(qp);
1853 struct mlx5_ib_flow_handler *handler = NULL; 1862 struct mlx5_ib_flow_handler *handler = NULL;
1854 struct mlx5_flow_destination *dst = NULL; 1863 struct mlx5_flow_destination *dst = NULL;
1855 struct mlx5_ib_flow_prio *ft_prio; 1864 struct mlx5_ib_flow_prio *ft_prio;
@@ -1876,7 +1885,10 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
1876 } 1885 }
1877 1886
1878 dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR; 1887 dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR;
1879 dst->tir_num = to_mqp(qp)->raw_packet_qp.rq.tirn; 1888 if (mqp->flags & MLX5_IB_QP_RSS)
1889 dst->tir_num = mqp->rss_qp.tirn;
1890 else
1891 dst->tir_num = mqp->raw_packet_qp.rq.tirn;
1880 1892
1881 if (flow_attr->type == IB_FLOW_ATTR_NORMAL) { 1893 if (flow_attr->type == IB_FLOW_ATTR_NORMAL) {
1882 if (flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) { 1894 if (flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) {
diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c
index 40df2cca0609..996b54e366b0 100644
--- a/drivers/infiniband/hw/mlx5/mem.c
+++ b/drivers/infiniband/hw/mlx5/mem.c
@@ -71,7 +71,7 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
71 71
72 addr = addr >> page_shift; 72 addr = addr >> page_shift;
73 tmp = (unsigned long)addr; 73 tmp = (unsigned long)addr;
74 m = find_first_bit(&tmp, sizeof(tmp)); 74 m = find_first_bit(&tmp, BITS_PER_LONG);
75 skip = 1 << m; 75 skip = 1 << m;
76 mask = skip - 1; 76 mask = skip - 1;
77 i = 0; 77 i = 0;
@@ -81,7 +81,7 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
81 for (k = 0; k < len; k++) { 81 for (k = 0; k < len; k++) {
82 if (!(i & mask)) { 82 if (!(i & mask)) {
83 tmp = (unsigned long)pfn; 83 tmp = (unsigned long)pfn;
84 m = min_t(unsigned long, m, find_first_bit(&tmp, sizeof(tmp))); 84 m = min_t(unsigned long, m, find_first_bit(&tmp, BITS_PER_LONG));
85 skip = 1 << m; 85 skip = 1 << m;
86 mask = skip - 1; 86 mask = skip - 1;
87 base = pfn; 87 base = pfn;
@@ -89,7 +89,7 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
89 } else { 89 } else {
90 if (base + p != pfn) { 90 if (base + p != pfn) {
91 tmp = (unsigned long)p; 91 tmp = (unsigned long)p;
92 m = find_first_bit(&tmp, sizeof(tmp)); 92 m = find_first_bit(&tmp, BITS_PER_LONG);
93 skip = 1 << m; 93 skip = 1 << m;
94 mask = skip - 1; 94 mask = skip - 1;
95 base = pfn; 95 base = pfn;
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 372385d0f993..95146f4aa3e3 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -402,6 +402,7 @@ enum mlx5_ib_qp_flags {
402 /* QP uses 1 as its source QP number */ 402 /* QP uses 1 as its source QP number */
403 MLX5_IB_QP_SQPN_QP1 = 1 << 6, 403 MLX5_IB_QP_SQPN_QP1 = 1 << 6,
404 MLX5_IB_QP_CAP_SCATTER_FCS = 1 << 7, 404 MLX5_IB_QP_CAP_SCATTER_FCS = 1 << 7,
405 MLX5_IB_QP_RSS = 1 << 8,
405}; 406};
406 407
407struct mlx5_umr_wr { 408struct mlx5_umr_wr {
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 0dd7d93cac95..affc3f6598ca 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -1449,6 +1449,7 @@ create_tir:
1449 kvfree(in); 1449 kvfree(in);
1450 /* qpn is reserved for that QP */ 1450 /* qpn is reserved for that QP */
1451 qp->trans_qp.base.mqp.qpn = 0; 1451 qp->trans_qp.base.mqp.qpn = 0;
1452 qp->flags |= MLX5_IB_QP_RSS;
1452 return 0; 1453 return 0;
1453 1454
1454err: 1455err:
@@ -3658,12 +3659,8 @@ static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
3658 struct ib_send_wr *wr, unsigned *idx, 3659 struct ib_send_wr *wr, unsigned *idx,
3659 int *size, int nreq) 3660 int *size, int nreq)
3660{ 3661{
3661 int err = 0; 3662 if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)))
3662 3663 return -ENOMEM;
3663 if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq))) {
3664 err = -ENOMEM;
3665 return err;
3666 }
3667 3664
3668 *idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1); 3665 *idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1);
3669 *seg = mlx5_get_send_wqe(qp, *idx); 3666 *seg = mlx5_get_send_wqe(qp, *idx);
@@ -3679,7 +3676,7 @@ static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
3679 *seg += sizeof(**ctrl); 3676 *seg += sizeof(**ctrl);
3680 *size = sizeof(**ctrl) / 16; 3677 *size = sizeof(**ctrl) / 16;
3681 3678
3682 return err; 3679 return 0;
3683} 3680}
3684 3681
3685static void finish_wqe(struct mlx5_ib_qp *qp, 3682static void finish_wqe(struct mlx5_ib_qp *qp,
@@ -3758,7 +3755,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
3758 num_sge = wr->num_sge; 3755 num_sge = wr->num_sge;
3759 if (unlikely(num_sge > qp->sq.max_gs)) { 3756 if (unlikely(num_sge > qp->sq.max_gs)) {
3760 mlx5_ib_warn(dev, "\n"); 3757 mlx5_ib_warn(dev, "\n");
3761 err = -ENOMEM; 3758 err = -EINVAL;
3762 *bad_wr = wr; 3759 *bad_wr = wr;
3763 goto out; 3760 goto out;
3764 } 3761 }
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index 16740dcb876b..67fc0b6857e1 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -1156,18 +1156,18 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev,
1156 attr->max_srq = 1156 attr->max_srq =
1157 (rsp->max_srq_rpir_qps & OCRDMA_MBX_QUERY_CFG_MAX_SRQ_MASK) >> 1157 (rsp->max_srq_rpir_qps & OCRDMA_MBX_QUERY_CFG_MAX_SRQ_MASK) >>
1158 OCRDMA_MBX_QUERY_CFG_MAX_SRQ_OFFSET; 1158 OCRDMA_MBX_QUERY_CFG_MAX_SRQ_OFFSET;
1159 attr->max_send_sge = ((rsp->max_write_send_sge & 1159 attr->max_send_sge = ((rsp->max_recv_send_sge &
1160 OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK) >> 1160 OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK) >>
1161 OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT); 1161 OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT);
1162 attr->max_recv_sge = (rsp->max_write_send_sge & 1162 attr->max_recv_sge = (rsp->max_recv_send_sge &
1163 OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK) >> 1163 OCRDMA_MBX_QUERY_CFG_MAX_RECV_SGE_MASK) >>
1164 OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT; 1164 OCRDMA_MBX_QUERY_CFG_MAX_RECV_SGE_SHIFT;
1165 attr->max_srq_sge = (rsp->max_srq_rqe_sge & 1165 attr->max_srq_sge = (rsp->max_srq_rqe_sge &
1166 OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_MASK) >> 1166 OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_MASK) >>
1167 OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_OFFSET; 1167 OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_OFFSET;
1168 attr->max_rdma_sge = (rsp->max_write_send_sge & 1168 attr->max_rdma_sge = (rsp->max_wr_rd_sge &
1169 OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_MASK) >> 1169 OCRDMA_MBX_QUERY_CFG_MAX_RD_SGE_MASK) >>
1170 OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_SHIFT; 1170 OCRDMA_MBX_QUERY_CFG_MAX_RD_SGE_SHIFT;
1171 attr->max_ord_per_qp = (rsp->max_ird_ord_per_qp & 1171 attr->max_ord_per_qp = (rsp->max_ird_ord_per_qp &
1172 OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK) >> 1172 OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK) >>
1173 OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT; 1173 OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
index 0efc9662c6d8..37df4481bb8f 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
@@ -554,9 +554,9 @@ enum {
554 OCRDMA_MBX_QUERY_CFG_L3_TYPE_MASK = 0x18, 554 OCRDMA_MBX_QUERY_CFG_L3_TYPE_MASK = 0x18,
555 OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT = 0, 555 OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT = 0,
556 OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK = 0xFFFF, 556 OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK = 0xFFFF,
557 OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_SHIFT = 16, 557 OCRDMA_MBX_QUERY_CFG_MAX_RECV_SGE_SHIFT = 16,
558 OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_MASK = 0xFFFF << 558 OCRDMA_MBX_QUERY_CFG_MAX_RECV_SGE_MASK = 0xFFFF <<
559 OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_SHIFT, 559 OCRDMA_MBX_QUERY_CFG_MAX_RECV_SGE_SHIFT,
560 560
561 OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT = 0, 561 OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT = 0,
562 OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK = 0xFFFF, 562 OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK = 0xFFFF,
@@ -612,6 +612,8 @@ enum {
612 OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_OFFSET = 0, 612 OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_OFFSET = 0,
613 OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_MASK = 0xFFFF << 613 OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_MASK = 0xFFFF <<
614 OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_OFFSET, 614 OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_OFFSET,
615 OCRDMA_MBX_QUERY_CFG_MAX_RD_SGE_SHIFT = 0,
616 OCRDMA_MBX_QUERY_CFG_MAX_RD_SGE_MASK = 0xFFFF,
615}; 617};
616 618
617struct ocrdma_mbx_query_config { 619struct ocrdma_mbx_query_config {
@@ -619,7 +621,7 @@ struct ocrdma_mbx_query_config {
619 struct ocrdma_mbx_rsp rsp; 621 struct ocrdma_mbx_rsp rsp;
620 u32 qp_srq_cq_ird_ord; 622 u32 qp_srq_cq_ird_ord;
621 u32 max_pd_ca_ack_delay; 623 u32 max_pd_ca_ack_delay;
622 u32 max_write_send_sge; 624 u32 max_recv_send_sge;
623 u32 max_ird_ord_per_qp; 625 u32 max_ird_ord_per_qp;
624 u32 max_shared_ird_ord; 626 u32 max_shared_ird_ord;
625 u32 max_mr; 627 u32 max_mr;
@@ -639,6 +641,8 @@ struct ocrdma_mbx_query_config {
639 u32 max_wqes_rqes_per_q; 641 u32 max_wqes_rqes_per_q;
640 u32 max_cq_cqes_per_cq; 642 u32 max_cq_cqes_per_cq;
641 u32 max_srq_rqe_sge; 643 u32 max_srq_rqe_sge;
644 u32 max_wr_rd_sge;
645 u32 ird_pgsz_num_pages;
642}; 646};
643 647
644struct ocrdma_fw_ver_rsp { 648struct ocrdma_fw_ver_rsp {
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index b1a3d91fe8b9..0aa854737e74 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -125,8 +125,8 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr,
125 IB_DEVICE_SYS_IMAGE_GUID | 125 IB_DEVICE_SYS_IMAGE_GUID |
126 IB_DEVICE_LOCAL_DMA_LKEY | 126 IB_DEVICE_LOCAL_DMA_LKEY |
127 IB_DEVICE_MEM_MGT_EXTENSIONS; 127 IB_DEVICE_MEM_MGT_EXTENSIONS;
128 attr->max_sge = dev->attr.max_send_sge; 128 attr->max_sge = min(dev->attr.max_send_sge, dev->attr.max_recv_sge);
129 attr->max_sge_rd = attr->max_sge; 129 attr->max_sge_rd = dev->attr.max_rdma_sge;
130 attr->max_cq = dev->attr.max_cq; 130 attr->max_cq = dev->attr.max_cq;
131 attr->max_cqe = dev->attr.max_cqe; 131 attr->max_cqe = dev->attr.max_cqe;
132 attr->max_mr = dev->attr.max_mr; 132 attr->max_mr = dev->attr.max_mr;
diff --git a/drivers/infiniband/hw/qib/qib_debugfs.c b/drivers/infiniband/hw/qib/qib_debugfs.c
index 5e75b43c596b..5bad8e3b40bb 100644
--- a/drivers/infiniband/hw/qib/qib_debugfs.c
+++ b/drivers/infiniband/hw/qib/qib_debugfs.c
@@ -189,27 +189,32 @@ static int _ctx_stats_seq_show(struct seq_file *s, void *v)
189DEBUGFS_FILE(ctx_stats) 189DEBUGFS_FILE(ctx_stats)
190 190
191static void *_qp_stats_seq_start(struct seq_file *s, loff_t *pos) 191static void *_qp_stats_seq_start(struct seq_file *s, loff_t *pos)
192 __acquires(RCU)
192{ 193{
193 struct qib_qp_iter *iter; 194 struct qib_qp_iter *iter;
194 loff_t n = *pos; 195 loff_t n = *pos;
195 196
196 rcu_read_lock();
197 iter = qib_qp_iter_init(s->private); 197 iter = qib_qp_iter_init(s->private);
198
199 /* stop calls rcu_read_unlock */
200 rcu_read_lock();
201
198 if (!iter) 202 if (!iter)
199 return NULL; 203 return NULL;
200 204
201 while (n--) { 205 do {
202 if (qib_qp_iter_next(iter)) { 206 if (qib_qp_iter_next(iter)) {
203 kfree(iter); 207 kfree(iter);
204 return NULL; 208 return NULL;
205 } 209 }
206 } 210 } while (n--);
207 211
208 return iter; 212 return iter;
209} 213}
210 214
211static void *_qp_stats_seq_next(struct seq_file *s, void *iter_ptr, 215static void *_qp_stats_seq_next(struct seq_file *s, void *iter_ptr,
212 loff_t *pos) 216 loff_t *pos)
217 __must_hold(RCU)
213{ 218{
214 struct qib_qp_iter *iter = iter_ptr; 219 struct qib_qp_iter *iter = iter_ptr;
215 220
@@ -224,6 +229,7 @@ static void *_qp_stats_seq_next(struct seq_file *s, void *iter_ptr,
224} 229}
225 230
226static void _qp_stats_seq_stop(struct seq_file *s, void *iter_ptr) 231static void _qp_stats_seq_stop(struct seq_file *s, void *iter_ptr)
232 __releases(RCU)
227{ 233{
228 rcu_read_unlock(); 234 rcu_read_unlock();
229} 235}
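
The qib_debugfs.c hunk above pairs with the qib_qp.c hunk below (and the matching hfi1 qp.c hunk earlier) that drops the pre-advance from qib_qp_iter_init(): the iterator now starts *before* the first QP, so the seq_file ->start() callback must advance at least once even for *pos == 0, hence while (n--) becomes do { ... } while (n--). A runnable model of why the loop shape matters:

#include <stdio.h>

#define NUM_QPS	3

/* Returns nonzero when the iterator runs past the last QP. */
static int model_iter_next(int *cur)
{
	return ++(*cur) > NUM_QPS;
}

int main(void)
{
	int cur = 0;	/* iter_init() no longer pre-advances */
	long n = 0;	/* *pos == 0: seq_file wants the first entry */

	/* while (n--) would not step at all for n == 0 and would hand
	 * back an un-positioned iterator; do/while always steps once. */
	do {
		if (model_iter_next(&cur))
			return 0;	/* exhausted */
	} while (n--);
	printf("positioned at qp %d\n", cur);	/* 1 */
	return 0;
}
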
diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c
index fcdf37913a26..c3edc033f7c4 100644
--- a/drivers/infiniband/hw/qib/qib_fs.c
+++ b/drivers/infiniband/hw/qib/qib_fs.c
@@ -328,26 +328,12 @@ static ssize_t flash_write(struct file *file, const char __user *buf,
328 328
329 pos = *ppos; 329 pos = *ppos;
330 330
331 if (pos != 0) { 331 if (pos != 0 || count != sizeof(struct qib_flash))
332 ret = -EINVAL; 332 return -EINVAL;
333 goto bail;
334 }
335
336 if (count != sizeof(struct qib_flash)) {
337 ret = -EINVAL;
338 goto bail;
339 }
340
341 tmp = kmalloc(count, GFP_KERNEL);
342 if (!tmp) {
343 ret = -ENOMEM;
344 goto bail;
345 }
346 333
347 if (copy_from_user(tmp, buf, count)) { 334 tmp = memdup_user(buf, count);
348 ret = -EFAULT; 335 if (IS_ERR(tmp))
349 goto bail_tmp; 336 return PTR_ERR(tmp);
350 }
351 337
352 dd = private2dd(file); 338 dd = private2dd(file);
353 if (qib_eeprom_write(dd, pos, tmp, count)) { 339 if (qib_eeprom_write(dd, pos, tmp, count)) {
@@ -361,8 +347,6 @@ static ssize_t flash_write(struct file *file, const char __user *buf,
361 347
362bail_tmp: 348bail_tmp:
363 kfree(tmp); 349 kfree(tmp);
364
365bail:
366 return ret; 350 return ret;
367} 351}
368 352
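
The qib_fs.c hunk above folds kmalloc() + copy_from_user() + the bail-out labels into memdup_user(), which returns either a fully populated buffer or an ERR_PTR (-ENOMEM or -EFAULT). For reference, the open-coded form it replaces, consolidated from the removed lines:

	tmp = kmalloc(count, GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;
	if (copy_from_user(tmp, buf, count)) {
		kfree(tmp);
		return -EFAULT;
	}
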
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
index 9cc0aae1d781..f9b8cd2354d1 100644
--- a/drivers/infiniband/hw/qib/qib_qp.c
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -573,10 +573,6 @@ struct qib_qp_iter *qib_qp_iter_init(struct qib_ibdev *dev)
573 return NULL; 573 return NULL;
574 574
575 iter->dev = dev; 575 iter->dev = dev;
576 if (qib_qp_iter_next(iter)) {
577 kfree(iter);
578 return NULL;
579 }
580 576
581 return iter; 577 return iter;
582} 578}
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_main.c b/drivers/infiniband/hw/usnic/usnic_ib_main.c
index c229b9f4a52d..0a89a955550b 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_main.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_main.c
@@ -664,7 +664,8 @@ static int __init usnic_ib_init(void)
664 return err; 664 return err;
665 } 665 }
666 666
667 if (pci_register_driver(&usnic_ib_pci_driver)) { 667 err = pci_register_driver(&usnic_ib_pci_driver);
668 if (err) {
668 usnic_err("Unable to register with PCI\n"); 669 usnic_err("Unable to register with PCI\n");
669 goto out_umem_fini; 670 goto out_umem_fini;
670 } 671 }
diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c
index 80c4b6b401b8..46b64970058e 100644
--- a/drivers/infiniband/sw/rdmavt/mr.c
+++ b/drivers/infiniband/sw/rdmavt/mr.c
@@ -294,7 +294,7 @@ static void __rvt_free_mr(struct rvt_mr *mr)
294{ 294{
295 rvt_deinit_mregion(&mr->mr); 295 rvt_deinit_mregion(&mr->mr);
296 rvt_free_lkey(&mr->mr); 296 rvt_free_lkey(&mr->mr);
297 vfree(mr); 297 kfree(mr);
298} 298}
299 299
300/** 300/**
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index bdb540f25a88..870b4f212fbc 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -873,7 +873,8 @@ bail_qpn:
873 free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num); 873 free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);
874 874
875bail_rq_wq: 875bail_rq_wq:
876 vfree(qp->r_rq.wq); 876 if (!qp->ip)
877 vfree(qp->r_rq.wq);
877 878
878bail_driver_priv: 879bail_driver_priv:
879 rdi->driver_f.qp_priv_free(rdi, qp); 880 rdi->driver_f.qp_priv_free(rdi, qp);
diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
index 55f0e8f0ca79..ddd59270ff6d 100644
--- a/drivers/infiniband/sw/rxe/rxe.c
+++ b/drivers/infiniband/sw/rxe/rxe.c
@@ -362,15 +362,34 @@ static int __init rxe_module_init(void)
362 return err; 362 return err;
363 } 363 }
364 364
365 err = rxe_net_init(); 365 err = rxe_net_ipv4_init();
366 if (err) { 366 if (err) {
367 pr_err("rxe: unable to init\n"); 367 pr_err("rxe: unable to init ipv4 tunnel\n");
368 rxe_cache_exit(); 368 rxe_cache_exit();
369 return err; 369 goto exit;
370 }
371
372 err = rxe_net_ipv6_init();
373 if (err) {
374 pr_err("rxe: unable to init ipv6 tunnel\n");
375 rxe_cache_exit();
376 goto exit;
370 } 377 }
378
379 err = register_netdevice_notifier(&rxe_net_notifier);
380 if (err) {
381 pr_err("rxe: Failed to rigister netdev notifier\n");
382 goto exit;
383 }
384
371 pr_info("rxe: loaded\n"); 385 pr_info("rxe: loaded\n");
372 386
373 return 0; 387 return 0;
388
389exit:
390 rxe_release_udp_tunnel(recv_sockets.sk4);
391 rxe_release_udp_tunnel(recv_sockets.sk6);
392 return err;
374} 393}
375 394
376static void __exit rxe_module_exit(void) 395static void __exit rxe_module_exit(void)
diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
index 36f67de44095..1c59ef2c67aa 100644
--- a/drivers/infiniband/sw/rxe/rxe_comp.c
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
@@ -689,7 +689,14 @@ int rxe_completer(void *arg)
689 qp->req.need_retry = 1; 689 qp->req.need_retry = 1;
690 rxe_run_task(&qp->req.task, 1); 690 rxe_run_task(&qp->req.task, 1);
691 } 691 }
692
693 if (pkt) {
694 rxe_drop_ref(pkt->qp);
695 kfree_skb(skb);
696 }
697
692 goto exit; 698 goto exit;
699
693 } else { 700 } else {
694 wqe->status = IB_WC_RETRY_EXC_ERR; 701 wqe->status = IB_WC_RETRY_EXC_ERR;
695 state = COMPST_ERROR; 702 state = COMPST_ERROR;
@@ -716,6 +723,12 @@ int rxe_completer(void *arg)
716 case COMPST_ERROR: 723 case COMPST_ERROR:
717 do_complete(qp, wqe); 724 do_complete(qp, wqe);
718 rxe_qp_error(qp); 725 rxe_qp_error(qp);
726
727 if (pkt) {
728 rxe_drop_ref(pkt->qp);
729 kfree_skb(skb);
730 }
731
719 goto exit; 732 goto exit;
720 } 733 }
721 } 734 }
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index 0b8d2ea8b41d..eedf2f1cafdf 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -275,9 +275,10 @@ static struct socket *rxe_setup_udp_tunnel(struct net *net, __be16 port,
 	return sock;
 }

-static void rxe_release_udp_tunnel(struct socket *sk)
+void rxe_release_udp_tunnel(struct socket *sk)
 {
-	udp_tunnel_sock_release(sk);
+	if (sk)
+		udp_tunnel_sock_release(sk);
 }

 static void prepare_udp_hdr(struct sk_buff *skb, __be16 src_port,
@@ -658,51 +659,45 @@ out:
 	return NOTIFY_OK;
 }

-static struct notifier_block rxe_net_notifier = {
+struct notifier_block rxe_net_notifier = {
 	.notifier_call = rxe_notify,
 };

-int rxe_net_init(void)
+int rxe_net_ipv4_init(void)
 {
-	int err;
-
 	spin_lock_init(&dev_list_lock);

-	recv_sockets.sk6 = rxe_setup_udp_tunnel(&init_net,
-			htons(ROCE_V2_UDP_DPORT), true);
-	if (IS_ERR(recv_sockets.sk6)) {
-		recv_sockets.sk6 = NULL;
-		pr_err("rxe: Failed to create IPv6 UDP tunnel\n");
-		return -1;
-	}
-
 	recv_sockets.sk4 = rxe_setup_udp_tunnel(&init_net,
 			htons(ROCE_V2_UDP_DPORT), false);
 	if (IS_ERR(recv_sockets.sk4)) {
-		rxe_release_udp_tunnel(recv_sockets.sk6);
 		recv_sockets.sk4 = NULL;
-		recv_sockets.sk6 = NULL;
 		pr_err("rxe: Failed to create IPv4 UDP tunnel\n");
 		return -1;
 	}

-	err = register_netdevice_notifier(&rxe_net_notifier);
-	if (err) {
-		rxe_release_udp_tunnel(recv_sockets.sk6);
-		rxe_release_udp_tunnel(recv_sockets.sk4);
-		pr_err("rxe: Failed to rigister netdev notifier\n");
-	}
-
-	return err;
+	return 0;
 }

-void rxe_net_exit(void)
+int rxe_net_ipv6_init(void)
 {
-	if (recv_sockets.sk6)
-		rxe_release_udp_tunnel(recv_sockets.sk6);
+#if IS_ENABLED(CONFIG_IPV6)

-	if (recv_sockets.sk4)
-		rxe_release_udp_tunnel(recv_sockets.sk4);
+	spin_lock_init(&dev_list_lock);

+	recv_sockets.sk6 = rxe_setup_udp_tunnel(&init_net,
+			htons(ROCE_V2_UDP_DPORT), true);
+	if (IS_ERR(recv_sockets.sk6)) {
+		recv_sockets.sk6 = NULL;
+		pr_err("rxe: Failed to create IPv6 UDP tunnel\n");
+		return -1;
+	}
+#endif
+	return 0;
+}
+
+void rxe_net_exit(void)
+{
+	rxe_release_udp_tunnel(recv_sockets.sk6);
+	rxe_release_udp_tunnel(recv_sockets.sk4);
 	unregister_netdevice_notifier(&rxe_net_notifier);
 }
diff --git a/drivers/infiniband/sw/rxe/rxe_net.h b/drivers/infiniband/sw/rxe/rxe_net.h
index 7b06f76d16cc..0daf7f09e5b5 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.h
+++ b/drivers/infiniband/sw/rxe/rxe_net.h
@@ -44,10 +44,13 @@ struct rxe_recv_sockets {
 };

 extern struct rxe_recv_sockets recv_sockets;
+extern struct notifier_block rxe_net_notifier;
+void rxe_release_udp_tunnel(struct socket *sk);

 struct rxe_dev *rxe_net_add(struct net_device *ndev);

-int rxe_net_init(void);
+int rxe_net_ipv4_init(void);
+int rxe_net_ipv6_init(void);
 void rxe_net_exit(void);

 #endif /* RXE_NET_H */
diff --git a/drivers/infiniband/sw/rxe/rxe_recv.c b/drivers/infiniband/sw/rxe/rxe_recv.c
index 3d464c23e08b..144d2f129fcd 100644
--- a/drivers/infiniband/sw/rxe/rxe_recv.c
+++ b/drivers/infiniband/sw/rxe/rxe_recv.c
@@ -312,7 +312,7 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
 	 * make a copy of the skb to post to the next qp
 	 */
 	skb_copy = (mce->qp_list.next != &mcg->qp_list) ?
-			skb_clone(skb, GFP_KERNEL) : NULL;
+			skb_clone(skb, GFP_ATOMIC) : NULL;

 	pkt->qp = qp;
 	rxe_add_ref(qp);
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index 33b2d9d77021..13a848a518e8 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -511,24 +511,21 @@ static int fill_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
 }

 static void update_wqe_state(struct rxe_qp *qp,
 		struct rxe_send_wqe *wqe,
-		struct rxe_pkt_info *pkt,
-		enum wqe_state *prev_state)
+		struct rxe_pkt_info *pkt)
 {
-	enum wqe_state prev_state_ = wqe->state;
-
 	if (pkt->mask & RXE_END_MASK) {
 		if (qp_type(qp) == IB_QPT_RC)
 			wqe->state = wqe_state_pending;
 	} else {
 		wqe->state = wqe_state_processing;
 	}
-
-	*prev_state = prev_state_;
 }

-static void update_state(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
-			 struct rxe_pkt_info *pkt, int payload)
+static void update_wqe_psn(struct rxe_qp *qp,
+			   struct rxe_send_wqe *wqe,
+			   struct rxe_pkt_info *pkt,
+			   int payload)
 {
 	/* number of packets left to send including current one */
 	int num_pkt = (wqe->dma.resid + payload + qp->mtu - 1) / qp->mtu;
@@ -546,9 +543,34 @@ static void update_state(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
 		qp->req.psn = (wqe->first_psn + num_pkt) & BTH_PSN_MASK;
 	else
 		qp->req.psn = (qp->req.psn + 1) & BTH_PSN_MASK;
+}

-	qp->req.opcode = pkt->opcode;
+static void save_state(struct rxe_send_wqe *wqe,
+		       struct rxe_qp *qp,
+		       struct rxe_send_wqe *rollback_wqe,
+		       struct rxe_qp *rollback_qp)
+{
+	rollback_wqe->state = wqe->state;
+	rollback_wqe->first_psn = wqe->first_psn;
+	rollback_wqe->last_psn = wqe->last_psn;
+	rollback_qp->req.psn = qp->req.psn;
+}

+static void rollback_state(struct rxe_send_wqe *wqe,
+			   struct rxe_qp *qp,
+			   struct rxe_send_wqe *rollback_wqe,
+			   struct rxe_qp *rollback_qp)
+{
+	wqe->state = rollback_wqe->state;
+	wqe->first_psn = rollback_wqe->first_psn;
+	wqe->last_psn = rollback_wqe->last_psn;
+	qp->req.psn = rollback_qp->req.psn;
+}
+
+static void update_state(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
+			 struct rxe_pkt_info *pkt, int payload)
+{
+	qp->req.opcode = pkt->opcode;

 	if (pkt->mask & RXE_END_MASK)
 		qp->req.wqe_index = next_index(qp->sq.queue, qp->req.wqe_index);
@@ -571,7 +593,8 @@ int rxe_requester(void *arg)
 	int mtu;
 	int opcode;
 	int ret;
-	enum wqe_state prev_state;
+	struct rxe_qp rollback_qp;
+	struct rxe_send_wqe rollback_wqe;

 next_wqe:
 	if (unlikely(!qp->valid || qp->req.state == QP_STATE_ERROR))
@@ -688,13 +711,21 @@ next_wqe:
 		goto err;
 	}

-	update_wqe_state(qp, wqe, &pkt, &prev_state);
+	/*
+	 * To prevent a race on wqe access between requester and completer,
+	 * wqe members state and psn need to be set before calling
+	 * rxe_xmit_packet().
+	 * Otherwise, completer might initiate an unjustified retry flow.
+	 */
+	save_state(wqe, qp, &rollback_wqe, &rollback_qp);
+	update_wqe_state(qp, wqe, &pkt);
+	update_wqe_psn(qp, wqe, &pkt, payload);
 	ret = rxe_xmit_packet(to_rdev(qp->ibqp.device), qp, &pkt, skb);
 	if (ret) {
 		qp->need_req_skb = 1;
 		kfree_skb(skb);

-		wqe->state = prev_state;
+		rollback_state(wqe, qp, &rollback_wqe, &rollback_qp);

 		if (ret == -EAGAIN) {
 			rxe_run_task(&qp->req.task, 1);
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index ebb03b46e2ad..3e0f0f2baace 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -972,11 +972,13 @@ static int send_atomic_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
 	free_rd_atomic_resource(qp, res);
 	rxe_advance_resp_resource(qp);

+	memcpy(SKB_TO_PKT(skb), &ack_pkt, sizeof(skb->cb));
+
 	res->type = RXE_ATOMIC_MASK;
 	res->atomic.skb = skb;
-	res->first_psn = qp->resp.psn;
-	res->last_psn = qp->resp.psn;
-	res->cur_psn = qp->resp.psn;
+	res->first_psn = ack_pkt.psn;
+	res->last_psn = ack_pkt.psn;
+	res->cur_psn = ack_pkt.psn;

 	rc = rxe_xmit_packet(rxe, qp, &ack_pkt, skb_copy);
 	if (rc) {
@@ -1116,8 +1118,7 @@ static enum resp_states duplicate_request(struct rxe_qp *qp,
 			rc = RESPST_CLEANUP;
 			goto out;
 		}
-		bth_set_psn(SKB_TO_PKT(skb_copy),
-			    qp->resp.psn - 1);
+
 		/* Resend the result. */
 		rc = rxe_xmit_packet(to_rdev(qp->ibqp.device), qp,
 				     pkt, skb_copy);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 4f7d9b48df64..9dbfcc0ab577 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -478,6 +478,7 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
 		struct ipoib_ah *address, u32 qpn);
 void ipoib_reap_ah(struct work_struct *work);

+struct ipoib_path *__path_find(struct net_device *dev, void *gid);
 void ipoib_mark_paths_invalid(struct net_device *dev);
 void ipoib_flush_paths(struct net_device *dev);
 int ipoib_check_sm_sendonly_fullmember_support(struct ipoib_dev_priv *priv);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 951d9abcca8b..4ad297d3de89 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -1318,6 +1318,8 @@ void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx)
 	}
 }

+#define QPN_AND_OPTIONS_OFFSET 4
+
 static void ipoib_cm_tx_start(struct work_struct *work)
 {
 	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
@@ -1326,6 +1328,7 @@ static void ipoib_cm_tx_start(struct work_struct *work)
 	struct ipoib_neigh *neigh;
 	struct ipoib_cm_tx *p;
 	unsigned long flags;
+	struct ipoib_path *path;
 	int ret;

 	struct ib_sa_path_rec pathrec;
@@ -1338,7 +1341,19 @@ static void ipoib_cm_tx_start(struct work_struct *work)
 		p = list_entry(priv->cm.start_list.next, typeof(*p), list);
 		list_del_init(&p->list);
 		neigh = p->neigh;
+
 		qpn = IPOIB_QPN(neigh->daddr);
+		/*
+		 * As long as the search is with these 2 locks,
+		 * path existence indicates its validity.
+		 */
+		path = __path_find(dev, neigh->daddr + QPN_AND_OPTIONS_OFFSET);
+		if (!path) {
+			pr_info("%s ignore not valid path %pI6\n",
+				__func__,
+				neigh->daddr + QPN_AND_OPTIONS_OFFSET);
+			goto free_neigh;
+		}
 		memcpy(&pathrec, &p->path->pathrec, sizeof pathrec);

 		spin_unlock_irqrestore(&priv->lock, flags);
@@ -1350,6 +1365,7 @@ static void ipoib_cm_tx_start(struct work_struct *work)
 		spin_lock_irqsave(&priv->lock, flags);

 		if (ret) {
+free_neigh:
 			neigh = p->neigh;
 			if (neigh) {
 				neigh->cm = NULL;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index dc6d241b9406..be11d5d5b8c1 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -1161,8 +1161,17 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
 	}

 	if (level == IPOIB_FLUSH_LIGHT) {
+		int oper_up;
 		ipoib_mark_paths_invalid(dev);
+		/* Set IPoIB operation as down to prevent races between:
+		 * the flush flow which leaves MCG and on the fly joins
+		 * which can happen during that time. mcast restart task
+		 * should deal with join requests we missed.
+		 */
+		oper_up = test_and_clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
 		ipoib_mcast_dev_flush(dev);
+		if (oper_up)
+			set_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
 		ipoib_flush_ah(dev);
 	}

diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 74bcaa064226..cc1c1b062ea5 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -485,7 +485,7 @@ int ipoib_set_mode(struct net_device *dev, const char *buf)
 	return -EINVAL;
 }

-static struct ipoib_path *__path_find(struct net_device *dev, void *gid)
+struct ipoib_path *__path_find(struct net_device *dev, void *gid)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	struct rb_node *n = priv->path_tree.rb_node;
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index ba6be060a476..cae9bbcc27e7 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -403,6 +403,7 @@ isert_init_conn(struct isert_conn *isert_conn)
 	INIT_LIST_HEAD(&isert_conn->node);
 	init_completion(&isert_conn->login_comp);
 	init_completion(&isert_conn->login_req_comp);
+	init_waitqueue_head(&isert_conn->rem_wait);
 	kref_init(&isert_conn->kref);
 	mutex_init(&isert_conn->mutex);
 	INIT_WORK(&isert_conn->release_work, isert_release_work);
@@ -448,7 +449,7 @@ isert_alloc_login_buf(struct isert_conn *isert_conn,

 	isert_conn->login_rsp_buf = kzalloc(ISER_RX_PAYLOAD_SIZE, GFP_KERNEL);
 	if (!isert_conn->login_rsp_buf) {
-		isert_err("Unable to allocate isert_conn->login_rspbuf\n");
+		ret = -ENOMEM;
 		goto out_unmap_login_req_buf;
 	}

@@ -578,7 +579,8 @@ isert_connect_release(isert_conn)
 	BUG_ON(!device);

 	isert_free_rx_descriptors(isert_conn);
-	if (isert_conn->cm_id)
+	if (isert_conn->cm_id &&
+	    !isert_conn->dev_removed)
 		rdma_destroy_id(isert_conn->cm_id);

 	if (isert_conn->qp) {
@@ -593,7 +595,10 @@ isert_connect_release(isert_conn)

 	isert_device_put(device);

-	kfree(isert_conn);
+	if (isert_conn->dev_removed)
+		wake_up_interruptible(&isert_conn->rem_wait);
+	else
+		kfree(isert_conn);
 }

 static void
@@ -753,6 +758,7 @@ static int
 isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 {
 	struct isert_np *isert_np = cma_id->context;
+	struct isert_conn *isert_conn;
 	int ret = 0;

 	isert_info("%s (%d): status %d id %p np %p\n",
@@ -773,10 +779,21 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 		break;
 	case RDMA_CM_EVENT_ADDR_CHANGE: /* FALLTHRU */
 	case RDMA_CM_EVENT_DISCONNECTED: /* FALLTHRU */
-	case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */
 	case RDMA_CM_EVENT_TIMEWAIT_EXIT: /* FALLTHRU */
 		ret = isert_disconnected_handler(cma_id, event->event);
 		break;
+	case RDMA_CM_EVENT_DEVICE_REMOVAL:
+		isert_conn = cma_id->qp->qp_context;
+		isert_conn->dev_removed = true;
+		isert_disconnected_handler(cma_id, event->event);
+		wait_event_interruptible(isert_conn->rem_wait,
+					 isert_conn->state == ISER_CONN_DOWN);
+		kfree(isert_conn);
+		/*
+		 * return non-zero from the callback to destroy
+		 * the rdma cm id
+		 */
+		return 1;
 	case RDMA_CM_EVENT_REJECTED: /* FALLTHRU */
 	case RDMA_CM_EVENT_UNREACHABLE: /* FALLTHRU */
 	case RDMA_CM_EVENT_CONNECT_ERROR:
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index fc791efe3a10..c02ada57d7f5 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -158,6 +158,8 @@ struct isert_conn {
 	struct work_struct release_work;
 	bool logout_posted;
 	bool snd_w_inv;
+	wait_queue_head_t rem_wait;
+	bool dev_removed;
 };

 #define ISERT_MAX_CQ 64
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index dfa23b075a88..883bbfe08e0e 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -522,6 +522,11 @@ static int srpt_refresh_port(struct srpt_port *sport)
 	if (ret)
 		goto err_query_port;

+	snprintf(sport->port_guid, sizeof(sport->port_guid),
+		"0x%016llx%016llx",
+		be64_to_cpu(sport->gid.global.subnet_prefix),
+		be64_to_cpu(sport->gid.global.interface_id));
+
 	if (!sport->mad_agent) {
 		memset(&reg_req, 0, sizeof(reg_req));
 		reg_req.mgmt_class = IB_MGMT_CLASS_DEVICE_MGMT;
@@ -2548,10 +2553,6 @@ static void srpt_add_one(struct ib_device *device)
 				sdev->device->name, i);
 			goto err_ring;
 		}
-		snprintf(sport->port_guid, sizeof(sport->port_guid),
-			"0x%016llx%016llx",
-			be64_to_cpu(sport->gid.global.subnet_prefix),
-			be64_to_cpu(sport->gid.global.interface_id));
 	}

 	spin_lock(&srpt_dev_lock);
diff --git a/drivers/input/keyboard/tegra-kbc.c b/drivers/input/keyboard/tegra-kbc.c
index 7d61439be5f2..0c07e1023a46 100644
--- a/drivers/input/keyboard/tegra-kbc.c
+++ b/drivers/input/keyboard/tegra-kbc.c
@@ -376,7 +376,7 @@ static int tegra_kbc_start(struct tegra_kbc *kbc)
 	/* Reset the KBC controller to clear all previous status.*/
 	reset_control_assert(kbc->rst);
 	udelay(100);
-	reset_control_assert(kbc->rst);
+	reset_control_deassert(kbc->rst);
 	udelay(100);

 	tegra_kbc_config_pins(kbc);
diff --git a/drivers/input/rmi4/rmi_driver.c b/drivers/input/rmi4/rmi_driver.c
index faa295ec4f31..c83bce89028b 100644
--- a/drivers/input/rmi4/rmi_driver.c
+++ b/drivers/input/rmi4/rmi_driver.c
@@ -553,7 +553,6 @@ int rmi_read_register_desc(struct rmi_device *d, u16 addr,
 		goto free_struct_buff;

 	reg = find_first_bit(rdesc->presense_map, RMI_REG_DESC_PRESENSE_BITS);
-	map_offset = 0;
 	for (i = 0; i < rdesc->num_registers; i++) {
 		struct rmi_register_desc_item *item = &rdesc->registers[i];
 		int reg_size = struct_buf[offset];
@@ -576,6 +575,8 @@ int rmi_read_register_desc(struct rmi_device *d, u16 addr,
 		item->reg = reg;
 		item->reg_size = reg_size;

+		map_offset = 0;
+
 		do {
 			for (b = 0; b < 7; b++) {
 				if (struct_buf[offset] & (0x1 << b))
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index b4d34086e73f..405252a884dd 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -1305,6 +1305,7 @@ static int __init i8042_create_aux_port(int idx)
 	serio->write = i8042_aux_write;
 	serio->start = i8042_start;
 	serio->stop = i8042_stop;
+	serio->ps2_cmd_mutex = &i8042_mutex;
 	serio->port_data = port;
 	serio->dev.parent = &i8042_platform_device->dev;
 	if (idx < 0) {
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index a61b2153ab8c..1ce3ecbe37f8 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -1473,7 +1473,6 @@ static int ads7846_remove(struct spi_device *spi)

 	ads784x_hwmon_unregister(spi, ts);

-	regulator_disable(ts->reg);
 	regulator_put(ts->reg);

 	if (!ts->get_pendown_state) {
diff --git a/drivers/input/touchscreen/silead.c b/drivers/input/touchscreen/silead.c
index 7379fe153cf9..b2744a64e933 100644
--- a/drivers/input/touchscreen/silead.c
+++ b/drivers/input/touchscreen/silead.c
@@ -464,7 +464,7 @@ static int silead_ts_probe(struct i2c_client *client,
 		return -ENODEV;

 	/* Power GPIO pin */
-	data->gpio_power = gpiod_get_optional(dev, "power", GPIOD_OUT_LOW);
+	data->gpio_power = devm_gpiod_get_optional(dev, "power", GPIOD_OUT_LOW);
 	if (IS_ERR(data->gpio_power)) {
 		if (PTR_ERR(data->gpio_power) != -EPROBE_DEFER)
 			dev_err(dev, "Shutdown GPIO request failed\n");
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index ce801170d5f2..641e88761319 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -879,7 +879,7 @@ static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
 	 * We may have concurrent producers, so we need to be careful
 	 * not to touch any of the shadow cmdq state.
 	 */
-	queue_read(cmd, Q_ENT(q, idx), q->ent_dwords);
+	queue_read(cmd, Q_ENT(q, cons), q->ent_dwords);
 	dev_err(smmu->dev, "skipping command in error state:\n");
 	for (i = 0; i < ARRAY_SIZE(cmd); ++i)
 		dev_err(smmu->dev, "\t0x%016llx\n", (unsigned long long)cmd[i]);
@@ -890,7 +890,7 @@ static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
 		return;
 	}

-	queue_write(cmd, Q_ENT(q, idx), q->ent_dwords);
+	queue_write(Q_ENT(q, cons), cmd, q->ent_dwords);
 }

 static void arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
@@ -1034,6 +1034,9 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
 	case STRTAB_STE_0_CFG_S2_TRANS:
 		ste_live = true;
 		break;
+	case STRTAB_STE_0_CFG_ABORT:
+		if (disable_bypass)
+			break;
 	default:
 		BUG(); /* STE corruption */
 	}
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 4f49fe29f202..2db74ebc3240 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -686,8 +686,7 @@ static struct iommu_gather_ops arm_smmu_gather_ops = {

 static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
 {
-	int flags, ret;
-	u32 fsr, fsynr, resume;
+	u32 fsr, fsynr;
 	unsigned long iova;
 	struct iommu_domain *domain = dev;
 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
@@ -701,34 +700,15 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
 	if (!(fsr & FSR_FAULT))
 		return IRQ_NONE;

-	if (fsr & FSR_IGN)
-		dev_err_ratelimited(smmu->dev,
-				    "Unexpected context fault (fsr 0x%x)\n",
-				    fsr);
-
 	fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
-	flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
-
 	iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);
-	if (!report_iommu_fault(domain, smmu->dev, iova, flags)) {
-		ret = IRQ_HANDLED;
-		resume = RESUME_RETRY;
-	} else {
-		dev_err_ratelimited(smmu->dev,
-				    "Unhandled context fault: iova=0x%08lx, fsynr=0x%x, cb=%d\n",
-				    iova, fsynr, cfg->cbndx);
-		ret = IRQ_NONE;
-		resume = RESUME_TERMINATE;
-	}
-
-	/* Clear the faulting FSR */
-	writel(fsr, cb_base + ARM_SMMU_CB_FSR);

-	/* Retry or terminate any stalled transactions */
-	if (fsr & FSR_SS)
-		writel_relaxed(resume, cb_base + ARM_SMMU_CB_RESUME);
+	dev_err_ratelimited(smmu->dev,
+			    "Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cb=%d\n",
+			    fsr, iova, fsynr, cfg->cbndx);

-	return ret;
+	writel(fsr, cb_base + ARM_SMMU_CB_FSR);
+	return IRQ_HANDLED;
 }

 static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
@@ -837,7 +817,7 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
 	}

 	/* SCTLR */
-	reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP;
+	reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP;
 	if (stage1)
 		reg |= SCTLR_S1_ASIDPNE;
 #ifdef __BIG_ENDIAN
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 08a1e2f3690f..00c8a08d56e7 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -68,7 +68,8 @@ void iommu_put_dma_cookie(struct iommu_domain *domain)
 	if (!iovad)
 		return;

-	put_iova_domain(iovad);
+	if (iovad->granule)
+		put_iova_domain(iovad);
 	kfree(iovad);
 	domain->iova_cookie = NULL;
 }
@@ -151,12 +152,15 @@ int dma_direction_to_prot(enum dma_data_direction dir, bool coherent)
 	}
 }

-static struct iova *__alloc_iova(struct iova_domain *iovad, size_t size,
+static struct iova *__alloc_iova(struct iommu_domain *domain, size_t size,
 		dma_addr_t dma_limit)
 {
+	struct iova_domain *iovad = domain->iova_cookie;
 	unsigned long shift = iova_shift(iovad);
 	unsigned long length = iova_align(iovad, size) >> shift;

+	if (domain->geometry.force_aperture)
+		dma_limit = min(dma_limit, domain->geometry.aperture_end);
 	/*
 	 * Enforce size-alignment to be safe - there could perhaps be an
 	 * attribute to control this per-device, or at least per-domain...
@@ -314,7 +318,7 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
 	if (!pages)
 		return NULL;

-	iova = __alloc_iova(iovad, size, dev->coherent_dma_mask);
+	iova = __alloc_iova(domain, size, dev->coherent_dma_mask);
 	if (!iova)
 		goto out_free_pages;

@@ -386,7 +390,7 @@ dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
 	phys_addr_t phys = page_to_phys(page) + offset;
 	size_t iova_off = iova_offset(iovad, phys);
 	size_t len = iova_align(iovad, size + iova_off);
-	struct iova *iova = __alloc_iova(iovad, len, dma_get_mask(dev));
+	struct iova *iova = __alloc_iova(domain, len, dma_get_mask(dev));

 	if (!iova)
 		return DMA_ERROR_CODE;
@@ -538,7 +542,7 @@ int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
 		prev = s;
 	}

-	iova = __alloc_iova(iovad, iova_len, dma_get_mask(dev));
+	iova = __alloc_iova(domain, iova_len, dma_get_mask(dev));
 	if (!iova)
 		goto out_restore_sg;

diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
index 8c6139986d7d..def8ca1c982d 100644
--- a/drivers/iommu/io-pgtable-arm-v7s.c
+++ b/drivers/iommu/io-pgtable-arm-v7s.c
@@ -286,12 +286,14 @@ static int arm_v7s_pte_to_prot(arm_v7s_iopte pte, int lvl)
 	int prot = IOMMU_READ;
 	arm_v7s_iopte attr = pte >> ARM_V7S_ATTR_SHIFT(lvl);

-	if (attr & ARM_V7S_PTE_AP_RDONLY)
+	if (!(attr & ARM_V7S_PTE_AP_RDONLY))
 		prot |= IOMMU_WRITE;
 	if ((attr & (ARM_V7S_TEX_MASK << ARM_V7S_TEX_SHIFT)) == 0)
 		prot |= IOMMU_MMIO;
 	else if (pte & ARM_V7S_ATTR_C)
 		prot |= IOMMU_CACHE;
+	if (pte & ARM_V7S_ATTR_XN(lvl))
+		prot |= IOMMU_NOEXEC;

 	return prot;
 }
diff --git a/drivers/iommu/mtk_iommu.h b/drivers/iommu/mtk_iommu.h
index 9ed0a8462ccf..3dab13b4a211 100644
--- a/drivers/iommu/mtk_iommu.h
+++ b/drivers/iommu/mtk_iommu.h
@@ -55,19 +55,19 @@ struct mtk_iommu_data {
 	bool enable_4GB;
 };

-static int compare_of(struct device *dev, void *data)
+static inline int compare_of(struct device *dev, void *data)
 {
 	return dev->of_node == data;
 }

-static int mtk_iommu_bind(struct device *dev)
+static inline int mtk_iommu_bind(struct device *dev)
 {
 	struct mtk_iommu_data *data = dev_get_drvdata(dev);

 	return component_bind_all(dev, &data->smi_imu);
 }

-static void mtk_iommu_unbind(struct device *dev)
+static inline void mtk_iommu_unbind(struct device *dev)
 {
 	struct mtk_iommu_data *data = dev_get_drvdata(dev);

diff --git a/drivers/irqchip/irq-atmel-aic.c b/drivers/irqchip/irq-atmel-aic.c
index 112e17c2768b..37f952dd9fc9 100644
--- a/drivers/irqchip/irq-atmel-aic.c
+++ b/drivers/irqchip/irq-atmel-aic.c
@@ -176,6 +176,7 @@ static int aic_irq_domain_xlate(struct irq_domain *d,
 {
 	struct irq_domain_chip_generic *dgc = d->gc;
 	struct irq_chip_generic *gc;
+	unsigned long flags;
 	unsigned smr;
 	int idx;
 	int ret;
@@ -194,11 +195,11 @@ static int aic_irq_domain_xlate(struct irq_domain *d,

 	gc = dgc->gc[idx];

-	irq_gc_lock(gc);
+	irq_gc_lock_irqsave(gc, flags);
 	smr = irq_reg_readl(gc, AT91_AIC_SMR(*out_hwirq));
 	aic_common_set_priority(intspec[2], &smr);
 	irq_reg_writel(gc, smr, AT91_AIC_SMR(*out_hwirq));
-	irq_gc_unlock(gc);
+	irq_gc_unlock_irqrestore(gc, flags);

 	return ret;
 }
diff --git a/drivers/irqchip/irq-atmel-aic5.c b/drivers/irqchip/irq-atmel-aic5.c
index 4f0d068e1abe..2a624d87a035 100644
--- a/drivers/irqchip/irq-atmel-aic5.c
+++ b/drivers/irqchip/irq-atmel-aic5.c
@@ -258,6 +258,7 @@ static int aic5_irq_domain_xlate(struct irq_domain *d,
 			       unsigned int *out_type)
 {
 	struct irq_chip_generic *bgc = irq_get_domain_generic_chip(d, 0);
+	unsigned long flags;
 	unsigned smr;
 	int ret;

@@ -269,12 +270,12 @@ static int aic5_irq_domain_xlate(struct irq_domain *d,
 	if (ret)
 		return ret;

-	irq_gc_lock(bgc);
+	irq_gc_lock_irqsave(bgc, flags);
 	irq_reg_writel(bgc, *out_hwirq, AT91_AIC5_SSR);
 	smr = irq_reg_readl(bgc, AT91_AIC5_SMR);
 	aic_common_set_priority(intspec[2], &smr);
 	irq_reg_writel(bgc, smr, AT91_AIC5_SMR);
-	irq_gc_unlock(bgc);
+	irq_gc_unlock_irqrestore(bgc, flags);

 	return ret;
 }
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 7ceaba81efb4..36b9c28a5c91 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -1545,7 +1545,12 @@ static int its_force_quiescent(void __iomem *base)
 	u32 val;

 	val = readl_relaxed(base + GITS_CTLR);
-	if (val & GITS_CTLR_QUIESCENT)
+	/*
+	 * GIC architecture specification requires the ITS to be both
+	 * disabled and quiescent for writes to GITS_BASER<n> or
+	 * GITS_CBASER to not have UNPREDICTABLE results.
+	 */
+	if ((val & GITS_CTLR_QUIESCENT) && !(val & GITS_CTLR_ENABLE))
 		return 0;

 	/* Disable the generation of all interrupts to this ITS */
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 6fc56c3466b0..ede5672ab34d 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -667,13 +667,20 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
 #endif

 #ifdef CONFIG_CPU_PM
+/* Check whether it's single security state view */
+static bool gic_dist_security_disabled(void)
+{
+	return readl_relaxed(gic_data.dist_base + GICD_CTLR) & GICD_CTLR_DS;
+}
+
 static int gic_cpu_pm_notifier(struct notifier_block *self,
 			       unsigned long cmd, void *v)
 {
 	if (cmd == CPU_PM_EXIT) {
-		gic_enable_redist(true);
+		if (gic_dist_security_disabled())
+			gic_enable_redist(true);
 		gic_cpu_sys_reg_init();
-	} else if (cmd == CPU_PM_ENTER) {
+	} else if (cmd == CPU_PM_ENTER && gic_dist_security_disabled()) {
 		gic_write_grpen1(0);
 		gic_enable_redist(false);
 	}
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index c2cab572c511..390fac59c6bc 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -769,6 +769,13 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
 	int cpu;
 	unsigned long flags, map = 0;

+	if (unlikely(nr_cpu_ids == 1)) {
+		/* Only one CPU? let's do a self-IPI... */
+		writel_relaxed(2 << 24 | irq,
+			       gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
+		return;
+	}
+
 	raw_spin_lock_irqsave(&irq_controller_lock, flags);

 	/* Convert our logical CPU mask into a physical one. */
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index c5f33c3bd228..83f498393a7f 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -713,9 +713,6 @@ static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
 	unsigned long flags;
 	int i;

-	irq_set_chip_and_handler(virq, &gic_level_irq_controller,
-				 handle_level_irq);
-
 	spin_lock_irqsave(&gic_lock, flags);
 	gic_map_to_pin(intr, gic_cpu_pin);
 	gic_map_to_vpe(intr, mips_cm_vp_id(vpe));
@@ -732,6 +729,10 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
 {
 	if (GIC_HWIRQ_TO_LOCAL(hw) < GIC_NUM_LOCAL_INTRS)
 		return gic_local_irq_domain_map(d, virq, hw);
+
+	irq_set_chip_and_handler(virq, &gic_level_irq_controller,
+				 handle_level_irq);
+
 	return gic_shared_irq_domain_map(d, virq, hw, 0);
 }

@@ -771,11 +772,13 @@ static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq,
 		hwirq = GIC_SHARED_TO_HWIRQ(base_hwirq + i);

 		ret = irq_domain_set_hwirq_and_chip(d, virq + i, hwirq,
-						    &gic_edge_irq_controller,
+						    &gic_level_irq_controller,
 						    NULL);
 		if (ret)
 			goto error;

+		irq_set_handler(virq + i, handle_level_irq);
+
 		ret = gic_shared_irq_domain_map(d, virq + i, hwirq, cpu);
 		if (ret)
 			goto error;
@@ -890,10 +893,17 @@ void gic_dev_domain_free(struct irq_domain *d, unsigned int virq,
 	return;
 }

+static void gic_dev_domain_activate(struct irq_domain *domain,
+				    struct irq_data *d)
+{
+	gic_shared_irq_domain_map(domain, d->irq, d->hwirq, 0);
+}
+
 static struct irq_domain_ops gic_dev_domain_ops = {
 	.xlate = gic_dev_domain_xlate,
 	.alloc = gic_dev_domain_alloc,
 	.free = gic_dev_domain_free,
+	.activate = gic_dev_domain_activate,
 };

 static int gic_ipi_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
diff --git a/drivers/macintosh/ams/ams-i2c.c b/drivers/macintosh/ams/ams-i2c.c
index 978eda8d6678..8a3ba565106f 100644
--- a/drivers/macintosh/ams/ams-i2c.c
+++ b/drivers/macintosh/ams/ams-i2c.c
@@ -73,7 +73,6 @@ MODULE_DEVICE_TABLE(i2c, ams_id);
 static struct i2c_driver ams_i2c_driver = {
 	.driver = {
 		.name = "ams",
-		.owner = THIS_MODULE,
 	},
 	.probe = ams_i2c_probe,
 	.remove = ams_i2c_remove,
diff --git a/drivers/macintosh/windfarm_pm112.c b/drivers/macintosh/windfarm_pm112.c
index 3024685e4cca..96d16fca68b2 100644
--- a/drivers/macintosh/windfarm_pm112.c
+++ b/drivers/macintosh/windfarm_pm112.c
@@ -668,7 +668,6 @@ static struct platform_driver wf_pm112_driver = {
 	.remove = wf_pm112_remove,
 	.driver = {
 		.name = "windfarm",
-		.owner = THIS_MODULE,
 	},
 };

diff --git a/drivers/macintosh/windfarm_pm72.c b/drivers/macintosh/windfarm_pm72.c
index 2f506b9d5a52..e88cfb36a74d 100644
--- a/drivers/macintosh/windfarm_pm72.c
+++ b/drivers/macintosh/windfarm_pm72.c
@@ -789,7 +789,6 @@ static struct platform_driver wf_pm72_driver = {
 	.remove = wf_pm72_remove,
 	.driver = {
 		.name = "windfarm",
-		.owner = THIS_MODULE,
 	},
 };

diff --git a/drivers/macintosh/windfarm_rm31.c b/drivers/macintosh/windfarm_rm31.c
index 82fc86a90c1a..bdfcb8a8bfbb 100644
--- a/drivers/macintosh/windfarm_rm31.c
+++ b/drivers/macintosh/windfarm_rm31.c
@@ -682,7 +682,6 @@ static struct platform_driver wf_rm31_driver = {
 	.remove = wf_rm31_remove,
 	.driver = {
 		.name = "windfarm",
-		.owner = THIS_MODULE,
 	},
 };

diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index 97c372908e78..7817d40d81e7 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -127,6 +127,7 @@ config XGENE_SLIMPRO_MBOX
 config BCM_PDC_MBOX
 	tristate "Broadcom PDC Mailbox"
 	depends on ARM64 || COMPILE_TEST
+	depends on HAS_DMA
 	default ARCH_BCM_IPROC
 	help
 	  Mailbox implementation for the Broadcom PDC ring manager,
diff --git a/drivers/mailbox/bcm-pdc-mailbox.c b/drivers/mailbox/bcm-pdc-mailbox.c
index cbe0c1ee4ba9..c19dd820ea9b 100644
--- a/drivers/mailbox/bcm-pdc-mailbox.c
+++ b/drivers/mailbox/bcm-pdc-mailbox.c
@@ -469,7 +469,7 @@ static const struct file_operations pdc_debugfs_stats = {
  * this directory for a SPU.
  * @pdcs: PDC state structure
  */
-void pdc_setup_debugfs(struct pdc_state *pdcs)
+static void pdc_setup_debugfs(struct pdc_state *pdcs)
 {
 	char spu_stats_name[16];

@@ -485,7 +485,7 @@ void pdc_setup_debugfs(struct pdc_state *pdcs)
 			    &pdc_debugfs_stats);
 }

-void pdc_free_debugfs(void)
+static void pdc_free_debugfs(void)
 {
 	if (debugfs_dir && simple_empty(debugfs_dir)) {
 		debugfs_remove_recursive(debugfs_dir);
@@ -1191,10 +1191,11 @@ static void pdc_shutdown(struct mbox_chan *chan)
 {
 	struct pdc_state *pdcs = chan->con_priv;

-	if (pdcs)
-		dev_dbg(&pdcs->pdev->dev,
-			"Shutdown mailbox channel for PDC %u", pdcs->pdc_idx);
+	if (!pdcs)
+		return;

+	dev_dbg(&pdcs->pdev->dev,
+		"Shutdown mailbox channel for PDC %u", pdcs->pdc_idx);
 	pdc_ring_free(pdcs);
 }

diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 95a4ca6ce6ff..849ad441cd76 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -760,7 +760,8 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size,
 	if (!d->nr_stripes ||
 	    d->nr_stripes > INT_MAX ||
 	    d->nr_stripes > SIZE_MAX / sizeof(atomic_t)) {
-		pr_err("nr_stripes too large");
+		pr_err("nr_stripes too large or invalid: %u (start sector beyond end of disk?)",
+			(unsigned)d->nr_stripes);
 		return -ENOMEM;
 	}

@@ -1820,7 +1821,7 @@ static int cache_alloc(struct cache *ca)
 	free = roundup_pow_of_two(ca->sb.nbuckets) >> 10;

 	if (!init_fifo(&ca->free[RESERVE_BTREE], 8, GFP_KERNEL) ||
-	    !init_fifo(&ca->free[RESERVE_PRIO], prio_buckets(ca), GFP_KERNEL) ||
+	    !init_fifo_exact(&ca->free[RESERVE_PRIO], prio_buckets(ca), GFP_KERNEL) ||
 	    !init_fifo(&ca->free[RESERVE_MOVINGGC], free, GFP_KERNEL) ||
 	    !init_fifo(&ca->free[RESERVE_NONE], free, GFP_KERNEL) ||
 	    !init_fifo(&ca->free_inc, free << 2, GFP_KERNEL) ||
@@ -1844,7 +1845,7 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page,
 				struct block_device *bdev, struct cache *ca)
 {
 	char name[BDEVNAME_SIZE];
-	const char *err = NULL;
+	const char *err = NULL; /* must be set for any error case */
 	int ret = 0;

 	memcpy(&ca->sb, sb, sizeof(struct cache_sb));
@@ -1861,8 +1862,13 @@
 	ca->discard = CACHE_DISCARD(&ca->sb);

 	ret = cache_alloc(ca);
-	if (ret != 0)
+	if (ret != 0) {
+		if (ret == -ENOMEM)
+			err = "cache_alloc(): -ENOMEM";
+		else
+			err = "cache_alloc(): unknown error";
 		goto err;
+	}

 	if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache")) {
 		err = "error calling kobject_add";
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 6fff794e0c72..13041ee37ad6 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -2183,19 +2183,29 @@ location_show(struct mddev *mddev, char *page)
 static ssize_t
 location_store(struct mddev *mddev, const char *buf, size_t len)
 {
+	int rv;

+	rv = mddev_lock(mddev);
+	if (rv)
+		return rv;
 	if (mddev->pers) {
-		if (!mddev->pers->quiesce)
-			return -EBUSY;
-		if (mddev->recovery || mddev->sync_thread)
-			return -EBUSY;
+		if (!mddev->pers->quiesce) {
+			rv = -EBUSY;
+			goto out;
+		}
+		if (mddev->recovery || mddev->sync_thread) {
+			rv = -EBUSY;
+			goto out;
+		}
 	}

 	if (mddev->bitmap || mddev->bitmap_info.file ||
 	    mddev->bitmap_info.offset) {
 		/* bitmap already configured. Only option is to clear it */
-		if (strncmp(buf, "none", 4) != 0)
-			return -EBUSY;
+		if (strncmp(buf, "none", 4) != 0) {
+			rv = -EBUSY;
+			goto out;
+		}
 		if (mddev->pers) {
 			mddev->pers->quiesce(mddev, 1);
 			bitmap_destroy(mddev);
@@ -2214,21 +2224,25 @@ location_store(struct mddev *mddev, const char *buf, size_t len)
 		/* nothing to be done */;
 	else if (strncmp(buf, "file:", 5) == 0) {
 		/* Not supported yet */
-		return -EINVAL;
+		rv = -EINVAL;
+		goto out;
 	} else {
-		int rv;
 		if (buf[0] == '+')
 			rv = kstrtoll(buf+1, 10, &offset);
 		else
 			rv = kstrtoll(buf, 10, &offset);
 		if (rv)
-			return rv;
-		if (offset == 0)
-			return -EINVAL;
+			goto out;
+		if (offset == 0) {
+			rv = -EINVAL;
+			goto out;
+		}
 		if (mddev->bitmap_info.external == 0 &&
 		    mddev->major_version == 0 &&
-		    offset != mddev->bitmap_info.default_offset)
-			return -EINVAL;
+		    offset != mddev->bitmap_info.default_offset) {
+			rv = -EINVAL;
+			goto out;
+		}
 		mddev->bitmap_info.offset = offset;
 		if (mddev->pers) {
 			struct bitmap *bitmap;
@@ -2245,7 +2259,7 @@ location_store(struct mddev *mddev, const char *buf, size_t len)
 			mddev->pers->quiesce(mddev, 0);
 			if (rv) {
 				bitmap_destroy(mddev);
-				return rv;
+				goto out;
 			}
 		}
 	}
@@ -2257,6 +2271,11 @@ location_store(struct mddev *mddev, const char *buf, size_t len)
 		set_bit(MD_CHANGE_DEVS, &mddev->flags);
 		md_wakeup_thread(mddev->thread);
 	}
+	rv = 0;
+out:
+	mddev_unlock(mddev);
+	if (rv)
+		return rv;
 	return len;
 }

diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 6571c81465e1..8625040bae92 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -1879,7 +1879,7 @@ static int __init dm_bufio_init(void)
 	__cache_size_refresh();
 	mutex_unlock(&dm_bufio_clients_lock);

-	dm_bufio_wq = create_singlethread_workqueue("dm_bufio_cache");
+	dm_bufio_wq = alloc_workqueue("dm_bufio_cache", WQ_MEM_RECLAIM, 0);
 	if (!dm_bufio_wq)
 		return -ENOMEM;

diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 4e9784b4e0ac..874295757caa 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -181,7 +181,7 @@ struct crypt_config {
 	u8 key[0];
 };

-#define MIN_IOS 16
+#define MIN_IOS 64

 static void clone_init(struct dm_crypt_io *, struct bio *);
 static void kcryptd_queue_crypt(struct dm_crypt_io *io);
@@ -1453,7 +1453,7 @@ static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode)
 	unsigned i;
 	int err;

-	cc->tfms = kmalloc(cc->tfms_count * sizeof(struct crypto_skcipher *),
+	cc->tfms = kzalloc(cc->tfms_count * sizeof(struct crypto_skcipher *),
 			   GFP_KERNEL);
 	if (!cc->tfms)
 		return -ENOMEM;
@@ -1924,6 +1924,13 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
 		return DM_MAPIO_REMAPPED;
 	}

+	/*
+	 * Check if bio is too large, split as needed.
+	 */
+	if (unlikely(bio->bi_iter.bi_size > (BIO_MAX_PAGES << PAGE_SHIFT)) &&
+	    bio_data_dir(bio) == WRITE)
+		dm_accept_partial_bio(bio, ((BIO_MAX_PAGES << PAGE_SHIFT) >> SECTOR_SHIFT));
+
 	io = dm_per_bio_data(bio, cc->per_bio_data_size);
 	crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
 	io->ctx.req = (struct skcipher_request *)(io + 1);
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index 97e446d54a15..6a2e8dd44a1b 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -289,15 +289,13 @@ static int flakey_map(struct dm_target *ti, struct bio *bio)
 	pb->bio_submitted = true;

 	/*
-	 * Map reads as normal only if corrupt_bio_byte set.
+	 * Error reads if neither corrupt_bio_byte or drop_writes are set.
+	 * Otherwise, flakey_end_io() will decide if the reads should be modified.
 	 */
 	if (bio_data_dir(bio) == READ) {
-		/* If flags were specified, only corrupt those that match. */
-		if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == READ) &&
-		    all_corrupt_bio_flags_match(bio, fc))
-			goto map_bio;
-		else
+		if (!fc->corrupt_bio_byte && !test_bit(DROP_WRITES, &fc->flags))
 			return -EIO;
+		goto map_bio;
 	}

 	/*
@@ -334,14 +332,21 @@ static int flakey_end_io(struct dm_target *ti, struct bio *bio, int error)
 	struct flakey_c *fc = ti->private;
 	struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));

-	/*
-	 * Corrupt successful READs while in down state.
-	 */
 	if (!error && pb->bio_submitted && (bio_data_dir(bio) == READ)) {
-		if (fc->corrupt_bio_byte)
+		if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == READ) &&
+		    all_corrupt_bio_flags_match(bio, fc)) {
+			/*
+			 * Corrupt successful matching READs while in down state.
+			 */
 			corrupt_bio_data(bio, fc);
-		else
+
+		} else if (!test_bit(DROP_WRITES, &fc->flags)) {
+			/*
+			 * Error read during the down_interval if drop_writes
+			 * wasn't configured.
+			 */
 			return -EIO;
+		}
 	}

 	return error;
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
index 4ab68033f9d1..49e4d8d4558f 100644
--- a/drivers/md/dm-log-writes.c
+++ b/drivers/md/dm-log-writes.c
@@ -259,12 +259,12 @@ static int log_one_block(struct log_writes_c *lc,
259 goto out; 259 goto out;
260 sector++; 260 sector++;
261 261
262 bio = bio_alloc(GFP_KERNEL, block->vec_cnt); 262 atomic_inc(&lc->io_blocks);
263 bio = bio_alloc(GFP_KERNEL, min(block->vec_cnt, BIO_MAX_PAGES));
263 if (!bio) { 264 if (!bio) {
264 DMERR("Couldn't alloc log bio"); 265 DMERR("Couldn't alloc log bio");
265 goto error; 266 goto error;
266 } 267 }
267 atomic_inc(&lc->io_blocks);
268 bio->bi_iter.bi_size = 0; 268 bio->bi_iter.bi_size = 0;
269 bio->bi_iter.bi_sector = sector; 269 bio->bi_iter.bi_sector = sector;
270 bio->bi_bdev = lc->logdev->bdev; 270 bio->bi_bdev = lc->logdev->bdev;
@@ -282,7 +282,7 @@ static int log_one_block(struct log_writes_c *lc,
282 if (ret != block->vecs[i].bv_len) { 282 if (ret != block->vecs[i].bv_len) {
283 atomic_inc(&lc->io_blocks); 283 atomic_inc(&lc->io_blocks);
284 submit_bio(bio); 284 submit_bio(bio);
285 bio = bio_alloc(GFP_KERNEL, block->vec_cnt - i); 285 bio = bio_alloc(GFP_KERNEL, min(block->vec_cnt - i, BIO_MAX_PAGES));
286 if (!bio) { 286 if (!bio) {
287 DMERR("Couldn't alloc log bio"); 287 DMERR("Couldn't alloc log bio");
288 goto error; 288 goto error;
@@ -459,9 +459,9 @@ static int log_writes_ctr(struct dm_target *ti, unsigned int argc, char **argv)
459 goto bad; 459 goto bad;
460 } 460 }
461 461
462 ret = -EINVAL;
463 lc->log_kthread = kthread_run(log_writes_kthread, lc, "log-write"); 462 lc->log_kthread = kthread_run(log_writes_kthread, lc, "log-write");
464 if (!lc->log_kthread) { 463 if (IS_ERR(lc->log_kthread)) {
464 ret = PTR_ERR(lc->log_kthread);
465 ti->error = "Couldn't alloc kthread"; 465 ti->error = "Couldn't alloc kthread";
466 dm_put_device(ti, lc->dev); 466 dm_put_device(ti, lc->dev);
467 dm_put_device(ti, lc->logdev); 467 dm_put_device(ti, lc->logdev);
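
The log-writes constructor fix corrects a misuse of kthread_run(): on failure it returns an ERR_PTR, never NULL, so the old !lc->log_kthread test could never fire. A minimal sketch of the correct pattern (the worker function and context are placeholders):

	#include <linux/err.h>
	#include <linux/kthread.h>

	static int worker_fn(void *data);	/* hypothetical worker */

	static int start_worker(void *ctx)
	{
		struct task_struct *task;

		task = kthread_run(worker_fn, ctx, "log-write");
		if (IS_ERR(task))		/* never NULL on failure */
			return PTR_ERR(task);	/* propagate the real errno */
		return 0;
	}
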
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
index 4ca2d1df5b44..07fc1ad42ec5 100644
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -291,9 +291,10 @@ static void header_from_disk(struct log_header_core *core, struct log_header_dis
291 core->nr_regions = le64_to_cpu(disk->nr_regions); 291 core->nr_regions = le64_to_cpu(disk->nr_regions);
292} 292}
293 293
294static int rw_header(struct log_c *lc, int rw) 294static int rw_header(struct log_c *lc, int op)
295{ 295{
296 lc->io_req.bi_op = rw; 296 lc->io_req.bi_op = op;
297 lc->io_req.bi_op_flags = 0;
297 298
298 return dm_io(&lc->io_req, 1, &lc->header_location, NULL); 299 return dm_io(&lc->io_req, 1, &lc->header_location, NULL);
299} 300}
@@ -316,7 +317,7 @@ static int read_header(struct log_c *log)
316{ 317{
317 int r; 318 int r;
318 319
319 r = rw_header(log, READ); 320 r = rw_header(log, REQ_OP_READ);
320 if (r) 321 if (r)
321 return r; 322 return r;
322 323
@@ -630,7 +631,7 @@ static int disk_resume(struct dm_dirty_log *log)
630 header_to_disk(&lc->header, lc->disk_header); 631 header_to_disk(&lc->header, lc->disk_header);
631 632
632 /* write the new header */ 633 /* write the new header */
633 r = rw_header(lc, WRITE); 634 r = rw_header(lc, REQ_OP_WRITE);
634 if (!r) { 635 if (!r) {
635 r = flush_header(lc); 636 r = flush_header(lc);
636 if (r) 637 if (r)
@@ -698,7 +699,7 @@ static int disk_flush(struct dm_dirty_log *log)
698 log_clear_bit(lc, lc->clean_bits, i); 699 log_clear_bit(lc, lc->clean_bits, i);
699 } 700 }
700 701
701 r = rw_header(lc, WRITE); 702 r = rw_header(lc, REQ_OP_WRITE);
702 if (r) 703 if (r)
703 fail_log_device(lc); 704 fail_log_device(lc);
704 else { 705 else {
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 1b9795d75ef8..8abde6b8cedc 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -191,7 +191,6 @@ struct raid_dev {
191#define RT_FLAG_RS_BITMAP_LOADED 2 191#define RT_FLAG_RS_BITMAP_LOADED 2
192#define RT_FLAG_UPDATE_SBS 3 192#define RT_FLAG_UPDATE_SBS 3
193#define RT_FLAG_RESHAPE_RS 4 193#define RT_FLAG_RESHAPE_RS 4
194#define RT_FLAG_KEEP_RS_FROZEN 5
195 194
196/* Array elements of 64 bit needed for rebuild/failed disk bits */ 195/* Array elements of 64 bit needed for rebuild/failed disk bits */
197#define DISKS_ARRAY_ELEMS ((MAX_RAID_DEVICES + (sizeof(uint64_t) * 8 - 1)) / sizeof(uint64_t) / 8) 196#define DISKS_ARRAY_ELEMS ((MAX_RAID_DEVICES + (sizeof(uint64_t) * 8 - 1)) / sizeof(uint64_t) / 8)
@@ -861,6 +860,9 @@ static int validate_region_size(struct raid_set *rs, unsigned long region_size)
861{ 860{
862 unsigned long min_region_size = rs->ti->len / (1 << 21); 861 unsigned long min_region_size = rs->ti->len / (1 << 21);
863 862
863 if (rs_is_raid0(rs))
864 return 0;
865
864 if (!region_size) { 866 if (!region_size) {
865 /* 867 /*
866 * Choose a reasonable default. All figures in sectors. 868 * Choose a reasonable default. All figures in sectors.
@@ -930,6 +932,8 @@ static int validate_raid_redundancy(struct raid_set *rs)
930 rebuild_cnt++; 932 rebuild_cnt++;
931 933
932 switch (rs->raid_type->level) { 934 switch (rs->raid_type->level) {
935 case 0:
936 break;
933 case 1: 937 case 1:
934 if (rebuild_cnt >= rs->md.raid_disks) 938 if (rebuild_cnt >= rs->md.raid_disks)
935 goto too_many; 939 goto too_many;
@@ -2335,6 +2339,13 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
2335 case 0: 2339 case 0:
2336 break; 2340 break;
2337 default: 2341 default:
2342 /*
2343 * We have to keep any raid0 data/metadata device pairs or
2344 * the MD raid0 personality will fail to start the array.
2345 */
2346 if (rs_is_raid0(rs))
2347 continue;
2348
2338 dev = container_of(rdev, struct raid_dev, rdev); 2349 dev = container_of(rdev, struct raid_dev, rdev);
2339 if (dev->meta_dev) 2350 if (dev->meta_dev)
2340 dm_put_device(ti, dev->meta_dev); 2351 dm_put_device(ti, dev->meta_dev);
@@ -2579,7 +2590,6 @@ static int rs_prepare_reshape(struct raid_set *rs)
2579 } else { 2590 } else {
2580 /* Process raid1 without delta_disks */ 2591 /* Process raid1 without delta_disks */
2581 mddev->raid_disks = rs->raid_disks; 2592 mddev->raid_disks = rs->raid_disks;
2582 set_bit(RT_FLAG_KEEP_RS_FROZEN, &rs->runtime_flags);
2583 reshape = false; 2593 reshape = false;
2584 } 2594 }
2585 } else { 2595 } else {
@@ -2590,7 +2600,6 @@ static int rs_prepare_reshape(struct raid_set *rs)
2590 if (reshape) { 2600 if (reshape) {
2591 set_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags); 2601 set_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags);
2592 set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags); 2602 set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
2593 set_bit(RT_FLAG_KEEP_RS_FROZEN, &rs->runtime_flags);
2594 } else if (mddev->raid_disks < rs->raid_disks) 2603 } else if (mddev->raid_disks < rs->raid_disks)
2595 /* Create new superblocks and bitmaps, if any new disks */ 2604 /* Create new superblocks and bitmaps, if any new disks */
2596 set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags); 2605 set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
@@ -2902,7 +2911,6 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
2902 goto bad; 2911 goto bad;
2903 2912
2904 set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags); 2913 set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
2905 set_bit(RT_FLAG_KEEP_RS_FROZEN, &rs->runtime_flags);
2906 /* Takeover ain't recovery, so disable recovery */ 2914 /* Takeover ain't recovery, so disable recovery */
2907 rs_setup_recovery(rs, MaxSector); 2915 rs_setup_recovery(rs, MaxSector);
2908 rs_set_new(rs); 2916 rs_set_new(rs);
@@ -3386,21 +3394,28 @@ static void raid_postsuspend(struct dm_target *ti)
3386{ 3394{
3387 struct raid_set *rs = ti->private; 3395 struct raid_set *rs = ti->private;
3388 3396
3389 if (test_and_clear_bit(RT_FLAG_RS_RESUMED, &rs->runtime_flags)) { 3397 if (!rs->md.suspended)
3390 if (!rs->md.suspended) 3398 mddev_suspend(&rs->md);
3391 mddev_suspend(&rs->md); 3399
3392 rs->md.ro = 1; 3400 rs->md.ro = 1;
3393 }
3394} 3401}
3395 3402
3396static void attempt_restore_of_faulty_devices(struct raid_set *rs) 3403static void attempt_restore_of_faulty_devices(struct raid_set *rs)
3397{ 3404{
3398 int i; 3405 int i;
3399 uint64_t failed_devices, cleared_failed_devices = 0; 3406 uint64_t cleared_failed_devices[DISKS_ARRAY_ELEMS];
3400 unsigned long flags; 3407 unsigned long flags;
3408 bool cleared = false;
3401 struct dm_raid_superblock *sb; 3409 struct dm_raid_superblock *sb;
3410 struct mddev *mddev = &rs->md;
3402 struct md_rdev *r; 3411 struct md_rdev *r;
3403 3412
3413 /* RAID personalities have to provide hot add/remove methods or we need to bail out. */
3414 if (!mddev->pers || !mddev->pers->hot_add_disk || !mddev->pers->hot_remove_disk)
3415 return;
3416
3417 memset(cleared_failed_devices, 0, sizeof(cleared_failed_devices));
3418
3404 for (i = 0; i < rs->md.raid_disks; i++) { 3419 for (i = 0; i < rs->md.raid_disks; i++) {
3405 r = &rs->dev[i].rdev; 3420 r = &rs->dev[i].rdev;
3406 if (test_bit(Faulty, &r->flags) && r->sb_page && 3421 if (test_bit(Faulty, &r->flags) && r->sb_page &&
@@ -3420,7 +3435,7 @@ static void attempt_restore_of_faulty_devices(struct raid_set *rs)
3420 * ourselves. 3435 * ourselves.
3421 */ 3436 */
3422 if ((r->raid_disk >= 0) && 3437 if ((r->raid_disk >= 0) &&
3423 (r->mddev->pers->hot_remove_disk(r->mddev, r) != 0)) 3438 (mddev->pers->hot_remove_disk(mddev, r) != 0))
3424 /* Failed to revive this device, try next */ 3439 /* Failed to revive this device, try next */
3425 continue; 3440 continue;
3426 3441
@@ -3430,22 +3445,30 @@ static void attempt_restore_of_faulty_devices(struct raid_set *rs)
3430 clear_bit(Faulty, &r->flags); 3445 clear_bit(Faulty, &r->flags);
3431 clear_bit(WriteErrorSeen, &r->flags); 3446 clear_bit(WriteErrorSeen, &r->flags);
3432 clear_bit(In_sync, &r->flags); 3447 clear_bit(In_sync, &r->flags);
3433 if (r->mddev->pers->hot_add_disk(r->mddev, r)) { 3448 if (mddev->pers->hot_add_disk(mddev, r)) {
3434 r->raid_disk = -1; 3449 r->raid_disk = -1;
3435 r->saved_raid_disk = -1; 3450 r->saved_raid_disk = -1;
3436 r->flags = flags; 3451 r->flags = flags;
3437 } else { 3452 } else {
3438 r->recovery_offset = 0; 3453 r->recovery_offset = 0;
3439 cleared_failed_devices |= 1 << i; 3454 set_bit(i, (void *) cleared_failed_devices);
3455 cleared = true;
3440 } 3456 }
3441 } 3457 }
3442 } 3458 }
3443 if (cleared_failed_devices) { 3459
3460 /* If any failed devices could be cleared, update all sbs failed_devices bits */
3461 if (cleared) {
3462 uint64_t failed_devices[DISKS_ARRAY_ELEMS];
3463
3444 rdev_for_each(r, &rs->md) { 3464 rdev_for_each(r, &rs->md) {
3445 sb = page_address(r->sb_page); 3465 sb = page_address(r->sb_page);
3446 failed_devices = le64_to_cpu(sb->failed_devices); 3466 sb_retrieve_failed_devices(sb, failed_devices);
3447 failed_devices &= ~cleared_failed_devices; 3467
3448 sb->failed_devices = cpu_to_le64(failed_devices); 3468 for (i = 0; i < DISKS_ARRAY_ELEMS; i++)
3469 failed_devices[i] &= ~cleared_failed_devices[i];
3470
3471 sb_update_failed_devices(sb, failed_devices);
3449 } 3472 }
3450 } 3473 }
3451} 3474}
@@ -3610,26 +3633,15 @@ static void raid_resume(struct dm_target *ti)
3610 * devices are reachable again. 3633 * devices are reachable again.
3611 */ 3634 */
3612 attempt_restore_of_faulty_devices(rs); 3635 attempt_restore_of_faulty_devices(rs);
3613 } else { 3636 }
3614 mddev->ro = 0;
3615 mddev->in_sync = 0;
3616 3637
3617 /* 3638 mddev->ro = 0;
3618 * When passing in flags to the ctr, we expect userspace 3639 mddev->in_sync = 0;
3619 * to reset them because they made it to the superblocks
3620 * and reload the mapping anyway.
3621 *
3622 * -> only unfreeze recovery in case of a table reload or
3623 * we'll have a bogus recovery/reshape position
3624 * retrieved from the superblock by the ctr because
3625 * the ongoing recovery/reshape will change it after read.
3626 */
3627 if (!test_bit(RT_FLAG_KEEP_RS_FROZEN, &rs->runtime_flags))
3628 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
3629 3640
3630 if (mddev->suspended) 3641 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
3631 mddev_resume(mddev); 3642
3632 } 3643 if (mddev->suspended)
3644 mddev_resume(mddev);
3633} 3645}
3634 3646
3635static struct target_type raid_target = { 3647static struct target_type raid_target = {
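
attempt_restore_of_faulty_devices() now tracks revived devices in a multi-word bitmap (uint64_t cleared_failed_devices[DISKS_ARRAY_ELEMS]) instead of a single u64, so arrays with more than 64 legs are handled. The superblock update then becomes a word-wise mask; a sketch of just that step (sb_retrieve_failed_devices() and sb_update_failed_devices() are the helpers the hunk calls):

	#include <linux/types.h>

	/* Sketch: drop every revived device's bit from the failure bitmap. */
	static void clear_failed_bits(uint64_t *failed, const uint64_t *cleared,
				      int words)
	{
		int i;

		for (i = 0; i < words; i++)
			failed[i] &= ~cleared[i];
	}
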
diff --git a/drivers/md/dm-round-robin.c b/drivers/md/dm-round-robin.c
index 4ace1da17db8..6c25213ab38c 100644
--- a/drivers/md/dm-round-robin.c
+++ b/drivers/md/dm-round-robin.c
@@ -210,14 +210,17 @@ static struct dm_path *rr_select_path(struct path_selector *ps, size_t nr_bytes)
210 struct path_info *pi = NULL; 210 struct path_info *pi = NULL;
211 struct dm_path *current_path = NULL; 211 struct dm_path *current_path = NULL;
212 212
213 local_irq_save(flags);
213 current_path = *this_cpu_ptr(s->current_path); 214 current_path = *this_cpu_ptr(s->current_path);
214 if (current_path) { 215 if (current_path) {
215 percpu_counter_dec(&s->repeat_count); 216 percpu_counter_dec(&s->repeat_count);
216 if (percpu_counter_read_positive(&s->repeat_count) > 0) 217 if (percpu_counter_read_positive(&s->repeat_count) > 0) {
218 local_irq_restore(flags);
217 return current_path; 219 return current_path;
220 }
218 } 221 }
219 222
220 spin_lock_irqsave(&s->lock, flags); 223 spin_lock(&s->lock);
221 if (!list_empty(&s->valid_paths)) { 224 if (!list_empty(&s->valid_paths)) {
222 pi = list_entry(s->valid_paths.next, struct path_info, list); 225 pi = list_entry(s->valid_paths.next, struct path_info, list);
223 list_move_tail(&pi->list, &s->valid_paths); 226 list_move_tail(&pi->list, &s->valid_paths);
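
The round-robin fix widens the interrupt-disabled region: local_irq_save() now covers both the per-CPU current_path fast path and the list-walk slow path, and the lock can then be taken with plain spin_lock() because interrupts are already off. A sketch of the pattern (try_fast_path() is a placeholder for the cached-path check):

	unsigned long flags;

	local_irq_save(flags);			/* covers the per-CPU access too */
	if (try_fast_path()) {
		local_irq_restore(flags);
		return;
	}
	spin_lock(&s->lock);			/* irqs already disabled, no _irqsave */
	/* ... pick the next path from the shared list ... */
	spin_unlock_irqrestore(&s->lock, flags);	/* re-enables interrupts */
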
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index 41573f1f626f..34a840d9df76 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -834,8 +834,10 @@ static int join(struct mddev *mddev, int nodes)
834 goto err; 834 goto err;
835 } 835 }
836 cinfo->ack_lockres = lockres_init(mddev, "ack", ack_bast, 0); 836 cinfo->ack_lockres = lockres_init(mddev, "ack", ack_bast, 0);
837 if (!cinfo->ack_lockres) 837 if (!cinfo->ack_lockres) {
838 ret = -ENOMEM;
838 goto err; 839 goto err;
840 }
839 /* get sync CR lock on ACK. */ 841 /* get sync CR lock on ACK. */
840 if (dlm_lock_sync(cinfo->ack_lockres, DLM_LOCK_CR)) 842 if (dlm_lock_sync(cinfo->ack_lockres, DLM_LOCK_CR))
841 pr_err("md-cluster: failed to get a sync CR lock on ACK!(%d)\n", 843 pr_err("md-cluster: failed to get a sync CR lock on ACK!(%d)\n",
@@ -849,8 +851,10 @@ static int join(struct mddev *mddev, int nodes)
849 pr_info("md-cluster: Joined cluster %s slot %d\n", str, cinfo->slot_number); 851 pr_info("md-cluster: Joined cluster %s slot %d\n", str, cinfo->slot_number);
850 snprintf(str, 64, "bitmap%04d", cinfo->slot_number - 1); 852 snprintf(str, 64, "bitmap%04d", cinfo->slot_number - 1);
851 cinfo->bitmap_lockres = lockres_init(mddev, str, NULL, 1); 853 cinfo->bitmap_lockres = lockres_init(mddev, str, NULL, 1);
852 if (!cinfo->bitmap_lockres) 854 if (!cinfo->bitmap_lockres) {
855 ret = -ENOMEM;
853 goto err; 856 goto err;
857 }
854 if (dlm_lock_sync(cinfo->bitmap_lockres, DLM_LOCK_PW)) { 858 if (dlm_lock_sync(cinfo->bitmap_lockres, DLM_LOCK_PW)) {
855 pr_err("Failed to get bitmap lock\n"); 859 pr_err("Failed to get bitmap lock\n");
856 ret = -EINVAL; 860 ret = -EINVAL;
@@ -858,8 +862,10 @@ static int join(struct mddev *mddev, int nodes)
858 } 862 }
859 863
860 cinfo->resync_lockres = lockres_init(mddev, "resync", NULL, 0); 864 cinfo->resync_lockres = lockres_init(mddev, "resync", NULL, 0);
861 if (!cinfo->resync_lockres) 865 if (!cinfo->resync_lockres) {
866 ret = -ENOMEM;
862 goto err; 867 goto err;
868 }
863 869
864 return 0; 870 return 0;
865err: 871err:
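
Each md-cluster hunk above fixes the same bug class: jumping to a shared err label without first setting ret, so join() could return a stale success value. A two-line sketch of the rule (lockres_init() as declared in this file):

	res = lockres_init(mddev, "ack", ack_bast, 0);
	if (!res) {
		ret = -ENOMEM;	/* without this, ret may still hold 0 from earlier */
		goto err;
	}
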
diff --git a/drivers/md/md.c b/drivers/md/md.c
index d646f6e444f0..915e84d631a2 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1604,11 +1604,8 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
1604 mddev->new_chunk_sectors = mddev->chunk_sectors; 1604 mddev->new_chunk_sectors = mddev->chunk_sectors;
1605 } 1605 }
1606 1606
1607 if (le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL) { 1607 if (le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)
1608 set_bit(MD_HAS_JOURNAL, &mddev->flags); 1608 set_bit(MD_HAS_JOURNAL, &mddev->flags);
1609 if (mddev->recovery_cp == MaxSector)
1610 set_bit(MD_JOURNAL_CLEAN, &mddev->flags);
1611 }
1612 } else if (mddev->pers == NULL) { 1609 } else if (mddev->pers == NULL) {
1613 /* Insist of good event counter while assembling, except for 1610 /* Insist of good event counter while assembling, except for
1614 * spares (which don't need an event count) */ 1611 * spares (which don't need an event count) */
@@ -5851,6 +5848,9 @@ static int get_array_info(struct mddev *mddev, void __user *arg)
5851 working++; 5848 working++;
5852 if (test_bit(In_sync, &rdev->flags)) 5849 if (test_bit(In_sync, &rdev->flags))
5853 insync++; 5850 insync++;
5851 else if (test_bit(Journal, &rdev->flags))
5852 /* TODO: add journal count to md_u.h */
5853 ;
5854 else 5854 else
5855 spare++; 5855 spare++;
5856 } 5856 }
@@ -7610,16 +7610,12 @@ EXPORT_SYMBOL(unregister_md_cluster_operations);
7610 7610
7611int md_setup_cluster(struct mddev *mddev, int nodes) 7611int md_setup_cluster(struct mddev *mddev, int nodes)
7612{ 7612{
7613 int err; 7613 if (!md_cluster_ops)
7614 7614 request_module("md-cluster");
7615 err = request_module("md-cluster");
7616 if (err) {
7617 pr_err("md-cluster module not found.\n");
7618 return -ENOENT;
7619 }
7620
7621 spin_lock(&pers_lock); 7615 spin_lock(&pers_lock);
7616 /* ensure module won't be unloaded */
7622 if (!md_cluster_ops || !try_module_get(md_cluster_mod)) { 7617 if (!md_cluster_ops || !try_module_get(md_cluster_mod)) {
 7618 pr_err("can't find md-cluster module or get its reference.\n");
7623 spin_unlock(&pers_lock); 7619 spin_unlock(&pers_lock);
7624 return -ENOENT; 7620 return -ENOENT;
7625 } 7621 }
@@ -7862,6 +7858,7 @@ void md_do_sync(struct md_thread *thread)
7862 */ 7858 */
7863 7859
7864 do { 7860 do {
7861 int mddev2_minor = -1;
7865 mddev->curr_resync = 2; 7862 mddev->curr_resync = 2;
7866 7863
7867 try_again: 7864 try_again:
@@ -7891,10 +7888,14 @@ void md_do_sync(struct md_thread *thread)
7891 prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE); 7888 prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
7892 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) && 7889 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
7893 mddev2->curr_resync >= mddev->curr_resync) { 7890 mddev2->curr_resync >= mddev->curr_resync) {
7894 printk(KERN_INFO "md: delaying %s of %s" 7891 if (mddev2_minor != mddev2->md_minor) {
7895 " until %s has finished (they" 7892 mddev2_minor = mddev2->md_minor;
7896 " share one or more physical units)\n", 7893 printk(KERN_INFO "md: delaying %s of %s"
7897 desc, mdname(mddev), mdname(mddev2)); 7894 " until %s has finished (they"
7895 " share one or more physical units)\n",
7896 desc, mdname(mddev),
7897 mdname(mddev2));
7898 }
7898 mddev_put(mddev2); 7899 mddev_put(mddev2);
7899 if (signal_pending(current)) 7900 if (signal_pending(current))
7900 flush_signals(current); 7901 flush_signals(current);
@@ -8275,16 +8276,13 @@ no_add:
8275static void md_start_sync(struct work_struct *ws) 8276static void md_start_sync(struct work_struct *ws)
8276{ 8277{
8277 struct mddev *mddev = container_of(ws, struct mddev, del_work); 8278 struct mddev *mddev = container_of(ws, struct mddev, del_work);
8278 int ret = 0;
8279 8279
8280 mddev->sync_thread = md_register_thread(md_do_sync, 8280 mddev->sync_thread = md_register_thread(md_do_sync,
8281 mddev, 8281 mddev,
8282 "resync"); 8282 "resync");
8283 if (!mddev->sync_thread) { 8283 if (!mddev->sync_thread) {
8284 if (!(mddev_is_clustered(mddev) && ret == -EAGAIN)) 8284 printk(KERN_ERR "%s: could not start resync thread...\n",
8285 printk(KERN_ERR "%s: could not start resync" 8285 mdname(mddev));
8286 " thread...\n",
8287 mdname(mddev));
8288 /* leave the spares where they are, it shouldn't hurt */ 8286 /* leave the spares where they are, it shouldn't hurt */
8289 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 8287 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
8290 clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 8288 clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
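
Among the md.c hunks, the resync-delay change is worth spelling out: md_do_sync() used to print the "delaying ... until ... has finished" message on every pass through its wait loop; caching the minor of the blocking array prints it once per conflicting device. A sketch of the rate-limiting idea (the loop condition and wait helper are placeholders):

	int last_minor = -1;

	while (conflict_pending()) {		/* hypothetical condition */
		if (last_minor != mddev2->md_minor) {
			last_minor = mddev2->md_minor;
			printk(KERN_INFO "md: delaying %s of %s until %s has finished\n",
			       desc, mdname(mddev), mdname(mddev2));
		}
		wait_for_progress();		/* hypothetical wait */
	}
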
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 0e4efcd10795..be1a9fca3b2d 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1064,6 +1064,8 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
1064 int max_sectors; 1064 int max_sectors;
1065 int sectors; 1065 int sectors;
1066 1066
1067 md_write_start(mddev, bio);
1068
1067 /* 1069 /*
1068 * Register the new request and wait if the reconstruction 1070 * Register the new request and wait if the reconstruction
1069 * thread has put up a bar for new requests. 1071 * thread has put up a bar for new requests.
@@ -1445,8 +1447,6 @@ static void raid10_make_request(struct mddev *mddev, struct bio *bio)
1445 return; 1447 return;
1446 } 1448 }
1447 1449
1448 md_write_start(mddev, bio);
1449
1450 do { 1450 do {
1451 1451
1452 /* 1452 /*
@@ -2465,20 +2465,21 @@ static int narrow_write_error(struct r10bio *r10_bio, int i)
2465 2465
2466 while (sect_to_write) { 2466 while (sect_to_write) {
2467 struct bio *wbio; 2467 struct bio *wbio;
2468 sector_t wsector;
2468 if (sectors > sect_to_write) 2469 if (sectors > sect_to_write)
2469 sectors = sect_to_write; 2470 sectors = sect_to_write;
2470 /* Write at 'sector' for 'sectors' */ 2471 /* Write at 'sector' for 'sectors' */
2471 wbio = bio_clone_mddev(bio, GFP_NOIO, mddev); 2472 wbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
2472 bio_trim(wbio, sector - bio->bi_iter.bi_sector, sectors); 2473 bio_trim(wbio, sector - bio->bi_iter.bi_sector, sectors);
2473 wbio->bi_iter.bi_sector = (r10_bio->devs[i].addr+ 2474 wsector = r10_bio->devs[i].addr + (sector - r10_bio->sector);
2474 choose_data_offset(r10_bio, rdev) + 2475 wbio->bi_iter.bi_sector = wsector +
2475 (sector - r10_bio->sector)); 2476 choose_data_offset(r10_bio, rdev);
2476 wbio->bi_bdev = rdev->bdev; 2477 wbio->bi_bdev = rdev->bdev;
2477 bio_set_op_attrs(wbio, REQ_OP_WRITE, 0); 2478 bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
2478 2479
2479 if (submit_bio_wait(wbio) < 0) 2480 if (submit_bio_wait(wbio) < 0)
2480 /* Failure! */ 2481 /* Failure! */
2481 ok = rdev_set_badblocks(rdev, sector, 2482 ok = rdev_set_badblocks(rdev, wsector,
2482 sectors, 0) 2483 sectors, 0)
2483 && ok; 2484 && ok;
2484 2485
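
The narrow_write_error() fix is about unit consistency: the retry write went to the per-device sector, but a failed retry recorded the badblock at the array sector. Computing the device sector once keeps both in step; the corrected core, condensed:

	/* One device-relative sector for both the retry and the record. */
	sector_t wsector = r10_bio->devs[i].addr + (sector - r10_bio->sector);

	wbio->bi_iter.bi_sector = wsector + choose_data_offset(r10_bio, rdev);
	if (submit_bio_wait(wbio) < 0)
		ok = rdev_set_badblocks(rdev, wsector, sectors, 0) && ok;
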
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index 51f76ddbe265..1b1ab4a1d132 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -96,7 +96,6 @@ struct r5l_log {
96 spinlock_t no_space_stripes_lock; 96 spinlock_t no_space_stripes_lock;
97 97
98 bool need_cache_flush; 98 bool need_cache_flush;
99 bool in_teardown;
100}; 99};
101 100
102/* 101/*
@@ -704,31 +703,22 @@ static void r5l_write_super_and_discard_space(struct r5l_log *log,
704 703
705 mddev = log->rdev->mddev; 704 mddev = log->rdev->mddev;
706 /* 705 /*
707 * This is to avoid a deadlock. r5l_quiesce holds reconfig_mutex and 706 * Discard could zero data, so before discard we must make sure
708 * wait for this thread to finish. This thread waits for 707 * superblock is updated to new log tail. Updating superblock (either
709 * MD_CHANGE_PENDING clear, which is supposed to be done in 708 * directly call md_update_sb() or depend on md thread) must hold
710 * md_check_recovery(). md_check_recovery() tries to get 709 * reconfig mutex. On the other hand, raid5_quiesce is called with
 711 * reconfig_mutex. Since r5l_quiesce already holds the mutex, 710 * reconfig_mutex held. The first step of raid5_quiesce() is waiting
 712 * md_check_recovery() fails, so the PENDING never get cleared. The 711 * for all IO to finish, hence waiting for the reclaim thread, while the
 713 * in_teardown check workaround this issue. 712 * reclaim thread is calling this function and waiting for the reconfig
 713 * mutex. So there is a deadlock. We work around this issue with a trylock.
714 * FIXME: we could miss discard if we can't take reconfig mutex
714 */ 715 */
715 if (!log->in_teardown) { 716 set_mask_bits(&mddev->flags, 0,
716 set_mask_bits(&mddev->flags, 0, 717 BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING));
717 BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING)); 718 if (!mddev_trylock(mddev))
718 md_wakeup_thread(mddev->thread); 719 return;
719 wait_event(mddev->sb_wait, 720 md_update_sb(mddev, 1);
720 !test_bit(MD_CHANGE_PENDING, &mddev->flags) || 721 mddev_unlock(mddev);
721 log->in_teardown);
722 /*
723 * r5l_quiesce could run after in_teardown check and hold
724 * mutex first. Superblock might get updated twice.
725 */
726 if (log->in_teardown)
727 md_update_sb(mddev, 1);
728 } else {
729 WARN_ON(!mddev_is_locked(mddev));
730 md_update_sb(mddev, 1);
731 }
732 722
733 /* discard IO error really doesn't matter, ignore it */ 723 /* discard IO error really doesn't matter, ignore it */
734 if (log->last_checkpoint < end) { 724 if (log->last_checkpoint < end) {
@@ -827,7 +817,6 @@ void r5l_quiesce(struct r5l_log *log, int state)
827 if (!log || state == 2) 817 if (!log || state == 2)
828 return; 818 return;
829 if (state == 0) { 819 if (state == 0) {
830 log->in_teardown = 0;
831 /* 820 /*
832 * This is a special case for hotadd. In suspend, the array has 821 * This is a special case for hotadd. In suspend, the array has
833 * no journal. In resume, journal is initialized as well as the 822 * no journal. In resume, journal is initialized as well as the
@@ -838,11 +827,6 @@ void r5l_quiesce(struct r5l_log *log, int state)
838 log->reclaim_thread = md_register_thread(r5l_reclaim_thread, 827 log->reclaim_thread = md_register_thread(r5l_reclaim_thread,
839 log->rdev->mddev, "reclaim"); 828 log->rdev->mddev, "reclaim");
840 } else if (state == 1) { 829 } else if (state == 1) {
841 /*
842 * at this point all stripes are finished, so io_unit is at
843 * least in STRIPE_END state
844 */
845 log->in_teardown = 1;
846 /* make sure r5l_write_super_and_discard_space exits */ 830 /* make sure r5l_write_super_and_discard_space exits */
847 mddev = log->rdev->mddev; 831 mddev = log->rdev->mddev;
848 wake_up(&mddev->sb_wait); 832 wake_up(&mddev->sb_wait);
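
The raid5-cache rework replaces the in_teardown handshake with a trylock: if the reconfig mutex is contended, the superblock update (and therefore the discard) is simply skipped, which is safe because a missed discard only leaves stale log space behind. The core of the new scheme, condensed:

	/* Mark the superblock dirty, then update it only if the lock is free. */
	set_mask_bits(&mddev->flags, 0,
		      BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING));
	if (!mddev_trylock(mddev))
		return;			/* contended: bail out instead of deadlocking */
	md_update_sb(mddev, 1);		/* superblock now points at the new log tail */
	mddev_unlock(mddev);
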
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 8912407a4dd0..ee7fc3701700 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -659,6 +659,7 @@ raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
659{ 659{
660 struct stripe_head *sh; 660 struct stripe_head *sh;
661 int hash = stripe_hash_locks_hash(sector); 661 int hash = stripe_hash_locks_hash(sector);
662 int inc_empty_inactive_list_flag;
662 663
663 pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector); 664 pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector);
664 665
@@ -703,7 +704,12 @@ raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
703 atomic_inc(&conf->active_stripes); 704 atomic_inc(&conf->active_stripes);
704 BUG_ON(list_empty(&sh->lru) && 705 BUG_ON(list_empty(&sh->lru) &&
705 !test_bit(STRIPE_EXPANDING, &sh->state)); 706 !test_bit(STRIPE_EXPANDING, &sh->state));
707 inc_empty_inactive_list_flag = 0;
708 if (!list_empty(conf->inactive_list + hash))
709 inc_empty_inactive_list_flag = 1;
706 list_del_init(&sh->lru); 710 list_del_init(&sh->lru);
711 if (list_empty(conf->inactive_list + hash) && inc_empty_inactive_list_flag)
712 atomic_inc(&conf->empty_inactive_list_nr);
707 if (sh->group) { 713 if (sh->group) {
708 sh->group->stripes_cnt--; 714 sh->group->stripes_cnt--;
709 sh->group = NULL; 715 sh->group = NULL;
@@ -762,6 +768,7 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh
762 sector_t head_sector, tmp_sec; 768 sector_t head_sector, tmp_sec;
763 int hash; 769 int hash;
764 int dd_idx; 770 int dd_idx;
771 int inc_empty_inactive_list_flag;
765 772
766 /* Don't cross chunks, so stripe pd_idx/qd_idx is the same */ 773 /* Don't cross chunks, so stripe pd_idx/qd_idx is the same */
767 tmp_sec = sh->sector; 774 tmp_sec = sh->sector;
@@ -779,7 +786,12 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh
779 atomic_inc(&conf->active_stripes); 786 atomic_inc(&conf->active_stripes);
780 BUG_ON(list_empty(&head->lru) && 787 BUG_ON(list_empty(&head->lru) &&
781 !test_bit(STRIPE_EXPANDING, &head->state)); 788 !test_bit(STRIPE_EXPANDING, &head->state));
789 inc_empty_inactive_list_flag = 0;
790 if (!list_empty(conf->inactive_list + hash))
791 inc_empty_inactive_list_flag = 1;
782 list_del_init(&head->lru); 792 list_del_init(&head->lru);
793 if (list_empty(conf->inactive_list + hash) && inc_empty_inactive_list_flag)
794 atomic_inc(&conf->empty_inactive_list_nr);
783 if (head->group) { 795 if (head->group) {
784 head->group->stripes_cnt--; 796 head->group->stripes_cnt--;
785 head->group = NULL; 797 head->group = NULL;
@@ -993,7 +1005,6 @@ again:
993 1005
994 set_bit(STRIPE_IO_STARTED, &sh->state); 1006 set_bit(STRIPE_IO_STARTED, &sh->state);
995 1007
996 bio_reset(bi);
997 bi->bi_bdev = rdev->bdev; 1008 bi->bi_bdev = rdev->bdev;
998 bio_set_op_attrs(bi, op, op_flags); 1009 bio_set_op_attrs(bi, op, op_flags);
999 bi->bi_end_io = op_is_write(op) 1010 bi->bi_end_io = op_is_write(op)
@@ -1045,7 +1056,6 @@ again:
1045 1056
1046 set_bit(STRIPE_IO_STARTED, &sh->state); 1057 set_bit(STRIPE_IO_STARTED, &sh->state);
1047 1058
1048 bio_reset(rbi);
1049 rbi->bi_bdev = rrdev->bdev; 1059 rbi->bi_bdev = rrdev->bdev;
1050 bio_set_op_attrs(rbi, op, op_flags); 1060 bio_set_op_attrs(rbi, op, op_flags);
1051 BUG_ON(!op_is_write(op)); 1061 BUG_ON(!op_is_write(op));
@@ -1978,9 +1988,11 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
1978 put_cpu(); 1988 put_cpu();
1979} 1989}
1980 1990
1981static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp) 1991static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp,
1992 int disks)
1982{ 1993{
1983 struct stripe_head *sh; 1994 struct stripe_head *sh;
1995 int i;
1984 1996
1985 sh = kmem_cache_zalloc(sc, gfp); 1997 sh = kmem_cache_zalloc(sc, gfp);
1986 if (sh) { 1998 if (sh) {
@@ -1989,6 +2001,17 @@ static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp)
1989 INIT_LIST_HEAD(&sh->batch_list); 2001 INIT_LIST_HEAD(&sh->batch_list);
1990 INIT_LIST_HEAD(&sh->lru); 2002 INIT_LIST_HEAD(&sh->lru);
1991 atomic_set(&sh->count, 1); 2003 atomic_set(&sh->count, 1);
2004 for (i = 0; i < disks; i++) {
2005 struct r5dev *dev = &sh->dev[i];
2006
2007 bio_init(&dev->req);
2008 dev->req.bi_io_vec = &dev->vec;
2009 dev->req.bi_max_vecs = 1;
2010
2011 bio_init(&dev->rreq);
2012 dev->rreq.bi_io_vec = &dev->rvec;
2013 dev->rreq.bi_max_vecs = 1;
2014 }
1992 } 2015 }
1993 return sh; 2016 return sh;
1994} 2017}
@@ -1996,7 +2019,7 @@ static int grow_one_stripe(struct r5conf *conf, gfp_t gfp)
1996{ 2019{
1997 struct stripe_head *sh; 2020 struct stripe_head *sh;
1998 2021
1999 sh = alloc_stripe(conf->slab_cache, gfp); 2022 sh = alloc_stripe(conf->slab_cache, gfp, conf->pool_size);
2000 if (!sh) 2023 if (!sh)
2001 return 0; 2024 return 0;
2002 2025
@@ -2167,7 +2190,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
2167 mutex_lock(&conf->cache_size_mutex); 2190 mutex_lock(&conf->cache_size_mutex);
2168 2191
2169 for (i = conf->max_nr_stripes; i; i--) { 2192 for (i = conf->max_nr_stripes; i; i--) {
2170 nsh = alloc_stripe(sc, GFP_KERNEL); 2193 nsh = alloc_stripe(sc, GFP_KERNEL, newsize);
2171 if (!nsh) 2194 if (!nsh)
2172 break; 2195 break;
2173 2196
@@ -2299,6 +2322,7 @@ static void raid5_end_read_request(struct bio * bi)
2299 (unsigned long long)sh->sector, i, atomic_read(&sh->count), 2322 (unsigned long long)sh->sector, i, atomic_read(&sh->count),
2300 bi->bi_error); 2323 bi->bi_error);
2301 if (i == disks) { 2324 if (i == disks) {
2325 bio_reset(bi);
2302 BUG(); 2326 BUG();
2303 return; 2327 return;
2304 } 2328 }
@@ -2399,6 +2423,7 @@ static void raid5_end_read_request(struct bio * bi)
2399 } 2423 }
2400 } 2424 }
2401 rdev_dec_pending(rdev, conf->mddev); 2425 rdev_dec_pending(rdev, conf->mddev);
2426 bio_reset(bi);
2402 clear_bit(R5_LOCKED, &sh->dev[i].flags); 2427 clear_bit(R5_LOCKED, &sh->dev[i].flags);
2403 set_bit(STRIPE_HANDLE, &sh->state); 2428 set_bit(STRIPE_HANDLE, &sh->state);
2404 raid5_release_stripe(sh); 2429 raid5_release_stripe(sh);
@@ -2436,6 +2461,7 @@ static void raid5_end_write_request(struct bio *bi)
2436 (unsigned long long)sh->sector, i, atomic_read(&sh->count), 2461 (unsigned long long)sh->sector, i, atomic_read(&sh->count),
2437 bi->bi_error); 2462 bi->bi_error);
2438 if (i == disks) { 2463 if (i == disks) {
2464 bio_reset(bi);
2439 BUG(); 2465 BUG();
2440 return; 2466 return;
2441 } 2467 }
@@ -2472,6 +2498,7 @@ static void raid5_end_write_request(struct bio *bi)
2472 if (sh->batch_head && bi->bi_error && !replacement) 2498 if (sh->batch_head && bi->bi_error && !replacement)
2473 set_bit(STRIPE_BATCH_ERR, &sh->batch_head->state); 2499 set_bit(STRIPE_BATCH_ERR, &sh->batch_head->state);
2474 2500
2501 bio_reset(bi);
2475 if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags)) 2502 if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags))
2476 clear_bit(R5_LOCKED, &sh->dev[i].flags); 2503 clear_bit(R5_LOCKED, &sh->dev[i].flags);
2477 set_bit(STRIPE_HANDLE, &sh->state); 2504 set_bit(STRIPE_HANDLE, &sh->state);
@@ -2485,16 +2512,6 @@ static void raid5_build_block(struct stripe_head *sh, int i, int previous)
2485{ 2512{
2486 struct r5dev *dev = &sh->dev[i]; 2513 struct r5dev *dev = &sh->dev[i];
2487 2514
2488 bio_init(&dev->req);
2489 dev->req.bi_io_vec = &dev->vec;
2490 dev->req.bi_max_vecs = 1;
2491 dev->req.bi_private = sh;
2492
2493 bio_init(&dev->rreq);
2494 dev->rreq.bi_io_vec = &dev->rvec;
2495 dev->rreq.bi_max_vecs = 1;
2496 dev->rreq.bi_private = sh;
2497
2498 dev->flags = 0; 2515 dev->flags = 0;
2499 dev->sector = raid5_compute_blocknr(sh, i, previous); 2516 dev->sector = raid5_compute_blocknr(sh, i, previous);
2500} 2517}
@@ -4628,7 +4645,9 @@ finish:
4628 } 4645 }
4629 4646
4630 if (!bio_list_empty(&s.return_bi)) { 4647 if (!bio_list_empty(&s.return_bi)) {
4631 if (test_bit(MD_CHANGE_PENDING, &conf->mddev->flags)) { 4648 if (test_bit(MD_CHANGE_PENDING, &conf->mddev->flags) &&
4649 (s.failed <= conf->max_degraded ||
4650 conf->mddev->external == 0)) {
4632 spin_lock_irq(&conf->device_lock); 4651 spin_lock_irq(&conf->device_lock);
4633 bio_list_merge(&conf->return_bi, &s.return_bi); 4652 bio_list_merge(&conf->return_bi, &s.return_bi);
4634 spin_unlock_irq(&conf->device_lock); 4653 spin_unlock_irq(&conf->device_lock);
@@ -6620,6 +6639,16 @@ static struct r5conf *setup_conf(struct mddev *mddev)
6620 } 6639 }
6621 6640
6622 conf->min_nr_stripes = NR_STRIPES; 6641 conf->min_nr_stripes = NR_STRIPES;
6642 if (mddev->reshape_position != MaxSector) {
6643 int stripes = max_t(int,
6644 ((mddev->chunk_sectors << 9) / STRIPE_SIZE) * 4,
6645 ((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4);
6646 conf->min_nr_stripes = max(NR_STRIPES, stripes);
6647 if (conf->min_nr_stripes != NR_STRIPES)
6648 printk(KERN_INFO
6649 "md/raid:%s: force stripe size %d for reshape\n",
6650 mdname(mddev), conf->min_nr_stripes);
6651 }
6623 memory = conf->min_nr_stripes * (sizeof(struct stripe_head) + 6652 memory = conf->min_nr_stripes * (sizeof(struct stripe_head) +
6624 max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024; 6653 max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
6625 atomic_set(&conf->empty_inactive_list_nr, NR_STRIPE_HASH_LOCKS); 6654 atomic_set(&conf->empty_inactive_list_nr, NR_STRIPE_HASH_LOCKS);
@@ -6826,11 +6855,14 @@ static int raid5_run(struct mddev *mddev)
6826 if (IS_ERR(conf)) 6855 if (IS_ERR(conf))
6827 return PTR_ERR(conf); 6856 return PTR_ERR(conf);
6828 6857
6829 if (test_bit(MD_HAS_JOURNAL, &mddev->flags) && !journal_dev) { 6858 if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
6830 printk(KERN_ERR "md/raid:%s: journal disk is missing, force array readonly\n", 6859 if (!journal_dev) {
6831 mdname(mddev)); 6860 pr_err("md/raid:%s: journal disk is missing, force array readonly\n",
6832 mddev->ro = 1; 6861 mdname(mddev));
6833 set_disk_ro(mddev->gendisk, 1); 6862 mddev->ro = 1;
6863 set_disk_ro(mddev->gendisk, 1);
6864 } else if (mddev->recovery_cp == MaxSector)
6865 set_bit(MD_JOURNAL_CLEAN, &mddev->flags);
6834 } 6866 }
6835 6867
6836 conf->min_offset_diff = min_offset_diff; 6868 conf->min_offset_diff = min_offset_diff;
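
The raid5 bio changes give the embedded per-device bios a clear lifecycle: bio_init() once when the stripe is allocated, bio_reset() in the completion handlers after the last field access, instead of resetting just before reuse. A sketch of the two halves (struct r5dev fields as in the alloc_stripe() hunk above):

	/* At allocation time, once per device slot: */
	static void init_dev_bio(struct r5dev *dev)
	{
		bio_init(&dev->req);		/* one-time initialisation */
		dev->req.bi_io_vec = &dev->vec;
		dev->req.bi_max_vecs = 1;
	}

	/* In the end_io handler, after the last use of bi_* fields: */
	bio_reset(bi);	/* leaves the bio ready for reuse without re-init */
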
diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
index 869c83fb3c5d..f00f3e742265 100644
--- a/drivers/memory/omap-gpmc.c
+++ b/drivers/memory/omap-gpmc.c
@@ -2185,7 +2185,7 @@ static int gpmc_probe_dt(struct platform_device *pdev)
2185 return 0; 2185 return 0;
2186} 2186}
2187 2187
2188static int gpmc_probe_dt_children(struct platform_device *pdev) 2188static void gpmc_probe_dt_children(struct platform_device *pdev)
2189{ 2189{
2190 int ret; 2190 int ret;
2191 struct device_node *child; 2191 struct device_node *child;
@@ -2200,11 +2200,11 @@ static int gpmc_probe_dt_children(struct platform_device *pdev)
2200 else 2200 else
2201 ret = gpmc_probe_generic_child(pdev, child); 2201 ret = gpmc_probe_generic_child(pdev, child);
2202 2202
2203 if (ret) 2203 if (ret) {
2204 return ret; 2204 dev_err(&pdev->dev, "failed to probe DT child '%s': %d\n",
2205 child->name, ret);
2206 }
2205 } 2207 }
2206
2207 return 0;
2208} 2208}
2209#else 2209#else
2210static int gpmc_probe_dt(struct platform_device *pdev) 2210static int gpmc_probe_dt(struct platform_device *pdev)
@@ -2212,9 +2212,8 @@ static int gpmc_probe_dt(struct platform_device *pdev)
2212 return 0; 2212 return 0;
2213} 2213}
2214 2214
2215static int gpmc_probe_dt_children(struct platform_device *pdev) 2215static void gpmc_probe_dt_children(struct platform_device *pdev)
2216{ 2216{
2217 return 0;
2218} 2217}
2219#endif /* CONFIG_OF */ 2218#endif /* CONFIG_OF */
2220 2219
@@ -2369,16 +2368,10 @@ static int gpmc_probe(struct platform_device *pdev)
2369 goto setup_irq_failed; 2368 goto setup_irq_failed;
2370 } 2369 }
2371 2370
2372 rc = gpmc_probe_dt_children(pdev); 2371 gpmc_probe_dt_children(pdev);
2373 if (rc < 0) {
2374 dev_err(gpmc->dev, "failed to probe DT children\n");
2375 goto dt_children_failed;
2376 }
2377 2372
2378 return 0; 2373 return 0;
2379 2374
2380dt_children_failed:
2381 gpmc_free_irq(gpmc);
2382setup_irq_failed: 2375setup_irq_failed:
2383 gpmc_gpio_exit(gpmc); 2376 gpmc_gpio_exit(gpmc);
2384gpio_init_failed: 2377gpio_init_failed:
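
gpmc_probe_dt_children() now returns void: a child device that fails to probe is logged and skipped rather than tearing down the whole GPMC controller, so one bad NAND/NOR node no longer takes out its siblings. A sketch of the shape (probe_one_child() stands in for the per-type probe calls):

	#include <linux/of.h>
	#include <linux/platform_device.h>

	static void probe_children(struct platform_device *pdev)
	{
		struct device_node *child;

		for_each_available_child_of_node(pdev->dev.of_node, child) {
			int ret = probe_one_child(pdev, child);	/* hypothetical */

			if (ret)
				dev_err(&pdev->dev,
					"failed to probe DT child '%s': %d\n",
					child->name, ret);
			/* keep iterating: one bad child no longer aborts probe */
		}
	}
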
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index a216b4667742..d00252828966 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -345,16 +345,6 @@ config SENSORS_TSL2550
345 This driver can also be built as a module. If so, the module 345 This driver can also be built as a module. If so, the module
346 will be called tsl2550. 346 will be called tsl2550.
347 347
348config SENSORS_BH1780
349 tristate "ROHM BH1780GLI ambient light sensor"
350 depends on I2C && SYSFS
351 help
352 If you say yes here you get support for the ROHM BH1780GLI
353 ambient light sensor.
354
355 This driver can also be built as a module. If so, the module
356 will be called bh1780gli.
357
358config SENSORS_BH1770 348config SENSORS_BH1770
359 tristate "BH1770GLC / SFH7770 combined ALS - Proximity sensor" 349 tristate "BH1770GLC / SFH7770 combined ALS - Proximity sensor"
360 depends on I2C 350 depends on I2C
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 4387ccb79e64..fb32516ddfe2 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -19,7 +19,6 @@ obj-$(CONFIG_TIFM_CORE) += tifm_core.o
19obj-$(CONFIG_TIFM_7XX1) += tifm_7xx1.o 19obj-$(CONFIG_TIFM_7XX1) += tifm_7xx1.o
20obj-$(CONFIG_PHANTOM) += phantom.o 20obj-$(CONFIG_PHANTOM) += phantom.o
21obj-$(CONFIG_QCOM_COINCELL) += qcom-coincell.o 21obj-$(CONFIG_QCOM_COINCELL) += qcom-coincell.o
22obj-$(CONFIG_SENSORS_BH1780) += bh1780gli.o
23obj-$(CONFIG_SENSORS_BH1770) += bh1770glc.o 22obj-$(CONFIG_SENSORS_BH1770) += bh1770glc.o
24obj-$(CONFIG_SENSORS_APDS990X) += apds990x.o 23obj-$(CONFIG_SENSORS_APDS990X) += apds990x.o
25obj-$(CONFIG_SGI_IOC4) += ioc4.o 24obj-$(CONFIG_SGI_IOC4) += ioc4.o
@@ -69,5 +68,6 @@ OBJCOPYFLAGS :=
69OBJCOPYFLAGS_lkdtm_rodata_objcopy.o := \ 68OBJCOPYFLAGS_lkdtm_rodata_objcopy.o := \
70 --set-section-flags .text=alloc,readonly \ 69 --set-section-flags .text=alloc,readonly \
71 --rename-section .text=.rodata 70 --rename-section .text=.rodata
72$(obj)/lkdtm_rodata_objcopy.o: $(obj)/lkdtm_rodata.o 71targets += lkdtm_rodata.o lkdtm_rodata_objcopy.o
72$(obj)/lkdtm_rodata_objcopy.o: $(obj)/lkdtm_rodata.o FORCE
73 $(call if_changed,objcopy) 73 $(call if_changed,objcopy)
diff --git a/drivers/misc/bh1780gli.c b/drivers/misc/bh1780gli.c
deleted file mode 100644
index 7f90ce5a569a..000000000000
--- a/drivers/misc/bh1780gli.c
+++ /dev/null
@@ -1,259 +0,0 @@
1/*
2 * bh1780gli.c
3 * ROHM Ambient Light Sensor Driver
4 *
5 * Copyright (C) 2010 Texas Instruments
6 * Author: Hemanth V <hemanthv@ti.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License version 2 as published by
10 * the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program. If not, see <http://www.gnu.org/licenses/>.
19 */
20#include <linux/i2c.h>
21#include <linux/slab.h>
22#include <linux/mutex.h>
23#include <linux/platform_device.h>
24#include <linux/delay.h>
25#include <linux/module.h>
26#include <linux/of.h>
27
28#define BH1780_REG_CONTROL 0x80
29#define BH1780_REG_PARTID 0x8A
30#define BH1780_REG_MANFID 0x8B
31#define BH1780_REG_DLOW 0x8C
32#define BH1780_REG_DHIGH 0x8D
33
34#define BH1780_REVMASK (0xf)
35#define BH1780_POWMASK (0x3)
36#define BH1780_POFF (0x0)
37#define BH1780_PON (0x3)
38
39/* power on settling time in ms */
40#define BH1780_PON_DELAY 2
41
42struct bh1780_data {
43 struct i2c_client *client;
44 int power_state;
45 /* lock for sysfs operations */
46 struct mutex lock;
47};
48
49static int bh1780_write(struct bh1780_data *ddata, u8 reg, u8 val, char *msg)
50{
51 int ret = i2c_smbus_write_byte_data(ddata->client, reg, val);
52 if (ret < 0)
53 dev_err(&ddata->client->dev,
54 "i2c_smbus_write_byte_data failed error %d Register (%s)\n",
55 ret, msg);
56 return ret;
57}
58
59static int bh1780_read(struct bh1780_data *ddata, u8 reg, char *msg)
60{
61 int ret = i2c_smbus_read_byte_data(ddata->client, reg);
62 if (ret < 0)
63 dev_err(&ddata->client->dev,
64 "i2c_smbus_read_byte_data failed error %d Register (%s)\n",
65 ret, msg);
66 return ret;
67}
68
69static ssize_t bh1780_show_lux(struct device *dev,
70 struct device_attribute *attr, char *buf)
71{
72 struct platform_device *pdev = to_platform_device(dev);
73 struct bh1780_data *ddata = platform_get_drvdata(pdev);
74 int lsb, msb;
75
76 lsb = bh1780_read(ddata, BH1780_REG_DLOW, "DLOW");
77 if (lsb < 0)
78 return lsb;
79
80 msb = bh1780_read(ddata, BH1780_REG_DHIGH, "DHIGH");
81 if (msb < 0)
82 return msb;
83
84 return sprintf(buf, "%d\n", (msb << 8) | lsb);
85}
86
87static ssize_t bh1780_show_power_state(struct device *dev,
88 struct device_attribute *attr,
89 char *buf)
90{
91 struct platform_device *pdev = to_platform_device(dev);
92 struct bh1780_data *ddata = platform_get_drvdata(pdev);
93 int state;
94
95 state = bh1780_read(ddata, BH1780_REG_CONTROL, "CONTROL");
96 if (state < 0)
97 return state;
98
99 return sprintf(buf, "%d\n", state & BH1780_POWMASK);
100}
101
102static ssize_t bh1780_store_power_state(struct device *dev,
103 struct device_attribute *attr,
104 const char *buf, size_t count)
105{
106 struct platform_device *pdev = to_platform_device(dev);
107 struct bh1780_data *ddata = platform_get_drvdata(pdev);
108 unsigned long val;
109 int error;
110
111 error = kstrtoul(buf, 0, &val);
112 if (error)
113 return error;
114
115 if (val < BH1780_POFF || val > BH1780_PON)
116 return -EINVAL;
117
118 mutex_lock(&ddata->lock);
119
120 error = bh1780_write(ddata, BH1780_REG_CONTROL, val, "CONTROL");
121 if (error < 0) {
122 mutex_unlock(&ddata->lock);
123 return error;
124 }
125
126 msleep(BH1780_PON_DELAY);
127 ddata->power_state = val;
128 mutex_unlock(&ddata->lock);
129
130 return count;
131}
132
133static DEVICE_ATTR(lux, S_IRUGO, bh1780_show_lux, NULL);
134
135static DEVICE_ATTR(power_state, S_IWUSR | S_IRUGO,
136 bh1780_show_power_state, bh1780_store_power_state);
137
138static struct attribute *bh1780_attributes[] = {
139 &dev_attr_power_state.attr,
140 &dev_attr_lux.attr,
141 NULL
142};
143
144static const struct attribute_group bh1780_attr_group = {
145 .attrs = bh1780_attributes,
146};
147
148static int bh1780_probe(struct i2c_client *client,
149 const struct i2c_device_id *id)
150{
151 int ret;
152 struct bh1780_data *ddata;
153 struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
154
155 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE))
156 return -EIO;
157
158 ddata = devm_kzalloc(&client->dev, sizeof(struct bh1780_data),
159 GFP_KERNEL);
160 if (ddata == NULL)
161 return -ENOMEM;
162
163 ddata->client = client;
164 i2c_set_clientdata(client, ddata);
165
166 ret = bh1780_read(ddata, BH1780_REG_PARTID, "PART ID");
167 if (ret < 0)
168 return ret;
169
170 dev_info(&client->dev, "Ambient Light Sensor, Rev : %d\n",
171 (ret & BH1780_REVMASK));
172
173 mutex_init(&ddata->lock);
174
175 return sysfs_create_group(&client->dev.kobj, &bh1780_attr_group);
176}
177
178static int bh1780_remove(struct i2c_client *client)
179{
180 sysfs_remove_group(&client->dev.kobj, &bh1780_attr_group);
181
182 return 0;
183}
184
185#ifdef CONFIG_PM_SLEEP
186static int bh1780_suspend(struct device *dev)
187{
188 struct bh1780_data *ddata;
189 int state, ret;
190 struct i2c_client *client = to_i2c_client(dev);
191
192 ddata = i2c_get_clientdata(client);
193 state = bh1780_read(ddata, BH1780_REG_CONTROL, "CONTROL");
194 if (state < 0)
195 return state;
196
197 ddata->power_state = state & BH1780_POWMASK;
198
199 ret = bh1780_write(ddata, BH1780_REG_CONTROL, BH1780_POFF,
200 "CONTROL");
201
202 if (ret < 0)
203 return ret;
204
205 return 0;
206}
207
208static int bh1780_resume(struct device *dev)
209{
210 struct bh1780_data *ddata;
211 int state, ret;
212 struct i2c_client *client = to_i2c_client(dev);
213
214 ddata = i2c_get_clientdata(client);
215 state = ddata->power_state;
216 ret = bh1780_write(ddata, BH1780_REG_CONTROL, state,
217 "CONTROL");
218
219 if (ret < 0)
220 return ret;
221
222 return 0;
223}
224#endif /* CONFIG_PM_SLEEP */
225
226static SIMPLE_DEV_PM_OPS(bh1780_pm, bh1780_suspend, bh1780_resume);
227
228static const struct i2c_device_id bh1780_id[] = {
229 { "bh1780", 0 },
230 { },
231};
232
233MODULE_DEVICE_TABLE(i2c, bh1780_id);
234
235#ifdef CONFIG_OF
236static const struct of_device_id of_bh1780_match[] = {
237 { .compatible = "rohm,bh1780gli", },
238 {},
239};
240
241MODULE_DEVICE_TABLE(of, of_bh1780_match);
242#endif
243
244static struct i2c_driver bh1780_driver = {
245 .probe = bh1780_probe,
246 .remove = bh1780_remove,
247 .id_table = bh1780_id,
248 .driver = {
249 .name = "bh1780",
250 .pm = &bh1780_pm,
251 .of_match_table = of_match_ptr(of_bh1780_match),
252 },
253};
254
255module_i2c_driver(bh1780_driver);
256
257MODULE_DESCRIPTION("BH1780GLI Ambient Light Sensor Driver");
258MODULE_LICENSE("GPL");
259MODULE_AUTHOR("Hemanth V <hemanthv@ti.com>");
diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c
index bdee9a01ef35..c466ee2b0c97 100644
--- a/drivers/misc/cxl/context.c
+++ b/drivers/misc/cxl/context.c
@@ -90,8 +90,7 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
90 */ 90 */
91 mutex_lock(&afu->contexts_lock); 91 mutex_lock(&afu->contexts_lock);
92 idr_preload(GFP_KERNEL); 92 idr_preload(GFP_KERNEL);
93 i = idr_alloc(&ctx->afu->contexts_idr, ctx, 93 i = idr_alloc(&ctx->afu->contexts_idr, ctx, ctx->afu->adapter->min_pe,
94 ctx->afu->adapter->native->sl_ops->min_pe,
95 ctx->afu->num_procs, GFP_NOWAIT); 94 ctx->afu->num_procs, GFP_NOWAIT);
96 idr_preload_end(); 95 idr_preload_end();
97 mutex_unlock(&afu->contexts_lock); 96 mutex_unlock(&afu->contexts_lock);
diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h
index de090533f18c..344a0ff8f8c7 100644
--- a/drivers/misc/cxl/cxl.h
+++ b/drivers/misc/cxl/cxl.h
@@ -561,7 +561,6 @@ struct cxl_service_layer_ops {
561 u64 (*timebase_read)(struct cxl *adapter); 561 u64 (*timebase_read)(struct cxl *adapter);
562 int capi_mode; 562 int capi_mode;
563 bool needs_reset_before_disable; 563 bool needs_reset_before_disable;
564 int min_pe;
565}; 564};
566 565
567struct cxl_native { 566struct cxl_native {
@@ -603,6 +602,7 @@ struct cxl {
603 struct bin_attribute cxl_attr; 602 struct bin_attribute cxl_attr;
604 int adapter_num; 603 int adapter_num;
605 int user_irqs; 604 int user_irqs;
605 int min_pe;
606 u64 ps_size; 606 u64 ps_size;
607 u16 psl_rev; 607 u16 psl_rev;
608 u16 base_image; 608 u16 base_image;
diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c
index 3bcdaee11ba1..e606fdc4bc9c 100644
--- a/drivers/misc/cxl/native.c
+++ b/drivers/misc/cxl/native.c
@@ -924,7 +924,7 @@ static irqreturn_t native_irq_multiplexed(int irq, void *data)
924 return fail_psl_irq(afu, &irq_info); 924 return fail_psl_irq(afu, &irq_info);
925} 925}
926 926
927void native_irq_wait(struct cxl_context *ctx) 927static void native_irq_wait(struct cxl_context *ctx)
928{ 928{
929 u64 dsisr; 929 u64 dsisr;
930 int timeout = 1000; 930 int timeout = 1000;
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
index d152e2de8c93..6f0c4ac4b649 100644
--- a/drivers/misc/cxl/pci.c
+++ b/drivers/misc/cxl/pci.c
@@ -379,7 +379,7 @@ static int calc_capp_routing(struct pci_dev *dev, u64 *chipid, u64 *capp_unit_id
379 379
380static int init_implementation_adapter_psl_regs(struct cxl *adapter, struct pci_dev *dev) 380static int init_implementation_adapter_psl_regs(struct cxl *adapter, struct pci_dev *dev)
381{ 381{
382 u64 psl_dsnctl; 382 u64 psl_dsnctl, psl_fircntl;
383 u64 chipid; 383 u64 chipid;
384 u64 capp_unit_id; 384 u64 capp_unit_id;
385 int rc; 385 int rc;
@@ -398,8 +398,11 @@ static int init_implementation_adapter_psl_regs(struct cxl *adapter, struct pci_
398 cxl_p1_write(adapter, CXL_PSL_RESLCKTO, 0x20000000200ULL); 398 cxl_p1_write(adapter, CXL_PSL_RESLCKTO, 0x20000000200ULL);
399 /* snoop write mask */ 399 /* snoop write mask */
400 cxl_p1_write(adapter, CXL_PSL_SNWRALLOC, 0x00000000FFFFFFFFULL); 400 cxl_p1_write(adapter, CXL_PSL_SNWRALLOC, 0x00000000FFFFFFFFULL);
401 /* set fir_accum */ 401 /* set fir_cntl to recommended value for production env */
402 cxl_p1_write(adapter, CXL_PSL_FIR_CNTL, 0x0800000000000000ULL); 402 psl_fircntl = (0x2ULL << (63-3)); /* ce_report */
403 psl_fircntl |= (0x1ULL << (63-6)); /* FIR_report */
404 psl_fircntl |= 0x1ULL; /* ce_thresh */
405 cxl_p1_write(adapter, CXL_PSL_FIR_CNTL, psl_fircntl);
403 /* for debugging with trace arrays */ 406 /* for debugging with trace arrays */
404 cxl_p1_write(adapter, CXL_PSL_TRACE, 0x0000FF7C00000000ULL); 407 cxl_p1_write(adapter, CXL_PSL_TRACE, 0x0000FF7C00000000ULL);
405 408
@@ -1521,14 +1524,15 @@ static const struct cxl_service_layer_ops xsl_ops = {
1521 .write_timebase_ctrl = write_timebase_ctrl_xsl, 1524 .write_timebase_ctrl = write_timebase_ctrl_xsl,
1522 .timebase_read = timebase_read_xsl, 1525 .timebase_read = timebase_read_xsl,
1523 .capi_mode = OPAL_PHB_CAPI_MODE_DMA, 1526 .capi_mode = OPAL_PHB_CAPI_MODE_DMA,
1524 .min_pe = 1, /* Workaround for Mellanox CX4 HW bug */
1525}; 1527};
1526 1528
1527static void set_sl_ops(struct cxl *adapter, struct pci_dev *dev) 1529static void set_sl_ops(struct cxl *adapter, struct pci_dev *dev)
1528{ 1530{
1529 if (dev->vendor == PCI_VENDOR_ID_MELLANOX && dev->device == 0x1013) { 1531 if (dev->vendor == PCI_VENDOR_ID_MELLANOX && dev->device == 0x1013) {
1532 /* Mellanox CX-4 */
1530 dev_info(&adapter->dev, "Device uses an XSL\n"); 1533 dev_info(&adapter->dev, "Device uses an XSL\n");
1531 adapter->native->sl_ops = &xsl_ops; 1534 adapter->native->sl_ops = &xsl_ops;
1535 adapter->min_pe = 1; /* Workaround for CX-4 hardware bug */
1532 } else { 1536 } else {
1533 dev_info(&adapter->dev, "Device uses a PSL\n"); 1537 dev_info(&adapter->dev, "Device uses a PSL\n");
1534 adapter->native->sl_ops = &psl_ops; 1538 adapter->native->sl_ops = &psl_ops;
diff --git a/drivers/misc/cxl/vphb.c b/drivers/misc/cxl/vphb.c
index dee8def1c193..3519acebfdab 100644
--- a/drivers/misc/cxl/vphb.c
+++ b/drivers/misc/cxl/vphb.c
@@ -221,7 +221,7 @@ int cxl_pci_vphb_add(struct cxl_afu *afu)
221 /* Setup the PHB using arch provided callback */ 221 /* Setup the PHB using arch provided callback */
222 phb->ops = &cxl_pcie_pci_ops; 222 phb->ops = &cxl_pcie_pci_ops;
223 phb->cfg_addr = NULL; 223 phb->cfg_addr = NULL;
224 phb->cfg_data = 0; 224 phb->cfg_data = NULL;
225 phb->private_data = afu; 225 phb->private_data = afu;
226 phb->controller_ops = cxl_pci_controller_ops; 226 phb->controller_ops = cxl_pci_controller_ops;
227 227
@@ -230,6 +230,11 @@ int cxl_pci_vphb_add(struct cxl_afu *afu)
230 if (phb->bus == NULL) 230 if (phb->bus == NULL)
231 return -ENXIO; 231 return -ENXIO;
232 232
233 /* Set release hook on root bus */
234 pci_set_host_bridge_release(to_pci_host_bridge(phb->bus->bridge),
235 pcibios_free_controller_deferred,
236 (void *) phb);
237
233 /* Claim resources. This might need some rework as well depending 238 /* Claim resources. This might need some rework as well depending
234 * whether we are doing probe-only or not, like assigning unassigned 239 * whether we are doing probe-only or not, like assigning unassigned
235 * resources etc... 240 * resources etc...
@@ -256,7 +261,10 @@ void cxl_pci_vphb_remove(struct cxl_afu *afu)
256 afu->phb = NULL; 261 afu->phb = NULL;
257 262
258 pci_remove_root_bus(phb->bus); 263 pci_remove_root_bus(phb->bus);
259 pcibios_free_controller(phb); 264 /*
265 * We don't free phb here - that's handled by
266 * pcibios_free_controller_deferred()
267 */
260} 268}
261 269
262static bool _cxl_pci_is_vphb_device(struct pci_controller *phb) 270static bool _cxl_pci_is_vphb_device(struct pci_controller *phb)
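
The vPHB teardown is now deferred: a release hook registered with pci_set_host_bridge_release() frees the controller when the last reference to the root bus goes away, so cxl_pci_vphb_remove() must not free it directly. Condensed from the hunks above:

	/* At add time: arrange for deferred free of the controller. */
	pci_set_host_bridge_release(to_pci_host_bridge(phb->bus->bridge),
				    pcibios_free_controller_deferred,
				    (void *)phb);

	/* At remove time: just unplug the bus; the release hook frees phb. */
	pci_remove_root_bus(phb->bus);
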
diff --git a/drivers/misc/lkdtm_rodata.c b/drivers/misc/lkdtm_rodata.c
index 166b1db3969f..3564477b8c2d 100644
--- a/drivers/misc/lkdtm_rodata.c
+++ b/drivers/misc/lkdtm_rodata.c
@@ -4,7 +4,7 @@
4 */ 4 */
5#include "lkdtm.h" 5#include "lkdtm.h"
6 6
7void lkdtm_rodata_do_nothing(void) 7void notrace lkdtm_rodata_do_nothing(void)
8{ 8{
9 /* Does nothing. We just want an architecture agnostic "return". */ 9 /* Does nothing. We just want an architecture agnostic "return". */
10} 10}
diff --git a/drivers/misc/lkdtm_usercopy.c b/drivers/misc/lkdtm_usercopy.c
index 5a3fd76eec27..1dd611423d8b 100644
--- a/drivers/misc/lkdtm_usercopy.c
+++ b/drivers/misc/lkdtm_usercopy.c
@@ -9,7 +9,15 @@
9#include <linux/uaccess.h> 9#include <linux/uaccess.h>
10#include <asm/cacheflush.h> 10#include <asm/cacheflush.h>
11 11
12static size_t cache_size = 1024; 12/*
13 * Many of the tests here end up using const sizes, but those would
14 * normally be ignored by hardened usercopy, so force the compiler
15 * into choosing the non-const path to make sure we trigger the
 16 * hardened usercopy checks by adding "unconst" to all the const copies,
17 * and making sure "cache_size" isn't optimized into a const.
18 */
19static volatile size_t unconst = 0;
20static volatile size_t cache_size = 1024;
13static struct kmem_cache *bad_cache; 21static struct kmem_cache *bad_cache;
14 22
15static const unsigned char test_text[] = "This is a test.\n"; 23static const unsigned char test_text[] = "This is a test.\n";
@@ -49,7 +57,7 @@ static noinline void do_usercopy_stack(bool to_user, bool bad_frame)
49 57
50 /* This is a pointer to outside our current stack frame. */ 58 /* This is a pointer to outside our current stack frame. */
51 if (bad_frame) { 59 if (bad_frame) {
52 bad_stack = do_usercopy_stack_callee((uintptr_t)bad_stack); 60 bad_stack = do_usercopy_stack_callee((uintptr_t)&bad_stack);
53 } else { 61 } else {
54 /* Put start address just inside stack. */ 62 /* Put start address just inside stack. */
55 bad_stack = task_stack_page(current) + THREAD_SIZE; 63 bad_stack = task_stack_page(current) + THREAD_SIZE;
@@ -67,14 +75,14 @@ static noinline void do_usercopy_stack(bool to_user, bool bad_frame)
67 if (to_user) { 75 if (to_user) {
68 pr_info("attempting good copy_to_user of local stack\n"); 76 pr_info("attempting good copy_to_user of local stack\n");
69 if (copy_to_user((void __user *)user_addr, good_stack, 77 if (copy_to_user((void __user *)user_addr, good_stack,
70 sizeof(good_stack))) { 78 unconst + sizeof(good_stack))) {
71 pr_warn("copy_to_user failed unexpectedly?!\n"); 79 pr_warn("copy_to_user failed unexpectedly?!\n");
72 goto free_user; 80 goto free_user;
73 } 81 }
74 82
75 pr_info("attempting bad copy_to_user of distant stack\n"); 83 pr_info("attempting bad copy_to_user of distant stack\n");
76 if (copy_to_user((void __user *)user_addr, bad_stack, 84 if (copy_to_user((void __user *)user_addr, bad_stack,
77 sizeof(good_stack))) { 85 unconst + sizeof(good_stack))) {
78 pr_warn("copy_to_user failed, but lacked Oops\n"); 86 pr_warn("copy_to_user failed, but lacked Oops\n");
79 goto free_user; 87 goto free_user;
80 } 88 }
@@ -88,14 +96,14 @@ static noinline void do_usercopy_stack(bool to_user, bool bad_frame)
88 96
89 pr_info("attempting good copy_from_user of local stack\n"); 97 pr_info("attempting good copy_from_user of local stack\n");
90 if (copy_from_user(good_stack, (void __user *)user_addr, 98 if (copy_from_user(good_stack, (void __user *)user_addr,
91 sizeof(good_stack))) { 99 unconst + sizeof(good_stack))) {
92 pr_warn("copy_from_user failed unexpectedly?!\n"); 100 pr_warn("copy_from_user failed unexpectedly?!\n");
93 goto free_user; 101 goto free_user;
94 } 102 }
95 103
96 pr_info("attempting bad copy_from_user of distant stack\n"); 104 pr_info("attempting bad copy_from_user of distant stack\n");
97 if (copy_from_user(bad_stack, (void __user *)user_addr, 105 if (copy_from_user(bad_stack, (void __user *)user_addr,
98 sizeof(good_stack))) { 106 unconst + sizeof(good_stack))) {
99 pr_warn("copy_from_user failed, but lacked Oops\n"); 107 pr_warn("copy_from_user failed, but lacked Oops\n");
100 goto free_user; 108 goto free_user;
101 } 109 }
@@ -109,7 +117,7 @@ static void do_usercopy_heap_size(bool to_user)
109{ 117{
110 unsigned long user_addr; 118 unsigned long user_addr;
111 unsigned char *one, *two; 119 unsigned char *one, *two;
112 const size_t size = 1024; 120 size_t size = unconst + 1024;
113 121
114 one = kmalloc(size, GFP_KERNEL); 122 one = kmalloc(size, GFP_KERNEL);
115 two = kmalloc(size, GFP_KERNEL); 123 two = kmalloc(size, GFP_KERNEL);
@@ -285,13 +293,14 @@ void lkdtm_USERCOPY_KERNEL(void)
285 293
286 pr_info("attempting good copy_to_user from kernel rodata\n"); 294 pr_info("attempting good copy_to_user from kernel rodata\n");
287 if (copy_to_user((void __user *)user_addr, test_text, 295 if (copy_to_user((void __user *)user_addr, test_text,
288 sizeof(test_text))) { 296 unconst + sizeof(test_text))) {
289 pr_warn("copy_to_user failed unexpectedly?!\n"); 297 pr_warn("copy_to_user failed unexpectedly?!\n");
290 goto free_user; 298 goto free_user;
291 } 299 }
292 300
293 pr_info("attempting bad copy_to_user from kernel text\n"); 301 pr_info("attempting bad copy_to_user from kernel text\n");
294 if (copy_to_user((void __user *)user_addr, vm_mmap, PAGE_SIZE)) { 302 if (copy_to_user((void __user *)user_addr, vm_mmap,
303 unconst + PAGE_SIZE)) {
295 pr_warn("copy_to_user failed, but lacked Oops\n"); 304 pr_warn("copy_to_user failed, but lacked Oops\n");
296 goto free_user; 305 goto free_user;
297 } 306 }
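
For reference, the "unconst" idiom above can be reproduced in a minimal standalone sketch: adding a volatile zero to a sizeof() expression stops the compiler from folding the size into a compile-time constant, so a runtime size check cannot be elided. The check_copy() helper below is purely illustrative and only stands in for the hardened-usercopy path.

#include <stdio.h>
#include <string.h>

static volatile size_t unconst;	/* always reads as 0 at runtime */

/* illustrative stand-in for a runtime-checked copy routine */
static void check_copy(void *dst, const void *src, size_t n)
{
	memcpy(dst, src, n);
}

int main(void)
{
	char src[16] = "payload", dst[16];

	/* sizeof(src) alone is a compile-time constant; adding the
	 * volatile zero turns the argument into a runtime value. */
	check_copy(dst, src, unconst + sizeof(src));
	puts(dst);
	return 0;
}
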
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
index e2fb44cc5c37..dc3a854e02d3 100644
--- a/drivers/misc/mei/hw-me.c
+++ b/drivers/misc/mei/hw-me.c
@@ -1263,8 +1263,14 @@ static bool mei_me_fw_type_nm(struct pci_dev *pdev)
1263static bool mei_me_fw_type_sps(struct pci_dev *pdev) 1263static bool mei_me_fw_type_sps(struct pci_dev *pdev)
1264{ 1264{
1265 u32 reg; 1265 u32 reg;
1266 /* Read ME FW Status check for SPS Firmware */ 1266 unsigned int devfn;
1267 pci_read_config_dword(pdev, PCI_CFG_HFS_1, &reg); 1267
1268 /*
1269 * Read ME FW Status register to check for SPS Firmware
1270 * The SPS FW is only signaled in pci function 0
1271 */
1272 devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0);
1273 pci_bus_read_config_dword(pdev->bus, devfn, PCI_CFG_HFS_1, &reg);
1268 trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_1", PCI_CFG_HFS_1, reg); 1274 trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_1", PCI_CFG_HFS_1, reg);
1269 /* if bits [19:16] = 15, running SPS Firmware */ 1275 /* if bits [19:16] = 15, running SPS Firmware */
1270 return (reg & 0xf0000) == 0xf0000; 1276 return (reg & 0xf0000) == 0xf0000;
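
The devfn arithmetic used above can be checked in isolation. The macros below mirror the kernel's PCI_SLOT()/PCI_DEVFN() encoding (slot in bits 7:3, function in bits 2:0); the slot and function values are made up for the demonstration.

#include <stdio.h>

#define PCI_SLOT(devfn)		(((devfn) >> 3) & 0x1f)
#define PCI_FUNC(devfn)		((devfn) & 0x07)
#define PCI_DEVFN(slot, func)	((((slot) & 0x1f) << 3) | ((func) & 0x07))

int main(void)
{
	unsigned int devfn = PCI_DEVFN(22, 3);	/* arbitrary slot 22, fn 3 */
	unsigned int fn0 = PCI_DEVFN(PCI_SLOT(devfn), 0);

	/* redirect any function's config read to function 0 of its slot */
	printf("slot=%u func=%u -> fn0 devfn=0x%02x\n",
	       PCI_SLOT(devfn), PCI_FUNC(devfn), fn0);
	return 0;
}
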
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index 64e64da6da44..71cea9b296b2 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -85,8 +85,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
85 85
86 {MEI_PCI_DEVICE(MEI_DEV_ID_SPT, mei_me_pch8_cfg)}, 86 {MEI_PCI_DEVICE(MEI_DEV_ID_SPT, mei_me_pch8_cfg)},
87 {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_2, mei_me_pch8_cfg)}, 87 {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_2, mei_me_pch8_cfg)},
88 {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, mei_me_pch8_cfg)}, 88 {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, mei_me_pch8_sps_cfg)},
89 {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, mei_me_pch8_cfg)}, 89 {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, mei_me_pch8_sps_cfg)},
90 90
91 {MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, mei_me_pch8_cfg)}, 91 {MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, mei_me_pch8_cfg)},
92 {MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, mei_me_pch8_cfg)}, 92 {MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, mei_me_pch8_cfg)},
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 48a5dd740f3b..2206d4477dbb 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -1726,6 +1726,7 @@ static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
1726 break; 1726 break;
1727 1727
1728 if (req_op(next) == REQ_OP_DISCARD || 1728 if (req_op(next) == REQ_OP_DISCARD ||
1729 req_op(next) == REQ_OP_SECURE_ERASE ||
1729 req_op(next) == REQ_OP_FLUSH) 1730 req_op(next) == REQ_OP_FLUSH)
1730 break; 1731 break;
1731 1732
@@ -2150,6 +2151,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
2150 struct mmc_card *card = md->queue.card; 2151 struct mmc_card *card = md->queue.card;
2151 struct mmc_host *host = card->host; 2152 struct mmc_host *host = card->host;
2152 unsigned long flags; 2153 unsigned long flags;
2154 bool req_is_special = mmc_req_is_special(req);
2153 2155
2154 if (req && !mq->mqrq_prev->req) 2156 if (req && !mq->mqrq_prev->req)
2155 /* claim host only for the first request */ 2157 /* claim host only for the first request */
@@ -2190,8 +2192,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
2190 } 2192 }
2191 2193
2192out: 2194out:
2193 if ((!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) || 2195 if ((!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) || req_is_special)
2194 mmc_req_is_special(req))
2195 /* 2196 /*
2196 * Release host when there are no more requests 2197 * Release host when there are no more requests
2197 * and after special request(discard, flush) is done. 2198 * and after special request(discard, flush) is done.
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index bf14642a576a..708057261b38 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -33,7 +33,8 @@ static int mmc_prep_request(struct request_queue *q, struct request *req)
33 /* 33 /*
34 * We only like normal block requests and discards. 34 * We only like normal block requests and discards.
35 */ 35 */
36 if (req->cmd_type != REQ_TYPE_FS && req_op(req) != REQ_OP_DISCARD) { 36 if (req->cmd_type != REQ_TYPE_FS && req_op(req) != REQ_OP_DISCARD &&
37 req_op(req) != REQ_OP_SECURE_ERASE) {
37 blk_dump_rq_flags(req, "MMC bad request"); 38 blk_dump_rq_flags(req, "MMC bad request");
38 return BLKPREP_KILL; 39 return BLKPREP_KILL;
39 } 40 }
@@ -64,6 +65,8 @@ static int mmc_queue_thread(void *d)
64 spin_unlock_irq(q->queue_lock); 65 spin_unlock_irq(q->queue_lock);
65 66
66 if (req || mq->mqrq_prev->req) { 67 if (req || mq->mqrq_prev->req) {
68 bool req_is_special = mmc_req_is_special(req);
69
67 set_current_state(TASK_RUNNING); 70 set_current_state(TASK_RUNNING);
68 mq->issue_fn(mq, req); 71 mq->issue_fn(mq, req);
69 cond_resched(); 72 cond_resched();
@@ -79,7 +82,7 @@ static int mmc_queue_thread(void *d)
79 * has been finished. Do not assign it to previous 82 * has been finished. Do not assign it to previous
80 * request. 83 * request.
81 */ 84 */
82 if (mmc_req_is_special(req)) 85 if (req_is_special)
83 mq->mqrq_cur->req = NULL; 86 mq->mqrq_cur->req = NULL;
84 87
85 mq->mqrq_prev->brq.mrq.data = NULL; 88 mq->mqrq_prev->brq.mrq.data = NULL;
diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h
index d62531124d54..fee5e1271465 100644
--- a/drivers/mmc/card/queue.h
+++ b/drivers/mmc/card/queue.h
@@ -4,7 +4,9 @@
4static inline bool mmc_req_is_special(struct request *req) 4static inline bool mmc_req_is_special(struct request *req)
5{ 5{
6 return req && 6 return req &&
7 (req_op(req) == REQ_OP_FLUSH || req_op(req) == REQ_OP_DISCARD); 7 (req_op(req) == REQ_OP_FLUSH ||
8 req_op(req) == REQ_OP_DISCARD ||
9 req_op(req) == REQ_OP_SECURE_ERASE);
8} 10}
9 11
10struct request; 12struct request;
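
The req_is_special caching in block.c and queue.c above guards against evaluating mmc_req_is_special(req) after issue_fn() may already have completed and freed the request. A minimal sketch of the hazard, with stand-in types that only model the lifetime problem:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct request { int op; };			/* stand-in */

static bool req_is_special(const struct request *req)
{
	return req && req->op == 1;		/* e.g. flush/discard */
}

static void issue(struct request *req)
{
	free(req);				/* completion may free it */
}

int main(void)
{
	struct request *req = malloc(sizeof(*req));

	if (!req)
		return 1;
	req->op = 1;
	/* capture the predicate first; testing req after issue()
	 * would read freed memory */
	bool special = req_is_special(req);

	issue(req);
	if (special)
		puts("post-completion handling for special request");
	return 0;
}
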
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index f23d65eb070d..be3c49fa7382 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -1016,14 +1016,16 @@ mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req)
1016 1016
1017 /* Only reconfigure if we have a different burst size */ 1017 /* Only reconfigure if we have a different burst size */
1018 if (*bp != burst) { 1018 if (*bp != burst) {
1019 struct dma_slave_config cfg; 1019 struct dma_slave_config cfg = {
1020 1020 .src_addr = host->phys_base +
1021 cfg.src_addr = host->phys_base + OMAP_MMC_REG(host, DATA); 1021 OMAP_MMC_REG(host, DATA),
1022 cfg.dst_addr = host->phys_base + OMAP_MMC_REG(host, DATA); 1022 .dst_addr = host->phys_base +
1023 cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; 1023 OMAP_MMC_REG(host, DATA),
1024 cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; 1024 .src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
1025 cfg.src_maxburst = burst; 1025 .dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
1026 cfg.dst_maxburst = burst; 1026 .src_maxburst = burst,
1027 .dst_maxburst = burst,
1028 };
1027 1029
1028 if (dmaengine_slave_config(c, &cfg)) 1030 if (dmaengine_slave_config(c, &cfg))
1029 goto use_pio; 1031 goto use_pio;
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 24ebc9a8de89..5f2f24a7360d 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -1409,11 +1409,18 @@ static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host,
1409static int omap_hsmmc_setup_dma_transfer(struct omap_hsmmc_host *host, 1409static int omap_hsmmc_setup_dma_transfer(struct omap_hsmmc_host *host,
1410 struct mmc_request *req) 1410 struct mmc_request *req)
1411{ 1411{
1412 struct dma_slave_config cfg;
1413 struct dma_async_tx_descriptor *tx; 1412 struct dma_async_tx_descriptor *tx;
1414 int ret = 0, i; 1413 int ret = 0, i;
1415 struct mmc_data *data = req->data; 1414 struct mmc_data *data = req->data;
1416 struct dma_chan *chan; 1415 struct dma_chan *chan;
1416 struct dma_slave_config cfg = {
1417 .src_addr = host->mapbase + OMAP_HSMMC_DATA,
1418 .dst_addr = host->mapbase + OMAP_HSMMC_DATA,
1419 .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
1420 .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
1421 .src_maxburst = data->blksz / 4,
1422 .dst_maxburst = data->blksz / 4,
1423 };
1417 1424
1418 /* Sanity check: all the SG entries must be aligned by block size. */ 1425 /* Sanity check: all the SG entries must be aligned by block size. */
1419 for (i = 0; i < data->sg_len; i++) { 1426 for (i = 0; i < data->sg_len; i++) {
@@ -1433,13 +1440,6 @@ static int omap_hsmmc_setup_dma_transfer(struct omap_hsmmc_host *host,
1433 1440
1434 chan = omap_hsmmc_get_dma_chan(host, data); 1441 chan = omap_hsmmc_get_dma_chan(host, data);
1435 1442
1436 cfg.src_addr = host->mapbase + OMAP_HSMMC_DATA;
1437 cfg.dst_addr = host->mapbase + OMAP_HSMMC_DATA;
1438 cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1439 cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1440 cfg.src_maxburst = data->blksz / 4;
1441 cfg.dst_maxburst = data->blksz / 4;
1442
1443 ret = dmaengine_slave_config(chan, &cfg); 1443 ret = dmaengine_slave_config(chan, &cfg);
1444 if (ret) 1444 if (ret)
1445 return ret; 1445 return ret;
diff --git a/drivers/mmc/host/sdhci-st.c b/drivers/mmc/host/sdhci-st.c
index c95ba83366a0..ed92ce729dde 100644
--- a/drivers/mmc/host/sdhci-st.c
+++ b/drivers/mmc/host/sdhci-st.c
@@ -28,6 +28,7 @@
28 28
29struct st_mmc_platform_data { 29struct st_mmc_platform_data {
30 struct reset_control *rstc; 30 struct reset_control *rstc;
31 struct clk *icnclk;
31 void __iomem *top_ioaddr; 32 void __iomem *top_ioaddr;
32}; 33};
33 34
@@ -353,7 +354,7 @@ static int sdhci_st_probe(struct platform_device *pdev)
353 struct sdhci_host *host; 354 struct sdhci_host *host;
354 struct st_mmc_platform_data *pdata; 355 struct st_mmc_platform_data *pdata;
355 struct sdhci_pltfm_host *pltfm_host; 356 struct sdhci_pltfm_host *pltfm_host;
356 struct clk *clk; 357 struct clk *clk, *icnclk;
357 int ret = 0; 358 int ret = 0;
358 u16 host_version; 359 u16 host_version;
359 struct resource *res; 360 struct resource *res;
@@ -365,6 +366,11 @@ static int sdhci_st_probe(struct platform_device *pdev)
365 return PTR_ERR(clk); 366 return PTR_ERR(clk);
366 } 367 }
367 368
369 /* ICN clock isn't compulsory, but use it if it's provided. */
370 icnclk = devm_clk_get(&pdev->dev, "icn");
371 if (IS_ERR(icnclk))
372 icnclk = NULL;
373
368 rstc = devm_reset_control_get(&pdev->dev, NULL); 374 rstc = devm_reset_control_get(&pdev->dev, NULL);
369 if (IS_ERR(rstc)) 375 if (IS_ERR(rstc))
370 rstc = NULL; 376 rstc = NULL;
@@ -389,6 +395,7 @@ static int sdhci_st_probe(struct platform_device *pdev)
389 } 395 }
390 396
391 clk_prepare_enable(clk); 397 clk_prepare_enable(clk);
398 clk_prepare_enable(icnclk);
392 399
393 /* Configure the FlashSS Top registers for setting eMMC TX/RX delay */ 400 /* Configure the FlashSS Top registers for setting eMMC TX/RX delay */
394 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, 401 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
@@ -400,6 +407,7 @@ static int sdhci_st_probe(struct platform_device *pdev)
400 } 407 }
401 408
402 pltfm_host->clk = clk; 409 pltfm_host->clk = clk;
410 pdata->icnclk = icnclk;
403 411
404 /* Configure the Arasan HC inside the flashSS */ 412 /* Configure the Arasan HC inside the flashSS */
405 st_mmcss_cconfig(np, host); 413 st_mmcss_cconfig(np, host);
@@ -422,6 +430,7 @@ static int sdhci_st_probe(struct platform_device *pdev)
422 return 0; 430 return 0;
423 431
424err_out: 432err_out:
433 clk_disable_unprepare(icnclk);
425 clk_disable_unprepare(clk); 434 clk_disable_unprepare(clk);
426err_of: 435err_of:
427 sdhci_pltfm_free(pdev); 436 sdhci_pltfm_free(pdev);
@@ -442,6 +451,8 @@ static int sdhci_st_remove(struct platform_device *pdev)
442 451
443 ret = sdhci_pltfm_unregister(pdev); 452 ret = sdhci_pltfm_unregister(pdev);
444 453
454 clk_disable_unprepare(pdata->icnclk);
455
445 if (rstc) 456 if (rstc)
446 reset_control_assert(rstc); 457 reset_control_assert(rstc);
447 458
@@ -462,6 +473,7 @@ static int sdhci_st_suspend(struct device *dev)
462 if (pdata->rstc) 473 if (pdata->rstc)
463 reset_control_assert(pdata->rstc); 474 reset_control_assert(pdata->rstc);
464 475
476 clk_disable_unprepare(pdata->icnclk);
465 clk_disable_unprepare(pltfm_host->clk); 477 clk_disable_unprepare(pltfm_host->clk);
466out: 478out:
467 return ret; 479 return ret;
@@ -475,6 +487,7 @@ static int sdhci_st_resume(struct device *dev)
475 struct device_node *np = dev->of_node; 487 struct device_node *np = dev->of_node;
476 488
477 clk_prepare_enable(pltfm_host->clk); 489 clk_prepare_enable(pltfm_host->clk);
490 clk_prepare_enable(pdata->icnclk);
478 491
479 if (pdata->rstc) 492 if (pdata->rstc)
480 reset_control_deassert(pdata->rstc); 493 reset_control_deassert(pdata->rstc);
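
The optional ICN clock handling above relies on the common clk API treating a NULL clk as a no-op, so normalising the lookup error to NULL lets the rest of the driver call clk_prepare_enable()/clk_disable_unprepare() unconditionally. A sketch of the pattern with a stand-in stub:

#include <stdio.h>

struct clk;					/* opaque stand-in */

static int clk_prepare_enable(struct clk *clk)
{
	if (!clk)
		return 0;			/* NULL clk is a no-op */
	puts("clock enabled");
	return 0;
}

int main(void)
{
	struct clk *icnclk = NULL;		/* devm_clk_get() failed */

	clk_prepare_enable(icnclk);		/* safe unconditionally */
	puts("probe continues without the optional ICN clock");
	return 0;
}
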
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 1f276fa30ba6..9599ed6f1213 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -152,7 +152,7 @@ module_param(lacp_rate, charp, 0);
152MODULE_PARM_DESC(lacp_rate, "LACPDU tx rate to request from 802.3ad partner; " 152MODULE_PARM_DESC(lacp_rate, "LACPDU tx rate to request from 802.3ad partner; "
153 "0 for slow, 1 for fast"); 153 "0 for slow, 1 for fast");
154module_param(ad_select, charp, 0); 154module_param(ad_select, charp, 0);
155MODULE_PARM_DESC(ad_select, "803.ad aggregation selection logic; " 155MODULE_PARM_DESC(ad_select, "802.3ad aggregation selection logic; "
156 "0 for stable (default), 1 for bandwidth, " 156 "0 for stable (default), 1 for bandwidth, "
157 "2 for count"); 157 "2 for count");
158module_param(min_links, int, 0); 158module_param(min_links, int, 0);
@@ -1341,9 +1341,10 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1341 slave_dev->name); 1341 slave_dev->name);
1342 } 1342 }
1343 1343
1344 /* already enslaved */ 1344 /* already in-use? */
1345 if (slave_dev->flags & IFF_SLAVE) { 1345 if (netdev_is_rx_handler_busy(slave_dev)) {
1346 netdev_dbg(bond_dev, "Error: Device was already enslaved\n"); 1346 netdev_err(bond_dev,
1347 "Error: Device is in use and cannot be enslaved\n");
1347 return -EBUSY; 1348 return -EBUSY;
1348 } 1349 }
1349 1350
diff --git a/drivers/net/dsa/b53/b53_regs.h b/drivers/net/dsa/b53/b53_regs.h
index 8f12bddd5dc9..a0b453ea34c9 100644
--- a/drivers/net/dsa/b53/b53_regs.h
+++ b/drivers/net/dsa/b53/b53_regs.h
@@ -258,7 +258,7 @@
258 * BCM5325 and BCM5365 share most definitions below 258 * BCM5325 and BCM5365 share most definitions below
259 */ 259 */
260#define B53_ARLTBL_MAC_VID_ENTRY(n) (0x10 * (n)) 260#define B53_ARLTBL_MAC_VID_ENTRY(n) (0x10 * (n))
261#define ARLTBL_MAC_MASK 0xffffffffffff 261#define ARLTBL_MAC_MASK 0xffffffffffffULL
262#define ARLTBL_VID_S 48 262#define ARLTBL_VID_S 48
263#define ARLTBL_VID_MASK_25 0xff 263#define ARLTBL_VID_MASK_25 0xff
264#define ARLTBL_VID_MASK 0xfff 264#define ARLTBL_VID_MASK 0xfff
diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h
index 463bed8cbe4c..dd446e466699 100644
--- a/drivers/net/dsa/bcm_sf2.h
+++ b/drivers/net/dsa/bcm_sf2.h
@@ -205,8 +205,8 @@ static inline void name##_writeq(struct bcm_sf2_priv *priv, u64 val, \
205static inline void intrl2_##which##_mask_clear(struct bcm_sf2_priv *priv, \ 205static inline void intrl2_##which##_mask_clear(struct bcm_sf2_priv *priv, \
206 u32 mask) \ 206 u32 mask) \
207{ \ 207{ \
208 intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR); \
209 priv->irq##which##_mask &= ~(mask); \ 208 priv->irq##which##_mask &= ~(mask); \
209 intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR); \
210} \ 210} \
211static inline void intrl2_##which##_mask_set(struct bcm_sf2_priv *priv, \ 211static inline void intrl2_##which##_mask_set(struct bcm_sf2_priv *priv, \
212 u32 mask) \ 212 u32 mask) \
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index d36aedde8cb9..710679067594 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -2656,15 +2656,19 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
2656 return ret; 2656 return ret;
2657 } 2657 }
2658 2658
2659 /* Rate Control: disable ingress rate limiting. */
2659 if (mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip) || 2660 if (mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip) ||
2660 mv88e6xxx_6165_family(chip) || mv88e6xxx_6097_family(chip) || 2661 mv88e6xxx_6165_family(chip) || mv88e6xxx_6097_family(chip) ||
2661 mv88e6xxx_6185_family(chip) || mv88e6xxx_6095_family(chip) ||
2662 mv88e6xxx_6320_family(chip)) { 2662 mv88e6xxx_6320_family(chip)) {
2663 /* Rate Control: disable ingress rate limiting. */
2664 ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), 2663 ret = _mv88e6xxx_reg_write(chip, REG_PORT(port),
2665 PORT_RATE_CONTROL, 0x0001); 2664 PORT_RATE_CONTROL, 0x0001);
2666 if (ret) 2665 if (ret)
2667 return ret; 2666 return ret;
2667 } else if (mv88e6xxx_6185_family(chip) || mv88e6xxx_6095_family(chip)) {
2668 ret = _mv88e6xxx_reg_write(chip, REG_PORT(port),
2669 PORT_RATE_CONTROL, 0x0000);
2670 if (ret)
2671 return ret;
2668 } 2672 }
2669 2673
2670 /* Port Control 1: disable trunking, disable sending 2674 /* Port Control 1: disable trunking, disable sending
@@ -3187,6 +3191,7 @@ static int mv88e6xxx_set_addr(struct dsa_switch *ds, u8 *addr)
3187 return err; 3191 return err;
3188} 3192}
3189 3193
3194#ifdef CONFIG_NET_DSA_HWMON
3190static int mv88e6xxx_mdio_page_read(struct dsa_switch *ds, int port, int page, 3195static int mv88e6xxx_mdio_page_read(struct dsa_switch *ds, int port, int page,
3191 int reg) 3196 int reg)
3192{ 3197{
@@ -3212,6 +3217,7 @@ static int mv88e6xxx_mdio_page_write(struct dsa_switch *ds, int port, int page,
3212 3217
3213 return ret; 3218 return ret;
3214} 3219}
3220#endif
3215 3221
3216static int mv88e6xxx_port_to_mdio_addr(struct mv88e6xxx_chip *chip, int port) 3222static int mv88e6xxx_port_to_mdio_addr(struct mv88e6xxx_chip *chip, int port)
3217{ 3223{
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index 37a0f463b8de..18bb9556dd00 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -793,6 +793,8 @@ int xgene_enet_phy_connect(struct net_device *ndev)
793 netdev_err(ndev, "Could not connect to PHY\n"); 793 netdev_err(ndev, "Could not connect to PHY\n");
794 return -ENODEV; 794 return -ENODEV;
795 } 795 }
796#else
797 return -ENODEV;
796#endif 798#endif
797 } 799 }
798 800
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index 4bff0f3040df..b0da9693f28a 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -771,8 +771,10 @@ int arc_emac_probe(struct net_device *ndev, int interface)
771 priv->dev = dev; 771 priv->dev = dev;
772 772
773 priv->regs = devm_ioremap_resource(dev, &res_regs); 773 priv->regs = devm_ioremap_resource(dev, &res_regs);
774 if (IS_ERR(priv->regs)) 774 if (IS_ERR(priv->regs)) {
775 return PTR_ERR(priv->regs); 775 err = PTR_ERR(priv->regs);
776 goto out_put_node;
777 }
776 778
777 dev_dbg(dev, "Registers base address is 0x%p\n", priv->regs); 779 dev_dbg(dev, "Registers base address is 0x%p\n", priv->regs);
778 780
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index 6453148d066a..4eb17daefc4f 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -1545,6 +1545,8 @@ static const struct pci_device_id alx_pci_tbl[] = {
1545 .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG }, 1545 .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
1546 { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2400), 1546 { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2400),
1547 .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG }, 1547 .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
1548 { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2500),
1549 .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
1548 { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8162), 1550 { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8162),
1549 .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG }, 1551 .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
1550 { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8171) }, 1552 { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8171) },
diff --git a/drivers/net/ethernet/atheros/alx/reg.h b/drivers/net/ethernet/atheros/alx/reg.h
index 0959e6824cb6..1fc2d852249f 100644
--- a/drivers/net/ethernet/atheros/alx/reg.h
+++ b/drivers/net/ethernet/atheros/alx/reg.h
@@ -38,6 +38,7 @@
38#define ALX_DEV_ID_AR8161 0x1091 38#define ALX_DEV_ID_AR8161 0x1091
39#define ALX_DEV_ID_E2200 0xe091 39#define ALX_DEV_ID_E2200 0xe091
40#define ALX_DEV_ID_E2400 0xe0a1 40#define ALX_DEV_ID_E2400 0xe0a1
41#define ALX_DEV_ID_E2500 0xe0b1
41#define ALX_DEV_ID_AR8162 0x1090 42#define ALX_DEV_ID_AR8162 0x1090
42#define ALX_DEV_ID_AR8171 0x10A1 43#define ALX_DEV_ID_AR8171 0x10A1
43#define ALX_DEV_ID_AR8172 0x10A0 44#define ALX_DEV_ID_AR8172 0x10A0
diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma.c b/drivers/net/ethernet/broadcom/bgmac-bcma.c
index 9a9745c4047c..625235db644f 100644
--- a/drivers/net/ethernet/broadcom/bgmac-bcma.c
+++ b/drivers/net/ethernet/broadcom/bgmac-bcma.c
@@ -159,7 +159,7 @@ static int bgmac_probe(struct bcma_device *core)
159 159
160 if (!bgmac_is_bcm4707_family(core)) { 160 if (!bgmac_is_bcm4707_family(core)) {
161 mii_bus = bcma_mdio_mii_register(core, bgmac->phyaddr); 161 mii_bus = bcma_mdio_mii_register(core, bgmac->phyaddr);
162 if (!IS_ERR(mii_bus)) { 162 if (IS_ERR(mii_bus)) {
163 err = PTR_ERR(mii_bus); 163 err = PTR_ERR(mii_bus);
164 goto err; 164 goto err;
165 } 165 }
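
The bgmac fix above corrects an inverted error test: IS_ERR() is true for encoded error pointers, so bailing out on !IS_ERR() aborted on success and dereferenced garbage on failure. The stand-in macros below mirror the kernel's err.h encoding to show the correct polarity.

#include <stdio.h>

#define MAX_ERRNO	4095
#define IS_ERR(p)	((unsigned long)(p) >= (unsigned long)-MAX_ERRNO)
#define PTR_ERR(p)	((long)(p))
#define ERR_PTR(e)	((void *)(long)(e))

int main(void)
{
	void *mii_bus = ERR_PTR(-12);	/* e.g. -ENOMEM from registration */

	if (IS_ERR(mii_bus)) {		/* correct polarity */
		printf("register failed: %ld\n", PTR_ERR(mii_bus));
		return 1;
	}
	puts("mii bus ready");
	return 0;
}
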
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 97e892511666..fa3386bb14f7 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -772,6 +772,11 @@ void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
772 (bp->common.bc_ver & 0xff00) >> 8, 772 (bp->common.bc_ver & 0xff00) >> 8,
773 (bp->common.bc_ver & 0xff)); 773 (bp->common.bc_ver & 0xff));
774 774
775 if (pci_channel_offline(bp->pdev)) {
776 BNX2X_ERR("Cannot dump MCP info while in PCI error\n");
777 return;
778 }
779
775 val = REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER); 780 val = REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER);
776 if (val == REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER)) 781 if (val == REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER))
777 BNX2X_ERR("%s" "MCP PC at 0x%x\n", lvl, val); 782 BNX2X_ERR("%s" "MCP PC at 0x%x\n", lvl, val);
@@ -9415,10 +9420,16 @@ unload_error:
9415 /* Release IRQs */ 9420 /* Release IRQs */
9416 bnx2x_free_irq(bp); 9421 bnx2x_free_irq(bp);
9417 9422
9418 /* Reset the chip */ 9423 /* Reset the chip, unless PCI function is offline. If we reach this
9419 rc = bnx2x_reset_hw(bp, reset_code); 9424 * point following PCI error handling, it means the device is really
9420 if (rc) 9425 * in a bad state and we're about to remove it, so resetting the chip
9421 BNX2X_ERR("HW_RESET failed\n"); 9426 * is not a good idea.
9427 */
9428 if (!pci_channel_offline(bp->pdev)) {
9429 rc = bnx2x_reset_hw(bp, reset_code);
9430 if (rc)
9431 BNX2X_ERR("HW_RESET failed\n");
9432 }
9422 9433
9423 /* Report UNLOAD_DONE to MCP */ 9434 /* Report UNLOAD_DONE to MCP */
9424 bnx2x_send_unload_done(bp, keep_link); 9435 bnx2x_send_unload_done(bp, keep_link);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 2cf79100c9cb..228c964e709a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -353,8 +353,8 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
353 push_len = (length + sizeof(*tx_push) + 7) / 8; 353 push_len = (length + sizeof(*tx_push) + 7) / 8;
354 if (push_len > 16) { 354 if (push_len > 16) {
355 __iowrite64_copy(txr->tx_doorbell, tx_push_buf, 16); 355 __iowrite64_copy(txr->tx_doorbell, tx_push_buf, 16);
356 __iowrite64_copy(txr->tx_doorbell + 4, tx_push_buf + 1, 356 __iowrite32_copy(txr->tx_doorbell + 4, tx_push_buf + 1,
357 push_len - 16); 357 (push_len - 16) << 1);
358 } else { 358 } else {
359 __iowrite64_copy(txr->tx_doorbell, tx_push_buf, 359 __iowrite64_copy(txr->tx_doorbell, tx_push_buf,
360 push_len); 360 push_len);
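
The unit change above matters because __iowrite64_copy() takes its count in 64-bit words while __iowrite32_copy() takes it in 32-bit words; the << 1 converts between the two. A quick arithmetic check of the byte totals, with an arbitrary push_len:

#include <stdio.h>

int main(void)
{
	unsigned int push_len = 20;		/* total, in 8-byte units */
	unsigned int first64 = 16;		/* copied via 64-bit helper */
	unsigned int rest32 = (push_len - first64) << 1; /* 4-byte units */

	printf("%u bytes + %u bytes = %u bytes\n",
	       first64 * 8, rest32 * 4, push_len * 8);
	return 0;
}
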
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index ff300f7cf529..a2551bcd1027 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -12552,10 +12552,6 @@ static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12552 info->data = TG3_RSS_MAX_NUM_QS; 12552 info->data = TG3_RSS_MAX_NUM_QS;
12553 } 12553 }
12554 12554
12555 /* The first interrupt vector only
12556 * handles link interrupts.
12557 */
12558 info->data -= 1;
12559 return 0; 12555 return 0;
12560 12556
12561 default: 12557 default:
@@ -14014,7 +14010,9 @@ static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
14014 } 14010 }
14015 14011
14016 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) || 14012 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
14013 (!ec->rx_coalesce_usecs) ||
14017 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) || 14014 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
14015 (!ec->tx_coalesce_usecs) ||
14018 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) || 14016 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
14019 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) || 14017 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
14020 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) || 14018 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
@@ -14025,16 +14023,6 @@ static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
14025 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks)) 14023 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
14026 return -EINVAL; 14024 return -EINVAL;
14027 14025
14028 /* No rx interrupts will be generated if both are zero */
14029 if ((ec->rx_coalesce_usecs == 0) &&
14030 (ec->rx_max_coalesced_frames == 0))
14031 return -EINVAL;
14032
14033 /* No tx interrupts will be generated if both are zero */
14034 if ((ec->tx_coalesce_usecs == 0) &&
14035 (ec->tx_max_coalesced_frames == 0))
14036 return -EINVAL;
14037
14038 /* Only copy relevant parameters, ignore all others. */ 14026 /* Only copy relevant parameters, ignore all others. */
14039 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs; 14027 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
14040 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs; 14028 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index 89c0cfa9719f..d954a97b0b0b 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -1323,6 +1323,24 @@ dma_error:
1323 return 0; 1323 return 0;
1324} 1324}
1325 1325
1326static inline int macb_clear_csum(struct sk_buff *skb)
1327{
1328 /* no change for packets without checksum offloading */
1329 if (skb->ip_summed != CHECKSUM_PARTIAL)
1330 return 0;
1331
1332 /* make sure we can modify the header */
1333 if (unlikely(skb_cow_head(skb, 0)))
1334 return -1;
1335
1336 /* initialize checksum field
1337 * This is required - at least for Zynq, which otherwise calculates
1338 * wrong UDP header checksums for UDP packets with UDP data len <=2
1339 */
1340 *(__sum16 *)(skb_checksum_start(skb) + skb->csum_offset) = 0;
1341 return 0;
1342}
1343
1326static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev) 1344static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
1327{ 1345{
1328 u16 queue_index = skb_get_queue_mapping(skb); 1346 u16 queue_index = skb_get_queue_mapping(skb);
@@ -1362,6 +1380,11 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
1362 return NETDEV_TX_BUSY; 1380 return NETDEV_TX_BUSY;
1363 } 1381 }
1364 1382
1383 if (macb_clear_csum(skb)) {
1384 dev_kfree_skb_any(skb);
1385 return NETDEV_TX_OK;
1386 }
1387
1365 /* Map socket buffer for DMA transfer */ 1388 /* Map socket buffer for DMA transfer */
1366 if (!macb_tx_map(bp, queue, skb)) { 1389 if (!macb_tx_map(bp, queue, skb)) {
1367 dev_kfree_skb_any(skb); 1390 dev_kfree_skb_any(skb);
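
macb_clear_csum() above writes a zero at skb_checksum_start() + csum_offset, i.e. directly over the L4 checksum field. For a UDP packet that is byte 6 of the header, as this standalone sketch shows (the struct layout mirrors the on-wire UDP header; the stale 0xffff value is made up):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct udphdr { uint16_t source, dest, len, check; };

int main(void)
{
	struct udphdr uh = { .check = 0xffff };	/* stale checksum value */
	uint8_t l4[sizeof(uh)];
	size_t csum_offset = offsetof(struct udphdr, check);	/* 6 */

	memcpy(l4, &uh, sizeof(uh));
	memset(l4 + csum_offset, 0, sizeof(uint16_t));	/* zero the field */
	printf("csum_offset=%zu cleared=%02x%02x\n",
	       csum_offset, l4[csum_offset], l4[csum_offset + 1]);
	return 0;
}
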
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 36893d8958d4..b6fcf10621b6 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -403,11 +403,11 @@
403#define MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII 0x00000004 403#define MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII 0x00000004
404#define MACB_CAPS_NO_GIGABIT_HALF 0x00000008 404#define MACB_CAPS_NO_GIGABIT_HALF 0x00000008
405#define MACB_CAPS_USRIO_DISABLED 0x00000010 405#define MACB_CAPS_USRIO_DISABLED 0x00000010
406#define MACB_CAPS_JUMBO 0x00000020
406#define MACB_CAPS_FIFO_MODE 0x10000000 407#define MACB_CAPS_FIFO_MODE 0x10000000
407#define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000 408#define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000
408#define MACB_CAPS_SG_DISABLED 0x40000000 409#define MACB_CAPS_SG_DISABLED 0x40000000
409#define MACB_CAPS_MACB_IS_GEM 0x80000000 410#define MACB_CAPS_MACB_IS_GEM 0x80000000
410#define MACB_CAPS_JUMBO 0x00000010
411 411
412/* Bit manipulation macros */ 412/* Bit manipulation macros */
413#define MACB_BIT(name) \ 413#define MACB_BIT(name) \
diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
index 83025bb4737c..e29815d9e6f4 100644
--- a/drivers/net/ethernet/cavium/thunder/nic.h
+++ b/drivers/net/ethernet/cavium/thunder/nic.h
@@ -279,6 +279,7 @@ struct nicvf {
279 u8 sqs_id; 279 u8 sqs_id;
280 bool sqs_mode; 280 bool sqs_mode;
281 bool hw_tso; 281 bool hw_tso;
282 bool t88;
282 283
283 /* Receive buffer alloc */ 284 /* Receive buffer alloc */
284 u32 rb_page_offset; 285 u32 rb_page_offset;
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
index 16ed20357c5c..85cc782b9060 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -251,9 +251,14 @@ static void nic_set_tx_pkt_pad(struct nicpf *nic, int size)
251 int lmac; 251 int lmac;
252 u64 lmac_cfg; 252 u64 lmac_cfg;
253 253
254 /* Max value that can be set is 60 */ 254 /* There is an issue in HW wherein, while sending GSO sized
255 if (size > 60) 255 * pkts as part of TSO, if the pkt len falls below this size the
256 size = 60; 256 * NIC will zero-pad the packet and also update the IP total length.
257 * Hence set this value to less than the min pkt size of MAC+IP+TCP
258 * headers; BGX will do the padding to transmit a 64 byte pkt.
259 */
260 if (size > 52)
261 size = 52;
257 262
258 for (lmac = 0; lmac < (MAX_BGX_PER_CN88XX * MAX_LMAC_PER_BGX); lmac++) { 263 for (lmac = 0; lmac < (MAX_BGX_PER_CN88XX * MAX_LMAC_PER_BGX); lmac++) {
259 lmac_cfg = nic_reg_read(nic, NIC_PF_LMAC_0_7_CFG | (lmac << 3)); 264 lmac_cfg = nic_reg_read(nic, NIC_PF_LMAC_0_7_CFG | (lmac << 3));
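
The new 52-byte cap is chosen so the pad threshold stays below the smallest possible MAC+IP+TCP header set. The arithmetic, for the option-less case:

#include <stdio.h>

int main(void)
{
	const int eth = 14, ipv4 = 20, tcp = 20;	/* no options */

	/* 54 > 52, so the zero-pad erratum cannot hit a real TSO segment */
	printf("min MAC+IP+TCP headers = %d bytes; pad threshold = 52\n",
	       eth + ipv4 + tcp);
	return 0;
}
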
diff --git a/drivers/net/ethernet/cavium/thunder/nic_reg.h b/drivers/net/ethernet/cavium/thunder/nic_reg.h
index afb10e326b4f..fab35a593898 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_reg.h
+++ b/drivers/net/ethernet/cavium/thunder/nic_reg.h
@@ -170,7 +170,6 @@
170#define NIC_QSET_SQ_0_7_DOOR (0x010838) 170#define NIC_QSET_SQ_0_7_DOOR (0x010838)
171#define NIC_QSET_SQ_0_7_STATUS (0x010840) 171#define NIC_QSET_SQ_0_7_STATUS (0x010840)
172#define NIC_QSET_SQ_0_7_DEBUG (0x010848) 172#define NIC_QSET_SQ_0_7_DEBUG (0x010848)
173#define NIC_QSET_SQ_0_7_CNM_CHG (0x010860)
174#define NIC_QSET_SQ_0_7_STAT_0_1 (0x010900) 173#define NIC_QSET_SQ_0_7_STAT_0_1 (0x010900)
175 174
176#define NIC_QSET_RBDR_0_1_CFG (0x010C00) 175#define NIC_QSET_RBDR_0_1_CFG (0x010C00)
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
index d2d8ef270142..ad4fddb55421 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
@@ -382,7 +382,10 @@ static void nicvf_get_regs(struct net_device *dev,
382 p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_DOOR, q); 382 p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_DOOR, q);
383 p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STATUS, q); 383 p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STATUS, q);
384 p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_DEBUG, q); 384 p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_DEBUG, q);
385 p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CNM_CHG, q); 385 /* Padding, was NIC_QSET_SQ_0_7_CNM_CHG, which
386 * produces bus errors when read
387 */
388 p[i++] = 0;
386 p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1, q); 389 p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1, q);
387 reg_offset = NIC_QSET_SQ_0_7_STAT_0_1 | (1 << 3); 390 reg_offset = NIC_QSET_SQ_0_7_STAT_0_1 | (1 << 3);
388 p[i++] = nicvf_queue_reg_read(nic, reg_offset, q); 391 p[i++] = nicvf_queue_reg_read(nic, reg_offset, q);
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index a19e73f11d73..3240349615bd 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -513,6 +513,7 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev,
513 struct nicvf *nic = netdev_priv(netdev); 513 struct nicvf *nic = netdev_priv(netdev);
514 struct snd_queue *sq; 514 struct snd_queue *sq;
515 struct sq_hdr_subdesc *hdr; 515 struct sq_hdr_subdesc *hdr;
516 struct sq_hdr_subdesc *tso_sqe;
516 517
517 sq = &nic->qs->sq[cqe_tx->sq_idx]; 518 sq = &nic->qs->sq[cqe_tx->sq_idx];
518 519
@@ -527,17 +528,21 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev,
527 528
528 nicvf_check_cqe_tx_errs(nic, cq, cqe_tx); 529 nicvf_check_cqe_tx_errs(nic, cq, cqe_tx);
529 skb = (struct sk_buff *)sq->skbuff[cqe_tx->sqe_ptr]; 530 skb = (struct sk_buff *)sq->skbuff[cqe_tx->sqe_ptr];
530 /* For TSO offloaded packets only one SQE will have a valid SKB */
531 if (skb) { 531 if (skb) {
532 /* Check for dummy descriptor used for HW TSO offload on 88xx */
533 if (hdr->dont_send) {
534 /* Get actual TSO descriptors and free them */
535 tso_sqe =
536 (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, hdr->rsvd2);
537 nicvf_put_sq_desc(sq, tso_sqe->subdesc_cnt + 1);
538 }
532 nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1); 539 nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
533 prefetch(skb); 540 prefetch(skb);
534 dev_consume_skb_any(skb); 541 dev_consume_skb_any(skb);
535 sq->skbuff[cqe_tx->sqe_ptr] = (u64)NULL; 542 sq->skbuff[cqe_tx->sqe_ptr] = (u64)NULL;
536 } else { 543 } else {
537 /* In case of HW TSO, HW sends a CQE for each segment of a TSO 544 /* In case of SW TSO on 88xx, only the last segment will have
538 * packet instead of a single CQE for the whole TSO packet 545 * an SKB attached, so just free SQEs here.
539 * transmitted. Each of this CQE points to the same SQE, so
540 * avoid freeing same SQE multiple times.
541 */ 546 */
542 if (!nic->hw_tso) 547 if (!nic->hw_tso)
543 nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1); 548 nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
@@ -1502,6 +1507,7 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1502 struct net_device *netdev; 1507 struct net_device *netdev;
1503 struct nicvf *nic; 1508 struct nicvf *nic;
1504 int err, qcount; 1509 int err, qcount;
1510 u16 sdevid;
1505 1511
1506 err = pci_enable_device(pdev); 1512 err = pci_enable_device(pdev);
1507 if (err) { 1513 if (err) {
@@ -1575,6 +1581,10 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1575 if (!pass1_silicon(nic->pdev)) 1581 if (!pass1_silicon(nic->pdev))
1576 nic->hw_tso = true; 1582 nic->hw_tso = true;
1577 1583
1584 pci_read_config_word(nic->pdev, PCI_SUBSYSTEM_ID, &sdevid);
1585 if (sdevid == 0xA134)
1586 nic->t88 = true;
1587
1578 /* Check if this VF is in QS only mode */ 1588 /* Check if this VF is in QS only mode */
1579 if (nic->sqs_mode) 1589 if (nic->sqs_mode)
1580 return 0; 1590 return 0;
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index 0ff8e60deccb..dda3ea3f3bb6 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -938,6 +938,8 @@ static int nicvf_tso_count_subdescs(struct sk_buff *skb)
938 return num_edescs + sh->gso_segs; 938 return num_edescs + sh->gso_segs;
939} 939}
940 940
941#define POST_CQE_DESC_COUNT 2
942
941/* Get the number of SQ descriptors needed to xmit this skb */ 943/* Get the number of SQ descriptors needed to xmit this skb */
942static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb) 944static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb)
943{ 945{
@@ -948,6 +950,10 @@ static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb)
948 return subdesc_cnt; 950 return subdesc_cnt;
949 } 951 }
950 952
953 /* Dummy descriptors to get TSO pkt completion notification */
954 if (nic->t88 && nic->hw_tso && skb_shinfo(skb)->gso_size)
955 subdesc_cnt += POST_CQE_DESC_COUNT;
956
951 if (skb_shinfo(skb)->nr_frags) 957 if (skb_shinfo(skb)->nr_frags)
952 subdesc_cnt += skb_shinfo(skb)->nr_frags; 958 subdesc_cnt += skb_shinfo(skb)->nr_frags;
953 959
@@ -965,14 +971,21 @@ nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
965 struct sq_hdr_subdesc *hdr; 971 struct sq_hdr_subdesc *hdr;
966 972
967 hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry); 973 hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
968 sq->skbuff[qentry] = (u64)skb;
969
970 memset(hdr, 0, SND_QUEUE_DESC_SIZE); 974 memset(hdr, 0, SND_QUEUE_DESC_SIZE);
971 hdr->subdesc_type = SQ_DESC_TYPE_HEADER; 975 hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
972 /* Enable notification via CQE after processing SQE */ 976
973 hdr->post_cqe = 1; 977 if (nic->t88 && nic->hw_tso && skb_shinfo(skb)->gso_size) {
974 /* No of subdescriptors following this */ 978 /* post_cqe = 0, to avoid HW posting a CQE for every TSO
975 hdr->subdesc_cnt = subdesc_cnt; 979 * segment transmitted on 88xx.
980 */
981 hdr->subdesc_cnt = subdesc_cnt - POST_CQE_DESC_COUNT;
982 } else {
983 sq->skbuff[qentry] = (u64)skb;
984 /* Enable notification via CQE after processing SQE */
985 hdr->post_cqe = 1;
986 /* No of subdescriptors following this */
987 hdr->subdesc_cnt = subdesc_cnt;
988 }
976 hdr->tot_len = len; 989 hdr->tot_len = len;
977 990
978 /* Offload checksum calculation to HW */ 991 /* Offload checksum calculation to HW */
@@ -1023,6 +1036,37 @@ static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
1023 gather->addr = data; 1036 gather->addr = data;
1024} 1037}
1025 1038
1039/* Add HDR + IMMEDIATE subdescriptors right after descriptors of a TSO
1040 * packet so that a CQE is posted as a notification for transmission of
1041 * the TSO packet.
1042 */
1043static inline void nicvf_sq_add_cqe_subdesc(struct snd_queue *sq, int qentry,
1044 int tso_sqe, struct sk_buff *skb)
1045{
1046 struct sq_imm_subdesc *imm;
1047 struct sq_hdr_subdesc *hdr;
1048
1049 sq->skbuff[qentry] = (u64)skb;
1050
1051 hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
1052 memset(hdr, 0, SND_QUEUE_DESC_SIZE);
1053 hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
1054 /* Enable notification via CQE after processing SQE */
1055 hdr->post_cqe = 1;
1056 /* There is no packet to transmit here */
1057 hdr->dont_send = 1;
1058 hdr->subdesc_cnt = POST_CQE_DESC_COUNT - 1;
1059 hdr->tot_len = 1;
1060 /* Actual TSO header SQE index, needed for cleanup */
1061 hdr->rsvd2 = tso_sqe;
1062
1063 qentry = nicvf_get_nxt_sqentry(sq, qentry);
1064 imm = (struct sq_imm_subdesc *)GET_SQ_DESC(sq, qentry);
1065 memset(imm, 0, SND_QUEUE_DESC_SIZE);
1066 imm->subdesc_type = SQ_DESC_TYPE_IMMEDIATE;
1067 imm->len = 1;
1068}
1069
1026/* Segment a TSO packet into 'gso_size' segments and append 1070/* Segment a TSO packet into 'gso_size' segments and append
1027 * them to SQ for transfer 1071 * them to SQ for transfer
1028 */ 1072 */
@@ -1096,7 +1140,7 @@ static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
1096int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb) 1140int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb)
1097{ 1141{
1098 int i, size; 1142 int i, size;
1099 int subdesc_cnt; 1143 int subdesc_cnt, tso_sqe = 0;
1100 int sq_num, qentry; 1144 int sq_num, qentry;
1101 struct queue_set *qs; 1145 struct queue_set *qs;
1102 struct snd_queue *sq; 1146 struct snd_queue *sq;
@@ -1131,6 +1175,7 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb)
1131 /* Add SQ header subdesc */ 1175 /* Add SQ header subdesc */
1132 nicvf_sq_add_hdr_subdesc(nic, sq, qentry, subdesc_cnt - 1, 1176 nicvf_sq_add_hdr_subdesc(nic, sq, qentry, subdesc_cnt - 1,
1133 skb, skb->len); 1177 skb, skb->len);
1178 tso_sqe = qentry;
1134 1179
1135 /* Add SQ gather subdescs */ 1180 /* Add SQ gather subdescs */
1136 qentry = nicvf_get_nxt_sqentry(sq, qentry); 1181 qentry = nicvf_get_nxt_sqentry(sq, qentry);
@@ -1154,6 +1199,11 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb)
1154 } 1199 }
1155 1200
1156doorbell: 1201doorbell:
1202 if (nic->t88 && skb_shinfo(skb)->gso_size) {
1203 qentry = nicvf_get_nxt_sqentry(sq, qentry);
1204 nicvf_sq_add_cqe_subdesc(sq, qentry, tso_sqe, skb);
1205 }
1206
1157 /* make sure all memory stores are done before ringing doorbell */ 1207 /* make sure all memory stores are done before ringing doorbell */
1158 smp_wmb(); 1208 smp_wmb();
1159 1209
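
The 88xx TSO completion scheme introduced above can be modeled in plain C: the real TSO header descriptor is queued with post_cqe = 0, and a trailing dont_send descriptor (post_cqe = 1) records the header's index in rsvd2 so the completion handler can free the whole chain at once. All sizes and indices below are made up for illustration.

#include <stdio.h>

struct hdr_desc {
	unsigned int post_cqe:1, dont_send:1;
	unsigned int subdesc_cnt;
	unsigned int rsvd2;	/* index of the real TSO header SQE */
};

int main(void)
{
	struct hdr_desc ring[8] = { { 0 } };
	unsigned int tso_sqe = 2;

	/* real TSO header: no CQE, followed by 3 subdescriptors */
	ring[tso_sqe] = (struct hdr_desc){ .post_cqe = 0, .subdesc_cnt = 3 };
	/* trailing dummy: posts the CQE and remembers the header index */
	ring[6] = (struct hdr_desc){ .post_cqe = 1, .dont_send = 1,
				     .rsvd2 = tso_sqe };

	/* completion handler walks back to the real descriptors */
	struct hdr_desc *cqe_hdr = &ring[6];
	if (cqe_hdr->dont_send)
		printf("free %u descriptors starting at SQE %u\n",
		       ring[cqe_hdr->rsvd2].subdesc_cnt + 1, cqe_hdr->rsvd2);
	return 0;
}
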
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index c45de49dc963..c762a8c8c954 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -4335,6 +4335,11 @@ static void cfg_queues(struct adapter *adap)
4335#endif 4335#endif
4336 int ciq_size; 4336 int ciq_size;
4337 4337
4338 /* Reduce memory usage in kdump environment, disable all offload.
4339 */
4340 if (is_kdump_kernel())
4341 adap->params.offload = 0;
4342
4338 for_each_port(adap, i) 4343 for_each_port(adap, i)
4339 n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg); 4344 n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
4340#ifdef CONFIG_CHELSIO_T4_DCB 4345#ifdef CONFIG_CHELSIO_T4_DCB
@@ -4365,11 +4370,6 @@ static void cfg_queues(struct adapter *adap)
4365 if (q10g > netif_get_num_default_rss_queues()) 4370 if (q10g > netif_get_num_default_rss_queues())
4366 q10g = netif_get_num_default_rss_queues(); 4371 q10g = netif_get_num_default_rss_queues();
4367 4372
4368 /* Reduce memory usage in kdump environment, disable all offload.
4369 */
4370 if (is_kdump_kernel())
4371 adap->params.offload = 0;
4372
4373 for_each_port(adap, i) { 4373 for_each_port(adap, i) {
4374 struct port_info *pi = adap2pinfo(adap, i); 4374 struct port_info *pi = adap2pinfo(adap, i);
4375 4375
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 1471e16ba719..f45385f5c6e5 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -1299,6 +1299,7 @@ static int
1299dm9000_open(struct net_device *dev) 1299dm9000_open(struct net_device *dev)
1300{ 1300{
1301 struct board_info *db = netdev_priv(dev); 1301 struct board_info *db = netdev_priv(dev);
1302 unsigned int irq_flags = irq_get_trigger_type(dev->irq);
1302 1303
1303 if (netif_msg_ifup(db)) 1304 if (netif_msg_ifup(db))
1304 dev_dbg(db->dev, "enabling %s\n", dev->name); 1305 dev_dbg(db->dev, "enabling %s\n", dev->name);
@@ -1306,9 +1307,11 @@ dm9000_open(struct net_device *dev)
1306 /* If there is no IRQ type specified, tell the user that this is a 1307 /* If there is no IRQ type specified, tell the user that this is a
1307 * problem 1308 * problem
1308 */ 1309 */
1309 if (irq_get_trigger_type(dev->irq) == IRQF_TRIGGER_NONE) 1310 if (irq_flags == IRQF_TRIGGER_NONE)
1310 dev_warn(db->dev, "WARNING: no IRQ resource flags set.\n"); 1311 dev_warn(db->dev, "WARNING: no IRQ resource flags set.\n");
1311 1312
1313 irq_flags |= IRQF_SHARED;
1314
1312 /* GPIO0 on pre-activate PHY, Reg 1F is not set by reset */ 1315 /* GPIO0 on pre-activate PHY, Reg 1F is not set by reset */
1313 iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */ 1316 iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */
1314 mdelay(1); /* delay needs by DM9000B */ 1317 mdelay(1); /* delay needs by DM9000B */
@@ -1316,8 +1319,7 @@ dm9000_open(struct net_device *dev)
1316 /* Initialize DM9000 board */ 1319 /* Initialize DM9000 board */
1317 dm9000_init_dm9000(dev); 1320 dm9000_init_dm9000(dev);
1318 1321
1319 if (request_irq(dev->irq, dm9000_interrupt, IRQF_SHARED, 1322 if (request_irq(dev->irq, dm9000_interrupt, irq_flags, dev->name, dev))
1320 dev->name, dev))
1321 return -EAGAIN; 1323 return -EAGAIN;
1322 /* Now that we have an interrupt handler hooked up we can unmask 1324 /* Now that we have an interrupt handler hooked up we can unmask
1323 * our interrupts 1325 * our interrupts
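
The dm9000 change above composes the request_irq() flags from the platform's trigger type instead of hard-coding IRQF_SHARED alone. A sketch of the flag composition; the numeric values mirror linux/interrupt.h, and the rising-edge trigger is an arbitrary example.

#include <stdio.h>

#define IRQF_TRIGGER_NONE	0x00000000
#define IRQF_TRIGGER_RISING	0x00000001
#define IRQF_SHARED		0x00000080

int main(void)
{
	unsigned long irq_flags = IRQF_TRIGGER_RISING;	/* from platform */

	if (irq_flags == IRQF_TRIGGER_NONE)
		puts("WARNING: no IRQ resource flags set.");

	irq_flags |= IRQF_SHARED;	/* keep sharing, honour the trigger */
	printf("request_irq flags = 0x%02lx\n", irq_flags);
	return 0;
}
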
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index d20935dc8399..4b4f5bc0e279 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -2922,17 +2922,25 @@ static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
2922{ 2922{
2923 unsigned int size = lstatus & BD_LENGTH_MASK; 2923 unsigned int size = lstatus & BD_LENGTH_MASK;
2924 struct page *page = rxb->page; 2924 struct page *page = rxb->page;
2925 bool last = !!(lstatus & BD_LFLAG(RXBD_LAST));
2925 2926
2926 /* Remove the FCS from the packet length */ 2927 /* Remove the FCS from the packet length */
2927 if (likely(lstatus & BD_LFLAG(RXBD_LAST))) 2928 if (last)
2928 size -= ETH_FCS_LEN; 2929 size -= ETH_FCS_LEN;
2929 2930
2930 if (likely(first)) 2931 if (likely(first)) {
2931 skb_put(skb, size); 2932 skb_put(skb, size);
2932 else 2933 } else {
2933 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, 2934 /* the last fragment's length contains the full frame length */
2934 rxb->page_offset + RXBUF_ALIGNMENT, 2935 if (last)
2935 size, GFAR_RXB_TRUESIZE); 2936 size -= skb->len;
2937
2938 /* in case the last fragment consisted only of the FCS */
2939 if (size > 0)
2940 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
2941 rxb->page_offset + RXBUF_ALIGNMENT,
2942 size, GFAR_RXB_TRUESIZE);
2943 }
2936 2944
2937 /* try reuse page */ 2945 /* try reuse page */
2938 if (unlikely(page_count(page) != 1)) 2946 if (unlikely(page_count(page) != 1))
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index 373fd094f2f3..6e8a9c8467b9 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -100,7 +100,8 @@ extern const char gfar_driver_version[];
100#define DEFAULT_RX_LFC_THR 16 100#define DEFAULT_RX_LFC_THR 16
101#define DEFAULT_LFC_PTVVAL 4 101#define DEFAULT_LFC_PTVVAL 4
102 102
103#define GFAR_RXB_SIZE 1536 103/* prevent fragmentation by HW in DSA environments */
104#define GFAR_RXB_SIZE roundup(1536 + 8, 64)
104#define GFAR_SKBFRAG_SIZE (RXBUF_ALIGNMENT + GFAR_RXB_SIZE \ 105#define GFAR_SKBFRAG_SIZE (RXBUF_ALIGNMENT + GFAR_RXB_SIZE \
105 + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) 106 + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
106#define GFAR_RXB_TRUESIZE 2048 107#define GFAR_RXB_TRUESIZE 2048
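
The new GFAR_RXB_SIZE evaluates to 1600: 1536 plus 8 bytes of DSA tag headroom, rounded up to a 64-byte multiple. The kernel's roundup() macro, reproduced for a quick check:

#include <stdio.h>

#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))

int main(void)
{
	printf("GFAR_RXB_SIZE = %d\n", roundup(1536 + 8, 64));	/* 1600 */
	return 0;
}
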
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
index 1235c7f2564b..1e1eb92998fb 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
@@ -17,7 +17,7 @@ static const struct mac_stats_string g_gmac_stats_string[] = {
17 {"gmac_rx_octets_total_ok", MAC_STATS_FIELD_OFF(rx_good_bytes)}, 17 {"gmac_rx_octets_total_ok", MAC_STATS_FIELD_OFF(rx_good_bytes)},
18 {"gmac_rx_octets_bad", MAC_STATS_FIELD_OFF(rx_bad_bytes)}, 18 {"gmac_rx_octets_bad", MAC_STATS_FIELD_OFF(rx_bad_bytes)},
19 {"gmac_rx_uc_pkts", MAC_STATS_FIELD_OFF(rx_uc_pkts)}, 19 {"gmac_rx_uc_pkts", MAC_STATS_FIELD_OFF(rx_uc_pkts)},
20 {"gamc_rx_mc_pkts", MAC_STATS_FIELD_OFF(rx_mc_pkts)}, 20 {"gmac_rx_mc_pkts", MAC_STATS_FIELD_OFF(rx_mc_pkts)},
21 {"gmac_rx_bc_pkts", MAC_STATS_FIELD_OFF(rx_bc_pkts)}, 21 {"gmac_rx_bc_pkts", MAC_STATS_FIELD_OFF(rx_bc_pkts)},
22 {"gmac_rx_pkts_64octets", MAC_STATS_FIELD_OFF(rx_64bytes)}, 22 {"gmac_rx_pkts_64octets", MAC_STATS_FIELD_OFF(rx_64bytes)},
23 {"gmac_rx_pkts_65to127", MAC_STATS_FIELD_OFF(rx_65to127)}, 23 {"gmac_rx_pkts_65to127", MAC_STATS_FIELD_OFF(rx_65to127)},
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
index ff8b6a468b24..6ea872287307 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
@@ -328,9 +328,10 @@ static void hns_ppe_init_hw(struct hns_ppe_cb *ppe_cb)
328static void hns_ppe_uninit_hw(struct hns_ppe_cb *ppe_cb) 328static void hns_ppe_uninit_hw(struct hns_ppe_cb *ppe_cb)
329{ 329{
330 u32 port; 330 u32 port;
331 struct dsaf_device *dsaf_dev = ppe_cb->ppe_common_cb->dsaf_dev;
332 331
333 if (ppe_cb->ppe_common_cb) { 332 if (ppe_cb->ppe_common_cb) {
333 struct dsaf_device *dsaf_dev = ppe_cb->ppe_common_cb->dsaf_dev;
334
334 port = ppe_cb->index; 335 port = ppe_cb->index;
335 dsaf_dev->misc_op->ppe_srst(dsaf_dev, port, 0); 336 dsaf_dev->misc_op->ppe_srst(dsaf_dev, port, 0);
336 } 337 }
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index 7fd4d54599e4..6b03c8553e59 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -2032,7 +2032,8 @@ const struct e1000_info e1000_82574_info = {
2032 | FLAG2_DISABLE_ASPM_L0S 2032 | FLAG2_DISABLE_ASPM_L0S
2033 | FLAG2_DISABLE_ASPM_L1 2033 | FLAG2_DISABLE_ASPM_L1
2034 | FLAG2_NO_DISABLE_RX 2034 | FLAG2_NO_DISABLE_RX
2035 | FLAG2_DMA_BURST, 2035 | FLAG2_DMA_BURST
2036 | FLAG2_CHECK_SYSTIM_OVERFLOW,
2036 .pba = 32, 2037 .pba = 32,
2037 .max_hw_frame_size = DEFAULT_JUMBO, 2038 .max_hw_frame_size = DEFAULT_JUMBO,
2038 .get_variants = e1000_get_variants_82571, 2039 .get_variants = e1000_get_variants_82571,
@@ -2053,7 +2054,8 @@ const struct e1000_info e1000_82583_info = {
2053 | FLAG_HAS_CTRLEXT_ON_LOAD, 2054 | FLAG_HAS_CTRLEXT_ON_LOAD,
2054 .flags2 = FLAG2_DISABLE_ASPM_L0S 2055 .flags2 = FLAG2_DISABLE_ASPM_L0S
2055 | FLAG2_DISABLE_ASPM_L1 2056 | FLAG2_DISABLE_ASPM_L1
2056 | FLAG2_NO_DISABLE_RX, 2057 | FLAG2_NO_DISABLE_RX
2058 | FLAG2_CHECK_SYSTIM_OVERFLOW,
2057 .pba = 32, 2059 .pba = 32,
2058 .max_hw_frame_size = DEFAULT_JUMBO, 2060 .max_hw_frame_size = DEFAULT_JUMBO,
2059 .get_variants = e1000_get_variants_82571, 2061 .get_variants = e1000_get_variants_82571,
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index ef96cd11d6d2..879cca47b021 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -452,6 +452,7 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca);
452#define FLAG2_PCIM2PCI_ARBITER_WA BIT(11) 452#define FLAG2_PCIM2PCI_ARBITER_WA BIT(11)
453#define FLAG2_DFLT_CRC_STRIPPING BIT(12) 453#define FLAG2_DFLT_CRC_STRIPPING BIT(12)
454#define FLAG2_CHECK_RX_HWTSTAMP BIT(13) 454#define FLAG2_CHECK_RX_HWTSTAMP BIT(13)
455#define FLAG2_CHECK_SYSTIM_OVERFLOW BIT(14)
455 456
456#define E1000_RX_DESC_PS(R, i) \ 457#define E1000_RX_DESC_PS(R, i) \
457 (&(((union e1000_rx_desc_packet_split *)((R).desc))[i])) 458 (&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 3e11322d8d58..f3aaca743ea3 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -5885,7 +5885,8 @@ const struct e1000_info e1000_pch_lpt_info = {
5885 | FLAG_HAS_JUMBO_FRAMES 5885 | FLAG_HAS_JUMBO_FRAMES
5886 | FLAG_APME_IN_WUC, 5886 | FLAG_APME_IN_WUC,
5887 .flags2 = FLAG2_HAS_PHY_STATS 5887 .flags2 = FLAG2_HAS_PHY_STATS
5888 | FLAG2_HAS_EEE, 5888 | FLAG2_HAS_EEE
5889 | FLAG2_CHECK_SYSTIM_OVERFLOW,
5889 .pba = 26, 5890 .pba = 26,
5890 .max_hw_frame_size = 9022, 5891 .max_hw_frame_size = 9022,
5891 .get_variants = e1000_get_variants_ich8lan, 5892 .get_variants = e1000_get_variants_ich8lan,
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 02f443958f31..7017281ba2dc 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -4303,6 +4303,42 @@ void e1000e_reinit_locked(struct e1000_adapter *adapter)
4303} 4303}
4304 4304
4305/** 4305/**
4306 * e1000e_sanitize_systim - sanitize raw cycle counter reads
4307 * @hw: pointer to the HW structure
4308 * @systim: cycle_t value read, sanitized and returned
4309 *
4310 * Errata for 82574/82583 possible bad bits read from SYSTIMH/L:
4311 * check to see that the time is incrementing at a reasonable
4312 * rate and is a multiple of incvalue.
4313 **/
4314static cycle_t e1000e_sanitize_systim(struct e1000_hw *hw, cycle_t systim)
4315{
4316 u64 time_delta, rem, temp;
4317 cycle_t systim_next;
4318 u32 incvalue;
4319 int i;
4320
4321 incvalue = er32(TIMINCA) & E1000_TIMINCA_INCVALUE_MASK;
4322 for (i = 0; i < E1000_MAX_82574_SYSTIM_REREADS; i++) {
4323 /* latch SYSTIMH on read of SYSTIML */
4324 systim_next = (cycle_t)er32(SYSTIML);
4325 systim_next |= (cycle_t)er32(SYSTIMH) << 32;
4326
4327 time_delta = systim_next - systim;
4328 temp = time_delta;
4329 /* VMWare users have seen incvalue of zero, don't div / 0 */
4330 rem = incvalue ? do_div(temp, incvalue) : (time_delta != 0);
4331
4332 systim = systim_next;
4333
4334 if ((time_delta < E1000_82574_SYSTIM_EPSILON) && (rem == 0))
4335 break;
4336 }
4337
4338 return systim;
4339}
4340
4341/**
4306 * e1000e_cyclecounter_read - read raw cycle counter (used by time counter) 4342 * e1000e_cyclecounter_read - read raw cycle counter (used by time counter)
4307 * @cc: cyclecounter structure 4343 * @cc: cyclecounter structure
4308 **/ 4344 **/
@@ -4312,7 +4348,7 @@ static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc)
4312 cc); 4348 cc);
4313 struct e1000_hw *hw = &adapter->hw; 4349 struct e1000_hw *hw = &adapter->hw;
4314 u32 systimel, systimeh; 4350 u32 systimel, systimeh;
4315 cycle_t systim, systim_next; 4351 cycle_t systim;
4316 /* SYSTIMH latching upon SYSTIML read does not work well. 4352 /* SYSTIMH latching upon SYSTIML read does not work well.
4317 * This means that if SYSTIML overflows after we read it but before 4353 * This means that if SYSTIML overflows after we read it but before
4318 * we read SYSTIMH, the value of SYSTIMH has been incremented and we 4354 * we read SYSTIMH, the value of SYSTIMH has been incremented and we
@@ -4335,33 +4371,9 @@ static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc)
4335 systim = (cycle_t)systimel; 4371 systim = (cycle_t)systimel;
4336 systim |= (cycle_t)systimeh << 32; 4372 systim |= (cycle_t)systimeh << 32;
4337 4373
4338 if ((hw->mac.type == e1000_82574) || (hw->mac.type == e1000_82583)) { 4374 if (adapter->flags2 & FLAG2_CHECK_SYSTIM_OVERFLOW)
4339 u64 time_delta, rem, temp; 4375 systim = e1000e_sanitize_systim(hw, systim);
4340 u32 incvalue;
4341 int i;
4342
4343 /* errata for 82574/82583 possible bad bits read from SYSTIMH/L
4344 * check to see that the time is incrementing at a reasonable
4345 * rate and is a multiple of incvalue
4346 */
4347 incvalue = er32(TIMINCA) & E1000_TIMINCA_INCVALUE_MASK;
4348 for (i = 0; i < E1000_MAX_82574_SYSTIM_REREADS; i++) {
4349 /* latch SYSTIMH on read of SYSTIML */
4350 systim_next = (cycle_t)er32(SYSTIML);
4351 systim_next |= (cycle_t)er32(SYSTIMH) << 32;
4352
4353 time_delta = systim_next - systim;
4354 temp = time_delta;
4355 /* VMWare users have seen incvalue of zero, don't div / 0 */
4356 rem = incvalue ? do_div(temp, incvalue) : (time_delta != 0);
4357
4358 systim = systim_next;
4359 4376
4360 if ((time_delta < E1000_82574_SYSTIM_EPSILON) &&
4361 (rem == 0))
4362 break;
4363 }
4364 }
4365 return systim; 4377 return systim;
4366} 4378}
4367 4379
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
index e1370c556a3c..618f18436618 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
@@ -199,6 +199,7 @@ void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi)
199void i40e_notify_client_of_netdev_open(struct i40e_vsi *vsi) 199void i40e_notify_client_of_netdev_open(struct i40e_vsi *vsi)
200{ 200{
201 struct i40e_client_instance *cdev; 201 struct i40e_client_instance *cdev;
202 int ret = 0;
202 203
203 if (!vsi) 204 if (!vsi)
204 return; 205 return;
@@ -211,7 +212,14 @@ void i40e_notify_client_of_netdev_open(struct i40e_vsi *vsi)
211 "Cannot locate client instance open routine\n"); 212 "Cannot locate client instance open routine\n");
212 continue; 213 continue;
213 } 214 }
214 cdev->client->ops->open(&cdev->lan_info, cdev->client); 215 if (!(test_bit(__I40E_CLIENT_INSTANCE_OPENED,
216 &cdev->state))) {
217 ret = cdev->client->ops->open(&cdev->lan_info,
218 cdev->client);
219 if (!ret)
220 set_bit(__I40E_CLIENT_INSTANCE_OPENED,
221 &cdev->state);
222 }
215 } 223 }
216 } 224 }
217 mutex_unlock(&i40e_client_instance_mutex); 225 mutex_unlock(&i40e_client_instance_mutex);
@@ -407,12 +415,14 @@ struct i40e_vsi *i40e_vsi_lookup(struct i40e_pf *pf,
407 * i40e_client_add_instance - add a client instance struct to the instance list 415 * i40e_client_add_instance - add a client instance struct to the instance list
408 * @pf: pointer to the board struct 416 * @pf: pointer to the board struct
409 * @client: pointer to a client struct in the client list. 417 * @client: pointer to a client struct in the client list.
418 * @existing: if there was already an existing instance
410 * 419 *
411 * Returns cdev ptr on success, NULL on failure 420 * Returns cdev ptr on success or if already exists, NULL on failure
412 **/ 421 **/
413static 422static
414struct i40e_client_instance *i40e_client_add_instance(struct i40e_pf *pf, 423struct i40e_client_instance *i40e_client_add_instance(struct i40e_pf *pf,
415 struct i40e_client *client) 424 struct i40e_client *client,
425 bool *existing)
416{ 426{
417 struct i40e_client_instance *cdev; 427 struct i40e_client_instance *cdev;
418 struct netdev_hw_addr *mac = NULL; 428 struct netdev_hw_addr *mac = NULL;
@@ -421,7 +431,7 @@ struct i40e_client_instance *i40e_client_add_instance(struct i40e_pf *pf,
421 mutex_lock(&i40e_client_instance_mutex); 431 mutex_lock(&i40e_client_instance_mutex);
422 list_for_each_entry(cdev, &i40e_client_instances, list) { 432 list_for_each_entry(cdev, &i40e_client_instances, list) {
423 if ((cdev->lan_info.pf == pf) && (cdev->client == client)) { 433 if ((cdev->lan_info.pf == pf) && (cdev->client == client)) {
424 cdev = NULL; 434 *existing = true;
425 goto out; 435 goto out;
426 } 436 }
427 } 437 }
@@ -505,6 +515,7 @@ void i40e_client_subtask(struct i40e_pf *pf)
505{ 515{
506 struct i40e_client_instance *cdev; 516 struct i40e_client_instance *cdev;
507 struct i40e_client *client; 517 struct i40e_client *client;
518 bool existing = false;
508 int ret = 0; 519 int ret = 0;
509 520
510 if (!(pf->flags & I40E_FLAG_SERVICE_CLIENT_REQUESTED)) 521 if (!(pf->flags & I40E_FLAG_SERVICE_CLIENT_REQUESTED))
@@ -528,18 +539,25 @@ void i40e_client_subtask(struct i40e_pf *pf)
528 /* check if L2 VSI is up, if not we are not ready */ 539 /* check if L2 VSI is up, if not we are not ready */
529 if (test_bit(__I40E_DOWN, &pf->vsi[pf->lan_vsi]->state)) 540 if (test_bit(__I40E_DOWN, &pf->vsi[pf->lan_vsi]->state))
530 continue; 541 continue;
542 } else {
 543 dev_warn(&pf->pdev->dev, "This client %s is being instantiated at probe\n",
544 client->name);
531 } 545 }
532 546
533 /* Add the client instance to the instance list */ 547 /* Add the client instance to the instance list */
534 cdev = i40e_client_add_instance(pf, client); 548 cdev = i40e_client_add_instance(pf, client, &existing);
535 if (!cdev) 549 if (!cdev)
536 continue; 550 continue;
537 551
538 /* Also up the ref_cnt of no. of instances of this client */ 552 if (!existing) {
539 atomic_inc(&client->ref_cnt); 553 /* Also up the ref_cnt for no. of instances of this
540 dev_info(&pf->pdev->dev, "Added instance of Client %s to PF%d bus=0x%02x func=0x%02x\n", 554 * client.
541 client->name, pf->hw.pf_id, 555 */
542 pf->hw.bus.device, pf->hw.bus.func); 556 atomic_inc(&client->ref_cnt);
557 dev_info(&pf->pdev->dev, "Added instance of Client %s to PF%d bus=0x%02x func=0x%02x\n",
558 client->name, pf->hw.pf_id,
559 pf->hw.bus.device, pf->hw.bus.func);
560 }
543 561
544 /* Send an Open request to the client */ 562 /* Send an Open request to the client */
545 atomic_inc(&cdev->ref_cnt); 563 atomic_inc(&cdev->ref_cnt);
@@ -588,7 +606,8 @@ int i40e_lan_add_device(struct i40e_pf *pf)
588 pf->hw.pf_id, pf->hw.bus.device, pf->hw.bus.func); 606 pf->hw.pf_id, pf->hw.bus.device, pf->hw.bus.func);
589 607
590 /* Since in some cases register may have happened before a device gets 608 /* Since in some cases register may have happened before a device gets
591 * added, we can schedule a subtask to go initiate the clients. 609 * added, we can schedule a subtask to go initiate the clients if
610 * they can be launched at probe time.
592 */ 611 */
593 pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED; 612 pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED;
594 i40e_service_event_schedule(pf); 613 i40e_service_event_schedule(pf);
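
The guard added in i40e_notify_client_of_netdev_open() pairs with the removal of the call from i40e_open() further down: an instance's ->open() now runs at most once, and only a successful open marks the instance opened. A reduced, single-threaded sketch of that pattern; names here are illustrative, not the driver's.

enum { INSTANCE_OPENED = 1u << 0 };

struct client_instance {
    unsigned int state;
    int (*open)(struct client_instance *ci); /* returns 0 on success */
};

static void notify_netdev_open(struct client_instance *ci)
{
    /* skip instances that were already opened (e.g. at probe) */
    if (ci->state & INSTANCE_OPENED)
        return;

    /* mark opened only when ->open() reports success, so a failed
     * open can be retried on the next notification
     */
    if (ci->open(ci) == 0)
        ci->state |= INSTANCE_OPENED;
}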
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 81c99e1be708..d0b3a1bb82ca 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -4554,23 +4554,38 @@ static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf)
4554 **/ 4554 **/
4555static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg) 4555static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
4556{ 4556{
4557 int i, tc_unused = 0;
4557 u8 num_tc = 0; 4558 u8 num_tc = 0;
4558 int i; 4559 u8 ret = 0;
4559 4560
4560 /* Scan the ETS Config Priority Table to find 4561 /* Scan the ETS Config Priority Table to find
4561 * traffic class enabled for a given priority 4562 * traffic class enabled for a given priority
4562 * and use the traffic class index to get the 4563 * and create a bitmask of enabled TCs
4563 * number of traffic classes enabled
4564 */ 4564 */
4565 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { 4565 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
4566 if (dcbcfg->etscfg.prioritytable[i] > num_tc) 4566 num_tc |= BIT(dcbcfg->etscfg.prioritytable[i]);
4567 num_tc = dcbcfg->etscfg.prioritytable[i];
4568 }
4569 4567
4570 /* Traffic class index starts from zero so 4568 /* Now scan the bitmask to check for
4571 * increment to return the actual count 4569 * contiguous TCs starting with TC0
4572 */ 4570 */
4573 return num_tc + 1; 4571 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4572 if (num_tc & BIT(i)) {
4573 if (!tc_unused) {
4574 ret++;
4575 } else {
4576 pr_err("Non-contiguous TC - Disabling DCB\n");
4577 return 1;
4578 }
4579 } else {
4580 tc_unused = 1;
4581 }
4582 }
4583
4584 /* There is always at least TC0 */
4585 if (!ret)
4586 ret = 1;
4587
4588 return ret;
4574} 4589}
4575 4590
4576/** 4591/**
@@ -5098,9 +5113,13 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
5098 DCB_CAP_DCBX_VER_IEEE; 5113 DCB_CAP_DCBX_VER_IEEE;
5099 5114
5100 pf->flags |= I40E_FLAG_DCB_CAPABLE; 5115 pf->flags |= I40E_FLAG_DCB_CAPABLE;
5101 /* Enable DCB tagging only when more than one TC */ 5116 /* Enable DCB tagging only when more than one TC
5117 * or explicitly disable if only one TC
5118 */
5102 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1) 5119 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
5103 pf->flags |= I40E_FLAG_DCB_ENABLED; 5120 pf->flags |= I40E_FLAG_DCB_ENABLED;
5121 else
5122 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
5104 dev_dbg(&pf->pdev->dev, 5123 dev_dbg(&pf->pdev->dev,
5105 "DCBX offload is supported for this PF.\n"); 5124 "DCBX offload is supported for this PF.\n");
5106 } 5125 }
@@ -5416,7 +5435,6 @@ int i40e_open(struct net_device *netdev)
5416 wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16); 5435 wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);
5417 5436
5418 udp_tunnel_get_rx_info(netdev); 5437 udp_tunnel_get_rx_info(netdev);
5419 i40e_notify_client_of_netdev_open(vsi);
5420 5438
5421 return 0; 5439 return 0;
5422} 5440}
@@ -5702,7 +5720,7 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
5702 u8 type; 5720 u8 type;
5703 5721
5704 /* Not DCB capable or capability disabled */ 5722 /* Not DCB capable or capability disabled */
5705 if (!(pf->flags & I40E_FLAG_DCB_CAPABLE)) 5723 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
5706 return ret; 5724 return ret;
5707 5725
5708 /* Ignore if event is not for Nearest Bridge */ 5726 /* Ignore if event is not for Nearest Bridge */
@@ -7882,6 +7900,7 @@ static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
7882#endif 7900#endif
7883 I40E_FLAG_RSS_ENABLED | 7901 I40E_FLAG_RSS_ENABLED |
7884 I40E_FLAG_DCB_CAPABLE | 7902 I40E_FLAG_DCB_CAPABLE |
7903 I40E_FLAG_DCB_ENABLED |
7885 I40E_FLAG_SRIOV_ENABLED | 7904 I40E_FLAG_SRIOV_ENABLED |
7886 I40E_FLAG_FD_SB_ENABLED | 7905 I40E_FLAG_FD_SB_ENABLED |
7887 I40E_FLAG_FD_ATR_ENABLED | 7906 I40E_FLAG_FD_ATR_ENABLED |
@@ -10488,6 +10507,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
10488 I40E_FLAG_FD_SB_ENABLED | 10507 I40E_FLAG_FD_SB_ENABLED |
10489 I40E_FLAG_FD_ATR_ENABLED | 10508 I40E_FLAG_FD_ATR_ENABLED |
10490 I40E_FLAG_DCB_CAPABLE | 10509 I40E_FLAG_DCB_CAPABLE |
10510 I40E_FLAG_DCB_ENABLED |
10491 I40E_FLAG_SRIOV_ENABLED | 10511 I40E_FLAG_SRIOV_ENABLED |
10492 I40E_FLAG_VMDQ_ENABLED); 10512 I40E_FLAG_VMDQ_ENABLED);
10493 } else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED | 10513 } else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
@@ -10511,7 +10531,8 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
10511 /* Not enough queues for all TCs */ 10531 /* Not enough queues for all TCs */
10512 if ((pf->flags & I40E_FLAG_DCB_CAPABLE) && 10532 if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
10513 (queues_left < I40E_MAX_TRAFFIC_CLASS)) { 10533 (queues_left < I40E_MAX_TRAFFIC_CLASS)) {
10514 pf->flags &= ~I40E_FLAG_DCB_CAPABLE; 10534 pf->flags &= ~(I40E_FLAG_DCB_CAPABLE |
10535 I40E_FLAG_DCB_ENABLED);
10515 dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n"); 10536 dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
10516 } 10537 }
10517 pf->num_lan_qps = max_t(int, pf->rss_size_max, 10538 pf->num_lan_qps = max_t(int, pf->rss_size_max,
@@ -10908,7 +10929,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
10908 err = i40e_init_pf_dcb(pf); 10929 err = i40e_init_pf_dcb(pf);
10909 if (err) { 10930 if (err) {
10910 dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err); 10931 dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err);
10911 pf->flags &= ~I40E_FLAG_DCB_CAPABLE; 10932 pf->flags &= ~(I40E_FLAG_DCB_CAPABLE & I40E_FLAG_DCB_ENABLED);
10912 /* Continue without DCB enabled */ 10933 /* Continue without DCB enabled */
10913 } 10934 }
10914#endif /* CONFIG_I40E_DCB */ 10935#endif /* CONFIG_I40E_DCB */
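
The reworked i40e_dcb_get_num_tc() no longer returns the highest TC index plus one; it builds a bitmask of the TCs referenced by the priority table and accepts only a contiguous run starting at TC0, falling back to a single TC otherwise. A self-contained sketch, with MAX_PRIORITY and MAX_TC standing in for the I40E_MAX_* constants:

#include <stdio.h>
#include <stdint.h>

#define MAX_PRIORITY 8
#define MAX_TC       8

static uint8_t get_num_tc(const uint8_t prio_table[MAX_PRIORITY])
{
    uint8_t mask = 0, num_tc = 0;
    int i, hole = 0;

    /* bitmask of every TC some priority maps to */
    for (i = 0; i < MAX_PRIORITY; i++)
        mask |= 1u << prio_table[i];

    /* count only a contiguous run starting at TC0 */
    for (i = 0; i < MAX_TC; i++) {
        if (mask & (1u << i)) {
            if (hole)       /* e.g. TC0 and TC2 enabled but not TC1 */
                return 1;   /* non-contiguous: fall back to one TC */
            num_tc++;
        } else {
            hole = 1;
        }
    }
    return num_tc ? num_tc : 1; /* there is always at least TC0 */
}

int main(void)
{
    uint8_t ok[MAX_PRIORITY]  = { 0, 0, 1, 1, 2, 2, 2, 0 };
    uint8_t bad[MAX_PRIORITY] = { 0, 0, 2, 2, 0, 0, 0, 0 };

    printf("%u %u\n", get_num_tc(ok), get_num_tc(bad)); /* prints: 3 1 */
    return 0;
}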
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index e61b647f5f2a..336c103ae374 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -744,7 +744,8 @@ static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
744 } 744 }
745 } 745 }
746 746
747 shhwtstamps.hwtstamp = ktime_sub_ns(shhwtstamps.hwtstamp, adjust); 747 shhwtstamps.hwtstamp =
748 ktime_add_ns(shhwtstamps.hwtstamp, adjust);
748 749
749 skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamps); 750 skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamps);
750 dev_kfree_skb_any(adapter->ptp_tx_skb); 751 dev_kfree_skb_any(adapter->ptp_tx_skb);
@@ -767,13 +768,32 @@ void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector,
767 struct sk_buff *skb) 768 struct sk_buff *skb)
768{ 769{
769 __le64 *regval = (__le64 *)va; 770 __le64 *regval = (__le64 *)va;
771 struct igb_adapter *adapter = q_vector->adapter;
772 int adjust = 0;
770 773
771 /* The timestamp is recorded in little endian format. 774 /* The timestamp is recorded in little endian format.
772 * DWORD: 0 1 2 3 775 * DWORD: 0 1 2 3
773 * Field: Reserved Reserved SYSTIML SYSTIMH 776 * Field: Reserved Reserved SYSTIML SYSTIMH
774 */ 777 */
775 igb_ptp_systim_to_hwtstamp(q_vector->adapter, skb_hwtstamps(skb), 778 igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb),
776 le64_to_cpu(regval[1])); 779 le64_to_cpu(regval[1]));
780
781 /* adjust timestamp for the RX latency based on link speed */
782 if (adapter->hw.mac.type == e1000_i210) {
783 switch (adapter->link_speed) {
784 case SPEED_10:
785 adjust = IGB_I210_RX_LATENCY_10;
786 break;
787 case SPEED_100:
788 adjust = IGB_I210_RX_LATENCY_100;
789 break;
790 case SPEED_1000:
791 adjust = IGB_I210_RX_LATENCY_1000;
792 break;
793 }
794 }
795 skb_hwtstamps(skb)->hwtstamp =
796 ktime_sub_ns(skb_hwtstamps(skb)->hwtstamp, adjust);
777} 797}
778 798
779/** 799/**
@@ -825,7 +845,7 @@ void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
825 } 845 }
826 } 846 }
827 skb_hwtstamps(skb)->hwtstamp = 847 skb_hwtstamps(skb)->hwtstamp =
828 ktime_add_ns(skb_hwtstamps(skb)->hwtstamp, adjust); 848 ktime_sub_ns(skb_hwtstamps(skb)->hwtstamp, adjust);
829 849
830 /* Update the last_rx_timestamp timer in order to enable watchdog check 850 /* Update the last_rx_timestamp timer in order to enable watchdog check
831 * for error case of latched timestamp on a dropped packet. 851 * for error case of latched timestamp on a dropped packet.
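
In igb_ptp.c the per-packet RX timestamps now have an i210 link-speed-dependent latency subtracted (and the TX path switched from subtracting to adding its adjustment). A sketch of the RX side; the nanosecond values are placeholders, not the IGB_I210_RX_LATENCY_* constants:

#include <stdint.h>

enum link_speed { SPEED_10, SPEED_100, SPEED_1000 };

static uint64_t rx_latency_ns(enum link_speed speed)
{
    switch (speed) {
    case SPEED_10:   return 9000; /* placeholder values in ns */
    case SPEED_100:  return 1000;
    case SPEED_1000: return 300;
    }
    return 0;
}

/* move the stamp back to when the packet hit the wire rather than
 * when the MAC latched it
 */
static uint64_t adjust_rx_hwtstamp(uint64_t hwtstamp_ns, enum link_speed speed)
{
    return hwtstamp_ns - rx_latency_ns(speed);
}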
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index b4217f30e89c..c47b605e8651 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -2958,8 +2958,10 @@ s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
2958 } 2958 }
2959 2959
2960 /* was that the last pool using this rar? */ 2960 /* was that the last pool using this rar? */
2961 if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0) 2961 if (mpsar_lo == 0 && mpsar_hi == 0 &&
2962 rar != 0 && rar != hw->mac.san_mac_rar_index)
2962 hw->mac.ops.clear_rar(hw, rar); 2963 hw->mac.ops.clear_rar(hw, rar);
2964
2963 return 0; 2965 return 0;
2964} 2966}
2965 2967
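
The ixgbe_clear_vmdq_generic() hunk adds one more condition before a receive-address register is cleared: the entry holding the SAN MAC address is skipped even when no pool references it, since that address must stay programmed. The resulting predicate, written out as a sketch:

#include <stdint.h>

static int should_clear_rar(uint32_t mpsar_lo, uint32_t mpsar_hi,
                            uint32_t rar, uint32_t san_mac_rar_index)
{
    /* clear only when no pool references the entry, and never
     * entry 0 or the entry reserved for the SAN MAC address
     */
    return mpsar_lo == 0 && mpsar_hi == 0 &&
           rar != 0 && rar != san_mac_rar_index;
}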
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 5418c69a7463..b4f03748adc0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -4100,6 +4100,8 @@ static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter)
4100 struct ixgbe_hw *hw = &adapter->hw; 4100 struct ixgbe_hw *hw = &adapter->hw;
4101 u32 vlnctrl, i; 4101 u32 vlnctrl, i;
4102 4102
4103 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
4104
4103 switch (hw->mac.type) { 4105 switch (hw->mac.type) {
4104 case ixgbe_mac_82599EB: 4106 case ixgbe_mac_82599EB:
4105 case ixgbe_mac_X540: 4107 case ixgbe_mac_X540:
@@ -4112,8 +4114,7 @@ static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter)
4112 /* fall through */ 4114 /* fall through */
4113 case ixgbe_mac_82598EB: 4115 case ixgbe_mac_82598EB:
4114 /* legacy case, we can just disable VLAN filtering */ 4116 /* legacy case, we can just disable VLAN filtering */
4115 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 4117 vlnctrl &= ~IXGBE_VLNCTRL_VFE;
4116 vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
4117 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); 4118 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
4118 return; 4119 return;
4119 } 4120 }
@@ -4125,6 +4126,10 @@ static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter)
4125 /* Set flag so we don't redo unnecessary work */ 4126 /* Set flag so we don't redo unnecessary work */
4126 adapter->flags2 |= IXGBE_FLAG2_VLAN_PROMISC; 4127 adapter->flags2 |= IXGBE_FLAG2_VLAN_PROMISC;
4127 4128
4129 /* For VMDq and SR-IOV we must leave VLAN filtering enabled */
4130 vlnctrl |= IXGBE_VLNCTRL_VFE;
4131 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
4132
4128 /* Add PF to all active pools */ 4133 /* Add PF to all active pools */
4129 for (i = IXGBE_VLVF_ENTRIES; --i;) { 4134 for (i = IXGBE_VLVF_ENTRIES; --i;) {
4130 u32 reg_offset = IXGBE_VLVFB(i * 2 + VMDQ_P(0) / 32); 4135 u32 reg_offset = IXGBE_VLVFB(i * 2 + VMDQ_P(0) / 32);
@@ -4191,6 +4196,11 @@ static void ixgbe_vlan_promisc_disable(struct ixgbe_adapter *adapter)
4191 struct ixgbe_hw *hw = &adapter->hw; 4196 struct ixgbe_hw *hw = &adapter->hw;
4192 u32 vlnctrl, i; 4197 u32 vlnctrl, i;
4193 4198
4199 /* Set VLAN filtering to enabled */
4200 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
4201 vlnctrl |= IXGBE_VLNCTRL_VFE;
4202 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
4203
4194 switch (hw->mac.type) { 4204 switch (hw->mac.type) {
4195 case ixgbe_mac_82599EB: 4205 case ixgbe_mac_82599EB:
4196 case ixgbe_mac_X540: 4206 case ixgbe_mac_X540:
@@ -4202,10 +4212,6 @@ static void ixgbe_vlan_promisc_disable(struct ixgbe_adapter *adapter)
4202 break; 4212 break;
4203 /* fall through */ 4213 /* fall through */
4204 case ixgbe_mac_82598EB: 4214 case ixgbe_mac_82598EB:
4205 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
4206 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
4207 vlnctrl |= IXGBE_VLNCTRL_VFE;
4208 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
4209 return; 4215 return;
4210 } 4216 }
4211 4217
@@ -8390,12 +8396,14 @@ static int parse_tc_actions(struct ixgbe_adapter *adapter,
8390 struct tcf_exts *exts, u64 *action, u8 *queue) 8396 struct tcf_exts *exts, u64 *action, u8 *queue)
8391{ 8397{
8392 const struct tc_action *a; 8398 const struct tc_action *a;
8399 LIST_HEAD(actions);
8393 int err; 8400 int err;
8394 8401
8395 if (tc_no_actions(exts)) 8402 if (tc_no_actions(exts))
8396 return -EINVAL; 8403 return -EINVAL;
8397 8404
8398 tc_for_each_action(a, exts) { 8405 tcf_exts_to_list(exts, &actions);
8406 list_for_each_entry(a, &actions, list) {
8399 8407
8400 /* Drop action */ 8408 /* Drop action */
8401 if (is_tcf_gact_shot(a)) { 8409 if (is_tcf_gact_shot(a)) {
@@ -9517,6 +9525,7 @@ skip_sriov:
9517 9525
9518 /* copy netdev features into list of user selectable features */ 9526 /* copy netdev features into list of user selectable features */
9519 netdev->hw_features |= netdev->features | 9527 netdev->hw_features |= netdev->features |
9528 NETIF_F_HW_VLAN_CTAG_FILTER |
9520 NETIF_F_HW_VLAN_CTAG_RX | 9529 NETIF_F_HW_VLAN_CTAG_RX |
9521 NETIF_F_HW_VLAN_CTAG_TX | 9530 NETIF_F_HW_VLAN_CTAG_TX |
9522 NETIF_F_RXALL | 9531 NETIF_F_RXALL |
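
Several of the ixgbe_main.c hunks converge on one pattern: VLNCTRL is read once at the top of the function and only the VFE (VLAN filter enable) bit is flipped per case, keeping filtering on for VMDq/SR-IOV while the legacy 82598 path still disables it. A sketch of that read-modify-write; reg_read()/reg_write() are hypothetical MMIO accessors standing in for IXGBE_READ_REG/IXGBE_WRITE_REG, and the bit position is a placeholder.

#include <stdint.h>
#include <stdbool.h>

#define VLNCTRL_VFE (1u << 30) /* placeholder for IXGBE_VLNCTRL_VFE */

extern uint32_t reg_read(unsigned int reg);
extern void reg_write(unsigned int reg, uint32_t val);

static void set_vlan_filtering(unsigned int vlnctrl_reg, bool enable)
{
    uint32_t v = reg_read(vlnctrl_reg); /* single read up front */

    if (enable)
        v |= VLNCTRL_VFE;  /* VMDq/SR-IOV must keep filtering on */
    else
        v &= ~VLNCTRL_VFE; /* legacy 82598: plain promiscuous mode */

    reg_write(vlnctrl_reg, v);
}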
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index b57ae3afb994..d9199151a83e 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -50,6 +50,10 @@ static const struct mtk_ethtool_stats {
50 MTK_ETHTOOL_STAT(rx_flow_control_packets), 50 MTK_ETHTOOL_STAT(rx_flow_control_packets),
51}; 51};
52 52
53static const char * const mtk_clks_source_name[] = {
54 "ethif", "esw", "gp1", "gp2"
55};
56
53void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg) 57void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
54{ 58{
55 __raw_writel(val, eth->base + reg); 59 __raw_writel(val, eth->base + reg);
@@ -245,12 +249,16 @@ static int mtk_phy_connect(struct mtk_mac *mac)
245 case PHY_INTERFACE_MODE_MII: 249 case PHY_INTERFACE_MODE_MII:
246 ge_mode = 1; 250 ge_mode = 1;
247 break; 251 break;
248 case PHY_INTERFACE_MODE_RMII: 252 case PHY_INTERFACE_MODE_REVMII:
249 ge_mode = 2; 253 ge_mode = 2;
250 break; 254 break;
255 case PHY_INTERFACE_MODE_RMII:
256 if (!mac->id)
257 goto err_phy;
258 ge_mode = 3;
259 break;
251 default: 260 default:
252 dev_err(eth->dev, "invalid phy_mode\n"); 261 goto err_phy;
253 return -1;
254 } 262 }
255 263
256 /* put the gmac into the right mode */ 264 /* put the gmac into the right mode */
@@ -263,19 +271,31 @@ static int mtk_phy_connect(struct mtk_mac *mac)
263 mac->phy_dev->autoneg = AUTONEG_ENABLE; 271 mac->phy_dev->autoneg = AUTONEG_ENABLE;
264 mac->phy_dev->speed = 0; 272 mac->phy_dev->speed = 0;
265 mac->phy_dev->duplex = 0; 273 mac->phy_dev->duplex = 0;
274
275 if (of_phy_is_fixed_link(mac->of_node))
276 mac->phy_dev->supported |=
277 SUPPORTED_Pause | SUPPORTED_Asym_Pause;
278
266 mac->phy_dev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause | 279 mac->phy_dev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause |
267 SUPPORTED_Asym_Pause; 280 SUPPORTED_Asym_Pause;
268 mac->phy_dev->advertising = mac->phy_dev->supported | 281 mac->phy_dev->advertising = mac->phy_dev->supported |
269 ADVERTISED_Autoneg; 282 ADVERTISED_Autoneg;
270 phy_start_aneg(mac->phy_dev); 283 phy_start_aneg(mac->phy_dev);
271 284
285 of_node_put(np);
286
272 return 0; 287 return 0;
288
289err_phy:
290 of_node_put(np);
291 dev_err(eth->dev, "invalid phy_mode\n");
292 return -EINVAL;
273} 293}
274 294
275static int mtk_mdio_init(struct mtk_eth *eth) 295static int mtk_mdio_init(struct mtk_eth *eth)
276{ 296{
277 struct device_node *mii_np; 297 struct device_node *mii_np;
278 int err; 298 int ret;
279 299
280 mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus"); 300 mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
281 if (!mii_np) { 301 if (!mii_np) {
@@ -284,13 +304,13 @@ static int mtk_mdio_init(struct mtk_eth *eth)
284 } 304 }
285 305
286 if (!of_device_is_available(mii_np)) { 306 if (!of_device_is_available(mii_np)) {
287 err = 0; 307 ret = -ENODEV;
288 goto err_put_node; 308 goto err_put_node;
289 } 309 }
290 310
291 eth->mii_bus = mdiobus_alloc(); 311 eth->mii_bus = devm_mdiobus_alloc(eth->dev);
292 if (!eth->mii_bus) { 312 if (!eth->mii_bus) {
293 err = -ENOMEM; 313 ret = -ENOMEM;
294 goto err_put_node; 314 goto err_put_node;
295 } 315 }
296 316
@@ -301,19 +321,11 @@ static int mtk_mdio_init(struct mtk_eth *eth)
301 eth->mii_bus->parent = eth->dev; 321 eth->mii_bus->parent = eth->dev;
302 322
303 snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%s", mii_np->name); 323 snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%s", mii_np->name);
304 err = of_mdiobus_register(eth->mii_bus, mii_np); 324 ret = of_mdiobus_register(eth->mii_bus, mii_np);
305 if (err)
306 goto err_free_bus;
307
308 return 0;
309
310err_free_bus:
311 mdiobus_free(eth->mii_bus);
312 325
313err_put_node: 326err_put_node:
314 of_node_put(mii_np); 327 of_node_put(mii_np);
315 eth->mii_bus = NULL; 328 return ret;
316 return err;
317} 329}
318 330
319static void mtk_mdio_cleanup(struct mtk_eth *eth) 331static void mtk_mdio_cleanup(struct mtk_eth *eth)
@@ -322,8 +334,6 @@ static void mtk_mdio_cleanup(struct mtk_eth *eth)
322 return; 334 return;
323 335
324 mdiobus_unregister(eth->mii_bus); 336 mdiobus_unregister(eth->mii_bus);
325 of_node_put(eth->mii_bus->dev.of_node);
326 mdiobus_free(eth->mii_bus);
327} 337}
328 338
329static inline void mtk_irq_disable(struct mtk_eth *eth, u32 mask) 339static inline void mtk_irq_disable(struct mtk_eth *eth, u32 mask)
@@ -542,15 +552,15 @@ static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
542 return &ring->buf[idx]; 552 return &ring->buf[idx];
543} 553}
544 554
545static void mtk_tx_unmap(struct device *dev, struct mtk_tx_buf *tx_buf) 555static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf)
546{ 556{
547 if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) { 557 if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
548 dma_unmap_single(dev, 558 dma_unmap_single(eth->dev,
549 dma_unmap_addr(tx_buf, dma_addr0), 559 dma_unmap_addr(tx_buf, dma_addr0),
550 dma_unmap_len(tx_buf, dma_len0), 560 dma_unmap_len(tx_buf, dma_len0),
551 DMA_TO_DEVICE); 561 DMA_TO_DEVICE);
552 } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) { 562 } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
553 dma_unmap_page(dev, 563 dma_unmap_page(eth->dev,
554 dma_unmap_addr(tx_buf, dma_addr0), 564 dma_unmap_addr(tx_buf, dma_addr0),
555 dma_unmap_len(tx_buf, dma_len0), 565 dma_unmap_len(tx_buf, dma_len0),
556 DMA_TO_DEVICE); 566 DMA_TO_DEVICE);
@@ -572,14 +582,15 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
572 dma_addr_t mapped_addr; 582 dma_addr_t mapped_addr;
573 unsigned int nr_frags; 583 unsigned int nr_frags;
574 int i, n_desc = 1; 584 int i, n_desc = 1;
575 u32 txd4 = 0; 585 u32 txd4 = 0, fport;
576 586
577 itxd = ring->next_free; 587 itxd = ring->next_free;
578 if (itxd == ring->last_free) 588 if (itxd == ring->last_free)
579 return -ENOMEM; 589 return -ENOMEM;
580 590
581 /* set the forward port */ 591 /* set the forward port */
582 txd4 |= (mac->id + 1) << TX_DMA_FPORT_SHIFT; 592 fport = (mac->id + 1) << TX_DMA_FPORT_SHIFT;
593 txd4 |= fport;
583 594
584 tx_buf = mtk_desc_to_tx_buf(ring, itxd); 595 tx_buf = mtk_desc_to_tx_buf(ring, itxd);
585 memset(tx_buf, 0, sizeof(*tx_buf)); 596 memset(tx_buf, 0, sizeof(*tx_buf));
@@ -595,9 +606,9 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
595 if (skb_vlan_tag_present(skb)) 606 if (skb_vlan_tag_present(skb))
596 txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb); 607 txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb);
597 608
598 mapped_addr = dma_map_single(&dev->dev, skb->data, 609 mapped_addr = dma_map_single(eth->dev, skb->data,
599 skb_headlen(skb), DMA_TO_DEVICE); 610 skb_headlen(skb), DMA_TO_DEVICE);
600 if (unlikely(dma_mapping_error(&dev->dev, mapped_addr))) 611 if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
601 return -ENOMEM; 612 return -ENOMEM;
602 613
603 WRITE_ONCE(itxd->txd1, mapped_addr); 614 WRITE_ONCE(itxd->txd1, mapped_addr);
@@ -623,10 +634,10 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
623 634
624 n_desc++; 635 n_desc++;
625 frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN); 636 frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN);
626 mapped_addr = skb_frag_dma_map(&dev->dev, frag, offset, 637 mapped_addr = skb_frag_dma_map(eth->dev, frag, offset,
627 frag_map_size, 638 frag_map_size,
628 DMA_TO_DEVICE); 639 DMA_TO_DEVICE);
629 if (unlikely(dma_mapping_error(&dev->dev, mapped_addr))) 640 if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
630 goto err_dma; 641 goto err_dma;
631 642
632 if (i == nr_frags - 1 && 643 if (i == nr_frags - 1 &&
@@ -637,7 +648,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
637 WRITE_ONCE(txd->txd3, (TX_DMA_SWC | 648 WRITE_ONCE(txd->txd3, (TX_DMA_SWC |
638 TX_DMA_PLEN0(frag_map_size) | 649 TX_DMA_PLEN0(frag_map_size) |
639 last_frag * TX_DMA_LS0)); 650 last_frag * TX_DMA_LS0));
640 WRITE_ONCE(txd->txd4, 0); 651 WRITE_ONCE(txd->txd4, fport);
641 652
642 tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC; 653 tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
643 tx_buf = mtk_desc_to_tx_buf(ring, txd); 654 tx_buf = mtk_desc_to_tx_buf(ring, txd);
@@ -679,7 +690,7 @@ err_dma:
679 tx_buf = mtk_desc_to_tx_buf(ring, itxd); 690 tx_buf = mtk_desc_to_tx_buf(ring, itxd);
680 691
681 /* unmap dma */ 692 /* unmap dma */
682 mtk_tx_unmap(&dev->dev, tx_buf); 693 mtk_tx_unmap(eth, tx_buf);
683 694
684 itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU; 695 itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
685 itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2); 696 itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
@@ -836,11 +847,11 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
836 netdev->stats.rx_dropped++; 847 netdev->stats.rx_dropped++;
837 goto release_desc; 848 goto release_desc;
838 } 849 }
839 dma_addr = dma_map_single(&eth->netdev[mac]->dev, 850 dma_addr = dma_map_single(eth->dev,
840 new_data + NET_SKB_PAD, 851 new_data + NET_SKB_PAD,
841 ring->buf_size, 852 ring->buf_size,
842 DMA_FROM_DEVICE); 853 DMA_FROM_DEVICE);
843 if (unlikely(dma_mapping_error(&netdev->dev, dma_addr))) { 854 if (unlikely(dma_mapping_error(eth->dev, dma_addr))) {
844 skb_free_frag(new_data); 855 skb_free_frag(new_data);
845 netdev->stats.rx_dropped++; 856 netdev->stats.rx_dropped++;
846 goto release_desc; 857 goto release_desc;
@@ -849,13 +860,13 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
849 /* receive data */ 860 /* receive data */
850 skb = build_skb(data, ring->frag_size); 861 skb = build_skb(data, ring->frag_size);
851 if (unlikely(!skb)) { 862 if (unlikely(!skb)) {
852 put_page(virt_to_head_page(new_data)); 863 skb_free_frag(new_data);
853 netdev->stats.rx_dropped++; 864 netdev->stats.rx_dropped++;
854 goto release_desc; 865 goto release_desc;
855 } 866 }
856 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN); 867 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
857 868
858 dma_unmap_single(&netdev->dev, trxd.rxd1, 869 dma_unmap_single(eth->dev, trxd.rxd1,
859 ring->buf_size, DMA_FROM_DEVICE); 870 ring->buf_size, DMA_FROM_DEVICE);
860 pktlen = RX_DMA_GET_PLEN0(trxd.rxd2); 871 pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
861 skb->dev = netdev; 872 skb->dev = netdev;
@@ -937,7 +948,7 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget)
937 done[mac]++; 948 done[mac]++;
938 budget--; 949 budget--;
939 } 950 }
940 mtk_tx_unmap(eth->dev, tx_buf); 951 mtk_tx_unmap(eth, tx_buf);
941 952
942 ring->last_free = desc; 953 ring->last_free = desc;
943 atomic_inc(&ring->free_count); 954 atomic_inc(&ring->free_count);
@@ -1092,7 +1103,7 @@ static void mtk_tx_clean(struct mtk_eth *eth)
1092 1103
1093 if (ring->buf) { 1104 if (ring->buf) {
1094 for (i = 0; i < MTK_DMA_SIZE; i++) 1105 for (i = 0; i < MTK_DMA_SIZE; i++)
1095 mtk_tx_unmap(eth->dev, &ring->buf[i]); 1106 mtk_tx_unmap(eth, &ring->buf[i]);
1096 kfree(ring->buf); 1107 kfree(ring->buf);
1097 ring->buf = NULL; 1108 ring->buf = NULL;
1098 } 1109 }
@@ -1490,10 +1501,7 @@ static void mtk_uninit(struct net_device *dev)
1490 struct mtk_eth *eth = mac->hw; 1501 struct mtk_eth *eth = mac->hw;
1491 1502
1492 phy_disconnect(mac->phy_dev); 1503 phy_disconnect(mac->phy_dev);
1493 mtk_mdio_cleanup(eth);
1494 mtk_irq_disable(eth, ~0); 1504 mtk_irq_disable(eth, ~0);
1495 free_irq(eth->irq[1], dev);
1496 free_irq(eth->irq[2], dev);
1497} 1505}
1498 1506
1499static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 1507static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
@@ -1751,6 +1759,7 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
1751 goto free_netdev; 1759 goto free_netdev;
1752 } 1760 }
1753 spin_lock_init(&mac->hw_stats->stats_lock); 1761 spin_lock_init(&mac->hw_stats->stats_lock);
1762 u64_stats_init(&mac->hw_stats->syncp);
1754 mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET; 1763 mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
1755 1764
1756 SET_NETDEV_DEV(eth->netdev[id], eth->dev); 1765 SET_NETDEV_DEV(eth->netdev[id], eth->dev);
@@ -1796,6 +1805,7 @@ static int mtk_probe(struct platform_device *pdev)
1796 if (!eth) 1805 if (!eth)
1797 return -ENOMEM; 1806 return -ENOMEM;
1798 1807
1808 eth->dev = &pdev->dev;
1799 eth->base = devm_ioremap_resource(&pdev->dev, res); 1809 eth->base = devm_ioremap_resource(&pdev->dev, res);
1800 if (IS_ERR(eth->base)) 1810 if (IS_ERR(eth->base))
1801 return PTR_ERR(eth->base); 1811 return PTR_ERR(eth->base);
@@ -1830,21 +1840,21 @@ static int mtk_probe(struct platform_device *pdev)
1830 return -ENXIO; 1840 return -ENXIO;
1831 } 1841 }
1832 } 1842 }
1843 for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
1844 eth->clks[i] = devm_clk_get(eth->dev,
1845 mtk_clks_source_name[i]);
1846 if (IS_ERR(eth->clks[i])) {
1847 if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER)
1848 return -EPROBE_DEFER;
1849 return -ENODEV;
1850 }
1851 }
1833 1852
1834 eth->clk_ethif = devm_clk_get(&pdev->dev, "ethif"); 1853 clk_prepare_enable(eth->clks[MTK_CLK_ETHIF]);
1835 eth->clk_esw = devm_clk_get(&pdev->dev, "esw"); 1854 clk_prepare_enable(eth->clks[MTK_CLK_ESW]);
1836 eth->clk_gp1 = devm_clk_get(&pdev->dev, "gp1"); 1855 clk_prepare_enable(eth->clks[MTK_CLK_GP1]);
1837 eth->clk_gp2 = devm_clk_get(&pdev->dev, "gp2"); 1856 clk_prepare_enable(eth->clks[MTK_CLK_GP2]);
1838 if (IS_ERR(eth->clk_esw) || IS_ERR(eth->clk_gp1) ||
1839 IS_ERR(eth->clk_gp2) || IS_ERR(eth->clk_ethif))
1840 return -ENODEV;
1841
1842 clk_prepare_enable(eth->clk_ethif);
1843 clk_prepare_enable(eth->clk_esw);
1844 clk_prepare_enable(eth->clk_gp1);
1845 clk_prepare_enable(eth->clk_gp2);
1846 1857
1847 eth->dev = &pdev->dev;
1848 eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE); 1858 eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
1849 INIT_WORK(&eth->pending_work, mtk_pending_work); 1859 INIT_WORK(&eth->pending_work, mtk_pending_work);
1850 1860
@@ -1886,15 +1896,24 @@ err_free_dev:
1886static int mtk_remove(struct platform_device *pdev) 1896static int mtk_remove(struct platform_device *pdev)
1887{ 1897{
1888 struct mtk_eth *eth = platform_get_drvdata(pdev); 1898 struct mtk_eth *eth = platform_get_drvdata(pdev);
1899 int i;
1889 1900
1890 clk_disable_unprepare(eth->clk_ethif); 1901 /* stop all devices to make sure that dma is properly shut down */
1891 clk_disable_unprepare(eth->clk_esw); 1902 for (i = 0; i < MTK_MAC_COUNT; i++) {
1892 clk_disable_unprepare(eth->clk_gp1); 1903 if (!eth->netdev[i])
1893 clk_disable_unprepare(eth->clk_gp2); 1904 continue;
1905 mtk_stop(eth->netdev[i]);
1906 }
1907
1908 clk_disable_unprepare(eth->clks[MTK_CLK_ETHIF]);
1909 clk_disable_unprepare(eth->clks[MTK_CLK_ESW]);
1910 clk_disable_unprepare(eth->clks[MTK_CLK_GP1]);
1911 clk_disable_unprepare(eth->clks[MTK_CLK_GP2]);
1894 1912
1895 netif_napi_del(&eth->tx_napi); 1913 netif_napi_del(&eth->tx_napi);
1896 netif_napi_del(&eth->rx_napi); 1914 netif_napi_del(&eth->rx_napi);
1897 mtk_cleanup(eth); 1915 mtk_cleanup(eth);
1916 mtk_mdio_cleanup(eth);
1898 platform_set_drvdata(pdev, NULL); 1917 platform_set_drvdata(pdev, NULL);
1899 1918
1900 return 0; 1919 return 0;
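
Among the mtk_eth_soc.c fixes, the TX mapping change keeps the computed forward-port field in fport and writes it into txd4 of every descriptor; the old code wrote 0 into fragment descriptors. A reduced sketch; the descriptor layout and shift value are illustrative, not the driver's.

#include <stdint.h>

#define TX_DMA_FPORT_SHIFT 25 /* placeholder shift */

struct tx_desc { uint32_t txd4; };

static void fill_descs(struct tx_desc *head, struct tx_desc *frags,
                       int nfrags, unsigned int mac_id)
{
    uint32_t fport = (mac_id + 1) << TX_DMA_FPORT_SHIFT;
    int i;

    head->txd4 = fport;            /* head descriptor */
    for (i = 0; i < nfrags; i++)
        frags[i].txd4 = fport;     /* previously written as 0 */
}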
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index f82e3acb947b..6e1ade7a25c5 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -290,6 +290,17 @@ enum mtk_tx_flags {
290 MTK_TX_FLAGS_PAGE0 = 0x02, 290 MTK_TX_FLAGS_PAGE0 = 0x02,
291}; 291};
292 292
 293/* This enum maps each required clock to its index in the clks array,
 294 * in the order the clocks are listed
 295 */
296enum mtk_clks_map {
297 MTK_CLK_ETHIF,
298 MTK_CLK_ESW,
299 MTK_CLK_GP1,
300 MTK_CLK_GP2,
301 MTK_CLK_MAX
302};
303
293/* struct mtk_tx_buf - This struct holds the pointers to the memory pointed at 304/* struct mtk_tx_buf - This struct holds the pointers to the memory pointed at
294 * by the TX descriptor s 305 * by the TX descriptor s
295 * @skb: The SKB pointer of the packet being sent 306 * @skb: The SKB pointer of the packet being sent
@@ -370,10 +381,7 @@ struct mtk_rx_ring {
370 * @scratch_ring: Newer SoCs need memory for a second HW managed TX ring 381 * @scratch_ring: Newer SoCs need memory for a second HW managed TX ring
371 * @phy_scratch_ring: physical address of scratch_ring 382 * @phy_scratch_ring: physical address of scratch_ring
372 * @scratch_head: The scratch memory that scratch_ring points to. 383 * @scratch_head: The scratch memory that scratch_ring points to.
373 * @clk_ethif: The ethif clock 384 * @clks: clock array for all clocks required
374 * @clk_esw: The switch clock
375 * @clk_gp1: The gmac1 clock
376 * @clk_gp2: The gmac2 clock
377 * @mii_bus: If there is a bus we need to create an instance for it 385 * @mii_bus: If there is a bus we need to create an instance for it
378 * @pending_work: The workqueue used to reset the dma ring 386 * @pending_work: The workqueue used to reset the dma ring
379 */ 387 */
@@ -400,10 +408,8 @@ struct mtk_eth {
400 struct mtk_tx_dma *scratch_ring; 408 struct mtk_tx_dma *scratch_ring;
401 dma_addr_t phy_scratch_ring; 409 dma_addr_t phy_scratch_ring;
402 void *scratch_head; 410 void *scratch_head;
403 struct clk *clk_ethif; 411 struct clk *clks[MTK_CLK_MAX];
404 struct clk *clk_esw; 412
405 struct clk *clk_gp1;
406 struct clk *clk_gp2;
407 struct mii_bus *mii_bus; 413 struct mii_bus *mii_bus;
408 struct work_struct pending_work; 414 struct work_struct pending_work;
409}; 415};
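
The header change replaces four named clk pointers with a clks[] array indexed by the new enum, which lets the probe path collect all clocks in one loop. A sketch of that pattern under the same assumptions (reduced struct, hypothetical names), passing -EPROBE_DEFER through as the driver does:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

enum eth_clks { CLK_ETHIF, CLK_ESW, CLK_GP1, CLK_GP2, CLK_MAX };

static const char * const clk_names[CLK_MAX] = {
    [CLK_ETHIF] = "ethif", [CLK_ESW] = "esw",
    [CLK_GP1]   = "gp1",   [CLK_GP2] = "gp2",
};

struct eth_priv {
    struct clk *clks[CLK_MAX]; /* replaces clk_ethif/clk_esw/clk_gp1/clk_gp2 */
};

static int eth_get_clocks(struct platform_device *pdev, struct eth_priv *p)
{
    int i;

    for (i = 0; i < CLK_MAX; i++) {
        p->clks[i] = devm_clk_get(&pdev->dev, clk_names[i]);
        if (IS_ERR(p->clks[i]))
            return PTR_ERR(p->clks[i]) == -EPROBE_DEFER ?
                -EPROBE_DEFER : -ENODEV;
    }
    return 0;
}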
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
index 99c6bbdff501..b04760a5034b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
@@ -94,7 +94,7 @@ static u8 mlx4_en_dcbnl_getcap(struct net_device *dev, int capid, u8 *cap)
94 *cap = true; 94 *cap = true;
95 break; 95 break;
96 case DCB_CAP_ATTR_DCBX: 96 case DCB_CAP_ATTR_DCBX:
97 *cap = priv->cee_params.dcbx_cap; 97 *cap = priv->dcbx_cap;
98 break; 98 break;
99 case DCB_CAP_ATTR_PFC_TCS: 99 case DCB_CAP_ATTR_PFC_TCS:
100 *cap = 1 << mlx4_max_tc(priv->mdev->dev); 100 *cap = 1 << mlx4_max_tc(priv->mdev->dev);
@@ -111,14 +111,14 @@ static u8 mlx4_en_dcbnl_getpfcstate(struct net_device *netdev)
111{ 111{
112 struct mlx4_en_priv *priv = netdev_priv(netdev); 112 struct mlx4_en_priv *priv = netdev_priv(netdev);
113 113
114 return priv->cee_params.dcb_cfg.pfc_state; 114 return priv->cee_config.pfc_state;
115} 115}
116 116
117static void mlx4_en_dcbnl_setpfcstate(struct net_device *netdev, u8 state) 117static void mlx4_en_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
118{ 118{
119 struct mlx4_en_priv *priv = netdev_priv(netdev); 119 struct mlx4_en_priv *priv = netdev_priv(netdev);
120 120
121 priv->cee_params.dcb_cfg.pfc_state = state; 121 priv->cee_config.pfc_state = state;
122} 122}
123 123
124static void mlx4_en_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority, 124static void mlx4_en_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority,
@@ -126,7 +126,7 @@ static void mlx4_en_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority,
126{ 126{
127 struct mlx4_en_priv *priv = netdev_priv(netdev); 127 struct mlx4_en_priv *priv = netdev_priv(netdev);
128 128
129 *setting = priv->cee_params.dcb_cfg.tc_config[priority].dcb_pfc; 129 *setting = priv->cee_config.dcb_pfc[priority];
130} 130}
131 131
132static void mlx4_en_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority, 132static void mlx4_en_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority,
@@ -134,8 +134,8 @@ static void mlx4_en_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority,
134{ 134{
135 struct mlx4_en_priv *priv = netdev_priv(netdev); 135 struct mlx4_en_priv *priv = netdev_priv(netdev);
136 136
137 priv->cee_params.dcb_cfg.tc_config[priority].dcb_pfc = setting; 137 priv->cee_config.dcb_pfc[priority] = setting;
138 priv->cee_params.dcb_cfg.pfc_state = true; 138 priv->cee_config.pfc_state = true;
139} 139}
140 140
141static int mlx4_en_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num) 141static int mlx4_en_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
@@ -157,13 +157,11 @@ static u8 mlx4_en_dcbnl_set_all(struct net_device *netdev)
157{ 157{
158 struct mlx4_en_priv *priv = netdev_priv(netdev); 158 struct mlx4_en_priv *priv = netdev_priv(netdev);
159 struct mlx4_en_dev *mdev = priv->mdev; 159 struct mlx4_en_dev *mdev = priv->mdev;
160 struct mlx4_en_cee_config *dcb_cfg = &priv->cee_params.dcb_cfg;
161 int err = 0;
162 160
163 if (!(priv->cee_params.dcbx_cap & DCB_CAP_DCBX_VER_CEE)) 161 if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
164 return -EINVAL; 162 return 1;
165 163
166 if (dcb_cfg->pfc_state) { 164 if (priv->cee_config.pfc_state) {
167 int tc; 165 int tc;
168 166
169 priv->prof->rx_pause = 0; 167 priv->prof->rx_pause = 0;
@@ -171,7 +169,7 @@ static u8 mlx4_en_dcbnl_set_all(struct net_device *netdev)
171 for (tc = 0; tc < CEE_DCBX_MAX_PRIO; tc++) { 169 for (tc = 0; tc < CEE_DCBX_MAX_PRIO; tc++) {
172 u8 tc_mask = 1 << tc; 170 u8 tc_mask = 1 << tc;
173 171
174 switch (dcb_cfg->tc_config[tc].dcb_pfc) { 172 switch (priv->cee_config.dcb_pfc[tc]) {
175 case pfc_disabled: 173 case pfc_disabled:
176 priv->prof->tx_ppp &= ~tc_mask; 174 priv->prof->tx_ppp &= ~tc_mask;
177 priv->prof->rx_ppp &= ~tc_mask; 175 priv->prof->rx_ppp &= ~tc_mask;
@@ -199,15 +197,17 @@ static u8 mlx4_en_dcbnl_set_all(struct net_device *netdev)
199 en_dbg(DRV, priv, "Set pfc off\n"); 197 en_dbg(DRV, priv, "Set pfc off\n");
200 } 198 }
201 199
202 err = mlx4_SET_PORT_general(mdev->dev, priv->port, 200 if (mlx4_SET_PORT_general(mdev->dev, priv->port,
203 priv->rx_skb_size + ETH_FCS_LEN, 201 priv->rx_skb_size + ETH_FCS_LEN,
204 priv->prof->tx_pause, 202 priv->prof->tx_pause,
205 priv->prof->tx_ppp, 203 priv->prof->tx_ppp,
206 priv->prof->rx_pause, 204 priv->prof->rx_pause,
207 priv->prof->rx_ppp); 205 priv->prof->rx_ppp)) {
208 if (err)
209 en_err(priv, "Failed setting pause params\n"); 206 en_err(priv, "Failed setting pause params\n");
210 return err; 207 return 1;
208 }
209
210 return 0;
211} 211}
212 212
213static u8 mlx4_en_dcbnl_get_state(struct net_device *dev) 213static u8 mlx4_en_dcbnl_get_state(struct net_device *dev)
@@ -225,7 +225,7 @@ static u8 mlx4_en_dcbnl_set_state(struct net_device *dev, u8 state)
225 struct mlx4_en_priv *priv = netdev_priv(dev); 225 struct mlx4_en_priv *priv = netdev_priv(dev);
226 int num_tcs = 0; 226 int num_tcs = 0;
227 227
228 if (!(priv->cee_params.dcbx_cap & DCB_CAP_DCBX_VER_CEE)) 228 if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
229 return 1; 229 return 1;
230 230
231 if (!!(state) == !!(priv->flags & MLX4_EN_FLAG_DCB_ENABLED)) 231 if (!!(state) == !!(priv->flags & MLX4_EN_FLAG_DCB_ENABLED))
@@ -238,7 +238,10 @@ static u8 mlx4_en_dcbnl_set_state(struct net_device *dev, u8 state)
238 priv->flags &= ~MLX4_EN_FLAG_DCB_ENABLED; 238 priv->flags &= ~MLX4_EN_FLAG_DCB_ENABLED;
239 } 239 }
240 240
241 return mlx4_en_setup_tc(dev, num_tcs); 241 if (mlx4_en_setup_tc(dev, num_tcs))
242 return 1;
243
244 return 0;
242} 245}
243 246
244/* On success returns a non-zero 802.1p user priority bitmap 247/* On success returns a non-zero 802.1p user priority bitmap
@@ -252,7 +255,7 @@ static int mlx4_en_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id)
252 .selector = idtype, 255 .selector = idtype,
253 .protocol = id, 256 .protocol = id,
254 }; 257 };
255 if (!(priv->cee_params.dcbx_cap & DCB_CAP_DCBX_VER_CEE)) 258 if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
256 return 0; 259 return 0;
257 260
258 return dcb_getapp(netdev, &app); 261 return dcb_getapp(netdev, &app);
@@ -264,7 +267,7 @@ static int mlx4_en_dcbnl_setapp(struct net_device *netdev, u8 idtype,
264 struct mlx4_en_priv *priv = netdev_priv(netdev); 267 struct mlx4_en_priv *priv = netdev_priv(netdev);
265 struct dcb_app app; 268 struct dcb_app app;
266 269
267 if (!(priv->cee_params.dcbx_cap & DCB_CAP_DCBX_VER_CEE)) 270 if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
268 return -EINVAL; 271 return -EINVAL;
269 272
270 memset(&app, 0, sizeof(struct dcb_app)); 273 memset(&app, 0, sizeof(struct dcb_app));
@@ -433,7 +436,7 @@ static u8 mlx4_en_dcbnl_getdcbx(struct net_device *dev)
433{ 436{
434 struct mlx4_en_priv *priv = netdev_priv(dev); 437 struct mlx4_en_priv *priv = netdev_priv(dev);
435 438
436 return priv->cee_params.dcbx_cap; 439 return priv->dcbx_cap;
437} 440}
438 441
439static u8 mlx4_en_dcbnl_setdcbx(struct net_device *dev, u8 mode) 442static u8 mlx4_en_dcbnl_setdcbx(struct net_device *dev, u8 mode)
@@ -442,7 +445,7 @@ static u8 mlx4_en_dcbnl_setdcbx(struct net_device *dev, u8 mode)
442 struct ieee_ets ets = {0}; 445 struct ieee_ets ets = {0};
443 struct ieee_pfc pfc = {0}; 446 struct ieee_pfc pfc = {0};
444 447
445 if (mode == priv->cee_params.dcbx_cap) 448 if (mode == priv->dcbx_cap)
446 return 0; 449 return 0;
447 450
448 if ((mode & DCB_CAP_DCBX_LLD_MANAGED) || 451 if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
@@ -451,7 +454,7 @@ static u8 mlx4_en_dcbnl_setdcbx(struct net_device *dev, u8 mode)
451 !(mode & DCB_CAP_DCBX_HOST)) 454 !(mode & DCB_CAP_DCBX_HOST))
452 goto err; 455 goto err;
453 456
454 priv->cee_params.dcbx_cap = mode; 457 priv->dcbx_cap = mode;
455 458
456 ets.ets_cap = IEEE_8021QAZ_MAX_TCS; 459 ets.ets_cap = IEEE_8021QAZ_MAX_TCS;
457 pfc.pfc_cap = IEEE_8021QAZ_MAX_TCS; 460 pfc.pfc_cap = IEEE_8021QAZ_MAX_TCS;
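
The en_dcb_nl.c conversions from -EINVAL and propagated errors to plain 1 follow the legacy CEE dcbnl convention, where these hooks report status as a u8 (0 for success, nonzero for failure) rather than a negative errno. A sketch of the shape mlx4_en_dcbnl_set_all() now has; set_port() is a hypothetical stand-in for mlx4_SET_PORT_general().

typedef unsigned char u8;

extern int set_port(void); /* assumed helper, returns 0 or -errno */

static u8 dcbnl_set_all(int cee_active)
{
    if (!cee_active)
        return 1;  /* was -EINVAL; nonzero u8 means failure here */

    if (set_port())
        return 1;  /* flatten the errno to the u8 status */

    return 0;
}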
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 4198e9bf89d0..fedb829276f4 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -71,10 +71,11 @@ int mlx4_en_setup_tc(struct net_device *dev, u8 up)
71#ifdef CONFIG_MLX4_EN_DCB 71#ifdef CONFIG_MLX4_EN_DCB
72 if (!mlx4_is_slave(priv->mdev->dev)) { 72 if (!mlx4_is_slave(priv->mdev->dev)) {
73 if (up) { 73 if (up) {
74 priv->flags |= MLX4_EN_FLAG_DCB_ENABLED; 74 if (priv->dcbx_cap)
75 priv->flags |= MLX4_EN_FLAG_DCB_ENABLED;
75 } else { 76 } else {
76 priv->flags &= ~MLX4_EN_FLAG_DCB_ENABLED; 77 priv->flags &= ~MLX4_EN_FLAG_DCB_ENABLED;
77 priv->cee_params.dcb_cfg.pfc_state = false; 78 priv->cee_config.pfc_state = false;
78 } 79 }
79 } 80 }
80#endif /* CONFIG_MLX4_EN_DCB */ 81#endif /* CONFIG_MLX4_EN_DCB */
@@ -3048,9 +3049,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
3048 struct mlx4_en_priv *priv; 3049 struct mlx4_en_priv *priv;
3049 int i; 3050 int i;
3050 int err; 3051 int err;
3051#ifdef CONFIG_MLX4_EN_DCB
3052 struct tc_configuration *tc;
3053#endif
3054 3052
3055 dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv), 3053 dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
3056 MAX_TX_RINGS, MAX_RX_RINGS); 3054 MAX_TX_RINGS, MAX_RX_RINGS);
@@ -3117,16 +3115,13 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
3117 priv->msg_enable = MLX4_EN_MSG_LEVEL; 3115 priv->msg_enable = MLX4_EN_MSG_LEVEL;
3118#ifdef CONFIG_MLX4_EN_DCB 3116#ifdef CONFIG_MLX4_EN_DCB
3119 if (!mlx4_is_slave(priv->mdev->dev)) { 3117 if (!mlx4_is_slave(priv->mdev->dev)) {
3120 priv->cee_params.dcbx_cap = DCB_CAP_DCBX_VER_CEE | 3118 priv->dcbx_cap = DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_HOST |
3121 DCB_CAP_DCBX_HOST | 3119 DCB_CAP_DCBX_VER_IEEE;
3122 DCB_CAP_DCBX_VER_IEEE;
3123 priv->flags |= MLX4_EN_DCB_ENABLED; 3120 priv->flags |= MLX4_EN_DCB_ENABLED;
3124 priv->cee_params.dcb_cfg.pfc_state = false; 3121 priv->cee_config.pfc_state = false;
3125 3122
3126 for (i = 0; i < MLX4_EN_NUM_UP; i++) { 3123 for (i = 0; i < MLX4_EN_NUM_UP; i++)
3127 tc = &priv->cee_params.dcb_cfg.tc_config[i]; 3124 priv->cee_config.dcb_pfc[i] = pfc_disabled;
3128 tc->dcb_pfc = pfc_disabled;
3129 }
3130 3125
3131 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) { 3126 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) {
3132 dev->dcbnl_ops = &mlx4_en_dcbnl_ops; 3127 dev->dcbnl_ops = &mlx4_en_dcbnl_ops;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 9df87ca0515a..e2509bba3e7c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -818,7 +818,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
818 real_size = get_real_size(skb, shinfo, dev, &lso_header_size, 818 real_size = get_real_size(skb, shinfo, dev, &lso_header_size,
819 &inline_ok, &fragptr); 819 &inline_ok, &fragptr);
820 if (unlikely(!real_size)) 820 if (unlikely(!real_size))
821 goto tx_drop; 821 goto tx_drop_count;
822 822
823 /* Align descriptor to TXBB size */ 823 /* Align descriptor to TXBB size */
824 desc_size = ALIGN(real_size, TXBB_SIZE); 824 desc_size = ALIGN(real_size, TXBB_SIZE);
@@ -826,7 +826,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
826 if (unlikely(nr_txbb > MAX_DESC_TXBBS)) { 826 if (unlikely(nr_txbb > MAX_DESC_TXBBS)) {
827 if (netif_msg_tx_err(priv)) 827 if (netif_msg_tx_err(priv))
828 en_warn(priv, "Oversized header or SG list\n"); 828 en_warn(priv, "Oversized header or SG list\n");
829 goto tx_drop; 829 goto tx_drop_count;
830 } 830 }
831 831
832 bf_ok = ring->bf_enabled; 832 bf_ok = ring->bf_enabled;
@@ -1071,9 +1071,10 @@ tx_drop_unmap:
1071 PCI_DMA_TODEVICE); 1071 PCI_DMA_TODEVICE);
1072 } 1072 }
1073 1073
1074tx_drop_count:
1075 ring->tx_dropped++;
1074tx_drop: 1076tx_drop:
1075 dev_kfree_skb_any(skb); 1077 dev_kfree_skb_any(skb);
1076 ring->tx_dropped++;
1077 return NETDEV_TX_OK; 1078 return NETDEV_TX_OK;
1078} 1079}
1079 1080
@@ -1106,7 +1107,7 @@ netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_alloc *frame,
1106 goto tx_drop; 1107 goto tx_drop;
1107 1108
1108 if (mlx4_en_is_tx_ring_full(ring)) 1109 if (mlx4_en_is_tx_ring_full(ring))
1109 goto tx_drop; 1110 goto tx_drop_count;
1110 1111
1111 /* fetch ring->cons far ahead before needing it to avoid stall */ 1112 /* fetch ring->cons far ahead before needing it to avoid stall */
1112 ring_cons = READ_ONCE(ring->cons); 1113 ring_cons = READ_ONCE(ring->cons);
@@ -1176,7 +1177,8 @@ netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_alloc *frame,
1176 1177
1177 return NETDEV_TX_OK; 1178 return NETDEV_TX_OK;
1178 1179
1179tx_drop: 1180tx_drop_count:
1180 ring->tx_dropped++; 1181 ring->tx_dropped++;
1182tx_drop:
1181 return NETDEV_TX_BUSY; 1183 return NETDEV_TX_BUSY;
1182} 1184}
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 2c2913dcae98..9099dbd04951 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -482,20 +482,10 @@ enum dcb_pfc_type {
482 pfc_enabled_rx 482 pfc_enabled_rx
483}; 483};
484 484
485struct tc_configuration {
486 enum dcb_pfc_type dcb_pfc;
487};
488
489struct mlx4_en_cee_config { 485struct mlx4_en_cee_config {
490 bool pfc_state; 486 bool pfc_state;
491 struct tc_configuration tc_config[MLX4_EN_NUM_UP]; 487 enum dcb_pfc_type dcb_pfc[MLX4_EN_NUM_UP];
492}; 488};
493
494struct mlx4_en_cee_params {
495 u8 dcbx_cap;
496 struct mlx4_en_cee_config dcb_cfg;
497};
498
499#endif 489#endif
500 490
501struct ethtool_flow_id { 491struct ethtool_flow_id {
@@ -624,7 +614,8 @@ struct mlx4_en_priv {
624 struct ieee_ets ets; 614 struct ieee_ets ets;
625 u16 maxrate[IEEE_8021QAZ_MAX_TCS]; 615 u16 maxrate[IEEE_8021QAZ_MAX_TCS];
626 enum dcbnl_cndd_states cndd_state[IEEE_8021QAZ_MAX_TCS]; 616 enum dcbnl_cndd_states cndd_state[IEEE_8021QAZ_MAX_TCS];
627 struct mlx4_en_cee_params cee_params; 617 struct mlx4_en_cee_config cee_config;
618 u8 dcbx_cap;
628#endif 619#endif
629#ifdef CONFIG_RFS_ACCEL 620#ifdef CONFIG_RFS_ACCEL
630 spinlock_t filters_lock; 621 spinlock_t filters_lock;
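
Across en_netdev.c and mlx4_en.h the CEE bookkeeping is flattened: the tc_configuration wrapper and mlx4_en_cee_params disappear, dcbx_cap moves directly into mlx4_en_priv, and per-user-priority PFC state becomes a plain enum array that probe initializes to pfc_disabled. A reduced sketch of the resulting layout:

#include <stdbool.h>

#define NUM_UP 8 /* placeholder for MLX4_EN_NUM_UP */

enum dcb_pfc_type { pfc_disabled, pfc_enabled_full, pfc_enabled_tx, pfc_enabled_rx };

struct cee_config {
    bool pfc_state;
    enum dcb_pfc_type dcb_pfc[NUM_UP]; /* one entry per user priority */
};

struct priv {
    unsigned char dcbx_cap;   /* lives directly in the priv struct now */
    struct cee_config cee_config;
};

static void init_cee(struct priv *p)
{
    int i;

    p->cee_config.pfc_state = false;
    for (i = 0; i < NUM_UP; i++)
        p->cee_config.dcb_pfc[i] = pfc_disabled;
}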
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 3d2095e5c61c..c5b2064297a1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -52,7 +52,7 @@
52 52
53#define MLX4_FLAG_V_IGNORE_FCS_MASK 0x2 53#define MLX4_FLAG_V_IGNORE_FCS_MASK 0x2
54#define MLX4_IGNORE_FCS_MASK 0x1 54#define MLX4_IGNORE_FCS_MASK 0x1
55#define MLNX4_TX_MAX_NUMBER 8 55#define MLX4_TC_MAX_NUMBER 8
56 56
57void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table) 57void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table)
58{ 58{
@@ -2022,7 +2022,7 @@ int mlx4_max_tc(struct mlx4_dev *dev)
2022 u8 num_tc = dev->caps.max_tc_eth; 2022 u8 num_tc = dev->caps.max_tc_eth;
2023 2023
2024 if (!num_tc) 2024 if (!num_tc)
2025 num_tc = MLNX4_TX_MAX_NUMBER; 2025 num_tc = MLX4_TC_MAX_NUMBER;
2026 2026
2027 return num_tc; 2027 return num_tc;
2028} 2028}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index d6e2a1cae19a..c2ec01a22d55 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -143,13 +143,14 @@ static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx)
143 return cmd->cmd_buf + (idx << cmd->log_stride); 143 return cmd->cmd_buf + (idx << cmd->log_stride);
144} 144}
145 145
146static u8 xor8_buf(void *buf, int len) 146static u8 xor8_buf(void *buf, size_t offset, int len)
147{ 147{
148 u8 *ptr = buf; 148 u8 *ptr = buf;
149 u8 sum = 0; 149 u8 sum = 0;
150 int i; 150 int i;
151 int end = len + offset;
151 152
152 for (i = 0; i < len; i++) 153 for (i = offset; i < end; i++)
153 sum ^= ptr[i]; 154 sum ^= ptr[i];
154 155
155 return sum; 156 return sum;
@@ -157,41 +158,49 @@ static u8 xor8_buf(void *buf, int len)
157 158
158static int verify_block_sig(struct mlx5_cmd_prot_block *block) 159static int verify_block_sig(struct mlx5_cmd_prot_block *block)
159{ 160{
160 if (xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 1) != 0xff) 161 size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0);
162 int xor_len = sizeof(*block) - sizeof(block->data) - 1;
163
164 if (xor8_buf(block, rsvd0_off, xor_len) != 0xff)
161 return -EINVAL; 165 return -EINVAL;
162 166
163 if (xor8_buf(block, sizeof(*block)) != 0xff) 167 if (xor8_buf(block, 0, sizeof(*block)) != 0xff)
164 return -EINVAL; 168 return -EINVAL;
165 169
166 return 0; 170 return 0;
167} 171}
168 172
169static void calc_block_sig(struct mlx5_cmd_prot_block *block, u8 token, 173static void calc_block_sig(struct mlx5_cmd_prot_block *block)
170 int csum)
171{ 174{
172 block->token = token; 175 int ctrl_xor_len = sizeof(*block) - sizeof(block->data) - 2;
173 if (csum) { 176 size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0);
174 block->ctrl_sig = ~xor8_buf(block->rsvd0, sizeof(*block) - 177
175 sizeof(block->data) - 2); 178 block->ctrl_sig = ~xor8_buf(block, rsvd0_off, ctrl_xor_len);
176 block->sig = ~xor8_buf(block, sizeof(*block) - 1); 179 block->sig = ~xor8_buf(block, 0, sizeof(*block) - 1);
177 }
178} 180}
179 181
180static void calc_chain_sig(struct mlx5_cmd_msg *msg, u8 token, int csum) 182static void calc_chain_sig(struct mlx5_cmd_msg *msg)
181{ 183{
182 struct mlx5_cmd_mailbox *next = msg->next; 184 struct mlx5_cmd_mailbox *next = msg->next;
183 185 int size = msg->len;
184 while (next) { 186 int blen = size - min_t(int, sizeof(msg->first.data), size);
185 calc_block_sig(next->buf, token, csum); 187 int n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1)
188 / MLX5_CMD_DATA_BLOCK_SIZE;
189 int i = 0;
190
191 for (i = 0; i < n && next; i++) {
192 calc_block_sig(next->buf);
186 next = next->next; 193 next = next->next;
187 } 194 }
188} 195}
189 196
190static void set_signature(struct mlx5_cmd_work_ent *ent, int csum) 197static void set_signature(struct mlx5_cmd_work_ent *ent, int csum)
191{ 198{
192 ent->lay->sig = ~xor8_buf(ent->lay, sizeof(*ent->lay)); 199 ent->lay->sig = ~xor8_buf(ent->lay, 0, sizeof(*ent->lay));
193 calc_chain_sig(ent->in, ent->token, csum); 200 if (csum) {
194 calc_chain_sig(ent->out, ent->token, csum); 201 calc_chain_sig(ent->in);
202 calc_chain_sig(ent->out);
203 }
195} 204}
196 205
197static void poll_timeout(struct mlx5_cmd_work_ent *ent) 206static void poll_timeout(struct mlx5_cmd_work_ent *ent)
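
The mlx5 signature helpers now take an explicit byte offset, so the same xor8 routine can checksum either a whole block or everything past the data area, and calc_chain_sig() walks a computed block count instead of chasing next pointers to NULL. The invariant itself is unchanged: with the stored complement signature included, a valid region XORs to 0xff. A self-contained sketch:

#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <assert.h>

static uint8_t xor8_buf(const void *buf, size_t offset, size_t len)
{
    const uint8_t *p = buf;
    uint8_t sum = 0;
    size_t i;

    for (i = offset; i < offset + len; i++)
        sum ^= p[i];
    return sum;
}

int main(void)
{
    uint8_t block[16];

    memset(block, 0xab, sizeof(block));

    /* last byte holds the complement of the XOR of all other bytes */
    block[15] = 0;
    block[15] = ~xor8_buf(block, 0, sizeof(block) - 1);

    /* a valid block XORs to 0xff over its full length */
    assert(xor8_buf(block, 0, sizeof(block)) == 0xff);
    return 0;
}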
@@ -222,12 +231,17 @@ static int verify_signature(struct mlx5_cmd_work_ent *ent)
222 struct mlx5_cmd_mailbox *next = ent->out->next; 231 struct mlx5_cmd_mailbox *next = ent->out->next;
223 int err; 232 int err;
224 u8 sig; 233 u8 sig;
234 int size = ent->out->len;
235 int blen = size - min_t(int, sizeof(ent->out->first.data), size);
236 int n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1)
237 / MLX5_CMD_DATA_BLOCK_SIZE;
238 int i = 0;
225 239
226 sig = xor8_buf(ent->lay, sizeof(*ent->lay)); 240 sig = xor8_buf(ent->lay, 0, sizeof(*ent->lay));
227 if (sig != 0xff) 241 if (sig != 0xff)
228 return -EINVAL; 242 return -EINVAL;
229 243
230 while (next) { 244 for (i = 0; i < n && next; i++) {
231 err = verify_block_sig(next->buf); 245 err = verify_block_sig(next->buf);
232 if (err) 246 if (err)
233 return err; 247 return err;
@@ -656,7 +670,6 @@ static void cmd_work_handler(struct work_struct *work)
656 spin_unlock_irqrestore(&cmd->alloc_lock, flags); 670 spin_unlock_irqrestore(&cmd->alloc_lock, flags);
657 } 671 }
658 672
659 ent->token = alloc_token(cmd);
660 cmd->ent_arr[ent->idx] = ent; 673 cmd->ent_arr[ent->idx] = ent;
661 lay = get_inst(cmd, ent->idx); 674 lay = get_inst(cmd, ent->idx);
662 ent->lay = lay; 675 ent->lay = lay;
@@ -766,7 +779,8 @@ static u8 *get_status_ptr(struct mlx5_outbox_hdr *out)
766static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in, 779static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
767 struct mlx5_cmd_msg *out, void *uout, int uout_size, 780 struct mlx5_cmd_msg *out, void *uout, int uout_size,
768 mlx5_cmd_cbk_t callback, 781 mlx5_cmd_cbk_t callback,
769 void *context, int page_queue, u8 *status) 782 void *context, int page_queue, u8 *status,
783 u8 token)
770{ 784{
771 struct mlx5_cmd *cmd = &dev->cmd; 785 struct mlx5_cmd *cmd = &dev->cmd;
772 struct mlx5_cmd_work_ent *ent; 786 struct mlx5_cmd_work_ent *ent;
@@ -783,6 +797,8 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
783 if (IS_ERR(ent)) 797 if (IS_ERR(ent))
784 return PTR_ERR(ent); 798 return PTR_ERR(ent);
785 799
800 ent->token = token;
801
786 if (!callback) 802 if (!callback)
787 init_completion(&ent->done); 803 init_completion(&ent->done);
788 804
@@ -854,7 +870,8 @@ static const struct file_operations fops = {
854 .write = dbg_write, 870 .write = dbg_write,
855}; 871};
856 872
857static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size) 873static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size,
874 u8 token)
858{ 875{
859 struct mlx5_cmd_prot_block *block; 876 struct mlx5_cmd_prot_block *block;
860 struct mlx5_cmd_mailbox *next; 877 struct mlx5_cmd_mailbox *next;
@@ -880,6 +897,7 @@ static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size)
880 memcpy(block->data, from, copy); 897 memcpy(block->data, from, copy);
881 from += copy; 898 from += copy;
882 size -= copy; 899 size -= copy;
900 block->token = token;
883 next = next->next; 901 next = next->next;
884 } 902 }
885 903
@@ -949,7 +967,8 @@ static void free_cmd_box(struct mlx5_core_dev *dev,
949} 967}
950 968
951static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev, 969static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev,
952 gfp_t flags, int size) 970 gfp_t flags, int size,
971 u8 token)
953{ 972{
954 struct mlx5_cmd_mailbox *tmp, *head = NULL; 973 struct mlx5_cmd_mailbox *tmp, *head = NULL;
955 struct mlx5_cmd_prot_block *block; 974 struct mlx5_cmd_prot_block *block;
@@ -978,6 +997,7 @@ static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev,
978 tmp->next = head; 997 tmp->next = head;
979 block->next = cpu_to_be64(tmp->next ? tmp->next->dma : 0); 998 block->next = cpu_to_be64(tmp->next ? tmp->next->dma : 0);
980 block->block_num = cpu_to_be32(n - i - 1); 999 block->block_num = cpu_to_be32(n - i - 1);
1000 block->token = token;
981 head = tmp; 1001 head = tmp;
982 } 1002 }
983 msg->next = head; 1003 msg->next = head;
@@ -1352,7 +1372,7 @@ static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
1352 } 1372 }
1353 1373
1354 if (IS_ERR(msg)) 1374 if (IS_ERR(msg))
1355 msg = mlx5_alloc_cmd_msg(dev, gfp, in_size); 1375 msg = mlx5_alloc_cmd_msg(dev, gfp, in_size, 0);
1356 1376
1357 return msg; 1377 return msg;
1358} 1378}
@@ -1377,6 +1397,7 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
1377 int err; 1397 int err;
1378 u8 status = 0; 1398 u8 status = 0;
1379 u32 drv_synd; 1399 u32 drv_synd;
1400 u8 token;
1380 1401
1381 if (pci_channel_offline(dev->pdev) || 1402 if (pci_channel_offline(dev->pdev) ||
1382 dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) { 1403 dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
@@ -1395,20 +1416,22 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
1395 return err; 1416 return err;
1396 } 1417 }
1397 1418
1398 err = mlx5_copy_to_msg(inb, in, in_size); 1419 token = alloc_token(&dev->cmd);
1420
1421 err = mlx5_copy_to_msg(inb, in, in_size, token);
1399 if (err) { 1422 if (err) {
1400 mlx5_core_warn(dev, "err %d\n", err); 1423 mlx5_core_warn(dev, "err %d\n", err);
1401 goto out_in; 1424 goto out_in;
1402 } 1425 }
1403 1426
1404 outb = mlx5_alloc_cmd_msg(dev, gfp, out_size); 1427 outb = mlx5_alloc_cmd_msg(dev, gfp, out_size, token);
1405 if (IS_ERR(outb)) { 1428 if (IS_ERR(outb)) {
1406 err = PTR_ERR(outb); 1429 err = PTR_ERR(outb);
1407 goto out_in; 1430 goto out_in;
1408 } 1431 }
1409 1432
1410 err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context, 1433 err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context,
1411 pages_queue, &status); 1434 pages_queue, &status, token);
1412 if (err) 1435 if (err)
1413 goto out_out; 1436 goto out_out;
1414 1437
@@ -1476,7 +1499,7 @@ static int create_msg_cache(struct mlx5_core_dev *dev)
1476 INIT_LIST_HEAD(&cmd->cache.med.head); 1499 INIT_LIST_HEAD(&cmd->cache.med.head);
1477 1500
1478 for (i = 0; i < NUM_LONG_LISTS; i++) { 1501 for (i = 0; i < NUM_LONG_LISTS; i++) {
1479 msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE); 1502 msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE, 0);
1480 if (IS_ERR(msg)) { 1503 if (IS_ERR(msg)) {
1481 err = PTR_ERR(msg); 1504 err = PTR_ERR(msg);
1482 goto ex_err; 1505 goto ex_err;
@@ -1486,7 +1509,7 @@ static int create_msg_cache(struct mlx5_core_dev *dev)
1486 } 1509 }
1487 1510
1488 for (i = 0; i < NUM_MED_LISTS; i++) { 1511 for (i = 0; i < NUM_MED_LISTS; i++) {
1489 msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE); 1512 msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE, 0);
1490 if (IS_ERR(msg)) { 1513 if (IS_ERR(msg)) {
1491 err = PTR_ERR(msg); 1514 err = PTR_ERR(msg);
1492 goto ex_err; 1515 goto ex_err;
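
The cmd.c changes above do two things: xor8_buf() gains an offset parameter so each signature can be computed over a chosen byte range of the mailbox block, and the command token is now allocated once in cmd_exec() and stamped into every mailbox block when the message is built, instead of being set per-block while signing. For reference, a minimal standalone sketch of the xor8 signing scheme; the 16-byte block with a trailing sig byte is an assumption for illustration, not the real struct mlx5_cmd_prot_block layout:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint8_t xor8_buf(void *buf, size_t offset, int len)
{
	uint8_t *ptr = buf;
	uint8_t sum = 0;
	int end = len + offset;

	for (int i = offset; i < end; i++)
		sum ^= ptr[i];
	return sum;
}

int main(void)
{
	uint8_t block[16];

	memset(block, 0xab, sizeof(block));
	/* sign everything except the final sig byte */
	block[15] = ~xor8_buf(block, 0, sizeof(block) - 1);

	/* a correctly signed block XORs to 0xff over its full length,
	 * which is exactly what verify_block_sig() checks */
	printf("verify: %s\n",
	       xor8_buf(block, 0, sizeof(block)) == 0xff ? "ok" : "bad");
	return 0;
}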
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 1b495efa7490..bf722aa88cf0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -73,8 +73,12 @@
 #define MLX5_MPWRQ_PAGES_PER_WQE		BIT(MLX5_MPWRQ_WQE_PAGE_ORDER)
 #define MLX5_MPWRQ_STRIDES_PER_PAGE		(MLX5_MPWRQ_NUM_STRIDES >> \
 						 MLX5_MPWRQ_WQE_PAGE_ORDER)
-#define MLX5_CHANNEL_MAX_NUM_MTTS (ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8) * \
-				   BIT(MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW))
+
+#define MLX5_MTT_OCTW(npages) (ALIGN(npages, 8) / 2)
+#define MLX5E_REQUIRED_MTTS(rqs, wqes)\
+	(rqs * wqes * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8))
+#define MLX5E_VALID_NUM_MTTS(num_mtts) (MLX5_MTT_OCTW(num_mtts) <= U16_MAX)
+
 #define MLX5_UMR_ALIGN				(2048)
 #define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD	(128)
 
@@ -219,9 +223,8 @@ struct mlx5e_tstamp {
 };
 
 enum {
-	MLX5E_RQ_STATE_POST_WQES_ENABLE,
+	MLX5E_RQ_STATE_FLUSH,
 	MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS,
-	MLX5E_RQ_STATE_FLUSH_TIMEOUT,
 	MLX5E_RQ_STATE_AM,
 };
 
@@ -304,6 +307,7 @@ struct mlx5e_rq {
 
 	unsigned long state;
 	int ix;
+	u32 mpwqe_mtt_offset;
 
 	struct mlx5e_rx_am am; /* Adaptive Moderation */
 
@@ -365,9 +369,8 @@ struct mlx5e_sq_dma {
 };
 
 enum {
-	MLX5E_SQ_STATE_WAKE_TXQ_ENABLE,
+	MLX5E_SQ_STATE_FLUSH,
 	MLX5E_SQ_STATE_BF_ENABLE,
-	MLX5E_SQ_STATE_TX_TIMEOUT,
 };
 
 struct mlx5e_ico_wqe_info {
@@ -698,7 +701,6 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget);
 bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
 int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
 void mlx5e_free_tx_descs(struct mlx5e_sq *sq);
-void mlx5e_free_rx_descs(struct mlx5e_rq *rq);
 
 void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
 void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
@@ -814,11 +816,6 @@ static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
 			    MLX5E_MAX_NUM_CHANNELS);
 }
 
-static inline int mlx5e_get_mtt_octw(int npages)
-{
-	return ALIGN(npages, 8) / 2;
-}
-
 extern const struct ethtool_ops mlx5e_ethtool_ops;
 #ifdef CONFIG_MLX5_CORE_EN_DCB
 extern const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops;
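
The en.h change above replaces the fixed per-channel MTT constant with an explicit budget: MLX5E_REQUIRED_MTTS() computes how many memory-translation-table entries a set of striding RQs needs, and MLX5E_VALID_NUM_MTTS() checks that the resulting octword count still fits a 16-bit UMR size field. A standalone model of that arithmetic, using an assumed 64 pages per WQE (the real value is derived from the device parameters):

#include <stdint.h>
#include <stdio.h>

#define ALIGN8(x)		(((x) + 7u) & ~7u)
#define PAGES_PER_WQE		64
#define MTT_OCTW(npages)	(ALIGN8(npages) / 2)
#define REQUIRED_MTTS(rqs, wqes) ((rqs) * (wqes) * ALIGN8(PAGES_PER_WQE))
#define VALID_NUM_MTTS(n)	(MTT_OCTW(n) <= UINT16_MAX)

int main(void)
{
	unsigned int channels = 32, wqes_per_rq = 1u << 10;
	unsigned int mtts = REQUIRED_MTTS(channels, wqes_per_rq);

	/* 32 channels * 1024 WQEs * 64 pages = 2097152 MTTs, i.e.
	 * 1048576 octwords: too big for a 16-bit field, so the
	 * ethtool handlers must reject such a configuration */
	printf("mtts=%u octw=%u valid=%d\n",
	       mtts, MTT_OCTW(mtts), VALID_NUM_MTTS(mtts));
	return 0;
}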
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
index 673043ccd76c..9cce153e1035 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
@@ -139,7 +139,7 @@ int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5_core_dev *mdev)
 	struct mlx5e_tir *tir;
 	void *in;
 	int inlen;
-	int err;
+	int err = 0;
 
 	inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
 	in = mlx5_vzalloc(inlen);
@@ -151,10 +151,11 @@ int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5_core_dev *mdev)
 	list_for_each_entry(tir, &mdev->mlx5e_res.td.tirs_list, list) {
 		err = mlx5_core_modify_tir(mdev, tir->tirn, in, inlen);
 		if (err)
-			return err;
+			goto out;
 	}
 
+out:
 	kvfree(in);
 
-	return 0;
+	return err;
 }
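
The en_common.c fix converts the early return inside the TIR loop into a goto, so the vzalloc'ed command buffer is freed on the error path too. A minimal userspace sketch of the same single-exit pattern, with calloc()/free() standing in for mlx5_vzalloc()/kvfree():

#include <stdlib.h>

static int modify_all(const int *results, int n)
{
	void *in = calloc(1, 256);	/* stands in for mlx5_vzalloc() */
	int err = 0;

	if (!in)
		return -12;		/* -ENOMEM */

	for (int i = 0; i < n; i++) {
		err = results[i];	/* stands in for the modify call */
		if (err)
			goto out;	/* was "return err": leaked 'in' */
	}
out:
	free(in);
	return err;
}

int main(void)
{
	int good[2] = { 0, 0 }, bad[2] = { 0, -5 };

	/* 0 on the clean run; -5 propagated, with cleanup, on the bad run */
	return (modify_all(good, 2) == 0 && modify_all(bad, 2) == -5) ? 0 : 1;
}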
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index caa9a3ccc3f3..762af16ed021 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -127,29 +127,40 @@ int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)
 	return mlx5_set_port_tc_bw_alloc(mdev, tc_tx_bw);
 }
 
-static int mlx5e_dbcnl_validate_ets(struct ieee_ets *ets)
+static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
+				    struct ieee_ets *ets)
 {
 	int bw_sum = 0;
 	int i;
 
 	/* Validate Priority */
 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
-		if (ets->prio_tc[i] >= MLX5E_MAX_PRIORITY)
+		if (ets->prio_tc[i] >= MLX5E_MAX_PRIORITY) {
+			netdev_err(netdev,
+				   "Failed to validate ETS: priority value greater than max(%d)\n",
+				   MLX5E_MAX_PRIORITY);
 			return -EINVAL;
+		}
 	}
 
 	/* Validate Bandwidth Sum */
 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
 		if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) {
-			if (!ets->tc_tx_bw[i])
+			if (!ets->tc_tx_bw[i]) {
+				netdev_err(netdev,
+					   "Failed to validate ETS: BW 0 is illegal\n");
 				return -EINVAL;
+			}
 
 			bw_sum += ets->tc_tx_bw[i];
 		}
 	}
 
-	if (bw_sum != 0 && bw_sum != 100)
+	if (bw_sum != 0 && bw_sum != 100) {
+		netdev_err(netdev,
+			   "Failed to validate ETS: BW sum is illegal\n");
 		return -EINVAL;
+	}
 	return 0;
 }
 
@@ -159,7 +170,7 @@ static int mlx5e_dcbnl_ieee_setets(struct net_device *netdev,
 	struct mlx5e_priv *priv = netdev_priv(netdev);
 	int err;
 
-	err = mlx5e_dbcnl_validate_ets(ets);
+	err = mlx5e_dbcnl_validate_ets(netdev, ets);
 	if (err)
 		return err;
 
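
The en_dcbnl.c change adds a netdev so each rejection can be logged, but the validation rules themselves are unchanged: every ETS-TSA traffic class must have a nonzero share, and the shares must total exactly 0 or 100. A toy model of those rules (TSA_ETS == 2 is an assumed encoding for this sketch, not the value the kernel uses):

#include <stdio.h>

#define MAX_TCS		8
#define TSA_ETS		2	/* assumed encoding, for the sketch only */

static int validate_ets(const int *tsa, const int *tc_bw)
{
	int bw_sum = 0;

	for (int i = 0; i < MAX_TCS; i++) {
		if (tsa[i] != TSA_ETS)
			continue;	/* strict-priority TCs carry no share */
		if (!tc_bw[i])
			return -22;	/* -EINVAL: BW 0 is illegal */
		bw_sum += tc_bw[i];
	}
	/* all-zero (no ETS TCs) is legal; otherwise shares must total 100 */
	return (bw_sum == 0 || bw_sum == 100) ? 0 : -22;
}

int main(void)
{
	int tsa[MAX_TCS]  = { TSA_ETS, TSA_ETS };
	int good[MAX_TCS] = { 60, 40 };
	int bad[MAX_TCS]  = { 60, 60 };	/* sums to 120 */

	printf("good=%d bad=%d\n",
	       validate_ets(tsa, good), validate_ets(tsa, bad));
	return 0;
}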
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 4a3757e60441..7a346bb2ed00 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -331,7 +331,7 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
 	if (mlx5e_query_global_pause_combined(priv)) {
 		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
 			data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
-							  pport_per_prio_pfc_stats_desc, 0);
+							  pport_per_prio_pfc_stats_desc, i);
 		}
 	}
 
@@ -352,15 +352,61 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
 					  sq_stats_desc, j);
 }
 
+static u32 mlx5e_rx_wqes_to_packets(struct mlx5e_priv *priv, int rq_wq_type,
+				    int num_wqe)
+{
+	int packets_per_wqe;
+	int stride_size;
+	int num_strides;
+	int wqe_size;
+
+	if (rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
+		return num_wqe;
+
+	stride_size = 1 << priv->params.mpwqe_log_stride_sz;
+	num_strides = 1 << priv->params.mpwqe_log_num_strides;
+	wqe_size = stride_size * num_strides;
+
+	packets_per_wqe = wqe_size /
+			  ALIGN(ETH_DATA_LEN, stride_size);
+	return (1 << (order_base_2(num_wqe * packets_per_wqe) - 1));
+}
+
+static u32 mlx5e_packets_to_rx_wqes(struct mlx5e_priv *priv, int rq_wq_type,
+				    int num_packets)
+{
+	int packets_per_wqe;
+	int stride_size;
+	int num_strides;
+	int wqe_size;
+	int num_wqes;
+
+	if (rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
+		return num_packets;
+
+	stride_size = 1 << priv->params.mpwqe_log_stride_sz;
+	num_strides = 1 << priv->params.mpwqe_log_num_strides;
+	wqe_size = stride_size * num_strides;
+
+	num_packets = (1 << order_base_2(num_packets));
+
+	packets_per_wqe = wqe_size /
+			  ALIGN(ETH_DATA_LEN, stride_size);
+	num_wqes = DIV_ROUND_UP(num_packets, packets_per_wqe);
+	return 1 << (order_base_2(num_wqes));
+}
+
 static void mlx5e_get_ringparam(struct net_device *dev,
 				struct ethtool_ringparam *param)
 {
 	struct mlx5e_priv *priv = netdev_priv(dev);
 	int rq_wq_type = priv->params.rq_wq_type;
 
-	param->rx_max_pending = 1 << mlx5_max_log_rq_size(rq_wq_type);
+	param->rx_max_pending = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
+							 1 << mlx5_max_log_rq_size(rq_wq_type));
 	param->tx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE;
-	param->rx_pending     = 1 << priv->params.log_rq_size;
+	param->rx_pending = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
						     1 << priv->params.log_rq_size);
 	param->tx_pending     = 1 << priv->params.log_sq_size;
 }
 
@@ -370,9 +416,13 @@ static int mlx5e_set_ringparam(struct net_device *dev,
 	struct mlx5e_priv *priv = netdev_priv(dev);
 	bool was_opened;
 	int rq_wq_type = priv->params.rq_wq_type;
+	u32 rx_pending_wqes;
+	u32 min_rq_size;
+	u32 max_rq_size;
 	u16 min_rx_wqes;
 	u8 log_rq_size;
 	u8 log_sq_size;
+	u32 num_mtts;
 	int err = 0;
 
 	if (param->rx_jumbo_pending) {
@@ -385,18 +435,36 @@ static int mlx5e_set_ringparam(struct net_device *dev,
 			    __func__);
 		return -EINVAL;
 	}
-	if (param->rx_pending < (1 << mlx5_min_log_rq_size(rq_wq_type))) {
+
+	min_rq_size = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
+					       1 << mlx5_min_log_rq_size(rq_wq_type));
+	max_rq_size = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
+					       1 << mlx5_max_log_rq_size(rq_wq_type));
+	rx_pending_wqes = mlx5e_packets_to_rx_wqes(priv, rq_wq_type,
+						   param->rx_pending);
+
+	if (param->rx_pending < min_rq_size) {
 		netdev_info(dev, "%s: rx_pending (%d) < min (%d)\n",
 			    __func__, param->rx_pending,
-			    1 << mlx5_min_log_rq_size(rq_wq_type));
+			    min_rq_size);
 		return -EINVAL;
 	}
-	if (param->rx_pending > (1 << mlx5_max_log_rq_size(rq_wq_type))) {
+	if (param->rx_pending > max_rq_size) {
 		netdev_info(dev, "%s: rx_pending (%d) > max (%d)\n",
 			    __func__, param->rx_pending,
-			    1 << mlx5_max_log_rq_size(rq_wq_type));
+			    max_rq_size);
 		return -EINVAL;
 	}
+
+	num_mtts = MLX5E_REQUIRED_MTTS(priv->params.num_channels,
+				       rx_pending_wqes);
+	if (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
+	    !MLX5E_VALID_NUM_MTTS(num_mtts)) {
+		netdev_info(dev, "%s: rx_pending (%d) request can't be satisfied, try to reduce.\n",
+			    __func__, param->rx_pending);
+		return -EINVAL;
+	}
+
 	if (param->tx_pending < (1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)) {
 		netdev_info(dev, "%s: tx_pending (%d) < min (%d)\n",
 			    __func__, param->tx_pending,
@@ -410,9 +478,9 @@ static int mlx5e_set_ringparam(struct net_device *dev,
 		return -EINVAL;
 	}
 
-	log_rq_size = order_base_2(param->rx_pending);
+	log_rq_size = order_base_2(rx_pending_wqes);
 	log_sq_size = order_base_2(param->tx_pending);
-	min_rx_wqes = mlx5_min_rx_wqes(rq_wq_type, param->rx_pending);
+	min_rx_wqes = mlx5_min_rx_wqes(rq_wq_type, rx_pending_wqes);
 
 	if (log_rq_size == priv->params.log_rq_size &&
 	    log_sq_size == priv->params.log_sq_size &&
@@ -454,6 +522,7 @@ static int mlx5e_set_channels(struct net_device *dev,
 	unsigned int count = ch->combined_count;
 	bool arfs_enabled;
 	bool was_opened;
+	u32 num_mtts;
 	int err = 0;
 
 	if (!count) {
@@ -472,6 +541,14 @@ static int mlx5e_set_channels(struct net_device *dev,
 		return -EINVAL;
 	}
 
+	num_mtts = MLX5E_REQUIRED_MTTS(count, BIT(priv->params.log_rq_size));
+	if (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
+	    !MLX5E_VALID_NUM_MTTS(num_mtts)) {
+		netdev_info(dev, "%s: rx count (%d) request can't be satisfied, try to reduce.\n",
+			    __func__, count);
+		return -EINVAL;
+	}
+
 	if (priv->params.num_channels == count)
 		return 0;
 
@@ -582,9 +659,10 @@ out:
 static void ptys2ethtool_supported_link(unsigned long *supported_modes,
 					u32 eth_proto_cap)
 {
+	unsigned long proto_cap = eth_proto_cap;
 	int proto;
 
-	for_each_set_bit(proto, (unsigned long *)&eth_proto_cap, MLX5E_LINK_MODES_NUMBER)
+	for_each_set_bit(proto, &proto_cap, MLX5E_LINK_MODES_NUMBER)
 		bitmap_or(supported_modes, supported_modes,
 			  ptys2ethtool_table[proto].supported,
 			  __ETHTOOL_LINK_MODE_MASK_NBITS);
@@ -593,9 +671,10 @@ static void ptys2ethtool_supported_link(unsigned long *supported_modes,
 static void ptys2ethtool_adver_link(unsigned long *advertising_modes,
 				    u32 eth_proto_cap)
 {
+	unsigned long proto_cap = eth_proto_cap;
 	int proto;
 
-	for_each_set_bit(proto, (unsigned long *)&eth_proto_cap, MLX5E_LINK_MODES_NUMBER)
+	for_each_set_bit(proto, &proto_cap, MLX5E_LINK_MODES_NUMBER)
 		bitmap_or(advertising_modes, advertising_modes,
 			  ptys2ethtool_table[proto].advertised,
 			  __ETHTOOL_LINK_MODE_MASK_NBITS);
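
The two helpers added above exist because a striding-RQ WQE holds many packets, so ethtool ring sizes must be converted between WQE units (what the hardware queue is sized in) and packet units (what users reason about). A standalone sketch of the conversion math with assumed parameters (2KB strides, 1024 strides per WQE; the driver reads these from priv->params):

#include <stdio.h>

#define ETH_DATA_LEN	1500
#define ALIGN_UP(x, a)	((((x) + (a) - 1) / (a)) * (a))

static unsigned int order_base_2(unsigned int n)	/* ceil(log2(n)) */
{
	unsigned int order = 0;

	while ((1u << order) < n)
		order++;
	return order;
}

int main(void)
{
	unsigned int stride_size = 2048, num_strides = 1024;
	unsigned int wqe_size = stride_size * num_strides;
	unsigned int packets_per_wqe = wqe_size / ALIGN_UP(ETH_DATA_LEN, stride_size);
	unsigned int num_wqe = 64;

	/* 64 WQEs * 1024 packets/WQE, halved to the nearest power of two,
	 * is what mlx5e_rx_wqes_to_packets() would report to ethtool */
	printf("packets_per_wqe=%u ring=%u packets\n", packets_per_wqe,
	       1u << (order_base_2(num_wqe * packets_per_wqe) - 1));
	return 0;
}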
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 870bea37c57c..2459c7f3db8d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -39,13 +39,6 @@
 #include "eswitch.h"
 #include "vxlan.h"
 
-enum {
-	MLX5_EN_QP_FLUSH_TIMEOUT_MS	= 5000,
-	MLX5_EN_QP_FLUSH_MSLEEP_QUANT	= 20,
-	MLX5_EN_QP_FLUSH_MAX_ITER	= MLX5_EN_QP_FLUSH_TIMEOUT_MS /
-					  MLX5_EN_QP_FLUSH_MSLEEP_QUANT,
-};
-
 struct mlx5e_rq_param {
 	u32 rqc[MLX5_ST_SZ_DW(rqc)];
 	struct mlx5_wq_param wq;
@@ -162,6 +155,7 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
 		s->tx_queue_stopped	+= sq_stats->stopped;
 		s->tx_queue_wake	+= sq_stats->wake;
 		s->tx_queue_dropped	+= sq_stats->dropped;
+		s->tx_xmit_more		+= sq_stats->xmit_more;
 		s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
 		tx_offload_none		+= sq_stats->csum_none;
 	}
@@ -340,6 +334,9 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
 		rq->alloc_wqe = mlx5e_alloc_rx_mpwqe;
 		rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
 
+		rq->mpwqe_mtt_offset = c->ix *
+			MLX5E_REQUIRED_MTTS(1, BIT(priv->params.log_rq_size));
+
 		rq->mpwqe_stride_sz = BIT(priv->params.mpwqe_log_stride_sz);
 		rq->mpwqe_num_strides = BIT(priv->params.mpwqe_log_num_strides);
 		rq->wqe_sz = rq->mpwqe_stride_sz * rq->mpwqe_num_strides;
@@ -428,7 +425,6 @@ static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
 
 	MLX5_SET(rqc,  rqc, cqn,		rq->cq.mcq.cqn);
 	MLX5_SET(rqc,  rqc, state,		MLX5_RQC_STATE_RST);
-	MLX5_SET(rqc,  rqc, flush_in_error_en,	1);
 	MLX5_SET(rqc,  rqc, vsd, priv->params.vlan_strip_disable);
 	MLX5_SET(wq,   wq,  log_wq_pg_sz,	rq->wq_ctrl.buf.page_shift -
 						MLX5_ADAPTER_PAGE_SHIFT);
@@ -525,6 +521,27 @@ static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
 	return -ETIMEDOUT;
 }
 
+static void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
+{
+	struct mlx5_wq_ll *wq = &rq->wq;
+	struct mlx5e_rx_wqe *wqe;
+	__be16 wqe_ix_be;
+	u16 wqe_ix;
+
+	/* UMR WQE (if in progress) is always at wq->head */
+	if (test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state))
+		mlx5e_free_rx_fragmented_mpwqe(rq, &rq->wqe_info[wq->head]);
+
+	while (!mlx5_wq_ll_is_empty(wq)) {
+		wqe_ix_be = *wq->tail_next;
+		wqe_ix    = be16_to_cpu(wqe_ix_be);
+		wqe       = mlx5_wq_ll_get_wqe(&rq->wq, wqe_ix);
+		rq->dealloc_wqe(rq, wqe_ix);
+		mlx5_wq_ll_pop(&rq->wq, wqe_ix_be,
+			       &wqe->next.next_wqe_index);
+	}
+}
+
 static int mlx5e_open_rq(struct mlx5e_channel *c,
 			 struct mlx5e_rq_param *param,
 			 struct mlx5e_rq *rq)
@@ -548,8 +565,6 @@ static int mlx5e_open_rq(struct mlx5e_channel *c,
 	if (param->am_enabled)
 		set_bit(MLX5E_RQ_STATE_AM, &c->rq.state);
 
-	set_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
-
 	sq->ico_wqe_info[pi].opcode     = MLX5_OPCODE_NOP;
 	sq->ico_wqe_info[pi].num_wqebbs = 1;
 	mlx5e_send_nop(sq, true); /* trigger mlx5e_post_rx_wqes() */
@@ -566,23 +581,8 @@ err_destroy_rq:
 
 static void mlx5e_close_rq(struct mlx5e_rq *rq)
 {
-	int tout = 0;
-	int err;
-
-	clear_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
+	set_bit(MLX5E_RQ_STATE_FLUSH, &rq->state);
 	napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */
-
-	err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
-	while (!mlx5_wq_ll_is_empty(&rq->wq) && !err &&
-	       tout++ < MLX5_EN_QP_FLUSH_MAX_ITER)
-		msleep(MLX5_EN_QP_FLUSH_MSLEEP_QUANT);
-
-	if (err || tout == MLX5_EN_QP_FLUSH_MAX_ITER)
-		set_bit(MLX5E_RQ_STATE_FLUSH_TIMEOUT, &rq->state);
-
-	/* avoid destroying rq before mlx5e_poll_rx_cq() is done with it */
-	napi_synchronize(&rq->channel->napi);
-
 	cancel_work_sync(&rq->am.work);
 
 	mlx5e_disable_rq(rq);
@@ -821,7 +821,6 @@ static int mlx5e_open_sq(struct mlx5e_channel *c,
 		goto err_disable_sq;
 
 	if (sq->txq) {
-		set_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
 		netdev_tx_reset_queue(sq->txq);
 		netif_tx_start_queue(sq->txq);
 	}
@@ -845,38 +844,20 @@ static inline void netif_tx_disable_queue(struct netdev_queue *txq)
 
 static void mlx5e_close_sq(struct mlx5e_sq *sq)
 {
-	int tout = 0;
-	int err;
+	set_bit(MLX5E_SQ_STATE_FLUSH, &sq->state);
+	/* prevent netif_tx_wake_queue */
+	napi_synchronize(&sq->channel->napi);
 
 	if (sq->txq) {
-		clear_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
-		/* prevent netif_tx_wake_queue */
-		napi_synchronize(&sq->channel->napi);
 		netif_tx_disable_queue(sq->txq);
 
-		/* ensure hw is notified of all pending wqes */
+		/* last doorbell out, godspeed .. */
 		if (mlx5e_sq_has_room_for(sq, 1))
 			mlx5e_send_nop(sq, true);
-
-		err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY,
-				      MLX5_SQC_STATE_ERR, false, 0);
-		if (err)
-			set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state);
-	}
-
-	/* wait till sq is empty, unless a TX timeout occurred on this SQ */
-	while (sq->cc != sq->pc &&
-	       !test_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state)) {
-		msleep(MLX5_EN_QP_FLUSH_MSLEEP_QUANT);
-		if (tout++ > MLX5_EN_QP_FLUSH_MAX_ITER)
-			set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state);
 	}
 
-	/* avoid destroying sq before mlx5e_poll_tx_cq() is done with it */
-	napi_synchronize(&sq->channel->napi);
-
-	mlx5e_free_tx_descs(sq);
 	mlx5e_disable_sq(sq);
+	mlx5e_free_tx_descs(sq);
 	mlx5e_destroy_sq(sq);
 }
 
@@ -1826,10 +1807,6 @@ int mlx5e_open_locked(struct net_device *netdev)
 	netif_set_real_num_tx_queues(netdev, num_txqs);
 	netif_set_real_num_rx_queues(netdev, priv->params.num_channels);
 
-	err = mlx5e_set_dev_port_mtu(netdev);
-	if (err)
-		goto err_clear_state_opened_flag;
-
 	err = mlx5e_open_channels(priv);
 	if (err) {
 		netdev_err(netdev, "%s: mlx5e_open_channels failed, %d\n",
@@ -2573,6 +2550,7 @@ static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
 	u16 max_mtu;
 	u16 min_mtu;
 	int err = 0;
+	bool reset;
 
 	mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
 
@@ -2588,13 +2566,18 @@ static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
 
 	mutex_lock(&priv->state_lock);
 
+	reset = !priv->params.lro_en &&
+		(priv->params.rq_wq_type !=
+		 MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ);
+
 	was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
-	if (was_opened)
+	if (was_opened && reset)
 		mlx5e_close_locked(netdev);
 
 	netdev->mtu = new_mtu;
+	mlx5e_set_dev_port_mtu(netdev);
 
-	if (was_opened)
+	if (was_opened && reset)
 		err = mlx5e_open_locked(netdev);
 
 	mutex_unlock(&priv->state_lock);
@@ -2794,7 +2777,7 @@ static void mlx5e_tx_timeout(struct net_device *dev)
 		if (!netif_xmit_stopped(netdev_get_tx_queue(dev, i)))
 			continue;
 		sched_work = true;
-		set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state);
+		set_bit(MLX5E_SQ_STATE_FLUSH, &sq->state);
 		netdev_err(dev, "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x\n",
 			   i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc);
 	}
@@ -3231,8 +3214,8 @@ static int mlx5e_create_umr_mkey(struct mlx5e_priv *priv)
 	struct mlx5_create_mkey_mbox_in *in;
 	struct mlx5_mkey_seg *mkc;
 	int inlen = sizeof(*in);
-	u64 npages =
-		priv->profile->max_nch(mdev) * MLX5_CHANNEL_MAX_NUM_MTTS;
+	u64 npages = MLX5E_REQUIRED_MTTS(priv->profile->max_nch(mdev),
+					 BIT(MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW));
 	int err;
 
 	in = mlx5_vzalloc(inlen);
@@ -3246,10 +3229,12 @@ static int mlx5e_create_umr_mkey(struct mlx5e_priv *priv)
 		     MLX5_PERM_LOCAL_WRITE |
 		     MLX5_ACCESS_MODE_MTT;
 
+	npages = min_t(u32, ALIGN(U16_MAX, 4) * 2, npages);
+
 	mkc->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
 	mkc->flags_pd = cpu_to_be32(mdev->mlx5e_res.pdn);
 	mkc->len = cpu_to_be64(npages << PAGE_SHIFT);
-	mkc->xlt_oct_size = cpu_to_be32(mlx5e_get_mtt_octw(npages));
+	mkc->xlt_oct_size = cpu_to_be32(MLX5_MTT_OCTW(npages));
 	mkc->log2_page_size = PAGE_SHIFT;
 
 	err = mlx5_core_create_mkey(mdev, &priv->umr_mkey, in, inlen, NULL,
@@ -3385,6 +3370,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
 	queue_work(priv->wq, &priv->set_rx_mode_work);
 
 	if (MLX5_CAP_GEN(mdev, vport_group_manager)) {
+		mlx5_query_nic_vport_mac_address(mdev, 0, rep.hw_id);
 		rep.load = mlx5e_nic_rep_load;
 		rep.unload = mlx5e_nic_rep_unload;
 		rep.vport = 0;
@@ -3463,6 +3449,8 @@ void *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
 
 	mlx5e_init_l2_addr(priv);
 
+	mlx5e_set_dev_port_mtu(netdev);
+
 	err = register_netdev(netdev);
 	if (err) {
 		mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
@@ -3501,16 +3489,20 @@ static void mlx5e_register_vport_rep(struct mlx5_core_dev *mdev)
 	struct mlx5_eswitch *esw = mdev->priv.eswitch;
 	int total_vfs = MLX5_TOTAL_VPORTS(mdev);
 	int vport;
+	u8 mac[ETH_ALEN];
 
 	if (!MLX5_CAP_GEN(mdev, vport_group_manager))
 		return;
 
+	mlx5_query_nic_vport_mac_address(mdev, 0, mac);
+
 	for (vport = 1; vport < total_vfs; vport++) {
 		struct mlx5_eswitch_rep rep;
 
 		rep.load = mlx5e_vport_rep_load;
 		rep.unload = mlx5e_vport_rep_unload;
 		rep.vport = vport;
+		ether_addr_copy(rep.hw_id, mac);
 		mlx5_eswitch_register_vport_rep(esw, &rep);
 	}
 }
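
The en_main.c rework above replaces the msleep()-based drain loops in mlx5e_close_rq()/mlx5e_close_sq() with a single FLUSH bit: mark the queue, napi_synchronize() out any concurrent poller, then disable the queue and reclaim descriptors unconditionally. The ordering is the whole point; here is a stub-level sketch of that shape (every body is a stand-in, none of this is driver code):

#include <stdio.h>

struct sq { unsigned long state; unsigned int cc, pc; };

#define SQ_FLUSH_BIT	(1ul << 0)

static void napi_sync(struct sq *sq)  { (void)sq; /* wait out the poller */ }
static void disable_sq(struct sq *sq) { (void)sq; /* HW: take SQ out of RDY */ }
static void free_descs(struct sq *sq) { sq->cc = sq->pc; /* reclaim cc..pc */ }

static void close_sq(struct sq *sq)
{
	sq->state |= SQ_FLUSH_BIT;	/* pollers test the bit and back off */
	napi_sync(sq);			/* nobody is touching the SQ now */
	disable_sq(sq);			/* stop HW before freeing descriptors */
	free_descs(sq);			/* safe: HW and pollers are both done */
}

int main(void)
{
	struct sq sq = { 0, 3, 7 };

	close_sq(&sq);
	printf("flushed=%lu drained=%d\n",
	       sq.state & SQ_FLUSH_BIT, sq.cc == sq.pc);
	return 0;
}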
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 1c7d8b8314bf..134de4a11f1d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -135,17 +135,16 @@ static const struct ethtool_ops mlx5e_rep_ethtool_ops = {
 int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr)
 {
 	struct mlx5e_priv *priv = netdev_priv(dev);
+	struct mlx5_eswitch_rep *rep = priv->ppriv;
 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
-	u8 mac[ETH_ALEN];
 
 	if (esw->mode == SRIOV_NONE)
 		return -EOPNOTSUPP;
 
 	switch (attr->id) {
 	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
-		mlx5_query_nic_vport_mac_address(priv->mdev, 0, mac);
 		attr->u.ppid.id_len = ETH_ALEN;
-		memcpy(&attr->u.ppid.id, &mac, ETH_ALEN);
+		ether_addr_copy(attr->u.ppid.id, rep->hw_id);
 		break;
 	default:
 		return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 9f2a16a507e0..e7c969df3dad 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -324,9 +324,9 @@ mlx5e_copy_skb_header_fragmented_mpwqe(struct device *pdev,
 	}
 }
 
-static u16 mlx5e_get_wqe_mtt_offset(u16 rq_ix, u16 wqe_ix)
+static u32 mlx5e_get_wqe_mtt_offset(struct mlx5e_rq *rq, u16 wqe_ix)
 {
-	return rq_ix * MLX5_CHANNEL_MAX_NUM_MTTS +
+	return rq->mpwqe_mtt_offset +
 	       wqe_ix * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8);
 }
 
@@ -340,7 +340,7 @@ static void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
 	struct mlx5_wqe_data_seg *dseg = &wqe->data;
 	struct mlx5e_mpw_info *wi = &rq->wqe_info[ix];
 	u8 ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS);
-	u16 umr_wqe_mtt_offset = mlx5e_get_wqe_mtt_offset(rq->ix, ix);
+	u32 umr_wqe_mtt_offset = mlx5e_get_wqe_mtt_offset(rq, ix);
 
 	memset(wqe, 0, sizeof(*wqe));
 	cseg->opmod_idx_opcode =
@@ -353,9 +353,9 @@ static void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
 
 	ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN;
 	ucseg->klm_octowords =
-		cpu_to_be16(mlx5e_get_mtt_octw(MLX5_MPWRQ_PAGES_PER_WQE));
+		cpu_to_be16(MLX5_MTT_OCTW(MLX5_MPWRQ_PAGES_PER_WQE));
 	ucseg->bsf_octowords =
-		cpu_to_be16(mlx5e_get_mtt_octw(umr_wqe_mtt_offset));
+		cpu_to_be16(MLX5_MTT_OCTW(umr_wqe_mtt_offset));
 	ucseg->mkey_mask     = cpu_to_be64(MLX5_MKEY_MASK_FREE);
 
 	dseg->lkey = sq->mkey_be;
@@ -423,7 +423,7 @@ static int mlx5e_alloc_rx_fragmented_mpwqe(struct mlx5e_rq *rq,
 {
 	struct mlx5e_mpw_info *wi = &rq->wqe_info[ix];
 	int mtt_sz = mlx5e_get_wqe_mtt_sz();
-	u32 dma_offset = mlx5e_get_wqe_mtt_offset(rq->ix, ix) << PAGE_SHIFT;
+	u64 dma_offset = (u64)mlx5e_get_wqe_mtt_offset(rq, ix) << PAGE_SHIFT;
 	int i;
 
 	wi->umr.dma_info = kmalloc(sizeof(*wi->umr.dma_info) *
@@ -506,6 +506,12 @@ void mlx5e_post_rx_fragmented_mpwqe(struct mlx5e_rq *rq)
 	struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head);
 
 	clear_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state);
+
+	if (unlikely(test_bit(MLX5E_RQ_STATE_FLUSH, &rq->state))) {
+		mlx5e_free_rx_fragmented_mpwqe(rq, &rq->wqe_info[wq->head]);
+		return;
+	}
+
 	mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index));
 	rq->stats.mpwqe_frag++;
 
@@ -595,26 +601,9 @@ void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
 	wi->free_wqe(rq, wi);
 }
 
-void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
-{
-	struct mlx5_wq_ll *wq = &rq->wq;
-	struct mlx5e_rx_wqe *wqe;
-	__be16 wqe_ix_be;
-	u16 wqe_ix;
-
-	while (!mlx5_wq_ll_is_empty(wq)) {
-		wqe_ix_be = *wq->tail_next;
-		wqe_ix    = be16_to_cpu(wqe_ix_be);
-		wqe       = mlx5_wq_ll_get_wqe(&rq->wq, wqe_ix);
-		rq->dealloc_wqe(rq, wqe_ix);
-		mlx5_wq_ll_pop(&rq->wq, wqe_ix_be,
-			       &wqe->next.next_wqe_index);
-	}
-}
-
 #define RQ_CANNOT_POST(rq) \
-	(!test_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state) || \
+	(test_bit(MLX5E_RQ_STATE_FLUSH, &rq->state) || \
 	 test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state))
 
 bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
 {
@@ -648,24 +637,32 @@ bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
 static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
 				 u32 cqe_bcnt)
 {
 	struct ethhdr	*eth = (struct ethhdr *)(skb->data);
-	struct iphdr	*ipv4 = (struct iphdr *)(skb->data + ETH_HLEN);
-	struct ipv6hdr	*ipv6 = (struct ipv6hdr *)(skb->data + ETH_HLEN);
+	struct iphdr	*ipv4;
+	struct ipv6hdr	*ipv6;
 	struct tcphdr	*tcp;
+	int network_depth = 0;
+	__be16 proto;
+	u16 tot_len;
 
 	u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe);
 	int tcp_ack = ((CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA == l4_hdr_type) ||
 		       (CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA == l4_hdr_type));
 
-	u16 tot_len = cqe_bcnt - ETH_HLEN;
+	skb->mac_len = ETH_HLEN;
+	proto = __vlan_get_protocol(skb, eth->h_proto, &network_depth);
+
+	ipv4 = (struct iphdr *)(skb->data + network_depth);
+	ipv6 = (struct ipv6hdr *)(skb->data + network_depth);
+	tot_len = cqe_bcnt - network_depth;
 
-	if (eth->h_proto == htons(ETH_P_IP)) {
-		tcp = (struct tcphdr *)(skb->data + ETH_HLEN +
+	if (proto == htons(ETH_P_IP)) {
+		tcp = (struct tcphdr *)(skb->data + network_depth +
 					sizeof(struct iphdr));
 		ipv6 = NULL;
 		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
 	} else {
-		tcp = (struct tcphdr *)(skb->data + ETH_HLEN +
+		tcp = (struct tcphdr *)(skb->data + network_depth +
 					sizeof(struct ipv6hdr));
 		ipv4 = NULL;
 		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
@@ -916,7 +913,7 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
 	struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
 	int work_done = 0;
 
-	if (unlikely(test_bit(MLX5E_RQ_STATE_FLUSH_TIMEOUT, &rq->state)))
+	if (unlikely(test_bit(MLX5E_RQ_STATE_FLUSH, &rq->state)))
 		return 0;
 
 	if (cq->decmprs_left)
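
The LRO fix in en_rx.c stops hard-coding ETH_HLEN as the network-header offset and asks __vlan_get_protocol() where the IP header actually starts, so VLAN-tagged frames are parsed correctly. A standalone model of just the offset arithmetic (the helper below is a toy stand-in, not the kernel's __vlan_get_protocol()):

#include <stdio.h>

#define ETH_HLEN	14
#define VLAN_HLEN	4

/* toy stand-in: returns the offset of the network header */
static unsigned int network_depth(unsigned int vlan_tags)
{
	return ETH_HLEN + vlan_tags * VLAN_HLEN;
}

int main(void)
{
	unsigned int cqe_bcnt = 1518;	/* bytes in the completed frame */

	/* the old code always subtracted ETH_HLEN; with one 802.1Q tag
	 * that overstates the IP total length by 4 bytes */
	printf("untagged tot_len=%u, tagged tot_len=%u\n",
	       cqe_bcnt - network_depth(0), cqe_bcnt - network_depth(1));
	return 0;
}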
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 7b9d8a989b52..499487ce3b53 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -70,6 +70,7 @@ struct mlx5e_sw_stats {
 	u64 tx_queue_stopped;
 	u64 tx_queue_wake;
 	u64 tx_queue_dropped;
+	u64 tx_xmit_more;
 	u64 rx_wqe_err;
 	u64 rx_mpwqe_filler;
 	u64 rx_mpwqe_frag;
@@ -101,6 +102,7 @@ static const struct counter_desc sw_stats_desc[] = {
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xmit_more) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_frag) },
@@ -298,6 +300,7 @@ struct mlx5e_sq_stats {
 	/* commonly accessed in data path */
 	u64 packets;
 	u64 bytes;
+	u64 xmit_more;
 	u64 tso_packets;
 	u64 tso_bytes;
 	u64 tso_inner_packets;
@@ -324,6 +327,7 @@ static const struct counter_desc sq_stats_desc[] = {
 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) },
 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) },
 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) },
+	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
 };
 
 #define NUM_SW_COUNTERS			ARRAY_SIZE(sw_stats_desc)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 0f19b01e3fff..22cfc4ac1837 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -170,7 +170,7 @@ static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec
 	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
 		struct flow_dissector_key_control *key =
 			skb_flow_dissector_target(f->dissector,
-						  FLOW_DISSECTOR_KEY_BASIC,
+						  FLOW_DISSECTOR_KEY_CONTROL,
 						  f->key);
 		addr_type = key->addr_type;
 	}
@@ -318,6 +318,7 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 				u32 *action, u32 *flow_tag)
 {
 	const struct tc_action *a;
+	LIST_HEAD(actions);
 
 	if (tc_no_actions(exts))
 		return -EINVAL;
@@ -325,7 +326,8 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 	*flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
 	*action = 0;
 
-	tc_for_each_action(a, exts) {
+	tcf_exts_to_list(exts, &actions);
+	list_for_each_entry(a, &actions, list) {
 		/* Only support a single action per rule */
 		if (*action)
 			return -EINVAL;
@@ -362,13 +364,15 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 				u32 *action, u32 *dest_vport)
 {
 	const struct tc_action *a;
+	LIST_HEAD(actions);
 
 	if (tc_no_actions(exts))
 		return -EINVAL;
 
 	*action = 0;
 
-	tc_for_each_action(a, exts) {
+	tcf_exts_to_list(exts, &actions);
+	list_for_each_entry(a, &actions, list) {
 		/* Only support a single action per rule */
 		if (*action)
 			return -EINVAL;
@@ -503,6 +507,7 @@ int mlx5e_stats_flower(struct mlx5e_priv *priv,
 	struct mlx5e_tc_flow *flow;
 	struct tc_action *a;
 	struct mlx5_fc *counter;
+	LIST_HEAD(actions);
 	u64 bytes;
 	u64 packets;
 	u64 lastuse;
@@ -518,7 +523,8 @@ int mlx5e_stats_flower(struct mlx5e_priv *priv,
 
 	mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
 
-	tc_for_each_action(a, f->exts)
+	tcf_exts_to_list(f->exts, &actions);
+	list_for_each_entry(a, &actions, list)
 		tcf_action_stats_update(a, bytes, packets, lastuse);
 
 	return 0;
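
The en_tc.c changes track a tc core API change: actions are first snapshotted into a local list head with tcf_exts_to_list() and then walked with list_for_each_entry(), replacing the old tc_for_each_action() macro. A standalone sketch of that iteration style with toy types (the kernel's struct tc_action and list helpers live in <net/act_api.h> and <linux/list.h>):

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };
struct action { int kind; struct list_head list; };

#define LIST_HEAD_INIT(name)	{ &(name), &(name) }
#define list_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define list_for_each_entry(pos, head, member)				\
	for (pos = list_entry((head)->next, struct action, member);	\
	     &pos->member != (head);					\
	     pos = list_entry(pos->member.next, struct action, member))

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

int main(void)
{
	struct list_head actions = LIST_HEAD_INIT(actions);	/* LIST_HEAD() */
	struct action drop = { 1, { 0, 0 } }, mark = { 2, { 0, 0 } }, *pos;

	list_add_tail(&drop.list, &actions);	/* tcf_exts_to_list() fills this */
	list_add_tail(&mark.list, &actions);
	list_for_each_entry(pos, &actions, list)
		printf("action kind %d\n", pos->kind);
	return 0;
}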
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index e073bf59890d..eb0e72537f10 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -356,6 +356,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
 		sq->stats.stopped++;
 	}
 
+	sq->stats.xmit_more += skb->xmit_more;
 	if (!skb->xmit_more || netif_xmit_stopped(sq->txq)) {
 		int bf_sz = 0;
 
@@ -394,35 +395,6 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
 	return mlx5e_sq_xmit(sq, skb);
 }
 
-void mlx5e_free_tx_descs(struct mlx5e_sq *sq)
-{
-	struct mlx5e_tx_wqe_info *wi;
-	struct sk_buff *skb;
-	u16 ci;
-	int i;
-
-	while (sq->cc != sq->pc) {
-		ci = sq->cc & sq->wq.sz_m1;
-		skb = sq->skb[ci];
-		wi = &sq->wqe_info[ci];
-
-		if (!skb) { /* nop */
-			sq->cc++;
-			continue;
-		}
-
-		for (i = 0; i < wi->num_dma; i++) {
-			struct mlx5e_sq_dma *dma =
-				mlx5e_dma_get(sq, sq->dma_fifo_cc++);
-
-			mlx5e_tx_dma_unmap(sq->pdev, dma);
-		}
-
-		dev_kfree_skb_any(skb);
-		sq->cc += wi->num_wqebbs;
-	}
-}
-
 bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
 {
 	struct mlx5e_sq *sq;
@@ -434,7 +406,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
 
 	sq = container_of(cq, struct mlx5e_sq, cq);
 
-	if (unlikely(test_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state)))
+	if (unlikely(test_bit(MLX5E_SQ_STATE_FLUSH, &sq->state)))
 		return false;
 
 	npkts = 0;
@@ -512,11 +484,39 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
 	netdev_tx_completed_queue(sq->txq, npkts, nbytes);
 
 	if (netif_tx_queue_stopped(sq->txq) &&
-	    mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM) &&
-	    likely(test_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state))) {
-				netif_tx_wake_queue(sq->txq);
-				sq->stats.wake++;
+	    mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM)) {
+		netif_tx_wake_queue(sq->txq);
+		sq->stats.wake++;
 	}
 
 	return (i == MLX5E_TX_CQ_POLL_BUDGET);
 }
+
+void mlx5e_free_tx_descs(struct mlx5e_sq *sq)
+{
+	struct mlx5e_tx_wqe_info *wi;
+	struct sk_buff *skb;
+	u16 ci;
+	int i;
+
+	while (sq->cc != sq->pc) {
+		ci = sq->cc & sq->wq.sz_m1;
+		skb = sq->skb[ci];
+		wi = &sq->wqe_info[ci];
+
+		if (!skb) { /* nop */
+			sq->cc++;
+			continue;
+		}
+
+		for (i = 0; i < wi->num_dma; i++) {
+			struct mlx5e_sq_dma *dma =
+				mlx5e_dma_get(sq, sq->dma_fifo_cc++);
+
+			mlx5e_tx_dma_unmap(sq->pdev, dma);
+		}
+
+		dev_kfree_skb_any(skb);
+		sq->cc += wi->num_wqebbs;
+	}
+}
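
mlx5e_free_tx_descs() above is moved below the poll routine and now runs only after the SQ is disabled; it walks the ring from the consumer counter (cc) to the producer counter (pc), skipping one-WQEBB NOP slots and releasing multi-WQEBB packet slots. A standalone model of that walk (toy ring, not driver structures):

#include <stdio.h>

#define SZ	8	/* ring slots, power of two, so "& (SZ - 1)" wraps */

struct slot { int skb; int num_wqebbs; };

int main(void)
{
	struct slot ring[SZ] = {
		[0] = { 1, 2 },	/* packet spanning two WQEBBs */
		[2] = { 0, 0 },	/* nop slot */
		[3] = { 1, 1 },	/* single-WQEBB packet */
	};
	unsigned int cc = 0, pc = 4;

	while (cc != pc) {
		struct slot *s = &ring[cc & (SZ - 1)];

		if (!s->skb) {		/* nop consumes exactly one WQEBB */
			cc++;
			continue;
		}
		printf("free skb at %u (%d wqebbs)\n", cc, s->num_wqebbs);
		cc += s->num_wqebbs;	/* dev_kfree_skb_any() in the driver */
	}
	return 0;
}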
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index 64ae2e800daa..9bf33bb69210 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -51,16 +51,18 @@ struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq)
 
 static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
 {
+	struct mlx5e_sq *sq = container_of(cq, struct mlx5e_sq, cq);
 	struct mlx5_wq_cyc *wq;
 	struct mlx5_cqe64 *cqe;
-	struct mlx5e_sq *sq;
 	u16 sqcc;
 
+	if (unlikely(test_bit(MLX5E_SQ_STATE_FLUSH, &sq->state)))
+		return;
+
 	cqe = mlx5e_get_cqe(cq);
 	if (likely(!cqe))
 		return;
 
-	sq = container_of(cq, struct mlx5e_sq, cq);
 	wq = &sq->wq;
 
 	/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index f6d667797ee1..8b78f156214e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1451,7 +1451,8 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
1451 1451
1452 esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num); 1452 esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num);
1453 1453
1454 if (vport_num) { /* Only VFs need ACLs for VST and spoofchk filtering */ 1454 /* Only VFs need ACLs for VST and spoofchk filtering */
1455 if (vport_num && esw->mode == SRIOV_LEGACY) {
1455 esw_vport_ingress_config(esw, vport); 1456 esw_vport_ingress_config(esw, vport);
1456 esw_vport_egress_config(esw, vport); 1457 esw_vport_egress_config(esw, vport);
1457 } 1458 }
@@ -1502,7 +1503,7 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
1502 */ 1503 */
1503 esw_vport_change_handle_locked(vport); 1504 esw_vport_change_handle_locked(vport);
1504 vport->enabled_events = 0; 1505 vport->enabled_events = 0;
1505 if (vport_num) { 1506 if (vport_num && esw->mode == SRIOV_LEGACY) {
1506 esw_vport_disable_egress_acl(esw, vport); 1507 esw_vport_disable_egress_acl(esw, vport);
1507 esw_vport_disable_ingress_acl(esw, vport); 1508 esw_vport_disable_ingress_acl(esw, vport);
1508 } 1509 }
@@ -1767,7 +1768,7 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
1767 vport, err); 1768 vport, err);
1768 1769
1769 mutex_lock(&esw->state_lock); 1770 mutex_lock(&esw->state_lock);
1770 if (evport->enabled) 1771 if (evport->enabled && esw->mode == SRIOV_LEGACY)
1771 err = esw_vport_ingress_config(esw, evport); 1772 err = esw_vport_ingress_config(esw, evport);
1772 mutex_unlock(&esw->state_lock); 1773 mutex_unlock(&esw->state_lock);
1773 return err; 1774 return err;
@@ -1839,7 +1840,7 @@ int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
1839 mutex_lock(&esw->state_lock); 1840 mutex_lock(&esw->state_lock);
1840 evport->vlan = vlan; 1841 evport->vlan = vlan;
1841 evport->qos = qos; 1842 evport->qos = qos;
1842 if (evport->enabled) { 1843 if (evport->enabled && esw->mode == SRIOV_LEGACY) {
1843 err = esw_vport_ingress_config(esw, evport); 1844 err = esw_vport_ingress_config(esw, evport);
1844 if (err) 1845 if (err)
1845 goto out; 1846 goto out;
@@ -1868,10 +1869,11 @@ int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
 	mutex_lock(&esw->state_lock);
 	pschk = evport->spoofchk;
 	evport->spoofchk = spoofchk;
-	if (evport->enabled)
+	if (evport->enabled && esw->mode == SRIOV_LEGACY) {
 		err = esw_vport_ingress_config(esw, evport);
-	if (err)
-		evport->spoofchk = pschk;
+		if (err)
+			evport->spoofchk = pschk;
+	}
 	mutex_unlock(&esw->state_lock);
 
 	return err;
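
All four eswitch.c hunks above add the same guard: per-vport ACL work is attempted only for actual VFs (vport_num != 0) and only while the eswitch runs in legacy SR-IOV mode. A compact sketch of that predicate, with illustrative enum values:

	#include <stdio.h>

	enum { SRIOV_NONE, SRIOV_LEGACY, SRIOV_OFFLOADS };

	/* vport 0 is the PF; offloads mode steers via the FDB instead of ACLs */
	static int vport_needs_acl(int vport_num, int esw_mode)
	{
		return vport_num && esw_mode == SRIOV_LEGACY;
	}

	int main(void)
	{
		printf("%d %d %d\n",
		       vport_needs_acl(0, SRIOV_LEGACY),	/* PF: no ACL */
		       vport_needs_acl(1, SRIOV_LEGACY),	/* VF, legacy: yes */
		       vport_needs_acl(1, SRIOV_OFFLOADS));	/* VF, offloads: no */
		return 0;
	}
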
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index c0b05603fc31..a96140971d77 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -174,6 +174,7 @@ struct mlx5_eswitch_rep {
 	void *priv_data;
 	struct list_head vport_sqs_list;
 	bool valid;
+	u8 hw_id[ETH_ALEN];
 };
 
 struct mlx5_esw_offload {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index a357e8eeeed8..3dc83a9459a4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -113,7 +113,7 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn
 	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
 	dest.vport_num = vport;
 
-	flow_rule = mlx5_add_flow_rule(esw->fdb_table.fdb, spec,
+	flow_rule = mlx5_add_flow_rule(esw->fdb_table.offloads.fdb, spec,
 				       MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
 				       0, &dest);
 	if (IS_ERR(flow_rule))
@@ -535,7 +535,7 @@ void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports)
 	esw_destroy_offloads_fdb_table(esw);
 }
 
-static int mlx5_esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
+static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
 {
 	switch (mode) {
 	case DEVLINK_ESWITCH_MODE_LEGACY:
@@ -551,6 +551,22 @@ static int mlx5_esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
 	return 0;
 }
 
+static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
+{
+	switch (mlx5_mode) {
+	case SRIOV_LEGACY:
+		*mode = DEVLINK_ESWITCH_MODE_LEGACY;
+		break;
+	case SRIOV_OFFLOADS:
+		*mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
 {
 	struct mlx5_core_dev *dev;
@@ -566,7 +582,7 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
 	if (cur_mlx5_mode == SRIOV_NONE)
 		return -EOPNOTSUPP;
 
-	if (mlx5_esw_mode_from_devlink(mode, &mlx5_mode))
+	if (esw_mode_from_devlink(mode, &mlx5_mode))
 		return -EINVAL;
 
 	if (cur_mlx5_mode == mlx5_mode)
@@ -592,9 +608,7 @@ int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
 	if (dev->priv.eswitch->mode == SRIOV_NONE)
 		return -EOPNOTSUPP;
 
-	*mode = dev->priv.eswitch->mode;
-
-	return 0;
+	return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
 }
 
 void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
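
With esw_mode_to_devlink() in place, mode_get reports the devlink enum instead of leaking the driver's internal SRIOV_* values. A userspace sketch of the translation; the enum values are stand-ins, not the kernel's:

	#include <stdio.h>

	enum { DEVLINK_ESWITCH_MODE_LEGACY, DEVLINK_ESWITCH_MODE_SWITCHDEV };
	enum { SRIOV_NONE, SRIOV_LEGACY, SRIOV_OFFLOADS };
	#define EINVAL 22

	static int esw_mode_to_devlink(unsigned int mlx5_mode, unsigned int *mode)
	{
		switch (mlx5_mode) {
		case SRIOV_LEGACY:
			*mode = DEVLINK_ESWITCH_MODE_LEGACY;
			return 0;
		case SRIOV_OFFLOADS:
			*mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
			return 0;
		default:
			return -EINVAL;	/* SRIOV_NONE has no devlink mode */
		}
	}

	int main(void)
	{
		unsigned int mode;

		if (!esw_mode_to_devlink(SRIOV_OFFLOADS, &mode))
			printf("devlink mode = %u\n", mode);	/* SWITCHDEV */
		if (esw_mode_to_devlink(SRIOV_NONE, &mode))
			printf("SRIOV_NONE has no devlink representation\n");
		return 0;
	}
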
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 75bb8c864557..3d6c1f65e586 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -80,7 +80,7 @@
 			   LEFTOVERS_NUM_PRIOS)
 
 #define ETHTOOL_PRIO_NUM_LEVELS 1
-#define ETHTOOL_NUM_PRIOS 10
+#define ETHTOOL_NUM_PRIOS 11
 #define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS)
 /* Vlan, mac, ttc, aRFS */
 #define KERNEL_NIC_PRIO_NUM_LEVELS 4
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
index c2877e9de8a1..3a9195b4169d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
@@ -126,12 +126,21 @@ static struct rb_node *mlx5_fc_stats_query(struct mlx5_core_dev *dev,
 	for (node = &first->node; node; node = rb_next(node)) {
 		struct mlx5_fc *counter = rb_entry(node, struct mlx5_fc, node);
 		struct mlx5_fc_cache *c = &counter->cache;
+		u64 packets;
+		u64 bytes;
 
 		if (counter->id > last_id)
 			break;
 
 		mlx5_cmd_fc_bulk_get(dev, b,
-				     counter->id, &c->packets, &c->bytes);
+				     counter->id, &packets, &bytes);
+
+		if (c->packets == packets)
+			continue;
+
+		c->packets = packets;
+		c->bytes = bytes;
+		c->lastuse = jiffies;
 	}
 
 out:
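
The fs_counters.c hunk reads the bulk query into locals first and commits them to the per-counter cache only when the packet count actually moved, which is what lets lastuse double as an idle timestamp. A self-contained sketch of that update rule (uint64_t and the now argument stand in for u64 and jiffies):

	#include <stdint.h>
	#include <stdio.h>

	struct fc_cache {
		uint64_t packets;
		uint64_t bytes;
		uint64_t lastuse;
	};

	static void fc_cache_update(struct fc_cache *c, uint64_t packets,
				    uint64_t bytes, uint64_t now)
	{
		if (c->packets == packets)
			return;		/* idle counter: keep the old lastuse */

		c->packets = packets;
		c->bytes = bytes;
		c->lastuse = now;
	}

	int main(void)
	{
		struct fc_cache c = { 0, 0, 0 };

		fc_cache_update(&c, 10, 1500, 1000);
		fc_cache_update(&c, 10, 1500, 2000);	/* no traffic: lastuse stays */
		printf("lastuse=%llu\n", (unsigned long long)c.lastuse);	/* 1000 */
		return 0;
	}
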
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 4f491d43e77d..2385bae92672 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1420,36 +1420,12 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
 	dev_info(&pdev->dev, "%s was called\n", __func__);
 	mlx5_enter_error_state(dev);
 	mlx5_unload_one(dev, priv);
+	pci_save_state(pdev);
 	mlx5_pci_disable_device(dev);
 	return state == pci_channel_io_perm_failure ?
 		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
 }
 
-static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
-{
-	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
-	int err = 0;
-
-	dev_info(&pdev->dev, "%s was called\n", __func__);
-
-	err = mlx5_pci_enable_device(dev);
-	if (err) {
-		dev_err(&pdev->dev, "%s: mlx5_pci_enable_device failed with error code: %d\n"
-			, __func__, err);
-		return PCI_ERS_RESULT_DISCONNECT;
-	}
-	pci_set_master(pdev);
-	pci_set_power_state(pdev, PCI_D0);
-	pci_restore_state(pdev);
-
-	return err ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
-}
-
-void mlx5_disable_device(struct mlx5_core_dev *dev)
-{
-	mlx5_pci_err_detected(dev->pdev, 0);
-}
-
 /* wait for the device to show vital signs by waiting
  * for the health counter to start counting.
  */
@@ -1477,21 +1453,44 @@ static int wait_vital(struct pci_dev *pdev)
 	return -ETIMEDOUT;
 }
 
-static void mlx5_pci_resume(struct pci_dev *pdev)
+static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
 {
 	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
-	struct mlx5_priv *priv = &dev->priv;
 	int err;
 
 	dev_info(&pdev->dev, "%s was called\n", __func__);
 
-	pci_save_state(pdev);
-	err = wait_vital(pdev);
+	err = mlx5_pci_enable_device(dev);
 	if (err) {
+		dev_err(&pdev->dev, "%s: mlx5_pci_enable_device failed with error code: %d\n"
+			, __func__, err);
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+
+	pci_set_master(pdev);
+	pci_restore_state(pdev);
+
+	if (wait_vital(pdev)) {
 		dev_err(&pdev->dev, "%s: wait_vital timed out\n", __func__);
-		return;
+		return PCI_ERS_RESULT_DISCONNECT;
 	}
 
+	return PCI_ERS_RESULT_RECOVERED;
+}
+
+void mlx5_disable_device(struct mlx5_core_dev *dev)
+{
+	mlx5_pci_err_detected(dev->pdev, 0);
+}
+
+static void mlx5_pci_resume(struct pci_dev *pdev)
+{
+	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
+	struct mlx5_priv *priv = &dev->priv;
+	int err;
+
+	dev_info(&pdev->dev, "%s was called\n", __func__);
+
 	err = mlx5_load_one(dev, priv);
 	if (err)
 		dev_err(&pdev->dev, "%s: mlx5_load_one failed with error code: %d\n"
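
The two main.c hunks move pci_save_state() into the detection path (while config space is still valid) and rebuild slot_reset around enable/restore/wait_vital. A userspace mock of the resulting three-step recovery sequence; the names and return codes are stand-ins for the PCI error-recovery callbacks, not the real API:

	#include <stdio.h>

	enum ers_result { ERS_NEED_RESET, ERS_DISCONNECT, ERS_RECOVERED };

	static int state_saved;

	static enum ers_result err_detected(void)
	{
		state_saved = 1;	/* pci_save_state() before the device is disabled */
		printf("detected: state saved, device disabled\n");
		return ERS_NEED_RESET;
	}

	static enum ers_result slot_reset(void)
	{
		if (!state_saved)
			return ERS_DISCONNECT;
		printf("reset: device re-enabled, state restored\n");
		/* wait_vital() would poll the health counter here */
		return ERS_RECOVERED;
	}

	int main(void)
	{
		if (err_detected() == ERS_NEED_RESET && slot_reset() == ERS_RECOVERED)
			printf("resume: reloading driver state\n");
		return 0;
	}
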
diff --git a/drivers/net/ethernet/mellanox/mlxsw/port.h b/drivers/net/ethernet/mellanox/mlxsw/port.h
index f33b997f2b61..af371a82c35b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/port.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/port.h
@@ -56,6 +56,7 @@
 #define MLXSW_PORT_PHY_BITS_MASK	(MLXSW_PORT_MAX_PHY_PORTS - 1)
 
 #define MLXSW_PORT_CPU_PORT		0x0
+#define MLXSW_PORT_ROUTER_PORT		(MLXSW_PORT_MAX_PHY_PORTS + 2)
 
 #define MLXSW_PORT_DONT_CARE		(MLXSW_PORT_MAX_PORTS)
 
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 7ca9201f7dcb..1721098eef13 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -3383,6 +3383,15 @@ MLXSW_ITEM32(reg, ritr, ipv4_fe, 0x04, 29, 1);
  */
 MLXSW_ITEM32(reg, ritr, ipv6_fe, 0x04, 28, 1);
 
+/* reg_ritr_lb_en
+ * Loop-back filter enable for unicast packets.
+ * If the flag is set then loop-back filter for unicast packets is
+ * implemented on the RIF. Multicast packets are always subject to
+ * loop-back filtering.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, lb_en, 0x04, 24, 1);
+
 /* reg_ritr_virtual_router
  * Virtual router ID associated with the router interface.
  * Access: RW
@@ -3484,6 +3493,7 @@ static inline void mlxsw_reg_ritr_pack(char *payload, bool enable,
 	mlxsw_reg_ritr_op_set(payload, op);
 	mlxsw_reg_ritr_rif_set(payload, rif);
 	mlxsw_reg_ritr_ipv4_fe_set(payload, 1);
+	mlxsw_reg_ritr_lb_en_set(payload, 1);
 	mlxsw_reg_ritr_mtu_set(payload, mtu);
 	mlxsw_reg_ritr_if_mac_memcpy_to(payload, mac);
 }
@@ -4000,6 +4010,7 @@ static inline void mlxsw_reg_ralue_pack(char *payload,
 {
 	MLXSW_REG_ZERO(ralue, payload);
 	mlxsw_reg_ralue_protocol_set(payload, protocol);
+	mlxsw_reg_ralue_op_set(payload, op);
 	mlxsw_reg_ralue_virtual_router_set(payload, virtual_router);
 	mlxsw_reg_ralue_prefix_len_set(payload, prefix_len);
 	mlxsw_reg_ralue_entry_type_set(payload,
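
MLXSW_ITEM32(reg, ritr, lb_en, 0x04, 24, 1), as added above, generates get/set accessors for a 1-bit field at bit 24 of the 32-bit word at offset 0x04 in the register payload. A plain C sketch of what such accessors reduce to, ignoring the endianness conversion the real macros also perform:

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t item32_get(const uint32_t *payload, unsigned int off,
				   unsigned int shift, unsigned int size)
	{
		uint32_t mask = (size == 32) ? ~0u : ((1u << size) - 1);

		return (payload[off / 4] >> shift) & mask;
	}

	static void item32_set(uint32_t *payload, unsigned int off,
			       unsigned int shift, unsigned int size, uint32_t val)
	{
		uint32_t mask = ((size == 32) ? ~0u : ((1u << size) - 1)) << shift;

		payload[off / 4] = (payload[off / 4] & ~mask) |
				   ((val << shift) & mask);
	}

	int main(void)
	{
		uint32_t payload[16] = { 0 };

		item32_set(payload, 0x04, 24, 1, 1);	/* ritr.lb_en = 1 */
		printf("lb_en=%u\n", item32_get(payload, 0x04, 24, 1));
		return 0;
	}
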
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index c3e61500819d..d48873bcbddf 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -56,6 +56,7 @@
 #include <generated/utsrelease.h>
 #include <net/pkt_cls.h>
 #include <net/tc_act/tc_mirred.h>
+#include <net/netevent.h>
 
 #include "spectrum.h"
 #include "core.h"
@@ -942,8 +943,8 @@ static void mlxsw_sp_port_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_vport)
 	kfree(mlxsw_sp_vport);
 }
 
-int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
-			  u16 vid)
+static int mlxsw_sp_port_add_vid(struct net_device *dev,
+				 __be16 __always_unused proto, u16 vid)
 {
 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
 	struct mlxsw_sp_port *mlxsw_sp_vport;
@@ -956,16 +957,12 @@ int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
 	if (!vid)
 		return 0;
 
-	if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid)) {
-		netdev_warn(dev, "VID=%d already configured\n", vid);
+	if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid))
 		return 0;
-	}
 
 	mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vid);
-	if (!mlxsw_sp_vport) {
-		netdev_err(dev, "Failed to create vPort for VID=%d\n", vid);
+	if (!mlxsw_sp_vport)
 		return -ENOMEM;
-	}
 
 	/* When adding the first VLAN interface on a bridged port we need to
 	 * transition all the active 802.1Q bridge VLANs to use explicit
@@ -973,24 +970,17 @@ int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
 	 */
 	if (list_is_singular(&mlxsw_sp_port->vports_list)) {
 		err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
-		if (err) {
-			netdev_err(dev, "Failed to set to Virtual mode\n");
+		if (err)
 			goto err_port_vp_mode_trans;
-		}
 	}
 
 	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
-	if (err) {
-		netdev_err(dev, "Failed to disable learning for VID=%d\n", vid);
+	if (err)
 		goto err_port_vid_learning_set;
-	}
 
 	err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, untagged);
-	if (err) {
-		netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
-			   vid);
+	if (err)
 		goto err_port_add_vid;
-	}
 
 	return 0;
 
@@ -1010,7 +1000,6 @@ static int mlxsw_sp_port_kill_vid(struct net_device *dev,
 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
 	struct mlxsw_sp_port *mlxsw_sp_vport;
 	struct mlxsw_sp_fid *f;
-	int err;
 
 	/* VLAN 0 is removed from HW filter when device goes down, but
 	 * it is reserved in our case, so simply return.
@@ -1019,23 +1008,12 @@ static int mlxsw_sp_port_kill_vid(struct net_device *dev,
 		return 0;
 
 	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
-	if (!mlxsw_sp_vport) {
-		netdev_warn(dev, "VID=%d does not exist\n", vid);
+	if (WARN_ON(!mlxsw_sp_vport))
 		return 0;
-	}
 
-	err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
-	if (err) {
-		netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
-			   vid);
-		return err;
-	}
+	mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
 
-	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
-	if (err) {
-		netdev_err(dev, "Failed to enable learning for VID=%d\n", vid);
-		return err;
-	}
+	mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
 
 	/* Drop FID reference. If this was the last reference the
 	 * resources will be freed.
@@ -1048,13 +1026,8 @@ static int mlxsw_sp_port_kill_vid(struct net_device *dev,
 	 * transition all active 802.1Q bridge VLANs to use VID to FID
 	 * mappings and set port's mode to VLAN mode.
 	 */
-	if (list_is_singular(&mlxsw_sp_port->vports_list)) {
-		err = mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
-		if (err) {
-			netdev_err(dev, "Failed to set to VLAN mode\n");
-			return err;
-		}
-	}
+	if (list_is_singular(&mlxsw_sp_port->vports_list))
+		mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
 
 	mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
 
@@ -1149,6 +1122,7 @@ static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
 				      bool ingress)
 {
 	const struct tc_action *a;
+	LIST_HEAD(actions);
 	int err;
 
 	if (!tc_single_action(cls->exts)) {
@@ -1156,7 +1130,8 @@ static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
 		return -ENOTSUPP;
 	}
 
-	tc_for_each_action(a, cls->exts) {
+	tcf_exts_to_list(cls->exts, &actions);
+	list_for_each_entry(a, &actions, list) {
 		if (!is_tcf_mirred_mirror(a) || protocol != htons(ETH_P_ALL))
 			return -ENOTSUPP;
 
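
The matchall hunk above migrates from the tc_for_each_action() macro to tcf_exts_to_list() plus list_for_each_entry(). The sketch below mimics that list-walk style with a minimal intrusive list standing in for the kernel's list_head:

	#include <stdio.h>

	struct action {
		const char *kind;
		struct action *next;
	};

	/* walk every action hanging off the head, as list_for_each_entry does */
	#define for_each_action(a, head) \
		for ((a) = (head); (a); (a) = (a)->next)

	int main(void)
	{
		struct action mirror = { "mirred_mirror", NULL };
		struct action drop = { "gact_drop", &mirror };
		struct action *a;

		for_each_action(a, &drop)
			printf("action: %s\n", a->kind);
		return 0;
	}
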
@@ -2076,6 +2051,18 @@ static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
 	return 0;
 }
 
+static int mlxsw_sp_port_pvid_vport_create(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+	mlxsw_sp_port->pvid = 1;
+
+	return mlxsw_sp_port_add_vid(mlxsw_sp_port->dev, 0, 1);
+}
+
+static int mlxsw_sp_port_pvid_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+	return mlxsw_sp_port_kill_vid(mlxsw_sp_port->dev, 0, 1);
+}
+
 static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
 				bool split, u8 module, u8 width, u8 lane)
 {
@@ -2119,6 +2106,13 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
 	dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
 	dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;
 
+	err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
+	if (err) {
+		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
+			mlxsw_sp_port->local_port);
+		goto err_port_swid_set;
+	}
+
 	err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
 	if (err) {
 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
@@ -2144,13 +2138,6 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
 		goto err_port_system_port_mapping_set;
 	}
 
-	err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
-	if (err) {
-		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
-			mlxsw_sp_port->local_port);
-		goto err_port_swid_set;
-	}
-
 	err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width);
 	if (err) {
 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
@@ -2191,7 +2178,15 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
 		goto err_port_dcb_init;
 	}
 
+	err = mlxsw_sp_port_pvid_vport_create(mlxsw_sp_port);
+	if (err) {
+		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create PVID vPort\n",
+			mlxsw_sp_port->local_port);
+		goto err_port_pvid_vport_create;
+	}
+
 	mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
+	mlxsw_sp->ports[local_port] = mlxsw_sp_port;
 	err = register_netdev(dev);
 	if (err) {
 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
@@ -2208,27 +2203,26 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
 		goto err_core_port_init;
 	}
 
-	err = mlxsw_sp_port_vlan_init(mlxsw_sp_port);
-	if (err)
-		goto err_port_vlan_init;
-
-	mlxsw_sp->ports[local_port] = mlxsw_sp_port;
 	return 0;
 
-err_port_vlan_init:
-	mlxsw_core_port_fini(&mlxsw_sp_port->core_port);
 err_core_port_init:
 	unregister_netdev(dev);
 err_register_netdev:
+	mlxsw_sp->ports[local_port] = NULL;
+	mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
+	mlxsw_sp_port_pvid_vport_destroy(mlxsw_sp_port);
+err_port_pvid_vport_create:
+	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
 err_port_dcb_init:
 err_port_ets_init:
 err_port_buffers_init:
 err_port_admin_status_set:
 err_port_mtu_set:
 err_port_speed_by_width_set:
-err_port_swid_set:
 err_port_system_port_mapping_set:
 err_dev_addr_init:
+	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
+err_port_swid_set:
 	free_percpu(mlxsw_sp_port->pcpu_stats);
 err_alloc_stats:
 	kfree(mlxsw_sp_port->untagged_vlans);
@@ -2245,12 +2239,12 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
 
 	if (!mlxsw_sp_port)
 		return;
-	mlxsw_sp->ports[local_port] = NULL;
 	mlxsw_core_port_fini(&mlxsw_sp_port->core_port);
 	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
-	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
-	mlxsw_sp_port_kill_vid(mlxsw_sp_port->dev, 0, 1);
+	mlxsw_sp->ports[local_port] = NULL;
 	mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
+	mlxsw_sp_port_pvid_vport_destroy(mlxsw_sp_port);
+	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
 	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
 	mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port);
 	free_percpu(mlxsw_sp_port->pcpu_stats);
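
The relocated labels in mlxsw_sp_port_create() keep the usual kernel discipline: unwind in exact reverse order of setup, so the SWID step moved to the top now gets an explicit cleanup call just above its label. A userspace skeleton of the goto-unwind pattern with dummy setup steps:

	#include <stdio.h>

	static int setup_a(void) { puts("a up"); return 0; }
	static void undo_a(void) { puts("a down"); }
	static int setup_b(void) { puts("b up"); return -1; /* simulated failure */ }
	static void undo_b(void) { puts("b down"); }
	static int setup_c(void) { puts("c up"); return 0; }

	static int create(void)
	{
		int err;

		err = setup_a();
		if (err)
			goto err_a;
		err = setup_b();
		if (err)
			goto err_b;	/* only a is torn down */
		err = setup_c();
		if (err)
			goto err_c;	/* b, then a, are torn down */
		return 0;

	err_c:
		undo_b();
	err_b:
		undo_a();
	err_a:
		return err;
	}

	int main(void)
	{
		if (create())
			puts("create failed, fully unwound");
		return 0;
	}
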
@@ -2662,6 +2656,26 @@ static const struct mlxsw_rx_listener mlxsw_sp_rx_listener[] = {
 	{
 		.func = mlxsw_sp_rx_listener_func,
 		.local_port = MLXSW_PORT_DONT_CARE,
+		.trap_id = MLXSW_TRAP_ID_MTUERROR,
+	},
+	{
+		.func = mlxsw_sp_rx_listener_func,
+		.local_port = MLXSW_PORT_DONT_CARE,
+		.trap_id = MLXSW_TRAP_ID_TTLERROR,
+	},
+	{
+		.func = mlxsw_sp_rx_listener_func,
+		.local_port = MLXSW_PORT_DONT_CARE,
+		.trap_id = MLXSW_TRAP_ID_LBERROR,
+	},
+	{
+		.func = mlxsw_sp_rx_listener_func,
+		.local_port = MLXSW_PORT_DONT_CARE,
+		.trap_id = MLXSW_TRAP_ID_OSPF,
+	},
+	{
+		.func = mlxsw_sp_rx_listener_func,
+		.local_port = MLXSW_PORT_DONT_CARE,
 		.trap_id = MLXSW_TRAP_ID_IP2ME,
 	},
 	{
@@ -3311,6 +3325,39 @@ static struct mlxsw_sp_fid *mlxsw_sp_bridge_fid_get(struct mlxsw_sp *mlxsw_sp,
 	return mlxsw_sp_fid_find(mlxsw_sp, fid);
 }
 
+static enum mlxsw_flood_table_type mlxsw_sp_flood_table_type_get(u16 fid)
+{
+	return mlxsw_sp_fid_is_vfid(fid) ? MLXSW_REG_SFGC_TABLE_TYPE_FID :
+	       MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
+}
+
+static u16 mlxsw_sp_flood_table_index_get(u16 fid)
+{
+	return mlxsw_sp_fid_is_vfid(fid) ? mlxsw_sp_fid_to_vfid(fid) : fid;
+}
+
+static int mlxsw_sp_router_port_flood_set(struct mlxsw_sp *mlxsw_sp, u16 fid,
+					  bool set)
+{
+	enum mlxsw_flood_table_type table_type;
+	char *sftr_pl;
+	u16 index;
+	int err;
+
+	sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
+	if (!sftr_pl)
+		return -ENOMEM;
+
+	table_type = mlxsw_sp_flood_table_type_get(fid);
+	index = mlxsw_sp_flood_table_index_get(fid);
+	mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, index, table_type,
+			    1, MLXSW_PORT_ROUTER_PORT, set);
+	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
+
+	kfree(sftr_pl);
+	return err;
+}
+
 static enum mlxsw_reg_ritr_if_type mlxsw_sp_rif_type_get(u16 fid)
 {
 	if (mlxsw_sp_fid_is_vfid(fid))
@@ -3347,10 +3394,14 @@ static int mlxsw_sp_rif_bridge_create(struct mlxsw_sp *mlxsw_sp,
 	if (rif == MLXSW_SP_RIF_MAX)
 		return -ERANGE;
 
-	err = mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, true);
+	err = mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, true);
 	if (err)
 		return err;
 
+	err = mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, true);
+	if (err)
+		goto err_rif_bridge_op;
+
 	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, true);
 	if (err)
 		goto err_rif_fdb_op;
@@ -3372,6 +3423,8 @@ err_rif_alloc:
 	mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);
 err_rif_fdb_op:
 	mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, false);
+err_rif_bridge_op:
+	mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false);
 	return err;
 }
 
@@ -3391,6 +3444,8 @@ void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp,
 
 	mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, false);
 
+	mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false);
+
 	netdev_dbg(l3_dev, "RIF=%d destroyed\n", rif);
 }
 
@@ -4487,18 +4542,26 @@ static struct notifier_block mlxsw_sp_inetaddr_nb __read_mostly = {
 	.priority = 10,	/* Must be called before FIB notifier block */
 };
 
+static struct notifier_block mlxsw_sp_router_netevent_nb __read_mostly = {
+	.notifier_call = mlxsw_sp_router_netevent_event,
+};
+
 static int __init mlxsw_sp_module_init(void)
 {
 	int err;
 
 	register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
 	register_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
+	register_netevent_notifier(&mlxsw_sp_router_netevent_nb);
+
 	err = mlxsw_core_driver_register(&mlxsw_sp_driver);
 	if (err)
 		goto err_core_driver_register;
 	return 0;
 
 err_core_driver_register:
+	unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
+	unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
 	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
 	return err;
 }
@@ -4506,6 +4569,7 @@ err_core_driver_register:
 static void __exit mlxsw_sp_module_exit(void)
 {
 	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
+	unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
 	unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
 	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
 }
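
Hoisting the netevent notifier into module init/exit registers it once per module instead of once per ASIC instance, and both the exit path and the init error path unwind in reverse registration order. A skeleton of that ordering with mocked register calls:

	#include <stdio.h>

	static void reg(const char *n) { printf("register %s\n", n); }
	static void unreg(const char *n) { printf("unregister %s\n", n); }

	static int module_init_sketch(int core_register_fails)
	{
		reg("netdevice");
		reg("inetaddr");
		reg("netevent");

		if (core_register_fails)
			goto err_core_driver_register;
		return 0;

	err_core_driver_register:
		unreg("netevent");	/* reverse order on the error path too */
		unreg("inetaddr");
		unreg("netdevice");
		return -1;
	}

	int main(void)
	{
		if (module_init_sketch(1))
			puts("init failed, notifiers unregistered");
		return 0;
	}
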
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index f69aa37d1521..ac48abebe904 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -536,8 +536,6 @@ int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
 			     u16 vid);
 int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
 			   u16 vid_end, bool is_member, bool untagged);
-int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
-			  u16 vid);
 int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
 			     bool set);
 void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port);
@@ -589,6 +587,8 @@ int mlxsw_sp_router_neigh_construct(struct net_device *dev,
 				    struct neighbour *n);
 void mlxsw_sp_router_neigh_destroy(struct net_device *dev,
 				   struct neighbour *n);
+int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
+				   unsigned long event, void *ptr);
 
 int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count);
 void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
index 074cdda7b6f3..953b214f38d0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -330,7 +330,7 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = {
 	MLXSW_SP_CPU_PORT_SB_CM,
 	MLXSW_SP_CPU_PORT_SB_CM,
 	MLXSW_SP_CPU_PORT_SB_CM,
-	MLXSW_SP_CPU_PORT_SB_CM,
+	MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(10000), 0, 0),
 	MLXSW_SP_CPU_PORT_SB_CM,
 	MLXSW_SP_CPU_PORT_SB_CM,
 	MLXSW_SP_CPU_PORT_SB_CM,
@@ -717,22 +717,18 @@ int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port,
 	u8 local_port = mlxsw_sp_port->local_port;
 	u8 pg_buff = tc_index;
 	enum mlxsw_reg_sbxx_dir dir = pool_type;
-	u8 pool = pool_index;
+	u8 pool = pool_get(pool_index);
 	u32 max_buff;
 	int err;
 
+	if (dir != dir_get(pool_index))
+		return -EINVAL;
+
 	err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool, dir,
 				       threshold, &max_buff);
 	if (err)
 		return err;
 
-	if (pool_type == DEVLINK_SB_POOL_TYPE_EGRESS) {
-		if (pool < MLXSW_SP_SB_POOL_COUNT)
-			return -EINVAL;
-		pool -= MLXSW_SP_SB_POOL_COUNT;
-	} else if (pool >= MLXSW_SP_SB_POOL_COUNT) {
-		return -EINVAL;
-	}
 	return mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, pg_buff, dir,
 				    0, max_buff, pool);
 }
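
The sanity check added to mlxsw_sp_sb_tc_pool_bind_set() assumes devlink's flat pool_index encodes both a direction and a pool, split apart by pool_get()/dir_get(). A sketch of one such encoding; the ingress-pools-first layout below is an assumption for illustration, not necessarily the driver's:

	#include <stdio.h>

	#define SB_POOL_COUNT 4

	enum sb_dir { SB_DIR_INGRESS, SB_DIR_EGRESS };

	static unsigned int pool_get(unsigned int pool_index)
	{
		return pool_index % SB_POOL_COUNT;
	}

	static enum sb_dir dir_get(unsigned int pool_index)
	{
		return pool_index < SB_POOL_COUNT ? SB_DIR_INGRESS : SB_DIR_EGRESS;
	}

	int main(void)
	{
		unsigned int idx = 6;	/* egress pool 2 under this encoding */

		printf("pool=%u dir=%s\n", pool_get(idx),
		       dir_get(idx) == SB_DIR_EGRESS ? "egress" : "ingress");
		return 0;
	}
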
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
index 01cfb7512827..b6ed7f7c531e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
@@ -341,6 +341,8 @@ static int mlxsw_sp_port_pfc_set(struct mlxsw_sp_port *mlxsw_sp_port,
 	char pfcc_pl[MLXSW_REG_PFCC_LEN];
 
 	mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
+	mlxsw_reg_pfcc_pprx_set(pfcc_pl, mlxsw_sp_port->link.rx_pause);
+	mlxsw_reg_pfcc_pptx_set(pfcc_pl, mlxsw_sp_port->link.tx_pause);
 	mlxsw_reg_pfcc_prio_pack(pfcc_pl, pfc->pfc_en);
 
 	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
@@ -351,17 +353,17 @@ static int mlxsw_sp_dcbnl_ieee_setpfc(struct net_device *dev,
 				      struct ieee_pfc *pfc)
 {
 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+	bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
 	int err;
 
-	if ((mlxsw_sp_port->link.tx_pause || mlxsw_sp_port->link.rx_pause) &&
-	    pfc->pfc_en) {
+	if (pause_en && pfc->pfc_en) {
 		netdev_err(dev, "PAUSE frames already enabled on port\n");
 		return -EINVAL;
 	}
 
 	err = __mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu,
 					   mlxsw_sp_port->dcb.ets->prio_tc,
-					   false, pfc);
+					   pause_en, pfc);
 	if (err) {
 		netdev_err(dev, "Failed to configure port's headroom for PFC\n");
 		return err;
@@ -380,7 +382,7 @@ static int mlxsw_sp_dcbnl_ieee_setpfc(struct net_device *dev,
 
 err_port_pfc_set:
 	__mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu,
-				     mlxsw_sp_port->dcb.ets->prio_tc, false,
+				     mlxsw_sp_port->dcb.ets->prio_tc, pause_en,
 				     mlxsw_sp_port->dcb.pfc);
 	return err;
 }
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 81418d629231..3f5c51da6d3e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -107,6 +107,7 @@ mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
 }
 
 struct mlxsw_sp_fib_key {
+	struct net_device *dev;
 	unsigned char addr[sizeof(struct in6_addr)];
 	unsigned char prefix_len;
 };
@@ -123,7 +124,7 @@ struct mlxsw_sp_fib_entry {
 	struct rhash_head ht_node;
 	struct mlxsw_sp_fib_key key;
 	enum mlxsw_sp_fib_entry_type type;
-	u8 added:1;
+	unsigned int ref_count;
 	u16 rif; /* used for action local */
 	struct mlxsw_sp_vr *vr;
 	struct list_head nexthop_group_node;
@@ -171,13 +172,15 @@ static void mlxsw_sp_fib_entry_remove(struct mlxsw_sp_fib *fib,
 
 static struct mlxsw_sp_fib_entry *
 mlxsw_sp_fib_entry_create(struct mlxsw_sp_fib *fib, const void *addr,
-			  size_t addr_len, unsigned char prefix_len)
+			  size_t addr_len, unsigned char prefix_len,
+			  struct net_device *dev)
 {
 	struct mlxsw_sp_fib_entry *fib_entry;
 
 	fib_entry = kzalloc(sizeof(*fib_entry), GFP_KERNEL);
 	if (!fib_entry)
 		return NULL;
+	fib_entry->key.dev = dev;
 	memcpy(fib_entry->key.addr, addr, addr_len);
 	fib_entry->key.prefix_len = prefix_len;
 	return fib_entry;
@@ -190,10 +193,13 @@ static void mlxsw_sp_fib_entry_destroy(struct mlxsw_sp_fib_entry *fib_entry)
 
 static struct mlxsw_sp_fib_entry *
 mlxsw_sp_fib_entry_lookup(struct mlxsw_sp_fib *fib, const void *addr,
-			  size_t addr_len, unsigned char prefix_len)
+			  size_t addr_len, unsigned char prefix_len,
+			  struct net_device *dev)
 {
-	struct mlxsw_sp_fib_key key = {{ 0 } };
+	struct mlxsw_sp_fib_key key;
 
+	memset(&key, 0, sizeof(key));
+	key.dev = dev;
 	memcpy(key.addr, addr, addr_len);
 	key.prefix_len = prefix_len;
 	return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
@@ -657,7 +663,7 @@ int mlxsw_sp_router_neigh_construct(struct net_device *dev,
 		return 0;
 	}
 
-	r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
+	r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
 	if (WARN_ON(!r))
 		return -EINVAL;
 
@@ -938,8 +944,8 @@ static void mlxsw_sp_router_neigh_update_hw(struct work_struct *work)
 	mlxsw_sp_port_dev_put(mlxsw_sp_port);
 }
 
-static int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
-					  unsigned long event, void *ptr)
+int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
+				   unsigned long event, void *ptr)
 {
 	struct mlxsw_sp_neigh_entry *neigh_entry;
 	struct mlxsw_sp_port *mlxsw_sp_port;
@@ -1009,10 +1015,6 @@ static int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
 	return NOTIFY_DONE;
 }
 
-static struct notifier_block mlxsw_sp_router_netevent_nb __read_mostly = {
-	.notifier_call = mlxsw_sp_router_netevent_event,
-};
-
 static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
 {
 	int err;
@@ -1027,10 +1029,6 @@ static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
 	 */
 	mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);
 
-	err = register_netevent_notifier(&mlxsw_sp_router_netevent_nb);
-	if (err)
-		goto err_register_netevent_notifier;
-
 	/* Create the delayed works for the activity_update */
 	INIT_DELAYED_WORK(&mlxsw_sp->router.neighs_update.dw,
 			  mlxsw_sp_router_neighs_update_work);
@@ -1039,17 +1037,12 @@ static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
 	mlxsw_core_schedule_dw(&mlxsw_sp->router.neighs_update.dw, 0);
 	mlxsw_core_schedule_dw(&mlxsw_sp->router.nexthop_probe_dw, 0);
 	return 0;
-
-err_register_netevent_notifier:
-	rhashtable_destroy(&mlxsw_sp->router.neigh_ht);
-	return err;
 }
 
 static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
 {
 	cancel_delayed_work_sync(&mlxsw_sp->router.neighs_update.dw);
 	cancel_delayed_work_sync(&mlxsw_sp->router.nexthop_probe_dw);
-	unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
 	rhashtable_destroy(&mlxsw_sp->router.neigh_ht);
 }
 
@@ -1524,7 +1517,14 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
 		return err;
 	mlxsw_sp_lpm_init(mlxsw_sp);
 	mlxsw_sp_vrs_init(mlxsw_sp);
-	return mlxsw_sp_neigh_init(mlxsw_sp);
+	err = mlxsw_sp_neigh_init(mlxsw_sp);
+	if (err)
+		goto err_neigh_init;
+	return 0;
+
+err_neigh_init:
+	__mlxsw_sp_router_fini(mlxsw_sp);
+	return err;
 }
 
 void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
@@ -1626,11 +1626,8 @@ static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
 static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
 				     struct mlxsw_sp_fib_entry *fib_entry)
 {
-	enum mlxsw_reg_ralue_op op;
-
-	op = !fib_entry->added ? MLXSW_REG_RALUE_OP_WRITE_WRITE :
-				 MLXSW_REG_RALUE_OP_WRITE_UPDATE;
-	return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry, op);
+	return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
+				     MLXSW_REG_RALUE_OP_WRITE_WRITE);
 }
 
 static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
@@ -1651,9 +1648,10 @@ static void mlxsw_sp_router_fib4_add_info_destroy(void const *data)
 	const struct mlxsw_sp_router_fib4_add_info *info = data;
 	struct mlxsw_sp_fib_entry *fib_entry = info->fib_entry;
 	struct mlxsw_sp *mlxsw_sp = info->mlxsw_sp;
+	struct mlxsw_sp_vr *vr = fib_entry->vr;
 
 	mlxsw_sp_fib_entry_destroy(fib_entry);
-	mlxsw_sp_vr_put(mlxsw_sp, fib_entry->vr);
+	mlxsw_sp_vr_put(mlxsw_sp, vr);
 	kfree(info);
 }
 
@@ -1694,34 +1692,93 @@ mlxsw_sp_router_fib4_entry_fini(struct mlxsw_sp *mlxsw_sp,
 	mlxsw_sp_nexthop_group_put(mlxsw_sp, fib_entry);
 }
 
-static int
-mlxsw_sp_router_fib4_add_prepare(struct mlxsw_sp_port *mlxsw_sp_port,
-				 const struct switchdev_obj_ipv4_fib *fib4,
-				 struct switchdev_trans *trans)
+static struct mlxsw_sp_fib_entry *
+mlxsw_sp_fib_entry_get(struct mlxsw_sp *mlxsw_sp,
+		       const struct switchdev_obj_ipv4_fib *fib4)
 {
-	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
-	struct mlxsw_sp_router_fib4_add_info *info;
 	struct mlxsw_sp_fib_entry *fib_entry;
+	struct fib_info *fi = fib4->fi;
 	struct mlxsw_sp_vr *vr;
 	int err;
 
 	vr = mlxsw_sp_vr_get(mlxsw_sp, fib4->dst_len, fib4->tb_id,
 			     MLXSW_SP_L3_PROTO_IPV4);
 	if (IS_ERR(vr))
-		return PTR_ERR(vr);
+		return ERR_CAST(vr);
 
+	fib_entry = mlxsw_sp_fib_entry_lookup(vr->fib, &fib4->dst,
+					      sizeof(fib4->dst),
+					      fib4->dst_len, fi->fib_dev);
+	if (fib_entry) {
+		/* Already exists, just take a reference */
+		fib_entry->ref_count++;
+		return fib_entry;
+	}
 	fib_entry = mlxsw_sp_fib_entry_create(vr->fib, &fib4->dst,
-					      sizeof(fib4->dst), fib4->dst_len);
+					      sizeof(fib4->dst),
+					      fib4->dst_len, fi->fib_dev);
 	if (!fib_entry) {
 		err = -ENOMEM;
 		goto err_fib_entry_create;
 	}
 	fib_entry->vr = vr;
+	fib_entry->ref_count = 1;
 
 	err = mlxsw_sp_router_fib4_entry_init(mlxsw_sp, fib4, fib_entry);
 	if (err)
 		goto err_fib4_entry_init;
 
+	return fib_entry;
+
+err_fib4_entry_init:
+	mlxsw_sp_fib_entry_destroy(fib_entry);
+err_fib_entry_create:
+	mlxsw_sp_vr_put(mlxsw_sp, vr);
+
+	return ERR_PTR(err);
+}
+
+static struct mlxsw_sp_fib_entry *
+mlxsw_sp_fib_entry_find(struct mlxsw_sp *mlxsw_sp,
+			const struct switchdev_obj_ipv4_fib *fib4)
+{
+	struct mlxsw_sp_vr *vr;
+
+	vr = mlxsw_sp_vr_find(mlxsw_sp, fib4->tb_id, MLXSW_SP_L3_PROTO_IPV4);
+	if (!vr)
+		return NULL;
+
+	return mlxsw_sp_fib_entry_lookup(vr->fib, &fib4->dst,
+					 sizeof(fib4->dst), fib4->dst_len,
+					 fib4->fi->fib_dev);
+}
+
+void mlxsw_sp_fib_entry_put(struct mlxsw_sp *mlxsw_sp,
+			    struct mlxsw_sp_fib_entry *fib_entry)
+{
+	struct mlxsw_sp_vr *vr = fib_entry->vr;
+
+	if (--fib_entry->ref_count == 0) {
+		mlxsw_sp_router_fib4_entry_fini(mlxsw_sp, fib_entry);
+		mlxsw_sp_fib_entry_destroy(fib_entry);
+	}
+	mlxsw_sp_vr_put(mlxsw_sp, vr);
+}
+
+static int
+mlxsw_sp_router_fib4_add_prepare(struct mlxsw_sp_port *mlxsw_sp_port,
+				 const struct switchdev_obj_ipv4_fib *fib4,
+				 struct switchdev_trans *trans)
+{
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	struct mlxsw_sp_router_fib4_add_info *info;
+	struct mlxsw_sp_fib_entry *fib_entry;
+	int err;
+
+	fib_entry = mlxsw_sp_fib_entry_get(mlxsw_sp, fib4);
+	if (IS_ERR(fib_entry))
+		return PTR_ERR(fib_entry);
+
 	info = kmalloc(sizeof(*info), GFP_KERNEL);
 	if (!info) {
 		err = -ENOMEM;
@@ -1735,11 +1792,7 @@ mlxsw_sp_router_fib4_add_prepare(struct mlxsw_sp_port *mlxsw_sp_port,
 	return 0;
 
 err_alloc_info:
-	mlxsw_sp_router_fib4_entry_fini(mlxsw_sp, fib_entry);
-err_fib4_entry_init:
-	mlxsw_sp_fib_entry_destroy(fib_entry);
-err_fib_entry_create:
-	mlxsw_sp_vr_put(mlxsw_sp, vr);
+	mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry);
 	return err;
 }
 
@@ -1758,11 +1811,14 @@ mlxsw_sp_router_fib4_add_commit(struct mlxsw_sp_port *mlxsw_sp_port,
 	fib_entry = info->fib_entry;
 	kfree(info);
 
+	if (fib_entry->ref_count != 1)
+		return 0;
+
 	vr = fib_entry->vr;
-	err = mlxsw_sp_fib_entry_insert(fib_entry->vr->fib, fib_entry);
+	err = mlxsw_sp_fib_entry_insert(vr->fib, fib_entry);
 	if (err)
 		goto err_fib_entry_insert;
-	err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
+	err = mlxsw_sp_fib_entry_update(mlxsw_sp_port->mlxsw_sp, fib_entry);
 	if (err)
 		goto err_fib_entry_add;
 	return 0;
@@ -1770,9 +1826,7 @@ mlxsw_sp_router_fib4_add_commit(struct mlxsw_sp_port *mlxsw_sp_port,
 err_fib_entry_add:
 	mlxsw_sp_fib_entry_remove(vr->fib, fib_entry);
 err_fib_entry_insert:
-	mlxsw_sp_router_fib4_entry_fini(mlxsw_sp, fib_entry);
-	mlxsw_sp_fib_entry_destroy(fib_entry);
-	mlxsw_sp_vr_put(mlxsw_sp, vr);
+	mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry);
 	return err;
 }
 
@@ -1792,23 +1846,18 @@ int mlxsw_sp_router_fib4_del(struct mlxsw_sp_port *mlxsw_sp_port,
 {
 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 	struct mlxsw_sp_fib_entry *fib_entry;
-	struct mlxsw_sp_vr *vr;
 
-	vr = mlxsw_sp_vr_find(mlxsw_sp, fib4->tb_id, MLXSW_SP_L3_PROTO_IPV4);
-	if (!vr) {
-		dev_warn(mlxsw_sp->bus_info->dev, "Failed to find virtual router for FIB4 entry being removed.\n");
-		return -ENOENT;
-	}
-	fib_entry = mlxsw_sp_fib_entry_lookup(vr->fib, &fib4->dst,
-					      sizeof(fib4->dst), fib4->dst_len);
+	fib_entry = mlxsw_sp_fib_entry_find(mlxsw_sp, fib4);
 	if (!fib_entry) {
 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to find FIB4 entry being removed.\n");
 		return -ENOENT;
 	}
-	mlxsw_sp_fib_entry_del(mlxsw_sp_port->mlxsw_sp, fib_entry);
-	mlxsw_sp_fib_entry_remove(vr->fib, fib_entry);
-	mlxsw_sp_router_fib4_entry_fini(mlxsw_sp, fib_entry);
-	mlxsw_sp_fib_entry_destroy(fib_entry);
-	mlxsw_sp_vr_put(mlxsw_sp, vr);
+
+	if (fib_entry->ref_count == 1) {
+		mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
+		mlxsw_sp_fib_entry_remove(fib_entry->vr->fib, fib_entry);
+	}
+
+	mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry);
 	return 0;
 }
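
The spectrum_router.c changes make FIB entries reference-counted objects keyed additionally by the nexthop device, so identical routes share a single offloaded entry. A miniature of the get/put discipline, with one slot standing in for the rhashtable:

	#include <stdio.h>
	#include <stdlib.h>

	struct entry {
		unsigned int ref_count;
		int key;
	};

	static struct entry *slot;	/* stand-in for the hash table */

	static struct entry *entry_get(int key)
	{
		if (slot && slot->key == key) {
			slot->ref_count++;	/* already offloaded: share it */
			return slot;
		}
		slot = calloc(1, sizeof(*slot));
		if (!slot)
			return NULL;
		slot->key = key;
		slot->ref_count = 1;	/* first user programs the hardware */
		return slot;
	}

	static void entry_put(struct entry *e)
	{
		if (--e->ref_count == 0) {	/* last user removes the entry */
			free(e);
			slot = NULL;
		}
	}

	int main(void)
	{
		struct entry *a = entry_get(42);
		struct entry *b = entry_get(42);	/* same route: same entry */

		printf("shared=%d refs=%u\n", a == b, a->ref_count);
		entry_put(b);
		entry_put(a);
		return 0;
	}
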
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index a1ad5e6bdfa8..7b654c517b91 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -167,8 +167,8 @@ static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
 }
 
 static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
-				     u16 idx_begin, u16 idx_end, bool set,
-				     bool only_uc)
+				     u16 idx_begin, u16 idx_end, bool uc_set,
+				     bool bm_set)
 {
 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 	u16 local_port = mlxsw_sp_port->local_port;
@@ -187,28 +187,22 @@ static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
 		return -ENOMEM;
 
 	mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, idx_begin,
-			    table_type, range, local_port, set);
+			    table_type, range, local_port, uc_set);
 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
 	if (err)
 		goto buffer_out;
 
-	/* Flooding control allows one to decide whether a given port will
-	 * flood unicast traffic for which there is no FDB entry.
-	 */
-	if (only_uc)
-		goto buffer_out;
-
 	mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, idx_begin,
-			    table_type, range, local_port, set);
+			    table_type, range, local_port, bm_set);
 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
 	if (err)
 		goto err_flood_bm_set;
-	else
-		goto buffer_out;
+
+	goto buffer_out;
 
 err_flood_bm_set:
 	mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, idx_begin,
-			    table_type, range, local_port, !set);
+			    table_type, range, local_port, !uc_set);
 	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
 buffer_out:
 	kfree(sftr_pl);
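
After the rename, __mlxsw_sp_port_flood_set() always programs both the unicast (UC) and broadcast/multicast (BM) flood tables, and a failed second write undoes the first by writing the inverted value back. A mocked sketch of that two-write-with-rollback shape:

	#include <stdio.h>

	static int write_table(const char *table, int set, int fail)
	{
		if (fail)
			return -1;
		printf("%s <- %d\n", table, set);
		return 0;
	}

	static int flood_set(int uc_set, int bm_set, int fail_bm)
	{
		int err;

		err = write_table("UC", uc_set, 0);
		if (err)
			return err;

		err = write_table("BM", bm_set, fail_bm);
		if (err)
			write_table("UC", !uc_set, 0);	/* roll back the first write */

		return err;
	}

	int main(void)
	{
		if (flood_set(1, 1, 1))
			puts("BM write failed, UC rolled back");
		return 0;
	}
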
@@ -257,8 +251,7 @@ int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
 	 * the start of the vFIDs range.
 	 */
 	vfid = mlxsw_sp_fid_to_vfid(fid);
-	return __mlxsw_sp_port_flood_set(mlxsw_sp_vport, vfid, vfid, set,
-					 false);
+	return __mlxsw_sp_port_flood_set(mlxsw_sp_vport, vfid, vfid, set, set);
 }
 
 static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
@@ -450,6 +443,8 @@ void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *f)
 
 	kfree(f);
 
+	mlxsw_sp_fid_map(mlxsw_sp, fid, false);
+
 	mlxsw_sp_fid_op(mlxsw_sp, fid, false);
 }
 
@@ -458,6 +453,9 @@ static int __mlxsw_sp_port_fid_join(struct mlxsw_sp_port *mlxsw_sp_port,
 {
 	struct mlxsw_sp_fid *f;
 
+	if (test_bit(fid, mlxsw_sp_port->active_vlans))
+		return 0;
+
 	f = mlxsw_sp_fid_find(mlxsw_sp_port->mlxsw_sp, fid);
 	if (!f) {
 		f = mlxsw_sp_fid_create(mlxsw_sp_port->mlxsw_sp, fid);
@@ -515,7 +513,7 @@ static int mlxsw_sp_port_fid_join(struct mlxsw_sp_port *mlxsw_sp_port,
515 } 513 }
516 514
517 err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end, 515 err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end,
518 true, false); 516 mlxsw_sp_port->uc_flood, true);
519 if (err) 517 if (err)
520 goto err_port_flood_set; 518 goto err_port_flood_set;
521 519
@@ -997,13 +995,13 @@ static int mlxsw_sp_port_obj_add(struct net_device *dev,
997} 995}
998 996
999static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port, 997static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
1000 u16 vid_begin, u16 vid_end, bool init) 998 u16 vid_begin, u16 vid_end)
1001{ 999{
1002 struct net_device *dev = mlxsw_sp_port->dev; 1000 struct net_device *dev = mlxsw_sp_port->dev;
1003 u16 vid, pvid; 1001 u16 vid, pvid;
1004 int err; 1002 int err;
1005 1003
1006 if (!init && !mlxsw_sp_port->bridged) 1004 if (!mlxsw_sp_port->bridged)
1007 return -EINVAL; 1005 return -EINVAL;
1008 1006
1009 err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end, 1007 err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end,
@@ -1014,9 +1012,6 @@ static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
1014 return err; 1012 return err;
1015 } 1013 }
1016 1014
1017 if (init)
1018 goto out;
1019
1020 pvid = mlxsw_sp_port->pvid; 1015 pvid = mlxsw_sp_port->pvid;
1021 if (pvid >= vid_begin && pvid <= vid_end) { 1016 if (pvid >= vid_begin && pvid <= vid_end) {
1022 err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0); 1017 err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0);
@@ -1028,7 +1023,6 @@ static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
1028 1023
1029 mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid_begin, vid_end); 1024 mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid_begin, vid_end);
1030 1025
1031out:
1032	/* Changing activity bits only if HW operation succeeded */ 1026	/* Changing activity bits only if HW operation succeeded */
1033 for (vid = vid_begin; vid <= vid_end; vid++) 1027 for (vid = vid_begin; vid <= vid_end; vid++)
1034 clear_bit(vid, mlxsw_sp_port->active_vlans); 1028 clear_bit(vid, mlxsw_sp_port->active_vlans);
@@ -1039,8 +1033,8 @@ out:
1039static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port, 1033static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
1040 const struct switchdev_obj_port_vlan *vlan) 1034 const struct switchdev_obj_port_vlan *vlan)
1041{ 1035{
1042 return __mlxsw_sp_port_vlans_del(mlxsw_sp_port, 1036 return __mlxsw_sp_port_vlans_del(mlxsw_sp_port, vlan->vid_begin,
1043 vlan->vid_begin, vlan->vid_end, false); 1037 vlan->vid_end);
1044} 1038}
1045 1039
1046void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port) 1040void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port)
@@ -1048,7 +1042,7 @@ void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port)
1048 u16 vid; 1042 u16 vid;
1049 1043
1050 for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) 1044 for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
1051 __mlxsw_sp_port_vlans_del(mlxsw_sp_port, vid, vid, false); 1045 __mlxsw_sp_port_vlans_del(mlxsw_sp_port, vid, vid);
1052} 1046}
1053 1047
1054static int 1048static int
@@ -1546,32 +1540,6 @@ void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
1546 mlxsw_sp_fdb_fini(mlxsw_sp); 1540 mlxsw_sp_fdb_fini(mlxsw_sp);
1547} 1541}
1548 1542
1549int mlxsw_sp_port_vlan_init(struct mlxsw_sp_port *mlxsw_sp_port)
1550{
1551 struct net_device *dev = mlxsw_sp_port->dev;
1552 int err;
1553
1554 /* Allow only untagged packets to ingress and tag them internally
1555 * with VID 1.
1556 */
1557 mlxsw_sp_port->pvid = 1;
1558 err = __mlxsw_sp_port_vlans_del(mlxsw_sp_port, 0, VLAN_N_VID - 1,
1559 true);
1560 if (err) {
1561 netdev_err(dev, "Unable to init VLANs\n");
1562 return err;
1563 }
1564
1565 /* Add implicit VLAN interface in the device, so that untagged
1566 * packets will be classified to the default vFID.
1567 */
1568 err = mlxsw_sp_port_add_vid(dev, 0, 1);
1569 if (err)
1570 netdev_err(dev, "Failed to configure default vFID\n");
1571
1572 return err;
1573}
1574
1575void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port) 1543void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port)
1576{ 1544{
1577 mlxsw_sp_port->dev->switchdev_ops = &mlxsw_sp_port_switchdev_ops; 1545 mlxsw_sp_port->dev->switchdev_ops = &mlxsw_sp_port_switchdev_ops;
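The reworked __mlxsw_sp_port_flood_set() above keeps the driver's unwind convention: the unicast (UC) flood table is written first, and if the subsequent broadcast/multicast (BM) write fails, the UC entry is re-written with the inverted value so both tables stay consistent. A minimal sketch of that write-then-rollback pattern, with hypothetical register writers standing in for mlxsw_reg_write():

	#include <stdbool.h>

	/* Hypothetical accessors; not the mlxsw API. */
	int write_uc_flood(int port, bool set);
	int write_bm_flood(int port, bool set);

	static int port_flood_set(int port, bool uc_set, bool bm_set)
	{
		int err;

		err = write_uc_flood(port, uc_set);
		if (err)
			return err;

		err = write_bm_flood(port, bm_set);
		if (err)
			/* Undo the first write; best effort, as in the driver. */
			write_uc_flood(port, !uc_set);

		return err;
	}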
diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h
index 470d7696e9fe..ed8e30186400 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/trap.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h
@@ -56,6 +56,10 @@ enum {
56 MLXSW_TRAP_ID_IGMP_V3_REPORT = 0x34, 56 MLXSW_TRAP_ID_IGMP_V3_REPORT = 0x34,
57 MLXSW_TRAP_ID_ARPBC = 0x50, 57 MLXSW_TRAP_ID_ARPBC = 0x50,
58 MLXSW_TRAP_ID_ARPUC = 0x51, 58 MLXSW_TRAP_ID_ARPUC = 0x51,
59 MLXSW_TRAP_ID_MTUERROR = 0x52,
60 MLXSW_TRAP_ID_TTLERROR = 0x53,
61 MLXSW_TRAP_ID_LBERROR = 0x54,
62 MLXSW_TRAP_ID_OSPF = 0x55,
59 MLXSW_TRAP_ID_IP2ME = 0x5F, 63 MLXSW_TRAP_ID_IP2ME = 0x5F,
60 MLXSW_TRAP_ID_RTR_INGRESS0 = 0x70, 64 MLXSW_TRAP_ID_RTR_INGRESS0 = 0x70,
61 MLXSW_TRAP_ID_HOST_MISS_IPV4 = 0x90, 65 MLXSW_TRAP_ID_HOST_MISS_IPV4 = 0x90,
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 88678c172b19..252e4924de0f 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -41,7 +41,6 @@
41 * Chris Telfer <chris.telfer@netronome.com> 41 * Chris Telfer <chris.telfer@netronome.com>
42 */ 42 */
43 43
44#include <linux/version.h>
45#include <linux/module.h> 44#include <linux/module.h>
46#include <linux/kernel.h> 45#include <linux/kernel.h>
47#include <linux/init.h> 46#include <linux/init.h>
@@ -1441,10 +1440,6 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
1441 1440
1442 nfp_net_set_hash(nn->netdev, skb, rxd); 1441 nfp_net_set_hash(nn->netdev, skb, rxd);
1443 1442
1444 /* Pad small frames to minimum */
1445 if (skb_put_padto(skb, 60))
1446 break;
1447
1448 /* Stats update */ 1443 /* Stats update */
1449 u64_stats_update_begin(&r_vec->rx_sync); 1444 u64_stats_update_begin(&r_vec->rx_sync);
1450 r_vec->rx_pkts++; 1445 r_vec->rx_pkts++;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index 7d7933d00b8f..4c9897220969 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -40,7 +40,6 @@
40 * Brad Petrus <brad.petrus@netronome.com> 40 * Brad Petrus <brad.petrus@netronome.com>
41 */ 41 */
42 42
43#include <linux/version.h>
44#include <linux/kernel.h> 43#include <linux/kernel.h>
45#include <linux/netdevice.h> 44#include <linux/netdevice.h>
46#include <linux/etherdevice.h> 45#include <linux/etherdevice.h>
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
index 37abef016a0a..f7062cb648e1 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
@@ -38,7 +38,6 @@
38 * Rolf Neugebauer <rolf.neugebauer@netronome.com> 38 * Rolf Neugebauer <rolf.neugebauer@netronome.com>
39 */ 39 */
40 40
41#include <linux/version.h>
42#include <linux/module.h> 41#include <linux/module.h>
43#include <linux/kernel.h> 42#include <linux/kernel.h>
44#include <linux/init.h> 43#include <linux/init.h>
@@ -134,7 +133,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
134 } 133 }
135 134
136 nfp_net_get_fw_version(&fw_ver, ctrl_bar); 135 nfp_net_get_fw_version(&fw_ver, ctrl_bar);
137 if (fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) { 136 if (fw_ver.resv || fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) {
138 dev_err(&pdev->dev, "Unknown Firmware ABI %d.%d.%d.%d\n", 137 dev_err(&pdev->dev, "Unknown Firmware ABI %d.%d.%d.%d\n",
139 fw_ver.resv, fw_ver.class, fw_ver.major, fw_ver.minor); 138 fw_ver.resv, fw_ver.class, fw_ver.major, fw_ver.minor);
140 err = -EINVAL; 139 err = -EINVAL;
@@ -142,9 +141,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
142 } 141 }
143 142
144 /* Determine stride */ 143 /* Determine stride */
145 if (nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0, 0) || 144 if (nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0, 1)) {
146 nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0, 1) ||
147 nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0x12, 0x48)) {
148 stride = 2; 145 stride = 2;
149 tx_bar_no = NFP_NET_Q0_BAR; 146 tx_bar_no = NFP_NET_Q0_BAR;
150 rx_bar_no = NFP_NET_Q1_BAR; 147 rx_bar_no = NFP_NET_Q1_BAR;
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index 4d4ecba0aad9..8e13ec84c538 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -475,14 +475,6 @@ static void __lpc_get_mac(struct netdata_local *pldat, u8 *mac)
475 mac[5] = tmp >> 8; 475 mac[5] = tmp >> 8;
476} 476}
477 477
478static void __lpc_eth_clock_enable(struct netdata_local *pldat, bool enable)
479{
480 if (enable)
481 clk_prepare_enable(pldat->clk);
482 else
483 clk_disable_unprepare(pldat->clk);
484}
485
486static void __lpc_params_setup(struct netdata_local *pldat) 478static void __lpc_params_setup(struct netdata_local *pldat)
487{ 479{
488 u32 tmp; 480 u32 tmp;
@@ -1056,7 +1048,7 @@ static int lpc_eth_close(struct net_device *ndev)
1056 writel(0, LPC_ENET_MAC2(pldat->net_base)); 1048 writel(0, LPC_ENET_MAC2(pldat->net_base));
1057 spin_unlock_irqrestore(&pldat->lock, flags); 1049 spin_unlock_irqrestore(&pldat->lock, flags);
1058 1050
1059 __lpc_eth_clock_enable(pldat, false); 1051 clk_disable_unprepare(pldat->clk);
1060 1052
1061 return 0; 1053 return 0;
1062} 1054}
@@ -1197,11 +1189,14 @@ static int lpc_eth_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
1197static int lpc_eth_open(struct net_device *ndev) 1189static int lpc_eth_open(struct net_device *ndev)
1198{ 1190{
1199 struct netdata_local *pldat = netdev_priv(ndev); 1191 struct netdata_local *pldat = netdev_priv(ndev);
1192 int ret;
1200 1193
1201 if (netif_msg_ifup(pldat)) 1194 if (netif_msg_ifup(pldat))
1202 dev_dbg(&pldat->pdev->dev, "enabling %s\n", ndev->name); 1195 dev_dbg(&pldat->pdev->dev, "enabling %s\n", ndev->name);
1203 1196
1204 __lpc_eth_clock_enable(pldat, true); 1197 ret = clk_prepare_enable(pldat->clk);
1198 if (ret)
1199 return ret;
1205 1200
1206 /* Suspended PHY makes LPC ethernet core block, so resume now */ 1201 /* Suspended PHY makes LPC ethernet core block, so resume now */
1207 phy_resume(ndev->phydev); 1202 phy_resume(ndev->phydev);
@@ -1320,7 +1315,9 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
1320 } 1315 }
1321 1316
1322 /* Enable network clock */ 1317 /* Enable network clock */
1323 __lpc_eth_clock_enable(pldat, true); 1318 ret = clk_prepare_enable(pldat->clk);
1319 if (ret)
1320 goto err_out_clk_put;
1324 1321
1325 /* Map IO space */ 1322 /* Map IO space */
1326 pldat->net_base = ioremap(res->start, resource_size(res)); 1323 pldat->net_base = ioremap(res->start, resource_size(res));
@@ -1454,6 +1451,7 @@ err_out_iounmap:
1454 iounmap(pldat->net_base); 1451 iounmap(pldat->net_base);
1455err_out_disable_clocks: 1452err_out_disable_clocks:
1456 clk_disable_unprepare(pldat->clk); 1453 clk_disable_unprepare(pldat->clk);
1454err_out_clk_put:
1457 clk_put(pldat->clk); 1455 clk_put(pldat->clk);
1458err_out_free_dev: 1456err_out_free_dev:
1459 free_netdev(ndev); 1457 free_netdev(ndev);
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index 35e53771533f..45ab74676573 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -561,9 +561,18 @@ struct qed_dev {
561static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev, 561static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev,
562 u32 concrete_fid) 562 u32 concrete_fid)
563{ 563{
564 u8 vfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID);
564 u8 pfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID); 565 u8 pfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID);
566 u8 vf_valid = GET_FIELD(concrete_fid,
567 PXP_CONCRETE_FID_VFVALID);
568 u8 sw_fid;
565 569
566 return pfid; 570 if (vf_valid)
571 sw_fid = vfid + MAX_NUM_PFS;
572 else
573 sw_fid = pfid;
574
575 return sw_fid;
567} 576}
568 577
569#define PURE_LB_TC 8 578#define PURE_LB_TC 8
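qed_concrete_to_sw_fid() now places VFs after all PFs in the software FID space instead of always returning the PF index. A rough standalone illustration of the mask/shift extraction GET_FIELD performs and the resulting mapping; the field layout below is invented for the example, not the real PXP_CONCRETE_FID_* encoding:

	#include <stdint.h>

	#define FIELD_GET(val, mask, shift)	(((val) & (mask)) >> (shift))

	/* Invented layout: pfid bits 0-3, vfid bits 4-11, vf_valid bit 12. */
	#define EX_PFID_MASK		0x000fu
	#define EX_PFID_SHIFT		0
	#define EX_VFID_MASK		0x0ff0u
	#define EX_VFID_SHIFT		4
	#define EX_VFVALID_MASK		0x1000u
	#define EX_VFVALID_SHIFT	12
	#define EX_MAX_NUM_PFS		16

	static uint8_t concrete_to_sw_fid(uint32_t concrete)
	{
		uint8_t pfid = FIELD_GET(concrete, EX_PFID_MASK, EX_PFID_SHIFT);
		uint8_t vfid = FIELD_GET(concrete, EX_VFID_MASK, EX_VFID_SHIFT);

		/* VFs occupy the range just past the PFs. */
		if (FIELD_GET(concrete, EX_VFVALID_MASK, EX_VFVALID_SHIFT))
			return vfid + EX_MAX_NUM_PFS;
		return pfid;
	}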
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
index d0dc28f93c0e..3656d2fd673d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -19,6 +19,7 @@
19#include "qed_dcbx.h" 19#include "qed_dcbx.h"
20#include "qed_hsi.h" 20#include "qed_hsi.h"
21#include "qed_sp.h" 21#include "qed_sp.h"
22#include "qed_sriov.h"
22#ifdef CONFIG_DCB 23#ifdef CONFIG_DCB
23#include <linux/qed/qed_eth_if.h> 24#include <linux/qed/qed_eth_if.h>
24#endif 25#endif
@@ -52,40 +53,94 @@ static bool qed_dcbx_app_ethtype(u32 app_info_bitmap)
52 DCBX_APP_SF_ETHTYPE); 53 DCBX_APP_SF_ETHTYPE);
53} 54}
54 55
56static bool qed_dcbx_ieee_app_ethtype(u32 app_info_bitmap)
57{
58 u8 mfw_val = QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF_IEEE);
59
60 /* Old MFW */
61 if (mfw_val == DCBX_APP_SF_IEEE_RESERVED)
62 return qed_dcbx_app_ethtype(app_info_bitmap);
63
64 return !!(mfw_val == DCBX_APP_SF_IEEE_ETHTYPE);
65}
66
55static bool qed_dcbx_app_port(u32 app_info_bitmap) 67static bool qed_dcbx_app_port(u32 app_info_bitmap)
56{ 68{
57 return !!(QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF) == 69 return !!(QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF) ==
58 DCBX_APP_SF_PORT); 70 DCBX_APP_SF_PORT);
59} 71}
60 72
61static bool qed_dcbx_default_tlv(u32 app_info_bitmap, u16 proto_id) 73static bool qed_dcbx_ieee_app_port(u32 app_info_bitmap, u8 type)
62{ 74{
63 return !!(qed_dcbx_app_ethtype(app_info_bitmap) && 75 u8 mfw_val = QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF_IEEE);
64 proto_id == QED_ETH_TYPE_DEFAULT); 76
77 /* Old MFW */
78 if (mfw_val == DCBX_APP_SF_IEEE_RESERVED)
79 return qed_dcbx_app_port(app_info_bitmap);
80
81 return !!(mfw_val == type || mfw_val == DCBX_APP_SF_IEEE_TCP_UDP_PORT);
65} 82}
66 83
67static bool qed_dcbx_iscsi_tlv(u32 app_info_bitmap, u16 proto_id) 84static bool qed_dcbx_default_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee)
68{ 85{
69 return !!(qed_dcbx_app_port(app_info_bitmap) && 86 bool ethtype;
70 proto_id == QED_TCP_PORT_ISCSI); 87
88 if (ieee)
89 ethtype = qed_dcbx_ieee_app_ethtype(app_info_bitmap);
90 else
91 ethtype = qed_dcbx_app_ethtype(app_info_bitmap);
92
93 return !!(ethtype && (proto_id == QED_ETH_TYPE_DEFAULT));
71} 94}
72 95
73static bool qed_dcbx_fcoe_tlv(u32 app_info_bitmap, u16 proto_id) 96static bool qed_dcbx_iscsi_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee)
74{ 97{
75 return !!(qed_dcbx_app_ethtype(app_info_bitmap) && 98 bool port;
76 proto_id == QED_ETH_TYPE_FCOE); 99
100 if (ieee)
101 port = qed_dcbx_ieee_app_port(app_info_bitmap,
102 DCBX_APP_SF_IEEE_TCP_PORT);
103 else
104 port = qed_dcbx_app_port(app_info_bitmap);
105
106 return !!(port && (proto_id == QED_TCP_PORT_ISCSI));
77} 107}
78 108
79static bool qed_dcbx_roce_tlv(u32 app_info_bitmap, u16 proto_id) 109static bool qed_dcbx_fcoe_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee)
80{ 110{
81 return !!(qed_dcbx_app_ethtype(app_info_bitmap) && 111 bool ethtype;
82 proto_id == QED_ETH_TYPE_ROCE); 112
113 if (ieee)
114 ethtype = qed_dcbx_ieee_app_ethtype(app_info_bitmap);
115 else
116 ethtype = qed_dcbx_app_ethtype(app_info_bitmap);
117
118 return !!(ethtype && (proto_id == QED_ETH_TYPE_FCOE));
83} 119}
84 120
85static bool qed_dcbx_roce_v2_tlv(u32 app_info_bitmap, u16 proto_id) 121static bool qed_dcbx_roce_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee)
86{ 122{
87 return !!(qed_dcbx_app_port(app_info_bitmap) && 123 bool ethtype;
88 proto_id == QED_UDP_PORT_TYPE_ROCE_V2); 124
125 if (ieee)
126 ethtype = qed_dcbx_ieee_app_ethtype(app_info_bitmap);
127 else
128 ethtype = qed_dcbx_app_ethtype(app_info_bitmap);
129
130 return !!(ethtype && (proto_id == QED_ETH_TYPE_ROCE));
131}
132
133static bool qed_dcbx_roce_v2_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee)
134{
135 bool port;
136
137 if (ieee)
138 port = qed_dcbx_ieee_app_port(app_info_bitmap,
139 DCBX_APP_SF_IEEE_UDP_PORT);
140 else
141 port = qed_dcbx_app_port(app_info_bitmap);
142
143 return !!(port && (proto_id == QED_UDP_PORT_TYPE_ROCE_V2));
89} 144}
90 145
91static void 146static void
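Each of the *_tlv() helpers above now takes an ieee flag and, when the firmware predates the 4-bit IEEE selector (the field still reads as DCBX_APP_SF_IEEE_RESERVED), falls back to the legacy CEE selector. The fallback shape in isolation, with placeholder selector helpers:

	#include <stdbool.h>
	#include <stdint.h>

	enum sel { SEL_RESERVED = 0, SEL_ETHTYPE = 1 };

	bool legacy_is_ethtype(uint32_t bitmap);	/* hypothetical: old 1-bit selector */
	uint8_t ieee_selector(uint32_t bitmap);		/* hypothetical: new 4-bit selector */

	static bool app_is_ethtype(uint32_t bitmap, bool ieee)
	{
		if (!ieee)
			return legacy_is_ethtype(bitmap);

		/* Old MFW leaves the IEEE selector at its reserved value;
		 * interpret the entry the legacy way in that case.
		 */
		if (ieee_selector(bitmap) == SEL_RESERVED)
			return legacy_is_ethtype(bitmap);

		return ieee_selector(bitmap) == SEL_ETHTYPE;
	}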
@@ -164,17 +219,17 @@ qed_dcbx_update_app_info(struct qed_dcbx_results *p_data,
164static bool 219static bool
165qed_dcbx_get_app_protocol_type(struct qed_hwfn *p_hwfn, 220qed_dcbx_get_app_protocol_type(struct qed_hwfn *p_hwfn,
166 u32 app_prio_bitmap, 221 u32 app_prio_bitmap,
167 u16 id, enum dcbx_protocol_type *type) 222 u16 id, enum dcbx_protocol_type *type, bool ieee)
168{ 223{
169 if (qed_dcbx_fcoe_tlv(app_prio_bitmap, id)) { 224 if (qed_dcbx_fcoe_tlv(app_prio_bitmap, id, ieee)) {
170 *type = DCBX_PROTOCOL_FCOE; 225 *type = DCBX_PROTOCOL_FCOE;
171 } else if (qed_dcbx_roce_tlv(app_prio_bitmap, id)) { 226 } else if (qed_dcbx_roce_tlv(app_prio_bitmap, id, ieee)) {
172 *type = DCBX_PROTOCOL_ROCE; 227 *type = DCBX_PROTOCOL_ROCE;
173 } else if (qed_dcbx_iscsi_tlv(app_prio_bitmap, id)) { 228 } else if (qed_dcbx_iscsi_tlv(app_prio_bitmap, id, ieee)) {
174 *type = DCBX_PROTOCOL_ISCSI; 229 *type = DCBX_PROTOCOL_ISCSI;
175 } else if (qed_dcbx_default_tlv(app_prio_bitmap, id)) { 230 } else if (qed_dcbx_default_tlv(app_prio_bitmap, id, ieee)) {
176 *type = DCBX_PROTOCOL_ETH; 231 *type = DCBX_PROTOCOL_ETH;
177 } else if (qed_dcbx_roce_v2_tlv(app_prio_bitmap, id)) { 232 } else if (qed_dcbx_roce_v2_tlv(app_prio_bitmap, id, ieee)) {
178 *type = DCBX_PROTOCOL_ROCE_V2; 233 *type = DCBX_PROTOCOL_ROCE_V2;
179 } else { 234 } else {
180 *type = DCBX_MAX_PROTOCOL_TYPE; 235 *type = DCBX_MAX_PROTOCOL_TYPE;
@@ -194,17 +249,18 @@ static int
194qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, 249qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
195 struct qed_dcbx_results *p_data, 250 struct qed_dcbx_results *p_data,
196 struct dcbx_app_priority_entry *p_tbl, 251 struct dcbx_app_priority_entry *p_tbl,
197 u32 pri_tc_tbl, int count, bool dcbx_enabled) 252 u32 pri_tc_tbl, int count, u8 dcbx_version)
198{ 253{
199 u8 tc, priority_map; 254 u8 tc, priority_map;
200 enum dcbx_protocol_type type; 255 enum dcbx_protocol_type type;
256 bool enable, ieee;
201 u16 protocol_id; 257 u16 protocol_id;
202 int priority; 258 int priority;
203 bool enable;
204 int i; 259 int i;
205 260
206 DP_VERBOSE(p_hwfn, QED_MSG_DCB, "Num APP entries = %d\n", count); 261 DP_VERBOSE(p_hwfn, QED_MSG_DCB, "Num APP entries = %d\n", count);
207 262
263 ieee = (dcbx_version == DCBX_CONFIG_VERSION_IEEE);
208 /* Parse APP TLV */ 264 /* Parse APP TLV */
209 for (i = 0; i < count; i++) { 265 for (i = 0; i < count; i++) {
210 protocol_id = QED_MFW_GET_FIELD(p_tbl[i].entry, 266 protocol_id = QED_MFW_GET_FIELD(p_tbl[i].entry,
@@ -219,7 +275,7 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
219 275
220 tc = QED_DCBX_PRIO2TC(pri_tc_tbl, priority); 276 tc = QED_DCBX_PRIO2TC(pri_tc_tbl, priority);
221 if (qed_dcbx_get_app_protocol_type(p_hwfn, p_tbl[i].entry, 277 if (qed_dcbx_get_app_protocol_type(p_hwfn, p_tbl[i].entry,
222 protocol_id, &type)) { 278 protocol_id, &type, ieee)) {
223			/* ETH always has the enable bit reset, as it gets 279			/* ETH always has the enable bit reset, as it gets
224 * vlan information per packet. For other protocols, 280 * vlan information per packet. For other protocols,
225 * should be set according to the dcbx_enabled 281 * should be set according to the dcbx_enabled
@@ -275,15 +331,12 @@ static int qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn)
275 struct dcbx_ets_feature *p_ets; 331 struct dcbx_ets_feature *p_ets;
276 struct qed_hw_info *p_info; 332 struct qed_hw_info *p_info;
277 u32 pri_tc_tbl, flags; 333 u32 pri_tc_tbl, flags;
278 bool dcbx_enabled; 334 u8 dcbx_version;
279 int num_entries; 335 int num_entries;
280 int rc = 0; 336 int rc = 0;
281 337
282	/* If DCBx version is non-zero, then negotiation was
283	 * successfully performed
284	 */
285 flags = p_hwfn->p_dcbx_info->operational.flags; 338 flags = p_hwfn->p_dcbx_info->operational.flags;
286 dcbx_enabled = !!QED_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION); 339 dcbx_version = QED_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION);
287 340
288 p_app = &p_hwfn->p_dcbx_info->operational.features.app; 341 p_app = &p_hwfn->p_dcbx_info->operational.features.app;
289 p_tbl = p_app->app_pri_tbl; 342 p_tbl = p_app->app_pri_tbl;
@@ -295,13 +348,13 @@ static int qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn)
295 num_entries = QED_MFW_GET_FIELD(p_app->flags, DCBX_APP_NUM_ENTRIES); 348 num_entries = QED_MFW_GET_FIELD(p_app->flags, DCBX_APP_NUM_ENTRIES);
296 349
297 rc = qed_dcbx_process_tlv(p_hwfn, &data, p_tbl, pri_tc_tbl, 350 rc = qed_dcbx_process_tlv(p_hwfn, &data, p_tbl, pri_tc_tbl,
298 num_entries, dcbx_enabled); 351 num_entries, dcbx_version);
299 if (rc) 352 if (rc)
300 return rc; 353 return rc;
301 354
302 p_info->num_tc = QED_MFW_GET_FIELD(p_ets->flags, DCBX_ETS_MAX_TCS); 355 p_info->num_tc = QED_MFW_GET_FIELD(p_ets->flags, DCBX_ETS_MAX_TCS);
303 data.pf_id = p_hwfn->rel_pf_id; 356 data.pf_id = p_hwfn->rel_pf_id;
304 data.dcbx_enabled = dcbx_enabled; 357 data.dcbx_enabled = !!dcbx_version;
305 358
306 qed_dcbx_dp_protocol(p_hwfn, &data); 359 qed_dcbx_dp_protocol(p_hwfn, &data);
307 360
@@ -400,7 +453,7 @@ static void
400qed_dcbx_get_app_data(struct qed_hwfn *p_hwfn, 453qed_dcbx_get_app_data(struct qed_hwfn *p_hwfn,
401 struct dcbx_app_priority_feature *p_app, 454 struct dcbx_app_priority_feature *p_app,
402 struct dcbx_app_priority_entry *p_tbl, 455 struct dcbx_app_priority_entry *p_tbl,
403 struct qed_dcbx_params *p_params) 456 struct qed_dcbx_params *p_params, bool ieee)
404{ 457{
405 struct qed_app_entry *entry; 458 struct qed_app_entry *entry;
406 u8 pri_map; 459 u8 pri_map;
@@ -414,15 +467,46 @@ qed_dcbx_get_app_data(struct qed_hwfn *p_hwfn,
414 DCBX_APP_NUM_ENTRIES); 467 DCBX_APP_NUM_ENTRIES);
415 for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) { 468 for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) {
416 entry = &p_params->app_entry[i]; 469 entry = &p_params->app_entry[i];
417 entry->ethtype = !(QED_MFW_GET_FIELD(p_tbl[i].entry, 470 if (ieee) {
418 DCBX_APP_SF)); 471 u8 sf_ieee;
472 u32 val;
473
474 sf_ieee = QED_MFW_GET_FIELD(p_tbl[i].entry,
475 DCBX_APP_SF_IEEE);
476 switch (sf_ieee) {
477 case DCBX_APP_SF_IEEE_RESERVED:
478 /* Old MFW */
479 val = QED_MFW_GET_FIELD(p_tbl[i].entry,
480 DCBX_APP_SF);
481 entry->sf_ieee = val ?
482 QED_DCBX_SF_IEEE_TCP_UDP_PORT :
483 QED_DCBX_SF_IEEE_ETHTYPE;
484 break;
485 case DCBX_APP_SF_IEEE_ETHTYPE:
486 entry->sf_ieee = QED_DCBX_SF_IEEE_ETHTYPE;
487 break;
488 case DCBX_APP_SF_IEEE_TCP_PORT:
489 entry->sf_ieee = QED_DCBX_SF_IEEE_TCP_PORT;
490 break;
491 case DCBX_APP_SF_IEEE_UDP_PORT:
492 entry->sf_ieee = QED_DCBX_SF_IEEE_UDP_PORT;
493 break;
494 case DCBX_APP_SF_IEEE_TCP_UDP_PORT:
495 entry->sf_ieee = QED_DCBX_SF_IEEE_TCP_UDP_PORT;
496 break;
497 }
498 } else {
499 entry->ethtype = !(QED_MFW_GET_FIELD(p_tbl[i].entry,
500 DCBX_APP_SF));
501 }
502
419 pri_map = QED_MFW_GET_FIELD(p_tbl[i].entry, DCBX_APP_PRI_MAP); 503 pri_map = QED_MFW_GET_FIELD(p_tbl[i].entry, DCBX_APP_PRI_MAP);
420 entry->prio = ffs(pri_map) - 1; 504 entry->prio = ffs(pri_map) - 1;
421 entry->proto_id = QED_MFW_GET_FIELD(p_tbl[i].entry, 505 entry->proto_id = QED_MFW_GET_FIELD(p_tbl[i].entry,
422 DCBX_APP_PROTOCOL_ID); 506 DCBX_APP_PROTOCOL_ID);
423 qed_dcbx_get_app_protocol_type(p_hwfn, p_tbl[i].entry, 507 qed_dcbx_get_app_protocol_type(p_hwfn, p_tbl[i].entry,
424 entry->proto_id, 508 entry->proto_id,
425 &entry->proto_type); 509 &entry->proto_type, ieee);
426 } 510 }
427 511
428 DP_VERBOSE(p_hwfn, QED_MSG_DCB, 512 DP_VERBOSE(p_hwfn, QED_MSG_DCB,
@@ -483,7 +567,7 @@ qed_dcbx_get_ets_data(struct qed_hwfn *p_hwfn,
483 bw_map[1] = be32_to_cpu(p_ets->tc_bw_tbl[1]); 567 bw_map[1] = be32_to_cpu(p_ets->tc_bw_tbl[1]);
484 tsa_map[0] = be32_to_cpu(p_ets->tc_tsa_tbl[0]); 568 tsa_map[0] = be32_to_cpu(p_ets->tc_tsa_tbl[0]);
485 tsa_map[1] = be32_to_cpu(p_ets->tc_tsa_tbl[1]); 569 tsa_map[1] = be32_to_cpu(p_ets->tc_tsa_tbl[1]);
486 pri_map = be32_to_cpu(p_ets->pri_tc_tbl[0]); 570 pri_map = p_ets->pri_tc_tbl[0];
487 for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) { 571 for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) {
488 p_params->ets_tc_bw_tbl[i] = ((u8 *)bw_map)[i]; 572 p_params->ets_tc_bw_tbl[i] = ((u8 *)bw_map)[i];
489 p_params->ets_tc_tsa_tbl[i] = ((u8 *)tsa_map)[i]; 573 p_params->ets_tc_tsa_tbl[i] = ((u8 *)tsa_map)[i];
@@ -500,9 +584,9 @@ qed_dcbx_get_common_params(struct qed_hwfn *p_hwfn,
500 struct dcbx_app_priority_feature *p_app, 584 struct dcbx_app_priority_feature *p_app,
501 struct dcbx_app_priority_entry *p_tbl, 585 struct dcbx_app_priority_entry *p_tbl,
502 struct dcbx_ets_feature *p_ets, 586 struct dcbx_ets_feature *p_ets,
503 u32 pfc, struct qed_dcbx_params *p_params) 587 u32 pfc, struct qed_dcbx_params *p_params, bool ieee)
504{ 588{
505 qed_dcbx_get_app_data(p_hwfn, p_app, p_tbl, p_params); 589 qed_dcbx_get_app_data(p_hwfn, p_app, p_tbl, p_params, ieee);
506 qed_dcbx_get_ets_data(p_hwfn, p_ets, p_params); 590 qed_dcbx_get_ets_data(p_hwfn, p_ets, p_params);
507 qed_dcbx_get_pfc_data(p_hwfn, pfc, p_params); 591 qed_dcbx_get_pfc_data(p_hwfn, pfc, p_params);
508} 592}
@@ -516,7 +600,7 @@ qed_dcbx_get_local_params(struct qed_hwfn *p_hwfn,
516 p_feat = &p_hwfn->p_dcbx_info->local_admin.features; 600 p_feat = &p_hwfn->p_dcbx_info->local_admin.features;
517 qed_dcbx_get_common_params(p_hwfn, &p_feat->app, 601 qed_dcbx_get_common_params(p_hwfn, &p_feat->app,
518 p_feat->app.app_pri_tbl, &p_feat->ets, 602 p_feat->app.app_pri_tbl, &p_feat->ets,
519 p_feat->pfc, &params->local.params); 603 p_feat->pfc, &params->local.params, false);
520 params->local.valid = true; 604 params->local.valid = true;
521} 605}
522 606
@@ -529,7 +613,7 @@ qed_dcbx_get_remote_params(struct qed_hwfn *p_hwfn,
529 p_feat = &p_hwfn->p_dcbx_info->remote.features; 613 p_feat = &p_hwfn->p_dcbx_info->remote.features;
530 qed_dcbx_get_common_params(p_hwfn, &p_feat->app, 614 qed_dcbx_get_common_params(p_hwfn, &p_feat->app,
531 p_feat->app.app_pri_tbl, &p_feat->ets, 615 p_feat->app.app_pri_tbl, &p_feat->ets,
532 p_feat->pfc, &params->remote.params); 616 p_feat->pfc, &params->remote.params, false);
533 params->remote.valid = true; 617 params->remote.valid = true;
534} 618}
535 619
@@ -574,7 +658,8 @@ qed_dcbx_get_operational_params(struct qed_hwfn *p_hwfn,
574 658
575 qed_dcbx_get_common_params(p_hwfn, &p_feat->app, 659 qed_dcbx_get_common_params(p_hwfn, &p_feat->app,
576 p_feat->app.app_pri_tbl, &p_feat->ets, 660 p_feat->app.app_pri_tbl, &p_feat->ets,
577 p_feat->pfc, &params->operational.params); 661 p_feat->pfc, &params->operational.params,
662 p_operational->ieee);
578 qed_dcbx_get_priority_info(p_hwfn, &p_operational->app_prio, p_results); 663 qed_dcbx_get_priority_info(p_hwfn, &p_operational->app_prio, p_results);
579 err = QED_MFW_GET_FIELD(p_feat->app.flags, DCBX_APP_ERROR); 664 err = QED_MFW_GET_FIELD(p_feat->app.flags, DCBX_APP_ERROR);
580 p_operational->err = err; 665 p_operational->err = err;
@@ -861,6 +946,9 @@ static int qed_dcbx_query_params(struct qed_hwfn *p_hwfn,
861 struct qed_ptt *p_ptt; 946 struct qed_ptt *p_ptt;
862 int rc; 947 int rc;
863 948
949 if (IS_VF(p_hwfn->cdev))
950 return -EINVAL;
951
864 p_ptt = qed_ptt_acquire(p_hwfn); 952 p_ptt = qed_ptt_acquire(p_hwfn);
865 if (!p_ptt) 953 if (!p_ptt)
866 return -EBUSY; 954 return -EBUSY;
@@ -900,6 +988,7 @@ qed_dcbx_set_pfc_data(struct qed_hwfn *p_hwfn,
900 if (p_params->pfc.prio[i]) 988 if (p_params->pfc.prio[i])
901 pfc_map |= BIT(i); 989 pfc_map |= BIT(i);
902 990
991 *pfc &= ~DCBX_PFC_PRI_EN_BITMAP_MASK;
903 *pfc |= (pfc_map << DCBX_PFC_PRI_EN_BITMAP_SHIFT); 992 *pfc |= (pfc_map << DCBX_PFC_PRI_EN_BITMAP_SHIFT);
904 993
905 DP_VERBOSE(p_hwfn, QED_MSG_DCB, "pfc = 0x%x\n", *pfc); 994 DP_VERBOSE(p_hwfn, QED_MSG_DCB, "pfc = 0x%x\n", *pfc);
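The added *pfc &= ~DCBX_PFC_PRI_EN_BITMAP_MASK is the standard read-modify-write rule for packed register fields: clear the field with its mask before OR-ing in the new value, otherwise bits from the previous bitmap leak into the result. In generic form (mask and shift invented for illustration):

	#include <stdint.h>

	#define PRI_EN_BITMAP_MASK	0x000000ffu
	#define PRI_EN_BITMAP_SHIFT	0

	static void set_pri_bitmap(uint32_t *reg, uint32_t bitmap)
	{
		*reg &= ~PRI_EN_BITMAP_MASK;		/* drop stale field bits */
		*reg |= bitmap << PRI_EN_BITMAP_SHIFT;	/* install the new value */
	}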
@@ -944,7 +1033,6 @@ qed_dcbx_set_ets_data(struct qed_hwfn *p_hwfn,
944 val = (((u32)p_params->ets_pri_tc_tbl[i]) << ((7 - i) * 4)); 1033 val = (((u32)p_params->ets_pri_tc_tbl[i]) << ((7 - i) * 4));
945 p_ets->pri_tc_tbl[0] |= val; 1034 p_ets->pri_tc_tbl[0] |= val;
946 } 1035 }
947 p_ets->pri_tc_tbl[0] = cpu_to_be32(p_ets->pri_tc_tbl[0]);
948 for (i = 0; i < 2; i++) { 1036 for (i = 0; i < 2; i++) {
949 p_ets->tc_bw_tbl[i] = cpu_to_be32(p_ets->tc_bw_tbl[i]); 1037 p_ets->tc_bw_tbl[i] = cpu_to_be32(p_ets->tc_bw_tbl[i]);
950 p_ets->tc_tsa_tbl[i] = cpu_to_be32(p_ets->tc_tsa_tbl[i]); 1038 p_ets->tc_tsa_tbl[i] = cpu_to_be32(p_ets->tc_tsa_tbl[i]);
@@ -954,7 +1042,7 @@ qed_dcbx_set_ets_data(struct qed_hwfn *p_hwfn,
954static void 1042static void
955qed_dcbx_set_app_data(struct qed_hwfn *p_hwfn, 1043qed_dcbx_set_app_data(struct qed_hwfn *p_hwfn,
956 struct dcbx_app_priority_feature *p_app, 1044 struct dcbx_app_priority_feature *p_app,
957 struct qed_dcbx_params *p_params) 1045 struct qed_dcbx_params *p_params, bool ieee)
958{ 1046{
959 u32 *entry; 1047 u32 *entry;
960 int i; 1048 int i;
@@ -975,12 +1063,45 @@ qed_dcbx_set_app_data(struct qed_hwfn *p_hwfn,
975 1063
976 for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) { 1064 for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) {
977 entry = &p_app->app_pri_tbl[i].entry; 1065 entry = &p_app->app_pri_tbl[i].entry;
978 *entry &= ~DCBX_APP_SF_MASK; 1066 *entry = 0;
979 if (p_params->app_entry[i].ethtype) 1067 if (ieee) {
980 *entry |= ((u32)DCBX_APP_SF_ETHTYPE << 1068 *entry &= ~(DCBX_APP_SF_IEEE_MASK | DCBX_APP_SF_MASK);
981 DCBX_APP_SF_SHIFT); 1069 switch (p_params->app_entry[i].sf_ieee) {
982 else 1070 case QED_DCBX_SF_IEEE_ETHTYPE:
983 *entry |= ((u32)DCBX_APP_SF_PORT << DCBX_APP_SF_SHIFT); 1071 *entry |= ((u32)DCBX_APP_SF_IEEE_ETHTYPE <<
1072 DCBX_APP_SF_IEEE_SHIFT);
1073 *entry |= ((u32)DCBX_APP_SF_ETHTYPE <<
1074 DCBX_APP_SF_SHIFT);
1075 break;
1076 case QED_DCBX_SF_IEEE_TCP_PORT:
1077 *entry |= ((u32)DCBX_APP_SF_IEEE_TCP_PORT <<
1078 DCBX_APP_SF_IEEE_SHIFT);
1079 *entry |= ((u32)DCBX_APP_SF_PORT <<
1080 DCBX_APP_SF_SHIFT);
1081 break;
1082 case QED_DCBX_SF_IEEE_UDP_PORT:
1083 *entry |= ((u32)DCBX_APP_SF_IEEE_UDP_PORT <<
1084 DCBX_APP_SF_IEEE_SHIFT);
1085 *entry |= ((u32)DCBX_APP_SF_PORT <<
1086 DCBX_APP_SF_SHIFT);
1087 break;
1088 case QED_DCBX_SF_IEEE_TCP_UDP_PORT:
1089 *entry |= ((u32)DCBX_APP_SF_IEEE_TCP_UDP_PORT <<
1090 DCBX_APP_SF_IEEE_SHIFT);
1091 *entry |= ((u32)DCBX_APP_SF_PORT <<
1092 DCBX_APP_SF_SHIFT);
1093 break;
1094 }
1095 } else {
1096 *entry &= ~DCBX_APP_SF_MASK;
1097 if (p_params->app_entry[i].ethtype)
1098 *entry |= ((u32)DCBX_APP_SF_ETHTYPE <<
1099 DCBX_APP_SF_SHIFT);
1100 else
1101 *entry |= ((u32)DCBX_APP_SF_PORT <<
1102 DCBX_APP_SF_SHIFT);
1103 }
1104
984 *entry &= ~DCBX_APP_PROTOCOL_ID_MASK; 1105 *entry &= ~DCBX_APP_PROTOCOL_ID_MASK;
985 *entry |= ((u32)p_params->app_entry[i].proto_id << 1106 *entry |= ((u32)p_params->app_entry[i].proto_id <<
986 DCBX_APP_PROTOCOL_ID_SHIFT); 1107 DCBX_APP_PROTOCOL_ID_SHIFT);
@@ -995,15 +1116,19 @@ qed_dcbx_set_local_params(struct qed_hwfn *p_hwfn,
995 struct dcbx_local_params *local_admin, 1116 struct dcbx_local_params *local_admin,
996 struct qed_dcbx_set *params) 1117 struct qed_dcbx_set *params)
997{ 1118{
1119 bool ieee = false;
1120
998 local_admin->flags = 0; 1121 local_admin->flags = 0;
999 memcpy(&local_admin->features, 1122 memcpy(&local_admin->features,
1000 &p_hwfn->p_dcbx_info->operational.features, 1123 &p_hwfn->p_dcbx_info->operational.features,
1001 sizeof(local_admin->features)); 1124 sizeof(local_admin->features));
1002 1125
1003 if (params->enabled) 1126 if (params->enabled) {
1004 local_admin->config = params->ver_num; 1127 local_admin->config = params->ver_num;
1005 else 1128 ieee = !!(params->ver_num & DCBX_CONFIG_VERSION_IEEE);
1129 } else {
1006 local_admin->config = DCBX_CONFIG_VERSION_DISABLED; 1130 local_admin->config = DCBX_CONFIG_VERSION_DISABLED;
1131 }
1007 1132
1008 if (params->override_flags & QED_DCBX_OVERRIDE_PFC_CFG) 1133 if (params->override_flags & QED_DCBX_OVERRIDE_PFC_CFG)
1009 qed_dcbx_set_pfc_data(p_hwfn, &local_admin->features.pfc, 1134 qed_dcbx_set_pfc_data(p_hwfn, &local_admin->features.pfc,
@@ -1015,7 +1140,7 @@ qed_dcbx_set_local_params(struct qed_hwfn *p_hwfn,
1015 1140
1016 if (params->override_flags & QED_DCBX_OVERRIDE_APP_CFG) 1141 if (params->override_flags & QED_DCBX_OVERRIDE_APP_CFG)
1017 qed_dcbx_set_app_data(p_hwfn, &local_admin->features.app, 1142 qed_dcbx_set_app_data(p_hwfn, &local_admin->features.app,
1018 &params->config.params); 1143 &params->config.params, ieee);
1019} 1144}
1020 1145
1021int qed_dcbx_config_params(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, 1146int qed_dcbx_config_params(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
@@ -1064,7 +1189,7 @@ int qed_dcbx_get_config_params(struct qed_hwfn *p_hwfn,
1064 return 0; 1189 return 0;
1065 } 1190 }
1066 1191
1067 dcbx_info = kmalloc(sizeof(*dcbx_info), GFP_KERNEL); 1192 dcbx_info = kzalloc(sizeof(*dcbx_info), GFP_KERNEL);
1068 if (!dcbx_info) { 1193 if (!dcbx_info) {
1069 DP_ERR(p_hwfn, "Failed to allocate struct qed_dcbx_info\n"); 1194 DP_ERR(p_hwfn, "Failed to allocate struct qed_dcbx_info\n");
1070 return -ENOMEM; 1195 return -ENOMEM;
@@ -1101,7 +1226,7 @@ static struct qed_dcbx_get *qed_dcbnl_get_dcbx(struct qed_hwfn *hwfn,
1101{ 1226{
1102 struct qed_dcbx_get *dcbx_info; 1227 struct qed_dcbx_get *dcbx_info;
1103 1228
1104 dcbx_info = kmalloc(sizeof(*dcbx_info), GFP_KERNEL); 1229 dcbx_info = kzalloc(sizeof(*dcbx_info), GFP_KERNEL);
1105 if (!dcbx_info) { 1230 if (!dcbx_info) {
1106 DP_ERR(hwfn->cdev, "Failed to allocate memory for dcbx_info\n"); 1231 DP_ERR(hwfn->cdev, "Failed to allocate memory for dcbx_info\n");
1107 return NULL; 1232 return NULL;
@@ -1596,8 +1721,10 @@ static int qed_dcbnl_setapp(struct qed_dev *cdev,
1596 if ((entry->ethtype == ethtype) && (entry->proto_id == idval)) 1721 if ((entry->ethtype == ethtype) && (entry->proto_id == idval))
1597 break; 1722 break;
1598 /* First empty slot */ 1723 /* First empty slot */
1599 if (!entry->proto_id) 1724 if (!entry->proto_id) {
1725 dcbx_set.config.params.num_app_entries++;
1600 break; 1726 break;
1727 }
1601 } 1728 }
1602 1729
1603 if (i == QED_DCBX_MAX_APP_PROTOCOL) { 1730 if (i == QED_DCBX_MAX_APP_PROTOCOL) {
@@ -2117,8 +2244,10 @@ int qed_dcbnl_ieee_setapp(struct qed_dev *cdev, struct dcb_app *app)
2117 (entry->proto_id == app->protocol)) 2244 (entry->proto_id == app->protocol))
2118 break; 2245 break;
2119 /* First empty slot */ 2246 /* First empty slot */
2120 if (!entry->proto_id) 2247 if (!entry->proto_id) {
2248 dcbx_set.config.params.num_app_entries++;
2121 break; 2249 break;
2250 }
2122 } 2251 }
2123 2252
2124 if (i == QED_DCBX_MAX_APP_PROTOCOL) { 2253 if (i == QED_DCBX_MAX_APP_PROTOCOL) {
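Both setapp paths above now increment num_app_entries at the moment they claim the first empty slot, so the advertised count tracks the table contents. The find-or-claim loop reduced to its core, with hypothetical structure names:

	#include <stddef.h>

	#define MAX_APP	8

	struct app_entry { unsigned short proto_id; unsigned char prio; };
	struct app_table { struct app_entry e[MAX_APP]; int num_entries; };

	static struct app_entry *find_or_claim(struct app_table *t, unsigned short id)
	{
		int i;

		for (i = 0; i < MAX_APP; i++) {
			if (t->e[i].proto_id == id)
				return &t->e[i];	/* existing entry */
			if (!t->e[i].proto_id) {
				t->num_entries++;	/* claiming a fresh slot */
				return &t->e[i];
			}
		}
		return NULL;				/* table full */
	}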
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index 592784019994..6f9d3b831a2a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -6850,6 +6850,14 @@ struct dcbx_app_priority_entry {
6850#define DCBX_APP_SF_SHIFT 8 6850#define DCBX_APP_SF_SHIFT 8
6851#define DCBX_APP_SF_ETHTYPE 0 6851#define DCBX_APP_SF_ETHTYPE 0
6852#define DCBX_APP_SF_PORT 1 6852#define DCBX_APP_SF_PORT 1
6853#define DCBX_APP_SF_IEEE_MASK 0x0000f000
6854#define DCBX_APP_SF_IEEE_SHIFT 12
6855#define DCBX_APP_SF_IEEE_RESERVED 0
6856#define DCBX_APP_SF_IEEE_ETHTYPE 1
6857#define DCBX_APP_SF_IEEE_TCP_PORT 2
6858#define DCBX_APP_SF_IEEE_UDP_PORT 3
6859#define DCBX_APP_SF_IEEE_TCP_UDP_PORT 4
6860
6853#define DCBX_APP_PROTOCOL_ID_MASK 0xffff0000 6861#define DCBX_APP_PROTOCOL_ID_MASK 0xffff0000
6854#define DCBX_APP_PROTOCOL_ID_SHIFT 16 6862#define DCBX_APP_PROTOCOL_ID_SHIFT 16
6855}; 6863};
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index e4bd02e46e57..9544e4c41359 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -722,11 +722,14 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
722 txq->tx_db.data.bd_prod = 722 txq->tx_db.data.bd_prod =
723 cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl)); 723 cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl));
724 724
725 if (!skb->xmit_more || netif_tx_queue_stopped(netdev_txq)) 725 if (!skb->xmit_more || netif_xmit_stopped(netdev_txq))
726 qede_update_tx_producer(txq); 726 qede_update_tx_producer(txq);
727 727
728 if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl) 728 if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl)
729 < (MAX_SKB_FRAGS + 1))) { 729 < (MAX_SKB_FRAGS + 1))) {
730 if (skb->xmit_more)
731 qede_update_tx_producer(txq);
732
730 netif_tx_stop_queue(netdev_txq); 733 netif_tx_stop_queue(netdev_txq);
731 DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED, 734 DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
732 "Stop queue was called\n"); 735 "Stop queue was called\n");
@@ -2517,7 +2520,8 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
2517 edev->ops->register_ops(cdev, &qede_ll_ops, edev); 2520 edev->ops->register_ops(cdev, &qede_ll_ops, edev);
2518 2521
2519#ifdef CONFIG_DCB 2522#ifdef CONFIG_DCB
2520 qede_set_dcbnl_ops(edev->ndev); 2523 if (!IS_VF(edev))
2524 qede_set_dcbnl_ops(edev->ndev);
2521#endif 2525#endif
2522 2526
2523 INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task); 2527 INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
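The qede TX change defers the doorbell while skb->xmit_more promises more frames (netif_xmit_stopped() also covers stack-side stalls that netif_tx_queue_stopped() missed), but it must flush the pending producer update before stopping the queue, or the last batch would sit unrung until the next transmit. The ordering, condensed into a sketch with hypothetical ring helpers:

	#include <stdbool.h>

	struct txq;				/* hypothetical ring state */
	struct netdev_q;
	struct pkt { bool xmit_more; };

	void ring_doorbell(struct txq *txq);
	bool queue_stopped(struct netdev_q *nq);
	void stop_queue(struct netdev_q *nq);
	int ring_space(struct txq *txq);

	#define MAX_FRAGS	17

	static void tx_finish(struct txq *txq, struct pkt *p, struct netdev_q *nq)
	{
		/* Ring now only if no more frames are queued behind us. */
		if (!p->xmit_more || queue_stopped(nq))
			ring_doorbell(txq);

		if (ring_space(txq) < MAX_FRAGS + 1) {
			/* About to stop: flush any doorbell deferred above. */
			if (p->xmit_more)
				ring_doorbell(txq);
			stop_queue(nq);
		}
	}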
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index fd973f4f16c7..49bad00a0f8f 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -37,8 +37,8 @@
37 37
38#define _QLCNIC_LINUX_MAJOR 5 38#define _QLCNIC_LINUX_MAJOR 5
39#define _QLCNIC_LINUX_MINOR 3 39#define _QLCNIC_LINUX_MINOR 3
40#define _QLCNIC_LINUX_SUBVERSION 64 40#define _QLCNIC_LINUX_SUBVERSION 65
41#define QLCNIC_LINUX_VERSIONID "5.3.64" 41#define QLCNIC_LINUX_VERSIONID "5.3.65"
42#define QLCNIC_DRV_IDC_VER 0x01 42#define QLCNIC_DRV_IDC_VER 0x01
43#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ 43#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
44 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION)) 44 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 87c642d3b075..fedd7366713c 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -102,7 +102,6 @@
102#define QLCNIC_RESPONSE_DESC 0x05 102#define QLCNIC_RESPONSE_DESC 0x05
103#define QLCNIC_LRO_DESC 0x12 103#define QLCNIC_LRO_DESC 0x12
104 104
105#define QLCNIC_TX_POLL_BUDGET 128
106#define QLCNIC_TCP_HDR_SIZE 20 105#define QLCNIC_TCP_HDR_SIZE 20
107#define QLCNIC_TCP_TS_OPTION_SIZE 12 106#define QLCNIC_TCP_TS_OPTION_SIZE 12
108#define QLCNIC_FETCH_RING_ID(handle) ((handle) >> 63) 107#define QLCNIC_FETCH_RING_ID(handle) ((handle) >> 63)
@@ -2008,7 +2007,6 @@ static int qlcnic_83xx_msix_tx_poll(struct napi_struct *napi, int budget)
2008 struct qlcnic_host_tx_ring *tx_ring; 2007 struct qlcnic_host_tx_ring *tx_ring;
2009 struct qlcnic_adapter *adapter; 2008 struct qlcnic_adapter *adapter;
2010 2009
2011 budget = QLCNIC_TX_POLL_BUDGET;
2012 tx_ring = container_of(napi, struct qlcnic_host_tx_ring, napi); 2010 tx_ring = container_of(napi, struct qlcnic_host_tx_ring, napi);
2013 adapter = tx_ring->adapter; 2011 adapter = tx_ring->adapter;
2014 work_done = qlcnic_process_cmd_ring(adapter, tx_ring, budget); 2012 work_done = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
index 017d8c2c8285..24061b9b92e8 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
@@ -156,10 +156,8 @@ struct qlcnic_vf_info {
156 spinlock_t vlan_list_lock; /* Lock for VLAN list */ 156 spinlock_t vlan_list_lock; /* Lock for VLAN list */
157}; 157};
158 158
159struct qlcnic_async_work_list { 159struct qlcnic_async_cmd {
160 struct list_head list; 160 struct list_head list;
161 struct work_struct work;
162 void *ptr;
163 struct qlcnic_cmd_args *cmd; 161 struct qlcnic_cmd_args *cmd;
164}; 162};
165 163
@@ -168,7 +166,10 @@ struct qlcnic_back_channel {
168 struct workqueue_struct *bc_trans_wq; 166 struct workqueue_struct *bc_trans_wq;
169 struct workqueue_struct *bc_async_wq; 167 struct workqueue_struct *bc_async_wq;
170 struct workqueue_struct *bc_flr_wq; 168 struct workqueue_struct *bc_flr_wq;
171 struct list_head async_list; 169 struct qlcnic_adapter *adapter;
170 struct list_head async_cmd_list;
171 struct work_struct vf_async_work;
172 spinlock_t queue_lock; /* async_cmd_list queue lock */
172}; 173};
173 174
174struct qlcnic_sriov { 175struct qlcnic_sriov {
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index 7327b729ba2e..d7107055ec60 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -29,6 +29,7 @@
29#define QLC_83XX_VF_RESET_FAIL_THRESH 8 29#define QLC_83XX_VF_RESET_FAIL_THRESH 8
30#define QLC_BC_CMD_MAX_RETRY_CNT 5 30#define QLC_BC_CMD_MAX_RETRY_CNT 5
31 31
32static void qlcnic_sriov_handle_async_issue_cmd(struct work_struct *work);
32static void qlcnic_sriov_vf_free_mac_list(struct qlcnic_adapter *); 33static void qlcnic_sriov_vf_free_mac_list(struct qlcnic_adapter *);
33static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *, u32); 34static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *, u32);
34static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *); 35static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *);
@@ -177,7 +178,10 @@ int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs)
177 } 178 }
178 179
179 bc->bc_async_wq = wq; 180 bc->bc_async_wq = wq;
180 INIT_LIST_HEAD(&bc->async_list); 181 INIT_LIST_HEAD(&bc->async_cmd_list);
182 INIT_WORK(&bc->vf_async_work, qlcnic_sriov_handle_async_issue_cmd);
183 spin_lock_init(&bc->queue_lock);
184 bc->adapter = adapter;
181 185
182 for (i = 0; i < num_vfs; i++) { 186 for (i = 0; i < num_vfs; i++) {
183 vf = &sriov->vf_info[i]; 187 vf = &sriov->vf_info[i];
@@ -1517,17 +1521,21 @@ static void qlcnic_vf_add_mc_list(struct net_device *netdev, const u8 *mac,
1517 1521
1518void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *bc) 1522void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *bc)
1519{ 1523{
1520 struct list_head *head = &bc->async_list; 1524 struct list_head *head = &bc->async_cmd_list;
1521 struct qlcnic_async_work_list *entry; 1525 struct qlcnic_async_cmd *entry;
1522 1526
1523 flush_workqueue(bc->bc_async_wq); 1527 flush_workqueue(bc->bc_async_wq);
1528 cancel_work_sync(&bc->vf_async_work);
1529
1530 spin_lock(&bc->queue_lock);
1524 while (!list_empty(head)) { 1531 while (!list_empty(head)) {
1525 entry = list_entry(head->next, struct qlcnic_async_work_list, 1532 entry = list_entry(head->next, struct qlcnic_async_cmd,
1526 list); 1533 list);
1527 cancel_work_sync(&entry->work);
1528 list_del(&entry->list); 1534 list_del(&entry->list);
1535 kfree(entry->cmd);
1529 kfree(entry); 1536 kfree(entry);
1530 } 1537 }
1538 spin_unlock(&bc->queue_lock);
1531} 1539}
1532 1540
1533void qlcnic_sriov_vf_set_multi(struct net_device *netdev) 1541void qlcnic_sriov_vf_set_multi(struct net_device *netdev)
@@ -1587,57 +1595,64 @@ void qlcnic_sriov_vf_set_multi(struct net_device *netdev)
1587 1595
1588static void qlcnic_sriov_handle_async_issue_cmd(struct work_struct *work) 1596static void qlcnic_sriov_handle_async_issue_cmd(struct work_struct *work)
1589{ 1597{
1590 struct qlcnic_async_work_list *entry; 1598 struct qlcnic_async_cmd *entry, *tmp;
1591 struct qlcnic_adapter *adapter; 1599 struct qlcnic_back_channel *bc;
1592 struct qlcnic_cmd_args *cmd; 1600 struct qlcnic_cmd_args *cmd;
1601 struct list_head *head;
1602 LIST_HEAD(del_list);
1603
1604 bc = container_of(work, struct qlcnic_back_channel, vf_async_work);
1605 head = &bc->async_cmd_list;
1606
1607 spin_lock(&bc->queue_lock);
1608 list_splice_init(head, &del_list);
1609 spin_unlock(&bc->queue_lock);
1610
1611 list_for_each_entry_safe(entry, tmp, &del_list, list) {
1612 list_del(&entry->list);
1613 cmd = entry->cmd;
1614 __qlcnic_sriov_issue_cmd(bc->adapter, cmd);
1615 kfree(entry);
1616 }
1617
1618 if (!list_empty(head))
1619 queue_work(bc->bc_async_wq, &bc->vf_async_work);
1593 1620
1594 entry = container_of(work, struct qlcnic_async_work_list, work);
1595 adapter = entry->ptr;
1596 cmd = entry->cmd;
1597 __qlcnic_sriov_issue_cmd(adapter, cmd);
1598 return; 1621 return;
1599} 1622}
1600 1623
1601static struct qlcnic_async_work_list * 1624static struct qlcnic_async_cmd *
1602qlcnic_sriov_get_free_node_async_work(struct qlcnic_back_channel *bc) 1625qlcnic_sriov_alloc_async_cmd(struct qlcnic_back_channel *bc,
1626 struct qlcnic_cmd_args *cmd)
1603{ 1627{
1604 struct list_head *node; 1628 struct qlcnic_async_cmd *entry = NULL;
1605 struct qlcnic_async_work_list *entry = NULL;
1606 u8 empty = 0;
1607 1629
1608 list_for_each(node, &bc->async_list) { 1630 entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
1609 entry = list_entry(node, struct qlcnic_async_work_list, list); 1631 if (!entry)
1610 if (!work_pending(&entry->work)) { 1632 return NULL;
1611 empty = 1;
1612 break;
1613 }
1614 }
1615 1633
1616 if (!empty) { 1634 entry->cmd = cmd;
1617 entry = kzalloc(sizeof(struct qlcnic_async_work_list), 1635
1618 GFP_ATOMIC); 1636 spin_lock(&bc->queue_lock);
1619 if (entry == NULL) 1637 list_add_tail(&entry->list, &bc->async_cmd_list);
1620 return NULL; 1638 spin_unlock(&bc->queue_lock);
1621 list_add_tail(&entry->list, &bc->async_list);
1622 }
1623 1639
1624 return entry; 1640 return entry;
1625} 1641}
1626 1642
1627static void qlcnic_sriov_schedule_async_cmd(struct qlcnic_back_channel *bc, 1643static void qlcnic_sriov_schedule_async_cmd(struct qlcnic_back_channel *bc,
1628 work_func_t func, void *data,
1629 struct qlcnic_cmd_args *cmd) 1644 struct qlcnic_cmd_args *cmd)
1630{ 1645{
1631 struct qlcnic_async_work_list *entry = NULL; 1646 struct qlcnic_async_cmd *entry = NULL;
1632 1647
1633 entry = qlcnic_sriov_get_free_node_async_work(bc); 1648 entry = qlcnic_sriov_alloc_async_cmd(bc, cmd);
1634 if (!entry) 1649 if (!entry) {
1650 qlcnic_free_mbx_args(cmd);
1651 kfree(cmd);
1635 return; 1652 return;
1653 }
1636 1654
1637 entry->ptr = data; 1655 queue_work(bc->bc_async_wq, &bc->vf_async_work);
1638 entry->cmd = cmd;
1639 INIT_WORK(&entry->work, func);
1640 queue_work(bc->bc_async_wq, &entry->work);
1641} 1656}
1642 1657
1643static int qlcnic_sriov_async_issue_cmd(struct qlcnic_adapter *adapter, 1658static int qlcnic_sriov_async_issue_cmd(struct qlcnic_adapter *adapter,
@@ -1649,8 +1664,8 @@ static int qlcnic_sriov_async_issue_cmd(struct qlcnic_adapter *adapter,
1649 if (adapter->need_fw_reset) 1664 if (adapter->need_fw_reset)
1650 return -EIO; 1665 return -EIO;
1651 1666
1652 qlcnic_sriov_schedule_async_cmd(bc, qlcnic_sriov_handle_async_issue_cmd, 1667 qlcnic_sriov_schedule_async_cmd(bc, cmd);
1653 adapter, cmd); 1668
1654 return 0; 1669 return 0;
1655} 1670}
1656 1671
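The qlcnic rework above replaces one work item per command with a single self-requeueing work item draining a spinlock-protected list; list_splice_init() detaches the whole queue under the lock so commands are issued without holding it. The pattern in miniature (a sketch, not the driver's code):

	#include <linux/list.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>
	#include <linux/workqueue.h>

	struct cmd_entry {
		struct list_head list;
		/* ... command payload ... */
	};

	struct cmd_channel {
		struct list_head queue;
		spinlock_t lock;
		struct work_struct work;
		struct workqueue_struct *wq;
	};

	static void drain_cmds(struct work_struct *work)
	{
		struct cmd_channel *ch = container_of(work, struct cmd_channel, work);
		struct cmd_entry *e, *tmp;
		LIST_HEAD(del_list);

		/* Detach everything queued so far; issue without the lock. */
		spin_lock(&ch->lock);
		list_splice_init(&ch->queue, &del_list);
		spin_unlock(&ch->lock);

		list_for_each_entry_safe(e, tmp, &del_list, list) {
			list_del(&e->list);
			/* issue_cmd(e); */
			kfree(e);
		}

		/* Entries added while draining: run once more. */
		if (!list_empty(&ch->queue))
			queue_work(ch->wq, &ch->work);
	}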
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index deae10d7426d..5297bf77211c 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -467,8 +467,8 @@ static int cp_rx_poll(struct napi_struct *napi, int budget)
467 unsigned int rx_tail = cp->rx_tail; 467 unsigned int rx_tail = cp->rx_tail;
468 int rx; 468 int rx;
469 469
470rx_status_loop:
471 rx = 0; 470 rx = 0;
471rx_status_loop:
472 cpw16(IntrStatus, cp_rx_intr_mask); 472 cpw16(IntrStatus, cp_rx_intr_mask);
473 473
474 while (rx < budget) { 474 while (rx < budget) {
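Moving rx = 0 above the rx_status_loop label matters because the 8139cp poll path can jump back to the label when fresh interrupt status arrives; resetting the counter at the label let one poll call process more packets than its budget. A sketch of the corrected control flow, with hypothetical helpers:

	int rx_work_pending(void);	/* hypothetical: descriptors ready? */
	void handle_one_packet(void);
	int irq_status_pending(void);	/* hypothetical: new status since ack? */

	static int rx_poll(int budget)
	{
		int rx;

		rx = 0;			/* initialized once, before the label */
	rx_status_loop:
		while (rx < budget && rx_work_pending()) {
			handle_one_packet();
			rx++;
		}

		/* Restart without resetting rx, so budget bounds total work. */
		if (irq_status_pending() && rx < budget)
			goto rx_status_loop;

		return rx;
	}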
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 799d58d86e6d..054e795df90f 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -201,9 +201,14 @@ static const u16 sh_eth_offset_fast_rz[SH_ETH_MAX_REGISTER_OFFSET] = {
201 201
202 [ARSTR] = 0x0000, 202 [ARSTR] = 0x0000,
203 [TSU_CTRST] = 0x0004, 203 [TSU_CTRST] = 0x0004,
204 [TSU_FWSLC] = 0x0038,
204 [TSU_VTAG0] = 0x0058, 205 [TSU_VTAG0] = 0x0058,
205 [TSU_ADSBSY] = 0x0060, 206 [TSU_ADSBSY] = 0x0060,
206 [TSU_TEN] = 0x0064, 207 [TSU_TEN] = 0x0064,
208 [TSU_POST1] = 0x0070,
209 [TSU_POST2] = 0x0074,
210 [TSU_POST3] = 0x0078,
211 [TSU_POST4] = 0x007c,
207 [TSU_ADRH0] = 0x0100, 212 [TSU_ADRH0] = 0x0100,
208 213
209 [TXNLCR0] = 0x0080, 214 [TXNLCR0] = 0x0080,
@@ -2786,6 +2791,8 @@ static void sh_eth_tsu_init(struct sh_eth_private *mdp)
2786{ 2791{
2787 if (sh_eth_is_rz_fast_ether(mdp)) { 2792 if (sh_eth_is_rz_fast_ether(mdp)) {
2788 sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */ 2793 sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */
2794 sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL,
2795 TSU_FWSLC); /* Enable POST registers */
2789 return; 2796 return;
2790 } 2797 }
2791 2798
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index f658fee74f18..e00a669e9e09 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -1517,13 +1517,14 @@ static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask)
1517 } 1517 }
1518 1518
1519#if BITS_PER_LONG == 64 1519#if BITS_PER_LONG == 64
1520 BUILD_BUG_ON(BITS_TO_LONGS(EF10_STAT_COUNT) != 2);
1520 mask[0] = raw_mask[0]; 1521 mask[0] = raw_mask[0];
1521 mask[1] = raw_mask[1]; 1522 mask[1] = raw_mask[1];
1522#else 1523#else
1524 BUILD_BUG_ON(BITS_TO_LONGS(EF10_STAT_COUNT) != 3);
1523 mask[0] = raw_mask[0] & 0xffffffff; 1525 mask[0] = raw_mask[0] & 0xffffffff;
1524 mask[1] = raw_mask[0] >> 32; 1526 mask[1] = raw_mask[0] >> 32;
1525 mask[2] = raw_mask[1] & 0xffffffff; 1527 mask[2] = raw_mask[1] & 0xffffffff;
1526 mask[3] = raw_mask[1] >> 32;
1527#endif 1528#endif
1528} 1529}
1529 1530
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 726b80f45906..503a3b6dce91 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -2275,6 +2275,13 @@ static int smc_drv_probe(struct platform_device *pdev)
2275 if (pd) { 2275 if (pd) {
2276 memcpy(&lp->cfg, pd, sizeof(lp->cfg)); 2276 memcpy(&lp->cfg, pd, sizeof(lp->cfg));
2277 lp->io_shift = SMC91X_IO_SHIFT(lp->cfg.flags); 2277 lp->io_shift = SMC91X_IO_SHIFT(lp->cfg.flags);
2278
2279 if (!SMC_8BIT(lp) && !SMC_16BIT(lp)) {
2280 dev_err(&pdev->dev,
2281 "at least one of 8-bit or 16-bit access support is required.\n");
2282 ret = -ENXIO;
2283 goto out_free_netdev;
2284 }
2278 } 2285 }
2279 2286
2280#if IS_BUILTIN(CONFIG_OF) 2287#if IS_BUILTIN(CONFIG_OF)
diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h
index 1a55c7976df0..ea8465467469 100644
--- a/drivers/net/ethernet/smsc/smc91x.h
+++ b/drivers/net/ethernet/smsc/smc91x.h
@@ -37,6 +37,27 @@
37#include <linux/smc91x.h> 37#include <linux/smc91x.h>
38 38
39/* 39/*
40 * Any 16-bit access is performed with two 8-bit accesses if the hardware
41 * can't do it directly. Most registers are 16-bit so those are mandatory.
42 */
43#define SMC_outw_b(x, a, r) \
44 do { \
45 unsigned int __val16 = (x); \
46 unsigned int __reg = (r); \
47 SMC_outb(__val16, a, __reg); \
48 SMC_outb(__val16 >> 8, a, __reg + (1 << SMC_IO_SHIFT)); \
49 } while (0)
50
51#define SMC_inw_b(a, r) \
52 ({ \
53 unsigned int __val16; \
54 unsigned int __reg = r; \
55 __val16 = SMC_inb(a, __reg); \
56 __val16 |= SMC_inb(a, __reg + (1 << SMC_IO_SHIFT)) << 8; \
57 __val16; \
58 })
59
60/*
40 * Define your architecture specific bus configuration parameters here. 61 * Define your architecture specific bus configuration parameters here.
41 */ 62 */
42 63
@@ -55,10 +76,30 @@
55#define SMC_IO_SHIFT (lp->io_shift) 76#define SMC_IO_SHIFT (lp->io_shift)
56 77
57#define SMC_inb(a, r) readb((a) + (r)) 78#define SMC_inb(a, r) readb((a) + (r))
58#define SMC_inw(a, r) readw((a) + (r)) 79#define SMC_inw(a, r) \
80 ({ \
81 unsigned int __smc_r = r; \
82 SMC_16BIT(lp) ? readw((a) + __smc_r) : \
83 SMC_8BIT(lp) ? SMC_inw_b(a, __smc_r) : \
84 ({ BUG(); 0; }); \
85 })
86
59#define SMC_inl(a, r) readl((a) + (r)) 87#define SMC_inl(a, r) readl((a) + (r))
60#define SMC_outb(v, a, r) writeb(v, (a) + (r)) 88#define SMC_outb(v, a, r) writeb(v, (a) + (r))
89#define SMC_outw(v, a, r) \
90 do { \
91 unsigned int __v = v, __smc_r = r; \
92 if (SMC_16BIT(lp)) \
93 __SMC_outw(__v, a, __smc_r); \
94 else if (SMC_8BIT(lp)) \
95 SMC_outw_b(__v, a, __smc_r); \
96 else \
97 BUG(); \
98 } while (0)
99
61#define SMC_outl(v, a, r) writel(v, (a) + (r)) 100#define SMC_outl(v, a, r) writel(v, (a) + (r))
101#define SMC_insb(a, r, p, l) readsb((a) + (r), p, l)
102#define SMC_outsb(a, r, p, l) writesb((a) + (r), p, l)
62#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l) 103#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
63#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l) 104#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
64#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l) 105#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l)
@@ -66,7 +107,7 @@
66#define SMC_IRQ_FLAGS (-1) /* from resource */ 107#define SMC_IRQ_FLAGS (-1) /* from resource */
67 108
68/* We actually can't write halfwords properly if not word aligned */ 109/* We actually can't write halfwords properly if not word aligned */
69static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg) 110static inline void __SMC_outw(u16 val, void __iomem *ioaddr, int reg)
70{ 111{
71 if ((machine_is_mainstone() || machine_is_stargate2() || 112 if ((machine_is_mainstone() || machine_is_stargate2() ||
72 machine_is_pxa_idp()) && reg & 2) { 113 machine_is_pxa_idp()) && reg & 2) {
@@ -416,24 +457,8 @@ smc_pxa_dma_insw(void __iomem *ioaddr, struct smc_local *lp, int reg, int dma,
416 457
417#if ! SMC_CAN_USE_16BIT 458#if ! SMC_CAN_USE_16BIT
418 459
419/* 460#define SMC_outw(x, ioaddr, reg) SMC_outw_b(x, ioaddr, reg)
420 * Any 16-bit access is performed with two 8-bit accesses if the hardware 461#define SMC_inw(ioaddr, reg) SMC_inw_b(ioaddr, reg)
421 * can't do it directly. Most registers are 16-bit so those are mandatory.
422 */
423#define SMC_outw(x, ioaddr, reg) \
424 do { \
425 unsigned int __val16 = (x); \
426 SMC_outb( __val16, ioaddr, reg ); \
427 SMC_outb( __val16 >> 8, ioaddr, reg + (1 << SMC_IO_SHIFT));\
428 } while (0)
429#define SMC_inw(ioaddr, reg) \
430 ({ \
431 unsigned int __val16; \
432 __val16 = SMC_inb( ioaddr, reg ); \
433 __val16 |= SMC_inb( ioaddr, reg + (1 << SMC_IO_SHIFT)) << 8; \
434 __val16; \
435 })
436
437#define SMC_insw(a, r, p, l) BUG() 462#define SMC_insw(a, r, p, l) BUG()
438#define SMC_outsw(a, r, p, l) BUG() 463#define SMC_outsw(a, r, p, l) BUG()
439 464
@@ -445,7 +470,9 @@ smc_pxa_dma_insw(void __iomem *ioaddr, struct smc_local *lp, int reg, int dma,
445#endif 470#endif
446 471
447#if ! SMC_CAN_USE_8BIT 472#if ! SMC_CAN_USE_8BIT
473#undef SMC_inb
448#define SMC_inb(ioaddr, reg) ({ BUG(); 0; }) 474#define SMC_inb(ioaddr, reg) ({ BUG(); 0; })
475#undef SMC_outb
449#define SMC_outb(x, ioaddr, reg) BUG() 476#define SMC_outb(x, ioaddr, reg) BUG()
450#define SMC_insb(a, r, p, l) BUG() 477#define SMC_insb(a, r, p, l) BUG()
451#define SMC_outsb(a, r, p, l) BUG() 478#define SMC_outsb(a, r, p, l) BUG()
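SMC_outw_b()/SMC_inw_b(), hoisted above, synthesize a 16-bit access from two 8-bit ones: low byte at the register offset, high byte one I/O step up, in little-endian order. A plain C analogue with hypothetical MMIO accessors:

	#include <stdint.h>

	void mmio_write8(volatile uint8_t *base, unsigned int off, uint8_t v);
	uint8_t mmio_read8(volatile uint8_t *base, unsigned int off);

	#define IO_STEP	1	/* stands in for (1 << SMC_IO_SHIFT) */

	static void write16_bytewise(volatile uint8_t *b, unsigned int reg, uint16_t v)
	{
		mmio_write8(b, reg, v & 0xff);		/* low byte first */
		mmio_write8(b, reg + IO_STEP, v >> 8);	/* then the high byte */
	}

	static uint16_t read16_bytewise(volatile uint8_t *b, unsigned int reg)
	{
		uint16_t v = mmio_read8(b, reg);

		v |= (uint16_t)mmio_read8(b, reg + IO_STEP) << 8;
		return v;
	}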
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index ca3134540d2d..4f8910b7db2e 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -1099,15 +1099,8 @@ static int smsc911x_mii_init(struct platform_device *pdev,
1099 goto err_out_free_bus_2; 1099 goto err_out_free_bus_2;
1100 } 1100 }
1101 1101
1102 if (smsc911x_mii_probe(dev) < 0) {
1103 SMSC_WARN(pdata, probe, "Error registering mii bus");
1104 goto err_out_unregister_bus_3;
1105 }
1106
1107 return 0; 1102 return 0;
1108 1103
1109err_out_unregister_bus_3:
1110 mdiobus_unregister(pdata->mii_bus);
1111err_out_free_bus_2: 1104err_out_free_bus_2:
1112 mdiobus_free(pdata->mii_bus); 1105 mdiobus_free(pdata->mii_bus);
1113err_out_1: 1106err_out_1:
@@ -1514,23 +1507,90 @@ static void smsc911x_disable_irq_chip(struct net_device *dev)
1514 smsc911x_reg_write(pdata, INT_STS, 0xFFFFFFFF); 1507 smsc911x_reg_write(pdata, INT_STS, 0xFFFFFFFF);
1515} 1508}
1516 1509
1510static irqreturn_t smsc911x_irqhandler(int irq, void *dev_id)
1511{
1512 struct net_device *dev = dev_id;
1513 struct smsc911x_data *pdata = netdev_priv(dev);
1514 u32 intsts = smsc911x_reg_read(pdata, INT_STS);
1515 u32 inten = smsc911x_reg_read(pdata, INT_EN);
1516 int serviced = IRQ_NONE;
1517 u32 temp;
1518
1519 if (unlikely(intsts & inten & INT_STS_SW_INT_)) {
1520 temp = smsc911x_reg_read(pdata, INT_EN);
1521 temp &= (~INT_EN_SW_INT_EN_);
1522 smsc911x_reg_write(pdata, INT_EN, temp);
1523 smsc911x_reg_write(pdata, INT_STS, INT_STS_SW_INT_);
1524 pdata->software_irq_signal = 1;
1525 smp_wmb();
1526 serviced = IRQ_HANDLED;
1527 }
1528
1529 if (unlikely(intsts & inten & INT_STS_RXSTOP_INT_)) {
1530 /* Called when there is a multicast update scheduled and
1531 * it is now safe to complete the update */
1532 SMSC_TRACE(pdata, intr, "RX Stop interrupt");
1533 smsc911x_reg_write(pdata, INT_STS, INT_STS_RXSTOP_INT_);
1534 if (pdata->multicast_update_pending)
1535 smsc911x_rx_multicast_update_workaround(pdata);
1536 serviced = IRQ_HANDLED;
1537 }
1538
1539 if (intsts & inten & INT_STS_TDFA_) {
1540 temp = smsc911x_reg_read(pdata, FIFO_INT);
1541 temp |= FIFO_INT_TX_AVAIL_LEVEL_;
1542 smsc911x_reg_write(pdata, FIFO_INT, temp);
1543 smsc911x_reg_write(pdata, INT_STS, INT_STS_TDFA_);
1544 netif_wake_queue(dev);
1545 serviced = IRQ_HANDLED;
1546 }
1547
1548 if (unlikely(intsts & inten & INT_STS_RXE_)) {
1549 SMSC_TRACE(pdata, intr, "RX Error interrupt");
1550 smsc911x_reg_write(pdata, INT_STS, INT_STS_RXE_);
1551 serviced = IRQ_HANDLED;
1552 }
1553
1554 if (likely(intsts & inten & INT_STS_RSFL_)) {
1555 if (likely(napi_schedule_prep(&pdata->napi))) {
1556 /* Disable Rx interrupts */
1557 temp = smsc911x_reg_read(pdata, INT_EN);
1558 temp &= (~INT_EN_RSFL_EN_);
1559 smsc911x_reg_write(pdata, INT_EN, temp);
1560 /* Schedule a NAPI poll */
1561 __napi_schedule(&pdata->napi);
1562 } else {
1563 SMSC_WARN(pdata, rx_err, "napi_schedule_prep failed");
1564 }
1565 serviced = IRQ_HANDLED;
1566 }
1567
1568 return serviced;
1569}
1570
1517static int smsc911x_open(struct net_device *dev) 1571static int smsc911x_open(struct net_device *dev)
1518{ 1572{
1519 struct smsc911x_data *pdata = netdev_priv(dev); 1573 struct smsc911x_data *pdata = netdev_priv(dev);
1520 unsigned int timeout; 1574 unsigned int timeout;
1521 unsigned int temp; 1575 unsigned int temp;
1522 unsigned int intcfg; 1576 unsigned int intcfg;
1577 int retval;
1578 int irq_flags;
1523 1579
1524 /* if the phy is not yet registered, retry later*/ 1580 /* find and start the given phy */
1525 if (!dev->phydev) { 1581 if (!dev->phydev) {
1526 SMSC_WARN(pdata, hw, "phy_dev is NULL"); 1582 retval = smsc911x_mii_probe(dev);
1527 return -EAGAIN; 1583 if (retval < 0) {
1584 SMSC_WARN(pdata, probe, "Error starting phy");
1585 goto out;
1586 }
1528 } 1587 }
1529 1588
1530 /* Reset the LAN911x */ 1589 /* Reset the LAN911x */
1531 if (smsc911x_soft_reset(pdata)) { 1590 retval = smsc911x_soft_reset(pdata);
1591 if (retval) {
1532 SMSC_WARN(pdata, hw, "soft reset failed"); 1592 SMSC_WARN(pdata, hw, "soft reset failed");
1533 return -EIO; 1593 goto mii_free_out;
1534 } 1594 }
1535 1595
1536 smsc911x_reg_write(pdata, HW_CFG, 0x00050000); 1596 smsc911x_reg_write(pdata, HW_CFG, 0x00050000);
@@ -1586,6 +1646,15 @@ static int smsc911x_open(struct net_device *dev)
1586 pdata->software_irq_signal = 0; 1646 pdata->software_irq_signal = 0;
1587 smp_wmb(); 1647 smp_wmb();
1588 1648
1649 irq_flags = irq_get_trigger_type(dev->irq);
1650 retval = request_irq(dev->irq, smsc911x_irqhandler,
1651 irq_flags | IRQF_SHARED, dev->name, dev);
1652 if (retval) {
1653 SMSC_WARN(pdata, probe,
1654 "Unable to claim requested irq: %d", dev->irq);
1655 goto mii_free_out;
1656 }
1657
1589 temp = smsc911x_reg_read(pdata, INT_EN); 1658 temp = smsc911x_reg_read(pdata, INT_EN);
1590 temp |= INT_EN_SW_INT_EN_; 1659 temp |= INT_EN_SW_INT_EN_;
1591 smsc911x_reg_write(pdata, INT_EN, temp); 1660 smsc911x_reg_write(pdata, INT_EN, temp);
@@ -1600,7 +1669,8 @@ static int smsc911x_open(struct net_device *dev)
1600 if (!pdata->software_irq_signal) { 1669 if (!pdata->software_irq_signal) {
1601 netdev_warn(dev, "ISR failed signaling test (IRQ %d)\n", 1670 netdev_warn(dev, "ISR failed signaling test (IRQ %d)\n",
1602 dev->irq); 1671 dev->irq);
1603 return -ENODEV; 1672 retval = -ENODEV;
1673 goto irq_stop_out;
1604 } 1674 }
1605 SMSC_TRACE(pdata, ifup, "IRQ handler passed test using IRQ %d", 1675 SMSC_TRACE(pdata, ifup, "IRQ handler passed test using IRQ %d",
1606 dev->irq); 1676 dev->irq);
@@ -1646,6 +1716,14 @@ static int smsc911x_open(struct net_device *dev)
1646 1716
1647 netif_start_queue(dev); 1717 netif_start_queue(dev);
1648 return 0; 1718 return 0;
1719
1720irq_stop_out:
1721 free_irq(dev->irq, dev);
1722mii_free_out:
1723 phy_disconnect(dev->phydev);
1724 dev->phydev = NULL;
1725out:
1726 return retval;
1649} 1727}
1650 1728
1651/* Entry point for stopping the interface */ 1729/* Entry point for stopping the interface */
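The rework above moves PHY attach and request_irq() from probe time into smsc911x_open(), with failures unwinding in reverse acquisition order through the new irq_stop_out/mii_free_out labels. A minimal sketch of that pattern, with example_attach_phy() and example_isr() as hypothetical stand-ins for the driver's helpers:

static int example_open(struct net_device *dev)
{
	int ret;

	ret = example_attach_phy(dev);          /* hypothetical: connect and start the PHY */
	if (ret < 0)
		goto out;

	ret = request_irq(dev->irq, example_isr,
			  irq_get_trigger_type(dev->irq) | IRQF_SHARED,
			  dev->name, dev);
	if (ret)
		goto phy_out;

	netif_start_queue(dev);
	return 0;

phy_out:
	phy_disconnect(dev->phydev);            /* undo in reverse acquisition order */
	dev->phydev = NULL;
out:
	return ret;
}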
@@ -1667,9 +1745,15 @@ static int smsc911x_stop(struct net_device *dev)
1667 dev->stats.rx_dropped += smsc911x_reg_read(pdata, RX_DROP); 1745 dev->stats.rx_dropped += smsc911x_reg_read(pdata, RX_DROP);
1668 smsc911x_tx_update_txcounters(dev); 1746 smsc911x_tx_update_txcounters(dev);
1669 1747
1748 free_irq(dev->irq, dev);
1749
1670 /* Bring the PHY down */ 1750 /* Bring the PHY down */
1671 if (dev->phydev) 1751 if (dev->phydev) {
1672 phy_stop(dev->phydev); 1752 phy_stop(dev->phydev);
1753 phy_disconnect(dev->phydev);
1754 dev->phydev = NULL;
1755 }
1756 netif_carrier_off(dev);
1673 1757
1674 SMSC_TRACE(pdata, ifdown, "Interface stopped"); 1758 SMSC_TRACE(pdata, ifdown, "Interface stopped");
1675 return 0; 1759 return 0;
@@ -1811,67 +1895,6 @@ static void smsc911x_set_multicast_list(struct net_device *dev)
1811 spin_unlock_irqrestore(&pdata->mac_lock, flags); 1895 spin_unlock_irqrestore(&pdata->mac_lock, flags);
1812} 1896}
1813 1897
1814static irqreturn_t smsc911x_irqhandler(int irq, void *dev_id)
1815{
1816 struct net_device *dev = dev_id;
1817 struct smsc911x_data *pdata = netdev_priv(dev);
1818 u32 intsts = smsc911x_reg_read(pdata, INT_STS);
1819 u32 inten = smsc911x_reg_read(pdata, INT_EN);
1820 int serviced = IRQ_NONE;
1821 u32 temp;
1822
1823 if (unlikely(intsts & inten & INT_STS_SW_INT_)) {
1824 temp = smsc911x_reg_read(pdata, INT_EN);
1825 temp &= (~INT_EN_SW_INT_EN_);
1826 smsc911x_reg_write(pdata, INT_EN, temp);
1827 smsc911x_reg_write(pdata, INT_STS, INT_STS_SW_INT_);
1828 pdata->software_irq_signal = 1;
1829 smp_wmb();
1830 serviced = IRQ_HANDLED;
1831 }
1832
1833 if (unlikely(intsts & inten & INT_STS_RXSTOP_INT_)) {
1834 /* Called when there is a multicast update scheduled and
1835 * it is now safe to complete the update */
1836 SMSC_TRACE(pdata, intr, "RX Stop interrupt");
1837 smsc911x_reg_write(pdata, INT_STS, INT_STS_RXSTOP_INT_);
1838 if (pdata->multicast_update_pending)
1839 smsc911x_rx_multicast_update_workaround(pdata);
1840 serviced = IRQ_HANDLED;
1841 }
1842
1843 if (intsts & inten & INT_STS_TDFA_) {
1844 temp = smsc911x_reg_read(pdata, FIFO_INT);
1845 temp |= FIFO_INT_TX_AVAIL_LEVEL_;
1846 smsc911x_reg_write(pdata, FIFO_INT, temp);
1847 smsc911x_reg_write(pdata, INT_STS, INT_STS_TDFA_);
1848 netif_wake_queue(dev);
1849 serviced = IRQ_HANDLED;
1850 }
1851
1852 if (unlikely(intsts & inten & INT_STS_RXE_)) {
1853 SMSC_TRACE(pdata, intr, "RX Error interrupt");
1854 smsc911x_reg_write(pdata, INT_STS, INT_STS_RXE_);
1855 serviced = IRQ_HANDLED;
1856 }
1857
1858 if (likely(intsts & inten & INT_STS_RSFL_)) {
1859 if (likely(napi_schedule_prep(&pdata->napi))) {
1860 /* Disable Rx interrupts */
1861 temp = smsc911x_reg_read(pdata, INT_EN);
1862 temp &= (~INT_EN_RSFL_EN_);
1863 smsc911x_reg_write(pdata, INT_EN, temp);
1864 /* Schedule a NAPI poll */
1865 __napi_schedule(&pdata->napi);
1866 } else {
1867 SMSC_WARN(pdata, rx_err, "napi_schedule_prep failed");
1868 }
1869 serviced = IRQ_HANDLED;
1870 }
1871
1872 return serviced;
1873}
1874
1875#ifdef CONFIG_NET_POLL_CONTROLLER 1898#ifdef CONFIG_NET_POLL_CONTROLLER
1876static void smsc911x_poll_controller(struct net_device *dev) 1899static void smsc911x_poll_controller(struct net_device *dev)
1877{ 1900{
@@ -2291,16 +2314,14 @@ static int smsc911x_drv_remove(struct platform_device *pdev)
2291 pdata = netdev_priv(dev); 2314 pdata = netdev_priv(dev);
2292 BUG_ON(!pdata); 2315 BUG_ON(!pdata);
2293 BUG_ON(!pdata->ioaddr); 2316 BUG_ON(!pdata->ioaddr);
2294 BUG_ON(!dev->phydev); 2317 WARN_ON(dev->phydev);
2295 2318
2296 SMSC_TRACE(pdata, ifdown, "Stopping driver"); 2319 SMSC_TRACE(pdata, ifdown, "Stopping driver");
2297 2320
2298 phy_disconnect(dev->phydev);
2299 mdiobus_unregister(pdata->mii_bus); 2321 mdiobus_unregister(pdata->mii_bus);
2300 mdiobus_free(pdata->mii_bus); 2322 mdiobus_free(pdata->mii_bus);
2301 2323
2302 unregister_netdev(dev); 2324 unregister_netdev(dev);
2303 free_irq(dev->irq, dev);
2304 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, 2325 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
2305 "smsc911x-memory"); 2326 "smsc911x-memory");
2306 if (!res) 2327 if (!res)
@@ -2385,8 +2406,7 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
2385 struct smsc911x_data *pdata; 2406 struct smsc911x_data *pdata;
2386 struct smsc911x_platform_config *config = dev_get_platdata(&pdev->dev); 2407 struct smsc911x_platform_config *config = dev_get_platdata(&pdev->dev);
2387 struct resource *res; 2408 struct resource *res;
2388 unsigned int intcfg = 0; 2409 int res_size, irq;
2389 int res_size, irq, irq_flags;
2390 int retval; 2410 int retval;
2391 2411
2392 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, 2412 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
@@ -2425,7 +2445,6 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
2425 2445
2426 pdata = netdev_priv(dev); 2446 pdata = netdev_priv(dev);
2427 dev->irq = irq; 2447 dev->irq = irq;
2428 irq_flags = irq_get_trigger_type(irq);
2429 pdata->ioaddr = ioremap_nocache(res->start, res_size); 2448 pdata->ioaddr = ioremap_nocache(res->start, res_size);
2430 2449
2431 pdata->dev = dev; 2450 pdata->dev = dev;
@@ -2472,43 +2491,23 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
2472 if (retval < 0) 2491 if (retval < 0)
2473 goto out_disable_resources; 2492 goto out_disable_resources;
2474 2493
2475 /* configure irq polarity and type before connecting isr */ 2494 netif_carrier_off(dev);
2476 if (pdata->config.irq_polarity == SMSC911X_IRQ_POLARITY_ACTIVE_HIGH)
2477 intcfg |= INT_CFG_IRQ_POL_;
2478
2479 if (pdata->config.irq_type == SMSC911X_IRQ_TYPE_PUSH_PULL)
2480 intcfg |= INT_CFG_IRQ_TYPE_;
2481
2482 smsc911x_reg_write(pdata, INT_CFG, intcfg);
2483
2484 /* Ensure interrupts are globally disabled before connecting ISR */
2485 smsc911x_disable_irq_chip(dev);
2486 2495
2487 retval = request_irq(dev->irq, smsc911x_irqhandler, 2496 retval = smsc911x_mii_init(pdev, dev);
2488 irq_flags | IRQF_SHARED, dev->name, dev);
2489 if (retval) { 2497 if (retval) {
2490 SMSC_WARN(pdata, probe, 2498 SMSC_WARN(pdata, probe, "Error %i initialising mii", retval);
2491 "Unable to claim requested irq: %d", dev->irq);
2492 goto out_disable_resources; 2499 goto out_disable_resources;
2493 } 2500 }
2494 2501
2495 netif_carrier_off(dev);
2496
2497 retval = register_netdev(dev); 2502 retval = register_netdev(dev);
2498 if (retval) { 2503 if (retval) {
2499 SMSC_WARN(pdata, probe, "Error %i registering device", retval); 2504 SMSC_WARN(pdata, probe, "Error %i registering device", retval);
2500 goto out_free_irq; 2505 goto out_disable_resources;
2501 } else { 2506 } else {
2502 SMSC_TRACE(pdata, probe, 2507 SMSC_TRACE(pdata, probe,
2503 "Network interface: \"%s\"", dev->name); 2508 "Network interface: \"%s\"", dev->name);
2504 } 2509 }
2505 2510
2506 retval = smsc911x_mii_init(pdev, dev);
2507 if (retval) {
2508 SMSC_WARN(pdata, probe, "Error %i initialising mii", retval);
2509 goto out_unregister_netdev_5;
2510 }
2511
2512 spin_lock_irq(&pdata->mac_lock); 2511 spin_lock_irq(&pdata->mac_lock);
2513 2512
2514 /* Check if mac address has been specified when bringing interface up */ 2513 /* Check if mac address has been specified when bringing interface up */
@@ -2544,10 +2543,6 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
2544 2543
2545 return 0; 2544 return 0;
2546 2545
2547out_unregister_netdev_5:
2548 unregister_netdev(dev);
2549out_free_irq:
2550 free_irq(dev->irq, dev);
2551out_disable_resources: 2546out_disable_resources:
2552 pm_runtime_put(&pdev->dev); 2547 pm_runtime_put(&pdev->dev);
2553 pm_runtime_disable(&pdev->dev); 2548 pm_runtime_disable(&pdev->dev);
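Probe-ordering note on the smsc911x hunks above: once register_netdev() returns, userspace may invoke .ndo_open immediately, so the MII bus (and everything else open() now depends on) is brought up first and the netdev is registered last; the probe-time request_irq() disappears entirely. A condensed sketch of that ordering, with the example_* helpers hypothetical:

static int example_probe(struct platform_device *pdev)
{
	struct net_device *ndev;
	int ret;

	ndev = example_alloc_netdev(pdev);      /* hypothetical: alloc + ioremap */
	if (!ndev)
		return -ENOMEM;

	netif_carrier_off(ndev);                /* no link until the PHY reports one */

	ret = example_mii_init(pdev, ndev);     /* everything open() depends on... */
	if (ret)
		goto out_free;

	ret = register_netdev(ndev);            /* ...before open() becomes reachable */
	if (ret)
		goto out_mii;
	return 0;

out_mii:
	example_mii_exit(ndev);                 /* hypothetical */
out_free:
	free_netdev(ndev);
	return ret;
}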
diff --git a/drivers/net/ethernet/synopsys/dwc_eth_qos.c b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
index 9f159a775af3..4490ebaed127 100644
--- a/drivers/net/ethernet/synopsys/dwc_eth_qos.c
+++ b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
@@ -1246,7 +1246,7 @@ static int dwceqos_mii_init(struct net_local *lp)
1246 lp->mii_bus->read = &dwceqos_mdio_read; 1246 lp->mii_bus->read = &dwceqos_mdio_read;
1247 lp->mii_bus->write = &dwceqos_mdio_write; 1247 lp->mii_bus->write = &dwceqos_mdio_write;
1248 lp->mii_bus->priv = lp; 1248 lp->mii_bus->priv = lp;
1249 lp->mii_bus->parent = &lp->ndev->dev; 1249 lp->mii_bus->parent = &lp->pdev->dev;
1250 1250
1251 of_address_to_resource(lp->pdev->dev.of_node, 0, &res); 1251 of_address_to_resource(lp->pdev->dev.of_node, 0, &res);
1252 snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%.8llx", 1252 snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%.8llx",
@@ -1622,13 +1622,7 @@ static void dwceqos_init_hw(struct net_local *lp)
1622 DWCEQOS_MMC_CTRL_RSTONRD); 1622 DWCEQOS_MMC_CTRL_RSTONRD);
1623 dwceqos_enable_mmc_interrupt(lp); 1623 dwceqos_enable_mmc_interrupt(lp);
1624 1624
1625 /* Enable Interrupts */ 1625 dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, 0);
1626 dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE,
1627 DWCEQOS_DMA_CH0_IE_NIE |
1628 DWCEQOS_DMA_CH0_IE_RIE | DWCEQOS_DMA_CH0_IE_TIE |
1629 DWCEQOS_DMA_CH0_IE_AIE |
1630 DWCEQOS_DMA_CH0_IE_FBEE);
1631
1632 dwceqos_write(lp, REG_DWCEQOS_MAC_IE, 0); 1626 dwceqos_write(lp, REG_DWCEQOS_MAC_IE, 0);
1633 1627
1634 dwceqos_write(lp, REG_DWCEQOS_MAC_CFG, DWCEQOS_MAC_CFG_IPC | 1628 dwceqos_write(lp, REG_DWCEQOS_MAC_CFG, DWCEQOS_MAC_CFG_IPC |
@@ -1905,6 +1899,15 @@ static int dwceqos_open(struct net_device *ndev)
1905 netif_start_queue(ndev); 1899 netif_start_queue(ndev);
1906 tasklet_enable(&lp->tx_bdreclaim_tasklet); 1900 tasklet_enable(&lp->tx_bdreclaim_tasklet);
1907 1901
1902 /* Enable Interrupts -- do this only after we enable NAPI and the
1903 * tasklet.
1904 */
1905 dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE,
1906 DWCEQOS_DMA_CH0_IE_NIE |
1907 DWCEQOS_DMA_CH0_IE_RIE | DWCEQOS_DMA_CH0_IE_TIE |
1908 DWCEQOS_DMA_CH0_IE_AIE |
1909 DWCEQOS_DMA_CH0_IE_FBEE);
1910
1908 return 0; 1911 return 0;
1909} 1912}
1910 1913
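The comment added in the hunk above states the invariant: the DMA interrupt-enable register is written only after napi_enable() and tasklet_enable(), since an interrupt arriving earlier would try to schedule consumers that are not yet ready. Sketched in isolation, with example_unmask_dma_irqs() standing in for the CH0_IE register write:

static int example_qos_open(struct net_device *ndev)
{
	struct example_priv *lp = netdev_priv(ndev);

	napi_enable(&lp->napi);
	netif_start_queue(ndev);
	tasklet_enable(&lp->tx_reclaim_tasklet);

	/* consumers are live; only now unmask the hardware */
	example_unmask_dma_irqs(lp);            /* hypothetical: writes the interrupt-enable register */
	return 0;
}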
@@ -2850,25 +2853,17 @@ static int dwceqos_probe(struct platform_device *pdev)
2850 2853
2851 ndev->features = ndev->hw_features; 2854 ndev->features = ndev->hw_features;
2852 2855
2853 netif_napi_add(ndev, &lp->napi, dwceqos_rx_poll, NAPI_POLL_WEIGHT);
2854
2855 ret = register_netdev(ndev);
2856 if (ret) {
2857 dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
2858 goto err_out_clk_dis_aper;
2859 }
2860
2861 lp->phy_ref_clk = devm_clk_get(&pdev->dev, "phy_ref_clk"); 2856 lp->phy_ref_clk = devm_clk_get(&pdev->dev, "phy_ref_clk");
2862 if (IS_ERR(lp->phy_ref_clk)) { 2857 if (IS_ERR(lp->phy_ref_clk)) {
2863 dev_err(&pdev->dev, "phy_ref_clk clock not found.\n"); 2858 dev_err(&pdev->dev, "phy_ref_clk clock not found.\n");
2864 ret = PTR_ERR(lp->phy_ref_clk); 2859 ret = PTR_ERR(lp->phy_ref_clk);
2865 goto err_out_unregister_netdev; 2860 goto err_out_clk_dis_aper;
2866 } 2861 }
2867 2862
2868 ret = clk_prepare_enable(lp->phy_ref_clk); 2863 ret = clk_prepare_enable(lp->phy_ref_clk);
2869 if (ret) { 2864 if (ret) {
2870 dev_err(&pdev->dev, "Unable to enable device clock.\n"); 2865 dev_err(&pdev->dev, "Unable to enable device clock.\n");
2871 goto err_out_unregister_netdev; 2866 goto err_out_clk_dis_aper;
2872 } 2867 }
2873 2868
2874 lp->phy_node = of_parse_phandle(lp->pdev->dev.of_node, 2869 lp->phy_node = of_parse_phandle(lp->pdev->dev.of_node,
@@ -2877,7 +2872,7 @@ static int dwceqos_probe(struct platform_device *pdev)
2877 ret = of_phy_register_fixed_link(lp->pdev->dev.of_node); 2872 ret = of_phy_register_fixed_link(lp->pdev->dev.of_node);
2878 if (ret < 0) { 2873 if (ret < 0) {
2879 dev_err(&pdev->dev, "invalid fixed-link"); 2874 dev_err(&pdev->dev, "invalid fixed-link");
2880 goto err_out_unregister_clk_notifier; 2875 goto err_out_clk_dis_phy;
2881 } 2876 }
2882 2877
2883 lp->phy_node = of_node_get(lp->pdev->dev.of_node); 2878 lp->phy_node = of_node_get(lp->pdev->dev.of_node);
@@ -2886,7 +2881,7 @@ static int dwceqos_probe(struct platform_device *pdev)
2886 ret = of_get_phy_mode(lp->pdev->dev.of_node); 2881 ret = of_get_phy_mode(lp->pdev->dev.of_node);
2887 if (ret < 0) { 2882 if (ret < 0) {
2888 dev_err(&lp->pdev->dev, "error in getting phy i/f\n"); 2883 dev_err(&lp->pdev->dev, "error in getting phy i/f\n");
2889 goto err_out_unregister_clk_notifier; 2884 goto err_out_clk_dis_phy;
2890 } 2885 }
2891 2886
2892 lp->phy_interface = ret; 2887 lp->phy_interface = ret;
@@ -2894,14 +2889,14 @@ static int dwceqos_probe(struct platform_device *pdev)
2894 ret = dwceqos_mii_init(lp); 2889 ret = dwceqos_mii_init(lp);
2895 if (ret) { 2890 if (ret) {
2896 dev_err(&lp->pdev->dev, "error in dwceqos_mii_init\n"); 2891 dev_err(&lp->pdev->dev, "error in dwceqos_mii_init\n");
2897 goto err_out_unregister_clk_notifier; 2892 goto err_out_clk_dis_phy;
2898 } 2893 }
2899 2894
2900 ret = dwceqos_mii_probe(ndev); 2895 ret = dwceqos_mii_probe(ndev);
2901 if (ret != 0) { 2896 if (ret != 0) {
2902 netdev_err(ndev, "mii_probe fail.\n"); 2897 netdev_err(ndev, "mii_probe fail.\n");
2903 ret = -ENXIO; 2898 ret = -ENXIO;
2904 goto err_out_unregister_clk_notifier; 2899 goto err_out_clk_dis_phy;
2905 } 2900 }
2906 2901
2907 dwceqos_set_umac_addr(lp, lp->ndev->dev_addr, 0); 2902 dwceqos_set_umac_addr(lp, lp->ndev->dev_addr, 0);
@@ -2919,7 +2914,7 @@ static int dwceqos_probe(struct platform_device *pdev)
2919 if (ret) { 2914 if (ret) {
2920 dev_err(&lp->pdev->dev, "Unable to retrieve DT, error %d\n", 2915 dev_err(&lp->pdev->dev, "Unable to retrieve DT, error %d\n",
2921 ret); 2916 ret);
2922 goto err_out_unregister_clk_notifier; 2917 goto err_out_clk_dis_phy;
2923 } 2918 }
2924 dev_info(&lp->pdev->dev, "pdev->id %d, baseaddr 0x%08lx, irq %d\n", 2919 dev_info(&lp->pdev->dev, "pdev->id %d, baseaddr 0x%08lx, irq %d\n",
2925 pdev->id, ndev->base_addr, ndev->irq); 2920 pdev->id, ndev->base_addr, ndev->irq);
@@ -2929,18 +2924,24 @@ static int dwceqos_probe(struct platform_device *pdev)
2929 if (ret) { 2924 if (ret) {
2930 dev_err(&lp->pdev->dev, "Unable to request IRQ %d, error %d\n", 2925 dev_err(&lp->pdev->dev, "Unable to request IRQ %d, error %d\n",
2931 ndev->irq, ret); 2926 ndev->irq, ret);
2932 goto err_out_unregister_clk_notifier; 2927 goto err_out_clk_dis_phy;
2933 } 2928 }
2934 2929
2935 if (netif_msg_probe(lp)) 2930 if (netif_msg_probe(lp))
2936 netdev_dbg(ndev, "net_local@%p\n", lp); 2931 netdev_dbg(ndev, "net_local@%p\n", lp);
2937 2932
2933 netif_napi_add(ndev, &lp->napi, dwceqos_rx_poll, NAPI_POLL_WEIGHT);
2934
2935 ret = register_netdev(ndev);
2936 if (ret) {
2937 dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
2938 goto err_out_clk_dis_phy;
2939 }
2940
2938 return 0; 2941 return 0;
2939 2942
2940err_out_unregister_clk_notifier: 2943err_out_clk_dis_phy:
2941 clk_disable_unprepare(lp->phy_ref_clk); 2944 clk_disable_unprepare(lp->phy_ref_clk);
2942err_out_unregister_netdev:
2943 unregister_netdev(ndev);
2944err_out_clk_dis_aper: 2945err_out_clk_dis_aper:
2945 clk_disable_unprepare(lp->apb_pclk); 2946 clk_disable_unprepare(lp->apb_pclk);
2946err_out_free_netdev: 2947err_out_free_netdev:
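The dwceqos probe reordering has the same shape as the smsc911x change: netif_napi_add() and register_netdev() move to the very end so .ndo_open cannot race against half-initialised clocks or PHY, and the error labels are renamed for what each one undoes. The resulting unwind chain, as a condensed fragment (the full chain is in the hunks above):

	ret = register_netdev(ndev);            /* last step: device fully initialised */
	if (ret)
		goto err_out_clk_dis_phy;
	return 0;

err_out_clk_dis_phy:
	clk_disable_unprepare(lp->phy_ref_clk); /* each label releases exactly what preceded it */
err_out_clk_dis_aper:
	clk_disable_unprepare(lp->apb_pclk);
err_out_free_netdev:
	free_netdev(ndev);
	return ret;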
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index 7452b5f9d024..7108c68f16d3 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -1987,7 +1987,7 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1987 if ((readl(nic->regs + FPGA_VER) & 0xFFF) >= 378) { 1987 if ((readl(nic->regs + FPGA_VER) & 0xFFF) >= 378) {
1988 err = pci_enable_msi(pdev); 1988 err = pci_enable_msi(pdev);
1989 if (err) 1989 if (err)
1990 pr_err("Can't eneble msi. error is %d\n", err); 1990 pr_err("Can't enable msi. error is %d\n", err);
1991 else 1991 else
1992 nic->irq_type = IRQ_MSI; 1992 nic->irq_type = IRQ_MSI;
1993 } else 1993 } else
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index c51f34693eae..f85d605e4560 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -734,6 +734,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
734 netif_receive_skb(skb); 734 netif_receive_skb(skb);
735 ndev->stats.rx_bytes += len; 735 ndev->stats.rx_bytes += len;
736 ndev->stats.rx_packets++; 736 ndev->stats.rx_packets++;
737 kmemleak_not_leak(new_skb);
737 } else { 738 } else {
738 ndev->stats.rx_dropped++; 739 ndev->stats.rx_dropped++;
739 new_skb = skb; 740 new_skb = skb;
@@ -1325,6 +1326,7 @@ static int cpsw_ndo_open(struct net_device *ndev)
1325 kfree_skb(skb); 1326 kfree_skb(skb);
1326 goto err_cleanup; 1327 goto err_cleanup;
1327 } 1328 }
1329 kmemleak_not_leak(skb);
1328 } 1330 }
1329 /* continue even if we didn't manage to submit all 1331 /* continue even if we didn't manage to submit all
1330 * receive descs 1332 * receive descs
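The two kmemleak_not_leak() calls added to cpsw above mark RX skbs whose only live reference sits inside a CPDMA descriptor, memory kmemleak does not scan, so the scanner would otherwise flag them as leaked. A minimal sketch of the pattern, with the descriptor submission abstracted as a hypothetical dma_submit_rx():

#include <linux/kmemleak.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int example_refill_rx(struct net_device *ndev, int len)
{
	struct sk_buff *skb;
	int ret;

	skb = netdev_alloc_skb_ip_align(ndev, len);
	if (!skb)
		return -ENOMEM;

	ret = dma_submit_rx(skb, skb->data, len);  /* hypothetical: the descriptor now holds the only pointer */
	if (ret) {
		kfree_skb(skb);
		return ret;
	}

	kmemleak_not_leak(skb);  /* descriptor memory is not scanned; suppress the false positive */
	return 0;
}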
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
index 01a77145a0fa..8fd131207ee1 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.c
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
@@ -166,6 +166,7 @@ static struct platform_driver tsi_eth_driver = {
166 166
167static void tsi108_timed_checker(unsigned long dev_ptr); 167static void tsi108_timed_checker(unsigned long dev_ptr);
168 168
169#ifdef DEBUG
169static void dump_eth_one(struct net_device *dev) 170static void dump_eth_one(struct net_device *dev)
170{ 171{
171 struct tsi108_prv_data *data = netdev_priv(dev); 172 struct tsi108_prv_data *data = netdev_priv(dev);
@@ -190,6 +191,7 @@ static void dump_eth_one(struct net_device *dev)
190 TSI_READ(TSI108_EC_RXESTAT), 191 TSI_READ(TSI108_EC_RXESTAT),
191 TSI_READ(TSI108_EC_RXERR), data->rxpending); 192 TSI_READ(TSI108_EC_RXERR), data->rxpending);
192} 193}
194#endif
193 195
194/* Synchronization is needed between the thread and up/down events. 196/* Synchronization is needed between the thread and up/down events.
195 * Note that the PHY is accessed through the same registers for both 197 * Note that the PHY is accessed through the same registers for both
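dump_eth_one() is only reachable from DEBUG builds, so compiling it unconditionally provokes a defined-but-unused warning; fencing the definition with the same #ifdef as its callers removes it. One common variant of the idiom, with a hypothetical no-op fallback macro so call sites need no guards of their own:

#ifdef DEBUG
static void dump_state(struct net_device *dev)
{
	/* verbose register dump, wanted only while debugging */
}
#define DUMP_STATE(dev)	dump_state(dev)
#else
#define DUMP_STATE(dev)	do { } while (0)	/* compiles away entirely */
#endif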
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 3cee84a24815..93dc10b10c09 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -1131,11 +1131,13 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
1131 lp->rx_ping_pong = get_bool(ofdev, "xlnx,rx-ping-pong"); 1131 lp->rx_ping_pong = get_bool(ofdev, "xlnx,rx-ping-pong");
1132 mac_address = of_get_mac_address(ofdev->dev.of_node); 1132 mac_address = of_get_mac_address(ofdev->dev.of_node);
1133 1133
1134 if (mac_address) 1134 if (mac_address) {
1135 /* Set the MAC address. */ 1135 /* Set the MAC address. */
1136 memcpy(ndev->dev_addr, mac_address, ETH_ALEN); 1136 memcpy(ndev->dev_addr, mac_address, ETH_ALEN);
1137 else 1137 } else {
1138 dev_warn(dev, "No MAC address found\n"); 1138 dev_warn(dev, "No MAC address found, using random\n");
1139 eth_hw_addr_random(ndev);
1140 }
1139 1141
1140 /* Clear the Tx CSR's in case this is a restart */ 1142 /* Clear the Tx CSR's in case this is a restart */
1141 __raw_writel(0, lp->base_addr + XEL_TSR_OFFSET); 1143 __raw_writel(0, lp->base_addr + XEL_TSR_OFFSET);
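When the device tree supplies no MAC address, the emaclite probe above now falls back to eth_hw_addr_random() instead of leaving dev_addr zeroed; that helper generates a random locally-administered unicast address and records NET_ADDR_RANDOM in addr_assign_type, so userspace can tell the address is not persistent. The shape of the fallback, as a self-contained helper:

#include <linux/etherdevice.h>
#include <linux/of_net.h>

static void example_set_mac(struct net_device *ndev, struct device_node *np)
{
	const void *mac = of_get_mac_address(np);

	if (mac)
		memcpy(ndev->dev_addr, mac, ETH_ALEN);  /* firmware-provided address */
	else
		eth_hw_addr_random(ndev);  /* random LAA; marks addr_assign_type NET_ADDR_RANDOM */
}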
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 467fb8b4d083..591af71eae56 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -644,12 +644,6 @@ struct netvsc_reconfig {
644 u32 event; 644 u32 event;
645}; 645};
646 646
647struct garp_wrk {
648 struct work_struct dwrk;
649 struct net_device *netdev;
650 struct netvsc_device *netvsc_dev;
651};
652
653/* The context of the netvsc device */ 647/* The context of the netvsc device */
654struct net_device_context { 648struct net_device_context {
655 /* point back to our device context */ 649 /* point back to our device context */
@@ -667,7 +661,6 @@ struct net_device_context {
667 661
668 struct work_struct work; 662 struct work_struct work;
669 u32 msg_enable; /* debug level */ 663 u32 msg_enable; /* debug level */
670 struct garp_wrk gwrk;
671 664
672 struct netvsc_stats __percpu *tx_stats; 665 struct netvsc_stats __percpu *tx_stats;
673 struct netvsc_stats __percpu *rx_stats; 666 struct netvsc_stats __percpu *rx_stats;
@@ -678,6 +671,15 @@ struct net_device_context {
678 671
679 /* the device is going away */ 672 /* the device is going away */
680 bool start_remove; 673 bool start_remove;
674
675 /* State to manage the associated VF interface. */
676 struct net_device *vf_netdev;
677 bool vf_inject;
678 atomic_t vf_use_cnt;
679 /* 1: allocated, serial number is valid. 0: not allocated */
680 u32 vf_alloc;
681 /* Serial number of the VF to team with */
682 u32 vf_serial;
681}; 683};
682 684
683/* Per netvsc device */ 685/* Per netvsc device */
@@ -733,15 +735,7 @@ struct netvsc_device {
733 u32 max_pkt; /* max number of pkt in one send, e.g. 8 */ 735 u32 max_pkt; /* max number of pkt in one send, e.g. 8 */
734 u32 pkt_align; /* alignment bytes, e.g. 8 */ 736 u32 pkt_align; /* alignment bytes, e.g. 8 */
735 737
736 /* 1: allocated, serial number is valid. 0: not allocated */
737 u32 vf_alloc;
738 /* Serial number of the VF to team with */
739 u32 vf_serial;
740 atomic_t open_cnt; 738 atomic_t open_cnt;
741 /* State to manage the associated VF interface. */
742 bool vf_inject;
743 struct net_device *vf_netdev;
744 atomic_t vf_use_cnt;
745}; 739};
746 740
747static inline struct netvsc_device * 741static inline struct netvsc_device *
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 20e09174ff62..410fb8e81376 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -77,13 +77,9 @@ static struct netvsc_device *alloc_net_device(void)
77 init_waitqueue_head(&net_device->wait_drain); 77 init_waitqueue_head(&net_device->wait_drain);
78 net_device->destroy = false; 78 net_device->destroy = false;
79 atomic_set(&net_device->open_cnt, 0); 79 atomic_set(&net_device->open_cnt, 0);
80 atomic_set(&net_device->vf_use_cnt, 0);
81 net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT; 80 net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
82 net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT; 81 net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
83 82
84 net_device->vf_netdev = NULL;
85 net_device->vf_inject = false;
86
87 return net_device; 83 return net_device;
88} 84}
89 85
@@ -1106,16 +1102,16 @@ static void netvsc_send_table(struct hv_device *hdev,
1106 nvscdev->send_table[i] = tab[i]; 1102 nvscdev->send_table[i] = tab[i];
1107} 1103}
1108 1104
1109static void netvsc_send_vf(struct netvsc_device *nvdev, 1105static void netvsc_send_vf(struct net_device_context *net_device_ctx,
1110 struct nvsp_message *nvmsg) 1106 struct nvsp_message *nvmsg)
1111{ 1107{
1112 nvdev->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated; 1108 net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated;
1113 nvdev->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial; 1109 net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial;
1114} 1110}
1115 1111
1116static inline void netvsc_receive_inband(struct hv_device *hdev, 1112static inline void netvsc_receive_inband(struct hv_device *hdev,
1117 struct netvsc_device *nvdev, 1113 struct net_device_context *net_device_ctx,
1118 struct nvsp_message *nvmsg) 1114 struct nvsp_message *nvmsg)
1119{ 1115{
1120 switch (nvmsg->hdr.msg_type) { 1116 switch (nvmsg->hdr.msg_type) {
1121 case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE: 1117 case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE:
@@ -1123,7 +1119,7 @@ static inline void netvsc_receive_inband(struct hv_device *hdev,
1123 break; 1119 break;
1124 1120
1125 case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION: 1121 case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION:
1126 netvsc_send_vf(nvdev, nvmsg); 1122 netvsc_send_vf(net_device_ctx, nvmsg);
1127 break; 1123 break;
1128 } 1124 }
1129} 1125}
@@ -1136,6 +1132,7 @@ static void netvsc_process_raw_pkt(struct hv_device *device,
1136 struct vmpacket_descriptor *desc) 1132 struct vmpacket_descriptor *desc)
1137{ 1133{
1138 struct nvsp_message *nvmsg; 1134 struct nvsp_message *nvmsg;
1135 struct net_device_context *net_device_ctx = netdev_priv(ndev);
1139 1136
1140 nvmsg = (struct nvsp_message *)((unsigned long) 1137 nvmsg = (struct nvsp_message *)((unsigned long)
1141 desc + (desc->offset8 << 3)); 1138 desc + (desc->offset8 << 3));
@@ -1150,7 +1147,7 @@ static void netvsc_process_raw_pkt(struct hv_device *device,
1150 break; 1147 break;
1151 1148
1152 case VM_PKT_DATA_INBAND: 1149 case VM_PKT_DATA_INBAND:
1153 netvsc_receive_inband(device, net_device, nvmsg); 1150 netvsc_receive_inband(device, net_device_ctx, nvmsg);
1154 break; 1151 break;
1155 1152
1156 default: 1153 default:
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 41bd952cc28d..3ba29fc80d05 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -658,20 +658,19 @@ int netvsc_recv_callback(struct hv_device *device_obj,
658 struct sk_buff *skb; 658 struct sk_buff *skb;
659 struct sk_buff *vf_skb; 659 struct sk_buff *vf_skb;
660 struct netvsc_stats *rx_stats; 660 struct netvsc_stats *rx_stats;
661 struct netvsc_device *netvsc_dev = net_device_ctx->nvdev;
662 u32 bytes_recvd = packet->total_data_buflen; 661 u32 bytes_recvd = packet->total_data_buflen;
663 int ret = 0; 662 int ret = 0;
664 663
665 if (!net || net->reg_state != NETREG_REGISTERED) 664 if (!net || net->reg_state != NETREG_REGISTERED)
666 return NVSP_STAT_FAIL; 665 return NVSP_STAT_FAIL;
667 666
668 if (READ_ONCE(netvsc_dev->vf_inject)) { 667 if (READ_ONCE(net_device_ctx->vf_inject)) {
669 atomic_inc(&netvsc_dev->vf_use_cnt); 668 atomic_inc(&net_device_ctx->vf_use_cnt);
670 if (!READ_ONCE(netvsc_dev->vf_inject)) { 669 if (!READ_ONCE(net_device_ctx->vf_inject)) {
671 /* 670 /*
672 * We raced; just move on. 671 * We raced; just move on.
673 */ 672 */
674 atomic_dec(&netvsc_dev->vf_use_cnt); 673 atomic_dec(&net_device_ctx->vf_use_cnt);
675 goto vf_injection_done; 674 goto vf_injection_done;
676 } 675 }
677 676
@@ -683,17 +682,19 @@ int netvsc_recv_callback(struct hv_device *device_obj,
683 * the host). Deliver these via the VF interface 682 * the host). Deliver these via the VF interface
684 * in the guest. 683 * in the guest.
685 */ 684 */
686 vf_skb = netvsc_alloc_recv_skb(netvsc_dev->vf_netdev, packet, 685 vf_skb = netvsc_alloc_recv_skb(net_device_ctx->vf_netdev,
687 csum_info, *data, vlan_tci); 686 packet, csum_info, *data,
687 vlan_tci);
688 if (vf_skb != NULL) { 688 if (vf_skb != NULL) {
689 ++netvsc_dev->vf_netdev->stats.rx_packets; 689 ++net_device_ctx->vf_netdev->stats.rx_packets;
690 netvsc_dev->vf_netdev->stats.rx_bytes += bytes_recvd; 690 net_device_ctx->vf_netdev->stats.rx_bytes +=
691 bytes_recvd;
691 netif_receive_skb(vf_skb); 692 netif_receive_skb(vf_skb);
692 } else { 693 } else {
693 ++net->stats.rx_dropped; 694 ++net->stats.rx_dropped;
694 ret = NVSP_STAT_FAIL; 695 ret = NVSP_STAT_FAIL;
695 } 696 }
696 atomic_dec(&netvsc_dev->vf_use_cnt); 697 atomic_dec(&net_device_ctx->vf_use_cnt);
697 return ret; 698 return ret;
698 } 699 }
699 700
@@ -1150,17 +1151,6 @@ static void netvsc_free_netdev(struct net_device *netdev)
1150 free_netdev(netdev); 1151 free_netdev(netdev);
1151} 1152}
1152 1153
1153static void netvsc_notify_peers(struct work_struct *wrk)
1154{
1155 struct garp_wrk *gwrk;
1156
1157 gwrk = container_of(wrk, struct garp_wrk, dwrk);
1158
1159 netdev_notify_peers(gwrk->netdev);
1160
1161 atomic_dec(&gwrk->netvsc_dev->vf_use_cnt);
1162}
1163
1164static struct net_device *get_netvsc_net_device(char *mac) 1154static struct net_device *get_netvsc_net_device(char *mac)
1165{ 1155{
1166 struct net_device *dev, *found = NULL; 1156 struct net_device *dev, *found = NULL;
@@ -1203,7 +1193,7 @@ static int netvsc_register_vf(struct net_device *vf_netdev)
1203 1193
1204 net_device_ctx = netdev_priv(ndev); 1194 net_device_ctx = netdev_priv(ndev);
1205 netvsc_dev = net_device_ctx->nvdev; 1195 netvsc_dev = net_device_ctx->nvdev;
1206 if (netvsc_dev == NULL) 1196 if (!netvsc_dev || net_device_ctx->vf_netdev)
1207 return NOTIFY_DONE; 1197 return NOTIFY_DONE;
1208 1198
1209 netdev_info(ndev, "VF registering: %s\n", vf_netdev->name); 1199 netdev_info(ndev, "VF registering: %s\n", vf_netdev->name);
@@ -1211,10 +1201,23 @@ static int netvsc_register_vf(struct net_device *vf_netdev)
1211 * Take a reference on the module. 1201 * Take a reference on the module.
1212 */ 1202 */
1213 try_module_get(THIS_MODULE); 1203 try_module_get(THIS_MODULE);
1214 netvsc_dev->vf_netdev = vf_netdev; 1204 net_device_ctx->vf_netdev = vf_netdev;
1215 return NOTIFY_OK; 1205 return NOTIFY_OK;
1216} 1206}
1217 1207
1208static void netvsc_inject_enable(struct net_device_context *net_device_ctx)
1209{
1210 net_device_ctx->vf_inject = true;
1211}
1212
1213static void netvsc_inject_disable(struct net_device_context *net_device_ctx)
1214{
1215 net_device_ctx->vf_inject = false;
1216
1217 /* Wait for currently active users to drain out. */
1218 while (atomic_read(&net_device_ctx->vf_use_cnt) != 0)
1219 udelay(50);
1220}
1218 1221
1219static int netvsc_vf_up(struct net_device *vf_netdev) 1222static int netvsc_vf_up(struct net_device *vf_netdev)
1220{ 1223{
@@ -1233,11 +1236,11 @@ static int netvsc_vf_up(struct net_device *vf_netdev)
1233 net_device_ctx = netdev_priv(ndev); 1236 net_device_ctx = netdev_priv(ndev);
1234 netvsc_dev = net_device_ctx->nvdev; 1237 netvsc_dev = net_device_ctx->nvdev;
1235 1238
1236 if ((netvsc_dev == NULL) || (netvsc_dev->vf_netdev == NULL)) 1239 if (!netvsc_dev || !net_device_ctx->vf_netdev)
1237 return NOTIFY_DONE; 1240 return NOTIFY_DONE;
1238 1241
1239 netdev_info(ndev, "VF up: %s\n", vf_netdev->name); 1242 netdev_info(ndev, "VF up: %s\n", vf_netdev->name);
1240 netvsc_dev->vf_inject = true; 1243 netvsc_inject_enable(net_device_ctx);
1241 1244
1242 /* 1245 /*
1243 * Open the device before switching data path. 1246 * Open the device before switching data path.
@@ -1252,15 +1255,8 @@ static int netvsc_vf_up(struct net_device *vf_netdev)
1252 1255
1253 netif_carrier_off(ndev); 1256 netif_carrier_off(ndev);
1254 1257
1255 /* 1258 /* Now notify peers through VF device. */
1256 * Now notify peers. We are scheduling work to 1259 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, vf_netdev);
1257 * notify peers; take a reference to prevent
1258 * the VF interface from vanishing.
1259 */
1260 atomic_inc(&netvsc_dev->vf_use_cnt);
1261 net_device_ctx->gwrk.netdev = vf_netdev;
1262 net_device_ctx->gwrk.netvsc_dev = netvsc_dev;
1263 schedule_work(&net_device_ctx->gwrk.dwrk);
1264 1260
1265 return NOTIFY_OK; 1261 return NOTIFY_OK;
1266} 1262}
@@ -1283,29 +1279,18 @@ static int netvsc_vf_down(struct net_device *vf_netdev)
1283 net_device_ctx = netdev_priv(ndev); 1279 net_device_ctx = netdev_priv(ndev);
1284 netvsc_dev = net_device_ctx->nvdev; 1280 netvsc_dev = net_device_ctx->nvdev;
1285 1281
1286 if ((netvsc_dev == NULL) || (netvsc_dev->vf_netdev == NULL)) 1282 if (!netvsc_dev || !net_device_ctx->vf_netdev)
1287 return NOTIFY_DONE; 1283 return NOTIFY_DONE;
1288 1284
1289 netdev_info(ndev, "VF down: %s\n", vf_netdev->name); 1285 netdev_info(ndev, "VF down: %s\n", vf_netdev->name);
1290 netvsc_dev->vf_inject = false; 1286 netvsc_inject_disable(net_device_ctx);
1291 /*
1292 * Wait for currently active users to
1293 * drain out.
1294 */
1295
1296 while (atomic_read(&netvsc_dev->vf_use_cnt) != 0)
1297 udelay(50);
1298 netvsc_switch_datapath(ndev, false); 1287 netvsc_switch_datapath(ndev, false);
1299 netdev_info(ndev, "Data path switched from VF: %s\n", vf_netdev->name); 1288 netdev_info(ndev, "Data path switched from VF: %s\n", vf_netdev->name);
1300 rndis_filter_close(netvsc_dev); 1289 rndis_filter_close(netvsc_dev);
1301 netif_carrier_on(ndev); 1290 netif_carrier_on(ndev);
1302 /* 1291
1303 * Notify peers. 1292 /* Now notify peers through netvsc device. */
1304 */ 1293 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, ndev);
1305 atomic_inc(&netvsc_dev->vf_use_cnt);
1306 net_device_ctx->gwrk.netdev = ndev;
1307 net_device_ctx->gwrk.netvsc_dev = netvsc_dev;
1308 schedule_work(&net_device_ctx->gwrk.dwrk);
1309 1294
1310 return NOTIFY_OK; 1295 return NOTIFY_OK;
1311} 1296}
@@ -1327,11 +1312,11 @@ static int netvsc_unregister_vf(struct net_device *vf_netdev)
1327 1312
1328 net_device_ctx = netdev_priv(ndev); 1313 net_device_ctx = netdev_priv(ndev);
1329 netvsc_dev = net_device_ctx->nvdev; 1314 netvsc_dev = net_device_ctx->nvdev;
1330 if (netvsc_dev == NULL) 1315 if (!netvsc_dev || !net_device_ctx->vf_netdev)
1331 return NOTIFY_DONE; 1316 return NOTIFY_DONE;
1332 netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name); 1317 netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name);
1333 1318 netvsc_inject_disable(net_device_ctx);
1334 netvsc_dev->vf_netdev = NULL; 1319 net_device_ctx->vf_netdev = NULL;
1335 module_put(THIS_MODULE); 1320 module_put(THIS_MODULE);
1336 return NOTIFY_OK; 1321 return NOTIFY_OK;
1337} 1322}
@@ -1377,11 +1362,14 @@ static int netvsc_probe(struct hv_device *dev,
1377 1362
1378 INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change); 1363 INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
1379 INIT_WORK(&net_device_ctx->work, do_set_multicast); 1364 INIT_WORK(&net_device_ctx->work, do_set_multicast);
1380 INIT_WORK(&net_device_ctx->gwrk.dwrk, netvsc_notify_peers);
1381 1365
1382 spin_lock_init(&net_device_ctx->lock); 1366 spin_lock_init(&net_device_ctx->lock);
1383 INIT_LIST_HEAD(&net_device_ctx->reconfig_events); 1367 INIT_LIST_HEAD(&net_device_ctx->reconfig_events);
1384 1368
1369 atomic_set(&net_device_ctx->vf_use_cnt, 0);
1370 net_device_ctx->vf_netdev = NULL;
1371 net_device_ctx->vf_inject = false;
1372
1385 net->netdev_ops = &device_ops; 1373 net->netdev_ops = &device_ops;
1386 1374
1387 net->hw_features = NETVSC_HW_FEATURES; 1375 net->hw_features = NETVSC_HW_FEATURES;
@@ -1494,8 +1482,13 @@ static int netvsc_netdev_event(struct notifier_block *this,
1494{ 1482{
1495 struct net_device *event_dev = netdev_notifier_info_to_dev(ptr); 1483 struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
1496 1484
1497 /* Avoid Vlan, Bonding dev with same MAC registering as VF */ 1485 /* Avoid Vlan dev with same MAC registering as VF */
1498 if (event_dev->priv_flags & (IFF_802_1Q_VLAN | IFF_BONDING)) 1486 if (event_dev->priv_flags & IFF_802_1Q_VLAN)
1487 return NOTIFY_DONE;
1488
1489 /* Avoid Bonding master dev with same MAC registering as VF */
1490 if (event_dev->priv_flags & IFF_BONDING &&
1491 event_dev->flags & IFF_MASTER)
1499 return NOTIFY_DONE; 1492 return NOTIFY_DONE;
1500 1493
1501 switch (event) { 1494 switch (event) {
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index d13e6e15d7b5..351e701eb043 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -270,6 +270,7 @@ struct macsec_dev {
270 struct pcpu_secy_stats __percpu *stats; 270 struct pcpu_secy_stats __percpu *stats;
271 struct list_head secys; 271 struct list_head secys;
272 struct gro_cells gro_cells; 272 struct gro_cells gro_cells;
273 unsigned int nest_level;
273}; 274};
274 275
275/** 276/**
@@ -2699,6 +2700,8 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
2699 2700
2700#define MACSEC_FEATURES \ 2701#define MACSEC_FEATURES \
2701 (NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST) 2702 (NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST)
2703static struct lock_class_key macsec_netdev_addr_lock_key;
2704
2702static int macsec_dev_init(struct net_device *dev) 2705static int macsec_dev_init(struct net_device *dev)
2703{ 2706{
2704 struct macsec_dev *macsec = macsec_priv(dev); 2707 struct macsec_dev *macsec = macsec_priv(dev);
@@ -2910,6 +2913,13 @@ static int macsec_get_iflink(const struct net_device *dev)
2910 return macsec_priv(dev)->real_dev->ifindex; 2913 return macsec_priv(dev)->real_dev->ifindex;
2911} 2914}
2912 2915
2916
2917static int macsec_get_nest_level(struct net_device *dev)
2918{
2919 return macsec_priv(dev)->nest_level;
2920}
2921
2922
2913static const struct net_device_ops macsec_netdev_ops = { 2923static const struct net_device_ops macsec_netdev_ops = {
2914 .ndo_init = macsec_dev_init, 2924 .ndo_init = macsec_dev_init,
2915 .ndo_uninit = macsec_dev_uninit, 2925 .ndo_uninit = macsec_dev_uninit,
@@ -2923,6 +2933,7 @@ static const struct net_device_ops macsec_netdev_ops = {
2923 .ndo_start_xmit = macsec_start_xmit, 2933 .ndo_start_xmit = macsec_start_xmit,
2924 .ndo_get_stats64 = macsec_get_stats64, 2934 .ndo_get_stats64 = macsec_get_stats64,
2925 .ndo_get_iflink = macsec_get_iflink, 2935 .ndo_get_iflink = macsec_get_iflink,
2936 .ndo_get_lock_subclass = macsec_get_nest_level,
2926}; 2937};
2927 2938
2928static const struct device_type macsec_type = { 2939static const struct device_type macsec_type = {
@@ -3047,22 +3058,31 @@ static void macsec_del_dev(struct macsec_dev *macsec)
3047 } 3058 }
3048} 3059}
3049 3060
3061static void macsec_common_dellink(struct net_device *dev, struct list_head *head)
3062{
3063 struct macsec_dev *macsec = macsec_priv(dev);
3064 struct net_device *real_dev = macsec->real_dev;
3065
3066 unregister_netdevice_queue(dev, head);
3067 list_del_rcu(&macsec->secys);
3068 macsec_del_dev(macsec);
3069 netdev_upper_dev_unlink(real_dev, dev);
3070
3071 macsec_generation++;
3072}
3073
3050static void macsec_dellink(struct net_device *dev, struct list_head *head) 3074static void macsec_dellink(struct net_device *dev, struct list_head *head)
3051{ 3075{
3052 struct macsec_dev *macsec = macsec_priv(dev); 3076 struct macsec_dev *macsec = macsec_priv(dev);
3053 struct net_device *real_dev = macsec->real_dev; 3077 struct net_device *real_dev = macsec->real_dev;
3054 struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev); 3078 struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
3055 3079
3056 macsec_generation++; 3080 macsec_common_dellink(dev, head);
3057 3081
3058 unregister_netdevice_queue(dev, head);
3059 list_del_rcu(&macsec->secys);
3060 if (list_empty(&rxd->secys)) { 3082 if (list_empty(&rxd->secys)) {
3061 netdev_rx_handler_unregister(real_dev); 3083 netdev_rx_handler_unregister(real_dev);
3062 kfree(rxd); 3084 kfree(rxd);
3063 } 3085 }
3064
3065 macsec_del_dev(macsec);
3066} 3086}
3067 3087
3068static int register_macsec_dev(struct net_device *real_dev, 3088static int register_macsec_dev(struct net_device *real_dev,
@@ -3181,6 +3201,16 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
3181 3201
3182 dev_hold(real_dev); 3202 dev_hold(real_dev);
3183 3203
3204 macsec->nest_level = dev_get_nest_level(real_dev) + 1;
3205 netdev_lockdep_set_classes(dev);
3206 lockdep_set_class_and_subclass(&dev->addr_list_lock,
3207 &macsec_netdev_addr_lock_key,
3208 macsec_get_nest_level(dev));
3209
3210 err = netdev_upper_dev_link(real_dev, dev);
3211 if (err < 0)
3212 goto unregister;
3213
3184 /* need to be already registered so that ->init has run and 3214 /* need to be already registered so that ->init has run and
3185 * the MAC addr is set 3215 * the MAC addr is set
3186 */ 3216 */
@@ -3193,12 +3223,12 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
3193 3223
3194 if (rx_handler && sci_exists(real_dev, sci)) { 3224 if (rx_handler && sci_exists(real_dev, sci)) {
3195 err = -EBUSY; 3225 err = -EBUSY;
3196 goto unregister; 3226 goto unlink;
3197 } 3227 }
3198 3228
3199 err = macsec_add_dev(dev, sci, icv_len); 3229 err = macsec_add_dev(dev, sci, icv_len);
3200 if (err) 3230 if (err)
3201 goto unregister; 3231 goto unlink;
3202 3232
3203 if (data) 3233 if (data)
3204 macsec_changelink_common(dev, data); 3234 macsec_changelink_common(dev, data);
@@ -3213,6 +3243,8 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
3213 3243
3214del_dev: 3244del_dev:
3215 macsec_del_dev(macsec); 3245 macsec_del_dev(macsec);
3246unlink:
3247 netdev_upper_dev_unlink(real_dev, dev);
3216unregister: 3248unregister:
3217 unregister_netdevice(dev); 3249 unregister_netdevice(dev);
3218 return err; 3250 return err;
@@ -3382,8 +3414,12 @@ static int macsec_notify(struct notifier_block *this, unsigned long event,
3382 3414
3383 rxd = macsec_data_rtnl(real_dev); 3415 rxd = macsec_data_rtnl(real_dev);
3384 list_for_each_entry_safe(m, n, &rxd->secys, secys) { 3416 list_for_each_entry_safe(m, n, &rxd->secys, secys) {
3385 macsec_dellink(m->secy.netdev, &head); 3417 macsec_common_dellink(m->secy.netdev, &head);
3386 } 3418 }
3419
3420 netdev_rx_handler_unregister(real_dev);
3421 kfree(rxd);
3422
3387 unregister_netdevice_many(&head); 3423 unregister_netdevice_many(&head);
3388 break; 3424 break;
3389 } 3425 }
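The nest_level/lockdep additions in the macsec hunks address stacked-device locking: macsec and its real_dev both take dev->addr_list_lock on the address-sync path, which lockdep reports as recursion unless each layer uses a distinct subclass. Setting nest_level to one more than the lower device's and wiring it through .ndo_get_lock_subclass is the stock pattern; condensed below, with the example_ names standing in for driver-specific ones:

static struct lock_class_key example_addr_lock_key;  /* one static key per driver */

static int example_get_nest_level(struct net_device *dev)
{
	return example_priv(dev)->nest_level;   /* hypothetical priv accessor */
}

/* at newlink time, once real_dev is known: */
	priv->nest_level = dev_get_nest_level(real_dev) + 1;
	lockdep_set_class_and_subclass(&dev->addr_list_lock,
				       &example_addr_lock_key,
				       priv->nest_level);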
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index cd9b53834bf6..3234fcdea317 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -1315,7 +1315,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
1315 vlan->dev = dev; 1315 vlan->dev = dev;
1316 vlan->port = port; 1316 vlan->port = port;
1317 vlan->set_features = MACVLAN_FEATURES; 1317 vlan->set_features = MACVLAN_FEATURES;
1318 vlan->nest_level = dev_get_nest_level(lowerdev, netif_is_macvlan) + 1; 1318 vlan->nest_level = dev_get_nest_level(lowerdev) + 1;
1319 1319
1320 vlan->mode = MACVLAN_MODE_VEPA; 1320 vlan->mode = MACVLAN_MODE_VEPA;
1321 if (data && data[IFLA_MACVLAN_MODE]) 1321 if (data && data[IFLA_MACVLAN_MODE])
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index a38c0dac514b..070e3290aa6e 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -275,7 +275,6 @@ static void macvtap_put_queue(struct macvtap_queue *q)
275 rtnl_unlock(); 275 rtnl_unlock();
276 276
277 synchronize_rcu(); 277 synchronize_rcu();
278 skb_array_cleanup(&q->skb_array);
279 sock_put(&q->sk); 278 sock_put(&q->sk);
280} 279}
281 280
@@ -533,10 +532,8 @@ static void macvtap_sock_write_space(struct sock *sk)
533static void macvtap_sock_destruct(struct sock *sk) 532static void macvtap_sock_destruct(struct sock *sk)
534{ 533{
535 struct macvtap_queue *q = container_of(sk, struct macvtap_queue, sk); 534 struct macvtap_queue *q = container_of(sk, struct macvtap_queue, sk);
536 struct sk_buff *skb;
537 535
538 while ((skb = skb_array_consume(&q->skb_array)) != NULL) 536 skb_array_cleanup(&q->skb_array);
539 kfree_skb(skb);
540} 537}
541 538
542static int macvtap_open(struct inode *inode, struct file *file) 539static int macvtap_open(struct inode *inode, struct file *file)
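Moving the drain from macvtap_put_queue() into the sock destructor ties the ring's lifetime to the last sock reference: skb_array_cleanup() frees any skbs still queued (through its per-entry destructor) and releases the ring storage, and running it from sk destruct guarantees no concurrent reader remains. The destructor in isolation, with the example_queue type hypothetical:

static void example_sock_destruct(struct sock *sk)
{
	struct example_queue *q = container_of(sk, struct example_queue, sk);

	/* last sk reference gone: free queued skbs and the ring itself */
	skb_array_cleanup(&q->skb_array);
}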
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 47a64342cc16..b4863e4e522b 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -303,6 +303,7 @@ config MDIO_HISI_FEMAC
303 303
304config MDIO_XGENE 304config MDIO_XGENE
305 tristate "APM X-Gene SoC MDIO bus controller" 305 tristate "APM X-Gene SoC MDIO bus controller"
306 depends on ARCH_XGENE || COMPILE_TEST
306 help 307 help
307 This module provides a driver for the MDIO busses found in the 308 This module provides a driver for the MDIO busses found in the
308 APM X-Gene SoC's. 309 APM X-Gene SoC's.
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 1882d9828c99..885ac9cbab5a 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -677,17 +677,28 @@ static void kszphy_get_stats(struct phy_device *phydev,
677 data[i] = kszphy_get_stat(phydev, i); 677 data[i] = kszphy_get_stat(phydev, i);
678} 678}
679 679
680static int kszphy_resume(struct phy_device *phydev) 680static int kszphy_suspend(struct phy_device *phydev)
681{ 681{
682 int value; 682 /* Disable PHY Interrupts */
683 if (phy_interrupt_is_valid(phydev)) {
684 phydev->interrupts = PHY_INTERRUPT_DISABLED;
685 if (phydev->drv->config_intr)
686 phydev->drv->config_intr(phydev);
687 }
683 688
684 mutex_lock(&phydev->lock); 689 return genphy_suspend(phydev);
690}
685 691
686 value = phy_read(phydev, MII_BMCR); 692static int kszphy_resume(struct phy_device *phydev)
687 phy_write(phydev, MII_BMCR, value & ~BMCR_PDOWN); 693{
694 genphy_resume(phydev);
688 695
689 kszphy_config_intr(phydev); 696 /* Enable PHY Interrupts */
690 mutex_unlock(&phydev->lock); 697 if (phy_interrupt_is_valid(phydev)) {
698 phydev->interrupts = PHY_INTERRUPT_ENABLED;
699 if (phydev->drv->config_intr)
700 phydev->drv->config_intr(phydev);
701 }
691 702
692 return 0; 703 return 0;
693} 704}
@@ -900,7 +911,7 @@ static struct phy_driver ksphy_driver[] = {
900 .get_sset_count = kszphy_get_sset_count, 911 .get_sset_count = kszphy_get_sset_count,
901 .get_strings = kszphy_get_strings, 912 .get_strings = kszphy_get_strings,
902 .get_stats = kszphy_get_stats, 913 .get_stats = kszphy_get_stats,
903 .suspend = genphy_suspend, 914 .suspend = kszphy_suspend,
904 .resume = kszphy_resume, 915 .resume = kszphy_resume,
905}, { 916}, {
906 .phy_id = PHY_ID_KSZ8061, 917 .phy_id = PHY_ID_KSZ8061,
@@ -953,7 +964,7 @@ static struct phy_driver ksphy_driver[] = {
953 .get_strings = kszphy_get_strings, 964 .get_strings = kszphy_get_strings,
954 .get_stats = kszphy_get_stats, 965 .get_stats = kszphy_get_stats,
955 .suspend = genphy_suspend, 966 .suspend = genphy_suspend,
956 .resume = genphy_resume, 967 .resume = kszphy_resume,
957}, { 968}, {
958 .phy_id = PHY_ID_KSZ8873MLL, 969 .phy_id = PHY_ID_KSZ8873MLL,
959 .phy_id_mask = MICREL_PHY_ID_MASK, 970 .phy_id_mask = MICREL_PHY_ID_MASK,
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index c5dc2c363f96..c6f66832a1a6 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -722,8 +722,10 @@ phy_err:
722int phy_start_interrupts(struct phy_device *phydev) 722int phy_start_interrupts(struct phy_device *phydev)
723{ 723{
724 atomic_set(&phydev->irq_disable, 0); 724 atomic_set(&phydev->irq_disable, 0);
725 if (request_irq(phydev->irq, phy_interrupt, 0, "phy_interrupt", 725 if (request_irq(phydev->irq, phy_interrupt,
726 phydev) < 0) { 726 IRQF_SHARED,
727 "phy_interrupt",
728 phydev) < 0) {
727 pr_warn("%s: Can't get IRQ %d (PHY)\n", 729 pr_warn("%s: Can't get IRQ %d (PHY)\n",
728 phydev->mdio.bus->name, phydev->irq); 730 phydev->mdio.bus->name, phydev->irq);
729 phydev->irq = PHY_POLL; 731 phydev->irq = PHY_POLL;
diff --git a/drivers/net/team/team_mode_loadbalance.c b/drivers/net/team/team_mode_loadbalance.c
index cdb19b385d42..b228bea7931f 100644
--- a/drivers/net/team/team_mode_loadbalance.c
+++ b/drivers/net/team/team_mode_loadbalance.c
@@ -14,9 +14,23 @@
14#include <linux/init.h> 14#include <linux/init.h>
15#include <linux/errno.h> 15#include <linux/errno.h>
16#include <linux/netdevice.h> 16#include <linux/netdevice.h>
17#include <linux/etherdevice.h>
17#include <linux/filter.h> 18#include <linux/filter.h>
18#include <linux/if_team.h> 19#include <linux/if_team.h>
19 20
21static rx_handler_result_t lb_receive(struct team *team, struct team_port *port,
22 struct sk_buff *skb)
23{
24 if (unlikely(skb->protocol == htons(ETH_P_SLOW))) {
25 /* LACPDU packets should go to exact delivery */
26 const unsigned char *dest = eth_hdr(skb)->h_dest;
27
28 if (is_link_local_ether_addr(dest) && dest[5] == 0x02)
29 return RX_HANDLER_EXACT;
30 }
31 return RX_HANDLER_ANOTHER;
32}
33
20struct lb_priv; 34struct lb_priv;
21 35
22typedef struct team_port *lb_select_tx_port_func_t(struct team *, 36typedef struct team_port *lb_select_tx_port_func_t(struct team *,
@@ -652,6 +666,7 @@ static const struct team_mode_ops lb_mode_ops = {
652 .port_enter = lb_port_enter, 666 .port_enter = lb_port_enter,
653 .port_leave = lb_port_leave, 667 .port_leave = lb_port_leave,
654 .port_disabled = lb_port_disabled, 668 .port_disabled = lb_port_disabled,
669 .receive = lb_receive,
655 .transmit = lb_transmit, 670 .transmit = lb_transmit,
656}; 671};
657 672
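lb_receive() above steers LACPDUs to RX_HANDLER_EXACT so they reach the LACP state machine on the port device rather than the team's load-balanced path. The filter rests on two facts: Slow Protocols frames carry EtherType ETH_P_SLOW, and their destination is the reserved multicast 01-80-C2-00-00-02; is_link_local_ether_addr() matches the 01-80-C2-00-00-0X block and the dest[5] == 0x02 test narrows it. The same check as a standalone predicate (example_is_lacpdu is a name invented here):

#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/skbuff.h>

static bool example_is_lacpdu(const struct sk_buff *skb)
{
	const unsigned char *dest = eth_hdr(skb)->h_dest;

	return skb->protocol == htons(ETH_P_SLOW) &&
	       is_link_local_ether_addr(dest) &&    /* 01-80-C2-00-00-0X */
	       dest[5] == 0x02;                     /* Slow Protocols address */
}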
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 9c8b5bc2b9d8..6f9df375c5d4 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -894,11 +894,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
894 if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC))) 894 if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
895 goto drop; 895 goto drop;
896 896
897 if (skb->sk && sk_fullsock(skb->sk)) { 897 skb_tx_timestamp(skb);
898 sock_tx_timestamp(skb->sk, skb->sk->sk_tsflags,
899 &skb_shinfo(skb)->tx_flags);
900 sw_tx_timestamp(skb);
901 }
902 898
903 /* Orphan the skb - required as we might hang on to it 899 /* Orphan the skb - required as we might hang on to it
904 * for indefinite time. 900 * for indefinite time.
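The tun change replaces the open-coded sock_tx_timestamp()/sw_tx_timestamp() pair with skb_tx_timestamp(), the helper drivers call at the moment a packet is handed off for transmission; it emits a software TX timestamp when the skb requested one, and it needs none of the sk_fullsock() plumbing because the request already travels in skb_shinfo(skb)->tx_flags. Typical placement, with example_enqueue() hypothetical:

static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* timestamp at the hand-off point, just before the skb leaves our control */
	skb_tx_timestamp(skb);

	return example_enqueue(skb, dev);       /* hypothetical: queue toward the reader */
}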
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index 770212baaf05..528b9c9c4e60 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -1009,6 +1009,7 @@ static int kaweth_probe(
1009 struct net_device *netdev; 1009 struct net_device *netdev;
1010 const eth_addr_t bcast_addr = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; 1010 const eth_addr_t bcast_addr = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
1011 int result = 0; 1011 int result = 0;
1012 int rv = -EIO;
1012 1013
1013 dev_dbg(dev, 1014 dev_dbg(dev,
1014 "Kawasaki Device Probe (Device number:%d): 0x%4.4x:0x%4.4x:0x%4.4x\n", 1015 "Kawasaki Device Probe (Device number:%d): 0x%4.4x:0x%4.4x:0x%4.4x\n",
@@ -1029,6 +1030,7 @@ static int kaweth_probe(
1029 kaweth = netdev_priv(netdev); 1030 kaweth = netdev_priv(netdev);
1030 kaweth->dev = udev; 1031 kaweth->dev = udev;
1031 kaweth->net = netdev; 1032 kaweth->net = netdev;
1033 kaweth->intf = intf;
1032 1034
1033 spin_lock_init(&kaweth->device_lock); 1035 spin_lock_init(&kaweth->device_lock);
1034 init_waitqueue_head(&kaweth->term_wait); 1036 init_waitqueue_head(&kaweth->term_wait);
@@ -1048,6 +1050,10 @@ static int kaweth_probe(
1048 /* Download the firmware */ 1050 /* Download the firmware */
1049 dev_info(dev, "Downloading firmware...\n"); 1051 dev_info(dev, "Downloading firmware...\n");
1050 kaweth->firmware_buf = (__u8 *)__get_free_page(GFP_KERNEL); 1052 kaweth->firmware_buf = (__u8 *)__get_free_page(GFP_KERNEL);
1053 if (!kaweth->firmware_buf) {
1054 rv = -ENOMEM;
1055 goto err_free_netdev;
1056 }
1051 if ((result = kaweth_download_firmware(kaweth, 1057 if ((result = kaweth_download_firmware(kaweth,
1052 "kaweth/new_code.bin", 1058 "kaweth/new_code.bin",
1053 100, 1059 100,
@@ -1139,8 +1145,6 @@ err_fw:
1139 1145
1140 dev_dbg(dev, "Initializing net device.\n"); 1146 dev_dbg(dev, "Initializing net device.\n");
1141 1147
1142 kaweth->intf = intf;
1143
1144 kaweth->tx_urb = usb_alloc_urb(0, GFP_KERNEL); 1148 kaweth->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
1145 if (!kaweth->tx_urb) 1149 if (!kaweth->tx_urb)
1146 goto err_free_netdev; 1150 goto err_free_netdev;
@@ -1204,7 +1208,7 @@ err_only_tx:
1204err_free_netdev: 1208err_free_netdev:
1205 free_netdev(netdev); 1209 free_netdev(netdev);
1206 1210
1207 return -EIO; 1211 return rv;
1208} 1212}
1209 1213
1210/**************************************************************** 1214/****************************************************************
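Two fixes ride together in the kaweth hunks: the previously unchecked __get_free_page() gets a NULL check, and probe stops collapsing every failure into -EIO; rv starts at -EIO and is overridden where a more specific code exists. The shape of that, as a condensed fragment:

	int rv = -EIO;                          /* default for device-side failures */

	kaweth->firmware_buf = (__u8 *)__get_free_page(GFP_KERNEL);
	if (!kaweth->firmware_buf) {
		rv = -ENOMEM;                   /* resource exhaustion, not a device error */
		goto err_free_netdev;
	}
	/* ... remaining probe steps ... */

err_free_netdev:
	free_netdev(netdev);
	return rv;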
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index c68fe495d3f9..4244b9d4418e 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -914,7 +914,9 @@ vmxnet3_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
914{ 914{
915 struct Vmxnet3_TxDataDesc *tdd; 915 struct Vmxnet3_TxDataDesc *tdd;
916 916
917 tdd = tq->data_ring.base + tq->tx_ring.next2fill; 917 tdd = (struct Vmxnet3_TxDataDesc *)((u8 *)tq->data_ring.base +
918 tq->tx_ring.next2fill *
919 tq->txdata_desc_size);
918 920
919 memcpy(tdd->data, skb->data, ctx->copy_size); 921 memcpy(tdd->data, skb->data, ctx->copy_size);
920 netdev_dbg(adapter->netdev, 922 netdev_dbg(adapter->netdev,
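The vmxnet3 hunk switches the TX data ring to byte-offset indexing: the per-queue descriptor size (txdata_desc_size) need no longer equal sizeof(struct Vmxnet3_TxDataDesc), so pointer arithmetic on the struct type would stride wrongly. A generic helper for variable-size ring entries, sketched under that assumption:

static inline void *example_ring_entry(void *base, u32 idx, u32 entry_size)
{
	/* entries are entry_size bytes each; compute the address in bytes */
	return (u8 *)base + (size_t)idx * entry_size;
}

/* usage, mirroring the hunk: */
	tdd = example_ring_entry(tq->data_ring.base, tq->tx_ring.next2fill,
				 tq->txdata_desc_size);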
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 74fc03072b87..7dc37a090549 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -69,10 +69,10 @@
69/* 69/*
70 * Version numbers 70 * Version numbers
71 */ 71 */
72#define VMXNET3_DRIVER_VERSION_STRING "1.4.9.0-k" 72#define VMXNET3_DRIVER_VERSION_STRING "1.4.a.0-k"
73 73
74/* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */ 74/* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */
75#define VMXNET3_DRIVER_VERSION_NUM 0x01040900 75#define VMXNET3_DRIVER_VERSION_NUM 0x01040a00
76 76
77#if defined(CONFIG_PCI_MSI) 77#if defined(CONFIG_PCI_MSI)
78 /* RSS only makes sense if MSI-X is supported. */ 78 /* RSS only makes sense if MSI-X is supported. */
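
The version bump keeps the one-byte-per-component encoding: the string uses a hex digit per field, so "1.4.a.0" (a = 10) packs to 0x01040a00. A sketch of the packing, assuming the same byte layout as VMXNET3_DRIVER_VERSION_NUM:

#include <stdint.h>
#include <stdio.h>

/* One byte per component: 1.4.10.0 packs to 0x01040a00. */
#define PACK_VERSION(a, b, c, d) \
	(((uint32_t)(a) << 24) | ((uint32_t)(b) << 16) | \
	 ((uint32_t)(c) << 8) | (uint32_t)(d))

int main(void)
{
	uint32_t v = PACK_VERSION(1, 4, 10, 0);

	printf("packed:   0x%08x\n", (unsigned)v);	/* 0x01040a00 */
	printf("unpacked: %u.%u.%x.%u\n",		/* 1.4.a.0 */
	       (unsigned)(v >> 24), (unsigned)((v >> 16) & 0xff),
	       (unsigned)((v >> 8) & 0xff), (unsigned)(v & 0xff));
	return 0;
}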
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index da4e3d6632f6..6e65832051d6 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -1811,7 +1811,7 @@ static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan,
1811 fl4.flowi4_mark = skb->mark; 1811 fl4.flowi4_mark = skb->mark;
1812 fl4.flowi4_proto = IPPROTO_UDP; 1812 fl4.flowi4_proto = IPPROTO_UDP;
1813 fl4.daddr = daddr; 1813 fl4.daddr = daddr;
1814 fl4.saddr = vxlan->cfg.saddr.sin.sin_addr.s_addr; 1814 fl4.saddr = *saddr;
1815 1815
1816 rt = ip_route_output_key(vxlan->net, &fl4); 1816 rt = ip_route_output_key(vxlan->net, &fl4);
1817 if (!IS_ERR(rt)) { 1817 if (!IS_ERR(rt)) {
@@ -1847,7 +1847,7 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
1847 memset(&fl6, 0, sizeof(fl6)); 1847 memset(&fl6, 0, sizeof(fl6));
1848 fl6.flowi6_oif = oif; 1848 fl6.flowi6_oif = oif;
1849 fl6.daddr = *daddr; 1849 fl6.daddr = *daddr;
1850 fl6.saddr = vxlan->cfg.saddr.sin6.sin6_addr; 1850 fl6.saddr = *saddr;
1851 fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tos), label); 1851 fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tos), label);
1852 fl6.flowi6_mark = skb->mark; 1852 fl6.flowi6_mark = skb->mark;
1853 fl6.flowi6_proto = IPPROTO_UDP; 1853 fl6.flowi6_proto = IPPROTO_UDP;
@@ -1920,7 +1920,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
1920 struct rtable *rt = NULL; 1920 struct rtable *rt = NULL;
1921 const struct iphdr *old_iph; 1921 const struct iphdr *old_iph;
1922 union vxlan_addr *dst; 1922 union vxlan_addr *dst;
1923 union vxlan_addr remote_ip; 1923 union vxlan_addr remote_ip, local_ip;
1924 union vxlan_addr *src;
1924 struct vxlan_metadata _md; 1925 struct vxlan_metadata _md;
1925 struct vxlan_metadata *md = &_md; 1926 struct vxlan_metadata *md = &_md;
1926 __be16 src_port = 0, dst_port; 1927 __be16 src_port = 0, dst_port;
@@ -1938,6 +1939,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
1938 dst_port = rdst->remote_port ? rdst->remote_port : vxlan->cfg.dst_port; 1939 dst_port = rdst->remote_port ? rdst->remote_port : vxlan->cfg.dst_port;
1939 vni = rdst->remote_vni; 1940 vni = rdst->remote_vni;
1940 dst = &rdst->remote_ip; 1941 dst = &rdst->remote_ip;
1942 src = &vxlan->cfg.saddr;
1941 dst_cache = &rdst->dst_cache; 1943 dst_cache = &rdst->dst_cache;
1942 } else { 1944 } else {
1943 if (!info) { 1945 if (!info) {
@@ -1948,11 +1950,15 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
1948 dst_port = info->key.tp_dst ? : vxlan->cfg.dst_port; 1950 dst_port = info->key.tp_dst ? : vxlan->cfg.dst_port;
1949 vni = vxlan_tun_id_to_vni(info->key.tun_id); 1951 vni = vxlan_tun_id_to_vni(info->key.tun_id);
1950 remote_ip.sa.sa_family = ip_tunnel_info_af(info); 1952 remote_ip.sa.sa_family = ip_tunnel_info_af(info);
1951 if (remote_ip.sa.sa_family == AF_INET) 1953 if (remote_ip.sa.sa_family == AF_INET) {
1952 remote_ip.sin.sin_addr.s_addr = info->key.u.ipv4.dst; 1954 remote_ip.sin.sin_addr.s_addr = info->key.u.ipv4.dst;
1953 else 1955 local_ip.sin.sin_addr.s_addr = info->key.u.ipv4.src;
1956 } else {
1954 remote_ip.sin6.sin6_addr = info->key.u.ipv6.dst; 1957 remote_ip.sin6.sin6_addr = info->key.u.ipv6.dst;
1958 local_ip.sin6.sin6_addr = info->key.u.ipv6.src;
1959 }
1955 dst = &remote_ip; 1960 dst = &remote_ip;
1961 src = &local_ip;
1956 dst_cache = &info->dst_cache; 1962 dst_cache = &info->dst_cache;
1957 } 1963 }
1958 1964
@@ -1992,15 +1998,14 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
1992 } 1998 }
1993 1999
1994 if (dst->sa.sa_family == AF_INET) { 2000 if (dst->sa.sa_family == AF_INET) {
1995 __be32 saddr;
1996
1997 if (!vxlan->vn4_sock) 2001 if (!vxlan->vn4_sock)
1998 goto drop; 2002 goto drop;
1999 sk = vxlan->vn4_sock->sock->sk; 2003 sk = vxlan->vn4_sock->sock->sk;
2000 2004
2001 rt = vxlan_get_route(vxlan, skb, 2005 rt = vxlan_get_route(vxlan, skb,
2002 rdst ? rdst->remote_ifindex : 0, tos, 2006 rdst ? rdst->remote_ifindex : 0, tos,
2003 dst->sin.sin_addr.s_addr, &saddr, 2007 dst->sin.sin_addr.s_addr,
2008 &src->sin.sin_addr.s_addr,
2004 dst_cache, info); 2009 dst_cache, info);
2005 if (IS_ERR(rt)) { 2010 if (IS_ERR(rt)) {
2006 netdev_dbg(dev, "no route to %pI4\n", 2011 netdev_dbg(dev, "no route to %pI4\n",
@@ -2017,7 +2022,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
2017 } 2022 }
2018 2023
2019 /* Bypass encapsulation if the destination is local */ 2024 /* Bypass encapsulation if the destination is local */
2020 if (rt->rt_flags & RTCF_LOCAL && 2025 if (!info && rt->rt_flags & RTCF_LOCAL &&
2021 !(rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) { 2026 !(rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) {
2022 struct vxlan_dev *dst_vxlan; 2027 struct vxlan_dev *dst_vxlan;
2023 2028
@@ -2043,13 +2048,12 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
2043 if (err < 0) 2048 if (err < 0)
2044 goto xmit_tx_error; 2049 goto xmit_tx_error;
2045 2050
2046 udp_tunnel_xmit_skb(rt, sk, skb, saddr, 2051 udp_tunnel_xmit_skb(rt, sk, skb, src->sin.sin_addr.s_addr,
2047 dst->sin.sin_addr.s_addr, tos, ttl, df, 2052 dst->sin.sin_addr.s_addr, tos, ttl, df,
2048 src_port, dst_port, xnet, !udp_sum); 2053 src_port, dst_port, xnet, !udp_sum);
2049#if IS_ENABLED(CONFIG_IPV6) 2054#if IS_ENABLED(CONFIG_IPV6)
2050 } else { 2055 } else {
2051 struct dst_entry *ndst; 2056 struct dst_entry *ndst;
2052 struct in6_addr saddr;
2053 u32 rt6i_flags; 2057 u32 rt6i_flags;
2054 2058
2055 if (!vxlan->vn6_sock) 2059 if (!vxlan->vn6_sock)
@@ -2058,7 +2062,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
2058 2062
2059 ndst = vxlan6_get_route(vxlan, skb, 2063 ndst = vxlan6_get_route(vxlan, skb,
2060 rdst ? rdst->remote_ifindex : 0, tos, 2064 rdst ? rdst->remote_ifindex : 0, tos,
2061 label, &dst->sin6.sin6_addr, &saddr, 2065 label, &dst->sin6.sin6_addr,
2066 &src->sin6.sin6_addr,
2062 dst_cache, info); 2067 dst_cache, info);
2063 if (IS_ERR(ndst)) { 2068 if (IS_ERR(ndst)) {
2064 netdev_dbg(dev, "no route to %pI6\n", 2069 netdev_dbg(dev, "no route to %pI6\n",
@@ -2077,7 +2082,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
2077 2082
2078 /* Bypass encapsulation if the destination is local */ 2083 /* Bypass encapsulation if the destination is local */
2079 rt6i_flags = ((struct rt6_info *)ndst)->rt6i_flags; 2084 rt6i_flags = ((struct rt6_info *)ndst)->rt6i_flags;
2080 if (rt6i_flags & RTF_LOCAL && 2085 if (!info && rt6i_flags & RTF_LOCAL &&
2081 !(rt6i_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) { 2086 !(rt6i_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) {
2082 struct vxlan_dev *dst_vxlan; 2087 struct vxlan_dev *dst_vxlan;
2083 2088
@@ -2104,7 +2109,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
2104 return; 2109 return;
2105 } 2110 }
2106 udp_tunnel6_xmit_skb(ndst, sk, skb, dev, 2111 udp_tunnel6_xmit_skb(ndst, sk, skb, dev,
2107 &saddr, &dst->sin6.sin6_addr, tos, ttl, 2112 &src->sin6.sin6_addr,
2113 &dst->sin6.sin6_addr, tos, ttl,
2108 label, src_port, dst_port, !udp_sum); 2114 label, src_port, dst_port, !udp_sum);
2109#endif 2115#endif
2110 } 2116 }
@@ -2776,14 +2782,15 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
2776 struct net_device *lowerdev = NULL; 2782 struct net_device *lowerdev = NULL;
2777 2783
2778 if (conf->flags & VXLAN_F_GPE) { 2784 if (conf->flags & VXLAN_F_GPE) {
2779 if (conf->flags & ~VXLAN_F_ALLOWED_GPE)
2780 return -EINVAL;
2781 /* For now, allow GPE only together with COLLECT_METADATA. 2785 /* For now, allow GPE only together with COLLECT_METADATA.
2782 * This can be relaxed later; in such case, the other side 2786 * This can be relaxed later; in such case, the other side
2783 * of the PtP link will have to be provided. 2787 * of the PtP link will have to be provided.
2784 */ 2788 */
2785 if (!(conf->flags & VXLAN_F_COLLECT_METADATA)) 2789 if ((conf->flags & ~VXLAN_F_ALLOWED_GPE) ||
2790 !(conf->flags & VXLAN_F_COLLECT_METADATA)) {
2791 pr_info("unsupported combination of extensions\n");
2786 return -EINVAL; 2792 return -EINVAL;
2793 }
2787 2794
2788 vxlan_raw_setup(dev); 2795 vxlan_raw_setup(dev);
2789 } else { 2796 } else {
@@ -2836,6 +2843,9 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
2836 dev->mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM); 2843 dev->mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
2837 2844
2838 needed_headroom = lowerdev->hard_header_len; 2845 needed_headroom = lowerdev->hard_header_len;
2846 } else if (vxlan_addr_multicast(&dst->remote_ip)) {
2847 pr_info("multicast destination requires interface to be specified\n");
2848 return -EINVAL;
2839 } 2849 }
2840 2850
2841 if (conf->mtu) { 2851 if (conf->mtu) {
@@ -2868,8 +2878,10 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
2868 tmp->cfg.saddr.sa.sa_family == AF_INET6) == use_ipv6 && 2878 tmp->cfg.saddr.sa.sa_family == AF_INET6) == use_ipv6 &&
2869 tmp->cfg.dst_port == vxlan->cfg.dst_port && 2879 tmp->cfg.dst_port == vxlan->cfg.dst_port &&
2870 (tmp->flags & VXLAN_F_RCV_FLAGS) == 2880 (tmp->flags & VXLAN_F_RCV_FLAGS) ==
2871 (vxlan->flags & VXLAN_F_RCV_FLAGS)) 2881 (vxlan->flags & VXLAN_F_RCV_FLAGS)) {
2872 return -EEXIST; 2882 pr_info("duplicate VNI %u\n", be32_to_cpu(conf->vni));
2883 return -EEXIST;
2884 }
2873 } 2885 }
2874 2886
2875 dev->ethtool_ops = &vxlan_ethtool_ops; 2887 dev->ethtool_ops = &vxlan_ethtool_ops;
@@ -2903,7 +2915,6 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev,
2903 struct nlattr *tb[], struct nlattr *data[]) 2915 struct nlattr *tb[], struct nlattr *data[])
2904{ 2916{
2905 struct vxlan_config conf; 2917 struct vxlan_config conf;
2906 int err;
2907 2918
2908 memset(&conf, 0, sizeof(conf)); 2919 memset(&conf, 0, sizeof(conf));
2909 2920
@@ -3012,26 +3023,7 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev,
3012 if (tb[IFLA_MTU]) 3023 if (tb[IFLA_MTU])
3013 conf.mtu = nla_get_u32(tb[IFLA_MTU]); 3024 conf.mtu = nla_get_u32(tb[IFLA_MTU]);
3014 3025
3015 err = vxlan_dev_configure(src_net, dev, &conf); 3026 return vxlan_dev_configure(src_net, dev, &conf);
3016 switch (err) {
3017 case -ENODEV:
3018 pr_info("ifindex %d does not exist\n", conf.remote_ifindex);
3019 break;
3020
3021 case -EPERM:
3022 pr_info("IPv6 is disabled via sysctl\n");
3023 break;
3024
3025 case -EEXIST:
3026 pr_info("duplicate VNI %u\n", be32_to_cpu(conf.vni));
3027 break;
3028
3029 case -EINVAL:
3030 pr_info("unsupported combination of extensions\n");
3031 break;
3032 }
3033
3034 return err;
3035} 3027}
3036 3028
3037static void vxlan_dellink(struct net_device *dev, struct list_head *head) 3029static void vxlan_dellink(struct net_device *dev, struct list_head *head)
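
The vxlan changes thread an explicit source address through the transmit path: rdst-based traffic keeps using the configured cfg.saddr, while metadata-based (COLLECT_METADATA) traffic now uses the per-packet source carried in the tunnel info, for both address families. Error reporting also moves from vxlan_newlink into vxlan_dev_configure, next to the checks that produce the errors. A compact userspace sketch of the family-tagged union that makes the single src/dst plumbing possible; tun_addr and the helper are hypothetical stand-ins for union vxlan_addr:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>

/* One slot that can hold either an IPv4 or an IPv6 endpoint, keyed by
 * sa.sa_family, as union vxlan_addr does. */
union tun_addr {
	struct sockaddr sa;
	struct sockaddr_in sin;
	struct sockaddr_in6 sin6;
};

static void print_addr(const char *tag, const union tun_addr *a)
{
	char buf[INET6_ADDRSTRLEN];

	if (a->sa.sa_family == AF_INET)
		inet_ntop(AF_INET, &a->sin.sin_addr, buf, sizeof(buf));
	else
		inet_ntop(AF_INET6, &a->sin6.sin6_addr, buf, sizeof(buf));
	printf("%s: %s\n", tag, buf);
}

int main(void)
{
	union tun_addr src, dst;

	memset(&src, 0, sizeof(src));
	memset(&dst, 0, sizeof(dst));

	/* Per-packet metadata fills in both endpoints; the transmit path
	 * then reads whichever member matches the family tag instead of
	 * a fixed device-level source address. */
	src.sa.sa_family = AF_INET;
	dst.sa.sa_family = AF_INET;
	inet_pton(AF_INET, "10.0.0.1", &src.sin.sin_addr);
	inet_pton(AF_INET, "10.0.0.2", &dst.sin.sin_addr);

	print_addr("src", &src);
	print_addr("dst", &dst);
	return 0;
}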
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index 78db5d679f19..24c8d65bcf34 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -1525,7 +1525,7 @@ static void ath10k_htt_rx_h_filter(struct ath10k *ar,
1525static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt) 1525static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
1526{ 1526{
1527 struct ath10k *ar = htt->ar; 1527 struct ath10k *ar = htt->ar;
1528 static struct ieee80211_rx_status rx_status; 1528 struct ieee80211_rx_status *rx_status = &htt->rx_status;
1529 struct sk_buff_head amsdu; 1529 struct sk_buff_head amsdu;
1530 int ret; 1530 int ret;
1531 1531
@@ -1549,11 +1549,11 @@ static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
1549 return ret; 1549 return ret;
1550 } 1550 }
1551 1551
1552 ath10k_htt_rx_h_ppdu(ar, &amsdu, &rx_status, 0xffff); 1552 ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
1553 ath10k_htt_rx_h_unchain(ar, &amsdu, ret > 0); 1553 ath10k_htt_rx_h_unchain(ar, &amsdu, ret > 0);
1554 ath10k_htt_rx_h_filter(ar, &amsdu, &rx_status); 1554 ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
1555 ath10k_htt_rx_h_mpdu(ar, &amsdu, &rx_status); 1555 ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
1556 ath10k_htt_rx_h_deliver(ar, &amsdu, &rx_status); 1556 ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);
1557 1557
1558 return 0; 1558 return 0;
1559} 1559}
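
The ath10k hunk removes a function-local `static struct ieee80211_rx_status`, which is one object shared by every ath10k instance in the system, and uses the per-device htt->rx_status instead. A toy demonstration of why function-local statics break with multiple instances (names hypothetical):

#include <stdio.h>

struct demo_ctx {
	int rx_status;	/* scratch state owned by this instance */
};

/* Broken shape: a function-local static is one variable shared by
 * every caller and every device instance. */
static int handle_broken(int sample)
{
	static int rx_status;

	rx_status += sample;
	return rx_status;
}

/* Fixed shape: the scratch state lives in the per-instance context. */
static int handle_fixed(struct demo_ctx *ctx, int sample)
{
	ctx->rx_status += sample;
	return ctx->rx_status;
}

int main(void)
{
	struct demo_ctx a = { 0 }, b = { 0 };
	int first = handle_broken(1);
	int second = handle_broken(1);	/* "device B" sees A's state */

	printf("broken: %d %d\n", first, second);	/* 1 2 */
	printf("fixed:  %d %d\n", handle_fixed(&a, 1),
	       handle_fixed(&b, 1));			/* 1 1 */
	return 0;
}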
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 9a22c478dd1b..07933c51a850 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -3162,7 +3162,6 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
3162 pci_hard_reset = ath10k_pci_qca988x_chip_reset; 3162 pci_hard_reset = ath10k_pci_qca988x_chip_reset;
3163 break; 3163 break;
3164 case QCA9887_1_0_DEVICE_ID: 3164 case QCA9887_1_0_DEVICE_ID:
3165 dev_warn(&pdev->dev, "QCA9887 support is still experimental, there are likely bugs. You have been warned.\n");
3166 hw_rev = ATH10K_HW_QCA9887; 3165 hw_rev = ATH10K_HW_QCA9887;
3167 pci_ps = false; 3166 pci_ps = false;
3168 pci_soft_reset = ath10k_pci_warm_reset; 3167 pci_soft_reset = ath10k_pci_warm_reset;
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index d1d0c06d627c..14b13f07cd1f 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -2482,6 +2482,8 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
2482 return -EINVAL; 2482 return -EINVAL;
2483 } 2483 }
2484 2484
2485 ath9k_gpio_cap_init(ah);
2486
2485 if (AR_SREV_9485(ah) || 2487 if (AR_SREV_9485(ah) ||
2486 AR_SREV_9285(ah) || 2488 AR_SREV_9285(ah) ||
2487 AR_SREV_9330(ah) || 2489 AR_SREV_9330(ah) ||
@@ -2531,8 +2533,6 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
2531 else 2533 else
2532 pCap->hw_caps &= ~ATH9K_HW_CAP_HT; 2534 pCap->hw_caps &= ~ATH9K_HW_CAP_HT;
2533 2535
2534 ath9k_gpio_cap_init(ah);
2535
2536 if (AR_SREV_9160_10_OR_LATER(ah) || AR_SREV_9100(ah)) 2536 if (AR_SREV_9160_10_OR_LATER(ah) || AR_SREV_9100(ah))
2537 pCap->rts_aggr_limit = ATH_AMPDU_LIMIT_MAX; 2537 pCap->rts_aggr_limit = ATH_AMPDU_LIMIT_MAX;
2538 else 2538 else
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index a394622c9022..7cb65c303f8d 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -718,9 +718,12 @@ static int ath9k_start(struct ieee80211_hw *hw)
718 if (!ath_complete_reset(sc, false)) 718 if (!ath_complete_reset(sc, false))
719 ah->reset_power_on = false; 719 ah->reset_power_on = false;
720 720
721 if (ah->led_pin >= 0) 721 if (ah->led_pin >= 0) {
722 ath9k_hw_set_gpio(ah, ah->led_pin, 722 ath9k_hw_set_gpio(ah, ah->led_pin,
723 (ah->config.led_active_high) ? 1 : 0); 723 (ah->config.led_active_high) ? 1 : 0);
724 ath9k_hw_gpio_request_out(ah, ah->led_pin, NULL,
725 AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
726 }
724 727
725 /* 728 /*
726 * Reset key cache to sane defaults (all entries cleared) instead of 729 * Reset key cache to sane defaults (all entries cleared) instead of
@@ -864,9 +867,11 @@ static void ath9k_stop(struct ieee80211_hw *hw)
864 867
865 spin_lock_bh(&sc->sc_pcu_lock); 868 spin_lock_bh(&sc->sc_pcu_lock);
866 869
867 if (ah->led_pin >= 0) 870 if (ah->led_pin >= 0) {
868 ath9k_hw_set_gpio(ah, ah->led_pin, 871 ath9k_hw_set_gpio(ah, ah->led_pin,
869 (ah->config.led_active_high) ? 0 : 1); 872 (ah->config.led_active_high) ? 0 : 1);
873 ath9k_hw_gpio_request_in(ah, ah->led_pin, NULL);
874 }
870 875
871 ath_prepare_reset(sc); 876 ath_prepare_reset(sc);
872 877
@@ -1154,6 +1159,7 @@ void ath9k_calculate_summary_state(struct ath_softc *sc,
1154 bool changed = (iter_data.primary_sta != ctx->primary_sta); 1159 bool changed = (iter_data.primary_sta != ctx->primary_sta);
1155 1160
1156 if (iter_data.primary_sta) { 1161 if (iter_data.primary_sta) {
1162 iter_data.primary_beacon_vif = iter_data.primary_sta;
1157 iter_data.beacons = true; 1163 iter_data.beacons = true;
1158 ath9k_set_assoc_state(sc, iter_data.primary_sta, 1164 ath9k_set_assoc_state(sc, iter_data.primary_sta,
1159 changed); 1165 changed);
@@ -1563,13 +1569,13 @@ static int ath9k_sta_state(struct ieee80211_hw *hw,
1563 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 1569 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1564 int ret = 0; 1570 int ret = 0;
1565 1571
1566 if (old_state == IEEE80211_STA_AUTH && 1572 if (old_state == IEEE80211_STA_NOTEXIST &&
1567 new_state == IEEE80211_STA_ASSOC) { 1573 new_state == IEEE80211_STA_NONE) {
1568 ret = ath9k_sta_add(hw, vif, sta); 1574 ret = ath9k_sta_add(hw, vif, sta);
1569 ath_dbg(common, CONFIG, 1575 ath_dbg(common, CONFIG,
1570 "Add station: %pM\n", sta->addr); 1576 "Add station: %pM\n", sta->addr);
1571 } else if (old_state == IEEE80211_STA_ASSOC && 1577 } else if (old_state == IEEE80211_STA_NONE &&
1572 new_state == IEEE80211_STA_AUTH) { 1578 new_state == IEEE80211_STA_NOTEXIST) {
1573 ret = ath9k_sta_remove(hw, vif, sta); 1579 ret = ath9k_sta_remove(hw, vif, sta);
1574 ath_dbg(common, CONFIG, 1580 ath_dbg(common, CONFIG,
1575 "Remove station: %pM\n", sta->addr); 1581 "Remove station: %pM\n", sta->addr);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index 2628d5e12c64..b8aec5e5ef93 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -4527,7 +4527,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
4527 (u8 *)&settings->beacon.head[ie_offset], 4527 (u8 *)&settings->beacon.head[ie_offset],
4528 settings->beacon.head_len - ie_offset, 4528 settings->beacon.head_len - ie_offset,
4529 WLAN_EID_SSID); 4529 WLAN_EID_SSID);
4530 if (!ssid_ie) 4530 if (!ssid_ie || ssid_ie->len > IEEE80211_MAX_SSID_LEN)
4531 return -EINVAL; 4531 return -EINVAL;
4532 4532
4533 memcpy(ssid_le.SSID, ssid_ie->data, ssid_ie->len); 4533 memcpy(ssid_le.SSID, ssid_ie->data, ssid_ie->len);
@@ -5635,7 +5635,7 @@ static s32 brcmf_notify_vif_event(struct brcmf_if *ifp,
5635 ifevent->action, ifevent->flags, ifevent->ifidx, 5635 ifevent->action, ifevent->flags, ifevent->ifidx,
5636 ifevent->bsscfgidx); 5636 ifevent->bsscfgidx);
5637 5637
5638 mutex_lock(&event->vif_event_lock); 5638 spin_lock(&event->vif_event_lock);
5639 event->action = ifevent->action; 5639 event->action = ifevent->action;
5640 vif = event->vif; 5640 vif = event->vif;
5641 5641
@@ -5643,7 +5643,7 @@ static s32 brcmf_notify_vif_event(struct brcmf_if *ifp,
5643 case BRCMF_E_IF_ADD: 5643 case BRCMF_E_IF_ADD:
5644 /* waiting process may have timed out */ 5644 /* waiting process may have timed out */
5645 if (!cfg->vif_event.vif) { 5645 if (!cfg->vif_event.vif) {
5646 mutex_unlock(&event->vif_event_lock); 5646 spin_unlock(&event->vif_event_lock);
5647 return -EBADF; 5647 return -EBADF;
5648 } 5648 }
5649 5649
@@ -5654,24 +5654,24 @@ static s32 brcmf_notify_vif_event(struct brcmf_if *ifp,
5654 ifp->ndev->ieee80211_ptr = &vif->wdev; 5654 ifp->ndev->ieee80211_ptr = &vif->wdev;
5655 SET_NETDEV_DEV(ifp->ndev, wiphy_dev(cfg->wiphy)); 5655 SET_NETDEV_DEV(ifp->ndev, wiphy_dev(cfg->wiphy));
5656 } 5656 }
5657 mutex_unlock(&event->vif_event_lock); 5657 spin_unlock(&event->vif_event_lock);
5658 wake_up(&event->vif_wq); 5658 wake_up(&event->vif_wq);
5659 return 0; 5659 return 0;
5660 5660
5661 case BRCMF_E_IF_DEL: 5661 case BRCMF_E_IF_DEL:
5662 mutex_unlock(&event->vif_event_lock); 5662 spin_unlock(&event->vif_event_lock);
5663 /* event may not be upon user request */ 5663 /* event may not be upon user request */
5664 if (brcmf_cfg80211_vif_event_armed(cfg)) 5664 if (brcmf_cfg80211_vif_event_armed(cfg))
5665 wake_up(&event->vif_wq); 5665 wake_up(&event->vif_wq);
5666 return 0; 5666 return 0;
5667 5667
5668 case BRCMF_E_IF_CHANGE: 5668 case BRCMF_E_IF_CHANGE:
5669 mutex_unlock(&event->vif_event_lock); 5669 spin_unlock(&event->vif_event_lock);
5670 wake_up(&event->vif_wq); 5670 wake_up(&event->vif_wq);
5671 return 0; 5671 return 0;
5672 5672
5673 default: 5673 default:
5674 mutex_unlock(&event->vif_event_lock); 5674 spin_unlock(&event->vif_event_lock);
5675 break; 5675 break;
5676 } 5676 }
5677 return -EINVAL; 5677 return -EINVAL;
@@ -5792,7 +5792,7 @@ static void wl_deinit_priv(struct brcmf_cfg80211_info *cfg)
5792static void init_vif_event(struct brcmf_cfg80211_vif_event *event) 5792static void init_vif_event(struct brcmf_cfg80211_vif_event *event)
5793{ 5793{
5794 init_waitqueue_head(&event->vif_wq); 5794 init_waitqueue_head(&event->vif_wq);
5795 mutex_init(&event->vif_event_lock); 5795 spin_lock_init(&event->vif_event_lock);
5796} 5796}
5797 5797
5798static s32 brcmf_dongle_roam(struct brcmf_if *ifp) 5798static s32 brcmf_dongle_roam(struct brcmf_if *ifp)
@@ -6691,9 +6691,9 @@ static inline bool vif_event_equals(struct brcmf_cfg80211_vif_event *event,
6691{ 6691{
6692 u8 evt_action; 6692 u8 evt_action;
6693 6693
6694 mutex_lock(&event->vif_event_lock); 6694 spin_lock(&event->vif_event_lock);
6695 evt_action = event->action; 6695 evt_action = event->action;
6696 mutex_unlock(&event->vif_event_lock); 6696 spin_unlock(&event->vif_event_lock);
6697 return evt_action == action; 6697 return evt_action == action;
6698} 6698}
6699 6699
@@ -6702,10 +6702,10 @@ void brcmf_cfg80211_arm_vif_event(struct brcmf_cfg80211_info *cfg,
6702{ 6702{
6703 struct brcmf_cfg80211_vif_event *event = &cfg->vif_event; 6703 struct brcmf_cfg80211_vif_event *event = &cfg->vif_event;
6704 6704
6705 mutex_lock(&event->vif_event_lock); 6705 spin_lock(&event->vif_event_lock);
6706 event->vif = vif; 6706 event->vif = vif;
6707 event->action = 0; 6707 event->action = 0;
6708 mutex_unlock(&event->vif_event_lock); 6708 spin_unlock(&event->vif_event_lock);
6709} 6709}
6710 6710
6711bool brcmf_cfg80211_vif_event_armed(struct brcmf_cfg80211_info *cfg) 6711bool brcmf_cfg80211_vif_event_armed(struct brcmf_cfg80211_info *cfg)
@@ -6713,9 +6713,9 @@ bool brcmf_cfg80211_vif_event_armed(struct brcmf_cfg80211_info *cfg)
6713 struct brcmf_cfg80211_vif_event *event = &cfg->vif_event; 6713 struct brcmf_cfg80211_vif_event *event = &cfg->vif_event;
6714 bool armed; 6714 bool armed;
6715 6715
6716 mutex_lock(&event->vif_event_lock); 6716 spin_lock(&event->vif_event_lock);
6717 armed = event->vif != NULL; 6717 armed = event->vif != NULL;
6718 mutex_unlock(&event->vif_event_lock); 6718 spin_unlock(&event->vif_event_lock);
6719 6719
6720 return armed; 6720 return armed;
6721} 6721}
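
vif_event_lock changes from a mutex to a spinlock: the critical sections are a few loads and stores, and a spinlock, unlike a mutex, may also be taken from contexts that are not allowed to sleep. A userspace analogue using a pthread spinlock in place of the kernel primitive; build with -pthread, and note the structure and helpers are simplified stand-ins for the brcmfmac ones:

#include <pthread.h>
#include <stdio.h>

/* The lock only guards a couple of pointer/byte updates, so a
 * busy-wait lock fits and, in the kernel, is also safe to take from
 * atomic context. */
struct vif_event {
	pthread_spinlock_t lock;
	unsigned char action;
	void *vif;
};

static void arm_event(struct vif_event *ev, void *vif)
{
	pthread_spin_lock(&ev->lock);
	ev->vif = vif;
	ev->action = 0;
	pthread_spin_unlock(&ev->lock);
}

static int event_armed(struct vif_event *ev)
{
	int armed;

	pthread_spin_lock(&ev->lock);
	armed = ev->vif != NULL;
	pthread_spin_unlock(&ev->lock);
	return armed;
}

int main(void)
{
	struct vif_event ev = { .vif = NULL };
	int dummy;

	pthread_spin_init(&ev.lock, PTHREAD_PROCESS_PRIVATE);
	printf("armed before: %d\n", event_armed(&ev));
	arm_event(&ev, &dummy);
	printf("armed after:  %d\n", event_armed(&ev));
	pthread_spin_destroy(&ev.lock);
	return 0;
}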
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
index 7d77f869b7f1..8889832c17e0 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
@@ -227,7 +227,7 @@ struct escan_info {
227 */ 227 */
228struct brcmf_cfg80211_vif_event { 228struct brcmf_cfg80211_vif_event {
229 wait_queue_head_t vif_wq; 229 wait_queue_head_t vif_wq;
230 struct mutex vif_event_lock; 230 spinlock_t vif_event_lock;
231 u8 action; 231 u8 action;
232 struct brcmf_cfg80211_vif *vif; 232 struct brcmf_cfg80211_vif *vif;
233}; 233};
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index 8d16f0204985..65e8c8766441 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -743,7 +743,7 @@ static void brcmf_del_if(struct brcmf_pub *drvr, s32 bsscfgidx,
743 * serious troublesome side effects. The p2p module will clean 743 * serious troublesome side effects. The p2p module will clean
744 * up the ifp if needed. 744 * up the ifp if needed.
745 */ 745 */
746 brcmf_p2p_ifp_removed(ifp); 746 brcmf_p2p_ifp_removed(ifp, rtnl_locked);
747 kfree(ifp); 747 kfree(ifp);
748 } 748 }
749} 749}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
index 66f942f7448e..de19c7c92bc6 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
@@ -2297,7 +2297,7 @@ int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev)
2297 return err; 2297 return err;
2298} 2298}
2299 2299
2300void brcmf_p2p_ifp_removed(struct brcmf_if *ifp) 2300void brcmf_p2p_ifp_removed(struct brcmf_if *ifp, bool rtnl_locked)
2301{ 2301{
2302 struct brcmf_cfg80211_info *cfg; 2302 struct brcmf_cfg80211_info *cfg;
2303 struct brcmf_cfg80211_vif *vif; 2303 struct brcmf_cfg80211_vif *vif;
@@ -2306,9 +2306,11 @@ void brcmf_p2p_ifp_removed(struct brcmf_if *ifp)
2306 vif = ifp->vif; 2306 vif = ifp->vif;
2307 cfg = wdev_to_cfg(&vif->wdev); 2307 cfg = wdev_to_cfg(&vif->wdev);
2308 cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif = NULL; 2308 cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif = NULL;
2309 rtnl_lock(); 2309 if (!rtnl_locked)
2310 rtnl_lock();
2310 cfg80211_unregister_wdev(&vif->wdev); 2311 cfg80211_unregister_wdev(&vif->wdev);
2311 rtnl_unlock(); 2312 if (!rtnl_locked)
2313 rtnl_unlock();
2312 brcmf_free_vif(vif); 2314 brcmf_free_vif(vif);
2313} 2315}
2314 2316
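
brcmf_p2p_ifp_removed() can now be reached both with and without the RTNL held, so the old unconditional rtnl_lock() would self-deadlock in the former case; the caller passes an rtnl_locked flag instead. A minimal sketch of the conditional-locking shape, with a pthread mutex standing in for the RTNL:

#include <pthread.h>
#include <stdio.h>

/* Stand-in for the (non-recursive) RTNL mutex. */
static pthread_mutex_t rtnl = PTHREAD_MUTEX_INITIALIZER;

/* The callee may be reached from paths that already hold the lock and
 * from paths that do not; a caller-supplied flag avoids a self-deadlock
 * on a non-recursive lock. */
static void unregister_wdev(int already_locked)
{
	if (!already_locked)
		pthread_mutex_lock(&rtnl);

	printf("unregister (locked by %s)\n",
	       already_locked ? "caller" : "callee");

	if (!already_locked)
		pthread_mutex_unlock(&rtnl);
}

int main(void)
{
	unregister_wdev(0);		/* callee takes the lock itself */

	pthread_mutex_lock(&rtnl);	/* caller already holds it */
	unregister_wdev(1);
	pthread_mutex_unlock(&rtnl);
	return 0;
}

Conditional locking like this is fragile if the flag and the actual lock state can disagree, which is why the flag is threaded down from the one place that knows, the netdev unregister path, rather than guessed in the callee.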
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h
index a3bd18c2360b..8ce9447533ef 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h
@@ -155,7 +155,7 @@ struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name,
155int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev); 155int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev);
156int brcmf_p2p_ifchange(struct brcmf_cfg80211_info *cfg, 156int brcmf_p2p_ifchange(struct brcmf_cfg80211_info *cfg,
157 enum brcmf_fil_p2p_if_types if_type); 157 enum brcmf_fil_p2p_if_types if_type);
158void brcmf_p2p_ifp_removed(struct brcmf_if *ifp); 158void brcmf_p2p_ifp_removed(struct brcmf_if *ifp, bool rtnl_locked);
159int brcmf_p2p_start_device(struct wiphy *wiphy, struct wireless_dev *wdev); 159int brcmf_p2p_start_device(struct wiphy *wiphy, struct wireless_dev *wdev);
160void brcmf_p2p_stop_device(struct wiphy *wiphy, struct wireless_dev *wdev); 160void brcmf_p2p_stop_device(struct wiphy *wiphy, struct wireless_dev *wdev);
161int brcmf_p2p_scan_prep(struct wiphy *wiphy, 161int brcmf_p2p_scan_prep(struct wiphy *wiphy,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
index 1abcabb9b6cd..46b52bf705fb 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
@@ -960,5 +960,6 @@ int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 conf_id)
960 } 960 }
961 961
962 mvm->fw_dbg_conf = conf_id; 962 mvm->fw_dbg_conf = conf_id;
963 return ret; 963
964 return 0;
964} 965}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.h
index f7dff7612c9c..e9f1be9da7d4 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.h
@@ -105,7 +105,8 @@ iwl_fw_dbg_trigger_vif_match(struct iwl_fw_dbg_trigger_tlv *trig,
105{ 105{
106 u32 trig_vif = le32_to_cpu(trig->vif_type); 106 u32 trig_vif = le32_to_cpu(trig->vif_type);
107 107
108 return trig_vif == IWL_FW_DBG_CONF_VIF_ANY || vif->type == trig_vif; 108 return trig_vif == IWL_FW_DBG_CONF_VIF_ANY ||
109 ieee80211_vif_type_p2p(vif) == trig_vif;
109} 110}
110 111
111static inline bool 112static inline bool
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 6d6064534d59..5dd77e336617 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -624,6 +624,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
624 hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN | 624 hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN |
625 NL80211_FEATURE_LOW_PRIORITY_SCAN | 625 NL80211_FEATURE_LOW_PRIORITY_SCAN |
626 NL80211_FEATURE_P2P_GO_OPPPS | 626 NL80211_FEATURE_P2P_GO_OPPPS |
627 NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
627 NL80211_FEATURE_DYNAMIC_SMPS | 628 NL80211_FEATURE_DYNAMIC_SMPS |
628 NL80211_FEATURE_STATIC_SMPS | 629 NL80211_FEATURE_STATIC_SMPS |
629 NL80211_FEATURE_SUPPORTS_WMM_ADMISSION; 630 NL80211_FEATURE_SUPPORTS_WMM_ADMISSION;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index b4fc86d5d7ef..6a615bb73042 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -467,6 +467,8 @@ struct iwl_mvm_vif {
467static inline struct iwl_mvm_vif * 467static inline struct iwl_mvm_vif *
468iwl_mvm_vif_from_mac80211(struct ieee80211_vif *vif) 468iwl_mvm_vif_from_mac80211(struct ieee80211_vif *vif)
469{ 469{
470 if (!vif)
471 return NULL;
470 return (void *)vif->drv_priv; 472 return (void *)vif->drv_priv;
471} 473}
472 474
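
iwl_mvm_vif_from_mac80211() now tolerates a NULL vif, returning NULL rather than computing an offset from a NULL pointer. The shape, as a standalone sketch with hypothetical types:

#include <stdio.h>

struct vif {
	char drv_priv[64];	/* driver-private area, as in mac80211 */
};

/* Propagate NULL instead of dereferencing it, so call sites that can
 * legitimately see a missing vif need no extra checks. */
static void *vif_priv(struct vif *vif)
{
	if (!vif)
		return NULL;
	return vif->drv_priv;
}

int main(void)
{
	struct vif v;

	printf("priv(&v)   = %p\n", vif_priv(&v));
	printf("priv(NULL) = %p\n", vif_priv(NULL));
	return 0;
}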
diff --git a/drivers/net/wireless/marvell/mwifiex/11n_aggr.c b/drivers/net/wireless/marvell/mwifiex/11n_aggr.c
index dc49c3de1f25..c47d6366875d 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n_aggr.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n_aggr.c
@@ -205,7 +205,8 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
205 205
206 do { 206 do {
207 /* Check if AMSDU can accommodate this MSDU */ 207 /* Check if AMSDU can accommodate this MSDU */
208 if (skb_tailroom(skb_aggr) < (skb_src->len + LLC_SNAP_LEN)) 208 if ((skb_aggr->len + skb_src->len + LLC_SNAP_LEN) >
209 adapter->tx_buf_size)
209 break; 210 break;
210 211
211 skb_src = skb_dequeue(&pra_list->skb_head); 212 skb_src = skb_dequeue(&pra_list->skb_head);
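
The aggregation check changes from "does the aggregate skb have tailroom for this MSDU" to "would the aggregate exceed the adapter's TX buffer size", since the allocated skb can be larger than what the hardware accepts. A sketch of the bounded aggregation loop; the sizes and frame lengths are hypothetical:

#include <stdio.h>

#define LLC_SNAP_LEN	8
#define TX_BUF_SIZE	64	/* hypothetical hardware limit */

/* Pack source frames into one aggregate, stopping when the NEXT frame
 * would push the aggregate past the device's TX buffer size, not past
 * whatever slack the aggregate buffer happens to have. */
static int aggregate(const int *src_len, int nsrc)
{
	int aggr_len = 0, i;

	for (i = 0; i < nsrc; i++) {
		if (aggr_len + src_len[i] + LLC_SNAP_LEN > TX_BUF_SIZE)
			break;
		aggr_len += src_len[i] + LLC_SNAP_LEN;
		printf("added frame %d (%d bytes), aggregate now %d\n",
		       i, src_len[i], aggr_len);
	}
	return aggr_len;
}

int main(void)
{
	int frames[] = { 10, 20, 20, 10 };

	printf("final aggregate: %d bytes\n", aggregate(frames, 4));
	return 0;
}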
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index 1d689169da76..9e1f2d9c9865 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -5700,10 +5700,11 @@ out:
5700 mutex_unlock(&wl->mutex); 5700 mutex_unlock(&wl->mutex);
5701} 5701}
5702 5702
5703static u32 wlcore_op_get_expected_throughput(struct ieee80211_sta *sta) 5703static u32 wlcore_op_get_expected_throughput(struct ieee80211_hw *hw,
5704 struct ieee80211_sta *sta)
5704{ 5705{
5705 struct wl1271_station *wl_sta = (struct wl1271_station *)sta->drv_priv; 5706 struct wl1271_station *wl_sta = (struct wl1271_station *)sta->drv_priv;
5706 struct wl1271 *wl = wl_sta->wl; 5707 struct wl1271 *wl = hw->priv;
5707 u8 hlid = wl_sta->hlid; 5708 u8 hlid = wl_sta->hlid;
5708 5709
5709 /* return in units of Kbps */ 5710 /* return in units of Kbps */
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index 88e91666f145..368795aad5c9 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -1269,6 +1269,7 @@ static int btt_blk_init(struct btt *btt)
1269 } 1269 }
1270 } 1270 }
1271 set_capacity(btt->btt_disk, btt->nlba * btt->sector_size >> 9); 1271 set_capacity(btt->btt_disk, btt->nlba * btt->sector_size >> 9);
1272 btt->nd_btt->size = btt->nlba * (u64)btt->sector_size;
1272 revalidate_disk(btt->btt_disk); 1273 revalidate_disk(btt->btt_disk);
1273 1274
1274 return 0; 1275 return 0;
diff --git a/drivers/nvdimm/btt_devs.c b/drivers/nvdimm/btt_devs.c
index 3fa7919f94a8..97dd2925ed6e 100644
--- a/drivers/nvdimm/btt_devs.c
+++ b/drivers/nvdimm/btt_devs.c
@@ -140,10 +140,30 @@ static ssize_t namespace_store(struct device *dev,
140} 140}
141static DEVICE_ATTR_RW(namespace); 141static DEVICE_ATTR_RW(namespace);
142 142
143static ssize_t size_show(struct device *dev,
144 struct device_attribute *attr, char *buf)
145{
146 struct nd_btt *nd_btt = to_nd_btt(dev);
147 ssize_t rc;
148
149 device_lock(dev);
150 if (dev->driver)
151 rc = sprintf(buf, "%llu\n", nd_btt->size);
152 else {
153 /* no size to convey if the btt instance is disabled */
154 rc = -ENXIO;
155 }
156 device_unlock(dev);
157
158 return rc;
159}
160static DEVICE_ATTR_RO(size);
161
143static struct attribute *nd_btt_attributes[] = { 162static struct attribute *nd_btt_attributes[] = {
144 &dev_attr_sector_size.attr, 163 &dev_attr_sector_size.attr,
145 &dev_attr_namespace.attr, 164 &dev_attr_namespace.attr,
146 &dev_attr_uuid.attr, 165 &dev_attr_uuid.attr,
166 &dev_attr_size.attr,
147 NULL, 167 NULL,
148}; 168};
149 169
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 458daf927336..935866fe5ec2 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -185,8 +185,12 @@ long nvdimm_clear_poison(struct device *dev, phys_addr_t phys,
185 return -ENXIO; 185 return -ENXIO;
186 186
187 nd_desc = nvdimm_bus->nd_desc; 187 nd_desc = nvdimm_bus->nd_desc;
188 /*
189 * if ndctl does not exist, it's PMEM_LEGACY and
190 * we want to just pretend everything is handled.
191 */
188 if (!nd_desc->ndctl) 192 if (!nd_desc->ndctl)
189 return -ENXIO; 193 return len;
190 194
191 memset(&ars_cap, 0, sizeof(ars_cap)); 195 memset(&ars_cap, 0, sizeof(ars_cap));
192 ars_cap.address = phys; 196 ars_cap.address = phys;
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index 40476399d227..8024a0ef86d3 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -143,6 +143,7 @@ struct nd_btt {
143 struct nd_namespace_common *ndns; 143 struct nd_namespace_common *ndns;
144 struct btt *btt; 144 struct btt *btt;
145 unsigned long lbasize; 145 unsigned long lbasize;
146 u64 size;
146 u8 *uuid; 147 u8 *uuid;
147 int id; 148 int id;
148}; 149};
diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig
index db39d53cdfb9..f7d37a62f874 100644
--- a/drivers/nvme/host/Kconfig
+++ b/drivers/nvme/host/Kconfig
@@ -30,8 +30,8 @@ config NVME_FABRICS
30 30
31config NVME_RDMA 31config NVME_RDMA
32 tristate "NVM Express over Fabrics RDMA host driver" 32 tristate "NVM Express over Fabrics RDMA host driver"
33 depends on INFINIBAND 33 depends on INFINIBAND && BLOCK
34 depends on BLK_DEV_NVME 34 select NVME_CORE
35 select NVME_FABRICS 35 select NVME_FABRICS
36 select SG_POOL 36 select SG_POOL
37 help 37 help
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 7ff2e820bbf4..2feacc70bf61 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -81,10 +81,12 @@ EXPORT_SYMBOL_GPL(nvme_cancel_request);
81bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl, 81bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
82 enum nvme_ctrl_state new_state) 82 enum nvme_ctrl_state new_state)
83{ 83{
84 enum nvme_ctrl_state old_state = ctrl->state; 84 enum nvme_ctrl_state old_state;
85 bool changed = false; 85 bool changed = false;
86 86
87 spin_lock_irq(&ctrl->lock); 87 spin_lock_irq(&ctrl->lock);
88
89 old_state = ctrl->state;
88 switch (new_state) { 90 switch (new_state) {
89 case NVME_CTRL_LIVE: 91 case NVME_CTRL_LIVE:
90 switch (old_state) { 92 switch (old_state) {
@@ -140,11 +142,12 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
140 default: 142 default:
141 break; 143 break;
142 } 144 }
143 spin_unlock_irq(&ctrl->lock);
144 145
145 if (changed) 146 if (changed)
146 ctrl->state = new_state; 147 ctrl->state = new_state;
147 148
149 spin_unlock_irq(&ctrl->lock);
150
148 return changed; 151 return changed;
149} 152}
150EXPORT_SYMBOL_GPL(nvme_change_ctrl_state); 153EXPORT_SYMBOL_GPL(nvme_change_ctrl_state);
@@ -608,7 +611,7 @@ int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
608 611
609 ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, NULL, 0, 0, 612 ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, NULL, 0, 0,
610 NVME_QID_ANY, 0, 0); 613 NVME_QID_ANY, 0, 0);
611 if (ret >= 0) 614 if (ret >= 0 && result)
612 *result = le32_to_cpu(cqe.result); 615 *result = le32_to_cpu(cqe.result);
613 return ret; 616 return ret;
614} 617}
@@ -628,7 +631,7 @@ int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
628 631
629 ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, NULL, 0, 0, 632 ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, NULL, 0, 0,
630 NVME_QID_ANY, 0, 0); 633 NVME_QID_ANY, 0, 0);
631 if (ret >= 0) 634 if (ret >= 0 && result)
632 *result = le32_to_cpu(cqe.result); 635 *result = le32_to_cpu(cqe.result);
633 return ret; 636 return ret;
634} 637}
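
nvme_change_ctrl_state() previously sampled old_state before taking ctrl->lock, so a concurrent transition could invalidate the check before the update; the hunks move both the read and the write of ctrl->state inside the lock. The later hunks additionally make the `result` out-pointer optional in nvme_get_features()/nvme_set_features(). A sketch of the read-validate-write-under-one-lock rule, with a pthread mutex standing in for the kernel's spin_lock_irq and a reduced state set:

#include <pthread.h>
#include <stdio.h>

enum ctrl_state { CTRL_NEW, CTRL_LIVE, CTRL_DELETING };

struct ctrl {
	pthread_mutex_t lock;
	enum ctrl_state state;
};

/* Read the old state, validate the transition, and write the new state
 * under a single lock hold; sampling old_state before taking the lock
 * lets a concurrent transition slip in between check and update. */
static int change_state(struct ctrl *c, enum ctrl_state new_state)
{
	enum ctrl_state old_state;
	int changed = 0;

	pthread_mutex_lock(&c->lock);
	old_state = c->state;	/* sampled under the lock */

	switch (new_state) {
	case CTRL_LIVE:
		changed = (old_state == CTRL_NEW);
		break;
	case CTRL_DELETING:
		changed = (old_state != CTRL_DELETING);
		break;
	default:
		break;
	}

	if (changed)
		c->state = new_state;
	pthread_mutex_unlock(&c->lock);
	return changed;
}

int main(void)
{
	struct ctrl c = { PTHREAD_MUTEX_INITIALIZER, CTRL_NEW };

	printf("NEW  -> LIVE: %d\n", change_state(&c, CTRL_LIVE));
	printf("LIVE -> LIVE: %d\n", change_state(&c, CTRL_LIVE));
	return 0;
}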
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index dc996761042f..4eff49174466 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -47,8 +47,10 @@ static struct nvmf_host *nvmf_host_add(const char *hostnqn)
47 47
48 mutex_lock(&nvmf_hosts_mutex); 48 mutex_lock(&nvmf_hosts_mutex);
49 host = __nvmf_host_find(hostnqn); 49 host = __nvmf_host_find(hostnqn);
50 if (host) 50 if (host) {
51 kref_get(&host->ref);
51 goto out_unlock; 52 goto out_unlock;
53 }
52 54
53 host = kmalloc(sizeof(*host), GFP_KERNEL); 55 host = kmalloc(sizeof(*host), GFP_KERNEL);
54 if (!host) 56 if (!host)
@@ -56,7 +58,7 @@ static struct nvmf_host *nvmf_host_add(const char *hostnqn)
56 58
57 kref_init(&host->ref); 59 kref_init(&host->ref);
58 memcpy(host->nqn, hostnqn, NVMF_NQN_SIZE); 60 memcpy(host->nqn, hostnqn, NVMF_NQN_SIZE);
59 uuid_le_gen(&host->id); 61 uuid_be_gen(&host->id);
60 62
61 list_add_tail(&host->list, &nvmf_hosts); 63 list_add_tail(&host->list, &nvmf_hosts);
62out_unlock: 64out_unlock:
@@ -73,9 +75,9 @@ static struct nvmf_host *nvmf_host_default(void)
73 return NULL; 75 return NULL;
74 76
75 kref_init(&host->ref); 77 kref_init(&host->ref);
76 uuid_le_gen(&host->id); 78 uuid_be_gen(&host->id);
77 snprintf(host->nqn, NVMF_NQN_SIZE, 79 snprintf(host->nqn, NVMF_NQN_SIZE,
78 "nqn.2014-08.org.nvmexpress:NVMf:uuid:%pUl", &host->id); 80 "nqn.2014-08.org.nvmexpress:NVMf:uuid:%pUb", &host->id);
79 81
80 mutex_lock(&nvmf_hosts_mutex); 82 mutex_lock(&nvmf_hosts_mutex);
81 list_add_tail(&host->list, &nvmf_hosts); 83 list_add_tail(&host->list, &nvmf_hosts);
@@ -363,7 +365,14 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
363 cmd.connect.opcode = nvme_fabrics_command; 365 cmd.connect.opcode = nvme_fabrics_command;
364 cmd.connect.fctype = nvme_fabrics_type_connect; 366 cmd.connect.fctype = nvme_fabrics_type_connect;
365 cmd.connect.qid = 0; 367 cmd.connect.qid = 0;
366 cmd.connect.sqsize = cpu_to_le16(ctrl->sqsize); 368
369 /*
 370 * the fabrics spec requires a minimum queue depth of 32 for the
 371 * admin queue, so always connect with that depth until there is
 372 * justification to do otherwise.
373 */
374 cmd.connect.sqsize = cpu_to_le16(NVMF_AQ_DEPTH - 1);
375
367 /* 376 /*
368 * Set keep-alive timeout in seconds granularity (ms * 1000) 377 * Set keep-alive timeout in seconds granularity (ms * 1000)
369 * and add a grace period for controller kato enforcement 378 * and add a grace period for controller kato enforcement
@@ -375,7 +384,7 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
375 if (!data) 384 if (!data)
376 return -ENOMEM; 385 return -ENOMEM;
377 386
378 memcpy(&data->hostid, &ctrl->opts->host->id, sizeof(uuid_le)); 387 memcpy(&data->hostid, &ctrl->opts->host->id, sizeof(uuid_be));
379 data->cntlid = cpu_to_le16(0xffff); 388 data->cntlid = cpu_to_le16(0xffff);
380 strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE); 389 strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
381 strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE); 390 strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
@@ -434,7 +443,7 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
434 if (!data) 443 if (!data)
435 return -ENOMEM; 444 return -ENOMEM;
436 445
437 memcpy(&data->hostid, &ctrl->opts->host->id, sizeof(uuid_le)); 446 memcpy(&data->hostid, &ctrl->opts->host->id, sizeof(uuid_be));
438 data->cntlid = cpu_to_le16(ctrl->cntlid); 447 data->cntlid = cpu_to_le16(ctrl->cntlid);
439 strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE); 448 strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
440 strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE); 449 strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
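
nvmf_host_add() returned an existing host without bumping its refcount, so the first put could free a host that other controllers still pointed at; the hunk adds the missing kref_get(). The host ID also moves from uuid_le to uuid_be, matching the big-endian UUID layout the Fabrics connect data expects. A compact userspace sketch of the lookup-or-create-with-reference pattern, with a plain counter in place of kref and all names hypothetical:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Minimal refcounted host registry: a lookup that hands back an
 * existing entry must also take a reference, or the first caller's
 * eventual put frees it out from under everyone else. */
struct host {
	int refs;
	char nqn[64];
	struct host *next;
};

static struct host *hosts;
static pthread_mutex_t hosts_lock = PTHREAD_MUTEX_INITIALIZER;

static struct host *host_add(const char *nqn)
{
	struct host *h;

	pthread_mutex_lock(&hosts_lock);
	for (h = hosts; h; h = h->next)
		if (!strcmp(h->nqn, nqn)) {
			h->refs++;	/* the missing kref_get() */
			goto out;
		}

	h = calloc(1, sizeof(*h));
	if (h) {
		h->refs = 1;
		snprintf(h->nqn, sizeof(h->nqn), "%s", nqn);
		h->next = hosts;
		hosts = h;
	}
out:
	pthread_mutex_unlock(&hosts_lock);
	return h;
}

int main(void)
{
	struct host *a = host_add("nqn.demo:host1");
	struct host *b = host_add("nqn.demo:host1");

	printf("same entry: %d, refs: %d\n", a == b, a->refs);
	return 0;
}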
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index 89df52c8be97..46e460aee52d 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -34,7 +34,7 @@ struct nvmf_host {
34 struct kref ref; 34 struct kref ref;
35 struct list_head list; 35 struct list_head list;
36 char nqn[NVMF_NQN_SIZE]; 36 char nqn[NVMF_NQN_SIZE];
37 uuid_le id; 37 uuid_be id;
38}; 38};
39 39
40/** 40/**
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index d7c33f9361aa..60f7eab11865 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1543,15 +1543,10 @@ static void nvme_disable_io_queues(struct nvme_dev *dev)
1543 reinit_completion(&dev->ioq_wait); 1543 reinit_completion(&dev->ioq_wait);
1544 retry: 1544 retry:
1545 timeout = ADMIN_TIMEOUT; 1545 timeout = ADMIN_TIMEOUT;
1546 for (; i > 0; i--) { 1546 for (; i > 0; i--, sent++)
1547 struct nvme_queue *nvmeq = dev->queues[i]; 1547 if (nvme_delete_queue(dev->queues[i], opcode))
1548
1549 if (!pass)
1550 nvme_suspend_queue(nvmeq);
1551 if (nvme_delete_queue(nvmeq, opcode))
1552 break; 1548 break;
1553 ++sent; 1549
1554 }
1555 while (sent--) { 1550 while (sent--) {
1556 timeout = wait_for_completion_io_timeout(&dev->ioq_wait, timeout); 1551 timeout = wait_for_completion_io_timeout(&dev->ioq_wait, timeout);
1557 if (timeout == 0) 1552 if (timeout == 0)
@@ -1693,11 +1688,17 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
1693 nvme_stop_queues(&dev->ctrl); 1688 nvme_stop_queues(&dev->ctrl);
1694 csts = readl(dev->bar + NVME_REG_CSTS); 1689 csts = readl(dev->bar + NVME_REG_CSTS);
1695 } 1690 }
1691
1692 for (i = dev->queue_count - 1; i > 0; i--)
1693 nvme_suspend_queue(dev->queues[i]);
1694
1696 if (csts & NVME_CSTS_CFS || !(csts & NVME_CSTS_RDY)) { 1695 if (csts & NVME_CSTS_CFS || !(csts & NVME_CSTS_RDY)) {
1697 for (i = dev->queue_count - 1; i >= 0; i--) { 1696 /* A device might become IO incapable very soon during
1698 struct nvme_queue *nvmeq = dev->queues[i]; 1697 * probe, before the admin queue is configured. Thus,
1699 nvme_suspend_queue(nvmeq); 1698 * queue_count can be 0 here.
1700 } 1699 */
1700 if (dev->queue_count)
1701 nvme_suspend_queue(dev->queues[0]);
1701 } else { 1702 } else {
1702 nvme_disable_io_queues(dev); 1703 nvme_disable_io_queues(dev);
1703 nvme_disable_admin_queue(dev, shutdown); 1704 nvme_disable_admin_queue(dev, shutdown);
@@ -2116,6 +2117,8 @@ static const struct pci_device_id nvme_id_table[] = {
2116 .driver_data = NVME_QUIRK_IDENTIFY_CNS, }, 2117 .driver_data = NVME_QUIRK_IDENTIFY_CNS, },
2117 { PCI_DEVICE(0x1c58, 0x0003), /* HGST adapter */ 2118 { PCI_DEVICE(0x1c58, 0x0003), /* HGST adapter */
2118 .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, 2119 .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
2120 { PCI_DEVICE(0x1c5f, 0x0540), /* Memblaze Pblaze4 adapter */
2121 .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
2119 { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) }, 2122 { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
2120 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001) }, 2123 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001) },
2121 { 0, } 2124 { 0, }
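
The pci.c hunks move queue suspension out of nvme_disable_io_queues() into nvme_dev_disable(), so queues are quiesced exactly once on both the dead-controller and orderly-shutdown paths, and add a Memblaze PBlaze4 entry to the delay-before-ready quirk table. The quirk table is the usual match-and-flags pattern; a standalone sketch with the two IDs copied from the hunk and everything else hypothetical:

#include <stdint.h>
#include <stdio.h>

/* Match (vendor, device) and attach driver flags, here the
 * "delay before checking ready" workaround. */
#define QUIRK_DELAY_BEFORE_CHK_RDY	(1u << 0)

struct id_entry {
	uint16_t vendor, device;
	unsigned int quirks;
};

static const struct id_entry id_table[] = {
	{ 0x1c58, 0x0003, QUIRK_DELAY_BEFORE_CHK_RDY },	/* HGST */
	{ 0x1c5f, 0x0540, QUIRK_DELAY_BEFORE_CHK_RDY },	/* Memblaze */
	{ 0, 0, 0 },					/* terminator */
};

static unsigned int lookup_quirks(uint16_t vendor, uint16_t device)
{
	const struct id_entry *e;

	for (e = id_table; e->vendor; e++)
		if (e->vendor == vendor && e->device == device)
			return e->quirks;
	return 0;
}

int main(void)
{
	printf("quirks for 1c5f:0540 = %#x\n",
	       lookup_quirks(0x1c5f, 0x0540));
	return 0;
}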
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 3e3ce2b0424e..c2c2c28e6eb5 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -12,13 +12,11 @@
12 * more details. 12 * more details.
13 */ 13 */
14#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 14#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15#include <linux/delay.h>
16#include <linux/module.h> 15#include <linux/module.h>
17#include <linux/init.h> 16#include <linux/init.h>
18#include <linux/slab.h> 17#include <linux/slab.h>
19#include <linux/err.h> 18#include <linux/err.h>
20#include <linux/string.h> 19#include <linux/string.h>
21#include <linux/jiffies.h>
22#include <linux/atomic.h> 20#include <linux/atomic.h>
23#include <linux/blk-mq.h> 21#include <linux/blk-mq.h>
24#include <linux/types.h> 22#include <linux/types.h>
@@ -26,7 +24,6 @@
26#include <linux/mutex.h> 24#include <linux/mutex.h>
27#include <linux/scatterlist.h> 25#include <linux/scatterlist.h>
28#include <linux/nvme.h> 26#include <linux/nvme.h>
29#include <linux/t10-pi.h>
30#include <asm/unaligned.h> 27#include <asm/unaligned.h>
31 28
32#include <rdma/ib_verbs.h> 29#include <rdma/ib_verbs.h>
@@ -46,10 +43,6 @@
46 43
47#define NVME_RDMA_MAX_INLINE_SEGMENTS 1 44#define NVME_RDMA_MAX_INLINE_SEGMENTS 1
48 45
49#define NVME_RDMA_MAX_PAGES_PER_MR 512
50
51#define NVME_RDMA_DEF_RECONNECT_DELAY 20
52
53/* 46/*
54 * We handle AEN commands ourselves and don't even let the 47 * We handle AEN commands ourselves and don't even let the
55 * block layer know about them. 48 * block layer know about them.
@@ -80,7 +73,6 @@ struct nvme_rdma_request {
80 u32 num_sge; 73 u32 num_sge;
81 int nents; 74 int nents;
82 bool inline_data; 75 bool inline_data;
83 bool need_inval;
84 struct ib_reg_wr reg_wr; 76 struct ib_reg_wr reg_wr;
85 struct ib_cqe reg_cqe; 77 struct ib_cqe reg_cqe;
86 struct nvme_rdma_queue *queue; 78 struct nvme_rdma_queue *queue;
@@ -90,6 +82,8 @@ struct nvme_rdma_request {
90 82
91enum nvme_rdma_queue_flags { 83enum nvme_rdma_queue_flags {
92 NVME_RDMA_Q_CONNECTED = (1 << 0), 84 NVME_RDMA_Q_CONNECTED = (1 << 0),
85 NVME_RDMA_IB_QUEUE_ALLOCATED = (1 << 1),
86 NVME_RDMA_Q_DELETING = (1 << 2),
93}; 87};
94 88
95struct nvme_rdma_queue { 89struct nvme_rdma_queue {
@@ -169,7 +163,6 @@ MODULE_PARM_DESC(register_always,
169static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id, 163static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
170 struct rdma_cm_event *event); 164 struct rdma_cm_event *event);
171static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc); 165static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
172static int __nvme_rdma_del_ctrl(struct nvme_rdma_ctrl *ctrl);
173 166
174/* XXX: really should move to a generic header sooner or later.. */ 167/* XXX: really should move to a generic header sooner or later.. */
175static inline void put_unaligned_le24(u32 val, u8 *p) 168static inline void put_unaligned_le24(u32 val, u8 *p)
@@ -290,7 +283,7 @@ static int nvme_rdma_reinit_request(void *data, struct request *rq)
290 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq); 283 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
291 int ret = 0; 284 int ret = 0;
292 285
293 if (!req->need_inval) 286 if (!req->mr->need_inval)
294 goto out; 287 goto out;
295 288
296 ib_dereg_mr(req->mr); 289 ib_dereg_mr(req->mr);
@@ -300,9 +293,10 @@ static int nvme_rdma_reinit_request(void *data, struct request *rq)
300 if (IS_ERR(req->mr)) { 293 if (IS_ERR(req->mr)) {
301 ret = PTR_ERR(req->mr); 294 ret = PTR_ERR(req->mr);
302 req->mr = NULL; 295 req->mr = NULL;
296 goto out;
303 } 297 }
304 298
305 req->need_inval = false; 299 req->mr->need_inval = false;
306 300
307out: 301out:
308 return ret; 302 return ret;
@@ -489,9 +483,14 @@ out_err:
489 483
490static void nvme_rdma_destroy_queue_ib(struct nvme_rdma_queue *queue) 484static void nvme_rdma_destroy_queue_ib(struct nvme_rdma_queue *queue)
491{ 485{
492 struct nvme_rdma_device *dev = queue->device; 486 struct nvme_rdma_device *dev;
493 struct ib_device *ibdev = dev->dev; 487 struct ib_device *ibdev;
488
489 if (!test_and_clear_bit(NVME_RDMA_IB_QUEUE_ALLOCATED, &queue->flags))
490 return;
494 491
492 dev = queue->device;
493 ibdev = dev->dev;
495 rdma_destroy_qp(queue->cm_id); 494 rdma_destroy_qp(queue->cm_id);
496 ib_free_cq(queue->ib_cq); 495 ib_free_cq(queue->ib_cq);
497 496
@@ -542,6 +541,7 @@ static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue,
542 ret = -ENOMEM; 541 ret = -ENOMEM;
543 goto out_destroy_qp; 542 goto out_destroy_qp;
544 } 543 }
544 set_bit(NVME_RDMA_IB_QUEUE_ALLOCATED, &queue->flags);
545 545
546 return 0; 546 return 0;
547 547
@@ -561,6 +561,7 @@ static int nvme_rdma_init_queue(struct nvme_rdma_ctrl *ctrl,
561 561
562 queue = &ctrl->queues[idx]; 562 queue = &ctrl->queues[idx];
563 queue->ctrl = ctrl; 563 queue->ctrl = ctrl;
564 queue->flags = 0;
564 init_completion(&queue->cm_done); 565 init_completion(&queue->cm_done);
565 566
566 if (idx > 0) 567 if (idx > 0)
@@ -599,6 +600,7 @@ static int nvme_rdma_init_queue(struct nvme_rdma_ctrl *ctrl,
599 return 0; 600 return 0;
600 601
601out_destroy_cm_id: 602out_destroy_cm_id:
603 nvme_rdma_destroy_queue_ib(queue);
602 rdma_destroy_id(queue->cm_id); 604 rdma_destroy_id(queue->cm_id);
603 return ret; 605 return ret;
604} 606}
@@ -617,7 +619,7 @@ static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
617 619
618static void nvme_rdma_stop_and_free_queue(struct nvme_rdma_queue *queue) 620static void nvme_rdma_stop_and_free_queue(struct nvme_rdma_queue *queue)
619{ 621{
620 if (!test_and_clear_bit(NVME_RDMA_Q_CONNECTED, &queue->flags)) 622 if (test_and_set_bit(NVME_RDMA_Q_DELETING, &queue->flags))
621 return; 623 return;
622 nvme_rdma_stop_queue(queue); 624 nvme_rdma_stop_queue(queue);
623 nvme_rdma_free_queue(queue); 625 nvme_rdma_free_queue(queue);
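
nvme_rdma_stop_and_free_queue() now guards itself with test_and_set_bit(NVME_RDMA_Q_DELETING) rather than test_and_clear_bit(Q_CONNECTED): teardown can be requested from several paths, and the error-recovery work a few hunks up now clears Q_CONNECTED itself, so a dedicated one-way DELETING flag is what makes the teardown idempotent. A sketch with a C11 atomic flag in place of the kernel bitops:

#include <stdatomic.h>
#include <stdio.h>

/* Teardown that may be reached from several paths (error recovery,
 * controller delete, device removal) must run exactly once.  An atomic
 * test-and-set on a dedicated DELETING flag makes it idempotent. */
struct queue {
	atomic_flag deleting;
};

static void stop_and_free_queue(struct queue *q)
{
	/* test_and_set returns the previous value: only the first
	 * caller sees "clear" and proceeds with the teardown. */
	if (atomic_flag_test_and_set(&q->deleting))
		return;
	printf("stopping and freeing queue\n");
}

int main(void)
{
	struct queue q = { ATOMIC_FLAG_INIT };

	stop_and_free_queue(&q);	/* runs the teardown */
	stop_and_free_queue(&q);	/* second call is a no-op */
	return 0;
}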
@@ -649,7 +651,8 @@ static int nvme_rdma_init_io_queues(struct nvme_rdma_ctrl *ctrl)
649 int i, ret; 651 int i, ret;
650 652
651 for (i = 1; i < ctrl->queue_count; i++) { 653 for (i = 1; i < ctrl->queue_count; i++) {
652 ret = nvme_rdma_init_queue(ctrl, i, ctrl->ctrl.sqsize); 654 ret = nvme_rdma_init_queue(ctrl, i,
655 ctrl->ctrl.opts->queue_size);
653 if (ret) { 656 if (ret) {
654 dev_info(ctrl->ctrl.device, 657 dev_info(ctrl->ctrl.device,
655 "failed to initialize i/o queue: %d\n", ret); 658 "failed to initialize i/o queue: %d\n", ret);
@@ -660,7 +663,7 @@ static int nvme_rdma_init_io_queues(struct nvme_rdma_ctrl *ctrl)
660 return 0; 663 return 0;
661 664
662out_free_queues: 665out_free_queues:
663 for (; i >= 1; i--) 666 for (i--; i >= 1; i--)
664 nvme_rdma_stop_and_free_queue(&ctrl->queues[i]); 667 nvme_rdma_stop_and_free_queue(&ctrl->queues[i]);
665 668
666 return ret; 669 return ret;
@@ -687,11 +690,6 @@ static void nvme_rdma_free_ctrl(struct nvme_ctrl *nctrl)
687 list_del(&ctrl->list); 690 list_del(&ctrl->list);
688 mutex_unlock(&nvme_rdma_ctrl_mutex); 691 mutex_unlock(&nvme_rdma_ctrl_mutex);
689 692
690 if (ctrl->ctrl.tagset) {
691 blk_cleanup_queue(ctrl->ctrl.connect_q);
692 blk_mq_free_tag_set(&ctrl->tag_set);
693 nvme_rdma_dev_put(ctrl->device);
694 }
695 kfree(ctrl->queues); 693 kfree(ctrl->queues);
696 nvmf_free_options(nctrl->opts); 694 nvmf_free_options(nctrl->opts);
697free_ctrl: 695free_ctrl:
@@ -748,8 +746,11 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
748 changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE); 746 changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
749 WARN_ON_ONCE(!changed); 747 WARN_ON_ONCE(!changed);
750 748
751 if (ctrl->queue_count > 1) 749 if (ctrl->queue_count > 1) {
752 nvme_start_queues(&ctrl->ctrl); 750 nvme_start_queues(&ctrl->ctrl);
751 nvme_queue_scan(&ctrl->ctrl);
752 nvme_queue_async_events(&ctrl->ctrl);
753 }
753 754
754 dev_info(ctrl->ctrl.device, "Successfully reconnected\n"); 755 dev_info(ctrl->ctrl.device, "Successfully reconnected\n");
755 756
@@ -771,8 +772,13 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
771{ 772{
772 struct nvme_rdma_ctrl *ctrl = container_of(work, 773 struct nvme_rdma_ctrl *ctrl = container_of(work,
773 struct nvme_rdma_ctrl, err_work); 774 struct nvme_rdma_ctrl, err_work);
775 int i;
774 776
775 nvme_stop_keep_alive(&ctrl->ctrl); 777 nvme_stop_keep_alive(&ctrl->ctrl);
778
779 for (i = 0; i < ctrl->queue_count; i++)
780 clear_bit(NVME_RDMA_Q_CONNECTED, &ctrl->queues[i].flags);
781
776 if (ctrl->queue_count > 1) 782 if (ctrl->queue_count > 1)
777 nvme_stop_queues(&ctrl->ctrl); 783 nvme_stop_queues(&ctrl->ctrl);
778 blk_mq_stop_hw_queues(ctrl->ctrl.admin_q); 784 blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);
@@ -855,7 +861,7 @@ static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue,
855 if (!blk_rq_bytes(rq)) 861 if (!blk_rq_bytes(rq))
856 return; 862 return;
857 863
858 if (req->need_inval) { 864 if (req->mr->need_inval) {
859 res = nvme_rdma_inv_rkey(queue, req); 865 res = nvme_rdma_inv_rkey(queue, req);
860 if (res < 0) { 866 if (res < 0) {
861 dev_err(ctrl->ctrl.device, 867 dev_err(ctrl->ctrl.device,
@@ -941,7 +947,7 @@ static int nvme_rdma_map_sg_fr(struct nvme_rdma_queue *queue,
941 IB_ACCESS_REMOTE_READ | 947 IB_ACCESS_REMOTE_READ |
942 IB_ACCESS_REMOTE_WRITE; 948 IB_ACCESS_REMOTE_WRITE;
943 949
944 req->need_inval = true; 950 req->mr->need_inval = true;
945 951
946 sg->addr = cpu_to_le64(req->mr->iova); 952 sg->addr = cpu_to_le64(req->mr->iova);
947 put_unaligned_le24(req->mr->length, sg->length); 953 put_unaligned_le24(req->mr->length, sg->length);
@@ -964,7 +970,7 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
964 970
965 req->num_sge = 1; 971 req->num_sge = 1;
966 req->inline_data = false; 972 req->inline_data = false;
967 req->need_inval = false; 973 req->mr->need_inval = false;
968 974
969 c->common.flags |= NVME_CMD_SGL_METABUF; 975 c->common.flags |= NVME_CMD_SGL_METABUF;
970 976
@@ -1151,7 +1157,7 @@ static int nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue,
1151 1157
1152 if ((wc->wc_flags & IB_WC_WITH_INVALIDATE) && 1158 if ((wc->wc_flags & IB_WC_WITH_INVALIDATE) &&
1153 wc->ex.invalidate_rkey == req->mr->rkey) 1159 wc->ex.invalidate_rkey == req->mr->rkey)
1154 req->need_inval = false; 1160 req->mr->need_inval = false;
1155 1161
1156 blk_mq_complete_request(rq, status); 1162 blk_mq_complete_request(rq, status);
1157 1163
@@ -1269,7 +1275,7 @@ static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue)
1269{ 1275{
1270 struct nvme_rdma_ctrl *ctrl = queue->ctrl; 1276 struct nvme_rdma_ctrl *ctrl = queue->ctrl;
1271 struct rdma_conn_param param = { }; 1277 struct rdma_conn_param param = { };
1272 struct nvme_rdma_cm_req priv; 1278 struct nvme_rdma_cm_req priv = { };
1273 int ret; 1279 int ret;
1274 1280
1275 param.qp_num = queue->qp->qp_num; 1281 param.qp_num = queue->qp->qp_num;
@@ -1284,8 +1290,22 @@ static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue)
1284 1290
1285 priv.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0); 1291 priv.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
1286 priv.qid = cpu_to_le16(nvme_rdma_queue_idx(queue)); 1292 priv.qid = cpu_to_le16(nvme_rdma_queue_idx(queue));
1287 priv.hrqsize = cpu_to_le16(queue->queue_size); 1293 /*
1288 priv.hsqsize = cpu_to_le16(queue->queue_size); 1294 * set the admin queue depth to the minimum size
1295 * specified by the Fabrics standard.
1296 */
1297 if (priv.qid == 0) {
1298 priv.hrqsize = cpu_to_le16(NVMF_AQ_DEPTH);
1299 priv.hsqsize = cpu_to_le16(NVMF_AQ_DEPTH - 1);
1300 } else {
1301 /*
 1302 * the current interpretation of the fabrics spec
 1303 * is that hrqsize must be at least sqsize+1, i.e.
 1304 * the 1's-based representation of sqsize.
1305 */
1306 priv.hrqsize = cpu_to_le16(queue->queue_size);
1307 priv.hsqsize = cpu_to_le16(queue->ctrl->ctrl.sqsize);
1308 }
1289 1309
1290 ret = rdma_connect(queue->cm_id, &param); 1310 ret = rdma_connect(queue->cm_id, &param);
1291 if (ret) { 1311 if (ret) {
@@ -1301,56 +1321,6 @@ out_destroy_queue_ib:
1301 return ret; 1321 return ret;
1302} 1322}
1303 1323
1304/**
1305 * nvme_rdma_device_unplug() - Handle RDMA device unplug
1306 * @queue: Queue that owns the cm_id that caught the event
1307 *
1308 * DEVICE_REMOVAL event notifies us that the RDMA device is about
1309 * to unplug so we should take care of destroying our RDMA resources.
1310 * This event will be generated for each allocated cm_id.
1311 *
1312 * In our case, the RDMA resources are managed per controller and not
1313 * only per queue. So the way we handle this is we trigger an implicit
1314 * controller deletion upon the first DEVICE_REMOVAL event we see, and
1315 * hold the event inflight until the controller deletion is completed.
1316 *
1317 * One exception that we need to handle is the destruction of the cm_id
1318 * that caught the event. Since we hold the callout until the controller
1319 * deletion is completed, we'll deadlock if the controller deletion will
1320 * call rdma_destroy_id on this queue's cm_id. Thus, we claim ownership
1321 * of destroying this queue before-hand, destroy the queue resources
1322 * after the controller deletion completed with the exception of destroying
1323 * the cm_id implicitely by returning a non-zero rc to the callout.
1324 */
1325static int nvme_rdma_device_unplug(struct nvme_rdma_queue *queue)
1326{
1327 struct nvme_rdma_ctrl *ctrl = queue->ctrl;
1328 int ret, ctrl_deleted = 0;
1329
1330 /* First disable the queue so ctrl delete won't free it */
1331 if (!test_and_clear_bit(NVME_RDMA_Q_CONNECTED, &queue->flags))
1332 goto out;
1333
1334 /* delete the controller */
1335 ret = __nvme_rdma_del_ctrl(ctrl);
1336 if (!ret) {
1337 dev_warn(ctrl->ctrl.device,
1338 "Got rdma device removal event, deleting ctrl\n");
1339 flush_work(&ctrl->delete_work);
1340
1341 /* Return non-zero so the cm_id will be destroyed implicitly */
1342 ctrl_deleted = 1;
1343
1344 /* Free this queue ourselves */
1345 rdma_disconnect(queue->cm_id);
1346 ib_drain_qp(queue->qp);
1347 nvme_rdma_destroy_queue_ib(queue);
1348 }
1349
1350out:
1351 return ctrl_deleted;
1352}
1353
1354static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id, 1324static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
1355 struct rdma_cm_event *ev) 1325 struct rdma_cm_event *ev)
1356{ 1326{
@@ -1392,8 +1362,8 @@ static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
1392 nvme_rdma_error_recovery(queue->ctrl); 1362 nvme_rdma_error_recovery(queue->ctrl);
1393 break; 1363 break;
1394 case RDMA_CM_EVENT_DEVICE_REMOVAL: 1364 case RDMA_CM_EVENT_DEVICE_REMOVAL:
1395 /* return 1 means implicit CM ID destroy */ 1365 /* device removal is handled via the ib_client API */
1396 return nvme_rdma_device_unplug(queue); 1366 break;
1397 default: 1367 default:
1398 dev_err(queue->ctrl->ctrl.device, 1368 dev_err(queue->ctrl->ctrl.device,
1399 "Unexpected RDMA CM event (%d)\n", ev->event); 1369 "Unexpected RDMA CM event (%d)\n", ev->event);
@@ -1465,7 +1435,7 @@ static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
1465 if (rq->cmd_type == REQ_TYPE_FS && req_op(rq) == REQ_OP_FLUSH) 1435 if (rq->cmd_type == REQ_TYPE_FS && req_op(rq) == REQ_OP_FLUSH)
1466 flush = true; 1436 flush = true;
1467 ret = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge, 1437 ret = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge,
1468 req->need_inval ? &req->reg_wr.wr : NULL, flush); 1438 req->mr->need_inval ? &req->reg_wr.wr : NULL, flush);
1469 if (ret) { 1439 if (ret) {
1470 nvme_rdma_unmap_data(queue, rq); 1440 nvme_rdma_unmap_data(queue, rq);
1471 goto err; 1441 goto err;
@@ -1648,7 +1618,7 @@ static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl)
1648 nvme_rdma_free_io_queues(ctrl); 1618 nvme_rdma_free_io_queues(ctrl);
1649 } 1619 }
1650 1620
1651 if (ctrl->ctrl.state == NVME_CTRL_LIVE) 1621 if (test_bit(NVME_RDMA_Q_CONNECTED, &ctrl->queues[0].flags))
1652 nvme_shutdown_ctrl(&ctrl->ctrl); 1622 nvme_shutdown_ctrl(&ctrl->ctrl);
1653 1623
1654 blk_mq_stop_hw_queues(ctrl->ctrl.admin_q); 1624 blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);
@@ -1657,15 +1627,27 @@ static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl)
1657 nvme_rdma_destroy_admin_queue(ctrl); 1627 nvme_rdma_destroy_admin_queue(ctrl);
1658} 1628}
1659 1629
1630static void __nvme_rdma_remove_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown)
1631{
1632 nvme_uninit_ctrl(&ctrl->ctrl);
1633 if (shutdown)
1634 nvme_rdma_shutdown_ctrl(ctrl);
1635
1636 if (ctrl->ctrl.tagset) {
1637 blk_cleanup_queue(ctrl->ctrl.connect_q);
1638 blk_mq_free_tag_set(&ctrl->tag_set);
1639 nvme_rdma_dev_put(ctrl->device);
1640 }
1641
1642 nvme_put_ctrl(&ctrl->ctrl);
1643}
1644
1660static void nvme_rdma_del_ctrl_work(struct work_struct *work) 1645static void nvme_rdma_del_ctrl_work(struct work_struct *work)
1661{ 1646{
1662 struct nvme_rdma_ctrl *ctrl = container_of(work, 1647 struct nvme_rdma_ctrl *ctrl = container_of(work,
1663 struct nvme_rdma_ctrl, delete_work); 1648 struct nvme_rdma_ctrl, delete_work);
1664 1649
1665 nvme_remove_namespaces(&ctrl->ctrl); 1650 __nvme_rdma_remove_ctrl(ctrl, true);
1666 nvme_rdma_shutdown_ctrl(ctrl);
1667 nvme_uninit_ctrl(&ctrl->ctrl);
1668 nvme_put_ctrl(&ctrl->ctrl);
1669} 1651}
1670 1652
1671static int __nvme_rdma_del_ctrl(struct nvme_rdma_ctrl *ctrl) 1653static int __nvme_rdma_del_ctrl(struct nvme_rdma_ctrl *ctrl)
@@ -1682,15 +1664,19 @@ static int __nvme_rdma_del_ctrl(struct nvme_rdma_ctrl *ctrl)
1682static int nvme_rdma_del_ctrl(struct nvme_ctrl *nctrl) 1664static int nvme_rdma_del_ctrl(struct nvme_ctrl *nctrl)
1683{ 1665{
1684 struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl); 1666 struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
1685 int ret; 1667 int ret = 0;
1686 1668
1669 /*
1670 * Keep a reference until all work is flushed since
1671 * __nvme_rdma_del_ctrl can free the ctrl mem
1672 */
1673 if (!kref_get_unless_zero(&ctrl->ctrl.kref))
1674 return -EBUSY;
1687 ret = __nvme_rdma_del_ctrl(ctrl); 1675 ret = __nvme_rdma_del_ctrl(ctrl);
1688 if (ret) 1676 if (!ret)
1689 return ret; 1677 flush_work(&ctrl->delete_work);
1690 1678 nvme_put_ctrl(&ctrl->ctrl);
1691 flush_work(&ctrl->delete_work); 1679 return ret;
1692
1693 return 0;
1694} 1680}
1695 1681
1696static void nvme_rdma_remove_ctrl_work(struct work_struct *work) 1682static void nvme_rdma_remove_ctrl_work(struct work_struct *work)
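
The kref_get_unless_zero() added above is the usual "take a reference only if the object is still alive" guard: without it, the delete work could drop the last reference and free the ctrl while this thread is still blocked in flush_work(). A userspace analogue of its semantics using C11 atomics (a sketch, not kernel code):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* analogue of kref_get_unless_zero(): succeed only while the
 * refcount is non-zero, i.e. the object is not already being freed */
static bool ref_get_unless_zero(atomic_int *ref)
{
	int old = atomic_load(ref);

	while (old != 0) {
		if (atomic_compare_exchange_weak(ref, &old, old + 1))
			return true;
		/* CAS failure reloaded 'old'; retry */
	}
	return false;
}

int main(void)
{
	atomic_int live = 1, dead = 0;

	printf("live: %d dead: %d\n",
	       ref_get_unless_zero(&live),	/* 1: reference taken */
	       ref_get_unless_zero(&dead));	/* 0: object already gone */
	return 0;
}
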
@@ -1698,9 +1684,7 @@ static void nvme_rdma_remove_ctrl_work(struct work_struct *work)
1698 struct nvme_rdma_ctrl *ctrl = container_of(work, 1684 struct nvme_rdma_ctrl *ctrl = container_of(work,
1699 struct nvme_rdma_ctrl, delete_work); 1685 struct nvme_rdma_ctrl, delete_work);
1700 1686
1701 nvme_remove_namespaces(&ctrl->ctrl); 1687 __nvme_rdma_remove_ctrl(ctrl, false);
1702 nvme_uninit_ctrl(&ctrl->ctrl);
1703 nvme_put_ctrl(&ctrl->ctrl);
1704} 1688}
1705 1689
1706static void nvme_rdma_reset_ctrl_work(struct work_struct *work) 1690static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
@@ -1739,6 +1723,7 @@ static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
1739 if (ctrl->queue_count > 1) { 1723 if (ctrl->queue_count > 1) {
1740 nvme_start_queues(&ctrl->ctrl); 1724 nvme_start_queues(&ctrl->ctrl);
1741 nvme_queue_scan(&ctrl->ctrl); 1725 nvme_queue_scan(&ctrl->ctrl);
1726 nvme_queue_async_events(&ctrl->ctrl);
1742 } 1727 }
1743 1728
1744 return; 1729 return;
@@ -1809,7 +1794,7 @@ static int nvme_rdma_create_io_queues(struct nvme_rdma_ctrl *ctrl)
1809 1794
1810 memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set)); 1795 memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
1811 ctrl->tag_set.ops = &nvme_rdma_mq_ops; 1796 ctrl->tag_set.ops = &nvme_rdma_mq_ops;
1812 ctrl->tag_set.queue_depth = ctrl->ctrl.sqsize; 1797 ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
1813 ctrl->tag_set.reserved_tags = 1; /* fabric connect */ 1798 ctrl->tag_set.reserved_tags = 1; /* fabric connect */
1814 ctrl->tag_set.numa_node = NUMA_NO_NODE; 1799 ctrl->tag_set.numa_node = NUMA_NO_NODE;
1815 ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; 1800 ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
@@ -1907,7 +1892,7 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
1907 spin_lock_init(&ctrl->lock); 1892 spin_lock_init(&ctrl->lock);
1908 1893
1909 ctrl->queue_count = opts->nr_io_queues + 1; /* +1 for admin queue */ 1894 ctrl->queue_count = opts->nr_io_queues + 1; /* +1 for admin queue */
1910 ctrl->ctrl.sqsize = opts->queue_size; 1895 ctrl->ctrl.sqsize = opts->queue_size - 1;
1911 ctrl->ctrl.kato = opts->kato; 1896 ctrl->ctrl.kato = opts->kato;
1912 1897
1913 ret = -ENOMEM; 1898 ret = -ENOMEM;
@@ -1988,27 +1973,57 @@ static struct nvmf_transport_ops nvme_rdma_transport = {
1988 .create_ctrl = nvme_rdma_create_ctrl, 1973 .create_ctrl = nvme_rdma_create_ctrl,
1989}; 1974};
1990 1975
1976static void nvme_rdma_add_one(struct ib_device *ib_device)
1977{
1978}
1979
1980static void nvme_rdma_remove_one(struct ib_device *ib_device, void *client_data)
1981{
1982 struct nvme_rdma_ctrl *ctrl;
1983
1984 /* Delete all controllers using this device */
1985 mutex_lock(&nvme_rdma_ctrl_mutex);
1986 list_for_each_entry(ctrl, &nvme_rdma_ctrl_list, list) {
1987 if (ctrl->device->dev != ib_device)
1988 continue;
1989 dev_info(ctrl->ctrl.device,
1990 "Removing ctrl: NQN \"%s\", addr %pISp\n",
1991 ctrl->ctrl.opts->subsysnqn, &ctrl->addr);
1992 __nvme_rdma_del_ctrl(ctrl);
1993 }
1994 mutex_unlock(&nvme_rdma_ctrl_mutex);
1995
1996 flush_workqueue(nvme_rdma_wq);
1997}
1998
1999static struct ib_client nvme_rdma_ib_client = {
2000 .name = "nvme_rdma",
2001 .add = nvme_rdma_add_one,
2002 .remove = nvme_rdma_remove_one
2003};
2004
1991static int __init nvme_rdma_init_module(void) 2005static int __init nvme_rdma_init_module(void)
1992{ 2006{
2007 int ret;
2008
1993 nvme_rdma_wq = create_workqueue("nvme_rdma_wq"); 2009 nvme_rdma_wq = create_workqueue("nvme_rdma_wq");
1994 if (!nvme_rdma_wq) 2010 if (!nvme_rdma_wq)
1995 return -ENOMEM; 2011 return -ENOMEM;
1996 2012
2013 ret = ib_register_client(&nvme_rdma_ib_client);
2014 if (ret) {
2015 destroy_workqueue(nvme_rdma_wq);
2016 return ret;
2017 }
2018
1997 nvmf_register_transport(&nvme_rdma_transport); 2019 nvmf_register_transport(&nvme_rdma_transport);
1998 return 0; 2020 return 0;
1999} 2021}
2000 2022
2001static void __exit nvme_rdma_cleanup_module(void) 2023static void __exit nvme_rdma_cleanup_module(void)
2002{ 2024{
2003 struct nvme_rdma_ctrl *ctrl;
2004
2005 nvmf_unregister_transport(&nvme_rdma_transport); 2025 nvmf_unregister_transport(&nvme_rdma_transport);
2006 2026 ib_unregister_client(&nvme_rdma_ib_client);
2007 mutex_lock(&nvme_rdma_ctrl_mutex);
2008 list_for_each_entry(ctrl, &nvme_rdma_ctrl_list, list)
2009 __nvme_rdma_del_ctrl(ctrl);
2010 mutex_unlock(&nvme_rdma_ctrl_mutex);
2011
2012 destroy_workqueue(nvme_rdma_wq); 2027 destroy_workqueue(nvme_rdma_wq);
2013} 2028}
2014 2029
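
The removed per-cm_id unplug path is replaced by the ib_client hooks above: the core invokes .add for every existing and newly registered RDMA device and .remove when a device is unregistered, so the driver can delete every controller bound to the dying device and then flush its workqueue before the core tears the device down. The skeleton of that pattern, as a sketch (names hypothetical, bodies elided):

#include <rdma/ib_verbs.h>

static void example_add_one(struct ib_device *ib_device)
{
	/* nothing to do up front: resources are created per connection */
}

static void example_remove_one(struct ib_device *ib_device, void *client_data)
{
	/* delete every object bound to ib_device, then wait for the
	 * teardown work to finish before returning to the core */
}

static struct ib_client example_ib_client = {
	.name	= "example",
	.add	= example_add_one,
	.remove	= example_remove_one,
};

/* module init: ib_register_client(&example_ib_client);
 * module exit: ib_unregister_client(&example_ib_client); */
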
diff --git a/drivers/nvme/target/Kconfig b/drivers/nvme/target/Kconfig
index a5c31cbeb481..3a5b9d0576cb 100644
--- a/drivers/nvme/target/Kconfig
+++ b/drivers/nvme/target/Kconfig
@@ -15,8 +15,8 @@ config NVME_TARGET
15 15
16config NVME_TARGET_LOOP 16config NVME_TARGET_LOOP
17 tristate "NVMe loopback device support" 17 tristate "NVMe loopback device support"
18 depends on BLK_DEV_NVME
19 depends on NVME_TARGET 18 depends on NVME_TARGET
19 select NVME_CORE
20 select NVME_FABRICS 20 select NVME_FABRICS
21 select SG_POOL 21 select SG_POOL
22 help 22 help
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 2fac17a5ad53..47c564b5a289 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -13,7 +13,6 @@
13 */ 13 */
14#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 14#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15#include <linux/module.h> 15#include <linux/module.h>
16#include <linux/random.h>
17#include <generated/utsrelease.h> 16#include <generated/utsrelease.h>
18#include "nvmet.h" 17#include "nvmet.h"
19 18
@@ -83,7 +82,6 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
83{ 82{
84 struct nvmet_ctrl *ctrl = req->sq->ctrl; 83 struct nvmet_ctrl *ctrl = req->sq->ctrl;
85 struct nvme_id_ctrl *id; 84 struct nvme_id_ctrl *id;
86 u64 serial;
87 u16 status = 0; 85 u16 status = 0;
88 86
89 id = kzalloc(sizeof(*id), GFP_KERNEL); 87 id = kzalloc(sizeof(*id), GFP_KERNEL);
@@ -96,10 +94,8 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
96 id->vid = 0; 94 id->vid = 0;
97 id->ssvid = 0; 95 id->ssvid = 0;
98 96
99 /* generate a random serial number as our controllers are ephemeral: */
100 get_random_bytes(&serial, sizeof(serial));
101 memset(id->sn, ' ', sizeof(id->sn)); 97 memset(id->sn, ' ', sizeof(id->sn));
102 snprintf(id->sn, sizeof(id->sn), "%llx", serial); 98 snprintf(id->sn, sizeof(id->sn), "%llx", ctrl->serial);
103 99
104 memset(id->mn, ' ', sizeof(id->mn)); 100 memset(id->mn, ' ', sizeof(id->mn));
105 strncpy((char *)id->mn, "Linux", sizeof(id->mn)); 101 strncpy((char *)id->mn, "Linux", sizeof(id->mn));
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 8a891ca53367..6559d5afa7bf 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -13,6 +13,7 @@
13 */ 13 */
14#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 14#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15#include <linux/module.h> 15#include <linux/module.h>
16#include <linux/random.h>
16#include "nvmet.h" 17#include "nvmet.h"
17 18
18static struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX]; 19static struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
@@ -728,6 +729,9 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
728 memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE); 729 memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
729 memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE); 730 memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);
730 731
732 /* generate a random serial number as our controllers are ephemeral: */
733 get_random_bytes(&ctrl->serial, sizeof(ctrl->serial));
734
731 kref_init(&ctrl->ref); 735 kref_init(&ctrl->ref);
732 ctrl->subsys = subsys; 736 ctrl->subsys = subsys;
733 737
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 94e782987cc9..395e60dad835 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -414,9 +414,8 @@ static void nvme_loop_del_ctrl_work(struct work_struct *work)
414 struct nvme_loop_ctrl *ctrl = container_of(work, 414 struct nvme_loop_ctrl *ctrl = container_of(work,
415 struct nvme_loop_ctrl, delete_work); 415 struct nvme_loop_ctrl, delete_work);
416 416
417 nvme_remove_namespaces(&ctrl->ctrl);
418 nvme_loop_shutdown_ctrl(ctrl);
419 nvme_uninit_ctrl(&ctrl->ctrl); 417 nvme_uninit_ctrl(&ctrl->ctrl);
418 nvme_loop_shutdown_ctrl(ctrl);
420 nvme_put_ctrl(&ctrl->ctrl); 419 nvme_put_ctrl(&ctrl->ctrl);
421} 420}
422 421
@@ -501,7 +500,6 @@ out_free_queues:
501 nvme_loop_destroy_admin_queue(ctrl); 500 nvme_loop_destroy_admin_queue(ctrl);
502out_disable: 501out_disable:
503 dev_warn(ctrl->ctrl.device, "Removing after reset failure\n"); 502 dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
504 nvme_remove_namespaces(&ctrl->ctrl);
505 nvme_uninit_ctrl(&ctrl->ctrl); 503 nvme_uninit_ctrl(&ctrl->ctrl);
506 nvme_put_ctrl(&ctrl->ctrl); 504 nvme_put_ctrl(&ctrl->ctrl);
507} 505}
@@ -558,7 +556,7 @@ static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
558 556
559 memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set)); 557 memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
560 ctrl->tag_set.ops = &nvme_loop_mq_ops; 558 ctrl->tag_set.ops = &nvme_loop_mq_ops;
561 ctrl->tag_set.queue_depth = ctrl->ctrl.sqsize; 559 ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
562 ctrl->tag_set.reserved_tags = 1; /* fabric connect */ 560 ctrl->tag_set.reserved_tags = 1; /* fabric connect */
563 ctrl->tag_set.numa_node = NUMA_NO_NODE; 561 ctrl->tag_set.numa_node = NUMA_NO_NODE;
564 ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; 562 ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
@@ -622,7 +620,7 @@ static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
622 620
623 ret = -ENOMEM; 621 ret = -ENOMEM;
624 622
625 ctrl->ctrl.sqsize = opts->queue_size; 623 ctrl->ctrl.sqsize = opts->queue_size - 1;
626 ctrl->ctrl.kato = opts->kato; 624 ctrl->ctrl.kato = opts->kato;
627 625
628 ctrl->queues = kcalloc(opts->nr_io_queues + 1, sizeof(*ctrl->queues), 626 ctrl->queues = kcalloc(opts->nr_io_queues + 1, sizeof(*ctrl->queues),
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 57dd6d834c28..76b6eedccaf9 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -113,6 +113,7 @@ struct nvmet_ctrl {
113 113
114 struct mutex lock; 114 struct mutex lock;
115 u64 cap; 115 u64 cap;
116 u64 serial;
116 u32 cc; 117 u32 cc;
117 u32 csts; 118 u32 csts;
118 119
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index e06d504bdf0c..1cbe6e053b5b 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -77,6 +77,7 @@ enum nvmet_rdma_queue_state {
77 NVMET_RDMA_Q_CONNECTING, 77 NVMET_RDMA_Q_CONNECTING,
78 NVMET_RDMA_Q_LIVE, 78 NVMET_RDMA_Q_LIVE,
79 NVMET_RDMA_Q_DISCONNECTING, 79 NVMET_RDMA_Q_DISCONNECTING,
80 NVMET_RDMA_IN_DEVICE_REMOVAL,
80}; 81};
81 82
82struct nvmet_rdma_queue { 83struct nvmet_rdma_queue {
@@ -615,15 +616,10 @@ static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
615 if (!len) 616 if (!len)
616 return 0; 617 return 0;
617 618
618 /* use the already allocated data buffer if possible */ 619 status = nvmet_rdma_alloc_sgl(&rsp->req.sg, &rsp->req.sg_cnt,
619 if (len <= NVMET_RDMA_INLINE_DATA_SIZE && rsp->queue->host_qid) { 620 len);
620 nvmet_rdma_use_inline_sg(rsp, len, 0); 621 if (status)
621 } else { 622 return status;
622 status = nvmet_rdma_alloc_sgl(&rsp->req.sg, &rsp->req.sg_cnt,
623 len);
624 if (status)
625 return status;
626 }
627 623
628 ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num, 624 ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num,
629 rsp->req.sg, rsp->req.sg_cnt, 0, addr, key, 625 rsp->req.sg, rsp->req.sg_cnt, 0, addr, key,
@@ -982,9 +978,13 @@ static void nvmet_rdma_release_queue_work(struct work_struct *w)
982 container_of(w, struct nvmet_rdma_queue, release_work); 978 container_of(w, struct nvmet_rdma_queue, release_work);
983 struct rdma_cm_id *cm_id = queue->cm_id; 979 struct rdma_cm_id *cm_id = queue->cm_id;
984 struct nvmet_rdma_device *dev = queue->dev; 980 struct nvmet_rdma_device *dev = queue->dev;
981 enum nvmet_rdma_queue_state state = queue->state;
985 982
986 nvmet_rdma_free_queue(queue); 983 nvmet_rdma_free_queue(queue);
987 rdma_destroy_id(cm_id); 984
985 if (state != NVMET_RDMA_IN_DEVICE_REMOVAL)
986 rdma_destroy_id(cm_id);
987
988 kref_put(&dev->ref, nvmet_rdma_free_dev); 988 kref_put(&dev->ref, nvmet_rdma_free_dev);
989} 989}
990 990
@@ -1004,10 +1004,10 @@ nvmet_rdma_parse_cm_connect_req(struct rdma_conn_param *conn,
1004 queue->host_qid = le16_to_cpu(req->qid); 1004 queue->host_qid = le16_to_cpu(req->qid);
1005 1005
1006 /* 1006 /*
1007 * req->hsqsize corresponds to our recv queue size 1007 * req->hsqsize corresponds to our recv queue size plus 1
1008 * req->hrqsize corresponds to our send queue size 1008 * req->hrqsize corresponds to our send queue size
1009 */ 1009 */
1010 queue->recv_queue_size = le16_to_cpu(req->hsqsize); 1010 queue->recv_queue_size = le16_to_cpu(req->hsqsize) + 1;
1011 queue->send_queue_size = le16_to_cpu(req->hrqsize); 1011 queue->send_queue_size = le16_to_cpu(req->hrqsize);
1012 1012
1013 if (!queue->host_qid && queue->recv_queue_size > NVMF_AQ_DEPTH) 1013 if (!queue->host_qid && queue->recv_queue_size > NVMF_AQ_DEPTH)
@@ -1233,8 +1233,9 @@ static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
1233 switch (queue->state) { 1233 switch (queue->state) {
1234 case NVMET_RDMA_Q_CONNECTING: 1234 case NVMET_RDMA_Q_CONNECTING:
1235 case NVMET_RDMA_Q_LIVE: 1235 case NVMET_RDMA_Q_LIVE:
1236 disconnect = true;
1237 queue->state = NVMET_RDMA_Q_DISCONNECTING; 1236 queue->state = NVMET_RDMA_Q_DISCONNECTING;
1237 case NVMET_RDMA_IN_DEVICE_REMOVAL:
1238 disconnect = true;
1238 break; 1239 break;
1239 case NVMET_RDMA_Q_DISCONNECTING: 1240 case NVMET_RDMA_Q_DISCONNECTING:
1240 break; 1241 break;
@@ -1272,6 +1273,62 @@ static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
1272 schedule_work(&queue->release_work); 1273 schedule_work(&queue->release_work);
1273} 1274}
1274 1275
1276/**
1278 * nvmet_rdma_device_removal() - Handle RDMA device removal
1279 * @cm_id: rdma_cm id (for a listener, cm_id->context is the nvmet port)
1280 * @queue: nvmet rdma queue (cm id qp_context); NULL for a listener cm_id
1280 *
1281 * DEVICE_REMOVAL event notifies us that the RDMA device is about
1282 * to unplug so we should take care of destroying our RDMA resources.
1283 * This event will be generated for each allocated cm_id.
1284 *
1285 * Note that this event can be generated on a normal queue cm_id
 1286 * and/or a device bound listener cm_id (in which case
 1287 * queue will be null).
 1288 *
 1289 * We claim ownership of destroying the cm_id. For queues we move
 1290 * the queue state to NVMET_RDMA_IN_DEVICE_REMOVAL and for the port
 1291 * we nullify the priv to prevent a double cm_id destruction, and destroy
 1292 * the cm_id implicitly by returning a non-zero rc to the callout.
1293 */
1294static int nvmet_rdma_device_removal(struct rdma_cm_id *cm_id,
1295 struct nvmet_rdma_queue *queue)
1296{
1297 unsigned long flags;
1298
1299 if (!queue) {
1300 struct nvmet_port *port = cm_id->context;
1301
1302 /*
 1303 * This is a listener cm_id. Make sure that a
 1304 * future remove_port won't invoke a double
 1305 * cm_id destroy. Use an atomic xchg to make sure
1306 * we don't compete with remove_port.
1307 */
1308 if (xchg(&port->priv, NULL) != cm_id)
1309 return 0;
1310 } else {
1311 /*
1312 * This is a queue cm_id. Make sure that
1313 * release queue will not destroy the cm_id
1314 * and schedule all ctrl queues removal (only
1315 * if the queue is not disconnecting already).
1316 */
1317 spin_lock_irqsave(&queue->state_lock, flags);
1318 if (queue->state != NVMET_RDMA_Q_DISCONNECTING)
1319 queue->state = NVMET_RDMA_IN_DEVICE_REMOVAL;
1320 spin_unlock_irqrestore(&queue->state_lock, flags);
1321 nvmet_rdma_queue_disconnect(queue);
1322 flush_scheduled_work();
1323 }
1324
1325 /*
1326 * We need to return 1 so that the core will destroy
 1327 * its own ID. What a great API design...
1328 */
1329 return 1;
1330}
1331
1275static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id, 1332static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id,
1276 struct rdma_cm_event *event) 1333 struct rdma_cm_event *event)
1277{ 1334{
@@ -1294,20 +1351,11 @@ static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id,
1294 break; 1351 break;
1295 case RDMA_CM_EVENT_ADDR_CHANGE: 1352 case RDMA_CM_EVENT_ADDR_CHANGE:
1296 case RDMA_CM_EVENT_DISCONNECTED: 1353 case RDMA_CM_EVENT_DISCONNECTED:
1297 case RDMA_CM_EVENT_DEVICE_REMOVAL:
1298 case RDMA_CM_EVENT_TIMEWAIT_EXIT: 1354 case RDMA_CM_EVENT_TIMEWAIT_EXIT:
1299 /* 1355 nvmet_rdma_queue_disconnect(queue);
1300 * We can get the device removal callback even for a 1356 break;
1301 * CM ID that we aren't actually using. In that case 1357 case RDMA_CM_EVENT_DEVICE_REMOVAL:
1302 * the context pointer is NULL, so we shouldn't try 1358 ret = nvmet_rdma_device_removal(cm_id, queue);
1303 * to disconnect a non-existing queue. But we also
1304 * need to return 1 so that the core will destroy
1305 * its own ID. What a great API design...
1306 */
1307 if (queue)
1308 nvmet_rdma_queue_disconnect(queue);
1309 else
1310 ret = 1;
1311 break; 1359 break;
1312 case RDMA_CM_EVENT_REJECTED: 1360 case RDMA_CM_EVENT_REJECTED:
1313 case RDMA_CM_EVENT_UNREACHABLE: 1361 case RDMA_CM_EVENT_UNREACHABLE:
@@ -1396,9 +1444,10 @@ out_destroy_id:
1396 1444
1397static void nvmet_rdma_remove_port(struct nvmet_port *port) 1445static void nvmet_rdma_remove_port(struct nvmet_port *port)
1398{ 1446{
1399 struct rdma_cm_id *cm_id = port->priv; 1447 struct rdma_cm_id *cm_id = xchg(&port->priv, NULL);
1400 1448
1401 rdma_destroy_id(cm_id); 1449 if (cm_id)
1450 rdma_destroy_id(cm_id);
1402} 1451}
1403 1452
1404static struct nvmet_fabrics_ops nvmet_rdma_ops = { 1453static struct nvmet_fabrics_ops nvmet_rdma_ops = {
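
Both removal paths in this file can now race to destroy the same listener cm_id, and the xchg() on port->priv is what makes that safe: whichever path swaps the pointer out first sees the old non-NULL value and owns the destruction; the loser reads NULL and backs off. A userspace analogue with C11 atomic_exchange (sketch only):

#include <stdatomic.h>
#include <stdio.h>

static void destroy(void *p)
{
	printf("destroying %p\n", p);
}

/* called from two racing teardown paths; exactly one of them
 * wins the exchange and performs the destruction */
static void teardown(_Atomic(void *) *slot)
{
	void *owned = atomic_exchange(slot, NULL);

	if (owned)
		destroy(owned);		/* we won: we own it */
	/* else: the other path already claimed it */
}

int main(void)
{
	int obj;
	_Atomic(void *) priv = &obj;

	teardown(&priv);	/* destroys */
	teardown(&priv);	/* no-op */
	return 0;
}
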
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 7792266db259..3ce69536a7b3 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -1631,8 +1631,7 @@ static int __of_parse_phandle_with_args(const struct device_node *np,
1631 */ 1631 */
1632 1632
1633 err: 1633 err:
1634 if (it.node) 1634 of_node_put(it.node);
1635 of_node_put(it.node);
1636 return rc; 1635 return rc;
1637} 1636}
1638 1637
@@ -2343,20 +2342,13 @@ struct device_node *of_graph_get_endpoint_by_regs(
2343 const struct device_node *parent, int port_reg, int reg) 2342 const struct device_node *parent, int port_reg, int reg)
2344{ 2343{
2345 struct of_endpoint endpoint; 2344 struct of_endpoint endpoint;
2346 struct device_node *node, *prev_node = NULL; 2345 struct device_node *node = NULL;
2347
2348 while (1) {
2349 node = of_graph_get_next_endpoint(parent, prev_node);
2350 of_node_put(prev_node);
2351 if (!node)
2352 break;
2353 2346
2347 for_each_endpoint_of_node(parent, node) {
2354 of_graph_parse_endpoint(node, &endpoint); 2348 of_graph_parse_endpoint(node, &endpoint);
2355 if (((port_reg == -1) || (endpoint.port == port_reg)) && 2349 if (((port_reg == -1) || (endpoint.port == port_reg)) &&
2356 ((reg == -1) || (endpoint.id == reg))) 2350 ((reg == -1) || (endpoint.id == reg)))
2357 return node; 2351 return node;
2358
2359 prev_node = node;
2360 } 2352 }
2361 2353
2362 return NULL; 2354 return NULL;
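
The rewrite works because for_each_endpoint_of_node() encapsulates the refcount dance the open-coded loop did by hand: each iteration drops the reference on the previous endpoint and takes one on the next, and returning from inside the loop keeps the reference on the current node, which is exactly the get-semantics a lookup helper must provide. Usage shape (a sketch; 'parent' and the match condition are placeholders, needs <linux/of_graph.h>):

/* find the first endpoint on port 0; the caller receives a
 * reference and must of_node_put() it when done */
struct device_node *ep;

for_each_endpoint_of_node(parent, ep) {
	struct of_endpoint endpoint;

	of_graph_parse_endpoint(ep, &endpoint);
	if (endpoint.port == 0)
		return ep;	/* reference retained for the caller */
}
return NULL;
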
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 55f1b8391149..085c6389afd1 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -517,7 +517,7 @@ static void *__unflatten_device_tree(const void *blob,
517 pr_warning("End of tree marker overwritten: %08x\n", 517 pr_warning("End of tree marker overwritten: %08x\n",
518 be32_to_cpup(mem + size)); 518 be32_to_cpup(mem + size));
519 519
520 if (detached) { 520 if (detached && mynodes) {
521 of_node_set_flag(*mynodes, OF_DETACHED); 521 of_node_set_flag(*mynodes, OF_DETACHED);
522 pr_debug("unflattened tree is detached\n"); 522 pr_debug("unflattened tree is detached\n");
523 } 523 }
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 89a71c6074fc..a2e68f740eda 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -544,12 +544,15 @@ void __init of_irq_init(const struct of_device_id *matches)
544 544
545 list_del(&desc->list); 545 list_del(&desc->list);
546 546
547 of_node_set_flag(desc->dev, OF_POPULATED);
548
547 pr_debug("of_irq_init: init %s (%p), parent %p\n", 549 pr_debug("of_irq_init: init %s (%p), parent %p\n",
548 desc->dev->full_name, 550 desc->dev->full_name,
549 desc->dev, desc->interrupt_parent); 551 desc->dev, desc->interrupt_parent);
550 ret = desc->irq_init_cb(desc->dev, 552 ret = desc->irq_init_cb(desc->dev,
551 desc->interrupt_parent); 553 desc->interrupt_parent);
552 if (ret) { 554 if (ret) {
555 of_node_clear_flag(desc->dev, OF_POPULATED);
553 kfree(desc); 556 kfree(desc);
554 continue; 557 continue;
555 } 558 }
@@ -559,8 +562,6 @@ void __init of_irq_init(const struct of_device_id *matches)
559 * its children can get processed in a subsequent pass. 562 * its children can get processed in a subsequent pass.
560 */ 563 */
561 list_add_tail(&desc->list, &intc_parent_list); 564 list_add_tail(&desc->list, &intc_parent_list);
562
563 of_node_set_flag(desc->dev, OF_POPULATED);
564 } 565 }
565 566
566 /* Get the next pending parent that might have children */ 567 /* Get the next pending parent that might have children */
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index 8aa197691074..f39ccd5aa701 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -497,6 +497,7 @@ int of_platform_default_populate(struct device_node *root,
497} 497}
498EXPORT_SYMBOL_GPL(of_platform_default_populate); 498EXPORT_SYMBOL_GPL(of_platform_default_populate);
499 499
500#ifndef CONFIG_PPC
500static int __init of_platform_default_populate_init(void) 501static int __init of_platform_default_populate_init(void)
501{ 502{
502 struct device_node *node; 503 struct device_node *node;
@@ -521,6 +522,7 @@ static int __init of_platform_default_populate_init(void)
521 return 0; 522 return 0;
522} 523}
523arch_initcall_sync(of_platform_default_populate_init); 524arch_initcall_sync(of_platform_default_populate_init);
525#endif
524 526
525static int of_platform_device_destroy(struct device *dev, void *data) 527static int of_platform_device_destroy(struct device *dev, void *data)
526{ 528{
diff --git a/drivers/pci/host-bridge.c b/drivers/pci/host-bridge.c
index 5f4a2e04c8d7..add66236215c 100644
--- a/drivers/pci/host-bridge.c
+++ b/drivers/pci/host-bridge.c
@@ -44,6 +44,7 @@ void pci_set_host_bridge_release(struct pci_host_bridge *bridge,
44 bridge->release_fn = release_fn; 44 bridge->release_fn = release_fn;
45 bridge->release_data = release_data; 45 bridge->release_data = release_data;
46} 46}
47EXPORT_SYMBOL_GPL(pci_set_host_bridge_release);
47 48
48void pcibios_resource_to_bus(struct pci_bus *bus, struct pci_bus_region *region, 49void pcibios_resource_to_bus(struct pci_bus *bus, struct pci_bus_region *region,
49 struct resource *res) 50 struct resource *res)
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index a02981efdad5..98f12223c734 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -1069,7 +1069,7 @@ static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
1069 nvec = maxvec; 1069 nvec = maxvec;
1070 1070
1071 for (;;) { 1071 for (;;) {
1072 if (!(flags & PCI_IRQ_NOAFFINITY)) { 1072 if (flags & PCI_IRQ_AFFINITY) {
1073 dev->irq_affinity = irq_create_affinity_mask(&nvec); 1073 dev->irq_affinity = irq_create_affinity_mask(&nvec);
1074 if (nvec < minvec) 1074 if (nvec < minvec)
1075 return -ENOSPC; 1075 return -ENOSPC;
@@ -1105,7 +1105,7 @@ static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
1105 **/ 1105 **/
1106int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec) 1106int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec)
1107{ 1107{
1108 return __pci_enable_msi_range(dev, minvec, maxvec, PCI_IRQ_NOAFFINITY); 1108 return __pci_enable_msi_range(dev, minvec, maxvec, 0);
1109} 1109}
1110EXPORT_SYMBOL(pci_enable_msi_range); 1110EXPORT_SYMBOL(pci_enable_msi_range);
1111 1111
@@ -1120,7 +1120,7 @@ static int __pci_enable_msix_range(struct pci_dev *dev,
1120 return -ERANGE; 1120 return -ERANGE;
1121 1121
1122 for (;;) { 1122 for (;;) {
1123 if (!(flags & PCI_IRQ_NOAFFINITY)) { 1123 if (flags & PCI_IRQ_AFFINITY) {
1124 dev->irq_affinity = irq_create_affinity_mask(&nvec); 1124 dev->irq_affinity = irq_create_affinity_mask(&nvec);
1125 if (nvec < minvec) 1125 if (nvec < minvec)
1126 return -ENOSPC; 1126 return -ENOSPC;
@@ -1160,8 +1160,7 @@ static int __pci_enable_msix_range(struct pci_dev *dev,
1160int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, 1160int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
1161 int minvec, int maxvec) 1161 int minvec, int maxvec)
1162{ 1162{
1163 return __pci_enable_msix_range(dev, entries, minvec, maxvec, 1163 return __pci_enable_msix_range(dev, entries, minvec, maxvec, 0);
1164 PCI_IRQ_NOAFFINITY);
1165} 1164}
1166EXPORT_SYMBOL(pci_enable_msix_range); 1165EXPORT_SYMBOL(pci_enable_msix_range);
1167 1166
@@ -1187,22 +1186,25 @@ int pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
1187{ 1186{
1188 int vecs = -ENOSPC; 1187 int vecs = -ENOSPC;
1189 1188
1190 if (!(flags & PCI_IRQ_NOMSIX)) { 1189 if (flags & PCI_IRQ_MSIX) {
1191 vecs = __pci_enable_msix_range(dev, NULL, min_vecs, max_vecs, 1190 vecs = __pci_enable_msix_range(dev, NULL, min_vecs, max_vecs,
1192 flags); 1191 flags);
1193 if (vecs > 0) 1192 if (vecs > 0)
1194 return vecs; 1193 return vecs;
1195 } 1194 }
1196 1195
1197 if (!(flags & PCI_IRQ_NOMSI)) { 1196 if (flags & PCI_IRQ_MSI) {
1198 vecs = __pci_enable_msi_range(dev, min_vecs, max_vecs, flags); 1197 vecs = __pci_enable_msi_range(dev, min_vecs, max_vecs, flags);
1199 if (vecs > 0) 1198 if (vecs > 0)
1200 return vecs; 1199 return vecs;
1201 } 1200 }
1202 1201
1203 /* use legacy irq if allowed */ 1202 /* use legacy irq if allowed */
1204 if (!(flags & PCI_IRQ_NOLEGACY) && min_vecs == 1) 1203 if ((flags & PCI_IRQ_LEGACY) && min_vecs == 1) {
1204 pci_intx(dev, 1);
1205 return 1; 1205 return 1;
1206 }
1207
1206 return vecs; 1208 return vecs;
1207} 1209}
1208EXPORT_SYMBOL(pci_alloc_irq_vectors); 1210EXPORT_SYMBOL(pci_alloc_irq_vectors);
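
With the polarity flipped, callers opt in to each vector type rather than opting out, and the spreading flag (PCI_IRQ_AFFINITY) likewise replaces the old opt-out PCI_IRQ_NOAFFINITY. A typical probe-time call, as a sketch for a hypothetical driver that can run with 1 to 32 vectors:

/* hypothetical probe fragment: try MSI-X, then MSI, then INTx */
int nvecs;

nvecs = pci_alloc_irq_vectors(pdev, 1, 32,
		PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY);
if (nvecs < 0)
	return nvecs;	/* not even one legacy IRQ was available */

/* since min_vecs == 1 and PCI_IRQ_LEGACY was passed, the fallback
 * returns 1 after enabling INTx via pci_intx(), per the hunk above */
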
@@ -1411,6 +1413,8 @@ struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode,
1411 if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS) 1413 if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
1412 pci_msi_domain_update_chip_ops(info); 1414 pci_msi_domain_update_chip_ops(info);
1413 1415
1416 info->flags |= MSI_FLAG_ACTIVATE_EARLY;
1417
1414 domain = msi_create_irq_domain(fwnode, info, parent); 1418 domain = msi_create_irq_domain(fwnode, info, parent);
1415 if (!domain) 1419 if (!domain)
1416 return NULL; 1420 return NULL;
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 37ff0158e45f..44e0ff37480b 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -3327,9 +3327,9 @@ static void quirk_apple_wait_for_thunderbolt(struct pci_dev *dev)
3327 if (nhi->vendor != PCI_VENDOR_ID_INTEL 3327 if (nhi->vendor != PCI_VENDOR_ID_INTEL
3328 || (nhi->device != PCI_DEVICE_ID_INTEL_LIGHT_RIDGE && 3328 || (nhi->device != PCI_DEVICE_ID_INTEL_LIGHT_RIDGE &&
3329 nhi->device != PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C && 3329 nhi->device != PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C &&
3330 nhi->device != PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI &&
3330 nhi->device != PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI) 3331 nhi->device != PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI)
3331 || nhi->subsystem_vendor != 0x2222 3332 || nhi->class != PCI_CLASS_SYSTEM_OTHER << 8)
3332 || nhi->subsystem_device != 0x1111)
3333 goto out; 3333 goto out;
3334 dev_info(&dev->dev, "quirk: waiting for thunderbolt to reestablish PCI tunnels...\n"); 3334 dev_info(&dev->dev, "quirk: waiting for thunderbolt to reestablish PCI tunnels...\n");
3335 device_pm_wait_for_dev(&dev->dev, &nhi->dev); 3335 device_pm_wait_for_dev(&dev->dev, &nhi->dev);
@@ -3344,6 +3344,9 @@ DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,
3344 PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C, 3344 PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C,
3345 quirk_apple_wait_for_thunderbolt); 3345 quirk_apple_wait_for_thunderbolt);
3346DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, 3346DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,
3347 PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE,
3348 quirk_apple_wait_for_thunderbolt);
3349DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,
3347 PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE, 3350 PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE,
3348 quirk_apple_wait_for_thunderbolt); 3351 quirk_apple_wait_for_thunderbolt);
3349#endif 3352#endif
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index d1ef7acf6930..f9357e09e9b3 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -40,6 +40,7 @@ static void pci_destroy_dev(struct pci_dev *dev)
40 list_del(&dev->bus_list); 40 list_del(&dev->bus_list);
41 up_write(&pci_bus_sem); 41 up_write(&pci_bus_sem);
42 42
43 pci_bridge_d3_device_removed(dev);
43 pci_free_resources(dev); 44 pci_free_resources(dev);
44 put_device(&dev->dev); 45 put_device(&dev->dev);
45} 46}
@@ -96,8 +97,6 @@ static void pci_remove_bus_device(struct pci_dev *dev)
96 dev->subordinate = NULL; 97 dev->subordinate = NULL;
97 } 98 }
98 99
99 pci_bridge_d3_device_removed(dev);
100
101 pci_destroy_dev(dev); 100 pci_destroy_dev(dev);
102} 101}
103 102
diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c
index 489ea1098c96..69b5e811ea2b 100644
--- a/drivers/pcmcia/ds.c
+++ b/drivers/pcmcia/ds.c
@@ -977,7 +977,7 @@ static int pcmcia_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
977 977
978/************************ runtime PM support ***************************/ 978/************************ runtime PM support ***************************/
979 979
980static int pcmcia_dev_suspend(struct device *dev, pm_message_t state); 980static int pcmcia_dev_suspend(struct device *dev);
981static int pcmcia_dev_resume(struct device *dev); 981static int pcmcia_dev_resume(struct device *dev);
982 982
983static int runtime_suspend(struct device *dev) 983static int runtime_suspend(struct device *dev)
@@ -985,7 +985,7 @@ static int runtime_suspend(struct device *dev)
985 int rc; 985 int rc;
986 986
987 device_lock(dev); 987 device_lock(dev);
988 rc = pcmcia_dev_suspend(dev, PMSG_SUSPEND); 988 rc = pcmcia_dev_suspend(dev);
989 device_unlock(dev); 989 device_unlock(dev);
990 return rc; 990 return rc;
991} 991}
@@ -1135,7 +1135,7 @@ ATTRIBUTE_GROUPS(pcmcia_dev);
1135 1135
1136/* PM support, also needed for reset */ 1136/* PM support, also needed for reset */
1137 1137
1138static int pcmcia_dev_suspend(struct device *dev, pm_message_t state) 1138static int pcmcia_dev_suspend(struct device *dev)
1139{ 1139{
1140 struct pcmcia_device *p_dev = to_pcmcia_dev(dev); 1140 struct pcmcia_device *p_dev = to_pcmcia_dev(dev);
1141 struct pcmcia_driver *p_drv = NULL; 1141 struct pcmcia_driver *p_drv = NULL;
@@ -1410,6 +1410,9 @@ static struct class_interface pcmcia_bus_interface __refdata = {
1410 .remove_dev = &pcmcia_bus_remove_socket, 1410 .remove_dev = &pcmcia_bus_remove_socket,
1411}; 1411};
1412 1412
1413static const struct dev_pm_ops pcmcia_bus_pm_ops = {
1414 SET_SYSTEM_SLEEP_PM_OPS(pcmcia_dev_suspend, pcmcia_dev_resume)
1415};
1413 1416
1414struct bus_type pcmcia_bus_type = { 1417struct bus_type pcmcia_bus_type = {
1415 .name = "pcmcia", 1418 .name = "pcmcia",
@@ -1418,8 +1421,7 @@ struct bus_type pcmcia_bus_type = {
1418 .dev_groups = pcmcia_dev_groups, 1421 .dev_groups = pcmcia_dev_groups,
1419 .probe = pcmcia_device_probe, 1422 .probe = pcmcia_device_probe,
1420 .remove = pcmcia_device_remove, 1423 .remove = pcmcia_device_remove,
1421 .suspend = pcmcia_dev_suspend, 1424 .pm = &pcmcia_bus_pm_ops,
1422 .resume = pcmcia_dev_resume,
1423}; 1425};
1424 1426
1425 1427
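
This is the standard recipe for retiring the legacy bus_type .suspend/.resume hooks: drop the pm_message_t argument, collect the callbacks in a dev_pm_ops via SET_SYSTEM_SLEEP_PM_OPS() (which compiles to nothing when CONFIG_PM_SLEEP is off), and point .pm at it. The same shape for a hypothetical bus (sketch):

#include <linux/device.h>
#include <linux/pm.h>

static int foo_dev_suspend(struct device *dev) { return 0; }
static int foo_dev_resume(struct device *dev) { return 0; }

static const struct dev_pm_ops foo_bus_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(foo_dev_suspend, foo_dev_resume)
};

struct bus_type foo_bus_type = {
	.name	= "foo",
	.pm	= &foo_bus_pm_ops,
};
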
diff --git a/drivers/pcmcia/pxa2xx_base.c b/drivers/pcmcia/pxa2xx_base.c
index 483f919e0d2e..91b5f5724cba 100644
--- a/drivers/pcmcia/pxa2xx_base.c
+++ b/drivers/pcmcia/pxa2xx_base.c
@@ -214,9 +214,8 @@ pxa2xx_pcmcia_frequency_change(struct soc_pcmcia_socket *skt,
214} 214}
215#endif 215#endif
216 216
217void pxa2xx_configure_sockets(struct device *dev) 217void pxa2xx_configure_sockets(struct device *dev, struct pcmcia_low_level *ops)
218{ 218{
219 struct pcmcia_low_level *ops = dev->platform_data;
220 /* 219 /*
221 * We have at least one socket, so set MECR:CIT 220 * We have at least one socket, so set MECR:CIT
222 * (Card Is There) 221 * (Card Is There)
@@ -322,7 +321,7 @@ static int pxa2xx_drv_pcmcia_probe(struct platform_device *dev)
322 goto err1; 321 goto err1;
323 } 322 }
324 323
325 pxa2xx_configure_sockets(&dev->dev); 324 pxa2xx_configure_sockets(&dev->dev, ops);
326 dev_set_drvdata(&dev->dev, sinfo); 325 dev_set_drvdata(&dev->dev, sinfo);
327 326
328 return 0; 327 return 0;
@@ -348,7 +347,9 @@ static int pxa2xx_drv_pcmcia_remove(struct platform_device *dev)
348 347
349static int pxa2xx_drv_pcmcia_resume(struct device *dev) 348static int pxa2xx_drv_pcmcia_resume(struct device *dev)
350{ 349{
351 pxa2xx_configure_sockets(dev); 350 struct pcmcia_low_level *ops = (struct pcmcia_low_level *)dev->platform_data;
351
352 pxa2xx_configure_sockets(dev, ops);
352 return 0; 353 return 0;
353} 354}
354 355
diff --git a/drivers/pcmcia/pxa2xx_base.h b/drivers/pcmcia/pxa2xx_base.h
index b609b45469ed..e58c7a415418 100644
--- a/drivers/pcmcia/pxa2xx_base.h
+++ b/drivers/pcmcia/pxa2xx_base.h
@@ -1,4 +1,4 @@
1int pxa2xx_drv_pcmcia_add_one(struct soc_pcmcia_socket *skt); 1int pxa2xx_drv_pcmcia_add_one(struct soc_pcmcia_socket *skt);
2void pxa2xx_drv_pcmcia_ops(struct pcmcia_low_level *ops); 2void pxa2xx_drv_pcmcia_ops(struct pcmcia_low_level *ops);
3void pxa2xx_configure_sockets(struct device *dev); 3void pxa2xx_configure_sockets(struct device *dev, struct pcmcia_low_level *ops);
4 4
diff --git a/drivers/pcmcia/sa1111_badge4.c b/drivers/pcmcia/sa1111_badge4.c
index 12f0dd091477..2f490930430d 100644
--- a/drivers/pcmcia/sa1111_badge4.c
+++ b/drivers/pcmcia/sa1111_badge4.c
@@ -134,20 +134,14 @@ static struct pcmcia_low_level badge4_pcmcia_ops = {
134 134
135int pcmcia_badge4_init(struct sa1111_dev *dev) 135int pcmcia_badge4_init(struct sa1111_dev *dev)
136{ 136{
137 int ret = -ENODEV; 137 printk(KERN_INFO
138 138 "%s: badge4_pcmvcc=%d, badge4_pcmvpp=%d, badge4_cfvcc=%d\n",
139 if (machine_is_badge4()) { 139 __func__,
140 printk(KERN_INFO 140 badge4_pcmvcc, badge4_pcmvpp, badge4_cfvcc);
141 "%s: badge4_pcmvcc=%d, badge4_pcmvpp=%d, badge4_cfvcc=%d\n", 141
142 __func__, 142 sa11xx_drv_pcmcia_ops(&badge4_pcmcia_ops);
143 badge4_pcmvcc, badge4_pcmvpp, badge4_cfvcc); 143 return sa1111_pcmcia_add(dev, &badge4_pcmcia_ops,
144 144 sa11xx_drv_pcmcia_add_one);
145 sa11xx_drv_pcmcia_ops(&badge4_pcmcia_ops);
146 ret = sa1111_pcmcia_add(dev, &badge4_pcmcia_ops,
147 sa11xx_drv_pcmcia_add_one);
148 }
149
150 return ret;
151} 145}
152 146
153static int __init pcmv_setup(char *s) 147static int __init pcmv_setup(char *s)
diff --git a/drivers/pcmcia/sa1111_generic.c b/drivers/pcmcia/sa1111_generic.c
index a1531feb8460..3d95dffcff7a 100644
--- a/drivers/pcmcia/sa1111_generic.c
+++ b/drivers/pcmcia/sa1111_generic.c
@@ -18,6 +18,7 @@
18 18
19#include <mach/hardware.h> 19#include <mach/hardware.h>
20#include <asm/hardware/sa1111.h> 20#include <asm/hardware/sa1111.h>
21#include <asm/mach-types.h>
21#include <asm/irq.h> 22#include <asm/irq.h>
22 23
23#include "sa1111_generic.h" 24#include "sa1111_generic.h"
@@ -203,19 +204,30 @@ static int pcmcia_probe(struct sa1111_dev *dev)
203 sa1111_writel(PCSSR_S0_SLEEP | PCSSR_S1_SLEEP, base + PCSSR); 204 sa1111_writel(PCSSR_S0_SLEEP | PCSSR_S1_SLEEP, base + PCSSR);
204 sa1111_writel(PCCR_S0_FLT | PCCR_S1_FLT, base + PCCR); 205 sa1111_writel(PCCR_S0_FLT | PCCR_S1_FLT, base + PCCR);
205 206
207 ret = -ENODEV;
206#ifdef CONFIG_SA1100_BADGE4 208#ifdef CONFIG_SA1100_BADGE4
207 pcmcia_badge4_init(dev); 209 if (machine_is_badge4())
210 ret = pcmcia_badge4_init(dev);
208#endif 211#endif
209#ifdef CONFIG_SA1100_JORNADA720 212#ifdef CONFIG_SA1100_JORNADA720
210 pcmcia_jornada720_init(dev); 213 if (machine_is_jornada720())
214 ret = pcmcia_jornada720_init(dev);
211#endif 215#endif
212#ifdef CONFIG_ARCH_LUBBOCK 216#ifdef CONFIG_ARCH_LUBBOCK
213 pcmcia_lubbock_init(dev); 217 if (machine_is_lubbock())
218 ret = pcmcia_lubbock_init(dev);
214#endif 219#endif
215#ifdef CONFIG_ASSABET_NEPONSET 220#ifdef CONFIG_ASSABET_NEPONSET
216 pcmcia_neponset_init(dev); 221 if (machine_is_assabet())
222 ret = pcmcia_neponset_init(dev);
217#endif 223#endif
218 return 0; 224
225 if (ret) {
226 release_mem_region(dev->res.start, 512);
227 sa1111_disable_device(dev);
228 }
229
230 return ret;
219} 231}
220 232
221static int pcmcia_remove(struct sa1111_dev *dev) 233static int pcmcia_remove(struct sa1111_dev *dev)
diff --git a/drivers/pcmcia/sa1111_jornada720.c b/drivers/pcmcia/sa1111_jornada720.c
index c2c30580c83f..480a3ede27c8 100644
--- a/drivers/pcmcia/sa1111_jornada720.c
+++ b/drivers/pcmcia/sa1111_jornada720.c
@@ -94,22 +94,17 @@ static struct pcmcia_low_level jornada720_pcmcia_ops = {
94 94
95int pcmcia_jornada720_init(struct sa1111_dev *sadev) 95int pcmcia_jornada720_init(struct sa1111_dev *sadev)
96{ 96{
97 int ret = -ENODEV; 97 unsigned int pin = GPIO_A0 | GPIO_A1 | GPIO_A2 | GPIO_A3;
98 98
99 if (machine_is_jornada720()) { 99 /* Fixme: why messing around with SA11x0's GPIO1? */
100 unsigned int pin = GPIO_A0 | GPIO_A1 | GPIO_A2 | GPIO_A3; 100 GRER |= 0x00000002;
101 101
102 GRER |= 0x00000002; 102 /* Set GPIO_A<3:1> to be outputs for PCMCIA/CF power controller: */
103 sa1111_set_io_dir(sadev, pin, 0, 0);
104 sa1111_set_io(sadev, pin, 0);
105 sa1111_set_sleep_io(sadev, pin, 0);
103 106
104 /* Set GPIO_A<3:1> to be outputs for PCMCIA/CF power controller: */ 107 sa11xx_drv_pcmcia_ops(&jornada720_pcmcia_ops);
105 sa1111_set_io_dir(sadev, pin, 0, 0); 108 return sa1111_pcmcia_add(sadev, &jornada720_pcmcia_ops,
106 sa1111_set_io(sadev, pin, 0); 109 sa11xx_drv_pcmcia_add_one);
107 sa1111_set_sleep_io(sadev, pin, 0);
108
109 sa11xx_drv_pcmcia_ops(&jornada720_pcmcia_ops);
110 ret = sa1111_pcmcia_add(sadev, &jornada720_pcmcia_ops,
111 sa11xx_drv_pcmcia_add_one);
112 }
113
114 return ret;
115} 110}
diff --git a/drivers/pcmcia/sa1111_lubbock.c b/drivers/pcmcia/sa1111_lubbock.c
index c5caf5790451..e741f499c875 100644
--- a/drivers/pcmcia/sa1111_lubbock.c
+++ b/drivers/pcmcia/sa1111_lubbock.c
@@ -210,27 +210,21 @@ static struct pcmcia_low_level lubbock_pcmcia_ops = {
210 210
211int pcmcia_lubbock_init(struct sa1111_dev *sadev) 211int pcmcia_lubbock_init(struct sa1111_dev *sadev)
212{ 212{
213 int ret = -ENODEV; 213 /*
214 214 * Set GPIO_A<3:0> to be outputs for the MAX1600,
215 if (machine_is_lubbock()) { 215 * and switch to standby mode.
216 /* 216 */
217 * Set GPIO_A<3:0> to be outputs for the MAX1600, 217 sa1111_set_io_dir(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0, 0);
218 * and switch to standby mode. 218 sa1111_set_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0);
219 */ 219 sa1111_set_sleep_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0);
220 sa1111_set_io_dir(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0, 0);
221 sa1111_set_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0);
222 sa1111_set_sleep_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0);
223
224 /* Set CF Socket 1 power to standby mode. */
225 lubbock_set_misc_wr((1 << 15) | (1 << 14), 0);
226 220
227 pxa2xx_drv_pcmcia_ops(&lubbock_pcmcia_ops); 221 /* Set CF Socket 1 power to standby mode. */
228 pxa2xx_configure_sockets(&sadev->dev); 222 lubbock_set_misc_wr((1 << 15) | (1 << 14), 0);
229 ret = sa1111_pcmcia_add(sadev, &lubbock_pcmcia_ops,
230 pxa2xx_drv_pcmcia_add_one);
231 }
232 223
233 return ret; 224 pxa2xx_drv_pcmcia_ops(&lubbock_pcmcia_ops);
225 pxa2xx_configure_sockets(&sadev->dev, &lubbock_pcmcia_ops);
226 return sa1111_pcmcia_add(sadev, &lubbock_pcmcia_ops,
227 pxa2xx_drv_pcmcia_add_one);
234} 228}
235 229
236MODULE_LICENSE("GPL"); 230MODULE_LICENSE("GPL");
diff --git a/drivers/pcmcia/sa1111_neponset.c b/drivers/pcmcia/sa1111_neponset.c
index 1d78739c4c07..019c395eb4bf 100644
--- a/drivers/pcmcia/sa1111_neponset.c
+++ b/drivers/pcmcia/sa1111_neponset.c
@@ -110,20 +110,14 @@ static struct pcmcia_low_level neponset_pcmcia_ops = {
110 110
111int pcmcia_neponset_init(struct sa1111_dev *sadev) 111int pcmcia_neponset_init(struct sa1111_dev *sadev)
112{ 112{
113 int ret = -ENODEV; 113 /*
114 114 * Set GPIO_A<3:0> to be outputs for the MAX1600,
115 if (machine_is_assabet()) { 115 * and switch to standby mode.
116 /* 116 */
117 * Set GPIO_A<3:0> to be outputs for the MAX1600, 117 sa1111_set_io_dir(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0, 0);
118 * and switch to standby mode. 118 sa1111_set_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0);
119 */ 119 sa1111_set_sleep_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0);
120 sa1111_set_io_dir(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0, 0); 120 sa11xx_drv_pcmcia_ops(&neponset_pcmcia_ops);
121 sa1111_set_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0); 121 return sa1111_pcmcia_add(sadev, &neponset_pcmcia_ops,
122 sa1111_set_sleep_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0); 122 sa11xx_drv_pcmcia_add_one);
123 sa11xx_drv_pcmcia_ops(&neponset_pcmcia_ops);
124 ret = sa1111_pcmcia_add(sadev, &neponset_pcmcia_ops,
125 sa11xx_drv_pcmcia_add_one);
126 }
127
128 return ret;
129} 123}
diff --git a/drivers/pcmcia/sa11xx_base.c b/drivers/pcmcia/sa11xx_base.c
index 9f6ec87b9f9e..48140ac73ed6 100644
--- a/drivers/pcmcia/sa11xx_base.c
+++ b/drivers/pcmcia/sa11xx_base.c
@@ -144,19 +144,19 @@ static int
144sa1100_pcmcia_show_timing(struct soc_pcmcia_socket *skt, char *buf) 144sa1100_pcmcia_show_timing(struct soc_pcmcia_socket *skt, char *buf)
145{ 145{
146 struct soc_pcmcia_timing timing; 146 struct soc_pcmcia_timing timing;
147 unsigned int clock = clk_get_rate(skt->clk); 147 unsigned int clock = clk_get_rate(skt->clk) / 1000;
148 unsigned long mecr = MECR; 148 unsigned long mecr = MECR;
149 char *p = buf; 149 char *p = buf;
150 150
151 soc_common_pcmcia_get_timing(skt, &timing); 151 soc_common_pcmcia_get_timing(skt, &timing);
152 152
153 p+=sprintf(p, "I/O : %u (%u)\n", timing.io, 153 p+=sprintf(p, "I/O : %uns (%uns)\n", timing.io,
154 sa1100_pcmcia_cmd_time(clock, MECR_BSIO_GET(mecr, skt->nr))); 154 sa1100_pcmcia_cmd_time(clock, MECR_BSIO_GET(mecr, skt->nr)));
155 155
156 p+=sprintf(p, "attribute: %u (%u)\n", timing.attr, 156 p+=sprintf(p, "attribute: %uns (%uns)\n", timing.attr,
157 sa1100_pcmcia_cmd_time(clock, MECR_BSA_GET(mecr, skt->nr))); 157 sa1100_pcmcia_cmd_time(clock, MECR_BSA_GET(mecr, skt->nr)));
158 158
159 p+=sprintf(p, "common : %u (%u)\n", timing.mem, 159 p+=sprintf(p, "common : %uns (%uns)\n", timing.mem,
160 sa1100_pcmcia_cmd_time(clock, MECR_BSM_GET(mecr, skt->nr))); 160 sa1100_pcmcia_cmd_time(clock, MECR_BSM_GET(mecr, skt->nr)));
161 161
162 return p - buf; 162 return p - buf;
diff --git a/drivers/pcmcia/soc_common.c b/drivers/pcmcia/soc_common.c
index eed5e9c05353..d5ca760c4eb2 100644
--- a/drivers/pcmcia/soc_common.c
+++ b/drivers/pcmcia/soc_common.c
@@ -235,7 +235,7 @@ static unsigned int soc_common_pcmcia_skt_state(struct soc_pcmcia_socket *skt)
235 stat |= skt->cs_state.Vcc ? SS_POWERON : 0; 235 stat |= skt->cs_state.Vcc ? SS_POWERON : 0;
236 236
237 if (skt->cs_state.flags & SS_IOCARD) 237 if (skt->cs_state.flags & SS_IOCARD)
238 stat |= state.bvd1 ? SS_STSCHG : 0; 238 stat |= state.bvd1 ? 0 : SS_STSCHG;
239 else { 239 else {
240 if (state.bvd1 == 0) 240 if (state.bvd1 == 0)
241 stat |= SS_BATDEAD; 241 stat |= SS_BATDEAD;
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 6ccb994bdfcb..f5e1008a223d 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -688,7 +688,7 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
688 return 0; 688 return 0;
689} 689}
690 690
691static DEFINE_MUTEX(arm_pmu_mutex); 691static DEFINE_SPINLOCK(arm_pmu_lock);
692static LIST_HEAD(arm_pmu_list); 692static LIST_HEAD(arm_pmu_list);
693 693
694/* 694/*
@@ -701,7 +701,7 @@ static int arm_perf_starting_cpu(unsigned int cpu)
701{ 701{
702 struct arm_pmu *pmu; 702 struct arm_pmu *pmu;
703 703
704 mutex_lock(&arm_pmu_mutex); 704 spin_lock(&arm_pmu_lock);
705 list_for_each_entry(pmu, &arm_pmu_list, entry) { 705 list_for_each_entry(pmu, &arm_pmu_list, entry) {
706 706
707 if (!cpumask_test_cpu(cpu, &pmu->supported_cpus)) 707 if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
@@ -709,7 +709,7 @@ static int arm_perf_starting_cpu(unsigned int cpu)
709 if (pmu->reset) 709 if (pmu->reset)
710 pmu->reset(pmu); 710 pmu->reset(pmu);
711 } 711 }
712 mutex_unlock(&arm_pmu_mutex); 712 spin_unlock(&arm_pmu_lock);
713 return 0; 713 return 0;
714} 714}
715 715
@@ -821,9 +821,9 @@ static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
821 if (!cpu_hw_events) 821 if (!cpu_hw_events)
822 return -ENOMEM; 822 return -ENOMEM;
823 823
824 mutex_lock(&arm_pmu_mutex); 824 spin_lock(&arm_pmu_lock);
825 list_add_tail(&cpu_pmu->entry, &arm_pmu_list); 825 list_add_tail(&cpu_pmu->entry, &arm_pmu_list);
826 mutex_unlock(&arm_pmu_mutex); 826 spin_unlock(&arm_pmu_lock);
827 827
828 err = cpu_pm_pmu_register(cpu_pmu); 828 err = cpu_pm_pmu_register(cpu_pmu);
829 if (err) 829 if (err)
@@ -859,9 +859,9 @@ static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
859 return 0; 859 return 0;
860 860
861out_unregister: 861out_unregister:
862 mutex_lock(&arm_pmu_mutex); 862 spin_lock(&arm_pmu_lock);
863 list_del(&cpu_pmu->entry); 863 list_del(&cpu_pmu->entry);
864 mutex_unlock(&arm_pmu_mutex); 864 spin_unlock(&arm_pmu_lock);
865 free_percpu(cpu_hw_events); 865 free_percpu(cpu_hw_events);
866 return err; 866 return err;
867} 867}
@@ -869,9 +869,9 @@ out_unregister:
869static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu) 869static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
870{ 870{
871 cpu_pm_pmu_unregister(cpu_pmu); 871 cpu_pm_pmu_unregister(cpu_pmu);
872 mutex_lock(&arm_pmu_mutex); 872 spin_lock(&arm_pmu_lock);
873 list_del(&cpu_pmu->entry); 873 list_del(&cpu_pmu->entry);
874 mutex_unlock(&arm_pmu_mutex); 874 spin_unlock(&arm_pmu_lock);
875 free_percpu(cpu_pmu->hw_events); 875 free_percpu(cpu_pmu->hw_events);
876} 876}
877 877
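
The mutex is replaced because arm_perf_starting_cpu() runs as a CPU-hotplug starting callback, i.e. on the incoming CPU with interrupts disabled, where sleeping locks such as mutexes are not allowed; a spinlock is safe in that context. The constraint in miniature (hypothetical callback, sketch):

/* a CPUHP_AP_*_STARTING callback runs on the incoming CPU with
 * interrupts disabled, so only spinning locks may be taken here;
 * mutex_lock() could sleep and would be a bug in this context */
static DEFINE_SPINLOCK(example_lock);

static int example_starting_cpu(unsigned int cpu)
{
	spin_lock(&example_lock);
	/* update shared state the new CPU must observe */
	spin_unlock(&example_lock);
	return 0;
}
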
@@ -925,6 +925,7 @@ static int of_pmu_irq_cfg(struct arm_pmu *pmu)
925 if (i > 0 && spi != using_spi) { 925 if (i > 0 && spi != using_spi) {
926 pr_err("PPI/SPI IRQ type mismatch for %s!\n", 926 pr_err("PPI/SPI IRQ type mismatch for %s!\n",
927 dn->name); 927 dn->name);
928 of_node_put(dn);
928 kfree(irqs); 929 kfree(irqs);
929 return -EINVAL; 930 return -EINVAL;
930 } 931 }
@@ -967,11 +968,12 @@ static int of_pmu_irq_cfg(struct arm_pmu *pmu)
967 968
968 /* If we didn't manage to parse anything, try the interrupt affinity */ 969 /* If we didn't manage to parse anything, try the interrupt affinity */
969 if (cpumask_weight(&pmu->supported_cpus) == 0) { 970 if (cpumask_weight(&pmu->supported_cpus) == 0) {
970 if (!using_spi) { 971 int irq = platform_get_irq(pdev, 0);
972
973 if (irq >= 0 && irq_is_percpu(irq)) {
971 /* If using PPIs, check the affinity of the partition */ 974 /* If using PPIs, check the affinity of the partition */
972 int ret, irq; 975 int ret;
973 976
974 irq = platform_get_irq(pdev, 0);
975 ret = irq_get_percpu_devid_partition(irq, &pmu->supported_cpus); 977 ret = irq_get_percpu_devid_partition(irq, &pmu->supported_cpus);
976 if (ret) { 978 if (ret) {
977 kfree(irqs); 979 kfree(irqs);
diff --git a/drivers/phy/phy-brcm-sata.c b/drivers/phy/phy-brcm-sata.c
index 18d662610075..8ffc44afdb75 100644
--- a/drivers/phy/phy-brcm-sata.c
+++ b/drivers/phy/phy-brcm-sata.c
@@ -367,7 +367,7 @@ static int brcm_sata_phy_init(struct phy *phy)
367 rc = -ENODEV; 367 rc = -ENODEV;
368 }; 368 };
369 369
370 return 0; 370 return rc;
371} 371}
372 372
373static const struct phy_ops phy_ops = { 373static const struct phy_ops phy_ops = {
diff --git a/drivers/phy/phy-sun4i-usb.c b/drivers/phy/phy-sun4i-usb.c
index 0a45bc6088ae..8c7eb335622e 100644
--- a/drivers/phy/phy-sun4i-usb.c
+++ b/drivers/phy/phy-sun4i-usb.c
@@ -40,6 +40,7 @@
40#include <linux/power_supply.h> 40#include <linux/power_supply.h>
41#include <linux/regulator/consumer.h> 41#include <linux/regulator/consumer.h>
42#include <linux/reset.h> 42#include <linux/reset.h>
43#include <linux/usb/of.h>
43#include <linux/workqueue.h> 44#include <linux/workqueue.h>
44 45
45#define REG_ISCR 0x00 46#define REG_ISCR 0x00
@@ -110,6 +111,7 @@ struct sun4i_usb_phy_cfg {
110struct sun4i_usb_phy_data { 111struct sun4i_usb_phy_data {
111 void __iomem *base; 112 void __iomem *base;
112 const struct sun4i_usb_phy_cfg *cfg; 113 const struct sun4i_usb_phy_cfg *cfg;
114 enum usb_dr_mode dr_mode;
113 struct mutex mutex; 115 struct mutex mutex;
114 struct sun4i_usb_phy { 116 struct sun4i_usb_phy {
115 struct phy *phy; 117 struct phy *phy;
@@ -120,6 +122,7 @@ struct sun4i_usb_phy_data {
120 bool regulator_on; 122 bool regulator_on;
121 int index; 123 int index;
122 } phys[MAX_PHYS]; 124 } phys[MAX_PHYS];
125 int first_phy;
123 /* phy0 / otg related variables */ 126 /* phy0 / otg related variables */
124 struct extcon_dev *extcon; 127 struct extcon_dev *extcon;
125 bool phy0_init; 128 bool phy0_init;
@@ -285,16 +288,10 @@ static int sun4i_usb_phy_init(struct phy *_phy)
285 sun4i_usb_phy0_update_iscr(_phy, 0, ISCR_DPDM_PULLUP_EN); 288 sun4i_usb_phy0_update_iscr(_phy, 0, ISCR_DPDM_PULLUP_EN);
286 sun4i_usb_phy0_update_iscr(_phy, 0, ISCR_ID_PULLUP_EN); 289 sun4i_usb_phy0_update_iscr(_phy, 0, ISCR_ID_PULLUP_EN);
287 290
288 if (data->id_det_gpio) { 291 /* Force ISCR and cable state updates */
289 /* OTG mode, force ISCR and cable state updates */ 292 data->id_det = -1;
290 data->id_det = -1; 293 data->vbus_det = -1;
291 data->vbus_det = -1; 294 queue_delayed_work(system_wq, &data->detect, 0);
292 queue_delayed_work(system_wq, &data->detect, 0);
293 } else {
294 /* Host only mode */
295 sun4i_usb_phy0_set_id_detect(_phy, 0);
296 sun4i_usb_phy0_set_vbus_detect(_phy, 1);
297 }
298 } 295 }
299 296
300 return 0; 297 return 0;
@@ -319,6 +316,19 @@ static int sun4i_usb_phy_exit(struct phy *_phy)
319 return 0; 316 return 0;
320} 317}
321 318
319static int sun4i_usb_phy0_get_id_det(struct sun4i_usb_phy_data *data)
320{
321 switch (data->dr_mode) {
322 case USB_DR_MODE_OTG:
323 return gpiod_get_value_cansleep(data->id_det_gpio);
324 case USB_DR_MODE_HOST:
325 return 0;
326 case USB_DR_MODE_PERIPHERAL:
327 default:
328 return 1;
329 }
330}
331
322static int sun4i_usb_phy0_get_vbus_det(struct sun4i_usb_phy_data *data) 332static int sun4i_usb_phy0_get_vbus_det(struct sun4i_usb_phy_data *data)
323{ 333{
324 if (data->vbus_det_gpio) 334 if (data->vbus_det_gpio)
@@ -432,7 +442,10 @@ static void sun4i_usb_phy0_id_vbus_det_scan(struct work_struct *work)
432 struct phy *phy0 = data->phys[0].phy; 442 struct phy *phy0 = data->phys[0].phy;
433 int id_det, vbus_det, id_notify = 0, vbus_notify = 0; 443 int id_det, vbus_det, id_notify = 0, vbus_notify = 0;
434 444
435 id_det = gpiod_get_value_cansleep(data->id_det_gpio); 445 if (phy0 == NULL)
446 return;
447
448 id_det = sun4i_usb_phy0_get_id_det(data);
436 vbus_det = sun4i_usb_phy0_get_vbus_det(data); 449 vbus_det = sun4i_usb_phy0_get_vbus_det(data);
437 450
438 mutex_lock(&phy0->mutex); 451 mutex_lock(&phy0->mutex);
@@ -448,7 +461,8 @@ static void sun4i_usb_phy0_id_vbus_det_scan(struct work_struct *work)
448 * without vbus detection report vbus low for long enough for 461 * without vbus detection report vbus low for long enough for
449 * the musb-ip to end the current device session. 462 * the musb-ip to end the current device session.
450 */ 463 */
451 if (!sun4i_usb_phy0_have_vbus_det(data) && id_det == 0) { 464 if (data->dr_mode == USB_DR_MODE_OTG &&
465 !sun4i_usb_phy0_have_vbus_det(data) && id_det == 0) {
452 sun4i_usb_phy0_set_vbus_detect(phy0, 0); 466 sun4i_usb_phy0_set_vbus_detect(phy0, 0);
453 msleep(200); 467 msleep(200);
454 sun4i_usb_phy0_set_vbus_detect(phy0, 1); 468 sun4i_usb_phy0_set_vbus_detect(phy0, 1);
@@ -474,7 +488,8 @@ static void sun4i_usb_phy0_id_vbus_det_scan(struct work_struct *work)
474 * without vbus detection report vbus low for long enough to 488 * without vbus detection report vbus low for long enough to
475 * the musb-ip to end the current host session. 489 * the musb-ip to end the current host session.
476 */ 490 */
477 if (!sun4i_usb_phy0_have_vbus_det(data) && id_det == 1) { 491 if (data->dr_mode == USB_DR_MODE_OTG &&
492 !sun4i_usb_phy0_have_vbus_det(data) && id_det == 1) {
478 mutex_lock(&phy0->mutex); 493 mutex_lock(&phy0->mutex);
479 sun4i_usb_phy0_set_vbus_detect(phy0, 0); 494 sun4i_usb_phy0_set_vbus_detect(phy0, 0);
480 msleep(1000); 495 msleep(1000);
@@ -519,7 +534,8 @@ static struct phy *sun4i_usb_phy_xlate(struct device *dev,
519{ 534{
520 struct sun4i_usb_phy_data *data = dev_get_drvdata(dev); 535 struct sun4i_usb_phy_data *data = dev_get_drvdata(dev);
521 536
522 if (args->args[0] >= data->cfg->num_phys) 537 if (args->args[0] < data->first_phy ||
538 args->args[0] >= data->cfg->num_phys)
523 return ERR_PTR(-ENODEV); 539 return ERR_PTR(-ENODEV);
524 540
525 return data->phys[args->args[0]].phy; 541 return data->phys[args->args[0]].phy;
@@ -593,13 +609,17 @@ static int sun4i_usb_phy_probe(struct platform_device *pdev)
593 return -EPROBE_DEFER; 609 return -EPROBE_DEFER;
594 } 610 }
595 611
596 /* vbus_det without id_det makes no sense, and is not supported */ 612 data->dr_mode = of_usb_get_dr_mode_by_phy(np, 0);
597 if (sun4i_usb_phy0_have_vbus_det(data) && !data->id_det_gpio) { 613 switch (data->dr_mode) {
598 dev_err(dev, "usb0_id_det missing or invalid\n"); 614 case USB_DR_MODE_OTG:
599 return -ENODEV; 615 /* otg without id_det makes no sense, and is not supported */
600 } 616 if (!data->id_det_gpio) {
601 617 dev_err(dev, "usb0_id_det missing or invalid\n");
602 if (data->id_det_gpio) { 618 return -ENODEV;
619 }
620 /* fall through */
621 case USB_DR_MODE_HOST:
622 case USB_DR_MODE_PERIPHERAL:
603 data->extcon = devm_extcon_dev_allocate(dev, 623 data->extcon = devm_extcon_dev_allocate(dev,
604 sun4i_usb_phy0_cable); 624 sun4i_usb_phy0_cable);
605 if (IS_ERR(data->extcon)) 625 if (IS_ERR(data->extcon))
@@ -610,9 +630,13 @@ static int sun4i_usb_phy_probe(struct platform_device *pdev)
610 dev_err(dev, "failed to register extcon: %d\n", ret); 630 dev_err(dev, "failed to register extcon: %d\n", ret);
611 return ret; 631 return ret;
612 } 632 }
633 break;
634 default:
635 dev_info(dev, "dr_mode unknown, not registering usb phy0\n");
636 data->first_phy = 1;
613 } 637 }
614 638
615 for (i = 0; i < data->cfg->num_phys; i++) { 639 for (i = data->first_phy; i < data->cfg->num_phys; i++) {
616 struct sun4i_usb_phy *phy = data->phys + i; 640 struct sun4i_usb_phy *phy = data->phys + i;
617 char name[16]; 641 char name[16];
618 642
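
The new sun4i_usb_phy0_get_id_det() above collapses the old GPIO-only check into a per-dr_mode decision: only OTG consults the ID pin, while host and peripheral report fixed roles. A minimal standalone sketch of that decision table (the gpio_read() stub and compile-as-userspace enum here are illustrative stand-ins, not the kernel's definitions):

#include <stdio.h>

enum usb_dr_mode { USB_DR_MODE_OTG, USB_DR_MODE_HOST, USB_DR_MODE_PERIPHERAL };

/* stand-in for gpiod_get_value_cansleep() on the ID pin */
static int gpio_read(void) { return 1; }

/* 0 = host role (ID grounded), 1 = peripheral role (ID floating) */
static int get_id_det(enum usb_dr_mode mode)
{
	switch (mode) {
	case USB_DR_MODE_OTG:
		return gpio_read();   /* only OTG consults the ID pin */
	case USB_DR_MODE_HOST:
		return 0;             /* fixed host role */
	case USB_DR_MODE_PERIPHERAL:
	default:
		return 1;             /* fixed peripheral role */
	}
}

int main(void)
{
	printf("host id_det=%d, otg id_det=%d\n",
	       get_id_det(USB_DR_MODE_HOST), get_id_det(USB_DR_MODE_OTG));
	return 0;
}
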
diff --git a/drivers/phy/phy-sun9i-usb.c b/drivers/phy/phy-sun9i-usb.c
index ac4f31abefe3..28fce4bce638 100644
--- a/drivers/phy/phy-sun9i-usb.c
+++ b/drivers/phy/phy-sun9i-usb.c
@@ -141,9 +141,9 @@ static int sun9i_usb_phy_probe(struct platform_device *pdev)
141 } 141 }
142 142
143 phy->hsic_clk = devm_clk_get(dev, "hsic_12M"); 143 phy->hsic_clk = devm_clk_get(dev, "hsic_12M");
144 if (IS_ERR(phy->clk)) { 144 if (IS_ERR(phy->hsic_clk)) {
145 dev_err(dev, "failed to get hsic_12M clock\n"); 145 dev_err(dev, "failed to get hsic_12M clock\n");
146 return PTR_ERR(phy->clk); 146 return PTR_ERR(phy->hsic_clk);
147 } 147 }
148 148
149 phy->reset = devm_reset_control_get(dev, "hsic"); 149 phy->reset = devm_reset_control_get(dev, "hsic");
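
The sun9i hunk above fixes a classic copy/paste error: the probe requested hsic_clk but then error-checked (and returned the error of) phy->clk, presumably fetched earlier, so a missing hsic_12M clock went unnoticed. A small standalone model of the corrected pattern, with NULL standing in for IS_ERR() (stub types, not the kernel clk API):

#include <stdio.h>

struct clk { int rate; };

/* stub: returns NULL (standing in for an IS_ERR() pointer) when absent */
static struct clk *clk_get_stub(const char *name, int present)
{
	static struct clk c = { 12000000 };
	(void)name;
	return present ? &c : NULL;
}

int main(void)
{
	struct clk *clk = clk_get_stub("phy", 1);
	struct clk *hsic_clk = clk_get_stub("hsic_12M", 0);

	/* the bug tested clk here, silently accepting a missing hsic_clk */
	if (!hsic_clk) {
		fprintf(stderr, "failed to get hsic_12M clock\n");
		return 1;
	}
	(void)clk;
	return 0;
}
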
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index 5749a4eee746..0fe8fad25e4d 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -1539,12 +1539,11 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
1539 offset += range->npins; 1539 offset += range->npins;
1540 } 1540 }
1541 1541
1542 /* Mask and clear all interrupts */ 1542 /* Clear all interrupts */
1543 chv_writel(0, pctrl->regs + CHV_INTMASK);
1544 chv_writel(0xffff, pctrl->regs + CHV_INTSTAT); 1543 chv_writel(0xffff, pctrl->regs + CHV_INTSTAT);
1545 1544
1546 ret = gpiochip_irqchip_add(chip, &chv_gpio_irqchip, 0, 1545 ret = gpiochip_irqchip_add(chip, &chv_gpio_irqchip, 0,
1547 handle_simple_irq, IRQ_TYPE_NONE); 1546 handle_bad_irq, IRQ_TYPE_NONE);
1548 if (ret) { 1547 if (ret) {
1549 dev_err(pctrl->dev, "failed to add IRQ chip\n"); 1548 dev_err(pctrl->dev, "failed to add IRQ chip\n");
1550 goto fail; 1549 goto fail;
diff --git a/drivers/pinctrl/intel/pinctrl-merrifield.c b/drivers/pinctrl/intel/pinctrl-merrifield.c
index eb4990ff26ca..7fb765642ee7 100644
--- a/drivers/pinctrl/intel/pinctrl-merrifield.c
+++ b/drivers/pinctrl/intel/pinctrl-merrifield.c
@@ -11,6 +11,7 @@
11 11
12#include <linux/bitops.h> 12#include <linux/bitops.h>
13#include <linux/err.h> 13#include <linux/err.h>
14#include <linux/io.h>
14#include <linux/module.h> 15#include <linux/module.h>
15#include <linux/platform_device.h> 16#include <linux/platform_device.h>
16#include <linux/pinctrl/pinconf.h> 17#include <linux/pinctrl/pinconf.h>
diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c
index 11623c6b0cb3..44e69c963f5d 100644
--- a/drivers/pinctrl/meson/pinctrl-meson.c
+++ b/drivers/pinctrl/meson/pinctrl-meson.c
@@ -727,13 +727,7 @@ static int meson_pinctrl_probe(struct platform_device *pdev)
727 return PTR_ERR(pc->pcdev); 727 return PTR_ERR(pc->pcdev);
728 } 728 }
729 729
730 ret = meson_gpiolib_register(pc); 730 return meson_gpiolib_register(pc);
731 if (ret) {
732 pinctrl_unregister(pc->pcdev);
733 return ret;
734 }
735
736 return 0;
737} 731}
738 732
739static struct platform_driver meson_pinctrl_driver = { 733static struct platform_driver meson_pinctrl_driver = {
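
Both this meson hunk and the pistachio hunk further down drop the manual pinctrl_unregister() on failure and return the registration result directly. That only makes sense if pc->pcdev is device-managed (a devm_pinctrl_register() conversion, which is an assumption here — the hunk does not show it), so the core releases it when probe fails. A userspace model of that release-by-the-core idea (all names illustrative):

#include <stdio.h>

/* in the kernel this release would sit on the device's devres list,
 * queued by devm_pinctrl_register() and run by the core on failure */
static void core_release_pinctrl(void) { puts("core unregisters pinctrl"); }

static int gpiolib_register(int fail) { return fail ? -1 : 0; }

static int probe(int fail)
{
	int ret = gpiolib_register(fail);

	if (ret)
		core_release_pinctrl();   /* driver code no longer does this */
	return ret;
}

int main(void) { return probe(1) ? 1 : 0; }
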
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index 634b4d30eefb..b3e772390ab6 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -43,17 +43,6 @@ static int amd_gpio_direction_input(struct gpio_chip *gc, unsigned offset)
43 43
44 spin_lock_irqsave(&gpio_dev->lock, flags); 44 spin_lock_irqsave(&gpio_dev->lock, flags);
45 pin_reg = readl(gpio_dev->base + offset * 4); 45 pin_reg = readl(gpio_dev->base + offset * 4);
46 /*
47 * Suppose BIOS or Bootloader sets specific debounce for the
48 * GPIO. if not, set debounce to be 2.75ms and remove glitch.
49 */
50 if ((pin_reg & DB_TMR_OUT_MASK) == 0) {
51 pin_reg |= 0xf;
52 pin_reg |= BIT(DB_TMR_OUT_UNIT_OFF);
53 pin_reg |= DB_TYPE_REMOVE_GLITCH << DB_CNTRL_OFF;
54 pin_reg &= ~BIT(DB_TMR_LARGE_OFF);
55 }
56
57 pin_reg &= ~BIT(OUTPUT_ENABLE_OFF); 46 pin_reg &= ~BIT(OUTPUT_ENABLE_OFF);
58 writel(pin_reg, gpio_dev->base + offset * 4); 47 writel(pin_reg, gpio_dev->base + offset * 4);
59 spin_unlock_irqrestore(&gpio_dev->lock, flags); 48 spin_unlock_irqrestore(&gpio_dev->lock, flags);
@@ -326,15 +315,6 @@ static void amd_gpio_irq_enable(struct irq_data *d)
326 315
327 spin_lock_irqsave(&gpio_dev->lock, flags); 316 spin_lock_irqsave(&gpio_dev->lock, flags);
328 pin_reg = readl(gpio_dev->base + (d->hwirq)*4); 317 pin_reg = readl(gpio_dev->base + (d->hwirq)*4);
329 /*
330 Suppose BIOS or Bootloader sets specific debounce for the
331 GPIO. if not, set debounce to be 2.75ms.
332 */
333 if ((pin_reg & DB_TMR_OUT_MASK) == 0) {
334 pin_reg |= 0xf;
335 pin_reg |= BIT(DB_TMR_OUT_UNIT_OFF);
336 pin_reg &= ~BIT(DB_TMR_LARGE_OFF);
337 }
338 pin_reg |= BIT(INTERRUPT_ENABLE_OFF); 318 pin_reg |= BIT(INTERRUPT_ENABLE_OFF);
339 pin_reg |= BIT(INTERRUPT_MASK_OFF); 319 pin_reg |= BIT(INTERRUPT_MASK_OFF);
340 writel(pin_reg, gpio_dev->base + (d->hwirq)*4); 320 writel(pin_reg, gpio_dev->base + (d->hwirq)*4);
diff --git a/drivers/pinctrl/pinctrl-pistachio.c b/drivers/pinctrl/pinctrl-pistachio.c
index c6d410ef8de0..55375b1b3cc8 100644
--- a/drivers/pinctrl/pinctrl-pistachio.c
+++ b/drivers/pinctrl/pinctrl-pistachio.c
@@ -809,17 +809,17 @@ static const struct pistachio_pin_group pistachio_groups[] = {
809 PADS_FUNCTION_SELECT2, 12, 0x3), 809 PADS_FUNCTION_SELECT2, 12, 0x3),
810 MFIO_MUX_PIN_GROUP(83, MIPS_PLL_LOCK, MIPS_TRACE_DATA, USB_DEBUG, 810 MFIO_MUX_PIN_GROUP(83, MIPS_PLL_LOCK, MIPS_TRACE_DATA, USB_DEBUG,
811 PADS_FUNCTION_SELECT2, 14, 0x3), 811 PADS_FUNCTION_SELECT2, 14, 0x3),
812 MFIO_MUX_PIN_GROUP(84, SYS_PLL_LOCK, MIPS_TRACE_DATA, USB_DEBUG, 812 MFIO_MUX_PIN_GROUP(84, AUDIO_PLL_LOCK, MIPS_TRACE_DATA, USB_DEBUG,
813 PADS_FUNCTION_SELECT2, 16, 0x3), 813 PADS_FUNCTION_SELECT2, 16, 0x3),
814 MFIO_MUX_PIN_GROUP(85, WIFI_PLL_LOCK, MIPS_TRACE_DATA, SDHOST_DEBUG, 814 MFIO_MUX_PIN_GROUP(85, RPU_V_PLL_LOCK, MIPS_TRACE_DATA, SDHOST_DEBUG,
815 PADS_FUNCTION_SELECT2, 18, 0x3), 815 PADS_FUNCTION_SELECT2, 18, 0x3),
816 MFIO_MUX_PIN_GROUP(86, BT_PLL_LOCK, MIPS_TRACE_DATA, SDHOST_DEBUG, 816 MFIO_MUX_PIN_GROUP(86, RPU_L_PLL_LOCK, MIPS_TRACE_DATA, SDHOST_DEBUG,
817 PADS_FUNCTION_SELECT2, 20, 0x3), 817 PADS_FUNCTION_SELECT2, 20, 0x3),
818 MFIO_MUX_PIN_GROUP(87, RPU_V_PLL_LOCK, DREQ2, SOCIF_DEBUG, 818 MFIO_MUX_PIN_GROUP(87, SYS_PLL_LOCK, DREQ2, SOCIF_DEBUG,
819 PADS_FUNCTION_SELECT2, 22, 0x3), 819 PADS_FUNCTION_SELECT2, 22, 0x3),
820 MFIO_MUX_PIN_GROUP(88, RPU_L_PLL_LOCK, DREQ3, SOCIF_DEBUG, 820 MFIO_MUX_PIN_GROUP(88, WIFI_PLL_LOCK, DREQ3, SOCIF_DEBUG,
821 PADS_FUNCTION_SELECT2, 24, 0x3), 821 PADS_FUNCTION_SELECT2, 24, 0x3),
822 MFIO_MUX_PIN_GROUP(89, AUDIO_PLL_LOCK, DREQ4, DREQ5, 822 MFIO_MUX_PIN_GROUP(89, BT_PLL_LOCK, DREQ4, DREQ5,
823 PADS_FUNCTION_SELECT2, 26, 0x3), 823 PADS_FUNCTION_SELECT2, 26, 0x3),
824 PIN_GROUP(TCK, "tck"), 824 PIN_GROUP(TCK, "tck"),
825 PIN_GROUP(TRSTN, "trstn"), 825 PIN_GROUP(TRSTN, "trstn"),
@@ -1432,7 +1432,6 @@ static int pistachio_pinctrl_probe(struct platform_device *pdev)
1432{ 1432{
1433 struct pistachio_pinctrl *pctl; 1433 struct pistachio_pinctrl *pctl;
1434 struct resource *res; 1434 struct resource *res;
1435 int ret;
1436 1435
1437 pctl = devm_kzalloc(&pdev->dev, sizeof(*pctl), GFP_KERNEL); 1436 pctl = devm_kzalloc(&pdev->dev, sizeof(*pctl), GFP_KERNEL);
1438 if (!pctl) 1437 if (!pctl)
@@ -1464,13 +1463,7 @@ static int pistachio_pinctrl_probe(struct platform_device *pdev)
1464 return PTR_ERR(pctl->pctldev); 1463 return PTR_ERR(pctl->pctldev);
1465 } 1464 }
1466 1465
1467 ret = pistachio_gpio_register(pctl); 1466 return pistachio_gpio_register(pctl);
1468 if (ret < 0) {
1469 pinctrl_unregister(pctl->pctldev);
1470 return ret;
1471 }
1472
1473 return 0;
1474} 1467}
1475 1468
1476static struct platform_driver pistachio_pinctrl_driver = { 1469static struct platform_driver pistachio_pinctrl_driver = {
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a23.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a23.c
index ce483b03a263..f9d661e5c14a 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun8i-a23.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a23.c
@@ -485,12 +485,12 @@ static const struct sunxi_desc_pin sun8i_a23_pins[] = {
485 SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 8), 485 SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 8),
486 SUNXI_FUNCTION(0x0, "gpio_in"), 486 SUNXI_FUNCTION(0x0, "gpio_in"),
487 SUNXI_FUNCTION(0x1, "gpio_out"), 487 SUNXI_FUNCTION(0x1, "gpio_out"),
488 SUNXI_FUNCTION(0x2, "uart2"), /* RTS */ 488 SUNXI_FUNCTION(0x2, "uart1"), /* RTS */
489 SUNXI_FUNCTION_IRQ_BANK(0x4, 2, 8)), /* PG_EINT8 */ 489 SUNXI_FUNCTION_IRQ_BANK(0x4, 2, 8)), /* PG_EINT8 */
490 SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 9), 490 SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 9),
491 SUNXI_FUNCTION(0x0, "gpio_in"), 491 SUNXI_FUNCTION(0x0, "gpio_in"),
492 SUNXI_FUNCTION(0x1, "gpio_out"), 492 SUNXI_FUNCTION(0x1, "gpio_out"),
493 SUNXI_FUNCTION(0x2, "uart2"), /* CTS */ 493 SUNXI_FUNCTION(0x2, "uart1"), /* CTS */
494 SUNXI_FUNCTION_IRQ_BANK(0x4, 2, 9)), /* PG_EINT9 */ 494 SUNXI_FUNCTION_IRQ_BANK(0x4, 2, 9)), /* PG_EINT9 */
495 SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 10), 495 SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 10),
496 SUNXI_FUNCTION(0x0, "gpio_in"), 496 SUNXI_FUNCTION(0x0, "gpio_in"),
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c
index 3040abe6f73a..3131cac2b76f 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c
@@ -407,12 +407,12 @@ static const struct sunxi_desc_pin sun8i_a33_pins[] = {
407 SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 8), 407 SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 8),
408 SUNXI_FUNCTION(0x0, "gpio_in"), 408 SUNXI_FUNCTION(0x0, "gpio_in"),
409 SUNXI_FUNCTION(0x1, "gpio_out"), 409 SUNXI_FUNCTION(0x1, "gpio_out"),
410 SUNXI_FUNCTION(0x2, "uart2"), /* RTS */ 410 SUNXI_FUNCTION(0x2, "uart1"), /* RTS */
411 SUNXI_FUNCTION_IRQ_BANK(0x4, 1, 8)), /* PG_EINT8 */ 411 SUNXI_FUNCTION_IRQ_BANK(0x4, 1, 8)), /* PG_EINT8 */
412 SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 9), 412 SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 9),
413 SUNXI_FUNCTION(0x0, "gpio_in"), 413 SUNXI_FUNCTION(0x0, "gpio_in"),
414 SUNXI_FUNCTION(0x1, "gpio_out"), 414 SUNXI_FUNCTION(0x1, "gpio_out"),
415 SUNXI_FUNCTION(0x2, "uart2"), /* CTS */ 415 SUNXI_FUNCTION(0x2, "uart1"), /* CTS */
416 SUNXI_FUNCTION_IRQ_BANK(0x4, 1, 9)), /* PG_EINT9 */ 416 SUNXI_FUNCTION_IRQ_BANK(0x4, 1, 9)), /* PG_EINT9 */
417 SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 10), 417 SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 10),
418 SUNXI_FUNCTION(0x0, "gpio_in"), 418 SUNXI_FUNCTION(0x0, "gpio_in"),
diff --git a/drivers/platform/olpc/olpc-ec.c b/drivers/platform/olpc/olpc-ec.c
index f99b183d5296..374a8028fec7 100644
--- a/drivers/platform/olpc/olpc-ec.c
+++ b/drivers/platform/olpc/olpc-ec.c
@@ -1,6 +1,8 @@
1/* 1/*
2 * Generic driver for the OLPC Embedded Controller. 2 * Generic driver for the OLPC Embedded Controller.
3 * 3 *
4 * Author: Andres Salomon <dilinger@queued.net>
5 *
4 * Copyright (C) 2011-2012 One Laptop per Child Foundation. 6 * Copyright (C) 2011-2012 One Laptop per Child Foundation.
5 * 7 *
6 * Licensed under the GPL v2 or later. 8 * Licensed under the GPL v2 or later.
@@ -12,7 +14,7 @@
12#include <linux/platform_device.h> 14#include <linux/platform_device.h>
13#include <linux/slab.h> 15#include <linux/slab.h>
14#include <linux/workqueue.h> 16#include <linux/workqueue.h>
15#include <linux/module.h> 17#include <linux/init.h>
16#include <linux/list.h> 18#include <linux/list.h>
17#include <linux/olpc-ec.h> 19#include <linux/olpc-ec.h>
18#include <asm/olpc.h> 20#include <asm/olpc.h>
@@ -326,8 +328,4 @@ static int __init olpc_ec_init_module(void)
326{ 328{
327 return platform_driver_register(&olpc_ec_plat_driver); 329 return platform_driver_register(&olpc_ec_plat_driver);
328} 330}
329
330arch_initcall(olpc_ec_init_module); 331arch_initcall(olpc_ec_init_module);
331
332MODULE_AUTHOR("Andres Salomon <dilinger@queued.net>");
333MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c
index d2bc092defd7..da2fe18162e1 100644
--- a/drivers/platform/x86/dell-wmi.c
+++ b/drivers/platform/x86/dell-wmi.c
@@ -110,8 +110,8 @@ static const struct key_entry dell_wmi_keymap_type_0000[] __initconst = {
110 /* BIOS error detected */ 110 /* BIOS error detected */
111 { KE_IGNORE, 0xe00d, { KEY_RESERVED } }, 111 { KE_IGNORE, 0xe00d, { KEY_RESERVED } },
112 112
113 /* Unknown, defined in ACPI DSDT */ 113 /* Battery was removed or inserted */
114 /* { KE_IGNORE, 0xe00e, { KEY_RESERVED } }, */ 114 { KE_IGNORE, 0xe00e, { KEY_RESERVED } },
115 115
116 /* Wifi Catcher */ 116 /* Wifi Catcher */
117 { KE_KEY, 0xe011, { KEY_PROG2 } }, 117 { KE_KEY, 0xe011, { KEY_PROG2 } },
diff --git a/drivers/platform/x86/intel_pmic_gpio.c b/drivers/platform/x86/intel_pmic_gpio.c
index 63b371d6ee55..91ae58510d92 100644
--- a/drivers/platform/x86/intel_pmic_gpio.c
+++ b/drivers/platform/x86/intel_pmic_gpio.c
@@ -1,6 +1,8 @@
1/* Moorestown PMIC GPIO (access through IPC) driver 1/* Moorestown PMIC GPIO (access through IPC) driver
2 * Copyright (c) 2008 - 2009, Intel Corporation. 2 * Copyright (c) 2008 - 2009, Intel Corporation.
3 * 3 *
4 * Author: Alek Du <alek.du@intel.com>
5 *
4 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as 7 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation. 8 * published by the Free Software Foundation.
@@ -21,7 +23,6 @@
21 23
22#define pr_fmt(fmt) "%s: " fmt, __func__ 24#define pr_fmt(fmt) "%s: " fmt, __func__
23 25
24#include <linux/module.h>
25#include <linux/kernel.h> 26#include <linux/kernel.h>
26#include <linux/interrupt.h> 27#include <linux/interrupt.h>
27#include <linux/delay.h> 28#include <linux/delay.h>
@@ -322,9 +323,4 @@ static int __init platform_pmic_gpio_init(void)
322{ 323{
323 return platform_driver_register(&platform_pmic_gpio_driver); 324 return platform_driver_register(&platform_pmic_gpio_driver);
324} 325}
325
326subsys_initcall(platform_pmic_gpio_init); 326subsys_initcall(platform_pmic_gpio_init);
327
328MODULE_AUTHOR("Alek Du <alek.du@intel.com>");
329MODULE_DESCRIPTION("Intel Moorestown PMIC GPIO driver");
330MODULE_LICENSE("GPL v2");
diff --git a/drivers/power/max17042_battery.c b/drivers/power/max17042_battery.c
index 9c65f134d447..da7a75f82489 100644
--- a/drivers/power/max17042_battery.c
+++ b/drivers/power/max17042_battery.c
@@ -457,13 +457,16 @@ static inline void max17042_write_model_data(struct max17042_chip *chip,
457} 457}
458 458
459static inline void max17042_read_model_data(struct max17042_chip *chip, 459static inline void max17042_read_model_data(struct max17042_chip *chip,
460 u8 addr, u32 *data, int size) 460 u8 addr, u16 *data, int size)
461{ 461{
462 struct regmap *map = chip->regmap; 462 struct regmap *map = chip->regmap;
463 int i; 463 int i;
464 u32 tmp;
464 465
465 for (i = 0; i < size; i++) 466 for (i = 0; i < size; i++) {
466 regmap_read(map, addr + i, &data[i]); 467 regmap_read(map, addr + i, &tmp);
468 data[i] = (u16)tmp;
469 }
467} 470}
468 471
469static inline int max17042_model_data_compare(struct max17042_chip *chip, 472static inline int max17042_model_data_compare(struct max17042_chip *chip,
@@ -486,7 +489,7 @@ static int max17042_init_model(struct max17042_chip *chip)
486{ 489{
487 int ret; 490 int ret;
488 int table_size = ARRAY_SIZE(chip->pdata->config_data->cell_char_tbl); 491 int table_size = ARRAY_SIZE(chip->pdata->config_data->cell_char_tbl);
489 u32 *temp_data; 492 u16 *temp_data;
490 493
491 temp_data = kcalloc(table_size, sizeof(*temp_data), GFP_KERNEL); 494 temp_data = kcalloc(table_size, sizeof(*temp_data), GFP_KERNEL);
492 if (!temp_data) 495 if (!temp_data)
@@ -501,7 +504,7 @@ static int max17042_init_model(struct max17042_chip *chip)
501 ret = max17042_model_data_compare( 504 ret = max17042_model_data_compare(
502 chip, 505 chip,
503 chip->pdata->config_data->cell_char_tbl, 506 chip->pdata->config_data->cell_char_tbl,
504 (u16 *)temp_data, 507 temp_data,
505 table_size); 508 table_size);
506 509
507 max10742_lock_model(chip); 510 max10742_lock_model(chip);
@@ -514,7 +517,7 @@ static int max17042_verify_model_lock(struct max17042_chip *chip)
514{ 517{
515 int i; 518 int i;
516 int table_size = ARRAY_SIZE(chip->pdata->config_data->cell_char_tbl); 519 int table_size = ARRAY_SIZE(chip->pdata->config_data->cell_char_tbl);
517 u32 *temp_data; 520 u16 *temp_data;
518 int ret = 0; 521 int ret = 0;
519 522
520 temp_data = kcalloc(table_size, sizeof(*temp_data), GFP_KERNEL); 523 temp_data = kcalloc(table_size, sizeof(*temp_data), GFP_KERNEL);
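
The max17042 change fixes a type mismatch: the model data was read straight into a u32 array that callers then cast to u16 *, which is wrong on both element size and layout. regmap_read() writes a full unsigned int, so the fix reads into a scratch temporary and narrows explicitly into a real u16 array. A standalone model (reg_read() is an illustrative stub for regmap_read()):

#include <stdint.h>
#include <stdio.h>

static int reg_read(unsigned int addr, unsigned int *val)
{
	*val = 0xABCD0000u | addr;   /* pretend hardware register */
	return 0;
}

static void read_model_data(uint8_t addr, uint16_t *data, int size)
{
	unsigned int tmp;

	for (int i = 0; i < size; i++) {
		reg_read(addr + i, &tmp);
		data[i] = (uint16_t)tmp;   /* explicit narrowing, low 16 bits */
	}
}

int main(void)
{
	uint16_t buf[4];

	read_model_data(0x80, buf, 4);
	printf("buf[0]=0x%04x\n", buf[0]);
	return 0;
}
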
diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig
index 3bfac539334b..c74c3f67b8da 100644
--- a/drivers/power/reset/Kconfig
+++ b/drivers/power/reset/Kconfig
@@ -200,8 +200,8 @@ config REBOOT_MODE
200config SYSCON_REBOOT_MODE 200config SYSCON_REBOOT_MODE
201 tristate "Generic SYSCON regmap reboot mode driver" 201 tristate "Generic SYSCON regmap reboot mode driver"
202 depends on OF 202 depends on OF
203 depends on MFD_SYSCON
203 select REBOOT_MODE 204 select REBOOT_MODE
204 select MFD_SYSCON
205 help 205 help
206 Saying y here will enable the reboot mode driver. This will 206 Saying y here will enable the reboot mode driver. This will
207 get reboot mode arguments and store them in the SYSCON mapped 207 get reboot mode arguments and store them in the SYSCON mapped
diff --git a/drivers/power/reset/hisi-reboot.c b/drivers/power/reset/hisi-reboot.c
index 9ab7f562a83b..f69387e12c1e 100644
--- a/drivers/power/reset/hisi-reboot.c
+++ b/drivers/power/reset/hisi-reboot.c
@@ -53,13 +53,16 @@ static int hisi_reboot_probe(struct platform_device *pdev)
53 53
54 if (of_property_read_u32(np, "reboot-offset", &reboot_offset) < 0) { 54 if (of_property_read_u32(np, "reboot-offset", &reboot_offset) < 0) {
55 pr_err("failed to find reboot-offset property\n"); 55 pr_err("failed to find reboot-offset property\n");
56 iounmap(base);
56 return -EINVAL; 57 return -EINVAL;
57 } 58 }
58 59
59 err = register_restart_handler(&hisi_restart_nb); 60 err = register_restart_handler(&hisi_restart_nb);
60 if (err) 61 if (err) {
61 dev_err(&pdev->dev, "cannot register restart handler (err=%d)\n", 62 dev_err(&pdev->dev, "cannot register restart handler (err=%d)\n",
62 err); 63 err);
64 iounmap(base);
65 }
63 66
64 return err; 67 return err;
65} 68}
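
The hisi-reboot hunk plugs two leaks: base is an ioremap()-style mapping created earlier in probe (not visible in the hunk), so every exit after it — the missing-property return and the failed restart-handler registration — must iounmap() it. A userspace model of the paired acquire/release on error paths, with malloc/free standing in for ioremap/iounmap:

#include <stdlib.h>

static void *map(void)     { return malloc(16); }   /* ioremap stand-in */
static void unmap(void *p) { free(p); }             /* iounmap stand-in */

static int probe(int fail_property, int fail_register)
{
	void *base = map();

	if (!base)
		return -1;
	if (fail_property) {
		unmap(base);          /* was leaked before the fix */
		return -1;
	}
	if (fail_register) {
		unmap(base);          /* second leak fixed by the patch */
		return -1;
	}
	return 0;
}

int main(void) { return probe(1, 0) ? 1 : 0; }
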
diff --git a/drivers/power/tps65217_charger.c b/drivers/power/tps65217_charger.c
index 73dfae41def8..4c56e54af6ac 100644
--- a/drivers/power/tps65217_charger.c
+++ b/drivers/power/tps65217_charger.c
@@ -206,6 +206,7 @@ static int tps65217_charger_probe(struct platform_device *pdev)
206 if (!charger) 206 if (!charger)
207 return -ENOMEM; 207 return -ENOMEM;
208 208
209 platform_set_drvdata(pdev, charger);
209 charger->tps = tps; 210 charger->tps = tps;
210 charger->dev = &pdev->dev; 211 charger->dev = &pdev->dev;
211 212
diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c
index 32f0f014a067..9d19b9a62011 100644
--- a/drivers/rapidio/devices/tsi721.c
+++ b/drivers/rapidio/devices/tsi721.c
@@ -1161,7 +1161,7 @@ static int tsi721_rio_map_inb_mem(struct rio_mport *mport, dma_addr_t lstart,
1161 } else if (ibw_start < (ib_win->rstart + ib_win->size) && 1161 } else if (ibw_start < (ib_win->rstart + ib_win->size) &&
1162 (ibw_start + ibw_size) > ib_win->rstart) { 1162 (ibw_start + ibw_size) > ib_win->rstart) {
1163 /* Return error if address translation involved */ 1163 /* Return error if address translation involved */
1164 if (direct && ib_win->xlat) { 1164 if (!direct || ib_win->xlat) {
1165 ret = -EFAULT; 1165 ret = -EFAULT;
1166 break; 1166 break;
1167 } 1167 }
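
The tsi721 fix widens a too-narrow predicate: an overlapping inbound-window request may only be merged when it is a direct mapping with no address translation. The old test (direct && ib_win->xlat) rejected only translated direct requests and let indirect overlaps through. A standalone truth-table check of the corrected condition:

#include <stdio.h>

/* merge is allowed only for direct, untranslated overlaps;
 * the corrected error test is (!direct || xlat) */
static int overlap_ok(int direct, int xlat)
{
	return !(!direct || xlat);
}

int main(void)
{
	for (int d = 0; d <= 1; d++)
		for (int x = 0; x <= 1; x++)
			printf("direct=%d xlat=%d -> %s\n", d, x,
			       overlap_ok(d, x) ? "merge" : "-EFAULT");
	return 0;
}
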
diff --git a/drivers/rapidio/rio_cm.c b/drivers/rapidio/rio_cm.c
index cecc15a880de..3fa17ac8df54 100644
--- a/drivers/rapidio/rio_cm.c
+++ b/drivers/rapidio/rio_cm.c
@@ -1080,8 +1080,8 @@ static int riocm_send_ack(struct rio_channel *ch)
1080static struct rio_channel *riocm_ch_accept(u16 ch_id, u16 *new_ch_id, 1080static struct rio_channel *riocm_ch_accept(u16 ch_id, u16 *new_ch_id,
1081 long timeout) 1081 long timeout)
1082{ 1082{
1083 struct rio_channel *ch = NULL; 1083 struct rio_channel *ch;
1084 struct rio_channel *new_ch = NULL; 1084 struct rio_channel *new_ch;
1085 struct conn_req *req; 1085 struct conn_req *req;
1086 struct cm_peer *peer; 1086 struct cm_peer *peer;
1087 int found = 0; 1087 int found = 0;
@@ -1155,6 +1155,7 @@ static struct rio_channel *riocm_ch_accept(u16 ch_id, u16 *new_ch_id,
1155 1155
1156 spin_unlock_bh(&ch->lock); 1156 spin_unlock_bh(&ch->lock);
1157 riocm_put_channel(ch); 1157 riocm_put_channel(ch);
1158 ch = NULL;
1158 kfree(req); 1159 kfree(req);
1159 1160
1160 down_read(&rdev_sem); 1161 down_read(&rdev_sem);
@@ -1172,7 +1173,7 @@ static struct rio_channel *riocm_ch_accept(u16 ch_id, u16 *new_ch_id,
1172 if (!found) { 1173 if (!found) {
1173 /* If peer device object not found, simply ignore the request */ 1174 /* If peer device object not found, simply ignore the request */
1174 err = -ENODEV; 1175 err = -ENODEV;
1175 goto err_nodev; 1176 goto err_put_new_ch;
1176 } 1177 }
1177 1178
1178 new_ch->rdev = peer->rdev; 1179 new_ch->rdev = peer->rdev;
@@ -1184,15 +1185,16 @@ static struct rio_channel *riocm_ch_accept(u16 ch_id, u16 *new_ch_id,
1184 1185
1185 *new_ch_id = new_ch->id; 1186 *new_ch_id = new_ch->id;
1186 return new_ch; 1187 return new_ch;
1188
1189err_put_new_ch:
1190 spin_lock_bh(&idr_lock);
1191 idr_remove(&ch_idr, new_ch->id);
1192 spin_unlock_bh(&idr_lock);
1193 riocm_put_channel(new_ch);
1194
1187err_put: 1195err_put:
1188 riocm_put_channel(ch); 1196 if (ch)
1189err_nodev: 1197 riocm_put_channel(ch);
1190 if (new_ch) {
1191 spin_lock_bh(&idr_lock);
1192 idr_remove(&ch_idr, new_ch->id);
1193 spin_unlock_bh(&idr_lock);
1194 riocm_put_channel(new_ch);
1195 }
1196 *new_ch_id = 0; 1198 *new_ch_id = 0;
1197 return ERR_PTR(err); 1199 return ERR_PTR(err);
1198} 1200}
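
The riocm_ch_accept() rework replaces the old tangle — an err_nodev label plus an if (new_ch) guard — with ordered labels that unwind exactly what exists, and it NULLs ch once its reference has been dropped after the hand-off so the shared err_put label stays safe. A compact standalone model of that unwind shape, with malloc/free standing in for channel refcounting:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	char *ch = malloc(8);      /* listening channel */
	char *new_ch;
	int found = 0;             /* peer lookup result */
	int err;

	if (!ch)
		return 1;

	new_ch = malloc(8);        /* accepted channel */
	if (!new_ch) {
		err = -1;
		goto err_put;
	}

	free(ch);                  /* reference dropped after hand-off... */
	ch = NULL;                 /* ...so the shared label below is safe */

	if (!found) {
		err = -2;          /* -ENODEV in the kernel version */
		goto err_put_new_ch;
	}
	return 0;

err_put_new_ch:
	free(new_ch);
err_put:
	if (ch)
		free(ch);
	fprintf(stderr, "accept failed: %d\n", err);
	return 1;
}
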
diff --git a/drivers/regulator/max14577-regulator.c b/drivers/regulator/max14577-regulator.c
index b2daa6641417..c9ff26199711 100644
--- a/drivers/regulator/max14577-regulator.c
+++ b/drivers/regulator/max14577-regulator.c
@@ -2,7 +2,7 @@
2 * max14577.c - Regulator driver for the Maxim 14577/77836 2 * max14577.c - Regulator driver for the Maxim 14577/77836
3 * 3 *
4 * Copyright (C) 2013,2014 Samsung Electronics 4 * Copyright (C) 2013,2014 Samsung Electronics
5 * Krzysztof Kozlowski <k.kozlowski@samsung.com> 5 * Krzysztof Kozlowski <krzk@kernel.org>
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by 8 * it under the terms of the GNU General Public License as published by
@@ -331,7 +331,7 @@ static void __exit max14577_regulator_exit(void)
331} 331}
332module_exit(max14577_regulator_exit); 332module_exit(max14577_regulator_exit);
333 333
334MODULE_AUTHOR("Krzysztof Kozlowski <k.kozlowski@samsung.com>"); 334MODULE_AUTHOR("Krzysztof Kozlowski <krzk@kernel.org>");
335MODULE_DESCRIPTION("Maxim 14577/77836 regulator driver"); 335MODULE_DESCRIPTION("Maxim 14577/77836 regulator driver");
336MODULE_LICENSE("GPL"); 336MODULE_LICENSE("GPL");
337MODULE_ALIAS("platform:max14577-regulator"); 337MODULE_ALIAS("platform:max14577-regulator");
diff --git a/drivers/regulator/max77693-regulator.c b/drivers/regulator/max77693-regulator.c
index de730fd3f8a5..cfbb9512e486 100644
--- a/drivers/regulator/max77693-regulator.c
+++ b/drivers/regulator/max77693-regulator.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * Copyright (C) 2013-2015 Samsung Electronics 4 * Copyright (C) 2013-2015 Samsung Electronics
5 * Jonghwa Lee <jonghwa3.lee@samsung.com> 5 * Jonghwa Lee <jonghwa3.lee@samsung.com>
6 * Krzysztof Kozlowski <k.kozlowski.k@gmail.com> 6 * Krzysztof Kozlowski <krzk@kernel.org>
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by 9 * it under the terms of the GNU General Public License as published by
@@ -314,5 +314,5 @@ module_exit(max77693_pmic_cleanup);
314 314
315MODULE_DESCRIPTION("MAXIM 77693/77843 regulator driver"); 315MODULE_DESCRIPTION("MAXIM 77693/77843 regulator driver");
316MODULE_AUTHOR("Jonghwa Lee <jonghwa3.lee@samsung.com>"); 316MODULE_AUTHOR("Jonghwa Lee <jonghwa3.lee@samsung.com>");
317MODULE_AUTHOR("Krzysztof Kozlowski <k.kozlowski.k@gmail.com>"); 317MODULE_AUTHOR("Krzysztof Kozlowski <krzk@kernel.org>");
318MODULE_LICENSE("GPL"); 318MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/qcom_smd-regulator.c b/drivers/regulator/qcom_smd-regulator.c
index 5022fa8d10c6..8ed46a9a55c8 100644
--- a/drivers/regulator/qcom_smd-regulator.c
+++ b/drivers/regulator/qcom_smd-regulator.c
@@ -178,20 +178,21 @@ static const struct regulator_desc pma8084_hfsmps = {
178static const struct regulator_desc pma8084_ftsmps = { 178static const struct regulator_desc pma8084_ftsmps = {
179 .linear_ranges = (struct regulator_linear_range[]) { 179 .linear_ranges = (struct regulator_linear_range[]) {
180 REGULATOR_LINEAR_RANGE(350000, 0, 184, 5000), 180 REGULATOR_LINEAR_RANGE(350000, 0, 184, 5000),
181 REGULATOR_LINEAR_RANGE(700000, 185, 339, 10000), 181 REGULATOR_LINEAR_RANGE(1280000, 185, 261, 10000),
182 }, 182 },
183 .n_linear_ranges = 2, 183 .n_linear_ranges = 2,
184 .n_voltages = 340, 184 .n_voltages = 262,
185 .ops = &rpm_smps_ldo_ops, 185 .ops = &rpm_smps_ldo_ops,
186}; 186};
187 187
188static const struct regulator_desc pma8084_pldo = { 188static const struct regulator_desc pma8084_pldo = {
189 .linear_ranges = (struct regulator_linear_range[]) { 189 .linear_ranges = (struct regulator_linear_range[]) {
190 REGULATOR_LINEAR_RANGE(750000, 0, 30, 25000), 190 REGULATOR_LINEAR_RANGE( 750000, 0, 63, 12500),
191 REGULATOR_LINEAR_RANGE(1500000, 31, 99, 50000), 191 REGULATOR_LINEAR_RANGE(1550000, 64, 126, 25000),
192 REGULATOR_LINEAR_RANGE(3100000, 127, 163, 50000),
192 }, 193 },
193 .n_linear_ranges = 2, 194 .n_linear_ranges = 3,
194 .n_voltages = 100, 195 .n_voltages = 164,
195 .ops = &rpm_smps_ldo_ops, 196 .ops = &rpm_smps_ldo_ops,
196}; 197};
197 198
@@ -221,29 +222,30 @@ static const struct regulator_desc pm8x41_hfsmps = {
221static const struct regulator_desc pm8841_ftsmps = { 222static const struct regulator_desc pm8841_ftsmps = {
222 .linear_ranges = (struct regulator_linear_range[]) { 223 .linear_ranges = (struct regulator_linear_range[]) {
223 REGULATOR_LINEAR_RANGE(350000, 0, 184, 5000), 224 REGULATOR_LINEAR_RANGE(350000, 0, 184, 5000),
224 REGULATOR_LINEAR_RANGE(700000, 185, 339, 10000), 225 REGULATOR_LINEAR_RANGE(1280000, 185, 261, 10000),
225 }, 226 },
226 .n_linear_ranges = 2, 227 .n_linear_ranges = 2,
227 .n_voltages = 340, 228 .n_voltages = 262,
228 .ops = &rpm_smps_ldo_ops, 229 .ops = &rpm_smps_ldo_ops,
229}; 230};
230 231
231static const struct regulator_desc pm8941_boost = { 232static const struct regulator_desc pm8941_boost = {
232 .linear_ranges = (struct regulator_linear_range[]) { 233 .linear_ranges = (struct regulator_linear_range[]) {
233 REGULATOR_LINEAR_RANGE(4000000, 0, 15, 100000), 234 REGULATOR_LINEAR_RANGE(4000000, 0, 30, 50000),
234 }, 235 },
235 .n_linear_ranges = 1, 236 .n_linear_ranges = 1,
236 .n_voltages = 16, 237 .n_voltages = 31,
237 .ops = &rpm_smps_ldo_ops, 238 .ops = &rpm_smps_ldo_ops,
238}; 239};
239 240
240static const struct regulator_desc pm8941_pldo = { 241static const struct regulator_desc pm8941_pldo = {
241 .linear_ranges = (struct regulator_linear_range[]) { 242 .linear_ranges = (struct regulator_linear_range[]) {
242 REGULATOR_LINEAR_RANGE( 750000, 0, 30, 25000), 243 REGULATOR_LINEAR_RANGE( 750000, 0, 63, 12500),
243 REGULATOR_LINEAR_RANGE(1500000, 31, 99, 50000), 244 REGULATOR_LINEAR_RANGE(1550000, 64, 126, 25000),
245 REGULATOR_LINEAR_RANGE(3100000, 127, 163, 50000),
244 }, 246 },
245 .n_linear_ranges = 2, 247 .n_linear_ranges = 3,
246 .n_voltages = 100, 248 .n_voltages = 164,
247 .ops = &rpm_smps_ldo_ops, 249 .ops = &rpm_smps_ldo_ops,
248}; 250};
249 251
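
The corrected pma8084/pm8941 pLDO tables follow the usual linear-range rule: selector s in [min_sel, max_sel] maps to min_uV + (s - min_sel) * step_uV, and n_voltages is the last max_sel plus one (here 163 + 1 = 164). A standalone check of the three new ranges:

#include <stdio.h>

struct range { int min_uV, min_sel, max_sel, step_uV; };

static const struct range pldo[] = {
	{  750000,   0,  63, 12500 },   /* tops out at 1537500 uV */
	{ 1550000,  64, 126, 25000 },   /* tops out at 3100000 uV */
	{ 3100000, 127, 163, 50000 },   /* tops out at 4900000 uV */
};

int main(void)
{
	for (int i = 0; i < 3; i++) {
		const struct range *r = &pldo[i];
		int top = r->min_uV + (r->max_sel - r->min_sel) * r->step_uV;

		printf("range %d: %d..%d uV\n", i, r->min_uV, top);
	}
	printf("n_voltages = %d\n", pldo[2].max_sel + 1);   /* 164 */
	return 0;
}
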
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 8973d34ce5ba..fb1b56a71475 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1643,9 +1643,18 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
1643 u8 *sense = NULL; 1643 u8 *sense = NULL;
1644 int expires; 1644 int expires;
1645 1645
1646 cqr = (struct dasd_ccw_req *) intparm;
1646 if (IS_ERR(irb)) { 1647 if (IS_ERR(irb)) {
1647 switch (PTR_ERR(irb)) { 1648 switch (PTR_ERR(irb)) {
1648 case -EIO: 1649 case -EIO:
1650 if (cqr && cqr->status == DASD_CQR_CLEAR_PENDING) {
1651 device = (struct dasd_device *) cqr->startdev;
1652 cqr->status = DASD_CQR_CLEARED;
1653 dasd_device_clear_timer(device);
1654 wake_up(&dasd_flush_wq);
1655 dasd_schedule_device_bh(device);
1656 return;
1657 }
1649 break; 1658 break;
1650 case -ETIMEDOUT: 1659 case -ETIMEDOUT:
1651 DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: " 1660 DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
@@ -1661,7 +1670,6 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
1661 } 1670 }
1662 1671
1663 now = get_tod_clock(); 1672 now = get_tod_clock();
1664 cqr = (struct dasd_ccw_req *) intparm;
1665 /* check for conditions that should be handled immediately */ 1673 /* check for conditions that should be handled immediately */
1666 if (!cqr || 1674 if (!cqr ||
1667 !(scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) && 1675 !(scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index fd2eff440098..98bbec44bcd0 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -5078,6 +5078,8 @@ static int dasd_eckd_read_message_buffer(struct dasd_device *device,
5078 return PTR_ERR(cqr); 5078 return PTR_ERR(cqr);
5079 } 5079 }
5080 5080
5081 cqr->lpm = lpum;
5082retry:
5081 cqr->startdev = device; 5083 cqr->startdev = device;
5082 cqr->memdev = device; 5084 cqr->memdev = device;
5083 cqr->block = NULL; 5085 cqr->block = NULL;
@@ -5122,6 +5124,14 @@ static int dasd_eckd_read_message_buffer(struct dasd_device *device,
5122 (prssdp + 1); 5124 (prssdp + 1);
5123 memcpy(messages, message_buf, 5125 memcpy(messages, message_buf,
5124 sizeof(struct dasd_rssd_messages)); 5126 sizeof(struct dasd_rssd_messages));
5127 } else if (cqr->lpm) {
5128 /*
5129 * on z/VM we might not be able to do I/O on the requested path
5130 * but instead we get the required information on any path
5131 * so retry with open path mask
5132 */
5133 cqr->lpm = 0;
5134 goto retry;
5125 } else 5135 } else
5126 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, 5136 DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
5127 "Reading messages failed with rc=%d\n" 5137 "Reading messages failed with rc=%d\n"
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 7ada078ffdd0..6a58bc8f46e2 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -762,7 +762,6 @@ static int io_subchannel_initialize_dev(struct subchannel *sch,
762 priv->state = DEV_STATE_NOT_OPER; 762 priv->state = DEV_STATE_NOT_OPER;
763 priv->dev_id.devno = sch->schib.pmcw.dev; 763 priv->dev_id.devno = sch->schib.pmcw.dev;
764 priv->dev_id.ssid = sch->schid.ssid; 764 priv->dev_id.ssid = sch->schid.ssid;
765 priv->schid = sch->schid;
766 765
767 INIT_WORK(&priv->todo_work, ccw_device_todo); 766 INIT_WORK(&priv->todo_work, ccw_device_todo);
768 INIT_LIST_HEAD(&priv->cmb_list); 767 INIT_LIST_HEAD(&priv->cmb_list);
@@ -1000,7 +999,6 @@ static int ccw_device_move_to_sch(struct ccw_device *cdev,
1000 put_device(&old_sch->dev); 999 put_device(&old_sch->dev);
1001 /* Initialize new subchannel. */ 1000 /* Initialize new subchannel. */
1002 spin_lock_irq(sch->lock); 1001 spin_lock_irq(sch->lock);
1003 cdev->private->schid = sch->schid;
1004 cdev->ccwlock = sch->lock; 1002 cdev->ccwlock = sch->lock;
1005 if (!sch_is_pseudo_sch(sch)) 1003 if (!sch_is_pseudo_sch(sch))
1006 sch_set_cdev(sch, cdev); 1004 sch_set_cdev(sch, cdev);
diff --git a/drivers/s390/cio/device_status.c b/drivers/s390/cio/device_status.c
index 15b56a15db15..9bc3512374c9 100644
--- a/drivers/s390/cio/device_status.c
+++ b/drivers/s390/cio/device_status.c
@@ -26,6 +26,7 @@
26static void 26static void
27ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb) 27ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb)
28{ 28{
29 struct subchannel *sch = to_subchannel(cdev->dev.parent);
29 char dbf_text[15]; 30 char dbf_text[15];
30 31
31 if (!scsw_is_valid_cstat(&irb->scsw) || 32 if (!scsw_is_valid_cstat(&irb->scsw) ||
@@ -36,10 +37,10 @@ ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb)
36 "received" 37 "received"
37 " ... device %04x on subchannel 0.%x.%04x, dev_stat " 38 " ... device %04x on subchannel 0.%x.%04x, dev_stat "
38 ": %02X sch_stat : %02X\n", 39 ": %02X sch_stat : %02X\n",
39 cdev->private->dev_id.devno, cdev->private->schid.ssid, 40 cdev->private->dev_id.devno, sch->schid.ssid,
40 cdev->private->schid.sch_no, 41 sch->schid.sch_no,
41 scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw)); 42 scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw));
42 sprintf(dbf_text, "chk%x", cdev->private->schid.sch_no); 43 sprintf(dbf_text, "chk%x", sch->schid.sch_no);
43 CIO_TRACE_EVENT(0, dbf_text); 44 CIO_TRACE_EVENT(0, dbf_text);
44 CIO_HEX_EVENT(0, irb, sizeof(struct irb)); 45 CIO_HEX_EVENT(0, irb, sizeof(struct irb));
45} 46}
diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h
index 8975060af96c..220f49145b2f 100644
--- a/drivers/s390/cio/io_sch.h
+++ b/drivers/s390/cio/io_sch.h
@@ -120,7 +120,6 @@ struct ccw_device_private {
120 int state; /* device state */ 120 int state; /* device state */
121 atomic_t onoff; 121 atomic_t onoff;
122 struct ccw_dev_id dev_id; /* device id */ 122 struct ccw_dev_id dev_id; /* device id */
123 struct subchannel_id schid; /* subchannel number */
124 struct ccw_request req; /* internal I/O request */ 123 struct ccw_request req; /* internal I/O request */
125 int iretry; 124 int iretry;
126 u8 pgid_valid_mask; /* mask of valid PGIDs */ 125 u8 pgid_valid_mask; /* mask of valid PGIDs */
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 4bb5262f7aee..71bf9bded485 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -686,6 +686,15 @@ static void qdio_kick_handler(struct qdio_q *q)
686 q->qdio_error = 0; 686 q->qdio_error = 0;
687} 687}
688 688
689static inline int qdio_tasklet_schedule(struct qdio_q *q)
690{
691 if (likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE)) {
692 tasklet_schedule(&q->tasklet);
693 return 0;
694 }
695 return -EPERM;
696}
697
689static void __qdio_inbound_processing(struct qdio_q *q) 698static void __qdio_inbound_processing(struct qdio_q *q)
690{ 699{
691 qperf_inc(q, tasklet_inbound); 700 qperf_inc(q, tasklet_inbound);
@@ -698,10 +707,8 @@ static void __qdio_inbound_processing(struct qdio_q *q)
698 if (!qdio_inbound_q_done(q)) { 707 if (!qdio_inbound_q_done(q)) {
699 /* means poll time is not yet over */ 708 /* means poll time is not yet over */
700 qperf_inc(q, tasklet_inbound_resched); 709 qperf_inc(q, tasklet_inbound_resched);
701 if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) { 710 if (!qdio_tasklet_schedule(q))
702 tasklet_schedule(&q->tasklet);
703 return; 711 return;
704 }
705 } 712 }
706 713
707 qdio_stop_polling(q); 714 qdio_stop_polling(q);
@@ -711,8 +718,7 @@ static void __qdio_inbound_processing(struct qdio_q *q)
711 */ 718 */
712 if (!qdio_inbound_q_done(q)) { 719 if (!qdio_inbound_q_done(q)) {
713 qperf_inc(q, tasklet_inbound_resched2); 720 qperf_inc(q, tasklet_inbound_resched2);
714 if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) 721 qdio_tasklet_schedule(q);
715 tasklet_schedule(&q->tasklet);
716 } 722 }
717} 723}
718 724
@@ -869,16 +875,15 @@ static void __qdio_outbound_processing(struct qdio_q *q)
869 * is noticed and outbound_handler is called after some time. 875 * is noticed and outbound_handler is called after some time.
870 */ 876 */
871 if (qdio_outbound_q_done(q)) 877 if (qdio_outbound_q_done(q))
872 del_timer(&q->u.out.timer); 878 del_timer_sync(&q->u.out.timer);
873 else 879 else
874 if (!timer_pending(&q->u.out.timer)) 880 if (!timer_pending(&q->u.out.timer) &&
881 likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE))
875 mod_timer(&q->u.out.timer, jiffies + 10 * HZ); 882 mod_timer(&q->u.out.timer, jiffies + 10 * HZ);
876 return; 883 return;
877 884
878sched: 885sched:
879 if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED)) 886 qdio_tasklet_schedule(q);
880 return;
881 tasklet_schedule(&q->tasklet);
882} 887}
883 888
884/* outbound tasklet */ 889/* outbound tasklet */
@@ -892,9 +897,7 @@ void qdio_outbound_timer(unsigned long data)
892{ 897{
893 struct qdio_q *q = (struct qdio_q *)data; 898 struct qdio_q *q = (struct qdio_q *)data;
894 899
895 if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED)) 900 qdio_tasklet_schedule(q);
896 return;
897 tasklet_schedule(&q->tasklet);
898} 901}
899 902
900static inline void qdio_check_outbound_after_thinint(struct qdio_q *q) 903static inline void qdio_check_outbound_after_thinint(struct qdio_q *q)
@@ -907,7 +910,7 @@ static inline void qdio_check_outbound_after_thinint(struct qdio_q *q)
907 910
908 for_each_output_queue(q->irq_ptr, out, i) 911 for_each_output_queue(q->irq_ptr, out, i)
909 if (!qdio_outbound_q_done(out)) 912 if (!qdio_outbound_q_done(out))
910 tasklet_schedule(&out->tasklet); 913 qdio_tasklet_schedule(out);
911} 914}
912 915
913static void __tiqdio_inbound_processing(struct qdio_q *q) 916static void __tiqdio_inbound_processing(struct qdio_q *q)
@@ -929,10 +932,8 @@ static void __tiqdio_inbound_processing(struct qdio_q *q)
929 932
930 if (!qdio_inbound_q_done(q)) { 933 if (!qdio_inbound_q_done(q)) {
931 qperf_inc(q, tasklet_inbound_resched); 934 qperf_inc(q, tasklet_inbound_resched);
932 if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) { 935 if (!qdio_tasklet_schedule(q))
933 tasklet_schedule(&q->tasklet);
934 return; 936 return;
935 }
936 } 937 }
937 938
938 qdio_stop_polling(q); 939 qdio_stop_polling(q);
@@ -942,8 +943,7 @@ static void __tiqdio_inbound_processing(struct qdio_q *q)
942 */ 943 */
943 if (!qdio_inbound_q_done(q)) { 944 if (!qdio_inbound_q_done(q)) {
944 qperf_inc(q, tasklet_inbound_resched2); 945 qperf_inc(q, tasklet_inbound_resched2);
945 if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) 946 qdio_tasklet_schedule(q);
946 tasklet_schedule(&q->tasklet);
947 } 947 }
948} 948}
949 949
@@ -977,7 +977,7 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
977 int i; 977 int i;
978 struct qdio_q *q; 978 struct qdio_q *q;
979 979
980 if (unlikely(irq_ptr->state == QDIO_IRQ_STATE_STOPPED)) 980 if (unlikely(irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
981 return; 981 return;
982 982
983 for_each_input_queue(irq_ptr, q, i) { 983 for_each_input_queue(irq_ptr, q, i) {
@@ -1003,7 +1003,7 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
1003 continue; 1003 continue;
1004 if (need_siga_sync(q) && need_siga_sync_out_after_pci(q)) 1004 if (need_siga_sync(q) && need_siga_sync_out_after_pci(q))
1005 qdio_siga_sync_q(q); 1005 qdio_siga_sync_q(q);
1006 tasklet_schedule(&q->tasklet); 1006 qdio_tasklet_schedule(q);
1007 } 1007 }
1008} 1008}
1009 1009
@@ -1066,10 +1066,12 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
1066 struct irb *irb) 1066 struct irb *irb)
1067{ 1067{
1068 struct qdio_irq *irq_ptr = cdev->private->qdio_data; 1068 struct qdio_irq *irq_ptr = cdev->private->qdio_data;
1069 struct subchannel_id schid;
1069 int cstat, dstat; 1070 int cstat, dstat;
1070 1071
1071 if (!intparm || !irq_ptr) { 1072 if (!intparm || !irq_ptr) {
1072 DBF_ERROR("qint:%4x", cdev->private->schid.sch_no); 1073 ccw_device_get_schid(cdev, &schid);
1074 DBF_ERROR("qint:%4x", schid.sch_no);
1073 return; 1075 return;
1074 } 1076 }
1075 1077
@@ -1122,12 +1124,14 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
1122int qdio_get_ssqd_desc(struct ccw_device *cdev, 1124int qdio_get_ssqd_desc(struct ccw_device *cdev,
1123 struct qdio_ssqd_desc *data) 1125 struct qdio_ssqd_desc *data)
1124{ 1126{
1127 struct subchannel_id schid;
1125 1128
1126 if (!cdev || !cdev->private) 1129 if (!cdev || !cdev->private)
1127 return -EINVAL; 1130 return -EINVAL;
1128 1131
1129 DBF_EVENT("get ssqd:%4x", cdev->private->schid.sch_no); 1132 ccw_device_get_schid(cdev, &schid);
1130 return qdio_setup_get_ssqd(NULL, &cdev->private->schid, data); 1133 DBF_EVENT("get ssqd:%4x", schid.sch_no);
1134 return qdio_setup_get_ssqd(NULL, &schid, data);
1131} 1135}
1132EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc); 1136EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);
1133 1137
@@ -1141,7 +1145,7 @@ static void qdio_shutdown_queues(struct ccw_device *cdev)
1141 tasklet_kill(&q->tasklet); 1145 tasklet_kill(&q->tasklet);
1142 1146
1143 for_each_output_queue(irq_ptr, q, i) { 1147 for_each_output_queue(irq_ptr, q, i) {
1144 del_timer(&q->u.out.timer); 1148 del_timer_sync(&q->u.out.timer);
1145 tasklet_kill(&q->tasklet); 1149 tasklet_kill(&q->tasklet);
1146 } 1150 }
1147} 1151}
@@ -1154,14 +1158,15 @@ static void qdio_shutdown_queues(struct ccw_device *cdev)
1154int qdio_shutdown(struct ccw_device *cdev, int how) 1158int qdio_shutdown(struct ccw_device *cdev, int how)
1155{ 1159{
1156 struct qdio_irq *irq_ptr = cdev->private->qdio_data; 1160 struct qdio_irq *irq_ptr = cdev->private->qdio_data;
1161 struct subchannel_id schid;
1157 int rc; 1162 int rc;
1158 unsigned long flags;
1159 1163
1160 if (!irq_ptr) 1164 if (!irq_ptr)
1161 return -ENODEV; 1165 return -ENODEV;
1162 1166
1163 WARN_ON_ONCE(irqs_disabled()); 1167 WARN_ON_ONCE(irqs_disabled());
1164 DBF_EVENT("qshutdown:%4x", cdev->private->schid.sch_no); 1168 ccw_device_get_schid(cdev, &schid);
1169 DBF_EVENT("qshutdown:%4x", schid.sch_no);
1165 1170
1166 mutex_lock(&irq_ptr->setup_mutex); 1171 mutex_lock(&irq_ptr->setup_mutex);
1167 /* 1172 /*
@@ -1184,7 +1189,7 @@ int qdio_shutdown(struct ccw_device *cdev, int how)
1184 qdio_shutdown_debug_entries(irq_ptr); 1189 qdio_shutdown_debug_entries(irq_ptr);
1185 1190
1186 /* cleanup subchannel */ 1191 /* cleanup subchannel */
1187 spin_lock_irqsave(get_ccwdev_lock(cdev), flags); 1192 spin_lock_irq(get_ccwdev_lock(cdev));
1188 1193
1189 if (how & QDIO_FLAG_CLEANUP_USING_CLEAR) 1194 if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
1190 rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP); 1195 rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
@@ -1198,12 +1203,12 @@ int qdio_shutdown(struct ccw_device *cdev, int how)
1198 } 1203 }
1199 1204
1200 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP); 1205 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
1201 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); 1206 spin_unlock_irq(get_ccwdev_lock(cdev));
1202 wait_event_interruptible_timeout(cdev->private->wait_q, 1207 wait_event_interruptible_timeout(cdev->private->wait_q,
1203 irq_ptr->state == QDIO_IRQ_STATE_INACTIVE || 1208 irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
1204 irq_ptr->state == QDIO_IRQ_STATE_ERR, 1209 irq_ptr->state == QDIO_IRQ_STATE_ERR,
1205 10 * HZ); 1210 10 * HZ);
1206 spin_lock_irqsave(get_ccwdev_lock(cdev), flags); 1211 spin_lock_irq(get_ccwdev_lock(cdev));
1207 1212
1208no_cleanup: 1213no_cleanup:
1209 qdio_shutdown_thinint(irq_ptr); 1214 qdio_shutdown_thinint(irq_ptr);
@@ -1211,7 +1216,7 @@ no_cleanup:
1211 /* restore interrupt handler */ 1216 /* restore interrupt handler */
1212 if ((void *)cdev->handler == (void *)qdio_int_handler) 1217 if ((void *)cdev->handler == (void *)qdio_int_handler)
1213 cdev->handler = irq_ptr->orig_handler; 1218 cdev->handler = irq_ptr->orig_handler;
1214 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); 1219 spin_unlock_irq(get_ccwdev_lock(cdev));
1215 1220
1216 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE); 1221 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
1217 mutex_unlock(&irq_ptr->setup_mutex); 1222 mutex_unlock(&irq_ptr->setup_mutex);
@@ -1228,11 +1233,13 @@ EXPORT_SYMBOL_GPL(qdio_shutdown);
1228int qdio_free(struct ccw_device *cdev) 1233int qdio_free(struct ccw_device *cdev)
1229{ 1234{
1230 struct qdio_irq *irq_ptr = cdev->private->qdio_data; 1235 struct qdio_irq *irq_ptr = cdev->private->qdio_data;
1236 struct subchannel_id schid;
1231 1237
1232 if (!irq_ptr) 1238 if (!irq_ptr)
1233 return -ENODEV; 1239 return -ENODEV;
1234 1240
1235 DBF_EVENT("qfree:%4x", cdev->private->schid.sch_no); 1241 ccw_device_get_schid(cdev, &schid);
1242 DBF_EVENT("qfree:%4x", schid.sch_no);
1236 DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf abandoned"); 1243 DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf abandoned");
1237 mutex_lock(&irq_ptr->setup_mutex); 1244 mutex_lock(&irq_ptr->setup_mutex);
1238 1245
@@ -1251,9 +1258,11 @@ EXPORT_SYMBOL_GPL(qdio_free);
1251 */ 1258 */
1252int qdio_allocate(struct qdio_initialize *init_data) 1259int qdio_allocate(struct qdio_initialize *init_data)
1253{ 1260{
1261 struct subchannel_id schid;
1254 struct qdio_irq *irq_ptr; 1262 struct qdio_irq *irq_ptr;
1255 1263
1256 DBF_EVENT("qallocate:%4x", init_data->cdev->private->schid.sch_no); 1264 ccw_device_get_schid(init_data->cdev, &schid);
1265 DBF_EVENT("qallocate:%4x", schid.sch_no);
1257 1266
1258 if ((init_data->no_input_qs && !init_data->input_handler) || 1267 if ((init_data->no_input_qs && !init_data->input_handler) ||
1259 (init_data->no_output_qs && !init_data->output_handler)) 1268 (init_data->no_output_qs && !init_data->output_handler))
@@ -1331,20 +1340,18 @@ static void qdio_detect_hsicq(struct qdio_irq *irq_ptr)
1331 */ 1340 */
1332int qdio_establish(struct qdio_initialize *init_data) 1341int qdio_establish(struct qdio_initialize *init_data)
1333{ 1342{
1334 struct qdio_irq *irq_ptr;
1335 struct ccw_device *cdev = init_data->cdev; 1343 struct ccw_device *cdev = init_data->cdev;
1336 unsigned long saveflags; 1344 struct subchannel_id schid;
1345 struct qdio_irq *irq_ptr;
1337 int rc; 1346 int rc;
1338 1347
1339 DBF_EVENT("qestablish:%4x", cdev->private->schid.sch_no); 1348 ccw_device_get_schid(cdev, &schid);
1349 DBF_EVENT("qestablish:%4x", schid.sch_no);
1340 1350
1341 irq_ptr = cdev->private->qdio_data; 1351 irq_ptr = cdev->private->qdio_data;
1342 if (!irq_ptr) 1352 if (!irq_ptr)
1343 return -ENODEV; 1353 return -ENODEV;
1344 1354
1345 if (cdev->private->state != DEV_STATE_ONLINE)
1346 return -EINVAL;
1347
1348 mutex_lock(&irq_ptr->setup_mutex); 1355 mutex_lock(&irq_ptr->setup_mutex);
1349 qdio_setup_irq(init_data); 1356 qdio_setup_irq(init_data);
1350 1357
@@ -1361,17 +1368,14 @@ int qdio_establish(struct qdio_initialize *init_data)
1361 irq_ptr->ccw.count = irq_ptr->equeue.count; 1368 irq_ptr->ccw.count = irq_ptr->equeue.count;
1362 irq_ptr->ccw.cda = (u32)((addr_t)irq_ptr->qdr); 1369 irq_ptr->ccw.cda = (u32)((addr_t)irq_ptr->qdr);
1363 1370
1364 spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags); 1371 spin_lock_irq(get_ccwdev_lock(cdev));
1365 ccw_device_set_options_mask(cdev, 0); 1372 ccw_device_set_options_mask(cdev, 0);
1366 1373
1367 rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0); 1374 rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0);
1375 spin_unlock_irq(get_ccwdev_lock(cdev));
1368 if (rc) { 1376 if (rc) {
1369 DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no); 1377 DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no);
1370 DBF_ERROR("rc:%4x", rc); 1378 DBF_ERROR("rc:%4x", rc);
1371 }
1372 spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);
1373
1374 if (rc) {
1375 mutex_unlock(&irq_ptr->setup_mutex); 1379 mutex_unlock(&irq_ptr->setup_mutex);
1376 qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR); 1380 qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
1377 return rc; 1381 return rc;
@@ -1407,19 +1411,17 @@ EXPORT_SYMBOL_GPL(qdio_establish);
1407 */ 1411 */
1408int qdio_activate(struct ccw_device *cdev) 1412int qdio_activate(struct ccw_device *cdev)
1409{ 1413{
1414 struct subchannel_id schid;
1410 struct qdio_irq *irq_ptr; 1415 struct qdio_irq *irq_ptr;
1411 int rc; 1416 int rc;
1412 unsigned long saveflags;
1413 1417
1414 DBF_EVENT("qactivate:%4x", cdev->private->schid.sch_no); 1418 ccw_device_get_schid(cdev, &schid);
1419 DBF_EVENT("qactivate:%4x", schid.sch_no);
1415 1420
1416 irq_ptr = cdev->private->qdio_data; 1421 irq_ptr = cdev->private->qdio_data;
1417 if (!irq_ptr) 1422 if (!irq_ptr)
1418 return -ENODEV; 1423 return -ENODEV;
1419 1424
1420 if (cdev->private->state != DEV_STATE_ONLINE)
1421 return -EINVAL;
1422
1423 mutex_lock(&irq_ptr->setup_mutex); 1425 mutex_lock(&irq_ptr->setup_mutex);
1424 if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) { 1426 if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
1425 rc = -EBUSY; 1427 rc = -EBUSY;
@@ -1431,19 +1433,17 @@ int qdio_activate(struct ccw_device *cdev)
1431 irq_ptr->ccw.count = irq_ptr->aqueue.count; 1433 irq_ptr->ccw.count = irq_ptr->aqueue.count;
1432 irq_ptr->ccw.cda = 0; 1434 irq_ptr->ccw.cda = 0;
1433 1435
1434 spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags); 1436 spin_lock_irq(get_ccwdev_lock(cdev));
1435 ccw_device_set_options(cdev, CCWDEV_REPORT_ALL); 1437 ccw_device_set_options(cdev, CCWDEV_REPORT_ALL);
1436 1438
1437 rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE, 1439 rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE,
1438 0, DOIO_DENY_PREFETCH); 1440 0, DOIO_DENY_PREFETCH);
1441 spin_unlock_irq(get_ccwdev_lock(cdev));
1439 if (rc) { 1442 if (rc) {
1440 DBF_ERROR("%4x act IO ERR", irq_ptr->schid.sch_no); 1443 DBF_ERROR("%4x act IO ERR", irq_ptr->schid.sch_no);
1441 DBF_ERROR("rc:%4x", rc); 1444 DBF_ERROR("rc:%4x", rc);
1442 }
1443 spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);
1444
1445 if (rc)
1446 goto out; 1445 goto out;
1446 }
1447 1447
1448 if (is_thinint_irq(irq_ptr)) 1448 if (is_thinint_irq(irq_ptr))
1449 tiqdio_add_input_queues(irq_ptr); 1449 tiqdio_add_input_queues(irq_ptr);
@@ -1585,10 +1585,11 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
1585 1585
1586 /* in case of SIGA errors we must process the error immediately */ 1586 /* in case of SIGA errors we must process the error immediately */
1587 if (used >= q->u.out.scan_threshold || rc) 1587 if (used >= q->u.out.scan_threshold || rc)
1588 tasklet_schedule(&q->tasklet); 1588 qdio_tasklet_schedule(q);
1589 else 1589 else
1590 /* free the SBALs in case of no further traffic */ 1590 /* free the SBALs in case of no further traffic */
1591 if (!timer_pending(&q->u.out.timer)) 1591 if (!timer_pending(&q->u.out.timer) &&
1592 likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE))
1592 mod_timer(&q->u.out.timer, jiffies + HZ); 1593 mod_timer(&q->u.out.timer, jiffies + HZ);
1593 return rc; 1594 return rc;
1594} 1595}
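
The new qdio_tasklet_schedule() centralizes the "only schedule while the IRQ is ACTIVE" guard that was previously open-coded at every tasklet_schedule() call site, and it returns -EPERM on refusal so the inbound paths can fall through to qdio_stop_polling() instead of returning early. A userspace model of the helper (enum and names are illustrative stand-ins):

#include <stdio.h>

enum state { ACTIVE, STOPPED };

static enum state irq_state = STOPPED;

static int tasklet_schedule_guarded(void)
{
	if (irq_state == ACTIVE) {
		puts("tasklet scheduled");
		return 0;
	}
	return -1;                 /* -EPERM in the kernel version */
}

int main(void)
{
	if (tasklet_schedule_guarded())
		puts("not active: fall through to stop polling");
	return 0;
}
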
diff --git a/drivers/s390/virtio/Makefile b/drivers/s390/virtio/Makefile
index 241891a57caf..df40692a9011 100644
--- a/drivers/s390/virtio/Makefile
+++ b/drivers/s390/virtio/Makefile
@@ -6,4 +6,8 @@
6# it under the terms of the GNU General Public License (version 2 only) 6# it under the terms of the GNU General Public License (version 2 only)
7# as published by the Free Software Foundation. 7# as published by the Free Software Foundation.
8 8
9obj-$(CONFIG_S390_GUEST) += kvm_virtio.o virtio_ccw.o 9s390-virtio-objs := virtio_ccw.o
10ifdef CONFIG_S390_GUEST_OLD_TRANSPORT
11s390-virtio-objs += kvm_virtio.o
12endif
13obj-$(CONFIG_S390_GUEST) += $(s390-virtio-objs)
diff --git a/drivers/s390/virtio/kvm_virtio.c b/drivers/s390/virtio/kvm_virtio.c
index 1d060fd293a3..5e5c11f37b24 100644
--- a/drivers/s390/virtio/kvm_virtio.c
+++ b/drivers/s390/virtio/kvm_virtio.c
@@ -458,6 +458,8 @@ static int __init kvm_devices_init(void)
458 if (test_devices_support(total_memory_size) < 0) 458 if (test_devices_support(total_memory_size) < 0)
459 return -ENODEV; 459 return -ENODEV;
460 460
461 pr_warn("The s390-virtio transport is deprecated. Please switch to a modern host providing virtio-ccw.\n");
462
461 rc = vmem_add_mapping(total_memory_size, PAGE_SIZE); 463 rc = vmem_add_mapping(total_memory_size, PAGE_SIZE);
462 if (rc) 464 if (rc)
463 return rc; 465 return rc;
@@ -482,7 +484,7 @@ static int __init kvm_devices_init(void)
482} 484}
483 485
484/* code for early console output with virtio_console */ 486/* code for early console output with virtio_console */
485static __init int early_put_chars(u32 vtermno, const char *buf, int count) 487static int early_put_chars(u32 vtermno, const char *buf, int count)
486{ 488{
487 char scratch[17]; 489 char scratch[17];
488 unsigned int len = count; 490 unsigned int len = count;
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index b381b3718a98..5648b715fed9 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -63,7 +63,7 @@ static int ioctl_send_fib(struct aac_dev * dev, void __user *arg)
63 struct fib *fibptr; 63 struct fib *fibptr;
64 struct hw_fib * hw_fib = (struct hw_fib *)0; 64 struct hw_fib * hw_fib = (struct hw_fib *)0;
65 dma_addr_t hw_fib_pa = (dma_addr_t)0LL; 65 dma_addr_t hw_fib_pa = (dma_addr_t)0LL;
66 unsigned size; 66 unsigned int size, osize;
67 int retval; 67 int retval;
68 68
69 if (dev->in_reset) { 69 if (dev->in_reset) {
@@ -87,7 +87,8 @@ static int ioctl_send_fib(struct aac_dev * dev, void __user *arg)
87 * will not overrun the buffer when we copy the memory. Return 87 * will not overrun the buffer when we copy the memory. Return
88 * an error if we would. 88 * an error if we would.
89 */ 89 */
90 size = le16_to_cpu(kfib->header.Size) + sizeof(struct aac_fibhdr); 90 osize = size = le16_to_cpu(kfib->header.Size) +
91 sizeof(struct aac_fibhdr);
91 if (size < le16_to_cpu(kfib->header.SenderSize)) 92 if (size < le16_to_cpu(kfib->header.SenderSize))
92 size = le16_to_cpu(kfib->header.SenderSize); 93 size = le16_to_cpu(kfib->header.SenderSize);
93 if (size > dev->max_fib_size) { 94 if (size > dev->max_fib_size) {
@@ -118,6 +119,14 @@ static int ioctl_send_fib(struct aac_dev * dev, void __user *arg)
 		goto cleanup;
 	}
 
+	/* Sanity check the second copy */
+	if ((osize != le16_to_cpu(kfib->header.Size) +
+		sizeof(struct aac_fibhdr))
+		|| (size < le16_to_cpu(kfib->header.SenderSize))) {
+		retval = -EINVAL;
+		goto cleanup;
+	}
+
 	if (kfib->header.Command == cpu_to_le16(TakeABreakPt)) {
 		aac_adapter_interrupt(dev);
 		/*
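The hunk above defends against a double-fetch (time-of-check/time-of-use) race: the FIB header is copied from user space twice, and the fields of the second copy are re-validated against the size computed from the first. A minimal standalone sketch of the same pattern follows; read_from_user() is a hypothetical stand-in for copy_from_user(), not a real kernel API.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct hdr { uint16_t size; };

/* Hypothetical stand-in for copy_from_user(); the "user" buffer
 * may be modified by another thread between the two reads. */
static void read_from_user(void *dst, const volatile void *src, size_t n)
{
	memcpy(dst, (const void *)src, n);
}

static int handle_ioctl(const volatile struct hdr *user)
{
	struct hdr first, second;
	size_t osize;

	read_from_user(&first, user, sizeof(first));
	osize = first.size;		/* size used to choose a buffer */

	read_from_user(&second, user, sizeof(second));
	if (second.size != osize)	/* re-validate the second copy */
		return -1;		/* user raced us: reject */

	printf("validated size %zu\n", osize);
	return 0;
}

int main(void)
{
	struct hdr h = { .size = 64 };

	return handle_ioctl(&h) ? 1 : 0;
}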
diff --git a/drivers/scsi/constants.c b/drivers/scsi/constants.c
index 83458f7a2824..6dc96c8dfe75 100644
--- a/drivers/scsi/constants.c
+++ b/drivers/scsi/constants.c
@@ -361,8 +361,9 @@ static const char * const snstext[] = {
 
 /* Get sense key string or NULL if not available */
 const char *
-scsi_sense_key_string(unsigned char key) {
-	if (key <= 0xE)
+scsi_sense_key_string(unsigned char key)
+{
+	if (key < ARRAY_SIZE(snstext))
 		return snstext[key];
 	return NULL;
 }
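Replacing the magic bound 0xE with ARRAY_SIZE(snstext) ties the range check to the table itself, so the two cannot drift apart when entries are added. A standalone sketch of the idiom, with ARRAY_SIZE defined the way the kernel defines it and a shortened table for illustration:

#include <stdio.h>

#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

static const char * const snstext[] = {
	"No Sense", "Recovered Error", "Not Ready", "Medium Error",
};

/* The index is checked against the table length, not a hand-kept constant. */
static const char *sense_key_string(unsigned char key)
{
	if (key < ARRAY_SIZE(snstext))
		return snstext[key];
	return NULL;
}

int main(void)
{
	printf("%s\n", sense_key_string(2));	/* "Not Ready" */
	if (!sense_key_string(200))
		puts("out of range -> NULL");
	return 0;
}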
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index a569c65f22b1..dcf36537a767 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -2923,7 +2923,7 @@ static int fcoe_ctlr_vlan_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
 	mutex_unlock(&fip->ctlr_mutex);
 
 drop:
-	kfree(skb);
+	kfree_skb(skb);
 	return rc;
 }
 
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index bf85974be862..17d04c702e1b 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -10410,8 +10410,11 @@ static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
 		__ipr_remove(pdev);
 		return rc;
 	}
+	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
+	ioa_cfg->scan_enabled = 1;
+	schedule_work(&ioa_cfg->work_q);
+	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
 
-	scsi_scan_host(ioa_cfg->host);
 	ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;
 
 	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
@@ -10421,10 +10424,8 @@ static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
 		}
 	}
 
-	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
-	ioa_cfg->scan_enabled = 1;
-	schedule_work(&ioa_cfg->work_q);
-	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
+	scsi_scan_host(ioa_cfg->host);
+
 	return 0;
 }
 
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 2dab3dc2aa69..c1ed25adb17e 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -5037,7 +5037,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
 	/* Find first memory bar */
 	bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM);
 	instance->bar = find_first_bit(&bar_list, sizeof(unsigned long));
-	if (pci_request_selected_regions(instance->pdev, instance->bar,
+	if (pci_request_selected_regions(instance->pdev, 1<<instance->bar,
 					 "megasas: LSI")) {
 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "IO memory region busy!\n");
 		return -EBUSY;
@@ -5339,7 +5339,7 @@ fail_ready_state:
 	iounmap(instance->reg_set);
 
 fail_ioremap:
-	pci_release_selected_regions(instance->pdev, instance->bar);
+	pci_release_selected_regions(instance->pdev, 1<<instance->bar);
 
 	return -EINVAL;
 }
@@ -5360,7 +5360,7 @@ static void megasas_release_mfi(struct megasas_instance *instance)
 
 	iounmap(instance->reg_set);
 
-	pci_release_selected_regions(instance->pdev, instance->bar);
+	pci_release_selected_regions(instance->pdev, 1<<instance->bar);
 }
 
 /**
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index ec837544f784..52d8bbf7feb5 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -2603,7 +2603,7 @@ megasas_release_fusion(struct megasas_instance *instance)
 
 	iounmap(instance->reg_set);
 
-	pci_release_selected_regions(instance->pdev, instance->bar);
+	pci_release_selected_regions(instance->pdev, 1<<instance->bar);
 }
 
 /**
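All four megaraid hunks correct the same misuse: pci_request_selected_regions() and pci_release_selected_regions() take a bitmask of BARs, not a BAR index, so a bar number n must be passed as 1 << n. A small sketch of the distinction, with a hypothetical request_regions() standing in for the PCI helper:

#include <stdio.h>

/* Hypothetical stand-in for pci_request_selected_regions(): each set
 * bit in 'bars' selects one BAR to claim. */
static void request_regions(unsigned int bars)
{
	unsigned int i;

	for (i = 0; i < 6; i++)
		if (bars & (1u << i))
			printf("claiming BAR %u\n", i);
}

int main(void)
{
	unsigned int bar = 2;		/* index of the first memory BAR */

	request_regions(bar);		/* wrong: mask 0b010 claims BAR 1 */
	request_regions(1u << bar);	/* right: mask 0b100 claims BAR 2 */
	return 0;
}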
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 751f13edece0..750f82c339d4 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -2188,6 +2188,17 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
 	} else
 		ioc->msix96_vector = 0;
 
+	if (ioc->is_warpdrive) {
+		ioc->reply_post_host_index[0] = (resource_size_t __iomem *)
+		    &ioc->chip->ReplyPostHostIndex;
+
+		for (i = 1; i < ioc->cpu_msix_table_sz; i++)
+			ioc->reply_post_host_index[i] =
+				(resource_size_t __iomem *)
+				((u8 __iomem *)&ioc->chip->Doorbell + (0x4000 + ((i - 1)
+				* 4)));
+	}
+
 	list_for_each_entry(reply_q, &ioc->reply_queue_list, list)
 		pr_info(MPT3SAS_FMT "%s: IRQ %d\n",
 		    reply_q->name, ((ioc->msix_enable) ? "PCI-MSI-X enabled" :
@@ -5280,17 +5291,6 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
 	if (r)
 		goto out_free_resources;
 
-	if (ioc->is_warpdrive) {
-		ioc->reply_post_host_index[0] = (resource_size_t __iomem *)
-		    &ioc->chip->ReplyPostHostIndex;
-
-		for (i = 1; i < ioc->cpu_msix_table_sz; i++)
-			ioc->reply_post_host_index[i] =
-				(resource_size_t __iomem *)
-				((u8 __iomem *)&ioc->chip->Doorbell + (0x4000 + ((i - 1)
-				* 4)));
-	}
-
 	pci_set_drvdata(ioc->pdev, ioc->shost);
 	r = _base_get_ioc_facts(ioc, CAN_SLEEP);
 	if (r)
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index eaccd651ccda..246456925335 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -246,6 +246,10 @@ static struct {
246 {"IBM", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, 246 {"IBM", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
247 {"SUN", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, 247 {"SUN", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
248 {"DELL", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, 248 {"DELL", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
249 {"STK", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
250 {"NETAPP", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
251 {"LSI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
252 {"ENGENIO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
249 {"SMSC", "USB 2 HS-CF", NULL, BLIST_SPARSELUN | BLIST_INQUIRY_36}, 253 {"SMSC", "USB 2 HS-CF", NULL, BLIST_SPARSELUN | BLIST_INQUIRY_36},
250 {"SONY", "CD-ROM CDU-8001", NULL, BLIST_BORKEN}, 254 {"SONY", "CD-ROM CDU-8001", NULL, BLIST_BORKEN},
251 {"SONY", "TSL", NULL, BLIST_FORCELUN}, /* DDS3 & DDS4 autoloaders */ 255 {"SONY", "TSL", NULL, BLIST_FORCELUN}, /* DDS3 & DDS4 autoloaders */
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 3f0ff072184b..60b651bfaa01 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -341,22 +341,6 @@ static int do_sas_phy_delete(struct device *dev, void *data)
 }
 
 /**
- * is_sas_attached - check if device is SAS attached
- * @sdev: scsi device to check
- *
- * returns true if the device is SAS attached
- */
-int is_sas_attached(struct scsi_device *sdev)
-{
-	struct Scsi_Host *shost = sdev->host;
-
-	return shost->transportt->host_attrs.ac.class ==
-		&sas_host_class.class;
-}
-EXPORT_SYMBOL(is_sas_attached);
-
-
-/**
  * sas_remove_children - tear down a devices SAS data structures
  * @dev: device belonging to the sas object
  *
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
index 53ef1cb6418e..8c9a35c91705 100644
--- a/drivers/scsi/ses.c
+++ b/drivers/scsi/ses.c
@@ -587,7 +587,7 @@ static void ses_match_to_enclosure(struct enclosure_device *edev,
 
 	ses_enclosure_data_process(edev, to_scsi_device(edev->edev.parent), 0);
 
-	if (is_sas_attached(sdev))
+	if (scsi_is_sas_rphy(&sdev->sdev_gendev))
 		efd.addr = sas_get_address(sdev);
 
 	if (efd.addr) {
@@ -778,6 +778,8 @@ static void ses_intf_remove_enclosure(struct scsi_device *sdev)
 	if (!edev)
 		return;
 
+	enclosure_unregister(edev);
+
 	ses_dev = edev->scratch;
 	edev->scratch = NULL;
 
@@ -789,7 +791,6 @@ static void ses_intf_remove_enclosure(struct scsi_device *sdev)
 	kfree(edev->component[0].scratch);
 
 	put_device(&edev->edev);
-	enclosure_unregister(edev);
 }
 
 static void ses_intf_remove(struct device *cdev,
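Moving enclosure_unregister() ahead of the scratch-data teardown follows the usual rule for removal paths: make the object unreachable to outside callers first, then free the data its callbacks use. A minimal standalone sketch of that ordering, with hypothetical names rather than the real SES/enclosure API:

#include <stdio.h>
#include <stdlib.h>

struct edev {
	void *scratch;	/* per-device data used by callbacks */
};

static void unregister_edev(struct edev *e)
{
	/* After this returns, no new callback can reach e->scratch. */
	(void)e;
	printf("unregistered\n");
}

static void remove_edev(struct edev *e)
{
	unregister_edev(e);	/* 1: cut off external users */
	free(e->scratch);	/* 2: only now free callback data */
	e->scratch = NULL;
	free(e);
}

int main(void)
{
	struct edev *e = malloc(sizeof(*e));

	e->scratch = malloc(16);
	remove_edev(e);
	return 0;
}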
diff --git a/drivers/scsi/wd719x.c b/drivers/scsi/wd719x.c
index e3da1a2fdb66..2a9da2e0ea6b 100644
--- a/drivers/scsi/wd719x.c
+++ b/drivers/scsi/wd719x.c
@@ -962,7 +962,7 @@ static void wd719x_pci_remove(struct pci_dev *pdev)
 	scsi_host_put(sh);
 }
 
-static DEFINE_PCI_DEVICE_TABLE(wd719x_pci_table) = {
+static const struct pci_device_id wd719x_pci_table[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_WD, 0x3296) },
 	{}
 };
diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c
index 823cbc92d1e7..7a37090dabbe 100644
--- a/drivers/spi/spi-img-spfi.c
+++ b/drivers/spi/spi-img-spfi.c
@@ -720,8 +720,6 @@ static int img_spfi_remove(struct platform_device *pdev)
 		clk_disable_unprepare(spfi->sys_clk);
 	}
 
-	spi_master_put(master);
-
 	return 0;
 }
 
diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c
index 0be89e052428..899d7a8f0889 100644
--- a/drivers/spi/spi-mt65xx.c
+++ b/drivers/spi/spi-mt65xx.c
@@ -685,7 +685,6 @@ static int mtk_spi_remove(struct platform_device *pdev)
 	pm_runtime_disable(&pdev->dev);
 
 	mtk_spi_reset(mdata);
-	spi_master_put(master);
 
 	return 0;
 }
diff --git a/drivers/spi/spi-pxa2xx-pci.c b/drivers/spi/spi-pxa2xx-pci.c
index f3df522db93b..58d2d48e16a5 100644
--- a/drivers/spi/spi-pxa2xx-pci.c
+++ b/drivers/spi/spi-pxa2xx-pci.c
@@ -214,6 +214,7 @@ static int pxa2xx_spi_pci_probe(struct pci_dev *dev,
 		return PTR_ERR(ssp->clk);
 
 	memset(&pi, 0, sizeof(pi));
+	pi.fwnode = dev->dev.fwnode;
 	pi.parent = &dev->dev;
 	pi.name = "pxa2xx-spi";
 	pi.id = ssp->port_id;
diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c
index c338ef1136f6..7f1555621f8e 100644
--- a/drivers/spi/spi-qup.c
+++ b/drivers/spi/spi-qup.c
@@ -1030,7 +1030,6 @@ static int spi_qup_remove(struct platform_device *pdev)
 
 	pm_runtime_put_noidle(&pdev->dev);
 	pm_runtime_disable(&pdev->dev);
-	spi_master_put(master);
 
 	return 0;
 }
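The three spi remove() hunks above drop a spi_master_put() for the same reason: these drivers register via the managed devm_spi_register_master(), whose teardown drops the registration reference after remove() returns, so the explicit put underflowed the refcount. A generic standalone sketch of the rule, using hypothetical names for the devm machinery:

#include <stdio.h>

struct master { int refcount; };

static void put_master(struct master *m)
{
	if (--m->refcount == 0)
		printf("released\n");
	else if (m->refcount < 0)
		printf("BUG: refcount underflow\n");
}

/* Models devm: the core drops the registration reference itself
 * after the driver's remove() callback has returned. */
static void devm_teardown(struct master *m)
{
	put_master(m);
}

static void driver_remove(struct master *m)
{
	/* No put_master(m) here; devm_teardown() owns that reference. */
	(void)m;
}

int main(void)
{
	struct master m = { .refcount = 1 };

	driver_remove(&m);
	devm_teardown(&m);	/* prints "released", no underflow */
	return 0;
}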
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index 0f83ad1d5a58..1de3a772eb7d 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -262,6 +262,9 @@ static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p,
 
 	for (k = 0; k < ARRAY_SIZE(sh_msiof_spi_div_table); k++) {
 		brps = DIV_ROUND_UP(div, sh_msiof_spi_div_table[k].div);
+		/* SCR_BRDV_DIV_1 is valid only if BRPS is x 1/1 or x 1/2 */
+		if (sh_msiof_spi_div_table[k].div == 1 && brps > 2)
+			continue;
 		if (brps <= 32) /* max of brdv is 32 */
 			break;
 	}
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 51ad42fad567..200ca228d885 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -960,7 +960,7 @@ static int spi_transfer_one_message(struct spi_master *master,
 	struct spi_transfer *xfer;
 	bool keep_cs = false;
 	int ret = 0;
-	unsigned long ms = 1;
+	unsigned long long ms = 1;
 	struct spi_statistics *statm = &master->statistics;
 	struct spi_statistics *stats = &msg->spi->statistics;
 
@@ -991,9 +991,13 @@ static int spi_transfer_one_message(struct spi_master *master,
 
 		if (ret > 0) {
 			ret = 0;
-			ms = xfer->len * 8 * 1000 / xfer->speed_hz;
+			ms = 8LL * 1000LL * xfer->len;
+			do_div(ms, xfer->speed_hz);
 			ms += ms + 100; /* some tolerance */
 
+			if (ms > UINT_MAX)
+				ms = UINT_MAX;
+
 			ms = wait_for_completion_timeout(&master->xfer_completion,
 							 msecs_to_jiffies(ms));
 		}
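The hunk above avoids a 32-bit overflow: for a large transfer on a slow bus, len * 8 * 1000 can exceed what an unsigned long holds on 32-bit systems, so the product is formed in 64 bits, divided (do_div() in the kernel), and clamped before being handed to msecs_to_jiffies(). A standalone sketch of the same arithmetic:

#include <stdint.h>
#include <stdio.h>
#include <limits.h>

/* Transfer timeout in ms: bits / speed, computed in 64 bits. */
static unsigned int xfer_timeout_ms(uint32_t len, uint32_t speed_hz)
{
	uint64_t ms = 8ULL * 1000ULL * len;	/* bits * 1000 */

	ms /= speed_hz;		/* the kernel code uses do_div() here */
	ms += ms + 100;		/* double it and add some tolerance */

	if (ms > UINT_MAX)	/* clamp before narrowing */
		ms = UINT_MAX;
	return (unsigned int)ms;
}

int main(void)
{
	/* 16 MiB at 1 kHz: the 32-bit product would have wrapped. */
	printf("%u ms\n", xfer_timeout_ms(16u << 20, 1000));
	return 0;
}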
@@ -1159,6 +1163,7 @@ static void __spi_pump_messages(struct spi_master *master, bool in_kthread)
 		if (ret < 0) {
 			dev_err(&master->dev, "Failed to power device: %d\n",
 				ret);
+			mutex_unlock(&master->io_mutex);
 			return;
 		}
 	}
@@ -1174,6 +1179,7 @@ static void __spi_pump_messages(struct spi_master *master, bool in_kthread)
 
 		if (master->auto_runtime_pm)
 			pm_runtime_put(master->dev.parent);
+		mutex_unlock(&master->io_mutex);
 		return;
 	}
 }
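The two __spi_pump_messages() hunks add the unlock the new io_mutex needs on its early-return paths; every exit from a locked region must release the lock or the next caller deadlocks. A compact pthreads sketch of the shape (compile with -pthread):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t io_mutex = PTHREAD_MUTEX_INITIALIZER;

static int power_up(int fail) { return fail ? -1 : 0; }

static void pump_messages(int fail)
{
	pthread_mutex_lock(&io_mutex);

	if (power_up(fail) < 0) {
		fprintf(stderr, "failed to power device\n");
		pthread_mutex_unlock(&io_mutex);	/* the fix */
		return;
	}

	printf("message pumped\n");
	pthread_mutex_unlock(&io_mutex);
}

int main(void)
{
	pump_messages(1);
	pump_messages(0);	/* would deadlock without the unlock above */
	return 0;
}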
diff --git a/drivers/staging/comedi/drivers/adv_pci1760.c b/drivers/staging/comedi/drivers/adv_pci1760.c
index d7dd1e55e347..9f525ff7290c 100644
--- a/drivers/staging/comedi/drivers/adv_pci1760.c
+++ b/drivers/staging/comedi/drivers/adv_pci1760.c
@@ -196,6 +196,7 @@ static int pci1760_pwm_ns_to_div(unsigned int flags, unsigned int ns)
 		break;
 	case CMDF_ROUND_DOWN:
 		divisor = ns / PCI1760_PWM_TIMEBASE;
+		break;
 	default:
 		return -EINVAL;
 	}
diff --git a/drivers/staging/comedi/drivers/comedi_test.c b/drivers/staging/comedi/drivers/comedi_test.c
index 4ab186669f0c..ec5b9a23494d 100644
--- a/drivers/staging/comedi/drivers/comedi_test.c
+++ b/drivers/staging/comedi/drivers/comedi_test.c
@@ -56,11 +56,6 @@
 
 #define N_CHANS 8
 
-enum waveform_state_bits {
-	WAVEFORM_AI_RUNNING,
-	WAVEFORM_AO_RUNNING
-};
-
 /* Data unique to this driver */
 struct waveform_private {
 	struct timer_list ai_timer;	/* timer for AI commands */
@@ -68,7 +63,6 @@ struct waveform_private {
 	unsigned int wf_amplitude;	/* waveform amplitude in microvolts */
 	unsigned int wf_period;		/* waveform period in microseconds */
 	unsigned int wf_current;	/* current time in waveform period */
-	unsigned long state_bits;
 	unsigned int ai_scan_period;	/* AI scan period in usec */
 	unsigned int ai_convert_period;	/* AI conversion period in usec */
 	struct timer_list ao_timer;	/* timer for AO commands */
@@ -191,10 +185,6 @@ static void waveform_ai_timer(unsigned long arg)
 	unsigned int nsamples;
 	unsigned int time_increment;
 
-	/* check command is still active */
-	if (!test_bit(WAVEFORM_AI_RUNNING, &devpriv->state_bits))
-		return;
-
 	now = ktime_to_us(ktime_get());
 	nsamples = comedi_nsamples_left(s, UINT_MAX);
 
@@ -386,11 +376,6 @@ static int waveform_ai_cmd(struct comedi_device *dev,
 	 */
 	devpriv->ai_timer.expires =
 		jiffies + usecs_to_jiffies(devpriv->ai_convert_period) + 1;
-
-	/* mark command as active */
-	smp_mb__before_atomic();
-	set_bit(WAVEFORM_AI_RUNNING, &devpriv->state_bits);
-	smp_mb__after_atomic();
 	add_timer(&devpriv->ai_timer);
 	return 0;
 }
@@ -400,11 +385,12 @@ static int waveform_ai_cancel(struct comedi_device *dev,
 {
 	struct waveform_private *devpriv = dev->private;
 
-	/* mark command as no longer active */
-	clear_bit(WAVEFORM_AI_RUNNING, &devpriv->state_bits);
-	smp_mb__after_atomic();
-	/* cannot call del_timer_sync() as may be called from timer routine */
-	del_timer(&devpriv->ai_timer);
+	if (in_softirq()) {
+		/* Assume we were called from the timer routine itself. */
+		del_timer(&devpriv->ai_timer);
+	} else {
+		del_timer_sync(&devpriv->ai_timer);
+	}
 	return 0;
 }
 
@@ -436,10 +422,6 @@ static void waveform_ao_timer(unsigned long arg)
 	u64 scans_since;
 	unsigned int scans_avail = 0;
 
-	/* check command is still active */
-	if (!test_bit(WAVEFORM_AO_RUNNING, &devpriv->state_bits))
-		return;
-
 	/* determine number of scan periods since last time */
 	now = ktime_to_us(ktime_get());
 	scans_since = now - devpriv->ao_last_scan_time;
@@ -518,11 +500,6 @@ static int waveform_ao_inttrig_start(struct comedi_device *dev,
 	devpriv->ao_last_scan_time = ktime_to_us(ktime_get());
 	devpriv->ao_timer.expires =
 		jiffies + usecs_to_jiffies(devpriv->ao_scan_period);
-
-	/* mark command as active */
-	smp_mb__before_atomic();
-	set_bit(WAVEFORM_AO_RUNNING, &devpriv->state_bits);
-	smp_mb__after_atomic();
 	add_timer(&devpriv->ao_timer);
 
 	return 1;
@@ -608,11 +585,12 @@ static int waveform_ao_cancel(struct comedi_device *dev,
 	struct waveform_private *devpriv = dev->private;
 
 	s->async->inttrig = NULL;
-	/* mark command as no longer active */
-	clear_bit(WAVEFORM_AO_RUNNING, &devpriv->state_bits);
-	smp_mb__after_atomic();
-	/* cannot call del_timer_sync() as may be called from timer routine */
-	del_timer(&devpriv->ao_timer);
+	if (in_softirq()) {
+		/* Assume we were called from the timer routine itself. */
+		del_timer(&devpriv->ao_timer);
+	} else {
+		del_timer_sync(&devpriv->ao_timer);
+	}
 	return 0;
 }
 
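The comedi_test rework replaces the RUNNING flag with a direct rule: del_timer_sync() must never be called from the timer callback itself (it would wait for its own completion), so the cancel path checks in_softirq() and falls back to plain del_timer() there. The sketch below models only the decision; del_timer() and del_timer_sync() are hypothetical stand-ins, not the kernel functions.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel's timer API. */
static void del_timer(const char *name)
{
	printf("%s: detached, callback may still be running\n", name);
}

static void del_timer_sync(const char *name)
{
	printf("%s: detached and waited for the callback\n", name);
}

/* Models the fixed cancel path: del_timer_sync() would deadlock if
 * invoked from the timer callback (softirq context) itself. */
static void cancel(const char *name, bool in_softirq)
{
	if (in_softirq) {
		/* Assume we were called from the timer routine itself. */
		del_timer(name);
	} else {
		del_timer_sync(name);
	}
}

int main(void)
{
	cancel("ai_timer", true);	/* called from the callback */
	cancel("ao_timer", false);	/* normal process context */
	return 0;
}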
diff --git a/drivers/staging/comedi/drivers/daqboard2000.c b/drivers/staging/comedi/drivers/daqboard2000.c
index 65daef0c00d5..0f4eb954aa80 100644
--- a/drivers/staging/comedi/drivers/daqboard2000.c
+++ b/drivers/staging/comedi/drivers/daqboard2000.c
@@ -634,7 +634,7 @@ static const void *daqboard2000_find_boardinfo(struct comedi_device *dev,
 	const struct daq200_boardtype *board;
 	int i;
 
-	if (pcidev->subsystem_device != PCI_VENDOR_ID_IOTECH)
+	if (pcidev->subsystem_vendor != PCI_VENDOR_ID_IOTECH)
 		return NULL;
 
 	for (i = 0; i < ARRAY_SIZE(boardtypes); i++) {
diff --git a/drivers/staging/comedi/drivers/dt2811.c b/drivers/staging/comedi/drivers/dt2811.c
index 904f637797b6..8bbd93814340 100644
--- a/drivers/staging/comedi/drivers/dt2811.c
+++ b/drivers/staging/comedi/drivers/dt2811.c
@@ -588,8 +588,8 @@ static int dt2811_attach(struct comedi_device *dev, struct comedi_devconfig *it)
 	s = &dev->subdevices[0];
 	s->type = COMEDI_SUBD_AI;
 	s->subdev_flags = SDF_READABLE |
-			  (it->options[2] == 1) ? SDF_DIFF :
-			  (it->options[2] == 2) ? SDF_COMMON : SDF_GROUND;
+			  ((it->options[2] == 1) ? SDF_DIFF :
+			   (it->options[2] == 2) ? SDF_COMMON : SDF_GROUND);
 	s->n_chan = (it->options[2] == 1) ? 8 : 16;
 	s->maxdata = 0x0fff;
 	s->range_table = board->is_pgh ? &dt2811_pgh_ai_ranges
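The dt2811 fix is a pure C precedence bug: '|' binds tighter than '?:', so "SDF_READABLE | cond ? a : b" parses as "(SDF_READABLE | cond) ? a : b" and the readable flag was swallowed by the condition. The added parentheses restore the intended meaning; a minimal standalone demonstration:

#include <stdio.h>

#define SDF_READABLE 0x01
#define SDF_DIFF     0x10
#define SDF_GROUND   0x20

int main(void)
{
	int opt = 0;	/* not 1, so SDF_GROUND is intended */

	/* Buggy: parses as (SDF_READABLE | (opt == 1)) ? SDF_DIFF : ... */
	int bad = SDF_READABLE | (opt == 1) ? SDF_DIFF : SDF_GROUND;

	/* Fixed: the ternary is evaluated first, then OR-ed in. */
	int good = SDF_READABLE | ((opt == 1) ? SDF_DIFF : SDF_GROUND);

	printf("bad=0x%02x good=0x%02x\n", bad, good);	/* 0x10 vs 0x21 */
	return 0;
}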
diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
index 8dabb19519a5..0f97d7b611d7 100644
--- a/drivers/staging/comedi/drivers/ni_mio_common.c
+++ b/drivers/staging/comedi/drivers/ni_mio_common.c
@@ -2772,7 +2772,15 @@ static int ni_ao_inttrig(struct comedi_device *dev,
 	int i;
 	static const int timeout = 1000;
 
-	if (trig_num != cmd->start_arg)
+	/*
+	 * Require trig_num == cmd->start_arg when cmd->start_src == TRIG_INT.
+	 * For backwards compatibility, also allow trig_num == 0 when
+	 * cmd->start_src != TRIG_INT (i.e. when cmd->start_src == TRIG_EXT);
+	 * in that case, the internal trigger is being used as a pre-trigger
+	 * before the external trigger.
+	 */
+	if (!(trig_num == cmd->start_arg ||
+	      (trig_num == 0 && cmd->start_src != TRIG_INT)))
 		return -EINVAL;
 
 	/*
@@ -5480,7 +5488,7 @@ static int ni_E_init(struct comedi_device *dev,
 			s->maxdata = (devpriv->is_m_series) ? 0xffffffff
 							    : 0x00ffffff;
 			s->insn_read = ni_tio_insn_read;
-			s->insn_write = ni_tio_insn_read;
+			s->insn_write = ni_tio_insn_write;
 			s->insn_config = ni_tio_insn_config;
 #ifdef PCIDMA
 			if (dev->irq && devpriv->mite) {
diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c
index 170ac980abcb..24c348d2f5bb 100644
--- a/drivers/staging/iio/impedance-analyzer/ad5933.c
+++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
@@ -419,6 +419,7 @@ static ssize_t ad5933_store(struct device *dev,
 	mutex_lock(&indio_dev->mlock);
 	switch ((u32)this_attr->address) {
 	case AD5933_OUT_RANGE:
+		ret = -EINVAL;
 		for (i = 0; i < 4; i++)
 			if (val == st->range_avail[i]) {
 				st->ctrl_hb &= ~AD5933_CTRL_RANGE(0x3);
@@ -426,7 +427,6 @@ static ssize_t ad5933_store(struct device *dev,
 				ret = ad5933_cmd(st, 0);
 				break;
 			}
-		ret = -EINVAL;
 		break;
 	case AD5933_IN_PGA_GAIN:
 		if (sysfs_streq(buf, "1")) {
diff --git a/drivers/staging/lustre/lustre/llite/namei.c b/drivers/staging/lustre/lustre/llite/namei.c
index 3664bfd0178b..2c4dc69731e8 100644
--- a/drivers/staging/lustre/lustre/llite/namei.c
+++ b/drivers/staging/lustre/lustre/llite/namei.c
@@ -388,6 +388,7 @@ static int ll_lookup_it_finish(struct ptlrpc_request *request,
 	struct inode *inode = NULL;
 	__u64 bits = 0;
 	int rc = 0;
+	struct dentry *alias;
 
 	/* NB 1 request reference will be taken away by ll_intent_lock()
 	 * when I return
@@ -412,26 +413,12 @@ static int ll_lookup_it_finish(struct ptlrpc_request *request,
 	 */
 	}
 
-	/* Only hash *de if it is unhashed (new dentry).
-	 * Atoimc_open may passing hashed dentries for open.
-	 */
-	if (d_unhashed(*de)) {
-		struct dentry *alias;
-
-		alias = ll_splice_alias(inode, *de);
-		if (IS_ERR(alias)) {
-			rc = PTR_ERR(alias);
-			goto out;
-		}
-		*de = alias;
-	} else if (!it_disposition(it, DISP_LOOKUP_NEG) &&
-		   !it_disposition(it, DISP_OPEN_CREATE)) {
-		/* With DISP_OPEN_CREATE dentry will be
-		 * instantiated in ll_create_it.
-		 */
-		LASSERT(!d_inode(*de));
-		d_instantiate(*de, inode);
+	alias = ll_splice_alias(inode, *de);
+	if (IS_ERR(alias)) {
+		rc = PTR_ERR(alias);
+		goto out;
 	}
+	*de = alias;
 
 	if (!it_disposition(it, DISP_LOOKUP_NEG)) {
 		/* we have lookup look - unhide dentry */
@@ -587,6 +574,24 @@ static int ll_atomic_open(struct inode *dir, struct dentry *dentry,
 	       dentry, PFID(ll_inode2fid(dir)), dir, file, open_flags, mode,
 	       *opened);
 
+	/* Only negative dentries enter here */
+	LASSERT(!d_inode(dentry));
+
+	if (!d_in_lookup(dentry)) {
+		/* A valid negative dentry that just passed revalidation,
+		 * there's little point to try and open it server-side,
+		 * even though there's a minuscle chance it might succeed.
+		 * Either way it's a valid race to just return -ENOENT here.
+		 */
+		if (!(open_flags & O_CREAT))
+			return -ENOENT;
+
+		/* Otherwise we just unhash it to be rehashed afresh via
+		 * lookup if necessary
+		 */
+		d_drop(dentry);
+	}
+
 	it = kzalloc(sizeof(*it), GFP_NOFS);
 	if (!it)
 		return -ENOMEM;
diff --git a/drivers/staging/wilc1000/host_interface.c b/drivers/staging/wilc1000/host_interface.c
index 0b1760cba6e3..78f524fcd214 100644
--- a/drivers/staging/wilc1000/host_interface.c
+++ b/drivers/staging/wilc1000/host_interface.c
@@ -3363,7 +3363,7 @@ int wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler)
 	if (!hif_workqueue) {
 		netdev_err(vif->ndev, "Failed to create workqueue\n");
 		result = -ENOMEM;
-		goto _fail_mq_;
+		goto _fail_;
 	}
 
 	setup_timer(&periodic_rssi, GetPeriodicRSSI,
@@ -3391,7 +3391,6 @@ int wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler)
 
 	clients_count++;
 
-_fail_mq_:
 	destroy_workqueue(hif_workqueue);
 _fail_:
 	return result;
diff --git a/drivers/staging/wilc1000/linux_wlan.c b/drivers/staging/wilc1000/linux_wlan.c
index 3a66255f14fc..32215110d597 100644
--- a/drivers/staging/wilc1000/linux_wlan.c
+++ b/drivers/staging/wilc1000/linux_wlan.c
@@ -648,7 +648,7 @@ void wilc1000_wlan_deinit(struct net_device *dev)
 		mutex_unlock(&wl->hif_cs);
 	}
 	if (&wl->txq_event)
-		wait_for_completion(&wl->txq_event);
+		complete(&wl->txq_event);
 
 	wlan_deinitialize_threads(dev);
 	deinit_irq(dev);
diff --git a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
index 9092600a1794..2c2e8aca8305 100644
--- a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
+++ b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
@@ -1191,7 +1191,7 @@ static int get_station(struct wiphy *wiphy, struct net_device *dev,
 	struct wilc_priv *priv;
 	struct wilc_vif *vif;
 	u32 i = 0;
-	u32 associatedsta = 0;
+	u32 associatedsta = ~0;
 	u32 inactive_time = 0;
 	priv = wiphy_priv(wiphy);
 	vif = netdev_priv(dev);
@@ -1204,7 +1204,7 @@ static int get_station(struct wiphy *wiphy, struct net_device *dev,
 		}
 	}
 
-	if (associatedsta == -1) {
+	if (associatedsta == ~0) {
 		netdev_err(dev, "sta required is not associated\n");
 		return -ENOENT;
 	}
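The wilc1000 fix is about a sentinel that could never fire: associatedsta was initialized to 0 but tested against -1, so a failed lookup was indistinguishable from a hit on index 0. Initializing to ~0 and testing against ~0 makes the "not found" value one that no successful search can produce. A small standalone sketch of the pattern:

#include <stdint.h>
#include <stdio.h>

#define NUM_STA 4

int main(void)
{
	const uint8_t wanted = 9;	/* identifier we look for */
	uint8_t table[NUM_STA] = { 1, 2, 3, 4 };
	uint32_t found = ~0u;		/* sentinel: "not found" */
	uint32_t i;

	for (i = 0; i < NUM_STA; i++)
		if (table[i] == wanted)
			found = i;

	/* Initializing 'found' to 0 would make a miss look like a hit
	 * on index 0; the sentinel must be unreachable by any hit. */
	if (found == ~0u)
		printf("station not associated\n");
	else
		printf("station at index %u\n", (unsigned)found);
	return 0;
}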
diff --git a/drivers/thermal/clock_cooling.c b/drivers/thermal/clock_cooling.c
index 1b4ff0f4c716..ed5dd0e88657 100644
--- a/drivers/thermal/clock_cooling.c
+++ b/drivers/thermal/clock_cooling.c
@@ -426,6 +426,7 @@ clock_cooling_register(struct device *dev, const char *clock_name)
 	if (!ccdev)
 		return ERR_PTR(-ENOMEM);
 
+	mutex_init(&ccdev->lock);
 	ccdev->dev = dev;
 	ccdev->clk = devm_clk_get(dev, clock_name);
 	if (IS_ERR(ccdev->clk))
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index 3788ed74c9ab..a32b41783b77 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -740,12 +740,22 @@ static int cpufreq_power2state(struct thermal_cooling_device *cdev,
 }
 
 /* Bind cpufreq callbacks to thermal cooling device ops */
+
 static struct thermal_cooling_device_ops cpufreq_cooling_ops = {
 	.get_max_state = cpufreq_get_max_state,
 	.get_cur_state = cpufreq_get_cur_state,
 	.set_cur_state = cpufreq_set_cur_state,
 };
 
+static struct thermal_cooling_device_ops cpufreq_power_cooling_ops = {
+	.get_max_state		= cpufreq_get_max_state,
+	.get_cur_state		= cpufreq_get_cur_state,
+	.set_cur_state		= cpufreq_set_cur_state,
+	.get_requested_power	= cpufreq_get_requested_power,
+	.state2power		= cpufreq_state2power,
+	.power2state		= cpufreq_power2state,
+};
+
 /* Notifier for cpufreq policy change */
 static struct notifier_block thermal_cpufreq_notifier_block = {
 	.notifier_call = cpufreq_thermal_notifier,
@@ -795,6 +805,7 @@ __cpufreq_cooling_register(struct device_node *np,
 	struct cpumask temp_mask;
 	unsigned int freq, i, num_cpus;
 	int ret;
+	struct thermal_cooling_device_ops *cooling_ops;
 
 	cpumask_and(&temp_mask, clip_cpus, cpu_online_mask);
 	policy = cpufreq_cpu_get(cpumask_first(&temp_mask));
@@ -850,10 +861,6 @@ __cpufreq_cooling_register(struct device_node *np,
 	cpumask_copy(&cpufreq_dev->allowed_cpus, clip_cpus);
 
 	if (capacitance) {
-		cpufreq_cooling_ops.get_requested_power =
-			cpufreq_get_requested_power;
-		cpufreq_cooling_ops.state2power = cpufreq_state2power;
-		cpufreq_cooling_ops.power2state = cpufreq_power2state;
 		cpufreq_dev->plat_get_static_power = plat_static_func;
 
 		ret = build_dyn_power_table(cpufreq_dev, capacitance);
@@ -861,6 +868,10 @@ __cpufreq_cooling_register(struct device_node *np,
 			cool_dev = ERR_PTR(ret);
 			goto free_table;
 		}
+
+		cooling_ops = &cpufreq_power_cooling_ops;
+	} else {
+		cooling_ops = &cpufreq_cooling_ops;
 	}
 
 	ret = get_idr(&cpufreq_idr, &cpufreq_dev->id);
@@ -885,7 +896,7 @@ __cpufreq_cooling_register(struct device_node *np,
 		 cpufreq_dev->id);
 
 	cool_dev = thermal_of_cooling_device_register(np, dev_name, cpufreq_dev,
-						      &cpufreq_cooling_ops);
+						      cooling_ops);
 	if (IS_ERR(cool_dev))
 		goto remove_idr;
 
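Rather than patching power callbacks into the single static cpufreq_cooling_ops at registration time (which would leak those callbacks into every instance, including devices registered without a power model), the fix keeps two fixed ops tables and selects one per device. A minimal standalone sketch of that shape, with hypothetical names:

#include <stdio.h>

struct cooling_ops {
	void (*set_state)(int);
	int (*get_power)(void);	/* only meaningful with a power model */
};

static void set_state(int s) { printf("state=%d\n", s); }
static int get_power(void) { return 42; }

/* Two fixed tables instead of one static table mutated per device. */
static const struct cooling_ops basic_ops = { .set_state = set_state };
static const struct cooling_ops power_ops = {
	.set_state = set_state,
	.get_power = get_power,
};

static const struct cooling_ops *pick_ops(int capacitance)
{
	return capacitance ? &power_ops : &basic_ops;
}

int main(void)
{
	const struct cooling_ops *ops = pick_ops(0);

	ops->set_state(1);
	printf("power cb present: %s\n", ops->get_power ? "yes" : "no");
	return 0;
}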
diff --git a/drivers/thermal/fair_share.c b/drivers/thermal/fair_share.c
index 34fe36504a55..68bd1b569118 100644
--- a/drivers/thermal/fair_share.c
+++ b/drivers/thermal/fair_share.c
@@ -116,7 +116,9 @@ static int fair_share_throttle(struct thermal_zone_device *tz, int trip)
 		instance->target = get_target_state(tz, cdev, percentage,
 						    cur_trip_level);
 
+		mutex_lock(&instance->cdev->lock);
 		instance->cdev->updated = false;
+		mutex_unlock(&instance->cdev->lock);
 		thermal_cdev_update(cdev);
 	}
 	return 0;
diff --git a/drivers/thermal/gov_bang_bang.c b/drivers/thermal/gov_bang_bang.c
index fc52016d4e85..bb118a152cbb 100644
--- a/drivers/thermal/gov_bang_bang.c
+++ b/drivers/thermal/gov_bang_bang.c
@@ -71,7 +71,9 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
 		dev_dbg(&instance->cdev->device, "target=%d\n",
 					(int)instance->target);
 
+		mutex_lock(&instance->cdev->lock);
 		instance->cdev->updated = false; /* cdev needs update */
+		mutex_unlock(&instance->cdev->lock);
 	}
 
 	mutex_unlock(&tz->lock);
diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c
index c5547bd711db..e473548b5d28 100644
--- a/drivers/thermal/imx_thermal.c
+++ b/drivers/thermal/imx_thermal.c
@@ -471,8 +471,6 @@ MODULE_DEVICE_TABLE(of, of_imx_thermal_match);
 
 static int imx_thermal_probe(struct platform_device *pdev)
 {
-	const struct of_device_id *of_id =
-		of_match_device(of_imx_thermal_match, &pdev->dev);
 	struct imx_thermal_data *data;
 	struct regmap *map;
 	int measure_freq;
@@ -490,7 +488,7 @@ static int imx_thermal_probe(struct platform_device *pdev)
 	}
 	data->tempmon = map;
 
-	data->socdata = of_id->data;
+	data->socdata = of_device_get_match_data(&pdev->dev);
 
 	/* make sure the IRQ flag is clear before enabling irq on i.MX6SX */
 	if (data->socdata->version == TEMPMON_IMX6SX) {
diff --git a/drivers/thermal/int340x_thermal/int3406_thermal.c b/drivers/thermal/int340x_thermal/int3406_thermal.c
index a578cd257db4..1891f34ab7fc 100644
--- a/drivers/thermal/int340x_thermal/int3406_thermal.c
+++ b/drivers/thermal/int340x_thermal/int3406_thermal.c
@@ -225,7 +225,6 @@ static struct platform_driver int3406_thermal_driver = {
 	.remove = int3406_thermal_remove,
 	.driver = {
 		   .name = "int3406 thermal",
-		   .owner = THIS_MODULE,
 		   .acpi_match_table = int3406_thermal_match,
 		   },
 };
diff --git a/drivers/thermal/intel_pch_thermal.c b/drivers/thermal/intel_pch_thermal.c
index 6a6ec1c95a7a..9b4815e81b0d 100644
--- a/drivers/thermal/intel_pch_thermal.c
+++ b/drivers/thermal/intel_pch_thermal.c
@@ -21,6 +21,7 @@
 #include <linux/init.h>
 #include <linux/pci.h>
 #include <linux/thermal.h>
+#include <linux/pm.h>
 
 /* Intel PCH thermal Device IDs */
 #define PCH_THERMAL_DID_WPT	0x9CA4 /* Wildcat Point */
@@ -65,6 +66,7 @@ struct pch_thermal_device {
 	unsigned long crt_temp;
 	int hot_trip_id;
 	unsigned long hot_temp;
+	bool bios_enabled;
 };
 
 static int pch_wpt_init(struct pch_thermal_device *ptd, int *nr_trips)
@@ -75,8 +77,10 @@ static int pch_wpt_init(struct pch_thermal_device *ptd, int *nr_trips)
 	*nr_trips = 0;
 
 	/* Check if BIOS has already enabled thermal sensor */
-	if (WPT_TSS_TSDSS & readb(ptd->hw_base + WPT_TSS))
+	if (WPT_TSS_TSDSS & readb(ptd->hw_base + WPT_TSS)) {
+		ptd->bios_enabled = true;
 		goto read_trips;
+	}
 
 	tsel = readb(ptd->hw_base + WPT_TSEL);
 	/*
@@ -130,9 +134,39 @@ static int pch_wpt_get_temp(struct pch_thermal_device *ptd, int *temp)
 	return 0;
 }
 
+static int pch_wpt_suspend(struct pch_thermal_device *ptd)
+{
+	u8 tsel;
+
+	if (ptd->bios_enabled)
+		return 0;
+
+	tsel = readb(ptd->hw_base + WPT_TSEL);
+
+	writeb(tsel & 0xFE, ptd->hw_base + WPT_TSEL);
+
+	return 0;
+}
+
+static int pch_wpt_resume(struct pch_thermal_device *ptd)
+{
+	u8 tsel;
+
+	if (ptd->bios_enabled)
+		return 0;
+
+	tsel = readb(ptd->hw_base + WPT_TSEL);
+
+	writeb(tsel | WPT_TSEL_ETS, ptd->hw_base + WPT_TSEL);
+
+	return 0;
+}
+
 struct pch_dev_ops {
 	int (*hw_init)(struct pch_thermal_device *ptd, int *nr_trips);
 	int (*get_temp)(struct pch_thermal_device *ptd, int *temp);
+	int (*suspend)(struct pch_thermal_device *ptd);
+	int (*resume)(struct pch_thermal_device *ptd);
 };
 
 
@@ -140,6 +174,8 @@ struct pch_dev_ops {
 static const struct pch_dev_ops pch_dev_ops_wpt = {
 	.hw_init = pch_wpt_init,
 	.get_temp = pch_wpt_get_temp,
+	.suspend = pch_wpt_suspend,
+	.resume = pch_wpt_resume,
 };
 
 static int pch_thermal_get_temp(struct thermal_zone_device *tzd, int *temp)
@@ -269,6 +305,22 @@ static void intel_pch_thermal_remove(struct pci_dev *pdev)
 	pci_disable_device(pdev);
 }
 
+static int intel_pch_thermal_suspend(struct device *device)
+{
+	struct pci_dev *pdev = to_pci_dev(device);
+	struct pch_thermal_device *ptd = pci_get_drvdata(pdev);
+
+	return ptd->ops->suspend(ptd);
+}
+
+static int intel_pch_thermal_resume(struct device *device)
+{
+	struct pci_dev *pdev = to_pci_dev(device);
+	struct pch_thermal_device *ptd = pci_get_drvdata(pdev);
+
+	return ptd->ops->resume(ptd);
+}
+
 static struct pci_device_id intel_pch_thermal_id[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_WPT) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_SKL) },
@@ -276,11 +328,17 @@ static struct pci_device_id intel_pch_thermal_id[] = {
 };
 MODULE_DEVICE_TABLE(pci, intel_pch_thermal_id);
 
+static const struct dev_pm_ops intel_pch_pm_ops = {
+	.suspend = intel_pch_thermal_suspend,
+	.resume = intel_pch_thermal_resume,
+};
+
 static struct pci_driver intel_pch_thermal_driver = {
 	.name		= "intel_pch_thermal",
 	.id_table	= intel_pch_thermal_id,
 	.probe		= intel_pch_thermal_probe,
 	.remove		= intel_pch_thermal_remove,
+	.driver.pm	= &intel_pch_pm_ops,
 };
 
 module_pci_driver(intel_pch_thermal_driver);
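The PM support is wired up in two layers: a device-agnostic dev_pm_ops table that recovers the driver's private data from struct device, delegating to per-chip suspend/resume callbacks in pch_dev_ops. A compact standalone sketch of that delegation shape, with hypothetical names in place of the kernel structures:

#include <stdio.h>

struct ptd;	/* driver-private device state */

struct dev_ops {	/* per-chip-generation callbacks */
	int (*suspend)(struct ptd *);
	int (*resume)(struct ptd *);
};

struct ptd {
	const struct dev_ops *ops;
};

static int wpt_suspend(struct ptd *p) { (void)p; puts("sensor off"); return 0; }
static int wpt_resume(struct ptd *p)  { (void)p; puts("sensor on");  return 0; }

static const struct dev_ops wpt_ops = {
	.suspend = wpt_suspend,
	.resume  = wpt_resume,
};

/* The generic layer only knows the ops table, never the chip. */
static int generic_suspend(struct ptd *p) { return p->ops->suspend(p); }
static int generic_resume(struct ptd *p)  { return p->ops->resume(p); }

int main(void)
{
	struct ptd p = { .ops = &wpt_ops };

	generic_suspend(&p);
	generic_resume(&p);
	return 0;
}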
diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c
index 015ce2eb6eb7..0e4dc0afcfd2 100644
--- a/drivers/thermal/intel_powerclamp.c
+++ b/drivers/thermal/intel_powerclamp.c
@@ -388,7 +388,7 @@ static int clamp_thread(void *arg)
 	int sleeptime;
 	unsigned long target_jiffies;
 	unsigned int guard;
-	unsigned int compensation = 0;
+	unsigned int compensated_ratio;
 	int interval; /* jiffies to sleep for each attempt */
 	unsigned int duration_jiffies = msecs_to_jiffies(duration);
 	unsigned int window_size_now;
@@ -409,8 +409,11 @@ static int clamp_thread(void *arg)
 		 * c-states, thus we need to compensate the injected idle ratio
 		 * to achieve the actual target reported by the HW.
 		 */
-		compensation = get_compensation(target_ratio);
-		interval = duration_jiffies*100/(target_ratio+compensation);
+		compensated_ratio = target_ratio +
+			get_compensation(target_ratio);
+		if (compensated_ratio <= 0)
+			compensated_ratio = 1;
+		interval = duration_jiffies * 100 / compensated_ratio;
 
 		/* align idle time */
 		target_jiffies = roundup(jiffies, interval);
@@ -647,8 +650,8 @@ static int powerclamp_set_cur_state(struct thermal_cooling_device *cdev,
 		goto exit_set;
 	} else	if (set_target_ratio > 0 && new_target_ratio == 0) {
 		pr_info("Stop forced idle injection\n");
-		set_target_ratio = 0;
 		end_power_clamp();
+		set_target_ratio = 0;
 	} else	/* adjust currently running */ {
 		set_target_ratio = new_target_ratio;
 		/* make new set_target_ratio visible to other cpus */
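The clamp in the powerclamp hunk exists because a compensation value can cancel the target ratio out entirely, and the result is used as a divisor. Since compensated_ratio is unsigned, "<= 0" catches exactly the zero (or wrapped-to-zero) case before the division. A standalone sketch of the same guard, with a hypothetical worst-case get_compensation():

#include <stdio.h>

static int get_compensation(unsigned int ratio)
{
	return -(int)ratio;	/* worst case: cancels the target out */
}

int main(void)
{
	unsigned int target_ratio = 5;
	unsigned int duration_jiffies = 600;
	unsigned int compensated_ratio;

	compensated_ratio = target_ratio + get_compensation(target_ratio);
	if (compensated_ratio <= 0)	/* unsigned: catches exactly 0 */
		compensated_ratio = 1;	/* never divide by zero below */

	printf("interval=%u\n", duration_jiffies * 100 / compensated_ratio);
	return 0;
}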
diff --git a/drivers/thermal/power_allocator.c b/drivers/thermal/power_allocator.c
index 2f1a863a8e15..b4d3116cfdaf 100644
--- a/drivers/thermal/power_allocator.c
+++ b/drivers/thermal/power_allocator.c
@@ -529,7 +529,9 @@ static void allow_maximum_power(struct thermal_zone_device *tz)
 			continue;
 
 		instance->target = 0;
+		mutex_lock(&instance->cdev->lock);
 		instance->cdev->updated = false;
+		mutex_unlock(&instance->cdev->lock);
 		thermal_cdev_update(instance->cdev);
 	}
 }
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c
index 71a339271fa5..5f817923f374 100644
--- a/drivers/thermal/rcar_thermal.c
+++ b/drivers/thermal/rcar_thermal.c
@@ -504,6 +504,7 @@ static int rcar_thermal_probe(struct platform_device *pdev)
 	if (IS_ERR(priv->zone)) {
 		dev_err(dev, "can't register thermal zone\n");
 		ret = PTR_ERR(priv->zone);
+		priv->zone = NULL;
 		goto error_unregister;
 	}
 
diff --git a/drivers/thermal/step_wise.c b/drivers/thermal/step_wise.c
index ea9366ad3e6b..bcef2e7c4ec9 100644
--- a/drivers/thermal/step_wise.c
+++ b/drivers/thermal/step_wise.c
@@ -175,7 +175,9 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
 			update_passive_instance(tz, trip_type, -1);
 
 		instance->initialized = true;
+		mutex_lock(&instance->cdev->lock);
 		instance->cdev->updated = false; /* cdev needs update */
+		mutex_unlock(&instance->cdev->lock);
 	}
 
 	mutex_unlock(&tz->lock);
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 5133cd1e10b7..e2fc6161dded 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -1093,7 +1093,9 @@ int power_actor_set_power(struct thermal_cooling_device *cdev,
 		return ret;
 
 	instance->target = state;
+	mutex_lock(&cdev->lock);
 	cdev->updated = false;
+	mutex_unlock(&cdev->lock);
 	thermal_cdev_update(cdev);
 
 	return 0;
@@ -1623,11 +1625,13 @@ void thermal_cdev_update(struct thermal_cooling_device *cdev)
 	struct thermal_instance *instance;
 	unsigned long target = 0;
 
+	mutex_lock(&cdev->lock);
 	/* cooling device is updated*/
-	if (cdev->updated)
+	if (cdev->updated) {
+		mutex_unlock(&cdev->lock);
 		return;
+	}
 
-	mutex_lock(&cdev->lock);
 	/* Make sure cdev enters the deepest cooling state */
 	list_for_each_entry(instance, &cdev->thermal_instances, cdev_node) {
 		dev_dbg(&cdev->device, "zone%d->target=%lu\n",
@@ -1637,9 +1641,9 @@ void thermal_cdev_update(struct thermal_cooling_device *cdev)
 		if (instance->target > target)
 			target = instance->target;
 	}
-	mutex_unlock(&cdev->lock);
 	cdev->ops->set_cur_state(cdev, target);
 	cdev->updated = true;
+	mutex_unlock(&cdev->lock);
 	trace_cdev_update(cdev, target);
 	dev_dbg(&cdev->device, "set to state %lu\n", target);
 }
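The thermal_core.c hunks widen cdev->lock so that the updated flag is tested, the state is written, and the flag is set inside one critical section; otherwise a concurrent caller could clear the flag between the check and the write and have its request silently lost. A pthreads sketch of the pattern (compile with -pthread):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool updated;
static unsigned long state;

/* Flag check, state write and flag set share one critical section. */
static void cdev_update(unsigned long target)
{
	pthread_mutex_lock(&lock);
	if (updated) {
		pthread_mutex_unlock(&lock);
		return;
	}
	state = target;		/* models set_cur_state() */
	updated = true;
	pthread_mutex_unlock(&lock);
}

static void request_state(unsigned long target)
{
	pthread_mutex_lock(&lock);
	updated = false;	/* mark stale under the same lock */
	pthread_mutex_unlock(&lock);
	cdev_update(target);
}

int main(void)
{
	request_state(3);
	printf("state=%lu updated=%d\n", state, updated);
	return 0;
}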
diff --git a/drivers/thermal/thermal_hwmon.c b/drivers/thermal/thermal_hwmon.c
index 06fd2ed9ef9d..c41c7742903a 100644
--- a/drivers/thermal/thermal_hwmon.c
+++ b/drivers/thermal/thermal_hwmon.c
@@ -232,6 +232,7 @@ int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
 
 	return result;
 }
+EXPORT_SYMBOL_GPL(thermal_add_hwmon_sysfs);
 
 void thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz)
 {
@@ -270,3 +271,4 @@ void thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz)
 	hwmon_device_unregister(hwmon->device);
 	kfree(hwmon);
 }
+EXPORT_SYMBOL_GPL(thermal_remove_hwmon_sysfs);
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
index 9c15344b657a..a8c20413dbda 100644
--- a/drivers/thunderbolt/nhi.c
+++ b/drivers/thunderbolt/nhi.c
@@ -651,6 +651,12 @@ static struct pci_device_id nhi_ids[] = {
 	{
 		.class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
 		.vendor = PCI_VENDOR_ID_INTEL,
+		.device = PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI,
+		.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
+	},
+	{
+		.class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
+		.vendor = PCI_VENDOR_ID_INTEL,
 		.device = PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI,
 		.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
 	},
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index 1e116f53d6dd..9840fdecb73b 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -372,7 +372,9 @@ struct tb_switch *tb_switch_alloc(struct tb *tb, u64 route)
 
 	if (sw->config.device_id != PCI_DEVICE_ID_INTEL_LIGHT_RIDGE &&
 	    sw->config.device_id != PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C &&
-	    sw->config.device_id != PCI_DEVICE_ID_INTEL_PORT_RIDGE)
+	    sw->config.device_id != PCI_DEVICE_ID_INTEL_PORT_RIDGE &&
+	    sw->config.device_id != PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE &&
+	    sw->config.device_id != PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE)
 		tb_sw_warn(sw, "unsupported switch device id %#x\n",
 			   sw->config.device_id);
 
diff --git a/drivers/tty/serial/8250/8250.h b/drivers/tty/serial/8250/8250.h
index 122e0e4029fe..1a16feac9a36 100644
--- a/drivers/tty/serial/8250/8250.h
+++ b/drivers/tty/serial/8250/8250.h
@@ -15,8 +15,6 @@
 #include <linux/serial_reg.h>
 #include <linux/dmaengine.h>
 
-#include "../serial_mctrl_gpio.h"
-
 struct uart_8250_dma {
 	int (*tx_dma)(struct uart_8250_port *p);
 	int (*rx_dma)(struct uart_8250_port *p);
@@ -133,43 +131,12 @@ void serial8250_em485_destroy(struct uart_8250_port *p);
 
 static inline void serial8250_out_MCR(struct uart_8250_port *up, int value)
 {
-	int mctrl_gpio = 0;
-
 	serial_out(up, UART_MCR, value);
-
-	if (value & UART_MCR_RTS)
-		mctrl_gpio |= TIOCM_RTS;
-	if (value & UART_MCR_DTR)
-		mctrl_gpio |= TIOCM_DTR;
-
-	mctrl_gpio_set(up->gpios, mctrl_gpio);
 }
 
 static inline int serial8250_in_MCR(struct uart_8250_port *up)
 {
-	int mctrl, mctrl_gpio = 0;
-
-	mctrl = serial_in(up, UART_MCR);
-
-	/* save current MCR values */
-	if (mctrl & UART_MCR_RTS)
-		mctrl_gpio |= TIOCM_RTS;
-	if (mctrl & UART_MCR_DTR)
-		mctrl_gpio |= TIOCM_DTR;
-
-	mctrl_gpio = mctrl_gpio_get_outputs(up->gpios, &mctrl_gpio);
-
-	if (mctrl_gpio & TIOCM_RTS)
-		mctrl |= UART_MCR_RTS;
-	else
-		mctrl &= ~UART_MCR_RTS;
-
-	if (mctrl_gpio & TIOCM_DTR)
-		mctrl |= UART_MCR_DTR;
-	else
-		mctrl &= ~UART_MCR_DTR;
-
-	return mctrl;
+	return serial_in(up, UART_MCR);
 }
 
 #if defined(__alpha__) && !defined(CONFIG_PCI)
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index 13ad5c3d2e68..dcf43f66404f 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -974,8 +974,6 @@ int serial8250_register_8250_port(struct uart_8250_port *up)
 
 	uart = serial8250_find_match_or_unused(&up->port);
 	if (uart && uart->port.type != PORT_8250_CIR) {
-		struct mctrl_gpios *gpios;
-
 		if (uart->port.dev)
 			uart_remove_one_port(&serial8250_reg, &uart->port);
 
@@ -1013,13 +1011,6 @@ int serial8250_register_8250_port(struct uart_8250_port *up)
 		if (up->port.flags & UPF_FIXED_TYPE)
 			uart->port.type = up->port.type;
 
-		gpios = mctrl_gpio_init(&uart->port, 0);
-		if (IS_ERR(gpios)) {
-			if (PTR_ERR(gpios) != -ENOSYS)
-				return PTR_ERR(gpios);
-		} else
-			uart->gpios = gpios;
-
 		serial8250_set_defaults(uart);
 
 		/* Possibly override default I/O functions. */
diff --git a/drivers/tty/serial/8250/8250_fintek.c b/drivers/tty/serial/8250/8250_fintek.c
index 737b4b3957b0..0facc789fe7d 100644
--- a/drivers/tty/serial/8250/8250_fintek.c
+++ b/drivers/tty/serial/8250/8250_fintek.c
@@ -31,7 +31,7 @@
 #define IO_ADDR2	0x60
 #define LDN		0x7
 
-#define IRQ_MODE	0x70
+#define FINTEK_IRQ_MODE	0x70
 #define IRQ_SHARE	BIT(4)
 #define IRQ_MODE_MASK	(BIT(6) | BIT(5))
 #define IRQ_LEVEL_LOW	0
@@ -195,7 +195,7 @@ static int fintek_8250_set_irq_mode(struct fintek_8250 *pdata, bool level_mode)
 	outb(LDN, pdata->base_port + ADDR_PORT);
 	outb(pdata->index, pdata->base_port + DATA_PORT);
 
-	outb(IRQ_MODE, pdata->base_port + ADDR_PORT);
+	outb(FINTEK_IRQ_MODE, pdata->base_port + ADDR_PORT);
 	tmp = inb(pdata->base_port + DATA_PORT);
 
 	tmp &= ~IRQ_MODE_MASK;
diff --git a/drivers/tty/serial/8250/8250_mid.c b/drivers/tty/serial/8250/8250_mid.c
index 339de9cd0866..20c5db2f4264 100644
--- a/drivers/tty/serial/8250/8250_mid.c
+++ b/drivers/tty/serial/8250/8250_mid.c
@@ -168,6 +168,9 @@ static void mid8250_set_termios(struct uart_port *p,
168 unsigned long w = BIT(24) - 1; 168 unsigned long w = BIT(24) - 1;
169 unsigned long mul, div; 169 unsigned long mul, div;
170 170
171 /* Gracefully handle the B0 case: fall back to B9600 */
172 fuart = fuart ? fuart : 9600 * 16;
173
171 if (mid->board->freq < fuart) { 174 if (mid->board->freq < fuart) {
172 /* Find prescaler value that satisfies Fuart < Fref */ 175 /* Find prescaler value that satisfies Fuart < Fref */
173 if (mid->board->freq > baud) 176 if (mid->board->freq > baud)
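
The two added lines guard the clock math that follows: B0 ("hang up") requests a baud rate of 0, and with 16x oversampling fuart would then be 0 and poison the prescaler search below. A small sketch of the guard, assuming fuart = baud * 16 as is conventional for 8250-class UARTs (illustrative, not the driver code):

    /* Pick the UART input clock target for a requested baud rate. */
    static unsigned long pick_fuart(unsigned long baud)
    {
        unsigned long fuart = baud * 16;

        /* B0 gives baud == 0; fall back to 9600 baud rather than
         * feeding zero into the mul/div search, as the hunk above does. */
        if (!fuart)
            fuart = 9600 * 16;
        return fuart;
    }
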
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
index e14982f36a04..61ad6c3b20a0 100644
--- a/drivers/tty/serial/8250/8250_omap.c
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -134,21 +134,18 @@ static void omap8250_set_mctrl(struct uart_port *port, unsigned int mctrl)
134 134
135 serial8250_do_set_mctrl(port, mctrl); 135 serial8250_do_set_mctrl(port, mctrl);
136 136
137 if (IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(up->gpios, 137 /*
138 UART_GPIO_RTS))) { 138 * Turn off autoRTS if RTS is lowered and restore autoRTS setting
139 /* 139 * if RTS is raised
140 * Turn off autoRTS if RTS is lowered and restore autoRTS 140 */
141 * setting if RTS is raised 141 lcr = serial_in(up, UART_LCR);
142 */ 142 serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
143 lcr = serial_in(up, UART_LCR); 143 if ((mctrl & TIOCM_RTS) && (port->status & UPSTAT_AUTORTS))
144 serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); 144 priv->efr |= UART_EFR_RTS;
145 if ((mctrl & TIOCM_RTS) && (port->status & UPSTAT_AUTORTS)) 145 else
146 priv->efr |= UART_EFR_RTS; 146 priv->efr &= ~UART_EFR_RTS;
147 else 147 serial_out(up, UART_EFR, priv->efr);
148 priv->efr &= ~UART_EFR_RTS; 148 serial_out(up, UART_LCR, lcr);
149 serial_out(up, UART_EFR, priv->efr);
150 serial_out(up, UART_LCR, lcr);
151 }
152} 149}
153 150
154/* 151/*
@@ -449,9 +446,7 @@ static void omap_8250_set_termios(struct uart_port *port,
449 priv->efr = 0; 446 priv->efr = 0;
450 up->port.status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS | UPSTAT_AUTOXOFF); 447 up->port.status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS | UPSTAT_AUTOXOFF);
451 448
452 if (termios->c_cflag & CRTSCTS && up->port.flags & UPF_HARD_FLOW 449 if (termios->c_cflag & CRTSCTS && up->port.flags & UPF_HARD_FLOW) {
453 && IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(up->gpios,
454 UART_GPIO_RTS))) {
455 /* Enable AUTOCTS (autoRTS is enabled when RTS is raised) */ 450 /* Enable AUTOCTS (autoRTS is enabled when RTS is raised) */
456 up->port.status |= UPSTAT_AUTOCTS | UPSTAT_AUTORTS; 451 up->port.status |= UPSTAT_AUTOCTS | UPSTAT_AUTORTS;
457 priv->efr |= UART_EFR_CTS; 452 priv->efr |= UART_EFR_CTS;
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 20ebaea5c414..bc51b32b2774 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -1950,6 +1950,43 @@ pci_wch_ch38x_setup(struct serial_private *priv,
1950#define PCI_DEVICE_ID_PERICOM_PI7C9X7954 0x7954 1950#define PCI_DEVICE_ID_PERICOM_PI7C9X7954 0x7954
1951#define PCI_DEVICE_ID_PERICOM_PI7C9X7958 0x7958 1951#define PCI_DEVICE_ID_PERICOM_PI7C9X7958 0x7958
1952 1952
1953#define PCI_VENDOR_ID_ACCESIO 0x494f
1954#define PCI_DEVICE_ID_ACCESIO_PCIE_COM_2SDB 0x1051
1955#define PCI_DEVICE_ID_ACCESIO_MPCIE_COM_2S 0x1053
1956#define PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SDB 0x105C
1957#define PCI_DEVICE_ID_ACCESIO_MPCIE_COM_4S 0x105E
1958#define PCI_DEVICE_ID_ACCESIO_PCIE_COM232_2DB 0x1091
1959#define PCI_DEVICE_ID_ACCESIO_MPCIE_COM232_2 0x1093
1960#define PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4DB 0x1099
1961#define PCI_DEVICE_ID_ACCESIO_MPCIE_COM232_4 0x109B
1962#define PCI_DEVICE_ID_ACCESIO_PCIE_COM_2SMDB 0x10D1
1963#define PCI_DEVICE_ID_ACCESIO_MPCIE_COM_2SM 0x10D3
1964#define PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SMDB 0x10DA
1965#define PCI_DEVICE_ID_ACCESIO_MPCIE_COM_4SM 0x10DC
1966#define PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_1 0x1108
1967#define PCI_DEVICE_ID_ACCESIO_MPCIE_ICM422_2 0x1110
1968#define PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_2 0x1111
1969#define PCI_DEVICE_ID_ACCESIO_MPCIE_ICM422_4 0x1118
1970#define PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_4 0x1119
1971#define PCI_DEVICE_ID_ACCESIO_PCIE_ICM_2S 0x1152
1972#define PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4S 0x115A
1973#define PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_2 0x1190
1974#define PCI_DEVICE_ID_ACCESIO_MPCIE_ICM232_2 0x1191
1975#define PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_4 0x1198
1976#define PCI_DEVICE_ID_ACCESIO_MPCIE_ICM232_4 0x1199
1977#define PCI_DEVICE_ID_ACCESIO_PCIE_ICM_2SM 0x11D0
1978#define PCI_DEVICE_ID_ACCESIO_PCIE_COM422_4 0x105A
1979#define PCI_DEVICE_ID_ACCESIO_PCIE_COM485_4 0x105B
1980#define PCI_DEVICE_ID_ACCESIO_PCIE_COM422_8 0x106A
1981#define PCI_DEVICE_ID_ACCESIO_PCIE_COM485_8 0x106B
1982#define PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4 0x1098
1983#define PCI_DEVICE_ID_ACCESIO_PCIE_COM232_8 0x10A9
1984#define PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SM 0x10D9
1985#define PCI_DEVICE_ID_ACCESIO_PCIE_COM_8SM 0x10E9
1986#define PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4SM 0x11D8
1987
1988
1989
1953/* Unknown vendors/cards - this should not be in linux/pci_ids.h */ 1990/* Unknown vendors/cards - this should not be in linux/pci_ids.h */
1954#define PCI_SUBDEVICE_ID_UNKNOWN_0x1584 0x1584 1991#define PCI_SUBDEVICE_ID_UNKNOWN_0x1584 0x1584
1955#define PCI_SUBDEVICE_ID_UNKNOWN_0x1588 0x1588 1992#define PCI_SUBDEVICE_ID_UNKNOWN_0x1588 0x1588
@@ -5113,6 +5150,108 @@ static struct pci_device_id serial_pci_tbl[] = {
5113 0, 5150 0,
5114 0, pbn_pericom_PI7C9X7958 }, 5151 0, pbn_pericom_PI7C9X7958 },
5115 /* 5152 /*
5153 * ACCES I/O Products quad
5154 */
5155 { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_2SDB,
5156 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
5157 pbn_pericom_PI7C9X7954 },
5158 { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM_2S,
5159 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
5160 pbn_pericom_PI7C9X7954 },
5161 { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SDB,
5162 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
5163 pbn_pericom_PI7C9X7954 },
5164 { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM_4S,
5165 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
5166 pbn_pericom_PI7C9X7954 },
5167 { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_2DB,
5168 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
5169 pbn_pericom_PI7C9X7954 },
5170 { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM232_2,
5171 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
5172 pbn_pericom_PI7C9X7954 },
5173 { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4DB,
5174 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
5175 pbn_pericom_PI7C9X7954 },
5176 { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM232_4,
5177 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
5178 pbn_pericom_PI7C9X7954 },
5179 { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_2SMDB,
5180 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
5181 pbn_pericom_PI7C9X7954 },
5182 { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM_2SM,
5183 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
5184 pbn_pericom_PI7C9X7954 },
5185 { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SMDB,
5186 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
5187 pbn_pericom_PI7C9X7954 },
5188 { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM_4SM,
5189 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
5190 pbn_pericom_PI7C9X7954 },
5191 { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_1,
5192 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
5193 pbn_pericom_PI7C9X7954 },
5194 { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM422_2,
5195 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
5196 pbn_pericom_PI7C9X7954 },
5197 { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_2,
5198 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
5199 pbn_pericom_PI7C9X7954 },
5200 { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM422_4,
5201 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
5202 pbn_pericom_PI7C9X7954 },
5203 { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_4,
5204 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
5205 pbn_pericom_PI7C9X7954 },
5206 { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_2S,
5207 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
5208 pbn_pericom_PI7C9X7954 },
5209 { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4S,
5210 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
5211 pbn_pericom_PI7C9X7954 },
5212 { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_2,
5213 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
5214 pbn_pericom_PI7C9X7954 },
5215 { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM232_2,
5216 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
5217 pbn_pericom_PI7C9X7954 },
5218 { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_4,
5219 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
5220 pbn_pericom_PI7C9X7954 },
5221 { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM232_4,
5222 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
5223 pbn_pericom_PI7C9X7954 },
5224 { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_2SM,
5225 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
5226 pbn_pericom_PI7C9X7954 },
5227 { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM422_4,
5228 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
5229 pbn_pericom_PI7C9X7958 },
5230 { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM485_4,
5231 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
5232 pbn_pericom_PI7C9X7958 },
5233 { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM422_8,
5234 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
5235 pbn_pericom_PI7C9X7958 },
5236 { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM485_8,
5237 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
5238 pbn_pericom_PI7C9X7958 },
5239 { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4,
5240 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
5241 pbn_pericom_PI7C9X7958 },
5242 { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_8,
5243 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
5244 pbn_pericom_PI7C9X7958 },
5245 { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SM,
5246 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
5247 pbn_pericom_PI7C9X7958 },
5248 { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_8SM,
5249 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
5250 pbn_pericom_PI7C9X7958 },
5251 { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4SM,
5252 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
5253 pbn_pericom_PI7C9X7958 },
5254 /*
5116 * Topic TP560 Data/Fax/Voice 56k modem (reported by Evan Clarke) 5255 * Topic TP560 Data/Fax/Voice 56k modem (reported by Evan Clarke)
5117 */ 5256 */
5118 { PCI_VENDOR_ID_TOPIC, PCI_DEVICE_ID_TOPIC_TP560, 5257 { PCI_VENDOR_ID_TOPIC, PCI_DEVICE_ID_TOPIC_TP560,
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index 7481b95c6d84..bdfa659b9606 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -1618,8 +1618,6 @@ static void serial8250_disable_ms(struct uart_port *port)
1618 if (up->bugs & UART_BUG_NOMSR) 1618 if (up->bugs & UART_BUG_NOMSR)
1619 return; 1619 return;
1620 1620
1621 mctrl_gpio_disable_ms(up->gpios);
1622
1623 up->ier &= ~UART_IER_MSI; 1621 up->ier &= ~UART_IER_MSI;
1624 serial_port_out(port, UART_IER, up->ier); 1622 serial_port_out(port, UART_IER, up->ier);
1625} 1623}
@@ -1632,8 +1630,6 @@ static void serial8250_enable_ms(struct uart_port *port)
1632 if (up->bugs & UART_BUG_NOMSR) 1630 if (up->bugs & UART_BUG_NOMSR)
1633 return; 1631 return;
1634 1632
1635 mctrl_gpio_enable_ms(up->gpios);
1636
1637 up->ier |= UART_IER_MSI; 1633 up->ier |= UART_IER_MSI;
1638 1634
1639 serial8250_rpm_get(up); 1635 serial8250_rpm_get(up);
@@ -1917,8 +1913,7 @@ unsigned int serial8250_do_get_mctrl(struct uart_port *port)
1917 ret |= TIOCM_DSR; 1913 ret |= TIOCM_DSR;
1918 if (status & UART_MSR_CTS) 1914 if (status & UART_MSR_CTS)
1919 ret |= TIOCM_CTS; 1915 ret |= TIOCM_CTS;
1920 1916 return ret;
1921 return mctrl_gpio_get(up->gpios, &ret);
1922} 1917}
1923EXPORT_SYMBOL_GPL(serial8250_do_get_mctrl); 1918EXPORT_SYMBOL_GPL(serial8250_do_get_mctrl);
1924 1919
diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
index c9ec839a5ddf..7c6f7afca5dd 100644
--- a/drivers/tty/serial/8250/Kconfig
+++ b/drivers/tty/serial/8250/Kconfig
@@ -6,7 +6,6 @@
6config SERIAL_8250 6config SERIAL_8250
7 tristate "8250/16550 and compatible serial support" 7 tristate "8250/16550 and compatible serial support"
8 select SERIAL_CORE 8 select SERIAL_CORE
9 select SERIAL_MCTRL_GPIO if GPIOLIB
10 ---help--- 9 ---help---
11 This selects whether you want to include the driver for the standard 10 This selects whether you want to include the driver for the standard
12 serial ports. The standard answer is Y. People who might say N 11 serial ports. The standard answer is Y. People who might say N
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index 065f5d97aa67..b93356834bb5 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -949,6 +949,15 @@ static int isr_setup_status_phase(struct ci_hdrc *ci)
949 int retval; 949 int retval;
950 struct ci_hw_ep *hwep; 950 struct ci_hw_ep *hwep;
951 951
952 /*
953 * Unexpected USB controller behavior, caused by bad signal integrity
954 * or ground reference problems, can lead to isr_setup_status_phase
955 * being called with ci->status equal to NULL.
956 * If this situation occurs, you should review your USB hardware design.
957 */
958 if (WARN_ON_ONCE(!ci->status))
959 return -EPIPE;
960
952 hwep = (ci->ep0_dir == TX) ? ci->ep0out : ci->ep0in; 961 hwep = (ci->ep0_dir == TX) ? ci->ep0out : ci->ep0in;
953 ci->status->context = ci; 962 ci->status->context = ci;
954 ci->status->complete = isr_setup_status_complete; 963 ci->status->complete = isr_setup_status_complete;
@@ -1596,8 +1605,11 @@ static int ci_udc_pullup(struct usb_gadget *_gadget, int is_on)
1596{ 1605{
1597 struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget); 1606 struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
1598 1607
1599 /* Data+ pullup controlled by OTG state machine in OTG fsm mode */ 1608 /*
1600 if (ci_otg_is_fsm_mode(ci)) 1609 * Data+ pullup controlled by OTG state machine in OTG fsm mode;
1610 * and don't touch Data+ in host mode for dual role config.
1611 */
1612 if (ci_otg_is_fsm_mode(ci) || ci->role == CI_ROLE_HOST)
1601 return 0; 1613 return 0;
1602 1614
1603 pm_runtime_get_sync(&ci->gadget.dev); 1615 pm_runtime_get_sync(&ci->gadget.dev);
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 71912301ef7f..0f3f62e81e5b 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1354,7 +1354,6 @@ made_compressed_probe:
1354 spin_lock_init(&acm->write_lock); 1354 spin_lock_init(&acm->write_lock);
1355 spin_lock_init(&acm->read_lock); 1355 spin_lock_init(&acm->read_lock);
1356 mutex_init(&acm->mutex); 1356 mutex_init(&acm->mutex);
1357 acm->rx_endpoint = usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress);
1358 acm->is_int_ep = usb_endpoint_xfer_int(epread); 1357 acm->is_int_ep = usb_endpoint_xfer_int(epread);
1359 if (acm->is_int_ep) 1358 if (acm->is_int_ep)
1360 acm->bInterval = epread->bInterval; 1359 acm->bInterval = epread->bInterval;
@@ -1394,14 +1393,14 @@ made_compressed_probe:
1394 urb->transfer_dma = rb->dma; 1393 urb->transfer_dma = rb->dma;
1395 if (acm->is_int_ep) { 1394 if (acm->is_int_ep) {
1396 usb_fill_int_urb(urb, acm->dev, 1395 usb_fill_int_urb(urb, acm->dev,
1397 acm->rx_endpoint, 1396 usb_rcvintpipe(usb_dev, epread->bEndpointAddress),
1398 rb->base, 1397 rb->base,
1399 acm->readsize, 1398 acm->readsize,
1400 acm_read_bulk_callback, rb, 1399 acm_read_bulk_callback, rb,
1401 acm->bInterval); 1400 acm->bInterval);
1402 } else { 1401 } else {
1403 usb_fill_bulk_urb(urb, acm->dev, 1402 usb_fill_bulk_urb(urb, acm->dev,
1404 acm->rx_endpoint, 1403 usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress),
1405 rb->base, 1404 rb->base,
1406 acm->readsize, 1405 acm->readsize,
1407 acm_read_bulk_callback, rb); 1406 acm_read_bulk_callback, rb);
diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
index 05ce308d5d2a..1f1eabfd8462 100644
--- a/drivers/usb/class/cdc-acm.h
+++ b/drivers/usb/class/cdc-acm.h
@@ -96,7 +96,6 @@ struct acm {
96 struct acm_rb read_buffers[ACM_NR]; 96 struct acm_rb read_buffers[ACM_NR];
97 struct acm_wb *putbuffer; /* for acm_tty_put_char() */ 97 struct acm_wb *putbuffer; /* for acm_tty_put_char() */
98 int rx_buflimit; 98 int rx_buflimit;
99 int rx_endpoint;
100 spinlock_t read_lock; 99 spinlock_t read_lock;
101 int write_used; /* number of non-empty write buffers */ 100 int write_used; /* number of non-empty write buffers */
102 int transmitting; 101 int transmitting;
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 31ccdccd7a04..a2d90aca779f 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -171,6 +171,31 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
171 ep, buffer, size); 171 ep, buffer, size);
172} 172}
173 173
174static const unsigned short low_speed_maxpacket_maxes[4] = {
175 [USB_ENDPOINT_XFER_CONTROL] = 8,
176 [USB_ENDPOINT_XFER_ISOC] = 0,
177 [USB_ENDPOINT_XFER_BULK] = 0,
178 [USB_ENDPOINT_XFER_INT] = 8,
179};
180static const unsigned short full_speed_maxpacket_maxes[4] = {
181 [USB_ENDPOINT_XFER_CONTROL] = 64,
182 [USB_ENDPOINT_XFER_ISOC] = 1023,
183 [USB_ENDPOINT_XFER_BULK] = 64,
184 [USB_ENDPOINT_XFER_INT] = 64,
185};
186static const unsigned short high_speed_maxpacket_maxes[4] = {
187 [USB_ENDPOINT_XFER_CONTROL] = 64,
188 [USB_ENDPOINT_XFER_ISOC] = 1024,
189 [USB_ENDPOINT_XFER_BULK] = 512,
190 [USB_ENDPOINT_XFER_INT] = 1024,
191};
192static const unsigned short super_speed_maxpacket_maxes[4] = {
193 [USB_ENDPOINT_XFER_CONTROL] = 512,
194 [USB_ENDPOINT_XFER_ISOC] = 1024,
195 [USB_ENDPOINT_XFER_BULK] = 1024,
196 [USB_ENDPOINT_XFER_INT] = 1024,
197};
198
174static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, 199static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
175 int asnum, struct usb_host_interface *ifp, int num_ep, 200 int asnum, struct usb_host_interface *ifp, int num_ep,
176 unsigned char *buffer, int size) 201 unsigned char *buffer, int size)
@@ -179,6 +204,8 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
179 struct usb_endpoint_descriptor *d; 204 struct usb_endpoint_descriptor *d;
180 struct usb_host_endpoint *endpoint; 205 struct usb_host_endpoint *endpoint;
181 int n, i, j, retval; 206 int n, i, j, retval;
207 unsigned int maxp;
208 const unsigned short *maxpacket_maxes;
182 209
183 d = (struct usb_endpoint_descriptor *) buffer; 210 d = (struct usb_endpoint_descriptor *) buffer;
184 buffer += d->bLength; 211 buffer += d->bLength;
@@ -213,8 +240,10 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
213 memcpy(&endpoint->desc, d, n); 240 memcpy(&endpoint->desc, d, n);
214 INIT_LIST_HEAD(&endpoint->urb_list); 241 INIT_LIST_HEAD(&endpoint->urb_list);
215 242
216 /* Fix up bInterval values outside the legal range. Use 32 ms if no 243 /*
217 * proper value can be guessed. */ 244 * Fix up bInterval values outside the legal range.
245 * Use 10 or 8 ms if no proper value can be guessed.
246 */
218 i = 0; /* i = min, j = max, n = default */ 247 i = 0; /* i = min, j = max, n = default */
219 j = 255; 248 j = 255;
220 if (usb_endpoint_xfer_int(d)) { 249 if (usb_endpoint_xfer_int(d)) {
@@ -223,13 +252,15 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
223 case USB_SPEED_SUPER_PLUS: 252 case USB_SPEED_SUPER_PLUS:
224 case USB_SPEED_SUPER: 253 case USB_SPEED_SUPER:
225 case USB_SPEED_HIGH: 254 case USB_SPEED_HIGH:
226 /* Many device manufacturers are using full-speed 255 /*
256 * Many device manufacturers are using full-speed
227 * bInterval values in high-speed interrupt endpoint 257 * bInterval values in high-speed interrupt endpoint
228 * descriptors. Try to fix those and fall back to a 258 * descriptors. Try to fix those and fall back to an
229 * 32 ms default value otherwise. */ 259 * 8-ms default value otherwise.
260 */
230 n = fls(d->bInterval*8); 261 n = fls(d->bInterval*8);
231 if (n == 0) 262 if (n == 0)
232 n = 9; /* 32 ms = 2^(9-1) uframes */ 263 n = 7; /* 8 ms = 2^(7-1) uframes */
233 j = 16; 264 j = 16;
234 265
235 /* 266 /*
@@ -244,10 +275,12 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
244 } 275 }
245 break; 276 break;
246 default: /* USB_SPEED_FULL or _LOW */ 277 default: /* USB_SPEED_FULL or _LOW */
247 /* For low-speed, 10 ms is the official minimum. 278 /*
279 * For low-speed, 10 ms is the official minimum.
248 * But some "overclocked" devices might want faster 280 * But some "overclocked" devices might want faster
249 * polling so we'll allow it. */ 281 * polling so we'll allow it.
250 n = 32; 282 */
283 n = 10;
251 break; 284 break;
252 } 285 }
253 } else if (usb_endpoint_xfer_isoc(d)) { 286 } else if (usb_endpoint_xfer_isoc(d)) {
@@ -255,10 +288,10 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
255 j = 16; 288 j = 16;
256 switch (to_usb_device(ddev)->speed) { 289 switch (to_usb_device(ddev)->speed) {
257 case USB_SPEED_HIGH: 290 case USB_SPEED_HIGH:
258 n = 9; /* 32 ms = 2^(9-1) uframes */ 291 n = 7; /* 8 ms = 2^(7-1) uframes */
259 break; 292 break;
260 default: /* USB_SPEED_FULL */ 293 default: /* USB_SPEED_FULL */
261 n = 6; /* 32 ms = 2^(6-1) frames */ 294 n = 4; /* 8 ms = 2^(4-1) frames */
262 break; 295 break;
263 } 296 }
264 } 297 }
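
The constants changed in these hunks are exponents, not milliseconds: a high-speed bInterval encodes a period of 2^(bInterval-1) microframes of 125 us, and a full-speed isochronous bInterval encodes 2^(bInterval-1) frames of 1 ms. That is why 7 and 4 both land on the new 8 ms default, where 9 and 6 gave 32 ms. A compilable check of the arithmetic:

    #include <stdio.h>

    /* Period in microseconds for an exponent-encoded bInterval. */
    static unsigned int hs_period_us(unsigned int binterval)
    {
        return (1u << (binterval - 1)) * 125;    /* 125 us microframes */
    }

    static unsigned int fs_isoc_period_us(unsigned int binterval)
    {
        return (1u << (binterval - 1)) * 1000;   /* 1 ms frames */
    }

    int main(void)
    {
        printf("%u %u\n", hs_period_us(7), fs_isoc_period_us(4)); /* 8000 8000 */
        printf("%u %u\n", hs_period_us(9), fs_isoc_period_us(6)); /* 32000 32000 */
        return 0;
    }
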
@@ -286,6 +319,42 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
286 endpoint->desc.wMaxPacketSize = cpu_to_le16(8); 319 endpoint->desc.wMaxPacketSize = cpu_to_le16(8);
287 } 320 }
288 321
322 /* Validate the wMaxPacketSize field */
323 maxp = usb_endpoint_maxp(&endpoint->desc);
324
325 /* Find the highest legal maxpacket size for this endpoint */
326 i = 0; /* additional transactions per microframe */
327 switch (to_usb_device(ddev)->speed) {
328 case USB_SPEED_LOW:
329 maxpacket_maxes = low_speed_maxpacket_maxes;
330 break;
331 case USB_SPEED_FULL:
332 maxpacket_maxes = full_speed_maxpacket_maxes;
333 break;
334 case USB_SPEED_HIGH:
335 /* Bits 12..11 are allowed only for HS periodic endpoints */
336 if (usb_endpoint_xfer_int(d) || usb_endpoint_xfer_isoc(d)) {
337 i = maxp & (BIT(12) | BIT(11));
338 maxp &= ~i;
339 }
340 /* fallthrough */
341 default:
342 maxpacket_maxes = high_speed_maxpacket_maxes;
343 break;
344 case USB_SPEED_SUPER:
345 case USB_SPEED_SUPER_PLUS:
346 maxpacket_maxes = super_speed_maxpacket_maxes;
347 break;
348 }
349 j = maxpacket_maxes[usb_endpoint_type(&endpoint->desc)];
350
351 if (maxp > j) {
352 dev_warn(ddev, "config %d interface %d altsetting %d endpoint 0x%X has invalid maxpacket %d, setting to %d\n",
353 cfgno, inum, asnum, d->bEndpointAddress, maxp, j);
354 maxp = j;
355 endpoint->desc.wMaxPacketSize = cpu_to_le16(i | maxp);
356 }
357
289 /* 358 /*
290 * Some buggy high speed devices have bulk endpoints using 359 * Some buggy high speed devices have bulk endpoints using
291 * maxpacket sizes other than 512. High speed HCDs may not 360 * maxpacket sizes other than 512. High speed HCDs may not
@@ -293,9 +362,6 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
293 */ 362 */
294 if (to_usb_device(ddev)->speed == USB_SPEED_HIGH 363 if (to_usb_device(ddev)->speed == USB_SPEED_HIGH
295 && usb_endpoint_xfer_bulk(d)) { 364 && usb_endpoint_xfer_bulk(d)) {
296 unsigned maxp;
297
298 maxp = usb_endpoint_maxp(&endpoint->desc) & 0x07ff;
299 if (maxp != 512) 365 if (maxp != 512)
300 dev_warn(ddev, "config %d interface %d altsetting %d " 366 dev_warn(ddev, "config %d interface %d altsetting %d "
301 "bulk endpoint 0x%X has invalid maxpacket %d\n", 367 "bulk endpoint 0x%X has invalid maxpacket %d\n",
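
In outline, the validation added above looks up the highest legal wMaxPacketSize for the device speed and transfer type, first stripping bits 12..11 on high-speed periodic endpoints (they encode up to two extra transactions per microframe and are put back after clamping). A self-contained sketch of that clamp, with the table abridged to the high-speed row from the hunk:

    #include <stdio.h>

    #define XFER_INT 3   /* mirrors USB_ENDPOINT_XFER_INT */

    /* Abridged: high-speed limits indexed by transfer type
     * (control, isoc, bulk, int). */
    static const unsigned short hs_maxes[4] = { 64, 1024, 512, 1024 };

    /* Clamp a raw wMaxPacketSize field of a HS interrupt endpoint. */
    static unsigned int clamp_maxp(unsigned int maxp)
    {
        unsigned int mult = maxp & (3u << 11);  /* extra-transaction bits */
        unsigned int base = maxp & ~(3u << 11);

        if (base > hs_maxes[XFER_INT])
            base = hs_maxes[XFER_INT];
        return mult | base;
    }

    int main(void)
    {
        /* two extra transactions plus a bogus 2047-byte base */
        printf("0x%x\n", clamp_maxp((2u << 11) | 2047)); /* 0x1400 */
        return 0;
    }
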
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index e9f5043a2167..09c8d9ca61ae 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -241,7 +241,8 @@ static int usbdev_mmap(struct file *file, struct vm_area_struct *vma)
241 goto error_decrease_mem; 241 goto error_decrease_mem;
242 } 242 }
243 243
244 mem = usb_alloc_coherent(ps->dev, size, GFP_USER, &dma_handle); 244 mem = usb_alloc_coherent(ps->dev, size, GFP_USER | __GFP_NOWARN,
245 &dma_handle);
245 if (!mem) { 246 if (!mem) {
246 ret = -ENOMEM; 247 ret = -ENOMEM;
247 goto error_free_usbm; 248 goto error_free_usbm;
@@ -1708,11 +1709,17 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
1708 as->urb->start_frame = uurb->start_frame; 1709 as->urb->start_frame = uurb->start_frame;
1709 as->urb->number_of_packets = number_of_packets; 1710 as->urb->number_of_packets = number_of_packets;
1710 as->urb->stream_id = stream_id; 1711 as->urb->stream_id = stream_id;
1711 if (uurb->type == USBDEVFS_URB_TYPE_ISO || 1712
1712 ps->dev->speed == USB_SPEED_HIGH) 1713 if (ep->desc.bInterval) {
1713 as->urb->interval = 1 << min(15, ep->desc.bInterval - 1); 1714 if (uurb->type == USBDEVFS_URB_TYPE_ISO ||
1714 else 1715 ps->dev->speed == USB_SPEED_HIGH ||
1715 as->urb->interval = ep->desc.bInterval; 1716 ps->dev->speed >= USB_SPEED_SUPER)
1717 as->urb->interval = 1 <<
1718 min(15, ep->desc.bInterval - 1);
1719 else
1720 as->urb->interval = ep->desc.bInterval;
1721 }
1722
1716 as->urb->context = as; 1723 as->urb->context = as;
1717 as->urb->complete = async_completed; 1724 as->urb->complete = async_completed;
1718 for (totlen = u = 0; u < number_of_packets; u++) { 1725 for (totlen = u = 0; u < number_of_packets; u++) {
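
The reworked block only derives urb->interval when the endpoint actually advertises one, and extends the exponential encoding (1 << (bInterval - 1), capped at 2^15) to SuperSpeed and faster devices, which use it just as high speed does. A sketch of the selection, with stand-in speed constants rather than the kernel enums:

    enum { SPEED_LOW, SPEED_FULL, SPEED_HIGH, SPEED_SUPER };

    /* Returns the interval to program; 0 stands in for "leave the
     * urb's default alone", which the kernel code does by not writing. */
    static int urb_interval(int is_iso, int speed, int binterval)
    {
        int e;

        if (!binterval)
            return 0;
        if (is_iso || speed >= SPEED_HIGH) {
            e = binterval - 1;
            if (e > 15)
                e = 15;
            return 1 << e;          /* exponential encoding */
        }
        return binterval;           /* low/full-speed interrupt: linear */
    }
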
@@ -2582,7 +2589,9 @@ static unsigned int usbdev_poll(struct file *file,
2582 if (file->f_mode & FMODE_WRITE && !list_empty(&ps->async_completed)) 2589 if (file->f_mode & FMODE_WRITE && !list_empty(&ps->async_completed))
2583 mask |= POLLOUT | POLLWRNORM; 2590 mask |= POLLOUT | POLLWRNORM;
2584 if (!connected(ps)) 2591 if (!connected(ps))
2585 mask |= POLLERR | POLLHUP; 2592 mask |= POLLHUP;
2593 if (list_empty(&ps->list))
2594 mask |= POLLERR;
2586 return mask; 2595 return mask;
2587} 2596}
2588 2597
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index bee13517676f..1d5fc32d06d0 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -1052,14 +1052,11 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
1052 1052
1053 /* Continue a partial initialization */ 1053 /* Continue a partial initialization */
1054 if (type == HUB_INIT2 || type == HUB_INIT3) { 1054 if (type == HUB_INIT2 || type == HUB_INIT3) {
1055 device_lock(hub->intfdev); 1055 device_lock(&hdev->dev);
1056 1056
1057 /* Was the hub disconnected while we were waiting? */ 1057 /* Was the hub disconnected while we were waiting? */
1058 if (hub->disconnected) { 1058 if (hub->disconnected)
1059 device_unlock(hub->intfdev); 1059 goto disconnected;
1060 kref_put(&hub->kref, hub_release);
1061 return;
1062 }
1063 if (type == HUB_INIT2) 1060 if (type == HUB_INIT2)
1064 goto init2; 1061 goto init2;
1065 goto init3; 1062 goto init3;
@@ -1262,7 +1259,7 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
1262 queue_delayed_work(system_power_efficient_wq, 1259 queue_delayed_work(system_power_efficient_wq,
1263 &hub->init_work, 1260 &hub->init_work,
1264 msecs_to_jiffies(delay)); 1261 msecs_to_jiffies(delay));
1265 device_unlock(hub->intfdev); 1262 device_unlock(&hdev->dev);
1266 return; /* Continues at init3: below */ 1263 return; /* Continues at init3: below */
1267 } else { 1264 } else {
1268 msleep(delay); 1265 msleep(delay);
@@ -1281,12 +1278,12 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
1281 /* Scan all ports that need attention */ 1278 /* Scan all ports that need attention */
1282 kick_hub_wq(hub); 1279 kick_hub_wq(hub);
1283 1280
1284 /* Allow autosuspend if it was suppressed */ 1281 if (type == HUB_INIT2 || type == HUB_INIT3) {
1285 if (type <= HUB_INIT3) 1282 /* Allow autosuspend if it was suppressed */
1283 disconnected:
1286 usb_autopm_put_interface_async(to_usb_interface(hub->intfdev)); 1284 usb_autopm_put_interface_async(to_usb_interface(hub->intfdev));
1287 1285 device_unlock(&hdev->dev);
1288 if (type == HUB_INIT2 || type == HUB_INIT3) 1286 }
1289 device_unlock(hub->intfdev);
1290 1287
1291 kref_put(&hub->kref, hub_release); 1288 kref_put(&hub->kref, hub_release);
1292} 1289}
@@ -1315,8 +1312,6 @@ static void hub_quiesce(struct usb_hub *hub, enum hub_quiescing_type type)
1315 struct usb_device *hdev = hub->hdev; 1312 struct usb_device *hdev = hub->hdev;
1316 int i; 1313 int i;
1317 1314
1318 cancel_delayed_work_sync(&hub->init_work);
1319
1320 /* hub_wq and related activity won't re-trigger */ 1315 /* hub_wq and related activity won't re-trigger */
1321 hub->quiescing = 1; 1316 hub->quiescing = 1;
1322 1317
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
index 9fae0291cd69..d64551243789 100644
--- a/drivers/usb/dwc2/core.h
+++ b/drivers/usb/dwc2/core.h
@@ -868,6 +868,7 @@ struct dwc2_hsotg {
868 void *priv; 868 void *priv;
869 int irq; 869 int irq;
870 struct clk *clk; 870 struct clk *clk;
871 struct reset_control *reset;
871 872
872 unsigned int queuing_high_bandwidth:1; 873 unsigned int queuing_high_bandwidth:1;
873 unsigned int srp_success:1; 874 unsigned int srp_success:1;
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
index fc6f5251de5d..530959a8a6d1 100644
--- a/drivers/usb/dwc2/platform.c
+++ b/drivers/usb/dwc2/platform.c
@@ -45,6 +45,7 @@
45#include <linux/platform_device.h> 45#include <linux/platform_device.h>
46#include <linux/phy/phy.h> 46#include <linux/phy/phy.h>
47#include <linux/platform_data/s3c-hsotg.h> 47#include <linux/platform_data/s3c-hsotg.h>
48#include <linux/reset.h>
48 49
49#include <linux/usb/of.h> 50#include <linux/usb/of.h>
50 51
@@ -337,6 +338,24 @@ static int dwc2_lowlevel_hw_init(struct dwc2_hsotg *hsotg)
337{ 338{
338 int i, ret; 339 int i, ret;
339 340
341 hsotg->reset = devm_reset_control_get_optional(hsotg->dev, "dwc2");
342 if (IS_ERR(hsotg->reset)) {
343 ret = PTR_ERR(hsotg->reset);
344 switch (ret) {
345 case -ENOENT:
346 case -ENOTSUPP:
347 hsotg->reset = NULL;
348 break;
349 default:
350 dev_err(hsotg->dev, "error getting reset control %d\n",
351 ret);
352 return ret;
353 }
354 }
355
356 if (hsotg->reset)
357 reset_control_deassert(hsotg->reset);
358
340 /* Set default UTMI width */ 359 /* Set default UTMI width */
341 hsotg->phyif = GUSBCFG_PHYIF16; 360 hsotg->phyif = GUSBCFG_PHYIF16;
342 361
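
The acquisition above is the standard pattern for an optional reset line: ask for it, treat -ENOENT (no reset described for the device) and -ENOTSUPP (reset framework not built in) as "not present" rather than fatal, and only deassert when a controller was actually found. A kernel-style sketch of the shape, matching the 4.8-era API used in the hunk (not compilable standalone):

    static int get_optional_reset(struct dwc2_hsotg *hsotg)
    {
        hsotg->reset = devm_reset_control_get_optional(hsotg->dev, "dwc2");
        if (IS_ERR(hsotg->reset)) {
            int ret = PTR_ERR(hsotg->reset);

            /* "missing" and "framework disabled" are both fine */
            if (ret == -ENOENT || ret == -ENOTSUPP) {
                hsotg->reset = NULL;
                return 0;
            }
            return ret;     /* anything else must be propagated */
        }
        reset_control_deassert(hsotg->reset);
        return 0;
    }
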
@@ -434,6 +453,9 @@ static int dwc2_driver_remove(struct platform_device *dev)
434 if (hsotg->ll_hw_enabled) 453 if (hsotg->ll_hw_enabled)
435 dwc2_lowlevel_hw_disable(hsotg); 454 dwc2_lowlevel_hw_disable(hsotg);
436 455
456 if (hsotg->reset)
457 reset_control_assert(hsotg->reset);
458
437 return 0; 459 return 0;
438} 460}
439 461
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 946643157b78..35d092456bec 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -1192,6 +1192,7 @@ static int dwc3_runtime_resume(struct device *dev)
1192 } 1192 }
1193 1193
1194 pm_runtime_mark_last_busy(dev); 1194 pm_runtime_mark_last_busy(dev);
1195 pm_runtime_put(dev);
1195 1196
1196 return 0; 1197 return 0;
1197} 1198}
diff --git a/drivers/usb/dwc3/debug.h b/drivers/usb/dwc3/debug.h
index 22dfc3dd6a13..33ab2a203c1b 100644
--- a/drivers/usb/dwc3/debug.h
+++ b/drivers/usb/dwc3/debug.h
@@ -192,7 +192,7 @@ dwc3_ep_event_string(const struct dwc3_event_depevt *event)
192 int ret; 192 int ret;
193 193
194 ret = sprintf(str, "ep%d%s: ", epnum >> 1, 194 ret = sprintf(str, "ep%d%s: ", epnum >> 1,
195 (epnum & 1) ? "in" : "in"); 195 (epnum & 1) ? "in" : "out");
196 if (ret < 0) 196 if (ret < 0)
197 return "UNKNOWN"; 197 return "UNKNOWN";
198 198
diff --git a/drivers/usb/dwc3/dwc3-of-simple.c b/drivers/usb/dwc3/dwc3-of-simple.c
index 974335377d9f..e56d59b19a0e 100644
--- a/drivers/usb/dwc3/dwc3-of-simple.c
+++ b/drivers/usb/dwc3/dwc3-of-simple.c
@@ -61,6 +61,7 @@ static int dwc3_of_simple_probe(struct platform_device *pdev)
61 if (!simple->clks) 61 if (!simple->clks)
62 return -ENOMEM; 62 return -ENOMEM;
63 63
64 platform_set_drvdata(pdev, simple);
64 simple->dev = dev; 65 simple->dev = dev;
65 66
66 for (i = 0; i < simple->num_clocks; i++) { 67 for (i = 0; i < simple->num_clocks; i++) {
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 45f5a232d9fb..6df0f5dad9a4 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -37,6 +37,7 @@
37#define PCI_DEVICE_ID_INTEL_BXT 0x0aaa 37#define PCI_DEVICE_ID_INTEL_BXT 0x0aaa
38#define PCI_DEVICE_ID_INTEL_BXT_M 0x1aaa 38#define PCI_DEVICE_ID_INTEL_BXT_M 0x1aaa
39#define PCI_DEVICE_ID_INTEL_APL 0x5aaa 39#define PCI_DEVICE_ID_INTEL_APL 0x5aaa
40#define PCI_DEVICE_ID_INTEL_KBP 0xa2b0
40 41
41static const struct acpi_gpio_params reset_gpios = { 0, 0, false }; 42static const struct acpi_gpio_params reset_gpios = { 0, 0, false };
42static const struct acpi_gpio_params cs_gpios = { 1, 0, false }; 43static const struct acpi_gpio_params cs_gpios = { 1, 0, false };
@@ -227,6 +228,7 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
227 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BXT), }, 228 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BXT), },
228 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BXT_M), }, 229 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BXT_M), },
229 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_APL), }, 230 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_APL), },
231 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBP), },
230 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB), }, 232 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB), },
231 { } /* Terminating Entry */ 233 { } /* Terminating Entry */
232}; 234};
@@ -241,6 +243,15 @@ static int dwc3_pci_runtime_suspend(struct device *dev)
241 return -EBUSY; 243 return -EBUSY;
242} 244}
243 245
246static int dwc3_pci_runtime_resume(struct device *dev)
247{
248 struct platform_device *dwc3 = dev_get_drvdata(dev);
249
250 return pm_runtime_get(&dwc3->dev);
251}
252#endif /* CONFIG_PM */
253
254#ifdef CONFIG_PM_SLEEP
244static int dwc3_pci_pm_dummy(struct device *dev) 255static int dwc3_pci_pm_dummy(struct device *dev)
245{ 256{
246 /* 257 /*
@@ -253,11 +264,11 @@ static int dwc3_pci_pm_dummy(struct device *dev)
253 */ 264 */
254 return 0; 265 return 0;
255} 266}
256#endif /* CONFIG_PM */ 267#endif /* CONFIG_PM_SLEEP */
257 268
258static struct dev_pm_ops dwc3_pci_dev_pm_ops = { 269static struct dev_pm_ops dwc3_pci_dev_pm_ops = {
259 SET_SYSTEM_SLEEP_PM_OPS(dwc3_pci_pm_dummy, dwc3_pci_pm_dummy) 270 SET_SYSTEM_SLEEP_PM_OPS(dwc3_pci_pm_dummy, dwc3_pci_pm_dummy)
260 SET_RUNTIME_PM_OPS(dwc3_pci_runtime_suspend, dwc3_pci_pm_dummy, 271 SET_RUNTIME_PM_OPS(dwc3_pci_runtime_suspend, dwc3_pci_runtime_resume,
261 NULL) 272 NULL)
262}; 273};
263 274
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 8f8c2157910e..122e64df2f4d 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -829,7 +829,7 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
829 if (!req->request.no_interrupt && !chain) 829 if (!req->request.no_interrupt && !chain)
830 trb->ctrl |= DWC3_TRB_CTRL_IOC | DWC3_TRB_CTRL_ISP_IMI; 830 trb->ctrl |= DWC3_TRB_CTRL_IOC | DWC3_TRB_CTRL_ISP_IMI;
831 831
832 if (last) 832 if (last && !usb_endpoint_xfer_isoc(dep->endpoint.desc))
833 trb->ctrl |= DWC3_TRB_CTRL_LST; 833 trb->ctrl |= DWC3_TRB_CTRL_LST;
834 834
835 if (chain) 835 if (chain)
@@ -1433,7 +1433,7 @@ static int dwc3_gadget_get_frame(struct usb_gadget *g)
1433 1433
1434static int __dwc3_gadget_wakeup(struct dwc3 *dwc) 1434static int __dwc3_gadget_wakeup(struct dwc3 *dwc)
1435{ 1435{
1436 unsigned long timeout; 1436 int retries;
1437 1437
1438 int ret; 1438 int ret;
1439 u32 reg; 1439 u32 reg;
@@ -1484,9 +1484,9 @@ static int __dwc3_gadget_wakeup(struct dwc3 *dwc)
1484 } 1484 }
1485 1485
1486 /* poll until Link State changes to ON */ 1486 /* poll until Link State changes to ON */
1487 timeout = jiffies + msecs_to_jiffies(100); 1487 retries = 20000;
1488 1488
1489 while (!time_after(jiffies, timeout)) { 1489 while (retries--) {
1490 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 1490 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1491 1491
1492 /* in HS, means ON */ 1492 /* in HS, means ON */
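
Swapping the jiffies deadline for a retry counter matters because this poll can run with interrupts disabled; if the tick is not advancing, time_after(jiffies, timeout) never becomes true and the loop spins forever, while a counter always terminates. The shape of the change, sketched with a hypothetical fake_dsts register standing in for the DSTS read:

    #include <errno.h>

    enum { LINK_STATE_ON = 0 };

    static volatile int fake_dsts = 1;   /* pretend hardware register */

    static int read_link_state(void)     /* stand-in for the DSTS read */
    {
        return fake_dsts;
    }

    static int wait_link_on(void)
    {
        int retries = 20000;

        while (retries--) {
            if (read_link_state() == LINK_STATE_ON)
                return 0;
        }
        return -ETIMEDOUT;  /* bounded: at most 20000 register reads */
    }
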
@@ -1955,7 +1955,8 @@ static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
1955 1955
1956static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep, 1956static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
1957 struct dwc3_request *req, struct dwc3_trb *trb, 1957 struct dwc3_request *req, struct dwc3_trb *trb,
1958 const struct dwc3_event_depevt *event, int status) 1958 const struct dwc3_event_depevt *event, int status,
1959 int chain)
1959{ 1960{
1960 unsigned int count; 1961 unsigned int count;
1961 unsigned int s_pkt = 0; 1962 unsigned int s_pkt = 0;
@@ -1964,17 +1965,22 @@ static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
1964 dep->queued_requests--; 1965 dep->queued_requests--;
1965 trace_dwc3_complete_trb(dep, trb); 1966 trace_dwc3_complete_trb(dep, trb);
1966 1967
1968 /*
 1969	 * If we're in the middle of a series of chained TRBs and we
 1970	 * receive a short transfer along the way, DWC3 will skip
 1971	 * through all TRBs including the last TRB in the chain (the
 1972	 * one where the CHN bit is zero). DWC3 will also avoid
 1973	 * clearing the HWO bit, and SW has to do it manually.
 1974	 *
 1975	 * We're going to do that here to avoid problems with the HW
 1976	 * trying to use bogus TRBs for transfers.
1977 */
1978 if (chain && (trb->ctrl & DWC3_TRB_CTRL_HWO))
1979 trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
1980
1967 if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN) 1981 if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN)
1968 /* 1982 return 1;
1969 * We continue despite the error. There is not much we 1983
1970 * can do. If we don't clean it up we loop forever. If
1971 * we skip the TRB then it gets overwritten after a
1972 * while since we use them in a ring buffer. A BUG()
1973 * would help. Lets hope that if this occurs, someone
1974 * fixes the root cause instead of looking away :)
1975 */
1976 dev_err(dwc->dev, "%s's TRB (%p) still owned by HW\n",
1977 dep->name, trb);
1978 count = trb->size & DWC3_TRB_SIZE_MASK; 1984 count = trb->size & DWC3_TRB_SIZE_MASK;
1979 1985
1980 if (dep->direction) { 1986 if (dep->direction) {
@@ -2013,15 +2019,7 @@ static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
2013 s_pkt = 1; 2019 s_pkt = 1;
2014 } 2020 }
2015 2021
2016 /* 2022 if (s_pkt && !chain)
2017 * We assume here we will always receive the entire data block
2018 * which we should receive. Meaning, if we program RX to
2019 * receive 4K but we receive only 2K, we assume that's all we
2020 * should receive and we simply bounce the request back to the
2021 * gadget driver for further processing.
2022 */
2023 req->request.actual += req->request.length - count;
2024 if (s_pkt)
2025 return 1; 2023 return 1;
2026 if ((event->status & DEPEVT_STATUS_LST) && 2024 if ((event->status & DEPEVT_STATUS_LST) &&
2027 (trb->ctrl & (DWC3_TRB_CTRL_LST | 2025 (trb->ctrl & (DWC3_TRB_CTRL_LST |
@@ -2040,13 +2038,17 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
2040 struct dwc3_trb *trb; 2038 struct dwc3_trb *trb;
2041 unsigned int slot; 2039 unsigned int slot;
2042 unsigned int i; 2040 unsigned int i;
2041 int count = 0;
2043 int ret; 2042 int ret;
2044 2043
2045 do { 2044 do {
2045 int chain;
2046
2046 req = next_request(&dep->started_list); 2047 req = next_request(&dep->started_list);
2047 if (WARN_ON_ONCE(!req)) 2048 if (WARN_ON_ONCE(!req))
2048 return 1; 2049 return 1;
2049 2050
2051 chain = req->request.num_mapped_sgs > 0;
2050 i = 0; 2052 i = 0;
2051 do { 2053 do {
2052 slot = req->first_trb_index + i; 2054 slot = req->first_trb_index + i;
@@ -2054,13 +2056,22 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
2054 slot++; 2056 slot++;
2055 slot %= DWC3_TRB_NUM; 2057 slot %= DWC3_TRB_NUM;
2056 trb = &dep->trb_pool[slot]; 2058 trb = &dep->trb_pool[slot];
2059 count += trb->size & DWC3_TRB_SIZE_MASK;
2057 2060
2058 ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb, 2061 ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb,
2059 event, status); 2062 event, status, chain);
2060 if (ret) 2063 if (ret)
2061 break; 2064 break;
2062 } while (++i < req->request.num_mapped_sgs); 2065 } while (++i < req->request.num_mapped_sgs);
2063 2066
2067 /*
2068 * We assume here we will always receive the entire data block
2069 * which we should receive. Meaning, if we program RX to
2070 * receive 4K but we receive only 2K, we assume that's all we
2071 * should receive and we simply bounce the request back to the
2072 * gadget driver for further processing.
2073 */
2074 req->request.actual += req->request.length - count;
2064 dwc3_gadget_giveback(dep, req, status); 2075 dwc3_gadget_giveback(dep, req, status);
2065 2076
2066 if (ret) 2077 if (ret)
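
The accounting move matters for scatter-gather: the old placement executed req->request.actual += req->request.length - count once per TRB, adding the full request length for every chained TRB. Summing the residuals first and adjusting actual once per request fixes that. A compilable sketch of the arithmetic (each TRB's size field holds the bytes the controller did not transfer):

    #include <stdio.h>

    static unsigned int bytes_actually_moved(unsigned int req_len,
                                             const unsigned int *residual,
                                             int ntrbs)
    {
        unsigned int count = 0;
        int i;

        for (i = 0; i < ntrbs; i++)
            count += residual[i];   /* trb->size & DWC3_TRB_SIZE_MASK */
        return req_len - count;     /* once per request, not per TRB */
    }

    int main(void)
    {
        unsigned int left[3] = { 0, 0, 512 };  /* short on the last TRB */

        printf("%u\n", bytes_actually_moved(4096, left, 3)); /* 3584 */
        return 0;
    }
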
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index eb648485a58c..5ebe6af7976e 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -1913,6 +1913,8 @@ unknown:
1913 break; 1913 break;
1914 1914
1915 case USB_RECIP_ENDPOINT: 1915 case USB_RECIP_ENDPOINT:
1916 if (!cdev->config)
1917 break;
1916 endp = ((w_index & 0x80) >> 3) | (w_index & 0x0f); 1918 endp = ((w_index & 0x80) >> 3) | (w_index & 0x0f);
1917 list_for_each_entry(f, &cdev->config->functions, list) { 1919 list_for_each_entry(f, &cdev->config->functions, list) {
1918 if (test_bit(endp, f->endpoints)) 1920 if (test_bit(endp, f->endpoints))
@@ -2124,14 +2126,14 @@ int composite_os_desc_req_prepare(struct usb_composite_dev *cdev,
2124 2126
2125 cdev->os_desc_req = usb_ep_alloc_request(ep0, GFP_KERNEL); 2127 cdev->os_desc_req = usb_ep_alloc_request(ep0, GFP_KERNEL);
2126 if (!cdev->os_desc_req) { 2128 if (!cdev->os_desc_req) {
2127 ret = PTR_ERR(cdev->os_desc_req); 2129 ret = -ENOMEM;
2128 goto end; 2130 goto end;
2129 } 2131 }
2130 2132
2131 /* OS feature descriptor length <= 4kB */ 2133 /* OS feature descriptor length <= 4kB */
2132 cdev->os_desc_req->buf = kmalloc(4096, GFP_KERNEL); 2134 cdev->os_desc_req->buf = kmalloc(4096, GFP_KERNEL);
2133 if (!cdev->os_desc_req->buf) { 2135 if (!cdev->os_desc_req->buf) {
2134 ret = PTR_ERR(cdev->os_desc_req->buf); 2136 ret = -ENOMEM;
2135 kfree(cdev->os_desc_req); 2137 kfree(cdev->os_desc_req);
2136 goto end; 2138 goto end;
2137 } 2139 }
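
Both hunks in this file fix the same class of bug: usb_ep_alloc_request() and kmalloc() report failure by returning NULL, not an ERR_PTR, so PTR_ERR(NULL) evaluates to 0 and the error path would return "success". A self-contained sketch of the two conventions, simplified from the kernel's err.h:

    #include <stdio.h>

    #define MAX_ERRNO 4095

    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline int IS_ERR(const void *ptr)
    {
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    int main(void)
    {
        void *null_alloc = NULL;         /* kmalloc-style failure */
        void *err_alloc = ERR_PTR(-12);  /* ERR_PTR-style failure, -ENOMEM */

        printf("%ld\n", PTR_ERR(null_alloc));   /* 0: looks like success */
        printf("%d %ld\n", IS_ERR(err_alloc), PTR_ERR(err_alloc)); /* 1 -12 */
        return 0;
    }
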
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index 70cf3477f951..f9237fe2be05 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -1490,7 +1490,9 @@ void unregister_gadget_item(struct config_item *item)
1490{ 1490{
1491 struct gadget_info *gi = to_gadget_info(item); 1491 struct gadget_info *gi = to_gadget_info(item);
1492 1492
1493 mutex_lock(&gi->lock);
1493 unregister_gadget(gi); 1494 unregister_gadget(gi);
1495 mutex_unlock(&gi->lock);
1494} 1496}
1495EXPORT_SYMBOL_GPL(unregister_gadget_item); 1497EXPORT_SYMBOL_GPL(unregister_gadget_item);
1496 1498
diff --git a/drivers/usb/gadget/function/f_eem.c b/drivers/usb/gadget/function/f_eem.c
index d58bfc32be9e..007ec6e4a5d4 100644
--- a/drivers/usb/gadget/function/f_eem.c
+++ b/drivers/usb/gadget/function/f_eem.c
@@ -341,11 +341,15 @@ static struct sk_buff *eem_wrap(struct gether *port, struct sk_buff *skb)
341{ 341{
342 struct sk_buff *skb2 = NULL; 342 struct sk_buff *skb2 = NULL;
343 struct usb_ep *in = port->in_ep; 343 struct usb_ep *in = port->in_ep;
344 int padlen = 0; 344 int headroom, tailroom, padlen = 0;
345 u16 len = skb->len; 345 u16 len;
346 346
347 int headroom = skb_headroom(skb); 347 if (!skb)
348 int tailroom = skb_tailroom(skb); 348 return NULL;
349
350 len = skb->len;
351 headroom = skb_headroom(skb);
352 tailroom = skb_tailroom(skb);
349 353
350 /* When (len + EEM_HLEN + ETH_FCS_LEN) % in->maxpacket) is 0, 354 /* When (len + EEM_HLEN + ETH_FCS_LEN) % in->maxpacket) is 0,
351 * stick two bytes of zero-length EEM packet on the end. 355 * stick two bytes of zero-length EEM packet on the end.
diff --git a/drivers/usb/gadget/function/f_rndis.c b/drivers/usb/gadget/function/f_rndis.c
index c8005823b190..16562e461121 100644
--- a/drivers/usb/gadget/function/f_rndis.c
+++ b/drivers/usb/gadget/function/f_rndis.c
@@ -374,6 +374,9 @@ static struct sk_buff *rndis_add_header(struct gether *port,
374{ 374{
375 struct sk_buff *skb2; 375 struct sk_buff *skb2;
376 376
377 if (!skb)
378 return NULL;
379
377 skb2 = skb_realloc_headroom(skb, sizeof(struct rndis_packet_msg_type)); 380 skb2 = skb_realloc_headroom(skb, sizeof(struct rndis_packet_msg_type));
378 rndis_add_hdr(skb2); 381 rndis_add_hdr(skb2);
379 382
diff --git a/drivers/usb/gadget/function/rndis.c b/drivers/usb/gadget/function/rndis.c
index 943c21aafd3b..ab6ac1b74ac0 100644
--- a/drivers/usb/gadget/function/rndis.c
+++ b/drivers/usb/gadget/function/rndis.c
@@ -680,6 +680,12 @@ static int rndis_reset_response(struct rndis_params *params,
680{ 680{
681 rndis_reset_cmplt_type *resp; 681 rndis_reset_cmplt_type *resp;
682 rndis_resp_t *r; 682 rndis_resp_t *r;
683 u8 *xbuf;
684 u32 length;
685
686 /* drain the response queue */
687 while ((xbuf = rndis_get_next_response(params, &length)))
688 rndis_free_response(params, xbuf);
683 689
684 r = rndis_add_response(params, sizeof(rndis_reset_cmplt_type)); 690 r = rndis_add_response(params, sizeof(rndis_reset_cmplt_type));
685 if (!r) 691 if (!r)
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
index a3f7e7c55ebb..5f562c1ec795 100644
--- a/drivers/usb/gadget/function/u_ether.c
+++ b/drivers/usb/gadget/function/u_ether.c
@@ -556,7 +556,8 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
556 /* Multi frame CDC protocols may store the frame for 556 /* Multi frame CDC protocols may store the frame for
557 * later which is not a dropped frame. 557 * later which is not a dropped frame.
558 */ 558 */
559 if (dev->port_usb->supports_multi_frame) 559 if (dev->port_usb &&
560 dev->port_usb->supports_multi_frame)
560 goto multiframe; 561 goto multiframe;
561 goto drop; 562 goto drop;
562 } 563 }
diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
index 6ded6345cd09..e0cd1e4c8892 100644
--- a/drivers/usb/gadget/function/u_serial.c
+++ b/drivers/usb/gadget/function/u_serial.c
@@ -375,10 +375,15 @@ __acquires(&port->port_lock)
375*/ 375*/
376{ 376{
377 struct list_head *pool = &port->write_pool; 377 struct list_head *pool = &port->write_pool;
378 struct usb_ep *in = port->port_usb->in; 378 struct usb_ep *in;
379 int status = 0; 379 int status = 0;
380 bool do_tty_wake = false; 380 bool do_tty_wake = false;
381 381
382 if (!port->port_usb)
383 return status;
384
385 in = port->port_usb->in;
386
382 while (!port->write_busy && !list_empty(pool)) { 387 while (!port->write_busy && !list_empty(pool)) {
383 struct usb_request *req; 388 struct usb_request *req;
384 int len; 389 int len;
diff --git a/drivers/usb/gadget/function/uvc_configfs.c b/drivers/usb/gadget/function/uvc_configfs.c
index 66753ba7a42e..31125a4a2658 100644
--- a/drivers/usb/gadget/function/uvc_configfs.c
+++ b/drivers/usb/gadget/function/uvc_configfs.c
@@ -2023,7 +2023,7 @@ static int uvcg_streaming_class_allow_link(struct config_item *src,
2023 if (!data) { 2023 if (!data) {
2024 kfree(*class_array); 2024 kfree(*class_array);
2025 *class_array = NULL; 2025 *class_array = NULL;
2026 ret = PTR_ERR(data); 2026 ret = -ENOMEM;
2027 goto unlock; 2027 goto unlock;
2028 } 2028 }
2029 cl_arr = *class_array; 2029 cl_arr = *class_array;
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index aa3707bdebb4..16104b5ebdcb 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -542,7 +542,7 @@ static ssize_t ep_aio(struct kiocb *iocb,
542 */ 542 */
543 spin_lock_irq(&epdata->dev->lock); 543 spin_lock_irq(&epdata->dev->lock);
544 value = -ENODEV; 544 value = -ENODEV;
545 if (unlikely(epdata->ep)) 545 if (unlikely(epdata->ep == NULL))
546 goto fail; 546 goto fail;
547 547
548 req = usb_ep_alloc_request(epdata->ep, GFP_ATOMIC); 548 req = usb_ep_alloc_request(epdata->ep, GFP_ATOMIC);
@@ -606,7 +606,7 @@ ep_read_iter(struct kiocb *iocb, struct iov_iter *to)
606 } 606 }
607 if (is_sync_kiocb(iocb)) { 607 if (is_sync_kiocb(iocb)) {
608 value = ep_io(epdata, buf, len); 608 value = ep_io(epdata, buf, len);
609 if (value >= 0 && copy_to_iter(buf, value, to)) 609 if (value >= 0 && (copy_to_iter(buf, value, to) != value))
610 value = -EFAULT; 610 value = -EFAULT;
611 } else { 611 } else {
612 struct kiocb_priv *priv = kzalloc(sizeof *priv, GFP_KERNEL); 612 struct kiocb_priv *priv = kzalloc(sizeof *priv, GFP_KERNEL);
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
index ff8685ea7219..40c04bb25f2f 100644
--- a/drivers/usb/gadget/udc/core.c
+++ b/drivers/usb/gadget/udc/core.c
@@ -827,7 +827,7 @@ void usb_gadget_unmap_request_by_dev(struct device *dev,
827 return; 827 return;
828 828
829 if (req->num_mapped_sgs) { 829 if (req->num_mapped_sgs) {
830 dma_unmap_sg(dev, req->sg, req->num_mapped_sgs, 830 dma_unmap_sg(dev, req->sg, req->num_sgs,
831 is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE); 831 is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
832 832
833 req->num_mapped_sgs = 0; 833 req->num_mapped_sgs = 0;
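
The one-word change encodes a DMA API rule: dma_map_sg() may coalesce entries and return fewer than it was given, and dma_unmap_sg() must be called with the original entry count, not the mapped count. A kernel-style sketch of the contract the fix restores (not compilable standalone):

    /* num_sgs: entries the gadget built; num_mapped_sgs: what the
     * mapping produced for the hardware. */
    mapped = dma_map_sg(dev, req->sg, req->num_sgs, dir);
    if (!mapped)
        return -EFAULT;
    req->num_mapped_sgs = mapped;

    /* ... later, on completion: unmap with the ORIGINAL count ... */
    dma_unmap_sg(dev, req->sg, req->num_sgs, dir);
    req->num_mapped_sgs = 0;
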
@@ -1145,7 +1145,7 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget,
1145 if (ret != -EPROBE_DEFER) 1145 if (ret != -EPROBE_DEFER)
1146 list_del(&driver->pending); 1146 list_del(&driver->pending);
1147 if (ret) 1147 if (ret)
1148 goto err4; 1148 goto err5;
1149 break; 1149 break;
1150 } 1150 }
1151 } 1151 }
@@ -1154,6 +1154,9 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget,
1154 1154
1155 return 0; 1155 return 0;
1156 1156
1157err5:
1158 device_del(&udc->dev);
1159
1157err4: 1160err4:
1158 list_del(&udc->list); 1161 list_del(&udc->list);
1159 mutex_unlock(&udc_lock); 1162 mutex_unlock(&udc_lock);
diff --git a/drivers/usb/gadget/udc/fsl_qe_udc.c b/drivers/usb/gadget/udc/fsl_qe_udc.c
index 93d28cb00b76..8bb011ea78f7 100644
--- a/drivers/usb/gadget/udc/fsl_qe_udc.c
+++ b/drivers/usb/gadget/udc/fsl_qe_udc.c
@@ -1878,11 +1878,8 @@ static int qe_get_frame(struct usb_gadget *gadget)
1878 1878
1879 tmp = in_be16(&udc->usb_param->frame_n); 1879 tmp = in_be16(&udc->usb_param->frame_n);
1880 if (tmp & 0x8000) 1880 if (tmp & 0x8000)
1881 tmp = tmp & 0x07ff; 1881 return tmp & 0x07ff;
1882 else 1882 return -EINVAL;
1883 tmp = -EINVAL;
1884
1885 return (int)tmp;
1886} 1883}
1887 1884
1888static int fsl_qe_start(struct usb_gadget *gadget, 1885static int fsl_qe_start(struct usb_gadget *gadget,
@@ -2053,7 +2050,7 @@ static void setup_received_handle(struct qe_udc *udc,
2053 struct qe_ep *ep; 2050 struct qe_ep *ep;
2054 2051
2055 if (wValue != 0 || wLength != 0 2052 if (wValue != 0 || wLength != 0
2056 || pipe > USB_MAX_ENDPOINTS) 2053 || pipe >= USB_MAX_ENDPOINTS)
2057 break; 2054 break;
2058 ep = &udc->eps[pipe]; 2055 ep = &udc->eps[pipe];
2059 2056
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
index 93a3bec81df7..fb8fc34827ab 100644
--- a/drivers/usb/gadget/udc/renesas_usb3.c
+++ b/drivers/usb/gadget/udc/renesas_usb3.c
@@ -106,6 +106,7 @@
106 106
107/* DRD_CON */ 107/* DRD_CON */
108#define DRD_CON_PERI_CON BIT(24) 108#define DRD_CON_PERI_CON BIT(24)
109#define DRD_CON_VBOUT BIT(0)
109 110
110/* USB_INT_ENA_1 and USB_INT_STA_1 */ 111/* USB_INT_ENA_1 and USB_INT_STA_1 */
111#define USB_INT_1_B3_PLLWKUP BIT(31) 112#define USB_INT_1_B3_PLLWKUP BIT(31)
@@ -363,6 +364,7 @@ static void usb3_init_epc_registers(struct renesas_usb3 *usb3)
363{ 364{
364 /* FIXME: How to change host / peripheral mode as well? */ 365 /* FIXME: How to change host / peripheral mode as well? */
365 usb3_set_bit(usb3, DRD_CON_PERI_CON, USB3_DRD_CON); 366 usb3_set_bit(usb3, DRD_CON_PERI_CON, USB3_DRD_CON);
367 usb3_clear_bit(usb3, DRD_CON_VBOUT, USB3_DRD_CON);
366 368
367 usb3_write(usb3, ~0, USB3_USB_INT_STA_1); 369 usb3_write(usb3, ~0, USB3_USB_INT_STA_1);
368 usb3_enable_irq_1(usb3, USB_INT_1_VBUS_CNG); 370 usb3_enable_irq_1(usb3, USB_INT_1_VBUS_CNG);
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index a962b89b65a6..1e5f529d51a2 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -332,11 +332,11 @@ static void ehci_turn_off_all_ports(struct ehci_hcd *ehci)
332 int port = HCS_N_PORTS(ehci->hcs_params); 332 int port = HCS_N_PORTS(ehci->hcs_params);
333 333
334 while (port--) { 334 while (port--) {
335 ehci_writel(ehci, PORT_RWC_BITS,
336 &ehci->regs->port_status[port]);
337 spin_unlock_irq(&ehci->lock); 335 spin_unlock_irq(&ehci->lock);
338 ehci_port_power(ehci, port, false); 336 ehci_port_power(ehci, port, false);
339 spin_lock_irq(&ehci->lock); 337 spin_lock_irq(&ehci->lock);
338 ehci_writel(ehci, PORT_RWC_BITS,
339 &ehci->regs->port_status[port]);
340 } 340 }
341} 341}
342 342
diff --git a/drivers/usb/host/max3421-hcd.c b/drivers/usb/host/max3421-hcd.c
index c369c29e496d..2f7690092a7f 100644
--- a/drivers/usb/host/max3421-hcd.c
+++ b/drivers/usb/host/max3421-hcd.c
@@ -1675,7 +1675,7 @@ max3421_gpout_set_value(struct usb_hcd *hcd, u8 pin_number, u8 value)
1675 if (pin_number > 7) 1675 if (pin_number > 7)
1676 return; 1676 return;
1677 1677
1678 mask = 1u << pin_number; 1678 mask = 1u << (pin_number % 4);
1679 idx = pin_number / 4; 1679 idx = pin_number / 4;
1680 1680
1681 if (value) 1681 if (value)
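
The MAX3421E packs its eight general-purpose outputs four to a register, as the existing idx = pin_number / 4 implies, so the bit position has to be taken modulo 4 as well; shifting by the full pin number put pins 4..7 outside their register's field. The decomposition, as a compilable sketch:

    #include <stdio.h>

    /* 8 output pins, 4 per register field. */
    static void pin_to_reg(unsigned int pin,
                           unsigned int *idx, unsigned int *mask)
    {
        *idx = pin / 4;            /* which register */
        *mask = 1u << (pin % 4);   /* bit inside that register */
    }

    int main(void)
    {
        unsigned int idx, mask;

        pin_to_reg(6, &idx, &mask);
        printf("reg %u mask 0x%x\n", idx, mask);  /* reg 1 mask 0x4 */
        return 0;
    }
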
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index d61fcc48099e..730b9fd26685 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -386,6 +386,9 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
386 386
387 ret = 0; 387 ret = 0;
388 virt_dev = xhci->devs[slot_id]; 388 virt_dev = xhci->devs[slot_id];
389 if (!virt_dev)
390 return -ENODEV;
391
389 cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO); 392 cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO);
390 if (!cmd) { 393 if (!cmd) {
391 xhci_dbg(xhci, "Couldn't allocate command structure.\n"); 394 xhci_dbg(xhci, "Couldn't allocate command structure.\n");
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 4fd041bec332..d7b0f97abbad 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -314,11 +314,12 @@ static void xhci_pci_remove(struct pci_dev *dev)
314 usb_remove_hcd(xhci->shared_hcd); 314 usb_remove_hcd(xhci->shared_hcd);
315 usb_put_hcd(xhci->shared_hcd); 315 usb_put_hcd(xhci->shared_hcd);
316 } 316 }
317 usb_hcd_pci_remove(dev);
318 317
319 /* Workaround for spurious wakeups at shutdown with HSW */ 318 /* Workaround for spurious wakeups at shutdown with HSW */
320 if (xhci->quirks & XHCI_SPURIOUS_WAKEUP) 319 if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
321 pci_set_power_state(dev, PCI_D3hot); 320 pci_set_power_state(dev, PCI_D3hot);
321
322 usb_hcd_pci_remove(dev);
322} 323}
323 324
324#ifdef CONFIG_PM 325#ifdef CONFIG_PM
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 918e0c739b79..797137e26549 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -850,6 +850,10 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
850 spin_lock_irqsave(&xhci->lock, flags); 850 spin_lock_irqsave(&xhci->lock, flags);
851 851
852 ep->stop_cmds_pending--; 852 ep->stop_cmds_pending--;
853 if (xhci->xhc_state & XHCI_STATE_REMOVING) {
854 spin_unlock_irqrestore(&xhci->lock, flags);
855 return;
856 }
853 if (xhci->xhc_state & XHCI_STATE_DYING) { 857 if (xhci->xhc_state & XHCI_STATE_DYING) {
854 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, 858 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
855 "Stop EP timer ran, but another timer marked " 859 "Stop EP timer ran, but another timer marked "
@@ -903,7 +907,7 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
903 spin_unlock_irqrestore(&xhci->lock, flags); 907 spin_unlock_irqrestore(&xhci->lock, flags);
904 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, 908 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
905 "Calling usb_hc_died()"); 909 "Calling usb_hc_died()");
906 usb_hc_died(xhci_to_hcd(xhci)->primary_hcd); 910 usb_hc_died(xhci_to_hcd(xhci));
907 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, 911 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
908 "xHCI host controller is dead."); 912 "xHCI host controller is dead.");
909} 913}
@@ -1334,12 +1338,6 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
1334 1338
1335 cmd = list_entry(xhci->cmd_list.next, struct xhci_command, cmd_list); 1339 cmd = list_entry(xhci->cmd_list.next, struct xhci_command, cmd_list);
1336 1340
1337 if (cmd->command_trb != xhci->cmd_ring->dequeue) {
1338 xhci_err(xhci,
1339 "Command completion event does not match command\n");
1340 return;
1341 }
1342
1343 del_timer(&xhci->cmd_timer); 1341 del_timer(&xhci->cmd_timer);
1344 1342
1345 trace_xhci_cmd_completion(cmd_trb, (struct xhci_generic_trb *) event); 1343 trace_xhci_cmd_completion(cmd_trb, (struct xhci_generic_trb *) event);
@@ -1351,6 +1349,13 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
1351 xhci_handle_stopped_cmd_ring(xhci, cmd); 1349 xhci_handle_stopped_cmd_ring(xhci, cmd);
1352 return; 1350 return;
1353 } 1351 }
1352
1353 if (cmd->command_trb != xhci->cmd_ring->dequeue) {
1354 xhci_err(xhci,
1355 "Command completion event does not match command\n");
1356 return;
1357 }
1358
1354 /* 1359 /*
1355 * Host aborted the command ring, check if the current command was 1360 * Host aborted the command ring, check if the current command was
1356 * supposed to be aborted, otherwise continue normally. 1361 * supposed to be aborted, otherwise continue normally.
@@ -3243,7 +3248,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3243 send_addr = addr; 3248 send_addr = addr;
3244 3249
3245 /* Queue the TRBs, even if they are zero-length */ 3250 /* Queue the TRBs, even if they are zero-length */
3246 for (enqd_len = 0; enqd_len < full_len; enqd_len += trb_buff_len) { 3251 for (enqd_len = 0; first_trb || enqd_len < full_len;
3252 enqd_len += trb_buff_len) {
3247 field = TRB_TYPE(TRB_NORMAL); 3253 field = TRB_TYPE(TRB_NORMAL);
3248 3254
3249 /* TRB buffer should not cross 64KB boundaries */ 3255 /* TRB buffer should not cross 64KB boundaries */
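
The last xhci-ring.c hunk rewrites the bulk-TX loop condition from "enqd_len < full_len" to "first_trb || enqd_len < full_len", so a zero-length transfer still queues exactly one TRB instead of skipping the loop body entirely. A small sketch of the guaranteed-first-iteration idiom, with the TRB queueing reduced to a counter:

#include <stdbool.h>
#include <stdio.h>

/* Queue one descriptor per chunk; a zero-length transfer must still
 * queue a single descriptor, which "first || ..." guarantees. */
static int queue_chunks(unsigned int full_len, unsigned int chunk)
{
        unsigned int enqd, len;
        bool first = true;
        int trbs = 0;

        for (enqd = 0; first || enqd < full_len; enqd += len) {
                len = full_len - enqd;
                if (len > chunk)
                        len = chunk;
                first = false;
                trbs++;                 /* stand-in for queueing a TRB */
        }
        return trbs;
}

int main(void)
{
        printf("%d\n", queue_chunks(0, 512));     /* 1, not 0 */
        printf("%d\n", queue_chunks(1500, 512));  /* 3 */
        return 0;
}
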
diff --git a/drivers/usb/misc/ftdi-elan.c b/drivers/usb/misc/ftdi-elan.c
index 52c27cab78c3..9b5b3b2281ca 100644
--- a/drivers/usb/misc/ftdi-elan.c
+++ b/drivers/usb/misc/ftdi-elan.c
@@ -665,7 +665,7 @@ static ssize_t ftdi_elan_read(struct file *file, char __user *buffer,
665{ 665{
666 char data[30 *3 + 4]; 666 char data[30 *3 + 4];
667 char *d = data; 667 char *d = data;
668 int m = (sizeof(data) - 1) / 3; 668 int m = (sizeof(data) - 1) / 3 - 1;
669 int bytes_read = 0; 669 int bytes_read = 0;
670 int retry_on_empty = 10; 670 int retry_on_empty = 10;
671 int retry_on_timeout = 5; 671 int retry_on_timeout = 5;
@@ -1684,7 +1684,7 @@ wait:if (ftdi->disconnected > 0) {
1684 int i = 0; 1684 int i = 0;
1685 char data[30 *3 + 4]; 1685 char data[30 *3 + 4];
1686 char *d = data; 1686 char *d = data;
1687 int m = (sizeof(data) - 1) / 3; 1687 int m = (sizeof(data) - 1) / 3 - 1;
1688 int l = 0; 1688 int l = 0;
1689 struct u132_target *target = &ftdi->target[ed]; 1689 struct u132_target *target = &ftdi->target[ed];
1690 struct u132_command *command = &ftdi->command[ 1690 struct u132_command *command = &ftdi->command[
@@ -1876,7 +1876,7 @@ more:{
1876 if (packet_bytes > 2) { 1876 if (packet_bytes > 2) {
1877 char diag[30 *3 + 4]; 1877 char diag[30 *3 + 4];
1878 char *d = diag; 1878 char *d = diag;
1879 int m = (sizeof(diag) - 1) / 3; 1879 int m = (sizeof(diag) - 1) / 3 - 1;
1880 char *b = ftdi->bulk_in_buffer; 1880 char *b = ftdi->bulk_in_buffer;
1881 int bytes_read = 0; 1881 int bytes_read = 0;
1882 diag[0] = 0; 1882 diag[0] = 0;
@@ -2053,7 +2053,7 @@ static int ftdi_elan_synchronize(struct usb_ftdi *ftdi)
2053 if (packet_bytes > 2) { 2053 if (packet_bytes > 2) {
2054 char diag[30 *3 + 4]; 2054 char diag[30 *3 + 4];
2055 char *d = diag; 2055 char *d = diag;
2056 int m = (sizeof(diag) - 1) / 3; 2056 int m = (sizeof(diag) - 1) / 3 - 1;
2057 char *b = ftdi->bulk_in_buffer; 2057 char *b = ftdi->bulk_in_buffer;
2058 int bytes_read = 0; 2058 int bytes_read = 0;
2059 unsigned char c = 0; 2059 unsigned char c = 0;
@@ -2155,7 +2155,7 @@ more:{
2155 if (packet_bytes > 2) { 2155 if (packet_bytes > 2) {
2156 char diag[30 *3 + 4]; 2156 char diag[30 *3 + 4];
2157 char *d = diag; 2157 char *d = diag;
2158 int m = (sizeof(diag) - 1) / 3; 2158 int m = (sizeof(diag) - 1) / 3 - 1;
2159 char *b = ftdi->bulk_in_buffer; 2159 char *b = ftdi->bulk_in_buffer;
2160 int bytes_read = 0; 2160 int bytes_read = 0;
2161 diag[0] = 0; 2161 diag[0] = 0;
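
All five ftdi-elan hunks shrink the group count m by one. Each dumped byte costs three characters (" %02X"), and with m = (94 - 1) / 3 = 31 groups the 93 characters plus a truncation marker plus the terminating NUL would overrun the 94-byte buffer. A sketch of the arithmetic; the format string and the " .." marker are assumed from driver context that is not shown in these hunks:

#include <stdio.h>
#include <string.h>

/* 30*3 + 4 = 94 bytes.  Reserving one group (m = 31 - 1 = 30) leaves
 * room for 90 chars of hex, a 3-char " .." marker, and the NUL. */
static void dump(const unsigned char *buf, int len)
{
        char data[30 * 3 + 4];
        char *d = data;
        int m = (sizeof(data) - 1) / 3 - 1;
        int i;

        data[0] = '\0';
        for (i = 0; i < len; i++) {
                if (m-- > 0)
                        d += sprintf(d, " %02X", buf[i]);
                else {
                        sprintf(d, " ..");      /* still in bounds now */
                        break;
                }
        }
        puts(data);
}

int main(void)
{
        unsigned char buf[40];

        memset(buf, 0xAB, sizeof(buf));
        dump(buf, (int)sizeof(buf));
        return 0;
}
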
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
index 6b978f04b8d7..5c8210dc6fd9 100644
--- a/drivers/usb/misc/usbtest.c
+++ b/drivers/usb/misc/usbtest.c
@@ -585,7 +585,6 @@ static void sg_timeout(unsigned long _req)
585{ 585{
586 struct usb_sg_request *req = (struct usb_sg_request *) _req; 586 struct usb_sg_request *req = (struct usb_sg_request *) _req;
587 587
588 req->status = -ETIMEDOUT;
589 usb_sg_cancel(req); 588 usb_sg_cancel(req);
590} 589}
591 590
@@ -616,8 +615,10 @@ static int perform_sglist(
616 mod_timer(&sg_timer, jiffies + 615 mod_timer(&sg_timer, jiffies +
617 msecs_to_jiffies(SIMPLE_IO_TIMEOUT)); 616 msecs_to_jiffies(SIMPLE_IO_TIMEOUT));
618 usb_sg_wait(req); 617 usb_sg_wait(req);
619 del_timer_sync(&sg_timer); 618 if (!del_timer_sync(&sg_timer))
620 retval = req->status; 619 retval = -ETIMEDOUT;
620 else
621 retval = req->status;
621 622
622 /* FIXME check resulting data pattern */ 623 /* FIXME check resulting data pattern */
623 624
@@ -2602,7 +2603,7 @@ usbtest_ioctl(struct usb_interface *intf, unsigned int code, void *buf)
2602 ktime_get_ts64(&start); 2603 ktime_get_ts64(&start);
2603 2604
2604 retval = usbtest_do_ioctl(intf, param_32); 2605 retval = usbtest_do_ioctl(intf, param_32);
2605 if (retval) 2606 if (retval < 0)
2606 goto free_mutex; 2607 goto free_mutex;
2607 2608
2608 ktime_get_ts64(&end); 2609 ktime_get_ts64(&end);
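
The usbtest.c change stops the timer callback from writing req->status = -ETIMEDOUT (which could race with, and clobber, the real completion status) and instead has the waiter derive the timeout from del_timer_sync(): a return of 0 means the timer had already fired. A rough pthread analog of that decision, labeled as such; the kernel primitive itself is del_timer_sync(), not this flag:

#include <errno.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_int timer_fired;

static void *watchdog(void *arg)
{
        usleep(*(useconds_t *)arg);
        atomic_store(&timer_fired, 1);  /* analog of the timer callback */
        return NULL;
}

/* Returns 1 if we got here before the watchdog fired, 0 otherwise,
 * mirroring del_timer_sync()'s return value.  The thread still runs
 * to completion; only the ordering decision matters here. */
static int cancel_watchdog(pthread_t t)
{
        int was_pending = !atomic_exchange(&timer_fired, 1);

        pthread_join(t, NULL);
        return was_pending;
}

int main(void)
{
        useconds_t delay = 200000;
        pthread_t t;
        int retval;

        if (pthread_create(&t, NULL, watchdog, &delay))
                return 1;
        usleep(50000);                  /* the "I/O" finishes first */
        if (!cancel_watchdog(t))
                retval = -ETIMEDOUT;    /* watchdog won the race */
        else
                retval = 0;             /* report the real status */
        printf("retval=%d\n", retval);
        return 0;
}
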
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index 886526b5fcdd..73cfa13fc0dc 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -87,7 +87,7 @@ config USB_MUSB_DA8XX
87config USB_MUSB_TUSB6010 87config USB_MUSB_TUSB6010
88 tristate "TUSB6010" 88 tristate "TUSB6010"
89 depends on HAS_IOMEM 89 depends on HAS_IOMEM
90 depends on ARCH_OMAP2PLUS || COMPILE_TEST 90 depends on (ARCH_OMAP2PLUS || COMPILE_TEST) && !BLACKFIN
91 depends on NOP_USB_XCEIV = USB_MUSB_HDRC # both built-in or both modules 91 depends on NOP_USB_XCEIV = USB_MUSB_HDRC # both built-in or both modules
92 92
93config USB_MUSB_OMAP2PLUS 93config USB_MUSB_OMAP2PLUS
diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c
index 192248f974ec..fe08e776fec3 100644
--- a/drivers/usb/musb/musb_virthub.c
+++ b/drivers/usb/musb/musb_virthub.c
@@ -290,6 +290,7 @@ int musb_hub_control(
290 u32 temp; 290 u32 temp;
291 int retval = 0; 291 int retval = 0;
292 unsigned long flags; 292 unsigned long flags;
293 bool start_musb = false;
293 294
294 spin_lock_irqsave(&musb->lock, flags); 295 spin_lock_irqsave(&musb->lock, flags);
295 296
@@ -390,7 +391,7 @@ int musb_hub_control(
390 * logic relating to VBUS power-up. 391 * logic relating to VBUS power-up.
391 */ 392 */
392 if (!hcd->self.is_b_host && musb_has_gadget(musb)) 393 if (!hcd->self.is_b_host && musb_has_gadget(musb))
393 musb_start(musb); 394 start_musb = true;
394 break; 395 break;
395 case USB_PORT_FEAT_RESET: 396 case USB_PORT_FEAT_RESET:
396 musb_port_reset(musb, true); 397 musb_port_reset(musb, true);
@@ -451,5 +452,9 @@ error:
451 retval = -EPIPE; 452 retval = -EPIPE;
452 } 453 }
453 spin_unlock_irqrestore(&musb->lock, flags); 454 spin_unlock_irqrestore(&musb->lock, flags);
455
456 if (start_musb)
457 musb_start(musb);
458
454 return retval; 459 return retval;
455} 460}
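
The musb_virthub.c hunk is the classic "don't call heavyweight work under a spinlock" refactor: the decision is recorded in start_musb while musb->lock is held, and musb_start() runs only after the unlock. A minimal sketch with a pthread mutex standing in for the spinlock (names hypothetical):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int powered;

/* Imagine this function sleeps or re-takes 'lock' internally:
 * calling it with 'lock' held would deadlock or sleep in atomic
 * context, which is what the musb fix avoids. */
static void start_controller(void)
{
        printf("controller started\n");
}

static void hub_control(bool power_on)
{
        bool do_start = false;

        pthread_mutex_lock(&lock);
        if (power_on && !powered) {
                powered = 1;
                do_start = true;        /* just record the decision */
        }
        pthread_mutex_unlock(&lock);

        if (do_start)                   /* act only after unlocking */
                start_controller();
}

int main(void)
{
        hub_control(true);
        return 0;
}
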
diff --git a/drivers/usb/phy/phy-generic.c b/drivers/usb/phy/phy-generic.c
index 980c9dee09eb..427efb5eebae 100644
--- a/drivers/usb/phy/phy-generic.c
+++ b/drivers/usb/phy/phy-generic.c
@@ -144,14 +144,18 @@ static irqreturn_t nop_gpio_vbus_thread(int irq, void *data)
144int usb_gen_phy_init(struct usb_phy *phy) 144int usb_gen_phy_init(struct usb_phy *phy)
145{ 145{
146 struct usb_phy_generic *nop = dev_get_drvdata(phy->dev); 146 struct usb_phy_generic *nop = dev_get_drvdata(phy->dev);
147 int ret;
147 148
148 if (!IS_ERR(nop->vcc)) { 149 if (!IS_ERR(nop->vcc)) {
149 if (regulator_enable(nop->vcc)) 150 if (regulator_enable(nop->vcc))
150 dev_err(phy->dev, "Failed to enable power\n"); 151 dev_err(phy->dev, "Failed to enable power\n");
151 } 152 }
152 153
153 if (!IS_ERR(nop->clk)) 154 if (!IS_ERR(nop->clk)) {
154 clk_prepare_enable(nop->clk); 155 ret = clk_prepare_enable(nop->clk);
156 if (ret)
157 return ret;
158 }
155 159
156 nop_reset(nop); 160 nop_reset(nop);
157 161
diff --git a/drivers/usb/phy/phy-omap-otg.c b/drivers/usb/phy/phy-omap-otg.c
index 6f6d2a7fd5a0..6523af4f8f93 100644
--- a/drivers/usb/phy/phy-omap-otg.c
+++ b/drivers/usb/phy/phy-omap-otg.c
@@ -140,6 +140,8 @@ static int omap_otg_probe(struct platform_device *pdev)
140 (rev >> 4) & 0xf, rev & 0xf, config->extcon, otg_dev->id, 140 (rev >> 4) & 0xf, rev & 0xf, config->extcon, otg_dev->id,
141 otg_dev->vbus); 141 otg_dev->vbus);
142 142
143 platform_set_drvdata(pdev, otg_dev);
144
143 return 0; 145 return 0;
144} 146}
145 147
diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c
index 8fbbc2d32371..ac67bab9124c 100644
--- a/drivers/usb/renesas_usbhs/common.c
+++ b/drivers/usb/renesas_usbhs/common.c
@@ -514,7 +514,8 @@ static struct renesas_usbhs_platform_info *usbhs_parse_dt(struct device *dev)
514 if (gpio > 0) 514 if (gpio > 0)
515 dparam->enable_gpio = gpio; 515 dparam->enable_gpio = gpio;
516 516
517 if (dparam->type == USBHS_TYPE_RCAR_GEN2) 517 if (dparam->type == USBHS_TYPE_RCAR_GEN2 ||
518 dparam->type == USBHS_TYPE_RCAR_GEN3)
518 dparam->has_usb_dmac = 1; 519 dparam->has_usb_dmac = 1;
519 520
520 return info; 521 return info;
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
index 280ed5ff021b..857e78337324 100644
--- a/drivers/usb/renesas_usbhs/fifo.c
+++ b/drivers/usb/renesas_usbhs/fifo.c
@@ -871,7 +871,7 @@ static int usbhsf_dma_prepare_push(struct usbhs_pkt *pkt, int *is_done)
871 871
872 /* use PIO if packet is less than pio_dma_border or pipe is DCP */ 872 /* use PIO if packet is less than pio_dma_border or pipe is DCP */
873 if ((len < usbhs_get_dparam(priv, pio_dma_border)) || 873 if ((len < usbhs_get_dparam(priv, pio_dma_border)) ||
874 usbhs_pipe_is_dcp(pipe)) 874 usbhs_pipe_type_is(pipe, USB_ENDPOINT_XFER_ISOC))
875 goto usbhsf_pio_prepare_push; 875 goto usbhsf_pio_prepare_push;
876 876
877 /* check the data length if this driver doesn't use USB-DMAC */ 877
@@ -976,7 +976,7 @@ static int usbhsf_dma_prepare_pop_with_usb_dmac(struct usbhs_pkt *pkt,
976 976
977 /* use PIO if packet is less than pio_dma_border or pipe is DCP */ 977 /* use PIO if packet is less than pio_dma_border or pipe is DCP */
978 if ((pkt->length < usbhs_get_dparam(priv, pio_dma_border)) || 978 if ((pkt->length < usbhs_get_dparam(priv, pio_dma_border)) ||
979 usbhs_pipe_is_dcp(pipe)) 979 usbhs_pipe_type_is(pipe, USB_ENDPOINT_XFER_ISOC))
980 goto usbhsf_pio_prepare_pop; 980 goto usbhsf_pio_prepare_pop;
981 981
982 fifo = usbhsf_get_dma_fifo(priv, pkt); 982 fifo = usbhsf_get_dma_fifo(priv, pkt);
diff --git a/drivers/usb/renesas_usbhs/mod.c b/drivers/usb/renesas_usbhs/mod.c
index d4be5d594896..28965ef4f824 100644
--- a/drivers/usb/renesas_usbhs/mod.c
+++ b/drivers/usb/renesas_usbhs/mod.c
@@ -282,9 +282,16 @@ static irqreturn_t usbhs_interrupt(int irq, void *data)
282 if (usbhs_mod_is_host(priv)) 282 if (usbhs_mod_is_host(priv))
283 usbhs_write(priv, INTSTS1, ~irq_state.intsts1 & INTSTS1_MAGIC); 283 usbhs_write(priv, INTSTS1, ~irq_state.intsts1 & INTSTS1_MAGIC);
284 284
285 usbhs_write(priv, BRDYSTS, ~irq_state.brdysts); 285 /*
286 * The driver should not clear the xxxSTS registers after the
287 * "call irq callback functions" section below, because any of
288 * those "if" statements may invoke a callback that has side effects.
289 */
290 if (irq_state.intsts0 & BRDY)
291 usbhs_write(priv, BRDYSTS, ~irq_state.brdysts);
286 usbhs_write(priv, NRDYSTS, ~irq_state.nrdysts); 292 usbhs_write(priv, NRDYSTS, ~irq_state.nrdysts);
287 usbhs_write(priv, BEMPSTS, ~irq_state.bempsts); 293 if (irq_state.intsts0 & BEMP)
294 usbhs_write(priv, BEMPSTS, ~irq_state.bempsts);
288 295
289 /* 296 /*
290 * call irq callback functions 297 * call irq callback functions
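
The mod.c hunk makes the BRDYSTS/BEMPSTS acknowledgements conditional on the corresponding summary bits in intsts0, so a stale snapshot cannot wipe out events that arrived after it was taken. A simplified model of conditionally acking snapshot bits; register names, bit positions, and the write-0-to-clear behavior are abstractions here, not the documented hardware interface:

#include <stdint.h>
#include <stdio.h>

#define BRDY (1u << 8)
#define BEMP (1u << 10)

/* Model: usbhs_write(priv, xxxSTS, ~snap) clears exactly the bits in
 * the snapshot and leaves newer ones, i.e. reg &= ~snap. */
static uint16_t brdysts = 0x0005, bempsts = 0x0003;

static void handle_irq(uint16_t intsts0, uint16_t brdy_snap,
                       uint16_t bemp_snap)
{
        /* Ack a status register only if its summary bit was raised;
         * blindly acking both could discard events the snapshot
         * never saw (the bug fixed above). */
        if (intsts0 & BRDY)
                brdysts &= ~brdy_snap;
        if (intsts0 & BEMP)
                bempsts &= ~bemp_snap;
}

int main(void)
{
        handle_irq(BRDY, 0x0005, 0x0003);   /* only BRDY was pending */
        printf("brdysts=%#x bempsts=%#x\n", brdysts, bempsts);
        return 0;
}
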
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
index 50f3363cc382..c4c64740a3e7 100644
--- a/drivers/usb/renesas_usbhs/mod_gadget.c
+++ b/drivers/usb/renesas_usbhs/mod_gadget.c
@@ -617,10 +617,13 @@ static int usbhsg_ep_enable(struct usb_ep *ep,
617 * use dmaengine if possible. 617 * use dmaengine if possible.
618 * It will fall back to the PIO handler otherwise. 618 * It will fall back to the PIO handler otherwise.
619 */ 619 */
620 if (usb_endpoint_dir_in(desc)) 620 if (usb_endpoint_dir_in(desc)) {
621 pipe->handler = &usbhs_fifo_dma_push_handler; 621 pipe->handler = &usbhs_fifo_dma_push_handler;
622 else 622 } else {
623 pipe->handler = &usbhs_fifo_dma_pop_handler; 623 pipe->handler = &usbhs_fifo_dma_pop_handler;
624 usbhs_xxxsts_clear(priv, BRDYSTS,
625 usbhs_pipe_number(pipe));
626 }
624 627
625 ret = 0; 628 ret = 0;
626 } 629 }
@@ -1073,7 +1076,7 @@ int usbhs_mod_gadget_probe(struct usbhs_priv *priv)
1073 1076
1074 gpriv->transceiver = usb_get_phy(USB_PHY_TYPE_UNDEFINED); 1077 gpriv->transceiver = usb_get_phy(USB_PHY_TYPE_UNDEFINED);
1075 dev_info(dev, "%stransceiver found\n", 1078 dev_info(dev, "%stransceiver found\n",
1076 gpriv->transceiver ? "" : "no "); 1079 !IS_ERR(gpriv->transceiver) ? "" : "no ");
1077 1080
1078 /* 1081 /*
1079 * CAUTION 1082 * CAUTION
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 00820809139a..b2d767e743fc 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -648,6 +648,8 @@ static const struct usb_device_id id_table_combined[] = {
648 { USB_DEVICE(FTDI_VID, FTDI_ELV_TFD128_PID) }, 648 { USB_DEVICE(FTDI_VID, FTDI_ELV_TFD128_PID) },
649 { USB_DEVICE(FTDI_VID, FTDI_ELV_FM3RX_PID) }, 649 { USB_DEVICE(FTDI_VID, FTDI_ELV_FM3RX_PID) },
650 { USB_DEVICE(FTDI_VID, FTDI_ELV_WS777_PID) }, 650 { USB_DEVICE(FTDI_VID, FTDI_ELV_WS777_PID) },
651 { USB_DEVICE(FTDI_VID, FTDI_PALMSENS_PID) },
652 { USB_DEVICE(FTDI_VID, FTDI_IVIUM_XSTAT_PID) },
651 { USB_DEVICE(FTDI_VID, LINX_SDMUSBQSS_PID) }, 653 { USB_DEVICE(FTDI_VID, LINX_SDMUSBQSS_PID) },
652 { USB_DEVICE(FTDI_VID, LINX_MASTERDEVEL2_PID) }, 654 { USB_DEVICE(FTDI_VID, LINX_MASTERDEVEL2_PID) },
653 { USB_DEVICE(FTDI_VID, LINX_FUTURE_0_PID) }, 655 { USB_DEVICE(FTDI_VID, LINX_FUTURE_0_PID) },
@@ -1008,6 +1010,7 @@ static const struct usb_device_id id_table_combined[] = {
1008 { USB_DEVICE(ICPDAS_VID, ICPDAS_I7560U_PID) }, 1010 { USB_DEVICE(ICPDAS_VID, ICPDAS_I7560U_PID) },
1009 { USB_DEVICE(ICPDAS_VID, ICPDAS_I7561U_PID) }, 1011 { USB_DEVICE(ICPDAS_VID, ICPDAS_I7561U_PID) },
1010 { USB_DEVICE(ICPDAS_VID, ICPDAS_I7563U_PID) }, 1012 { USB_DEVICE(ICPDAS_VID, ICPDAS_I7563U_PID) },
1013 { USB_DEVICE(WICED_VID, WICED_USB20706V2_PID) },
1011 { } /* Terminating entry */ 1014 { } /* Terminating entry */
1012}; 1015};
1013 1016
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index c5d6c1e73e8e..f87a938cf005 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -406,6 +406,12 @@
406#define FTDI_4N_GALAXY_DE_3_PID 0xF3C2 406#define FTDI_4N_GALAXY_DE_3_PID 0xF3C2
407 407
408/* 408/*
409 * Ivium Technologies product IDs
410 */
411#define FTDI_PALMSENS_PID 0xf440
412#define FTDI_IVIUM_XSTAT_PID 0xf441
413
414/*
409 * Linx Technologies product ids 415 * Linx Technologies product ids
410 */ 416 */
411#define LINX_SDMUSBQSS_PID 0xF448 /* Linx SDM-USB-QS-S */ 417#define LINX_SDMUSBQSS_PID 0xF448 /* Linx SDM-USB-QS-S */
@@ -673,6 +679,12 @@
673#define INTREPID_NEOVI_PID 0x0701 679#define INTREPID_NEOVI_PID 0x0701
674 680
675/* 681/*
682 * WICED USB UART
683 */
684#define WICED_VID 0x0A5C
685#define WICED_USB20706V2_PID 0x6422
686
687/*
676 * Definitions for ID TECH (www.idt-net.com) devices 688 * Definitions for ID TECH (www.idt-net.com) devices
677 */ 689 */
678#define IDTECH_VID 0x0ACD /* ID TECH Vendor ID */ 690#define IDTECH_VID 0x0ACD /* ID TECH Vendor ID */
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index 5608af4a369d..de9992b492b0 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -1252,7 +1252,7 @@ static int mos7720_write(struct tty_struct *tty, struct usb_serial_port *port,
1252 1252
1253 if (urb->transfer_buffer == NULL) { 1253 if (urb->transfer_buffer == NULL) {
1254 urb->transfer_buffer = kmalloc(URB_TRANSFER_BUFFER_SIZE, 1254 urb->transfer_buffer = kmalloc(URB_TRANSFER_BUFFER_SIZE,
1255 GFP_KERNEL); 1255 GFP_ATOMIC);
1256 if (!urb->transfer_buffer) 1256 if (!urb->transfer_buffer)
1257 goto exit; 1257 goto exit;
1258 } 1258 }
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index ed378fb232e7..57426d703a09 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -1340,8 +1340,8 @@ static int mos7840_write(struct tty_struct *tty, struct usb_serial_port *port,
1340 } 1340 }
1341 1341
1342 if (urb->transfer_buffer == NULL) { 1342 if (urb->transfer_buffer == NULL) {
1343 urb->transfer_buffer = 1343 urb->transfer_buffer = kmalloc(URB_TRANSFER_BUFFER_SIZE,
1344 kmalloc(URB_TRANSFER_BUFFER_SIZE, GFP_KERNEL); 1344 GFP_ATOMIC);
1345 if (!urb->transfer_buffer) 1345 if (!urb->transfer_buffer)
1346 goto exit; 1346 goto exit;
1347 } 1347 }
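
Both mos77xx hunks switch the write-path allocation from GFP_KERNEL to GFP_ATOMIC. The switch implies write() can run in a context that must not sleep (GFP_KERNEL may block waiting for memory; GFP_ATOMIC never blocks but may fail, so the NULL check matters). A toy model of that contract, explicitly not the kernel allocator:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy allocation-context model: GFP_KERNEL may block, which is
 * illegal in atomic context; GFP_ATOMIC never blocks but may fail. */
enum gfp { GFP_KERNEL_TOY, GFP_ATOMIC_TOY };

static bool in_atomic_context;

static void *toy_alloc(size_t size, enum gfp flags)
{
        if (flags == GFP_KERNEL_TOY)
                assert(!in_atomic_context);  /* "sleeping while atomic" */
        return malloc(size);                 /* may return NULL */
}

int main(void)
{
        void *buf;

        in_atomic_context = true;            /* e.g. a lock is held */
        buf = toy_alloc(4096, GFP_ATOMIC_TOY);
        if (!buf)
                return 1;                    /* must tolerate failure */
        free(buf);
        puts("ok");
        return 0;
}
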
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 8e07536c233a..9894e341c6ac 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -274,6 +274,12 @@ static void option_instat_callback(struct urb *urb);
274#define TELIT_PRODUCT_LE920 0x1200 274#define TELIT_PRODUCT_LE920 0x1200
275#define TELIT_PRODUCT_LE910 0x1201 275#define TELIT_PRODUCT_LE910 0x1201
276#define TELIT_PRODUCT_LE910_USBCFG4 0x1206 276#define TELIT_PRODUCT_LE910_USBCFG4 0x1206
277#define TELIT_PRODUCT_LE920A4_1207 0x1207
278#define TELIT_PRODUCT_LE920A4_1208 0x1208
279#define TELIT_PRODUCT_LE920A4_1211 0x1211
280#define TELIT_PRODUCT_LE920A4_1212 0x1212
281#define TELIT_PRODUCT_LE920A4_1213 0x1213
282#define TELIT_PRODUCT_LE920A4_1214 0x1214
277 283
278/* ZTE PRODUCTS */ 284/* ZTE PRODUCTS */
279#define ZTE_VENDOR_ID 0x19d2 285#define ZTE_VENDOR_ID 0x19d2
@@ -519,6 +525,12 @@ static void option_instat_callback(struct urb *urb);
519#define VIATELECOM_VENDOR_ID 0x15eb 525#define VIATELECOM_VENDOR_ID 0x15eb
520#define VIATELECOM_PRODUCT_CDS7 0x0001 526#define VIATELECOM_PRODUCT_CDS7 0x0001
521 527
528/* WeTelecom products */
529#define WETELECOM_VENDOR_ID 0x22de
530#define WETELECOM_PRODUCT_WMD200 0x6801
531#define WETELECOM_PRODUCT_6802 0x6802
532#define WETELECOM_PRODUCT_WMD300 0x6803
533
522struct option_blacklist_info { 534struct option_blacklist_info {
523 /* bitmask of interface numbers blacklisted for send_setup */ 535 /* bitmask of interface numbers blacklisted for send_setup */
524 const unsigned long sendsetup; 536 const unsigned long sendsetup;
@@ -628,6 +640,11 @@ static const struct option_blacklist_info telit_le920_blacklist = {
628 .reserved = BIT(1) | BIT(5), 640 .reserved = BIT(1) | BIT(5),
629}; 641};
630 642
643static const struct option_blacklist_info telit_le920a4_blacklist_1 = {
644 .sendsetup = BIT(0),
645 .reserved = BIT(1),
646};
647
631static const struct option_blacklist_info telit_le922_blacklist_usbcfg0 = { 648static const struct option_blacklist_info telit_le922_blacklist_usbcfg0 = {
632 .sendsetup = BIT(2), 649 .sendsetup = BIT(2),
633 .reserved = BIT(0) | BIT(1) | BIT(3), 650 .reserved = BIT(0) | BIT(1) | BIT(3),
@@ -1203,6 +1220,16 @@ static const struct usb_device_id option_ids[] = {
1203 .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 }, 1220 .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
1204 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920), 1221 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
1205 .driver_info = (kernel_ulong_t)&telit_le920_blacklist }, 1222 .driver_info = (kernel_ulong_t)&telit_le920_blacklist },
1223 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1207) },
1224 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1208),
1225 .driver_info = (kernel_ulong_t)&telit_le920a4_blacklist_1 },
1226 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1211),
1227 .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
1228 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1212),
1229 .driver_info = (kernel_ulong_t)&telit_le920a4_blacklist_1 },
1230 { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1213, 0xff) },
1231 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1214),
1232 .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
1206 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */ 1233 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
1207 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff), 1234 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff),
1208 .driver_info = (kernel_ulong_t)&net_intf1_blacklist }, 1235 .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
@@ -1966,9 +1993,13 @@ static const struct usb_device_id option_ids[] = {
1966 .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, 1993 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
1967 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */ 1994 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
1968 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */ 1995 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
1996 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */
1969 { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */ 1997 { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */
1970 { USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) }, 1998 { USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) },
1971 { USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) }, 1999 { USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) },
2000 { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD200, 0xff, 0xff, 0xff) },
2001 { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_6802, 0xff, 0xff, 0xff) },
2002 { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD300, 0xff, 0xff, 0xff) },
1972 { } /* Terminating entry */ 2003 { } /* Terminating entry */
1973}; 2004};
1974MODULE_DEVICE_TABLE(usb, option_ids); 2005MODULE_DEVICE_TABLE(usb, option_ids);
diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
index a204782ae530..e98b6e57b703 100644
--- a/drivers/usb/serial/usb-serial-simple.c
+++ b/drivers/usb/serial/usb-serial-simple.c
@@ -54,7 +54,8 @@ DEVICE(funsoft, FUNSOFT_IDS);
54/* Infineon Flashloader driver */ 54/* Infineon Flashloader driver */
55#define FLASHLOADER_IDS() \ 55#define FLASHLOADER_IDS() \
56 { USB_DEVICE_INTERFACE_CLASS(0x058b, 0x0041, USB_CLASS_CDC_DATA) }, \ 56 { USB_DEVICE_INTERFACE_CLASS(0x058b, 0x0041, USB_CLASS_CDC_DATA) }, \
57 { USB_DEVICE(0x8087, 0x0716) } 57 { USB_DEVICE(0x8087, 0x0716) }, \
58 { USB_DEVICE(0x8087, 0x0801) }
58DEVICE(flashloader, FLASHLOADER_IDS); 59DEVICE(flashloader, FLASHLOADER_IDS);
59 60
60/* Google Serial USB SubClass */ 61/* Google Serial USB SubClass */
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index b1b9bac44016..d213cf44a7e4 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -1433,7 +1433,7 @@ int usb_serial_register_drivers(struct usb_serial_driver *const serial_drivers[]
1433 1433
1434 rc = usb_register(udriver); 1434 rc = usb_register(udriver);
1435 if (rc) 1435 if (rc)
1436 return rc; 1436 goto failed_usb_register;
1437 1437
1438 for (sd = serial_drivers; *sd; ++sd) { 1438 for (sd = serial_drivers; *sd; ++sd) {
1439 (*sd)->usb_driver = udriver; 1439 (*sd)->usb_driver = udriver;
@@ -1451,6 +1451,8 @@ int usb_serial_register_drivers(struct usb_serial_driver *const serial_drivers[]
1451 while (sd-- > serial_drivers) 1451 while (sd-- > serial_drivers)
1452 usb_serial_deregister(*sd); 1452 usb_serial_deregister(*sd);
1453 usb_deregister(udriver); 1453 usb_deregister(udriver);
1454failed_usb_register:
1455 kfree(udriver);
1454 return rc; 1456 return rc;
1455} 1457}
1456EXPORT_SYMBOL_GPL(usb_serial_register_drivers); 1458EXPORT_SYMBOL_GPL(usb_serial_register_drivers);
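
The usb-serial.c hunk fixes a leak: on usb_register() failure the old code returned rc directly and never freed udriver; the fix routes the error through a label that does. A compact sketch of the kernel-style unwind ladder (names hypothetical):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct driver { int dummy; };

static int do_register(struct driver *drv) { (void)drv; return -1; }

/* Every failure after an allocation jumps to a label that frees
 * exactly what was set up so far; an early "return rc;" (the old
 * code) leaks 'drv'. */
static int register_drivers(void)
{
        struct driver *drv;
        int rc;

        drv = malloc(sizeof(*drv));
        if (!drv)
                return -ENOMEM;

        rc = do_register(drv);
        if (rc)
                goto failed_register;    /* not: return rc; */

        return 0;

failed_register:
        free(drv);
        return rc;
}

int main(void)
{
        printf("rc=%d\n", register_drivers());
        return 0;
}
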
diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
index 15ecfc9c5f6c..152b43822ef1 100644
--- a/drivers/vfio/pci/vfio_pci_intrs.c
+++ b/drivers/vfio/pci/vfio_pci_intrs.c
@@ -564,67 +564,80 @@ static int vfio_pci_set_msi_trigger(struct vfio_pci_device *vdev,
564} 564}
565 565
566static int vfio_pci_set_ctx_trigger_single(struct eventfd_ctx **ctx, 566static int vfio_pci_set_ctx_trigger_single(struct eventfd_ctx **ctx,
567 uint32_t flags, void *data) 567 unsigned int count, uint32_t flags,
568 void *data)
568{ 569{
569 int32_t fd = *(int32_t *)data;
570
571 if (!(flags & VFIO_IRQ_SET_DATA_TYPE_MASK))
572 return -EINVAL;
573
574 /* DATA_NONE/DATA_BOOL enables loopback testing */ 570 /* DATA_NONE/DATA_BOOL enables loopback testing */
575 if (flags & VFIO_IRQ_SET_DATA_NONE) { 571 if (flags & VFIO_IRQ_SET_DATA_NONE) {
576 if (*ctx) 572 if (*ctx) {
577 eventfd_signal(*ctx, 1); 573 if (count) {
578 return 0; 574 eventfd_signal(*ctx, 1);
575 } else {
576 eventfd_ctx_put(*ctx);
577 *ctx = NULL;
578 }
579 return 0;
580 }
579 } else if (flags & VFIO_IRQ_SET_DATA_BOOL) { 581 } else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
580 uint8_t trigger = *(uint8_t *)data; 582 uint8_t trigger;
583
584 if (!count)
585 return -EINVAL;
586
587 trigger = *(uint8_t *)data;
581 if (trigger && *ctx) 588 if (trigger && *ctx)
582 eventfd_signal(*ctx, 1); 589 eventfd_signal(*ctx, 1);
583 return 0;
584 }
585 590
586 /* Handle SET_DATA_EVENTFD */
587 if (fd == -1) {
588 if (*ctx)
589 eventfd_ctx_put(*ctx);
590 *ctx = NULL;
591 return 0; 591 return 0;
592 } else if (fd >= 0) { 592 } else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
593 struct eventfd_ctx *efdctx; 593 int32_t fd;
594 efdctx = eventfd_ctx_fdget(fd); 594
595 if (IS_ERR(efdctx)) 595 if (!count)
596 return PTR_ERR(efdctx); 596 return -EINVAL;
597 if (*ctx) 597
598 eventfd_ctx_put(*ctx); 598 fd = *(int32_t *)data;
599 *ctx = efdctx; 599 if (fd == -1) {
600 if (*ctx)
601 eventfd_ctx_put(*ctx);
602 *ctx = NULL;
603 } else if (fd >= 0) {
604 struct eventfd_ctx *efdctx;
605
606 efdctx = eventfd_ctx_fdget(fd);
607 if (IS_ERR(efdctx))
608 return PTR_ERR(efdctx);
609
610 if (*ctx)
611 eventfd_ctx_put(*ctx);
612
613 *ctx = efdctx;
614 }
600 return 0; 615 return 0;
601 } else 616 }
602 return -EINVAL; 617
618 return -EINVAL;
603} 619}
604 620
605static int vfio_pci_set_err_trigger(struct vfio_pci_device *vdev, 621static int vfio_pci_set_err_trigger(struct vfio_pci_device *vdev,
606 unsigned index, unsigned start, 622 unsigned index, unsigned start,
607 unsigned count, uint32_t flags, void *data) 623 unsigned count, uint32_t flags, void *data)
608{ 624{
609 if (index != VFIO_PCI_ERR_IRQ_INDEX) 625 if (index != VFIO_PCI_ERR_IRQ_INDEX || start != 0 || count > 1)
610 return -EINVAL; 626 return -EINVAL;
611 627
612 /* 628 return vfio_pci_set_ctx_trigger_single(&vdev->err_trigger,
613 * We should sanitize start & count, but that wasn't caught 629 count, flags, data);
614 * originally, so this IRQ index must forever ignore them :-(
615 */
616
617 return vfio_pci_set_ctx_trigger_single(&vdev->err_trigger, flags, data);
618} 630}
619 631
620static int vfio_pci_set_req_trigger(struct vfio_pci_device *vdev, 632static int vfio_pci_set_req_trigger(struct vfio_pci_device *vdev,
621 unsigned index, unsigned start, 633 unsigned index, unsigned start,
622 unsigned count, uint32_t flags, void *data) 634 unsigned count, uint32_t flags, void *data)
623{ 635{
624 if (index != VFIO_PCI_REQ_IRQ_INDEX || start != 0 || count != 1) 636 if (index != VFIO_PCI_REQ_IRQ_INDEX || start != 0 || count > 1)
625 return -EINVAL; 637 return -EINVAL;
626 638
627 return vfio_pci_set_ctx_trigger_single(&vdev->req_trigger, flags, data); 639 return vfio_pci_set_ctx_trigger_single(&vdev->req_trigger,
640 count, flags, data);
628} 641}
629 642
630int vfio_pci_set_irqs_ioctl(struct vfio_pci_device *vdev, uint32_t flags, 643int vfio_pci_set_irqs_ioctl(struct vfio_pci_device *vdev, uint32_t flags,
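
The vfio rework above validates count and the DATA_* flag before touching the eventfd context, and lets count == 0 with DATA_NONE tear the trigger down. The primitive being wrapped is a plain eventfd; a minimal Linux userspace demonstration of its signal/consume semantics (this is the standard eventfd(2) API, not the vfio code):

#include <stdint.h>
#include <stdio.h>
#include <sys/eventfd.h>
#include <unistd.h>

int main(void)
{
        int fd = eventfd(0, 0);
        uint64_t val = 1, out;

        if (fd < 0)
                return 1;

        /* Writing adds to the counter: analog of eventfd_signal(ctx, 1). */
        if (write(fd, &val, sizeof(val)) != sizeof(val) ||
            write(fd, &val, sizeof(val)) != sizeof(val) ||
            read(fd, &out, sizeof(out)) != sizeof(out))
                return 1;

        printf("consumed %llu\n", (unsigned long long)out);  /* 2 */

        close(fd);                      /* analog of eventfd_ctx_put() */
        return 0;
}
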
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 9d6320e8ff3e..6e29d053843d 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -88,7 +88,7 @@ struct vhost_scsi_cmd {
88 struct scatterlist *tvc_prot_sgl; 88 struct scatterlist *tvc_prot_sgl;
89 struct page **tvc_upages; 89 struct page **tvc_upages;
90 /* Pointer to response header iovec */ 90 /* Pointer to response header iovec */
91 struct iovec *tvc_resp_iov; 91 struct iovec tvc_resp_iov;
92 /* Pointer to vhost_scsi for our device */ 92 /* Pointer to vhost_scsi for our device */
93 struct vhost_scsi *tvc_vhost; 93 struct vhost_scsi *tvc_vhost;
94 /* Pointer to vhost_virtqueue for the cmd */ 94 /* Pointer to vhost_virtqueue for the cmd */
@@ -547,7 +547,7 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
547 memcpy(v_rsp.sense, cmd->tvc_sense_buf, 547 memcpy(v_rsp.sense, cmd->tvc_sense_buf,
548 se_cmd->scsi_sense_length); 548 se_cmd->scsi_sense_length);
549 549
550 iov_iter_init(&iov_iter, READ, cmd->tvc_resp_iov, 550 iov_iter_init(&iov_iter, READ, &cmd->tvc_resp_iov,
551 cmd->tvc_in_iovs, sizeof(v_rsp)); 551 cmd->tvc_in_iovs, sizeof(v_rsp));
552 ret = copy_to_iter(&v_rsp, sizeof(v_rsp), &iov_iter); 552 ret = copy_to_iter(&v_rsp, sizeof(v_rsp), &iov_iter);
553 if (likely(ret == sizeof(v_rsp))) { 553 if (likely(ret == sizeof(v_rsp))) {
@@ -1044,7 +1044,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
1044 } 1044 }
1045 cmd->tvc_vhost = vs; 1045 cmd->tvc_vhost = vs;
1046 cmd->tvc_vq = vq; 1046 cmd->tvc_vq = vq;
1047 cmd->tvc_resp_iov = &vq->iov[out]; 1047 cmd->tvc_resp_iov = vq->iov[out];
1048 cmd->tvc_in_iovs = in; 1048 cmd->tvc_in_iovs = in;
1049 1049
1050 pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n", 1050 pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c
index 388eec4e1a90..97fb2f8fa930 100644
--- a/drivers/vhost/test.c
+++ b/drivers/vhost/test.c
@@ -220,20 +220,20 @@ static long vhost_test_reset_owner(struct vhost_test *n)
220{ 220{
221 void *priv = NULL; 221 void *priv = NULL;
222 long err; 222 long err;
223 struct vhost_memory *memory; 223 struct vhost_umem *umem;
224 224
225 mutex_lock(&n->dev.mutex); 225 mutex_lock(&n->dev.mutex);
226 err = vhost_dev_check_owner(&n->dev); 226 err = vhost_dev_check_owner(&n->dev);
227 if (err) 227 if (err)
228 goto done; 228 goto done;
229 memory = vhost_dev_reset_owner_prepare(); 229 umem = vhost_dev_reset_owner_prepare();
230 if (!memory) { 230 if (!umem) {
231 err = -ENOMEM; 231 err = -ENOMEM;
232 goto done; 232 goto done;
233 } 233 }
234 vhost_test_stop(n, &priv); 234 vhost_test_stop(n, &priv);
235 vhost_test_flush(n); 235 vhost_test_flush(n);
236 vhost_dev_reset_owner(&n->dev, memory); 236 vhost_dev_reset_owner(&n->dev, umem);
237done: 237done:
238 mutex_unlock(&n->dev.mutex); 238 mutex_unlock(&n->dev.mutex);
239 return err; 239 return err;
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index 0ddf3a2dbfc4..e3b30ea9ece5 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -307,6 +307,8 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
307 307
308 vhost_disable_notify(&vsock->dev, vq); 308 vhost_disable_notify(&vsock->dev, vq);
309 for (;;) { 309 for (;;) {
310 u32 len;
311
310 if (!vhost_vsock_more_replies(vsock)) { 312 if (!vhost_vsock_more_replies(vsock)) {
311 /* Stop tx until the device processes already 313 /* Stop tx until the device processes already
312 * pending replies. Leave tx virtqueue 314 * pending replies. Leave tx virtqueue
@@ -334,13 +336,15 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
334 continue; 336 continue;
335 } 337 }
336 338
339 len = pkt->len;
340
337 /* Only accept correctly addressed packets */ 341 /* Only accept correctly addressed packets */
338 if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid) 342 if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid)
339 virtio_transport_recv_pkt(pkt); 343 virtio_transport_recv_pkt(pkt);
340 else 344 else
341 virtio_transport_free_pkt(pkt); 345 virtio_transport_free_pkt(pkt);
342 346
343 vhost_add_used(vq, head, sizeof(pkt->hdr) + pkt->len); 347 vhost_add_used(vq, head, sizeof(pkt->hdr) + len);
344 added = true; 348 added = true;
345 } 349 }
346 350
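
The vhost/vsock hunk snapshots pkt->len into a local before the packet is handed to virtio_transport_recv_pkt() or freed; reading pkt->len afterwards would be a use-after-free once ownership has moved. A small sketch of the copy-before-handoff pattern (types hypothetical):

#include <stdio.h>
#include <stdlib.h>

struct pkt { unsigned int len; char *data; };

static void consume_pkt(struct pkt *p)  /* may free or hand off p */
{
        free(p->data);
        free(p);
}

static unsigned int handle(struct pkt *p)
{
        unsigned int len = p->len;      /* copy BEFORE ownership moves */

        consume_pkt(p);
        /* Reading p->len here would be a use-after-free: the bug. */
        return sizeof(unsigned int) + len;
}

int main(void)
{
        struct pkt *p = malloc(sizeof(*p));

        if (!p)
                return 1;
        p->len = 64;
        p->data = malloc(p->len);
        if (!p->data) {
                free(p);
                return 1;
        }
        printf("used %u bytes\n", handle(p));
        return 0;
}
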
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 114a0c88afb8..ed9c9eeedfe5 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -167,7 +167,7 @@ static bool vring_use_dma_api(struct virtio_device *vdev)
167 * making all of the arch DMA ops work on the vring device itself 167 * making all of the arch DMA ops work on the vring device itself
168 * is a mess. For now, we use the parent device for DMA ops. 168 * is a mess. For now, we use the parent device for DMA ops.
169 */ 169 */
170struct device *vring_dma_dev(const struct vring_virtqueue *vq) 170static struct device *vring_dma_dev(const struct vring_virtqueue *vq)
171{ 171{
172 return vq->vq.vdev->dev.parent; 172 return vq->vq.vdev->dev.parent;
173} 173}
@@ -327,6 +327,8 @@ static inline int virtqueue_add(struct virtqueue *_vq,
327 * host should service the ring ASAP. */ 327 * host should service the ring ASAP. */
328 if (out_sgs) 328 if (out_sgs)
329 vq->notify(&vq->vq); 329 vq->notify(&vq->vq);
330 if (indirect)
331 kfree(desc);
330 END_USE(vq); 332 END_USE(vq);
331 return -ENOSPC; 333 return -ENOSPC;
332 } 334 }
@@ -426,6 +428,7 @@ unmap_release:
426 if (indirect) 428 if (indirect)
427 kfree(desc); 429 kfree(desc);
428 430
431 END_USE(vq);
429 return -EIO; 432 return -EIO;
430} 433}
431 434
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index 7487971f9f78..c1010f018bd8 100644
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -316,7 +316,7 @@ static int xenbus_write_transaction(unsigned msg_type,
316 rc = -ENOMEM; 316 rc = -ENOMEM;
317 goto out; 317 goto out;
318 } 318 }
319 } else { 319 } else if (msg_type == XS_TRANSACTION_END) {
320 list_for_each_entry(trans, &u->transactions, list) 320 list_for_each_entry(trans, &u->transactions, list)
321 if (trans->handle.id == u->u.msg.tx_id) 321 if (trans->handle.id == u->u.msg.tx_id)
322 break; 322 break;
diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c
index 4b0eff6da674..85737e96ab8b 100644
--- a/fs/afs/cmservice.c
+++ b/fs/afs/cmservice.c
@@ -189,11 +189,8 @@ static int afs_deliver_cb_callback(struct afs_call *call, struct sk_buff *skb,
189 case 1: 189 case 1:
190 _debug("extract FID count"); 190 _debug("extract FID count");
191 ret = afs_extract_data(call, skb, last, &call->tmp, 4); 191 ret = afs_extract_data(call, skb, last, &call->tmp, 4);
192 switch (ret) { 192 if (ret < 0)
193 case 0: break; 193 return ret;
194 case -EAGAIN: return 0;
195 default: return ret;
196 }
197 194
198 call->count = ntohl(call->tmp); 195 call->count = ntohl(call->tmp);
199 _debug("FID count: %u", call->count); 196 _debug("FID count: %u", call->count);
@@ -210,11 +207,8 @@ static int afs_deliver_cb_callback(struct afs_call *call, struct sk_buff *skb,
210 _debug("extract FID array"); 207 _debug("extract FID array");
211 ret = afs_extract_data(call, skb, last, call->buffer, 208 ret = afs_extract_data(call, skb, last, call->buffer,
212 call->count * 3 * 4); 209 call->count * 3 * 4);
213 switch (ret) { 210 if (ret < 0)
214 case 0: break; 211 return ret;
215 case -EAGAIN: return 0;
216 default: return ret;
217 }
218 212
219 _debug("unmarshall FID array"); 213 _debug("unmarshall FID array");
220 call->request = kcalloc(call->count, 214 call->request = kcalloc(call->count,
@@ -239,11 +233,8 @@ static int afs_deliver_cb_callback(struct afs_call *call, struct sk_buff *skb,
239 case 3: 233 case 3:
240 _debug("extract CB count"); 234 _debug("extract CB count");
241 ret = afs_extract_data(call, skb, last, &call->tmp, 4); 235 ret = afs_extract_data(call, skb, last, &call->tmp, 4);
242 switch (ret) { 236 if (ret < 0)
243 case 0: break; 237 return ret;
244 case -EAGAIN: return 0;
245 default: return ret;
246 }
247 238
248 tmp = ntohl(call->tmp); 239 tmp = ntohl(call->tmp);
249 _debug("CB count: %u", tmp); 240 _debug("CB count: %u", tmp);
@@ -258,11 +249,8 @@ static int afs_deliver_cb_callback(struct afs_call *call, struct sk_buff *skb,
258 _debug("extract CB array"); 249 _debug("extract CB array");
259 ret = afs_extract_data(call, skb, last, call->request, 250 ret = afs_extract_data(call, skb, last, call->request,
260 call->count * 3 * 4); 251 call->count * 3 * 4);
261 switch (ret) { 252 if (ret < 0)
262 case 0: break; 253 return ret;
263 case -EAGAIN: return 0;
264 default: return ret;
265 }
266 254
267 _debug("unmarshall CB array"); 255 _debug("unmarshall CB array");
268 cb = call->request; 256 cb = call->request;
@@ -278,9 +266,9 @@ static int afs_deliver_cb_callback(struct afs_call *call, struct sk_buff *skb,
278 call->unmarshall++; 266 call->unmarshall++;
279 267
280 case 5: 268 case 5:
281 _debug("trailer"); 269 ret = afs_data_complete(call, skb, last);
282 if (skb->len != 0) 270 if (ret < 0)
283 return -EBADMSG; 271 return ret;
284 272
285 /* Record that the message was unmarshalled successfully so 273 /* Record that the message was unmarshalled successfully so
286 * that the call destructor knows to do the callback breaking 274
@@ -294,8 +282,6 @@ static int afs_deliver_cb_callback(struct afs_call *call, struct sk_buff *skb,
294 break; 282 break;
295 } 283 }
296 284
297 if (!last)
298 return 0;
299 285
300 call->state = AFS_CALL_REPLYING; 286 call->state = AFS_CALL_REPLYING;
301 287
@@ -335,13 +321,13 @@ static int afs_deliver_cb_init_call_back_state(struct afs_call *call,
335{ 321{
336 struct afs_server *server; 322 struct afs_server *server;
337 struct in_addr addr; 323 struct in_addr addr;
324 int ret;
338 325
339 _enter(",{%u},%d", skb->len, last); 326 _enter(",{%u},%d", skb->len, last);
340 327
341 if (skb->len > 0) 328 ret = afs_data_complete(call, skb, last);
342 return -EBADMSG; 329 if (ret < 0)
343 if (!last) 330 return ret;
344 return 0;
345 331
346 /* no unmarshalling required */ 332 /* no unmarshalling required */
347 call->state = AFS_CALL_REPLYING; 333 call->state = AFS_CALL_REPLYING;
@@ -371,8 +357,10 @@ static int afs_deliver_cb_init_call_back_state3(struct afs_call *call,
371 357
372 _enter(",{%u},%d", skb->len, last); 358 _enter(",{%u},%d", skb->len, last);
373 359
360 /* There are some arguments that we ignore */
361 afs_data_consumed(call, skb);
374 if (!last) 362 if (!last)
375 return 0; 363 return -EAGAIN;
376 364
377 /* no unmarshalling required */ 365 /* no unmarshalling required */
378 call->state = AFS_CALL_REPLYING; 366 call->state = AFS_CALL_REPLYING;
@@ -408,12 +396,13 @@ static void SRXAFSCB_Probe(struct work_struct *work)
408static int afs_deliver_cb_probe(struct afs_call *call, struct sk_buff *skb, 396static int afs_deliver_cb_probe(struct afs_call *call, struct sk_buff *skb,
409 bool last) 397 bool last)
410{ 398{
399 int ret;
400
411 _enter(",{%u},%d", skb->len, last); 401 _enter(",{%u},%d", skb->len, last);
412 402
413 if (skb->len > 0) 403 ret = afs_data_complete(call, skb, last);
414 return -EBADMSG; 404 if (ret < 0)
415 if (!last) 405 return ret;
416 return 0;
417 406
418 /* no unmarshalling required */ 407 /* no unmarshalling required */
419 call->state = AFS_CALL_REPLYING; 408 call->state = AFS_CALL_REPLYING;
@@ -460,10 +449,9 @@ static int afs_deliver_cb_probe_uuid(struct afs_call *call, struct sk_buff *skb,
460 449
461 _enter("{%u},{%u},%d", call->unmarshall, skb->len, last); 450 _enter("{%u},{%u},%d", call->unmarshall, skb->len, last);
462 451
463 if (skb->len > 0) 452 ret = afs_data_complete(call, skb, last);
464 return -EBADMSG; 453 if (ret < 0)
465 if (!last) 454 return ret;
466 return 0;
467 455
468 switch (call->unmarshall) { 456 switch (call->unmarshall) {
469 case 0: 457 case 0:
@@ -509,8 +497,9 @@ static int afs_deliver_cb_probe_uuid(struct afs_call *call, struct sk_buff *skb,
509 break; 497 break;
510 } 498 }
511 499
512 if (!last) 500 ret = afs_data_complete(call, skb, last);
513 return 0; 501 if (ret < 0)
502 return ret;
514 503
515 call->state = AFS_CALL_REPLYING; 504 call->state = AFS_CALL_REPLYING;
516 505
@@ -588,12 +577,13 @@ static void SRXAFSCB_TellMeAboutYourself(struct work_struct *work)
588static int afs_deliver_cb_tell_me_about_yourself(struct afs_call *call, 577static int afs_deliver_cb_tell_me_about_yourself(struct afs_call *call,
589 struct sk_buff *skb, bool last) 578 struct sk_buff *skb, bool last)
590{ 579{
580 int ret;
581
591 _enter(",{%u},%d", skb->len, last); 582 _enter(",{%u},%d", skb->len, last);
592 583
593 if (skb->len > 0) 584 ret = afs_data_complete(call, skb, last);
594 return -EBADMSG; 585 if (ret < 0)
595 if (!last) 586 return ret;
596 return 0;
597 587
598 /* no unmarshalling required */ 588 /* no unmarshalling required */
599 call->state = AFS_CALL_REPLYING; 589 call->state = AFS_CALL_REPLYING;
diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c
index c2e930ec2888..9312b92e54be 100644
--- a/fs/afs/fsclient.c
+++ b/fs/afs/fsclient.c
@@ -240,15 +240,13 @@ static int afs_deliver_fs_fetch_status(struct afs_call *call,
240{ 240{
241 struct afs_vnode *vnode = call->reply; 241 struct afs_vnode *vnode = call->reply;
242 const __be32 *bp; 242 const __be32 *bp;
243 int ret;
243 244
244 _enter(",,%u", last); 245 _enter(",,%u", last);
245 246
246 afs_transfer_reply(call, skb); 247 ret = afs_transfer_reply(call, skb, last);
247 if (!last) 248 if (ret < 0)
248 return 0; 249 return ret;
249
250 if (call->reply_size != call->reply_max)
251 return -EBADMSG;
252 250
253 /* unmarshall the reply once we've received all of it */ 251 /* unmarshall the reply once we've received all of it */
254 bp = call->buffer; 252 bp = call->buffer;
@@ -335,11 +333,8 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call,
335 case 1: 333 case 1:
336 _debug("extract data length (MSW)"); 334 _debug("extract data length (MSW)");
337 ret = afs_extract_data(call, skb, last, &call->tmp, 4); 335 ret = afs_extract_data(call, skb, last, &call->tmp, 4);
338 switch (ret) { 336 if (ret < 0)
339 case 0: break; 337 return ret;
340 case -EAGAIN: return 0;
341 default: return ret;
342 }
343 338
344 call->count = ntohl(call->tmp); 339 call->count = ntohl(call->tmp);
345 _debug("DATA length MSW: %u", call->count); 340 _debug("DATA length MSW: %u", call->count);
@@ -353,11 +348,8 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call,
353 case 2: 348 case 2:
354 _debug("extract data length"); 349 _debug("extract data length");
355 ret = afs_extract_data(call, skb, last, &call->tmp, 4); 350 ret = afs_extract_data(call, skb, last, &call->tmp, 4);
356 switch (ret) { 351 if (ret < 0)
357 case 0: break; 352 return ret;
358 case -EAGAIN: return 0;
359 default: return ret;
360 }
361 353
362 call->count = ntohl(call->tmp); 354 call->count = ntohl(call->tmp);
363 _debug("DATA length: %u", call->count); 355 _debug("DATA length: %u", call->count);
@@ -375,11 +367,8 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call,
375 ret = afs_extract_data(call, skb, last, buffer, 367 ret = afs_extract_data(call, skb, last, buffer,
376 call->count); 368 call->count);
377 kunmap_atomic(buffer); 369 kunmap_atomic(buffer);
378 switch (ret) { 370 if (ret < 0)
379 case 0: break; 371 return ret;
380 case -EAGAIN: return 0;
381 default: return ret;
382 }
383 } 372 }
384 373
385 call->offset = 0; 374 call->offset = 0;
@@ -389,11 +378,8 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call,
389 case 4: 378 case 4:
390 ret = afs_extract_data(call, skb, last, call->buffer, 379 ret = afs_extract_data(call, skb, last, call->buffer,
391 (21 + 3 + 6) * 4); 380 (21 + 3 + 6) * 4);
392 switch (ret) { 381 if (ret < 0)
393 case 0: break; 382 return ret;
394 case -EAGAIN: return 0;
395 default: return ret;
396 }
397 383
398 bp = call->buffer; 384 bp = call->buffer;
399 xdr_decode_AFSFetchStatus(&bp, &vnode->status, vnode, NULL); 385 xdr_decode_AFSFetchStatus(&bp, &vnode->status, vnode, NULL);
@@ -405,15 +391,12 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call,
405 call->unmarshall++; 391 call->unmarshall++;
406 392
407 case 5: 393 case 5:
408 _debug("trailer"); 394 ret = afs_data_complete(call, skb, last);
409 if (skb->len != 0) 395 if (ret < 0)
410 return -EBADMSG; 396 return ret;
411 break; 397 break;
412 } 398 }
413 399
414 if (!last)
415 return 0;
416
417 if (call->count < PAGE_SIZE) { 400 if (call->count < PAGE_SIZE) {
418 _debug("clear"); 401 _debug("clear");
419 page = call->reply3; 402 page = call->reply3;
@@ -537,9 +520,8 @@ static int afs_deliver_fs_give_up_callbacks(struct afs_call *call,
537{ 520{
538 _enter(",{%u},%d", skb->len, last); 521 _enter(",{%u},%d", skb->len, last);
539 522
540 if (skb->len > 0) 523 /* shouldn't be any reply data */
541 return -EBADMSG; /* shouldn't be any reply data */ 524 return afs_data_complete(call, skb, last);
542 return 0;
543} 525}
544 526
545/* 527/*
@@ -622,15 +604,13 @@ static int afs_deliver_fs_create_vnode(struct afs_call *call,
622{ 604{
623 struct afs_vnode *vnode = call->reply; 605 struct afs_vnode *vnode = call->reply;
624 const __be32 *bp; 606 const __be32 *bp;
607 int ret;
625 608
626 _enter("{%u},{%u},%d", call->unmarshall, skb->len, last); 609 _enter("{%u},{%u},%d", call->unmarshall, skb->len, last);
627 610
628 afs_transfer_reply(call, skb); 611 ret = afs_transfer_reply(call, skb, last);
629 if (!last) 612 if (ret < 0)
630 return 0; 613 return ret;
631
632 if (call->reply_size != call->reply_max)
633 return -EBADMSG;
634 614
635 /* unmarshall the reply once we've received all of it */ 615 /* unmarshall the reply once we've received all of it */
636 bp = call->buffer; 616 bp = call->buffer;
@@ -721,15 +701,13 @@ static int afs_deliver_fs_remove(struct afs_call *call,
721{ 701{
722 struct afs_vnode *vnode = call->reply; 702 struct afs_vnode *vnode = call->reply;
723 const __be32 *bp; 703 const __be32 *bp;
704 int ret;
724 705
725 _enter("{%u},{%u},%d", call->unmarshall, skb->len, last); 706 _enter("{%u},{%u},%d", call->unmarshall, skb->len, last);
726 707
727 afs_transfer_reply(call, skb); 708 ret = afs_transfer_reply(call, skb, last);
728 if (!last) 709 if (ret < 0)
729 return 0; 710 return ret;
730
731 if (call->reply_size != call->reply_max)
732 return -EBADMSG;
733 711
734 /* unmarshall the reply once we've received all of it */ 712 /* unmarshall the reply once we've received all of it */
735 bp = call->buffer; 713 bp = call->buffer;
@@ -804,15 +782,13 @@ static int afs_deliver_fs_link(struct afs_call *call,
804{ 782{
805 struct afs_vnode *dvnode = call->reply, *vnode = call->reply2; 783 struct afs_vnode *dvnode = call->reply, *vnode = call->reply2;
806 const __be32 *bp; 784 const __be32 *bp;
785 int ret;
807 786
808 _enter("{%u},{%u},%d", call->unmarshall, skb->len, last); 787 _enter("{%u},{%u},%d", call->unmarshall, skb->len, last);
809 788
810 afs_transfer_reply(call, skb); 789 ret = afs_transfer_reply(call, skb, last);
811 if (!last) 790 if (ret < 0)
812 return 0; 791 return ret;
813
814 if (call->reply_size != call->reply_max)
815 return -EBADMSG;
816 792
817 /* unmarshall the reply once we've received all of it */ 793 /* unmarshall the reply once we've received all of it */
818 bp = call->buffer; 794 bp = call->buffer;
@@ -892,15 +868,13 @@ static int afs_deliver_fs_symlink(struct afs_call *call,
892{ 868{
893 struct afs_vnode *vnode = call->reply; 869 struct afs_vnode *vnode = call->reply;
894 const __be32 *bp; 870 const __be32 *bp;
871 int ret;
895 872
896 _enter("{%u},{%u},%d", call->unmarshall, skb->len, last); 873 _enter("{%u},{%u},%d", call->unmarshall, skb->len, last);
897 874
898 afs_transfer_reply(call, skb); 875 ret = afs_transfer_reply(call, skb, last);
899 if (!last) 876 if (ret < 0)
900 return 0; 877 return ret;
901
902 if (call->reply_size != call->reply_max)
903 return -EBADMSG;
904 878
905 /* unmarshall the reply once we've received all of it */ 879 /* unmarshall the reply once we've received all of it */
906 bp = call->buffer; 880 bp = call->buffer;
@@ -999,15 +973,13 @@ static int afs_deliver_fs_rename(struct afs_call *call,
999{ 973{
1000 struct afs_vnode *orig_dvnode = call->reply, *new_dvnode = call->reply2; 974 struct afs_vnode *orig_dvnode = call->reply, *new_dvnode = call->reply2;
1001 const __be32 *bp; 975 const __be32 *bp;
976 int ret;
1002 977
1003 _enter("{%u},{%u},%d", call->unmarshall, skb->len, last); 978 _enter("{%u},{%u},%d", call->unmarshall, skb->len, last);
1004 979
1005 afs_transfer_reply(call, skb); 980 ret = afs_transfer_reply(call, skb, last);
1006 if (!last) 981 if (ret < 0)
1007 return 0; 982 return ret;
1008
1009 if (call->reply_size != call->reply_max)
1010 return -EBADMSG;
1011 983
1012 /* unmarshall the reply once we've received all of it */ 984 /* unmarshall the reply once we've received all of it */
1013 bp = call->buffer; 985 bp = call->buffer;
@@ -1105,20 +1077,13 @@ static int afs_deliver_fs_store_data(struct afs_call *call,
1105{ 1077{
1106 struct afs_vnode *vnode = call->reply; 1078 struct afs_vnode *vnode = call->reply;
1107 const __be32 *bp; 1079 const __be32 *bp;
1080 int ret;
1108 1081
1109 _enter(",,%u", last); 1082 _enter(",,%u", last);
1110 1083
1111 afs_transfer_reply(call, skb); 1084 ret = afs_transfer_reply(call, skb, last);
1112 if (!last) { 1085 if (ret < 0)
1113 _leave(" = 0 [more]"); 1086 return ret;
1114 return 0;
1115 }
1116
1117 if (call->reply_size != call->reply_max) {
1118 _leave(" = -EBADMSG [%u != %u]",
1119 call->reply_size, call->reply_max);
1120 return -EBADMSG;
1121 }
1122 1087
1123 /* unmarshall the reply once we've received all of it */ 1088 /* unmarshall the reply once we've received all of it */
1124 bp = call->buffer; 1089 bp = call->buffer;
@@ -1292,20 +1257,13 @@ static int afs_deliver_fs_store_status(struct afs_call *call,
1292 afs_dataversion_t *store_version; 1257 afs_dataversion_t *store_version;
1293 struct afs_vnode *vnode = call->reply; 1258 struct afs_vnode *vnode = call->reply;
1294 const __be32 *bp; 1259 const __be32 *bp;
1260 int ret;
1295 1261
1296 _enter(",,%u", last); 1262 _enter(",,%u", last);
1297 1263
1298 afs_transfer_reply(call, skb); 1264 ret = afs_transfer_reply(call, skb, last);
1299 if (!last) { 1265 if (ret < 0)
1300 _leave(" = 0 [more]"); 1266 return ret;
1301 return 0;
1302 }
1303
1304 if (call->reply_size != call->reply_max) {
1305 _leave(" = -EBADMSG [%u != %u]",
1306 call->reply_size, call->reply_max);
1307 return -EBADMSG;
1308 }
1309 1267
1310 /* unmarshall the reply once we've received all of it */ 1268 /* unmarshall the reply once we've received all of it */
1311 store_version = NULL; 1269 store_version = NULL;
@@ -1504,11 +1462,8 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call,
1504 _debug("extract status"); 1462 _debug("extract status");
1505 ret = afs_extract_data(call, skb, last, call->buffer, 1463 ret = afs_extract_data(call, skb, last, call->buffer,
1506 12 * 4); 1464 12 * 4);
1507 switch (ret) { 1465 if (ret < 0)
1508 case 0: break; 1466 return ret;
1509 case -EAGAIN: return 0;
1510 default: return ret;
1511 }
1512 1467
1513 bp = call->buffer; 1468 bp = call->buffer;
1514 xdr_decode_AFSFetchVolumeStatus(&bp, call->reply2); 1469 xdr_decode_AFSFetchVolumeStatus(&bp, call->reply2);
@@ -1518,11 +1473,8 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call,
1518 /* extract the volume name length */ 1473 /* extract the volume name length */
1519 case 2: 1474 case 2:
1520 ret = afs_extract_data(call, skb, last, &call->tmp, 4); 1475 ret = afs_extract_data(call, skb, last, &call->tmp, 4);
1521 switch (ret) { 1476 if (ret < 0)
1522 case 0: break; 1477 return ret;
1523 case -EAGAIN: return 0;
1524 default: return ret;
1525 }
1526 1478
1527 call->count = ntohl(call->tmp); 1479 call->count = ntohl(call->tmp);
1528 _debug("volname length: %u", call->count); 1480 _debug("volname length: %u", call->count);
@@ -1537,11 +1489,8 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call,
1537 if (call->count > 0) { 1489 if (call->count > 0) {
1538 ret = afs_extract_data(call, skb, last, call->reply3, 1490 ret = afs_extract_data(call, skb, last, call->reply3,
1539 call->count); 1491 call->count);
1540 switch (ret) { 1492 if (ret < 0)
1541 case 0: break; 1493 return ret;
1542 case -EAGAIN: return 0;
1543 default: return ret;
1544 }
1545 } 1494 }
1546 1495
1547 p = call->reply3; 1496 p = call->reply3;
@@ -1561,11 +1510,8 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call,
1561 case 4: 1510 case 4:
1562 ret = afs_extract_data(call, skb, last, call->buffer, 1511 ret = afs_extract_data(call, skb, last, call->buffer,
1563 call->count); 1512 call->count);
1564 switch (ret) { 1513 if (ret < 0)
1565 case 0: break; 1514 return ret;
1566 case -EAGAIN: return 0;
1567 default: return ret;
1568 }
1569 1515
1570 call->offset = 0; 1516 call->offset = 0;
1571 call->unmarshall++; 1517 call->unmarshall++;
@@ -1574,11 +1520,8 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call,
1574 /* extract the offline message length */ 1520 /* extract the offline message length */
1575 case 5: 1521 case 5:
1576 ret = afs_extract_data(call, skb, last, &call->tmp, 4); 1522 ret = afs_extract_data(call, skb, last, &call->tmp, 4);
1577 switch (ret) { 1523 if (ret < 0)
1578 case 0: break; 1524 return ret;
1579 case -EAGAIN: return 0;
1580 default: return ret;
1581 }
1582 1525
1583 call->count = ntohl(call->tmp); 1526 call->count = ntohl(call->tmp);
1584 _debug("offline msg length: %u", call->count); 1527 _debug("offline msg length: %u", call->count);
@@ -1593,11 +1536,8 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call,
1593 if (call->count > 0) { 1536 if (call->count > 0) {
1594 ret = afs_extract_data(call, skb, last, call->reply3, 1537 ret = afs_extract_data(call, skb, last, call->reply3,
1595 call->count); 1538 call->count);
1596 switch (ret) { 1539 if (ret < 0)
1597 case 0: break; 1540 return ret;
1598 case -EAGAIN: return 0;
1599 default: return ret;
1600 }
1601 } 1541 }
1602 1542
1603 p = call->reply3; 1543 p = call->reply3;
@@ -1617,11 +1557,8 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call,
1617 case 7: 1557 case 7:
1618 ret = afs_extract_data(call, skb, last, call->buffer, 1558 ret = afs_extract_data(call, skb, last, call->buffer,
1619 call->count); 1559 call->count);
1620 switch (ret) { 1560 if (ret < 0)
1621 case 0: break; 1561 return ret;
1622 case -EAGAIN: return 0;
1623 default: return ret;
1624 }
1625 1562
1626 call->offset = 0; 1563 call->offset = 0;
1627 call->unmarshall++; 1564 call->unmarshall++;
@@ -1630,11 +1567,8 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call,
1630 /* extract the message of the day length */ 1567 /* extract the message of the day length */
1631 case 8: 1568 case 8:
1632 ret = afs_extract_data(call, skb, last, &call->tmp, 4); 1569 ret = afs_extract_data(call, skb, last, &call->tmp, 4);
1633 switch (ret) { 1570 if (ret < 0)
1634 case 0: break; 1571 return ret;
1635 case -EAGAIN: return 0;
1636 default: return ret;
1637 }
1638 1572
1639 call->count = ntohl(call->tmp); 1573 call->count = ntohl(call->tmp);
1640 _debug("motd length: %u", call->count); 1574 _debug("motd length: %u", call->count);
@@ -1649,11 +1583,8 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call,
1649 if (call->count > 0) { 1583 if (call->count > 0) {
1650 ret = afs_extract_data(call, skb, last, call->reply3, 1584 ret = afs_extract_data(call, skb, last, call->reply3,
1651 call->count); 1585 call->count);
1652 switch (ret) { 1586 if (ret < 0)
1653 case 0: break; 1587 return ret;
1654 case -EAGAIN: return 0;
1655 default: return ret;
1656 }
1657 } 1588 }
1658 1589
1659 p = call->reply3; 1590 p = call->reply3;
@@ -1673,26 +1604,20 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call,
1673 case 10: 1604 case 10:
1674 ret = afs_extract_data(call, skb, last, call->buffer, 1605 ret = afs_extract_data(call, skb, last, call->buffer,
1675 call->count); 1606 call->count);
1676 switch (ret) { 1607 if (ret < 0)
1677 case 0: break; 1608 return ret;
1678 case -EAGAIN: return 0;
1679 default: return ret;
1680 }
1681 1609
1682 call->offset = 0; 1610 call->offset = 0;
1683 call->unmarshall++; 1611 call->unmarshall++;
1684 no_motd_padding: 1612 no_motd_padding:
1685 1613
1686 case 11: 1614 case 11:
1687 _debug("trailer %d", skb->len); 1615 ret = afs_data_complete(call, skb, last);
1688 if (skb->len != 0) 1616 if (ret < 0)
1689 return -EBADMSG; 1617 return ret;
1690 break; 1618 break;
1691 } 1619 }
1692 1620
1693 if (!last)
1694 return 0;
1695
1696 _leave(" = 0 [done]"); 1621 _leave(" = 0 [done]");
1697 return 0; 1622 return 0;
1698} 1623}
@@ -1764,15 +1689,13 @@ static int afs_deliver_fs_xxxx_lock(struct afs_call *call,
1764 struct sk_buff *skb, bool last) 1689 struct sk_buff *skb, bool last)
1765{ 1690{
1766 const __be32 *bp; 1691 const __be32 *bp;
1692 int ret;
1767 1693
1768 _enter("{%u},{%u},%d", call->unmarshall, skb->len, last); 1694 _enter("{%u},{%u},%d", call->unmarshall, skb->len, last);
1769 1695
1770 afs_transfer_reply(call, skb); 1696 ret = afs_transfer_reply(call, skb, last);
1771 if (!last) 1697 if (ret < 0)
1772 return 0; 1698 return ret;
1773
1774 if (call->reply_size != call->reply_max)
1775 return -EBADMSG;
1776 1699
1777 /* unmarshall the reply once we've received all of it */ 1700 /* unmarshall the reply once we've received all of it */
1778 bp = call->buffer; 1701 bp = call->buffer;
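The hunks above all make one transformation: each unmarshalling state used to map -EAGAIN to 0 locally and return everything else, which hid "ran out of data on the final packet" from the caller. With afs_extract_data() consuming the skb and returning -EAGAIN itself, every state collapses to a single negative-return test, and afs_deliver_to_call() decides what -EAGAIN means (wait for more data, or abort if the last packet was already seen). A compilable miniature of the pattern, with made-up names rather than the kernel's:

#include <errno.h>
#include <stddef.h>
#include <string.h>

struct mini_call { size_t offset; unsigned char tmp[4]; };

/* Returns -EAGAIN while the field is still short, 0 once complete. */
static int mini_extract(struct mini_call *c, const unsigned char *data,
                        size_t avail, size_t want)
{
        size_t len = (want - c->offset < avail) ? want - c->offset : avail;

        memcpy(c->tmp + c->offset, data, len);
        c->offset += len;
        return c->offset < want ? -EAGAIN : 0;
}

static int mini_deliver_step(struct mini_call *c, const unsigned char *data,
                             size_t avail)
{
        int ret = mini_extract(c, data, avail, sizeof(c->tmp));

        if (ret < 0)
                return ret;     /* one test replaces the old three-way switch */
        c->offset = 0;          /* field complete: advance the state machine */
        return 0;
}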
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index 71d5982312f3..df976b2a7f40 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -609,17 +609,29 @@ extern void afs_proc_cell_remove(struct afs_cell *);
609 */ 609 */
610extern int afs_open_socket(void); 610extern int afs_open_socket(void);
611extern void afs_close_socket(void); 611extern void afs_close_socket(void);
612extern void afs_data_consumed(struct afs_call *, struct sk_buff *);
612extern int afs_make_call(struct in_addr *, struct afs_call *, gfp_t, 613extern int afs_make_call(struct in_addr *, struct afs_call *, gfp_t,
613 const struct afs_wait_mode *); 614 const struct afs_wait_mode *);
614extern struct afs_call *afs_alloc_flat_call(const struct afs_call_type *, 615extern struct afs_call *afs_alloc_flat_call(const struct afs_call_type *,
615 size_t, size_t); 616 size_t, size_t);
616extern void afs_flat_call_destructor(struct afs_call *); 617extern void afs_flat_call_destructor(struct afs_call *);
617extern void afs_transfer_reply(struct afs_call *, struct sk_buff *); 618extern int afs_transfer_reply(struct afs_call *, struct sk_buff *, bool);
618extern void afs_send_empty_reply(struct afs_call *); 619extern void afs_send_empty_reply(struct afs_call *);
619extern void afs_send_simple_reply(struct afs_call *, const void *, size_t); 620extern void afs_send_simple_reply(struct afs_call *, const void *, size_t);
620extern int afs_extract_data(struct afs_call *, struct sk_buff *, bool, void *, 621extern int afs_extract_data(struct afs_call *, struct sk_buff *, bool, void *,
621 size_t); 622 size_t);
622 623
624static inline int afs_data_complete(struct afs_call *call, struct sk_buff *skb,
625 bool last)
626{
627 if (skb->len > 0)
628 return -EBADMSG;
629 afs_data_consumed(call, skb);
630 if (!last)
631 return -EAGAIN;
632 return 0;
633}
634
623/* 635/*
624 * security.c 636 * security.c
625 */ 637 */
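The new afs_data_complete() helper defined above centralises the end-of-message bookkeeping: trailing bytes in a packet are a protocol error, and a fully consumed packet that is not the last one asks the caller to wait. Its contract, restated as a stand-alone model with a tiny test harness (our names, not the kernel's):

#include <assert.h>
#include <errno.h>

static int model_data_complete(unsigned int skb_len, int last)
{
        if (skb_len > 0)
                return -EBADMSG;        /* excess data in the packet */
        if (!last)
                return -EAGAIN;         /* consumed, more packets expected */
        return 0;                       /* message completely received */
}

int main(void)
{
        assert(model_data_complete(3, 1) == -EBADMSG);
        assert(model_data_complete(0, 0) == -EAGAIN);
        assert(model_data_complete(0, 1) == 0);
        return 0;
}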
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index 4832de84d52c..14d04c848465 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -150,10 +150,9 @@ void afs_close_socket(void)
150} 150}
151 151
152/* 152/*
153 * note that the data in a socket buffer is now delivered and that the buffer 153 * Note that the data in a socket buffer is now consumed.
154 * should be freed
155 */ 154 */
156static void afs_data_delivered(struct sk_buff *skb) 155void afs_data_consumed(struct afs_call *call, struct sk_buff *skb)
157{ 156{
158 if (!skb) { 157 if (!skb) {
159 _debug("DLVR NULL [%d]", atomic_read(&afs_outstanding_skbs)); 158 _debug("DLVR NULL [%d]", atomic_read(&afs_outstanding_skbs));
@@ -161,9 +160,7 @@ static void afs_data_delivered(struct sk_buff *skb)
161 } else { 160 } else {
162 _debug("DLVR %p{%u} [%d]", 161 _debug("DLVR %p{%u} [%d]",
163 skb, skb->mark, atomic_read(&afs_outstanding_skbs)); 162 skb, skb->mark, atomic_read(&afs_outstanding_skbs));
164 if (atomic_dec_return(&afs_outstanding_skbs) == -1) 163 rxrpc_kernel_data_consumed(call->rxcall, skb);
165 BUG();
166 rxrpc_kernel_data_delivered(skb);
167 } 164 }
168} 165}
169 166
@@ -489,9 +486,15 @@ static void afs_deliver_to_call(struct afs_call *call)
489 last = rxrpc_kernel_is_data_last(skb); 486 last = rxrpc_kernel_is_data_last(skb);
490 ret = call->type->deliver(call, skb, last); 487 ret = call->type->deliver(call, skb, last);
491 switch (ret) { 488 switch (ret) {
489 case -EAGAIN:
490 if (last) {
491 _debug("short data");
492 goto unmarshal_error;
493 }
494 break;
492 case 0: 495 case 0:
493 if (last && 496 ASSERT(last);
494 call->state == AFS_CALL_AWAIT_REPLY) 497 if (call->state == AFS_CALL_AWAIT_REPLY)
495 call->state = AFS_CALL_COMPLETE; 498 call->state = AFS_CALL_COMPLETE;
496 break; 499 break;
497 case -ENOTCONN: 500 case -ENOTCONN:
@@ -501,6 +504,7 @@ static void afs_deliver_to_call(struct afs_call *call)
501 abort_code = RX_INVALID_OPERATION; 504 abort_code = RX_INVALID_OPERATION;
502 goto do_abort; 505 goto do_abort;
503 default: 506 default:
507 unmarshal_error:
504 abort_code = RXGEN_CC_UNMARSHAL; 508 abort_code = RXGEN_CC_UNMARSHAL;
505 if (call->state != AFS_CALL_AWAIT_REPLY) 509 if (call->state != AFS_CALL_AWAIT_REPLY)
506 abort_code = RXGEN_SS_UNMARSHAL; 510 abort_code = RXGEN_SS_UNMARSHAL;
@@ -511,9 +515,7 @@ static void afs_deliver_to_call(struct afs_call *call)
511 call->state = AFS_CALL_ERROR; 515 call->state = AFS_CALL_ERROR;
512 break; 516 break;
513 } 517 }
514 afs_data_delivered(skb); 518 break;
515 skb = NULL;
516 continue;
517 case RXRPC_SKB_MARK_FINAL_ACK: 519 case RXRPC_SKB_MARK_FINAL_ACK:
518 _debug("Rcv ACK"); 520 _debug("Rcv ACK");
519 call->state = AFS_CALL_COMPLETE; 521 call->state = AFS_CALL_COMPLETE;
@@ -685,15 +687,35 @@ static void afs_process_async_call(struct afs_call *call)
685} 687}
686 688
687/* 689/*
688 * empty a socket buffer into a flat reply buffer 690 * Empty a socket buffer into a flat reply buffer.
689 */ 691 */
690void afs_transfer_reply(struct afs_call *call, struct sk_buff *skb) 692int afs_transfer_reply(struct afs_call *call, struct sk_buff *skb, bool last)
691{ 693{
692 size_t len = skb->len; 694 size_t len = skb->len;
693 695
694 if (skb_copy_bits(skb, 0, call->buffer + call->reply_size, len) < 0) 696 if (len > call->reply_max - call->reply_size) {
695 BUG(); 697 _leave(" = -EBADMSG [%zu > %u]",
696 call->reply_size += len; 698 len, call->reply_max - call->reply_size);
699 return -EBADMSG;
700 }
701
702 if (len > 0) {
703 if (skb_copy_bits(skb, 0, call->buffer + call->reply_size,
704 len) < 0)
705 BUG();
706 call->reply_size += len;
707 }
708
709 afs_data_consumed(call, skb);
710 if (!last)
711 return -EAGAIN;
712
713 if (call->reply_size != call->reply_max) {
714 _leave(" = -EBADMSG [%u != %u]",
715 call->reply_size, call->reply_max);
716 return -EBADMSG;
717 }
718 return 0;
697} 719}
698 720
699/* 721/*
@@ -745,7 +767,8 @@ static void afs_collect_incoming_call(struct work_struct *work)
745} 767}
746 768
747/* 769/*
748 * grab the operation ID from an incoming cache manager call 770 * Grab the operation ID from an incoming cache manager call. The socket
771 * buffer is discarded on error or if we don't yet have sufficient data.
749 */ 772 */
750static int afs_deliver_cm_op_id(struct afs_call *call, struct sk_buff *skb, 773static int afs_deliver_cm_op_id(struct afs_call *call, struct sk_buff *skb,
751 bool last) 774 bool last)
@@ -766,12 +789,9 @@ static int afs_deliver_cm_op_id(struct afs_call *call, struct sk_buff *skb,
766 call->offset += len; 789 call->offset += len;
767 790
768 if (call->offset < 4) { 791 if (call->offset < 4) {
769 if (last) { 792 afs_data_consumed(call, skb);
770 _leave(" = -EBADMSG [op ID short]"); 793 _leave(" = -EAGAIN");
771 return -EBADMSG; 794 return -EAGAIN;
772 }
773 _leave(" = 0 [incomplete]");
774 return 0;
775 } 795 }
776 796
777 call->state = AFS_CALL_AWAIT_REQUEST; 797 call->state = AFS_CALL_AWAIT_REQUEST;
@@ -855,7 +875,7 @@ void afs_send_simple_reply(struct afs_call *call, const void *buf, size_t len)
855} 875}
856 876
857/* 877/*
858 * extract a piece of data from the received data socket buffers 878 * Extract a piece of data from the received data socket buffers.
859 */ 879 */
860int afs_extract_data(struct afs_call *call, struct sk_buff *skb, 880int afs_extract_data(struct afs_call *call, struct sk_buff *skb,
861 bool last, void *buf, size_t count) 881 bool last, void *buf, size_t count)
@@ -873,10 +893,7 @@ int afs_extract_data(struct afs_call *call, struct sk_buff *skb,
873 call->offset += len; 893 call->offset += len;
874 894
875 if (call->offset < count) { 895 if (call->offset < count) {
876 if (last) { 896 afs_data_consumed(call, skb);
877 _leave(" = -EBADMSG [%d < %zu]", call->offset, count);
878 return -EBADMSG;
879 }
880 _leave(" = -EAGAIN"); 897 _leave(" = -EAGAIN");
881 return -EAGAIN; 898 return -EAGAIN;
882 } 899 }
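afs_transfer_reply() now owns the whole flat-reply protocol (bounds check, copy, consume, final size check), so its callers shrink to a single error test, as the fsclient.c and vlclient.c hunks show. A compilable sketch of the decision ladder; the skb copy is elided and the function is ours, though the field names follow the patch:

#include <errno.h>

struct mini_reply { unsigned int reply_size, reply_max; };

static int model_transfer_reply(struct mini_reply *r, unsigned int len,
                                int last)
{
        if (len > r->reply_max - r->reply_size)
                return -EBADMSG;        /* reply would overrun the buffer */
        r->reply_size += len;           /* skb_copy_bits() happens here */
        if (!last)
                return -EAGAIN;         /* wait for the rest of the reply */
        if (r->reply_size != r->reply_max)
                return -EBADMSG;        /* final packet but reply is short */
        return 0;
}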
diff --git a/fs/afs/vlclient.c b/fs/afs/vlclient.c
index 340afd0cd182..f94d1abdc3eb 100644
--- a/fs/afs/vlclient.c
+++ b/fs/afs/vlclient.c
@@ -64,16 +64,13 @@ static int afs_deliver_vl_get_entry_by_xxx(struct afs_call *call,
64 struct afs_cache_vlocation *entry; 64 struct afs_cache_vlocation *entry;
65 __be32 *bp; 65 __be32 *bp;
66 u32 tmp; 66 u32 tmp;
67 int loop; 67 int loop, ret;
68 68
69 _enter(",,%u", last); 69 _enter(",,%u", last);
70 70
71 afs_transfer_reply(call, skb); 71 ret = afs_transfer_reply(call, skb, last);
72 if (!last) 72 if (ret < 0)
73 return 0; 73 return ret;
74
75 if (call->reply_size != call->reply_max)
76 return -EBADMSG;
77 74
78 /* unmarshall the reply once we've received all of it */ 75 /* unmarshall the reply once we've received all of it */
79 entry = call->reply; 76 entry = call->reply;
diff --git a/fs/aio.c b/fs/aio.c
index fb8e45b88cd4..4fe81d1c60f9 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -239,7 +239,12 @@ static struct dentry *aio_mount(struct file_system_type *fs_type,
239 static const struct dentry_operations ops = { 239 static const struct dentry_operations ops = {
240 .d_dname = simple_dname, 240 .d_dname = simple_dname,
241 }; 241 };
242 return mount_pseudo(fs_type, "aio:", NULL, &ops, AIO_RING_MAGIC); 242 struct dentry *root = mount_pseudo(fs_type, "aio:", NULL, &ops,
243 AIO_RING_MAGIC);
244
245 if (!IS_ERR(root))
246 root->d_sb->s_iflags |= SB_I_NOEXEC;
247 return root;
243} 248}
244 249
245/* aio_setup 250/* aio_setup
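This aio_mount() fix, like the bd_mount() one further down, rests on mount_pseudo() returning an ERR_PTR-encoded dentry and never NULL, so success is tested with IS_ERR() before touching d_sb. A kernel-context sketch of the idiom, with a hypothetical filesystem name and a made-up magic value:

#include <linux/fs.h>
#include <linux/err.h>

static struct dentry *example_mount(struct file_system_type *fs_type,
                                    int flags, const char *dev_name,
                                    void *data)
{
        struct dentry *root = mount_pseudo(fs_type, "example:", NULL, NULL,
                                           0x4d595653 /* made-up magic */);

        if (!IS_ERR(root))      /* mount_pseudo() never returns NULL */
                root->d_sb->s_iflags |= SB_I_NOEXEC;
        return root;
}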
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 7f6aff3f72eb..e5495f37c6ed 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -853,6 +853,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
853 current->flags |= PF_RANDOMIZE; 853 current->flags |= PF_RANDOMIZE;
854 854
855 setup_new_exec(bprm); 855 setup_new_exec(bprm);
856 install_exec_creds(bprm);
856 857
857 /* Do this so that we can load the interpreter, if need be. We will 858 /* Do this so that we can load the interpreter, if need be. We will
858 change some of these later */ 859 change some of these later */
@@ -1044,7 +1045,6 @@ static int load_elf_binary(struct linux_binprm *bprm)
1044 goto out; 1045 goto out;
1045#endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */ 1046#endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */
1046 1047
1047 install_exec_creds(bprm);
1048 retval = create_elf_tables(bprm, &loc->elf_ex, 1048 retval = create_elf_tables(bprm, &loc->elf_ex,
1049 load_addr, interp_load_addr); 1049 load_addr, interp_load_addr);
1050 if (retval < 0) 1050 if (retval < 0)
diff --git a/fs/block_dev.c b/fs/block_dev.c
index c3cdde87cc8c..08ae99343d92 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -249,7 +249,8 @@ struct super_block *freeze_bdev(struct block_device *bdev)
249 * thaw_bdev drops it. 249 * thaw_bdev drops it.
250 */ 250 */
251 sb = get_super(bdev); 251 sb = get_super(bdev);
252 drop_super(sb); 252 if (sb)
253 drop_super(sb);
253 mutex_unlock(&bdev->bd_fsfreeze_mutex); 254 mutex_unlock(&bdev->bd_fsfreeze_mutex);
254 return sb; 255 return sb;
255 } 256 }
@@ -646,7 +647,7 @@ static struct dentry *bd_mount(struct file_system_type *fs_type,
646{ 647{
647 struct dentry *dent; 648 struct dentry *dent;
648 dent = mount_pseudo(fs_type, "bdev:", &bdev_sops, NULL, BDEVFS_MAGIC); 649 dent = mount_pseudo(fs_type, "bdev:", &bdev_sops, NULL, BDEVFS_MAGIC);
649 if (dent) 650 if (!IS_ERR(dent))
650 dent->d_sb->s_iflags |= SB_I_CGROUPWB; 651 dent->d_sb->s_iflags |= SB_I_CGROUPWB;
651 return dent; 652 return dent;
652} 653}
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index 2b88439c2ee8..455a6b2fd539 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -589,6 +589,7 @@ static void __merge_refs(struct list_head *head, int mode)
589 589
590 list_del(&ref2->list); 590 list_del(&ref2->list);
591 kmem_cache_free(btrfs_prelim_ref_cache, ref2); 591 kmem_cache_free(btrfs_prelim_ref_cache, ref2);
592 cond_resched();
592 } 593 }
593 594
594 } 595 }
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 2fe8f89091a3..33fe03551105 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -427,6 +427,7 @@ struct btrfs_space_info {
427 struct list_head ro_bgs; 427 struct list_head ro_bgs;
428 struct list_head priority_tickets; 428 struct list_head priority_tickets;
429 struct list_head tickets; 429 struct list_head tickets;
430 u64 tickets_id;
430 431
431 struct rw_semaphore groups_sem; 432 struct rw_semaphore groups_sem;
432 /* for block groups in our same type */ 433 /* for block groups in our same type */
@@ -1028,6 +1029,7 @@ struct btrfs_fs_info {
1028 struct btrfs_workqueue *qgroup_rescan_workers; 1029 struct btrfs_workqueue *qgroup_rescan_workers;
1029 struct completion qgroup_rescan_completion; 1030 struct completion qgroup_rescan_completion;
1030 struct btrfs_work qgroup_rescan_work; 1031 struct btrfs_work qgroup_rescan_work;
1032 bool qgroup_rescan_running; /* protected by qgroup_rescan_lock */
1031 1033
1032 /* filesystem state */ 1034 /* filesystem state */
1033 unsigned long fs_state; 1035 unsigned long fs_state;
@@ -1079,6 +1081,8 @@ struct btrfs_fs_info {
1079 struct list_head pinned_chunks; 1081 struct list_head pinned_chunks;
1080 1082
1081 int creating_free_space_tree; 1083 int creating_free_space_tree;
1084 /* Used to record internally whether fs has been frozen */
1085 int fs_frozen;
1082}; 1086};
1083 1087
1084struct btrfs_subvolume_writers { 1088struct btrfs_subvolume_writers {
@@ -2578,7 +2582,7 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
2578 struct btrfs_root *root, 2582 struct btrfs_root *root,
2579 u64 root_objectid, u64 owner, u64 offset, 2583 u64 root_objectid, u64 owner, u64 offset,
2580 struct btrfs_key *ins); 2584 struct btrfs_key *ins);
2581int btrfs_reserve_extent(struct btrfs_root *root, u64 num_bytes, 2585int btrfs_reserve_extent(struct btrfs_root *root, u64 ram_bytes, u64 num_bytes,
2582 u64 min_alloc_size, u64 empty_size, u64 hint_byte, 2586 u64 min_alloc_size, u64 empty_size, u64 hint_byte,
2583 struct btrfs_key *ins, int is_data, int delalloc); 2587 struct btrfs_key *ins, int is_data, int delalloc);
2584int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, 2588int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index b6d210e7a993..ac02e041464b 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -541,7 +541,6 @@ add_delayed_ref_head(struct btrfs_fs_info *fs_info,
541 struct btrfs_delayed_ref_head *existing; 541 struct btrfs_delayed_ref_head *existing;
542 struct btrfs_delayed_ref_head *head_ref = NULL; 542 struct btrfs_delayed_ref_head *head_ref = NULL;
543 struct btrfs_delayed_ref_root *delayed_refs; 543 struct btrfs_delayed_ref_root *delayed_refs;
544 struct btrfs_qgroup_extent_record *qexisting;
545 int count_mod = 1; 544 int count_mod = 1;
546 int must_insert_reserved = 0; 545 int must_insert_reserved = 0;
547 546
@@ -606,10 +605,8 @@ add_delayed_ref_head(struct btrfs_fs_info *fs_info,
606 qrecord->num_bytes = num_bytes; 605 qrecord->num_bytes = num_bytes;
607 qrecord->old_roots = NULL; 606 qrecord->old_roots = NULL;
608 607
 609		qexisting = btrfs_qgroup_insert_dirty_extent(fs_info, 608		if (btrfs_qgroup_insert_dirty_extent_nolock(fs_info,
610 delayed_refs, 609 delayed_refs, qrecord))
611 qrecord);
612 if (qexisting)
613 kfree(qrecord); 610 kfree(qrecord);
614 } 611 }
615 612
@@ -862,33 +859,6 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
862 return 0; 859 return 0;
863} 860}
864 861
865int btrfs_add_delayed_qgroup_reserve(struct btrfs_fs_info *fs_info,
866 struct btrfs_trans_handle *trans,
867 u64 ref_root, u64 bytenr, u64 num_bytes)
868{
869 struct btrfs_delayed_ref_root *delayed_refs;
870 struct btrfs_delayed_ref_head *ref_head;
871 int ret = 0;
872
873 if (!fs_info->quota_enabled || !is_fstree(ref_root))
874 return 0;
875
876 delayed_refs = &trans->transaction->delayed_refs;
877
878 spin_lock(&delayed_refs->lock);
879 ref_head = find_ref_head(&delayed_refs->href_root, bytenr, 0);
880 if (!ref_head) {
881 ret = -ENOENT;
882 goto out;
883 }
884 WARN_ON(ref_head->qgroup_reserved || ref_head->qgroup_ref_root);
885 ref_head->qgroup_ref_root = ref_root;
886 ref_head->qgroup_reserved = num_bytes;
887out:
888 spin_unlock(&delayed_refs->lock);
889 return ret;
890}
891
892int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info, 862int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
893 struct btrfs_trans_handle *trans, 863 struct btrfs_trans_handle *trans,
894 u64 bytenr, u64 num_bytes, 864 u64 bytenr, u64 num_bytes,
diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h
index 5fca9534a271..43f3629760e9 100644
--- a/fs/btrfs/delayed-ref.h
+++ b/fs/btrfs/delayed-ref.h
@@ -250,9 +250,6 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
250 u64 parent, u64 ref_root, 250 u64 parent, u64 ref_root,
251 u64 owner, u64 offset, u64 reserved, int action, 251 u64 owner, u64 offset, u64 reserved, int action,
252 struct btrfs_delayed_extent_op *extent_op); 252 struct btrfs_delayed_extent_op *extent_op);
253int btrfs_add_delayed_qgroup_reserve(struct btrfs_fs_info *fs_info,
254 struct btrfs_trans_handle *trans,
255 u64 ref_root, u64 bytenr, u64 num_bytes);
256int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info, 253int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
257 struct btrfs_trans_handle *trans, 254 struct btrfs_trans_handle *trans,
258 u64 bytenr, u64 num_bytes, 255 u64 bytenr, u64 num_bytes,
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 59febfb8d04a..54bc8c7c6bcd 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -559,8 +559,29 @@ static noinline int check_leaf(struct btrfs_root *root,
559 u32 nritems = btrfs_header_nritems(leaf); 559 u32 nritems = btrfs_header_nritems(leaf);
560 int slot; 560 int slot;
561 561
562 if (nritems == 0) 562 if (nritems == 0) {
563 struct btrfs_root *check_root;
564
565 key.objectid = btrfs_header_owner(leaf);
566 key.type = BTRFS_ROOT_ITEM_KEY;
567 key.offset = (u64)-1;
568
569 check_root = btrfs_get_fs_root(root->fs_info, &key, false);
570 /*
571 * The only reason we also check NULL here is that during
 572		 * open_ctree() some roots have not yet been set up.
573 */
574 if (!IS_ERR_OR_NULL(check_root)) {
575 /* if leaf is the root, then it's fine */
576 if (leaf->start !=
577 btrfs_root_bytenr(&check_root->root_item)) {
578 CORRUPT("non-root leaf's nritems is 0",
579 leaf, root, 0);
580 return -EIO;
581 }
582 }
563 return 0; 583 return 0;
584 }
564 585
565 /* Check the 0 item */ 586 /* Check the 0 item */
566 if (btrfs_item_offset_nr(leaf, 0) + btrfs_item_size_nr(leaf, 0) != 587 if (btrfs_item_offset_nr(leaf, 0) + btrfs_item_size_nr(leaf, 0) !=
@@ -612,6 +633,19 @@ static noinline int check_leaf(struct btrfs_root *root,
612 return 0; 633 return 0;
613} 634}
614 635
636static int check_node(struct btrfs_root *root, struct extent_buffer *node)
637{
638 unsigned long nr = btrfs_header_nritems(node);
639
640 if (nr == 0 || nr > BTRFS_NODEPTRS_PER_BLOCK(root)) {
641 btrfs_crit(root->fs_info,
642 "corrupt node: block %llu root %llu nritems %lu",
643 node->start, root->objectid, nr);
644 return -EIO;
645 }
646 return 0;
647}
648
615static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio, 649static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
616 u64 phy_offset, struct page *page, 650 u64 phy_offset, struct page *page,
617 u64 start, u64 end, int mirror) 651 u64 start, u64 end, int mirror)
@@ -682,6 +716,9 @@ static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
682 ret = -EIO; 716 ret = -EIO;
683 } 717 }
684 718
719 if (found_level > 0 && check_node(root, eb))
720 ret = -EIO;
721
685 if (!ret) 722 if (!ret)
686 set_extent_buffer_uptodate(eb); 723 set_extent_buffer_uptodate(eb);
687err: 724err:
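Two read-time sanity checks are introduced here: check_leaf() now tolerates nritems == 0 only when the leaf actually is the root recorded in its owner's root item, and the new check_node() rejects interior nodes whose key-pointer count falls outside 1..BTRFS_NODEPTRS_PER_BLOCK(root), a limit derived roughly from the node size minus the header divided by the size of a key pointer. A stand-alone model of the node bound, with an illustrative constant:

#include <errno.h>

#define DEMO_NODEPTRS_PER_BLOCK 493UL   /* hypothetical for a 16KiB node */

static int model_check_node(unsigned long nritems)
{
        if (nritems == 0 || nritems > DEMO_NODEPTRS_PER_BLOCK)
                return -EIO;    /* reject the block before the tree uses it */
        return 0;
}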
@@ -1618,8 +1655,8 @@ fail:
1618 return ret; 1655 return ret;
1619} 1656}
1620 1657
1621static struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info, 1658struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
1622 u64 root_id) 1659 u64 root_id)
1623{ 1660{
1624 struct btrfs_root *root; 1661 struct btrfs_root *root;
1625 1662
@@ -2298,6 +2335,7 @@ static void btrfs_init_qgroup(struct btrfs_fs_info *fs_info)
2298 fs_info->quota_enabled = 0; 2335 fs_info->quota_enabled = 0;
2299 fs_info->pending_quota_state = 0; 2336 fs_info->pending_quota_state = 0;
2300 fs_info->qgroup_ulist = NULL; 2337 fs_info->qgroup_ulist = NULL;
2338 fs_info->qgroup_rescan_running = false;
2301 mutex_init(&fs_info->qgroup_rescan_lock); 2339 mutex_init(&fs_info->qgroup_rescan_lock);
2302} 2340}
2303 2341
@@ -2624,6 +2662,7 @@ int open_ctree(struct super_block *sb,
2624 atomic_set(&fs_info->qgroup_op_seq, 0); 2662 atomic_set(&fs_info->qgroup_op_seq, 0);
2625 atomic_set(&fs_info->reada_works_cnt, 0); 2663 atomic_set(&fs_info->reada_works_cnt, 0);
2626 atomic64_set(&fs_info->tree_mod_seq, 0); 2664 atomic64_set(&fs_info->tree_mod_seq, 0);
2665 fs_info->fs_frozen = 0;
2627 fs_info->sb = sb; 2666 fs_info->sb = sb;
2628 fs_info->max_inline = BTRFS_DEFAULT_MAX_INLINE; 2667 fs_info->max_inline = BTRFS_DEFAULT_MAX_INLINE;
2629 fs_info->metadata_ratio = 0; 2668 fs_info->metadata_ratio = 0;
@@ -3739,8 +3778,15 @@ void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info,
3739 if (btrfs_root_refs(&root->root_item) == 0) 3778 if (btrfs_root_refs(&root->root_item) == 0)
3740 synchronize_srcu(&fs_info->subvol_srcu); 3779 synchronize_srcu(&fs_info->subvol_srcu);
3741 3780
3742 if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) 3781 if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
3743 btrfs_free_log(NULL, root); 3782 btrfs_free_log(NULL, root);
3783 if (root->reloc_root) {
3784 free_extent_buffer(root->reloc_root->node);
3785 free_extent_buffer(root->reloc_root->commit_root);
3786 btrfs_put_fs_root(root->reloc_root);
3787 root->reloc_root = NULL;
3788 }
3789 }
3744 3790
3745 if (root->free_ino_pinned) 3791 if (root->free_ino_pinned)
3746 __btrfs_remove_free_space_cache(root->free_ino_pinned); 3792 __btrfs_remove_free_space_cache(root->free_ino_pinned);
@@ -3851,7 +3897,7 @@ void close_ctree(struct btrfs_root *root)
3851 smp_mb(); 3897 smp_mb();
3852 3898
3853 /* wait for the qgroup rescan worker to stop */ 3899 /* wait for the qgroup rescan worker to stop */
3854 btrfs_qgroup_wait_for_completion(fs_info); 3900 btrfs_qgroup_wait_for_completion(fs_info, false);
3855 3901
3856 /* wait for the uuid_scan task to finish */ 3902 /* wait for the uuid_scan task to finish */
3857 down(&fs_info->uuid_tree_rescan_sem); 3903 down(&fs_info->uuid_tree_rescan_sem);
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
index b3207a0e09f7..f19a982f5a4f 100644
--- a/fs/btrfs/disk-io.h
+++ b/fs/btrfs/disk-io.h
@@ -68,6 +68,8 @@ struct extent_buffer *btrfs_find_tree_block(struct btrfs_fs_info *fs_info,
68struct btrfs_root *btrfs_read_fs_root(struct btrfs_root *tree_root, 68struct btrfs_root *btrfs_read_fs_root(struct btrfs_root *tree_root,
69 struct btrfs_key *location); 69 struct btrfs_key *location);
70int btrfs_init_fs_root(struct btrfs_root *root); 70int btrfs_init_fs_root(struct btrfs_root *root);
71struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
72 u64 root_id);
71int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info, 73int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info,
72 struct btrfs_root *root); 74 struct btrfs_root *root);
73void btrfs_free_fs_roots(struct btrfs_fs_info *fs_info); 75void btrfs_free_fs_roots(struct btrfs_fs_info *fs_info);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 61b494e8e604..38c2df84cabd 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -60,21 +60,6 @@ enum {
60 CHUNK_ALLOC_FORCE = 2, 60 CHUNK_ALLOC_FORCE = 2,
61}; 61};
62 62
63/*
64 * Control how reservations are dealt with.
65 *
66 * RESERVE_FREE - freeing a reservation.
67 * RESERVE_ALLOC - allocating space and we need to update bytes_may_use for
68 * ENOSPC accounting
69 * RESERVE_ALLOC_NO_ACCOUNT - allocating space and we should not update
70 * bytes_may_use as the ENOSPC accounting is done elsewhere
71 */
72enum {
73 RESERVE_FREE = 0,
74 RESERVE_ALLOC = 1,
75 RESERVE_ALLOC_NO_ACCOUNT = 2,
76};
77
78static int update_block_group(struct btrfs_trans_handle *trans, 63static int update_block_group(struct btrfs_trans_handle *trans,
79 struct btrfs_root *root, u64 bytenr, 64 struct btrfs_root *root, u64 bytenr,
80 u64 num_bytes, int alloc); 65 u64 num_bytes, int alloc);
@@ -104,9 +89,10 @@ static int find_next_key(struct btrfs_path *path, int level,
104 struct btrfs_key *key); 89 struct btrfs_key *key);
105static void dump_space_info(struct btrfs_space_info *info, u64 bytes, 90static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
106 int dump_block_groups); 91 int dump_block_groups);
107static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache, 92static int btrfs_add_reserved_bytes(struct btrfs_block_group_cache *cache,
108 u64 num_bytes, int reserve, 93 u64 ram_bytes, u64 num_bytes, int delalloc);
109 int delalloc); 94static int btrfs_free_reserved_bytes(struct btrfs_block_group_cache *cache,
95 u64 num_bytes, int delalloc);
110static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv, 96static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
111 u64 num_bytes); 97 u64 num_bytes);
112int btrfs_pin_extent(struct btrfs_root *root, 98int btrfs_pin_extent(struct btrfs_root *root,
@@ -3501,7 +3487,6 @@ again:
3501 dcs = BTRFS_DC_SETUP; 3487 dcs = BTRFS_DC_SETUP;
3502 else if (ret == -ENOSPC) 3488 else if (ret == -ENOSPC)
3503 set_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags); 3489 set_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags);
3504 btrfs_free_reserved_data_space(inode, 0, num_pages);
3505 3490
3506out_put: 3491out_put:
3507 iput(inode); 3492 iput(inode);
@@ -4472,6 +4457,15 @@ void check_system_chunk(struct btrfs_trans_handle *trans,
4472 } 4457 }
4473} 4458}
4474 4459
4460/*
4461 * If force is CHUNK_ALLOC_FORCE:
4462 * - return 1 if it successfully allocates a chunk,
4463 * - return errors including -ENOSPC otherwise.
4464 * If force is NOT CHUNK_ALLOC_FORCE:
4465 * - return 0 if it doesn't need to allocate a new chunk,
4466 * - return 1 if it successfully allocates a chunk,
4467 * - return errors including -ENOSPC otherwise.
4468 */
4475static int do_chunk_alloc(struct btrfs_trans_handle *trans, 4469static int do_chunk_alloc(struct btrfs_trans_handle *trans,
4476 struct btrfs_root *extent_root, u64 flags, int force) 4470 struct btrfs_root *extent_root, u64 flags, int force)
4477{ 4471{
@@ -4882,7 +4876,7 @@ static int flush_space(struct btrfs_root *root,
4882 btrfs_get_alloc_profile(root, 0), 4876 btrfs_get_alloc_profile(root, 0),
4883 CHUNK_ALLOC_NO_FORCE); 4877 CHUNK_ALLOC_NO_FORCE);
4884 btrfs_end_transaction(trans, root); 4878 btrfs_end_transaction(trans, root);
4885 if (ret == -ENOSPC) 4879 if (ret > 0 || ret == -ENOSPC)
4886 ret = 0; 4880 ret = 0;
4887 break; 4881 break;
4888 case COMMIT_TRANS: 4882 case COMMIT_TRANS:
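This one-liner matters because do_chunk_alloc(), per the comment added above it, returns 1 when it successfully allocates a chunk; without the ret > 0 test the flusher treated a successful allocation as a failure. The normalisation in isolation, as a sketch:

#include <errno.h>

/* Collapse do_chunk_alloc()'s tri-state result for flush_space(). */
static int model_flush_result(int ret)
{
        if (ret > 0 || ret == -ENOSPC)
                return 0;       /* allocated a chunk, or nothing left to try */
        return ret;             /* genuine failure */
}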
@@ -4907,11 +4901,6 @@ btrfs_calc_reclaim_metadata_size(struct btrfs_root *root,
4907 u64 expected; 4901 u64 expected;
4908 u64 to_reclaim = 0; 4902 u64 to_reclaim = 0;
4909 4903
4910 to_reclaim = min_t(u64, num_online_cpus() * SZ_1M, SZ_16M);
4911 if (can_overcommit(root, space_info, to_reclaim,
4912 BTRFS_RESERVE_FLUSH_ALL))
4913 return 0;
4914
4915 list_for_each_entry(ticket, &space_info->tickets, list) 4904 list_for_each_entry(ticket, &space_info->tickets, list)
4916 to_reclaim += ticket->bytes; 4905 to_reclaim += ticket->bytes;
4917 list_for_each_entry(ticket, &space_info->priority_tickets, list) 4906 list_for_each_entry(ticket, &space_info->priority_tickets, list)
@@ -4919,6 +4908,11 @@ btrfs_calc_reclaim_metadata_size(struct btrfs_root *root,
4919 if (to_reclaim) 4908 if (to_reclaim)
4920 return to_reclaim; 4909 return to_reclaim;
4921 4910
4911 to_reclaim = min_t(u64, num_online_cpus() * SZ_1M, SZ_16M);
4912 if (can_overcommit(root, space_info, to_reclaim,
4913 BTRFS_RESERVE_FLUSH_ALL))
4914 return 0;
4915
4922 used = space_info->bytes_used + space_info->bytes_reserved + 4916 used = space_info->bytes_used + space_info->bytes_reserved +
4923 space_info->bytes_pinned + space_info->bytes_readonly + 4917 space_info->bytes_pinned + space_info->bytes_readonly +
4924 space_info->bytes_may_use; 4918 space_info->bytes_may_use;
@@ -4972,12 +4966,12 @@ static void wake_all_tickets(struct list_head *head)
4972 */ 4966 */
4973static void btrfs_async_reclaim_metadata_space(struct work_struct *work) 4967static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
4974{ 4968{
4975 struct reserve_ticket *last_ticket = NULL;
4976 struct btrfs_fs_info *fs_info; 4969 struct btrfs_fs_info *fs_info;
4977 struct btrfs_space_info *space_info; 4970 struct btrfs_space_info *space_info;
4978 u64 to_reclaim; 4971 u64 to_reclaim;
4979 int flush_state; 4972 int flush_state;
4980 int commit_cycles = 0; 4973 int commit_cycles = 0;
4974 u64 last_tickets_id;
4981 4975
4982 fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work); 4976 fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work);
4983 space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA); 4977 space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
@@ -4990,8 +4984,7 @@ static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
4990 spin_unlock(&space_info->lock); 4984 spin_unlock(&space_info->lock);
4991 return; 4985 return;
4992 } 4986 }
4993 last_ticket = list_first_entry(&space_info->tickets, 4987 last_tickets_id = space_info->tickets_id;
4994 struct reserve_ticket, list);
4995 spin_unlock(&space_info->lock); 4988 spin_unlock(&space_info->lock);
4996 4989
4997 flush_state = FLUSH_DELAYED_ITEMS_NR; 4990 flush_state = FLUSH_DELAYED_ITEMS_NR;
@@ -5011,10 +5004,10 @@ static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
5011 space_info); 5004 space_info);
5012 ticket = list_first_entry(&space_info->tickets, 5005 ticket = list_first_entry(&space_info->tickets,
5013 struct reserve_ticket, list); 5006 struct reserve_ticket, list);
5014 if (last_ticket == ticket) { 5007 if (last_tickets_id == space_info->tickets_id) {
5015 flush_state++; 5008 flush_state++;
5016 } else { 5009 } else {
5017 last_ticket = ticket; 5010 last_tickets_id = space_info->tickets_id;
5018 flush_state = FLUSH_DELAYED_ITEMS_NR; 5011 flush_state = FLUSH_DELAYED_ITEMS_NR;
5019 if (commit_cycles) 5012 if (commit_cycles)
5020 commit_cycles--; 5013 commit_cycles--;
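Comparing the head ticket's address was ABA-prone: a satisfied ticket can be freed and a new one allocated at the same address, making the flusher believe no progress was made. The space_info->tickets_id counter added in ctree.h above only moves forward, so comparing snapshots is safe. A minimal model with our names:

#include <stdint.h>

struct demo_space_info { uint64_t tickets_id; };

/* Wake-up paths: bumped whenever a ticket is fully satisfied. */
static void demo_ticket_satisfied(struct demo_space_info *si)
{
        si->tickets_id++;
}

/* Flusher loop: progress means the counter moved since the snapshot. */
static int demo_made_progress(const struct demo_space_info *si,
                              uint64_t snapshot)
{
        return si->tickets_id != snapshot;
}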
@@ -5390,6 +5383,7 @@ again:
5390 list_del_init(&ticket->list); 5383 list_del_init(&ticket->list);
5391 num_bytes -= ticket->bytes; 5384 num_bytes -= ticket->bytes;
5392 ticket->bytes = 0; 5385 ticket->bytes = 0;
5386 space_info->tickets_id++;
5393 wake_up(&ticket->wait); 5387 wake_up(&ticket->wait);
5394 } else { 5388 } else {
5395 ticket->bytes -= num_bytes; 5389 ticket->bytes -= num_bytes;
@@ -5432,6 +5426,7 @@ again:
5432 num_bytes -= ticket->bytes; 5426 num_bytes -= ticket->bytes;
5433 space_info->bytes_may_use += ticket->bytes; 5427 space_info->bytes_may_use += ticket->bytes;
5434 ticket->bytes = 0; 5428 ticket->bytes = 0;
5429 space_info->tickets_id++;
5435 wake_up(&ticket->wait); 5430 wake_up(&ticket->wait);
5436 } else { 5431 } else {
5437 trace_btrfs_space_reservation(fs_info, "space_info", 5432 trace_btrfs_space_reservation(fs_info, "space_info",
@@ -6497,19 +6492,15 @@ void btrfs_wait_block_group_reservations(struct btrfs_block_group_cache *bg)
6497} 6492}
6498 6493
6499/** 6494/**
6500 * btrfs_update_reserved_bytes - update the block_group and space info counters 6495 * btrfs_add_reserved_bytes - update the block_group and space info counters
6501 * @cache: The cache we are manipulating 6496 * @cache: The cache we are manipulating
 6497 * @ram_bytes: The number of bytes of file content, which will be the same
 6498 * as @num_bytes except on the compression path.
6502 * @num_bytes: The number of bytes in question 6499 * @num_bytes: The number of bytes in question
6503 * @reserve: One of the reservation enums
6504 * @delalloc: The blocks are allocated for the delalloc write 6500 * @delalloc: The blocks are allocated for the delalloc write
6505 * 6501 *
6506 * This is called by the allocator when it reserves space, or by somebody who is 6502 * This is called by the allocator when it reserves space. Metadata
 6507 * freeing space that was never actually used on disk. For example if you 6503 * reservations are accounted for here so we do the proper
6508 * reserve some space for a new leaf in transaction A and before transaction A
6509 * commits you free that leaf, you call this with reserve set to 0 in order to
6510 * clear the reservation.
6511 *
6512 * Metadata reservations should be called with RESERVE_ALLOC so we do the proper
6513 * ENOSPC accounting. For data we handle the reservation through clearing the 6504 * ENOSPC accounting. For data we handle the reservation through clearing the
6514 * delalloc bits in the io_tree. We have to do this since we could end up 6505 * delalloc bits in the io_tree. We have to do this since we could end up
6515 * allocating less disk space for the amount of data we have reserved in the 6506 * allocating less disk space for the amount of data we have reserved in the
@@ -6519,44 +6510,63 @@ void btrfs_wait_block_group_reservations(struct btrfs_block_group_cache *bg)
6519 * make the reservation and return -EAGAIN, otherwise this function always 6510 * make the reservation and return -EAGAIN, otherwise this function always
6520 * succeeds. 6511 * succeeds.
6521 */ 6512 */
6522static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache, 6513static int btrfs_add_reserved_bytes(struct btrfs_block_group_cache *cache,
6523 u64 num_bytes, int reserve, int delalloc) 6514 u64 ram_bytes, u64 num_bytes, int delalloc)
6524{ 6515{
6525 struct btrfs_space_info *space_info = cache->space_info; 6516 struct btrfs_space_info *space_info = cache->space_info;
6526 int ret = 0; 6517 int ret = 0;
6527 6518
6528 spin_lock(&space_info->lock); 6519 spin_lock(&space_info->lock);
6529 spin_lock(&cache->lock); 6520 spin_lock(&cache->lock);
6530 if (reserve != RESERVE_FREE) { 6521 if (cache->ro) {
6531 if (cache->ro) { 6522 ret = -EAGAIN;
6532 ret = -EAGAIN;
6533 } else {
6534 cache->reserved += num_bytes;
6535 space_info->bytes_reserved += num_bytes;
6536 if (reserve == RESERVE_ALLOC) {
6537 trace_btrfs_space_reservation(cache->fs_info,
6538 "space_info", space_info->flags,
6539 num_bytes, 0);
6540 space_info->bytes_may_use -= num_bytes;
6541 }
6542
6543 if (delalloc)
6544 cache->delalloc_bytes += num_bytes;
6545 }
6546 } else { 6523 } else {
6547 if (cache->ro) 6524 cache->reserved += num_bytes;
6548 space_info->bytes_readonly += num_bytes; 6525 space_info->bytes_reserved += num_bytes;
6549 cache->reserved -= num_bytes;
6550 space_info->bytes_reserved -= num_bytes;
6551 6526
6527 trace_btrfs_space_reservation(cache->fs_info,
6528 "space_info", space_info->flags,
6529 ram_bytes, 0);
6530 space_info->bytes_may_use -= ram_bytes;
6552 if (delalloc) 6531 if (delalloc)
6553 cache->delalloc_bytes -= num_bytes; 6532 cache->delalloc_bytes += num_bytes;
6554 } 6533 }
6555 spin_unlock(&cache->lock); 6534 spin_unlock(&cache->lock);
6556 spin_unlock(&space_info->lock); 6535 spin_unlock(&space_info->lock);
6557 return ret; 6536 return ret;
6558} 6537}
6559 6538
6539/**
6540 * btrfs_free_reserved_bytes - update the block_group and space info counters
6541 * @cache: The cache we are manipulating
6542 * @num_bytes: The number of bytes in question
6543 * @delalloc: The blocks are allocated for the delalloc write
6544 *
6545 * This is called by somebody who is freeing space that was never actually used
6546 * on disk. For example if you reserve some space for a new leaf in transaction
 6547 * A and before transaction A commits you free that leaf, you call this
 6548 * function in order to clear the reservation.
6549 */
6550
6551static int btrfs_free_reserved_bytes(struct btrfs_block_group_cache *cache,
6552 u64 num_bytes, int delalloc)
6553{
6554 struct btrfs_space_info *space_info = cache->space_info;
6555 int ret = 0;
6556
6557 spin_lock(&space_info->lock);
6558 spin_lock(&cache->lock);
6559 if (cache->ro)
6560 space_info->bytes_readonly += num_bytes;
6561 cache->reserved -= num_bytes;
6562 space_info->bytes_reserved -= num_bytes;
6563
6564 if (delalloc)
6565 cache->delalloc_bytes -= num_bytes;
6566 spin_unlock(&cache->lock);
6567 spin_unlock(&space_info->lock);
6568 return ret;
6569}
6560void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans, 6570void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
6561 struct btrfs_root *root) 6571 struct btrfs_root *root)
6562{ 6572{
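Splitting btrfs_update_reserved_bytes() removes the reserve-enum multiplexing and lets the allocation half take a separate ram_bytes: on a compressed write, delalloc reserved bytes_may_use for the uncompressed file content, while the extent actually handed out is the smaller compressed size, so the two counters must move by different amounts. A sketch of that asymmetry (not kernel code; locking elided):

#include <stdint.h>

struct demo_space { uint64_t bytes_reserved, bytes_may_use; };

static void demo_add_reserved(struct demo_space *s, uint64_t ram_bytes,
                              uint64_t num_bytes)
{
        s->bytes_reserved += num_bytes; /* on-disk bytes handed out */
        s->bytes_may_use  -= ram_bytes; /* what delalloc had reserved */
}

static void demo_free_reserved(struct demo_space *s, uint64_t num_bytes)
{
        s->bytes_reserved -= num_bytes; /* never-used reservation returned */
}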
@@ -7191,7 +7201,7 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
7191 WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)); 7201 WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
7192 7202
7193 btrfs_add_free_space(cache, buf->start, buf->len); 7203 btrfs_add_free_space(cache, buf->start, buf->len);
7194 btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE, 0); 7204 btrfs_free_reserved_bytes(cache, buf->len, 0);
7195 btrfs_put_block_group(cache); 7205 btrfs_put_block_group(cache);
7196 trace_btrfs_reserved_extent_free(root, buf->start, buf->len); 7206 trace_btrfs_reserved_extent_free(root, buf->start, buf->len);
7197 pin = 0; 7207 pin = 0;
@@ -7416,9 +7426,9 @@ btrfs_release_block_group(struct btrfs_block_group_cache *cache,
7416 * the free space extent currently. 7426 * the free space extent currently.
7417 */ 7427 */
7418static noinline int find_free_extent(struct btrfs_root *orig_root, 7428static noinline int find_free_extent(struct btrfs_root *orig_root,
7419 u64 num_bytes, u64 empty_size, 7429 u64 ram_bytes, u64 num_bytes, u64 empty_size,
7420 u64 hint_byte, struct btrfs_key *ins, 7430 u64 hint_byte, struct btrfs_key *ins,
7421 u64 flags, int delalloc) 7431 u64 flags, int delalloc)
7422{ 7432{
7423 int ret = 0; 7433 int ret = 0;
7424 struct btrfs_root *root = orig_root->fs_info->extent_root; 7434 struct btrfs_root *root = orig_root->fs_info->extent_root;
@@ -7430,8 +7440,6 @@ static noinline int find_free_extent(struct btrfs_root *orig_root,
7430 struct btrfs_space_info *space_info; 7440 struct btrfs_space_info *space_info;
7431 int loop = 0; 7441 int loop = 0;
7432 int index = __get_raid_index(flags); 7442 int index = __get_raid_index(flags);
7433 int alloc_type = (flags & BTRFS_BLOCK_GROUP_DATA) ?
7434 RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC;
7435 bool failed_cluster_refill = false; 7443 bool failed_cluster_refill = false;
7436 bool failed_alloc = false; 7444 bool failed_alloc = false;
7437 bool use_cluster = true; 7445 bool use_cluster = true;
@@ -7763,8 +7771,8 @@ checks:
7763 search_start - offset); 7771 search_start - offset);
7764 BUG_ON(offset > search_start); 7772 BUG_ON(offset > search_start);
7765 7773
7766 ret = btrfs_update_reserved_bytes(block_group, num_bytes, 7774 ret = btrfs_add_reserved_bytes(block_group, ram_bytes,
7767 alloc_type, delalloc); 7775 num_bytes, delalloc);
7768 if (ret == -EAGAIN) { 7776 if (ret == -EAGAIN) {
7769 btrfs_add_free_space(block_group, offset, num_bytes); 7777 btrfs_add_free_space(block_group, offset, num_bytes);
7770 goto loop; 7778 goto loop;
@@ -7936,7 +7944,7 @@ again:
7936 up_read(&info->groups_sem); 7944 up_read(&info->groups_sem);
7937} 7945}
7938 7946
7939int btrfs_reserve_extent(struct btrfs_root *root, 7947int btrfs_reserve_extent(struct btrfs_root *root, u64 ram_bytes,
7940 u64 num_bytes, u64 min_alloc_size, 7948 u64 num_bytes, u64 min_alloc_size,
7941 u64 empty_size, u64 hint_byte, 7949 u64 empty_size, u64 hint_byte,
7942 struct btrfs_key *ins, int is_data, int delalloc) 7950 struct btrfs_key *ins, int is_data, int delalloc)
@@ -7948,8 +7956,8 @@ int btrfs_reserve_extent(struct btrfs_root *root,
7948 flags = btrfs_get_alloc_profile(root, is_data); 7956 flags = btrfs_get_alloc_profile(root, is_data);
7949again: 7957again:
7950 WARN_ON(num_bytes < root->sectorsize); 7958 WARN_ON(num_bytes < root->sectorsize);
7951 ret = find_free_extent(root, num_bytes, empty_size, hint_byte, ins, 7959 ret = find_free_extent(root, ram_bytes, num_bytes, empty_size,
7952 flags, delalloc); 7960 hint_byte, ins, flags, delalloc);
7953 if (!ret && !is_data) { 7961 if (!ret && !is_data) {
7954 btrfs_dec_block_group_reservations(root->fs_info, 7962 btrfs_dec_block_group_reservations(root->fs_info,
7955 ins->objectid); 7963 ins->objectid);
@@ -7958,6 +7966,7 @@ again:
7958 num_bytes = min(num_bytes >> 1, ins->offset); 7966 num_bytes = min(num_bytes >> 1, ins->offset);
7959 num_bytes = round_down(num_bytes, root->sectorsize); 7967 num_bytes = round_down(num_bytes, root->sectorsize);
7960 num_bytes = max(num_bytes, min_alloc_size); 7968 num_bytes = max(num_bytes, min_alloc_size);
7969 ram_bytes = num_bytes;
7961 if (num_bytes == min_alloc_size) 7970 if (num_bytes == min_alloc_size)
7962 final_tried = true; 7971 final_tried = true;
7963 goto again; 7972 goto again;
@@ -7995,7 +8004,7 @@ static int __btrfs_free_reserved_extent(struct btrfs_root *root,
7995 if (btrfs_test_opt(root->fs_info, DISCARD)) 8004 if (btrfs_test_opt(root->fs_info, DISCARD))
7996 ret = btrfs_discard_extent(root, start, len, NULL); 8005 ret = btrfs_discard_extent(root, start, len, NULL);
7997 btrfs_add_free_space(cache, start, len); 8006 btrfs_add_free_space(cache, start, len);
7998 btrfs_update_reserved_bytes(cache, len, RESERVE_FREE, delalloc); 8007 btrfs_free_reserved_bytes(cache, len, delalloc);
7999 trace_btrfs_reserved_extent_free(root, start, len); 8008 trace_btrfs_reserved_extent_free(root, start, len);
8000 } 8009 }
8001 8010
@@ -8208,6 +8217,7 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
8208{ 8217{
8209 int ret; 8218 int ret;
8210 struct btrfs_block_group_cache *block_group; 8219 struct btrfs_block_group_cache *block_group;
8220 struct btrfs_space_info *space_info;
8211 8221
8212 /* 8222 /*
8213 * Mixed block groups will exclude before processing the log so we only 8223 * Mixed block groups will exclude before processing the log so we only
@@ -8223,9 +8233,14 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
8223 if (!block_group) 8233 if (!block_group)
8224 return -EINVAL; 8234 return -EINVAL;
8225 8235
8226 ret = btrfs_update_reserved_bytes(block_group, ins->offset, 8236 space_info = block_group->space_info;
8227 RESERVE_ALLOC_NO_ACCOUNT, 0); 8237 spin_lock(&space_info->lock);
8228 BUG_ON(ret); /* logic error */ 8238 spin_lock(&block_group->lock);
8239 space_info->bytes_reserved += ins->offset;
8240 block_group->reserved += ins->offset;
8241 spin_unlock(&block_group->lock);
8242 spin_unlock(&space_info->lock);
8243
8229 ret = alloc_reserved_file_extent(trans, root, 0, root_objectid, 8244 ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
8230 0, owner, offset, ins, 1); 8245 0, owner, offset, ins, 1);
8231 btrfs_put_block_group(block_group); 8246 btrfs_put_block_group(block_group);
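Extents recreated during log replay were never reserved through delalloc, so there is no bytes_may_use to transfer; rather than grow the helper another mode, the hunk bumps the counters directly under both the space_info and block-group locks. In shorthand:

#include <stdint.h>

struct demo_counters { uint64_t bytes_reserved; };

static void demo_replay_reserve(struct demo_counters *c, uint64_t extent_len)
{
        /* no bytes_may_use adjustment: this space never went through the
         * delalloc reservation path */
        c->bytes_reserved += extent_len;
}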
@@ -8368,7 +8383,7 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
8368 if (IS_ERR(block_rsv)) 8383 if (IS_ERR(block_rsv))
8369 return ERR_CAST(block_rsv); 8384 return ERR_CAST(block_rsv);
8370 8385
8371 ret = btrfs_reserve_extent(root, blocksize, blocksize, 8386 ret = btrfs_reserve_extent(root, blocksize, blocksize, blocksize,
8372 empty_size, hint, &ins, 0, 0); 8387 empty_size, hint, &ins, 0, 0);
8373 if (ret) 8388 if (ret)
8374 goto out_unuse; 8389 goto out_unuse;
@@ -8521,35 +8536,6 @@ reada:
8521 wc->reada_slot = slot; 8536 wc->reada_slot = slot;
8522} 8537}
8523 8538
8524/*
8525 * These may not be seen by the usual inc/dec ref code so we have to
8526 * add them here.
8527 */
8528static int record_one_subtree_extent(struct btrfs_trans_handle *trans,
8529 struct btrfs_root *root, u64 bytenr,
8530 u64 num_bytes)
8531{
8532 struct btrfs_qgroup_extent_record *qrecord;
8533 struct btrfs_delayed_ref_root *delayed_refs;
8534
8535 qrecord = kmalloc(sizeof(*qrecord), GFP_NOFS);
8536 if (!qrecord)
8537 return -ENOMEM;
8538
8539 qrecord->bytenr = bytenr;
8540 qrecord->num_bytes = num_bytes;
8541 qrecord->old_roots = NULL;
8542
8543 delayed_refs = &trans->transaction->delayed_refs;
8544 spin_lock(&delayed_refs->lock);
8545 if (btrfs_qgroup_insert_dirty_extent(trans->fs_info,
8546 delayed_refs, qrecord))
8547 kfree(qrecord);
8548 spin_unlock(&delayed_refs->lock);
8549
8550 return 0;
8551}
8552
8553static int account_leaf_items(struct btrfs_trans_handle *trans, 8539static int account_leaf_items(struct btrfs_trans_handle *trans,
8554 struct btrfs_root *root, 8540 struct btrfs_root *root,
8555 struct extent_buffer *eb) 8541 struct extent_buffer *eb)
@@ -8583,7 +8569,8 @@ static int account_leaf_items(struct btrfs_trans_handle *trans,
8583 8569
8584 num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi); 8570 num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);
8585 8571
8586 ret = record_one_subtree_extent(trans, root, bytenr, num_bytes); 8572 ret = btrfs_qgroup_insert_dirty_extent(trans, root->fs_info,
8573 bytenr, num_bytes, GFP_NOFS);
8587 if (ret) 8574 if (ret)
8588 return ret; 8575 return ret;
8589 } 8576 }
@@ -8732,8 +8719,9 @@ walk_down:
8732 btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK); 8719 btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
8733 path->locks[level] = BTRFS_READ_LOCK_BLOCKING; 8720 path->locks[level] = BTRFS_READ_LOCK_BLOCKING;
8734 8721
8735 ret = record_one_subtree_extent(trans, root, child_bytenr, 8722 ret = btrfs_qgroup_insert_dirty_extent(trans,
8736 root->nodesize); 8723 root->fs_info, child_bytenr,
8724 root->nodesize, GFP_NOFS);
8737 if (ret) 8725 if (ret)
8738 goto out; 8726 goto out;
8739 } 8727 }
@@ -9906,6 +9894,7 @@ static int find_first_block_group(struct btrfs_root *root,
9906 } else { 9894 } else {
9907 ret = 0; 9895 ret = 0;
9908 } 9896 }
9897 free_extent_map(em);
9909 goto out; 9898 goto out;
9910 } 9899 }
9911 path->slots[0]++; 9900 path->slots[0]++;
@@ -9942,6 +9931,7 @@ void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
9942 block_group->iref = 0; 9931 block_group->iref = 0;
9943 block_group->inode = NULL; 9932 block_group->inode = NULL;
9944 spin_unlock(&block_group->lock); 9933 spin_unlock(&block_group->lock);
9934 ASSERT(block_group->io_ctl.inode == NULL);
9945 iput(inode); 9935 iput(inode);
9946 last = block_group->key.objectid + block_group->key.offset; 9936 last = block_group->key.objectid + block_group->key.offset;
9947 btrfs_put_block_group(block_group); 9937 btrfs_put_block_group(block_group);
@@ -9999,6 +9989,10 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
9999 free_excluded_extents(info->extent_root, block_group); 9989 free_excluded_extents(info->extent_root, block_group);
10000 9990
10001 btrfs_remove_free_space_cache(block_group); 9991 btrfs_remove_free_space_cache(block_group);
9992 ASSERT(list_empty(&block_group->dirty_list));
9993 ASSERT(list_empty(&block_group->io_list));
9994 ASSERT(list_empty(&block_group->bg_list));
9995 ASSERT(atomic_read(&block_group->count) == 1);
10002 btrfs_put_block_group(block_group); 9996 btrfs_put_block_group(block_group);
10003 9997
10004 spin_lock(&info->block_group_cache_lock); 9998 spin_lock(&info->block_group_cache_lock);
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index bc2729a7612d..28cd88fccc7e 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -20,6 +20,7 @@
20#define EXTENT_DAMAGED (1U << 14) 20#define EXTENT_DAMAGED (1U << 14)
21#define EXTENT_NORESERVE (1U << 15) 21#define EXTENT_NORESERVE (1U << 15)
22#define EXTENT_QGROUP_RESERVED (1U << 16) 22#define EXTENT_QGROUP_RESERVED (1U << 16)
23#define EXTENT_CLEAR_DATA_RESV (1U << 17)
23#define EXTENT_IOBITS (EXTENT_LOCKED | EXTENT_WRITEBACK) 24#define EXTENT_IOBITS (EXTENT_LOCKED | EXTENT_WRITEBACK)
24#define EXTENT_CTLBITS (EXTENT_DO_ACCOUNTING | EXTENT_FIRST_DELALLOC) 25#define EXTENT_CTLBITS (EXTENT_DO_ACCOUNTING | EXTENT_FIRST_DELALLOC)
25 26
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 9404121fd5f7..fea31a4a6e36 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -2033,6 +2033,14 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
2033 */ 2033 */
2034 clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC, 2034 clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
2035 &BTRFS_I(inode)->runtime_flags); 2035 &BTRFS_I(inode)->runtime_flags);
2036 /*
 2037		 * An ordered extent might have started earlier and completed
 2038		 * already with I/O errors, in which case the inode was not
2039 * updated and we end up here. So check the inode's mapping
2040 * flags for any errors that might have happened while doing
2041 * writeback of file data.
2042 */
2043 ret = btrfs_inode_check_errors(inode);
2036 inode_unlock(inode); 2044 inode_unlock(inode);
2037 goto out; 2045 goto out;
2038 } 2046 }
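The early-return path that skips logging still has to surface writeback failures: an ordered extent may have completed with an I/O error without updating the inode, leaving the only record of the failure in the mapping's error flags. btrfs_inode_check_errors() is the patch's helper; the model below is ours, with stand-in flag bits:

#include <errno.h>

#define DEMO_AS_EIO    (1u << 0)
#define DEMO_AS_ENOSPC (1u << 1)

static int model_check_mapping_errors(unsigned int flags)
{
        if (flags & DEMO_AS_ENOSPC)
                return -ENOSPC;
        if (flags & DEMO_AS_EIO)
                return -EIO;
        return 0;               /* fsync may report success */
}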
@@ -2062,7 +2070,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
2062 } 2070 }
2063 trans->sync = true; 2071 trans->sync = true;
2064 2072
2065 btrfs_init_log_ctx(&ctx); 2073 btrfs_init_log_ctx(&ctx, inode);
2066 2074
2067 ret = btrfs_log_dentry_safe(trans, root, dentry, start, end, &ctx); 2075 ret = btrfs_log_dentry_safe(trans, root, dentry, start, end, &ctx);
2068 if (ret < 0) { 2076 if (ret < 0) {
@@ -2667,6 +2675,7 @@ static long btrfs_fallocate(struct file *file, int mode,
2667 2675
2668 alloc_start = round_down(offset, blocksize); 2676 alloc_start = round_down(offset, blocksize);
2669 alloc_end = round_up(offset + len, blocksize); 2677 alloc_end = round_up(offset + len, blocksize);
2678 cur_offset = alloc_start;
2670 2679
2671 /* Make sure we aren't being give some crap mode */ 2680 /* Make sure we aren't being give some crap mode */
2672 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE)) 2681 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
@@ -2759,7 +2768,6 @@ static long btrfs_fallocate(struct file *file, int mode,
2759 2768
2760 /* First, check if we exceed the qgroup limit */ 2769 /* First, check if we exceed the qgroup limit */
2761 INIT_LIST_HEAD(&reserve_list); 2770 INIT_LIST_HEAD(&reserve_list);
2762 cur_offset = alloc_start;
2763 while (1) { 2771 while (1) {
2764 em = btrfs_get_extent(inode, NULL, 0, cur_offset, 2772 em = btrfs_get_extent(inode, NULL, 0, cur_offset,
2765 alloc_end - cur_offset, 0); 2773 alloc_end - cur_offset, 0);
@@ -2786,6 +2794,14 @@ static long btrfs_fallocate(struct file *file, int mode,
2786 last_byte - cur_offset); 2794 last_byte - cur_offset);
2787 if (ret < 0) 2795 if (ret < 0)
2788 break; 2796 break;
2797 } else {
2798 /*
 2799			 * We do not need to reserve an unwritten extent for this
 2800			 * range; free the reserved data space first, otherwise
 2801			 * it will result in a false ENOSPC error.
2802 */
2803 btrfs_free_reserved_data_space(inode, cur_offset,
2804 last_byte - cur_offset);
2789 } 2805 }
2790 free_extent_map(em); 2806 free_extent_map(em);
2791 cur_offset = last_byte; 2807 cur_offset = last_byte;
@@ -2803,6 +2819,9 @@ static long btrfs_fallocate(struct file *file, int mode,
2803 range->start, 2819 range->start,
2804 range->len, 1 << inode->i_blkbits, 2820 range->len, 1 << inode->i_blkbits,
2805 offset + len, &alloc_hint); 2821 offset + len, &alloc_hint);
2822 else
2823 btrfs_free_reserved_data_space(inode, range->start,
2824 range->len);
2806 list_del(&range->list); 2825 list_del(&range->list);
2807 kfree(range); 2826 kfree(range);
2808 } 2827 }
@@ -2837,18 +2856,11 @@ out_unlock:
2837 unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end, 2856 unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
2838 &cached_state, GFP_KERNEL); 2857 &cached_state, GFP_KERNEL);
2839out: 2858out:
2840 /*
2841 * As we waited the extent range, the data_rsv_map must be empty
2842 * in the range, as written data range will be released from it.
2843 * And for prealloacted extent, it will also be released when
2844 * its metadata is written.
2845 * So this is completely used as cleanup.
2846 */
2847 btrfs_qgroup_free_data(inode, alloc_start, alloc_end - alloc_start);
2848 inode_unlock(inode); 2859 inode_unlock(inode);
2849 /* Let go of our reservation. */ 2860 /* Let go of our reservation. */
2850 btrfs_free_reserved_data_space(inode, alloc_start, 2861 if (ret != 0)
2851 alloc_end - alloc_start); 2862 btrfs_free_reserved_data_space(inode, alloc_start,
2863 alloc_end - cur_offset);
2852 return ret; 2864 return ret;
2853} 2865}
2854 2866
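The reservation cleanup becomes incremental: each sub-range is either converted into a preallocated extent or has its data-space reservation released on the spot, and cur_offset records how far that processing got. Only on error is anything left to release, namely the untouched tail. The rule, as a sketch:

#include <stdint.h>

/* Bytes still reserved when btrfs_fallocate() unwinds: zero on success,
 * the unprocessed remainder up to alloc_end otherwise. */
static uint64_t demo_leftover_reservation(int ret, uint64_t alloc_end,
                                          uint64_t cur_offset)
{
        return ret != 0 ? alloc_end - cur_offset : 0;
}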
diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
index aa6fabaee72e..359ee861b5a4 100644
--- a/fs/btrfs/inode-map.c
+++ b/fs/btrfs/inode-map.c
@@ -495,10 +495,9 @@ again:
495 ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, prealloc, 495 ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, prealloc,
496 prealloc, prealloc, &alloc_hint); 496 prealloc, prealloc, &alloc_hint);
497 if (ret) { 497 if (ret) {
498 btrfs_delalloc_release_space(inode, 0, prealloc); 498 btrfs_delalloc_release_metadata(inode, prealloc);
499 goto out_put; 499 goto out_put;
500 } 500 }
501 btrfs_free_reserved_data_space(inode, 0, prealloc);
502 501
503 ret = btrfs_write_out_ino_cache(root, trans, path, inode); 502 ret = btrfs_write_out_ino_cache(root, trans, path, inode);
504out_put: 503out_put:
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 2f5975954ccf..e6811c42e41e 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -566,6 +566,8 @@ cont:
566 PAGE_SET_WRITEBACK | 566 PAGE_SET_WRITEBACK |
567 page_error_op | 567 page_error_op |
568 PAGE_END_WRITEBACK); 568 PAGE_END_WRITEBACK);
569 btrfs_free_reserved_data_space_noquota(inode, start,
570 end - start + 1);
569 goto free_pages_out; 571 goto free_pages_out;
570 } 572 }
571 } 573 }
@@ -742,7 +744,7 @@ retry:
742 lock_extent(io_tree, async_extent->start, 744 lock_extent(io_tree, async_extent->start,
743 async_extent->start + async_extent->ram_size - 1); 745 async_extent->start + async_extent->ram_size - 1);
744 746
745 ret = btrfs_reserve_extent(root, 747 ret = btrfs_reserve_extent(root, async_extent->ram_size,
746 async_extent->compressed_size, 748 async_extent->compressed_size,
747 async_extent->compressed_size, 749 async_extent->compressed_size,
748 0, alloc_hint, &ins, 1, 1); 750 0, alloc_hint, &ins, 1, 1);
@@ -969,7 +971,8 @@ static noinline int cow_file_range(struct inode *inode,
969 EXTENT_DEFRAG, PAGE_UNLOCK | 971 EXTENT_DEFRAG, PAGE_UNLOCK |
970 PAGE_CLEAR_DIRTY | PAGE_SET_WRITEBACK | 972 PAGE_CLEAR_DIRTY | PAGE_SET_WRITEBACK |
971 PAGE_END_WRITEBACK); 973 PAGE_END_WRITEBACK);
972 974 btrfs_free_reserved_data_space_noquota(inode, start,
975 end - start + 1);
973 *nr_written = *nr_written + 976 *nr_written = *nr_written +
974 (end - start + PAGE_SIZE) / PAGE_SIZE; 977 (end - start + PAGE_SIZE) / PAGE_SIZE;
975 *page_started = 1; 978 *page_started = 1;
@@ -989,7 +992,7 @@ static noinline int cow_file_range(struct inode *inode,
989 unsigned long op; 992 unsigned long op;
990 993
991 cur_alloc_size = disk_num_bytes; 994 cur_alloc_size = disk_num_bytes;
992 ret = btrfs_reserve_extent(root, cur_alloc_size, 995 ret = btrfs_reserve_extent(root, cur_alloc_size, cur_alloc_size,
993 root->sectorsize, 0, alloc_hint, 996 root->sectorsize, 0, alloc_hint,
994 &ins, 1, 1); 997 &ins, 1, 1);
995 if (ret < 0) 998 if (ret < 0)
@@ -1489,8 +1492,10 @@ out_check:
1489 extent_clear_unlock_delalloc(inode, cur_offset, 1492 extent_clear_unlock_delalloc(inode, cur_offset,
1490 cur_offset + num_bytes - 1, 1493 cur_offset + num_bytes - 1,
1491 locked_page, EXTENT_LOCKED | 1494 locked_page, EXTENT_LOCKED |
1492 EXTENT_DELALLOC, PAGE_UNLOCK | 1495 EXTENT_DELALLOC |
1493 PAGE_SET_PRIVATE2); 1496 EXTENT_CLEAR_DATA_RESV,
1497 PAGE_UNLOCK | PAGE_SET_PRIVATE2);
1498
1494 if (!nolock && nocow) 1499 if (!nolock && nocow)
1495 btrfs_end_write_no_snapshoting(root); 1500 btrfs_end_write_no_snapshoting(root);
1496 cur_offset = extent_end; 1501 cur_offset = extent_end;
@@ -1807,7 +1812,9 @@ static void btrfs_clear_bit_hook(struct inode *inode,
1807 return; 1812 return;
1808 1813
1809 if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID 1814 if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
1810 && do_list && !(state->state & EXTENT_NORESERVE)) 1815 && do_list && !(state->state & EXTENT_NORESERVE)
1816 && (*bits & (EXTENT_DO_ACCOUNTING |
1817 EXTENT_CLEAR_DATA_RESV)))
1811 btrfs_free_reserved_data_space_noquota(inode, 1818 btrfs_free_reserved_data_space_noquota(inode,
1812 state->start, len); 1819 state->start, len);
1813 1820
@@ -3435,10 +3442,10 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
3435 found_key.offset = 0; 3442 found_key.offset = 0;
3436 inode = btrfs_iget(root->fs_info->sb, &found_key, root, NULL); 3443 inode = btrfs_iget(root->fs_info->sb, &found_key, root, NULL);
3437 ret = PTR_ERR_OR_ZERO(inode); 3444 ret = PTR_ERR_OR_ZERO(inode);
3438 if (ret && ret != -ESTALE) 3445 if (ret && ret != -ENOENT)
3439 goto out; 3446 goto out;
3440 3447
3441 if (ret == -ESTALE && root == root->fs_info->tree_root) { 3448 if (ret == -ENOENT && root == root->fs_info->tree_root) {
3442 struct btrfs_root *dead_root; 3449 struct btrfs_root *dead_root;
3443 struct btrfs_fs_info *fs_info = root->fs_info; 3450 struct btrfs_fs_info *fs_info = root->fs_info;
3444 int is_dead_root = 0; 3451 int is_dead_root = 0;
@@ -3474,7 +3481,7 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
3474 * Inode is already gone but the orphan item is still there, 3481 * Inode is already gone but the orphan item is still there,
3475 * kill the orphan item. 3482 * kill the orphan item.
3476 */ 3483 */
3477 if (ret == -ESTALE) { 3484 if (ret == -ENOENT) {
3478 trans = btrfs_start_transaction(root, 1); 3485 trans = btrfs_start_transaction(root, 1);
3479 if (IS_ERR(trans)) { 3486 if (IS_ERR(trans)) {
3480 ret = PTR_ERR(trans); 3487 ret = PTR_ERR(trans);
@@ -3633,7 +3640,7 @@ static noinline int acls_after_inode_item(struct extent_buffer *leaf,
3633/* 3640/*
3634 * read an inode from the btree into the in-memory inode 3641 * read an inode from the btree into the in-memory inode
3635 */ 3642 */
3636static void btrfs_read_locked_inode(struct inode *inode) 3643static int btrfs_read_locked_inode(struct inode *inode)
3637{ 3644{
3638 struct btrfs_path *path; 3645 struct btrfs_path *path;
3639 struct extent_buffer *leaf; 3646 struct extent_buffer *leaf;
@@ -3652,14 +3659,19 @@ static void btrfs_read_locked_inode(struct inode *inode)
3652 filled = true; 3659 filled = true;
3653 3660
3654 path = btrfs_alloc_path(); 3661 path = btrfs_alloc_path();
3655 if (!path) 3662 if (!path) {
3663 ret = -ENOMEM;
3656 goto make_bad; 3664 goto make_bad;
3665 }
3657 3666
3658 memcpy(&location, &BTRFS_I(inode)->location, sizeof(location)); 3667 memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
3659 3668
3660 ret = btrfs_lookup_inode(NULL, root, path, &location, 0); 3669 ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
3661 if (ret) 3670 if (ret) {
3671 if (ret > 0)
3672 ret = -ENOENT;
3662 goto make_bad; 3673 goto make_bad;
3674 }
3663 3675
3664 leaf = path->nodes[0]; 3676 leaf = path->nodes[0];
3665 3677
@@ -3812,11 +3824,12 @@ cache_acl:
3812 } 3824 }
3813 3825
3814 btrfs_update_iflags(inode); 3826 btrfs_update_iflags(inode);
3815 return; 3827 return 0;
3816 3828
3817make_bad: 3829make_bad:
3818 btrfs_free_path(path); 3830 btrfs_free_path(path);
3819 make_bad_inode(inode); 3831 make_bad_inode(inode);
3832 return ret;
3820} 3833}
3821 3834
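The hunk above converts btrfs_read_locked_inode() from void to int so lookup failures propagate to btrfs_iget() instead of being flattened into a generic -ESTALE. A minimal userspace sketch of the same tri-state convention (negative errno, 0 for found, positive "not found" mapped to -ENOENT); the names are illustrative, not kernel functions:

    #include <errno.h>
    #include <stdio.h>

    /* lookup stub: <0 on error, 0 on found, >0 on "not found" --
     * the same tri-state btrfs_lookup_inode() uses */
    static int lookup(int key) { return key == 42 ? 0 : 1; }

    static int read_locked_inode(int key)
    {
        int ret = lookup(key);

        if (ret) {
            if (ret > 0)        /* positive "not found" becomes -ENOENT */
                ret = -ENOENT;
            return ret;
        }
        return 0;
    }

    int main(void)
    {
        printf("%d %d\n", read_locked_inode(42),
               read_locked_inode(7) == -ENOENT); /* 0 1 */
        return 0;
    }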
3822/* 3835/*
@@ -4204,6 +4217,7 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
4204 int err = 0; 4217 int err = 0;
4205 struct btrfs_root *root = BTRFS_I(dir)->root; 4218 struct btrfs_root *root = BTRFS_I(dir)->root;
4206 struct btrfs_trans_handle *trans; 4219 struct btrfs_trans_handle *trans;
4220 u64 last_unlink_trans;
4207 4221
4208 if (inode->i_size > BTRFS_EMPTY_DIR_SIZE) 4222 if (inode->i_size > BTRFS_EMPTY_DIR_SIZE)
4209 return -ENOTEMPTY; 4223 return -ENOTEMPTY;
@@ -4226,11 +4240,27 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
4226 if (err) 4240 if (err)
4227 goto out; 4241 goto out;
4228 4242
4243 last_unlink_trans = BTRFS_I(inode)->last_unlink_trans;
4244
4229 /* now the directory is empty */ 4245 /* now the directory is empty */
4230 err = btrfs_unlink_inode(trans, root, dir, d_inode(dentry), 4246 err = btrfs_unlink_inode(trans, root, dir, d_inode(dentry),
4231 dentry->d_name.name, dentry->d_name.len); 4247 dentry->d_name.name, dentry->d_name.len);
4232 if (!err) 4248 if (!err) {
4233 btrfs_i_size_write(inode, 0); 4249 btrfs_i_size_write(inode, 0);
4250 /*
4251 * Propagate the last_unlink_trans value of the deleted dir to
 4252 * its parent directory. This is to prevent an unrecoverable
 4253 * log tree in case we do something like this:
4254 * 1) create dir foo
4255 * 2) create snapshot under dir foo
4256 * 3) delete the snapshot
4257 * 4) rmdir foo
4258 * 5) mkdir foo
4259 * 6) fsync foo or some file inside foo
4260 */
4261 if (last_unlink_trans >= trans->transid)
4262 BTRFS_I(dir)->last_unlink_trans = last_unlink_trans;
4263 }
4234out: 4264out:
4235 btrfs_end_transaction(trans, root); 4265 btrfs_end_transaction(trans, root);
4236 btrfs_btree_balance_dirty(root); 4266 btrfs_btree_balance_dirty(root);
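To make the rmdir comment above concrete, here is a tiny hedged model (plain userspace C with hypothetical names, not kernel APIs) of the last_unlink_trans propagation: only a value stamped by the current, not-yet-committed transaction is pushed up to the parent, which is what later forces an fsync of the parent to fall back to a full transaction commit.

    #include <stdio.h>

    struct dir { unsigned long long last_unlink_trans; };

    /* only values from the current (uncommitted) transaction matter;
     * older values were already made safe by a previous commit */
    static void rmdir_propagate(struct dir *parent, struct dir *victim,
                                unsigned long long current_transid)
    {
        if (victim->last_unlink_trans >= current_transid)
            parent->last_unlink_trans = victim->last_unlink_trans;
    }

    int main(void)
    {
        struct dir parent = { 0 }, foo = { 100 };

        rmdir_propagate(&parent, &foo, 100);
        printf("%llu\n", parent.last_unlink_trans); /* 100: fsync falls back */
        return 0;
    }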
@@ -5606,7 +5636,9 @@ struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
5606 return ERR_PTR(-ENOMEM); 5636 return ERR_PTR(-ENOMEM);
5607 5637
5608 if (inode->i_state & I_NEW) { 5638 if (inode->i_state & I_NEW) {
5609 btrfs_read_locked_inode(inode); 5639 int ret;
5640
5641 ret = btrfs_read_locked_inode(inode);
5610 if (!is_bad_inode(inode)) { 5642 if (!is_bad_inode(inode)) {
5611 inode_tree_add(inode); 5643 inode_tree_add(inode);
5612 unlock_new_inode(inode); 5644 unlock_new_inode(inode);
@@ -5615,7 +5647,8 @@ struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
5615 } else { 5647 } else {
5616 unlock_new_inode(inode); 5648 unlock_new_inode(inode);
5617 iput(inode); 5649 iput(inode);
5618 inode = ERR_PTR(-ESTALE); 5650 ASSERT(ret < 0);
5651 inode = ERR_PTR(ret < 0 ? ret : -ESTALE);
5619 } 5652 }
5620 } 5653 }
5621 5654
@@ -7225,7 +7258,7 @@ static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
7225 int ret; 7258 int ret;
7226 7259
7227 alloc_hint = get_extent_allocation_hint(inode, start, len); 7260 alloc_hint = get_extent_allocation_hint(inode, start, len);
7228 ret = btrfs_reserve_extent(root, len, root->sectorsize, 0, 7261 ret = btrfs_reserve_extent(root, len, len, root->sectorsize, 0,
7229 alloc_hint, &ins, 1, 1); 7262 alloc_hint, &ins, 1, 1);
7230 if (ret) 7263 if (ret)
7231 return ERR_PTR(ret); 7264 return ERR_PTR(ret);
@@ -7725,6 +7758,13 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
7725 ret = PTR_ERR(em2); 7758 ret = PTR_ERR(em2);
7726 goto unlock_err; 7759 goto unlock_err;
7727 } 7760 }
7761 /*
 7762 * For an inode marked NODATACOW or an extent marked PREALLOC,
 7763 * we use the existing or preallocated extent, so there is no
 7764 * need to adjust btrfs_space_info's bytes_may_use.
7765 */
7766 btrfs_free_reserved_data_space_noquota(inode,
7767 start, len);
7728 goto unlock; 7768 goto unlock;
7729 } 7769 }
7730 } 7770 }
@@ -7759,7 +7799,6 @@ unlock:
7759 i_size_write(inode, start + len); 7799 i_size_write(inode, start + len);
7760 7800
7761 adjust_dio_outstanding_extents(inode, dio_data, len); 7801 adjust_dio_outstanding_extents(inode, dio_data, len);
7762 btrfs_free_reserved_data_space(inode, start, len);
7763 WARN_ON(dio_data->reserve < len); 7802 WARN_ON(dio_data->reserve < len);
7764 dio_data->reserve -= len; 7803 dio_data->reserve -= len;
7765 dio_data->unsubmitted_oe_range_end = start + len; 7804 dio_data->unsubmitted_oe_range_end = start + len;
@@ -10280,6 +10319,7 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
10280 u64 last_alloc = (u64)-1; 10319 u64 last_alloc = (u64)-1;
10281 int ret = 0; 10320 int ret = 0;
10282 bool own_trans = true; 10321 bool own_trans = true;
10322 u64 end = start + num_bytes - 1;
10283 10323
10284 if (trans) 10324 if (trans)
10285 own_trans = false; 10325 own_trans = false;
@@ -10301,8 +10341,8 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
10301 * sized chunks. 10341 * sized chunks.
10302 */ 10342 */
10303 cur_bytes = min(cur_bytes, last_alloc); 10343 cur_bytes = min(cur_bytes, last_alloc);
10304 ret = btrfs_reserve_extent(root, cur_bytes, min_size, 0, 10344 ret = btrfs_reserve_extent(root, cur_bytes, cur_bytes,
10305 *alloc_hint, &ins, 1, 0); 10345 min_size, 0, *alloc_hint, &ins, 1, 0);
10306 if (ret) { 10346 if (ret) {
10307 if (own_trans) 10347 if (own_trans)
10308 btrfs_end_transaction(trans, root); 10348 btrfs_end_transaction(trans, root);
@@ -10388,6 +10428,9 @@ next:
10388 if (own_trans) 10428 if (own_trans)
10389 btrfs_end_transaction(trans, root); 10429 btrfs_end_transaction(trans, root);
10390 } 10430 }
10431 if (cur_offset < end)
10432 btrfs_free_reserved_data_space(inode, cur_offset,
10433 end - cur_offset + 1);
10391 return ret; 10434 return ret;
10392} 10435}
10393 10436
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 14ed1e9e6bc8..b2a2da5893af 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -5084,7 +5084,7 @@ static long btrfs_ioctl_quota_rescan_wait(struct file *file, void __user *arg)
5084 if (!capable(CAP_SYS_ADMIN)) 5084 if (!capable(CAP_SYS_ADMIN))
5085 return -EPERM; 5085 return -EPERM;
5086 5086
5087 return btrfs_qgroup_wait_for_completion(root->fs_info); 5087 return btrfs_qgroup_wait_for_completion(root->fs_info, true);
5088} 5088}
5089 5089
5090static long _btrfs_ioctl_set_received_subvol(struct file *file, 5090static long _btrfs_ioctl_set_received_subvol(struct file *file,
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 93ee1c18ef9d..8db2e29fdcf4 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -995,7 +995,7 @@ int btrfs_quota_disable(struct btrfs_trans_handle *trans,
995 goto out; 995 goto out;
996 fs_info->quota_enabled = 0; 996 fs_info->quota_enabled = 0;
997 fs_info->pending_quota_state = 0; 997 fs_info->pending_quota_state = 0;
998 btrfs_qgroup_wait_for_completion(fs_info); 998 btrfs_qgroup_wait_for_completion(fs_info, false);
999 spin_lock(&fs_info->qgroup_lock); 999 spin_lock(&fs_info->qgroup_lock);
1000 quota_root = fs_info->quota_root; 1000 quota_root = fs_info->quota_root;
1001 fs_info->quota_root = NULL; 1001 fs_info->quota_root = NULL;
@@ -1453,10 +1453,9 @@ int btrfs_qgroup_prepare_account_extents(struct btrfs_trans_handle *trans,
1453 return ret; 1453 return ret;
1454} 1454}
1455 1455
1456struct btrfs_qgroup_extent_record * 1456int btrfs_qgroup_insert_dirty_extent_nolock(struct btrfs_fs_info *fs_info,
1457btrfs_qgroup_insert_dirty_extent(struct btrfs_fs_info *fs_info, 1457 struct btrfs_delayed_ref_root *delayed_refs,
1458 struct btrfs_delayed_ref_root *delayed_refs, 1458 struct btrfs_qgroup_extent_record *record)
1459 struct btrfs_qgroup_extent_record *record)
1460{ 1459{
1461 struct rb_node **p = &delayed_refs->dirty_extent_root.rb_node; 1460 struct rb_node **p = &delayed_refs->dirty_extent_root.rb_node;
1462 struct rb_node *parent_node = NULL; 1461 struct rb_node *parent_node = NULL;
@@ -1475,12 +1474,42 @@ btrfs_qgroup_insert_dirty_extent(struct btrfs_fs_info *fs_info,
1475 else if (bytenr > entry->bytenr) 1474 else if (bytenr > entry->bytenr)
1476 p = &(*p)->rb_right; 1475 p = &(*p)->rb_right;
1477 else 1476 else
1478 return entry; 1477 return 1;
1479 } 1478 }
1480 1479
1481 rb_link_node(&record->node, parent_node, p); 1480 rb_link_node(&record->node, parent_node, p);
1482 rb_insert_color(&record->node, &delayed_refs->dirty_extent_root); 1481 rb_insert_color(&record->node, &delayed_refs->dirty_extent_root);
1483 return NULL; 1482 return 0;
1483}
1484
1485int btrfs_qgroup_insert_dirty_extent(struct btrfs_trans_handle *trans,
1486 struct btrfs_fs_info *fs_info, u64 bytenr, u64 num_bytes,
1487 gfp_t gfp_flag)
1488{
1489 struct btrfs_qgroup_extent_record *record;
1490 struct btrfs_delayed_ref_root *delayed_refs;
1491 int ret;
1492
1493 if (!fs_info->quota_enabled || bytenr == 0 || num_bytes == 0)
1494 return 0;
1495 if (WARN_ON(trans == NULL))
1496 return -EINVAL;
1497 record = kmalloc(sizeof(*record), gfp_flag);
1498 if (!record)
1499 return -ENOMEM;
1500
1501 delayed_refs = &trans->transaction->delayed_refs;
1502 record->bytenr = bytenr;
1503 record->num_bytes = num_bytes;
1504 record->old_roots = NULL;
1505
1506 spin_lock(&delayed_refs->lock);
1507 ret = btrfs_qgroup_insert_dirty_extent_nolock(fs_info, delayed_refs,
1508 record);
1509 spin_unlock(&delayed_refs->lock);
1510 if (ret > 0)
1511 kfree(record);
1512 return 0;
1484} 1513}
1485 1514
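The new pair above splits locking from allocation: the _nolock helper returns 0 on insert and >0 when an equal bytenr already sits in the rbtree, and the wrapper frees the record on the >0 path. A self-contained sketch of that return convention, with a plain binary tree standing in for the kernel rbtree (all names hypothetical):

    #include <stdio.h>
    #include <stdlib.h>

    struct record {
        unsigned long long bytenr;
        struct record *left, *right;
    };

    /* 0 if inserted, 1 if a record with the same bytenr already exists,
     * mirroring btrfs_qgroup_insert_dirty_extent_nolock() */
    static int insert_record_nolock(struct record **root, struct record *rec)
    {
        struct record **p = root;

        while (*p) {
            if (rec->bytenr < (*p)->bytenr)
                p = &(*p)->left;
            else if (rec->bytenr > (*p)->bytenr)
                p = &(*p)->right;
            else
                return 1;       /* existing record, caller may free rec */
        }
        rec->left = rec->right = NULL;
        *p = rec;
        return 0;
    }

    int main(void)
    {
        struct record *root = NULL;
        struct record *a = calloc(1, sizeof(*a));
        struct record *b = calloc(1, sizeof(*b));

        a->bytenr = b->bytenr = 4096;
        printf("%d\n", insert_record_nolock(&root, a));  /* 0: inserted */
        if (insert_record_nolock(&root, b) > 0)
            free(b);                                     /* duplicate freed */
        return 0;
    }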
1486#define UPDATE_NEW 0 1515#define UPDATE_NEW 0
@@ -2303,6 +2332,10 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
2303 int err = -ENOMEM; 2332 int err = -ENOMEM;
2304 int ret = 0; 2333 int ret = 0;
2305 2334
2335 mutex_lock(&fs_info->qgroup_rescan_lock);
2336 fs_info->qgroup_rescan_running = true;
2337 mutex_unlock(&fs_info->qgroup_rescan_lock);
2338
2306 path = btrfs_alloc_path(); 2339 path = btrfs_alloc_path();
2307 if (!path) 2340 if (!path)
2308 goto out; 2341 goto out;
@@ -2369,6 +2402,9 @@ out:
2369 } 2402 }
2370 2403
2371done: 2404done:
2405 mutex_lock(&fs_info->qgroup_rescan_lock);
2406 fs_info->qgroup_rescan_running = false;
2407 mutex_unlock(&fs_info->qgroup_rescan_lock);
2372 complete_all(&fs_info->qgroup_rescan_completion); 2408 complete_all(&fs_info->qgroup_rescan_completion);
2373} 2409}
2374 2410
@@ -2487,20 +2523,26 @@ btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info)
2487 return 0; 2523 return 0;
2488} 2524}
2489 2525
2490int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info) 2526int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info,
2527 bool interruptible)
2491{ 2528{
2492 int running; 2529 int running;
2493 int ret = 0; 2530 int ret = 0;
2494 2531
2495 mutex_lock(&fs_info->qgroup_rescan_lock); 2532 mutex_lock(&fs_info->qgroup_rescan_lock);
2496 spin_lock(&fs_info->qgroup_lock); 2533 spin_lock(&fs_info->qgroup_lock);
2497 running = fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN; 2534 running = fs_info->qgroup_rescan_running;
2498 spin_unlock(&fs_info->qgroup_lock); 2535 spin_unlock(&fs_info->qgroup_lock);
2499 mutex_unlock(&fs_info->qgroup_rescan_lock); 2536 mutex_unlock(&fs_info->qgroup_rescan_lock);
2500 2537
2501 if (running) 2538 if (!running)
2539 return 0;
2540
2541 if (interruptible)
2502 ret = wait_for_completion_interruptible( 2542 ret = wait_for_completion_interruptible(
2503 &fs_info->qgroup_rescan_completion); 2543 &fs_info->qgroup_rescan_completion);
2544 else
2545 wait_for_completion(&fs_info->qgroup_rescan_completion);
2504 2546
2505 return ret; 2547 return ret;
2506} 2548}
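btrfs_qgroup_wait_for_completion() now keys off the dedicated qgroup_rescan_running flag and waits either interruptibly (the ioctl path) or unconditionally (quota disable). A rough userspace analogue of the uninterruptible branch, built on a pthread "completion"; the kernel's interruptible variant can additionally return -ERESTARTSYS on a signal. Everything here is illustrative, not kernel code:

    #include <pthread.h>
    #include <stdio.h>

    struct completion {
        pthread_mutex_t lock;
        pthread_cond_t cond;
        int done;
    };

    static void complete_all(struct completion *c)
    {
        pthread_mutex_lock(&c->lock);
        c->done = 1;
        pthread_cond_broadcast(&c->cond);
        pthread_mutex_unlock(&c->lock);
    }

    static void wait_for_completion(struct completion *c)
    {
        pthread_mutex_lock(&c->lock);
        while (!c->done)
            pthread_cond_wait(&c->cond, &c->lock);
        pthread_mutex_unlock(&c->lock);
    }

    static void *worker(void *arg)
    {
        complete_all(arg);
        return NULL;
    }

    int main(void)
    {
        struct completion c = { PTHREAD_MUTEX_INITIALIZER,
                                PTHREAD_COND_INITIALIZER, 0 };
        pthread_t t;

        pthread_create(&t, NULL, worker, &c);
        wait_for_completion(&c);   /* the !interruptible branch above */
        pthread_join(&t, NULL);
        puts("rescan finished");
        return 0;
    }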
diff --git a/fs/btrfs/qgroup.h b/fs/btrfs/qgroup.h
index 710887c06aaf..1bc64c864b62 100644
--- a/fs/btrfs/qgroup.h
+++ b/fs/btrfs/qgroup.h
@@ -46,7 +46,8 @@ int btrfs_quota_disable(struct btrfs_trans_handle *trans,
46 struct btrfs_fs_info *fs_info); 46 struct btrfs_fs_info *fs_info);
47int btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info); 47int btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info);
48void btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info); 48void btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info);
49int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info); 49int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info,
50 bool interruptible);
50int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, 51int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans,
51 struct btrfs_fs_info *fs_info, u64 src, u64 dst); 52 struct btrfs_fs_info *fs_info, u64 src, u64 dst);
52int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans, 53int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans,
@@ -63,10 +64,35 @@ void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info);
63struct btrfs_delayed_extent_op; 64struct btrfs_delayed_extent_op;
64int btrfs_qgroup_prepare_account_extents(struct btrfs_trans_handle *trans, 65int btrfs_qgroup_prepare_account_extents(struct btrfs_trans_handle *trans,
65 struct btrfs_fs_info *fs_info); 66 struct btrfs_fs_info *fs_info);
66struct btrfs_qgroup_extent_record * 67/*
67btrfs_qgroup_insert_dirty_extent(struct btrfs_fs_info *fs_info, 68 * Insert one dirty extent record into @delayed_refs, informing qgroup to
68 struct btrfs_delayed_ref_root *delayed_refs, 69 * account that extent at commit trans time.
69 struct btrfs_qgroup_extent_record *record); 70 *
 71 * No-lock version: the caller must hold the delayed ref lock and allocate memory.
 72 *
 73 * Return 0 for a successful insert.
 74 * Return >0 if the record already exists; the caller can free @record safely.
 75 * Errors are not possible.
76 */
77int btrfs_qgroup_insert_dirty_extent_nolock(
78 struct btrfs_fs_info *fs_info,
79 struct btrfs_delayed_ref_root *delayed_refs,
80 struct btrfs_qgroup_extent_record *record);
81
82/*
83 * Insert one dirty extent record into @delayed_refs, informing qgroup to
84 * account that extent at commit trans time.
85 *
86 * Better encapsulated version.
87 *
88 * Return 0 if the operation is done.
89 * Return <0 for error, like memory allocation failure or invalid parameter
90 * (NULL trans)
91 */
92int btrfs_qgroup_insert_dirty_extent(struct btrfs_trans_handle *trans,
93 struct btrfs_fs_info *fs_info, u64 bytenr, u64 num_bytes,
94 gfp_t gfp_flag);
95
70int 96int
71btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans, 97btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans,
72 struct btrfs_fs_info *fs_info, 98 struct btrfs_fs_info *fs_info,
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index b26a5aea41b4..c0c13dc6fe12 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -31,6 +31,7 @@
31#include "async-thread.h" 31#include "async-thread.h"
32#include "free-space-cache.h" 32#include "free-space-cache.h"
33#include "inode-map.h" 33#include "inode-map.h"
34#include "qgroup.h"
34 35
35/* 36/*
36 * backref_node, mapping_node and tree_block start with this 37 * backref_node, mapping_node and tree_block start with this
@@ -3037,15 +3038,19 @@ int prealloc_file_extent_cluster(struct inode *inode,
3037 u64 num_bytes; 3038 u64 num_bytes;
3038 int nr = 0; 3039 int nr = 0;
3039 int ret = 0; 3040 int ret = 0;
3041 u64 prealloc_start = cluster->start - offset;
3042 u64 prealloc_end = cluster->end - offset;
3043 u64 cur_offset;
3040 3044
3041 BUG_ON(cluster->start != cluster->boundary[0]); 3045 BUG_ON(cluster->start != cluster->boundary[0]);
3042 inode_lock(inode); 3046 inode_lock(inode);
3043 3047
3044 ret = btrfs_check_data_free_space(inode, cluster->start, 3048 ret = btrfs_check_data_free_space(inode, prealloc_start,
3045 cluster->end + 1 - cluster->start); 3049 prealloc_end + 1 - prealloc_start);
3046 if (ret) 3050 if (ret)
3047 goto out; 3051 goto out;
3048 3052
3053 cur_offset = prealloc_start;
3049 while (nr < cluster->nr) { 3054 while (nr < cluster->nr) {
3050 start = cluster->boundary[nr] - offset; 3055 start = cluster->boundary[nr] - offset;
3051 if (nr + 1 < cluster->nr) 3056 if (nr + 1 < cluster->nr)
@@ -3055,16 +3060,21 @@ int prealloc_file_extent_cluster(struct inode *inode,
3055 3060
3056 lock_extent(&BTRFS_I(inode)->io_tree, start, end); 3061 lock_extent(&BTRFS_I(inode)->io_tree, start, end);
3057 num_bytes = end + 1 - start; 3062 num_bytes = end + 1 - start;
3063 if (cur_offset < start)
3064 btrfs_free_reserved_data_space(inode, cur_offset,
3065 start - cur_offset);
3058 ret = btrfs_prealloc_file_range(inode, 0, start, 3066 ret = btrfs_prealloc_file_range(inode, 0, start,
3059 num_bytes, num_bytes, 3067 num_bytes, num_bytes,
3060 end + 1, &alloc_hint); 3068 end + 1, &alloc_hint);
3069 cur_offset = end + 1;
3061 unlock_extent(&BTRFS_I(inode)->io_tree, start, end); 3070 unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
3062 if (ret) 3071 if (ret)
3063 break; 3072 break;
3064 nr++; 3073 nr++;
3065 } 3074 }
3066 btrfs_free_reserved_data_space(inode, cluster->start, 3075 if (cur_offset < prealloc_end)
3067 cluster->end + 1 - cluster->start); 3076 btrfs_free_reserved_data_space(inode, cur_offset,
3077 prealloc_end + 1 - cur_offset);
3068out: 3078out:
3069 inode_unlock(inode); 3079 inode_unlock(inode);
3070 return ret; 3080 return ret;
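The reworked prealloc loop above reserves the whole range up front and then walks cur_offset forward, keeping only the bytes actually handed to btrfs_prealloc_file_range() reserved: the gaps between cluster boundaries and the unprocessed tail are released explicitly. A toy model of that bookkeeping with a simple byte counter (hypothetical names, not kernel calls):

    #include <assert.h>
    #include <stdio.h>

    static long long reserved;

    static void reserve(long long len) { reserved += len; }
    static void release(long long len) { reserved -= len; }

    /* prealloc ranges[i] = {start, end} inside [0, total); anything we skip
     * over (gaps and the tail) must be released so the reservation drops to
     * exactly the sum of the preallocated bytes */
    int main(void)
    {
        long long total = 100, cur = 0, used = 0;
        long long ranges[][2] = { {10, 19}, {40, 59} };
        int i, n = 2;

        reserve(total);
        for (i = 0; i < n; i++) {
            long long start = ranges[i][0], end = ranges[i][1];

            if (cur < start)
                release(start - cur);   /* gap before this extent */
            used += end + 1 - start;    /* stays reserved: preallocated */
            cur = end + 1;
        }
        if (cur < total)
            release(total - cur);       /* unprocessed tail */

        assert(reserved == used);
        printf("reserved=%lld used=%lld\n", reserved, used);
        return 0;
    }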
@@ -3916,6 +3926,90 @@ int prepare_to_relocate(struct reloc_control *rc)
3916 return 0; 3926 return 0;
3917} 3927}
3918 3928
3929/*
3930 * Qgroup fixer for data chunk relocation.
 3931 * Data relocation is done in the following steps:
 3932 * 1) Copy data extents into the data reloc tree
 3933 * 2) Create a tree reloc tree (a special snapshot) for the related subvolumes
 3934 * 3) Modify the file extents in the tree reloc tree
 3935 * 4) Merge the tree reloc tree with the original fs tree, by swapping tree blocks
 3936 *
 3937 * The problem is that the data and tree reloc trees are not accounted to
 3938 * qgroup, and step 4) only informs qgroup of the tree block changes, not of
 3939 * the file extents inside the tree blocks.
 3940 *
 3941 * The good news is that the related data extents are all in the data reloc
 3942 * tree, so we only need to inform qgroup of all file extents in the data
 3943 * reloc tree before committing the transaction.
3944 */
3945static int qgroup_fix_relocated_data_extents(struct btrfs_trans_handle *trans,
3946 struct reloc_control *rc)
3947{
3948 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3949 struct inode *inode = rc->data_inode;
3950 struct btrfs_root *data_reloc_root = BTRFS_I(inode)->root;
3951 struct btrfs_path *path;
3952 struct btrfs_key key;
3953 int ret = 0;
3954
3955 if (!fs_info->quota_enabled)
3956 return 0;
3957
3958 /*
 3959 * The qgroup fix is only valid for the stage where we update the
 3960 * data pointers.
 3961 * In the MOVING_DATA stage we would miss the point where the tree
 3962 * blocks are swapped, and could not fix it.
3963 */
3964 if (!(rc->stage == UPDATE_DATA_PTRS && rc->extents_found))
3965 return 0;
3966
3967 path = btrfs_alloc_path();
3968 if (!path)
3969 return -ENOMEM;
3970 key.objectid = btrfs_ino(inode);
3971 key.type = BTRFS_EXTENT_DATA_KEY;
3972 key.offset = 0;
3973
3974 ret = btrfs_search_slot(NULL, data_reloc_root, &key, path, 0, 0);
3975 if (ret < 0)
3976 goto out;
3977
3978 lock_extent(&BTRFS_I(inode)->io_tree, 0, (u64)-1);
3979 while (1) {
3980 struct btrfs_file_extent_item *fi;
3981
3982 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
3983 if (key.objectid > btrfs_ino(inode))
3984 break;
3985 if (key.type != BTRFS_EXTENT_DATA_KEY)
3986 goto next;
3987 fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
3988 struct btrfs_file_extent_item);
3989 if (btrfs_file_extent_type(path->nodes[0], fi) !=
3990 BTRFS_FILE_EXTENT_REG)
3991 goto next;
3992 ret = btrfs_qgroup_insert_dirty_extent(trans, fs_info,
3993 btrfs_file_extent_disk_bytenr(path->nodes[0], fi),
3994 btrfs_file_extent_disk_num_bytes(path->nodes[0], fi),
3995 GFP_NOFS);
3996 if (ret < 0)
3997 break;
3998next:
3999 ret = btrfs_next_item(data_reloc_root, path);
4000 if (ret < 0)
4001 break;
4002 if (ret > 0) {
4003 ret = 0;
4004 break;
4005 }
4006 }
 4007 unlock_extent(&BTRFS_I(inode)->io_tree, 0, (u64)-1);
4008out:
4009 btrfs_free_path(path);
4010 return ret;
4011}
4012
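As the comment block before qgroup_fix_relocated_data_extents() explains, the fix boils down to: before committing, walk every regular file extent of the data reloc inode and queue it as a dirty extent for qgroup. A stripped-down model of that walk, with arrays instead of btree items and record_dirty_extent() standing in for btrfs_qgroup_insert_dirty_extent() (all names illustrative):

    #include <stdio.h>

    struct file_extent { unsigned long long bytenr, num_bytes; };

    /* stand-in for btrfs_qgroup_insert_dirty_extent() */
    static int record_dirty_extent(unsigned long long bytenr,
                                   unsigned long long num_bytes)
    {
        printf("qgroup will re-account [%llu, +%llu)\n", bytenr, num_bytes);
        return 0;
    }

    /* walk every file extent of the data reloc inode and queue it for
     * qgroup accounting at commit time */
    static int fix_relocated_data_extents(const struct file_extent *fe, int n)
    {
        int i, ret;

        for (i = 0; i < n; i++) {
            ret = record_dirty_extent(fe[i].bytenr, fe[i].num_bytes);
            if (ret < 0)
                return ret;
        }
        return 0;
    }

    int main(void)
    {
        struct file_extent extents[] = { { 1048576, 4096 }, { 2097152, 8192 } };

        return fix_relocated_data_extents(extents, 2);
    }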
3919static noinline_for_stack int relocate_block_group(struct reloc_control *rc) 4013static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
3920{ 4014{
3921 struct rb_root blocks = RB_ROOT; 4015 struct rb_root blocks = RB_ROOT;
@@ -4102,10 +4196,18 @@ restart:
4102 4196
4103 /* get rid of pinned extents */ 4197 /* get rid of pinned extents */
4104 trans = btrfs_join_transaction(rc->extent_root); 4198 trans = btrfs_join_transaction(rc->extent_root);
4105 if (IS_ERR(trans)) 4199 if (IS_ERR(trans)) {
4106 err = PTR_ERR(trans); 4200 err = PTR_ERR(trans);
4107 else 4201 goto out_free;
4108 btrfs_commit_transaction(trans, rc->extent_root); 4202 }
4203 ret = qgroup_fix_relocated_data_extents(trans, rc);
4204 if (ret < 0) {
4205 btrfs_abort_transaction(trans, ret);
4206 if (!err)
4207 err = ret;
4208 goto out_free;
4209 }
4210 btrfs_commit_transaction(trans, rc->extent_root);
4109out_free: 4211out_free:
4110 btrfs_free_block_rsv(rc->extent_root, rc->block_rsv); 4212 btrfs_free_block_rsv(rc->extent_root, rc->block_rsv);
4111 btrfs_free_path(path); 4213 btrfs_free_path(path);
@@ -4468,10 +4570,16 @@ int btrfs_recover_relocation(struct btrfs_root *root)
4468 unset_reloc_control(rc); 4570 unset_reloc_control(rc);
4469 4571
4470 trans = btrfs_join_transaction(rc->extent_root); 4572 trans = btrfs_join_transaction(rc->extent_root);
4471 if (IS_ERR(trans)) 4573 if (IS_ERR(trans)) {
4472 err = PTR_ERR(trans); 4574 err = PTR_ERR(trans);
4473 else 4575 goto out_free;
4474 err = btrfs_commit_transaction(trans, rc->extent_root); 4576 }
4577 err = qgroup_fix_relocated_data_extents(trans, rc);
4578 if (err < 0) {
4579 btrfs_abort_transaction(trans, err);
4580 goto out_free;
4581 }
4582 err = btrfs_commit_transaction(trans, rc->extent_root);
4475out_free: 4583out_free:
4476 kfree(rc); 4584 kfree(rc);
4477out: 4585out:
diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
index 7fd7e1830cfe..091296062456 100644
--- a/fs/btrfs/root-tree.c
+++ b/fs/btrfs/root-tree.c
@@ -272,6 +272,23 @@ int btrfs_find_orphan_roots(struct btrfs_root *tree_root)
272 root_key.objectid = key.offset; 272 root_key.objectid = key.offset;
273 key.offset++; 273 key.offset++;
274 274
275 /*
276 * The root might have been inserted already, as before we look
277 * for orphan roots, log replay might have happened, which
278 * triggers a transaction commit and qgroup accounting, which
279 * in turn reads and inserts fs roots while doing backref
280 * walking.
281 */
282 root = btrfs_lookup_fs_root(tree_root->fs_info,
283 root_key.objectid);
284 if (root) {
285 WARN_ON(!test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED,
286 &root->state));
287 if (btrfs_root_refs(&root->root_item) == 0)
288 btrfs_add_dead_root(root);
289 continue;
290 }
291
275 root = btrfs_read_fs_root(tree_root, &root_key); 292 root = btrfs_read_fs_root(tree_root, &root_key);
276 err = PTR_ERR_OR_ZERO(root); 293 err = PTR_ERR_OR_ZERO(root);
277 if (err && err != -ENOENT) { 294 if (err && err != -ENOENT) {
@@ -310,16 +327,8 @@ int btrfs_find_orphan_roots(struct btrfs_root *tree_root)
310 set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state); 327 set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state);
311 328
312 err = btrfs_insert_fs_root(root->fs_info, root); 329 err = btrfs_insert_fs_root(root->fs_info, root);
313 /*
314 * The root might have been inserted already, as before we look
315 * for orphan roots, log replay might have happened, which
316 * triggers a transaction commit and qgroup accounting, which
317 * in turn reads and inserts fs roots while doing backref
318 * walking.
319 */
320 if (err == -EEXIST)
321 err = 0;
322 if (err) { 330 if (err) {
331 BUG_ON(err == -EEXIST);
323 btrfs_free_fs_root(root); 332 btrfs_free_fs_root(root);
324 break; 333 break;
325 } 334 }
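The hunk above moves the "already inserted by log replay" handling from an -EEXIST fixup after the insert to an explicit lookup before reading the root, so a later -EEXIST really is a bug. A compact sketch of lookup-before-insert, with a flat array in place of the fs-root radix tree (names hypothetical):

    #include <errno.h>
    #include <stdio.h>

    #define MAX_ROOTS 8

    static unsigned long long roots[MAX_ROOTS];
    static int nroots;

    static int lookup_root(unsigned long long id)
    {
        int i;

        for (i = 0; i < nroots; i++)
            if (roots[i] == id)
                return 1;
        return 0;
    }

    static int insert_root(unsigned long long id)
    {
        if (lookup_root(id))
            return -EEXIST;    /* now indicates a real bug, not log replay */
        roots[nroots++] = id;
        return 0;
    }

    /* orphan scan: look the root up first and skip the read+insert entirely
     * when log replay already put it in place */
    static int handle_orphan(unsigned long long id)
    {
        if (lookup_root(id))
            return 0;
        return insert_root(id);
    }

    int main(void)
    {
        insert_root(257);                    /* pretend log replay did this */
        printf("%d\n", handle_orphan(257));  /* 0: skipped */
        printf("%d\n", handle_orphan(258));  /* 0: inserted */
        return 0;
    }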
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index b71dd298385c..a87675ffd02b 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -231,7 +231,6 @@ struct pending_dir_move {
231 u64 parent_ino; 231 u64 parent_ino;
232 u64 ino; 232 u64 ino;
233 u64 gen; 233 u64 gen;
234 bool is_orphan;
235 struct list_head update_refs; 234 struct list_head update_refs;
236}; 235};
237 236
@@ -274,6 +273,39 @@ struct name_cache_entry {
274 char name[]; 273 char name[];
275}; 274};
276 275
276static void inconsistent_snapshot_error(struct send_ctx *sctx,
277 enum btrfs_compare_tree_result result,
278 const char *what)
279{
280 const char *result_string;
281
282 switch (result) {
283 case BTRFS_COMPARE_TREE_NEW:
284 result_string = "new";
285 break;
286 case BTRFS_COMPARE_TREE_DELETED:
287 result_string = "deleted";
288 break;
289 case BTRFS_COMPARE_TREE_CHANGED:
290 result_string = "updated";
291 break;
292 case BTRFS_COMPARE_TREE_SAME:
293 ASSERT(0);
294 result_string = "unchanged";
295 break;
296 default:
297 ASSERT(0);
298 result_string = "unexpected";
299 }
300
301 btrfs_err(sctx->send_root->fs_info,
302 "Send: inconsistent snapshot, found %s %s for inode %llu without updated inode item, send root is %llu, parent root is %llu",
303 result_string, what, sctx->cmp_key->objectid,
304 sctx->send_root->root_key.objectid,
305 (sctx->parent_root ?
306 sctx->parent_root->root_key.objectid : 0));
307}
308
277static int is_waiting_for_move(struct send_ctx *sctx, u64 ino); 309static int is_waiting_for_move(struct send_ctx *sctx, u64 ino);
278 310
279static struct waiting_dir_move * 311static struct waiting_dir_move *
@@ -1861,7 +1893,8 @@ static int will_overwrite_ref(struct send_ctx *sctx, u64 dir, u64 dir_gen,
1861 * was already unlinked/moved, so we can safely assume that we will not 1893 * was already unlinked/moved, so we can safely assume that we will not
1862 * overwrite anything at this point in time. 1894 * overwrite anything at this point in time.
1863 */ 1895 */
1864 if (other_inode > sctx->send_progress) { 1896 if (other_inode > sctx->send_progress ||
1897 is_waiting_for_move(sctx, other_inode)) {
1865 ret = get_inode_info(sctx->parent_root, other_inode, NULL, 1898 ret = get_inode_info(sctx->parent_root, other_inode, NULL,
1866 who_gen, NULL, NULL, NULL, NULL); 1899 who_gen, NULL, NULL, NULL, NULL);
1867 if (ret < 0) 1900 if (ret < 0)
@@ -2502,6 +2535,8 @@ verbose_printk("btrfs: send_utimes %llu\n", ino);
2502 key.type = BTRFS_INODE_ITEM_KEY; 2535 key.type = BTRFS_INODE_ITEM_KEY;
2503 key.offset = 0; 2536 key.offset = 0;
2504 ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0); 2537 ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0);
2538 if (ret > 0)
2539 ret = -ENOENT;
2505 if (ret < 0) 2540 if (ret < 0)
2506 goto out; 2541 goto out;
2507 2542
@@ -2947,6 +2982,10 @@ static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 dir_gen,
2947 } 2982 }
2948 2983
2949 if (loc.objectid > send_progress) { 2984 if (loc.objectid > send_progress) {
2985 struct orphan_dir_info *odi;
2986
2987 odi = get_orphan_dir_info(sctx, dir);
2988 free_orphan_dir_info(sctx, odi);
2950 ret = 0; 2989 ret = 0;
2951 goto out; 2990 goto out;
2952 } 2991 }
@@ -3047,7 +3086,6 @@ static int add_pending_dir_move(struct send_ctx *sctx,
3047 pm->parent_ino = parent_ino; 3086 pm->parent_ino = parent_ino;
3048 pm->ino = ino; 3087 pm->ino = ino;
3049 pm->gen = ino_gen; 3088 pm->gen = ino_gen;
3050 pm->is_orphan = is_orphan;
3051 INIT_LIST_HEAD(&pm->list); 3089 INIT_LIST_HEAD(&pm->list);
3052 INIT_LIST_HEAD(&pm->update_refs); 3090 INIT_LIST_HEAD(&pm->update_refs);
3053 RB_CLEAR_NODE(&pm->node); 3091 RB_CLEAR_NODE(&pm->node);
@@ -3113,6 +3151,48 @@ static struct pending_dir_move *get_pending_dir_moves(struct send_ctx *sctx,
3113 return NULL; 3151 return NULL;
3114} 3152}
3115 3153
3154static int path_loop(struct send_ctx *sctx, struct fs_path *name,
3155 u64 ino, u64 gen, u64 *ancestor_ino)
3156{
3157 int ret = 0;
3158 u64 parent_inode = 0;
3159 u64 parent_gen = 0;
3160 u64 start_ino = ino;
3161
3162 *ancestor_ino = 0;
3163 while (ino != BTRFS_FIRST_FREE_OBJECTID) {
3164 fs_path_reset(name);
3165
3166 if (is_waiting_for_rm(sctx, ino))
3167 break;
3168 if (is_waiting_for_move(sctx, ino)) {
3169 if (*ancestor_ino == 0)
3170 *ancestor_ino = ino;
3171 ret = get_first_ref(sctx->parent_root, ino,
3172 &parent_inode, &parent_gen, name);
3173 } else {
3174 ret = __get_cur_name_and_parent(sctx, ino, gen,
3175 &parent_inode,
3176 &parent_gen, name);
3177 if (ret > 0) {
3178 ret = 0;
3179 break;
3180 }
3181 }
3182 if (ret < 0)
3183 break;
3184 if (parent_inode == start_ino) {
3185 ret = 1;
3186 if (*ancestor_ino == 0)
3187 *ancestor_ino = ino;
3188 break;
3189 }
3190 ino = parent_inode;
3191 gen = parent_gen;
3192 }
3193 return ret;
3194}
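path_loop() above walks up through pending parents to detect whether cur_ino's new path would pass through itself before the earlier renames are applied. The essential cycle check, reduced to an array of pending parents (illustrative only, not the send-stream code):

    #include <stdio.h>

    #define NDIRS 4

    /* parent[i] is the (pending) parent of directory i; walk up from start
     * and report whether we come back to start -- the rename cycle that
     * forces the move to be re-queued behind an ancestor */
    static int path_loop(const int parent[], int start)
    {
        int ino = parent[start];

        while (ino != 0) {      /* 0 stands in for the subvolume root */
            if (ino == start)
                return 1;
            ino = parent[ino];
        }
        return 0;
    }

    int main(void)
    {
        /* 1 -> 2 -> 3 -> 1 is a cycle; 0 is the root */
        int parent[NDIRS] = { 0, 2, 3, 1 };

        printf("%d\n", path_loop(parent, 1)); /* 1 */
        return 0;
    }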
3195
3116static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm) 3196static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
3117{ 3197{
3118 struct fs_path *from_path = NULL; 3198 struct fs_path *from_path = NULL;
@@ -3123,6 +3203,8 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
3123 u64 parent_ino, parent_gen; 3203 u64 parent_ino, parent_gen;
3124 struct waiting_dir_move *dm = NULL; 3204 struct waiting_dir_move *dm = NULL;
3125 u64 rmdir_ino = 0; 3205 u64 rmdir_ino = 0;
3206 u64 ancestor;
3207 bool is_orphan;
3126 int ret; 3208 int ret;
3127 3209
3128 name = fs_path_alloc(); 3210 name = fs_path_alloc();
@@ -3135,9 +3217,10 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
3135 dm = get_waiting_dir_move(sctx, pm->ino); 3217 dm = get_waiting_dir_move(sctx, pm->ino);
3136 ASSERT(dm); 3218 ASSERT(dm);
3137 rmdir_ino = dm->rmdir_ino; 3219 rmdir_ino = dm->rmdir_ino;
3220 is_orphan = dm->orphanized;
3138 free_waiting_dir_move(sctx, dm); 3221 free_waiting_dir_move(sctx, dm);
3139 3222
3140 if (pm->is_orphan) { 3223 if (is_orphan) {
3141 ret = gen_unique_name(sctx, pm->ino, 3224 ret = gen_unique_name(sctx, pm->ino,
3142 pm->gen, from_path); 3225 pm->gen, from_path);
3143 } else { 3226 } else {
@@ -3155,6 +3238,24 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
3155 goto out; 3238 goto out;
3156 3239
3157 sctx->send_progress = sctx->cur_ino + 1; 3240 sctx->send_progress = sctx->cur_ino + 1;
3241 ret = path_loop(sctx, name, pm->ino, pm->gen, &ancestor);
3242 if (ret < 0)
3243 goto out;
3244 if (ret) {
3245 LIST_HEAD(deleted_refs);
3246 ASSERT(ancestor > BTRFS_FIRST_FREE_OBJECTID);
3247 ret = add_pending_dir_move(sctx, pm->ino, pm->gen, ancestor,
3248 &pm->update_refs, &deleted_refs,
3249 is_orphan);
3250 if (ret < 0)
3251 goto out;
3252 if (rmdir_ino) {
3253 dm = get_waiting_dir_move(sctx, pm->ino);
3254 ASSERT(dm);
3255 dm->rmdir_ino = rmdir_ino;
3256 }
3257 goto out;
3258 }
3158 fs_path_reset(name); 3259 fs_path_reset(name);
3159 to_path = name; 3260 to_path = name;
3160 name = NULL; 3261 name = NULL;
@@ -3174,7 +3275,7 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
3174 /* already deleted */ 3275 /* already deleted */
3175 goto finish; 3276 goto finish;
3176 } 3277 }
3177 ret = can_rmdir(sctx, rmdir_ino, odi->gen, sctx->cur_ino + 1); 3278 ret = can_rmdir(sctx, rmdir_ino, odi->gen, sctx->cur_ino);
3178 if (ret < 0) 3279 if (ret < 0)
3179 goto out; 3280 goto out;
3180 if (!ret) 3281 if (!ret)
@@ -3204,8 +3305,18 @@ finish:
3204 * and old parent(s). 3305 * and old parent(s).
3205 */ 3306 */
3206 list_for_each_entry(cur, &pm->update_refs, list) { 3307 list_for_each_entry(cur, &pm->update_refs, list) {
3207 if (cur->dir == rmdir_ino) 3308 /*
3309 * The parent inode might have been deleted in the send snapshot
3310 */
3311 ret = get_inode_info(sctx->send_root, cur->dir, NULL,
3312 NULL, NULL, NULL, NULL, NULL);
3313 if (ret == -ENOENT) {
3314 ret = 0;
3208 continue; 3315 continue;
3316 }
3317 if (ret < 0)
3318 goto out;
3319
3209 ret = send_utimes(sctx, cur->dir, cur->dir_gen); 3320 ret = send_utimes(sctx, cur->dir, cur->dir_gen);
3210 if (ret < 0) 3321 if (ret < 0)
3211 goto out; 3322 goto out;
@@ -3325,6 +3436,7 @@ static int wait_for_dest_dir_move(struct send_ctx *sctx,
3325 u64 left_gen; 3436 u64 left_gen;
3326 u64 right_gen; 3437 u64 right_gen;
3327 int ret = 0; 3438 int ret = 0;
3439 struct waiting_dir_move *wdm;
3328 3440
3329 if (RB_EMPTY_ROOT(&sctx->waiting_dir_moves)) 3441 if (RB_EMPTY_ROOT(&sctx->waiting_dir_moves))
3330 return 0; 3442 return 0;
@@ -3383,7 +3495,8 @@ static int wait_for_dest_dir_move(struct send_ctx *sctx,
3383 goto out; 3495 goto out;
3384 } 3496 }
3385 3497
3386 if (is_waiting_for_move(sctx, di_key.objectid)) { 3498 wdm = get_waiting_dir_move(sctx, di_key.objectid);
3499 if (wdm && !wdm->orphanized) {
3387 ret = add_pending_dir_move(sctx, 3500 ret = add_pending_dir_move(sctx,
3388 sctx->cur_ino, 3501 sctx->cur_ino,
3389 sctx->cur_inode_gen, 3502 sctx->cur_inode_gen,
@@ -3470,7 +3583,8 @@ static int wait_for_parent_move(struct send_ctx *sctx,
3470 ret = is_ancestor(sctx->parent_root, 3583 ret = is_ancestor(sctx->parent_root,
3471 sctx->cur_ino, sctx->cur_inode_gen, 3584 sctx->cur_ino, sctx->cur_inode_gen,
3472 ino, path_before); 3585 ino, path_before);
3473 break; 3586 if (ret)
3587 break;
3474 } 3588 }
3475 3589
3476 fs_path_reset(path_before); 3590 fs_path_reset(path_before);
@@ -3643,11 +3757,26 @@ verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino);
3643 goto out; 3757 goto out;
3644 if (ret) { 3758 if (ret) {
3645 struct name_cache_entry *nce; 3759 struct name_cache_entry *nce;
3760 struct waiting_dir_move *wdm;
3646 3761
3647 ret = orphanize_inode(sctx, ow_inode, ow_gen, 3762 ret = orphanize_inode(sctx, ow_inode, ow_gen,
3648 cur->full_path); 3763 cur->full_path);
3649 if (ret < 0) 3764 if (ret < 0)
3650 goto out; 3765 goto out;
3766
3767 /*
 3768 * If ow_inode has its rename operation delayed,
3769 * make sure that its orphanized name is used in
3770 * the source path when performing its rename
3771 * operation.
3772 */
3773 if (is_waiting_for_move(sctx, ow_inode)) {
3774 wdm = get_waiting_dir_move(sctx,
3775 ow_inode);
3776 ASSERT(wdm);
3777 wdm->orphanized = true;
3778 }
3779
3651 /* 3780 /*
3652 * Make sure we clear our orphanized inode's 3781 * Make sure we clear our orphanized inode's
3653 * name from the name cache. This is because the 3782 * name from the name cache. This is because the
@@ -3663,6 +3792,19 @@ verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino);
3663 name_cache_delete(sctx, nce); 3792 name_cache_delete(sctx, nce);
3664 kfree(nce); 3793 kfree(nce);
3665 } 3794 }
3795
3796 /*
3797 * ow_inode might currently be an ancestor of
3798 * cur_ino, therefore compute valid_path (the
3799 * current path of cur_ino) again because it
3800 * might contain the pre-orphanization name of
3801 * ow_inode, which is no longer valid.
3802 */
3803 fs_path_reset(valid_path);
3804 ret = get_cur_path(sctx, sctx->cur_ino,
3805 sctx->cur_inode_gen, valid_path);
3806 if (ret < 0)
3807 goto out;
3666 } else { 3808 } else {
3667 ret = send_unlink(sctx, cur->full_path); 3809 ret = send_unlink(sctx, cur->full_path);
3668 if (ret < 0) 3810 if (ret < 0)
@@ -4126,10 +4268,12 @@ static int process_all_refs(struct send_ctx *sctx,
4126 } 4268 }
4127 btrfs_release_path(path); 4269 btrfs_release_path(path);
4128 4270
4271 /*
4272 * We don't actually care about pending_move as we are simply
 4273 * re-creating this inode and will be renaming it into place once we
4274 * rename the parent directory.
4275 */
4129 ret = process_recorded_refs(sctx, &pending_move); 4276 ret = process_recorded_refs(sctx, &pending_move);
4130 /* Only applicable to an incremental send. */
4131 ASSERT(pending_move == 0);
4132
4133out: 4277out:
4134 btrfs_free_path(path); 4278 btrfs_free_path(path);
4135 return ret; 4279 return ret;
@@ -5602,7 +5746,10 @@ static int changed_ref(struct send_ctx *sctx,
5602{ 5746{
5603 int ret = 0; 5747 int ret = 0;
5604 5748
5605 BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid); 5749 if (sctx->cur_ino != sctx->cmp_key->objectid) {
5750 inconsistent_snapshot_error(sctx, result, "reference");
5751 return -EIO;
5752 }
5606 5753
5607 if (!sctx->cur_inode_new_gen && 5754 if (!sctx->cur_inode_new_gen &&
5608 sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID) { 5755 sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID) {
@@ -5627,7 +5774,10 @@ static int changed_xattr(struct send_ctx *sctx,
5627{ 5774{
5628 int ret = 0; 5775 int ret = 0;
5629 5776
5630 BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid); 5777 if (sctx->cur_ino != sctx->cmp_key->objectid) {
5778 inconsistent_snapshot_error(sctx, result, "xattr");
5779 return -EIO;
5780 }
5631 5781
5632 if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) { 5782 if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
5633 if (result == BTRFS_COMPARE_TREE_NEW) 5783 if (result == BTRFS_COMPARE_TREE_NEW)
@@ -5651,7 +5801,10 @@ static int changed_extent(struct send_ctx *sctx,
5651{ 5801{
5652 int ret = 0; 5802 int ret = 0;
5653 5803
5654 BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid); 5804 if (sctx->cur_ino != sctx->cmp_key->objectid) {
5805 inconsistent_snapshot_error(sctx, result, "extent");
5806 return -EIO;
5807 }
5655 5808
5656 if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) { 5809 if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
5657 if (result != BTRFS_COMPARE_TREE_DELETED) 5810 if (result != BTRFS_COMPARE_TREE_DELETED)
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 864ce334f696..4071fe2bd098 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -2241,6 +2241,13 @@ static int btrfs_freeze(struct super_block *sb)
2241 struct btrfs_trans_handle *trans; 2241 struct btrfs_trans_handle *trans;
2242 struct btrfs_root *root = btrfs_sb(sb)->tree_root; 2242 struct btrfs_root *root = btrfs_sb(sb)->tree_root;
2243 2243
2244 root->fs_info->fs_frozen = 1;
2245 /*
2246 * We don't need a barrier here, we'll wait for any transaction that
2247 * could be in progress on other threads (and do delayed iputs that
2248 * we want to avoid on a frozen filesystem), or do the commit
2249 * ourselves.
2250 */
2244 trans = btrfs_attach_transaction_barrier(root); 2251 trans = btrfs_attach_transaction_barrier(root);
2245 if (IS_ERR(trans)) { 2252 if (IS_ERR(trans)) {
2246 /* no transaction, don't bother */ 2253 /* no transaction, don't bother */
@@ -2251,6 +2258,14 @@ static int btrfs_freeze(struct super_block *sb)
2251 return btrfs_commit_transaction(trans, root); 2258 return btrfs_commit_transaction(trans, root);
2252} 2259}
2253 2260
2261static int btrfs_unfreeze(struct super_block *sb)
2262{
2263 struct btrfs_root *root = btrfs_sb(sb)->tree_root;
2264
2265 root->fs_info->fs_frozen = 0;
2266 return 0;
2267}
2268
2254static int btrfs_show_devname(struct seq_file *m, struct dentry *root) 2269static int btrfs_show_devname(struct seq_file *m, struct dentry *root)
2255{ 2270{
2256 struct btrfs_fs_info *fs_info = btrfs_sb(root->d_sb); 2271 struct btrfs_fs_info *fs_info = btrfs_sb(root->d_sb);
@@ -2299,6 +2314,7 @@ static const struct super_operations btrfs_super_ops = {
2299 .statfs = btrfs_statfs, 2314 .statfs = btrfs_statfs,
2300 .remount_fs = btrfs_remount, 2315 .remount_fs = btrfs_remount,
2301 .freeze_fs = btrfs_freeze, 2316 .freeze_fs = btrfs_freeze,
2317 .unfreeze_fs = btrfs_unfreeze,
2302}; 2318};
2303 2319
2304static const struct file_operations btrfs_ctl_fops = { 2320static const struct file_operations btrfs_ctl_fops = {
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 9cca0a721961..95d41919d034 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -2278,8 +2278,13 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
2278 2278
2279 kmem_cache_free(btrfs_trans_handle_cachep, trans); 2279 kmem_cache_free(btrfs_trans_handle_cachep, trans);
2280 2280
2281 /*
 2282 * If the fs has been frozen, we cannot handle delayed iputs; otherwise
 2283 * we would deadlock on SB_FREEZE_FS.
2284 */
2281 if (current != root->fs_info->transaction_kthread && 2285 if (current != root->fs_info->transaction_kthread &&
2282 current != root->fs_info->cleaner_kthread) 2286 current != root->fs_info->cleaner_kthread &&
2287 !root->fs_info->fs_frozen)
2283 btrfs_run_delayed_iputs(root); 2288 btrfs_run_delayed_iputs(root);
2284 2289
2285 return ret; 2290 return ret;
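Tying the super.c and transaction.c hunks together: freeze sets fs_frozen before attaching to the transaction, and the commit tail skips delayed iputs while the flag is set, since an iput-triggered write would block on SB_FREEZE_FS from inside the commit itself. A trivial model of the guard (hypothetical names):

    #include <stdio.h>

    static int fs_frozen;

    static void run_delayed_iputs(void) { puts("running delayed iputs"); }

    /* commit-time tail: skip delayed iputs while frozen to avoid blocking
     * on SB_FREEZE_FS from inside the commit */
    static void commit_tail(int am_transaction_kthread)
    {
        if (!am_transaction_kthread && !fs_frozen)
            run_delayed_iputs();
    }

    int main(void)
    {
        commit_tail(0);      /* runs */
        fs_frozen = 1;
        commit_tail(0);      /* skipped while frozen */
        return 0;
    }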
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index d31a0c4f56be..ef9c55bc7907 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -27,6 +27,7 @@
27#include "backref.h" 27#include "backref.h"
28#include "hash.h" 28#include "hash.h"
29#include "compression.h" 29#include "compression.h"
30#include "qgroup.h"
30 31
31/* magic values for the inode_only field in btrfs_log_inode: 32/* magic values for the inode_only field in btrfs_log_inode:
32 * 33 *
@@ -680,6 +681,21 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
680 ins.type = BTRFS_EXTENT_ITEM_KEY; 681 ins.type = BTRFS_EXTENT_ITEM_KEY;
681 offset = key->offset - btrfs_file_extent_offset(eb, item); 682 offset = key->offset - btrfs_file_extent_offset(eb, item);
682 683
684 /*
 685 * Manually record the dirty extent: here we did a shallow
 686 * file extent item copy and skipped the normal backref update,
 687 * modifying the extent tree all by ourselves.
 688 * So we need to record the dirty extent for qgroup manually,
 689 * as the owner of the file extent changed from the log tree
 690 * (doesn't affect qgroup) to the fs/file tree (affects qgroup).
691 */
692 ret = btrfs_qgroup_insert_dirty_extent(trans, root->fs_info,
693 btrfs_file_extent_disk_bytenr(eb, item),
694 btrfs_file_extent_disk_num_bytes(eb, item),
695 GFP_NOFS);
696 if (ret < 0)
697 goto out;
698
683 if (ins.objectid > 0) { 699 if (ins.objectid > 0) {
684 u64 csum_start; 700 u64 csum_start;
685 u64 csum_end; 701 u64 csum_end;
@@ -2807,7 +2823,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
2807 */ 2823 */
2808 mutex_unlock(&root->log_mutex); 2824 mutex_unlock(&root->log_mutex);
2809 2825
2810 btrfs_init_log_ctx(&root_log_ctx); 2826 btrfs_init_log_ctx(&root_log_ctx, NULL);
2811 2827
2812 mutex_lock(&log_root_tree->log_mutex); 2828 mutex_lock(&log_root_tree->log_mutex);
2813 atomic_inc(&log_root_tree->log_batch); 2829 atomic_inc(&log_root_tree->log_batch);
@@ -2851,6 +2867,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
2851 2867
2852 if (log_root_tree->log_transid_committed >= root_log_ctx.log_transid) { 2868 if (log_root_tree->log_transid_committed >= root_log_ctx.log_transid) {
2853 blk_finish_plug(&plug); 2869 blk_finish_plug(&plug);
2870 list_del_init(&root_log_ctx.list);
2854 mutex_unlock(&log_root_tree->log_mutex); 2871 mutex_unlock(&log_root_tree->log_mutex);
2855 ret = root_log_ctx.log_ret; 2872 ret = root_log_ctx.log_ret;
2856 goto out; 2873 goto out;
@@ -4469,7 +4486,8 @@ static int btrfs_log_trailing_hole(struct btrfs_trans_handle *trans,
4469static int btrfs_check_ref_name_override(struct extent_buffer *eb, 4486static int btrfs_check_ref_name_override(struct extent_buffer *eb,
4470 const int slot, 4487 const int slot,
4471 const struct btrfs_key *key, 4488 const struct btrfs_key *key,
4472 struct inode *inode) 4489 struct inode *inode,
4490 u64 *other_ino)
4473{ 4491{
4474 int ret; 4492 int ret;
4475 struct btrfs_path *search_path; 4493 struct btrfs_path *search_path;
@@ -4528,7 +4546,16 @@ static int btrfs_check_ref_name_override(struct extent_buffer *eb,
4528 search_path, parent, 4546 search_path, parent,
4529 name, this_name_len, 0); 4547 name, this_name_len, 0);
4530 if (di && !IS_ERR(di)) { 4548 if (di && !IS_ERR(di)) {
4531 ret = 1; 4549 struct btrfs_key di_key;
4550
4551 btrfs_dir_item_key_to_cpu(search_path->nodes[0],
4552 di, &di_key);
4553 if (di_key.type == BTRFS_INODE_ITEM_KEY) {
4554 ret = 1;
4555 *other_ino = di_key.objectid;
4556 } else {
4557 ret = -EAGAIN;
4558 }
4532 goto out; 4559 goto out;
4533 } else if (IS_ERR(di)) { 4560 } else if (IS_ERR(di)) {
4534 ret = PTR_ERR(di); 4561 ret = PTR_ERR(di);
@@ -4722,16 +4749,72 @@ again:
4722 if ((min_key.type == BTRFS_INODE_REF_KEY || 4749 if ((min_key.type == BTRFS_INODE_REF_KEY ||
4723 min_key.type == BTRFS_INODE_EXTREF_KEY) && 4750 min_key.type == BTRFS_INODE_EXTREF_KEY) &&
4724 BTRFS_I(inode)->generation == trans->transid) { 4751 BTRFS_I(inode)->generation == trans->transid) {
4752 u64 other_ino = 0;
4753
4725 ret = btrfs_check_ref_name_override(path->nodes[0], 4754 ret = btrfs_check_ref_name_override(path->nodes[0],
4726 path->slots[0], 4755 path->slots[0],
4727 &min_key, inode); 4756 &min_key, inode,
4757 &other_ino);
4728 if (ret < 0) { 4758 if (ret < 0) {
4729 err = ret; 4759 err = ret;
4730 goto out_unlock; 4760 goto out_unlock;
4731 } else if (ret > 0) { 4761 } else if (ret > 0 && ctx &&
4732 err = 1; 4762 other_ino != btrfs_ino(ctx->inode)) {
4733 btrfs_set_log_full_commit(root->fs_info, trans); 4763 struct btrfs_key inode_key;
4734 goto out_unlock; 4764 struct inode *other_inode;
4765
4766 if (ins_nr > 0) {
4767 ins_nr++;
4768 } else {
4769 ins_nr = 1;
4770 ins_start_slot = path->slots[0];
4771 }
4772 ret = copy_items(trans, inode, dst_path, path,
4773 &last_extent, ins_start_slot,
4774 ins_nr, inode_only,
4775 logged_isize);
4776 if (ret < 0) {
4777 err = ret;
4778 goto out_unlock;
4779 }
4780 ins_nr = 0;
4781 btrfs_release_path(path);
4782 inode_key.objectid = other_ino;
4783 inode_key.type = BTRFS_INODE_ITEM_KEY;
4784 inode_key.offset = 0;
4785 other_inode = btrfs_iget(root->fs_info->sb,
4786 &inode_key, root,
4787 NULL);
4788 /*
4789 * If the other inode that had a conflicting dir
4790 * entry was deleted in the current transaction,
 4791 * we don't need to do more work nor fall back to
4792 * a transaction commit.
4793 */
4794 if (IS_ERR(other_inode) &&
4795 PTR_ERR(other_inode) == -ENOENT) {
4796 goto next_key;
4797 } else if (IS_ERR(other_inode)) {
4798 err = PTR_ERR(other_inode);
4799 goto out_unlock;
4800 }
4801 /*
4802 * We are safe logging the other inode without
4803 * acquiring its i_mutex as long as we log with
4804 * the LOG_INODE_EXISTS mode. We're safe against
4805 * concurrent renames of the other inode as well
4806 * because during a rename we pin the log and
4807 * update the log with the new name before we
4808 * unpin it.
4809 */
4810 err = btrfs_log_inode(trans, root, other_inode,
4811 LOG_INODE_EXISTS,
4812 0, LLONG_MAX, ctx);
4813 iput(other_inode);
4814 if (err)
4815 goto out_unlock;
4816 else
4817 goto next_key;
4735 } 4818 }
4736 } 4819 }
4737 4820
@@ -4799,7 +4882,7 @@ next_slot:
4799 ins_nr = 0; 4882 ins_nr = 0;
4800 } 4883 }
4801 btrfs_release_path(path); 4884 btrfs_release_path(path);
4802 4885next_key:
4803 if (min_key.offset < (u64)-1) { 4886 if (min_key.offset < (u64)-1) {
4804 min_key.offset++; 4887 min_key.offset++;
4805 } else if (min_key.type < max_key.type) { 4888 } else if (min_key.type < max_key.type) {
@@ -4993,8 +5076,12 @@ static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
4993 if (!parent || d_really_is_negative(parent) || sb != parent->d_sb) 5076 if (!parent || d_really_is_negative(parent) || sb != parent->d_sb)
4994 break; 5077 break;
4995 5078
4996 if (IS_ROOT(parent)) 5079 if (IS_ROOT(parent)) {
5080 inode = d_inode(parent);
5081 if (btrfs_must_commit_transaction(trans, inode))
5082 ret = 1;
4997 break; 5083 break;
5084 }
4998 5085
4999 parent = dget_parent(parent); 5086 parent = dget_parent(parent);
5000 dput(old_parent); 5087 dput(old_parent);
diff --git a/fs/btrfs/tree-log.h b/fs/btrfs/tree-log.h
index a9f1b75d080d..ab858e31ccbc 100644
--- a/fs/btrfs/tree-log.h
+++ b/fs/btrfs/tree-log.h
@@ -30,15 +30,18 @@ struct btrfs_log_ctx {
30 int log_transid; 30 int log_transid;
31 int io_err; 31 int io_err;
32 bool log_new_dentries; 32 bool log_new_dentries;
33 struct inode *inode;
33 struct list_head list; 34 struct list_head list;
34}; 35};
35 36
36static inline void btrfs_init_log_ctx(struct btrfs_log_ctx *ctx) 37static inline void btrfs_init_log_ctx(struct btrfs_log_ctx *ctx,
38 struct inode *inode)
37{ 39{
38 ctx->log_ret = 0; 40 ctx->log_ret = 0;
39 ctx->log_transid = 0; 41 ctx->log_transid = 0;
40 ctx->io_err = 0; 42 ctx->io_err = 0;
41 ctx->log_new_dentries = false; 43 ctx->log_new_dentries = false;
44 ctx->inode = inode;
42 INIT_LIST_HEAD(&ctx->list); 45 INIT_LIST_HEAD(&ctx->list);
43} 46}
44 47
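btrfs_log_ctx now carries the inode being fsynced so that btrfs_log_inode() can tell, deep in the conflicting-name path, whether the "other" inode is the one the caller is already logging (internal callers such as btrfs_sync_log pass NULL). A bare-bones illustration of threading such a context through (toy types, not the kernel structs):

    #include <stdio.h>

    struct inode { unsigned long long ino; };

    struct log_ctx {
        int log_ret;
        struct inode *inode;   /* NULL for internal callers */
    };

    static void init_log_ctx(struct log_ctx *ctx, struct inode *inode)
    {
        ctx->log_ret = 0;
        ctx->inode = inode;
    }

    /* deep callee: only recurse into other_ino if a ctx exists and it is
     * not the inode the caller is already logging */
    static int should_log_other(const struct log_ctx *ctx,
                                unsigned long long other_ino)
    {
        return ctx->inode && other_ino != ctx->inode->ino;
    }

    int main(void)
    {
        struct inode file = { 260 };
        struct log_ctx ctx;

        init_log_ctx(&ctx, &file);
        printf("%d %d\n", should_log_other(&ctx, 261),   /* 1 */
               should_log_other(&ctx, 260));             /* 0 */
        return 0;
    }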
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 51f125508771..035efce603a9 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -834,10 +834,6 @@ static void __free_device(struct work_struct *work)
834 struct btrfs_device *device; 834 struct btrfs_device *device;
835 835
836 device = container_of(work, struct btrfs_device, rcu_work); 836 device = container_of(work, struct btrfs_device, rcu_work);
837
838 if (device->bdev)
839 blkdev_put(device->bdev, device->mode);
840
841 rcu_string_free(device->name); 837 rcu_string_free(device->name);
842 kfree(device); 838 kfree(device);
843} 839}
@@ -852,6 +848,17 @@ static void free_device(struct rcu_head *head)
852 schedule_work(&device->rcu_work); 848 schedule_work(&device->rcu_work);
853} 849}
854 850
851static void btrfs_close_bdev(struct btrfs_device *device)
852{
853 if (device->bdev && device->writeable) {
854 sync_blockdev(device->bdev);
855 invalidate_bdev(device->bdev);
856 }
857
858 if (device->bdev)
859 blkdev_put(device->bdev, device->mode);
860}
861
855static void btrfs_close_one_device(struct btrfs_device *device) 862static void btrfs_close_one_device(struct btrfs_device *device)
856{ 863{
857 struct btrfs_fs_devices *fs_devices = device->fs_devices; 864 struct btrfs_fs_devices *fs_devices = device->fs_devices;
@@ -870,10 +877,7 @@ static void btrfs_close_one_device(struct btrfs_device *device)
870 if (device->missing) 877 if (device->missing)
871 fs_devices->missing_devices--; 878 fs_devices->missing_devices--;
872 879
873 if (device->bdev && device->writeable) { 880 btrfs_close_bdev(device);
874 sync_blockdev(device->bdev);
875 invalidate_bdev(device->bdev);
876 }
877 881
878 new_device = btrfs_alloc_device(NULL, &device->devid, 882 new_device = btrfs_alloc_device(NULL, &device->devid,
879 device->uuid); 883 device->uuid);
@@ -1932,6 +1936,8 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path, u64 devid)
1932 btrfs_sysfs_rm_device_link(root->fs_info->fs_devices, device); 1936 btrfs_sysfs_rm_device_link(root->fs_info->fs_devices, device);
1933 } 1937 }
1934 1938
1939 btrfs_close_bdev(device);
1940
1935 call_rcu(&device->rcu, free_device); 1941 call_rcu(&device->rcu, free_device);
1936 1942
1937 num_devices = btrfs_super_num_devices(root->fs_info->super_copy) - 1; 1943 num_devices = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
@@ -2025,6 +2031,9 @@ void btrfs_rm_dev_replace_free_srcdev(struct btrfs_fs_info *fs_info,
2025 /* zero out the old super if it is writable */ 2031 /* zero out the old super if it is writable */
2026 btrfs_scratch_superblocks(srcdev->bdev, srcdev->name->str); 2032 btrfs_scratch_superblocks(srcdev->bdev, srcdev->name->str);
2027 } 2033 }
2034
2035 btrfs_close_bdev(srcdev);
2036
2028 call_rcu(&srcdev->rcu, free_device); 2037 call_rcu(&srcdev->rcu, free_device);
2029 2038
2030 /* 2039 /*
@@ -2080,6 +2089,8 @@ void btrfs_destroy_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
2080 * the device_list_mutex lock. 2089 * the device_list_mutex lock.
2081 */ 2090 */
2082 btrfs_scratch_superblocks(tgtdev->bdev, tgtdev->name->str); 2091 btrfs_scratch_superblocks(tgtdev->bdev, tgtdev->name->str);
2092
2093 btrfs_close_bdev(tgtdev);
2083 call_rcu(&tgtdev->rcu, free_device); 2094 call_rcu(&tgtdev->rcu, free_device);
2084} 2095}
2085 2096
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 99115cae1652..16e6ded0b7f2 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -1347,9 +1347,12 @@ void ceph_flush_snaps(struct ceph_inode_info *ci,
1347{ 1347{
1348 struct inode *inode = &ci->vfs_inode; 1348 struct inode *inode = &ci->vfs_inode;
1349 struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc; 1349 struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
1350 struct ceph_mds_session *session = *psession; 1350 struct ceph_mds_session *session = NULL;
1351 int mds; 1351 int mds;
1352
1352 dout("ceph_flush_snaps %p\n", inode); 1353 dout("ceph_flush_snaps %p\n", inode);
1354 if (psession)
1355 session = *psession;
1353retry: 1356retry:
1354 spin_lock(&ci->i_ceph_lock); 1357 spin_lock(&ci->i_ceph_lock);
1355 if (!(ci->i_ceph_flags & CEPH_I_FLUSH_SNAPS)) { 1358 if (!(ci->i_ceph_flags & CEPH_I_FLUSH_SNAPS)) {
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index c64a0b794d49..df4b3e6fa563 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -597,7 +597,7 @@ static bool need_reset_readdir(struct ceph_file_info *fi, loff_t new_pos)
597 if (is_hash_order(new_pos)) { 597 if (is_hash_order(new_pos)) {
598 /* no need to reset last_name for a forward seek when 598 /* no need to reset last_name for a forward seek when
 599 * dentries are sorted in hash order */ 599 * dentries are sorted in hash order */
600 } else if (fi->frag |= fpos_frag(new_pos)) { 600 } else if (fi->frag != fpos_frag(new_pos)) {
601 return true; 601 return true;
602 } 602 }
603 rinfo = fi->last_readdir ? &fi->last_readdir->r_reply_info : NULL; 603 rinfo = fi->last_readdir ? &fi->last_readdir->r_reply_info : NULL;
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index fa59a85226b2..f72d4ae303b2 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -2759,6 +2759,7 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
2759 } else { 2759 } else {
2760 path = NULL; 2760 path = NULL;
2761 pathlen = 0; 2761 pathlen = 0;
2762 pathbase = 0;
2762 } 2763 }
2763 2764
2764 spin_lock(&ci->i_ceph_lock); 2765 spin_lock(&ci->i_ceph_lock);
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 6bbec5e784cd..14ae4b8e1a3c 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -609,6 +609,9 @@ cifs_get_root(struct smb_vol *vol, struct super_block *sb)
 	char *s, *p;
 	char sep;
 
+	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
+		return dget(sb->s_root);
+
 	full_path = cifs_build_path_to_root(vol, cifs_sb,
 				cifs_sb_master_tcon(cifs_sb));
 	if (full_path == NULL)
@@ -686,26 +689,22 @@ cifs_do_mount(struct file_system_type *fs_type,
 	cifs_sb->mountdata = kstrndup(data, PAGE_SIZE, GFP_KERNEL);
 	if (cifs_sb->mountdata == NULL) {
 		root = ERR_PTR(-ENOMEM);
-		goto out_cifs_sb;
+		goto out_free;
 	}
 
-	if (volume_info->prepath) {
-		cifs_sb->prepath = kstrdup(volume_info->prepath, GFP_KERNEL);
-		if (cifs_sb->prepath == NULL) {
-			root = ERR_PTR(-ENOMEM);
-			goto out_cifs_sb;
-		}
+	rc = cifs_setup_cifs_sb(volume_info, cifs_sb);
+	if (rc) {
+		root = ERR_PTR(rc);
+		goto out_free;
 	}
 
-	cifs_setup_cifs_sb(volume_info, cifs_sb);
-
 	rc = cifs_mount(cifs_sb, volume_info);
 	if (rc) {
 		if (!(flags & MS_SILENT))
 			cifs_dbg(VFS, "cifs_mount failed w/return code = %d\n",
 				 rc);
 		root = ERR_PTR(rc);
-		goto out_mountdata;
+		goto out_free;
 	}
 
 	mnt_data.vol = volume_info;
@@ -735,11 +734,7 @@ cifs_do_mount(struct file_system_type *fs_type,
 		sb->s_flags |= MS_ACTIVE;
 	}
 
-	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
-		root = dget(sb->s_root);
-	else
-		root = cifs_get_root(volume_info, sb);
-
+	root = cifs_get_root(volume_info, sb);
 	if (IS_ERR(root))
 		goto out_super;
 
@@ -752,9 +747,9 @@ out:
 	cifs_cleanup_volume_info(volume_info);
 	return root;
 
-out_mountdata:
+out_free:
+	kfree(cifs_sb->prepath);
 	kfree(cifs_sb->mountdata);
-out_cifs_sb:
 	kfree(cifs_sb);
 out_nls:
 	unload_nls(volume_info->local_nls);
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 1243bd326591..95dab43646f0 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -184,7 +184,7 @@ extern int cifs_read_from_socket(struct TCP_Server_Info *server, char *buf,
 				 unsigned int to_read);
 extern int cifs_read_page_from_socket(struct TCP_Server_Info *server,
 				      struct page *page, unsigned int to_read);
-extern void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
+extern int cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
 			       struct cifs_sb_info *cifs_sb);
 extern int cifs_match_super(struct super_block *, void *);
 extern void cifs_cleanup_volume_info(struct smb_vol *pvolume_info);
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 7ae03283bd61..2e4f4bad8b1e 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -2781,6 +2781,24 @@ compare_mount_options(struct super_block *sb, struct cifs_mnt_data *mnt_data)
 	return 1;
 }
 
+static int
+match_prepath(struct super_block *sb, struct cifs_mnt_data *mnt_data)
+{
+	struct cifs_sb_info *old = CIFS_SB(sb);
+	struct cifs_sb_info *new = mnt_data->cifs_sb;
+
+	if (old->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) {
+		if (!(new->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH))
+			return 0;
+		/* The prepath should be null terminated strings */
+		if (strcmp(new->prepath, old->prepath))
+			return 0;
+
+		return 1;
+	}
+	return 0;
+}
+
 int
 cifs_match_super(struct super_block *sb, void *data)
 {
@@ -2808,7 +2826,8 @@ cifs_match_super(struct super_block *sb, void *data)
 
 	if (!match_server(tcp_srv, volume_info) ||
 	    !match_session(ses, volume_info) ||
-	    !match_tcon(tcon, volume_info->UNC)) {
+	    !match_tcon(tcon, volume_info->UNC) ||
+	    !match_prepath(sb, mnt_data)) {
 		rc = 0;
 		goto out;
 	}
@@ -3222,7 +3241,7 @@ void reset_cifs_unix_caps(unsigned int xid, struct cifs_tcon *tcon,
 	}
 }
 
-void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
+int cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
 			struct cifs_sb_info *cifs_sb)
 {
 	INIT_DELAYED_WORK(&cifs_sb->prune_tlinks, cifs_prune_tlinks);
@@ -3316,6 +3335,14 @@ void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
 
 	if ((pvolume_info->cifs_acl) && (pvolume_info->dynperm))
 		cifs_dbg(VFS, "mount option dynperm ignored if cifsacl mount option supported\n");
+
+	if (pvolume_info->prepath) {
+		cifs_sb->prepath = kstrdup(pvolume_info->prepath, GFP_KERNEL);
+		if (cifs_sb->prepath == NULL)
+			return -ENOMEM;
+	}
+
+	return 0;
 }
 
 static void
diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c
index 0f9961eede1e..ed115acb5dee 100644
--- a/fs/crypto/policy.c
+++ b/fs/crypto/policy.c
@@ -11,6 +11,7 @@
 #include <linux/random.h>
 #include <linux/string.h>
 #include <linux/fscrypto.h>
+#include <linux/mount.h>
 
 static int inode_has_encryption_context(struct inode *inode)
 {
@@ -92,26 +93,42 @@ static int create_encryption_context_from_policy(struct inode *inode,
 	return inode->i_sb->s_cop->set_context(inode, &ctx, sizeof(ctx), NULL);
 }
 
-int fscrypt_process_policy(struct inode *inode,
+int fscrypt_process_policy(struct file *filp,
 			   const struct fscrypt_policy *policy)
 {
+	struct inode *inode = file_inode(filp);
+	int ret;
+
+	if (!inode_owner_or_capable(inode))
+		return -EACCES;
+
 	if (policy->version != 0)
 		return -EINVAL;
 
+	ret = mnt_want_write_file(filp);
+	if (ret)
+		return ret;
+
 	if (!inode_has_encryption_context(inode)) {
-		if (!inode->i_sb->s_cop->empty_dir)
-			return -EOPNOTSUPP;
-		if (!inode->i_sb->s_cop->empty_dir(inode))
-			return -ENOTEMPTY;
-		return create_encryption_context_from_policy(inode, policy);
+		if (!S_ISDIR(inode->i_mode))
+			ret = -EINVAL;
+		else if (!inode->i_sb->s_cop->empty_dir)
+			ret = -EOPNOTSUPP;
+		else if (!inode->i_sb->s_cop->empty_dir(inode))
+			ret = -ENOTEMPTY;
+		else
+			ret = create_encryption_context_from_policy(inode,
+								    policy);
+	} else if (!is_encryption_context_consistent_with_policy(inode,
+								 policy)) {
+		printk(KERN_WARNING
+		       "%s: Policy inconsistent with encryption context\n",
+		       __func__);
+		ret = -EINVAL;
 	}
 
-	if (is_encryption_context_consistent_with_policy(inode, policy))
-		return 0;
-
-	printk(KERN_WARNING "%s: Policy inconsistent with encryption context\n",
-	       __func__);
-	return -EINVAL;
+	mnt_drop_write_file(filp);
+	return ret;
 }
 EXPORT_SYMBOL(fscrypt_process_policy);
 
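[Note] The rewrite above funnels every exit of the handler through a single mnt_drop_write_file() call. The general shape of that ioctl pattern, sketched with a hypothetical handler (my_ioctl_handler is illustrative, not kernel code):

	#include <linux/fs.h>
	#include <linux/mount.h>

	static long my_ioctl_handler(struct file *filp)
	{
		struct inode *inode = file_inode(filp);
		int ret;

		/* Cheap permission checks before taking write access. */
		if (!inode_owner_or_capable(inode))
			return -EACCES;

		ret = mnt_want_write_file(filp);
		if (ret)
			return ret;

		if (!S_ISDIR(inode->i_mode))
			ret = -EINVAL;	/* every branch assigns ret ... */
		else
			ret = 0;	/* ... instead of returning early */

		mnt_drop_write_file(filp);	/* single release point */
		return ret;
	}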
diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
index d116453b0276..79a5941c2474 100644
--- a/fs/devpts/inode.c
+++ b/fs/devpts/inode.c
@@ -585,7 +585,8 @@ struct dentry *devpts_pty_new(struct pts_fs_info *fsi, int index, void *priv)
  */
 void *devpts_get_priv(struct dentry *dentry)
 {
-	WARN_ON_ONCE(dentry->d_sb->s_magic != DEVPTS_SUPER_MAGIC);
+	if (dentry->d_sb->s_magic != DEVPTS_SUPER_MAGIC)
+		return NULL;
 	return dentry->d_fsdata;
 }
 
diff --git a/fs/dlm/debug_fs.c b/fs/dlm/debug_fs.c
index eea64912c9c0..466f7d60edc2 100644
--- a/fs/dlm/debug_fs.c
+++ b/fs/dlm/debug_fs.c
@@ -607,20 +607,54 @@ static const struct file_operations format2_fops;
 static const struct file_operations format3_fops;
 static const struct file_operations format4_fops;
 
-static int table_open(struct inode *inode, struct file *file)
+static int table_open1(struct inode *inode, struct file *file)
 {
 	struct seq_file *seq;
-	int ret = -1;
+	int ret;
 
-	if (file->f_op == &format1_fops)
-		ret = seq_open(file, &format1_seq_ops);
-	else if (file->f_op == &format2_fops)
-		ret = seq_open(file, &format2_seq_ops);
-	else if (file->f_op == &format3_fops)
-		ret = seq_open(file, &format3_seq_ops);
-	else if (file->f_op == &format4_fops)
-		ret = seq_open(file, &format4_seq_ops);
+	ret = seq_open(file, &format1_seq_ops);
+	if (ret)
+		return ret;
+
+	seq = file->private_data;
+	seq->private = inode->i_private; /* the dlm_ls */
+	return 0;
+}
+
+static int table_open2(struct inode *inode, struct file *file)
+{
+	struct seq_file *seq;
+	int ret;
+
+	ret = seq_open(file, &format2_seq_ops);
+	if (ret)
+		return ret;
+
+	seq = file->private_data;
+	seq->private = inode->i_private; /* the dlm_ls */
+	return 0;
+}
+
+static int table_open3(struct inode *inode, struct file *file)
+{
+	struct seq_file *seq;
+	int ret;
+
+	ret = seq_open(file, &format3_seq_ops);
+	if (ret)
+		return ret;
+
+	seq = file->private_data;
+	seq->private = inode->i_private; /* the dlm_ls */
+	return 0;
+}
+
+static int table_open4(struct inode *inode, struct file *file)
+{
+	struct seq_file *seq;
+	int ret;
 
+	ret = seq_open(file, &format4_seq_ops);
 	if (ret)
 		return ret;
 
@@ -631,7 +665,7 @@ static int table_open(struct inode *inode, struct file *file)
 
 static const struct file_operations format1_fops = {
 	.owner   = THIS_MODULE,
-	.open    = table_open,
+	.open    = table_open1,
 	.read    = seq_read,
 	.llseek  = seq_lseek,
 	.release = seq_release
@@ -639,7 +673,7 @@ static const struct file_operations format1_fops = {
 
 static const struct file_operations format2_fops = {
 	.owner   = THIS_MODULE,
-	.open    = table_open,
+	.open    = table_open2,
 	.read    = seq_read,
 	.llseek  = seq_lseek,
 	.release = seq_release
@@ -647,7 +681,7 @@ static const struct file_operations format2_fops = {
 
 static const struct file_operations format3_fops = {
 	.owner   = THIS_MODULE,
-	.open    = table_open,
+	.open    = table_open3,
 	.read    = seq_read,
 	.llseek  = seq_lseek,
 	.release = seq_release
@@ -655,7 +689,7 @@ static const struct file_operations format3_fops = {
 
 static const struct file_operations format4_fops = {
 	.owner   = THIS_MODULE,
-	.open    = table_open,
+	.open    = table_open4,
 	.read    = seq_read,
 	.llseek  = seq_lseek,
 	.release = seq_release
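[Note] Each of the four open routines above is an instance of the standard seq_file idiom: seq_open() allocates the struct seq_file and stores it in file->private_data, and the opener stashes per-file context in seq->private. Splitting one shared open into four presumably avoids the fragile dispatch on file->f_op (which can stop matching once debugfs wraps file operations). A condensed sketch of the idiom, with hypothetical names (my_seq_ops, my_table_open):

	#include <linux/fs.h>
	#include <linux/seq_file.h>

	static const struct seq_operations my_seq_ops;	/* start/next/stop/show */

	static int my_table_open(struct inode *inode, struct file *file)
	{
		struct seq_file *seq;
		int ret;

		ret = seq_open(file, &my_seq_ops);
		if (ret)
			return ret;

		/* Hand the object this file describes to the iterator. */
		seq = file->private_data;
		seq->private = inode->i_private;
		return 0;
	}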
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 3131747199e1..c6ea25a190f8 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -5466,8 +5466,6 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
 						      sbi->s_want_extra_isize,
 						      iloc, handle);
 			if (ret) {
-				ext4_set_inode_state(inode,
-						     EXT4_STATE_NO_EXPAND);
 				if (mnt_count !=
 					le16_to_cpu(sbi->s_es->s_mnt_count)) {
 					ext4_warning(inode->i_sb,
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index 10686fd67fb4..1bb7df5e4536 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -776,7 +776,7 @@ resizefs_out:
 				   (struct fscrypt_policy __user *)arg,
 				   sizeof(policy)))
 			return -EFAULT;
-		return fscrypt_process_policy(inode, &policy);
+		return fscrypt_process_policy(filp, &policy);
 #else
 		return -EOPNOTSUPP;
 #endif
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 1c593aa0218e..3ec8708989ca 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -2211,6 +2211,7 @@ void ext4_group_desc_csum_set(struct super_block *sb, __u32 block_group,
 
 /* Called at mount-time, super-block is locked */
 static int ext4_check_descriptors(struct super_block *sb,
+				  ext4_fsblk_t sb_block,
 				  ext4_group_t *first_not_zeroed)
 {
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
@@ -2241,6 +2242,11 @@ static int ext4_check_descriptors(struct super_block *sb,
 			grp = i;
 
 		block_bitmap = ext4_block_bitmap(sb, gdp);
+		if (block_bitmap == sb_block) {
+			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
+				 "Block bitmap for group %u overlaps "
+				 "superblock", i);
+		}
 		if (block_bitmap < first_block || block_bitmap > last_block) {
 			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
 			       "Block bitmap for group %u not in group "
@@ -2248,6 +2254,11 @@ static int ext4_check_descriptors(struct super_block *sb,
 			return 0;
 		}
 		inode_bitmap = ext4_inode_bitmap(sb, gdp);
+		if (inode_bitmap == sb_block) {
+			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
+				 "Inode bitmap for group %u overlaps "
+				 "superblock", i);
+		}
 		if (inode_bitmap < first_block || inode_bitmap > last_block) {
 			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
 			       "Inode bitmap for group %u not in group "
@@ -2255,6 +2266,11 @@ static int ext4_check_descriptors(struct super_block *sb,
 			return 0;
 		}
 		inode_table = ext4_inode_table(sb, gdp);
+		if (inode_table == sb_block) {
+			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
+				 "Inode table for group %u overlaps "
+				 "superblock", i);
+		}
 		if (inode_table < first_block ||
 		    inode_table + sbi->s_itb_per_group - 1 > last_block) {
 			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
@@ -3757,7 +3773,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 			goto failed_mount2;
 		}
 	}
-	if (!ext4_check_descriptors(sb, &first_not_zeroed)) {
+	if (!ext4_check_descriptors(sb, logical_sb_block, &first_not_zeroed)) {
 		ext4_msg(sb, KERN_ERR, "group descriptors corrupted!");
 		ret = -EFSCORRUPTED;
 		goto failed_mount2;
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 39e9cfb1b371..2eb935ca5d9e 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -1353,15 +1353,19 @@ int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize,
 	size_t min_offs, free;
 	int total_ino;
 	void *base, *start, *end;
-	int extra_isize = 0, error = 0, tried_min_extra_isize = 0;
+	int error = 0, tried_min_extra_isize = 0;
 	int s_min_extra_isize = le16_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_min_extra_isize);
+	int isize_diff;	/* How much do we need to grow i_extra_isize */
 
 	down_write(&EXT4_I(inode)->xattr_sem);
+	/*
+	 * Set EXT4_STATE_NO_EXPAND to avoid recursion when marking inode dirty
+	 */
+	ext4_set_inode_state(inode, EXT4_STATE_NO_EXPAND);
 retry:
-	if (EXT4_I(inode)->i_extra_isize >= new_extra_isize) {
-		up_write(&EXT4_I(inode)->xattr_sem);
-		return 0;
-	}
+	isize_diff = new_extra_isize - EXT4_I(inode)->i_extra_isize;
+	if (EXT4_I(inode)->i_extra_isize >= new_extra_isize)
+		goto out;
 
 	header = IHDR(inode, raw_inode);
 	entry = IFIRST(header);
@@ -1382,7 +1386,7 @@ retry:
 		goto cleanup;
 
 	free = ext4_xattr_free_space(last, &min_offs, base, &total_ino);
-	if (free >= new_extra_isize) {
+	if (free >= isize_diff) {
 		entry = IFIRST(header);
 		ext4_xattr_shift_entries(entry,	EXT4_I(inode)->i_extra_isize
 				- new_extra_isize, (void *)raw_inode +
@@ -1390,8 +1394,7 @@ retry:
 				(void *)header, total_ino,
 				inode->i_sb->s_blocksize);
 		EXT4_I(inode)->i_extra_isize = new_extra_isize;
-		error = 0;
-		goto cleanup;
+		goto out;
 	}
 
 	/*
@@ -1414,7 +1417,7 @@ retry:
 		end = bh->b_data + bh->b_size;
 		min_offs = end - base;
 		free = ext4_xattr_free_space(first, &min_offs, base, NULL);
-		if (free < new_extra_isize) {
+		if (free < isize_diff) {
 			if (!tried_min_extra_isize && s_min_extra_isize) {
 				tried_min_extra_isize++;
 				new_extra_isize = s_min_extra_isize;
@@ -1428,7 +1431,7 @@ retry:
 		free = inode->i_sb->s_blocksize;
 	}
 
-	while (new_extra_isize > 0) {
+	while (isize_diff > 0) {
 		size_t offs, size, entry_size;
 		struct ext4_xattr_entry *small_entry = NULL;
 		struct ext4_xattr_info i = {
@@ -1459,7 +1462,7 @@ retry:
 				EXT4_XATTR_SIZE(le32_to_cpu(last->e_value_size)) +
 				EXT4_XATTR_LEN(last->e_name_len);
 			if (total_size <= free && total_size < min_total_size) {
-				if (total_size < new_extra_isize) {
+				if (total_size < isize_diff) {
 					small_entry = last;
 				} else {
 					entry = last;
@@ -1514,22 +1517,22 @@ retry:
 		error = ext4_xattr_ibody_set(handle, inode, &i, is);
 		if (error)
 			goto cleanup;
+		total_ino -= entry_size;
 
 		entry = IFIRST(header);
-		if (entry_size + EXT4_XATTR_SIZE(size) >= new_extra_isize)
-			shift_bytes = new_extra_isize;
+		if (entry_size + EXT4_XATTR_SIZE(size) >= isize_diff)
+			shift_bytes = isize_diff;
 		else
-			shift_bytes = entry_size + size;
+			shift_bytes = entry_size + EXT4_XATTR_SIZE(size);
 		/* Adjust the offsets and shift the remaining entries ahead */
-		ext4_xattr_shift_entries(entry, EXT4_I(inode)->i_extra_isize -
-			shift_bytes, (void *)raw_inode +
-			EXT4_GOOD_OLD_INODE_SIZE + extra_isize + shift_bytes,
-			(void *)header, total_ino - entry_size,
-			inode->i_sb->s_blocksize);
+		ext4_xattr_shift_entries(entry, -shift_bytes,
+			(void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE +
+			EXT4_I(inode)->i_extra_isize + shift_bytes,
+			(void *)header, total_ino, inode->i_sb->s_blocksize);
 
-		extra_isize += shift_bytes;
-		new_extra_isize -= shift_bytes;
-		EXT4_I(inode)->i_extra_isize = extra_isize;
+		isize_diff -= shift_bytes;
+		EXT4_I(inode)->i_extra_isize += shift_bytes;
+		header = IHDR(inode, raw_inode);
 
 		i.name = b_entry_name;
 		i.value = buffer;
@@ -1551,6 +1554,8 @@ retry:
 		kfree(bs);
 	}
 	brelse(bh);
+out:
+	ext4_clear_inode_state(inode, EXT4_STATE_NO_EXPAND);
 	up_write(&EXT4_I(inode)->xattr_sem);
 	return 0;
 
@@ -1562,6 +1567,10 @@ cleanup:
 	kfree(is);
 	kfree(bs);
 	brelse(bh);
+	/*
+	 * We deliberately leave EXT4_STATE_NO_EXPAND set here since inode
+	 * size expansion failed.
+	 */
 	up_write(&EXT4_I(inode)->xattr_sem);
 	return error;
 }
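[Note] The NO_EXPAND handling above (together with the removal from ext4_mark_inode_dirty() earlier) is a recursion guard: expanding i_extra_isize marks the inode dirty, which would otherwise re-enter the expansion path. A stripped-down sketch of the guard, with hypothetical helpers (my_set_state, do_expand) standing in for ext4's inode-state machinery:

	#include <linux/fs.h>

	/* Hypothetical stand-ins for ext4_{set,clear}_inode_state() etc. */
	#define MY_STATE_NO_EXPAND 0
	extern void my_set_state(struct inode *inode, int bit);
	extern void my_clear_state(struct inode *inode, int bit);
	extern int do_expand(struct inode *inode);	/* may re-dirty inode */

	static int my_expand(struct inode *inode)
	{
		int error;

		my_set_state(inode, MY_STATE_NO_EXPAND);	/* block re-entry */
		error = do_expand(inode);
		if (error)
			return error;	/* deliberately leave the bit set:
					 * expansion failed, do not retry */

		my_clear_state(inode, MY_STATE_NO_EXPAND);
		return 0;
	}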
diff --git a/fs/ext4/xattr.h b/fs/ext4/xattr.h
index 69dd3e6566e0..a92e783fa057 100644
--- a/fs/ext4/xattr.h
+++ b/fs/ext4/xattr.h
@@ -24,6 +24,7 @@
 #define EXT4_XATTR_INDEX_SYSTEM			7
 #define EXT4_XATTR_INDEX_RICHACL		8
 #define EXT4_XATTR_INDEX_ENCRYPTION		9
+#define EXT4_XATTR_INDEX_HURD			10 /* Reserved for Hurd */
 
 struct ext4_xattr_header {
 	__le32	h_magic;	/* magic number for identification */
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index d64d2a515cb2..ccb401eebc11 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -1699,11 +1699,11 @@ static int f2fs_write_end(struct file *file,
 	trace_f2fs_write_end(inode, pos, len, copied);
 
 	set_page_dirty(page);
-	f2fs_put_page(page, 1);
 
 	if (pos + copied > i_size_read(inode))
 		f2fs_i_size_write(inode, pos + copied);
 
+	f2fs_put_page(page, 1);
 	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
 	return copied;
 }
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 675fa79d86f6..14f5fe2b841e 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -538,7 +538,7 @@ struct f2fs_nm_info {
 	/* NAT cache management */
 	struct radix_tree_root nat_root;/* root of the nat entry cache */
 	struct radix_tree_root nat_set_root;/* root of the nat set cache */
-	struct percpu_rw_semaphore nat_tree_lock;	/* protect nat_tree_lock */
+	struct rw_semaphore nat_tree_lock;	/* protect nat_tree_lock */
 	struct list_head nat_entries;	/* cached nat entry list (clean) */
 	unsigned int nat_cnt;		/* the # of cached nat entries */
 	unsigned int dirty_nat_cnt;	/* total num of nat entries in set */
@@ -787,7 +787,7 @@ struct f2fs_sb_info {
 	struct f2fs_checkpoint *ckpt;		/* raw checkpoint pointer */
 	struct inode *meta_inode;		/* cache meta blocks */
 	struct mutex cp_mutex;			/* checkpoint procedure lock */
-	struct percpu_rw_semaphore cp_rwsem;	/* blocking FS operations */
+	struct rw_semaphore cp_rwsem;		/* blocking FS operations */
 	struct rw_semaphore node_write;		/* locking node writes */
 	wait_queue_head_t cp_wait;
 	unsigned long last_time[MAX_TIME];	/* to store time in jiffies */
@@ -1074,22 +1074,22 @@ static inline void clear_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
 
 static inline void f2fs_lock_op(struct f2fs_sb_info *sbi)
 {
-	percpu_down_read(&sbi->cp_rwsem);
+	down_read(&sbi->cp_rwsem);
 }
 
 static inline void f2fs_unlock_op(struct f2fs_sb_info *sbi)
 {
-	percpu_up_read(&sbi->cp_rwsem);
+	up_read(&sbi->cp_rwsem);
 }
 
 static inline void f2fs_lock_all(struct f2fs_sb_info *sbi)
 {
-	percpu_down_write(&sbi->cp_rwsem);
+	down_write(&sbi->cp_rwsem);
 }
 
 static inline void f2fs_unlock_all(struct f2fs_sb_info *sbi)
 {
-	percpu_up_write(&sbi->cp_rwsem);
+	up_write(&sbi->cp_rwsem);
 }
 
 static inline int __get_cp_reason(struct f2fs_sb_info *sbi)
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index 0e493f63ea41..28f4f4cbb8d8 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -1757,21 +1757,14 @@ static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
 {
 	struct fscrypt_policy policy;
 	struct inode *inode = file_inode(filp);
-	int ret;
 
 	if (copy_from_user(&policy, (struct fscrypt_policy __user *)arg,
 							sizeof(policy)))
 		return -EFAULT;
 
-	ret = mnt_want_write_file(filp);
-	if (ret)
-		return ret;
-
 	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
-	ret = fscrypt_process_policy(inode, &policy);
 
-	mnt_drop_write_file(filp);
-	return ret;
+	return fscrypt_process_policy(filp, &policy);
 }
 
 static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
@@ -2086,15 +2079,19 @@ static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
 	if (unlikely(f2fs_readonly(src->i_sb)))
 		return -EROFS;
 
-	if (S_ISDIR(src->i_mode) || S_ISDIR(dst->i_mode))
-		return -EISDIR;
+	if (!S_ISREG(src->i_mode) || !S_ISREG(dst->i_mode))
+		return -EINVAL;
 
 	if (f2fs_encrypted_inode(src) || f2fs_encrypted_inode(dst))
 		return -EOPNOTSUPP;
 
 	inode_lock(src);
-	if (src != dst)
-		inode_lock(dst);
+	if (src != dst) {
+		if (!inode_trylock(dst)) {
+			ret = -EBUSY;
+			goto out;
+		}
+	}
 
 	ret = -EINVAL;
 	if (pos_in + len > src->i_size || pos_in + len < pos_in)
@@ -2152,6 +2149,7 @@ static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
 out_unlock:
 	if (src != dst)
 		inode_unlock(dst);
+out:
 	inode_unlock(src);
 	return ret;
 }
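[Note] Taking dst's lock with inode_trylock() while already holding src's lock avoids an ABBA deadlock when two concurrent moves pick the same pair of inodes in opposite orders. A sketch of that lock-ordering pattern (my_move is illustrative, not the f2fs function):

	#include <linux/fs.h>

	static int my_move(struct inode *src, struct inode *dst)
	{
		int ret = 0;

		inode_lock(src);
		if (src != dst && !inode_trylock(dst)) {
			inode_unlock(src);
			return -EBUSY;	/* back off rather than wait */
		}

		/* ... do the work under both locks ... */

		if (src != dst)
			inode_unlock(dst);
		inode_unlock(src);
		return ret;
	}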
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index b2fa4b615925..f75d197d5beb 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -206,14 +206,14 @@ int need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid)
 	struct nat_entry *e;
 	bool need = false;
 
-	percpu_down_read(&nm_i->nat_tree_lock);
+	down_read(&nm_i->nat_tree_lock);
 	e = __lookup_nat_cache(nm_i, nid);
 	if (e) {
 		if (!get_nat_flag(e, IS_CHECKPOINTED) &&
 				!get_nat_flag(e, HAS_FSYNCED_INODE))
 			need = true;
 	}
-	percpu_up_read(&nm_i->nat_tree_lock);
+	up_read(&nm_i->nat_tree_lock);
 	return need;
 }
 
@@ -223,11 +223,11 @@ bool is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
 	struct nat_entry *e;
 	bool is_cp = true;
 
-	percpu_down_read(&nm_i->nat_tree_lock);
+	down_read(&nm_i->nat_tree_lock);
 	e = __lookup_nat_cache(nm_i, nid);
 	if (e && !get_nat_flag(e, IS_CHECKPOINTED))
 		is_cp = false;
-	percpu_up_read(&nm_i->nat_tree_lock);
+	up_read(&nm_i->nat_tree_lock);
 	return is_cp;
 }
 
@@ -237,13 +237,13 @@ bool need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino)
 	struct nat_entry *e;
 	bool need_update = true;
 
-	percpu_down_read(&nm_i->nat_tree_lock);
+	down_read(&nm_i->nat_tree_lock);
 	e = __lookup_nat_cache(nm_i, ino);
 	if (e && get_nat_flag(e, HAS_LAST_FSYNC) &&
 			(get_nat_flag(e, IS_CHECKPOINTED) ||
 			 get_nat_flag(e, HAS_FSYNCED_INODE)))
 		need_update = false;
-	percpu_up_read(&nm_i->nat_tree_lock);
+	up_read(&nm_i->nat_tree_lock);
 	return need_update;
 }
 
@@ -284,7 +284,7 @@ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
 	struct f2fs_nm_info *nm_i = NM_I(sbi);
 	struct nat_entry *e;
 
-	percpu_down_write(&nm_i->nat_tree_lock);
+	down_write(&nm_i->nat_tree_lock);
 	e = __lookup_nat_cache(nm_i, ni->nid);
 	if (!e) {
 		e = grab_nat_entry(nm_i, ni->nid);
@@ -334,7 +334,7 @@ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
 		set_nat_flag(e, HAS_FSYNCED_INODE, true);
 		set_nat_flag(e, HAS_LAST_FSYNC, fsync_done);
 	}
-	percpu_up_write(&nm_i->nat_tree_lock);
+	up_write(&nm_i->nat_tree_lock);
 }
 
 int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
@@ -342,7 +342,8 @@ int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
 	struct f2fs_nm_info *nm_i = NM_I(sbi);
 	int nr = nr_shrink;
 
-	percpu_down_write(&nm_i->nat_tree_lock);
+	if (!down_write_trylock(&nm_i->nat_tree_lock))
+		return 0;
 
 	while (nr_shrink && !list_empty(&nm_i->nat_entries)) {
 		struct nat_entry *ne;
@@ -351,7 +352,7 @@ int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
 		__del_from_nat_cache(nm_i, ne);
 		nr_shrink--;
 	}
-	percpu_up_write(&nm_i->nat_tree_lock);
+	up_write(&nm_i->nat_tree_lock);
 	return nr - nr_shrink;
 }
 
@@ -373,13 +374,13 @@ void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
 	ni->nid = nid;
 
 	/* Check nat cache */
-	percpu_down_read(&nm_i->nat_tree_lock);
+	down_read(&nm_i->nat_tree_lock);
 	e = __lookup_nat_cache(nm_i, nid);
 	if (e) {
 		ni->ino = nat_get_ino(e);
 		ni->blk_addr = nat_get_blkaddr(e);
 		ni->version = nat_get_version(e);
-		percpu_up_read(&nm_i->nat_tree_lock);
+		up_read(&nm_i->nat_tree_lock);
 		return;
 	}
 
@@ -403,11 +404,11 @@ void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
 	node_info_from_raw_nat(ni, &ne);
 	f2fs_put_page(page, 1);
 cache:
-	percpu_up_read(&nm_i->nat_tree_lock);
+	up_read(&nm_i->nat_tree_lock);
 	/* cache nat entry */
-	percpu_down_write(&nm_i->nat_tree_lock);
+	down_write(&nm_i->nat_tree_lock);
 	cache_nat_entry(sbi, nid, &ne);
-	percpu_up_write(&nm_i->nat_tree_lock);
+	up_write(&nm_i->nat_tree_lock);
 }
 
 /*
@@ -1788,7 +1789,7 @@ void build_free_nids(struct f2fs_sb_info *sbi)
 	ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES,
 							META_NAT, true);
 
-	percpu_down_read(&nm_i->nat_tree_lock);
+	down_read(&nm_i->nat_tree_lock);
 
 	while (1) {
 		struct page *page = get_current_nat_page(sbi, nid);
@@ -1820,7 +1821,7 @@ void build_free_nids(struct f2fs_sb_info *sbi)
 			remove_free_nid(nm_i, nid);
 	}
 	up_read(&curseg->journal_rwsem);
-	percpu_up_read(&nm_i->nat_tree_lock);
+	up_read(&nm_i->nat_tree_lock);
 
 	ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid),
 					nm_i->ra_nid_pages, META_NAT, false);
@@ -2209,7 +2210,7 @@ void flush_nat_entries(struct f2fs_sb_info *sbi)
 	if (!nm_i->dirty_nat_cnt)
 		return;
 
-	percpu_down_write(&nm_i->nat_tree_lock);
+	down_write(&nm_i->nat_tree_lock);
 
 	/*
 	 * if there are no enough space in journal to store dirty nat
@@ -2232,7 +2233,7 @@ void flush_nat_entries(struct f2fs_sb_info *sbi)
 	list_for_each_entry_safe(set, tmp, &sets, set_list)
 		__flush_nat_entry_set(sbi, set);
 
-	percpu_up_write(&nm_i->nat_tree_lock);
+	up_write(&nm_i->nat_tree_lock);
 
 	f2fs_bug_on(sbi, nm_i->dirty_nat_cnt);
 }
@@ -2268,8 +2269,7 @@ static int init_node_manager(struct f2fs_sb_info *sbi)
 
 	mutex_init(&nm_i->build_lock);
 	spin_lock_init(&nm_i->free_nid_list_lock);
-	if (percpu_init_rwsem(&nm_i->nat_tree_lock))
-		return -ENOMEM;
+	init_rwsem(&nm_i->nat_tree_lock);
 
 	nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
 	nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
@@ -2326,7 +2326,7 @@ void destroy_node_manager(struct f2fs_sb_info *sbi)
 	spin_unlock(&nm_i->free_nid_list_lock);
 
 	/* destroy nat cache */
-	percpu_down_write(&nm_i->nat_tree_lock);
+	down_write(&nm_i->nat_tree_lock);
 	while ((found = __gang_lookup_nat_cache(nm_i,
 			nid, NATVEC_SIZE, natvec))) {
 		unsigned idx;
@@ -2351,9 +2351,8 @@ void destroy_node_manager(struct f2fs_sb_info *sbi)
 			kmem_cache_free(nat_entry_set_slab, setvec[idx]);
 		}
 	}
-	percpu_up_write(&nm_i->nat_tree_lock);
+	up_write(&nm_i->nat_tree_lock);
 
-	percpu_free_rwsem(&nm_i->nat_tree_lock);
 	kfree(nm_i->nat_bitmap);
 	sbi->nm_info = NULL;
 	kfree(nm_i);
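[Note] Beyond the mechanical percpu_rw_semaphore -> rw_semaphore conversion, the plain rwsem gives try_to_free_nats() access to down_write_trylock(), so the NAT shrinker now backs off instead of blocking when the lock is contended. The general shrinker-friendly shape of that, with hypothetical names (my_cache_lock, my_evict_one):

	#include <linux/rwsem.h>

	/* Illustrative placeholders, not f2fs symbols. */
	extern struct rw_semaphore my_cache_lock;
	extern int my_evict_one(void);

	static int my_shrink(int nr)
	{
		int freed = 0;

		/* Reclaim must not sleep on a contended lock. */
		if (!down_write_trylock(&my_cache_lock))
			return 0;	/* report nothing freed; retry later */

		while (nr-- && my_evict_one())
			freed++;

		up_write(&my_cache_lock);
		return freed;
	}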
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 1b86d3f638ef..7f863a645ab1 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -706,8 +706,6 @@ static void destroy_percpu_info(struct f2fs_sb_info *sbi)
 		percpu_counter_destroy(&sbi->nr_pages[i]);
 	percpu_counter_destroy(&sbi->alloc_valid_block_count);
 	percpu_counter_destroy(&sbi->total_valid_inode_count);
-
-	percpu_free_rwsem(&sbi->cp_rwsem);
 }
 
 static void f2fs_put_super(struct super_block *sb)
@@ -1483,9 +1481,6 @@ static int init_percpu_info(struct f2fs_sb_info *sbi)
 {
 	int i, err;
 
-	if (percpu_init_rwsem(&sbi->cp_rwsem))
-		return -ENOMEM;
-
 	for (i = 0; i < NR_COUNT_TYPE; i++) {
 		err = percpu_counter_init(&sbi->nr_pages[i], 0, GFP_KERNEL);
 		if (err)
@@ -1686,6 +1681,7 @@ try_onemore:
 		sbi->write_io[i].bio = NULL;
 	}
 
+	init_rwsem(&sbi->cp_rwsem);
 	init_waitqueue_head(&sbi->cp_wait);
 	init_sb_info(sbi);
 
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 4d09d4441e3e..05713a5da083 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -1949,6 +1949,12 @@ void wakeup_flusher_threads(long nr_pages, enum wb_reason reason)
 {
 	struct backing_dev_info *bdi;
 
+	/*
+	 * If we are expecting writeback progress we must submit plugged IO.
+	 */
+	if (blk_needs_flush_plug(current))
+		blk_schedule_flush_plug(current);
+
 	if (!nr_pages)
 		nr_pages = get_nr_dirty_pages();
 
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index f394aff59c36..3988b43c2f5a 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -530,13 +530,13 @@ void fuse_read_fill(struct fuse_req *req, struct file *file, loff_t pos,
 	req->out.args[0].size = count;
 }
 
-static void fuse_release_user_pages(struct fuse_req *req, int write)
+static void fuse_release_user_pages(struct fuse_req *req, bool should_dirty)
 {
 	unsigned i;
 
 	for (i = 0; i < req->num_pages; i++) {
 		struct page *page = req->pages[i];
-		if (write)
+		if (should_dirty)
 			set_page_dirty_lock(page);
 		put_page(page);
 	}
@@ -1320,6 +1320,7 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
 		       loff_t *ppos, int flags)
 {
 	int write = flags & FUSE_DIO_WRITE;
+	bool should_dirty = !write && iter_is_iovec(iter);
 	int cuse = flags & FUSE_DIO_CUSE;
 	struct file *file = io->file;
 	struct inode *inode = file->f_mapping->host;
@@ -1363,7 +1364,7 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
 			nres = fuse_send_read(req, io, pos, nbytes, owner);
 
 		if (!io->async)
-			fuse_release_user_pages(req, !write);
+			fuse_release_user_pages(req, should_dirty);
 		if (req->out.h.error) {
 			err = req->out.h.error;
 			break;
diff --git a/fs/ioctl.c b/fs/ioctl.c
index 0f56deb24ce6..c415668c86d4 100644
--- a/fs/ioctl.c
+++ b/fs/ioctl.c
@@ -568,7 +568,7 @@ static int ioctl_fsthaw(struct file *filp)
 	return thaw_super(sb);
 }
 
-static long ioctl_file_dedupe_range(struct file *file, void __user *arg)
+static int ioctl_file_dedupe_range(struct file *file, void __user *arg)
 {
 	struct file_dedupe_range __user *argp = arg;
 	struct file_dedupe_range *same = NULL;
@@ -582,6 +582,10 @@ static long ioctl_file_dedupe_range(struct file *file, void __user *arg)
 	}
 
 	size = offsetof(struct file_dedupe_range __user, info[count]);
+	if (size > PAGE_SIZE) {
+		ret = -ENOMEM;
+		goto out;
+	}
 
 	same = memdup_user(argp, size);
 	if (IS_ERR(same)) {
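[Note] The new bound matters because the record count comes from the user-supplied request and feeds offsetof-based size arithmetic; capping at PAGE_SIZE keeps memdup_user() from attempting an oversized allocation. A self-contained illustration of the offsetof computation (plain userspace C; the struct mirrors the header-plus-flexible-array layout but is not the kernel definition):

	#include <stddef.h>
	#include <stdio.h>

	struct info { long long offset, bytes; };
	struct range {
		long long src_offset;
		unsigned short count;
		struct info info[];	/* flexible array member */
	};

	int main(void)
	{
		unsigned short count = 128;
		/* Header plus `count` trailing records, padding included: */
		size_t size = offsetof(struct range, info[count]);

		printf("%zu bytes for %u records\n", size, count);
		return 0;
	}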
diff --git a/fs/iomap.c b/fs/iomap.c
index 48141b8eff5f..706270f21b35 100644
--- a/fs/iomap.c
+++ b/fs/iomap.c
@@ -84,8 +84,11 @@ iomap_apply(struct inode *inode, loff_t pos, loff_t length, unsigned flags,
 	 * Now the data has been copied, commit the range we've copied.  This
 	 * should not fail unless the filesystem has had a fatal error.
 	 */
-	ret = ops->iomap_end(inode, pos, length, written > 0 ? written : 0,
-			flags, &iomap);
+	if (ops->iomap_end) {
+		ret = ops->iomap_end(inode, pos, length,
+				     written > 0 ? written : 0,
+				     flags, &iomap);
+	}
 
 	return written ? written : ret;
 }
@@ -194,12 +197,9 @@ again:
 		if (mapping_writably_mapped(inode->i_mapping))
 			flush_dcache_page(page);
 
-		pagefault_disable();
 		copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
-		pagefault_enable();
 
 		flush_dcache_page(page);
-		mark_page_accessed(page);
 
 		status = iomap_write_end(inode, pos, bytes, copied, page);
 		if (unlikely(status < 0))
@@ -428,9 +428,12 @@ static int iomap_to_fiemap(struct fiemap_extent_info *fi,
 		break;
 	}
 
+	if (iomap->flags & IOMAP_F_MERGED)
+		flags |= FIEMAP_EXTENT_MERGED;
+
 	return fiemap_fill_next_extent(fi, iomap->offset,
 			iomap->blkno != IOMAP_NULL_BLOCK ? iomap->blkno << 9: 0,
-			iomap->length, flags | FIEMAP_EXTENT_MERGED);
+			iomap->length, flags);
 
 }
 
@@ -470,13 +473,18 @@ int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fi,
 	if (ret)
 		return ret;
 
-	ret = filemap_write_and_wait(inode->i_mapping);
-	if (ret)
-		return ret;
+	if (fi->fi_flags & FIEMAP_FLAG_SYNC) {
+		ret = filemap_write_and_wait(inode->i_mapping);
+		if (ret)
+			return ret;
+	}
 
 	while (len > 0) {
 		ret = iomap_apply(inode, start, len, 0, ops, &ctx,
 				iomap_fiemap_actor);
+		/* inode with no (attribute) mapping will give ENOENT */
+		if (ret == -ENOENT)
+			break;
 		if (ret < 0)
 			return ret;
 		if (ret == 0)
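[Note] Making ->iomap_end optional follows the usual ops-table convention: an optional method pointer is NULL-checked at the call site so simple implementations can omit it. A self-contained sketch of the convention (my_ops/my_apply are illustrative, not the iomap API):

	struct my_ops {
		int (*begin)(int arg);			/* required */
		int (*end)(int arg, int written);	/* optional */
	};

	static int my_apply(const struct my_ops *ops, int arg)
	{
		int ret, written = 0;

		ret = ops->begin(arg);	/* required hook, called directly */
		if (ret)
			return ret;

		/* ... actor runs here and updates written ... */

		if (ops->end)		/* tolerate a NULL optional hook */
			ret = ops->end(arg, written > 0 ? written : 0);

		return written ? written : ret;
	}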
diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
index e1574008adc9..2bcb86e6e6ca 100644
--- a/fs/kernfs/file.c
+++ b/fs/kernfs/file.c
@@ -840,21 +840,35 @@ repeat:
 	mutex_lock(&kernfs_mutex);
 
 	list_for_each_entry(info, &kernfs_root(kn)->supers, node) {
+		struct kernfs_node *parent;
 		struct inode *inode;
-		struct dentry *dentry;
 
+		/*
+		 * We want fsnotify_modify() on @kn but as the
+		 * modifications aren't originating from userland don't
+		 * have the matching @file available. Look up the inodes
+		 * and generate the events manually.
+		 */
 		inode = ilookup(info->sb, kn->ino);
 		if (!inode)
 			continue;
 
-		dentry = d_find_any_alias(inode);
-		if (dentry) {
-			fsnotify_parent(NULL, dentry, FS_MODIFY);
-			fsnotify(inode, FS_MODIFY, inode, FSNOTIFY_EVENT_INODE,
-				 NULL, 0);
-			dput(dentry);
+		parent = kernfs_get_parent(kn);
+		if (parent) {
+			struct inode *p_inode;
+
+			p_inode = ilookup(info->sb, parent->ino);
+			if (p_inode) {
+				fsnotify(p_inode, FS_MODIFY | FS_EVENT_ON_CHILD,
+					 inode, FSNOTIFY_EVENT_INODE, kn->name, 0);
+				iput(p_inode);
+			}
+
+			kernfs_put(parent);
 		}
 
+		fsnotify(inode, FS_MODIFY, inode, FSNOTIFY_EVENT_INODE,
+			 kn->name, 0);
 		iput(inode);
 	}
 
diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
index f55a4e756047..217847679f0e 100644
--- a/fs/nfs/blocklayout/blocklayout.c
+++ b/fs/nfs/blocklayout/blocklayout.c
@@ -346,7 +346,7 @@ static void bl_write_cleanup(struct work_struct *work)
 			PAGE_SIZE - 1) & (loff_t)PAGE_MASK;
 
 		ext_tree_mark_written(bl, start >> SECTOR_SHIFT,
-					(end - start) >> SECTOR_SHIFT);
+					(end - start) >> SECTOR_SHIFT, end);
 	}
 
 	pnfs_ld_write_done(hdr);
diff --git a/fs/nfs/blocklayout/blocklayout.h b/fs/nfs/blocklayout/blocklayout.h
index 18e6fd0b9506..efc007f00742 100644
--- a/fs/nfs/blocklayout/blocklayout.h
+++ b/fs/nfs/blocklayout/blocklayout.h
@@ -141,6 +141,7 @@ struct pnfs_block_layout {
 	struct rb_root		bl_ext_ro;
 	spinlock_t		bl_ext_lock;   /* Protects list manipulation */
 	bool			bl_scsi_layout;
+	u64			bl_lwb;
 };
 
 static inline struct pnfs_block_layout *
@@ -182,7 +183,7 @@ int ext_tree_insert(struct pnfs_block_layout *bl,
 int ext_tree_remove(struct pnfs_block_layout *bl, bool rw, sector_t start,
 		sector_t end);
 int ext_tree_mark_written(struct pnfs_block_layout *bl, sector_t start,
-		sector_t len);
+		sector_t len, u64 lwb);
 bool ext_tree_lookup(struct pnfs_block_layout *bl, sector_t isect,
 		struct pnfs_block_extent *ret, bool rw);
 int ext_tree_prepare_commit(struct nfs4_layoutcommit_args *arg);
diff --git a/fs/nfs/blocklayout/extent_tree.c b/fs/nfs/blocklayout/extent_tree.c
index 992bcb19c11e..c85fbfd2d0d9 100644
--- a/fs/nfs/blocklayout/extent_tree.c
+++ b/fs/nfs/blocklayout/extent_tree.c
@@ -402,7 +402,7 @@ ext_tree_split(struct rb_root *root, struct pnfs_block_extent *be,
 
 int
 ext_tree_mark_written(struct pnfs_block_layout *bl, sector_t start,
-		sector_t len)
+		sector_t len, u64 lwb)
 {
 	struct rb_root *root = &bl->bl_ext_rw;
 	sector_t end = start + len;
@@ -471,6 +471,8 @@ ext_tree_mark_written(struct pnfs_block_layout *bl, sector_t start,
 		}
 	}
 out:
+	if (bl->bl_lwb < lwb)
+		bl->bl_lwb = lwb;
 	spin_unlock(&bl->bl_ext_lock);
 
 	__ext_put_deviceids(&tmp);
@@ -518,7 +520,7 @@ static __be32 *encode_scsi_range(struct pnfs_block_extent *be, __be32 *p)
 }
 
 static int ext_tree_encode_commit(struct pnfs_block_layout *bl, __be32 *p,
-		size_t buffer_size, size_t *count)
+		size_t buffer_size, size_t *count, __u64 *lastbyte)
 {
 	struct pnfs_block_extent *be;
 	int ret = 0;
@@ -542,6 +544,8 @@ static int ext_tree_encode_commit(struct pnfs_block_layout *bl, __be32 *p,
 			p = encode_block_extent(be, p);
 		be->be_tag = EXTENT_COMMITTING;
 	}
+	*lastbyte = bl->bl_lwb - 1;
+	bl->bl_lwb = 0;
 	spin_unlock(&bl->bl_ext_lock);
 
 	return ret;
@@ -564,7 +568,7 @@ ext_tree_prepare_commit(struct nfs4_layoutcommit_args *arg)
 	arg->layoutupdate_pages = &arg->layoutupdate_page;
 
 retry:
-	ret = ext_tree_encode_commit(bl, start_p + 1, buffer_size, &count);
+	ret = ext_tree_encode_commit(bl, start_p + 1, buffer_size, &count, &arg->lastbytewritten);
 	if (unlikely(ret)) {
 		ext_tree_free_commitdata(arg, buffer_size);
 
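[Note] bl_lwb is a high-water mark: writers record the last byte written under bl_ext_lock, and the commit encoder harvests and resets it under the same lock to populate lastbytewritten. The pattern in isolation, with hypothetical names (my_layout, my_harvest):

	#include <linux/spinlock.h>

	struct my_layout {
		spinlock_t lock;
		u64 lwb;	/* last written byte + 1; 0 = nothing pending */
	};

	static void my_mark_written(struct my_layout *l, u64 end)
	{
		spin_lock(&l->lock);
		if (l->lwb < end)
			l->lwb = end;	/* only ever grows between commits */
		spin_unlock(&l->lock);
	}

	static u64 my_harvest(struct my_layout *l)
	{
		u64 last;

		spin_lock(&l->lock);
		last = l->lwb - 1;	/* report an inclusive last byte */
		l->lwb = 0;		/* arm for the next round of writes */
		spin_unlock(&l->lock);
		return last;
	}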
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
index a7f2e6e33305..52a28311e2a4 100644
--- a/fs/nfs/callback.c
+++ b/fs/nfs/callback.c
@@ -275,6 +275,7 @@ static int nfs_callback_up_net(int minorversion, struct svc_serv *serv,
 err_socks:
 	svc_rpcb_cleanup(serv, net);
 err_bind:
+	nn->cb_users[minorversion]--;
 	dprintk("NFS: Couldn't create callback socket: err = %d; "
 			"net = %p\n", ret, net);
 	return ret;
diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
index c92a75e066a6..f953ef6b2f2e 100644
--- a/fs/nfs/callback_proc.c
+++ b/fs/nfs/callback_proc.c
@@ -454,11 +454,8 @@ static bool referring_call_exists(struct nfs_client *clp,
 				((u32 *)&rclist->rcl_sessionid.data)[3],
 				ref->rc_sequenceid, ref->rc_slotid);
 
-			spin_lock(&tbl->slot_tbl_lock);
-			status = (test_bit(ref->rc_slotid, tbl->used_slots) &&
-				  tbl->slots[ref->rc_slotid].seq_nr ==
-					ref->rc_sequenceid);
-			spin_unlock(&tbl->slot_tbl_lock);
+			status = nfs4_slot_wait_on_seqid(tbl, ref->rc_slotid,
+					ref->rc_sequenceid, HZ >> 1) < 0;
 			if (status)
 				goto out;
 		}
@@ -487,7 +484,6 @@ __be32 nfs4_callback_sequence(struct cb_sequenceargs *args,
 		goto out;
 
 	tbl = &clp->cl_session->bc_slot_table;
-	slot = tbl->slots + args->csa_slotid;
 
 	/* Set up res before grabbing the spinlock */
 	memcpy(&res->csr_sessionid, &args->csa_sessionid,
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 003ebce4bbc4..1e106780a237 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -426,7 +426,7 @@ EXPORT_SYMBOL_GPL(nfs_mark_client_ready);
  * Initialise the timeout values for a connection
  */
 void nfs_init_timeout_values(struct rpc_timeout *to, int proto,
-				    unsigned int timeo, unsigned int retrans)
+				    int timeo, int retrans)
 {
 	to->to_initval = timeo * HZ / 10;
 	to->to_retries = retrans;
@@ -434,9 +434,9 @@ void nfs_init_timeout_values(struct rpc_timeout *to, int proto,
 	switch (proto) {
 	case XPRT_TRANSPORT_TCP:
 	case XPRT_TRANSPORT_RDMA:
-		if (to->to_retries == 0)
+		if (retrans == NFS_UNSPEC_RETRANS)
 			to->to_retries = NFS_DEF_TCP_RETRANS;
-		if (to->to_initval == 0)
+		if (timeo == NFS_UNSPEC_TIMEO || to->to_retries == 0)
 			to->to_initval = NFS_DEF_TCP_TIMEO * HZ / 10;
 		if (to->to_initval > NFS_MAX_TCP_TIMEOUT)
 			to->to_initval = NFS_MAX_TCP_TIMEOUT;
@@ -449,9 +449,9 @@ void nfs_init_timeout_values(struct rpc_timeout *to, int proto,
 		to->to_exponential = 0;
 		break;
 	case XPRT_TRANSPORT_UDP:
-		if (to->to_retries == 0)
+		if (retrans == NFS_UNSPEC_RETRANS)
 			to->to_retries = NFS_DEF_UDP_RETRANS;
-		if (!to->to_initval)
+		if (timeo == NFS_UNSPEC_TIMEO || to->to_initval == 0)
 			to->to_initval = NFS_DEF_UDP_TIMEO * HZ / 10;
 		if (to->to_initval > NFS_MAX_UDP_TIMEOUT)
 			to->to_initval = NFS_MAX_UDP_TIMEOUT;
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 7d620970f2e1..ca699ddc11c1 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -657,7 +657,10 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from)
 	if (result <= 0)
 		goto out;
 
-	written = generic_write_sync(iocb, result);
+	result = generic_write_sync(iocb, result);
+	if (result < 0)
+		goto out;
+	written = result;
 	iocb->ki_pos += written;
 
 	/* Return error values */
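
Note on the fs/nfs/file.c hunk above: generic_write_sync() returns either the byte count or a negative errno, and the old code folded that straight into the unsigned running total, turning errors into bogus write counts. A minimal user-space sketch of the bug class and the fixed shape (sync_helper() and every other name here is an invented stand-in, not the kernel API):

#include <stdio.h>

/* Toy model: sync_helper() returns either a byte count or a negative
 * errno, so its result must be re-checked before being folded into the
 * unsigned running total. All names are illustrative. */
static long sync_helper(long nbytes)
{
	return nbytes > 64 ? -5 /* pretend -EIO */ : nbytes;
}

static long do_write(long nbytes)
{
	size_t written;
	long result = sync_helper(nbytes);

	if (result < 0)
		return result;	/* propagate the error instead of counting it */
	written = result;
	return (long)written;
}

int main(void)
{
	printf("%ld %ld\n", do_write(10), do_write(100));	/* 10 -5 */
	return 0;
}
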
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
index e6206eaf2bdf..51b51369704c 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.c
+++ b/fs/nfs/flexfilelayout/flexfilelayout.c
@@ -37,6 +37,7 @@ ff_layout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags)
 	if (ffl) {
 		INIT_LIST_HEAD(&ffl->error_list);
 		INIT_LIST_HEAD(&ffl->mirrors);
+		ffl->last_report_time = ktime_get();
 		return &ffl->generic_hdr;
 	} else
 		return NULL;
@@ -640,19 +641,18 @@ nfs4_ff_layoutstat_start_io(struct nfs4_ff_layout_mirror *mirror,
 {
 	static const ktime_t notime = {0};
 	s64 report_interval = FF_LAYOUTSTATS_REPORT_INTERVAL;
+	struct nfs4_flexfile_layout *ffl = FF_LAYOUT_FROM_HDR(mirror->layout);
 
 	nfs4_ff_start_busy_timer(&layoutstat->busy_timer, now);
 	if (ktime_equal(mirror->start_time, notime))
 		mirror->start_time = now;
-	if (ktime_equal(mirror->last_report_time, notime))
-		mirror->last_report_time = now;
 	if (mirror->report_interval != 0)
 		report_interval = (s64)mirror->report_interval * 1000LL;
 	else if (layoutstats_timer != 0)
 		report_interval = (s64)layoutstats_timer * 1000LL;
-	if (ktime_to_ms(ktime_sub(now, mirror->last_report_time)) >=
+	if (ktime_to_ms(ktime_sub(now, ffl->last_report_time)) >=
 			report_interval) {
-		mirror->last_report_time = now;
+		ffl->last_report_time = now;
 		return true;
 	}
 
@@ -806,11 +806,14 @@ ff_layout_choose_best_ds_for_read(struct pnfs_layout_segment *lseg,
 {
 	struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
 	struct nfs4_pnfs_ds *ds;
+	bool fail_return = false;
 	int idx;
 
 	/* mirrors are sorted by efficiency */
 	for (idx = start_idx; idx < fls->mirror_array_cnt; idx++) {
-		ds = nfs4_ff_layout_prepare_ds(lseg, idx, false);
+		if (idx+1 == fls->mirror_array_cnt)
+			fail_return = true;
+		ds = nfs4_ff_layout_prepare_ds(lseg, idx, fail_return);
 		if (ds) {
 			*best_idx = idx;
 			return ds;
@@ -859,6 +862,7 @@ ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio,
 	struct nfs4_pnfs_ds *ds;
 	int ds_idx;
 
+retry:
 	/* Use full layout for now */
 	if (!pgio->pg_lseg)
 		ff_layout_pg_get_read(pgio, req, false);
@@ -871,10 +875,13 @@ ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio,
 
 	ds = ff_layout_choose_best_ds_for_read(pgio->pg_lseg, 0, &ds_idx);
 	if (!ds) {
-		if (ff_layout_no_fallback_to_mds(pgio->pg_lseg))
-			goto out_pnfs;
-		else
+		if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
 			goto out_mds;
+		pnfs_put_lseg(pgio->pg_lseg);
+		pgio->pg_lseg = NULL;
+		/* Sleep for 1 second before retrying */
+		ssleep(1);
+		goto retry;
 	}
 
 	mirror = FF_LAYOUT_COMP(pgio->pg_lseg, ds_idx);
@@ -890,12 +897,6 @@ out_mds:
 	pnfs_put_lseg(pgio->pg_lseg);
 	pgio->pg_lseg = NULL;
 	nfs_pageio_reset_read_mds(pgio);
-	return;
-
-out_pnfs:
-	pnfs_set_lo_fail(pgio->pg_lseg);
-	pnfs_put_lseg(pgio->pg_lseg);
-	pgio->pg_lseg = NULL;
 }
 
 static void
@@ -909,6 +910,7 @@ ff_layout_pg_init_write(struct nfs_pageio_descriptor *pgio,
 	int i;
 	int status;
 
+retry:
 	if (!pgio->pg_lseg) {
 		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
 						   req->wb_context,
@@ -940,10 +942,13 @@ ff_layout_pg_init_write(struct nfs_pageio_descriptor *pgio,
 	for (i = 0; i < pgio->pg_mirror_count; i++) {
 		ds = nfs4_ff_layout_prepare_ds(pgio->pg_lseg, i, true);
 		if (!ds) {
-			if (ff_layout_no_fallback_to_mds(pgio->pg_lseg))
-				goto out_pnfs;
-			else
+			if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
 				goto out_mds;
+			pnfs_put_lseg(pgio->pg_lseg);
+			pgio->pg_lseg = NULL;
+			/* Sleep for 1 second before retrying */
+			ssleep(1);
+			goto retry;
 		}
 		pgm = &pgio->pg_mirrors[i];
 		mirror = FF_LAYOUT_COMP(pgio->pg_lseg, i);
@@ -956,12 +961,6 @@ out_mds:
 	pnfs_put_lseg(pgio->pg_lseg);
 	pgio->pg_lseg = NULL;
 	nfs_pageio_reset_write_mds(pgio);
-	return;
-
-out_pnfs:
-	pnfs_set_lo_fail(pgio->pg_lseg);
-	pnfs_put_lseg(pgio->pg_lseg);
-	pgio->pg_lseg = NULL;
 }
 
 static unsigned int
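
Note on the ff_layout_pg_init_read/write hunks above: the out_pnfs failure path is replaced by a retry loop that drops the stale layout segment, sleeps for a second, and re-drives the lookup from the top. A stand-alone sketch of that goto-based retry shape, with try_pick() as an invented stand-in for nfs4_ff_layout_prepare_ds():

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* Invented: pretend the data server becomes usable on the third try. */
static bool try_pick(int attempt)
{
	return attempt >= 2;
}

int main(void)
{
	int attempt = 0;

retry:
	if (!try_pick(attempt)) {
		attempt++;
		/* Sleep for 1 second before retrying */
		sleep(1);
		goto retry;
	}
	printf("data server ready after %d retries\n", attempt);
	return 0;
}
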
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.h b/fs/nfs/flexfilelayout/flexfilelayout.h
index 1bcdb15d0c41..3ee0c9fcea76 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.h
+++ b/fs/nfs/flexfilelayout/flexfilelayout.h
@@ -84,7 +84,6 @@ struct nfs4_ff_layout_mirror {
 	struct nfs4_ff_layoutstat read_stat;
 	struct nfs4_ff_layoutstat write_stat;
 	ktime_t start_time;
-	ktime_t last_report_time;
 	u32 report_interval;
 };
 
@@ -101,6 +100,7 @@ struct nfs4_flexfile_layout {
 	struct pnfs_ds_commit_info commit_info;
 	struct list_head mirrors;
 	struct list_head error_list; /* nfs4_ff_layout_ds_err */
+	ktime_t last_report_time; /* Layoutstat report times */
 };
 
 static inline struct nfs4_flexfile_layout *
diff --git a/fs/nfs/flexfilelayout/flexfilelayoutdev.c b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
index 0aa36be71fce..f7a3f6b05369 100644
--- a/fs/nfs/flexfilelayout/flexfilelayoutdev.c
+++ b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
@@ -17,8 +17,8 @@
 
 #define NFSDBG_FACILITY NFSDBG_PNFS_LD
 
-static unsigned int dataserver_timeo = NFS4_DEF_DS_TIMEO;
-static unsigned int dataserver_retrans = NFS4_DEF_DS_RETRANS;
+static unsigned int dataserver_timeo = NFS_DEF_TCP_RETRANS;
+static unsigned int dataserver_retrans;
 
 void nfs4_ff_layout_put_deviceid(struct nfs4_ff_layout_ds *mirror_ds)
 {
@@ -379,7 +379,7 @@ nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx,
 
 	devid = &mirror->mirror_ds->id_node;
 	if (ff_layout_test_devid_unavailable(devid))
-		goto out;
+		goto out_fail;
 
 	ds = mirror->mirror_ds->ds;
 	/* matching smp_wmb() in _nfs4_pnfs_v3/4_ds_connect */
@@ -405,15 +405,16 @@ nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx,
 			mirror->mirror_ds->ds_versions[0].rsize = max_payload;
 		if (mirror->mirror_ds->ds_versions[0].wsize > max_payload)
 			mirror->mirror_ds->ds_versions[0].wsize = max_payload;
-	} else {
-		ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout),
-					 mirror, lseg->pls_range.offset,
-					 lseg->pls_range.length, NFS4ERR_NXIO,
-					 OP_ILLEGAL, GFP_NOIO);
-		if (fail_return || !ff_layout_has_available_ds(lseg))
-			pnfs_error_mark_layout_for_return(ino, lseg);
-		ds = NULL;
+		goto out;
 	}
+	ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout),
+				 mirror, lseg->pls_range.offset,
+				 lseg->pls_range.length, NFS4ERR_NXIO,
+				 OP_ILLEGAL, GFP_NOIO);
+out_fail:
+	if (fail_return || !ff_layout_has_available_ds(lseg))
+		pnfs_error_mark_layout_for_return(ino, lseg);
+	ds = NULL;
 out:
 	return ds;
 }
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 7ce5e023c3c3..74935a19e4bf 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -58,6 +58,9 @@ struct nfs_clone_mount {
  */
 #define NFS_UNSPEC_PORT		(-1)
 
+#define NFS_UNSPEC_RETRANS	(UINT_MAX)
+#define NFS_UNSPEC_TIMEO	(UINT_MAX)
+
 /*
  * Maximum number of pages that readdir can use for creating
  * a vmapped array of pages.
@@ -156,7 +159,7 @@ struct nfs_client *nfs_get_client(const struct nfs_client_initdata *,
 int nfs_probe_fsinfo(struct nfs_server *server, struct nfs_fh *, struct nfs_fattr *);
 void nfs_server_insert_lists(struct nfs_server *);
 void nfs_server_remove_lists(struct nfs_server *);
-void nfs_init_timeout_values(struct rpc_timeout *, int, unsigned int, unsigned int);
+void nfs_init_timeout_values(struct rpc_timeout *to, int proto, int timeo, int retrans);
 int nfs_init_server_rpcclient(struct nfs_server *, const struct rpc_timeout *t,
 			      rpc_authflavor_t);
 struct nfs_server *nfs_alloc_server(void);
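
Note on NFS_UNSPEC_RETRANS/NFS_UNSPEC_TIMEO: 0 is a legal user-supplied mount value, so "option not given" needs a sentinel outside the valid range, which nfs_init_timeout_values() later replaces with the per-transport default. A hedged sketch of the sentinel idea (the names and the default of 2 are illustrative, not the kernel's values):

#include <limits.h>
#include <stdio.h>

#define UNSPEC_RETRANS	(UINT_MAX)	/* "not specified" sentinel */
#define DEFAULT_RETRANS	2		/* pretend transport default */

static unsigned int effective_retrans(unsigned int opt)
{
	return opt == UNSPEC_RETRANS ? DEFAULT_RETRANS : opt;
}

int main(void)
{
	printf("%u %u\n", effective_retrans(UNSPEC_RETRANS),	/* 2 */
	       effective_retrans(0));				/* 0 stays 0 */
	return 0;
}
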
diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
index 33da841a21bb..64b43b4ad9dd 100644
--- a/fs/nfs/nfs42proc.c
+++ b/fs/nfs/nfs42proc.c
@@ -318,10 +318,22 @@ static void
 nfs42_layoutstat_prepare(struct rpc_task *task, void *calldata)
 {
 	struct nfs42_layoutstat_data *data = calldata;
-	struct nfs_server *server = NFS_SERVER(data->args.inode);
+	struct inode *inode = data->inode;
+	struct nfs_server *server = NFS_SERVER(inode);
+	struct pnfs_layout_hdr *lo;
 
+	spin_lock(&inode->i_lock);
+	lo = NFS_I(inode)->layout;
+	if (!pnfs_layout_is_valid(lo)) {
+		spin_unlock(&inode->i_lock);
+		rpc_exit(task, 0);
+		return;
+	}
+	nfs4_stateid_copy(&data->args.stateid, &lo->plh_stateid);
+	spin_unlock(&inode->i_lock);
 	nfs41_setup_sequence(nfs4_get_session(server), &data->args.seq_args,
 			     &data->res.seq_res, task);
+
 }
 
 static void
@@ -338,12 +350,14 @@ nfs42_layoutstat_done(struct rpc_task *task, void *calldata)
 	case 0:
 		break;
 	case -NFS4ERR_EXPIRED:
+	case -NFS4ERR_ADMIN_REVOKED:
+	case -NFS4ERR_DELEG_REVOKED:
 	case -NFS4ERR_STALE_STATEID:
-	case -NFS4ERR_OLD_STATEID:
 	case -NFS4ERR_BAD_STATEID:
 		spin_lock(&inode->i_lock);
 		lo = NFS_I(inode)->layout;
-		if (lo && nfs4_stateid_match(&data->args.stateid,
+		if (pnfs_layout_is_valid(lo) &&
+		    nfs4_stateid_match(&data->args.stateid,
 					     &lo->plh_stateid)) {
 			LIST_HEAD(head);
 
@@ -357,11 +371,23 @@ nfs42_layoutstat_done(struct rpc_task *task, void *calldata)
 		} else
 			spin_unlock(&inode->i_lock);
 		break;
+	case -NFS4ERR_OLD_STATEID:
+		spin_lock(&inode->i_lock);
+		lo = NFS_I(inode)->layout;
+		if (pnfs_layout_is_valid(lo) &&
+		    nfs4_stateid_match_other(&data->args.stateid,
+					&lo->plh_stateid)) {
+			/* Do we need to delay before resending? */
+			if (!nfs4_stateid_is_newer(&lo->plh_stateid,
+					&data->args.stateid))
+				rpc_delay(task, HZ);
+			rpc_restart_call_prepare(task);
+		}
+		spin_unlock(&inode->i_lock);
+		break;
 	case -ENOTSUPP:
 	case -EOPNOTSUPP:
 		NFS_SERVER(inode)->caps &= ~NFS_CAP_LAYOUTSTATS;
-	default:
-		break;
 	}
 
 	dprintk("%s server returns %d\n", __func__, task->tk_status);
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 324bfdc21250..9bf64eacba5b 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -396,6 +396,10 @@ extern void nfs4_schedule_state_renewal(struct nfs_client *);
 extern void nfs4_renewd_prepare_shutdown(struct nfs_server *);
 extern void nfs4_kill_renewd(struct nfs_client *);
 extern void nfs4_renew_state(struct work_struct *);
+extern void nfs4_set_lease_period(struct nfs_client *clp,
+		unsigned long lease,
+		unsigned long lastrenewed);
+
 
 /* nfs4state.c */
 struct rpc_cred *nfs4_get_clid_cred(struct nfs_client *clp);
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index 8d7d08d4f95f..cd3b7cfdde16 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -817,6 +817,11 @@ static int nfs4_set_client(struct nfs_server *server,
 		goto error;
 	}
 
+	if (server->nfs_client == clp) {
+		error = -ELOOP;
+		goto error;
+	}
+
 	/*
 	 * Query for the lease time on clientid setup or renewal
 	 *
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index a036e93bdf96..a9dec32ba9ba 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -634,15 +634,11 @@ out_sleep:
 }
 EXPORT_SYMBOL_GPL(nfs40_setup_sequence);
 
-static int nfs40_sequence_done(struct rpc_task *task,
-			       struct nfs4_sequence_res *res)
+static void nfs40_sequence_free_slot(struct nfs4_sequence_res *res)
 {
 	struct nfs4_slot *slot = res->sr_slot;
 	struct nfs4_slot_table *tbl;
 
-	if (slot == NULL)
-		goto out;
-
 	tbl = slot->table;
 	spin_lock(&tbl->slot_tbl_lock);
 	if (!nfs41_wake_and_assign_slot(tbl, slot))
@@ -650,7 +646,13 @@ static int nfs40_sequence_done(struct rpc_task *task,
 	spin_unlock(&tbl->slot_tbl_lock);
 
 	res->sr_slot = NULL;
-out:
+}
+
+static int nfs40_sequence_done(struct rpc_task *task,
+			       struct nfs4_sequence_res *res)
+{
+	if (res->sr_slot != NULL)
+		nfs40_sequence_free_slot(res);
 	return 1;
 }
 
@@ -666,6 +668,11 @@ static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
 	tbl = slot->table;
 	session = tbl->session;
 
+	/* Bump the slot sequence number */
+	if (slot->seq_done)
+		slot->seq_nr++;
+	slot->seq_done = 0;
+
 	spin_lock(&tbl->slot_tbl_lock);
 	/* Be nice to the server: try to ensure that the last transmitted
 	 * value for highest_user_slotid <= target_highest_slotid
@@ -686,9 +693,12 @@ out_unlock:
 	res->sr_slot = NULL;
 	if (send_new_highest_used_slotid)
 		nfs41_notify_server(session->clp);
+	if (waitqueue_active(&tbl->slot_waitq))
+		wake_up_all(&tbl->slot_waitq);
 }
 
-int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
+static int nfs41_sequence_process(struct rpc_task *task,
+		struct nfs4_sequence_res *res)
 {
 	struct nfs4_session *session;
 	struct nfs4_slot *slot = res->sr_slot;
@@ -714,7 +724,7 @@ int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
 	switch (res->sr_status) {
 	case 0:
 		/* Update the slot's sequence and clientid lease timer */
-		++slot->seq_nr;
+		slot->seq_done = 1;
 		clp = session->clp;
 		do_renew_lease(clp, res->sr_timestamp);
 		/* Check sequence flags */
@@ -769,16 +779,16 @@ int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
 		goto retry_nowait;
 	default:
 		/* Just update the slot sequence no. */
-		++slot->seq_nr;
+		slot->seq_done = 1;
 	}
 out:
 	/* The session may be reset by one of the error handlers. */
 	dprintk("%s: Error %d free the slot \n", __func__, res->sr_status);
-	nfs41_sequence_free_slot(res);
 out_noaction:
 	return ret;
 retry_nowait:
 	if (rpc_restart_call_prepare(task)) {
+		nfs41_sequence_free_slot(res);
 		task->tk_status = 0;
 		ret = 0;
 	}
@@ -789,8 +799,37 @@ out_retry:
 	rpc_delay(task, NFS4_POLL_RETRY_MAX);
 	return 0;
 }
+
+int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
+{
+	if (!nfs41_sequence_process(task, res))
+		return 0;
+	if (res->sr_slot != NULL)
+		nfs41_sequence_free_slot(res);
+	return 1;
+
+}
 EXPORT_SYMBOL_GPL(nfs41_sequence_done);
 
+static int nfs4_sequence_process(struct rpc_task *task, struct nfs4_sequence_res *res)
+{
+	if (res->sr_slot == NULL)
+		return 1;
+	if (res->sr_slot->table->session != NULL)
+		return nfs41_sequence_process(task, res);
+	return nfs40_sequence_done(task, res);
+}
+
+static void nfs4_sequence_free_slot(struct nfs4_sequence_res *res)
+{
+	if (res->sr_slot != NULL) {
+		if (res->sr_slot->table->session != NULL)
+			nfs41_sequence_free_slot(res);
+		else
+			nfs40_sequence_free_slot(res);
+	}
+}
+
 int nfs4_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
 {
 	if (res->sr_slot == NULL)
@@ -920,6 +959,17 @@ static int nfs4_setup_sequence(const struct nfs_server *server,
 				    args, res, task);
 }
 
+static int nfs4_sequence_process(struct rpc_task *task, struct nfs4_sequence_res *res)
+{
+	return nfs40_sequence_done(task, res);
+}
+
+static void nfs4_sequence_free_slot(struct nfs4_sequence_res *res)
+{
+	if (res->sr_slot != NULL)
+		nfs40_sequence_free_slot(res);
+}
+
 int nfs4_sequence_done(struct rpc_task *task,
 		       struct nfs4_sequence_res *res)
 {
@@ -1197,6 +1247,7 @@ static void nfs4_opendata_free(struct kref *kref)
 	struct super_block *sb = p->dentry->d_sb;
 
 	nfs_free_seqid(p->o_arg.seqid);
+	nfs4_sequence_free_slot(&p->o_res.seq_res);
 	if (p->state != NULL)
 		nfs4_put_open_state(p->state);
 	nfs4_put_state_owner(p->owner);
@@ -1656,9 +1707,14 @@ err:
 static struct nfs4_state *
 nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
 {
+	struct nfs4_state *ret;
+
 	if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS)
-		return _nfs4_opendata_reclaim_to_nfs4_state(data);
-	return _nfs4_opendata_to_nfs4_state(data);
+		ret =_nfs4_opendata_reclaim_to_nfs4_state(data);
+	else
+		ret = _nfs4_opendata_to_nfs4_state(data);
+	nfs4_sequence_free_slot(&data->o_res.seq_res);
+	return ret;
 }
 
 static struct nfs_open_context *nfs4_state_find_open_context(struct nfs4_state *state)
@@ -2056,7 +2112,7 @@ static void nfs4_open_done(struct rpc_task *task, void *calldata)
 
 	data->rpc_status = task->tk_status;
 
-	if (!nfs4_sequence_done(task, &data->o_res.seq_res))
+	if (!nfs4_sequence_process(task, &data->o_res.seq_res))
 		return;
 
 	if (task->tk_status == 0) {
@@ -4237,12 +4293,9 @@ static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, str
 		err = _nfs4_do_fsinfo(server, fhandle, fsinfo);
 		trace_nfs4_fsinfo(server, fhandle, fsinfo->fattr, err);
 		if (err == 0) {
-			struct nfs_client *clp = server->nfs_client;
-
-			spin_lock(&clp->cl_lock);
-			clp->cl_lease_time = fsinfo->lease_time * HZ;
-			clp->cl_last_renewal = now;
-			spin_unlock(&clp->cl_lock);
+			nfs4_set_lease_period(server->nfs_client,
+					fsinfo->lease_time * HZ,
+					now);
 			break;
 		}
 		err = nfs4_handle_exception(server, err, &exception);
@@ -7517,12 +7570,20 @@ static int _nfs4_proc_create_session(struct nfs_client *clp,
 	status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
 	trace_nfs4_create_session(clp, status);
 
+	switch (status) {
+	case -NFS4ERR_STALE_CLIENTID:
+	case -NFS4ERR_DELAY:
+	case -ETIMEDOUT:
+	case -EACCES:
+	case -EAGAIN:
+		goto out;
+	};
+
+	clp->cl_seqid++;
 	if (!status) {
 		/* Verify the session's negotiated channel_attrs values */
 		status = nfs4_verify_channel_attrs(&args, &res);
 		/* Increment the clientid slot sequence id */
-		if (clp->cl_seqid == res.seqid)
-			clp->cl_seqid++;
 		if (status)
 			goto out;
 		nfs4_update_session(session, &res);
@@ -7867,7 +7928,7 @@ static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
 	struct nfs4_layoutget *lgp = calldata;
 
 	dprintk("--> %s\n", __func__);
-	nfs41_sequence_done(task, &lgp->res.seq_res);
+	nfs41_sequence_process(task, &lgp->res.seq_res);
 	dprintk("<-- %s\n", __func__);
 }
 
@@ -8083,6 +8144,7 @@ nfs4_proc_layoutget(struct nfs4_layoutget *lgp, long *timeout, gfp_t gfp_flags)
 	/* if layoutp->len is 0, nfs4_layoutget_prepare called rpc_exit */
 	if (status == 0 && lgp->res.layoutp->len)
 		lseg = pnfs_layout_process(lgp);
+	nfs4_sequence_free_slot(&lgp->res.seq_res);
 	rpc_put_task(task);
 	dprintk("<-- %s status=%d\n", __func__, status);
 	if (status)
@@ -8109,7 +8171,7 @@ static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
 
 	dprintk("--> %s\n", __func__);
 
-	if (!nfs41_sequence_done(task, &lrp->res.seq_res))
+	if (!nfs41_sequence_process(task, &lrp->res.seq_res))
 		return;
 
 	server = NFS_SERVER(lrp->args.inode);
@@ -8121,6 +8183,7 @@ static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
 	case -NFS4ERR_DELAY:
 		if (nfs4_async_handle_error(task, server, NULL, NULL) != -EAGAIN)
 			break;
+		nfs4_sequence_free_slot(&lrp->res.seq_res);
 		rpc_restart_call_prepare(task);
 		return;
 	}
@@ -8135,12 +8198,16 @@ static void nfs4_layoutreturn_release(void *calldata)
 
 	dprintk("--> %s\n", __func__);
 	spin_lock(&lo->plh_inode->i_lock);
-	pnfs_mark_matching_lsegs_invalid(lo, &freeme, &lrp->args.range,
-			be32_to_cpu(lrp->args.stateid.seqid));
-	if (lrp->res.lrs_present && pnfs_layout_is_valid(lo))
+	if (lrp->res.lrs_present) {
+		pnfs_mark_matching_lsegs_invalid(lo, &freeme,
+				&lrp->args.range,
+				be32_to_cpu(lrp->args.stateid.seqid));
 		pnfs_set_layout_stateid(lo, &lrp->res.stateid, true);
+	} else
+		pnfs_mark_layout_stateid_invalid(lo, &freeme);
 	pnfs_clear_layoutreturn_waitbit(lo);
 	spin_unlock(&lo->plh_inode->i_lock);
+	nfs4_sequence_free_slot(&lrp->res.seq_res);
 	pnfs_free_lseg_list(&freeme);
 	pnfs_put_layout_hdr(lrp->args.layout);
 	nfs_iput_and_deactive(lrp->inode);
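
Note on the nfs4proc.c changes above: the core move is splitting nfs4*_sequence_done() into a "process the reply" half and a "free the slot" half, so callers such as OPEN and LAYOUTGET can hold their session slot until the result has actually been consumed. A toy model of that split, with invented types standing in for the slot machinery:

#include <stdbool.h>
#include <stdio.h>

/* Invented types: res->slot models res->sr_slot. */
struct seq_res {
	int *slot;
};

static bool sequence_process(struct seq_res *res)
{
	return res->slot != NULL;	/* pretend the reply was valid */
}

static void sequence_free_slot(struct seq_res *res)
{
	res->slot = NULL;		/* hand the slot back to the table */
}

/* The default done() does both; a caller that needs the slot longer
 * calls sequence_process() now and sequence_free_slot() later. */
static int sequence_done(struct seq_res *res)
{
	if (!sequence_process(res))
		return 0;
	if (res->slot != NULL)
		sequence_free_slot(res);
	return 1;
}

int main(void)
{
	int slot = 7;
	struct seq_res res = { &slot };

	printf("done=%d slot=%p\n", sequence_done(&res), (void *)res.slot);
	return 0;
}
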
diff --git a/fs/nfs/nfs4renewd.c b/fs/nfs/nfs4renewd.c
index e1ba58c3d1ad..82e77198d17e 100644
--- a/fs/nfs/nfs4renewd.c
+++ b/fs/nfs/nfs4renewd.c
@@ -136,6 +136,26 @@ nfs4_kill_renewd(struct nfs_client *clp)
 	cancel_delayed_work_sync(&clp->cl_renewd);
 }
 
+/**
+ * nfs4_set_lease_period - Sets the lease period on a nfs_client
+ *
+ * @clp: pointer to nfs_client
+ * @lease: new value for lease period
+ * @lastrenewed: time at which lease was last renewed
+ */
+void nfs4_set_lease_period(struct nfs_client *clp,
+		unsigned long lease,
+		unsigned long lastrenewed)
+{
+	spin_lock(&clp->cl_lock);
+	clp->cl_lease_time = lease;
+	clp->cl_last_renewal = lastrenewed;
+	spin_unlock(&clp->cl_lock);
+
+	/* Cap maximum reconnect timeout at 1/2 lease period */
+	rpc_cap_max_reconnect_timeout(clp->cl_rpcclient, lease >> 1);
+}
+
 /*
  * Local variables:
  * c-basic-offset: 8
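
Note on nfs4_set_lease_period() above: it centralises lease bookkeeping and, as a side effect, caps the RPC reconnect timeout at half the lease so a client that loses its transport can still renew in time. A toy model of the helper (locking elided; the struct and field names are invented):

#include <stdio.h>

struct client {
	unsigned long lease_time;
	unsigned long last_renewal;
	unsigned long reconnect_cap;
};

static void set_lease_period(struct client *clp, unsigned long lease,
			     unsigned long lastrenewed)
{
	clp->lease_time = lease;
	clp->last_renewal = lastrenewed;
	/* Cap maximum reconnect timeout at 1/2 lease period */
	clp->reconnect_cap = lease >> 1;
}

int main(void)
{
	struct client c;

	set_lease_period(&c, 90, 12345);
	printf("lease=%lu cap=%lu\n", c.lease_time, c.reconnect_cap);
	return 0;
}
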
diff --git a/fs/nfs/nfs4session.c b/fs/nfs/nfs4session.c
index 332d06e64fa9..b62973045a3e 100644
--- a/fs/nfs/nfs4session.c
+++ b/fs/nfs/nfs4session.c
@@ -28,6 +28,7 @@ static void nfs4_init_slot_table(struct nfs4_slot_table *tbl, const char *queue)
 	tbl->highest_used_slotid = NFS4_NO_SLOT;
 	spin_lock_init(&tbl->slot_tbl_lock);
 	rpc_init_priority_wait_queue(&tbl->slot_tbl_waitq, queue);
+	init_waitqueue_head(&tbl->slot_waitq);
 	init_completion(&tbl->complete);
 }
 
@@ -172,6 +173,58 @@ struct nfs4_slot *nfs4_lookup_slot(struct nfs4_slot_table *tbl, u32 slotid)
 	return ERR_PTR(-E2BIG);
 }
 
+static int nfs4_slot_get_seqid(struct nfs4_slot_table *tbl, u32 slotid,
+		u32 *seq_nr)
+	__must_hold(&tbl->slot_tbl_lock)
+{
+	struct nfs4_slot *slot;
+
+	slot = nfs4_lookup_slot(tbl, slotid);
+	if (IS_ERR(slot))
+		return PTR_ERR(slot);
+	*seq_nr = slot->seq_nr;
+	return 0;
+}
+
+/*
+ * nfs4_slot_seqid_in_use - test if a slot sequence id is still in use
+ *
+ * Given a slot table, slot id and sequence number, determine if the
+ * RPC call in question is still in flight. This function is mainly
+ * intended for use by the callback channel.
+ */
+static bool nfs4_slot_seqid_in_use(struct nfs4_slot_table *tbl,
+		u32 slotid, u32 seq_nr)
+{
+	u32 cur_seq;
+	bool ret = false;
+
+	spin_lock(&tbl->slot_tbl_lock);
+	if (nfs4_slot_get_seqid(tbl, slotid, &cur_seq) == 0 &&
+	    cur_seq == seq_nr && test_bit(slotid, tbl->used_slots))
+		ret = true;
+	spin_unlock(&tbl->slot_tbl_lock);
+	return ret;
+}
+
+/*
+ * nfs4_slot_wait_on_seqid - wait until a slot sequence id is complete
+ *
+ * Given a slot table, slot id and sequence number, wait until the
+ * corresponding RPC call completes. This function is mainly
+ * intended for use by the callback channel.
+ */
+int nfs4_slot_wait_on_seqid(struct nfs4_slot_table *tbl,
+		u32 slotid, u32 seq_nr,
+		unsigned long timeout)
+{
+	if (wait_event_timeout(tbl->slot_waitq,
+			!nfs4_slot_seqid_in_use(tbl, slotid, seq_nr),
+			timeout) == 0)
+		return -ETIMEDOUT;
+	return 0;
+}
+
 /*
  * nfs4_alloc_slot - efficiently look for a free slot
  *
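
Note on nfs4_slot_wait_on_seqid() above: a (slot id, sequence number) pair names one RPC, and the call is "in flight" while the slot is allocated and still carries that sequence number; wait_event_timeout() blocks on that predicate, and nfs41_sequence_free_slot() wakes the waiters. A stand-alone sketch of just the predicate, with invented types:

#include <stdbool.h>
#include <stdio.h>

struct slot {
	unsigned int seq_nr;
	bool used;
};

static bool seqid_in_use(const struct slot *s, unsigned int seq_nr)
{
	return s->used && s->seq_nr == seq_nr;
}

int main(void)
{
	struct slot s = { .seq_nr = 5, .used = true };

	printf("%d\n", seqid_in_use(&s, 5));	/* 1: call still in flight */
	s.used = false;				/* freeing the slot wakes waiters */
	printf("%d\n", seqid_in_use(&s, 5));	/* 0: callback may proceed */
	return 0;
}
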
diff --git a/fs/nfs/nfs4session.h b/fs/nfs/nfs4session.h
index 5b51298d1d03..f703b755351b 100644
--- a/fs/nfs/nfs4session.h
+++ b/fs/nfs/nfs4session.h
@@ -21,7 +21,8 @@ struct nfs4_slot {
 	unsigned long	generation;
 	u32		slot_nr;
 	u32		seq_nr;
-	unsigned int	interrupted : 1;
+	unsigned int	interrupted : 1,
+			seq_done : 1;
 };
 
 /* Sessions */
@@ -36,6 +37,7 @@ struct nfs4_slot_table {
 	unsigned long	used_slots[SLOT_TABLE_SZ];	/* used/unused bitmap */
 	spinlock_t	slot_tbl_lock;
 	struct rpc_wait_queue	slot_tbl_waitq;	/* allocators may wait here */
+	wait_queue_head_t	slot_waitq;	/* Completion wait on slot */
 	u32		max_slots;		/* # slots in table */
 	u32		max_slotid;		/* Max allowed slotid value */
 	u32		highest_used_slotid;	/* sent to server on each SEQ.
@@ -78,6 +80,9 @@ extern int nfs4_setup_slot_table(struct nfs4_slot_table *tbl,
 extern void nfs4_shutdown_slot_table(struct nfs4_slot_table *tbl);
 extern struct nfs4_slot *nfs4_alloc_slot(struct nfs4_slot_table *tbl);
 extern struct nfs4_slot *nfs4_lookup_slot(struct nfs4_slot_table *tbl, u32 slotid);
+extern int nfs4_slot_wait_on_seqid(struct nfs4_slot_table *tbl,
+		u32 slotid, u32 seq_nr,
+		unsigned long timeout);
 extern bool nfs4_try_to_lock_slot(struct nfs4_slot_table *tbl, struct nfs4_slot *slot);
 extern void nfs4_free_slot(struct nfs4_slot_table *tbl, struct nfs4_slot *slot);
 extern void nfs4_slot_tbl_drain_complete(struct nfs4_slot_table *tbl);
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 834b875900d6..cada00aa5096 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -277,20 +277,17 @@ static int nfs41_setup_state_renewal(struct nfs_client *clp)
 {
 	int status;
 	struct nfs_fsinfo fsinfo;
+	unsigned long now;
 
 	if (!test_bit(NFS_CS_CHECK_LEASE_TIME, &clp->cl_res_state)) {
 		nfs4_schedule_state_renewal(clp);
 		return 0;
 	}
 
+	now = jiffies;
 	status = nfs4_proc_get_lease_time(clp, &fsinfo);
 	if (status == 0) {
-		/* Update lease time and schedule renewal */
-		spin_lock(&clp->cl_lock);
-		clp->cl_lease_time = fsinfo.lease_time * HZ;
-		clp->cl_last_renewal = jiffies;
-		spin_unlock(&clp->cl_lock);
-
+		nfs4_set_lease_period(clp, fsinfo.lease_time * HZ, now);
 		nfs4_schedule_state_renewal(clp);
 	}
 
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 70806cae0d36..2c93a85eda51 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -365,7 +365,8 @@ pnfs_layout_remove_lseg(struct pnfs_layout_hdr *lo,
 	/* Matched by pnfs_get_layout_hdr in pnfs_layout_insert_lseg */
 	atomic_dec(&lo->plh_refcount);
 	if (list_empty(&lo->plh_segs)) {
-		set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
+		if (atomic_read(&lo->plh_outstanding) == 0)
+			set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
 		clear_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
 	}
 	rpc_wake_up(&NFS_SERVER(inode)->roc_rpcwaitq);
@@ -768,17 +769,32 @@ pnfs_destroy_all_layouts(struct nfs_client *clp)
 	pnfs_destroy_layouts_byclid(clp, false);
 }
 
+static void
+pnfs_clear_layoutreturn_info(struct pnfs_layout_hdr *lo)
+{
+	lo->plh_return_iomode = 0;
+	lo->plh_return_seq = 0;
+	clear_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags);
+}
+
 /* update lo->plh_stateid with new if is more recent */
 void
 pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new,
 			bool update_barrier)
 {
 	u32 oldseq, newseq, new_barrier = 0;
-	bool invalid = !pnfs_layout_is_valid(lo);
 
 	oldseq = be32_to_cpu(lo->plh_stateid.seqid);
 	newseq = be32_to_cpu(new->seqid);
-	if (invalid || pnfs_seqid_is_newer(newseq, oldseq)) {
+
+	if (!pnfs_layout_is_valid(lo)) {
+		nfs4_stateid_copy(&lo->plh_stateid, new);
+		lo->plh_barrier = newseq;
+		pnfs_clear_layoutreturn_info(lo);
+		clear_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
+		return;
+	}
+	if (pnfs_seqid_is_newer(newseq, oldseq)) {
 		nfs4_stateid_copy(&lo->plh_stateid, new);
 		/*
 		 * Because of wraparound, we want to keep the barrier
@@ -790,7 +806,7 @@ pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new,
 		new_barrier = be32_to_cpu(new->seqid);
 	else if (new_barrier == 0)
 		return;
-	if (invalid || pnfs_seqid_is_newer(new_barrier, lo->plh_barrier))
+	if (pnfs_seqid_is_newer(new_barrier, lo->plh_barrier))
 		lo->plh_barrier = new_barrier;
 }
 
@@ -886,19 +902,14 @@ void pnfs_clear_layoutreturn_waitbit(struct pnfs_layout_hdr *lo)
 	rpc_wake_up(&NFS_SERVER(lo->plh_inode)->roc_rpcwaitq);
 }
 
-static void
-pnfs_clear_layoutreturn_info(struct pnfs_layout_hdr *lo)
-{
-	lo->plh_return_iomode = 0;
-	lo->plh_return_seq = 0;
-	clear_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags);
-}
-
 static bool
 pnfs_prepare_layoutreturn(struct pnfs_layout_hdr *lo,
 		nfs4_stateid *stateid,
 		enum pnfs_iomode *iomode)
 {
+	/* Serialise LAYOUTGET/LAYOUTRETURN */
+	if (atomic_read(&lo->plh_outstanding) != 0)
+		return false;
 	if (test_and_set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags))
 		return false;
 	pnfs_get_layout_hdr(lo);
@@ -1555,6 +1566,7 @@ pnfs_update_layout(struct inode *ino,
 	}
 
 lookup_again:
+	nfs4_client_recover_expired_lease(clp);
 	first = false;
 	spin_lock(&ino->i_lock);
 	lo = pnfs_find_alloc_layout(ino, ctx, gfp_flags);
@@ -1797,16 +1809,11 @@ pnfs_layout_process(struct nfs4_layoutget *lgp)
 		 */
 		pnfs_mark_layout_stateid_invalid(lo, &free_me);
 
-		nfs4_stateid_copy(&lo->plh_stateid, &res->stateid);
-		lo->plh_barrier = be32_to_cpu(res->stateid.seqid);
+		pnfs_set_layout_stateid(lo, &res->stateid, true);
 	}
 
 	pnfs_get_lseg(lseg);
 	pnfs_layout_insert_lseg(lo, lseg, &free_me);
-	if (!pnfs_layout_is_valid(lo)) {
-		pnfs_clear_layoutreturn_info(lo);
-		clear_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
-	}
 
 
 	if (res->return_on_close)
@@ -2510,7 +2517,6 @@ pnfs_report_layoutstat(struct inode *inode, gfp_t gfp_flags)
 
 	data->args.fh = NFS_FH(inode);
 	data->args.inode = inode;
-	nfs4_stateid_copy(&data->args.stateid, &hdr->plh_stateid);
 	status = ld->prepare_layoutstats(&data->args);
 	if (status)
 		goto out_free;
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 18d446e1a82b..d39601381adf 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -923,6 +923,8 @@ static struct nfs_parsed_mount_data *nfs_alloc_parsed_mount_data(void)
 
 	data = kzalloc(sizeof(*data), GFP_KERNEL);
 	if (data) {
+		data->timeo		= NFS_UNSPEC_TIMEO;
+		data->retrans		= NFS_UNSPEC_RETRANS;
 		data->acregmin		= NFS_DEF_ACREGMIN;
 		data->acregmax		= NFS_DEF_ACREGMAX;
 		data->acdirmin		= NFS_DEF_ACDIRMIN;
@@ -1189,6 +1191,19 @@ static int nfs_get_option_ul(substring_t args[], unsigned long *option)
 	return rc;
 }
 
+static int nfs_get_option_ul_bound(substring_t args[], unsigned long *option,
+		unsigned long l_bound, unsigned long u_bound)
+{
+	int ret;
+
+	ret = nfs_get_option_ul(args, option);
+	if (ret != 0)
+		return ret;
+	if (*option < l_bound || *option > u_bound)
+		return -ERANGE;
+	return 0;
+}
+
 /*
  * Error-check and convert a string of mount options from user space into
  * a data structure. The whole mount string is processed; bad options are
@@ -1352,12 +1367,12 @@ static int nfs_parse_mount_options(char *raw,
 			mnt->bsize = option;
 			break;
 		case Opt_timeo:
-			if (nfs_get_option_ul(args, &option) || option == 0)
+			if (nfs_get_option_ul_bound(args, &option, 1, INT_MAX))
 				goto out_invalid_value;
 			mnt->timeo = option;
 			break;
 		case Opt_retrans:
-			if (nfs_get_option_ul(args, &option) || option == 0)
+			if (nfs_get_option_ul_bound(args, &option, 0, INT_MAX))
 				goto out_invalid_value;
 			mnt->retrans = option;
 			break;
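
Note on nfs_get_option_ul_bound() above: option parsing is narrowed from "any unsigned long except 0" to an explicit [lower, upper] range, so timeo=0 or a value above INT_MAX is rejected at mount time instead of overflowing later jiffies arithmetic. A hedged user-space sketch of the same parse-then-range-check helper (strtoul-based, not the kernel's match_token parser):

#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

static int get_option_ul_bound(const char *s, unsigned long *out,
			       unsigned long lo, unsigned long hi)
{
	char *end;

	errno = 0;
	*out = strtoul(s, &end, 10);
	if (errno || end == s || *end != '\0')
		return -EINVAL;
	if (*out < lo || *out > hi)
		return -ERANGE;
	return 0;
}

int main(void)
{
	unsigned long v;

	printf("%d\n", get_option_ul_bound("600", &v, 1, INT_MAX));	/* 0 */
	printf("%d\n", get_option_ul_bound("0", &v, 1, INT_MAX));	/* -ERANGE */
	return 0;
}
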
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 8410ca275db1..a204d7e109d4 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -4903,6 +4903,32 @@ nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 	return nfs_ok;
 }
 
+static __be32
+nfsd4_free_lock_stateid(stateid_t *stateid, struct nfs4_stid *s)
+{
+	struct nfs4_ol_stateid *stp = openlockstateid(s);
+	__be32 ret;
+
+	mutex_lock(&stp->st_mutex);
+
+	ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
+	if (ret)
+		goto out;
+
+	ret = nfserr_locks_held;
+	if (check_for_locks(stp->st_stid.sc_file,
+			    lockowner(stp->st_stateowner)))
+		goto out;
+
+	release_lock_stateid(stp);
+	ret = nfs_ok;
+
+out:
+	mutex_unlock(&stp->st_mutex);
+	nfs4_put_stid(s);
+	return ret;
+}
+
 __be32
 nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 		   struct nfsd4_free_stateid *free_stateid)
@@ -4910,7 +4936,6 @@ nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 	stateid_t *stateid = &free_stateid->fr_stateid;
 	struct nfs4_stid *s;
 	struct nfs4_delegation *dp;
-	struct nfs4_ol_stateid *stp;
 	struct nfs4_client *cl = cstate->session->se_client;
 	__be32 ret = nfserr_bad_stateid;
 
@@ -4929,18 +4954,9 @@ nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 		ret = nfserr_locks_held;
 		break;
 	case NFS4_LOCK_STID:
-		ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
-		if (ret)
-			break;
-		stp = openlockstateid(s);
-		ret = nfserr_locks_held;
-		if (check_for_locks(stp->st_stid.sc_file,
-				    lockowner(stp->st_stateowner)))
-			break;
-		WARN_ON(!unhash_lock_stateid(stp));
+		atomic_inc(&s->sc_count);
 		spin_unlock(&cl->cl_lock);
-		nfs4_put_stid(s);
-		ret = nfs_ok;
+		ret = nfsd4_free_lock_stateid(stateid, s);
 		goto out;
 	case NFS4_REVOKED_DELEG_STID:
 		dp = delegstateid(s);
@@ -5507,7 +5523,7 @@ static __be32
 lookup_or_create_lock_state(struct nfsd4_compound_state *cstate,
 			    struct nfs4_ol_stateid *ost,
 			    struct nfsd4_lock *lock,
-			    struct nfs4_ol_stateid **lst, bool *new)
+			    struct nfs4_ol_stateid **plst, bool *new)
 {
 	__be32 status;
 	struct nfs4_file *fi = ost->st_stid.sc_file;
@@ -5515,7 +5531,9 @@ lookup_or_create_lock_state(struct nfsd4_compound_state *cstate,
 	struct nfs4_client *cl = oo->oo_owner.so_client;
 	struct inode *inode = d_inode(cstate->current_fh.fh_dentry);
 	struct nfs4_lockowner *lo;
+	struct nfs4_ol_stateid *lst;
 	unsigned int strhashval;
+	bool hashed;
 
 	lo = find_lockowner_str(cl, &lock->lk_new_owner);
 	if (!lo) {
@@ -5531,12 +5549,27 @@ lookup_or_create_lock_state(struct nfsd4_compound_state *cstate,
 		goto out;
 	}
 
-	*lst = find_or_create_lock_stateid(lo, fi, inode, ost, new);
-	if (*lst == NULL) {
+retry:
+	lst = find_or_create_lock_stateid(lo, fi, inode, ost, new);
+	if (lst == NULL) {
 		status = nfserr_jukebox;
 		goto out;
 	}
+
+	mutex_lock(&lst->st_mutex);
+
+	/* See if it's still hashed to avoid race with FREE_STATEID */
+	spin_lock(&cl->cl_lock);
+	hashed = !list_empty(&lst->st_perfile);
+	spin_unlock(&cl->cl_lock);
+
+	if (!hashed) {
+		mutex_unlock(&lst->st_mutex);
+		nfs4_put_stid(&lst->st_stid);
+		goto retry;
+	}
 	status = nfs_ok;
+	*plst = lst;
 out:
 	nfs4_put_stateowner(&lo->lo_owner);
 	return status;
@@ -5603,8 +5636,6 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 			goto out;
 		status = lookup_or_create_lock_state(cstate, open_stp, lock,
 						     &lock_stp, &new);
-		if (status == nfs_ok)
-			mutex_lock(&lock_stp->st_mutex);
 	} else {
 		status = nfs4_preprocess_seqid_op(cstate,
 					lock->lk_old_lock_seqid,
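
Note on the lookup_or_create_lock_state() hunk above: it closes a race with FREE_STATEID by taking the stateid's mutex inside the lookup and then re-checking, under the client lock, that the stateid is still hashed; if not, the reference is dropped and the lookup retried. A toy model of that check-under-lock-and-retry shape (all types here are invented for illustration, and the locking itself is elided):

#include <stdbool.h>
#include <stdio.h>

struct stid {
	bool hashed;
};

/* Invented: the first lookup returns a stateid that lost the race. */
static struct stid *lookup_or_create(int attempt)
{
	static struct stid unhashed = { .hashed = false };
	static struct stid live = { .hashed = true };

	return attempt == 0 ? &unhashed : &live;
}

int main(void)
{
	struct stid *st;
	int attempt = 0;

retry:
	st = lookup_or_create(attempt);
	if (!st->hashed) {		/* raced with FREE_STATEID */
		attempt++;
		goto retry;
	}
	printf("got a live stateid after %d retries\n", attempt);
	return 0;
}
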
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index ba944123167b..ff476e654b8f 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -1252,10 +1252,13 @@ nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
 	if (IS_ERR(dchild))
 		return nfserrno(host_err);
 	err = fh_compose(resfhp, fhp->fh_export, dchild, fhp);
-	if (err) {
-		dput(dchild);
+	/*
+	 * We unconditionally drop our ref to dchild as fh_compose will have
+	 * already grabbed its own ref for it.
+	 */
+	dput(dchild);
+	if (err)
 		return err;
-	}
 	return nfsd_create_locked(rqstp, fhp, fname, flen, iap, type,
 				  rdev, resfhp);
 }
diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
index 54e5d6681786..43fdc2765aea 100644
--- a/fs/overlayfs/copy_up.c
+++ b/fs/overlayfs/copy_up.c
@@ -80,6 +80,8 @@ int ovl_copy_xattr(struct dentry *old, struct dentry *new)
 	}
 
 	for (name = buf; name < (buf + list_size); name += strlen(name) + 1) {
+		if (ovl_is_private_xattr(name))
+			continue;
 retry:
 		size = vfs_getxattr(old, name, value, value_size);
 		if (size == -ERANGE)
diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
index 12bcd07b9e32..1560fdc09a5f 100644
--- a/fs/overlayfs/dir.c
+++ b/fs/overlayfs/dir.c
@@ -12,6 +12,8 @@
 #include <linux/xattr.h>
 #include <linux/security.h>
 #include <linux/cred.h>
+#include <linux/posix_acl.h>
+#include <linux/posix_acl_xattr.h>
 #include "overlayfs.h"
 
 void ovl_cleanup(struct inode *wdir, struct dentry *wdentry)
@@ -186,6 +188,9 @@ static int ovl_create_upper(struct dentry *dentry, struct inode *inode,
 	struct dentry *newdentry;
 	int err;
 
+	if (!hardlink && !IS_POSIXACL(udir))
+		stat->mode &= ~current_umask();
+
 	inode_lock_nested(udir, I_MUTEX_PARENT);
 	newdentry = lookup_one_len(dentry->d_name.name, upperdir,
 				   dentry->d_name.len);
@@ -335,6 +340,32 @@ out_free:
 	return ret;
 }
 
+static int ovl_set_upper_acl(struct dentry *upperdentry, const char *name,
+			     const struct posix_acl *acl)
+{
+	void *buffer;
+	size_t size;
+	int err;
+
+	if (!IS_ENABLED(CONFIG_FS_POSIX_ACL) || !acl)
+		return 0;
+
+	size = posix_acl_to_xattr(NULL, acl, NULL, 0);
+	buffer = kmalloc(size, GFP_KERNEL);
+	if (!buffer)
+		return -ENOMEM;
+
+	size = posix_acl_to_xattr(&init_user_ns, acl, buffer, size);
+	err = size;
+	if (err < 0)
+		goto out_free;
+
+	err = vfs_setxattr(upperdentry, name, buffer, size, XATTR_CREATE);
+out_free:
+	kfree(buffer);
+	return err;
+}
+
 static int ovl_create_over_whiteout(struct dentry *dentry, struct inode *inode,
 				    struct kstat *stat, const char *link,
 				    struct dentry *hardlink)
@@ -346,10 +377,18 @@ static int ovl_create_over_whiteout(struct dentry *dentry, struct inode *inode,
 	struct dentry *upper;
 	struct dentry *newdentry;
 	int err;
+	struct posix_acl *acl, *default_acl;
 
 	if (WARN_ON(!workdir))
 		return -EROFS;
 
+	if (!hardlink) {
+		err = posix_acl_create(dentry->d_parent->d_inode,
+				       &stat->mode, &default_acl, &acl);
+		if (err)
+			return err;
+	}
+
 	err = ovl_lock_rename_workdir(workdir, upperdir);
 	if (err)
 		goto out;
@@ -384,6 +423,17 @@ static int ovl_create_over_whiteout(struct dentry *dentry, struct inode *inode,
 		if (err)
 			goto out_cleanup;
 	}
+	if (!hardlink) {
+		err = ovl_set_upper_acl(newdentry, XATTR_NAME_POSIX_ACL_ACCESS,
+					acl);
+		if (err)
+			goto out_cleanup;
+
+		err = ovl_set_upper_acl(newdentry, XATTR_NAME_POSIX_ACL_DEFAULT,
+					default_acl);
+		if (err)
+			goto out_cleanup;
+	}
 
 	if (!hardlink && S_ISDIR(stat->mode)) {
 		err = ovl_set_opaque(newdentry);
@@ -410,6 +460,10 @@ out_dput:
 out_unlock:
 	unlock_rename(workdir, upperdir);
 out:
+	if (!hardlink) {
+		posix_acl_release(acl);
+		posix_acl_release(default_acl);
+	}
 	return err;
 
 out_cleanup:
@@ -950,9 +1004,9 @@ const struct inode_operations ovl_dir_inode_operations = {
 	.permission = ovl_permission,
 	.getattr = ovl_dir_getattr,
 	.setxattr = generic_setxattr,
-	.getxattr = ovl_getxattr,
+	.getxattr = generic_getxattr,
 	.listxattr = ovl_listxattr,
-	.removexattr = ovl_removexattr,
+	.removexattr = generic_removexattr,
 	.get_acl = ovl_get_acl,
 	.update_time = ovl_update_time,
 };
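
Note on ovl_set_upper_acl() above: it uses the classic two-pass encoder pattern, calling posix_acl_to_xattr() once with a NULL buffer to learn the size, allocating, then encoding for real. A user-space sketch of the same pattern, with a hypothetical encode_acl() standing in for posix_acl_to_xattr():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Invented encoder: NULL buffer means "tell me the size". */
static int encode_acl(const char *acl, void *buf, size_t size)
{
	size_t need = strlen(acl) + 1;

	if (buf == NULL)
		return (int)need;	/* sizing pass */
	if (size < need)
		return -1;
	memcpy(buf, acl, need);	/* encoding pass */
	return (int)need;
}

int main(void)
{
	const char *acl = "u::rwx,g::r-x,o::r--";
	int size = encode_acl(acl, NULL, 0);
	void *buffer = malloc(size);

	if (!buffer)
		return 1;
	encode_acl(acl, buffer, size);
	printf("encoded %d bytes: %s\n", size, (char *)buffer);
	free(buffer);
	return 0;
}
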
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
index 1b885c156028..c75625c1efa3 100644
--- a/fs/overlayfs/inode.c
+++ b/fs/overlayfs/inode.c
@@ -10,6 +10,7 @@
10#include <linux/fs.h> 10#include <linux/fs.h>
11#include <linux/slab.h> 11#include <linux/slab.h>
12#include <linux/xattr.h> 12#include <linux/xattr.h>
13#include <linux/posix_acl.h>
13#include "overlayfs.h" 14#include "overlayfs.h"
14 15
15static int ovl_copy_up_truncate(struct dentry *dentry) 16static int ovl_copy_up_truncate(struct dentry *dentry)
@@ -191,32 +192,44 @@ static int ovl_readlink(struct dentry *dentry, char __user *buf, int bufsiz)
191 return err; 192 return err;
192} 193}
193 194
194static bool ovl_is_private_xattr(const char *name) 195bool ovl_is_private_xattr(const char *name)
195{ 196{
196#define OVL_XATTR_PRE_NAME OVL_XATTR_PREFIX "." 197 return strncmp(name, OVL_XATTR_PREFIX,
197 return strncmp(name, OVL_XATTR_PRE_NAME, 198 sizeof(OVL_XATTR_PREFIX) - 1) == 0;
198 sizeof(OVL_XATTR_PRE_NAME) - 1) == 0;
199} 199}
200 200
201int ovl_setxattr(struct dentry *dentry, struct inode *inode, 201int ovl_xattr_set(struct dentry *dentry, const char *name, const void *value,
202 const char *name, const void *value, 202 size_t size, int flags)
203 size_t size, int flags)
204{ 203{
205 int err; 204 int err;
206 struct dentry *upperdentry; 205 struct path realpath;
206 enum ovl_path_type type = ovl_path_real(dentry, &realpath);
207 const struct cred *old_cred; 207 const struct cred *old_cred;
208 208
209 err = ovl_want_write(dentry); 209 err = ovl_want_write(dentry);
210 if (err) 210 if (err)
211 goto out; 211 goto out;
212 212
213 if (!value && !OVL_TYPE_UPPER(type)) {
214 err = vfs_getxattr(realpath.dentry, name, NULL, 0);
215 if (err < 0)
216 goto out_drop_write;
217 }
218
213 err = ovl_copy_up(dentry); 219 err = ovl_copy_up(dentry);
214 if (err) 220 if (err)
215 goto out_drop_write; 221 goto out_drop_write;
216 222
217 upperdentry = ovl_dentry_upper(dentry); 223 if (!OVL_TYPE_UPPER(type))
224 ovl_path_upper(dentry, &realpath);
225
218 old_cred = ovl_override_creds(dentry->d_sb); 226 old_cred = ovl_override_creds(dentry->d_sb);
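	/* A NULL value means removal: generic_removexattr() funnels through this path with XATTR_REPLACE. */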
219 err = vfs_setxattr(upperdentry, name, value, size, flags); 227 if (value)
228 err = vfs_setxattr(realpath.dentry, name, value, size, flags);
229 else {
230 WARN_ON(flags != XATTR_REPLACE);
231 err = vfs_removexattr(realpath.dentry, name);
232 }
220 revert_creds(old_cred); 233 revert_creds(old_cred);
221 234
222out_drop_write: 235out_drop_write:
@@ -225,16 +238,13 @@ out:
225 return err; 238 return err;
226} 239}
227 240
228ssize_t ovl_getxattr(struct dentry *dentry, struct inode *inode, 241int ovl_xattr_get(struct dentry *dentry, const char *name,
229 const char *name, void *value, size_t size) 242 void *value, size_t size)
230{ 243{
231 struct dentry *realdentry = ovl_dentry_real(dentry); 244 struct dentry *realdentry = ovl_dentry_real(dentry);
232 ssize_t res; 245 ssize_t res;
233 const struct cred *old_cred; 246 const struct cred *old_cred;
234 247
235 if (ovl_is_private_xattr(name))
236 return -ENODATA;
237
238 old_cred = ovl_override_creds(dentry->d_sb); 248 old_cred = ovl_override_creds(dentry->d_sb);
239 res = vfs_getxattr(realdentry, name, value, size); 249 res = vfs_getxattr(realdentry, name, value, size);
240 revert_creds(old_cred); 250 revert_creds(old_cred);
@@ -245,7 +255,8 @@ ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size)
245{ 255{
246 struct dentry *realdentry = ovl_dentry_real(dentry); 256 struct dentry *realdentry = ovl_dentry_real(dentry);
247 ssize_t res; 257 ssize_t res;
248 int off; 258 size_t len;
259 char *s;
249 const struct cred *old_cred; 260 const struct cred *old_cred;
250 261
251 old_cred = ovl_override_creds(dentry->d_sb); 262 old_cred = ovl_override_creds(dentry->d_sb);
@@ -255,73 +266,39 @@ ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size)
255 return res; 266 return res;
256 267
257 /* filter out private xattrs */ 268 /* filter out private xattrs */
258 for (off = 0; off < res;) { 269 for (s = list, len = res; len;) {
259 char *s = list + off; 270 size_t slen = strnlen(s, len) + 1;
260 size_t slen = strlen(s) + 1;
261 271
262 BUG_ON(off + slen > res); 272 /* underlying fs providing us with a broken xattr list? */
273 if (WARN_ON(slen > len))
274 return -EIO;
263 275
276 len -= slen;
264 if (ovl_is_private_xattr(s)) { 277 if (ovl_is_private_xattr(s)) {
265 res -= slen; 278 res -= slen;
266 memmove(s, s + slen, res - off); 279 memmove(s, s + slen, len);
267 } else { 280 } else {
268 off += slen; 281 s += slen;
269 } 282 }
270 } 283 }
271 284
272 return res; 285 return res;
273} 286}
274 287
275int ovl_removexattr(struct dentry *dentry, const char *name)
276{
277 int err;
278 struct path realpath;
279 enum ovl_path_type type = ovl_path_real(dentry, &realpath);
280 const struct cred *old_cred;
281
282 err = ovl_want_write(dentry);
283 if (err)
284 goto out;
285
286 err = -ENODATA;
287 if (ovl_is_private_xattr(name))
288 goto out_drop_write;
289
290 if (!OVL_TYPE_UPPER(type)) {
291 err = vfs_getxattr(realpath.dentry, name, NULL, 0);
292 if (err < 0)
293 goto out_drop_write;
294
295 err = ovl_copy_up(dentry);
296 if (err)
297 goto out_drop_write;
298
299 ovl_path_upper(dentry, &realpath);
300 }
301
302 old_cred = ovl_override_creds(dentry->d_sb);
303 err = vfs_removexattr(realpath.dentry, name);
304 revert_creds(old_cred);
305out_drop_write:
306 ovl_drop_write(dentry);
307out:
308 return err;
309}
310
311struct posix_acl *ovl_get_acl(struct inode *inode, int type) 288struct posix_acl *ovl_get_acl(struct inode *inode, int type)
312{ 289{
313 struct inode *realinode = ovl_inode_real(inode, NULL); 290 struct inode *realinode = ovl_inode_real(inode, NULL);
314 const struct cred *old_cred; 291 const struct cred *old_cred;
315 struct posix_acl *acl; 292 struct posix_acl *acl;
316 293
317 if (!IS_POSIXACL(realinode)) 294 if (!IS_ENABLED(CONFIG_FS_POSIX_ACL) || !IS_POSIXACL(realinode))
318 return NULL; 295 return NULL;
319 296
320 if (!realinode->i_op->get_acl) 297 if (!realinode->i_op->get_acl)
321 return NULL; 298 return NULL;
322 299
323 old_cred = ovl_override_creds(inode->i_sb); 300 old_cred = ovl_override_creds(inode->i_sb);
324 acl = realinode->i_op->get_acl(realinode, type); 301 acl = get_acl(realinode, type);
325 revert_creds(old_cred); 302 revert_creds(old_cred);
326 303
327 return acl; 304 return acl;
@@ -391,9 +368,9 @@ static const struct inode_operations ovl_file_inode_operations = {
391 .permission = ovl_permission, 368 .permission = ovl_permission,
392 .getattr = ovl_getattr, 369 .getattr = ovl_getattr,
393 .setxattr = generic_setxattr, 370 .setxattr = generic_setxattr,
394 .getxattr = ovl_getxattr, 371 .getxattr = generic_getxattr,
395 .listxattr = ovl_listxattr, 372 .listxattr = ovl_listxattr,
396 .removexattr = ovl_removexattr, 373 .removexattr = generic_removexattr,
397 .get_acl = ovl_get_acl, 374 .get_acl = ovl_get_acl,
398 .update_time = ovl_update_time, 375 .update_time = ovl_update_time,
399}; 376};
@@ -404,9 +381,9 @@ static const struct inode_operations ovl_symlink_inode_operations = {
404 .readlink = ovl_readlink, 381 .readlink = ovl_readlink,
405 .getattr = ovl_getattr, 382 .getattr = ovl_getattr,
406 .setxattr = generic_setxattr, 383 .setxattr = generic_setxattr,
407 .getxattr = ovl_getxattr, 384 .getxattr = generic_getxattr,
408 .listxattr = ovl_listxattr, 385 .listxattr = ovl_listxattr,
409 .removexattr = ovl_removexattr, 386 .removexattr = generic_removexattr,
410 .update_time = ovl_update_time, 387 .update_time = ovl_update_time,
411}; 388};
412 389
@@ -415,6 +392,9 @@ static void ovl_fill_inode(struct inode *inode, umode_t mode)
415 inode->i_ino = get_next_ino(); 392 inode->i_ino = get_next_ino();
416 inode->i_mode = mode; 393 inode->i_mode = mode;
417 inode->i_flags |= S_NOCMTIME; 394 inode->i_flags |= S_NOCMTIME;
395#ifdef CONFIG_FS_POSIX_ACL
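	/* Never cache ACLs on overlay inodes: force ->get_acl() every time so the underlying inode's ACLs are always used. */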
396 inode->i_acl = inode->i_default_acl = ACL_DONT_CACHE;
397#endif
418 398
419 mode &= S_IFMT; 399 mode &= S_IFMT;
420 switch (mode) { 400 switch (mode) {
diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
index e4f5c9536bfe..5813ccff8cd9 100644
--- a/fs/overlayfs/overlayfs.h
+++ b/fs/overlayfs/overlayfs.h
@@ -24,8 +24,8 @@ enum ovl_path_type {
24 (OVL_TYPE_MERGE(type) || !OVL_TYPE_UPPER(type)) 24 (OVL_TYPE_MERGE(type) || !OVL_TYPE_UPPER(type))
25 25
26 26
27#define OVL_XATTR_PREFIX XATTR_TRUSTED_PREFIX "overlay" 27#define OVL_XATTR_PREFIX XATTR_TRUSTED_PREFIX "overlay."
28#define OVL_XATTR_OPAQUE OVL_XATTR_PREFIX ".opaque" 28#define OVL_XATTR_OPAQUE OVL_XATTR_PREFIX "opaque"
29 29
30#define OVL_ISUPPER_MASK 1UL 30#define OVL_ISUPPER_MASK 1UL
31 31
@@ -179,20 +179,21 @@ int ovl_check_empty_dir(struct dentry *dentry, struct list_head *list);
179void ovl_cleanup_whiteouts(struct dentry *upper, struct list_head *list); 179void ovl_cleanup_whiteouts(struct dentry *upper, struct list_head *list);
180void ovl_cache_free(struct list_head *list); 180void ovl_cache_free(struct list_head *list);
181int ovl_check_d_type_supported(struct path *realpath); 181int ovl_check_d_type_supported(struct path *realpath);
182void ovl_workdir_cleanup(struct inode *dir, struct vfsmount *mnt,
183 struct dentry *dentry, int level);
182 184
183/* inode.c */ 185/* inode.c */
184int ovl_setattr(struct dentry *dentry, struct iattr *attr); 186int ovl_setattr(struct dentry *dentry, struct iattr *attr);
185int ovl_permission(struct inode *inode, int mask); 187int ovl_permission(struct inode *inode, int mask);
186int ovl_setxattr(struct dentry *dentry, struct inode *inode, 188int ovl_xattr_set(struct dentry *dentry, const char *name, const void *value,
187 const char *name, const void *value, 189 size_t size, int flags);
188 size_t size, int flags); 190int ovl_xattr_get(struct dentry *dentry, const char *name,
189ssize_t ovl_getxattr(struct dentry *dentry, struct inode *inode, 191 void *value, size_t size);
190 const char *name, void *value, size_t size);
191ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size); 192ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size);
192int ovl_removexattr(struct dentry *dentry, const char *name);
193struct posix_acl *ovl_get_acl(struct inode *inode, int type); 193struct posix_acl *ovl_get_acl(struct inode *inode, int type);
194int ovl_open_maybe_copy_up(struct dentry *dentry, unsigned int file_flags); 194int ovl_open_maybe_copy_up(struct dentry *dentry, unsigned int file_flags);
195int ovl_update_time(struct inode *inode, struct timespec *ts, int flags); 195int ovl_update_time(struct inode *inode, struct timespec *ts, int flags);
196bool ovl_is_private_xattr(const char *name);
196 197
197struct inode *ovl_new_inode(struct super_block *sb, umode_t mode); 198struct inode *ovl_new_inode(struct super_block *sb, umode_t mode);
198struct inode *ovl_get_inode(struct super_block *sb, struct inode *realinode); 199struct inode *ovl_get_inode(struct super_block *sb, struct inode *realinode);
diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c
index cf37fc76fc9f..f241b4ee3d8a 100644
--- a/fs/overlayfs/readdir.c
+++ b/fs/overlayfs/readdir.c
@@ -248,7 +248,7 @@ static inline int ovl_dir_read(struct path *realpath,
248 err = rdd->err; 248 err = rdd->err;
249 } while (!err && rdd->count); 249 } while (!err && rdd->count);
250 250
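	/* rdd->dentry is NULL for the workdir cleanup reader, which has no use for whiteout checks. */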
251 if (!err && rdd->first_maybe_whiteout) 251 if (!err && rdd->first_maybe_whiteout && rdd->dentry)
252 err = ovl_check_whiteouts(realpath->dentry, rdd); 252 err = ovl_check_whiteouts(realpath->dentry, rdd);
253 253
254 fput(realfile); 254 fput(realfile);
@@ -606,3 +606,64 @@ int ovl_check_d_type_supported(struct path *realpath)
606 606
607 return rdd.d_type_supported; 607 return rdd.d_type_supported;
608} 608}
609
610static void ovl_workdir_cleanup_recurse(struct path *path, int level)
611{
612 int err;
613 struct inode *dir = path->dentry->d_inode;
614 LIST_HEAD(list);
615 struct ovl_cache_entry *p;
616 struct ovl_readdir_data rdd = {
617 .ctx.actor = ovl_fill_merge,
618 .dentry = NULL,
619 .list = &list,
620 .root = RB_ROOT,
621 .is_lowest = false,
622 };
623
624 err = ovl_dir_read(path, &rdd);
625 if (err)
626 goto out;
627
628 inode_lock_nested(dir, I_MUTEX_PARENT);
629 list_for_each_entry(p, &list, l_node) {
630 struct dentry *dentry;
631
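			/* skip "." and ".." */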
632 if (p->name[0] == '.') {
633 if (p->len == 1)
634 continue;
635 if (p->len == 2 && p->name[1] == '.')
636 continue;
637 }
638 dentry = lookup_one_len(p->name, path->dentry, p->len);
639 if (IS_ERR(dentry))
640 continue;
641 if (dentry->d_inode)
642 ovl_workdir_cleanup(dir, path->mnt, dentry, level);
643 dput(dentry);
644 }
645 inode_unlock(dir);
646out:
647 ovl_cache_free(&list);
648}
649
650void ovl_workdir_cleanup(struct inode *dir, struct vfsmount *mnt,
651 struct dentry *dentry, int level)
652{
653 int err;
654
655 if (!d_is_dir(dentry) || level > 1) {
656 ovl_cleanup(dir, dentry);
657 return;
658 }
659
660 err = ovl_do_rmdir(dir, dentry);
661 if (err) {
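		/* rmdir failed, most likely on a non-empty directory: clean out its contents one level down, then remove it. */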
662 struct path path = { .mnt = mnt, .dentry = dentry };
663
664 inode_unlock(dir);
665 ovl_workdir_cleanup_recurse(&path, level + 1);
666 inode_lock_nested(dir, I_MUTEX_PARENT);
667 ovl_cleanup(dir, dentry);
668 }
669}
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index 4036132842b5..e2a94a26767b 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -814,6 +814,10 @@ retry:
814 struct kstat stat = { 814 struct kstat stat = {
815 .mode = S_IFDIR | 0, 815 .mode = S_IFDIR | 0,
816 }; 816 };
817 struct iattr attr = {
818 .ia_valid = ATTR_MODE,
819 .ia_mode = stat.mode,
820 };
817 821
818 if (work->d_inode) { 822 if (work->d_inode) {
819 err = -EEXIST; 823 err = -EEXIST;
@@ -821,7 +825,7 @@ retry:
821 goto out_dput; 825 goto out_dput;
822 826
823 retried = true; 827 retried = true;
824 ovl_cleanup(dir, work); 828 ovl_workdir_cleanup(dir, mnt, work, 0);
825 dput(work); 829 dput(work);
826 goto retry; 830 goto retry;
827 } 831 }
@@ -829,6 +833,21 @@ retry:
829 err = ovl_create_real(dir, work, &stat, NULL, NULL, true); 833 err = ovl_create_real(dir, work, &stat, NULL, NULL, true);
830 if (err) 834 if (err)
831 goto out_dput; 835 goto out_dput;
836
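	/* Strip any ACLs the workdir inherited from its parent so the mode set below is authoritative. */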
837 err = vfs_removexattr(work, XATTR_NAME_POSIX_ACL_DEFAULT);
838 if (err && err != -ENODATA && err != -EOPNOTSUPP)
839 goto out_dput;
840
841 err = vfs_removexattr(work, XATTR_NAME_POSIX_ACL_ACCESS);
842 if (err && err != -ENODATA && err != -EOPNOTSUPP)
843 goto out_dput;
844
845 /* Clear any inherited mode bits */
846 inode_lock(work->d_inode);
847 err = notify_change(work, &attr, NULL);
848 inode_unlock(work->d_inode);
849 if (err)
850 goto out_dput;
832 } 851 }
833out_unlock: 852out_unlock:
834 inode_unlock(dir); 853 inode_unlock(dir);
@@ -967,10 +986,19 @@ static unsigned int ovl_split_lowerdirs(char *str)
967 return ctr; 986 return ctr;
968} 987}
969 988
970static int ovl_posix_acl_xattr_set(const struct xattr_handler *handler, 989static int __maybe_unused
971 struct dentry *dentry, struct inode *inode, 990ovl_posix_acl_xattr_get(const struct xattr_handler *handler,
972 const char *name, const void *value, 991 struct dentry *dentry, struct inode *inode,
973 size_t size, int flags) 992 const char *name, void *buffer, size_t size)
993{
994 return ovl_xattr_get(dentry, handler->name, buffer, size);
995}
996
997static int __maybe_unused
998ovl_posix_acl_xattr_set(const struct xattr_handler *handler,
999 struct dentry *dentry, struct inode *inode,
1000 const char *name, const void *value,
1001 size_t size, int flags)
974{ 1002{
975 struct dentry *workdir = ovl_workdir(dentry); 1003 struct dentry *workdir = ovl_workdir(dentry);
976 struct inode *realinode = ovl_inode_real(inode, NULL); 1004 struct inode *realinode = ovl_inode_real(inode, NULL);
@@ -998,19 +1026,22 @@ static int ovl_posix_acl_xattr_set(const struct xattr_handler *handler,
998 1026
999 posix_acl_release(acl); 1027 posix_acl_release(acl);
1000 1028
1001 return ovl_setxattr(dentry, inode, handler->name, value, size, flags); 1029 err = ovl_xattr_set(dentry, handler->name, value, size, flags);
1030 if (!err)
1031 ovl_copyattr(ovl_inode_real(inode, NULL), inode);
1032
1033 return err;
1002 1034
1003out_acl_release: 1035out_acl_release:
1004 posix_acl_release(acl); 1036 posix_acl_release(acl);
1005 return err; 1037 return err;
1006} 1038}
1007 1039
1008static int ovl_other_xattr_set(const struct xattr_handler *handler, 1040static int ovl_own_xattr_get(const struct xattr_handler *handler,
1009 struct dentry *dentry, struct inode *inode, 1041 struct dentry *dentry, struct inode *inode,
1010 const char *name, const void *value, 1042 const char *name, void *buffer, size_t size)
1011 size_t size, int flags)
1012{ 1043{
1013 return ovl_setxattr(dentry, inode, name, value, size, flags); 1044 return -EPERM;
1014} 1045}
1015 1046
1016static int ovl_own_xattr_set(const struct xattr_handler *handler, 1047static int ovl_own_xattr_set(const struct xattr_handler *handler,
@@ -1021,42 +1052,59 @@ static int ovl_own_xattr_set(const struct xattr_handler *handler,
1021 return -EPERM; 1052 return -EPERM;
1022} 1053}
1023 1054
1024static const struct xattr_handler ovl_posix_acl_access_xattr_handler = { 1055static int ovl_other_xattr_get(const struct xattr_handler *handler,
1056 struct dentry *dentry, struct inode *inode,
1057 const char *name, void *buffer, size_t size)
1058{
1059 return ovl_xattr_get(dentry, name, buffer, size);
1060}
1061
1062static int ovl_other_xattr_set(const struct xattr_handler *handler,
1063 struct dentry *dentry, struct inode *inode,
1064 const char *name, const void *value,
1065 size_t size, int flags)
1066{
1067 return ovl_xattr_set(dentry, name, value, size, flags);
1068}
1069
1070static const struct xattr_handler __maybe_unused
1071ovl_posix_acl_access_xattr_handler = {
1025 .name = XATTR_NAME_POSIX_ACL_ACCESS, 1072 .name = XATTR_NAME_POSIX_ACL_ACCESS,
1026 .flags = ACL_TYPE_ACCESS, 1073 .flags = ACL_TYPE_ACCESS,
1074 .get = ovl_posix_acl_xattr_get,
1027 .set = ovl_posix_acl_xattr_set, 1075 .set = ovl_posix_acl_xattr_set,
1028}; 1076};
1029 1077
1030static const struct xattr_handler ovl_posix_acl_default_xattr_handler = { 1078static const struct xattr_handler __maybe_unused
1079ovl_posix_acl_default_xattr_handler = {
1031 .name = XATTR_NAME_POSIX_ACL_DEFAULT, 1080 .name = XATTR_NAME_POSIX_ACL_DEFAULT,
1032 .flags = ACL_TYPE_DEFAULT, 1081 .flags = ACL_TYPE_DEFAULT,
1082 .get = ovl_posix_acl_xattr_get,
1033 .set = ovl_posix_acl_xattr_set, 1083 .set = ovl_posix_acl_xattr_set,
1034}; 1084};
1035 1085
1036static const struct xattr_handler ovl_own_xattr_handler = { 1086static const struct xattr_handler ovl_own_xattr_handler = {
1037 .prefix = OVL_XATTR_PREFIX, 1087 .prefix = OVL_XATTR_PREFIX,
1088 .get = ovl_own_xattr_get,
1038 .set = ovl_own_xattr_set, 1089 .set = ovl_own_xattr_set,
1039}; 1090};
1040 1091
1041static const struct xattr_handler ovl_other_xattr_handler = { 1092static const struct xattr_handler ovl_other_xattr_handler = {
1042 .prefix = "", /* catch all */ 1093 .prefix = "", /* catch all */
1094 .get = ovl_other_xattr_get,
1043 .set = ovl_other_xattr_set, 1095 .set = ovl_other_xattr_set,
1044}; 1096};
1045 1097
1046static const struct xattr_handler *ovl_xattr_handlers[] = { 1098static const struct xattr_handler *ovl_xattr_handlers[] = {
1099#ifdef CONFIG_FS_POSIX_ACL
1047 &ovl_posix_acl_access_xattr_handler, 1100 &ovl_posix_acl_access_xattr_handler,
1048 &ovl_posix_acl_default_xattr_handler, 1101 &ovl_posix_acl_default_xattr_handler,
1102#endif
1049 &ovl_own_xattr_handler, 1103 &ovl_own_xattr_handler,
1050 &ovl_other_xattr_handler, 1104 &ovl_other_xattr_handler,
1051 NULL 1105 NULL
1052}; 1106};
1053 1107
1054static const struct xattr_handler *ovl_xattr_noacl_handlers[] = {
1055 &ovl_own_xattr_handler,
1056 &ovl_other_xattr_handler,
1057 NULL,
1058};
1059
1060static int ovl_fill_super(struct super_block *sb, void *data, int silent) 1108static int ovl_fill_super(struct super_block *sb, void *data, int silent)
1061{ 1109{
1062 struct path upperpath = { NULL, NULL }; 1110 struct path upperpath = { NULL, NULL };
@@ -1132,7 +1180,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
1132 err = -EINVAL; 1180 err = -EINVAL;
1133 stacklen = ovl_split_lowerdirs(lowertmp); 1181 stacklen = ovl_split_lowerdirs(lowertmp);
1134 if (stacklen > OVL_MAX_STACK) { 1182 if (stacklen > OVL_MAX_STACK) {
1135 pr_err("overlayfs: too many lower directries, limit is %d\n", 1183 pr_err("overlayfs: too many lower directories, limit is %d\n",
1136 OVL_MAX_STACK); 1184 OVL_MAX_STACK);
1137 goto out_free_lowertmp; 1185 goto out_free_lowertmp;
1138 } else if (!ufs->config.upperdir && stacklen == 1) { 1186 } else if (!ufs->config.upperdir && stacklen == 1) {
@@ -1269,10 +1317,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
1269 1317
1270 sb->s_magic = OVERLAYFS_SUPER_MAGIC; 1318 sb->s_magic = OVERLAYFS_SUPER_MAGIC;
1271 sb->s_op = &ovl_super_operations; 1319 sb->s_op = &ovl_super_operations;
1272 if (IS_ENABLED(CONFIG_FS_POSIX_ACL)) 1320 sb->s_xattr = ovl_xattr_handlers;
1273 sb->s_xattr = ovl_xattr_handlers;
1274 else
1275 sb->s_xattr = ovl_xattr_noacl_handlers;
1276 sb->s_root = root_dentry; 1321 sb->s_root = root_dentry;
1277 sb->s_fs_info = ufs; 1322 sb->s_fs_info = ufs;
1278 sb->s_flags |= MS_POSIXACL; 1323 sb->s_flags |= MS_POSIXACL;
diff --git a/fs/pipe.c b/fs/pipe.c
index 4b32928f5426..4ebe6b2e5217 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -144,10 +144,8 @@ static int anon_pipe_buf_steal(struct pipe_inode_info *pipe,
144 struct page *page = buf->page; 144 struct page *page = buf->page;
145 145
146 if (page_count(page) == 1) { 146 if (page_count(page) == 1) {
147 if (memcg_kmem_enabled()) { 147 if (memcg_kmem_enabled())
148 memcg_kmem_uncharge(page, 0); 148 memcg_kmem_uncharge(page, 0);
149 __ClearPageKmemcg(page);
150 }
151 __SetPageLocked(page); 149 __SetPageLocked(page);
152 return 0; 150 return 0;
153 } 151 }
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 54e270262979..ac0df4dde823 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -1556,18 +1556,13 @@ static const struct file_operations proc_pid_set_comm_operations = {
1556static int proc_exe_link(struct dentry *dentry, struct path *exe_path) 1556static int proc_exe_link(struct dentry *dentry, struct path *exe_path)
1557{ 1557{
1558 struct task_struct *task; 1558 struct task_struct *task;
1559 struct mm_struct *mm;
1560 struct file *exe_file; 1559 struct file *exe_file;
1561 1560
1562 task = get_proc_task(d_inode(dentry)); 1561 task = get_proc_task(d_inode(dentry));
1563 if (!task) 1562 if (!task)
1564 return -ENOENT; 1563 return -ENOENT;
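	/* get_task_exe_file() grabs a reference to exe_file directly, without pinning the task's entire mm. */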
1565 mm = get_task_mm(task); 1564 exe_file = get_task_exe_file(task);
1566 put_task_struct(task); 1565 put_task_struct(task);
1567 if (!mm)
1568 return -ENOENT;
1569 exe_file = get_mm_exe_file(mm);
1570 mmput(mm);
1571 if (exe_file) { 1566 if (exe_file) {
1572 *exe_path = exe_file->f_path; 1567 *exe_path = exe_file->f_path;
1573 path_get(&exe_file->f_path); 1568 path_get(&exe_file->f_path);
diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
index 09e18fdf61e5..b9a8c813e5e6 100644
--- a/fs/proc/meminfo.c
+++ b/fs/proc/meminfo.c
@@ -46,7 +46,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
46 cached = 0; 46 cached = 0;
47 47
48 for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++) 48 for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++)
49 pages[lru] = global_page_state(NR_LRU_BASE + lru); 49 pages[lru] = global_node_page_state(NR_LRU_BASE + lru);
50 50
51 available = si_mem_available(); 51 available = si_mem_available();
52 52
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 187d84ef9de9..f6fa99eca515 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -581,6 +581,8 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
581 mss->anonymous_thp += HPAGE_PMD_SIZE; 581 mss->anonymous_thp += HPAGE_PMD_SIZE;
582 else if (PageSwapBacked(page)) 582 else if (PageSwapBacked(page))
583 mss->shmem_thp += HPAGE_PMD_SIZE; 583 mss->shmem_thp += HPAGE_PMD_SIZE;
584 else if (is_zone_device_page(page))
585 /* pass */;
584 else 586 else
585 VM_BUG_ON_PAGE(1, page); 587 VM_BUG_ON_PAGE(1, page);
586 smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd)); 588 smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd));
diff --git a/fs/seq_file.c b/fs/seq_file.c
index 19f532e7d35e..6dc4296eed62 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -223,8 +223,10 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
223 size -= n; 223 size -= n;
224 buf += n; 224 buf += n;
225 copied += n; 225 copied += n;
226 if (!m->count) 226 if (!m->count) {
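				/* a full record was consumed; reset the intra-record offset */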
227 m->from = 0;
227 m->index++; 228 m->index++;
229 }
228 if (!size) 230 if (!size)
229 goto Done; 231 goto Done;
230 } 232 }
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
index f35523d4fa3a..b803213d1307 100644
--- a/fs/sysfs/file.c
+++ b/fs/sysfs/file.c
@@ -114,9 +114,15 @@ static ssize_t sysfs_kf_read(struct kernfs_open_file *of, char *buf,
114 * If buf != of->prealloc_buf, we don't know how 114 * If buf != of->prealloc_buf, we don't know how
115 * large it is, so cannot safely pass it to ->show 115 * large it is, so cannot safely pass it to ->show
116 */ 116 */
117 if (pos || WARN_ON_ONCE(buf != of->prealloc_buf)) 117 if (WARN_ON_ONCE(buf != of->prealloc_buf))
118 return 0; 118 return 0;
119 len = ops->show(kobj, of->kn->priv, buf); 119 len = ops->show(kobj, of->kn->priv, buf);
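	/* ->show() always formats from offset 0; emulate a positioned read by discarding the first pos bytes. */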
120 if (pos) {
121 if (len <= pos)
122 return 0;
123 len -= pos;
124 memmove(buf, buf + pos, len);
125 }
120 return min(count, len); 126 return min(count, len);
121} 127}
122 128
diff --git a/fs/ubifs/tnc_commit.c b/fs/ubifs/tnc_commit.c
index b45345d701e7..51157da3f76e 100644
--- a/fs/ubifs/tnc_commit.c
+++ b/fs/ubifs/tnc_commit.c
@@ -370,7 +370,7 @@ static int layout_in_gaps(struct ubifs_info *c, int cnt)
370 370
371 p = c->gap_lebs; 371 p = c->gap_lebs;
372 do { 372 do {
373 ubifs_assert(p < c->gap_lebs + sizeof(int) * c->lst.idx_lebs); 373 ubifs_assert(p < c->gap_lebs + c->lst.idx_lebs);
374 written = layout_leb_in_gaps(c, p); 374 written = layout_leb_in_gaps(c, p);
375 if (written < 0) { 375 if (written < 0) {
376 err = written; 376 err = written;
diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c
index e237811f09ce..11a004114eba 100644
--- a/fs/ubifs/xattr.c
+++ b/fs/ubifs/xattr.c
@@ -575,7 +575,8 @@ static int ubifs_xattr_get(const struct xattr_handler *handler,
575 dbg_gen("xattr '%s', ino %lu ('%pd'), buf size %zd", name, 575 dbg_gen("xattr '%s', ino %lu ('%pd'), buf size %zd", name,
576 inode->i_ino, dentry, size); 576 inode->i_ino, dentry, size);
577 577
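	/* The xattr handler strips its prefix from the name; rebuild the full name before the lookup. */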
578 return __ubifs_getxattr(inode, name, buffer, size); 578 name = xattr_full_name(handler, name);
579 return __ubifs_getxattr(inode, name, buffer, size);
579} 580}
580 581
581static int ubifs_xattr_set(const struct xattr_handler *handler, 582static int ubifs_xattr_set(const struct xattr_handler *handler,
@@ -586,6 +587,8 @@ static int ubifs_xattr_set(const struct xattr_handler *handler,
586 dbg_gen("xattr '%s', host ino %lu ('%pd'), size %zd", 587 dbg_gen("xattr '%s', host ino %lu ('%pd'), size %zd",
587 name, inode->i_ino, dentry, size); 588 name, inode->i_ino, dentry, size);
588 589
590 name = xattr_full_name(handler, name);
591
589 if (value) 592 if (value)
590 return __ubifs_setxattr(inode, name, value, size, flags); 593 return __ubifs_setxattr(inode, name, value, size, flags);
591 else 594 else
diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
index 776ae2f325d1..05b5243d89f6 100644
--- a/fs/xfs/libxfs/xfs_alloc.c
+++ b/fs/xfs/libxfs/xfs_alloc.c
@@ -1582,6 +1582,7 @@ xfs_alloc_ag_vextent_small(
1582 xfs_extlen_t *flenp, /* result length */ 1582 xfs_extlen_t *flenp, /* result length */
1583 int *stat) /* status: 0-freelist, 1-normal/none */ 1583 int *stat) /* status: 0-freelist, 1-normal/none */
1584{ 1584{
1585 struct xfs_owner_info oinfo;
1585 int error; 1586 int error;
1586 xfs_agblock_t fbno; 1587 xfs_agblock_t fbno;
1587 xfs_extlen_t flen; 1588 xfs_extlen_t flen;
@@ -1624,6 +1625,18 @@ xfs_alloc_ag_vextent_small(
1624 error0); 1625 error0);
1625 args->wasfromfl = 1; 1626 args->wasfromfl = 1;
1626 trace_xfs_alloc_small_freelist(args); 1627 trace_xfs_alloc_small_freelist(args);
1628
1629 /*
1630 * If we're feeding an AGFL block to something that
1631 * doesn't live in the free space, we need to clear
1632 * out the OWN_AG rmap.
1633 */
1634 xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_AG);
1635 error = xfs_rmap_free(args->tp, args->agbp, args->agno,
1636 fbno, 1, &oinfo);
1637 if (error)
1638 goto error0;
1639
1627 *stat = 0; 1640 *stat = 0;
1628 return 0; 1641 return 0;
1629 } 1642 }
@@ -2264,6 +2277,9 @@ xfs_alloc_log_agf(
2264 offsetof(xfs_agf_t, agf_longest), 2277 offsetof(xfs_agf_t, agf_longest),
2265 offsetof(xfs_agf_t, agf_btreeblks), 2278 offsetof(xfs_agf_t, agf_btreeblks),
2266 offsetof(xfs_agf_t, agf_uuid), 2279 offsetof(xfs_agf_t, agf_uuid),
2280 offsetof(xfs_agf_t, agf_rmap_blocks),
2281 /* needed so that we don't log the whole rest of the structure: */
2282 offsetof(xfs_agf_t, agf_spare64),
2267 sizeof(xfs_agf_t) 2283 sizeof(xfs_agf_t)
2268 }; 2284 };
2269 2285
diff --git a/fs/xfs/libxfs/xfs_btree.c b/fs/xfs/libxfs/xfs_btree.c
index b5c213a051cd..08569792fe20 100644
--- a/fs/xfs/libxfs/xfs_btree.c
+++ b/fs/xfs/libxfs/xfs_btree.c
@@ -1814,6 +1814,10 @@ xfs_btree_lookup(
1814 1814
1815 XFS_BTREE_STATS_INC(cur, lookup); 1815 XFS_BTREE_STATS_INC(cur, lookup);
1816 1816
1817 /* No such thing as a zero-level tree. */
1818 if (cur->bc_nlevels == 0)
1819 return -EFSCORRUPTED;
1820
1817 block = NULL; 1821 block = NULL;
1818 keyno = 0; 1822 keyno = 0;
1819 1823
@@ -4554,15 +4558,22 @@ xfs_btree_simple_query_range(
4554 if (error) 4558 if (error)
4555 goto out; 4559 goto out;
4556 4560
4561 /* Nothing? See if there's anything to the right. */
4562 if (!stat) {
4563 error = xfs_btree_increment(cur, 0, &stat);
4564 if (error)
4565 goto out;
4566 }
4567
4557 while (stat) { 4568 while (stat) {
4558 /* Find the record. */ 4569 /* Find the record. */
4559 error = xfs_btree_get_rec(cur, &recp, &stat); 4570 error = xfs_btree_get_rec(cur, &recp, &stat);
4560 if (error || !stat) 4571 if (error || !stat)
4561 break; 4572 break;
4562 cur->bc_ops->init_high_key_from_rec(&rec_key, recp);
4563 4573
4564 /* Skip if high_key(rec) < low_key. */ 4574 /* Skip if high_key(rec) < low_key. */
4565 if (firstrec) { 4575 if (firstrec) {
4576 cur->bc_ops->init_high_key_from_rec(&rec_key, recp);
4566 firstrec = false; 4577 firstrec = false;
4567 diff = cur->bc_ops->diff_two_keys(cur, low_key, 4578 diff = cur->bc_ops->diff_two_keys(cur, low_key,
4568 &rec_key); 4579 &rec_key);
@@ -4571,6 +4582,7 @@ xfs_btree_simple_query_range(
4571 } 4582 }
4572 4583
4573 /* Stop if high_key < low_key(rec). */ 4584 /* Stop if high_key < low_key(rec). */
4585 cur->bc_ops->init_key_from_rec(&rec_key, recp);
4574 diff = cur->bc_ops->diff_two_keys(cur, &rec_key, high_key); 4586 diff = cur->bc_ops->diff_two_keys(cur, &rec_key, high_key);
4575 if (diff > 0) 4587 if (diff > 0)
4576 break; 4588 break;
diff --git a/fs/xfs/libxfs/xfs_defer.c b/fs/xfs/libxfs/xfs_defer.c
index 054a2032fdb3..c221d0ecd52e 100644
--- a/fs/xfs/libxfs/xfs_defer.c
+++ b/fs/xfs/libxfs/xfs_defer.c
@@ -194,7 +194,7 @@ xfs_defer_trans_abort(
194 /* Abort intent items. */ 194 /* Abort intent items. */
195 list_for_each_entry(dfp, &dop->dop_pending, dfp_list) { 195 list_for_each_entry(dfp, &dop->dop_pending, dfp_list) {
196 trace_xfs_defer_pending_abort(tp->t_mountp, dfp); 196 trace_xfs_defer_pending_abort(tp->t_mountp, dfp);
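		/* An intent with a logged done item already completed; only unfinished intents need aborting. */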
197 if (dfp->dfp_committed) 197 if (!dfp->dfp_done)
198 dfp->dfp_type->abort_intent(dfp->dfp_intent); 198 dfp->dfp_type->abort_intent(dfp->dfp_intent);
199 } 199 }
200 200
@@ -290,7 +290,6 @@ xfs_defer_finish(
290 struct xfs_defer_pending *dfp; 290 struct xfs_defer_pending *dfp;
291 struct list_head *li; 291 struct list_head *li;
292 struct list_head *n; 292 struct list_head *n;
293 void *done_item = NULL;
294 void *state; 293 void *state;
295 int error = 0; 294 int error = 0;
296 void (*cleanup_fn)(struct xfs_trans *, void *, int); 295 void (*cleanup_fn)(struct xfs_trans *, void *, int);
@@ -309,19 +308,11 @@ xfs_defer_finish(
309 if (error) 308 if (error)
310 goto out; 309 goto out;
311 310
312 /* Mark all pending intents as committed. */
313 list_for_each_entry_reverse(dfp, &dop->dop_pending, dfp_list) {
314 if (dfp->dfp_committed)
315 break;
316 trace_xfs_defer_pending_commit((*tp)->t_mountp, dfp);
317 dfp->dfp_committed = true;
318 }
319
320 /* Log an intent-done item for the first pending item. */ 311 /* Log an intent-done item for the first pending item. */
321 dfp = list_first_entry(&dop->dop_pending, 312 dfp = list_first_entry(&dop->dop_pending,
322 struct xfs_defer_pending, dfp_list); 313 struct xfs_defer_pending, dfp_list);
323 trace_xfs_defer_pending_finish((*tp)->t_mountp, dfp); 314 trace_xfs_defer_pending_finish((*tp)->t_mountp, dfp);
324 done_item = dfp->dfp_type->create_done(*tp, dfp->dfp_intent, 315 dfp->dfp_done = dfp->dfp_type->create_done(*tp, dfp->dfp_intent,
325 dfp->dfp_count); 316 dfp->dfp_count);
326 cleanup_fn = dfp->dfp_type->finish_cleanup; 317 cleanup_fn = dfp->dfp_type->finish_cleanup;
327 318
@@ -331,7 +322,7 @@ xfs_defer_finish(
331 list_del(li); 322 list_del(li);
332 dfp->dfp_count--; 323 dfp->dfp_count--;
333 error = dfp->dfp_type->finish_item(*tp, dop, li, 324 error = dfp->dfp_type->finish_item(*tp, dop, li,
334 done_item, &state); 325 dfp->dfp_done, &state);
335 if (error) { 326 if (error) {
336 /* 327 /*
337 * Clean up after ourselves and jump out. 328 * Clean up after ourselves and jump out.
@@ -428,8 +419,8 @@ xfs_defer_add(
428 dfp = kmem_alloc(sizeof(struct xfs_defer_pending), 419 dfp = kmem_alloc(sizeof(struct xfs_defer_pending),
429 KM_SLEEP | KM_NOFS); 420 KM_SLEEP | KM_NOFS);
430 dfp->dfp_type = defer_op_types[type]; 421 dfp->dfp_type = defer_op_types[type];
431 dfp->dfp_committed = false;
432 dfp->dfp_intent = NULL; 422 dfp->dfp_intent = NULL;
423 dfp->dfp_done = NULL;
433 dfp->dfp_count = 0; 424 dfp->dfp_count = 0;
434 INIT_LIST_HEAD(&dfp->dfp_work); 425 INIT_LIST_HEAD(&dfp->dfp_work);
435 list_add_tail(&dfp->dfp_list, &dop->dop_intake); 426 list_add_tail(&dfp->dfp_list, &dop->dop_intake);
diff --git a/fs/xfs/libxfs/xfs_defer.h b/fs/xfs/libxfs/xfs_defer.h
index cc3981c48296..e96533d178cf 100644
--- a/fs/xfs/libxfs/xfs_defer.h
+++ b/fs/xfs/libxfs/xfs_defer.h
@@ -30,8 +30,8 @@ struct xfs_defer_op_type;
30struct xfs_defer_pending { 30struct xfs_defer_pending {
31 const struct xfs_defer_op_type *dfp_type; /* function pointers */ 31 const struct xfs_defer_op_type *dfp_type; /* function pointers */
32 struct list_head dfp_list; /* pending items */ 32 struct list_head dfp_list; /* pending items */
33 bool dfp_committed; /* committed trans? */
34 void *dfp_intent; /* log intent item */ 33 void *dfp_intent; /* log intent item */
34 void *dfp_done; /* log done item */
35 struct list_head dfp_work; /* work items */ 35 struct list_head dfp_work; /* work items */
36 unsigned int dfp_count; /* # extent items */ 36 unsigned int dfp_count; /* # extent items */
37}; 37};
diff --git a/fs/xfs/libxfs/xfs_format.h b/fs/xfs/libxfs/xfs_format.h
index f814d42c73b2..270fb5cf4fa1 100644
--- a/fs/xfs/libxfs/xfs_format.h
+++ b/fs/xfs/libxfs/xfs_format.h
@@ -640,12 +640,15 @@ typedef struct xfs_agf {
640 __be32 agf_btreeblks; /* # of blocks held in AGF btrees */ 640 __be32 agf_btreeblks; /* # of blocks held in AGF btrees */
641 uuid_t agf_uuid; /* uuid of filesystem */ 641 uuid_t agf_uuid; /* uuid of filesystem */
642 642
643 __be32 agf_rmap_blocks; /* rmapbt blocks used */
644 __be32 agf_padding; /* padding */
645
643 /* 646 /*
644 * reserve some contiguous space for future logged fields before we add 647 * reserve some contiguous space for future logged fields before we add
645 * the unlogged fields. This makes the range logging via flags and 648 * the unlogged fields. This makes the range logging via flags and
646 * structure offsets much simpler. 649 * structure offsets much simpler.
647 */ 650 */
648 __be64 agf_spare64[16]; 651 __be64 agf_spare64[15];
649 652
650 /* unlogged fields, written during buffer writeback. */ 653 /* unlogged fields, written during buffer writeback. */
651 __be64 agf_lsn; /* last write sequence */ 654 __be64 agf_lsn; /* last write sequence */
@@ -670,7 +673,9 @@ typedef struct xfs_agf {
670#define XFS_AGF_LONGEST 0x00000400 673#define XFS_AGF_LONGEST 0x00000400
671#define XFS_AGF_BTREEBLKS 0x00000800 674#define XFS_AGF_BTREEBLKS 0x00000800
672#define XFS_AGF_UUID 0x00001000 675#define XFS_AGF_UUID 0x00001000
673#define XFS_AGF_NUM_BITS 13 676#define XFS_AGF_RMAP_BLOCKS 0x00002000
677#define XFS_AGF_SPARE64 0x00004000
678#define XFS_AGF_NUM_BITS 15
674#define XFS_AGF_ALL_BITS ((1 << XFS_AGF_NUM_BITS) - 1) 679#define XFS_AGF_ALL_BITS ((1 << XFS_AGF_NUM_BITS) - 1)
675 680
676#define XFS_AGF_FLAGS \ 681#define XFS_AGF_FLAGS \
@@ -686,7 +691,9 @@ typedef struct xfs_agf {
686 { XFS_AGF_FREEBLKS, "FREEBLKS" }, \ 691 { XFS_AGF_FREEBLKS, "FREEBLKS" }, \
687 { XFS_AGF_LONGEST, "LONGEST" }, \ 692 { XFS_AGF_LONGEST, "LONGEST" }, \
688 { XFS_AGF_BTREEBLKS, "BTREEBLKS" }, \ 693 { XFS_AGF_BTREEBLKS, "BTREEBLKS" }, \
689 { XFS_AGF_UUID, "UUID" } 694 { XFS_AGF_UUID, "UUID" }, \
695 { XFS_AGF_RMAP_BLOCKS, "RMAP_BLOCKS" }, \
696 { XFS_AGF_SPARE64, "SPARE64" }
690 697
691/* disk block (xfs_daddr_t) in the AG */ 698/* disk block (xfs_daddr_t) in the AG */
692#define XFS_AGF_DADDR(mp) ((xfs_daddr_t)(1 << (mp)->m_sectbb_log)) 699#define XFS_AGF_DADDR(mp) ((xfs_daddr_t)(1 << (mp)->m_sectbb_log))
diff --git a/fs/xfs/libxfs/xfs_rmap_btree.c b/fs/xfs/libxfs/xfs_rmap_btree.c
index bc1faebc84ec..17b8eeb34ac8 100644
--- a/fs/xfs/libxfs/xfs_rmap_btree.c
+++ b/fs/xfs/libxfs/xfs_rmap_btree.c
@@ -98,6 +98,8 @@ xfs_rmapbt_alloc_block(
98 union xfs_btree_ptr *new, 98 union xfs_btree_ptr *new,
99 int *stat) 99 int *stat)
100{ 100{
101 struct xfs_buf *agbp = cur->bc_private.a.agbp;
102 struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp);
101 int error; 103 int error;
102 xfs_agblock_t bno; 104 xfs_agblock_t bno;
103 105
@@ -124,6 +126,8 @@ xfs_rmapbt_alloc_block(
124 126
125 xfs_trans_agbtree_delta(cur->bc_tp, 1); 127 xfs_trans_agbtree_delta(cur->bc_tp, 1);
126 new->s = cpu_to_be32(bno); 128 new->s = cpu_to_be32(bno);
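	/* Account the new block in the AGF's rmapbt block count and log it. */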
129 be32_add_cpu(&agf->agf_rmap_blocks, 1);
130 xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_RMAP_BLOCKS);
127 131
128 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); 132 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
129 *stat = 1; 133 *stat = 1;
@@ -143,6 +147,8 @@ xfs_rmapbt_free_block(
143 bno = xfs_daddr_to_agbno(cur->bc_mp, XFS_BUF_ADDR(bp)); 147 bno = xfs_daddr_to_agbno(cur->bc_mp, XFS_BUF_ADDR(bp));
144 trace_xfs_rmapbt_free_block(cur->bc_mp, cur->bc_private.a.agno, 148 trace_xfs_rmapbt_free_block(cur->bc_mp, cur->bc_private.a.agno,
145 bno, 1); 149 bno, 1);
150 be32_add_cpu(&agf->agf_rmap_blocks, -1);
151 xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_RMAP_BLOCKS);
146 error = xfs_alloc_put_freelist(cur->bc_tp, agbp, NULL, bno, 1); 152 error = xfs_alloc_put_freelist(cur->bc_tp, agbp, NULL, bno, 1);
147 if (error) 153 if (error)
148 return error; 154 return error;
diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c
index 0e3d4f5ec33c..4aecc5fefe96 100644
--- a/fs/xfs/libxfs/xfs_sb.c
+++ b/fs/xfs/libxfs/xfs_sb.c
@@ -583,7 +583,8 @@ xfs_sb_verify(
583 * Only check the in progress field for the primary superblock as 583 * Only check the in progress field for the primary superblock as
584 * mkfs.xfs doesn't clear it from secondary superblocks. 584 * mkfs.xfs doesn't clear it from secondary superblocks.
585 */ 585 */
586 return xfs_mount_validate_sb(mp, &sb, bp->b_bn == XFS_SB_DADDR, 586 return xfs_mount_validate_sb(mp, &sb,
587 bp->b_maps[0].bm_bn == XFS_SB_DADDR,
587 check_version); 588 check_version);
588} 589}
589 590
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 47a318ce82e0..b5b9bffe3520 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -115,7 +115,6 @@ xfs_buf_ioacct_dec(
115 if (!(bp->b_flags & _XBF_IN_FLIGHT)) 115 if (!(bp->b_flags & _XBF_IN_FLIGHT))
116 return; 116 return;
117 117
118 ASSERT(bp->b_flags & XBF_ASYNC);
119 bp->b_flags &= ~_XBF_IN_FLIGHT; 118 bp->b_flags &= ~_XBF_IN_FLIGHT;
120 percpu_counter_dec(&bp->b_target->bt_io_count); 119 percpu_counter_dec(&bp->b_target->bt_io_count);
121} 120}
@@ -1612,7 +1611,7 @@ xfs_wait_buftarg(
1612 */ 1611 */
1613 while (percpu_counter_sum(&btp->bt_io_count)) 1612 while (percpu_counter_sum(&btp->bt_io_count))
1614 delay(100); 1613 delay(100);
1615 drain_workqueue(btp->bt_mount->m_buf_workqueue); 1614 flush_workqueue(btp->bt_mount->m_buf_workqueue);
1616 1615
1617 /* loop until there is nothing left on the lru list. */ 1616 /* loop until there is nothing left on the lru list. */
1618 while (list_lru_count(&btp->bt_lru)) { 1617 while (list_lru_count(&btp->bt_lru)) {
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index ed95e5bb04e6..e612a0233710 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -741,9 +741,20 @@ xfs_file_dax_write(
741 * page is inserted into the pagecache when we have to serve a write 741 * page is inserted into the pagecache when we have to serve a write
742 * fault on a hole. It should never be dirtied and can simply be 742 * fault on a hole. It should never be dirtied and can simply be
743 * dropped from the pagecache once we get real data for the page. 743 * dropped from the pagecache once we get real data for the page.
744 *
745 * XXX: This is racy against mmap, and there's nothing we can do about
746 * it. dax_do_io() should really do this invalidation internally as
747 * it will know if we've allocated over a hole for this specific IO and
748 * if so it needs to update the mapping tree and invalidate existing
749 * PTEs over the newly allocated range. Remove this invalidation when
750 * dax_do_io() is fixed up.
744 */ 751 */
745 if (mapping->nrpages) { 752 if (mapping->nrpages) {
746 ret = invalidate_inode_pages2(mapping); 753 loff_t end = iocb->ki_pos + iov_iter_count(from) - 1;
754
755 ret = invalidate_inode_pages2_range(mapping,
756 iocb->ki_pos >> PAGE_SHIFT,
757 end >> PAGE_SHIFT);
747 WARN_ON_ONCE(ret); 758 WARN_ON_ONCE(ret);
748 } 759 }
749 760
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index 0f96847b90e1..0b7f986745c1 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -248,6 +248,7 @@ xfs_growfs_data_private(
248 agf->agf_roots[XFS_BTNUM_RMAPi] = 248 agf->agf_roots[XFS_BTNUM_RMAPi] =
249 cpu_to_be32(XFS_RMAP_BLOCK(mp)); 249 cpu_to_be32(XFS_RMAP_BLOCK(mp));
250 agf->agf_levels[XFS_BTNUM_RMAPi] = cpu_to_be32(1); 250 agf->agf_levels[XFS_BTNUM_RMAPi] = cpu_to_be32(1);
251 agf->agf_rmap_blocks = cpu_to_be32(1);
251 } 252 }
252 253
253 agf->agf_flfirst = cpu_to_be32(1); 254 agf->agf_flfirst = cpu_to_be32(1);
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 2114d53df433..2af0dda1c978 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -715,12 +715,16 @@ xfs_iomap_write_allocate(
715 * is in the delayed allocation extent on which we sit 715 * is in the delayed allocation extent on which we sit
716 * but before our buffer starts. 716 * but before our buffer starts.
717 */ 717 */
718
719 nimaps = 0; 718 nimaps = 0;
720 while (nimaps == 0) { 719 while (nimaps == 0) {
721 nres = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK); 720 nres = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK);
722 721 /*
723 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, nres, 722 * We have already reserved space for the extent and any
723 * indirect blocks when creating the delalloc extent, so
724 * there is no need to reserve space in this transaction
725 * again.
726 */
727 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0,
724 0, XFS_TRANS_RESERVE, &tp); 728 0, XFS_TRANS_RESERVE, &tp);
725 if (error) 729 if (error)
726 return error; 730 return error;
@@ -1037,20 +1041,14 @@ xfs_file_iomap_begin(
1037 return error; 1041 return error;
1038 1042
1039 trace_xfs_iomap_alloc(ip, offset, length, 0, &imap); 1043 trace_xfs_iomap_alloc(ip, offset, length, 0, &imap);
1040 xfs_bmbt_to_iomap(ip, iomap, &imap);
1041 } else if (nimaps) {
1042 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1043 trace_xfs_iomap_found(ip, offset, length, 0, &imap);
1044 xfs_bmbt_to_iomap(ip, iomap, &imap);
1045 } else { 1044 } else {
1045 ASSERT(nimaps);
1046
1046 xfs_iunlock(ip, XFS_ILOCK_EXCL); 1047 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1047 trace_xfs_iomap_not_found(ip, offset, length, 0, &imap); 1048 trace_xfs_iomap_found(ip, offset, length, 0, &imap);
1048 iomap->blkno = IOMAP_NULL_BLOCK;
1049 iomap->type = IOMAP_HOLE;
1050 iomap->offset = offset;
1051 iomap->length = length;
1052 } 1049 }
1053 1050
1051 xfs_bmbt_to_iomap(ip, iomap, &imap);
1054 return 0; 1052 return 0;
1055} 1053}
1056 1054
@@ -1112,3 +1110,48 @@ struct iomap_ops xfs_iomap_ops = {
1112 .iomap_begin = xfs_file_iomap_begin, 1110 .iomap_begin = xfs_file_iomap_begin,
1113 .iomap_end = xfs_file_iomap_end, 1111 .iomap_end = xfs_file_iomap_end,
1114}; 1112};
1113
1114static int
1115xfs_xattr_iomap_begin(
1116 struct inode *inode,
1117 loff_t offset,
1118 loff_t length,
1119 unsigned flags,
1120 struct iomap *iomap)
1121{
1122 struct xfs_inode *ip = XFS_I(inode);
1123 struct xfs_mount *mp = ip->i_mount;
1124 xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset);
1125 xfs_fileoff_t end_fsb = XFS_B_TO_FSB(mp, offset + length);
1126 struct xfs_bmbt_irec imap;
1127 int nimaps = 1, error = 0;
1128 unsigned lockmode;
1129
1130 if (XFS_FORCED_SHUTDOWN(mp))
1131 return -EIO;
1132
1133 lockmode = xfs_ilock_data_map_shared(ip);
1134
1135 /* if there is no attribute fork or no extents, return ENOENT */
1136 if (!XFS_IFORK_Q(ip) || !ip->i_d.di_anextents) {
1137 error = -ENOENT;
1138 goto out_unlock;
1139 }
1140
1141 ASSERT(ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL);
1142 error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
1143 &nimaps, XFS_BMAPI_ENTIRE | XFS_BMAPI_ATTRFORK);
1144out_unlock:
1145 xfs_iunlock(ip, lockmode);
1146
1147 if (!error) {
1148 ASSERT(nimaps);
1149 xfs_bmbt_to_iomap(ip, iomap, &imap);
1150 }
1151
1152 return error;
1153}
1154
1155struct iomap_ops xfs_xattr_iomap_ops = {
1156 .iomap_begin = xfs_xattr_iomap_begin,
1157};
diff --git a/fs/xfs/xfs_iomap.h b/fs/xfs/xfs_iomap.h
index e066d045e2ff..fb8aca3d69ab 100644
--- a/fs/xfs/xfs_iomap.h
+++ b/fs/xfs/xfs_iomap.h
@@ -35,5 +35,6 @@ void xfs_bmbt_to_iomap(struct xfs_inode *, struct iomap *,
35 struct xfs_bmbt_irec *); 35 struct xfs_bmbt_irec *);
36 36
37extern struct iomap_ops xfs_iomap_ops; 37extern struct iomap_ops xfs_iomap_ops;
38extern struct iomap_ops xfs_xattr_iomap_ops;
38 39
39#endif /* __XFS_IOMAP_H__*/ 40#endif /* __XFS_IOMAP_H__*/
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index ab820f84ed50..b24c3102fa93 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -1009,7 +1009,14 @@ xfs_vn_fiemap(
1009 int error; 1009 int error;
1010 1010
1011 xfs_ilock(XFS_I(inode), XFS_IOLOCK_SHARED); 1011 xfs_ilock(XFS_I(inode), XFS_IOLOCK_SHARED);
1012 error = iomap_fiemap(inode, fieinfo, start, length, &xfs_iomap_ops); 1012 if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
1013 fieinfo->fi_flags &= ~FIEMAP_FLAG_XATTR;
1014 error = iomap_fiemap(inode, fieinfo, start, length,
1015 &xfs_xattr_iomap_ops);
1016 } else {
1017 error = iomap_fiemap(inode, fieinfo, start, length,
1018 &xfs_iomap_ops);
1019 }
1013 xfs_iunlock(XFS_I(inode), XFS_IOLOCK_SHARED); 1020 xfs_iunlock(XFS_I(inode), XFS_IOLOCK_SHARED);
1014 1021
1015 return error; 1022 return error;
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 24ef83ef04de..fd6be45b3a1e 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -1574,9 +1574,16 @@ xfs_fs_fill_super(
1574 } 1574 }
1575 } 1575 }
1576 1576
1577 if (xfs_sb_version_hasrmapbt(&mp->m_sb)) 1577 if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
1578 if (mp->m_sb.sb_rblocks) {
1579 xfs_alert(mp,
1580 "EXPERIMENTAL reverse mapping btree not compatible with realtime device!");
1581 error = -EINVAL;
1582 goto out_filestream_unmount;
1583 }
1578 xfs_alert(mp, 1584 xfs_alert(mp,
1579 "EXPERIMENTAL reverse mapping btree feature enabled. Use at your own risk!"); 1585 "EXPERIMENTAL reverse mapping btree feature enabled. Use at your own risk!");
1586 }
1580 1587
1581 error = xfs_mountfs(mp); 1588 error = xfs_mountfs(mp);
1582 if (error) 1589 if (error)
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index 551b7e26980c..d303a665dba9 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -1298,7 +1298,6 @@ DEFINE_IOMAP_EVENT(xfs_get_blocks_alloc);
1298DEFINE_IOMAP_EVENT(xfs_get_blocks_map_direct); 1298DEFINE_IOMAP_EVENT(xfs_get_blocks_map_direct);
1299DEFINE_IOMAP_EVENT(xfs_iomap_alloc); 1299DEFINE_IOMAP_EVENT(xfs_iomap_alloc);
1300DEFINE_IOMAP_EVENT(xfs_iomap_found); 1300DEFINE_IOMAP_EVENT(xfs_iomap_found);
1301DEFINE_IOMAP_EVENT(xfs_iomap_not_found);
1302 1301
1303DECLARE_EVENT_CLASS(xfs_simple_io_class, 1302DECLARE_EVENT_CLASS(xfs_simple_io_class,
1304 TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count), 1303 TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count),
@@ -2296,7 +2295,7 @@ DECLARE_EVENT_CLASS(xfs_defer_pending_class,
2296 __entry->dev = mp ? mp->m_super->s_dev : 0; 2295 __entry->dev = mp ? mp->m_super->s_dev : 0;
2297 __entry->type = dfp->dfp_type->type; 2296 __entry->type = dfp->dfp_type->type;
2298 __entry->intent = dfp->dfp_intent; 2297 __entry->intent = dfp->dfp_intent;
2299 __entry->committed = dfp->dfp_committed; 2298 __entry->committed = dfp->dfp_done != NULL;
2300 __entry->nr = dfp->dfp_count; 2299 __entry->nr = dfp->dfp_count;
2301 ), 2300 ),
2302 TP_printk("dev %d:%d optype %d intent %p committed %d nr %d\n", 2301 TP_printk("dev %d:%d optype %d intent %p committed %d nr %d\n",
diff --git a/include/asm-generic/qrwlock.h b/include/asm-generic/qrwlock.h
index 54a8e65e18b6..7d026bf27713 100644
--- a/include/asm-generic/qrwlock.h
+++ b/include/asm-generic/qrwlock.h
@@ -25,7 +25,20 @@
25#include <asm-generic/qrwlock_types.h> 25#include <asm-generic/qrwlock_types.h>
26 26
27/* 27/*
28 * Writer states & reader shift and bias 28 * Writer states & reader shift and bias.
29 *
30 * | +0 | +1 | +2 | +3 |
31 * ----+----+----+----+----+
32 * LE | 78 | 56 | 34 | 12 | 0x12345678
33 * ----+----+----+----+----+
34 * | wr | rd |
35 * +----+----+----+----+
36 *
37 * ----+----+----+----+----+
38 * BE | 12 | 34 | 56 | 78 | 0x12345678
39 * ----+----+----+----+----+
40 * | rd | wr |
41 * +----+----+----+----+
29 */ 42 */
30#define _QW_WAITING 1 /* A writer is waiting */ 43#define _QW_WAITING 1 /* A writer is waiting */
31#define _QW_LOCKED 0xff /* A writer holds the lock */ 44#define _QW_LOCKED 0xff /* A writer holds the lock */
@@ -134,12 +147,22 @@ static inline void queued_read_unlock(struct qrwlock *lock)
134} 147}
135 148
136/** 149/**
150 * __qrwlock_write_byte - retrieve the write byte address of a queue rwlock
151 * @lock : Pointer to queue rwlock structure
152 * Return: the write byte address of a queue rwlock
153 */
154static inline u8 *__qrwlock_write_byte(struct qrwlock *lock)
155{
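	/* The writer byte is the least significant byte of ->cnts: offset 0 on little-endian, offset 3 on big-endian. */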
156 return (u8 *)lock + 3 * IS_BUILTIN(CONFIG_CPU_BIG_ENDIAN);
157}
158
159/**
137 * queued_write_unlock - release write lock of a queue rwlock 160 * queued_write_unlock - release write lock of a queue rwlock
138 * @lock : Pointer to queue rwlock structure 161 * @lock : Pointer to queue rwlock structure
139 */ 162 */
140static inline void queued_write_unlock(struct qrwlock *lock) 163static inline void queued_write_unlock(struct qrwlock *lock)
141{ 164{
142 smp_store_release((u8 *)&lock->cnts, 0); 165 smp_store_release(__qrwlock_write_byte(lock), 0);
143} 166}
144 167
145/* 168/*
diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
index 1bfa602958f2..6df9b0749671 100644
--- a/include/asm-generic/uaccess.h
+++ b/include/asm-generic/uaccess.h
@@ -72,6 +72,7 @@ struct exception_table_entry
72/* Returns 0 if exception not found and fixup otherwise. */ 72/* Returns 0 if exception not found and fixup otherwise. */
73extern unsigned long search_exception_table(unsigned long); 73extern unsigned long search_exception_table(unsigned long);
74 74
75
75/* 76/*
76 * architectures with an MMU should override these two 77 * architectures with an MMU should override these two
77 */ 78 */
@@ -230,14 +231,18 @@ extern int __put_user_bad(void) __attribute__((noreturn));
230 might_fault(); \ 231 might_fault(); \
231 access_ok(VERIFY_READ, __p, sizeof(*ptr)) ? \ 232 access_ok(VERIFY_READ, __p, sizeof(*ptr)) ? \
232 __get_user((x), (__typeof__(*(ptr)) *)__p) : \ 233 __get_user((x), (__typeof__(*(ptr)) *)__p) : \
233 -EFAULT; \ 234 ((x) = (__typeof__(*(ptr)))0,-EFAULT); \
234}) 235})
235 236
236#ifndef __get_user_fn 237#ifndef __get_user_fn
237static inline int __get_user_fn(size_t size, const void __user *ptr, void *x) 238static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
238{ 239{
239 size = __copy_from_user(x, ptr, size); 240 size_t n = __copy_from_user(x, ptr, size);
240 return size ? -EFAULT : size; 241 if (unlikely(n)) {
242 memset(x + (size - n), 0, n);
243 return -EFAULT;
244 }
245 return 0;
241} 246}
242 247
243#define __get_user_fn(sz, u, k) __get_user_fn(sz, u, k) 248#define __get_user_fn(sz, u, k) __get_user_fn(sz, u, k)
@@ -257,11 +262,13 @@ extern int __get_user_bad(void) __attribute__((noreturn));
257static inline long copy_from_user(void *to, 262static inline long copy_from_user(void *to,
258 const void __user * from, unsigned long n) 263 const void __user * from, unsigned long n)
259{ 264{
265 unsigned long res = n;
260 might_fault(); 266 might_fault();
261 if (access_ok(VERIFY_READ, from, n)) 267 if (likely(access_ok(VERIFY_READ, from, n)))
262 return __copy_from_user(to, from, n); 268 res = __copy_from_user(to, from, n);
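	/* Zero whatever was not copied so the destination never holds uninitialized kernel memory. */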
263 else 269 if (unlikely(res))
264 return n; 270 memset(to + (n - res), 0, res);
271 return res;
265} 272}
266 273
267static inline long copy_to_user(void __user *to, 274static inline long copy_to_user(void __user *to,
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 4348d6d5877a..99c6d01d24f2 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -962,6 +962,7 @@ void ttm_mem_io_free(struct ttm_bo_device *bdev,
962 * 962 *
963 * @bo: A pointer to a struct ttm_buffer_object. 963 * @bo: A pointer to a struct ttm_buffer_object.
964 * @evict: 1: This is an eviction. Don't try to pipeline. 964 * @evict: 1: This is an eviction. Don't try to pipeline.
965 * @interruptible: Sleep interruptible if waiting.
965 * @no_wait_gpu: Return immediately if the GPU is busy. 966 * @no_wait_gpu: Return immediately if the GPU is busy.
966 * @new_mem: struct ttm_mem_reg indicating where to move. 967 * @new_mem: struct ttm_mem_reg indicating where to move.
967 * 968 *
@@ -976,7 +977,7 @@ void ttm_mem_io_free(struct ttm_bo_device *bdev,
976 */ 977 */
977 978
978extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo, 979extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
979 bool evict, bool no_wait_gpu, 980 bool evict, bool interruptible, bool no_wait_gpu,
980 struct ttm_mem_reg *new_mem); 981 struct ttm_mem_reg *new_mem);
981 982
982/** 983/**
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 4d8452c2384b..c5eaf2f80a4c 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -1056,7 +1056,7 @@ static inline struct fwnode_handle *acpi_get_next_subnode(struct device *dev,
1056 return NULL; 1056 return NULL;
1057} 1057}
1058 1058
1059#define ACPI_DECLARE_PROBE_ENTRY(table, name, table_id, subtable, validate, data, fn) \ 1059#define ACPI_DECLARE_PROBE_ENTRY(table, name, table_id, subtable, valid, data, fn) \
1060 static const void * __acpi_table_##name[] \ 1060 static const void * __acpi_table_##name[] \
1061 __attribute__((unused)) \ 1061 __attribute__((unused)) \
1062 = { (void *) table_id, \ 1062 = { (void *) table_id, \
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 59ffaa68b11b..23ddf4b46a9b 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -71,7 +71,8 @@ static inline bool bio_has_data(struct bio *bio)
71{ 71{
72 if (bio && 72 if (bio &&
73 bio->bi_iter.bi_size && 73 bio->bi_iter.bi_size &&
74 bio_op(bio) != REQ_OP_DISCARD) 74 bio_op(bio) != REQ_OP_DISCARD &&
75 bio_op(bio) != REQ_OP_SECURE_ERASE)
75 return true; 76 return true;
76 77
77 return false; 78 return false;
@@ -79,7 +80,9 @@ static inline bool bio_has_data(struct bio *bio)
79 80
80static inline bool bio_no_advance_iter(struct bio *bio) 81static inline bool bio_no_advance_iter(struct bio *bio)
81{ 82{
82 return bio_op(bio) == REQ_OP_DISCARD || bio_op(bio) == REQ_OP_WRITE_SAME; 83 return bio_op(bio) == REQ_OP_DISCARD ||
84 bio_op(bio) == REQ_OP_SECURE_ERASE ||
85 bio_op(bio) == REQ_OP_WRITE_SAME;
83} 86}
84 87
85static inline bool bio_is_rw(struct bio *bio) 88static inline bool bio_is_rw(struct bio *bio)
@@ -199,6 +202,9 @@ static inline unsigned bio_segments(struct bio *bio)
199 if (bio_op(bio) == REQ_OP_DISCARD) 202 if (bio_op(bio) == REQ_OP_DISCARD)
200 return 1; 203 return 1;
201 204
205 if (bio_op(bio) == REQ_OP_SECURE_ERASE)
206 return 1;
207
202 if (bio_op(bio) == REQ_OP_WRITE_SAME) 208 if (bio_op(bio) == REQ_OP_WRITE_SAME)
203 return 1; 209 return 1;
204 210
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 2c210b6a7bcf..e79055c8b577 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -882,7 +882,7 @@ static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
882static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q, 882static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
883 int op) 883 int op)
884{ 884{
885 if (unlikely(op == REQ_OP_DISCARD)) 885 if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE))
886 return min(q->limits.max_discard_sectors, UINT_MAX >> 9); 886 return min(q->limits.max_discard_sectors, UINT_MAX >> 9);
887 887
888 if (unlikely(op == REQ_OP_WRITE_SAME)) 888 if (unlikely(op == REQ_OP_WRITE_SAME))
@@ -913,7 +913,9 @@ static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
913 if (unlikely(rq->cmd_type != REQ_TYPE_FS)) 913 if (unlikely(rq->cmd_type != REQ_TYPE_FS))
914 return q->limits.max_hw_sectors; 914 return q->limits.max_hw_sectors;
915 915
916 if (!q->limits.chunk_sectors || (req_op(rq) == REQ_OP_DISCARD)) 916 if (!q->limits.chunk_sectors ||
917 req_op(rq) == REQ_OP_DISCARD ||
918 req_op(rq) == REQ_OP_SECURE_ERASE)
917 return blk_queue_get_max_sectors(q, req_op(rq)); 919 return blk_queue_get_max_sectors(q, req_op(rq));
918 920
919 return min(blk_max_size_offset(q, offset), 921 return min(blk_max_size_offset(q, offset),
diff --git a/include/linux/bvec.h b/include/linux/bvec.h
index 701b64a3b7c5..89b65b82d98f 100644
--- a/include/linux/bvec.h
+++ b/include/linux/bvec.h
@@ -74,7 +74,8 @@ static inline void bvec_iter_advance(const struct bio_vec *bv,
74 "Attempted to advance past end of bvec iter\n"); 74 "Attempted to advance past end of bvec iter\n");
75 75
76 while (bytes) { 76 while (bytes) {
77 unsigned len = min(bytes, bvec_iter_len(bv, *iter)); 77 unsigned iter_len = bvec_iter_len(bv, *iter);
78 unsigned len = min(bytes, iter_len);
78 79
79 bytes -= len; 80 bytes -= len;
80 iter->bi_size -= len; 81 iter->bi_size -= len;
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index e2949397c19b..573c5a18908f 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -158,7 +158,7 @@
 #define __compiler_offsetof(a, b)			\
 	__builtin_offsetof(a, b)
 
-#if GCC_VERSION >= 40100 && GCC_VERSION < 40600
+#if GCC_VERSION >= 40100
 # define __compiletime_object_size(obj) __builtin_object_size(obj, 0)
 #endif
 
@@ -242,7 +242,11 @@
  */
 #define asm_volatile_goto(x...)	do { asm goto(x); asm (""); } while (0)
 
-#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
+/*
+ * sparse (__CHECKER__) pretends to be gcc, but can't do constant
+ * folding in __builtin_bswap*() (yet), so don't set these for it.
+ */
+#if defined(CONFIG_ARCH_USE_BUILTIN_BSWAP) && !defined(__CHECKER__)
 #if GCC_VERSION >= 40400
 #define __HAVE_BUILTIN_BSWAP32__
 #define __HAVE_BUILTIN_BSWAP64__
@@ -250,7 +254,7 @@
 #if GCC_VERSION >= 40800
 #define __HAVE_BUILTIN_BSWAP16__
 #endif
-#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
+#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP && !__CHECKER__ */
 
 #if GCC_VERSION >= 50000
 #define KASAN_ABI_VERSION 4
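With the bswap builtins suppressed under sparse, a byte-order helper gated on these macros falls back to an open-coded swap for the checker while gcc still constant-folds the builtin. A sketch with a hypothetical macro name (the real swab helpers live in uapi/linux/swab.h):

	#ifdef __HAVE_BUILTIN_BSWAP32__
	#define my_swab32(x) __builtin_bswap32(x)	/* gcc folds constants */
	#else
	#define my_swab32(x) ((((x) & 0xff000000u) >> 24) | \
			      (((x) & 0x00ff0000u) >>  8) | \
			      (((x) & 0x0000ff00u) <<  8) | \
			      (((x) & 0x000000ffu) << 24))
	#endif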
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 1bb954842725..668569844d37 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -527,13 +527,14 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
  * object's lifetime is managed by something other than RCU.  That
  * "something other" might be reference counting or simple immortality.
  *
- * The seemingly unused void * variable is to validate @p is indeed a pointer
- * type. All pointer types silently cast to void *.
+ * The seemingly unused variable ___typecheck_p validates that @p is
+ * indeed a pointer type by using a pointer to typeof(*p) as the type.
+ * Taking a pointer to typeof(*p) again is needed in case p is void *.
  */
 #define lockless_dereference(p) \
 ({ \
 	typeof(p) _________p1 = READ_ONCE(p); \
-	__maybe_unused const void * const _________p2 = _________p1; \
+	typeof(*(p)) *___typecheck_p __maybe_unused; \
 	smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
 	(_________p1); \
})
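The replacement typecheck works because declaring a `typeof(*(p)) *` only compiles when p has pointer type, and it still compiles when p is `void *` (typeof(*p) is then void, and `void *` is a valid declaration), which the old `void *` assignment trick could not distinguish from an integer zero. The same idiom in isolation, under a hypothetical macro name:

	/* Compiles only if p has pointer type, including void *. */
	#define assert_is_pointer(p) \
	do { \
		typeof(*(p)) *__chk __maybe_unused; \
		(void)__chk; \
	} while (0)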
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 7e1ba14a3d78..52a81f597e72 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -1,6 +1,8 @@
 #ifndef __CPUHOTPLUG_H
 #define __CPUHOTPLUG_H
 
+#include <linux/types.h>
+
 enum cpuhp_state {
 	CPUHP_OFFLINE,
 	CPUHP_CREATE_THREADS,
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 7f5a58225385..0148a3046b48 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -118,6 +118,15 @@ typedef struct {
 	u32 imagesize;
 } efi_capsule_header_t;
 
+struct efi_boot_memmap {
+	efi_memory_desc_t	**map;
+	unsigned long		*map_size;
+	unsigned long		*desc_size;
+	u32			*desc_ver;
+	unsigned long		*key_ptr;
+	unsigned long		*buff_size;
+};
+
 /*
  * EFI capsule flags
  */
@@ -946,7 +955,7 @@ extern int efi_memattr_apply_permissions(struct mm_struct *mm,
 /* Iterate through an efi_memory_map */
 #define for_each_efi_memory_desc_in_map(m, md)				\
 	for ((md) = (m)->map;						\
-	     ((void *)(md) + (m)->desc_size) <= (m)->map_end;		\
+	     (md) && ((void *)(md) + (m)->desc_size) <= (m)->map_end;	\
 	     (md) = (void *)(md) + (m)->desc_size)
 
 /**
@@ -1371,11 +1380,7 @@ char *efi_convert_cmdline(efi_system_table_t *sys_table_arg,
 			  efi_loaded_image_t *image, int *cmd_line_len);
 
 efi_status_t efi_get_memory_map(efi_system_table_t *sys_table_arg,
-				efi_memory_desc_t **map,
-				unsigned long *map_size,
-				unsigned long *desc_size,
-				u32 *desc_ver,
-				unsigned long *key_ptr);
+				struct efi_boot_memmap *map);
 
 efi_status_t efi_low_alloc(efi_system_table_t *sys_table_arg,
 			   unsigned long size, unsigned long align,
@@ -1457,4 +1462,14 @@ extern void efi_call_virt_check_flags(unsigned long flags, const char *call);
 	arch_efi_call_virt_teardown();					\
 })
 
+typedef efi_status_t (*efi_exit_boot_map_processing)(
+	efi_system_table_t *sys_table_arg,
+	struct efi_boot_memmap *map,
+	void *priv);
+
+efi_status_t efi_exit_boot_services(efi_system_table_t *sys_table,
+				    void *handle,
+				    struct efi_boot_memmap *map,
+				    void *priv,
+				    efi_exit_boot_map_processing priv_func);
 #endif /* _LINUX_EFI_H */
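The new efi_exit_boot_services() takes a callback that runs between the final memory-map read and the actual ExitBootServices() call, so arch code can snapshot the map race-free. A hedged sketch of what a priv_func might look like; the function and struct names here are illustrative, not taken from the patch:

	struct priv_data { unsigned long runtime_entries; };

	static efi_status_t count_runtime_regions(efi_system_table_t *sys_table_arg,
						  struct efi_boot_memmap *map,
						  void *priv)
	{
		struct priv_data *p = priv;
		unsigned long i, n = *map->map_size / *map->desc_size;
		efi_memory_desc_t *md;

		p->runtime_entries = 0;
		for (i = 0; i < n; i++) {
			/* entries are desc_size apart, not sizeof(*md) */
			md = (void *)*map->map + i * *map->desc_size;
			if (md->attribute & EFI_MEMORY_RUNTIME)
				p->runtime_entries++;
		}
		return EFI_SUCCESS;
	}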
diff --git a/include/linux/fence.h b/include/linux/fence.h
index 8cc719a63728..2ac6fa5f4712 100644
--- a/include/linux/fence.h
+++ b/include/linux/fence.h
@@ -49,8 +49,6 @@ struct fence_cb;
  * @timestamp: Timestamp when the fence was signaled.
  * @status: Optional, only valid if < 0, must be set before calling
  * fence_signal, indicates that the fence has completed with an error.
- * @child_list: list of children fences
- * @active_list: list of active fences
  *
  * the flags member must be manipulated and read using the appropriate
  * atomic ops (bit_*), so taking the spinlock will not be needed most
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 3523bf62f328..901e25d495cc 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -574,6 +574,7 @@ static inline void mapping_allow_writable(struct address_space *mapping)
 
 struct posix_acl;
 #define ACL_NOT_CACHED ((void *)(-1))
+#define ACL_DONT_CACHE ((void *)(-3))
 
 static inline struct posix_acl *
 uncached_acl_sentinel(struct task_struct *task)
diff --git a/include/linux/fscrypto.h b/include/linux/fscrypto.h
index cfa6cde25f8e..76cff18bb032 100644
--- a/include/linux/fscrypto.h
+++ b/include/linux/fscrypto.h
@@ -274,8 +274,7 @@ extern void fscrypt_restore_control_page(struct page *);
 extern int fscrypt_zeroout_range(struct inode *, pgoff_t, sector_t,
 				 unsigned int);
 /* policy.c */
-extern int fscrypt_process_policy(struct inode *,
-				  const struct fscrypt_policy *);
+extern int fscrypt_process_policy(struct file *, const struct fscrypt_policy *);
 extern int fscrypt_get_policy(struct inode *, struct fscrypt_policy *);
 extern int fscrypt_has_permitted_context(struct inode *, struct inode *);
 extern int fscrypt_inherit_context(struct inode *, struct inode *,
@@ -345,7 +344,7 @@ static inline int fscrypt_notsupp_zeroout_range(struct inode *i, pgoff_t p,
 }
 
 /* policy.c */
-static inline int fscrypt_notsupp_process_policy(struct inode *i,
+static inline int fscrypt_notsupp_process_policy(struct file *f,
 				const struct fscrypt_policy *p)
 {
 	return -EOPNOTSUPP;
diff --git a/include/linux/host1x.h b/include/linux/host1x.h
index d2ba7d334039..1ffbf2a8cb99 100644
--- a/include/linux/host1x.h
+++ b/include/linux/host1x.h
@@ -304,6 +304,8 @@ struct tegra_mipi_device;
 
 struct tegra_mipi_device *tegra_mipi_request(struct device *device);
 void tegra_mipi_free(struct tegra_mipi_device *device);
+int tegra_mipi_enable(struct tegra_mipi_device *device);
+int tegra_mipi_disable(struct tegra_mipi_device *device);
 int tegra_mipi_calibrate(struct tegra_mipi_device *device);
 
 #endif
diff --git a/include/linux/iio/sw_trigger.h b/include/linux/iio/sw_trigger.h
index 5198f8ed08a4..c97eab67558f 100644
--- a/include/linux/iio/sw_trigger.h
+++ b/include/linux/iio/sw_trigger.h
@@ -62,7 +62,7 @@ void iio_swt_group_init_type_name(struct iio_sw_trigger *t,
 			       const char *name,
 			       struct config_item_type *type)
 {
-#ifdef CONFIG_CONFIGFS_FS
+#if IS_ENABLED(CONFIG_CONFIGFS_FS)
 	config_group_init_type_name(&t->group, name, type);
 #endif
 }
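IS_ENABLED(CONFIG_FOO) is true for both built-in (=y) and modular (=m) configurations, while a plain #ifdef only catches =y; with configfs built as a module, the old test silently compiled the config_group initialization out. Roughly:

	/* CONFIG_CONFIGFS_FS=m defines CONFIG_CONFIGFS_FS_MODULE,
	 * not CONFIG_CONFIGFS_FS */
	#ifdef CONFIG_CONFIGFS_FS		/* false for =m */
	#endif
	#if IS_ENABLED(CONFIG_CONFIGFS_FS)	/* true for =y and =m */
	#endif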
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index 3267df461012..3d70ece10313 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -19,6 +19,11 @@ struct vm_fault;
 #define IOMAP_UNWRITTEN	0x04	/* blocks allocated @blkno in unwritten state */
 
 /*
+ * Flags for iomap mappings:
+ */
+#define IOMAP_F_MERGED	0x01	/* contains multiple blocks/extents */
+
+/*
  * Magic value for blkno:
  */
 #define IOMAP_NULL_BLOCK -1LL	/* blkno is not valid */
@@ -27,7 +32,8 @@ struct iomap {
 	sector_t	blkno;	/* 1st sector of mapping, 512b units */
 	loff_t		offset;	/* file offset of mapping, bytes */
 	u64		length;	/* length of mapping, bytes */
-	int		type;	/* type of mapping */
+	u16		type;	/* type of mapping */
+	u16		flags;	/* flags for mapping */
 	struct block_device	*bdev;	/* block device for I/O */
 };
 
diff --git a/include/linux/irq.h b/include/linux/irq.h
index b52424eaa0ed..0ac26c892fe2 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -945,6 +945,16 @@ static inline void irq_gc_lock(struct irq_chip_generic *gc) { }
 static inline void irq_gc_unlock(struct irq_chip_generic *gc) { }
 #endif
 
+/*
+ * The irqsave variants are for usage in non interrupt code. Do not use
+ * them in irq_chip callbacks. Use irq_gc_lock() instead.
+ */
+#define irq_gc_lock_irqsave(gc, flags)	\
+	raw_spin_lock_irqsave(&(gc)->lock, flags)
+
+#define irq_gc_unlock_irqrestore(gc, flags)	\
+	raw_spin_unlock_irqrestore(&(gc)->lock, flags)
+
 static inline void irq_reg_writel(struct irq_chip_generic *gc,
 				  u32 val, int reg_offset)
 {
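A sketch of the intended call pattern from process context, where local interrupts are still enabled and must be saved around the chip lock (function name and register offset are illustrative only):

	static void example_mask_from_task(struct irq_chip_generic *gc, u32 bit)
	{
		unsigned long flags;

		irq_gc_lock_irqsave(gc, flags);	/* also disables local irqs */
		irq_reg_writel(gc, irq_reg_readl(gc, 0x04) | bit, 0x04);
		irq_gc_unlock_irqrestore(gc, flags);
	}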
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index 56b0b7ec66aa..99ac022edc60 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -337,6 +337,7 @@
  */
 #define E_ITS_MOVI_UNMAPPED_INTERRUPT		0x010107
 #define E_ITS_MOVI_UNMAPPED_COLLECTION		0x010109
+#define E_ITS_INT_UNMAPPED_INTERRUPT		0x010307
 #define E_ITS_CLEAR_UNMAPPED_INTERRUPT		0x010507
 #define E_ITS_MAPD_DEVICE_OOR			0x010801
 #define E_ITS_MAPC_PROCNUM_OOR			0x010902
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 01e908ac4a39..9c28b4d4c90b 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1113,9 +1113,21 @@ struct kvm_device {
 /* create, destroy, and name are mandatory */
 struct kvm_device_ops {
 	const char *name;
+
+	/*
+	 * create is called holding kvm->lock and any operations not suitable
+	 * to do while holding the lock should be deferred to init (see
+	 * below).
+	 */
 	int (*create)(struct kvm_device *dev, u32 type);
 
 	/*
+	 * init is called after create if create is successful and is called
+	 * outside of holding kvm->lock.
+	 */
+	void (*init)(struct kvm_device *dev);
+
+	/*
 	 * Destroy is responsible for freeing dev.
 	 *
 	 * Destroy may be called before or after destructors are called
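A backend that must take kvm->lock itself, or do lengthy setup, would now split its construction across the two callbacks, roughly like this (hypothetical ops, not from the patch):

	struct example_state { int dummy; };

	static int example_create(struct kvm_device *dev, u32 type)
	{
		/* kvm->lock is held here: allocation and cheap setup only */
		dev->private = kzalloc(sizeof(struct example_state), GFP_KERNEL);
		return dev->private ? 0 : -ENOMEM;
	}

	static void example_init(struct kvm_device *dev)
	{
		/* kvm->lock is NOT held: safe to take it or do heavy work */
	}

	static struct kvm_device_ops example_ops = {
		.name	= "example",
		.create	= example_create,
		.init	= example_init,
	};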
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 4429d255c8ab..5e5b2969d931 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -195,6 +195,7 @@ static inline bool vma_migratable(struct vm_area_struct *vma)
 }
 
 extern int mpol_misplaced(struct page *, struct vm_area_struct *, unsigned long);
+extern void mpol_put_task_policy(struct task_struct *);
 
 #else
 
@@ -297,5 +298,8 @@ static inline int mpol_misplaced(struct page *page, struct vm_area_struct *vma,
 	return -1; /* no node preference */
 }
 
+static inline void mpol_put_task_policy(struct task_struct *task)
+{
+}
 #endif /* CONFIG_NUMA */
 #endif
diff --git a/include/linux/mfd/da8xx-cfgchip.h b/include/linux/mfd/da8xx-cfgchip.h
new file mode 100644
index 000000000000..304985e288d2
--- /dev/null
+++ b/include/linux/mfd/da8xx-cfgchip.h
@@ -0,0 +1,153 @@
+/*
+ * TI DaVinci DA8xx CHIPCFGx registers for syscon consumers.
+ *
+ * Copyright (C) 2016 David Lechner <david@lechnology.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LINUX_MFD_DA8XX_CFGCHIP_H
+#define __LINUX_MFD_DA8XX_CFGCHIP_H
+
+#include <linux/bitops.h>
+
+/* register offset (32-bit registers) */
+#define CFGCHIP(n)				((n) * 4)
+
+/* CFGCHIP0 (PLL0/EDMA3_0) register bits */
+#define CFGCHIP0_PLL_MASTER_LOCK		BIT(4)
+#define CFGCHIP0_EDMA30TC1DBS(n)		((n) << 2)
+#define CFGCHIP0_EDMA30TC1DBS_MASK		CFGCHIP0_EDMA30TC1DBS(0x3)
+#define CFGCHIP0_EDMA30TC1DBS_16		CFGCHIP0_EDMA30TC1DBS(0x0)
+#define CFGCHIP0_EDMA30TC1DBS_32		CFGCHIP0_EDMA30TC1DBS(0x1)
+#define CFGCHIP0_EDMA30TC1DBS_64		CFGCHIP0_EDMA30TC1DBS(0x2)
+#define CFGCHIP0_EDMA30TC0DBS(n)		((n) << 0)
+#define CFGCHIP0_EDMA30TC0DBS_MASK		CFGCHIP0_EDMA30TC0DBS(0x3)
+#define CFGCHIP0_EDMA30TC0DBS_16		CFGCHIP0_EDMA30TC0DBS(0x0)
+#define CFGCHIP0_EDMA30TC0DBS_32		CFGCHIP0_EDMA30TC0DBS(0x1)
+#define CFGCHIP0_EDMA30TC0DBS_64		CFGCHIP0_EDMA30TC0DBS(0x2)
+
+/* CFGCHIP1 (eCAP/HPI/EDMA3_1/eHRPWM TBCLK/McASP0 AMUTEIN) register bits */
+#define CFGCHIP1_CAP2SRC(n)			((n) << 27)
+#define CFGCHIP1_CAP2SRC_MASK			CFGCHIP1_CAP2SRC(0x1f)
+#define CFGCHIP1_CAP2SRC_ECAP_PIN		CFGCHIP1_CAP2SRC(0x0)
+#define CFGCHIP1_CAP2SRC_MCASP0_TX		CFGCHIP1_CAP2SRC(0x1)
+#define CFGCHIP1_CAP2SRC_MCASP0_RX		CFGCHIP1_CAP2SRC(0x2)
+#define CFGCHIP1_CAP2SRC_EMAC_C0_RX_THRESHOLD	CFGCHIP1_CAP2SRC(0x7)
+#define CFGCHIP1_CAP2SRC_EMAC_C0_RX		CFGCHIP1_CAP2SRC(0x8)
+#define CFGCHIP1_CAP2SRC_EMAC_C0_TX		CFGCHIP1_CAP2SRC(0x9)
+#define CFGCHIP1_CAP2SRC_EMAC_C0_MISC		CFGCHIP1_CAP2SRC(0xa)
+#define CFGCHIP1_CAP2SRC_EMAC_C1_RX_THRESHOLD	CFGCHIP1_CAP2SRC(0xb)
+#define CFGCHIP1_CAP2SRC_EMAC_C1_RX		CFGCHIP1_CAP2SRC(0xc)
+#define CFGCHIP1_CAP2SRC_EMAC_C1_TX		CFGCHIP1_CAP2SRC(0xd)
+#define CFGCHIP1_CAP2SRC_EMAC_C1_MISC		CFGCHIP1_CAP2SRC(0xe)
+#define CFGCHIP1_CAP2SRC_EMAC_C2_RX_THRESHOLD	CFGCHIP1_CAP2SRC(0xf)
+#define CFGCHIP1_CAP2SRC_EMAC_C2_RX		CFGCHIP1_CAP2SRC(0x10)
+#define CFGCHIP1_CAP2SRC_EMAC_C2_TX		CFGCHIP1_CAP2SRC(0x11)
+#define CFGCHIP1_CAP2SRC_EMAC_C2_MISC		CFGCHIP1_CAP2SRC(0x12)
+#define CFGCHIP1_CAP1SRC(n)			((n) << 22)
+#define CFGCHIP1_CAP1SRC_MASK			CFGCHIP1_CAP1SRC(0x1f)
+#define CFGCHIP1_CAP1SRC_ECAP_PIN		CFGCHIP1_CAP1SRC(0x0)
+#define CFGCHIP1_CAP1SRC_MCASP0_TX		CFGCHIP1_CAP1SRC(0x1)
+#define CFGCHIP1_CAP1SRC_MCASP0_RX		CFGCHIP1_CAP1SRC(0x2)
+#define CFGCHIP1_CAP1SRC_EMAC_C0_RX_THRESHOLD	CFGCHIP1_CAP1SRC(0x7)
+#define CFGCHIP1_CAP1SRC_EMAC_C0_RX		CFGCHIP1_CAP1SRC(0x8)
+#define CFGCHIP1_CAP1SRC_EMAC_C0_TX		CFGCHIP1_CAP1SRC(0x9)
+#define CFGCHIP1_CAP1SRC_EMAC_C0_MISC		CFGCHIP1_CAP1SRC(0xa)
+#define CFGCHIP1_CAP1SRC_EMAC_C1_RX_THRESHOLD	CFGCHIP1_CAP1SRC(0xb)
+#define CFGCHIP1_CAP1SRC_EMAC_C1_RX		CFGCHIP1_CAP1SRC(0xc)
+#define CFGCHIP1_CAP1SRC_EMAC_C1_TX		CFGCHIP1_CAP1SRC(0xd)
+#define CFGCHIP1_CAP1SRC_EMAC_C1_MISC		CFGCHIP1_CAP1SRC(0xe)
+#define CFGCHIP1_CAP1SRC_EMAC_C2_RX_THRESHOLD	CFGCHIP1_CAP1SRC(0xf)
+#define CFGCHIP1_CAP1SRC_EMAC_C2_RX		CFGCHIP1_CAP1SRC(0x10)
+#define CFGCHIP1_CAP1SRC_EMAC_C2_TX		CFGCHIP1_CAP1SRC(0x11)
+#define CFGCHIP1_CAP1SRC_EMAC_C2_MISC		CFGCHIP1_CAP1SRC(0x12)
+#define CFGCHIP1_CAP0SRC(n)			((n) << 17)
+#define CFGCHIP1_CAP0SRC_MASK			CFGCHIP1_CAP0SRC(0x1f)
+#define CFGCHIP1_CAP0SRC_ECAP_PIN		CFGCHIP1_CAP0SRC(0x0)
+#define CFGCHIP1_CAP0SRC_MCASP0_TX		CFGCHIP1_CAP0SRC(0x1)
+#define CFGCHIP1_CAP0SRC_MCASP0_RX		CFGCHIP1_CAP0SRC(0x2)
+#define CFGCHIP1_CAP0SRC_EMAC_C0_RX_THRESHOLD	CFGCHIP1_CAP0SRC(0x7)
+#define CFGCHIP1_CAP0SRC_EMAC_C0_RX		CFGCHIP1_CAP0SRC(0x8)
+#define CFGCHIP1_CAP0SRC_EMAC_C0_TX		CFGCHIP1_CAP0SRC(0x9)
+#define CFGCHIP1_CAP0SRC_EMAC_C0_MISC		CFGCHIP1_CAP0SRC(0xa)
+#define CFGCHIP1_CAP0SRC_EMAC_C1_RX_THRESHOLD	CFGCHIP1_CAP0SRC(0xb)
+#define CFGCHIP1_CAP0SRC_EMAC_C1_RX		CFGCHIP1_CAP0SRC(0xc)
+#define CFGCHIP1_CAP0SRC_EMAC_C1_TX		CFGCHIP1_CAP0SRC(0xd)
+#define CFGCHIP1_CAP0SRC_EMAC_C1_MISC		CFGCHIP1_CAP0SRC(0xe)
+#define CFGCHIP1_CAP0SRC_EMAC_C2_RX_THRESHOLD	CFGCHIP1_CAP0SRC(0xf)
+#define CFGCHIP1_CAP0SRC_EMAC_C2_RX		CFGCHIP1_CAP0SRC(0x10)
+#define CFGCHIP1_CAP0SRC_EMAC_C2_TX		CFGCHIP1_CAP0SRC(0x11)
+#define CFGCHIP1_CAP0SRC_EMAC_C2_MISC		CFGCHIP1_CAP0SRC(0x12)
+#define CFGCHIP1_HPIBYTEAD			BIT(16)
+#define CFGCHIP1_HPIENA				BIT(15)
+#define CFGCHIP0_EDMA31TC0DBS(n)		((n) << 13)
+#define CFGCHIP0_EDMA31TC0DBS_MASK		CFGCHIP0_EDMA31TC0DBS(0x3)
+#define CFGCHIP0_EDMA31TC0DBS_16		CFGCHIP0_EDMA31TC0DBS(0x0)
+#define CFGCHIP0_EDMA31TC0DBS_32		CFGCHIP0_EDMA31TC0DBS(0x1)
+#define CFGCHIP0_EDMA31TC0DBS_64		CFGCHIP0_EDMA31TC0DBS(0x2)
+#define CFGCHIP1_TBCLKSYNC			BIT(12)
+#define CFGCHIP1_AMUTESEL0(n)			((n) << 0)
+#define CFGCHIP1_AMUTESEL0_MASK			CFGCHIP1_AMUTESEL0(0xf)
+#define CFGCHIP1_AMUTESEL0_LOW			CFGCHIP1_AMUTESEL0(0x0)
+#define CFGCHIP1_AMUTESEL0_BANK_0		CFGCHIP1_AMUTESEL0(0x1)
+#define CFGCHIP1_AMUTESEL0_BANK_1		CFGCHIP1_AMUTESEL0(0x2)
+#define CFGCHIP1_AMUTESEL0_BANK_2		CFGCHIP1_AMUTESEL0(0x3)
+#define CFGCHIP1_AMUTESEL0_BANK_3		CFGCHIP1_AMUTESEL0(0x4)
+#define CFGCHIP1_AMUTESEL0_BANK_4		CFGCHIP1_AMUTESEL0(0x5)
+#define CFGCHIP1_AMUTESEL0_BANK_5		CFGCHIP1_AMUTESEL0(0x6)
+#define CFGCHIP1_AMUTESEL0_BANK_6		CFGCHIP1_AMUTESEL0(0x7)
+#define CFGCHIP1_AMUTESEL0_BANK_7		CFGCHIP1_AMUTESEL0(0x8)
+
+/* CFGCHIP2 (USB PHY) register bits */
+#define CFGCHIP2_PHYCLKGD			BIT(17)
+#define CFGCHIP2_VBUSSENSE			BIT(16)
+#define CFGCHIP2_RESET				BIT(15)
+#define CFGCHIP2_OTGMODE(n)			((n) << 13)
+#define CFGCHIP2_OTGMODE_MASK			CFGCHIP2_OTGMODE(0x3)
+#define CFGCHIP2_OTGMODE_NO_OVERRIDE		CFGCHIP2_OTGMODE(0x0)
+#define CFGCHIP2_OTGMODE_FORCE_HOST		CFGCHIP2_OTGMODE(0x1)
+#define CFGCHIP2_OTGMODE_FORCE_DEVICE		CFGCHIP2_OTGMODE(0x2)
+#define CFGCHIP2_OTGMODE_FORCE_HOST_VBUS_LOW	CFGCHIP2_OTGMODE(0x3)
+#define CFGCHIP2_USB1PHYCLKMUX			BIT(12)
+#define CFGCHIP2_USB2PHYCLKMUX			BIT(11)
+#define CFGCHIP2_PHYPWRDN			BIT(10)
+#define CFGCHIP2_OTGPWRDN			BIT(9)
+#define CFGCHIP2_DATPOL				BIT(8)
+#define CFGCHIP2_USB1SUSPENDM			BIT(7)
+#define CFGCHIP2_PHY_PLLON			BIT(6)
+#define CFGCHIP2_SESENDEN			BIT(5)
+#define CFGCHIP2_VBDTCTEN			BIT(4)
+#define CFGCHIP2_REFFREQ(n)			((n) << 0)
+#define CFGCHIP2_REFFREQ_MASK			CFGCHIP2_REFFREQ(0xf)
+#define CFGCHIP2_REFFREQ_12MHZ			CFGCHIP2_REFFREQ(0x1)
+#define CFGCHIP2_REFFREQ_24MHZ			CFGCHIP2_REFFREQ(0x2)
+#define CFGCHIP2_REFFREQ_48MHZ			CFGCHIP2_REFFREQ(0x3)
+#define CFGCHIP2_REFFREQ_19_2MHZ		CFGCHIP2_REFFREQ(0x4)
+#define CFGCHIP2_REFFREQ_38_4MHZ		CFGCHIP2_REFFREQ(0x5)
+#define CFGCHIP2_REFFREQ_13MHZ			CFGCHIP2_REFFREQ(0x6)
+#define CFGCHIP2_REFFREQ_26MHZ			CFGCHIP2_REFFREQ(0x7)
+#define CFGCHIP2_REFFREQ_20MHZ			CFGCHIP2_REFFREQ(0x8)
+#define CFGCHIP2_REFFREQ_40MHZ			CFGCHIP2_REFFREQ(0x9)
+
+/* CFGCHIP3 (EMAC/uPP/PLL1/ASYNC3/PRU/DIV4.5/EMIFA) register bits */
+#define CFGCHIP3_RMII_SEL			BIT(8)
+#define CFGCHIP3_UPP_TX_CLKSRC			BIT(6)
+#define CFGCHIP3_PLL1_MASTER_LOCK		BIT(5)
+#define CFGCHIP3_ASYNC3_CLKSRC			BIT(4)
+#define CFGCHIP3_PRUEVTSEL			BIT(3)
+#define CFGCHIP3_DIV45PENA			BIT(2)
+#define CFGCHIP3_EMA_CLKSRC			BIT(1)
+
+/* CFGCHIP4 (McASP0 AMUNTEIN) register bits */
+#define CFGCHIP4_AMUTECLR0			BIT(0)
+
+#endif /* __LINUX_MFD_DA8XX_CFGCHIP_H */
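These definitions are meant for syscon/regmap consumers. A typical (hypothetical) use from a driver that has already looked up the CFGCHIP syscon would be along these lines:

	#include <linux/mfd/da8xx-cfgchip.h>
	#include <linux/mfd/syscon.h>
	#include <linux/regmap.h>

	static int example_force_usb_host(struct regmap *cfgchip)
	{
		/* Select host mode in the CFGCHIP2 OTGMODE field. */
		return regmap_update_bits(cfgchip, CFGCHIP(2),
					  CFGCHIP2_OTGMODE_MASK,
					  CFGCHIP2_OTGMODE_FORCE_HOST);
	}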
diff --git a/include/linux/mfd/ti_am335x_tscadc.h b/include/linux/mfd/ti_am335x_tscadc.h
index 2567a87872b0..7f55b8b41032 100644
--- a/include/linux/mfd/ti_am335x_tscadc.h
+++ b/include/linux/mfd/ti_am335x_tscadc.h
@@ -138,16 +138,16 @@
 /*
  * time in us for processing a single channel, calculated as follows:
  *
- * num cycles = open delay + (sample delay + conv time) * averaging
+ * max num cycles = open delay + (sample delay + conv time) * averaging
  *
- * num cycles: 152 + (1 + 13) * 16 = 376
+ * max num cycles: 262143 + (255 + 13) * 16 = 266431
  *
  * clock frequency: 26MHz / 8 = 3.25MHz
  * clock period: 1 / 3.25MHz = 308ns
  *
- * processing time: 376 * 308ns = 116us
+ * max processing time: 266431 * 308ns = 83ms(approx)
  */
-#define IDLE_TIMEOUT 116 /* microsec */
+#define IDLE_TIMEOUT 83 /* milliseconds */
 
 #define TSCADC_CELLS		2
 
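The revised worst case in that comment checks out: 262143 + (255 + 13) * 16 = 262143 + 4288 = 266431 cycles, and 266431 * 308 ns ≈ 82.1 ms, which the patch rounds up to 83 ms. The old value assumed the minimum delays rather than the maximum the hardware registers allow, so a fully dialed-up configuration could time out spuriously.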
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 21bc4557b67a..d1f9a581aca8 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -6710,9 +6710,10 @@ struct mlx5_ifc_pude_reg_bits {
 };
 
 struct mlx5_ifc_ptys_reg_bits {
-	u8         an_disable_cap[0x1];
+	u8         reserved_at_0[0x1];
 	u8         an_disable_admin[0x1];
-	u8         reserved_at_2[0x6];
+	u8         an_disable_cap[0x1];
+	u8         reserved_at_3[0x5];
 	u8         local_port[0x8];
 	u8         reserved_at_10[0xd];
 	u8         proto_mask[0x3];
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 08ed53eeedd5..ef815b9cd426 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2014,6 +2014,7 @@ extern void mm_drop_all_locks(struct mm_struct *mm);
 
 extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
 extern struct file *get_mm_exe_file(struct mm_struct *mm);
+extern struct file *get_task_exe_file(struct task_struct *task);
 
 extern bool may_expand_vm(struct mm_struct *, vm_flags_t, unsigned long npages);
 extern void vm_stat_account(struct mm_struct *, vm_flags_t, long npages);
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index f2e4e90621ec..7f2ae99e5daf 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -68,8 +68,10 @@ extern char * const migratetype_names[MIGRATE_TYPES];
 
 #ifdef CONFIG_CMA
 #  define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
+#  define is_migrate_cma_page(_page) (get_pageblock_migratetype(_page) == MIGRATE_CMA)
 #else
 #  define is_migrate_cma(migratetype) false
+#  define is_migrate_cma_page(_page) false
 #endif
 
 #define for_each_migratetype_order(order, type) \
@@ -826,9 +828,21 @@ unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
  */
 #define zone_idx(zone)		((zone) - (zone)->zone_pgdat->node_zones)
 
-static inline int populated_zone(struct zone *zone)
+/*
+ * Returns true if a zone has pages managed by the buddy allocator.
+ * All the reclaim decisions have to use this function rather than
+ * populated_zone(). If the whole zone is reserved then we can easily
+ * end up with populated_zone() && !managed_zone().
+ */
+static inline bool managed_zone(struct zone *zone)
+{
+	return zone->managed_pages;
+}
+
+/* Returns true if a zone has memory */
+static inline bool populated_zone(struct zone *zone)
 {
-	return (!!zone->present_pages);
+	return zone->present_pages;
 }
 
 extern int movable_zone;
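A reclaim-side loop is now written against managed_zone() rather than populated_zone(); a sketch of the pattern (function name is illustrative):

	static bool example_any_reclaimable_zone(pg_data_t *pgdat)
	{
		int i;

		for (i = 0; i < MAX_NR_ZONES; i++) {
			struct zone *zone = &pgdat->node_zones[i];

			/* populated_zone() could be true here with nothing
			 * on the buddy lists if the whole zone is reserved */
			if (managed_zone(zone))
				return true;
		}
		return false;
	}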
diff --git a/include/linux/msi.h b/include/linux/msi.h
index 4f0bfe5912b2..e8c81fbd5f9c 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -270,6 +270,8 @@ enum {
 	MSI_FLAG_MULTI_PCI_MSI		= (1 << 2),
 	/* Support PCI MSIX interrupts */
 	MSI_FLAG_PCI_MSIX		= (1 << 3),
+	/* Needs early activate, required for PCI */
+	MSI_FLAG_ACTIVATE_EARLY		= (1 << 4),
 };
 
 int msi_domain_set_affinity(struct irq_data *data, const struct cpumask *mask,
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 076df5360ba5..e8d79d4ebcfe 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -3267,6 +3267,7 @@ static inline void napi_free_frags(struct napi_struct *napi)
 	napi->skb = NULL;
 }
 
+bool netdev_is_rx_handler_busy(struct net_device *dev);
 int netdev_rx_handler_register(struct net_device *dev,
 			       rx_handler_func_t *rx_handler,
 			       void *rx_handler_data);
@@ -3891,8 +3892,7 @@ void netdev_default_l2upper_neigh_destroy(struct net_device *dev,
 extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN] __read_mostly;
 void netdev_rss_key_fill(void *buffer, size_t len);
 
-int dev_get_nest_level(struct net_device *dev,
-		       bool (*type_check)(const struct net_device *dev));
+int dev_get_nest_level(struct net_device *dev);
 int skb_checksum_help(struct sk_buff *skb);
 struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
 				  netdev_features_t features, bool tx_path);
diff --git a/include/linux/netfilter/nfnetlink_acct.h b/include/linux/netfilter/nfnetlink_acct.h
index 80ca889b164e..664da0048625 100644
--- a/include/linux/netfilter/nfnetlink_acct.h
+++ b/include/linux/netfilter/nfnetlink_acct.h
@@ -15,6 +15,6 @@ struct nf_acct;
 struct nf_acct *nfnl_acct_find_get(struct net *net, const char *filter_name);
 void nfnl_acct_put(struct nf_acct *acct);
 void nfnl_acct_update(const struct sk_buff *skb, struct nf_acct *nfacct);
-extern int nfnl_acct_overquota(const struct sk_buff *skb,
-			       struct nf_acct *nfacct);
+int nfnl_acct_overquota(struct net *net, const struct sk_buff *skb,
+			struct nf_acct *nfacct);
 #endif /* _NFNL_ACCT_H */
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index d8b37bab2887..7676557ce357 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -794,7 +794,7 @@ struct nvmf_connect_command {
 };
 
 struct nvmf_connect_data {
-	uuid_le		hostid;
+	uuid_be		hostid;
 	__le16		cntlid;
 	char		resv4[238];
 	char		subsysnqn[NVMF_NQN_FIELD_LEN];
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 2599a980340f..0ab835965669 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -683,15 +683,6 @@ struct pci_driver {
 #define	to_pci_driver(drv) container_of(drv, struct pci_driver, driver)
 
 /**
- * DEFINE_PCI_DEVICE_TABLE - macro used to describe a pci device table
- * @_table: device table name
- *
- * This macro is deprecated and should not be used in new code.
- */
-#define DEFINE_PCI_DEVICE_TABLE(_table) \
-	const struct pci_device_id _table[]
-
-/**
  * PCI_DEVICE - macro used to describe a specific pci device
  * @vend: the 16 bit PCI Vendor ID
  * @dev: the 16 bit PCI Device ID
@@ -1251,10 +1242,12 @@ resource_size_t pcibios_iov_resource_alignment(struct pci_dev *dev, int resno);
 int pci_set_vga_state(struct pci_dev *pdev, bool decode,
 		      unsigned int command_bits, u32 flags);
 
-#define PCI_IRQ_NOLEGACY	(1 << 0) /* don't use legacy interrupts */
-#define PCI_IRQ_NOMSI		(1 << 1) /* don't use MSI interrupts */
-#define PCI_IRQ_NOMSIX		(1 << 2) /* don't use MSI-X interrupts */
-#define PCI_IRQ_NOAFFINITY	(1 << 3) /* don't auto-assign affinity */
+#define PCI_IRQ_LEGACY		(1 << 0) /* allow legacy interrupts */
+#define PCI_IRQ_MSI		(1 << 1) /* allow MSI interrupts */
+#define PCI_IRQ_MSIX		(1 << 2) /* allow MSI-X interrupts */
+#define PCI_IRQ_AFFINITY	(1 << 3) /* auto-assign affinity */
+#define PCI_IRQ_ALL_TYPES \
+	(PCI_IRQ_LEGACY | PCI_IRQ_MSI | PCI_IRQ_MSIX)
 
 /* kmem_cache style wrapper around pci_alloc_consistent() */
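The inverted flag polarity matches pci_alloc_irq_vectors(), where callers state which interrupt types they will accept rather than which to avoid. A driver that can live with anything would do something like this (sketch; the vector counts are illustrative):

	static int example_setup_irqs(struct pci_dev *pdev)
	{
		int nvec = pci_alloc_irq_vectors(pdev, 1, 8, PCI_IRQ_ALL_TYPES);

		if (nvec < 0)
			return nvec;	/* no interrupt of any type available */
		return 0;
	}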
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 8ed4326164cc..2b6b43cc0dd5 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -743,7 +743,9 @@ struct perf_event_context {
 	u64				parent_gen;
 	u64				generation;
 	int				pin_count;
+#ifdef CONFIG_CGROUP_PERF
 	int				nr_cgroups;	 /* cgroup evts */
+#endif
 	void				*task_ctx_data; /* pmu specific data */
 	struct rcu_head			rcu_head;
 };
@@ -769,7 +771,9 @@ struct perf_cpu_context {
 	unsigned int			hrtimer_active;
 
 	struct pmu			*unique_pmu;
+#ifdef CONFIG_CGROUP_PERF
 	struct perf_cgroup		*cgrp;
+#endif
 };
 
 struct perf_output_handle {
diff --git a/include/linux/printk.h b/include/linux/printk.h
index 8dc155dab3ed..696a56be7d3e 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -266,39 +266,21 @@ extern asmlinkage void dump_stack(void) __cold;
  * and other debug macros are compiled out unless either DEBUG is defined
  * or CONFIG_DYNAMIC_DEBUG is set.
  */
-
-#ifdef CONFIG_PRINTK
-
-asmlinkage __printf(1, 2) __cold void __pr_emerg(const char *fmt, ...);
-asmlinkage __printf(1, 2) __cold void __pr_alert(const char *fmt, ...);
-asmlinkage __printf(1, 2) __cold void __pr_crit(const char *fmt, ...);
-asmlinkage __printf(1, 2) __cold void __pr_err(const char *fmt, ...);
-asmlinkage __printf(1, 2) __cold void __pr_warn(const char *fmt, ...);
-asmlinkage __printf(1, 2) __cold void __pr_notice(const char *fmt, ...);
-asmlinkage __printf(1, 2) __cold void __pr_info(const char *fmt, ...);
-
-#define pr_emerg(fmt, ...)	__pr_emerg(pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_alert(fmt, ...)	__pr_alert(pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_crit(fmt, ...)	__pr_crit(pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_err(fmt, ...)	__pr_err(pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_warn(fmt, ...)	__pr_warn(pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_notice(fmt, ...)	__pr_notice(pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_info(fmt, ...)	__pr_info(pr_fmt(fmt), ##__VA_ARGS__)
-
-#else
-
-#define pr_emerg(fmt, ...)	printk(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_alert(fmt, ...)	printk(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_crit(fmt, ...)	printk(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_err(fmt, ...)	printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_warn(fmt, ...)	printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_notice(fmt, ...)	printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_info(fmt, ...)	printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
-
-#endif
-
-#define pr_warning pr_warn
-
+#define pr_emerg(fmt, ...) \
+	printk(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_alert(fmt, ...) \
+	printk(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_crit(fmt, ...) \
+	printk(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_err(fmt, ...) \
+	printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_warning(fmt, ...) \
+	printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_warn pr_warning
+#define pr_notice(fmt, ...) \
+	printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_info(fmt, ...) \
+	printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
 /*
  * Like KERN_CONT, pr_cont() should only be used when continuing
  * a line with no newline ('\n') enclosed. Otherwise it defaults
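All of these expand pr_fmt(fmt) at the call site, so a file can prefix its own messages by defining pr_fmt before the includes; the conventional form at the top of a .c file is:

	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt	/* must precede includes */
	#include <linux/printk.h>

	/* pr_info("probed\n") now prints "<modname>: probed" */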
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index b1e3c57c7117..d6c4177df7cb 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -70,8 +70,16 @@ struct qed_dbcx_pfc_params {
 	u8	max_tc;
 };
 
+enum qed_dcbx_sf_ieee_type {
+	QED_DCBX_SF_IEEE_ETHTYPE,
+	QED_DCBX_SF_IEEE_TCP_PORT,
+	QED_DCBX_SF_IEEE_UDP_PORT,
+	QED_DCBX_SF_IEEE_TCP_UDP_PORT
+};
+
 struct qed_app_entry {
 	bool ethtype;
+	enum qed_dcbx_sf_ieee_type sf_ieee;
 	bool enabled;
 	u8 prio;
 	u16 proto_id;
diff --git a/include/linux/sctp.h b/include/linux/sctp.h
index de1f64318fc4..fcb4c3646173 100644
--- a/include/linux/sctp.h
+++ b/include/linux/sctp.h
@@ -705,70 +705,6 @@ typedef struct sctp_auth_chunk {
 	sctp_authhdr_t auth_hdr;
 } __packed sctp_auth_chunk_t;
 
-struct sctp_info {
-	__u32	sctpi_tag;
-	__u32	sctpi_state;
-	__u32	sctpi_rwnd;
-	__u16	sctpi_unackdata;
-	__u16	sctpi_penddata;
-	__u16	sctpi_instrms;
-	__u16	sctpi_outstrms;
-	__u32	sctpi_fragmentation_point;
-	__u32	sctpi_inqueue;
-	__u32	sctpi_outqueue;
-	__u32	sctpi_overall_error;
-	__u32	sctpi_max_burst;
-	__u32	sctpi_maxseg;
-	__u32	sctpi_peer_rwnd;
-	__u32	sctpi_peer_tag;
-	__u8	sctpi_peer_capable;
-	__u8	sctpi_peer_sack;
-	__u16	__reserved1;
-
-	/* assoc status info */
-	__u64	sctpi_isacks;
-	__u64	sctpi_osacks;
-	__u64	sctpi_opackets;
-	__u64	sctpi_ipackets;
-	__u64	sctpi_rtxchunks;
-	__u64	sctpi_outofseqtsns;
-	__u64	sctpi_idupchunks;
-	__u64	sctpi_gapcnt;
-	__u64	sctpi_ouodchunks;
-	__u64	sctpi_iuodchunks;
-	__u64	sctpi_oodchunks;
-	__u64	sctpi_iodchunks;
-	__u64	sctpi_octrlchunks;
-	__u64	sctpi_ictrlchunks;
-
-	/* primary transport info */
-	struct sockaddr_storage	sctpi_p_address;
-	__s32	sctpi_p_state;
-	__u32	sctpi_p_cwnd;
-	__u32	sctpi_p_srtt;
-	__u32	sctpi_p_rto;
-	__u32	sctpi_p_hbinterval;
-	__u32	sctpi_p_pathmaxrxt;
-	__u32	sctpi_p_sackdelay;
-	__u32	sctpi_p_sackfreq;
-	__u32	sctpi_p_ssthresh;
-	__u32	sctpi_p_partial_bytes_acked;
-	__u32	sctpi_p_flight_size;
-	__u16	sctpi_p_error;
-	__u16	__reserved2;
-
-	/* sctp sock info */
-	__u32	sctpi_s_autoclose;
-	__u32	sctpi_s_adaptation_ind;
-	__u32	sctpi_s_pd_point;
-	__u8	sctpi_s_nodelay;
-	__u8	sctpi_s_disable_fragments;
-	__u8	sctpi_s_v4mapped;
-	__u8	sctpi_s_frag_interleave;
-	__u32	sctpi_s_type;
-	__u32	__reserved3;
-};
-
 struct sctp_infox {
 	struct sctp_info *sctpinfo;
 	struct sctp_association *asoc;
diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h
index 923266cd294a..48ec7651989b 100644
--- a/include/linux/serial_8250.h
+++ b/include/linux/serial_8250.h
@@ -111,7 +111,6 @@ struct uart_8250_port {
 					 *   if no_console_suspend
 					 */
 	unsigned char		probe;
-	struct mctrl_gpios	*gpios;
 #define UART_PROBE_RSA	(1 << 0)
 
 	/*
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 6f0b3e0adc73..0f665cb26b50 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -2847,6 +2847,18 @@ static inline int skb_linearize_cow(struct sk_buff *skb)
 	       __skb_linearize(skb) : 0;
 }
 
+static __always_inline void
+__skb_postpull_rcsum(struct sk_buff *skb, const void *start, unsigned int len,
+		     unsigned int off)
+{
+	if (skb->ip_summed == CHECKSUM_COMPLETE)
+		skb->csum = csum_block_sub(skb->csum,
+					   csum_partial(start, len, 0), off);
+	else if (skb->ip_summed == CHECKSUM_PARTIAL &&
+		 skb_checksum_start_offset(skb) < 0)
+		skb->ip_summed = CHECKSUM_NONE;
+}
+
 /**
  *	skb_postpull_rcsum - update checksum for received skb after pull
  *	@skb: buffer to update
@@ -2857,36 +2869,38 @@ static inline int skb_linearize_cow(struct sk_buff *skb)
  *	update the CHECKSUM_COMPLETE checksum, or set ip_summed to
  *	CHECKSUM_NONE so that it can be recomputed from scratch.
  */
-
 static inline void skb_postpull_rcsum(struct sk_buff *skb,
 				      const void *start, unsigned int len)
 {
-	if (skb->ip_summed == CHECKSUM_COMPLETE)
-		skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
-	else if (skb->ip_summed == CHECKSUM_PARTIAL &&
-		 skb_checksum_start_offset(skb) < 0)
-		skb->ip_summed = CHECKSUM_NONE;
+	__skb_postpull_rcsum(skb, start, len, 0);
 }
 
-unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
+static __always_inline void
+__skb_postpush_rcsum(struct sk_buff *skb, const void *start, unsigned int len,
+		     unsigned int off)
+{
+	if (skb->ip_summed == CHECKSUM_COMPLETE)
+		skb->csum = csum_block_add(skb->csum,
					   csum_partial(start, len, 0), off);
+}
 
+/**
+ *	skb_postpush_rcsum - update checksum for received skb after push
+ *	@skb: buffer to update
+ *	@start: start of data after push
+ *	@len: length of data pushed
+ *
+ *	After doing a push on a received packet, you need to call this to
+ *	update the CHECKSUM_COMPLETE checksum.
+ */
 static inline void skb_postpush_rcsum(struct sk_buff *skb,
 				      const void *start, unsigned int len)
 {
-	/* For performing the reverse operation to skb_postpull_rcsum(),
-	 * we can instead of ...
-	 *
-	 *   skb->csum = csum_add(skb->csum, csum_partial(start, len, 0));
-	 *
-	 * ... just use this equivalent version here to save a few
-	 * instructions. Feeding csum of 0 in csum_partial() and later
-	 * on adding skb->csum is equivalent to feed skb->csum in the
-	 * first place.
-	 */
-	if (skb->ip_summed == CHECKSUM_COMPLETE)
-		skb->csum = csum_partial(start, len, skb->csum);
+	__skb_postpush_rcsum(skb, start, len, 0);
 }
 
+unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
+
 /**
  *	skb_push_rcsum - push skb and update receive checksum
  *	@skb: buffer to update
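The new off argument lets callers fold data in or out of a CHECKSUM_COMPLETE sum when that data does not start at the current checksum anchor; csum_block_sub()/csum_block_add() handle the odd-offset byte rotation that plain csum_sub()/csum_add() cannot. A hedged sketch of the common zero-offset pull pattern (function name illustrative; compare skb_pull_rcsum()):

	static void example_pull_header(struct sk_buff *skb, unsigned int hlen)
	{
		/* fold the header out of the complete checksum, then pull */
		skb_postpull_rcsum(skb, skb->data, hlen);
		__skb_pull(skb, hlen);
	}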
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 1a4ea551aae5..4293808d8cfb 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -155,6 +155,18 @@ void kfree(const void *);
 void kzfree(const void *);
 size_t ksize(const void *);
 
+#ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
+const char *__check_heap_object(const void *ptr, unsigned long n,
+				struct page *page);
+#else
+static inline const char *__check_heap_object(const void *ptr,
+					      unsigned long n,
+					      struct page *page)
+{
+	return NULL;
+}
+#endif
+
 /*
  * Some archs want to perform DMA into kmalloc caches and need a guaranteed
  * alignment larger than the alignment of a 64-bit integer.
diff --git a/include/linux/smc91x.h b/include/linux/smc91x.h
index 76199b75d584..e302c447e057 100644
--- a/include/linux/smc91x.h
+++ b/include/linux/smc91x.h
@@ -1,6 +1,16 @@
 #ifndef __SMC91X_H__
 #define __SMC91X_H__
 
+/*
+ * These bits define which access sizes a platform can support, rather
+ * than the maximal access size. So, if your platform can do 16-bit
+ * and 32-bit accesses to the SMC91x device, but not 8-bit, set both
+ * SMC91X_USE_16BIT and SMC91X_USE_32BIT.
+ *
+ * The SMC91x driver requires at least one of SMC91X_USE_8BIT or
+ * SMC91X_USE_16BIT to be supported - just setting SMC91X_USE_32BIT is
+ * an invalid configuration.
+ */
 #define SMC91X_USE_8BIT (1 << 0)
 #define SMC91X_USE_16BIT (1 << 1)
 #define SMC91X_USE_32BIT (1 << 2)
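Board code states every width the bus can do, not just the widest. For example, a platform with 16- and 32-bit cycles but no byte access would look roughly like this (sketch):

	#include <linux/smc91x.h>

	static struct smc91x_platdata example_smc91x_pdata = {
		/* 8-bit not wired up; 16-bit present, so this is valid */
		.flags = SMC91X_USE_16BIT | SMC91X_USE_32BIT,
	};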
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index b6810c92b8bb..5c02b0691587 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -195,6 +195,8 @@ int rpc_clnt_add_xprt(struct rpc_clnt *, struct xprt_create *,
 			struct rpc_xprt *,
 			void *),
 		void *data);
+void rpc_cap_max_reconnect_timeout(struct rpc_clnt *clnt,
+			unsigned long timeo);
 
 const char *rpc_proc_name(const struct rpc_task *task);
 #endif /* __KERNEL__ */
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index 5e3e1b63dbb3..a16070dd03ee 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -218,7 +218,8 @@ struct rpc_xprt {
 	struct work_struct	task_cleanup;
 	struct timer_list	timer;
 	unsigned long		last_used,
-				idle_timeout;
+				idle_timeout,
+				max_reconnect_timeout;
 
 	/*
 	 * Send stuff
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index 697e160c78d0..a4f7203a9017 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -42,6 +42,8 @@ extern int proc_dostring(struct ctl_table *, int,
 			  void __user *, size_t *, loff_t *);
 extern int proc_dointvec(struct ctl_table *, int,
 			 void __user *, size_t *, loff_t *);
+extern int proc_douintvec(struct ctl_table *, int,
+			  void __user *, size_t *, loff_t *);
 extern int proc_dointvec_minmax(struct ctl_table *, int,
 				void __user *, size_t *, loff_t *);
 extern int proc_dointvec_jiffies(struct ctl_table *, int,
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index 352b1542f5cc..2b5b10eed74f 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -105,6 +105,31 @@ static inline int test_ti_thread_flag(struct thread_info *ti, int flag)
 
 #define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED)
 
+#ifndef CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES
+static inline int arch_within_stack_frames(const void * const stack,
+					   const void * const stackend,
+					   const void *obj, unsigned long len)
+{
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_HARDENED_USERCOPY
+extern void __check_object_size(const void *ptr, unsigned long n,
+					bool to_user);
+
+static __always_inline void check_object_size(const void *ptr, unsigned long n,
+					      bool to_user)
+{
+	if (!__builtin_constant_p(n))
+		__check_object_size(ptr, n, to_user);
+}
+#else
+static inline void check_object_size(const void *ptr, unsigned long n,
+				     bool to_user)
+{ }
+#endif /* CONFIG_HARDENED_USERCOPY */
+
 #endif /* __KERNEL__ */
 
 #endif /* _LINUX_THREAD_INFO_H */
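Note that check_object_size() deliberately compiles away for constant sizes: __builtin_constant_p(n) is true for fixed-size copies, which the compiler can already bounds-check, so only variable-length copies pay for the runtime object walk. A sketch of how an arch copy path might wire it up (helper name and use of __copy_to_user are illustrative of the pattern, not any specific arch):

	static inline unsigned long
	example_copy_to_user(void __user *to, const void *from, unsigned long n)
	{
		check_object_size(from, n, true);	/* no-op if n is constant */
		return __copy_to_user(to, from, n);	/* arch copy primitive */
	}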
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 349557825428..f30c187ed785 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -114,8 +114,8 @@ extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count);
 #ifndef user_access_begin
 #define user_access_begin() do { } while (0)
 #define user_access_end() do { } while (0)
-#define unsafe_get_user(x, ptr) __get_user(x, ptr)
-#define unsafe_put_user(x, ptr) __put_user(x, ptr)
+#define unsafe_get_user(x, ptr, err) do { if (unlikely(__get_user(x, ptr))) goto err; } while (0)
+#define unsafe_put_user(x, ptr, err) do { if (unlikely(__put_user(x, ptr))) goto err; } while (0)
 #endif
 
 #endif /* __LINUX_UACCESS_H__ */
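The extra err parameter makes the generic fallback match the arch fast paths, which jump to a label on fault instead of returning a value. Callers are written in this shape (sketch; function name illustrative):

	static long example_read_pair(const int __user *up, int *a, int *b)
	{
		user_access_begin();
		unsafe_get_user(*a, up, efault);
		unsafe_get_user(*b, up + 1, efault);
		user_access_end();
		return 0;
	efault:
		user_access_end();
		return -EFAULT;
	}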
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 1b5d1cd796e2..75b4aaf31a9d 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -76,7 +76,7 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
 		struct iov_iter *i, unsigned long offset, size_t bytes);
 void iov_iter_advance(struct iov_iter *i, size_t bytes);
 int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
-int iov_iter_fault_in_multipages_readable(struct iov_iter *i, size_t bytes);
+#define iov_iter_fault_in_multipages_readable iov_iter_fault_in_readable
 size_t iov_iter_single_seg_count(const struct iov_iter *i);
 size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
 			 struct iov_iter *i);
diff --git a/include/net/act_api.h b/include/net/act_api.h
index 41e6a24a44b9..82f3c912a5b1 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -176,8 +176,8 @@ int tcf_register_action(struct tc_action_ops *a, struct pernet_operations *ops);
 int tcf_unregister_action(struct tc_action_ops *a,
 			  struct pernet_operations *ops);
 int tcf_action_destroy(struct list_head *actions, int bind);
-int tcf_action_exec(struct sk_buff *skb, const struct list_head *actions,
-		    struct tcf_result *res);
+int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions,
+		    int nr_actions, struct tcf_result *res);
 int tcf_action_init(struct net *net, struct nlattr *nla,
 				  struct nlattr *est, char *n, int ovr,
 				  int bind, struct list_head *);
@@ -189,30 +189,17 @@ int tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int, int);
 int tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int, int);
 int tcf_action_copy_stats(struct sk_buff *, struct tc_action *, int);
 
-#define tc_no_actions(_exts) \
-	(list_empty(&(_exts)->actions))
-
-#define tc_for_each_action(_a, _exts) \
-	list_for_each_entry(a, &(_exts)->actions, list)
-
-#define tc_single_action(_exts) \
-	(list_is_singular(&(_exts)->actions))
+#endif /* CONFIG_NET_CLS_ACT */
 
 static inline void tcf_action_stats_update(struct tc_action *a, u64 bytes,
 					   u64 packets, u64 lastuse)
 {
+#ifdef CONFIG_NET_CLS_ACT
 	if (!a->ops->stats_update)
 		return;
 
 	a->ops->stats_update(a, bytes, packets, lastuse);
+#endif
 }
 
-#else /* CONFIG_NET_CLS_ACT */
-
-#define tc_no_actions(_exts) true
-#define tc_for_each_action(_a, _exts) while ((void)(_a), 0)
-#define tc_single_action(_exts) false
-#define tcf_action_stats_update(a, bytes, packets, lastuse)
-
-#endif /* CONFIG_NET_CLS_ACT */
 #endif
diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h
index ac1bc3c49fbd..7b0f88699b25 100644
--- a/include/net/af_rxrpc.h
+++ b/include/net/af_rxrpc.h
@@ -40,12 +40,12 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *,
 					   unsigned long,
 					   gfp_t);
 int rxrpc_kernel_send_data(struct rxrpc_call *, struct msghdr *, size_t);
+void rxrpc_kernel_data_consumed(struct rxrpc_call *, struct sk_buff *);
 void rxrpc_kernel_abort_call(struct rxrpc_call *, u32);
 void rxrpc_kernel_end_call(struct rxrpc_call *);
 bool rxrpc_kernel_is_data_last(struct sk_buff *);
 u32 rxrpc_kernel_get_abort_code(struct sk_buff *);
 int rxrpc_kernel_get_error_number(struct sk_buff *);
-void rxrpc_kernel_data_delivered(struct sk_buff *);
 void rxrpc_kernel_free_skb(struct sk_buff *);
 struct rxrpc_call *rxrpc_kernel_accept_call(struct socket *, unsigned long);
 int rxrpc_kernel_reject_call(struct socket *);
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index 9b4c418bebd8..fd60eccb59a6 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -52,7 +52,7 @@ struct unix_sock {
 	struct sock		sk;
 	struct unix_address	*addr;
 	struct path		path;
-	struct mutex		readlock;
+	struct mutex		iolock, bindlock;
 	struct sock		*peer;
 	struct list_head	link;
 	atomic_long_t		inflight;
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 9c23f4d33e06..beb7610d64e9 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -1102,6 +1102,7 @@ struct station_info {
1102 struct cfg80211_tid_stats pertid[IEEE80211_NUM_TIDS + 1]; 1102 struct cfg80211_tid_stats pertid[IEEE80211_NUM_TIDS + 1];
1103}; 1103};
1104 1104
1105#if IS_ENABLED(CONFIG_CFG80211)
1105/** 1106/**
1106 * cfg80211_get_station - retrieve information about a given station 1107 * cfg80211_get_station - retrieve information about a given station
1107 * @dev: the device where the station is supposed to be connected to 1108 * @dev: the device where the station is supposed to be connected to
@@ -1114,6 +1115,14 @@ struct station_info {
1114 */ 1115 */
1115int cfg80211_get_station(struct net_device *dev, const u8 *mac_addr, 1116int cfg80211_get_station(struct net_device *dev, const u8 *mac_addr,
1116 struct station_info *sinfo); 1117 struct station_info *sinfo);
1118#else
1119static inline int cfg80211_get_station(struct net_device *dev,
1120 const u8 *mac_addr,
1121 struct station_info *sinfo)
1122{
1123 return -ENOENT;
1124}
1125#endif
1117 1126
1118/** 1127/**
1119 * enum monitor_flags - monitor flags 1128 * enum monitor_flags - monitor flags
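
The IS_ENABLED() stub added above is a standard header pattern: when CONFIG_CFG80211 is off, callers still compile and link because an inline stub stands in for the out-of-line function. A generic sketch of the idiom, with CONFIG_FOO and the foo_* names as placeholders:

#if IS_ENABLED(CONFIG_FOO)
int foo_get_info(struct net_device *dev, struct foo_info *info);
#else
static inline int foo_get_info(struct net_device *dev, struct foo_info *info)
{
	return -ENOENT;	/* same answer a real lookup miss would give */
}
#endif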
diff --git a/include/net/gre.h b/include/net/gre.h
index 7a54a31d1d4c..73ea256eb7d7 100644
--- a/include/net/gre.h
+++ b/include/net/gre.h
@@ -104,6 +104,7 @@ static inline void gre_build_header(struct sk_buff *skb, int hdr_len,
104 104
105 skb_push(skb, hdr_len); 105 skb_push(skb, hdr_len);
106 106
107 skb_set_inner_protocol(skb, proto);
107 skb_reset_transport_header(skb); 108 skb_reset_transport_header(skb);
108 greh = (struct gre_base_hdr *)skb->data; 109 greh = (struct gre_base_hdr *)skb->data;
109 greh->flags = gre_tnl_flags_to_gre_flags(flags); 110 greh->flags = gre_tnl_flags_to_gre_flags(flags);
diff --git a/include/net/inet_ecn.h b/include/net/inet_ecn.h
index 0dc0a51da38f..dce2d586d9ce 100644
--- a/include/net/inet_ecn.h
+++ b/include/net/inet_ecn.h
@@ -128,7 +128,8 @@ static inline int IP6_ECN_set_ce(struct sk_buff *skb, struct ipv6hdr *iph)
128 to = from | htonl(INET_ECN_CE << 20); 128 to = from | htonl(INET_ECN_CE << 20);
129 *(__be32 *)iph = to; 129 *(__be32 *)iph = to;
130 if (skb->ip_summed == CHECKSUM_COMPLETE) 130 if (skb->ip_summed == CHECKSUM_COMPLETE)
131 skb->csum = csum_add(csum_sub(skb->csum, from), to); 131 skb->csum = csum_add(csum_sub(skb->csum, (__force __wsum)from),
132 (__force __wsum)to);
132 return 1; 133 return 1;
133} 134}
134 135
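
The only change in this hunk is the pair of (__force __wsum) casts: __be32 and __wsum are the same width, but sparse treats them as incompatible bitwise types, so passing a __be32 to csum_add()'s __wsum parameters warns without an explicit reinterpretation. A stand-alone sketch of the rule (add_be32_to_csum() is illustrative):

#include <net/checksum.h>

/* Both types are 32 bits wide; __force tells sparse the reinterpretation
 * of the raw bits is intentional rather than a type confusion. */
static inline __wsum add_be32_to_csum(__wsum sum, __be32 word)
{
	return csum_add(sum, (__force __wsum)word);
}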
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 4079fc18ffe4..7d4a72e75f33 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -111,6 +111,7 @@ struct fib_info {
111 unsigned char fib_scope; 111 unsigned char fib_scope;
112 unsigned char fib_type; 112 unsigned char fib_type;
113 __be32 fib_prefsrc; 113 __be32 fib_prefsrc;
114 u32 fib_tb_id;
114 u32 fib_priority; 115 u32 fib_priority;
115 u32 *fib_metrics; 116 u32 *fib_metrics;
116#define fib_mtu fib_metrics[RTAX_MTU-1] 117#define fib_mtu fib_metrics[RTAX_MTU-1]
@@ -319,7 +320,7 @@ void fib_flush_external(struct net *net);
319/* Exported by fib_semantics.c */ 320/* Exported by fib_semantics.c */
320int ip_fib_check_default(__be32 gw, struct net_device *dev); 321int ip_fib_check_default(__be32 gw, struct net_device *dev);
321int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force); 322int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force);
322int fib_sync_down_addr(struct net *net, __be32 local); 323int fib_sync_down_addr(struct net_device *dev, __be32 local);
323int fib_sync_up(struct net_device *dev, unsigned int nh_flags); 324int fib_sync_up(struct net_device *dev, unsigned int nh_flags);
324 325
325extern u32 fib_multipath_secret __read_mostly; 326extern u32 fib_multipath_secret __read_mostly;
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index b4faadbb4e01..cca510a585c3 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -3620,7 +3620,8 @@ struct ieee80211_ops {
3620 3620
3621 int (*join_ibss)(struct ieee80211_hw *hw, struct ieee80211_vif *vif); 3621 int (*join_ibss)(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
3622 void (*leave_ibss)(struct ieee80211_hw *hw, struct ieee80211_vif *vif); 3622 void (*leave_ibss)(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
3623 u32 (*get_expected_throughput)(struct ieee80211_sta *sta); 3623 u32 (*get_expected_throughput)(struct ieee80211_hw *hw,
3624 struct ieee80211_sta *sta);
3624 int (*get_txpower)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 3625 int (*get_txpower)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
3625 int *dbm); 3626 int *dbm);
3626 3627
diff --git a/include/net/netfilter/nft_meta.h b/include/net/netfilter/nft_meta.h
index d27588c8dbd9..1139cde0fdc5 100644
--- a/include/net/netfilter/nft_meta.h
+++ b/include/net/netfilter/nft_meta.h
@@ -36,4 +36,8 @@ void nft_meta_set_eval(const struct nft_expr *expr,
36void nft_meta_set_destroy(const struct nft_ctx *ctx, 36void nft_meta_set_destroy(const struct nft_ctx *ctx,
37 const struct nft_expr *expr); 37 const struct nft_expr *expr);
38 38
39int nft_meta_set_validate(const struct nft_ctx *ctx,
40 const struct nft_expr *expr,
41 const struct nft_data **data);
42
39#endif 43#endif
diff --git a/include/net/netfilter/nft_reject.h b/include/net/netfilter/nft_reject.h
index 60fa1530006b..02e28c529b29 100644
--- a/include/net/netfilter/nft_reject.h
+++ b/include/net/netfilter/nft_reject.h
@@ -8,6 +8,10 @@ struct nft_reject {
8 8
9extern const struct nla_policy nft_reject_policy[]; 9extern const struct nla_policy nft_reject_policy[];
10 10
11int nft_reject_validate(const struct nft_ctx *ctx,
12 const struct nft_expr *expr,
13 const struct nft_data **data);
14
11int nft_reject_init(const struct nft_ctx *ctx, 15int nft_reject_init(const struct nft_ctx *ctx,
12 const struct nft_expr *expr, 16 const struct nft_expr *expr,
13 const struct nlattr * const tb[]); 17 const struct nlattr * const tb[]);
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index 6f8d65342d3a..c99508d426cc 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -59,7 +59,8 @@ tcf_unbind_filter(struct tcf_proto *tp, struct tcf_result *r)
59struct tcf_exts { 59struct tcf_exts {
60#ifdef CONFIG_NET_CLS_ACT 60#ifdef CONFIG_NET_CLS_ACT
61 __u32 type; /* for backward compat(TCA_OLD_COMPAT) */ 61 __u32 type; /* for backward compat(TCA_OLD_COMPAT) */
62 struct list_head actions; 62 int nr_actions;
63 struct tc_action **actions;
63#endif 64#endif
64 /* Map to export classifier specific extension TLV types to the 65 /* Map to export classifier specific extension TLV types to the
65 * generic extensions API. Unsupported extensions must be set to 0. 66 * generic extensions API. Unsupported extensions must be set to 0.
@@ -72,7 +73,10 @@ static inline void tcf_exts_init(struct tcf_exts *exts, int action, int police)
72{ 73{
73#ifdef CONFIG_NET_CLS_ACT 74#ifdef CONFIG_NET_CLS_ACT
74 exts->type = 0; 75 exts->type = 0;
75 INIT_LIST_HEAD(&exts->actions); 76 exts->nr_actions = 0;
77 exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
78 GFP_KERNEL);
79 WARN_ON(!exts->actions); /* TODO: propagate the error to callers */
76#endif 80#endif
77 exts->action = action; 81 exts->action = action;
78 exts->police = police; 82 exts->police = police;
@@ -89,7 +93,7 @@ static inline int
89tcf_exts_is_predicative(struct tcf_exts *exts) 93tcf_exts_is_predicative(struct tcf_exts *exts)
90{ 94{
91#ifdef CONFIG_NET_CLS_ACT 95#ifdef CONFIG_NET_CLS_ACT
92 return !list_empty(&exts->actions); 96 return exts->nr_actions;
93#else 97#else
94 return 0; 98 return 0;
95#endif 99#endif
@@ -108,6 +112,20 @@ tcf_exts_is_available(struct tcf_exts *exts)
108 return tcf_exts_is_predicative(exts); 112 return tcf_exts_is_predicative(exts);
109} 113}
110 114
115static inline void tcf_exts_to_list(const struct tcf_exts *exts,
116 struct list_head *actions)
117{
118#ifdef CONFIG_NET_CLS_ACT
119 int i;
120
121 for (i = 0; i < exts->nr_actions; i++) {
122 struct tc_action *a = exts->actions[i];
123
124 list_add(&a->list, actions);
125 }
126#endif
127}
128
111/** 129/**
112 * tcf_exts_exec - execute tc filter extensions 130 * tcf_exts_exec - execute tc filter extensions
113 * @skb: socket buffer 131 * @skb: socket buffer
@@ -124,12 +142,25 @@ tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
124 struct tcf_result *res) 142 struct tcf_result *res)
125{ 143{
126#ifdef CONFIG_NET_CLS_ACT 144#ifdef CONFIG_NET_CLS_ACT
127 if (!list_empty(&exts->actions)) 145 if (exts->nr_actions)
128 return tcf_action_exec(skb, &exts->actions, res); 146 return tcf_action_exec(skb, exts->actions, exts->nr_actions,
147 res);
129#endif 148#endif
130 return 0; 149 return 0;
131} 150}
132 151
152#ifdef CONFIG_NET_CLS_ACT
153
154#define tc_no_actions(_exts) ((_exts)->nr_actions == 0)
155#define tc_single_action(_exts) ((_exts)->nr_actions == 1)
156
157#else /* CONFIG_NET_CLS_ACT */
158
159#define tc_no_actions(_exts) true
160#define tc_single_action(_exts) false
161
162#endif /* CONFIG_NET_CLS_ACT */
163
133int tcf_exts_validate(struct net *net, struct tcf_proto *tp, 164int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
134 struct nlattr **tb, struct nlattr *rate_tlv, 165 struct nlattr **tb, struct nlattr *rate_tlv,
135 struct tcf_exts *exts, bool ovr); 166 struct tcf_exts *exts, bool ovr);
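
tcf_exts now stores its actions in a fixed array of TCA_ACT_MAX_PRIO pointers plus a count rather than a linked list, and tcf_exts_to_list() bridges to code that still walks a list_head; note the TODO in tcf_exts_init(), which can now fail an allocation with no way to report it. A sketch of consuming the new layout through the compatibility helper (walk_exts_actions() is illustrative):

#include <linux/list.h>
#include <net/pkt_cls.h>

static void walk_exts_actions(const struct tcf_exts *exts)
{
	struct tc_action *a;
	LIST_HEAD(actions);	/* temporary list for legacy list-based walkers */

	tcf_exts_to_list(exts, &actions);
	list_for_each_entry(a, &actions, list)
		pr_debug("tc action %p\n", a);
}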
diff --git a/include/net/tcp.h b/include/net/tcp.h
index c00e7d51bb18..7717302cab91 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1523,6 +1523,8 @@ static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unli
1523{ 1523{
1524 if (sk->sk_send_head == skb_unlinked) 1524 if (sk->sk_send_head == skb_unlinked)
1525 sk->sk_send_head = NULL; 1525 sk->sk_send_head = NULL;
1526 if (tcp_sk(sk)->highest_sack == skb_unlinked)
1527 tcp_sk(sk)->highest_sack = NULL;
1526} 1528}
1527 1529
1528static inline void tcp_init_send_head(struct sock *sk) 1530static inline void tcp_init_send_head(struct sock *sk)
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 8e90dd28bb75..e1f96737c2a1 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -2115,22 +2115,17 @@ static inline bool ib_is_udata_cleared(struct ib_udata *udata,
2115 size_t len) 2115 size_t len)
2116{ 2116{
2117 const void __user *p = udata->inbuf + offset; 2117 const void __user *p = udata->inbuf + offset;
2118 bool ret = false; 2118 bool ret;
2119 u8 *buf; 2119 u8 *buf;
2120 2120
2121 if (len > USHRT_MAX) 2121 if (len > USHRT_MAX)
2122 return false; 2122 return false;
2123 2123
2124 buf = kmalloc(len, GFP_KERNEL); 2124 buf = memdup_user(p, len);
2125 if (!buf) 2125 if (IS_ERR(buf))
2126 return false; 2126 return false;
2127 2127
2128 if (copy_from_user(buf, p, len))
2129 goto free;
2130
2131 ret = !memchr_inv(buf, 0, len); 2128 ret = !memchr_inv(buf, 0, len);
2132
2133free:
2134 kfree(buf); 2129 kfree(buf);
2135 return ret; 2130 return ret;
2136} 2131}
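
memdup_user() replaces the open-coded kmalloc() + copy_from_user() pair and its unwind label, returning an ERR_PTR on either allocation or copy failure. The general shape of the replacement pattern, sketched outside this function (read_user_params() is illustrative):

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Copy a small userspace object into a fresh kernel allocation, with one
 * error path instead of two. */
static long read_user_params(const void __user *uptr, size_t len)
{
	void *buf = memdup_user(uptr, len);

	if (IS_ERR(buf))
		return PTR_ERR(buf);	/* -ENOMEM or -EFAULT */
	/* ... inspect buf ... */
	kfree(buf);
	return 0;
}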
diff --git a/include/scsi/scsi_transport_sas.h b/include/scsi/scsi_transport_sas.h
index 13c0b2ba1b6c..73d870918939 100644
--- a/include/scsi/scsi_transport_sas.h
+++ b/include/scsi/scsi_transport_sas.h
@@ -11,12 +11,12 @@ struct sas_rphy;
11struct request; 11struct request;
12 12
13#if !IS_ENABLED(CONFIG_SCSI_SAS_ATTRS) 13#if !IS_ENABLED(CONFIG_SCSI_SAS_ATTRS)
14static inline int is_sas_attached(struct scsi_device *sdev) 14static inline int scsi_is_sas_rphy(const struct device *sdev)
15{ 15{
16 return 0; 16 return 0;
17} 17}
18#else 18#else
19extern int is_sas_attached(struct scsi_device *sdev); 19extern int scsi_is_sas_rphy(const struct device *);
20#endif 20#endif
21 21
22static inline int sas_protocol_ata(enum sas_protocol proto) 22static inline int sas_protocol_ata(enum sas_protocol proto)
@@ -202,7 +202,6 @@ extern int sas_rphy_add(struct sas_rphy *);
202extern void sas_rphy_remove(struct sas_rphy *); 202extern void sas_rphy_remove(struct sas_rphy *);
203extern void sas_rphy_delete(struct sas_rphy *); 203extern void sas_rphy_delete(struct sas_rphy *);
204extern void sas_rphy_unlink(struct sas_rphy *); 204extern void sas_rphy_unlink(struct sas_rphy *);
205extern int scsi_is_sas_rphy(const struct device *);
206 205
207struct sas_port *sas_port_alloc(struct device *, int); 206struct sas_port *sas_port_alloc(struct device *, int);
208struct sas_port *sas_port_alloc_num(struct device *); 207struct sas_port *sas_port_alloc_num(struct device *);
diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h
index 51440131d337..28c5da6fdfac 100644
--- a/include/trace/events/timer.h
+++ b/include/trace/events/timer.h
@@ -330,24 +330,32 @@ TRACE_EVENT(itimer_expire,
330#ifdef CONFIG_NO_HZ_COMMON 330#ifdef CONFIG_NO_HZ_COMMON
331 331
332#define TICK_DEP_NAMES \ 332#define TICK_DEP_NAMES \
333 tick_dep_name(NONE) \ 333 tick_dep_mask_name(NONE) \
334 tick_dep_name(POSIX_TIMER) \ 334 tick_dep_name(POSIX_TIMER) \
335 tick_dep_name(PERF_EVENTS) \ 335 tick_dep_name(PERF_EVENTS) \
336 tick_dep_name(SCHED) \ 336 tick_dep_name(SCHED) \
337 tick_dep_name_end(CLOCK_UNSTABLE) 337 tick_dep_name_end(CLOCK_UNSTABLE)
338 338
339#undef tick_dep_name 339#undef tick_dep_name
340#undef tick_dep_mask_name
340#undef tick_dep_name_end 341#undef tick_dep_name_end
341 342
342 343/* The MASKs will convert to their bits and they need to be processed too */
343#define tick_dep_name_end(sdep) TRACE_DEFINE_ENUM(TICK_DEP_MASK_##sdep); 344#define tick_dep_name(sdep) TRACE_DEFINE_ENUM(TICK_DEP_BIT_##sdep); \
345 TRACE_DEFINE_ENUM(TICK_DEP_MASK_##sdep);
346#define tick_dep_name_end(sdep) TRACE_DEFINE_ENUM(TICK_DEP_BIT_##sdep); \
347 TRACE_DEFINE_ENUM(TICK_DEP_MASK_##sdep);
348/* NONE only has a mask defined for it */
349#define tick_dep_mask_name(sdep) TRACE_DEFINE_ENUM(TICK_DEP_MASK_##sdep);
344 350
345TICK_DEP_NAMES 351TICK_DEP_NAMES
346 352
347#undef tick_dep_name 353#undef tick_dep_name
354#undef tick_dep_mask_name
348#undef tick_dep_name_end 355#undef tick_dep_name_end
349 356
350#define tick_dep_name(sdep) { TICK_DEP_MASK_##sdep, #sdep }, 357#define tick_dep_name(sdep) { TICK_DEP_MASK_##sdep, #sdep },
358#define tick_dep_mask_name(sdep) { TICK_DEP_MASK_##sdep, #sdep },
351#define tick_dep_name_end(sdep) { TICK_DEP_MASK_##sdep, #sdep } 359#define tick_dep_name_end(sdep) { TICK_DEP_MASK_##sdep, #sdep }
352 360
353#define show_tick_dep_name(val) \ 361#define show_tick_dep_name(val) \
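
The TICK_DEP_NAMES churn above is the usual trace-event X-macro dance: one list of names is expanded several times under different definitions of tick_dep_name()/tick_dep_mask_name()/tick_dep_name_end(), first to TRACE_DEFINE_ENUM() both the _BIT_ and _MASK_ enums (so user-space tools can resolve them), then to build the symbolic print table. A toy sketch of the pattern with placeholder names:

#define COLOR_NAMES \
	color_name(RED)   \
	color_name(GREEN) \
	color_name_end(BLUE)

/* first expansion: stringified and comma-separated */
#define color_name(c)     #c ", "
#define color_name_end(c) #c
static const char color_list[] = COLOR_NAMES;	/* "RED, GREEN, BLUE" */
#undef color_name
#undef color_name_end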
diff --git a/include/uapi/linux/atm_zatm.h b/include/uapi/linux/atm_zatm.h
index 9c9c6ad55f14..5cd4d4d2dd1d 100644
--- a/include/uapi/linux/atm_zatm.h
+++ b/include/uapi/linux/atm_zatm.h
@@ -14,6 +14,7 @@
14 14
15#include <linux/atmapi.h> 15#include <linux/atmapi.h>
16#include <linux/atmioc.h> 16#include <linux/atmioc.h>
17#include <linux/time.h>
17 18
18#define ZATM_GETPOOL _IOW('a',ATMIOC_SARPRV+1,struct atmif_sioc) 19#define ZATM_GETPOOL _IOW('a',ATMIOC_SARPRV+1,struct atmif_sioc)
19 /* get pool statistics */ 20 /* get pool statistics */
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index da218fec6056..9e5fc168c8a3 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -339,7 +339,7 @@ enum bpf_func_id {
339 BPF_FUNC_skb_change_type, 339 BPF_FUNC_skb_change_type,
340 340
341 /** 341 /**
342 * bpf_skb_in_cgroup(skb, map, index) - Check cgroup2 membership of skb 342 * bpf_skb_under_cgroup(skb, map, index) - Check cgroup2 membership of skb
343 * @skb: pointer to skb 343 * @skb: pointer to skb
344 * @map: pointer to bpf_map in BPF_MAP_TYPE_CGROUP_ARRAY type 344 * @map: pointer to bpf_map in BPF_MAP_TYPE_CGROUP_ARRAY type
345 * @index: index of the cgroup in the bpf_map 345 * @index: index of the cgroup in the bpf_map
@@ -348,7 +348,7 @@ enum bpf_func_id {
348 * == 1 skb succeeded the cgroup2 descendant test 348 * == 1 skb succeeded the cgroup2 descendant test
349 * < 0 error 349 * < 0 error
350 */ 350 */
351 BPF_FUNC_skb_in_cgroup, 351 BPF_FUNC_skb_under_cgroup,
352 352
353 /** 353 /**
354 * bpf_get_hash_recalc(skb) 354 * bpf_get_hash_recalc(skb)
diff --git a/include/uapi/linux/if_pppol2tp.h b/include/uapi/linux/if_pppol2tp.h
index 163e8adac2d6..4bd1f55d6377 100644
--- a/include/uapi/linux/if_pppol2tp.h
+++ b/include/uapi/linux/if_pppol2tp.h
@@ -16,7 +16,8 @@
16#define _UAPI__LINUX_IF_PPPOL2TP_H 16#define _UAPI__LINUX_IF_PPPOL2TP_H
17 17
18#include <linux/types.h> 18#include <linux/types.h>
19 19#include <linux/in.h>
20#include <linux/in6.h>
20 21
21/* Structure used to connect() the socket to a particular tunnel UDP 22/* Structure used to connect() the socket to a particular tunnel UDP
22 * socket over IPv4. 23 * socket over IPv4.
diff --git a/include/uapi/linux/if_pppox.h b/include/uapi/linux/if_pppox.h
index e128769331b5..d37bbb17a007 100644
--- a/include/uapi/linux/if_pppox.h
+++ b/include/uapi/linux/if_pppox.h
@@ -21,8 +21,11 @@
21#include <asm/byteorder.h> 21#include <asm/byteorder.h>
22 22
23#include <linux/socket.h> 23#include <linux/socket.h>
24#include <linux/if.h>
24#include <linux/if_ether.h> 25#include <linux/if_ether.h>
25#include <linux/if_pppol2tp.h> 26#include <linux/if_pppol2tp.h>
27#include <linux/in.h>
28#include <linux/in6.h>
26 29
27/* For user-space programs to pick up these definitions 30/* For user-space programs to pick up these definitions
28 * which they wouldn't get otherwise without defining __KERNEL__ 31 * which they wouldn't get otherwise without defining __KERNEL__
diff --git a/include/uapi/linux/if_tunnel.h b/include/uapi/linux/if_tunnel.h
index 1046f5515174..777b6cdb1b7b 100644
--- a/include/uapi/linux/if_tunnel.h
+++ b/include/uapi/linux/if_tunnel.h
@@ -2,6 +2,9 @@
2#define _UAPI_IF_TUNNEL_H_ 2#define _UAPI_IF_TUNNEL_H_
3 3
4#include <linux/types.h> 4#include <linux/types.h>
5#include <linux/if.h>
6#include <linux/ip.h>
7#include <linux/in6.h>
5#include <asm/byteorder.h> 8#include <asm/byteorder.h>
6 9
7 10
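
The three include fixes above (if_pppol2tp.h, if_pppox.h, if_tunnel.h) share one rule: an exported UAPI header must pull in every header its own types reference, rather than relying on whatever the application happened to include first. After the fix, a user program can include the header in isolation:

/* user-space translation unit; compiles with no other includes */
#include <linux/if_tunnel.h>

int main(void)
{
	/* struct ip_tunnel_parm embeds struct iphdr and an IFNAMSIZ name,
	 * which is exactly why <linux/ip.h> and <linux/if.h> are needed */
	struct ip_tunnel_parm p = { .iph.ttl = 64 };

	return p.iph.ttl != 64;
}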
diff --git a/include/uapi/linux/ipx.h b/include/uapi/linux/ipx.h
index 3d48014cdd71..30f031db12f6 100644
--- a/include/uapi/linux/ipx.h
+++ b/include/uapi/linux/ipx.h
@@ -1,11 +1,13 @@
1#ifndef _IPX_H_ 1#ifndef _IPX_H_
2#define _IPX_H_ 2#define _IPX_H_
3#include <linux/libc-compat.h> /* for compatibility with glibc netipx/ipx.h */
3#include <linux/types.h> 4#include <linux/types.h>
4#include <linux/sockios.h> 5#include <linux/sockios.h>
5#include <linux/socket.h> 6#include <linux/socket.h>
6#define IPX_NODE_LEN 6 7#define IPX_NODE_LEN 6
7#define IPX_MTU 576 8#define IPX_MTU 576
8 9
10#if __UAPI_DEF_SOCKADDR_IPX
9struct sockaddr_ipx { 11struct sockaddr_ipx {
10 __kernel_sa_family_t sipx_family; 12 __kernel_sa_family_t sipx_family;
11 __be16 sipx_port; 13 __be16 sipx_port;
@@ -14,6 +16,7 @@ struct sockaddr_ipx {
14 __u8 sipx_type; 16 __u8 sipx_type;
15 unsigned char sipx_zero; /* 16 byte fill */ 17 unsigned char sipx_zero; /* 16 byte fill */
16}; 18};
19#endif /* __UAPI_DEF_SOCKADDR_IPX */
17 20
18/* 21/*
19 * So we can fit the extra info for SIOCSIFADDR into the address nicely 22 * So we can fit the extra info for SIOCSIFADDR into the address nicely
@@ -23,12 +26,15 @@ struct sockaddr_ipx {
23#define IPX_DLTITF 0 26#define IPX_DLTITF 0
24#define IPX_CRTITF 1 27#define IPX_CRTITF 1
25 28
29#if __UAPI_DEF_IPX_ROUTE_DEFINITION
26struct ipx_route_definition { 30struct ipx_route_definition {
27 __be32 ipx_network; 31 __be32 ipx_network;
28 __be32 ipx_router_network; 32 __be32 ipx_router_network;
29 unsigned char ipx_router_node[IPX_NODE_LEN]; 33 unsigned char ipx_router_node[IPX_NODE_LEN];
30}; 34};
35#endif /* __UAPI_DEF_IPX_ROUTE_DEFINITION */
31 36
37#if __UAPI_DEF_IPX_INTERFACE_DEFINITION
32struct ipx_interface_definition { 38struct ipx_interface_definition {
33 __be32 ipx_network; 39 __be32 ipx_network;
34 unsigned char ipx_device[16]; 40 unsigned char ipx_device[16];
@@ -45,16 +51,20 @@ struct ipx_interface_definition {
45#define IPX_INTERNAL 2 51#define IPX_INTERNAL 2
46 unsigned char ipx_node[IPX_NODE_LEN]; 52 unsigned char ipx_node[IPX_NODE_LEN];
47}; 53};
48 54#endif /* __UAPI_DEF_IPX_INTERFACE_DEFINITION */
55
56#if __UAPI_DEF_IPX_CONFIG_DATA
49struct ipx_config_data { 57struct ipx_config_data {
50 unsigned char ipxcfg_auto_select_primary; 58 unsigned char ipxcfg_auto_select_primary;
51 unsigned char ipxcfg_auto_create_interfaces; 59 unsigned char ipxcfg_auto_create_interfaces;
52}; 60};
61#endif /* __UAPI_DEF_IPX_CONFIG_DATA */
53 62
54/* 63/*
55 * OLD Route Definition for backward compatibility. 64 * OLD Route Definition for backward compatibility.
56 */ 65 */
57 66
67#if __UAPI_DEF_IPX_ROUTE_DEF
58struct ipx_route_def { 68struct ipx_route_def {
59 __be32 ipx_network; 69 __be32 ipx_network;
60 __be32 ipx_router_network; 70 __be32 ipx_router_network;
@@ -67,6 +77,7 @@ struct ipx_route_def {
67#define IPX_RT_BLUEBOOK 2 77#define IPX_RT_BLUEBOOK 2
68#define IPX_RT_ROUTED 1 78#define IPX_RT_ROUTED 1
69}; 79};
80#endif /* __UAPI_DEF_IPX_ROUTE_DEF */
70 81
71#define SIOCAIPXITFCRT (SIOCPROTOPRIVATE) 82#define SIOCAIPXITFCRT (SIOCPROTOPRIVATE)
72#define SIOCAIPXPRISLT (SIOCPROTOPRIVATE + 1) 83#define SIOCAIPXPRISLT (SIOCPROTOPRIVATE + 1)
diff --git a/include/uapi/linux/libc-compat.h b/include/uapi/linux/libc-compat.h
index e4f048ee7043..44b8a6bd5fe1 100644
--- a/include/uapi/linux/libc-compat.h
+++ b/include/uapi/linux/libc-compat.h
@@ -139,6 +139,25 @@
139 139
140#endif /* _NETINET_IN_H */ 140#endif /* _NETINET_IN_H */
141 141
142/* Coordinate with glibc netipx/ipx.h header. */
143#if defined(__NETIPX_IPX_H)
144
145#define __UAPI_DEF_SOCKADDR_IPX 0
146#define __UAPI_DEF_IPX_ROUTE_DEFINITION 0
147#define __UAPI_DEF_IPX_INTERFACE_DEFINITION 0
148#define __UAPI_DEF_IPX_CONFIG_DATA 0
149#define __UAPI_DEF_IPX_ROUTE_DEF 0
150
151#else /* defined(__NETIPX_IPX_H) */
152
153#define __UAPI_DEF_SOCKADDR_IPX 1
154#define __UAPI_DEF_IPX_ROUTE_DEFINITION 1
155#define __UAPI_DEF_IPX_INTERFACE_DEFINITION 1
156#define __UAPI_DEF_IPX_CONFIG_DATA 1
157#define __UAPI_DEF_IPX_ROUTE_DEF 1
158
159#endif /* defined(__NETIPX_IPX_H) */
160
142/* Definitions for xattr.h */ 161/* Definitions for xattr.h */
143#if defined(_SYS_XATTR_H) 162#if defined(_SYS_XATTR_H)
144#define __UAPI_DEF_XATTR 0 163#define __UAPI_DEF_XATTR 0
@@ -179,6 +198,13 @@
179#define __UAPI_DEF_IN6_PKTINFO 1 198#define __UAPI_DEF_IN6_PKTINFO 1
180#define __UAPI_DEF_IP6_MTUINFO 1 199#define __UAPI_DEF_IP6_MTUINFO 1
181 200
201/* Definitions for ipx.h */
202#define __UAPI_DEF_SOCKADDR_IPX 1
203#define __UAPI_DEF_IPX_ROUTE_DEFINITION 1
204#define __UAPI_DEF_IPX_INTERFACE_DEFINITION 1
205#define __UAPI_DEF_IPX_CONFIG_DATA 1
206#define __UAPI_DEF_IPX_ROUTE_DEF 1
207
182/* Definitions for xattr.h */ 208/* Definitions for xattr.h */
183#define __UAPI_DEF_XATTR 1 209#define __UAPI_DEF_XATTR 1
184 210
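
The libc-compat.h machinery lets <linux/ipx.h> and glibc's <netipx/ipx.h> coexist: if glibc's guard __NETIPX_IPX_H is already defined, the kernel header compiles out its duplicate definitions; otherwise it provides them. With the coordination in place, this include order, previously a redefinition error, now builds:

/* user space: both headers in one translation unit */
#include <sys/socket.h>
#include <netipx/ipx.h>		/* glibc definitions win ... */
#include <linux/ipx.h>		/* ... kernel duplicates compile out */

int main(void)
{
	struct sockaddr_ipx sipx = { .sipx_family = AF_IPX };

	return sipx.sipx_family != AF_IPX;
}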
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index 01751faccaf8..c674ba2563b7 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -24,7 +24,7 @@ enum nft_registers {
24 __NFT_REG_MAX, 24 __NFT_REG_MAX,
25 25
26 NFT_REG32_00 = 8, 26 NFT_REG32_00 = 8,
27 MFT_REG32_01, 27 NFT_REG32_01,
28 NFT_REG32_02, 28 NFT_REG32_02,
29 NFT_REG32_03, 29 NFT_REG32_03,
30 NFT_REG32_04, 30 NFT_REG32_04,
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index d95a3018f6a1..54c3b4f4aceb 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -583,7 +583,7 @@ enum ovs_userspace_attr {
583#define OVS_USERSPACE_ATTR_MAX (__OVS_USERSPACE_ATTR_MAX - 1) 583#define OVS_USERSPACE_ATTR_MAX (__OVS_USERSPACE_ATTR_MAX - 1)
584 584
585struct ovs_action_trunc { 585struct ovs_action_trunc {
586 uint32_t max_len; /* Max packet size in bytes. */ 586 __u32 max_len; /* Max packet size in bytes. */
587}; 587};
588 588
589/** 589/**
@@ -632,8 +632,8 @@ enum ovs_hash_alg {
632 * @hash_basis: basis used for computing hash. 632 * @hash_basis: basis used for computing hash.
633 */ 633 */
634struct ovs_action_hash { 634struct ovs_action_hash {
635 uint32_t hash_alg; /* One of ovs_hash_alg. */ 635 __u32 hash_alg; /* One of ovs_hash_alg. */
636 uint32_t hash_basis; 636 __u32 hash_basis;
637}; 637};
638 638
639/** 639/**
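
The uint32_t to __u32 change is not cosmetic: a UAPI header may be consumed without <stdint.h> in scope, while the <linux/types.h> fixed-width types are always available to it. The cxl.h hunk further down applies the same rule (size_t and u8 are likewise off-limits there, size_t additionally because its width varies by ABI). A sketch of the convention, with example_uapi_action as a placeholder:

/* exported UAPI structure: only __uNN/__sNN types, so the header never
 * depends on the application having included <stdint.h> */
#include <linux/types.h>

struct example_uapi_action {
	__u32 max_len;	/* not uint32_t, not size_t */
};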
diff --git a/include/uapi/linux/sctp.h b/include/uapi/linux/sctp.h
index d304f4c9792c..a406adcc0793 100644
--- a/include/uapi/linux/sctp.h
+++ b/include/uapi/linux/sctp.h
@@ -944,4 +944,68 @@ struct sctp_default_prinfo {
944 __u16 pr_policy; 944 __u16 pr_policy;
945}; 945};
946 946
947struct sctp_info {
948 __u32 sctpi_tag;
949 __u32 sctpi_state;
950 __u32 sctpi_rwnd;
951 __u16 sctpi_unackdata;
952 __u16 sctpi_penddata;
953 __u16 sctpi_instrms;
954 __u16 sctpi_outstrms;
955 __u32 sctpi_fragmentation_point;
956 __u32 sctpi_inqueue;
957 __u32 sctpi_outqueue;
958 __u32 sctpi_overall_error;
959 __u32 sctpi_max_burst;
960 __u32 sctpi_maxseg;
961 __u32 sctpi_peer_rwnd;
962 __u32 sctpi_peer_tag;
963 __u8 sctpi_peer_capable;
964 __u8 sctpi_peer_sack;
965 __u16 __reserved1;
966
967 /* assoc status info */
968 __u64 sctpi_isacks;
969 __u64 sctpi_osacks;
970 __u64 sctpi_opackets;
971 __u64 sctpi_ipackets;
972 __u64 sctpi_rtxchunks;
973 __u64 sctpi_outofseqtsns;
974 __u64 sctpi_idupchunks;
975 __u64 sctpi_gapcnt;
976 __u64 sctpi_ouodchunks;
977 __u64 sctpi_iuodchunks;
978 __u64 sctpi_oodchunks;
979 __u64 sctpi_iodchunks;
980 __u64 sctpi_octrlchunks;
981 __u64 sctpi_ictrlchunks;
982
983 /* primary transport info */
984 struct sockaddr_storage sctpi_p_address;
985 __s32 sctpi_p_state;
986 __u32 sctpi_p_cwnd;
987 __u32 sctpi_p_srtt;
988 __u32 sctpi_p_rto;
989 __u32 sctpi_p_hbinterval;
990 __u32 sctpi_p_pathmaxrxt;
991 __u32 sctpi_p_sackdelay;
992 __u32 sctpi_p_sackfreq;
993 __u32 sctpi_p_ssthresh;
994 __u32 sctpi_p_partial_bytes_acked;
995 __u32 sctpi_p_flight_size;
996 __u16 sctpi_p_error;
997 __u16 __reserved2;
998
999 /* sctp sock info */
1000 __u32 sctpi_s_autoclose;
1001 __u32 sctpi_s_adaptation_ind;
1002 __u32 sctpi_s_pd_point;
1003 __u8 sctpi_s_nodelay;
1004 __u8 sctpi_s_disable_fragments;
1005 __u8 sctpi_s_v4mapped;
1006 __u8 sctpi_s_frag_interleave;
1007 __u32 sctpi_s_type;
1008 __u32 __reserved3;
1009};
1010
947#endif /* _UAPI_SCTP_H */ 1011#endif /* _UAPI_SCTP_H */
diff --git a/include/uapi/linux/virtio_vsock.h b/include/uapi/linux/virtio_vsock.h
index 6b011c19b50f..1d57ed3d84d2 100644
--- a/include/uapi/linux/virtio_vsock.h
+++ b/include/uapi/linux/virtio_vsock.h
@@ -32,7 +32,7 @@
32 */ 32 */
33 33
34#ifndef _UAPI_LINUX_VIRTIO_VSOCK_H 34#ifndef _UAPI_LINUX_VIRTIO_VSOCK_H
35#define _UAPI_LINUX_VIRTIO_VOSCK_H 35#define _UAPI_LINUX_VIRTIO_VSOCK_H
36 36
37#include <linux/types.h> 37#include <linux/types.h>
38#include <linux/virtio_ids.h> 38#include <linux/virtio_ids.h>
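
A one-character fix, but worth spelling out: with #ifndef _UAPI_LINUX_VIRTIO_VSOCK_H guarding a #define of the misspelled _UAPI_LINUX_VIRTIO_VOSCK_H, the guard macro was never actually defined, so a second inclusion of the header would re-declare everything. The invariant, in a generic sketch:

#ifndef _EXAMPLE_H	/* the tested token ... */
#define _EXAMPLE_H	/* ... and the defined token must be identical */

struct example { int x; };

#endif /* _EXAMPLE_H */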
diff --git a/include/uapi/misc/cxl.h b/include/uapi/misc/cxl.h
index cbae529b7ce0..180d526a55c3 100644
--- a/include/uapi/misc/cxl.h
+++ b/include/uapi/misc/cxl.h
@@ -136,8 +136,8 @@ struct cxl_event_afu_driver_reserved {
136 * 136 *
137 * Of course the contents will be ABI, but that's up to the AFU driver. 137 * Of course the contents will be ABI, but that's up to the AFU driver.
138 */ 138 */
139 size_t data_size; 139 __u32 data_size;
140 u8 data[]; 140 __u8 data[];
141}; 141};
142 142
143struct cxl_event { 143struct cxl_event {
diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h
index 9a37c541822f..b5486e648607 100644
--- a/include/xen/xen-ops.h
+++ b/include/xen/xen-ops.h
@@ -9,8 +9,8 @@
9 9
10DECLARE_PER_CPU(struct vcpu_info *, xen_vcpu); 10DECLARE_PER_CPU(struct vcpu_info *, xen_vcpu);
11 11
12DECLARE_PER_CPU(int, xen_vcpu_id); 12DECLARE_PER_CPU(uint32_t, xen_vcpu_id);
13static inline int xen_vcpu_nr(int cpu) 13static inline uint32_t xen_vcpu_nr(int cpu)
14{ 14{
15 return per_cpu(xen_vcpu_id, cpu); 15 return per_cpu(xen_vcpu_id, cpu);
16} 16}
diff --git a/init/Kconfig b/init/Kconfig
index 69886493ff1e..cac3f096050d 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1761,6 +1761,7 @@ choice
1761 1761
1762config SLAB 1762config SLAB
1763 bool "SLAB" 1763 bool "SLAB"
1764 select HAVE_HARDENED_USERCOPY_ALLOCATOR
1764 help 1765 help
1765 The regular slab allocator that is established and known to work 1766 The regular slab allocator that is established and known to work
1766 well in all environments. It organizes cache hot objects in 1767 well in all environments. It organizes cache hot objects in
@@ -1768,6 +1769,7 @@ config SLAB
1768 1769
1769config SLUB 1770config SLUB
1770 bool "SLUB (Unqueued Allocator)" 1771 bool "SLUB (Unqueued Allocator)"
1772 select HAVE_HARDENED_USERCOPY_ALLOCATOR
1771 help 1773 help
1772 SLUB is a slab allocator that minimizes cache line usage 1774 SLUB is a slab allocator that minimizes cache line usage
1773 instead of managing queues of cached objects (SLAB approach). 1775 instead of managing queues of cached objects (SLAB approach).
diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c
index d6709eb70970..0d302a87f21b 100644
--- a/kernel/audit_watch.c
+++ b/kernel/audit_watch.c
@@ -19,6 +19,7 @@
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */ 20 */
21 21
22#include <linux/file.h>
22#include <linux/kernel.h> 23#include <linux/kernel.h>
23#include <linux/audit.h> 24#include <linux/audit.h>
24#include <linux/kthread.h> 25#include <linux/kthread.h>
@@ -544,10 +545,11 @@ int audit_exe_compare(struct task_struct *tsk, struct audit_fsnotify_mark *mark)
544 unsigned long ino; 545 unsigned long ino;
545 dev_t dev; 546 dev_t dev;
546 547
547 rcu_read_lock(); 548 exe_file = get_task_exe_file(tsk);
548 exe_file = rcu_dereference(tsk->mm->exe_file); 549 if (!exe_file)
550 return 0;
549 ino = exe_file->f_inode->i_ino; 551 ino = exe_file->f_inode->i_ino;
550 dev = exe_file->f_inode->i_sb->s_dev; 552 dev = exe_file->f_inode->i_sb->s_dev;
551 rcu_read_unlock(); 553 fput(exe_file);
552 return audit_mark_compare(mark, ino, dev); 554 return audit_mark_compare(mark, ino, dev);
553} 555}
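
The fix above replaces an RCU dereference of tsk->mm->exe_file, which oopses when tsk->mm is NULL (kernel threads, exiting tasks), with get_task_exe_file(), which handles that case and takes a proper reference. Sketch of the resulting access pattern (report_task_exe_ino() is illustrative):

#include <linux/file.h>
#include <linux/fs.h>
#include <linux/mm.h>

static unsigned long report_task_exe_ino(struct task_struct *tsk)
{
	struct file *exe_file = get_task_exe_file(tsk);
	unsigned long ino;

	if (!exe_file)			/* no mm, or no mapped executable */
		return 0;
	ino = exe_file->f_inode->i_ino;	/* safe: we hold a reference */
	fput(exe_file);
	return ino;
}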
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index fff3650d52fc..570eeca7bdfa 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -26,11 +26,18 @@ struct bpf_htab {
26 struct bucket *buckets; 26 struct bucket *buckets;
27 void *elems; 27 void *elems;
28 struct pcpu_freelist freelist; 28 struct pcpu_freelist freelist;
29 void __percpu *extra_elems;
29 atomic_t count; /* number of elements in this hashtable */ 30 atomic_t count; /* number of elements in this hashtable */
30 u32 n_buckets; /* number of hash buckets */ 31 u32 n_buckets; /* number of hash buckets */
31 u32 elem_size; /* size of each element in bytes */ 32 u32 elem_size; /* size of each element in bytes */
32}; 33};
33 34
35enum extra_elem_state {
36 HTAB_NOT_AN_EXTRA_ELEM = 0,
37 HTAB_EXTRA_ELEM_FREE,
38 HTAB_EXTRA_ELEM_USED
39};
40
34/* each htab element is struct htab_elem + key + value */ 41/* each htab element is struct htab_elem + key + value */
35struct htab_elem { 42struct htab_elem {
36 union { 43 union {
@@ -38,7 +45,10 @@ struct htab_elem {
38 struct bpf_htab *htab; 45 struct bpf_htab *htab;
39 struct pcpu_freelist_node fnode; 46 struct pcpu_freelist_node fnode;
40 }; 47 };
41 struct rcu_head rcu; 48 union {
49 struct rcu_head rcu;
50 enum extra_elem_state state;
51 };
42 u32 hash; 52 u32 hash;
43 char key[0] __aligned(8); 53 char key[0] __aligned(8);
44}; 54};
@@ -113,6 +123,23 @@ free_elems:
113 return err; 123 return err;
114} 124}
115 125
126static int alloc_extra_elems(struct bpf_htab *htab)
127{
128 void __percpu *pptr;
129 int cpu;
130
131 pptr = __alloc_percpu_gfp(htab->elem_size, 8, GFP_USER | __GFP_NOWARN);
132 if (!pptr)
133 return -ENOMEM;
134
135 for_each_possible_cpu(cpu) {
136 ((struct htab_elem *)per_cpu_ptr(pptr, cpu))->state =
137 HTAB_EXTRA_ELEM_FREE;
138 }
139 htab->extra_elems = pptr;
140 return 0;
141}
142
116/* Called from syscall */ 143/* Called from syscall */
117static struct bpf_map *htab_map_alloc(union bpf_attr *attr) 144static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
118{ 145{
@@ -185,6 +212,8 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
185 if (percpu) 212 if (percpu)
186 cost += (u64) round_up(htab->map.value_size, 8) * 213 cost += (u64) round_up(htab->map.value_size, 8) *
187 num_possible_cpus() * htab->map.max_entries; 214 num_possible_cpus() * htab->map.max_entries;
215 else
216 cost += (u64) htab->elem_size * num_possible_cpus();
188 217
189 if (cost >= U32_MAX - PAGE_SIZE) 218 if (cost >= U32_MAX - PAGE_SIZE)
190 /* make sure page count doesn't overflow */ 219 /* make sure page count doesn't overflow */
@@ -212,14 +241,22 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
212 raw_spin_lock_init(&htab->buckets[i].lock); 241 raw_spin_lock_init(&htab->buckets[i].lock);
213 } 242 }
214 243
244 if (!percpu) {
245 err = alloc_extra_elems(htab);
246 if (err)
247 goto free_buckets;
248 }
249
215 if (!(attr->map_flags & BPF_F_NO_PREALLOC)) { 250 if (!(attr->map_flags & BPF_F_NO_PREALLOC)) {
216 err = prealloc_elems_and_freelist(htab); 251 err = prealloc_elems_and_freelist(htab);
217 if (err) 252 if (err)
218 goto free_buckets; 253 goto free_extra_elems;
219 } 254 }
220 255
221 return &htab->map; 256 return &htab->map;
222 257
258free_extra_elems:
259 free_percpu(htab->extra_elems);
223free_buckets: 260free_buckets:
224 kvfree(htab->buckets); 261 kvfree(htab->buckets);
225free_htab: 262free_htab:
@@ -349,7 +386,6 @@ static void htab_elem_free(struct bpf_htab *htab, struct htab_elem *l)
349 if (htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH) 386 if (htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH)
350 free_percpu(htab_elem_get_ptr(l, htab->map.key_size)); 387 free_percpu(htab_elem_get_ptr(l, htab->map.key_size));
351 kfree(l); 388 kfree(l);
352
353} 389}
354 390
355static void htab_elem_free_rcu(struct rcu_head *head) 391static void htab_elem_free_rcu(struct rcu_head *head)
@@ -370,6 +406,11 @@ static void htab_elem_free_rcu(struct rcu_head *head)
370 406
371static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l) 407static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
372{ 408{
409 if (l->state == HTAB_EXTRA_ELEM_USED) {
410 l->state = HTAB_EXTRA_ELEM_FREE;
411 return;
412 }
413
373 if (!(htab->map.map_flags & BPF_F_NO_PREALLOC)) { 414 if (!(htab->map.map_flags & BPF_F_NO_PREALLOC)) {
374 pcpu_freelist_push(&htab->freelist, &l->fnode); 415 pcpu_freelist_push(&htab->freelist, &l->fnode);
375 } else { 416 } else {
@@ -381,25 +422,44 @@ static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
381 422
382static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key, 423static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
383 void *value, u32 key_size, u32 hash, 424 void *value, u32 key_size, u32 hash,
384 bool percpu, bool onallcpus) 425 bool percpu, bool onallcpus,
426 bool old_elem_exists)
385{ 427{
386 u32 size = htab->map.value_size; 428 u32 size = htab->map.value_size;
387 bool prealloc = !(htab->map.map_flags & BPF_F_NO_PREALLOC); 429 bool prealloc = !(htab->map.map_flags & BPF_F_NO_PREALLOC);
388 struct htab_elem *l_new; 430 struct htab_elem *l_new;
389 void __percpu *pptr; 431 void __percpu *pptr;
432 int err = 0;
390 433
391 if (prealloc) { 434 if (prealloc) {
392 l_new = (struct htab_elem *)pcpu_freelist_pop(&htab->freelist); 435 l_new = (struct htab_elem *)pcpu_freelist_pop(&htab->freelist);
393 if (!l_new) 436 if (!l_new)
394 return ERR_PTR(-E2BIG); 437 err = -E2BIG;
395 } else { 438 } else {
396 if (atomic_inc_return(&htab->count) > htab->map.max_entries) { 439 if (atomic_inc_return(&htab->count) > htab->map.max_entries) {
397 atomic_dec(&htab->count); 440 atomic_dec(&htab->count);
398 return ERR_PTR(-E2BIG); 441 err = -E2BIG;
442 } else {
443 l_new = kmalloc(htab->elem_size,
444 GFP_ATOMIC | __GFP_NOWARN);
445 if (!l_new)
446 return ERR_PTR(-ENOMEM);
399 } 447 }
400 l_new = kmalloc(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN); 448 }
401 if (!l_new) 449
402 return ERR_PTR(-ENOMEM); 450 if (err) {
451 if (!old_elem_exists)
452 return ERR_PTR(err);
453
454 /* if we're updating the existing element and the hash table
455 * is full, use per-cpu extra elems
456 */
457 l_new = this_cpu_ptr(htab->extra_elems);
458 if (l_new->state != HTAB_EXTRA_ELEM_FREE)
459 return ERR_PTR(-E2BIG);
460 l_new->state = HTAB_EXTRA_ELEM_USED;
461 } else {
462 l_new->state = HTAB_NOT_AN_EXTRA_ELEM;
403 } 463 }
404 464
405 memcpy(l_new->key, key, key_size); 465 memcpy(l_new->key, key, key_size);
@@ -489,7 +549,8 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
489 if (ret) 549 if (ret)
490 goto err; 550 goto err;
491 551
492 l_new = alloc_htab_elem(htab, key, value, key_size, hash, false, false); 552 l_new = alloc_htab_elem(htab, key, value, key_size, hash, false, false,
553 !!l_old);
493 if (IS_ERR(l_new)) { 554 if (IS_ERR(l_new)) {
494 /* all pre-allocated elements are in use or memory exhausted */ 555 /* all pre-allocated elements are in use or memory exhausted */
495 ret = PTR_ERR(l_new); 556 ret = PTR_ERR(l_new);
@@ -563,7 +624,7 @@ static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
563 } 624 }
564 } else { 625 } else {
565 l_new = alloc_htab_elem(htab, key, value, key_size, 626 l_new = alloc_htab_elem(htab, key, value, key_size,
566 hash, true, onallcpus); 627 hash, true, onallcpus, false);
567 if (IS_ERR(l_new)) { 628 if (IS_ERR(l_new)) {
568 ret = PTR_ERR(l_new); 629 ret = PTR_ERR(l_new);
569 goto err; 630 goto err;
@@ -652,6 +713,7 @@ static void htab_map_free(struct bpf_map *map)
652 htab_free_elems(htab); 713 htab_free_elems(htab);
653 pcpu_freelist_destroy(&htab->freelist); 714 pcpu_freelist_destroy(&htab->freelist);
654 } 715 }
716 free_percpu(htab->extra_elems);
655 kvfree(htab->buckets); 717 kvfree(htab->buckets);
656 kfree(htab); 718 kfree(htab);
657} 719}
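
The extra_elems machinery guarantees that updating an existing key cannot fail with -E2BIG on a full, preallocated map: each CPU keeps one spare element to stage the new value while the displaced element waits out its RCU grace period, after which free_htab_elem() recycles the spare by flipping it back to HTAB_EXTRA_ELEM_FREE. Condensed to its core (a paraphrase of the hunk above, not new API):

/* allocation failed, but we are replacing an existing key */
if (old_elem_exists) {
	l_new = this_cpu_ptr(htab->extra_elems);
	if (l_new->state != HTAB_EXTRA_ELEM_FREE)
		return ERR_PTR(-E2BIG);		/* spare still in flight */
	l_new->state = HTAB_EXTRA_ELEM_USED;	/* free path resets this */
}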
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index f72f23b8fdab..daea765d72e6 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -194,6 +194,7 @@ struct verifier_env {
194 struct verifier_state_list **explored_states; /* search pruning optimization */ 194 struct verifier_state_list **explored_states; /* search pruning optimization */
195 struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of map's used by eBPF program */ 195 struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of map's used by eBPF program */
196 u32 used_map_cnt; /* number of used maps */ 196 u32 used_map_cnt; /* number of used maps */
197 u32 id_gen; /* used to generate unique reg IDs */
197 bool allow_ptr_leaks; 198 bool allow_ptr_leaks;
198}; 199};
199 200
@@ -1052,7 +1053,7 @@ static int check_map_func_compatibility(struct bpf_map *map, int func_id)
1052 goto error; 1053 goto error;
1053 break; 1054 break;
1054 case BPF_MAP_TYPE_CGROUP_ARRAY: 1055 case BPF_MAP_TYPE_CGROUP_ARRAY:
1055 if (func_id != BPF_FUNC_skb_in_cgroup) 1056 if (func_id != BPF_FUNC_skb_under_cgroup)
1056 goto error; 1057 goto error;
1057 break; 1058 break;
1058 default: 1059 default:
@@ -1074,7 +1075,7 @@ static int check_map_func_compatibility(struct bpf_map *map, int func_id)
1074 if (map->map_type != BPF_MAP_TYPE_STACK_TRACE) 1075 if (map->map_type != BPF_MAP_TYPE_STACK_TRACE)
1075 goto error; 1076 goto error;
1076 break; 1077 break;
1077 case BPF_FUNC_skb_in_cgroup: 1078 case BPF_FUNC_skb_under_cgroup:
1078 if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY) 1079 if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY)
1079 goto error; 1080 goto error;
1080 break; 1081 break;
@@ -1301,7 +1302,7 @@ add_imm:
1301 /* dst_reg stays as pkt_ptr type and since some positive 1302 /* dst_reg stays as pkt_ptr type and since some positive
1302 * integer value was added to the pointer, increment its 'id' 1303 * integer value was added to the pointer, increment its 'id'
1303 */ 1304 */
1304 dst_reg->id++; 1305 dst_reg->id = ++env->id_gen;
1305 1306
1306 /* something was added to pkt_ptr, set range and off to zero */ 1307 /* something was added to pkt_ptr, set range and off to zero */
1307 dst_reg->off = 0; 1308 dst_reg->off = 0;
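
The verifier change swaps dst_reg->id++ for a per-environment counter: incrementing a copied id can give two independently derived packet pointers the same id, so a range check proven for one would wrongly license accesses through the other. Drawing from env->id_gen makes every pointer-arithmetic result unique; the fix is effectively:

/* r1 = pkt_ptr + x;  -> id A
 * r2 = pkt_ptr + y;  -> id B (with the old id++, both could collide)
 * a bounds check on r1 can then never be reused to justify *r2 */
dst_reg->id = ++env->id_gen;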
diff --git a/kernel/configs/tiny.config b/kernel/configs/tiny.config
index c2de56ab0fce..7fa0c4ae6394 100644
--- a/kernel/configs/tiny.config
+++ b/kernel/configs/tiny.config
@@ -1,4 +1,12 @@
1# CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE is not set
1CONFIG_CC_OPTIMIZE_FOR_SIZE=y 2CONFIG_CC_OPTIMIZE_FOR_SIZE=y
3# CONFIG_KERNEL_GZIP is not set
4# CONFIG_KERNEL_BZIP2 is not set
5# CONFIG_KERNEL_LZMA is not set
2CONFIG_KERNEL_XZ=y 6CONFIG_KERNEL_XZ=y
7# CONFIG_KERNEL_LZO is not set
8# CONFIG_KERNEL_LZ4 is not set
3CONFIG_OPTIMIZE_INLINING=y 9CONFIG_OPTIMIZE_INLINING=y
10# CONFIG_SLAB is not set
11# CONFIG_SLUB is not set
4CONFIG_SLOB=y 12CONFIG_SLOB=y
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index c7fd2778ed50..c27e53326bef 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -2069,6 +2069,20 @@ static void cpuset_bind(struct cgroup_subsys_state *root_css)
2069 mutex_unlock(&cpuset_mutex); 2069 mutex_unlock(&cpuset_mutex);
2070} 2070}
2071 2071
2072/*
2073 * Make sure the new task conforms to the current state of its parent,
2074 * which could have been changed by cpuset just after it inherits the
2075 * state from the parent and before it sits on the cgroup's task list.
2076 */
2077void cpuset_fork(struct task_struct *task)
2078{
2079 if (task_css_is_root(task, cpuset_cgrp_id))
2080 return;
2081
2082 set_cpus_allowed_ptr(task, &current->cpus_allowed);
2083 task->mems_allowed = current->mems_allowed;
2084}
2085
2072struct cgroup_subsys cpuset_cgrp_subsys = { 2086struct cgroup_subsys cpuset_cgrp_subsys = {
2073 .css_alloc = cpuset_css_alloc, 2087 .css_alloc = cpuset_css_alloc,
2074 .css_online = cpuset_css_online, 2088 .css_online = cpuset_css_online,
@@ -2079,6 +2093,7 @@ struct cgroup_subsys cpuset_cgrp_subsys = {
2079 .attach = cpuset_attach, 2093 .attach = cpuset_attach,
2080 .post_attach = cpuset_post_attach, 2094 .post_attach = cpuset_post_attach,
2081 .bind = cpuset_bind, 2095 .bind = cpuset_bind,
2096 .fork = cpuset_fork,
2082 .legacy_cftypes = files, 2097 .legacy_cftypes = files,
2083 .early_init = true, 2098 .early_init = true,
2084}; 2099};
diff --git a/kernel/events/core.c b/kernel/events/core.c
index a19550d80ab1..a54f2c2cdb20 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -242,18 +242,6 @@ unlock:
242 return ret; 242 return ret;
243} 243}
244 244
245static void event_function_local(struct perf_event *event, event_f func, void *data)
246{
247 struct event_function_struct efs = {
248 .event = event,
249 .func = func,
250 .data = data,
251 };
252
253 int ret = event_function(&efs);
254 WARN_ON_ONCE(ret);
255}
256
257static void event_function_call(struct perf_event *event, event_f func, void *data) 245static void event_function_call(struct perf_event *event, event_f func, void *data)
258{ 246{
259 struct perf_event_context *ctx = event->ctx; 247 struct perf_event_context *ctx = event->ctx;
@@ -303,6 +291,54 @@ again:
303 raw_spin_unlock_irq(&ctx->lock); 291 raw_spin_unlock_irq(&ctx->lock);
304} 292}
305 293
294/*
295 * Similar to event_function_call() + event_function(), but hard assumes IRQs
296 * are already disabled and we're on the right CPU.
297 */
298static void event_function_local(struct perf_event *event, event_f func, void *data)
299{
300 struct perf_event_context *ctx = event->ctx;
301 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
302 struct task_struct *task = READ_ONCE(ctx->task);
303 struct perf_event_context *task_ctx = NULL;
304
305 WARN_ON_ONCE(!irqs_disabled());
306
307 if (task) {
308 if (task == TASK_TOMBSTONE)
309 return;
310
311 task_ctx = ctx;
312 }
313
314 perf_ctx_lock(cpuctx, task_ctx);
315
316 task = ctx->task;
317 if (task == TASK_TOMBSTONE)
318 goto unlock;
319
320 if (task) {
321 /*
322 * We must be either inactive or active and the right task,
323 * otherwise we're screwed, since we cannot IPI to somewhere
324 * else.
325 */
326 if (ctx->is_active) {
327 if (WARN_ON_ONCE(task != current))
328 goto unlock;
329
330 if (WARN_ON_ONCE(cpuctx->task_ctx != ctx))
331 goto unlock;
332 }
333 } else {
334 WARN_ON_ONCE(&cpuctx->ctx != ctx);
335 }
336
337 func(event, cpuctx, ctx, data);
338unlock:
339 perf_ctx_unlock(cpuctx, task_ctx);
340}
341
306#define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\ 342#define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\
307 PERF_FLAG_FD_OUTPUT |\ 343 PERF_FLAG_FD_OUTPUT |\
308 PERF_FLAG_PID_CGROUP |\ 344 PERF_FLAG_PID_CGROUP |\
@@ -843,6 +879,32 @@ perf_cgroup_mark_enabled(struct perf_event *event,
843 } 879 }
844 } 880 }
845} 881}
882
883/*
884 * Update cpuctx->cgrp so that it is set when first cgroup event is added and
885 * cleared when last cgroup event is removed.
886 */
887static inline void
888list_update_cgroup_event(struct perf_event *event,
889 struct perf_event_context *ctx, bool add)
890{
891 struct perf_cpu_context *cpuctx;
892
893 if (!is_cgroup_event(event))
894 return;
895
896 if (add && ctx->nr_cgroups++)
897 return;
898 else if (!add && --ctx->nr_cgroups)
899 return;
900 /*
901 * Because cgroup events are always per-cpu events,
902 * this will always be called from the right CPU.
903 */
904 cpuctx = __get_cpu_context(ctx);
905 cpuctx->cgrp = add ? event->cgrp : NULL;
906}
907
846#else /* !CONFIG_CGROUP_PERF */ 908#else /* !CONFIG_CGROUP_PERF */
847 909
848static inline bool 910static inline bool
@@ -920,6 +982,13 @@ perf_cgroup_mark_enabled(struct perf_event *event,
920 struct perf_event_context *ctx) 982 struct perf_event_context *ctx)
921{ 983{
922} 984}
985
986static inline void
987list_update_cgroup_event(struct perf_event *event,
988 struct perf_event_context *ctx, bool add)
989{
990}
991
923#endif 992#endif
924 993
925/* 994/*
@@ -1392,6 +1461,7 @@ ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
1392static void 1461static void
1393list_add_event(struct perf_event *event, struct perf_event_context *ctx) 1462list_add_event(struct perf_event *event, struct perf_event_context *ctx)
1394{ 1463{
1464
1395 lockdep_assert_held(&ctx->lock); 1465 lockdep_assert_held(&ctx->lock);
1396 1466
1397 WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT); 1467 WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
@@ -1412,8 +1482,7 @@ list_add_event(struct perf_event *event, struct perf_event_context *ctx)
1412 list_add_tail(&event->group_entry, list); 1482 list_add_tail(&event->group_entry, list);
1413 } 1483 }
1414 1484
1415 if (is_cgroup_event(event)) 1485 list_update_cgroup_event(event, ctx, true);
1416 ctx->nr_cgroups++;
1417 1486
1418 list_add_rcu(&event->event_entry, &ctx->event_list); 1487 list_add_rcu(&event->event_entry, &ctx->event_list);
1419 ctx->nr_events++; 1488 ctx->nr_events++;
@@ -1581,8 +1650,6 @@ static void perf_group_attach(struct perf_event *event)
1581static void 1650static void
1582list_del_event(struct perf_event *event, struct perf_event_context *ctx) 1651list_del_event(struct perf_event *event, struct perf_event_context *ctx)
1583{ 1652{
1584 struct perf_cpu_context *cpuctx;
1585
1586 WARN_ON_ONCE(event->ctx != ctx); 1653 WARN_ON_ONCE(event->ctx != ctx);
1587 lockdep_assert_held(&ctx->lock); 1654 lockdep_assert_held(&ctx->lock);
1588 1655
@@ -1594,20 +1661,7 @@ list_del_event(struct perf_event *event, struct perf_event_context *ctx)
1594 1661
1595 event->attach_state &= ~PERF_ATTACH_CONTEXT; 1662 event->attach_state &= ~PERF_ATTACH_CONTEXT;
1596 1663
1597 if (is_cgroup_event(event)) { 1664 list_update_cgroup_event(event, ctx, false);
1598 ctx->nr_cgroups--;
1599 /*
1600 * Because cgroup events are always per-cpu events, this will
1601 * always be called from the right CPU.
1602 */
1603 cpuctx = __get_cpu_context(ctx);
1604 /*
1605 * If there are no more cgroup events then clear cgrp to avoid
1606 * stale pointer in update_cgrp_time_from_cpuctx().
1607 */
1608 if (!ctx->nr_cgroups)
1609 cpuctx->cgrp = NULL;
1610 }
1611 1665
1612 ctx->nr_events--; 1666 ctx->nr_events--;
1613 if (event->attr.inherit_stat) 1667 if (event->attr.inherit_stat)
@@ -1716,8 +1770,8 @@ static inline int pmu_filter_match(struct perf_event *event)
1716static inline int 1770static inline int
1717event_filter_match(struct perf_event *event) 1771event_filter_match(struct perf_event *event)
1718{ 1772{
1719 return (event->cpu == -1 || event->cpu == smp_processor_id()) 1773 return (event->cpu == -1 || event->cpu == smp_processor_id()) &&
1720 && perf_cgroup_match(event) && pmu_filter_match(event); 1774 perf_cgroup_match(event) && pmu_filter_match(event);
1721} 1775}
1722 1776
1723static void 1777static void
@@ -1737,8 +1791,8 @@ event_sched_out(struct perf_event *event,
1737 * maintained, otherwise bogus information is returned 1791 * maintained, otherwise bogus information is returned
1738 * via read() for time_enabled, time_running: 1792 * via read() for time_enabled, time_running:
1739 */ 1793 */
1740 if (event->state == PERF_EVENT_STATE_INACTIVE 1794 if (event->state == PERF_EVENT_STATE_INACTIVE &&
1741 && !event_filter_match(event)) { 1795 !event_filter_match(event)) {
1742 delta = tstamp - event->tstamp_stopped; 1796 delta = tstamp - event->tstamp_stopped;
1743 event->tstamp_running += delta; 1797 event->tstamp_running += delta;
1744 event->tstamp_stopped = tstamp; 1798 event->tstamp_stopped = tstamp;
@@ -2236,10 +2290,15 @@ perf_install_in_context(struct perf_event_context *ctx,
2236 2290
2237 lockdep_assert_held(&ctx->mutex); 2291 lockdep_assert_held(&ctx->mutex);
2238 2292
2239 event->ctx = ctx;
2240 if (event->cpu != -1) 2293 if (event->cpu != -1)
2241 event->cpu = cpu; 2294 event->cpu = cpu;
2242 2295
2296 /*
2297 * Ensures that if we can observe event->ctx, both the event and ctx
2298 * will be 'complete'. See perf_iterate_sb_cpu().
2299 */
2300 smp_store_release(&event->ctx, ctx);
2301
2243 if (!task) { 2302 if (!task) {
2244 cpu_function_call(cpu, __perf_install_in_context, event); 2303 cpu_function_call(cpu, __perf_install_in_context, event);
2245 return; 2304 return;
@@ -2437,11 +2496,11 @@ static int __perf_event_stop(void *info)
2437 return 0; 2496 return 0;
2438} 2497}
2439 2498
2440static int perf_event_restart(struct perf_event *event) 2499static int perf_event_stop(struct perf_event *event, int restart)
2441{ 2500{
2442 struct stop_event_data sd = { 2501 struct stop_event_data sd = {
2443 .event = event, 2502 .event = event,
2444 .restart = 1, 2503 .restart = restart,
2445 }; 2504 };
2446 int ret = 0; 2505 int ret = 0;
2447 2506
@@ -3490,8 +3549,17 @@ static int perf_event_read(struct perf_event *event, bool group)
3490 .group = group, 3549 .group = group,
3491 .ret = 0, 3550 .ret = 0,
3492 }; 3551 };
3493 smp_call_function_single(event->oncpu, 3552 /*
3494 __perf_event_read, &data, 1); 3553 * Purposely ignore the smp_call_function_single() return
3554 * value.
3555 *
3556 * If event->oncpu isn't a valid CPU it means the event got
3557 * scheduled out and that will have updated the event count.
3558 *
3559 * Therefore, either way, we'll have an up-to-date event count
3560 * after this.
3561 */
3562 (void)smp_call_function_single(event->oncpu, __perf_event_read, &data, 1);
3495 ret = data.ret; 3563 ret = data.ret;
3496 } else if (event->state == PERF_EVENT_STATE_INACTIVE) { 3564 } else if (event->state == PERF_EVENT_STATE_INACTIVE) {
3497 struct perf_event_context *ctx = event->ctx; 3565 struct perf_event_context *ctx = event->ctx;
@@ -4777,6 +4845,19 @@ static void ring_buffer_attach(struct perf_event *event,
4777 spin_unlock_irqrestore(&rb->event_lock, flags); 4845 spin_unlock_irqrestore(&rb->event_lock, flags);
4778 } 4846 }
4779 4847
4848 /*
4849 * Avoid racing with perf_mmap_close(AUX): stop the event
4850 * before swizzling the event::rb pointer; if it's getting
4851 * unmapped, its aux_mmap_count will be 0 and it won't
4852 * restart. See the comment in __perf_pmu_output_stop().
4853 *
4854 * Data will inevitably be lost when set_output is done in
4855 * mid-air, but then again, whoever does it like this is
4856 * not in for the data anyway.
4857 */
4858 if (has_aux(event))
4859 perf_event_stop(event, 0);
4860
4780 rcu_assign_pointer(event->rb, rb); 4861 rcu_assign_pointer(event->rb, rb);
4781 4862
4782 if (old_rb) { 4863 if (old_rb) {
@@ -5969,6 +6050,14 @@ static void perf_iterate_sb_cpu(perf_iterate_f output, void *data)
5969 struct perf_event *event; 6050 struct perf_event *event;
5970 6051
5971 list_for_each_entry_rcu(event, &pel->list, sb_list) { 6052 list_for_each_entry_rcu(event, &pel->list, sb_list) {
6053 /*
6054 * Skip events that are not fully formed yet; ensure that
6055 * if we observe event->ctx, both event and ctx will be
6056 * complete enough. See perf_install_in_context().
6057 */
6058 if (!smp_load_acquire(&event->ctx))
6059 continue;
6060
5972 if (event->state < PERF_EVENT_STATE_INACTIVE) 6061 if (event->state < PERF_EVENT_STATE_INACTIVE)
5973 continue; 6062 continue;
5974 if (!event_filter_match(event)) 6063 if (!event_filter_match(event))
@@ -6044,7 +6133,7 @@ static void perf_event_addr_filters_exec(struct perf_event *event, void *data)
6044 raw_spin_unlock_irqrestore(&ifh->lock, flags); 6133 raw_spin_unlock_irqrestore(&ifh->lock, flags);
6045 6134
6046 if (restart) 6135 if (restart)
6047 perf_event_restart(event); 6136 perf_event_stop(event, 1);
6048} 6137}
6049 6138
6050void perf_event_exec(void) 6139void perf_event_exec(void)
@@ -6088,7 +6177,13 @@ static void __perf_event_output_stop(struct perf_event *event, void *data)
6088 6177
6089 /* 6178 /*
6090 * In case of inheritance, it will be the parent that links to the 6179 * In case of inheritance, it will be the parent that links to the
6091 * ring-buffer, but it will be the child that's actually using it: 6180 * ring-buffer, but it will be the child that's actually using it.
6181 *
6182 * We are using event::rb to determine if the event should be stopped,
6183 * however this may race with ring_buffer_attach() (through set_output),
6184 * which will make us skip the event that actually needs to be stopped.
6185 * So ring_buffer_attach() has to stop an aux event before re-assigning
6186 * its rb pointer.
6092 */ 6187 */
6093 if (rcu_dereference(parent->rb) == rb) 6188 if (rcu_dereference(parent->rb) == rb)
6094 ro->err = __perf_event_stop(&sd); 6189 ro->err = __perf_event_stop(&sd);
@@ -6098,7 +6193,7 @@ static int __perf_pmu_output_stop(void *info)
6098{ 6193{
6099 struct perf_event *event = info; 6194 struct perf_event *event = info;
6100 struct pmu *pmu = event->pmu; 6195 struct pmu *pmu = event->pmu;
6101 struct perf_cpu_context *cpuctx = get_cpu_ptr(pmu->pmu_cpu_context); 6196 struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
6102 struct remote_output ro = { 6197 struct remote_output ro = {
6103 .rb = event->rb, 6198 .rb = event->rb,
6104 }; 6199 };
@@ -6553,15 +6648,6 @@ got_name:
6553} 6648}
6554 6649
6555/* 6650/*
6556 * Whether this @filter depends on a dynamic object which is not loaded
6557 * yet or its load addresses are not known.
6558 */
6559static bool perf_addr_filter_needs_mmap(struct perf_addr_filter *filter)
6560{
6561 return filter->filter && filter->inode;
6562}
6563
6564/*
6565 * Check whether inode and address range match filter criteria. 6651 * Check whether inode and address range match filter criteria.
6566 */ 6652 */
6567static bool perf_addr_filter_match(struct perf_addr_filter *filter, 6653static bool perf_addr_filter_match(struct perf_addr_filter *filter,
@@ -6611,7 +6697,7 @@ static void __perf_addr_filters_adjust(struct perf_event *event, void *data)
6611 raw_spin_unlock_irqrestore(&ifh->lock, flags); 6697 raw_spin_unlock_irqrestore(&ifh->lock, flags);
6612 6698
6613 if (restart) 6699 if (restart)
6614 perf_event_restart(event); 6700 perf_event_stop(event, 1);
6615} 6701}
6616 6702
6617/* 6703/*
@@ -6622,6 +6708,13 @@ static void perf_addr_filters_adjust(struct vm_area_struct *vma)
6622 struct perf_event_context *ctx; 6708 struct perf_event_context *ctx;
6623 int ctxn; 6709 int ctxn;
6624 6710
6711 /*
6712 * Data tracing isn't supported yet and as such there is no need
6713 * to keep track of anything that isn't related to executable code:
6714 */
6715 if (!(vma->vm_flags & VM_EXEC))
6716 return;
6717
6625 rcu_read_lock(); 6718 rcu_read_lock();
6626 for_each_task_context_nr(ctxn) { 6719 for_each_task_context_nr(ctxn) {
6627 ctx = rcu_dereference(current->perf_event_ctxp[ctxn]); 6720 ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
@@ -7774,7 +7867,11 @@ static void perf_event_addr_filters_apply(struct perf_event *event)
7774 list_for_each_entry(filter, &ifh->list, entry) { 7867 list_for_each_entry(filter, &ifh->list, entry) {
7775 event->addr_filters_offs[count] = 0; 7868 event->addr_filters_offs[count] = 0;
7776 7869
7777 if (perf_addr_filter_needs_mmap(filter)) 7870 /*
7871 * Adjust base offset if the filter is associated to a binary
7872 * that needs to be mapped:
7873 */
7874 if (filter->inode)
7778 event->addr_filters_offs[count] = 7875 event->addr_filters_offs[count] =
7779 perf_addr_filter_apply(filter, mm); 7876 perf_addr_filter_apply(filter, mm);
7780 7877
@@ -7789,7 +7886,7 @@ static void perf_event_addr_filters_apply(struct perf_event *event)
7789 mmput(mm); 7886 mmput(mm);
7790 7887
7791restart: 7888restart:
7792 perf_event_restart(event); 7889 perf_event_stop(event, 1);
7793} 7890}
7794 7891
7795/* 7892/*
@@ -7905,8 +8002,10 @@ perf_event_parse_addr_filter(struct perf_event *event, char *fstr,
7905 goto fail; 8002 goto fail;
7906 } 8003 }
7907 8004
7908 if (token == IF_SRC_FILE) { 8005 if (token == IF_SRC_FILE || token == IF_SRC_FILEADDR) {
7909 filename = match_strdup(&args[2]); 8006 int fpos = filter->range ? 2 : 1;
8007
8008 filename = match_strdup(&args[fpos]);
7910 if (!filename) { 8009 if (!filename) {
7911 ret = -ENOMEM; 8010 ret = -ENOMEM;
7912 goto fail; 8011 goto fail;
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index ae9b90dc9a5a..257fa460b846 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -330,15 +330,22 @@ void *perf_aux_output_begin(struct perf_output_handle *handle,
330 if (!rb) 330 if (!rb)
331 return NULL; 331 return NULL;
332 332
333 if (!rb_has_aux(rb) || !atomic_inc_not_zero(&rb->aux_refcount)) 333 if (!rb_has_aux(rb))
334 goto err; 334 goto err;
335 335
336 /* 336 /*
337 * If rb::aux_mmap_count is zero (and rb_has_aux() above went through), 337 * If aux_mmap_count is zero, the aux buffer is in perf_mmap_close(),
338 * the aux buffer is in perf_mmap_close(), about to get freed. 338 * about to get freed, so we leave immediately.
339 *
340 * Checking rb::aux_mmap_count and rb::refcount has to be done in
341 * the same order, see perf_mmap_close. Otherwise we end up freeing
342 * aux pages in this path, which is a bug, because in_atomic().
339 */ 343 */
340 if (!atomic_read(&rb->aux_mmap_count)) 344 if (!atomic_read(&rb->aux_mmap_count))
341 goto err_put; 345 goto err;
346
347 if (!atomic_inc_not_zero(&rb->aux_refcount))
348 goto err;
342 349
343 /* 350 /*
344 * Nesting is not supported for AUX area, make sure nested 351 * Nesting is not supported for AUX area, make sure nested
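
Reviewer note: the reordering above checks aux_mmap_count before taking aux_refcount, so this path can never end up holding the last reference and freeing AUX pages from an atomic context. A minimal userspace analogue of that check-then-try-get ordering (C11 atomics; names are illustrative, not kernel API):

#include <stdatomic.h>
#include <stdbool.h>

struct aux_buf {
	atomic_int mmap_count;	/* drops to 0 once teardown starts   */
	atomic_int refcount;	/* 0 means the buffer is being freed */
};

/* Bail out *before* taking a reference if teardown has begun. */
static bool aux_try_pin(struct aux_buf *b)
{
	if (atomic_load(&b->mmap_count) == 0)
		return false;

	/* atomic_inc_not_zero(): pin only while still alive */
	int old = atomic_load(&b->refcount);
	do {
		if (old == 0)
			return false;
	} while (!atomic_compare_exchange_weak(&b->refcount, &old, old + 1));

	return true;
}
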
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index b7a525ab2083..8c50276b60d1 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -172,8 +172,10 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
172 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); 172 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
173 err = -EAGAIN; 173 err = -EAGAIN;
174 ptep = page_check_address(page, mm, addr, &ptl, 0); 174 ptep = page_check_address(page, mm, addr, &ptl, 0);
175 if (!ptep) 175 if (!ptep) {
176 mem_cgroup_cancel_charge(kpage, memcg, false);
176 goto unlock; 177 goto unlock;
178 }
177 179
178 get_page(kpage); 180 get_page(kpage);
179 page_add_new_anon_rmap(kpage, vma, addr, false); 181 page_add_new_anon_rmap(kpage, vma, addr, false);
@@ -200,7 +202,6 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
200 202
201 err = 0; 203 err = 0;
202 unlock: 204 unlock:
203 mem_cgroup_cancel_charge(kpage, memcg, false);
204 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); 205 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
205 unlock_page(page); 206 unlock_page(page);
206 return err; 207 return err;
diff --git a/kernel/exit.c b/kernel/exit.c
index 2f974ae042a6..091a78be3b09 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -848,12 +848,7 @@ void do_exit(long code)
848 TASKS_RCU(preempt_enable()); 848 TASKS_RCU(preempt_enable());
849 exit_notify(tsk, group_dead); 849 exit_notify(tsk, group_dead);
850 proc_exit_connector(tsk); 850 proc_exit_connector(tsk);
851#ifdef CONFIG_NUMA 851 mpol_put_task_policy(tsk);
852 task_lock(tsk);
853 mpol_put(tsk->mempolicy);
854 tsk->mempolicy = NULL;
855 task_unlock(tsk);
856#endif
857#ifdef CONFIG_FUTEX 852#ifdef CONFIG_FUTEX
858 if (unlikely(current->pi_state_cache)) 853 if (unlikely(current->pi_state_cache))
859 kfree(current->pi_state_cache); 854 kfree(current->pi_state_cache);
diff --git a/kernel/fork.c b/kernel/fork.c
index 52e725d4a866..beb31725f7e2 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -799,6 +799,29 @@ struct file *get_mm_exe_file(struct mm_struct *mm)
799EXPORT_SYMBOL(get_mm_exe_file); 799EXPORT_SYMBOL(get_mm_exe_file);
800 800
801/** 801/**
802 * get_task_exe_file - acquire a reference to the task's executable file
803 *
804 * Returns %NULL if the task's mm (if any) has no associated executable
805 * file, or if this is a kernel thread with a borrowed mm (see the
806 * comment above get_task_mm). The caller must release the file via fput().
807 */
808struct file *get_task_exe_file(struct task_struct *task)
809{
810 struct file *exe_file = NULL;
811 struct mm_struct *mm;
812
813 task_lock(task);
814 mm = task->mm;
815 if (mm) {
816 if (!(task->flags & PF_KTHREAD))
817 exe_file = get_mm_exe_file(mm);
818 }
819 task_unlock(task);
820 return exe_file;
821}
822EXPORT_SYMBOL(get_task_exe_file);
823
824/**
802 * get_task_mm - acquire a reference to the task's mm 825 * get_task_mm - acquire a reference to the task's mm
803 * 826 *
804 * Returns %NULL if the task has no mm. Checks PF_KTHREAD (meaning 827 * Returns %NULL if the task has no mm. Checks PF_KTHREAD (meaning
@@ -913,14 +936,12 @@ void mm_release(struct task_struct *tsk, struct mm_struct *mm)
913 deactivate_mm(tsk, mm); 936 deactivate_mm(tsk, mm);
914 937
915 /* 938 /*
916 * If we're exiting normally, clear a user-space tid field if 939 * Signal userspace if we're not exiting with a core dump
917 * requested. We leave this alone when dying by signal, to leave 940 * because we want to leave the value intact for debugging
918 * the value intact in a core dump, and to save the unnecessary 941 * purposes.
919 * trouble, say, a killed vfork parent shouldn't touch this mm.
920 * Userland only wants this done for a sys_exit.
921 */ 942 */
922 if (tsk->clear_child_tid) { 943 if (tsk->clear_child_tid) {
923 if (!(tsk->flags & PF_SIGNALED) && 944 if (!(tsk->signal->flags & SIGNAL_GROUP_COREDUMP) &&
924 atomic_read(&mm->mm_users) > 1) { 945 atomic_read(&mm->mm_users) > 1) {
925 /* 946 /*
926 * We don't check the error code - if userspace has 947 * We don't check the error code - if userspace has
@@ -1404,7 +1425,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
1404 p->real_start_time = ktime_get_boot_ns(); 1425 p->real_start_time = ktime_get_boot_ns();
1405 p->io_context = NULL; 1426 p->io_context = NULL;
1406 p->audit_context = NULL; 1427 p->audit_context = NULL;
1407 threadgroup_change_begin(current);
1408 cgroup_fork(p); 1428 cgroup_fork(p);
1409#ifdef CONFIG_NUMA 1429#ifdef CONFIG_NUMA
1410 p->mempolicy = mpol_dup(p->mempolicy); 1430 p->mempolicy = mpol_dup(p->mempolicy);
@@ -1556,6 +1576,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
1556 INIT_LIST_HEAD(&p->thread_group); 1576 INIT_LIST_HEAD(&p->thread_group);
1557 p->task_works = NULL; 1577 p->task_works = NULL;
1558 1578
1579 threadgroup_change_begin(current);
1559 /* 1580 /*
1560 * Ensure that the cgroup subsystem policies allow the new process to be 1581 * Ensure that the cgroup subsystem policies allow the new process to be
1561 * forked. It should be noted that the new process's css_set can be changed 1582 * forked. It should be noted that the new process's css_set can be changed
@@ -1656,6 +1677,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
1656bad_fork_cancel_cgroup: 1677bad_fork_cancel_cgroup:
1657 cgroup_cancel_fork(p); 1678 cgroup_cancel_fork(p);
1658bad_fork_free_pid: 1679bad_fork_free_pid:
1680 threadgroup_change_end(current);
1659 if (pid != &init_struct_pid) 1681 if (pid != &init_struct_pid)
1660 free_pid(pid); 1682 free_pid(pid);
1661bad_fork_cleanup_thread: 1683bad_fork_cleanup_thread:
@@ -1688,7 +1710,6 @@ bad_fork_cleanup_policy:
1688 mpol_put(p->mempolicy); 1710 mpol_put(p->mempolicy);
1689bad_fork_cleanup_threadgroup_lock: 1711bad_fork_cleanup_threadgroup_lock:
1690#endif 1712#endif
1691 threadgroup_change_end(current);
1692 delayacct_tsk_free(p); 1713 delayacct_tsk_free(p);
1693bad_fork_cleanup_count: 1714bad_fork_cleanup_count:
1694 atomic_dec(&p->cred->user->processes); 1715 atomic_dec(&p->cred->user->processes);
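
Reviewer note: a kernel-context sketch of how a caller might use the new get_task_exe_file() helper (the surrounding function is hypothetical). The helper takes task_lock() itself and returns a pinned file, so the only obligation on the caller is the fput():

/* Hypothetical caller: report whether @task currently has an
 * executable file, without pinning task->mm ourselves. */
static bool task_has_exe(struct task_struct *task)
{
	struct file *exe = get_task_exe_file(task);

	if (!exe)
		return false;	/* no mm, or kthread with borrowed mm */
	fput(exe);		/* drop the reference the helper took */
	return true;
}
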
diff --git a/kernel/futex.c b/kernel/futex.c
index 33664f70e2d2..46cb3a301bc1 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -179,7 +179,15 @@ int __read_mostly futex_cmpxchg_enabled;
179 * Futex flags used to encode options to functions and preserve them across 179 * Futex flags used to encode options to functions and preserve them across
180 * restarts. 180 * restarts.
181 */ 181 */
182#define FLAGS_SHARED 0x01 182#ifdef CONFIG_MMU
183# define FLAGS_SHARED 0x01
184#else
185/*
186 * NOMMU does not have a per-process address space. Let the compiler
187 * optimize the code away.
188 */
189# define FLAGS_SHARED 0x00
190#endif
183#define FLAGS_CLOCKRT 0x02 191#define FLAGS_CLOCKRT 0x02
184#define FLAGS_HAS_TIMEOUT 0x04 192#define FLAGS_HAS_TIMEOUT 0x04
185 193
@@ -405,6 +413,16 @@ static void get_futex_key_refs(union futex_key *key)
405 if (!key->both.ptr) 413 if (!key->both.ptr)
406 return; 414 return;
407 415
416 /*
417 * On MMU-less systems futexes are always "private", as there is no
418 * per-process address space. We still need the smp_mb() - yes,
419 * arch/blackfin has MMU-less SMP ...
420 */
421 if (!IS_ENABLED(CONFIG_MMU)) {
422 smp_mb(); /* explicit smp_mb(); (B) */
423 return;
424 }
425
408 switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) { 426 switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
409 case FUT_OFF_INODE: 427 case FUT_OFF_INODE:
410 ihold(key->shared.inode); /* implies smp_mb(); (B) */ 428 ihold(key->shared.inode); /* implies smp_mb(); (B) */
@@ -436,6 +454,9 @@ static void drop_futex_key_refs(union futex_key *key)
436 return; 454 return;
437 } 455 }
438 456
457 if (!IS_ENABLED(CONFIG_MMU))
458 return;
459
439 switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) { 460 switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
440 case FUT_OFF_INODE: 461 case FUT_OFF_INODE:
441 iput(key->shared.inode); 462 iput(key->shared.inode);
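
Reviewer note: the FLAGS_SHARED 0x00 definition and the IS_ENABLED(CONFIG_MMU) checks above lean on compile-time constant folding: the disabled branch is dead code and gets dropped entirely. A runnable userspace stand-in for that pattern (MMU_ENABLED is an illustrative stand-in for IS_ENABLED(CONFIG_MMU)):

#include <stdio.h>

#ifdef CONFIG_MMU
#define MMU_ENABLED 1
#else
#define MMU_ENABLED 0
#endif

static void get_key_refs(void)
{
	/* A constant condition: the compiler removes the dead branch. */
	if (!MMU_ENABLED) {
		puts("NOMMU: always private, nothing to pin");
		return;
	}
	puts("MMU: pin the inode or mm reference");
}

int main(void)
{
	get_key_refs();
	return 0;
}
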
diff --git a/kernel/irq/affinity.c b/kernel/irq/affinity.c
index f68959341c0f..32f6cfcff212 100644
--- a/kernel/irq/affinity.c
+++ b/kernel/irq/affinity.c
@@ -39,6 +39,7 @@ struct cpumask *irq_create_affinity_mask(unsigned int *nr_vecs)
39 return NULL; 39 return NULL;
40 } 40 }
41 41
42 get_online_cpus();
42 if (max_vecs >= num_online_cpus()) { 43 if (max_vecs >= num_online_cpus()) {
43 cpumask_copy(affinity_mask, cpu_online_mask); 44 cpumask_copy(affinity_mask, cpu_online_mask);
44 *nr_vecs = num_online_cpus(); 45 *nr_vecs = num_online_cpus();
@@ -56,6 +57,7 @@ struct cpumask *irq_create_affinity_mask(unsigned int *nr_vecs)
56 } 57 }
57 *nr_vecs = vecs; 58 *nr_vecs = vecs;
58 } 59 }
60 put_online_cpus();
59 61
60 return affinity_mask; 62 return affinity_mask;
61} 63}
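
Reviewer note: the get_online_cpus()/put_online_cpus() pair added above brackets both the num_online_cpus() check and the cpumask copy, so a hotplug event cannot change the set in between. A userspace sketch of the same read-side bracketing, using a pthread rwlock in place of the hotplug lock (all names illustrative):

#include <pthread.h>
#include <string.h>

#define MAX_CPUS 64

static pthread_rwlock_t hotplug_lock = PTHREAD_RWLOCK_INITIALIZER;
static char online[MAX_CPUS];	/* stands in for cpu_online_mask   */
static int num_online = 4;	/* stands in for num_online_cpus() */

/* Count and mask copy happen in one read-side critical section,
 * so both reflect the same moment in time. */
static int snapshot_online(char *dst)
{
	pthread_rwlock_rdlock(&hotplug_lock);
	int n = num_online;
	memcpy(dst, online, sizeof(online));
	pthread_rwlock_unlock(&hotplug_lock);
	return n;
}
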
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index b4c1bc7c9ca2..637389088b3f 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -820,6 +820,17 @@ __irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
820 desc->name = name; 820 desc->name = name;
821 821
822 if (handle != handle_bad_irq && is_chained) { 822 if (handle != handle_bad_irq && is_chained) {
823 /*
824 * We're about to start this interrupt immediately,
825 * hence the need to set the trigger configuration.
826 * But the .set_type callback may have overridden the
827 * flow handler, ignoring that we're dealing with a
828 * chained interrupt. Reset it immediately because we
829 * do know better.
830 */
831 __irq_set_trigger(desc, irqd_get_trigger_type(&desc->irq_data));
832 desc->handle_irq = handle;
833
823 irq_settings_set_noprobe(desc); 834 irq_settings_set_noprobe(desc);
824 irq_settings_set_norequest(desc); 835 irq_settings_set_norequest(desc);
825 irq_settings_set_nothread(desc); 836 irq_settings_set_nothread(desc);
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 73a2b786b5e9..9530fcd27704 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -1681,8 +1681,10 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler,
1681 action->dev_id = dev_id; 1681 action->dev_id = dev_id;
1682 1682
1683 retval = irq_chip_pm_get(&desc->irq_data); 1683 retval = irq_chip_pm_get(&desc->irq_data);
1684 if (retval < 0) 1684 if (retval < 0) {
1685 kfree(action);
1685 return retval; 1686 return retval;
1687 }
1686 1688
1687 chip_bus_lock(desc); 1689 chip_bus_lock(desc);
1688 retval = __setup_irq(irq, desc, action); 1690 retval = __setup_irq(irq, desc, action);
@@ -1985,8 +1987,10 @@ int request_percpu_irq(unsigned int irq, irq_handler_t handler,
1985 action->percpu_dev_id = dev_id; 1987 action->percpu_dev_id = dev_id;
1986 1988
1987 retval = irq_chip_pm_get(&desc->irq_data); 1989 retval = irq_chip_pm_get(&desc->irq_data);
1988 if (retval < 0) 1990 if (retval < 0) {
1991 kfree(action);
1989 return retval; 1992 return retval;
1993 }
1990 1994
1991 chip_bus_lock(desc); 1995 chip_bus_lock(desc);
1992 retval = __setup_irq(irq, desc, action); 1996 retval = __setup_irq(irq, desc, action);
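
Reviewer note: both hunks above fix the same leak shape: an allocation made before a failing call was not freed on that call's error return. A minimal runnable sketch of the fixed pattern (chip_pm_get() here is a stub standing in for irq_chip_pm_get()):

#include <stdlib.h>

struct irqaction { void *dev_id; };

static int chip_pm_get(int fail) { return fail ? -1 : 0; }

static int setup_irq_action(int fail)
{
	struct irqaction *action = calloc(1, sizeof(*action));

	if (!action)
		return -1;

	if (chip_pm_get(fail) < 0) {
		free(action);		/* the added cleanup */
		return -1;
	}

	free(action);			/* normal teardown for the demo */
	return 0;
}
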
diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c
index 54999350162c..19e9dfbe97fa 100644
--- a/kernel/irq/msi.c
+++ b/kernel/irq/msi.c
@@ -359,6 +359,17 @@ int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
359 else 359 else
360 dev_dbg(dev, "irq [%d-%d] for MSI\n", 360 dev_dbg(dev, "irq [%d-%d] for MSI\n",
361 virq, virq + desc->nvec_used - 1); 361 virq, virq + desc->nvec_used - 1);
362 /*
363 * This flag is set by the PCI layer as we need to activate
364 * the MSI entries before the PCI layer enables MSI in the
365 * card. Otherwise the card latches a random MSI message.
366 */
367 if (info->flags & MSI_FLAG_ACTIVATE_EARLY) {
368 struct irq_data *irq_data;
369
370 irq_data = irq_domain_get_irq_data(domain, desc->irq);
371 irq_domain_activate_irq(irq_data);
372 }
362 } 373 }
363 374
364 return 0; 375 return 0;
diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c
index 503bc2d348e5..037c321c5618 100644
--- a/kernel/kexec_file.c
+++ b/kernel/kexec_file.c
@@ -887,7 +887,10 @@ int kexec_load_purgatory(struct kimage *image, unsigned long min,
887 return 0; 887 return 0;
888out: 888out:
889 vfree(pi->sechdrs); 889 vfree(pi->sechdrs);
890 pi->sechdrs = NULL;
891
890 vfree(pi->purgatory_buf); 892 vfree(pi->purgatory_buf);
893 pi->purgatory_buf = NULL;
891 return ret; 894 return ret;
892} 895}
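
Reviewer note: NULLing the pointers after vfree() above makes any later cleanup pass a no-op instead of a double free. A one-function userspace sketch of the idiom (free(NULL) is defined to do nothing, mirroring vfree(NULL)):

#include <stdlib.h>

/* Free and clear in one step so repeated cleanup is always safe. */
static void free_and_clear(void **p)
{
	free(*p);
	*p = NULL;
}
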
893 896
diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h
index 37649e69056c..8a99abf58080 100644
--- a/kernel/locking/qspinlock_paravirt.h
+++ b/kernel/locking/qspinlock_paravirt.h
@@ -450,7 +450,7 @@ pv_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node)
450 goto gotlock; 450 goto gotlock;
451 } 451 }
452 } 452 }
453 WRITE_ONCE(pn->state, vcpu_halted); 453 WRITE_ONCE(pn->state, vcpu_hashed);
454 qstat_inc(qstat_pv_wait_head, true); 454 qstat_inc(qstat_pv_wait_head, true);
455 qstat_inc(qstat_pv_wait_again, waitcnt); 455 qstat_inc(qstat_pv_wait_again, waitcnt);
456 pv_wait(&l->locked, _Q_SLOW_VAL); 456 pv_wait(&l->locked, _Q_SLOW_VAL);
diff --git a/kernel/locking/qspinlock_stat.h b/kernel/locking/qspinlock_stat.h
index 22e025309845..b9d031516254 100644
--- a/kernel/locking/qspinlock_stat.h
+++ b/kernel/locking/qspinlock_stat.h
@@ -153,7 +153,6 @@ static ssize_t qstat_read(struct file *file, char __user *user_buf,
153 */ 153 */
154 if ((counter == qstat_pv_latency_kick) || 154 if ((counter == qstat_pv_latency_kick) ||
155 (counter == qstat_pv_latency_wake)) { 155 (counter == qstat_pv_latency_wake)) {
156 stat = 0;
157 if (kicks) 156 if (kicks)
158 stat = DIV_ROUND_CLOSEST_ULL(stat, kicks); 157 stat = DIV_ROUND_CLOSEST_ULL(stat, kicks);
159 } 158 }
diff --git a/kernel/memremap.c b/kernel/memremap.c
index 251d16b4cb41..b501e390bb34 100644
--- a/kernel/memremap.c
+++ b/kernel/memremap.c
@@ -247,6 +247,7 @@ static void devm_memremap_pages_release(struct device *dev, void *data)
247 align_start = res->start & ~(SECTION_SIZE - 1); 247 align_start = res->start & ~(SECTION_SIZE - 1);
248 align_size = ALIGN(resource_size(res), SECTION_SIZE); 248 align_size = ALIGN(resource_size(res), SECTION_SIZE);
249 arch_remove_memory(align_start, align_size); 249 arch_remove_memory(align_start, align_size);
250 untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
250 pgmap_radix_release(res); 251 pgmap_radix_release(res);
251 dev_WARN_ONCE(dev, pgmap->altmap && pgmap->altmap->alloc, 252 dev_WARN_ONCE(dev, pgmap->altmap && pgmap->altmap->alloc,
252 "%s: failed to free all reserved pages\n", __func__); 253 "%s: failed to free all reserved pages\n", __func__);
@@ -282,6 +283,7 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
282 struct percpu_ref *ref, struct vmem_altmap *altmap) 283 struct percpu_ref *ref, struct vmem_altmap *altmap)
283{ 284{
284 resource_size_t key, align_start, align_size, align_end; 285 resource_size_t key, align_start, align_size, align_end;
286 pgprot_t pgprot = PAGE_KERNEL;
285 struct dev_pagemap *pgmap; 287 struct dev_pagemap *pgmap;
286 struct page_map *page_map; 288 struct page_map *page_map;
287 int error, nid, is_ram; 289 int error, nid, is_ram;
@@ -351,6 +353,11 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
351 if (nid < 0) 353 if (nid < 0)
352 nid = numa_mem_id(); 354 nid = numa_mem_id();
353 355
356 error = track_pfn_remap(NULL, &pgprot, PHYS_PFN(align_start), 0,
357 align_size);
358 if (error)
359 goto err_pfn_remap;
360
354 error = arch_add_memory(nid, align_start, align_size, true); 361 error = arch_add_memory(nid, align_start, align_size, true);
355 if (error) 362 if (error)
356 goto err_add_memory; 363 goto err_add_memory;
@@ -371,6 +378,8 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
371 return __va(res->start); 378 return __va(res->start);
372 379
373 err_add_memory: 380 err_add_memory:
381 untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
382 err_pfn_remap:
374 err_radix: 383 err_radix:
375 pgmap_radix_release(res); 384 pgmap_radix_release(res);
376 devres_free(page_map); 385 devres_free(page_map);
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index a881c6a7ba74..33c79b6105c5 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -300,12 +300,12 @@ static int create_image(int platform_mode)
300 save_processor_state(); 300 save_processor_state();
301 trace_suspend_resume(TPS("machine_suspend"), PM_EVENT_HIBERNATE, true); 301 trace_suspend_resume(TPS("machine_suspend"), PM_EVENT_HIBERNATE, true);
302 error = swsusp_arch_suspend(); 302 error = swsusp_arch_suspend();
303 /* Restore control flow magically appears here */
304 restore_processor_state();
303 trace_suspend_resume(TPS("machine_suspend"), PM_EVENT_HIBERNATE, false); 305 trace_suspend_resume(TPS("machine_suspend"), PM_EVENT_HIBERNATE, false);
304 if (error) 306 if (error)
305 printk(KERN_ERR "PM: Error %d creating hibernation image\n", 307 printk(KERN_ERR "PM: Error %d creating hibernation image\n",
306 error); 308 error);
307 /* Restore control flow magically appears here */
308 restore_processor_state();
309 if (!in_suspend) 309 if (!in_suspend)
310 events_check_enabled = false; 310 events_check_enabled = false;
311 311
diff --git a/kernel/power/qos.c b/kernel/power/qos.c
index 97b0df71303e..168ff442ebde 100644
--- a/kernel/power/qos.c
+++ b/kernel/power/qos.c
@@ -482,7 +482,16 @@ void pm_qos_update_request(struct pm_qos_request *req,
482 return; 482 return;
483 } 483 }
484 484
485 cancel_delayed_work_sync(&req->work); 485 /*
486 * This function may be called very early during boot, for example,
487 * from of_clk_init(), where irq needs to stay disabled.
488 * cancel_delayed_work_sync() assumes that irq is enabled on
489 * invocation and re-enables it on return. Avoid calling it until
490 * workqueue is initialized.
491 */
492 if (keventd_up())
493 cancel_delayed_work_sync(&req->work);
494
486 __pm_qos_update_request(req, new_value); 495 __pm_qos_update_request(req, new_value);
487} 496}
488EXPORT_SYMBOL_GPL(pm_qos_update_request); 497EXPORT_SYMBOL_GPL(pm_qos_update_request);
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 9a0178c2ac1d..b02228411d57 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -835,9 +835,9 @@ static bool memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn)
835 */ 835 */
836static bool rtree_next_node(struct memory_bitmap *bm) 836static bool rtree_next_node(struct memory_bitmap *bm)
837{ 837{
838 bm->cur.node = list_entry(bm->cur.node->list.next, 838 if (!list_is_last(&bm->cur.node->list, &bm->cur.zone->leaves)) {
839 struct rtree_node, list); 839 bm->cur.node = list_entry(bm->cur.node->list.next,
840 if (&bm->cur.node->list != &bm->cur.zone->leaves) { 840 struct rtree_node, list);
841 bm->cur.node_pfn += BM_BITS_PER_BLOCK; 841 bm->cur.node_pfn += BM_BITS_PER_BLOCK;
842 bm->cur.node_bit = 0; 842 bm->cur.node_bit = 0;
843 touch_softlockup_watchdog(); 843 touch_softlockup_watchdog();
@@ -845,9 +845,9 @@ static bool rtree_next_node(struct memory_bitmap *bm)
845 } 845 }
846 846
847 /* No more nodes, goto next zone */ 847 /* No more nodes, goto next zone */
848 bm->cur.zone = list_entry(bm->cur.zone->list.next, 848 if (!list_is_last(&bm->cur.zone->list, &bm->zones)) {
849 bm->cur.zone = list_entry(bm->cur.zone->list.next,
849 struct mem_zone_bm_rtree, list); 850 struct mem_zone_bm_rtree, list);
850 if (&bm->cur.zone->list != &bm->zones) {
851 bm->cur.node = list_entry(bm->cur.zone->leaves.next, 851 bm->cur.node = list_entry(bm->cur.zone->leaves.next,
852 struct rtree_node, list); 852 struct rtree_node, list);
853 bm->cur.node_pfn = 0; 853 bm->cur.node_pfn = 0;
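
Reviewer note: the snapshot.c fix above checks list_is_last() before computing the entry, because list_entry() applied to the head sentinel yields a bogus struct pointer, and the old code stored that pointer into the cursor before checking. A runnable sketch of the corrected traversal on a minimal intrusive list (types are illustrative):

#include <stdbool.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct rnode { unsigned long pfn; struct list_head list; };

/* Advance only after proving a next node exists. */
static bool next_node(struct list_head *head, struct rnode **cur)
{
	if ((*cur)->list.next == head)		/* list_is_last() */
		return false;
	*cur = container_of((*cur)->list.next, struct rnode, list);
	return true;
}
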
diff --git a/kernel/printk/braille.c b/kernel/printk/braille.c
index 276762f3a460..d5760c42f042 100644
--- a/kernel/printk/braille.c
+++ b/kernel/printk/braille.c
@@ -9,10 +9,10 @@
9 9
10char *_braille_console_setup(char **str, char **brl_options) 10char *_braille_console_setup(char **str, char **brl_options)
11{ 11{
12 if (!memcmp(*str, "brl,", 4)) { 12 if (!strncmp(*str, "brl,", 4)) {
13 *brl_options = ""; 13 *brl_options = "";
14 *str += 4; 14 *str += 4;
15 } else if (!memcmp(str, "brl=", 4)) { 15 } else if (!strncmp(*str, "brl=", 4)) {
16 *brl_options = *str + 4; 16 *brl_options = *str + 4;
17 *str = strchr(*brl_options, ','); 17 *str = strchr(*brl_options, ',');
18 if (!*str) 18 if (!*str)
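
Reviewer note: the braille_console fix above repairs two bugs at once: the second memcmp() compared the bytes of the pointer variable itself (str is a char ** there), and memcmp() reads a fixed length even past a terminating NUL. A runnable sketch of the corrected comparison:

#include <stdio.h>
#include <string.h>

/* strncmp() on the dereferenced string stops at the NUL, so a short
 * option string cannot be over-read. */
static int has_prefix(char **str, const char *prefix)
{
	return strncmp(*str, prefix, strlen(prefix)) == 0;
}

int main(void)
{
	char *opt = "brl=ttyS0";

	printf("%d %d\n", has_prefix(&opt, "brl="),
			  has_prefix(&opt, "brl,"));	/* prints: 1 0 */
	return 0;
}
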
diff --git a/kernel/printk/internal.h b/kernel/printk/internal.h
index 5d4505f30083..7fd2838fa417 100644
--- a/kernel/printk/internal.h
+++ b/kernel/printk/internal.h
@@ -16,11 +16,9 @@
16 */ 16 */
17#include <linux/percpu.h> 17#include <linux/percpu.h>
18 18
19typedef __printf(2, 0) int (*printk_func_t)(int level, const char *fmt, 19typedef __printf(1, 0) int (*printk_func_t)(const char *fmt, va_list args);
20 va_list args);
21 20
22__printf(2, 0) 21int __printf(1, 0) vprintk_default(const char *fmt, va_list args);
23int vprintk_default(int level, const char *fmt, va_list args);
24 22
25#ifdef CONFIG_PRINTK_NMI 23#ifdef CONFIG_PRINTK_NMI
26 24
@@ -33,10 +31,9 @@ extern raw_spinlock_t logbuf_lock;
33 * via per-CPU variable. 31 * via per-CPU variable.
34 */ 32 */
35DECLARE_PER_CPU(printk_func_t, printk_func); 33DECLARE_PER_CPU(printk_func_t, printk_func);
36__printf(2, 0) 34static inline __printf(1, 0) int vprintk_func(const char *fmt, va_list args)
37static inline int vprintk_func(int level, const char *fmt, va_list args)
38{ 35{
39 return this_cpu_read(printk_func)(level, fmt, args); 36 return this_cpu_read(printk_func)(fmt, args);
40} 37}
41 38
42extern atomic_t nmi_message_lost; 39extern atomic_t nmi_message_lost;
@@ -47,10 +44,9 @@ static inline int get_nmi_message_lost(void)
47 44
48#else /* CONFIG_PRINTK_NMI */ 45#else /* CONFIG_PRINTK_NMI */
49 46
50__printf(2, 0) 47static inline __printf(1, 0) int vprintk_func(const char *fmt, va_list args)
51static inline int vprintk_func(int level, const char *fmt, va_list args)
52{ 48{
53 return vprintk_default(level, fmt, args); 49 return vprintk_default(fmt, args);
54} 50}
55 51
56static inline int get_nmi_message_lost(void) 52static inline int get_nmi_message_lost(void)
diff --git a/kernel/printk/nmi.c b/kernel/printk/nmi.c
index bc3eeb1ae6da..16bab471c7e2 100644
--- a/kernel/printk/nmi.c
+++ b/kernel/printk/nmi.c
@@ -58,7 +58,7 @@ static DEFINE_PER_CPU(struct nmi_seq_buf, nmi_print_seq);
58 * one writer running. But the buffer might get flushed from another 58 * one writer running. But the buffer might get flushed from another
59 * CPU, so we need to be careful. 59 * CPU, so we need to be careful.
60 */ 60 */
61static int vprintk_nmi(int level, const char *fmt, va_list args) 61static int vprintk_nmi(const char *fmt, va_list args)
62{ 62{
63 struct nmi_seq_buf *s = this_cpu_ptr(&nmi_print_seq); 63 struct nmi_seq_buf *s = this_cpu_ptr(&nmi_print_seq);
64 int add = 0; 64 int add = 0;
@@ -79,16 +79,7 @@ again:
79 if (!len) 79 if (!len)
80 smp_rmb(); 80 smp_rmb();
81 81
82 if (level != LOGLEVEL_DEFAULT) { 82 add = vsnprintf(s->buffer + len, sizeof(s->buffer) - len, fmt, args);
83 add = snprintf(s->buffer + len, sizeof(s->buffer) - len,
84 KERN_SOH "%c", '0' + level);
85 add += vsnprintf(s->buffer + len + add,
86 sizeof(s->buffer) - len - add,
87 fmt, args);
88 } else {
89 add = vsnprintf(s->buffer + len, sizeof(s->buffer) - len,
90 fmt, args);
91 }
92 83
93 /* 84 /*
94 * Do it once again if the buffer has been flushed in the meantime. 85 * Do it once again if the buffer has been flushed in the meantime.
@@ -108,24 +99,30 @@ again:
108 return add; 99 return add;
109} 100}
110 101
111/* 102static void printk_nmi_flush_line(const char *text, int len)
112 * printk one line from the temporary buffer from @start index until
113 * and including the @end index.
114 */
115static void print_nmi_seq_line(struct nmi_seq_buf *s, int start, int end)
116{ 103{
117 const char *buf = s->buffer + start;
118
119 /* 104 /*
120 * The buffers are flushed in NMI only on panic. The messages must 105 * The buffers are flushed in NMI only on panic. The messages must
121 * go only into the ring buffer at this stage. Consoles will get 106 * go only into the ring buffer at this stage. Consoles will get
122 * explicitly called later when a crashdump is not generated. 107 * explicitly called later when a crashdump is not generated.
123 */ 108 */
124 if (in_nmi()) 109 if (in_nmi())
125 printk_deferred("%.*s", (end - start) + 1, buf); 110 printk_deferred("%.*s", len, text);
126 else 111 else
127 printk("%.*s", (end - start) + 1, buf); 112 printk("%.*s", len, text);
113
114}
128 115
116/*
117 * printk one line from the temporary buffer from @start index until
118 * and including the @end index.
119 */
120static void printk_nmi_flush_seq_line(struct nmi_seq_buf *s,
121 int start, int end)
122{
123 const char *buf = s->buffer + start;
124
125 printk_nmi_flush_line(buf, (end - start) + 1);
129} 126}
130 127
131/* 128/*
@@ -159,9 +156,11 @@ more:
159 * the buffer an unexpected way. If we printed something then 156 * the buffer an unexpected way. If we printed something then
160 * @len must only increase. 157 * @len must only increase.
161 */ 158 */
162 if (i && i >= len) 159 if (i && i >= len) {
163 pr_err("printk_nmi_flush: internal error: i=%d >= len=%zu\n", 160 const char *msg = "printk_nmi_flush: internal error\n";
164 i, len); 161
162 printk_nmi_flush_line(msg, strlen(msg));
163 }
165 164
166 if (!len) 165 if (!len)
167 goto out; /* Someone else has already flushed the buffer. */ 166 goto out; /* Someone else has already flushed the buffer. */
@@ -175,14 +174,14 @@ more:
175 /* Print line by line. */ 174 /* Print line by line. */
176 for (; i < size; i++) { 175 for (; i < size; i++) {
177 if (s->buffer[i] == '\n') { 176 if (s->buffer[i] == '\n') {
178 print_nmi_seq_line(s, last_i, i); 177 printk_nmi_flush_seq_line(s, last_i, i);
179 last_i = i + 1; 178 last_i = i + 1;
180 } 179 }
181 } 180 }
182 /* Check if there was a partial line. */ 181 /* Check if there was a partial line. */
183 if (last_i < size) { 182 if (last_i < size) {
184 print_nmi_seq_line(s, last_i, size - 1); 183 printk_nmi_flush_seq_line(s, last_i, size - 1);
185 pr_cont("\n"); 184 printk_nmi_flush_line("\n", strlen("\n"));
186 } 185 }
187 186
188 /* 187 /*
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index a5ef95ca18c9..eea6dbc2d8cf 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -1930,28 +1930,7 @@ asmlinkage int printk_emit(int facility, int level,
1930} 1930}
1931EXPORT_SYMBOL(printk_emit); 1931EXPORT_SYMBOL(printk_emit);
1932 1932
1933#ifdef CONFIG_PRINTK 1933int vprintk_default(const char *fmt, va_list args)
1934#define define_pr_level(func, loglevel) \
1935asmlinkage __visible void func(const char *fmt, ...) \
1936{ \
1937 va_list args; \
1938 \
1939 va_start(args, fmt); \
1940 vprintk_default(loglevel, fmt, args); \
1941 va_end(args); \
1942} \
1943EXPORT_SYMBOL(func)
1944
1945define_pr_level(__pr_emerg, LOGLEVEL_EMERG);
1946define_pr_level(__pr_alert, LOGLEVEL_ALERT);
1947define_pr_level(__pr_crit, LOGLEVEL_CRIT);
1948define_pr_level(__pr_err, LOGLEVEL_ERR);
1949define_pr_level(__pr_warn, LOGLEVEL_WARNING);
1950define_pr_level(__pr_notice, LOGLEVEL_NOTICE);
1951define_pr_level(__pr_info, LOGLEVEL_INFO);
1952#endif
1953
1954int vprintk_default(int level, const char *fmt, va_list args)
1955{ 1934{
1956 int r; 1935 int r;
1957 1936
@@ -1961,7 +1940,7 @@ int vprintk_default(int level, const char *fmt, va_list args)
1961 return r; 1940 return r;
1962 } 1941 }
1963#endif 1942#endif
1964 r = vprintk_emit(0, level, NULL, 0, fmt, args); 1943 r = vprintk_emit(0, LOGLEVEL_DEFAULT, NULL, 0, fmt, args);
1965 1944
1966 return r; 1945 return r;
1967} 1946}
@@ -1994,7 +1973,7 @@ asmlinkage __visible int printk(const char *fmt, ...)
1994 int r; 1973 int r;
1995 1974
1996 va_start(args, fmt); 1975 va_start(args, fmt);
1997 r = vprintk_func(LOGLEVEL_DEFAULT, fmt, args); 1976 r = vprintk_func(fmt, args);
1998 va_end(args); 1977 va_end(args);
1999 1978
2000 return r; 1979 return r;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 5c883fe8e440..44817c640e99 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -74,6 +74,7 @@
74#include <linux/context_tracking.h> 74#include <linux/context_tracking.h>
75#include <linux/compiler.h> 75#include <linux/compiler.h>
76#include <linux/frame.h> 76#include <linux/frame.h>
77#include <linux/prefetch.h>
77 78
78#include <asm/switch_to.h> 79#include <asm/switch_to.h>
79#include <asm/tlb.h> 80#include <asm/tlb.h>
@@ -2015,6 +2016,28 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
2015 success = 1; /* we're going to change ->state */ 2016 success = 1; /* we're going to change ->state */
2016 cpu = task_cpu(p); 2017 cpu = task_cpu(p);
2017 2018
2019 /*
2020 * Ensure we load p->on_rq _after_ p->state, otherwise it would
2021 * be possible to, falsely, observe p->on_rq == 0 and get stuck
2022 * in smp_cond_load_acquire() below.
2023 *
2024 * sched_ttwu_pending() try_to_wake_up()
2025 * [S] p->on_rq = 1; [L] p->state
2026 * UNLOCK rq->lock -----.
2027 * \
2028 * +--- RMB
2029 * schedule() /
2030 * LOCK rq->lock -----'
2031 * UNLOCK rq->lock
2032 *
2033 * [task p]
2034 * [S] p->state = UNINTERRUPTIBLE [L] p->on_rq
2035 *
2036 * Pairs with the UNLOCK+LOCK on rq->lock from the
2037 * last wakeup of our task and the schedule that got our task
2038 * current.
2039 */
2040 smp_rmb();
2018 if (p->on_rq && ttwu_remote(p, wake_flags)) 2041 if (p->on_rq && ttwu_remote(p, wake_flags))
2019 goto stat; 2042 goto stat;
2020 2043
@@ -2972,6 +2995,23 @@ EXPORT_PER_CPU_SYMBOL(kstat);
2972EXPORT_PER_CPU_SYMBOL(kernel_cpustat); 2995EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
2973 2996
2974/* 2997/*
2998 * The function fair_sched_class.update_curr accesses the struct curr
2999 * and its field curr->exec_start; when called from task_sched_runtime(),
3000 * we observe a high rate of cache misses in practice.
3001 * Prefetching this data results in improved performance.
3002 */
3003static inline void prefetch_curr_exec_start(struct task_struct *p)
3004{
3005#ifdef CONFIG_FAIR_GROUP_SCHED
3006 struct sched_entity *curr = (&p->se)->cfs_rq->curr;
3007#else
3008 struct sched_entity *curr = (&task_rq(p)->cfs)->curr;
3009#endif
3010 prefetch(curr);
3011 prefetch(&curr->exec_start);
3012}
3013
3014/*
2975 * Return accounted runtime for the task. 3015 * Return accounted runtime for the task.
2976 * In case the task is currently running, return the runtime plus current's 3016 * In case the task is currently running, return the runtime plus current's
2977 * pending runtime that have not been accounted yet. 3017 * pending runtime that have not been accounted yet.
@@ -3005,6 +3045,7 @@ unsigned long long task_sched_runtime(struct task_struct *p)
3005 * thread, breaking clock_gettime(). 3045 * thread, breaking clock_gettime().
3006 */ 3046 */
3007 if (task_current(rq, p) && task_on_rq_queued(p)) { 3047 if (task_current(rq, p) && task_on_rq_queued(p)) {
3048 prefetch_curr_exec_start(p);
3008 update_rq_clock(rq); 3049 update_rq_clock(rq);
3009 p->sched_class->update_curr(rq); 3050 p->sched_class->update_curr(rq);
3010 } 3051 }
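
Reviewer note: a userspace C11 sketch of the load ordering the new smp_rmb() enforces in try_to_wake_up(). The acquire fence stands in for smp_rmb(), the release store for the publishing side; this is an illustrative mapping, not the kernel primitives themselves:

#include <stdatomic.h>

static atomic_int state;	/* stands in for p->state */
static atomic_int on_rq;	/* stands in for p->on_rq */

/* Publisher (sched_ttwu_pending() analogue): release ordering makes
 * on_rq visible only after the stores that preceded it. */
static void publish(void)
{
	atomic_store_explicit(&on_rq, 1, memory_order_release);
}

/* Observer (try_to_wake_up() analogue): the acquire fence forbids
 * the on_rq load from being satisfied before the state load, which
 * is how the false on_rq == 0 observation arose. */
static int observe(void)
{
	int s = atomic_load_explicit(&state, memory_order_relaxed);

	atomic_thread_fence(memory_order_acquire);	/* the smp_rmb() */
	return s + atomic_load_explicit(&on_rq, memory_order_relaxed);
}
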
diff --git a/kernel/sched/cpudeadline.c b/kernel/sched/cpudeadline.c
index 5be58820465c..d4184498c9f5 100644
--- a/kernel/sched/cpudeadline.c
+++ b/kernel/sched/cpudeadline.c
@@ -168,7 +168,7 @@ void cpudl_set(struct cpudl *cp, int cpu, u64 dl, int is_valid)
168 168
169 if (old_idx == IDX_INVALID) { 169 if (old_idx == IDX_INVALID) {
170 cp->size++; 170 cp->size++;
171 cp->elements[cp->size - 1].dl = 0; 171 cp->elements[cp->size - 1].dl = dl;
172 cp->elements[cp->size - 1].cpu = cpu; 172 cp->elements[cp->size - 1].cpu = cpu;
173 cp->elements[cpu].idx = cp->size - 1; 173 cp->elements[cpu].idx = cp->size - 1;
174 cpudl_change_key(cp, cp->size - 1, dl); 174 cpudl_change_key(cp, cp->size - 1, dl);
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index 1934f658c036..a846cf89eb96 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -263,6 +263,11 @@ void account_idle_time(cputime_t cputime)
263 cpustat[CPUTIME_IDLE] += (__force u64) cputime; 263 cpustat[CPUTIME_IDLE] += (__force u64) cputime;
264} 264}
265 265
266/*
267 * When a guest is interrupted for a longer amount of time, missed clock
268 * ticks are not redelivered later. Due to that, this function may on
269 * occasion account more time than the calling functions think elapsed.
270 */
266static __always_inline cputime_t steal_account_process_time(cputime_t maxtime) 271static __always_inline cputime_t steal_account_process_time(cputime_t maxtime)
267{ 272{
268#ifdef CONFIG_PARAVIRT 273#ifdef CONFIG_PARAVIRT
@@ -371,7 +376,7 @@ static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
371 * idle, or potentially user or system time. Due to rounding, 376 * idle, or potentially user or system time. Due to rounding,
372 * other time can exceed ticks occasionally. 377 * other time can exceed ticks occasionally.
373 */ 378 */
374 other = account_other_time(cputime); 379 other = account_other_time(ULONG_MAX);
375 if (other >= cputime) 380 if (other >= cputime)
376 return; 381 return;
377 cputime -= other; 382 cputime -= other;
@@ -486,7 +491,7 @@ void account_process_tick(struct task_struct *p, int user_tick)
486 } 491 }
487 492
488 cputime = cputime_one_jiffy; 493 cputime = cputime_one_jiffy;
489 steal = steal_account_process_time(cputime); 494 steal = steal_account_process_time(ULONG_MAX);
490 495
491 if (steal >= cputime) 496 if (steal >= cputime)
492 return; 497 return;
@@ -508,13 +513,21 @@ void account_process_tick(struct task_struct *p, int user_tick)
508 */ 513 */
509void account_idle_ticks(unsigned long ticks) 514void account_idle_ticks(unsigned long ticks)
510{ 515{
516 cputime_t cputime, steal;
511 517
512 if (sched_clock_irqtime) { 518 if (sched_clock_irqtime) {
513 irqtime_account_idle_ticks(ticks); 519 irqtime_account_idle_ticks(ticks);
514 return; 520 return;
515 } 521 }
516 522
517 account_idle_time(jiffies_to_cputime(ticks)); 523 cputime = jiffies_to_cputime(ticks);
524 steal = steal_account_process_time(ULONG_MAX);
525
526 if (steal >= cputime)
527 return;
528
529 cputime -= steal;
530 account_idle_time(cputime);
518} 531}
519 532
520/* 533/*
@@ -606,19 +619,25 @@ static void cputime_adjust(struct task_cputime *curr,
606 stime = curr->stime; 619 stime = curr->stime;
607 utime = curr->utime; 620 utime = curr->utime;
608 621
609 if (utime == 0) { 622 /*
610 stime = rtime; 623 * If either stime or both stime and utime are 0, assume all runtime is
624 * userspace. Once a task gets some ticks, the monotonicity code at
625 * 'update' will ensure things converge to the observed ratio.
626 */
627 if (stime == 0) {
628 utime = rtime;
611 goto update; 629 goto update;
612 } 630 }
613 631
614 if (stime == 0) { 632 if (utime == 0) {
615 utime = rtime; 633 stime = rtime;
616 goto update; 634 goto update;
617 } 635 }
618 636
619 stime = scale_stime((__force u64)stime, (__force u64)rtime, 637 stime = scale_stime((__force u64)stime, (__force u64)rtime,
620 (__force u64)(stime + utime)); 638 (__force u64)(stime + utime));
621 639
640update:
622 /* 641 /*
623 * Make sure stime doesn't go backwards; this preserves monotonicity 642 * Make sure stime doesn't go backwards; this preserves monotonicity
624 * for utime because rtime is monotonic. 643 * for utime because rtime is monotonic.
@@ -641,7 +660,6 @@ static void cputime_adjust(struct task_cputime *curr,
641 stime = rtime - utime; 660 stime = rtime - utime;
642 } 661 }
643 662
644update:
645 prev->stime = stime; 663 prev->stime = stime;
646 prev->utime = utime; 664 prev->utime = utime;
647out: 665out:
@@ -686,6 +704,13 @@ static cputime_t get_vtime_delta(struct task_struct *tsk)
686 unsigned long now = READ_ONCE(jiffies); 704 unsigned long now = READ_ONCE(jiffies);
687 cputime_t delta, other; 705 cputime_t delta, other;
688 706
707 /*
708 * Unlike tick based timing, vtime based timing never has lost
709 * ticks, and no need for steal time accounting to make up for
710 * lost ticks. Vtime accounts a rounded version of actual
711 * elapsed time. Limit account_other_time to prevent rounding
712 * errors from causing elapsed vtime to go negative.
713 */
689 delta = jiffies_to_cputime(now - tsk->vtime_snap); 714 delta = jiffies_to_cputime(now - tsk->vtime_snap);
690 other = account_other_time(delta); 715 other = account_other_time(delta);
691 WARN_ON_ONCE(tsk->vtime_snap_whence == VTIME_INACTIVE); 716 WARN_ON_ONCE(tsk->vtime_snap_whence == VTIME_INACTIVE);
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index fcb7f0217ff4..1ce8867283dc 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -658,8 +658,11 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
658 * 658 *
659 * XXX figure out if select_task_rq_dl() deals with offline cpus. 659 * XXX figure out if select_task_rq_dl() deals with offline cpus.
660 */ 660 */
661 if (unlikely(!rq->online)) 661 if (unlikely(!rq->online)) {
662 lockdep_unpin_lock(&rq->lock, rf.cookie);
662 rq = dl_task_offline_migration(rq, p); 663 rq = dl_task_offline_migration(rq, p);
664 rf.cookie = lockdep_pin_lock(&rq->lock);
665 }
663 666
664 /* 667 /*
665 * Queueing this task back might have overloaded rq, check if we need 668 * Queueing this task back might have overloaded rq, check if we need
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 4088eedea763..039de34f1521 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4269,7 +4269,7 @@ static void sync_throttle(struct task_group *tg, int cpu)
4269 pcfs_rq = tg->parent->cfs_rq[cpu]; 4269 pcfs_rq = tg->parent->cfs_rq[cpu];
4270 4270
4271 cfs_rq->throttle_count = pcfs_rq->throttle_count; 4271 cfs_rq->throttle_count = pcfs_rq->throttle_count;
4272 pcfs_rq->throttled_clock_task = rq_clock_task(cpu_rq(cpu)); 4272 cfs_rq->throttled_clock_task = rq_clock_task(cpu_rq(cpu));
4273} 4273}
4274 4274
4275/* conditionally throttle active cfs_rq's from put_prev_entity() */ 4275/* conditionally throttle active cfs_rq's from put_prev_entity() */
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index ef6c6c3f9d8a..0db7c8a2afe2 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -605,12 +605,16 @@ static int __seccomp_filter(int this_syscall, const struct seccomp_data *sd,
605 ptrace_event(PTRACE_EVENT_SECCOMP, data); 605 ptrace_event(PTRACE_EVENT_SECCOMP, data);
606 /* 606 /*
607 * The delivery of a fatal signal during event 607 * The delivery of a fatal signal during event
608 * notification may silently skip tracer notification. 608 * notification may silently skip tracer notification,
609 * Terminating the task now avoids executing a system 609 * which could leave us with a potentially unmodified
610 * call that may not be intended. 610 * syscall that the tracer would have liked to have
611 * changed. Since the process is about to die, we just
612 * force the syscall to be skipped and let the signal
613 * kill the process and correctly handle any tracer exit
614 * notifications.
611 */ 615 */
612 if (fatal_signal_pending(current)) 616 if (fatal_signal_pending(current))
613 do_exit(SIGSYS); 617 goto skip;
614 /* Check if the tracer forced the syscall to be skipped. */ 618 /* Check if the tracer forced the syscall to be skipped. */
615 this_syscall = syscall_get_nr(current, task_pt_regs(current)); 619 this_syscall = syscall_get_nr(current, task_pt_regs(current));
616 if (this_syscall < 0) 620 if (this_syscall < 0)
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index b43d0b27c1fe..a13bbdaab47d 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -2140,6 +2140,21 @@ static int do_proc_dointvec_conv(bool *negp, unsigned long *lvalp,
2140 return 0; 2140 return 0;
2141} 2141}
2142 2142
2143static int do_proc_douintvec_conv(bool *negp, unsigned long *lvalp,
2144 int *valp,
2145 int write, void *data)
2146{
2147 if (write) {
2148 if (*negp)
2149 return -EINVAL;
2150 *valp = *lvalp;
2151 } else {
2152 unsigned int val = *valp;
2153 *lvalp = (unsigned long)val;
2154 }
2155 return 0;
2156}
2157
2143static const char proc_wspace_sep[] = { ' ', '\t', '\n' }; 2158static const char proc_wspace_sep[] = { ' ', '\t', '\n' };
2144 2159
2145static int __do_proc_dointvec(void *tbl_data, struct ctl_table *table, 2160static int __do_proc_dointvec(void *tbl_data, struct ctl_table *table,
@@ -2259,8 +2274,27 @@ static int do_proc_dointvec(struct ctl_table *table, int write,
2259int proc_dointvec(struct ctl_table *table, int write, 2274int proc_dointvec(struct ctl_table *table, int write,
2260 void __user *buffer, size_t *lenp, loff_t *ppos) 2275 void __user *buffer, size_t *lenp, loff_t *ppos)
2261{ 2276{
2262 return do_proc_dointvec(table,write,buffer,lenp,ppos, 2277 return do_proc_dointvec(table, write, buffer, lenp, ppos, NULL, NULL);
2263 NULL,NULL); 2278}
2279
2280/**
2281 * proc_douintvec - read a vector of unsigned integers
2282 * @table: the sysctl table
2283 * @write: %TRUE if this is a write to the sysctl file
2284 * @buffer: the user buffer
2285 * @lenp: the size of the user buffer
2286 * @ppos: file position
2287 *
2288 * Reads/writes up to table->maxlen/sizeof(unsigned int) unsigned integer
2289 * values from/to the user buffer, treated as an ASCII string.
2290 *
2291 * Returns 0 on success.
2292 */
2293int proc_douintvec(struct ctl_table *table, int write,
2294 void __user *buffer, size_t *lenp, loff_t *ppos)
2295{
2296 return do_proc_dointvec(table, write, buffer, lenp, ppos,
2297 do_proc_douintvec_conv, NULL);
2264} 2298}
2265 2299
2266/* 2300/*
@@ -2858,6 +2892,12 @@ int proc_dointvec(struct ctl_table *table, int write,
2858 return -ENOSYS; 2892 return -ENOSYS;
2859} 2893}
2860 2894
2895int proc_douintvec(struct ctl_table *table, int write,
2896 void __user *buffer, size_t *lenp, loff_t *ppos)
2897{
2898 return -ENOSYS;
2899}
2900
2861int proc_dointvec_minmax(struct ctl_table *table, int write, 2901int proc_dointvec_minmax(struct ctl_table *table, int write,
2862 void __user *buffer, size_t *lenp, loff_t *ppos) 2902 void __user *buffer, size_t *lenp, loff_t *ppos)
2863{ 2903{
@@ -2903,6 +2943,7 @@ int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int write,
2903 * exception granted :-) 2943 * exception granted :-)
2904 */ 2944 */
2905EXPORT_SYMBOL(proc_dointvec); 2945EXPORT_SYMBOL(proc_dointvec);
2946EXPORT_SYMBOL(proc_douintvec);
2906EXPORT_SYMBOL(proc_dointvec_jiffies); 2947EXPORT_SYMBOL(proc_dointvec_jiffies);
2907EXPORT_SYMBOL(proc_dointvec_minmax); 2948EXPORT_SYMBOL(proc_dointvec_minmax);
2908EXPORT_SYMBOL(proc_dointvec_userhz_jiffies); 2949EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
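
Reviewer note: a kernel-context sketch (hypothetical table and variable names) of what the new proc_douintvec handler is for: an unsigned int knob that must accept values above INT_MAX and reject negative input, which proc_dointvec could not do safely:

static unsigned int my_threshold = 100;

static struct ctl_table my_sysctls[] = {
	{
		.procname	= "my_threshold",
		.data		= &my_threshold,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_douintvec,	/* added above */
	},
	{ }
};
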
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 204fdc86863d..2ec7c00228f3 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -908,10 +908,11 @@ static void __tick_nohz_idle_enter(struct tick_sched *ts)
908 ktime_t now, expires; 908 ktime_t now, expires;
909 int cpu = smp_processor_id(); 909 int cpu = smp_processor_id();
910 910
911 now = tick_nohz_start_idle(ts);
912
911 if (can_stop_idle_tick(cpu, ts)) { 913 if (can_stop_idle_tick(cpu, ts)) {
912 int was_stopped = ts->tick_stopped; 914 int was_stopped = ts->tick_stopped;
913 915
914 now = tick_nohz_start_idle(ts);
915 ts->idle_calls++; 916 ts->idle_calls++;
916 917
917 expires = tick_nohz_stop_sched_tick(ts, now, cpu); 918 expires = tick_nohz_stop_sched_tick(ts, now, cpu);
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 3b65746c7f15..e07fb093f819 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -401,7 +401,10 @@ static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
401 do { 401 do {
402 seq = raw_read_seqcount_latch(&tkf->seq); 402 seq = raw_read_seqcount_latch(&tkf->seq);
403 tkr = tkf->base + (seq & 0x01); 403 tkr = tkf->base + (seq & 0x01);
404 now = ktime_to_ns(tkr->base) + timekeeping_get_ns(tkr); 404 now = ktime_to_ns(tkr->base);
405
406 now += clocksource_delta(tkr->read(tkr->clock),
407 tkr->cycle_last, tkr->mask);
405 } while (read_seqcount_retry(&tkf->seq, seq)); 408 } while (read_seqcount_retry(&tkf->seq, seq));
406 409
407 return now; 410 return now;
diff --git a/kernel/time/timekeeping_debug.c b/kernel/time/timekeeping_debug.c
index f6bd65236712..107310a6f36f 100644
--- a/kernel/time/timekeeping_debug.c
+++ b/kernel/time/timekeeping_debug.c
@@ -23,7 +23,9 @@
23 23
24#include "timekeeping_internal.h" 24#include "timekeeping_internal.h"
25 25
26static unsigned int sleep_time_bin[32] = {0}; 26#define NUM_BINS 32
27
28static unsigned int sleep_time_bin[NUM_BINS] = {0};
27 29
28static int tk_debug_show_sleep_time(struct seq_file *s, void *data) 30static int tk_debug_show_sleep_time(struct seq_file *s, void *data)
29{ 31{
@@ -69,6 +71,9 @@ late_initcall(tk_debug_sleep_time_init);
69 71
70void tk_debug_account_sleep_time(struct timespec64 *t) 72void tk_debug_account_sleep_time(struct timespec64 *t)
71{ 73{
72 sleep_time_bin[fls(t->tv_sec)]++; 74 /* Cap bin index so we don't overflow the array */
75 int bin = min(fls(t->tv_sec), NUM_BINS-1);
76
77 sleep_time_bin[bin]++;
73} 78}
74 79
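
Reviewer note: the timekeeping_debug fix above clamps the fls()-derived bin index, since a large (or corrupted) tv_sec would otherwise index past the 32-entry array. A runnable sketch with a portable stand-in for fls():

#include <stdio.h>

#define NUM_BINS 32

static unsigned int sleep_time_bin[NUM_BINS];

/* Stand-in for the kernel's fls(): position of the highest set bit,
 * 1-based; 0 for an input of 0. */
static int fls_ul(unsigned long v)
{
	int r = 0;

	while (v) {
		v >>= 1;
		r++;
	}
	return r;
}

static void account_sleep(unsigned long seconds)
{
	int bin = fls_ul(seconds);

	if (bin > NUM_BINS - 1)		/* the added clamp */
		bin = NUM_BINS - 1;
	sleep_time_bin[bin]++;
}

int main(void)
{
	account_sleep(1UL << 40);	/* lands in the last bin, not OOB */
	printf("%u\n", sleep_time_bin[NUM_BINS - 1]);
	return 0;
}
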
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 555670a5143c..32bf6f75a8fe 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -1496,6 +1496,7 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
1496 struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]); 1496 struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
1497 u64 expires = KTIME_MAX; 1497 u64 expires = KTIME_MAX;
1498 unsigned long nextevt; 1498 unsigned long nextevt;
1499 bool is_max_delta;
1499 1500
1500 /* 1501 /*
1501 * Pretend that there is no timer pending if the cpu is offline. 1502 * Pretend that there is no timer pending if the cpu is offline.
@@ -1506,6 +1507,7 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
1506 1507
1507 spin_lock(&base->lock); 1508 spin_lock(&base->lock);
1508 nextevt = __next_timer_interrupt(base); 1509 nextevt = __next_timer_interrupt(base);
1510 is_max_delta = (nextevt == base->clk + NEXT_TIMER_MAX_DELTA);
1509 base->next_expiry = nextevt; 1511 base->next_expiry = nextevt;
1510 /* 1512 /*
1511 * We have a fresh next event. Check whether we can forward the base: 1513 * We have a fresh next event. Check whether we can forward the base:
@@ -1519,7 +1521,8 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
1519 expires = basem; 1521 expires = basem;
1520 base->is_idle = false; 1522 base->is_idle = false;
1521 } else { 1523 } else {
1522 expires = basem + (nextevt - basej) * TICK_NSEC; 1524 if (!is_max_delta)
1525 expires = basem + (nextevt - basej) * TICK_NSEC;
1523 /* 1526 /*
1524 * If we expect to sleep more than a tick, mark the base idle: 1527 * If we expect to sleep more than a tick, mark the base idle:
1525 */ 1528 */
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 7598e6ca817a..dbafc5df03f3 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -223,7 +223,7 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
223 what |= MASK_TC_BIT(op_flags, META); 223 what |= MASK_TC_BIT(op_flags, META);
224 what |= MASK_TC_BIT(op_flags, PREFLUSH); 224 what |= MASK_TC_BIT(op_flags, PREFLUSH);
225 what |= MASK_TC_BIT(op_flags, FUA); 225 what |= MASK_TC_BIT(op_flags, FUA);
226 if (op == REQ_OP_DISCARD) 226 if (op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE)
227 what |= BLK_TC_ACT(BLK_TC_DISCARD); 227 what |= BLK_TC_ACT(BLK_TC_DISCARD);
228 if (op == REQ_OP_FLUSH) 228 if (op == REQ_OP_FLUSH)
229 what |= BLK_TC_ACT(BLK_TC_FLUSH); 229 what |= BLK_TC_ACT(BLK_TC_FLUSH);
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 2307d7c89dac..2e2cca509231 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1686,24 +1686,6 @@ config LATENCYTOP
1686 Enable this option if you want to use the LatencyTOP tool 1686 Enable this option if you want to use the LatencyTOP tool
1687 to find out which userspace is blocking on what kernel operations. 1687 to find out which userspace is blocking on what kernel operations.
1688 1688
1689config ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
1690 bool
1691
1692config DEBUG_STRICT_USER_COPY_CHECKS
1693 bool "Strict user copy size checks"
1694 depends on ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
1695 depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
1696 help
1697 Enabling this option turns a certain set of sanity checks for user
1698 copy operations into compile time failures.
1699
1700 The copy_from_user() etc checks are there to help test if there
1701 are sufficient security checks on the length argument of
1702 the copy operation, by having gcc prove that the argument is
1703 within bounds.
1704
1705 If unsure, say N.
1706
1707source kernel/trace/Kconfig 1689source kernel/trace/Kconfig
1708 1690
1709menu "Runtime Testing" 1691menu "Runtime Testing"
diff --git a/lib/Makefile b/lib/Makefile
index cfa68eb269e4..5dc77a8ec297 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -24,7 +24,6 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
24 is_single_threaded.o plist.o decompress.o kobject_uevent.o \ 24 is_single_threaded.o plist.o decompress.o kobject_uevent.o \
25 earlycpio.o seq_buf.o nmi_backtrace.o nodemask.o 25 earlycpio.o seq_buf.o nmi_backtrace.o nodemask.o
26 26
27obj-$(CONFIG_ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS) += usercopy.o
28lib-$(CONFIG_MMU) += ioremap.o 27lib-$(CONFIG_MMU) += ioremap.o
29lib-$(CONFIG_SMP) += cpumask.o 28lib-$(CONFIG_SMP) += cpumask.o
30lib-$(CONFIG_HAS_DMA) += dma-noop.o 29lib-$(CONFIG_HAS_DMA) += dma-noop.o
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 9e8c7386b3a0..7e3138cfc8c9 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -291,33 +291,13 @@ done:
291} 291}
292 292
293/* 293/*
294 * Fault in the first iovec of the given iov_iter, to a maximum length
295 * of bytes. Returns 0 on success, or non-zero if the memory could not be
296 * accessed (ie. because it is an invalid address).
297 *
298 * writev-intensive code may want this to prefault several iovecs -- that
299 * would be possible (callers must not rely on the fact that _only_ the
300 * first iovec will be faulted with the current implementation).
301 */
302int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
303{
304 if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
305 char __user *buf = i->iov->iov_base + i->iov_offset;
306 bytes = min(bytes, i->iov->iov_len - i->iov_offset);
307 return fault_in_pages_readable(buf, bytes);
308 }
309 return 0;
310}
311EXPORT_SYMBOL(iov_iter_fault_in_readable);
312
313/*
314 * Fault in one or more iovecs of the given iov_iter, to a maximum length of 294 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
315 * bytes. For each iovec, fault in each page that constitutes the iovec. 295 * bytes. For each iovec, fault in each page that constitutes the iovec.
316 * 296 *
317 * Return 0 on success, or non-zero if the memory could not be accessed (i.e. 297 * Return 0 on success, or non-zero if the memory could not be accessed (i.e.
318 * because it is an invalid address). 298 * because it is an invalid address).
319 */ 299 */
320int iov_iter_fault_in_multipages_readable(struct iov_iter *i, size_t bytes) 300int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
321{ 301{
322 size_t skip = i->iov_offset; 302 size_t skip = i->iov_offset;
323 const struct iovec *iov; 303 const struct iovec *iov;
@@ -334,7 +314,7 @@ int iov_iter_fault_in_multipages_readable(struct iov_iter *i, size_t bytes)
334 } 314 }
335 return 0; 315 return 0;
336} 316}
337EXPORT_SYMBOL(iov_iter_fault_in_multipages_readable); 317EXPORT_SYMBOL(iov_iter_fault_in_readable);
338 318
339void iov_iter_init(struct iov_iter *i, int direction, 319void iov_iter_init(struct iov_iter *i, int direction,
340 const struct iovec *iov, unsigned long nr_segs, 320 const struct iovec *iov, unsigned long nr_segs,
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 5d845ffd7982..56054e541a0f 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -30,7 +30,7 @@
30 30
31#define HASH_DEFAULT_SIZE 64UL 31#define HASH_DEFAULT_SIZE 64UL
32#define HASH_MIN_SIZE 4U 32#define HASH_MIN_SIZE 4U
33#define BUCKET_LOCKS_PER_CPU 128UL 33#define BUCKET_LOCKS_PER_CPU 32UL
34 34
35static u32 head_hashfn(struct rhashtable *ht, 35static u32 head_hashfn(struct rhashtable *ht,
36 const struct bucket_table *tbl, 36 const struct bucket_table *tbl,
@@ -70,21 +70,25 @@ static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl,
70 unsigned int nr_pcpus = num_possible_cpus(); 70 unsigned int nr_pcpus = num_possible_cpus();
71#endif 71#endif
72 72
73 nr_pcpus = min_t(unsigned int, nr_pcpus, 32UL); 73 nr_pcpus = min_t(unsigned int, nr_pcpus, 64UL);
74 size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul); 74 size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);
75 75
76 /* Never allocate more than 0.5 locks per bucket */ 76 /* Never allocate more than 0.5 locks per bucket */
77 size = min_t(unsigned int, size, tbl->size >> 1); 77 size = min_t(unsigned int, size, tbl->size >> 1);
78 78
79 if (sizeof(spinlock_t) != 0) { 79 if (sizeof(spinlock_t) != 0) {
80 tbl->locks = NULL;
80#ifdef CONFIG_NUMA 81#ifdef CONFIG_NUMA
81 if (size * sizeof(spinlock_t) > PAGE_SIZE && 82 if (size * sizeof(spinlock_t) > PAGE_SIZE &&
82 gfp == GFP_KERNEL) 83 gfp == GFP_KERNEL)
83 tbl->locks = vmalloc(size * sizeof(spinlock_t)); 84 tbl->locks = vmalloc(size * sizeof(spinlock_t));
84 else
85#endif 85#endif
86 tbl->locks = kmalloc_array(size, sizeof(spinlock_t), 86 if (gfp != GFP_KERNEL)
87 gfp); 87 gfp |= __GFP_NOWARN | __GFP_NORETRY;
88
89 if (!tbl->locks)
90 tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
91 gfp);
88 if (!tbl->locks) 92 if (!tbl->locks)
89 return -ENOMEM; 93 return -ENOMEM;
90 for (i = 0; i < size; i++) 94 for (i = 0; i < size; i++)
@@ -321,12 +325,14 @@ static int rhashtable_expand(struct rhashtable *ht)
321static int rhashtable_shrink(struct rhashtable *ht) 325static int rhashtable_shrink(struct rhashtable *ht)
322{ 326{
323 struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht); 327 struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
324 unsigned int size; 328 unsigned int nelems = atomic_read(&ht->nelems);
329 unsigned int size = 0;
325 int err; 330 int err;
326 331
327 ASSERT_RHT_MUTEX(ht); 332 ASSERT_RHT_MUTEX(ht);
328 333
329 size = roundup_pow_of_two(atomic_read(&ht->nelems) * 3 / 2); 334 if (nelems)
335 size = roundup_pow_of_two(nelems * 3 / 2);
330 if (size < ht->p.min_size) 336 if (size < ht->p.min_size)
331 size = ht->p.min_size; 337 size = ht->p.min_size;
332 338
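
Reviewer note: the rhashtable_shrink() change above guards against nelems == 0, because the kernel's roundup_pow_of_two() is undefined for 0 (it ends up shifting by fls(0) - 1 = -1). A runnable sketch of the fixed sizing logic; this userspace roundup is total so the demo runs either way:

#include <stdio.h>

static unsigned int roundup_pow_of_two(unsigned int n)
{
	unsigned int p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

static unsigned int shrink_size(unsigned int nelems, unsigned int min_size)
{
	unsigned int size = 0;

	if (nelems)			/* the added guard */
		size = roundup_pow_of_two(nelems * 3 / 2);
	if (size < min_size)
		size = min_size;
	return size;
}

int main(void)
{
	/* prints: 4 256 */
	printf("%u %u\n", shrink_size(0, 4), shrink_size(100, 4));
	return 0;
}
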
diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
index 33f655ef48cd..9c5fe8110413 100644
--- a/lib/strncpy_from_user.c
+++ b/lib/strncpy_from_user.c
@@ -40,8 +40,8 @@ static inline long do_strncpy_from_user(char *dst, const char __user *src, long
40 unsigned long c, data; 40 unsigned long c, data;
41 41
42 /* Fall back to byte-at-a-time if we get a page fault */ 42 /* Fall back to byte-at-a-time if we get a page fault */
43 if (unlikely(unsafe_get_user(c,(unsigned long __user *)(src+res)))) 43 unsafe_get_user(c, (unsigned long __user *)(src+res), byte_at_a_time);
44 break; 44
45 *(unsigned long *)(dst+res) = c; 45 *(unsigned long *)(dst+res) = c;
46 if (has_zero(c, &data, &constants)) { 46 if (has_zero(c, &data, &constants)) {
47 data = prep_zero_mask(c, data, &constants); 47 data = prep_zero_mask(c, data, &constants);
@@ -56,8 +56,7 @@ byte_at_a_time:
56 while (max) { 56 while (max) {
57 char c; 57 char c;
58 58
59 if (unlikely(unsafe_get_user(c,src+res))) 59 unsafe_get_user(c,src+res, efault);
60 return -EFAULT;
61 dst[res] = c; 60 dst[res] = c;
62 if (!c) 61 if (!c)
63 return res; 62 return res;
@@ -76,6 +75,7 @@ byte_at_a_time:
76 * Nope: we hit the address space limit, and we still had more 75 * Nope: we hit the address space limit, and we still had more
77 * characters the caller would have wanted. That's an EFAULT. 76 * characters the caller would have wanted. That's an EFAULT.
78 */ 77 */
78efault:
79 return -EFAULT; 79 return -EFAULT;
80} 80}
81 81
diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
index 2625943625d7..8e105ed4df12 100644
--- a/lib/strnlen_user.c
+++ b/lib/strnlen_user.c
@@ -45,8 +45,7 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count,
45 src -= align; 45 src -= align;
46 max += align; 46 max += align;
47 47
48 if (unlikely(unsafe_get_user(c,(unsigned long __user *)src))) 48 unsafe_get_user(c, (unsigned long __user *)src, efault);
49 return 0;
50 c |= aligned_byte_mask(align); 49 c |= aligned_byte_mask(align);
51 50
52 for (;;) { 51 for (;;) {
@@ -61,8 +60,7 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count,
61 if (unlikely(max <= sizeof(unsigned long))) 60 if (unlikely(max <= sizeof(unsigned long)))
62 break; 61 break;
63 max -= sizeof(unsigned long); 62 max -= sizeof(unsigned long);
64 if (unlikely(unsafe_get_user(c,(unsigned long __user *)(src+res)))) 63 unsafe_get_user(c, (unsigned long __user *)(src+res), efault);
65 return 0;
66 } 64 }
67 res -= align; 65 res -= align;
68 66
@@ -77,6 +75,7 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count,
77 * Nope: we hit the address space limit, and we still had more 75 * Nope: we hit the address space limit, and we still had more
78 * characters the caller would have wanted. That's 0. 76 * characters the caller would have wanted. That's 0.
79 */ 77 */
78efault:
80 return 0; 79 return 0;
81} 80}
82 81
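
Both user-string helpers above switch to the three-argument form of unsafe_get_user(), where the macro jumps to a caller-supplied label on a fault instead of returning a value every call site must test; the EFAULT and length-limit handling then collapses into one labelled exit per function. A self-contained sketch of the pattern (fetch_word() is a hypothetical stand-in for the faulting access, not the kernel primitive):

	#include <stddef.h>

	static int fetch_word(unsigned long *dst, const unsigned long *src)
	{
		if (!src)
			return -1;	/* simulate a fault */
		*dst = *src;
		return 0;
	}

	#define unsafe_get(x, ptr, err_label)		\
	do {						\
		if (fetch_word(&(x), (ptr)))		\
			goto err_label;			\
	} while (0)

	static long read_first(const unsigned long *src)
	{
		unsigned long c;

		unsafe_get(c, src, efault);	/* no per-call error check */
		return (long)c;
	efault:
		return -14;			/* i.e. -EFAULT */
	}
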
diff --git a/lib/test_hash.c b/lib/test_hash.c
index 66c5fc8351e8..cac20c5fb304 100644
--- a/lib/test_hash.c
+++ b/lib/test_hash.c
@@ -143,7 +143,7 @@ static int __init
143test_hash_init(void) 143test_hash_init(void)
144{ 144{
145 char buf[SIZE+1]; 145 char buf[SIZE+1];
146 u32 string_or = 0, hash_or[2][33] = { 0 }; 146 u32 string_or = 0, hash_or[2][33] = { { 0, } };
147 unsigned tests = 0; 147 unsigned tests = 0;
148 unsigned long long h64 = 0; 148 unsigned long long h64 = 0;
149 int i, j; 149 int i, j;
@@ -219,21 +219,27 @@ test_hash_init(void)
219 } 219 }
220 220
221 /* Issue notices about skipped tests. */ 221 /* Issue notices about skipped tests. */
222#ifndef HAVE_ARCH__HASH_32 222#ifdef HAVE_ARCH__HASH_32
223 pr_info("__hash_32() has no arch implementation to test."); 223#if HAVE_ARCH__HASH_32 != 1
224#elif HAVE_ARCH__HASH_32 != 1
225 pr_info("__hash_32() is arch-specific; not compared to generic."); 224 pr_info("__hash_32() is arch-specific; not compared to generic.");
226#endif 225#endif
227#ifndef HAVE_ARCH_HASH_32 226#else
228 pr_info("hash_32() has no arch implementation to test."); 227 pr_info("__hash_32() has no arch implementation to test.");
229#elif HAVE_ARCH_HASH_32 != 1 228#endif
229#ifdef HAVE_ARCH_HASH_32
230#if HAVE_ARCH_HASH_32 != 1
230 pr_info("hash_32() is arch-specific; not compared to generic."); 231 pr_info("hash_32() is arch-specific; not compared to generic.");
231#endif 232#endif
232#ifndef HAVE_ARCH_HASH_64 233#else
233 pr_info("hash_64() has no arch implementation to test."); 234 pr_info("hash_32() has no arch implementation to test.");
234#elif HAVE_ARCH_HASH_64 != 1 235#endif
236#ifdef HAVE_ARCH_HASH_64
237#if HAVE_ARCH_HASH_64 != 1
235 pr_info("hash_64() is arch-specific; not compared to generic."); 238 pr_info("hash_64() is arch-specific; not compared to generic.");
236#endif 239#endif
240#else
241 pr_info("hash_64() has no arch implementation to test.");
242#endif
237 243
238 pr_notice("%u tests passed.", tests); 244 pr_notice("%u tests passed.", tests);
239 245
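
The preprocessor rework above replaces the #ifndef/#elif chains with nested #ifdef/#if blocks, so the comparison HAVE_ARCH_HASH_xx != 1 is only ever written where the macro is known to be defined (some toolchains diagnose #if arithmetic on undefined identifiers). The resulting shape, with HAVE_FEATURE standing in for the arch macros:

	#ifdef HAVE_FEATURE
	#if HAVE_FEATURE != 1
		/* arch-specific variant: skip comparison against generic */
	#endif
	#else
		/* no arch implementation to test */
	#endif
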
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index 297fdb5e74bd..64e899b63337 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -38,7 +38,7 @@ MODULE_PARM_DESC(runs, "Number of test runs per variant (default: 4)");
38 38
39static int max_size = 0; 39static int max_size = 0;
40module_param(max_size, int, 0); 40module_param(max_size, int, 0);
41MODULE_PARM_DESC(runs, "Maximum table size (default: calculated)"); 41MODULE_PARM_DESC(max_size, "Maximum table size (default: calculated)");
42 42
43static bool shrinking = false; 43static bool shrinking = false;
44module_param(shrinking, bool, 0); 44module_param(shrinking, bool, 0);
diff --git a/lib/usercopy.c b/lib/usercopy.c
deleted file mode 100644
index 4f5b1ddbcd25..000000000000
--- a/lib/usercopy.c
+++ /dev/null
@@ -1,9 +0,0 @@
1#include <linux/export.h>
2#include <linux/bug.h>
3#include <linux/uaccess.h>
4
5void copy_from_user_overflow(void)
6{
7 WARN(1, "Buffer overflow detected!\n");
8}
9EXPORT_SYMBOL(copy_from_user_overflow);
diff --git a/mm/Kconfig b/mm/Kconfig
index 78a23c5c302d..be0ee11fa0d9 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -262,7 +262,14 @@ config COMPACTION
262 select MIGRATION 262 select MIGRATION
263 depends on MMU 263 depends on MMU
264 help 264 help
265 Allows the compaction of memory for the allocation of huge pages. 265 Compaction is the only memory management component to form
266 high order (larger physically contiguous) memory blocks
267 reliably. The page allocator relies on compaction heavily and
268 the lack of the feature can lead to unexpected OOM killer
269 invocations for high order memory requests. You shouldn't
270 disable this option unless there really is a strong reason for
271 it and then we would be really interested to hear about that at
272 linux-mm@kvack.org.
266 273
267# 274#
268# support for page migration 275# support for page migration
diff --git a/mm/Makefile b/mm/Makefile
index fc059666c760..2ca1faf3fa09 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -21,6 +21,9 @@ KCOV_INSTRUMENT_memcontrol.o := n
21KCOV_INSTRUMENT_mmzone.o := n 21KCOV_INSTRUMENT_mmzone.o := n
22KCOV_INSTRUMENT_vmstat.o := n 22KCOV_INSTRUMENT_vmstat.o := n
23 23
24# Since __builtin_frame_address does work as used, disable the warning.
25CFLAGS_usercopy.o += $(call cc-disable-warning, frame-address)
26
24mmu-y := nommu.o 27mmu-y := nommu.o
25mmu-$(CONFIG_MMU) := gup.o highmem.o memory.o mincore.o \ 28mmu-$(CONFIG_MMU) := gup.o highmem.o memory.o mincore.o \
26 mlock.o mmap.o mprotect.o mremap.o msync.o rmap.o \ 29 mlock.o mmap.o mprotect.o mremap.o msync.o rmap.o \
@@ -99,3 +102,4 @@ obj-$(CONFIG_USERFAULTFD) += userfaultfd.o
99obj-$(CONFIG_IDLE_PAGE_TRACKING) += page_idle.o 102obj-$(CONFIG_IDLE_PAGE_TRACKING) += page_idle.o
100obj-$(CONFIG_FRAME_VECTOR) += frame_vector.o 103obj-$(CONFIG_FRAME_VECTOR) += frame_vector.o
101obj-$(CONFIG_DEBUG_PAGE_REF) += debug_page_ref.o 104obj-$(CONFIG_DEBUG_PAGE_REF) += debug_page_ref.o
105obj-$(CONFIG_HARDENED_USERCOPY) += usercopy.o
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 2373f0a7d340..a6abd76baa72 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1078,7 +1078,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
1078 goto out; 1078 goto out;
1079 1079
1080 page = pmd_page(*pmd); 1080 page = pmd_page(*pmd);
1081 VM_BUG_ON_PAGE(!PageHead(page), page); 1081 VM_BUG_ON_PAGE(!PageHead(page) && !is_zone_device_page(page), page);
1082 if (flags & FOLL_TOUCH) 1082 if (flags & FOLL_TOUCH)
1083 touch_pmd(vma, addr, pmd); 1083 touch_pmd(vma, addr, pmd);
1084 if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) { 1084 if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
@@ -1116,7 +1116,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
1116 } 1116 }
1117skip_mlock: 1117skip_mlock:
1118 page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT; 1118 page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
1119 VM_BUG_ON_PAGE(!PageCompound(page), page); 1119 VM_BUG_ON_PAGE(!PageCompound(page) && !is_zone_device_page(page), page);
1120 if (flags & FOLL_GET) 1120 if (flags & FOLL_GET)
1121 get_page(page); 1121 get_page(page);
1122 1122
@@ -1512,7 +1512,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
1512 struct page *page; 1512 struct page *page;
1513 pgtable_t pgtable; 1513 pgtable_t pgtable;
1514 pmd_t _pmd; 1514 pmd_t _pmd;
1515 bool young, write, dirty; 1515 bool young, write, dirty, soft_dirty;
1516 unsigned long addr; 1516 unsigned long addr;
1517 int i; 1517 int i;
1518 1518
@@ -1546,6 +1546,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
1546 write = pmd_write(*pmd); 1546 write = pmd_write(*pmd);
1547 young = pmd_young(*pmd); 1547 young = pmd_young(*pmd);
1548 dirty = pmd_dirty(*pmd); 1548 dirty = pmd_dirty(*pmd);
1549 soft_dirty = pmd_soft_dirty(*pmd);
1549 1550
1550 pmdp_huge_split_prepare(vma, haddr, pmd); 1551 pmdp_huge_split_prepare(vma, haddr, pmd);
1551 pgtable = pgtable_trans_huge_withdraw(mm, pmd); 1552 pgtable = pgtable_trans_huge_withdraw(mm, pmd);
@@ -1562,6 +1563,8 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
1562 swp_entry_t swp_entry; 1563 swp_entry_t swp_entry;
1563 swp_entry = make_migration_entry(page + i, write); 1564 swp_entry = make_migration_entry(page + i, write);
1564 entry = swp_entry_to_pte(swp_entry); 1565 entry = swp_entry_to_pte(swp_entry);
1566 if (soft_dirty)
1567 entry = pte_swp_mksoft_dirty(entry);
1565 } else { 1568 } else {
1566 entry = mk_pte(page + i, vma->vm_page_prot); 1569 entry = mk_pte(page + i, vma->vm_page_prot);
1567 entry = maybe_mkwrite(entry, vma); 1570 entry = maybe_mkwrite(entry, vma);
@@ -1569,6 +1572,8 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
1569 entry = pte_wrprotect(entry); 1572 entry = pte_wrprotect(entry);
1570 if (!young) 1573 if (!young)
1571 entry = pte_mkold(entry); 1574 entry = pte_mkold(entry);
1575 if (soft_dirty)
1576 entry = pte_mksoft_dirty(entry);
1572 } 1577 }
1573 if (dirty) 1578 if (dirty)
1574 SetPageDirty(page + i); 1579 SetPageDirty(page + i);
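
The soft_dirty additions make a THP split preserve the pmd's soft-dirty state in every child entry, on both the migration-entry and the normal-pte paths, so soft-dirty tracking (as used by CRIU-style checkpointing) does not lose writes across a split. A userspace-flavoured sketch of the propagation, with an illustrative flag bit:

	#include <stdbool.h>

	#define PTE_SOFT_DIRTY	(1u << 3)	/* illustrative bit */

	static unsigned int make_child_pte(unsigned int base, bool soft_dirty)
	{
		unsigned int pte = base;

		if (soft_dirty)			/* inherit what the pmd carried */
			pte |= PTE_SOFT_DIRTY;
		return pte;
	}
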
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index b9aa1b0b38b0..87e11d8ad536 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1448,6 +1448,7 @@ static void dissolve_free_huge_page(struct page *page)
1448 list_del(&page->lru); 1448 list_del(&page->lru);
1449 h->free_huge_pages--; 1449 h->free_huge_pages--;
1450 h->free_huge_pages_node[nid]--; 1450 h->free_huge_pages_node[nid]--;
1451 h->max_huge_pages--;
1451 update_and_free_page(h, page); 1452 update_and_free_page(h, page);
1452 } 1453 }
1453 spin_unlock(&hugetlb_lock); 1454 spin_unlock(&hugetlb_lock);
diff --git a/mm/kasan/quarantine.c b/mm/kasan/quarantine.c
index b6728a33a4ac..baabaad4a4aa 100644
--- a/mm/kasan/quarantine.c
+++ b/mm/kasan/quarantine.c
@@ -217,11 +217,8 @@ void quarantine_reduce(void)
217 new_quarantine_size = (READ_ONCE(totalram_pages) << PAGE_SHIFT) / 217 new_quarantine_size = (READ_ONCE(totalram_pages) << PAGE_SHIFT) /
218 QUARANTINE_FRACTION; 218 QUARANTINE_FRACTION;
219 percpu_quarantines = QUARANTINE_PERCPU_SIZE * num_online_cpus(); 219 percpu_quarantines = QUARANTINE_PERCPU_SIZE * num_online_cpus();
220 if (WARN_ONCE(new_quarantine_size < percpu_quarantines, 220 new_quarantine_size = (new_quarantine_size < percpu_quarantines) ?
221 "Too little memory, disabling global KASAN quarantine.\n")) 221 0 : new_quarantine_size - percpu_quarantines;
222 new_quarantine_size = 0;
223 else
224 new_quarantine_size -= percpu_quarantines;
225 WRITE_ONCE(quarantine_size, new_quarantine_size); 222 WRITE_ONCE(quarantine_size, new_quarantine_size);
226 223
227 last = global_quarantine.head; 224 last = global_quarantine.head;
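
The quarantine change replaces a one-shot warning with a plain clamp: if the per-cpu reservations exceed the global budget, the global quarantine size simply becomes zero rather than tripping WARN_ONCE on small-memory systems. The arithmetic, as a tiny stand-alone helper:

	static unsigned long global_budget(unsigned long total,
					   unsigned long percpu_reserved)
	{
		/* Clamp instead of underflowing an unsigned subtraction. */
		return (total < percpu_reserved) ? 0 : total - percpu_reserved;
	}
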
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 66beca1ad92f..9a6a51a7c416 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2337,8 +2337,11 @@ int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
2337 return 0; 2337 return 0;
2338 2338
2339 memcg = get_mem_cgroup_from_mm(current->mm); 2339 memcg = get_mem_cgroup_from_mm(current->mm);
2340 if (!mem_cgroup_is_root(memcg)) 2340 if (!mem_cgroup_is_root(memcg)) {
2341 ret = memcg_kmem_charge_memcg(page, gfp, order, memcg); 2341 ret = memcg_kmem_charge_memcg(page, gfp, order, memcg);
2342 if (!ret)
2343 __SetPageKmemcg(page);
2344 }
2342 css_put(&memcg->css); 2345 css_put(&memcg->css);
2343 return ret; 2346 return ret;
2344} 2347}
@@ -2365,6 +2368,11 @@ void memcg_kmem_uncharge(struct page *page, int order)
2365 page_counter_uncharge(&memcg->memsw, nr_pages); 2368 page_counter_uncharge(&memcg->memsw, nr_pages);
2366 2369
2367 page->mem_cgroup = NULL; 2370 page->mem_cgroup = NULL;
2371
2372 /* slab pages do not have PageKmemcg flag set */
2373 if (PageKmemcg(page))
2374 __ClearPageKmemcg(page);
2375
2368 css_put_many(&memcg->css, nr_pages); 2376 css_put_many(&memcg->css, nr_pages);
2369} 2377}
2370#endif /* !CONFIG_SLOB */ 2378#endif /* !CONFIG_SLOB */
@@ -4069,14 +4077,14 @@ static struct cftype mem_cgroup_legacy_files[] = {
4069 4077
4070static DEFINE_IDR(mem_cgroup_idr); 4078static DEFINE_IDR(mem_cgroup_idr);
4071 4079
4072static void mem_cgroup_id_get(struct mem_cgroup *memcg) 4080static void mem_cgroup_id_get_many(struct mem_cgroup *memcg, unsigned int n)
4073{ 4081{
4074 atomic_inc(&memcg->id.ref); 4082 atomic_add(n, &memcg->id.ref);
4075} 4083}
4076 4084
4077static void mem_cgroup_id_put(struct mem_cgroup *memcg) 4085static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
4078{ 4086{
4079 if (atomic_dec_and_test(&memcg->id.ref)) { 4087 if (atomic_sub_and_test(n, &memcg->id.ref)) {
4080 idr_remove(&mem_cgroup_idr, memcg->id.id); 4088 idr_remove(&mem_cgroup_idr, memcg->id.id);
4081 memcg->id.id = 0; 4089 memcg->id.id = 0;
4082 4090
@@ -4085,6 +4093,16 @@ static void mem_cgroup_id_put(struct mem_cgroup *memcg)
4085 } 4093 }
4086} 4094}
4087 4095
4096static inline void mem_cgroup_id_get(struct mem_cgroup *memcg)
4097{
4098 mem_cgroup_id_get_many(memcg, 1);
4099}
4100
4101static inline void mem_cgroup_id_put(struct mem_cgroup *memcg)
4102{
4103 mem_cgroup_id_put_many(memcg, 1);
4104}
4105
4088/** 4106/**
4089 * mem_cgroup_from_id - look up a memcg from a memcg id 4107 * mem_cgroup_from_id - look up a memcg from a memcg id
4090 * @id: the memcg id to look up 4108 * @id: the memcg id to look up
@@ -4719,6 +4737,8 @@ static void __mem_cgroup_clear_mc(void)
4719 if (!mem_cgroup_is_root(mc.from)) 4737 if (!mem_cgroup_is_root(mc.from))
4720 page_counter_uncharge(&mc.from->memsw, mc.moved_swap); 4738 page_counter_uncharge(&mc.from->memsw, mc.moved_swap);
4721 4739
4740 mem_cgroup_id_put_many(mc.from, mc.moved_swap);
4741
4722 /* 4742 /*
4723 * we charged both to->memory and to->memsw, so we 4743 * we charged both to->memory and to->memsw, so we
4724 * should uncharge to->memory. 4744 * should uncharge to->memory.
@@ -4726,9 +4746,9 @@ static void __mem_cgroup_clear_mc(void)
4726 if (!mem_cgroup_is_root(mc.to)) 4746 if (!mem_cgroup_is_root(mc.to))
4727 page_counter_uncharge(&mc.to->memory, mc.moved_swap); 4747 page_counter_uncharge(&mc.to->memory, mc.moved_swap);
4728 4748
4729 css_put_many(&mc.from->css, mc.moved_swap); 4749 mem_cgroup_id_get_many(mc.to, mc.moved_swap);
4750 css_put_many(&mc.to->css, mc.moved_swap);
4730 4751
4731 /* we've already done css_get(mc.to) */
4732 mc.moved_swap = 0; 4752 mc.moved_swap = 0;
4733 } 4753 }
4734 memcg_oom_recover(from); 4754 memcg_oom_recover(from);
@@ -5537,8 +5557,10 @@ static void uncharge_list(struct list_head *page_list)
5537 else 5557 else
5538 nr_file += nr_pages; 5558 nr_file += nr_pages;
5539 pgpgout++; 5559 pgpgout++;
5540 } else 5560 } else {
5541 nr_kmem += 1 << compound_order(page); 5561 nr_kmem += 1 << compound_order(page);
5562 __ClearPageKmemcg(page);
5563 }
5542 5564
5543 page->mem_cgroup = NULL; 5565 page->mem_cgroup = NULL;
5544 } while (next != page_list); 5566 } while (next != page_list);
@@ -5781,6 +5803,24 @@ static int __init mem_cgroup_init(void)
5781subsys_initcall(mem_cgroup_init); 5803subsys_initcall(mem_cgroup_init);
5782 5804
5783#ifdef CONFIG_MEMCG_SWAP 5805#ifdef CONFIG_MEMCG_SWAP
5806static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
5807{
5808 while (!atomic_inc_not_zero(&memcg->id.ref)) {
5809 /*
 5810 * The root cgroup cannot be destroyed, so its refcount must

5811 * always be >= 1.
5812 */
5813 if (WARN_ON_ONCE(memcg == root_mem_cgroup)) {
5814 VM_BUG_ON(1);
5815 break;
5816 }
5817 memcg = parent_mem_cgroup(memcg);
5818 if (!memcg)
5819 memcg = root_mem_cgroup;
5820 }
5821 return memcg;
5822}
5823
5784/** 5824/**
5785 * mem_cgroup_swapout - transfer a memsw charge to swap 5825 * mem_cgroup_swapout - transfer a memsw charge to swap
5786 * @page: page whose memsw charge to transfer 5826 * @page: page whose memsw charge to transfer
@@ -5790,7 +5830,7 @@ subsys_initcall(mem_cgroup_init);
5790 */ 5830 */
5791void mem_cgroup_swapout(struct page *page, swp_entry_t entry) 5831void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
5792{ 5832{
5793 struct mem_cgroup *memcg; 5833 struct mem_cgroup *memcg, *swap_memcg;
5794 unsigned short oldid; 5834 unsigned short oldid;
5795 5835
5796 VM_BUG_ON_PAGE(PageLRU(page), page); 5836 VM_BUG_ON_PAGE(PageLRU(page), page);
@@ -5805,16 +5845,27 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
5805 if (!memcg) 5845 if (!memcg)
5806 return; 5846 return;
5807 5847
5808 mem_cgroup_id_get(memcg); 5848 /*
5809 oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg)); 5849 * In case the memcg owning these pages has been offlined and doesn't
5850 * have an ID allocated to it anymore, charge the closest online
5851 * ancestor for the swap instead and transfer the memory+swap charge.
5852 */
5853 swap_memcg = mem_cgroup_id_get_online(memcg);
5854 oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg));
5810 VM_BUG_ON_PAGE(oldid, page); 5855 VM_BUG_ON_PAGE(oldid, page);
5811 mem_cgroup_swap_statistics(memcg, true); 5856 mem_cgroup_swap_statistics(swap_memcg, true);
5812 5857
5813 page->mem_cgroup = NULL; 5858 page->mem_cgroup = NULL;
5814 5859
5815 if (!mem_cgroup_is_root(memcg)) 5860 if (!mem_cgroup_is_root(memcg))
5816 page_counter_uncharge(&memcg->memory, 1); 5861 page_counter_uncharge(&memcg->memory, 1);
5817 5862
5863 if (memcg != swap_memcg) {
5864 if (!mem_cgroup_is_root(swap_memcg))
5865 page_counter_charge(&swap_memcg->memsw, 1);
5866 page_counter_uncharge(&memcg->memsw, 1);
5867 }
5868
5818 /* 5869 /*
5819 * Interrupts should be disabled here because the caller holds the 5870 * Interrupts should be disabled here because the caller holds the
5820 * mapping->tree_lock lock which is taken with interrupts-off. It is 5871 * mapping->tree_lock lock which is taken with interrupts-off. It is
@@ -5853,11 +5904,14 @@ int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
5853 if (!memcg) 5904 if (!memcg)
5854 return 0; 5905 return 0;
5855 5906
5907 memcg = mem_cgroup_id_get_online(memcg);
5908
5856 if (!mem_cgroup_is_root(memcg) && 5909 if (!mem_cgroup_is_root(memcg) &&
5857 !page_counter_try_charge(&memcg->swap, 1, &counter)) 5910 !page_counter_try_charge(&memcg->swap, 1, &counter)) {
5911 mem_cgroup_id_put(memcg);
5858 return -ENOMEM; 5912 return -ENOMEM;
5913 }
5859 5914
5860 mem_cgroup_id_get(memcg);
5861 oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg)); 5915 oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg));
5862 VM_BUG_ON_PAGE(oldid, page); 5916 VM_BUG_ON_PAGE(oldid, page);
5863 mem_cgroup_swap_statistics(memcg, true); 5917 mem_cgroup_swap_statistics(memcg, true);
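
The memcontrol.c changes generalise the id refcount to _get_many()/_put_many() so swap moves can transfer charges in batches, and add mem_cgroup_id_get_online(), which climbs to the nearest ancestor whose id refcount can still be raised; swap entries recorded at swapout therefore never point at an offlined group's recycled id. A rough userspace sketch of the climb (types and helpers are stand-ins, not the memcg API; like the kernel code it assumes the root stays pinned):

	#include <stdatomic.h>
	#include <stddef.h>

	struct group {
		atomic_int ref;
		struct group *parent;	/* NULL above the root */
	};

	static int inc_not_zero(atomic_int *ref)
	{
		int old = atomic_load(ref);

		while (old != 0)
			if (atomic_compare_exchange_weak(ref, &old, old + 1))
				return 1;
		return 0;
	}

	static struct group *get_online(struct group *g, struct group *root)
	{
		while (!inc_not_zero(&g->ref))
			g = g->parent ? g->parent : root; /* climb to a live ancestor */
		return g;
	}
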
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 3894b65b1555..41266dc29f33 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1219,6 +1219,7 @@ static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
1219 1219
1220 /* init node's zones as empty zones, we don't have any present pages.*/ 1220 /* init node's zones as empty zones, we don't have any present pages.*/
1221 free_area_init_node(nid, zones_size, start_pfn, zholes_size); 1221 free_area_init_node(nid, zones_size, start_pfn, zholes_size);
1222 pgdat->per_cpu_nodestats = alloc_percpu(struct per_cpu_nodestat);
1222 1223
1223 /* 1224 /*
1224 * The node we allocated has no zone fallback lists. For avoiding 1225 * The node we allocated has no zone fallback lists. For avoiding
@@ -1249,6 +1250,7 @@ static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
1249static void rollback_node_hotadd(int nid, pg_data_t *pgdat) 1250static void rollback_node_hotadd(int nid, pg_data_t *pgdat)
1250{ 1251{
1251 arch_refresh_nodedata(nid, NULL); 1252 arch_refresh_nodedata(nid, NULL);
1253 free_percpu(pgdat->per_cpu_nodestats);
1252 arch_free_nodedata(pgdat); 1254 arch_free_nodedata(pgdat);
1253 return; 1255 return;
1254} 1256}
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index d8c4e38fb5f4..2da72a5b6ecc 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2336,6 +2336,23 @@ out:
2336 return ret; 2336 return ret;
2337} 2337}
2338 2338
2339/*
2340 * Drop the (possibly final) reference to task->mempolicy. It needs to be
2341 * dropped after task->mempolicy is set to NULL so that any allocation done as
2342 * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed
2343 * policy.
2344 */
2345void mpol_put_task_policy(struct task_struct *task)
2346{
2347 struct mempolicy *pol;
2348
2349 task_lock(task);
2350 pol = task->mempolicy;
2351 task->mempolicy = NULL;
2352 task_unlock(task);
2353 mpol_put(pol);
2354}
2355
2339static void sp_delete(struct shared_policy *sp, struct sp_node *n) 2356static void sp_delete(struct shared_policy *sp, struct sp_node *n)
2340{ 2357{
2341 pr_debug("deleting %lx-l%lx\n", n->start, n->end); 2358 pr_debug("deleting %lx-l%lx\n", n->start, n->end);
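
mpol_put_task_policy() exists to enforce an ordering: the task's policy pointer is cleared under task_lock() first, and only then is the reference dropped, so nothing that runs during the free itself (the comment names KASAN work inside kmem_cache_free()) can observe task->mempolicy pointing at memory being torn down. The detach-then-release shape, as a small pthread sketch:

	#include <pthread.h>
	#include <stdlib.h>

	struct task {
		pthread_mutex_t lock;
		void *policy;
	};

	static void put_policy(struct task *t)
	{
		void *pol;

		pthread_mutex_lock(&t->lock);
		pol = t->policy;
		t->policy = NULL;	/* detach while holding the lock... */
		pthread_mutex_unlock(&t->lock);

		free(pol);		/* ...then release outside it */
	}
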
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 7d0a275df822..d53a9aa00977 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -764,7 +764,7 @@ bool task_will_free_mem(struct task_struct *task)
764{ 764{
765 struct mm_struct *mm = task->mm; 765 struct mm_struct *mm = task->mm;
766 struct task_struct *p; 766 struct task_struct *p;
767 bool ret; 767 bool ret = true;
768 768
769 /* 769 /*
770 * Skip tasks without mm because it might have passed its exit_mm and 770 * Skip tasks without mm because it might have passed its exit_mm and
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index fb975cec3518..a2214c64ed3c 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1008,10 +1008,8 @@ static __always_inline bool free_pages_prepare(struct page *page,
1008 } 1008 }
1009 if (PageMappingFlags(page)) 1009 if (PageMappingFlags(page))
1010 page->mapping = NULL; 1010 page->mapping = NULL;
1011 if (memcg_kmem_enabled() && PageKmemcg(page)) { 1011 if (memcg_kmem_enabled() && PageKmemcg(page))
1012 memcg_kmem_uncharge(page, order); 1012 memcg_kmem_uncharge(page, order);
1013 __ClearPageKmemcg(page);
1014 }
1015 if (check_free) 1013 if (check_free)
1016 bad += free_pages_check(page); 1014 bad += free_pages_check(page);
1017 if (bad) 1015 if (bad)
@@ -3139,54 +3137,6 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
3139 return NULL; 3137 return NULL;
3140} 3138}
3141 3139
3142static inline bool
3143should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
3144 enum compact_result compact_result,
3145 enum compact_priority *compact_priority,
3146 int compaction_retries)
3147{
3148 int max_retries = MAX_COMPACT_RETRIES;
3149
3150 if (!order)
3151 return false;
3152
3153 /*
3154 * compaction considers all the zone as desperately out of memory
3155 * so it doesn't really make much sense to retry except when the
3156 * failure could be caused by insufficient priority
3157 */
3158 if (compaction_failed(compact_result)) {
3159 if (*compact_priority > MIN_COMPACT_PRIORITY) {
3160 (*compact_priority)--;
3161 return true;
3162 }
3163 return false;
3164 }
3165
3166 /*
3167 * make sure the compaction wasn't deferred or didn't bail out early
3168 * due to locks contention before we declare that we should give up.
3169 * But do not retry if the given zonelist is not suitable for
3170 * compaction.
3171 */
3172 if (compaction_withdrawn(compact_result))
3173 return compaction_zonelist_suitable(ac, order, alloc_flags);
3174
3175 /*
3176 * !costly requests are much more important than __GFP_REPEAT
3177 * costly ones because they are de facto nofail and invoke OOM
3178 * killer to move on while costly can fail and users are ready
3179 * to cope with that. 1/4 retries is rather arbitrary but we
3180 * would need much more detailed feedback from compaction to
3181 * make a better decision.
3182 */
3183 if (order > PAGE_ALLOC_COSTLY_ORDER)
3184 max_retries /= 4;
3185 if (compaction_retries <= max_retries)
3186 return true;
3187
3188 return false;
3189}
3190#else 3140#else
3191static inline struct page * 3141static inline struct page *
3192__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, 3142__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
@@ -3197,6 +3147,8 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
3197 return NULL; 3147 return NULL;
3198} 3148}
3199 3149
3150#endif /* CONFIG_COMPACTION */
3151
3200static inline bool 3152static inline bool
3201should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags, 3153should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags,
3202 enum compact_result compact_result, 3154 enum compact_result compact_result,
@@ -3223,7 +3175,6 @@ should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_fla
3223 } 3175 }
3224 return false; 3176 return false;
3225} 3177}
3226#endif /* CONFIG_COMPACTION */
3227 3178
3228/* Perform direct synchronous page reclaim */ 3179/* Perform direct synchronous page reclaim */
3229static int 3180static int
@@ -3756,12 +3707,10 @@ no_zone:
3756 } 3707 }
3757 3708
3758out: 3709out:
3759 if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page) { 3710 if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page &&
3760 if (unlikely(memcg_kmem_charge(page, gfp_mask, order))) { 3711 unlikely(memcg_kmem_charge(page, gfp_mask, order) != 0)) {
3761 __free_pages(page, order); 3712 __free_pages(page, order);
3762 page = NULL; 3713 page = NULL;
3763 } else
3764 __SetPageKmemcg(page);
3765 } 3714 }
3766 3715
3767 if (kmemcheck_enabled && page) 3716 if (kmemcheck_enabled && page)
@@ -4064,7 +4013,7 @@ long si_mem_available(void)
4064 int lru; 4013 int lru;
4065 4014
4066 for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++) 4015 for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++)
4067 pages[lru] = global_page_state(NR_LRU_BASE + lru); 4016 pages[lru] = global_node_page_state(NR_LRU_BASE + lru);
4068 4017
4069 for_each_zone(zone) 4018 for_each_zone(zone)
4070 wmark_low += zone->watermark[WMARK_LOW]; 4019 wmark_low += zone->watermark[WMARK_LOW];
@@ -4411,7 +4360,7 @@ static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
4411 do { 4360 do {
4412 zone_type--; 4361 zone_type--;
4413 zone = pgdat->node_zones + zone_type; 4362 zone = pgdat->node_zones + zone_type;
4414 if (populated_zone(zone)) { 4363 if (managed_zone(zone)) {
4415 zoneref_set_zone(zone, 4364 zoneref_set_zone(zone,
4416 &zonelist->_zonerefs[nr_zones++]); 4365 &zonelist->_zonerefs[nr_zones++]);
4417 check_highest_zone(zone_type); 4366 check_highest_zone(zone_type);
@@ -4649,7 +4598,7 @@ static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
4649 for (j = 0; j < nr_nodes; j++) { 4598 for (j = 0; j < nr_nodes; j++) {
4650 node = node_order[j]; 4599 node = node_order[j];
4651 z = &NODE_DATA(node)->node_zones[zone_type]; 4600 z = &NODE_DATA(node)->node_zones[zone_type];
4652 if (populated_zone(z)) { 4601 if (managed_zone(z)) {
4653 zoneref_set_zone(z, 4602 zoneref_set_zone(z,
4654 &zonelist->_zonerefs[pos++]); 4603 &zonelist->_zonerefs[pos++]);
4655 check_highest_zone(zone_type); 4604 check_highest_zone(zone_type);
@@ -4761,6 +4710,8 @@ int local_memory_node(int node)
4761} 4710}
4762#endif 4711#endif
4763 4712
4713static void setup_min_unmapped_ratio(void);
4714static void setup_min_slab_ratio(void);
4764#else /* CONFIG_NUMA */ 4715#else /* CONFIG_NUMA */
4765 4716
4766static void set_zonelist_order(void) 4717static void set_zonelist_order(void)
@@ -5882,9 +5833,6 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat)
5882 zone->managed_pages = is_highmem_idx(j) ? realsize : freesize; 5833 zone->managed_pages = is_highmem_idx(j) ? realsize : freesize;
5883#ifdef CONFIG_NUMA 5834#ifdef CONFIG_NUMA
5884 zone->node = nid; 5835 zone->node = nid;
5885 pgdat->min_unmapped_pages += (freesize*sysctl_min_unmapped_ratio)
5886 / 100;
5887 pgdat->min_slab_pages += (freesize * sysctl_min_slab_ratio) / 100;
5888#endif 5836#endif
5889 zone->name = zone_names[j]; 5837 zone->name = zone_names[j];
5890 zone->zone_pgdat = pgdat; 5838 zone->zone_pgdat = pgdat;
@@ -6805,6 +6753,12 @@ int __meminit init_per_zone_wmark_min(void)
6805 setup_per_zone_wmarks(); 6753 setup_per_zone_wmarks();
6806 refresh_zone_stat_thresholds(); 6754 refresh_zone_stat_thresholds();
6807 setup_per_zone_lowmem_reserve(); 6755 setup_per_zone_lowmem_reserve();
6756
6757#ifdef CONFIG_NUMA
6758 setup_min_unmapped_ratio();
6759 setup_min_slab_ratio();
6760#endif
6761
6808 return 0; 6762 return 0;
6809} 6763}
6810core_initcall(init_per_zone_wmark_min) 6764core_initcall(init_per_zone_wmark_min)
@@ -6846,43 +6800,58 @@ int watermark_scale_factor_sysctl_handler(struct ctl_table *table, int write,
6846} 6800}
6847 6801
6848#ifdef CONFIG_NUMA 6802#ifdef CONFIG_NUMA
6803static void setup_min_unmapped_ratio(void)
6804{
6805 pg_data_t *pgdat;
6806 struct zone *zone;
6807
6808 for_each_online_pgdat(pgdat)
6809 pgdat->min_unmapped_pages = 0;
6810
6811 for_each_zone(zone)
6812 zone->zone_pgdat->min_unmapped_pages += (zone->managed_pages *
6813 sysctl_min_unmapped_ratio) / 100;
6814}
6815
6816
6849int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write, 6817int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write,
6850 void __user *buffer, size_t *length, loff_t *ppos) 6818 void __user *buffer, size_t *length, loff_t *ppos)
6851{ 6819{
6852 struct pglist_data *pgdat;
6853 struct zone *zone;
6854 int rc; 6820 int rc;
6855 6821
6856 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 6822 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
6857 if (rc) 6823 if (rc)
6858 return rc; 6824 return rc;
6859 6825
6826 setup_min_unmapped_ratio();
6827
6828 return 0;
6829}
6830
6831static void setup_min_slab_ratio(void)
6832{
6833 pg_data_t *pgdat;
6834 struct zone *zone;
6835
6860 for_each_online_pgdat(pgdat) 6836 for_each_online_pgdat(pgdat)
6861 pgdat->min_slab_pages = 0; 6837 pgdat->min_slab_pages = 0;
6862 6838
6863 for_each_zone(zone) 6839 for_each_zone(zone)
6864 zone->zone_pgdat->min_unmapped_pages += (zone->managed_pages * 6840 zone->zone_pgdat->min_slab_pages += (zone->managed_pages *
6865 sysctl_min_unmapped_ratio) / 100; 6841 sysctl_min_slab_ratio) / 100;
6866 return 0;
6867} 6842}
6868 6843
6869int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write, 6844int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write,
6870 void __user *buffer, size_t *length, loff_t *ppos) 6845 void __user *buffer, size_t *length, loff_t *ppos)
6871{ 6846{
6872 struct pglist_data *pgdat;
6873 struct zone *zone;
6874 int rc; 6847 int rc;
6875 6848
6876 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 6849 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
6877 if (rc) 6850 if (rc)
6878 return rc; 6851 return rc;
6879 6852
6880 for_each_online_pgdat(pgdat) 6853 setup_min_slab_ratio();
6881 pgdat->min_slab_pages = 0;
6882 6854
6883 for_each_zone(zone)
6884 zone->zone_pgdat->min_slab_pages += (zone->managed_pages *
6885 sysctl_min_slab_ratio) / 100;
6886 return 0; 6855 return 0;
6887} 6856}
6888#endif 6857#endif
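
The populated_zone() to managed_zone() conversions in this file (and throughout mm/vmscan.c below) change which zones reclaim and zonelist construction consider: present_pages counts every physical page a zone spans, while managed_pages counts only those actually handed to the buddy allocator, so a zone whose memory is entirely consumed by, say, the memmap no longer looks usable. The same hunk also consolidates the min_unmapped/min_slab sysctl handlers into setup_*() helpers, fixing the old slab handler's copy-paste use of the unmapped ratio. The zone distinction, sketched with the two counters:

	#include <stdbool.h>

	struct zone {
		unsigned long present_pages;	/* spanned physical pages */
		unsigned long managed_pages;	/* pages the buddy allocator owns */
	};

	static bool populated_zone(const struct zone *z)
	{
		return z->present_pages != 0;
	}

	static bool managed_zone(const struct zone *z)
	{
		return z->managed_pages != 0;
	}
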
diff --git a/mm/readahead.c b/mm/readahead.c
index 65ec288dc057..c8a955b1297e 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -8,6 +8,7 @@
8 */ 8 */
9 9
10#include <linux/kernel.h> 10#include <linux/kernel.h>
11#include <linux/dax.h>
11#include <linux/gfp.h> 12#include <linux/gfp.h>
12#include <linux/export.h> 13#include <linux/export.h>
13#include <linux/blkdev.h> 14#include <linux/blkdev.h>
@@ -544,6 +545,14 @@ do_readahead(struct address_space *mapping, struct file *filp,
544 if (!mapping || !mapping->a_ops) 545 if (!mapping || !mapping->a_ops)
545 return -EINVAL; 546 return -EINVAL;
546 547
548 /*
549 * Readahead doesn't make sense for DAX inodes, but we don't want it
550 * to report a failure either. Instead, we just return success and
551 * don't do any work.
552 */
553 if (dax_mapping(mapping))
554 return 0;
555
547 return force_page_cache_readahead(mapping, filp, index, nr); 556 return force_page_cache_readahead(mapping, filp, index, nr);
548} 557}
549 558
diff --git a/mm/rmap.c b/mm/rmap.c
index 709bc83703b1..1ef36404e7b2 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1284,8 +1284,9 @@ void page_add_file_rmap(struct page *page, bool compound)
1284 VM_BUG_ON_PAGE(!PageSwapBacked(page), page); 1284 VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
1285 __inc_node_page_state(page, NR_SHMEM_PMDMAPPED); 1285 __inc_node_page_state(page, NR_SHMEM_PMDMAPPED);
1286 } else { 1286 } else {
1287 if (PageTransCompound(page)) { 1287 if (PageTransCompound(page) && page_mapping(page)) {
1288 VM_BUG_ON_PAGE(!PageLocked(page), page); 1288 VM_WARN_ON_ONCE(!PageLocked(page));
1289
1289 SetPageDoubleMap(compound_head(page)); 1290 SetPageDoubleMap(compound_head(page));
1290 if (PageMlocked(page)) 1291 if (PageMlocked(page))
1291 clear_page_mlock(compound_head(page)); 1292 clear_page_mlock(compound_head(page));
@@ -1303,7 +1304,7 @@ static void page_remove_file_rmap(struct page *page, bool compound)
1303{ 1304{
1304 int i, nr = 1; 1305 int i, nr = 1;
1305 1306
1306 VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page); 1307 VM_BUG_ON_PAGE(compound && !PageHead(page), page);
1307 lock_page_memcg(page); 1308 lock_page_memcg(page);
1308 1309
1309 /* Hugepages are not counted in NR_FILE_MAPPED for now. */ 1310 /* Hugepages are not counted in NR_FILE_MAPPED for now. */
diff --git a/mm/shmem.c b/mm/shmem.c
index 7f7748a0f9e1..fd8b2b5741b1 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -3975,7 +3975,9 @@ static ssize_t shmem_enabled_store(struct kobject *kobj,
3975 3975
3976struct kobj_attribute shmem_enabled_attr = 3976struct kobj_attribute shmem_enabled_attr =
3977 __ATTR(shmem_enabled, 0644, shmem_enabled_show, shmem_enabled_store); 3977 __ATTR(shmem_enabled, 0644, shmem_enabled_show, shmem_enabled_store);
3978#endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE && CONFIG_SYSFS */
3978 3979
3980#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
3979bool shmem_huge_enabled(struct vm_area_struct *vma) 3981bool shmem_huge_enabled(struct vm_area_struct *vma)
3980{ 3982{
3981 struct inode *inode = file_inode(vma->vm_file); 3983 struct inode *inode = file_inode(vma->vm_file);
@@ -4006,7 +4008,7 @@ bool shmem_huge_enabled(struct vm_area_struct *vma)
4006 return false; 4008 return false;
4007 } 4009 }
4008} 4010}
4009#endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE && CONFIG_SYSFS */ 4011#endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE */
4010 4012
4011#else /* !CONFIG_SHMEM */ 4013#else /* !CONFIG_SHMEM */
4012 4014
diff --git a/mm/slab.c b/mm/slab.c
index 261147ba156f..b67271024135 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -4441,6 +4441,36 @@ static int __init slab_proc_init(void)
4441module_init(slab_proc_init); 4441module_init(slab_proc_init);
4442#endif 4442#endif
4443 4443
4444#ifdef CONFIG_HARDENED_USERCOPY
4445/*
4446 * Rejects objects that are incorrectly sized.
4447 *
4448 * Returns NULL if check passes, otherwise const char * to name of cache
4449 * to indicate an error.
4450 */
4451const char *__check_heap_object(const void *ptr, unsigned long n,
4452 struct page *page)
4453{
4454 struct kmem_cache *cachep;
4455 unsigned int objnr;
4456 unsigned long offset;
4457
4458 /* Find and validate object. */
4459 cachep = page->slab_cache;
4460 objnr = obj_to_index(cachep, page, (void *)ptr);
4461 BUG_ON(objnr >= cachep->num);
4462
4463 /* Find offset within object. */
4464 offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep);
4465
4466 /* Allow address range falling entirely within object size. */
4467 if (offset <= cachep->object_size && n <= cachep->object_size - offset)
4468 return NULL;
4469
4470 return cachep->name;
4471}
4472#endif /* CONFIG_HARDENED_USERCOPY */
4473
4444/** 4474/**
4445 * ksize - get the actual amount of memory allocated for a given object 4475 * ksize - get the actual amount of memory allocated for a given object
4446 * @objp: Pointer to the object 4476 * @objp: Pointer to the object
diff --git a/mm/slub.c b/mm/slub.c
index 850737bdfbd8..9adae58462f8 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3629,6 +3629,7 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
3629 */ 3629 */
3630static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n) 3630static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
3631{ 3631{
3632 LIST_HEAD(discard);
3632 struct page *page, *h; 3633 struct page *page, *h;
3633 3634
3634 BUG_ON(irqs_disabled()); 3635 BUG_ON(irqs_disabled());
@@ -3636,13 +3637,16 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
3636 list_for_each_entry_safe(page, h, &n->partial, lru) { 3637 list_for_each_entry_safe(page, h, &n->partial, lru) {
3637 if (!page->inuse) { 3638 if (!page->inuse) {
3638 remove_partial(n, page); 3639 remove_partial(n, page);
3639 discard_slab(s, page); 3640 list_add(&page->lru, &discard);
3640 } else { 3641 } else {
3641 list_slab_objects(s, page, 3642 list_slab_objects(s, page,
3642 "Objects remaining in %s on __kmem_cache_shutdown()"); 3643 "Objects remaining in %s on __kmem_cache_shutdown()");
3643 } 3644 }
3644 } 3645 }
3645 spin_unlock_irq(&n->list_lock); 3646 spin_unlock_irq(&n->list_lock);
3647
3648 list_for_each_entry_safe(page, h, &discard, lru)
3649 discard_slab(s, page);
3646} 3650}
3647 3651
3648/* 3652/*
@@ -3764,6 +3768,46 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
3764EXPORT_SYMBOL(__kmalloc_node); 3768EXPORT_SYMBOL(__kmalloc_node);
3765#endif 3769#endif
3766 3770
3771#ifdef CONFIG_HARDENED_USERCOPY
3772/*
3773 * Rejects objects that are incorrectly sized.
3774 *
3775 * Returns NULL if check passes, otherwise const char * to name of cache
3776 * to indicate an error.
3777 */
3778const char *__check_heap_object(const void *ptr, unsigned long n,
3779 struct page *page)
3780{
3781 struct kmem_cache *s;
3782 unsigned long offset;
3783 size_t object_size;
3784
3785 /* Find object and usable object size. */
3786 s = page->slab_cache;
3787 object_size = slab_ksize(s);
3788
3789 /* Reject impossible pointers. */
3790 if (ptr < page_address(page))
3791 return s->name;
3792
3793 /* Find offset within object. */
3794 offset = (ptr - page_address(page)) % s->size;
3795
3796 /* Adjust for redzone and reject if within the redzone. */
3797 if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE) {
3798 if (offset < s->red_left_pad)
3799 return s->name;
3800 offset -= s->red_left_pad;
3801 }
3802
3803 /* Allow address range falling entirely within object size. */
3804 if (offset <= object_size && n <= object_size - offset)
3805 return NULL;
3806
3807 return s->name;
3808}
3809#endif /* CONFIG_HARDENED_USERCOPY */
3810
3767static size_t __ksize(const void *object) 3811static size_t __ksize(const void *object)
3768{ 3812{
3769 struct page *page; 3813 struct page *page;
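
Both allocators now export __check_heap_object() for CONFIG_HARDENED_USERCOPY, and after locating the object each reduces to the same bounds test: a copy of n bytes at a given offset is allowed only if it cannot run past the object size. The comparison is written to avoid overflow in offset + n; stand-alone:

	#include <stdbool.h>
	#include <stddef.h>

	/* True if [offset, offset + n) stays inside one object. */
	static bool span_within_object(size_t offset, size_t n, size_t object_size)
	{
		return offset <= object_size && n <= object_size - offset;
	}
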
diff --git a/mm/usercopy.c b/mm/usercopy.c
new file mode 100644
index 000000000000..089328f2b920
--- /dev/null
+++ b/mm/usercopy.c
@@ -0,0 +1,277 @@
1/*
2 * This implements the various checks for CONFIG_HARDENED_USERCOPY*,
3 * which are designed to protect kernel memory from needless exposure
4 * and overwrite under many unintended conditions. This code is based
5 * on PAX_USERCOPY, which is:
6 *
7 * Copyright (C) 2001-2016 PaX Team, Bradley Spengler, Open Source
8 * Security Inc.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 */
15#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16
17#include <linux/mm.h>
18#include <linux/slab.h>
19#include <asm/sections.h>
20
21enum {
22 BAD_STACK = -1,
23 NOT_STACK = 0,
24 GOOD_FRAME,
25 GOOD_STACK,
26};
27
28/*
29 * Checks if a given pointer and length is contained by the current
30 * stack frame (if possible).
31 *
32 * Returns:
33 * NOT_STACK: not at all on the stack
34 * GOOD_FRAME: fully within a valid stack frame
35 * GOOD_STACK: fully on the stack (when can't do frame-checking)
36 * BAD_STACK: error condition (invalid stack position or bad stack frame)
37 */
38static noinline int check_stack_object(const void *obj, unsigned long len)
39{
40 const void * const stack = task_stack_page(current);
41 const void * const stackend = stack + THREAD_SIZE;
42 int ret;
43
44 /* Object is not on the stack at all. */
45 if (obj + len <= stack || stackend <= obj)
46 return NOT_STACK;
47
48 /*
 49 * Reject: object partially overlaps the stack (passing
 50 * the check above means at least one end is within the stack,
51 * so if this check fails, the other end is outside the stack).
52 */
53 if (obj < stack || stackend < obj + len)
54 return BAD_STACK;
55
56 /* Check if object is safely within a valid frame. */
57 ret = arch_within_stack_frames(stack, stackend, obj, len);
58 if (ret)
59 return ret;
60
61 return GOOD_STACK;
62}
63
64static void report_usercopy(const void *ptr, unsigned long len,
65 bool to_user, const char *type)
66{
67 pr_emerg("kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
68 to_user ? "exposure" : "overwrite",
69 to_user ? "from" : "to", ptr, type ? : "unknown", len);
70 /*
71 * For greater effect, it would be nice to do do_group_exit(),
72 * but BUG() actually hooks all the lock-breaking and per-arch
73 * Oops code, so that is used here instead.
74 */
75 BUG();
76}
77
 78/* Returns true if any portion of [ptr,ptr+n) overlaps with [low,high). */
79static bool overlaps(const void *ptr, unsigned long n, unsigned long low,
80 unsigned long high)
81{
82 unsigned long check_low = (uintptr_t)ptr;
83 unsigned long check_high = check_low + n;
84
85 /* Does not overlap if entirely above or entirely below. */
86 if (check_low >= high || check_high <= low)
87 return false;
88
89 return true;
90}
91
92/* Is this address range in the kernel text area? */
93static inline const char *check_kernel_text_object(const void *ptr,
94 unsigned long n)
95{
96 unsigned long textlow = (unsigned long)_stext;
97 unsigned long texthigh = (unsigned long)_etext;
98 unsigned long textlow_linear, texthigh_linear;
99
100 if (overlaps(ptr, n, textlow, texthigh))
101 return "<kernel text>";
102
103 /*
104 * Some architectures have virtual memory mappings with a secondary
105 * mapping of the kernel text, i.e. there is more than one virtual
106 * kernel address that points to the kernel image. It is usually
107 * when there is a separate linear physical memory mapping, in that
108 * __pa() is not just the reverse of __va(). This can be detected
109 * and checked:
110 */
111 textlow_linear = (unsigned long)__va(__pa(textlow));
112 /* No different mapping: we're done. */
113 if (textlow_linear == textlow)
114 return NULL;
115
116 /* Check the secondary mapping... */
117 texthigh_linear = (unsigned long)__va(__pa(texthigh));
118 if (overlaps(ptr, n, textlow_linear, texthigh_linear))
119 return "<linear kernel text>";
120
121 return NULL;
122}
123
124static inline const char *check_bogus_address(const void *ptr, unsigned long n)
125{
126 /* Reject if object wraps past end of memory. */
127 if ((unsigned long)ptr + n < (unsigned long)ptr)
128 return "<wrapped address>";
129
130 /* Reject if NULL or ZERO-allocation. */
131 if (ZERO_OR_NULL_PTR(ptr))
132 return "<null>";
133
134 return NULL;
135}
136
137/* Checks for allocs that are marked in some way as spanning multiple pages. */
138static inline const char *check_page_span(const void *ptr, unsigned long n,
139 struct page *page, bool to_user)
140{
141#ifdef CONFIG_HARDENED_USERCOPY_PAGESPAN
142 const void *end = ptr + n - 1;
143 struct page *endpage;
144 bool is_reserved, is_cma;
145
146 /*
147 * Sometimes the kernel data regions are not marked Reserved (see
148 * check below). And sometimes [_sdata,_edata) does not cover
149 * rodata and/or bss, so check each range explicitly.
150 */
151
152 /* Allow reads of kernel rodata region (if not marked as Reserved). */
153 if (ptr >= (const void *)__start_rodata &&
154 end <= (const void *)__end_rodata) {
155 if (!to_user)
156 return "<rodata>";
157 return NULL;
158 }
159
160 /* Allow kernel data region (if not marked as Reserved). */
161 if (ptr >= (const void *)_sdata && end <= (const void *)_edata)
162 return NULL;
163
164 /* Allow kernel bss region (if not marked as Reserved). */
165 if (ptr >= (const void *)__bss_start &&
166 end <= (const void *)__bss_stop)
167 return NULL;
168
169 /* Is the object wholly within one base page? */
170 if (likely(((unsigned long)ptr & (unsigned long)PAGE_MASK) ==
171 ((unsigned long)end & (unsigned long)PAGE_MASK)))
172 return NULL;
173
174 /* Allow if fully inside the same compound (__GFP_COMP) page. */
175 endpage = virt_to_head_page(end);
176 if (likely(endpage == page))
177 return NULL;
178
179 /*
180 * Reject if range is entirely either Reserved (i.e. special or
181 * device memory), or CMA. Otherwise, reject since the object spans
182 * several independently allocated pages.
183 */
184 is_reserved = PageReserved(page);
185 is_cma = is_migrate_cma_page(page);
186 if (!is_reserved && !is_cma)
187 return "<spans multiple pages>";
188
189 for (ptr += PAGE_SIZE; ptr <= end; ptr += PAGE_SIZE) {
190 page = virt_to_head_page(ptr);
191 if (is_reserved && !PageReserved(page))
192 return "<spans Reserved and non-Reserved pages>";
193 if (is_cma && !is_migrate_cma_page(page))
194 return "<spans CMA and non-CMA pages>";
195 }
196#endif
197
198 return NULL;
199}
200
201static inline const char *check_heap_object(const void *ptr, unsigned long n,
202 bool to_user)
203{
204 struct page *page;
205
206 /*
207 * Some architectures (arm64) return true for virt_addr_valid() on
208 * vmalloced addresses. Work around this by checking for vmalloc
209 * first.
210 */
211 if (is_vmalloc_addr(ptr))
212 return NULL;
213
214 if (!virt_addr_valid(ptr))
215 return NULL;
216
217 page = virt_to_head_page(ptr);
218
219 /* Check slab allocator for flags and size. */
220 if (PageSlab(page))
221 return __check_heap_object(ptr, n, page);
222
223 /* Verify object does not incorrectly span multiple pages. */
224 return check_page_span(ptr, n, page, to_user);
225}
226
227/*
228 * Validates that the given object is:
229 * - not bogus address
230 * - known-safe heap or stack object
231 * - not in kernel text
232 */
233void __check_object_size(const void *ptr, unsigned long n, bool to_user)
234{
235 const char *err;
236
237 /* Skip all tests if size is zero. */
238 if (!n)
239 return;
240
241 /* Check for invalid addresses. */
242 err = check_bogus_address(ptr, n);
243 if (err)
244 goto report;
245
246 /* Check for bad heap object. */
247 err = check_heap_object(ptr, n, to_user);
248 if (err)
249 goto report;
250
251 /* Check for bad stack object. */
252 switch (check_stack_object(ptr, n)) {
253 case NOT_STACK:
254 /* Object is not touching the current process stack. */
255 break;
256 case GOOD_FRAME:
257 case GOOD_STACK:
258 /*
259 * Object is either in the correct frame (when it
260 * is possible to check) or just generally on the
261 * process stack (when frame checking not available).
262 */
263 return;
264 default:
265 err = "<process stack>";
266 goto report;
267 }
268
269 /* Check for object in kernel to avoid text exposure. */
270 err = check_kernel_text_object(ptr, n);
271 if (!err)
272 return;
273
274report:
275 report_usercopy(ptr, n, to_user, err);
276}
277EXPORT_SYMBOL(__check_object_size);
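
Of the helpers in this new file, overlaps() carries the subtle part of the kernel-text check: half-open ranges intersect unless one lies entirely at or beyond the other's end. A runnable restatement with a couple of edge-case assertions:

	#include <assert.h>
	#include <stdbool.h>

	/* Mirrors overlaps(): do [lo1, hi1) and [lo2, hi2) intersect? */
	static bool ranges_overlap(unsigned long lo1, unsigned long hi1,
				   unsigned long lo2, unsigned long hi2)
	{
		return !(lo1 >= hi2 || hi1 <= lo2);
	}

	int main(void)
	{
		assert(!ranges_overlap(0, 10, 10, 20)); /* touching ends don't overlap */
		assert(ranges_overlap(5, 15, 10, 20));
		assert(!ranges_overlap(20, 30, 0, 10));
		return 0;
	}
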
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 374d95d04178..b1e12a1ea9cf 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1665,7 +1665,7 @@ static bool inactive_reclaimable_pages(struct lruvec *lruvec,
1665 1665
1666 for (zid = sc->reclaim_idx; zid >= 0; zid--) { 1666 for (zid = sc->reclaim_idx; zid >= 0; zid--) {
1667 zone = &pgdat->node_zones[zid]; 1667 zone = &pgdat->node_zones[zid];
1668 if (!populated_zone(zone)) 1668 if (!managed_zone(zone))
1669 continue; 1669 continue;
1670 1670
1671 if (zone_page_state_snapshot(zone, NR_ZONE_LRU_BASE + 1671 if (zone_page_state_snapshot(zone, NR_ZONE_LRU_BASE +
@@ -2036,7 +2036,7 @@ static bool inactive_list_is_low(struct lruvec *lruvec, bool file,
2036 struct zone *zone = &pgdat->node_zones[zid]; 2036 struct zone *zone = &pgdat->node_zones[zid];
2037 unsigned long inactive_zone, active_zone; 2037 unsigned long inactive_zone, active_zone;
2038 2038
2039 if (!populated_zone(zone)) 2039 if (!managed_zone(zone))
2040 continue; 2040 continue;
2041 2041
2042 inactive_zone = zone_page_state(zone, 2042 inactive_zone = zone_page_state(zone,
@@ -2171,7 +2171,7 @@ static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
2171 2171
2172 for (z = 0; z < MAX_NR_ZONES; z++) { 2172 for (z = 0; z < MAX_NR_ZONES; z++) {
2173 struct zone *zone = &pgdat->node_zones[z]; 2173 struct zone *zone = &pgdat->node_zones[z];
2174 if (!populated_zone(zone)) 2174 if (!managed_zone(zone))
2175 continue; 2175 continue;
2176 2176
2177 total_high_wmark += high_wmark_pages(zone); 2177 total_high_wmark += high_wmark_pages(zone);
@@ -2510,7 +2510,7 @@ static inline bool should_continue_reclaim(struct pglist_data *pgdat,
2510 /* If compaction would go ahead or the allocation would succeed, stop */ 2510 /* If compaction would go ahead or the allocation would succeed, stop */
2511 for (z = 0; z <= sc->reclaim_idx; z++) { 2511 for (z = 0; z <= sc->reclaim_idx; z++) {
2512 struct zone *zone = &pgdat->node_zones[z]; 2512 struct zone *zone = &pgdat->node_zones[z];
2513 if (!populated_zone(zone)) 2513 if (!managed_zone(zone))
2514 continue; 2514 continue;
2515 2515
2516 switch (compaction_suitable(zone, sc->order, 0, sc->reclaim_idx)) { 2516 switch (compaction_suitable(zone, sc->order, 0, sc->reclaim_idx)) {
@@ -2840,7 +2840,7 @@ static bool pfmemalloc_watermark_ok(pg_data_t *pgdat)
2840 2840
2841 for (i = 0; i <= ZONE_NORMAL; i++) { 2841 for (i = 0; i <= ZONE_NORMAL; i++) {
2842 zone = &pgdat->node_zones[i]; 2842 zone = &pgdat->node_zones[i];
2843 if (!populated_zone(zone) || 2843 if (!managed_zone(zone) ||
2844 pgdat_reclaimable_pages(pgdat) == 0) 2844 pgdat_reclaimable_pages(pgdat) == 0)
2845 continue; 2845 continue;
2846 2846
@@ -3141,7 +3141,7 @@ static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, int classzone_idx)
3141 for (i = 0; i <= classzone_idx; i++) { 3141 for (i = 0; i <= classzone_idx; i++) {
3142 struct zone *zone = pgdat->node_zones + i; 3142 struct zone *zone = pgdat->node_zones + i;
3143 3143
3144 if (!populated_zone(zone)) 3144 if (!managed_zone(zone))
3145 continue; 3145 continue;
3146 3146
3147 if (!zone_balanced(zone, order, classzone_idx)) 3147 if (!zone_balanced(zone, order, classzone_idx))
@@ -3169,7 +3169,7 @@ static bool kswapd_shrink_node(pg_data_t *pgdat,
3169 sc->nr_to_reclaim = 0; 3169 sc->nr_to_reclaim = 0;
3170 for (z = 0; z <= sc->reclaim_idx; z++) { 3170 for (z = 0; z <= sc->reclaim_idx; z++) {
3171 zone = pgdat->node_zones + z; 3171 zone = pgdat->node_zones + z;
3172 if (!populated_zone(zone)) 3172 if (!managed_zone(zone))
3173 continue; 3173 continue;
3174 3174
3175 sc->nr_to_reclaim += max(high_wmark_pages(zone), SWAP_CLUSTER_MAX); 3175 sc->nr_to_reclaim += max(high_wmark_pages(zone), SWAP_CLUSTER_MAX);
@@ -3242,7 +3242,7 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
3242 if (buffer_heads_over_limit) { 3242 if (buffer_heads_over_limit) {
3243 for (i = MAX_NR_ZONES - 1; i >= 0; i--) { 3243 for (i = MAX_NR_ZONES - 1; i >= 0; i--) {
3244 zone = pgdat->node_zones + i; 3244 zone = pgdat->node_zones + i;
3245 if (!populated_zone(zone)) 3245 if (!managed_zone(zone))
3246 continue; 3246 continue;
3247 3247
3248 sc.reclaim_idx = i; 3248 sc.reclaim_idx = i;
@@ -3262,7 +3262,7 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
3262 */ 3262 */
3263 for (i = classzone_idx; i >= 0; i--) { 3263 for (i = classzone_idx; i >= 0; i--) {
3264 zone = pgdat->node_zones + i; 3264 zone = pgdat->node_zones + i;
3265 if (!populated_zone(zone)) 3265 if (!managed_zone(zone))
3266 continue; 3266 continue;
3267 3267
3268 if (zone_balanced(zone, sc.order, classzone_idx)) 3268 if (zone_balanced(zone, sc.order, classzone_idx))
@@ -3508,7 +3508,7 @@ void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
3508 pg_data_t *pgdat; 3508 pg_data_t *pgdat;
3509 int z; 3509 int z;
3510 3510
3511 if (!populated_zone(zone)) 3511 if (!managed_zone(zone))
3512 return; 3512 return;
3513 3513
3514 if (!cpuset_zone_allowed(zone, GFP_KERNEL | __GFP_HARDWALL)) 3514 if (!cpuset_zone_allowed(zone, GFP_KERNEL | __GFP_HARDWALL))
@@ -3522,7 +3522,7 @@ void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
3522 /* Only wake kswapd if all zones are unbalanced */ 3522 /* Only wake kswapd if all zones are unbalanced */
3523 for (z = 0; z <= classzone_idx; z++) { 3523 for (z = 0; z <= classzone_idx; z++) {
3524 zone = pgdat->node_zones + z; 3524 zone = pgdat->node_zones + z;
3525 if (!populated_zone(zone)) 3525 if (!managed_zone(zone))
3526 continue; 3526 continue;
3527 3527
3528 if (zone_balanced(zone, order, classzone_idx)) 3528 if (zone_balanced(zone, order, classzone_idx))
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 82a116ba590e..8de138d3306b 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -169,7 +169,7 @@ int register_vlan_dev(struct net_device *dev)
169 if (err < 0) 169 if (err < 0)
170 goto out_uninit_mvrp; 170 goto out_uninit_mvrp;
171 171
172 vlan->nest_level = dev_get_nest_level(real_dev, is_vlan_dev) + 1; 172 vlan->nest_level = dev_get_nest_level(real_dev) + 1;
173 err = register_netdevice(dev); 173 err = register_netdevice(dev);
174 if (err < 0) 174 if (err < 0)
175 goto out_uninit_mvrp; 175 goto out_uninit_mvrp;
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index 4acb1d5417aa..f24b25c25106 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -507,8 +507,8 @@ err_out:
507 /* wakeup anybody waiting for slots to pin pages */ 507 /* wakeup anybody waiting for slots to pin pages */
508 wake_up(&vp_wq); 508 wake_up(&vp_wq);
509 } 509 }
510 kfree(in_pages); 510 kvfree(in_pages);
511 kfree(out_pages); 511 kvfree(out_pages);
512 return err; 512 return err;
513} 513}
514 514
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index ece45e0683fd..0b5f729d08d2 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -250,7 +250,7 @@ int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
250 250
251 skb_free_datagram(sk, skb); 251 skb_free_datagram(sk, skb);
252 252
253 if (msg->msg_flags & MSG_TRUNC) 253 if (flags & MSG_TRUNC)
254 copied = skblen; 254 copied = skblen;
255 255
256 return err ? : copied; 256 return err ? : copied;
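
This recvmsg fix (repeated for hci_sock_recvmsg() further down) is about which flags word gets tested: flags is the caller's input, while msg->msg_flags is an output field the receive path itself populates, so checking the latter for MSG_TRUNC only worked by accident. The corrected report, as a stand-alone helper:

	#include <stddef.h>
	#include <sys/socket.h>

	/* With MSG_TRUNC in the *input* flags, report the full datagram
	 * length even when only part of it was copied. */
	static size_t reported_len(int in_flags, size_t copied, size_t skblen)
	{
		return (in_flags & MSG_TRUNC) ? skblen : copied;
	}
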
diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
index c045b3c54768..b0e23dfc5c34 100644
--- a/net/bluetooth/hci_request.c
+++ b/net/bluetooth/hci_request.c
@@ -262,6 +262,8 @@ int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
262 break; 262 break;
263 } 263 }
264 264
265 kfree_skb(hdev->req_skb);
266 hdev->req_skb = NULL;
265 hdev->req_status = hdev->req_result = 0; 267 hdev->req_status = hdev->req_result = 0;
266 268
267 BT_DBG("%s end: err %d", hdev->name, err); 269 BT_DBG("%s end: err %d", hdev->name, err);
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 6ef8a01a9ad4..96f04b7b9556 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -1091,7 +1091,7 @@ static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg,
1091 1091
1092 skb_free_datagram(sk, skb); 1092 skb_free_datagram(sk, skb);
1093 1093
1094 if (msg->msg_flags & MSG_TRUNC) 1094 if (flags & MSG_TRUNC)
1095 copied = skblen; 1095 copied = skblen;
1096 1096
1097 return err ? : copied; 1097 return err ? : copied;
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 54ceb1f2cc9a..d4cad29b033f 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -32,6 +32,7 @@
32 32
33#include <linux/debugfs.h> 33#include <linux/debugfs.h>
34#include <linux/crc16.h> 34#include <linux/crc16.h>
35#include <linux/filter.h>
35 36
36#include <net/bluetooth/bluetooth.h> 37#include <net/bluetooth/bluetooth.h>
37#include <net/bluetooth/hci_core.h> 38#include <net/bluetooth/hci_core.h>
@@ -5835,6 +5836,9 @@ static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
5835 if (chan->sdu) 5836 if (chan->sdu)
5836 break; 5837 break;
5837 5838
5839 if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE))
5840 break;
5841
5838 chan->sdu_len = get_unaligned_le16(skb->data); 5842 chan->sdu_len = get_unaligned_le16(skb->data);
5839 skb_pull(skb, L2CAP_SDULEN_SIZE); 5843 skb_pull(skb, L2CAP_SDULEN_SIZE);
5840 5844
@@ -6610,6 +6614,10 @@ static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6610 goto drop; 6614 goto drop;
6611 } 6615 }
6612 6616
6617 if ((chan->mode == L2CAP_MODE_ERTM ||
6618 chan->mode == L2CAP_MODE_STREAMING) && sk_filter(chan->data, skb))
6619 goto drop;
6620
6613 if (!control->sframe) { 6621 if (!control->sframe) {
6614 int err; 6622 int err;
6615 6623
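[Editor's note: both l2cap_core.c hunks lean on the same defensive pattern: never parse skb->data until pskb_may_pull() has guaranteed that many bytes are in the linear area. A hedged sketch; the helper name is illustrative.]

/* Validate-then-read for a 2-byte little-endian length header. */
static int read_sdu_len(struct sk_buff *skb, u16 *sdu_len)
{
	if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE))
		return -EINVAL;		/* header not in linear area */

	*sdu_len = get_unaligned_le16(skb->data);
	skb_pull(skb, L2CAP_SDULEN_SIZE);
	return 0;
}
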
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 1842141baedb..a8ba752732c9 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -1019,7 +1019,7 @@ static int l2cap_sock_recvmsg(struct socket *sock, struct msghdr *msg,
1019 goto done; 1019 goto done;
1020 1020
1021 if (pi->rx_busy_skb) { 1021 if (pi->rx_busy_skb) {
1022 if (!sock_queue_rcv_skb(sk, pi->rx_busy_skb)) 1022 if (!__sock_queue_rcv_skb(sk, pi->rx_busy_skb))
1023 pi->rx_busy_skb = NULL; 1023 pi->rx_busy_skb = NULL;
1024 else 1024 else
1025 goto done; 1025 goto done;
@@ -1270,7 +1270,17 @@ static int l2cap_sock_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
1270 goto done; 1270 goto done;
1271 } 1271 }
1272 1272
1273 err = sock_queue_rcv_skb(sk, skb); 1273 if (chan->mode != L2CAP_MODE_ERTM &&
1274 chan->mode != L2CAP_MODE_STREAMING) {
1275 /* Even if no filter is attached, we could potentially
1276 * get errors from security modules, etc.
1277 */
1278 err = sk_filter(sk, skb);
1279 if (err)
1280 goto done;
1281 }
1282
1283 err = __sock_queue_rcv_skb(sk, skb);
1274 1284
1275 /* For ERTM, handle one skb that doesn't fit into the recv 1285 /* For ERTM, handle one skb that doesn't fit into the recv
1276 * buffer. This is important to do because the data frames 1286 * buffer. This is important to do because the data frames
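[Editor's note: with these hunks, ERTM and streaming-mode traffic is also run through sk_filter(), so classic socket filters now apply. A hypothetical userspace sketch of attaching one; the filter shown accepts everything.]

#include <linux/filter.h>
#include <sys/socket.h>

static int attach_accept_all(int fd)
{
	struct sock_filter code[] = {
		/* accept every packet: return max length */
		BPF_STMT(BPF_RET | BPF_K, 0xffff),
	};
	struct sock_fprog prog = {
		.len    = 1,
		.filter = code,
	};

	return setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
			  &prog, sizeof(prog));
}
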
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index c18080ad4085..cd620fab41b0 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -267,7 +267,7 @@ void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr)
267 267
268 /* If old entry was unassociated with any port, then delete it. */ 268 /* If old entry was unassociated with any port, then delete it. */
269 f = __br_fdb_get(br, br->dev->dev_addr, 0); 269 f = __br_fdb_get(br, br->dev->dev_addr, 0);
270 if (f && f->is_local && !f->dst) 270 if (f && f->is_local && !f->dst && !f->added_by_user)
271 fdb_delete_local(br, NULL, f); 271 fdb_delete_local(br, NULL, f);
272 272
273 fdb_insert(br, NULL, newaddr, 0); 273 fdb_insert(br, NULL, newaddr, 0);
@@ -282,7 +282,7 @@ void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr)
282 if (!br_vlan_should_use(v)) 282 if (!br_vlan_should_use(v))
283 continue; 283 continue;
284 f = __br_fdb_get(br, br->dev->dev_addr, v->vid); 284 f = __br_fdb_get(br, br->dev->dev_addr, v->vid);
285 if (f && f->is_local && !f->dst) 285 if (f && f->is_local && !f->dst && !f->added_by_user)
286 fdb_delete_local(br, NULL, f); 286 fdb_delete_local(br, NULL, f);
287 fdb_insert(br, NULL, newaddr, v->vid); 287 fdb_insert(br, NULL, newaddr, v->vid);
288 } 288 }
@@ -764,20 +764,25 @@ out:
764} 764}
765 765
766/* Update (create or replace) forwarding database entry */ 766/* Update (create or replace) forwarding database entry */
767static int fdb_add_entry(struct net_bridge_port *source, const __u8 *addr, 767static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source,
768 __u16 state, __u16 flags, __u16 vid) 768 const __u8 *addr, __u16 state, __u16 flags, __u16 vid)
769{ 769{
770 struct net_bridge *br = source->br;
771 struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)]; 770 struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)];
772 struct net_bridge_fdb_entry *fdb; 771 struct net_bridge_fdb_entry *fdb;
773 bool modified = false; 772 bool modified = false;
774 773
775 /* If the port cannot learn allow only local and static entries */ 774 /* If the port cannot learn allow only local and static entries */
776 if (!(state & NUD_PERMANENT) && !(state & NUD_NOARP) && 775 if (source && !(state & NUD_PERMANENT) && !(state & NUD_NOARP) &&
777 !(source->state == BR_STATE_LEARNING || 776 !(source->state == BR_STATE_LEARNING ||
778 source->state == BR_STATE_FORWARDING)) 777 source->state == BR_STATE_FORWARDING))
779 return -EPERM; 778 return -EPERM;
780 779
780 if (!source && !(state & NUD_PERMANENT)) {
781 pr_info("bridge: RTM_NEWNEIGH %s without NUD_PERMANENT\n",
782 br->dev->name);
783 return -EINVAL;
784 }
785
781 fdb = fdb_find(head, addr, vid); 786 fdb = fdb_find(head, addr, vid);
782 if (fdb == NULL) { 787 if (fdb == NULL) {
783 if (!(flags & NLM_F_CREATE)) 788 if (!(flags & NLM_F_CREATE))
@@ -832,22 +837,28 @@ static int fdb_add_entry(struct net_bridge_port *source, const __u8 *addr,
832 return 0; 837 return 0;
833} 838}
834 839
835static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge_port *p, 840static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge *br,
836 const unsigned char *addr, u16 nlh_flags, u16 vid) 841 struct net_bridge_port *p, const unsigned char *addr,
842 u16 nlh_flags, u16 vid)
837{ 843{
838 int err = 0; 844 int err = 0;
839 845
840 if (ndm->ndm_flags & NTF_USE) { 846 if (ndm->ndm_flags & NTF_USE) {
847 if (!p) {
848 pr_info("bridge: RTM_NEWNEIGH %s with NTF_USE is not supported\n",
849 br->dev->name);
850 return -EINVAL;
851 }
841 local_bh_disable(); 852 local_bh_disable();
842 rcu_read_lock(); 853 rcu_read_lock();
843 br_fdb_update(p->br, p, addr, vid, true); 854 br_fdb_update(br, p, addr, vid, true);
844 rcu_read_unlock(); 855 rcu_read_unlock();
845 local_bh_enable(); 856 local_bh_enable();
846 } else { 857 } else {
847 spin_lock_bh(&p->br->hash_lock); 858 spin_lock_bh(&br->hash_lock);
848 err = fdb_add_entry(p, addr, ndm->ndm_state, 859 err = fdb_add_entry(br, p, addr, ndm->ndm_state,
849 nlh_flags, vid); 860 nlh_flags, vid);
850 spin_unlock_bh(&p->br->hash_lock); 861 spin_unlock_bh(&br->hash_lock);
851 } 862 }
852 863
853 return err; 864 return err;
@@ -884,6 +895,7 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
884 dev->name); 895 dev->name);
885 return -EINVAL; 896 return -EINVAL;
886 } 897 }
898 br = p->br;
887 vg = nbp_vlan_group(p); 899 vg = nbp_vlan_group(p);
888 } 900 }
889 901
@@ -895,15 +907,9 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
895 } 907 }
896 908
897 /* VID was specified, so use it. */ 909 /* VID was specified, so use it. */
898 if (dev->priv_flags & IFF_EBRIDGE) 910 err = __br_fdb_add(ndm, br, p, addr, nlh_flags, vid);
899 err = br_fdb_insert(br, NULL, addr, vid);
900 else
901 err = __br_fdb_add(ndm, p, addr, nlh_flags, vid);
902 } else { 911 } else {
903 if (dev->priv_flags & IFF_EBRIDGE) 912 err = __br_fdb_add(ndm, br, p, addr, nlh_flags, 0);
904 err = br_fdb_insert(br, NULL, addr, 0);
905 else
906 err = __br_fdb_add(ndm, p, addr, nlh_flags, 0);
907 if (err || !vg || !vg->num_vlans) 913 if (err || !vg || !vg->num_vlans)
908 goto out; 914 goto out;
909 915
@@ -914,11 +920,7 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
914 list_for_each_entry(v, &vg->vlan_list, vlist) { 920 list_for_each_entry(v, &vg->vlan_list, vlist) {
915 if (!br_vlan_should_use(v)) 921 if (!br_vlan_should_use(v))
916 continue; 922 continue;
917 if (dev->priv_flags & IFF_EBRIDGE) 923 err = __br_fdb_add(ndm, br, p, addr, nlh_flags, v->vid);
918 err = br_fdb_insert(br, NULL, addr, v->vid);
919 else
920 err = __br_fdb_add(ndm, p, addr, nlh_flags,
921 v->vid);
922 if (err) 924 if (err)
923 goto out; 925 goto out;
924 } 926 }
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 8e486203d133..abe11f085479 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -80,13 +80,10 @@ static void br_do_proxy_arp(struct sk_buff *skb, struct net_bridge *br,
80 80
81 BR_INPUT_SKB_CB(skb)->proxyarp_replied = false; 81 BR_INPUT_SKB_CB(skb)->proxyarp_replied = false;
82 82
83 if (dev->flags & IFF_NOARP) 83 if ((dev->flags & IFF_NOARP) ||
84 !pskb_may_pull(skb, arp_hdr_len(dev)))
84 return; 85 return;
85 86
86 if (!pskb_may_pull(skb, arp_hdr_len(dev))) {
87 dev->stats.tx_dropped++;
88 return;
89 }
90 parp = arp_hdr(skb); 87 parp = arp_hdr(skb);
91 88
92 if (parp->ar_pro != htons(ETH_P_IP) || 89 if (parp->ar_pro != htons(ETH_P_IP) ||
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index a5423a1eec05..c5fea9393946 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -1138,7 +1138,7 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
1138 } else { 1138 } else {
1139 err = br_ip6_multicast_add_group(br, port, 1139 err = br_ip6_multicast_add_group(br, port,
1140 &grec->grec_mca, vid); 1140 &grec->grec_mca, vid);
1141 if (!err) 1141 if (err)
1142 break; 1142 break;
1143 } 1143 }
1144 } 1144 }
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index cceac5bb658f..0833c251aef7 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -368,6 +368,8 @@ ebt_check_match(struct ebt_entry_match *m, struct xt_mtchk_param *par,
368 368
369 match = xt_find_match(NFPROTO_BRIDGE, m->u.name, 0); 369 match = xt_find_match(NFPROTO_BRIDGE, m->u.name, 0);
370 if (IS_ERR(match) || match->family != NFPROTO_BRIDGE) { 370 if (IS_ERR(match) || match->family != NFPROTO_BRIDGE) {
371 if (!IS_ERR(match))
372 module_put(match->me);
371 request_module("ebt_%s", m->u.name); 373 request_module("ebt_%s", m->u.name);
372 match = xt_find_match(NFPROTO_BRIDGE, m->u.name, 0); 374 match = xt_find_match(NFPROTO_BRIDGE, m->u.name, 0);
373 } 375 }
diff --git a/net/bridge/netfilter/nft_meta_bridge.c b/net/bridge/netfilter/nft_meta_bridge.c
index 4b901d9f2e7c..ad47a921b701 100644
--- a/net/bridge/netfilter/nft_meta_bridge.c
+++ b/net/bridge/netfilter/nft_meta_bridge.c
@@ -86,6 +86,7 @@ static const struct nft_expr_ops nft_meta_bridge_set_ops = {
86 .init = nft_meta_set_init, 86 .init = nft_meta_set_init,
87 .destroy = nft_meta_set_destroy, 87 .destroy = nft_meta_set_destroy,
88 .dump = nft_meta_set_dump, 88 .dump = nft_meta_set_dump,
89 .validate = nft_meta_set_validate,
89}; 90};
90 91
91static const struct nft_expr_ops * 92static const struct nft_expr_ops *
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
index c83326c5ba58..ef34a02719d7 100644
--- a/net/ceph/mon_client.c
+++ b/net/ceph/mon_client.c
@@ -574,7 +574,7 @@ static void complete_generic_request(struct ceph_mon_generic_request *req)
574 put_generic_request(req); 574 put_generic_request(req);
575} 575}
576 576
577void cancel_generic_request(struct ceph_mon_generic_request *req) 577static void cancel_generic_request(struct ceph_mon_generic_request *req)
578{ 578{
579 struct ceph_mon_client *monc = req->monc; 579 struct ceph_mon_client *monc = req->monc;
580 struct ceph_mon_generic_request *lookup_req; 580 struct ceph_mon_generic_request *lookup_req;
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index b5ec09612ff7..a97e7b506612 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -4220,7 +4220,7 @@ static struct ceph_msg *alloc_msg_with_page_vector(struct ceph_msg_header *hdr)
4220 4220
4221 pages = ceph_alloc_page_vector(calc_pages_for(0, data_len), 4221 pages = ceph_alloc_page_vector(calc_pages_for(0, data_len),
4222 GFP_NOIO); 4222 GFP_NOIO);
4223 if (!pages) { 4223 if (IS_ERR(pages)) {
4224 ceph_msg_put(m); 4224 ceph_msg_put(m);
4225 return NULL; 4225 return NULL;
4226 } 4226 }
diff --git a/net/ceph/string_table.c b/net/ceph/string_table.c
index ca53c8319209..22fb96efcf34 100644
--- a/net/ceph/string_table.c
+++ b/net/ceph/string_table.c
@@ -84,12 +84,6 @@ retry:
84} 84}
85EXPORT_SYMBOL(ceph_find_or_create_string); 85EXPORT_SYMBOL(ceph_find_or_create_string);
86 86
87static void ceph_free_string(struct rcu_head *head)
88{
89 struct ceph_string *cs = container_of(head, struct ceph_string, rcu);
90 kfree(cs);
91}
92
93void ceph_release_string(struct kref *ref) 87void ceph_release_string(struct kref *ref)
94{ 88{
95 struct ceph_string *cs = container_of(ref, struct ceph_string, kref); 89 struct ceph_string *cs = container_of(ref, struct ceph_string, kref);
@@ -101,7 +95,7 @@ void ceph_release_string(struct kref *ref)
101 } 95 }
102 spin_unlock(&string_tree_lock); 96 spin_unlock(&string_tree_lock);
103 97
104 call_rcu(&cs->rcu, ceph_free_string); 98 kfree_rcu(cs, rcu);
105} 99}
106EXPORT_SYMBOL(ceph_release_string); 100EXPORT_SYMBOL(ceph_release_string);
107 101
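[Editor's note: the pattern generalises. Whenever an RCU callback does nothing but kfree() the enclosing object, kfree_rcu() replaces the call_rcu() + container_of() boilerplate deleted above. A minimal sketch with an illustrative type:]

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct example {		/* illustrative, not from the patch */
	int payload;
	struct rcu_head rcu;
};

static void example_release(struct example *e)
{
	/* frees e after a grace period; no named callback needed */
	kfree_rcu(e, rcu);
}
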
diff --git a/net/core/dev.c b/net/core/dev.c
index 4ce07dc25573..ea6312057a71 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3975,6 +3975,22 @@ sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
3975} 3975}
3976 3976
3977/** 3977/**
3978 * netdev_is_rx_handler_busy - check if receive handler is registered
3979 * @dev: device to check
3980 *
3981 * Check if a receive handler is already registered for a given device.
 3982 * Return true if there is one.
3983 *
3984 * The caller must hold the rtnl_mutex.
3985 */
3986bool netdev_is_rx_handler_busy(struct net_device *dev)
3987{
3988 ASSERT_RTNL();
3989 return dev && rtnl_dereference(dev->rx_handler);
3990}
3991EXPORT_SYMBOL_GPL(netdev_is_rx_handler_busy);
3992
3993/**
3978 * netdev_rx_handler_register - register receive handler 3994 * netdev_rx_handler_register - register receive handler
3979 * @dev: device to register a handler for 3995 * @dev: device to register a handler for
3980 * @rx_handler: receive handler to register 3996 * @rx_handler: receive handler to register
@@ -6045,8 +6061,7 @@ void *netdev_lower_dev_get_private(struct net_device *dev,
6045EXPORT_SYMBOL(netdev_lower_dev_get_private); 6061EXPORT_SYMBOL(netdev_lower_dev_get_private);
6046 6062
6047 6063
6048int dev_get_nest_level(struct net_device *dev, 6064int dev_get_nest_level(struct net_device *dev)
6049 bool (*type_check)(const struct net_device *dev))
6050{ 6065{
6051 struct net_device *lower = NULL; 6066 struct net_device *lower = NULL;
6052 struct list_head *iter; 6067 struct list_head *iter;
@@ -6056,15 +6071,12 @@ int dev_get_nest_level(struct net_device *dev,
6056 ASSERT_RTNL(); 6071 ASSERT_RTNL();
6057 6072
6058 netdev_for_each_lower_dev(dev, lower, iter) { 6073 netdev_for_each_lower_dev(dev, lower, iter) {
6059 nest = dev_get_nest_level(lower, type_check); 6074 nest = dev_get_nest_level(lower);
6060 if (max_nest < nest) 6075 if (max_nest < nest)
6061 max_nest = nest; 6076 max_nest = nest;
6062 } 6077 }
6063 6078
6064 if (type_check(dev)) 6079 return max_nest + 1;
6065 max_nest++;
6066
6067 return max_nest;
6068} 6080}
6069EXPORT_SYMBOL(dev_get_nest_level); 6081EXPORT_SYMBOL(dev_get_nest_level);
6070 6082
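[Editor's note: a hedged sketch of the intended caller side of the new helper; the my_* names are illustrative. A would-be consumer probes under RTNL before registering, instead of discovering -EBUSY from the register call.]

static rx_handler_result_t my_rx_handler(struct sk_buff **pskb)
{
	return RX_HANDLER_PASS;		/* illustrative no-op handler */
}

static int my_claim_device(struct net_device *dev)
{
	ASSERT_RTNL();

	if (netdev_is_rx_handler_busy(dev))
		return -EBUSY;		/* someone else owns the rx path */

	return netdev_rx_handler_register(dev, my_rx_handler, NULL);
}
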
diff --git a/net/core/filter.c b/net/core/filter.c
index 5708999f8a79..cb06aceb512a 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -1355,56 +1355,47 @@ static inline int bpf_try_make_writable(struct sk_buff *skb,
1355{ 1355{
1356 int err; 1356 int err;
1357 1357
1358 if (!skb_cloned(skb)) 1358 err = skb_ensure_writable(skb, write_len);
1359 return 0; 1359 bpf_compute_data_end(skb);
1360 if (skb_clone_writable(skb, write_len)) 1360
1361 return 0;
1362 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
1363 if (!err)
1364 bpf_compute_data_end(skb);
1365 return err; 1361 return err;
1366} 1362}
1367 1363
1364static inline void bpf_push_mac_rcsum(struct sk_buff *skb)
1365{
1366 if (skb_at_tc_ingress(skb))
1367 skb_postpush_rcsum(skb, skb_mac_header(skb), skb->mac_len);
1368}
1369
1370static inline void bpf_pull_mac_rcsum(struct sk_buff *skb)
1371{
1372 if (skb_at_tc_ingress(skb))
1373 skb_postpull_rcsum(skb, skb_mac_header(skb), skb->mac_len);
1374}
1375
1368static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags) 1376static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags)
1369{ 1377{
1370 struct bpf_scratchpad *sp = this_cpu_ptr(&bpf_sp);
1371 struct sk_buff *skb = (struct sk_buff *) (long) r1; 1378 struct sk_buff *skb = (struct sk_buff *) (long) r1;
1372 int offset = (int) r2; 1379 unsigned int offset = (unsigned int) r2;
1373 void *from = (void *) (long) r3; 1380 void *from = (void *) (long) r3;
1374 unsigned int len = (unsigned int) r4; 1381 unsigned int len = (unsigned int) r4;
1375 void *ptr; 1382 void *ptr;
1376 1383
1377 if (unlikely(flags & ~(BPF_F_RECOMPUTE_CSUM | BPF_F_INVALIDATE_HASH))) 1384 if (unlikely(flags & ~(BPF_F_RECOMPUTE_CSUM | BPF_F_INVALIDATE_HASH)))
1378 return -EINVAL; 1385 return -EINVAL;
1379 1386 if (unlikely(offset > 0xffff))
1380 /* bpf verifier guarantees that:
1381 * 'from' pointer points to bpf program stack
1382 * 'len' bytes of it were initialized
1383 * 'len' > 0
1384 * 'skb' is a valid pointer to 'struct sk_buff'
1385 *
1386 * so check for invalid 'offset' and too large 'len'
1387 */
1388 if (unlikely((u32) offset > 0xffff || len > sizeof(sp->buff)))
1389 return -EFAULT; 1387 return -EFAULT;
1390 if (unlikely(bpf_try_make_writable(skb, offset + len))) 1388 if (unlikely(bpf_try_make_writable(skb, offset + len)))
1391 return -EFAULT; 1389 return -EFAULT;
1392 1390
1393 ptr = skb_header_pointer(skb, offset, len, sp->buff); 1391 ptr = skb->data + offset;
1394 if (unlikely(!ptr))
1395 return -EFAULT;
1396
1397 if (flags & BPF_F_RECOMPUTE_CSUM) 1392 if (flags & BPF_F_RECOMPUTE_CSUM)
1398 skb_postpull_rcsum(skb, ptr, len); 1393 __skb_postpull_rcsum(skb, ptr, len, offset);
1399 1394
1400 memcpy(ptr, from, len); 1395 memcpy(ptr, from, len);
1401 1396
1402 if (ptr == sp->buff)
1403 /* skb_store_bits cannot return -EFAULT here */
1404 skb_store_bits(skb, offset, ptr, len);
1405
1406 if (flags & BPF_F_RECOMPUTE_CSUM) 1397 if (flags & BPF_F_RECOMPUTE_CSUM)
1407 skb_postpush_rcsum(skb, ptr, len); 1398 __skb_postpush_rcsum(skb, ptr, len, offset);
1408 if (flags & BPF_F_INVALIDATE_HASH) 1399 if (flags & BPF_F_INVALIDATE_HASH)
1409 skb_clear_hash(skb); 1400 skb_clear_hash(skb);
1410 1401
@@ -1425,12 +1416,12 @@ static const struct bpf_func_proto bpf_skb_store_bytes_proto = {
1425static u64 bpf_skb_load_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) 1416static u64 bpf_skb_load_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
1426{ 1417{
1427 const struct sk_buff *skb = (const struct sk_buff *)(unsigned long) r1; 1418 const struct sk_buff *skb = (const struct sk_buff *)(unsigned long) r1;
1428 int offset = (int) r2; 1419 unsigned int offset = (unsigned int) r2;
1429 void *to = (void *)(unsigned long) r3; 1420 void *to = (void *)(unsigned long) r3;
1430 unsigned int len = (unsigned int) r4; 1421 unsigned int len = (unsigned int) r4;
1431 void *ptr; 1422 void *ptr;
1432 1423
1433 if (unlikely((u32) offset > 0xffff)) 1424 if (unlikely(offset > 0xffff))
1434 goto err_clear; 1425 goto err_clear;
1435 1426
1436 ptr = skb_header_pointer(skb, offset, len, to); 1427 ptr = skb_header_pointer(skb, offset, len, to);
@@ -1458,20 +1449,17 @@ static const struct bpf_func_proto bpf_skb_load_bytes_proto = {
1458static u64 bpf_l3_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags) 1449static u64 bpf_l3_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
1459{ 1450{
1460 struct sk_buff *skb = (struct sk_buff *) (long) r1; 1451 struct sk_buff *skb = (struct sk_buff *) (long) r1;
1461 int offset = (int) r2; 1452 unsigned int offset = (unsigned int) r2;
1462 __sum16 sum, *ptr; 1453 __sum16 *ptr;
1463 1454
1464 if (unlikely(flags & ~(BPF_F_HDR_FIELD_MASK))) 1455 if (unlikely(flags & ~(BPF_F_HDR_FIELD_MASK)))
1465 return -EINVAL; 1456 return -EINVAL;
1466 if (unlikely((u32) offset > 0xffff)) 1457 if (unlikely(offset > 0xffff || offset & 1))
1467 return -EFAULT; 1458 return -EFAULT;
1468 if (unlikely(bpf_try_make_writable(skb, offset + sizeof(sum)))) 1459 if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr))))
1469 return -EFAULT;
1470
1471 ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum);
1472 if (unlikely(!ptr))
1473 return -EFAULT; 1460 return -EFAULT;
1474 1461
1462 ptr = (__sum16 *)(skb->data + offset);
1475 switch (flags & BPF_F_HDR_FIELD_MASK) { 1463 switch (flags & BPF_F_HDR_FIELD_MASK) {
1476 case 0: 1464 case 0:
1477 if (unlikely(from != 0)) 1465 if (unlikely(from != 0))
@@ -1489,10 +1477,6 @@ static u64 bpf_l3_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
1489 return -EINVAL; 1477 return -EINVAL;
1490 } 1478 }
1491 1479
1492 if (ptr == &sum)
1493 /* skb_store_bits guaranteed to not return -EFAULT here */
1494 skb_store_bits(skb, offset, ptr, sizeof(sum));
1495
1496 return 0; 1480 return 0;
1497} 1481}
1498 1482
@@ -1512,20 +1496,18 @@ static u64 bpf_l4_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
1512 struct sk_buff *skb = (struct sk_buff *) (long) r1; 1496 struct sk_buff *skb = (struct sk_buff *) (long) r1;
1513 bool is_pseudo = flags & BPF_F_PSEUDO_HDR; 1497 bool is_pseudo = flags & BPF_F_PSEUDO_HDR;
1514 bool is_mmzero = flags & BPF_F_MARK_MANGLED_0; 1498 bool is_mmzero = flags & BPF_F_MARK_MANGLED_0;
1515 int offset = (int) r2; 1499 unsigned int offset = (unsigned int) r2;
1516 __sum16 sum, *ptr; 1500 __sum16 *ptr;
1517 1501
1518 if (unlikely(flags & ~(BPF_F_MARK_MANGLED_0 | BPF_F_PSEUDO_HDR | 1502 if (unlikely(flags & ~(BPF_F_MARK_MANGLED_0 | BPF_F_PSEUDO_HDR |
1519 BPF_F_HDR_FIELD_MASK))) 1503 BPF_F_HDR_FIELD_MASK)))
1520 return -EINVAL; 1504 return -EINVAL;
1521 if (unlikely((u32) offset > 0xffff)) 1505 if (unlikely(offset > 0xffff || offset & 1))
1522 return -EFAULT; 1506 return -EFAULT;
1523 if (unlikely(bpf_try_make_writable(skb, offset + sizeof(sum)))) 1507 if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr))))
1524 return -EFAULT; 1508 return -EFAULT;
1525 1509
1526 ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum); 1510 ptr = (__sum16 *)(skb->data + offset);
1527 if (unlikely(!ptr))
1528 return -EFAULT;
1529 if (is_mmzero && !*ptr) 1511 if (is_mmzero && !*ptr)
1530 return 0; 1512 return 0;
1531 1513
@@ -1548,10 +1530,6 @@ static u64 bpf_l4_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
1548 1530
1549 if (is_mmzero && !*ptr) 1531 if (is_mmzero && !*ptr)
1550 *ptr = CSUM_MANGLED_0; 1532 *ptr = CSUM_MANGLED_0;
1551 if (ptr == &sum)
1552 /* skb_store_bits guaranteed to not return -EFAULT here */
1553 skb_store_bits(skb, offset, ptr, sizeof(sum));
1554
1555 return 0; 1533 return 0;
1556} 1534}
1557 1535
@@ -1607,9 +1585,6 @@ static const struct bpf_func_proto bpf_csum_diff_proto = {
1607 1585
1608static inline int __bpf_rx_skb(struct net_device *dev, struct sk_buff *skb) 1586static inline int __bpf_rx_skb(struct net_device *dev, struct sk_buff *skb)
1609{ 1587{
1610 if (skb_at_tc_ingress(skb))
1611 skb_postpush_rcsum(skb, skb_mac_header(skb), skb->mac_len);
1612
1613 return dev_forward_skb(dev, skb); 1588 return dev_forward_skb(dev, skb);
1614} 1589}
1615 1590
@@ -1648,6 +1623,8 @@ static u64 bpf_clone_redirect(u64 r1, u64 ifindex, u64 flags, u64 r4, u64 r5)
1648 if (unlikely(!skb)) 1623 if (unlikely(!skb))
1649 return -ENOMEM; 1624 return -ENOMEM;
1650 1625
1626 bpf_push_mac_rcsum(skb);
1627
1651 return flags & BPF_F_INGRESS ? 1628 return flags & BPF_F_INGRESS ?
1652 __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb); 1629 __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb);
1653} 1630}
@@ -1693,6 +1670,8 @@ int skb_do_redirect(struct sk_buff *skb)
1693 return -EINVAL; 1670 return -EINVAL;
1694 } 1671 }
1695 1672
1673 bpf_push_mac_rcsum(skb);
1674
1696 return ri->flags & BPF_F_INGRESS ? 1675 return ri->flags & BPF_F_INGRESS ?
1697 __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb); 1676 __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb);
1698} 1677}
@@ -1756,7 +1735,10 @@ static u64 bpf_skb_vlan_push(u64 r1, u64 r2, u64 vlan_tci, u64 r4, u64 r5)
1756 vlan_proto != htons(ETH_P_8021AD))) 1735 vlan_proto != htons(ETH_P_8021AD)))
1757 vlan_proto = htons(ETH_P_8021Q); 1736 vlan_proto = htons(ETH_P_8021Q);
1758 1737
1738 bpf_push_mac_rcsum(skb);
1759 ret = skb_vlan_push(skb, vlan_proto, vlan_tci); 1739 ret = skb_vlan_push(skb, vlan_proto, vlan_tci);
1740 bpf_pull_mac_rcsum(skb);
1741
1760 bpf_compute_data_end(skb); 1742 bpf_compute_data_end(skb);
1761 return ret; 1743 return ret;
1762} 1744}
@@ -1776,7 +1758,10 @@ static u64 bpf_skb_vlan_pop(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
1776 struct sk_buff *skb = (struct sk_buff *) (long) r1; 1758 struct sk_buff *skb = (struct sk_buff *) (long) r1;
1777 int ret; 1759 int ret;
1778 1760
1761 bpf_push_mac_rcsum(skb);
1779 ret = skb_vlan_pop(skb); 1762 ret = skb_vlan_pop(skb);
1763 bpf_pull_mac_rcsum(skb);
1764
1780 bpf_compute_data_end(skb); 1765 bpf_compute_data_end(skb);
1781 return ret; 1766 return ret;
1782} 1767}
@@ -2298,7 +2283,7 @@ bpf_get_skb_set_tunnel_proto(enum bpf_func_id which)
2298} 2283}
2299 2284
2300#ifdef CONFIG_SOCK_CGROUP_DATA 2285#ifdef CONFIG_SOCK_CGROUP_DATA
2301static u64 bpf_skb_in_cgroup(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) 2286static u64 bpf_skb_under_cgroup(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
2302{ 2287{
2303 struct sk_buff *skb = (struct sk_buff *)(long)r1; 2288 struct sk_buff *skb = (struct sk_buff *)(long)r1;
2304 struct bpf_map *map = (struct bpf_map *)(long)r2; 2289 struct bpf_map *map = (struct bpf_map *)(long)r2;
@@ -2321,8 +2306,8 @@ static u64 bpf_skb_in_cgroup(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
2321 return cgroup_is_descendant(sock_cgroup_ptr(&sk->sk_cgrp_data), cgrp); 2306 return cgroup_is_descendant(sock_cgroup_ptr(&sk->sk_cgrp_data), cgrp);
2322} 2307}
2323 2308
2324static const struct bpf_func_proto bpf_skb_in_cgroup_proto = { 2309static const struct bpf_func_proto bpf_skb_under_cgroup_proto = {
2325 .func = bpf_skb_in_cgroup, 2310 .func = bpf_skb_under_cgroup,
2326 .gpl_only = false, 2311 .gpl_only = false,
2327 .ret_type = RET_INTEGER, 2312 .ret_type = RET_INTEGER,
2328 .arg1_type = ARG_PTR_TO_CTX, 2313 .arg1_type = ARG_PTR_TO_CTX,
@@ -2402,8 +2387,8 @@ tc_cls_act_func_proto(enum bpf_func_id func_id)
2402 case BPF_FUNC_get_smp_processor_id: 2387 case BPF_FUNC_get_smp_processor_id:
2403 return &bpf_get_smp_processor_id_proto; 2388 return &bpf_get_smp_processor_id_proto;
2404#ifdef CONFIG_SOCK_CGROUP_DATA 2389#ifdef CONFIG_SOCK_CGROUP_DATA
2405 case BPF_FUNC_skb_in_cgroup: 2390 case BPF_FUNC_skb_under_cgroup:
2406 return &bpf_skb_in_cgroup_proto; 2391 return &bpf_skb_under_cgroup_proto;
2407#endif 2392#endif
2408 default: 2393 default:
2409 return sk_filter_func_proto(func_id); 2394 return sk_filter_func_proto(func_id);
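[Editor's note: for context on the bpf_try_make_writable() simplification above, the open-coded body it drops is, per the deleted lines, roughly what skb_ensure_writable() now subsumes. A hedged reconstruction:]

/* No-op when the head is unshared (or the clone is writable for
 * @write_len bytes); otherwise reallocate so the first @write_len
 * bytes are private and writable.
 */
static int legacy_try_make_writable(struct sk_buff *skb,
				    unsigned int write_len)
{
	if (!skb_cloned(skb) || skb_clone_writable(skb, write_len))
		return 0;

	return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
}
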
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 61ad43f61c5e..52742a02814f 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -680,11 +680,13 @@ EXPORT_SYMBOL_GPL(__skb_get_hash_symmetric);
680void __skb_get_hash(struct sk_buff *skb) 680void __skb_get_hash(struct sk_buff *skb)
681{ 681{
682 struct flow_keys keys; 682 struct flow_keys keys;
683 u32 hash;
683 684
684 __flow_hash_secret_init(); 685 __flow_hash_secret_init();
685 686
686 __skb_set_sw_hash(skb, ___skb_get_hash(skb, &keys, hashrnd), 687 hash = ___skb_get_hash(skb, &keys, hashrnd);
687 flow_keys_have_l4(&keys)); 688
689 __skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
688} 690}
689EXPORT_SYMBOL(__skb_get_hash); 691EXPORT_SYMBOL(__skb_get_hash);
690 692
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 415e117967c7..062a67ca9a21 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -2232,7 +2232,7 @@ static struct devinet_sysctl_table {
2232}; 2232};
2233 2233
2234static int __devinet_sysctl_register(struct net *net, char *dev_name, 2234static int __devinet_sysctl_register(struct net *net, char *dev_name,
2235 struct ipv4_devconf *p) 2235 int ifindex, struct ipv4_devconf *p)
2236{ 2236{
2237 int i; 2237 int i;
2238 struct devinet_sysctl_table *t; 2238 struct devinet_sysctl_table *t;
@@ -2255,6 +2255,8 @@ static int __devinet_sysctl_register(struct net *net, char *dev_name,
2255 goto free; 2255 goto free;
2256 2256
2257 p->sysctl = t; 2257 p->sysctl = t;
2258
2259 inet_netconf_notify_devconf(net, NETCONFA_ALL, ifindex, p);
2258 return 0; 2260 return 0;
2259 2261
2260free: 2262free:
@@ -2286,7 +2288,7 @@ static int devinet_sysctl_register(struct in_device *idev)
2286 if (err) 2288 if (err)
2287 return err; 2289 return err;
2288 err = __devinet_sysctl_register(dev_net(idev->dev), idev->dev->name, 2290 err = __devinet_sysctl_register(dev_net(idev->dev), idev->dev->name,
2289 &idev->cnf); 2291 idev->dev->ifindex, &idev->cnf);
2290 if (err) 2292 if (err)
2291 neigh_sysctl_unregister(idev->arp_parms); 2293 neigh_sysctl_unregister(idev->arp_parms);
2292 return err; 2294 return err;
@@ -2347,11 +2349,12 @@ static __net_init int devinet_init_net(struct net *net)
2347 } 2349 }
2348 2350
2349#ifdef CONFIG_SYSCTL 2351#ifdef CONFIG_SYSCTL
2350 err = __devinet_sysctl_register(net, "all", all); 2352 err = __devinet_sysctl_register(net, "all", NETCONFA_IFINDEX_ALL, all);
2351 if (err < 0) 2353 if (err < 0)
2352 goto err_reg_all; 2354 goto err_reg_all;
2353 2355
2354 err = __devinet_sysctl_register(net, "default", dflt); 2356 err = __devinet_sysctl_register(net, "default",
2357 NETCONFA_IFINDEX_DEFAULT, dflt);
2355 if (err < 0) 2358 if (err < 0)
2356 goto err_reg_dflt; 2359 goto err_reg_dflt;
2357 2360
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index ef2ebeb89d0f..1b25daf8c7f1 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -509,6 +509,7 @@ static int rtentry_to_fib_config(struct net *net, int cmd, struct rtentry *rt,
509 if (!dev) 509 if (!dev)
510 return -ENODEV; 510 return -ENODEV;
511 cfg->fc_oif = dev->ifindex; 511 cfg->fc_oif = dev->ifindex;
512 cfg->fc_table = l3mdev_fib_table(dev);
512 if (colon) { 513 if (colon) {
513 struct in_ifaddr *ifa; 514 struct in_ifaddr *ifa;
514 struct in_device *in_dev = __in_dev_get_rtnl(dev); 515 struct in_device *in_dev = __in_dev_get_rtnl(dev);
@@ -1027,7 +1028,7 @@ no_promotions:
1027 * First of all, we scan fib_info list searching 1028 * First of all, we scan fib_info list searching
1028 * for stray nexthop entries, then ignite fib_flush. 1029 * for stray nexthop entries, then ignite fib_flush.
1029 */ 1030 */
1030 if (fib_sync_down_addr(dev_net(dev), ifa->ifa_local)) 1031 if (fib_sync_down_addr(dev, ifa->ifa_local))
1031 fib_flush(dev_net(dev)); 1032 fib_flush(dev_net(dev));
1032 } 1033 }
1033 } 1034 }
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 539fa264e67d..e9f56225e53f 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -1057,6 +1057,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
1057 fi->fib_priority = cfg->fc_priority; 1057 fi->fib_priority = cfg->fc_priority;
1058 fi->fib_prefsrc = cfg->fc_prefsrc; 1058 fi->fib_prefsrc = cfg->fc_prefsrc;
1059 fi->fib_type = cfg->fc_type; 1059 fi->fib_type = cfg->fc_type;
1060 fi->fib_tb_id = cfg->fc_table;
1060 1061
1061 fi->fib_nhs = nhs; 1062 fi->fib_nhs = nhs;
1062 change_nexthops(fi) { 1063 change_nexthops(fi) {
@@ -1337,18 +1338,21 @@ nla_put_failure:
1337 * referring to it. 1338 * referring to it.
1338 * - device went down -> we must shutdown all nexthops going via it. 1339 * - device went down -> we must shutdown all nexthops going via it.
1339 */ 1340 */
1340int fib_sync_down_addr(struct net *net, __be32 local) 1341int fib_sync_down_addr(struct net_device *dev, __be32 local)
1341{ 1342{
1342 int ret = 0; 1343 int ret = 0;
1343 unsigned int hash = fib_laddr_hashfn(local); 1344 unsigned int hash = fib_laddr_hashfn(local);
1344 struct hlist_head *head = &fib_info_laddrhash[hash]; 1345 struct hlist_head *head = &fib_info_laddrhash[hash];
1346 struct net *net = dev_net(dev);
1347 int tb_id = l3mdev_fib_table(dev);
1345 struct fib_info *fi; 1348 struct fib_info *fi;
1346 1349
1347 if (!fib_info_laddrhash || local == 0) 1350 if (!fib_info_laddrhash || local == 0)
1348 return 0; 1351 return 0;
1349 1352
1350 hlist_for_each_entry(fi, head, fib_lhash) { 1353 hlist_for_each_entry(fi, head, fib_lhash) {
1351 if (!net_eq(fi->fib_net, net)) 1354 if (!net_eq(fi->fib_net, net) ||
1355 fi->fib_tb_id != tb_id)
1352 continue; 1356 continue;
1353 if (fi->fib_prefsrc == local) { 1357 if (fi->fib_prefsrc == local) {
1354 fi->fib_flags |= RTNH_F_DEAD; 1358 fi->fib_flags |= RTNH_F_DEAD;
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index d07fc076bea0..e2ffc2a5c7db 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -249,7 +249,7 @@ static inline unsigned long get_index(t_key key, struct key_vector *kv)
249 * index into the parent's child array. That is, they will be used to find 249 * index into the parent's child array. That is, they will be used to find
250 * 'n' among tp's children. 250 * 'n' among tp's children.
251 * 251 *
252 * The bits from (n->pos + n->bits) to (tn->pos - 1) - "S" - are skipped bits 252 * The bits from (n->pos + n->bits) to (tp->pos - 1) - "S" - are skipped bits
253 * for the node n. 253 * for the node n.
254 * 254 *
255 * All the bits we have seen so far are significant to the node n. The rest 255 * All the bits we have seen so far are significant to the node n. The rest
@@ -258,7 +258,7 @@ static inline unsigned long get_index(t_key key, struct key_vector *kv)
258 * The bits from (n->pos) to (n->pos + n->bits - 1) - "C" - are the index into 258 * The bits from (n->pos) to (n->pos + n->bits - 1) - "C" - are the index into
259 * n's child array, and will of course be different for each child. 259 * n's child array, and will of course be different for each child.
260 * 260 *
 261 * The rest of the bits, from 0 to (n->pos + n->bits), are completely unknown 261 * The rest of the bits, from 0 to (n->pos - 1) - "u" - are completely unknown
262 * at this point. 262 * at this point.
263 */ 263 */
264 264
@@ -2452,9 +2452,7 @@ struct fib_route_iter {
2452static struct key_vector *fib_route_get_idx(struct fib_route_iter *iter, 2452static struct key_vector *fib_route_get_idx(struct fib_route_iter *iter,
2453 loff_t pos) 2453 loff_t pos)
2454{ 2454{
2455 struct fib_table *tb = iter->main_tb;
2456 struct key_vector *l, **tp = &iter->tnode; 2455 struct key_vector *l, **tp = &iter->tnode;
2457 struct trie *t;
2458 t_key key; 2456 t_key key;
2459 2457
2460 /* use cache location of next-to-find key */ 2458 /* use cache location of next-to-find key */
@@ -2462,8 +2460,6 @@ static struct key_vector *fib_route_get_idx(struct fib_route_iter *iter,
2462 pos -= iter->pos; 2460 pos -= iter->pos;
2463 key = iter->key; 2461 key = iter->key;
2464 } else { 2462 } else {
2465 t = (struct trie *)tb->tb_data;
2466 iter->tnode = t->kv;
2467 iter->pos = 0; 2463 iter->pos = 0;
2468 key = 0; 2464 key = 0;
2469 } 2465 }
@@ -2504,12 +2500,12 @@ static void *fib_route_seq_start(struct seq_file *seq, loff_t *pos)
2504 return NULL; 2500 return NULL;
2505 2501
2506 iter->main_tb = tb; 2502 iter->main_tb = tb;
2503 t = (struct trie *)tb->tb_data;
2504 iter->tnode = t->kv;
2507 2505
2508 if (*pos != 0) 2506 if (*pos != 0)
2509 return fib_route_get_idx(iter, *pos); 2507 return fib_route_get_idx(iter, *pos);
2510 2508
2511 t = (struct trie *)tb->tb_data;
2512 iter->tnode = t->kv;
2513 iter->pos = 0; 2509 iter->pos = 0;
2514 iter->key = 0; 2510 iter->key = 0;
2515 2511
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 5b1481be0282..113cc43df789 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -370,7 +370,6 @@ static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
370 tunnel->parms.o_flags, proto, tunnel->parms.o_key, 370 tunnel->parms.o_flags, proto, tunnel->parms.o_key,
371 htonl(tunnel->o_seqno)); 371 htonl(tunnel->o_seqno));
372 372
373 skb_set_inner_protocol(skb, proto);
374 ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol); 373 ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol);
375} 374}
376 375
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index 9d847c302551..0f227db0e9ac 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -73,9 +73,11 @@ void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
73 skb_dst_set(skb, &rt->dst); 73 skb_dst_set(skb, &rt->dst);
74 memset(IPCB(skb), 0, sizeof(*IPCB(skb))); 74 memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
75 75
76 if (skb_iif && proto == IPPROTO_UDP) { 76 if (skb_iif && !(df & htons(IP_DF))) {
77 /* Arrived from an ingress interface and got udp encapuslated. 77 /* Arrived from an ingress interface, got encapsulated, with
78 * The encapsulated network segment length may exceed dst mtu. 78 * fragmentation of encapulating frames allowed.
79 * If skb is gso, the resulting encapsulated network segments
80 * may exceed dst mtu.
79 * Allow IP Fragmentation of segments. 81 * Allow IP Fragmentation of segments.
80 */ 82 */
81 IPCB(skb)->flags |= IPSKB_FRAG_SEGS; 83 IPCB(skb)->flags |= IPSKB_FRAG_SEGS;
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index a917903d5e97..cc701fa70b12 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -557,6 +557,33 @@ static struct rtnl_link_ops vti_link_ops __read_mostly = {
557 .get_link_net = ip_tunnel_get_link_net, 557 .get_link_net = ip_tunnel_get_link_net,
558}; 558};
559 559
560static bool is_vti_tunnel(const struct net_device *dev)
561{
562 return dev->netdev_ops == &vti_netdev_ops;
563}
564
565static int vti_device_event(struct notifier_block *unused,
566 unsigned long event, void *ptr)
567{
568 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
569 struct ip_tunnel *tunnel = netdev_priv(dev);
570
571 if (!is_vti_tunnel(dev))
572 return NOTIFY_DONE;
573
574 switch (event) {
575 case NETDEV_DOWN:
576 if (!net_eq(tunnel->net, dev_net(dev)))
577 xfrm_garbage_collect(tunnel->net);
578 break;
579 }
580 return NOTIFY_DONE;
581}
582
583static struct notifier_block vti_notifier_block __read_mostly = {
584 .notifier_call = vti_device_event,
585};
586
560static int __init vti_init(void) 587static int __init vti_init(void)
561{ 588{
562 const char *msg; 589 const char *msg;
@@ -564,6 +591,8 @@ static int __init vti_init(void)
564 591
565 pr_info("IPv4 over IPsec tunneling driver\n"); 592 pr_info("IPv4 over IPsec tunneling driver\n");
566 593
594 register_netdevice_notifier(&vti_notifier_block);
595
567 msg = "tunnel device"; 596 msg = "tunnel device";
568 err = register_pernet_device(&vti_net_ops); 597 err = register_pernet_device(&vti_net_ops);
569 if (err < 0) 598 if (err < 0)
@@ -596,6 +625,7 @@ xfrm_proto_ah_failed:
596xfrm_proto_esp_failed: 625xfrm_proto_esp_failed:
597 unregister_pernet_device(&vti_net_ops); 626 unregister_pernet_device(&vti_net_ops);
598pernet_dev_failed: 627pernet_dev_failed:
628 unregister_netdevice_notifier(&vti_notifier_block);
599 pr_err("vti init: failed to register %s\n", msg); 629 pr_err("vti init: failed to register %s\n", msg);
600 return err; 630 return err;
601} 631}
@@ -607,6 +637,7 @@ static void __exit vti_fini(void)
607 xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH); 637 xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH);
608 xfrm4_protocol_deregister(&vti_esp4_protocol, IPPROTO_ESP); 638 xfrm4_protocol_deregister(&vti_esp4_protocol, IPPROTO_ESP);
609 unregister_pernet_device(&vti_net_ops); 639 unregister_pernet_device(&vti_net_ops);
640 unregister_netdevice_notifier(&vti_notifier_block);
610} 641}
611 642
612module_init(vti_init); 643module_init(vti_init);
diff --git a/net/ipv4/netfilter/nft_reject_ipv4.c b/net/ipv4/netfilter/nft_reject_ipv4.c
index c24f41c816b3..2c2553b9026c 100644
--- a/net/ipv4/netfilter/nft_reject_ipv4.c
+++ b/net/ipv4/netfilter/nft_reject_ipv4.c
@@ -46,6 +46,7 @@ static const struct nft_expr_ops nft_reject_ipv4_ops = {
46 .eval = nft_reject_ipv4_eval, 46 .eval = nft_reject_ipv4_eval,
47 .init = nft_reject_init, 47 .init = nft_reject_init,
48 .dump = nft_reject_dump, 48 .dump = nft_reject_dump,
49 .validate = nft_reject_validate,
49}; 50};
50 51
51static struct nft_expr_type nft_reject_ipv4_type __read_mostly = { 52static struct nft_expr_type nft_reject_ipv4_type __read_mostly = {
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 032a96d78c99..ffbb218de520 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -3193,7 +3193,6 @@ int tcp_abort(struct sock *sk, int err)
3193 local_bh_enable(); 3193 local_bh_enable();
3194 return 0; 3194 return 0;
3195 } 3195 }
3196 sock_gen_put(sk);
3197 return -EOPNOTSUPP; 3196 return -EOPNOTSUPP;
3198 } 3197 }
3199 3198
@@ -3222,7 +3221,6 @@ int tcp_abort(struct sock *sk, int err)
3222 bh_unlock_sock(sk); 3221 bh_unlock_sock(sk);
3223 local_bh_enable(); 3222 local_bh_enable();
3224 release_sock(sk); 3223 release_sock(sk);
3225 sock_put(sk);
3226 return 0; 3224 return 0;
3227} 3225}
3228EXPORT_SYMBOL_GPL(tcp_abort); 3226EXPORT_SYMBOL_GPL(tcp_abort);
diff --git a/net/ipv4/tcp_diag.c b/net/ipv4/tcp_diag.c
index 4d610934fb39..a748c74aa8b7 100644
--- a/net/ipv4/tcp_diag.c
+++ b/net/ipv4/tcp_diag.c
@@ -54,11 +54,16 @@ static int tcp_diag_destroy(struct sk_buff *in_skb,
54{ 54{
55 struct net *net = sock_net(in_skb->sk); 55 struct net *net = sock_net(in_skb->sk);
56 struct sock *sk = inet_diag_find_one_icsk(net, &tcp_hashinfo, req); 56 struct sock *sk = inet_diag_find_one_icsk(net, &tcp_hashinfo, req);
57 int err;
57 58
58 if (IS_ERR(sk)) 59 if (IS_ERR(sk))
59 return PTR_ERR(sk); 60 return PTR_ERR(sk);
60 61
61 return sock_diag_destroy(sk, ECONNABORTED); 62 err = sock_diag_destroy(sk, ECONNABORTED);
63
64 sock_gen_put(sk);
65
66 return err;
62} 67}
63#endif 68#endif
64 69
diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
index 54d9f9b0120f..4e777a3243f9 100644
--- a/net/ipv4/tcp_fastopen.c
+++ b/net/ipv4/tcp_fastopen.c
@@ -150,6 +150,7 @@ void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb)
150 tp->segs_in = 0; 150 tp->segs_in = 0;
151 tcp_segs_in(tp, skb); 151 tcp_segs_in(tp, skb);
152 __skb_pull(skb, tcp_hdrlen(skb)); 152 __skb_pull(skb, tcp_hdrlen(skb));
153 sk_forced_mem_schedule(sk, skb->truesize);
153 skb_set_owner_r(skb, sk); 154 skb_set_owner_r(skb, sk);
154 155
155 TCP_SKB_CB(skb)->seq++; 156 TCP_SKB_CB(skb)->seq++;
@@ -226,6 +227,7 @@ static struct sock *tcp_fastopen_create_child(struct sock *sk,
226 tcp_fastopen_add_skb(child, skb); 227 tcp_fastopen_add_skb(child, skb);
227 228
228 tcp_rsk(req)->rcv_nxt = tp->rcv_nxt; 229 tcp_rsk(req)->rcv_nxt = tp->rcv_nxt;
230 tp->rcv_wup = tp->rcv_nxt;
229 /* tcp_conn_request() is sending the SYNACK, 231 /* tcp_conn_request() is sending the SYNACK,
230 * and queues the child into listener accept queue. 232 * and queues the child into listener accept queue.
231 */ 233 */
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 32b048e524d6..7158d4f8dae4 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -814,8 +814,14 @@ static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
814 u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 : 814 u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
815 tcp_sk(sk)->snd_nxt; 815 tcp_sk(sk)->snd_nxt;
816 816
817 /* RFC 7323 2.3
818 * The window field (SEG.WND) of every outgoing segment, with the
819 * exception of <SYN> segments, MUST be right-shifted by
820 * Rcv.Wind.Shift bits:
821 */
817 tcp_v4_send_ack(sock_net(sk), skb, seq, 822 tcp_v4_send_ack(sock_net(sk), skb, seq,
818 tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd, 823 tcp_rsk(req)->rcv_nxt,
824 req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
819 tcp_time_stamp, 825 tcp_time_stamp,
820 req->ts_recent, 826 req->ts_recent,
821 0, 827 0,
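[Editor's note: a worked example of the RFC 7323 rule the hunk applies. With rcv_wscale = 7, a 1,048,576-byte receive window goes out as 1048576 >> 7 = 8192 in the 16-bit window field. As a sketch:]

/* Illustrative only: on every non-SYN segment the advertised window
 * is the real window right-shifted by the negotiated scale
 * (RFC 7323, section 2.3).
 */
static inline u16 scaled_window(u32 rcv_wnd, u8 rcv_wscale)
{
	return (u16)(rcv_wnd >> rcv_wscale);
}
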
diff --git a/net/ipv4/tcp_yeah.c b/net/ipv4/tcp_yeah.c
index 028eb046ea40..9c5fc973267f 100644
--- a/net/ipv4/tcp_yeah.c
+++ b/net/ipv4/tcp_yeah.c
@@ -76,7 +76,7 @@ static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 acked)
76 if (!tcp_is_cwnd_limited(sk)) 76 if (!tcp_is_cwnd_limited(sk))
77 return; 77 return;
78 78
79 if (tp->snd_cwnd <= tp->snd_ssthresh) 79 if (tcp_in_slow_start(tp))
80 tcp_slow_start(tp, acked); 80 tcp_slow_start(tp, acked);
81 81
82 else if (!yeah->doing_reno_now) { 82 else if (!yeah->doing_reno_now) {
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index e61f7cd65d08..5fdcb8d108d4 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1182,13 +1182,13 @@ out:
1182 * @sk: socket 1182 * @sk: socket
1183 * 1183 *
1184 * Drops all bad checksum frames, until a valid one is found. 1184 * Drops all bad checksum frames, until a valid one is found.
1185 * Returns the length of found skb, or 0 if none is found. 1185 * Returns the length of found skb, or -1 if none is found.
1186 */ 1186 */
1187static unsigned int first_packet_length(struct sock *sk) 1187static int first_packet_length(struct sock *sk)
1188{ 1188{
1189 struct sk_buff_head list_kill, *rcvq = &sk->sk_receive_queue; 1189 struct sk_buff_head list_kill, *rcvq = &sk->sk_receive_queue;
1190 struct sk_buff *skb; 1190 struct sk_buff *skb;
1191 unsigned int res; 1191 int res;
1192 1192
1193 __skb_queue_head_init(&list_kill); 1193 __skb_queue_head_init(&list_kill);
1194 1194
@@ -1203,7 +1203,7 @@ static unsigned int first_packet_length(struct sock *sk)
1203 __skb_unlink(skb, rcvq); 1203 __skb_unlink(skb, rcvq);
1204 __skb_queue_tail(&list_kill, skb); 1204 __skb_queue_tail(&list_kill, skb);
1205 } 1205 }
1206 res = skb ? skb->len : 0; 1206 res = skb ? skb->len : -1;
1207 spin_unlock_bh(&rcvq->lock); 1207 spin_unlock_bh(&rcvq->lock);
1208 1208
1209 if (!skb_queue_empty(&list_kill)) { 1209 if (!skb_queue_empty(&list_kill)) {
@@ -1232,7 +1232,7 @@ int udp_ioctl(struct sock *sk, int cmd, unsigned long arg)
1232 1232
1233 case SIOCINQ: 1233 case SIOCINQ:
1234 { 1234 {
1235 unsigned int amount = first_packet_length(sk); 1235 int amount = max_t(int, 0, first_packet_length(sk));
1236 1236
1237 return put_user(amount, (int __user *)arg); 1237 return put_user(amount, (int __user *)arg);
1238 } 1238 }
@@ -2184,7 +2184,7 @@ unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait)
2184 2184
2185 /* Check for false positives due to checksum errors */ 2185 /* Check for false positives due to checksum errors */
2186 if ((mask & POLLRDNORM) && !(file->f_flags & O_NONBLOCK) && 2186 if ((mask & POLLRDNORM) && !(file->f_flags & O_NONBLOCK) &&
2187 !(sk->sk_shutdown & RCV_SHUTDOWN) && !first_packet_length(sk)) 2187 !(sk->sk_shutdown & RCV_SHUTDOWN) && first_packet_length(sk) == -1)
2188 mask &= ~(POLLIN | POLLRDNORM); 2188 mask &= ~(POLLIN | POLLRDNORM);
2189 2189
2190 return mask; 2190 return mask;
@@ -2216,7 +2216,6 @@ struct proto udp_prot = {
2216 .sysctl_wmem = &sysctl_udp_wmem_min, 2216 .sysctl_wmem = &sysctl_udp_wmem_min,
2217 .sysctl_rmem = &sysctl_udp_rmem_min, 2217 .sysctl_rmem = &sysctl_udp_rmem_min,
2218 .obj_size = sizeof(struct udp_sock), 2218 .obj_size = sizeof(struct udp_sock),
2219 .slab_flags = SLAB_DESTROY_BY_RCU,
2220 .h.udp_table = &udp_table, 2219 .h.udp_table = &udp_table,
2221#ifdef CONFIG_COMPAT 2220#ifdef CONFIG_COMPAT
2222 .compat_setsockopt = compat_udp_setsockopt, 2221 .compat_setsockopt = compat_udp_setsockopt,
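[Editor's note: the userspace-visible effect of the first_packet_length() change is confined to SIOCINQ, where the new -1 "no packet" sentinel is clamped with max_t() before being copied out, so callers still read 0 for an empty queue. A hypothetical userspace sketch:]

#include <sys/ioctl.h>
#include <linux/sockios.h>

/* Length of the first queued datagram on a UDP socket, 0 if none. */
static int queued_bytes(int fd)
{
	int n = 0;

	if (ioctl(fd, SIOCINQ, &n) < 0)
		return -1;
	return n;
}
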
diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c
index 3b3efbda48e1..2eea073e27ef 100644
--- a/net/ipv4/udplite.c
+++ b/net/ipv4/udplite.c
@@ -55,7 +55,6 @@ struct proto udplite_prot = {
55 .unhash = udp_lib_unhash, 55 .unhash = udp_lib_unhash,
56 .get_port = udp_v4_get_port, 56 .get_port = udp_v4_get_port,
57 .obj_size = sizeof(struct udp_sock), 57 .obj_size = sizeof(struct udp_sock),
58 .slab_flags = SLAB_DESTROY_BY_RCU,
59 .h.udp_table = &udplite_table, 58 .h.udp_table = &udplite_table,
60#ifdef CONFIG_COMPAT 59#ifdef CONFIG_COMPAT
61 .compat_setsockopt = compat_udp_setsockopt, 60 .compat_setsockopt = compat_udp_setsockopt,
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index b644a23c3db0..41f5b504a782 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -29,7 +29,7 @@ static struct dst_entry *__xfrm4_dst_lookup(struct net *net, struct flowi4 *fl4,
29 memset(fl4, 0, sizeof(*fl4)); 29 memset(fl4, 0, sizeof(*fl4));
30 fl4->daddr = daddr->a4; 30 fl4->daddr = daddr->a4;
31 fl4->flowi4_tos = tos; 31 fl4->flowi4_tos = tos;
32 fl4->flowi4_oif = oif; 32 fl4->flowi4_oif = l3mdev_master_ifindex_by_index(net, oif);
33 if (saddr) 33 if (saddr)
34 fl4->saddr = saddr->a4; 34 fl4->saddr = saddr->a4;
35 35
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index ab3e796596b1..2f1f5d439788 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -778,7 +778,14 @@ static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int newf)
778 } 778 }
779 779
780 if (p == &net->ipv6.devconf_all->forwarding) { 780 if (p == &net->ipv6.devconf_all->forwarding) {
781 int old_dflt = net->ipv6.devconf_dflt->forwarding;
782
781 net->ipv6.devconf_dflt->forwarding = newf; 783 net->ipv6.devconf_dflt->forwarding = newf;
784 if ((!newf) ^ (!old_dflt))
785 inet6_netconf_notify_devconf(net, NETCONFA_FORWARDING,
786 NETCONFA_IFINDEX_DEFAULT,
787 net->ipv6.devconf_dflt);
788
782 addrconf_forward_change(net, newf); 789 addrconf_forward_change(net, newf);
783 if ((!newf) ^ (!old)) 790 if ((!newf) ^ (!old))
784 inet6_netconf_notify_devconf(net, NETCONFA_FORWARDING, 791 inet6_netconf_notify_devconf(net, NETCONFA_FORWARDING,
@@ -1872,7 +1879,6 @@ static int addrconf_dad_end(struct inet6_ifaddr *ifp)
1872 1879
1873void addrconf_dad_failure(struct inet6_ifaddr *ifp) 1880void addrconf_dad_failure(struct inet6_ifaddr *ifp)
1874{ 1881{
1875 struct in6_addr addr;
1876 struct inet6_dev *idev = ifp->idev; 1882 struct inet6_dev *idev = ifp->idev;
1877 struct net *net = dev_net(ifp->idev->dev); 1883 struct net *net = dev_net(ifp->idev->dev);
1878 1884
@@ -1934,18 +1940,6 @@ void addrconf_dad_failure(struct inet6_ifaddr *ifp)
1934 in6_ifa_put(ifp2); 1940 in6_ifa_put(ifp2);
1935lock_errdad: 1941lock_errdad:
1936 spin_lock_bh(&ifp->lock); 1942 spin_lock_bh(&ifp->lock);
1937 } else if (idev->cnf.accept_dad > 1 && !idev->cnf.disable_ipv6) {
1938 addr.s6_addr32[0] = htonl(0xfe800000);
1939 addr.s6_addr32[1] = 0;
1940
1941 if (!ipv6_generate_eui64(addr.s6_addr + 8, idev->dev) &&
1942 ipv6_addr_equal(&ifp->addr, &addr)) {
1943 /* DAD failed for link-local based on MAC address */
1944 idev->cnf.disable_ipv6 = 1;
1945
1946 pr_info("%s: IPv6 being disabled!\n",
1947 ifp->idev->dev->name);
1948 }
1949 } 1943 }
1950 1944
1951errdad: 1945errdad:
@@ -1954,6 +1948,7 @@ errdad:
1954 spin_unlock_bh(&ifp->lock); 1948 spin_unlock_bh(&ifp->lock);
1955 1949
1956 addrconf_mod_dad_work(ifp, 0); 1950 addrconf_mod_dad_work(ifp, 0);
1951 in6_ifa_put(ifp);
1957} 1952}
1958 1953
1959/* Join to solicited addr multicast group. 1954/* Join to solicited addr multicast group.
@@ -3543,7 +3538,7 @@ static int addrconf_ifdown(struct net_device *dev, int how)
3543 /* combine the user config with event to determine if permanent 3538 /* combine the user config with event to determine if permanent
3544 * addresses are to be removed from address hash table 3539 * addresses are to be removed from address hash table
3545 */ 3540 */
3546 keep_addr = !(how || _keep_addr <= 0); 3541 keep_addr = !(how || _keep_addr <= 0 || idev->cnf.disable_ipv6);
3547 3542
3548 /* Step 2: clear hash table */ 3543 /* Step 2: clear hash table */
3549 for (i = 0; i < IN6_ADDR_HSIZE; i++) { 3544 for (i = 0; i < IN6_ADDR_HSIZE; i++) {
@@ -3599,7 +3594,7 @@ restart:
3599 /* re-combine the user config with event to determine if permanent 3594 /* re-combine the user config with event to determine if permanent
3600 * addresses are to be removed from the interface list 3595 * addresses are to be removed from the interface list
3601 */ 3596 */
3602 keep_addr = (!how && _keep_addr > 0); 3597 keep_addr = (!how && _keep_addr > 0 && !idev->cnf.disable_ipv6);
3603 3598
3604 INIT_LIST_HEAD(&del_list); 3599 INIT_LIST_HEAD(&del_list);
3605 list_for_each_entry_safe(ifa, tmp, &idev->addr_list, if_list) { 3600 list_for_each_entry_safe(ifa, tmp, &idev->addr_list, if_list) {
@@ -3821,6 +3816,7 @@ static void addrconf_dad_work(struct work_struct *w)
3821 dad_work); 3816 dad_work);
3822 struct inet6_dev *idev = ifp->idev; 3817 struct inet6_dev *idev = ifp->idev;
3823 struct in6_addr mcaddr; 3818 struct in6_addr mcaddr;
3819 bool disable_ipv6 = false;
3824 3820
3825 enum { 3821 enum {
3826 DAD_PROCESS, 3822 DAD_PROCESS,
@@ -3837,6 +3833,24 @@ static void addrconf_dad_work(struct work_struct *w)
3837 } else if (ifp->state == INET6_IFADDR_STATE_ERRDAD) { 3833 } else if (ifp->state == INET6_IFADDR_STATE_ERRDAD) {
3838 action = DAD_ABORT; 3834 action = DAD_ABORT;
3839 ifp->state = INET6_IFADDR_STATE_POSTDAD; 3835 ifp->state = INET6_IFADDR_STATE_POSTDAD;
3836
3837 if (idev->cnf.accept_dad > 1 && !idev->cnf.disable_ipv6 &&
3838 !(ifp->flags & IFA_F_STABLE_PRIVACY)) {
3839 struct in6_addr addr;
3840
3841 addr.s6_addr32[0] = htonl(0xfe800000);
3842 addr.s6_addr32[1] = 0;
3843
3844 if (!ipv6_generate_eui64(addr.s6_addr + 8, idev->dev) &&
3845 ipv6_addr_equal(&ifp->addr, &addr)) {
3846 /* DAD failed for link-local based on MAC */
3847 idev->cnf.disable_ipv6 = 1;
3848
3849 pr_info("%s: IPv6 being disabled!\n",
3850 ifp->idev->dev->name);
3851 disable_ipv6 = true;
3852 }
3853 }
3840 } 3854 }
3841 spin_unlock_bh(&ifp->lock); 3855 spin_unlock_bh(&ifp->lock);
3842 3856
@@ -3844,7 +3858,10 @@ static void addrconf_dad_work(struct work_struct *w)
3844 addrconf_dad_begin(ifp); 3858 addrconf_dad_begin(ifp);
3845 goto out; 3859 goto out;
3846 } else if (action == DAD_ABORT) { 3860 } else if (action == DAD_ABORT) {
3861 in6_ifa_hold(ifp);
3847 addrconf_dad_stop(ifp, 1); 3862 addrconf_dad_stop(ifp, 1);
3863 if (disable_ipv6)
3864 addrconf_ifdown(idev->dev, 0);
3848 goto out; 3865 goto out;
3849 } 3866 }
3850 3867
@@ -6017,7 +6034,7 @@ static const struct ctl_table addrconf_sysctl[] = {
6017static int __addrconf_sysctl_register(struct net *net, char *dev_name, 6034static int __addrconf_sysctl_register(struct net *net, char *dev_name,
6018 struct inet6_dev *idev, struct ipv6_devconf *p) 6035 struct inet6_dev *idev, struct ipv6_devconf *p)
6019{ 6036{
6020 int i; 6037 int i, ifindex;
6021 struct ctl_table *table; 6038 struct ctl_table *table;
6022 char path[sizeof("net/ipv6/conf/") + IFNAMSIZ]; 6039 char path[sizeof("net/ipv6/conf/") + IFNAMSIZ];
6023 6040
@@ -6037,6 +6054,13 @@ static int __addrconf_sysctl_register(struct net *net, char *dev_name,
6037 if (!p->sysctl_header) 6054 if (!p->sysctl_header)
6038 goto free; 6055 goto free;
6039 6056
6057 if (!strcmp(dev_name, "all"))
6058 ifindex = NETCONFA_IFINDEX_ALL;
6059 else if (!strcmp(dev_name, "default"))
6060 ifindex = NETCONFA_IFINDEX_DEFAULT;
6061 else
6062 ifindex = idev->dev->ifindex;
6063 inet6_netconf_notify_devconf(net, NETCONFA_ALL, ifindex, p);
6040 return 0; 6064 return 0;
6041 6065
6042free: 6066free:
diff --git a/net/ipv6/calipso.c b/net/ipv6/calipso.c
index c53b92c617c5..37ac9de713c6 100644
--- a/net/ipv6/calipso.c
+++ b/net/ipv6/calipso.c
@@ -952,8 +952,10 @@ calipso_opt_insert(struct ipv6_opt_hdr *hop,
952 memcpy(new, hop, start); 952 memcpy(new, hop, start);
953 ret_val = calipso_genopt((unsigned char *)new, start, buf_len, doi_def, 953 ret_val = calipso_genopt((unsigned char *)new, start, buf_len, doi_def,
954 secattr); 954 secattr);
955 if (ret_val < 0) 955 if (ret_val < 0) {
956 kfree(new);
956 return ERR_PTR(ret_val); 957 return ERR_PTR(ret_val);
958 }
957 959
958 buf_len = start + ret_val; 960 buf_len = start + ret_val;
959 /* At this point buf_len aligns to 4n, so (buf_len & 4) pads to 8n */ 961 /* At this point buf_len aligns to 4n, so (buf_len & 4) pads to 8n */
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 776d145113e1..704274cbd495 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -519,8 +519,6 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
519 gre_build_header(skb, tunnel->tun_hlen, tunnel->parms.o_flags, 519 gre_build_header(skb, tunnel->tun_hlen, tunnel->parms.o_flags,
520 protocol, tunnel->parms.o_key, htonl(tunnel->o_seqno)); 520 protocol, tunnel->parms.o_key, htonl(tunnel->o_seqno));
521 521
522 skb_set_inner_protocol(skb, protocol);
523
524 return ip6_tnl_xmit(skb, dev, dsfield, fl6, encap_limit, pmtu, 522 return ip6_tnl_xmit(skb, dev, dsfield, fl6, encap_limit, pmtu,
525 NEXTHDR_GRE); 523 NEXTHDR_GRE);
526} 524}
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 7b0481e3738f..888543debe4e 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1174,6 +1174,7 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
1174 encap_limit = t->parms.encap_limit; 1174 encap_limit = t->parms.encap_limit;
1175 1175
1176 memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6)); 1176 memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
1177 fl6.flowi6_proto = IPPROTO_IPIP;
1177 1178
1178 dsfield = ipv4_get_dsfield(iph); 1179 dsfield = ipv4_get_dsfield(iph);
1179 1180
@@ -1233,6 +1234,7 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
1233 encap_limit = t->parms.encap_limit; 1234 encap_limit = t->parms.encap_limit;
1234 1235
1235 memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6)); 1236 memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
1237 fl6.flowi6_proto = IPPROTO_IPV6;
1236 1238
1237 dsfield = ipv6_get_dsfield(ipv6h); 1239 dsfield = ipv6_get_dsfield(ipv6h);
1238 if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS) 1240 if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
diff --git a/net/ipv6/netfilter/nft_reject_ipv6.c b/net/ipv6/netfilter/nft_reject_ipv6.c
index 533cd5719c59..92bda9908bb9 100644
--- a/net/ipv6/netfilter/nft_reject_ipv6.c
+++ b/net/ipv6/netfilter/nft_reject_ipv6.c
@@ -47,6 +47,7 @@ static const struct nft_expr_ops nft_reject_ipv6_ops = {
47 .eval = nft_reject_ipv6_eval, 47 .eval = nft_reject_ipv6_eval,
48 .init = nft_reject_init, 48 .init = nft_reject_init,
49 .dump = nft_reject_dump, 49 .dump = nft_reject_dump,
50 .validate = nft_reject_validate,
50}; 51};
51 52
52static struct nft_expr_type nft_reject_ipv6_type __read_mostly = { 53static struct nft_expr_type nft_reject_ipv6_type __read_mostly = {
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
index fed40d1ec29b..0e983b694ee8 100644
--- a/net/ipv6/ping.c
+++ b/net/ipv6/ping.c
@@ -55,7 +55,7 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
55 struct icmp6hdr user_icmph; 55 struct icmp6hdr user_icmph;
56 int addr_type; 56 int addr_type;
57 struct in6_addr *daddr; 57 struct in6_addr *daddr;
58 int iif = 0; 58 int oif = 0;
59 struct flowi6 fl6; 59 struct flowi6 fl6;
60 int err; 60 int err;
61 struct dst_entry *dst; 61 struct dst_entry *dst;
@@ -78,25 +78,30 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
78 if (u->sin6_family != AF_INET6) { 78 if (u->sin6_family != AF_INET6) {
79 return -EAFNOSUPPORT; 79 return -EAFNOSUPPORT;
80 } 80 }
81 if (sk->sk_bound_dev_if &&
82 sk->sk_bound_dev_if != u->sin6_scope_id) {
83 return -EINVAL;
84 }
85 daddr = &(u->sin6_addr); 81 daddr = &(u->sin6_addr);
86 iif = u->sin6_scope_id; 82 if (__ipv6_addr_needs_scope_id(ipv6_addr_type(daddr)))
83 oif = u->sin6_scope_id;
87 } else { 84 } else {
88 if (sk->sk_state != TCP_ESTABLISHED) 85 if (sk->sk_state != TCP_ESTABLISHED)
89 return -EDESTADDRREQ; 86 return -EDESTADDRREQ;
90 daddr = &sk->sk_v6_daddr; 87 daddr = &sk->sk_v6_daddr;
91 } 88 }
92 89
93 if (!iif) 90 if (!oif)
94 iif = sk->sk_bound_dev_if; 91 oif = sk->sk_bound_dev_if;
92
93 if (!oif)
94 oif = np->sticky_pktinfo.ipi6_ifindex;
95
96 if (!oif && ipv6_addr_is_multicast(daddr))
97 oif = np->mcast_oif;
98 else if (!oif)
99 oif = np->ucast_oif;
95 100
96 addr_type = ipv6_addr_type(daddr); 101 addr_type = ipv6_addr_type(daddr);
97 if (__ipv6_addr_needs_scope_id(addr_type) && !iif) 102 if ((__ipv6_addr_needs_scope_id(addr_type) && !oif) ||
98 return -EINVAL; 103 (addr_type & IPV6_ADDR_MAPPED) ||
99 if (addr_type & IPV6_ADDR_MAPPED) 104 (oif && sk->sk_bound_dev_if && oif != sk->sk_bound_dev_if))
100 return -EINVAL; 105 return -EINVAL;
101 106
102 /* TODO: use ip6_datagram_send_ctl to get options from cmsg */ 107 /* TODO: use ip6_datagram_send_ctl to get options from cmsg */
@@ -106,16 +111,12 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
106 fl6.flowi6_proto = IPPROTO_ICMPV6; 111 fl6.flowi6_proto = IPPROTO_ICMPV6;
107 fl6.saddr = np->saddr; 112 fl6.saddr = np->saddr;
108 fl6.daddr = *daddr; 113 fl6.daddr = *daddr;
114 fl6.flowi6_oif = oif;
109 fl6.flowi6_mark = sk->sk_mark; 115 fl6.flowi6_mark = sk->sk_mark;
110 fl6.fl6_icmp_type = user_icmph.icmp6_type; 116 fl6.fl6_icmp_type = user_icmph.icmp6_type;
111 fl6.fl6_icmp_code = user_icmph.icmp6_code; 117 fl6.fl6_icmp_code = user_icmph.icmp6_code;
112 security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); 118 security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
113 119
114 if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
115 fl6.flowi6_oif = np->mcast_oif;
116 else if (!fl6.flowi6_oif)
117 fl6.flowi6_oif = np->ucast_oif;
118
119 ipc6.tclass = np->tclass; 120 ipc6.tclass = np->tclass;
120 fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel); 121 fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel);
121 122
@@ -125,8 +126,10 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
125 rt = (struct rt6_info *) dst; 126 rt = (struct rt6_info *) dst;
126 127
127 np = inet6_sk(sk); 128 np = inet6_sk(sk);
128 if (!np) 129 if (!np) {
129 return -EBADF; 130 err = -EBADF;
131 goto dst_err_out;
132 }
130 133
131 if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr)) 134 if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
132 fl6.flowi6_oif = np->mcast_oif; 135 fl6.flowi6_oif = np->mcast_oif;
@@ -162,6 +165,9 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
162 } 165 }
163 release_sock(sk); 166 release_sock(sk);
164 167
168dst_err_out:
169 dst_release(dst);
170
165 if (err) 171 if (err)
166 return err; 172 return err;
167 173
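
The ping_v6_sendmsg() rework replaces the early sin6_scope_id-versus-bound-device bailout with a single resolution chain for the output interface, validated once at the end together with the scope and mapped-address checks. A sketch of the resulting precedence; the context struct is hypothetical and only mirrors the kernel field names:

    /* Illustrative ordering of the oif fallbacks the patch establishes. */
    struct ping_ctx {
        int scope_oif;        /* sin6_scope_id, if the address is scoped */
        int bound_oif;        /* sk->sk_bound_dev_if */
        int sticky_oif;       /* sticky_pktinfo.ipi6_ifindex */
        int mcast_oif, ucast_oif;
        int daddr_is_mcast;
    };

    static int resolve_oif(const struct ping_ctx *c)
    {
        int oif = c->scope_oif;

        if (!oif)
            oif = c->bound_oif;
        if (!oif)
            oif = c->sticky_oif;
        if (!oif)
            oif = c->daddr_is_mcast ? c->mcast_oif : c->ucast_oif;
        return oif;
    }

A zero result is left for the routing lookup to resolve, matching the kernel behaviour.
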
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 33df8b8575cc..94f4f89d73e7 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -944,9 +944,15 @@ static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
944 /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV 944 /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
945 * sk->sk_state == TCP_SYN_RECV -> for Fast Open. 945 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
946 */ 946 */
947 /* RFC 7323 2.3
948 * The window field (SEG.WND) of every outgoing segment, with the
949 * exception of <SYN> segments, MUST be right-shifted by
950 * Rcv.Wind.Shift bits:
951 */
947 tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ? 952 tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
948 tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt, 953 tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
949 tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd, 954 tcp_rsk(req)->rcv_nxt,
955 req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
950 tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if, 956 tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
951 tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr), 957 tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
952 0, 0); 958 0, 0);
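
The tcp_v6_reqsk_send_ack() change applies the RFC 7323 rule quoted in the new comment: ACKs sent on behalf of a request socket must advertise the scaled window, where previously the raw rsk_rcv_wnd leaked onto the wire. The arithmetic, with an explicit clamp to the 16-bit header field added here as a safety assumption of this sketch (the patch itself relies on the window fitting after the shift):

    #include <stdint.h>

    /* RFC 7323 2.3: every non-SYN segment advertises the receive
     * window right-shifted by the negotiated scale. */
    static uint16_t wire_window(uint32_t rcv_wnd, uint8_t rcv_wscale)
    {
        uint32_t w = rcv_wnd >> rcv_wscale;

        return (w > UINT16_MAX) ? UINT16_MAX : (uint16_t)w;
    }
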
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 81e2f98b958d..19ac3a1c308d 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1460,7 +1460,6 @@ struct proto udpv6_prot = {
1460 .sysctl_wmem = &sysctl_udp_wmem_min, 1460 .sysctl_wmem = &sysctl_udp_wmem_min,
1461 .sysctl_rmem = &sysctl_udp_rmem_min, 1461 .sysctl_rmem = &sysctl_udp_rmem_min,
1462 .obj_size = sizeof(struct udp6_sock), 1462 .obj_size = sizeof(struct udp6_sock),
1463 .slab_flags = SLAB_DESTROY_BY_RCU,
1464 .h.udp_table = &udp_table, 1463 .h.udp_table = &udp_table,
1465#ifdef CONFIG_COMPAT 1464#ifdef CONFIG_COMPAT
1466 .compat_setsockopt = compat_udpv6_setsockopt, 1465 .compat_setsockopt = compat_udpv6_setsockopt,
diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c
index 9cf097e206e9..fd6ef414899b 100644
--- a/net/ipv6/udplite.c
+++ b/net/ipv6/udplite.c
@@ -50,7 +50,6 @@ struct proto udplitev6_prot = {
50 .unhash = udp_lib_unhash, 50 .unhash = udp_lib_unhash,
51 .get_port = udp_v6_get_port, 51 .get_port = udp_v6_get_port,
52 .obj_size = sizeof(struct udp6_sock), 52 .obj_size = sizeof(struct udp6_sock),
53 .slab_flags = SLAB_DESTROY_BY_RCU,
54 .h.udp_table = &udplite_table, 53 .h.udp_table = &udplite_table,
55#ifdef CONFIG_COMPAT 54#ifdef CONFIG_COMPAT
56 .compat_setsockopt = compat_udpv6_setsockopt, 55 .compat_setsockopt = compat_udpv6_setsockopt,
diff --git a/net/ipv6/xfrm6_input.c b/net/ipv6/xfrm6_input.c
index 0eaab1fa6be5..00a2d40677d6 100644
--- a/net/ipv6/xfrm6_input.c
+++ b/net/ipv6/xfrm6_input.c
@@ -23,6 +23,7 @@ int xfrm6_extract_input(struct xfrm_state *x, struct sk_buff *skb)
23 23
24int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi) 24int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi)
25{ 25{
26 XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = NULL;
26 XFRM_SPI_SKB_CB(skb)->family = AF_INET6; 27 XFRM_SPI_SKB_CB(skb)->family = AF_INET6;
27 XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct ipv6hdr, daddr); 28 XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct ipv6hdr, daddr);
28 return xfrm_input(skb, nexthdr, spi, 0); 29 return xfrm_input(skb, nexthdr, spi, 0);
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 6cc97003e4a9..70a86adad875 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -36,7 +36,7 @@ static struct dst_entry *xfrm6_dst_lookup(struct net *net, int tos, int oif,
36 int err; 36 int err;
37 37
38 memset(&fl6, 0, sizeof(fl6)); 38 memset(&fl6, 0, sizeof(fl6));
39 fl6.flowi6_oif = oif; 39 fl6.flowi6_oif = l3mdev_master_ifindex_by_index(net, oif);
40 fl6.flowi6_flags = FLOWI_FLAG_SKIP_NH_OIF; 40 fl6.flowi6_flags = FLOWI_FLAG_SKIP_NH_OIF;
41 memcpy(&fl6.daddr, daddr, sizeof(fl6.daddr)); 41 memcpy(&fl6.daddr, daddr, sizeof(fl6.daddr));
42 if (saddr) 42 if (saddr)
diff --git a/net/irda/iriap.c b/net/irda/iriap.c
index 4a7ae32afa09..1138eaf5c682 100644
--- a/net/irda/iriap.c
+++ b/net/irda/iriap.c
@@ -185,8 +185,12 @@ struct iriap_cb *iriap_open(__u8 slsap_sel, int mode, void *priv,
185 185
186 self->magic = IAS_MAGIC; 186 self->magic = IAS_MAGIC;
187 self->mode = mode; 187 self->mode = mode;
188 if (mode == IAS_CLIENT) 188 if (mode == IAS_CLIENT) {
189 iriap_register_lsap(self, slsap_sel, mode); 189 if (iriap_register_lsap(self, slsap_sel, mode)) {
190 kfree(self);
191 return NULL;
192 }
193 }
190 194
191 self->confirm = callback; 195 self->confirm = callback;
192 self->priv = priv; 196 self->priv = priv;
diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
index cb39e05b166c..411693288648 100644
--- a/net/kcm/kcmsock.c
+++ b/net/kcm/kcmsock.c
@@ -13,6 +13,7 @@
13#include <linux/socket.h> 13#include <linux/socket.h>
14#include <linux/uaccess.h> 14#include <linux/uaccess.h>
15#include <linux/workqueue.h> 15#include <linux/workqueue.h>
16#include <linux/syscalls.h>
16#include <net/kcm.h> 17#include <net/kcm.h>
17#include <net/netns/generic.h> 18#include <net/netns/generic.h>
18#include <net/sock.h> 19#include <net/sock.h>
@@ -2029,7 +2030,7 @@ static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
2029 if (copy_to_user((void __user *)arg, &info, 2030 if (copy_to_user((void __user *)arg, &info,
2030 sizeof(info))) { 2031 sizeof(info))) {
2031 err = -EFAULT; 2032 err = -EFAULT;
2032 sock_release(newsock); 2033 sys_close(info.fd);
2033 } 2034 }
2034 } 2035 }
2035 2036
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 1e40dacaa137..a2ed3bda4ddc 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -1855,6 +1855,9 @@ static __net_exit void l2tp_exit_net(struct net *net)
1855 (void)l2tp_tunnel_delete(tunnel); 1855 (void)l2tp_tunnel_delete(tunnel);
1856 } 1856 }
1857 rcu_read_unlock_bh(); 1857 rcu_read_unlock_bh();
1858
1859 flush_workqueue(l2tp_wq);
1860 rcu_barrier();
1858} 1861}
1859 1862
1860static struct pernet_operations l2tp_net_ops = { 1863static struct pernet_operations l2tp_net_ops = {
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index d9560aa2dba3..232cb92033e8 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -856,7 +856,7 @@ static int pppol2tp_getname(struct socket *sock, struct sockaddr *uaddr,
856 error = -ENOTCONN; 856 error = -ENOTCONN;
857 if (sk == NULL) 857 if (sk == NULL)
858 goto end; 858 goto end;
859 if (sk->sk_state != PPPOX_CONNECTED) 859 if (!(sk->sk_state & PPPOX_CONNECTED))
860 goto end; 860 goto end;
861 861
862 error = -EBADF; 862 error = -EBADF;
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 47e99ab8d97a..543b1d4fc33d 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -869,7 +869,7 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
869 869
870 /* free all potentially still buffered bcast frames */ 870 /* free all potentially still buffered bcast frames */
871 local->total_ps_buffered -= skb_queue_len(&sdata->u.ap.ps.bc_buf); 871 local->total_ps_buffered -= skb_queue_len(&sdata->u.ap.ps.bc_buf);
872 skb_queue_purge(&sdata->u.ap.ps.bc_buf); 872 ieee80211_purge_tx_queue(&local->hw, &sdata->u.ap.ps.bc_buf);
873 873
874 mutex_lock(&local->mtx); 874 mutex_lock(&local->mtx);
875 ieee80211_vif_copy_chanctx_to_vlans(sdata, true); 875 ieee80211_vif_copy_chanctx_to_vlans(sdata, true);
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 184473c257eb..ba5fc1f01e53 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -1094,7 +1094,7 @@ static inline u32 drv_get_expected_throughput(struct ieee80211_local *local,
1094 1094
1095 trace_drv_get_expected_throughput(sta); 1095 trace_drv_get_expected_throughput(sta);
1096 if (local->ops->get_expected_throughput) 1096 if (local->ops->get_expected_throughput)
1097 ret = local->ops->get_expected_throughput(sta); 1097 ret = local->ops->get_expected_throughput(&local->hw, sta);
1098 trace_drv_return_u32(local, ret); 1098 trace_drv_return_u32(local, ret);
1099 1099
1100 return ret; 1100 return ret;
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index c66411df9863..42120d965263 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -881,20 +881,22 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
881 881
882 netif_carrier_off(sdata->dev); 882 netif_carrier_off(sdata->dev);
883 883
884 /* flush STAs and mpaths on this iface */
885 sta_info_flush(sdata);
886 mesh_path_flush_by_iface(sdata);
887
884 /* stop the beacon */ 888 /* stop the beacon */
885 ifmsh->mesh_id_len = 0; 889 ifmsh->mesh_id_len = 0;
886 sdata->vif.bss_conf.enable_beacon = false; 890 sdata->vif.bss_conf.enable_beacon = false;
887 clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED, &sdata->state); 891 clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED, &sdata->state);
888 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED); 892 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED);
893
894 /* remove beacon */
889 bcn = rcu_dereference_protected(ifmsh->beacon, 895 bcn = rcu_dereference_protected(ifmsh->beacon,
890 lockdep_is_held(&sdata->wdev.mtx)); 896 lockdep_is_held(&sdata->wdev.mtx));
891 RCU_INIT_POINTER(ifmsh->beacon, NULL); 897 RCU_INIT_POINTER(ifmsh->beacon, NULL);
892 kfree_rcu(bcn, rcu_head); 898 kfree_rcu(bcn, rcu_head);
893 899
894 /* flush STAs and mpaths on this iface */
895 sta_info_flush(sdata);
896 mesh_path_flush_by_iface(sdata);
897
898 /* free all potentially still buffered group-addressed frames */ 900 /* free all potentially still buffered group-addressed frames */
899 local->total_ps_buffered -= skb_queue_len(&ifmsh->ps.bc_buf); 901 local->total_ps_buffered -= skb_queue_len(&ifmsh->ps.bc_buf);
900 skb_queue_purge(&ifmsh->ps.bc_buf); 902 skb_queue_purge(&ifmsh->ps.bc_buf);
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 2e8a9024625a..9dce3b157908 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -1268,7 +1268,7 @@ static void sta_ps_start(struct sta_info *sta)
1268 for (tid = 0; tid < ARRAY_SIZE(sta->sta.txq); tid++) { 1268 for (tid = 0; tid < ARRAY_SIZE(sta->sta.txq); tid++) {
1269 struct txq_info *txqi = to_txq_info(sta->sta.txq[tid]); 1269 struct txq_info *txqi = to_txq_info(sta->sta.txq[tid]);
1270 1270
1271 if (!txqi->tin.backlog_packets) 1271 if (txqi->tin.backlog_packets)
1272 set_bit(tid, &sta->txq_buffered_tids); 1272 set_bit(tid, &sta->txq_buffered_tids);
1273 else 1273 else
1274 clear_bit(tid, &sta->txq_buffered_tids); 1274 clear_bit(tid, &sta->txq_buffered_tids);
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index c6d5c724e032..a2a68269675d 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -771,6 +771,13 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
771 clear_sta_flag(sta, WLAN_STA_SP); 771 clear_sta_flag(sta, WLAN_STA_SP);
772 772
773 acked = !!(info->flags & IEEE80211_TX_STAT_ACK); 773 acked = !!(info->flags & IEEE80211_TX_STAT_ACK);
774
775 /* mesh Peer Service Period support */
776 if (ieee80211_vif_is_mesh(&sta->sdata->vif) &&
777 ieee80211_is_data_qos(fc))
778 ieee80211_mpsp_trigger_process(
779 ieee80211_get_qos_ctl(hdr), sta, true, acked);
780
774 if (!acked && test_sta_flag(sta, WLAN_STA_PS_STA)) { 781 if (!acked && test_sta_flag(sta, WLAN_STA_PS_STA)) {
775 /* 782 /*
776 * The STA is in power save mode, so assume 783 * The STA is in power save mode, so assume
@@ -781,13 +788,6 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
781 return; 788 return;
782 } 789 }
783 790
784 /* mesh Peer Service Period support */
785 if (ieee80211_vif_is_mesh(&sta->sdata->vif) &&
786 ieee80211_is_data_qos(fc))
787 ieee80211_mpsp_trigger_process(
788 ieee80211_get_qos_ctl(hdr),
789 sta, true, acked);
790
791 if (ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL) && 791 if (ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL) &&
792 (ieee80211_is_data(hdr->frame_control)) && 792 (ieee80211_is_data(hdr->frame_control)) &&
793 (rates_idx != -1)) 793 (rates_idx != -1))
diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c
index b5d28f14b9cf..afca7d103684 100644
--- a/net/mac80211/tdls.c
+++ b/net/mac80211/tdls.c
@@ -333,10 +333,11 @@ ieee80211_tdls_chandef_vht_upgrade(struct ieee80211_sub_if_data *sdata,
333 if (!uc.center_freq1) 333 if (!uc.center_freq1)
334 return; 334 return;
335 335
336 /* proceed to downgrade the chandef until usable or the same */ 336 /* proceed to downgrade the chandef until usable or the same as AP BW */
337 while (uc.width > max_width || 337 while (uc.width > max_width ||
338 !cfg80211_reg_can_beacon_relax(sdata->local->hw.wiphy, &uc, 338 (uc.width > sta->tdls_chandef.width &&
339 sdata->wdev.iftype)) 339 !cfg80211_reg_can_beacon_relax(sdata->local->hw.wiphy, &uc,
340 sdata->wdev.iftype)))
340 ieee80211_chandef_downgrade(&uc); 341 ieee80211_chandef_downgrade(&uc);
341 342
342 if (!cfg80211_chandef_identical(&uc, &sta->tdls_chandef)) { 343 if (!cfg80211_chandef_identical(&uc, &sta->tdls_chandef)) {
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 91461c415525..502396694f47 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -368,7 +368,7 @@ static void purge_old_ps_buffers(struct ieee80211_local *local)
368 skb = skb_dequeue(&ps->bc_buf); 368 skb = skb_dequeue(&ps->bc_buf);
369 if (skb) { 369 if (skb) {
370 purged++; 370 purged++;
371 dev_kfree_skb(skb); 371 ieee80211_free_txskb(&local->hw, skb);
372 } 372 }
373 total += skb_queue_len(&ps->bc_buf); 373 total += skb_queue_len(&ps->bc_buf);
374 } 374 }
@@ -451,7 +451,7 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
451 if (skb_queue_len(&ps->bc_buf) >= AP_MAX_BC_BUFFER) { 451 if (skb_queue_len(&ps->bc_buf) >= AP_MAX_BC_BUFFER) {
452 ps_dbg(tx->sdata, 452 ps_dbg(tx->sdata,
453 "BC TX buffer full - dropping the oldest frame\n"); 453 "BC TX buffer full - dropping the oldest frame\n");
454 dev_kfree_skb(skb_dequeue(&ps->bc_buf)); 454 ieee80211_free_txskb(&tx->local->hw, skb_dequeue(&ps->bc_buf));
455 } else 455 } else
456 tx->local->total_ps_buffered++; 456 tx->local->total_ps_buffered++;
457 457
@@ -4275,7 +4275,7 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
4275 sdata = IEEE80211_DEV_TO_SUB_IF(skb->dev); 4275 sdata = IEEE80211_DEV_TO_SUB_IF(skb->dev);
4276 if (!ieee80211_tx_prepare(sdata, &tx, NULL, skb)) 4276 if (!ieee80211_tx_prepare(sdata, &tx, NULL, skb))
4277 break; 4277 break;
4278 dev_kfree_skb_any(skb); 4278 ieee80211_free_txskb(hw, skb);
4279 } 4279 }
4280 4280
4281 info = IEEE80211_SKB_CB(skb); 4281 info = IEEE80211_SKB_CB(skb);
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index 9e3693128313..f8dbacf66795 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -574,7 +574,7 @@ static int exp_seq_show(struct seq_file *s, void *v)
574 helper = rcu_dereference(nfct_help(expect->master)->helper); 574 helper = rcu_dereference(nfct_help(expect->master)->helper);
575 if (helper) { 575 if (helper) {
576 seq_printf(s, "%s%s", expect->flags ? " " : "", helper->name); 576 seq_printf(s, "%s%s", expect->flags ? " " : "", helper->name);
577 if (helper->expect_policy[expect->class].name) 577 if (helper->expect_policy[expect->class].name[0])
578 seq_printf(s, "/%s", 578 seq_printf(s, "/%s",
579 helper->expect_policy[expect->class].name); 579 helper->expect_policy[expect->class].name);
580 } 580 }
diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c
index bb77a97961bf..5c0db5c64734 100644
--- a/net/netfilter/nf_conntrack_h323_main.c
+++ b/net/netfilter/nf_conntrack_h323_main.c
@@ -1473,7 +1473,8 @@ static int process_rcf(struct sk_buff *skb, struct nf_conn *ct,
1473 "timeout to %u seconds for", 1473 "timeout to %u seconds for",
1474 info->timeout); 1474 info->timeout);
1475 nf_ct_dump_tuple(&exp->tuple); 1475 nf_ct_dump_tuple(&exp->tuple);
1476 mod_timer(&exp->timeout, jiffies + info->timeout * HZ); 1476 mod_timer_pending(&exp->timeout,
1477 jiffies + info->timeout * HZ);
1477 } 1478 }
1478 spin_unlock_bh(&nf_conntrack_expect_lock); 1479 spin_unlock_bh(&nf_conntrack_expect_lock);
1479 } 1480 }
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 050bb3420a6b..fdfc71f416b7 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -1894,6 +1894,8 @@ static int ctnetlink_new_conntrack(struct net *net, struct sock *ctnl,
1894 1894
1895 if (!cda[CTA_TUPLE_ORIG] || !cda[CTA_TUPLE_REPLY]) 1895 if (!cda[CTA_TUPLE_ORIG] || !cda[CTA_TUPLE_REPLY])
1896 return -EINVAL; 1896 return -EINVAL;
1897 if (otuple.dst.protonum != rtuple.dst.protonum)
1898 return -EINVAL;
1897 1899
1898 ct = ctnetlink_create_conntrack(net, &zone, cda, &otuple, 1900 ct = ctnetlink_create_conntrack(net, &zone, cda, &otuple,
1899 &rtuple, u3); 1901 &rtuple, u3);
@@ -2362,12 +2364,8 @@ ctnetlink_glue_attach_expect(const struct nlattr *attr, struct nf_conn *ct,
2362 return PTR_ERR(exp); 2364 return PTR_ERR(exp);
2363 2365
2364 err = nf_ct_expect_related_report(exp, portid, report); 2366 err = nf_ct_expect_related_report(exp, portid, report);
2365 if (err < 0) { 2367 nf_ct_expect_put(exp);
2366 nf_ct_expect_put(exp); 2368 return err;
2367 return err;
2368 }
2369
2370 return 0;
2371} 2369}
2372 2370
2373static void ctnetlink_glue_seqadj(struct sk_buff *skb, struct nf_conn *ct, 2371static void ctnetlink_glue_seqadj(struct sk_buff *skb, struct nf_conn *ct,
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
index 8d9db9d4702b..7d77217de6a3 100644
--- a/net/netfilter/nf_conntrack_sip.c
+++ b/net/netfilter/nf_conntrack_sip.c
@@ -1383,7 +1383,7 @@ static int process_sip_response(struct sk_buff *skb, unsigned int protoff,
1383 return NF_DROP; 1383 return NF_DROP;
1384 } 1384 }
1385 cseq = simple_strtoul(*dptr + matchoff, NULL, 10); 1385 cseq = simple_strtoul(*dptr + matchoff, NULL, 10);
1386 if (!cseq) { 1386 if (!cseq && *(*dptr + matchoff) != '0') {
1387 nf_ct_helper_log(skb, ct, "cannot get cseq"); 1387 nf_ct_helper_log(skb, ct, "cannot get cseq");
1388 return NF_DROP; 1388 return NF_DROP;
1389 } 1389 }
@@ -1446,7 +1446,7 @@ static int process_sip_request(struct sk_buff *skb, unsigned int protoff,
1446 return NF_DROP; 1446 return NF_DROP;
1447 } 1447 }
1448 cseq = simple_strtoul(*dptr + matchoff, NULL, 10); 1448 cseq = simple_strtoul(*dptr + matchoff, NULL, 10);
1449 if (!cseq) { 1449 if (!cseq && *(*dptr + matchoff) != '0') {
1450 nf_ct_helper_log(skb, ct, "cannot get cseq"); 1450 nf_ct_helper_log(skb, ct, "cannot get cseq");
1451 return NF_DROP; 1451 return NF_DROP;
1452 } 1452 }
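
The SIP helper fix distinguishes a CSeq of literal "0" from "no digits parsed at all", which simple_strtoul() conflates by returning 0 in both cases; the patch peeks at the first character, while the sketch below uses the idiomatic userspace endptr check instead:

    #include <stdlib.h>
    #include <stdbool.h>

    /* Returns true and stores the value only when at least one digit
     * was actually consumed; plain strtoul() returns 0 both for "0"
     * and for non-numeric input. */
    static bool parse_cseq(const char *s, unsigned long *out)
    {
        char *end;
        unsigned long v = strtoul(s, &end, 10);

        if (end == s)        /* nothing parsed */
            return false;
        *out = v;
        return true;
    }
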
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index 958a1455ca7f..9f267c3ffb39 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -205,6 +205,7 @@ static int ct_seq_show(struct seq_file *s, void *v)
205 struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(hash); 205 struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(hash);
206 const struct nf_conntrack_l3proto *l3proto; 206 const struct nf_conntrack_l3proto *l3proto;
207 const struct nf_conntrack_l4proto *l4proto; 207 const struct nf_conntrack_l4proto *l4proto;
208 struct net *net = seq_file_net(s);
208 int ret = 0; 209 int ret = 0;
209 210
210 NF_CT_ASSERT(ct); 211 NF_CT_ASSERT(ct);
@@ -215,6 +216,9 @@ static int ct_seq_show(struct seq_file *s, void *v)
215 if (NF_CT_DIRECTION(hash)) 216 if (NF_CT_DIRECTION(hash))
216 goto release; 217 goto release;
217 218
219 if (!net_eq(nf_ct_net(ct), net))
220 goto release;
221
218 l3proto = __nf_ct_l3proto_find(nf_ct_l3num(ct)); 222 l3proto = __nf_ct_l3proto_find(nf_ct_l3num(ct));
219 NF_CT_ASSERT(l3proto); 223 NF_CT_ASSERT(l3proto);
220 l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct)); 224 l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
diff --git a/net/netfilter/nf_tables_netdev.c b/net/netfilter/nf_tables_netdev.c
index 5eefe4a355c6..75d696f11045 100644
--- a/net/netfilter/nf_tables_netdev.c
+++ b/net/netfilter/nf_tables_netdev.c
@@ -30,7 +30,6 @@ nft_netdev_set_pktinfo_ipv4(struct nft_pktinfo *pkt,
30 if (!iph) 30 if (!iph)
31 return; 31 return;
32 32
33 iph = ip_hdr(skb);
34 if (iph->ihl < 5 || iph->version != 4) 33 if (iph->ihl < 5 || iph->version != 4)
35 return; 34 return;
36 35
diff --git a/net/netfilter/nfnetlink_acct.c b/net/netfilter/nfnetlink_acct.c
index 1b4de4bd6958..d44d89b56127 100644
--- a/net/netfilter/nfnetlink_acct.c
+++ b/net/netfilter/nfnetlink_acct.c
@@ -326,14 +326,14 @@ static int nfnl_acct_try_del(struct nf_acct *cur)
326{ 326{
327 int ret = 0; 327 int ret = 0;
328 328
329 /* we want to avoid races with nfnl_acct_find_get. */ 329 /* We want to avoid races with nfnl_acct_put. So only when the current
330 if (atomic_dec_and_test(&cur->refcnt)) { 330 * refcnt is 1, we decrease it to 0.
331 */
332 if (atomic_cmpxchg(&cur->refcnt, 1, 0) == 1) {
331 /* We are protected by nfnl mutex. */ 333 /* We are protected by nfnl mutex. */
332 list_del_rcu(&cur->head); 334 list_del_rcu(&cur->head);
333 kfree_rcu(cur, rcu_head); 335 kfree_rcu(cur, rcu_head);
334 } else { 336 } else {
335 /* still in use, restore reference counter. */
336 atomic_inc(&cur->refcnt);
337 ret = -EBUSY; 337 ret = -EBUSY;
338 } 338 }
339 return ret; 339 return ret;
@@ -343,12 +343,12 @@ static int nfnl_acct_del(struct net *net, struct sock *nfnl,
343 struct sk_buff *skb, const struct nlmsghdr *nlh, 343 struct sk_buff *skb, const struct nlmsghdr *nlh,
344 const struct nlattr * const tb[]) 344 const struct nlattr * const tb[])
345{ 345{
346 char *acct_name; 346 struct nf_acct *cur, *tmp;
347 struct nf_acct *cur;
348 int ret = -ENOENT; 347 int ret = -ENOENT;
348 char *acct_name;
349 349
350 if (!tb[NFACCT_NAME]) { 350 if (!tb[NFACCT_NAME]) {
351 list_for_each_entry(cur, &net->nfnl_acct_list, head) 351 list_for_each_entry_safe(cur, tmp, &net->nfnl_acct_list, head)
352 nfnl_acct_try_del(cur); 352 nfnl_acct_try_del(cur);
353 353
354 return 0; 354 return 0;
@@ -443,7 +443,7 @@ void nfnl_acct_update(const struct sk_buff *skb, struct nf_acct *nfacct)
443} 443}
444EXPORT_SYMBOL_GPL(nfnl_acct_update); 444EXPORT_SYMBOL_GPL(nfnl_acct_update);
445 445
446static void nfnl_overquota_report(struct nf_acct *nfacct) 446static void nfnl_overquota_report(struct net *net, struct nf_acct *nfacct)
447{ 447{
448 int ret; 448 int ret;
449 struct sk_buff *skb; 449 struct sk_buff *skb;
@@ -458,11 +458,12 @@ static void nfnl_overquota_report(struct nf_acct *nfacct)
458 kfree_skb(skb); 458 kfree_skb(skb);
459 return; 459 return;
460 } 460 }
461 netlink_broadcast(init_net.nfnl, skb, 0, NFNLGRP_ACCT_QUOTA, 461 netlink_broadcast(net->nfnl, skb, 0, NFNLGRP_ACCT_QUOTA,
462 GFP_ATOMIC); 462 GFP_ATOMIC);
463} 463}
464 464
465int nfnl_acct_overquota(const struct sk_buff *skb, struct nf_acct *nfacct) 465int nfnl_acct_overquota(struct net *net, const struct sk_buff *skb,
466 struct nf_acct *nfacct)
466{ 467{
467 u64 now; 468 u64 now;
468 u64 *quota; 469 u64 *quota;
@@ -480,7 +481,7 @@ int nfnl_acct_overquota(const struct sk_buff *skb, struct nf_acct *nfacct)
480 481
481 if (now >= *quota && 482 if (now >= *quota &&
482 !test_and_set_bit(NFACCT_OVERQUOTA_BIT, &nfacct->flags)) { 483 !test_and_set_bit(NFACCT_OVERQUOTA_BIT, &nfacct->flags)) {
483 nfnl_overquota_report(nfacct); 484 nfnl_overquota_report(net, nfacct);
484 } 485 }
485 486
486 return ret; 487 return ret;
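
nfnl_acct_try_del() now deletes an entry only when it can atomically move the refcount from 1 to 0, closing the window in which the old dec-then-re-inc sequence could race a concurrent nfnl_acct_put(). A C11 analog of the try-delete side; struct and function names are illustrative:

    #include <stdatomic.h>
    #include <errno.h>

    struct acct { atomic_int refcnt; /* ... */ };

    /* Delete only if we are the sole owner: 1 -> 0 atomically.
     * Any other value means the object is still in use. */
    static int try_del(struct acct *a)
    {
        int expected = 1;

        if (atomic_compare_exchange_strong(&a->refcnt, &expected, 0)) {
            /* sole owner: safe to unlink and free (RCU-deferred
             * in the kernel; a plain free() would go here). */
            return 0;
        }
        return -EBUSY;
    }
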
diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c
index 4cdcd969b64c..139e0867e56e 100644
--- a/net/netfilter/nfnetlink_cttimeout.c
+++ b/net/netfilter/nfnetlink_cttimeout.c
@@ -98,31 +98,28 @@ static int cttimeout_new_timeout(struct net *net, struct sock *ctnl,
98 break; 98 break;
99 } 99 }
100 100
101 l4proto = nf_ct_l4proto_find_get(l3num, l4num);
102
103 /* This protocol is not supportted, skip. */
104 if (l4proto->l4proto != l4num) {
105 ret = -EOPNOTSUPP;
106 goto err_proto_put;
107 }
108
109 if (matching) { 101 if (matching) {
110 if (nlh->nlmsg_flags & NLM_F_REPLACE) { 102 if (nlh->nlmsg_flags & NLM_F_REPLACE) {
111 /* You cannot replace one timeout policy by another of 103 /* You cannot replace one timeout policy by another of
112 * different kind, sorry. 104 * different kind, sorry.
113 */ 105 */
114 if (matching->l3num != l3num || 106 if (matching->l3num != l3num ||
115 matching->l4proto->l4proto != l4num) { 107 matching->l4proto->l4proto != l4num)
116 ret = -EINVAL; 108 return -EINVAL;
117 goto err_proto_put; 109
118 } 110 return ctnl_timeout_parse_policy(&matching->data,
119 111 matching->l4proto, net,
120 ret = ctnl_timeout_parse_policy(&matching->data, 112 cda[CTA_TIMEOUT_DATA]);
121 l4proto, net,
122 cda[CTA_TIMEOUT_DATA]);
123 return ret;
124 } 113 }
125 ret = -EBUSY; 114
115 return -EBUSY;
116 }
117
118 l4proto = nf_ct_l4proto_find_get(l3num, l4num);
119
 120 /* This protocol is not supported, skip. */
121 if (l4proto->l4proto != l4num) {
122 ret = -EOPNOTSUPP;
126 goto err_proto_put; 123 goto err_proto_put;
127 } 124 }
128 125
@@ -305,7 +302,16 @@ static void ctnl_untimeout(struct net *net, struct ctnl_timeout *timeout)
305 const struct hlist_nulls_node *nn; 302 const struct hlist_nulls_node *nn;
306 unsigned int last_hsize; 303 unsigned int last_hsize;
307 spinlock_t *lock; 304 spinlock_t *lock;
308 int i; 305 int i, cpu;
306
307 for_each_possible_cpu(cpu) {
308 struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
309
310 spin_lock_bh(&pcpu->lock);
311 hlist_nulls_for_each_entry(h, nn, &pcpu->unconfirmed, hnnode)
312 untimeout(h, timeout);
313 spin_unlock_bh(&pcpu->lock);
314 }
309 315
310 local_bh_disable(); 316 local_bh_disable();
311restart: 317restart:
@@ -330,16 +336,16 @@ static int ctnl_timeout_try_del(struct net *net, struct ctnl_timeout *timeout)
330{ 336{
331 int ret = 0; 337 int ret = 0;
332 338
333 /* we want to avoid races with nf_ct_timeout_find_get. */ 339 /* We want to avoid races with ctnl_timeout_put. So only when the
334 if (atomic_dec_and_test(&timeout->refcnt)) { 340 * current refcnt is 1, we decrease it to 0.
341 */
342 if (atomic_cmpxchg(&timeout->refcnt, 1, 0) == 1) {
335 /* We are protected by nfnl mutex. */ 343 /* We are protected by nfnl mutex. */
336 list_del_rcu(&timeout->head); 344 list_del_rcu(&timeout->head);
337 nf_ct_l4proto_put(timeout->l4proto); 345 nf_ct_l4proto_put(timeout->l4proto);
338 ctnl_untimeout(net, timeout); 346 ctnl_untimeout(net, timeout);
339 kfree_rcu(timeout, rcu_head); 347 kfree_rcu(timeout, rcu_head);
340 } else { 348 } else {
341 /* still in use, restore reference counter. */
342 atomic_inc(&timeout->refcnt);
343 ret = -EBUSY; 349 ret = -EBUSY;
344 } 350 }
345 return ret; 351 return ret;
@@ -350,12 +356,13 @@ static int cttimeout_del_timeout(struct net *net, struct sock *ctnl,
350 const struct nlmsghdr *nlh, 356 const struct nlmsghdr *nlh,
351 const struct nlattr * const cda[]) 357 const struct nlattr * const cda[])
352{ 358{
353 struct ctnl_timeout *cur; 359 struct ctnl_timeout *cur, *tmp;
354 int ret = -ENOENT; 360 int ret = -ENOENT;
355 char *name; 361 char *name;
356 362
357 if (!cda[CTA_TIMEOUT_NAME]) { 363 if (!cda[CTA_TIMEOUT_NAME]) {
358 list_for_each_entry(cur, &net->nfct_timeout_list, head) 364 list_for_each_entry_safe(cur, tmp, &net->nfct_timeout_list,
365 head)
359 ctnl_timeout_try_del(net, cur); 366 ctnl_timeout_try_del(net, cur);
360 367
361 return 0; 368 return 0;
@@ -543,7 +550,9 @@ err:
543 550
544static void ctnl_timeout_put(struct ctnl_timeout *timeout) 551static void ctnl_timeout_put(struct ctnl_timeout *timeout)
545{ 552{
546 atomic_dec(&timeout->refcnt); 553 if (atomic_dec_and_test(&timeout->refcnt))
554 kfree_rcu(timeout, rcu_head);
555
547 module_put(THIS_MODULE); 556 module_put(THIS_MODULE);
548} 557}
549#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ 558#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
@@ -591,7 +600,9 @@ static void __net_exit cttimeout_net_exit(struct net *net)
591 list_for_each_entry_safe(cur, tmp, &net->nfct_timeout_list, head) { 600 list_for_each_entry_safe(cur, tmp, &net->nfct_timeout_list, head) {
592 list_del_rcu(&cur->head); 601 list_del_rcu(&cur->head);
593 nf_ct_l4proto_put(cur->l4proto); 602 nf_ct_l4proto_put(cur->l4proto);
594 kfree_rcu(cur, rcu_head); 603
604 if (atomic_dec_and_test(&cur->refcnt))
605 kfree_rcu(cur, rcu_head);
595 } 606 }
596} 607}
597 608
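
The put side gets the complementary treatment: ctnl_timeout_put() and cttimeout_net_exit() now free the object when they drop the last reference, instead of leaving a zero-count object behind (the kernel defers the free through kfree_rcu()). In the same illustrative C11 style:

    #include <stdatomic.h>
    #include <stdlib.h>

    struct timeout_obj { atomic_int refcnt; /* ... */ };

    /* Whoever drops the count to zero frees the object; in the
     * kernel the free is kfree_rcu() so that readers drain first. */
    static void timeout_put(struct timeout_obj *t)
    {
        if (atomic_fetch_sub(&t->refcnt, 1) == 1)
            free(t);
    }
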
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index cbcfdfb586a6..6577db524ef6 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -1147,6 +1147,7 @@ MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_ULOG);
1147MODULE_ALIAS_NF_LOGGER(AF_INET, 1); 1147MODULE_ALIAS_NF_LOGGER(AF_INET, 1);
1148MODULE_ALIAS_NF_LOGGER(AF_INET6, 1); 1148MODULE_ALIAS_NF_LOGGER(AF_INET6, 1);
1149MODULE_ALIAS_NF_LOGGER(AF_BRIDGE, 1); 1149MODULE_ALIAS_NF_LOGGER(AF_BRIDGE, 1);
1150MODULE_ALIAS_NF_LOGGER(3, 1); /* NFPROTO_ARP */
1150 1151
1151module_init(nfnetlink_log_init); 1152module_init(nfnetlink_log_init);
1152module_exit(nfnetlink_log_fini); 1153module_exit(nfnetlink_log_fini);
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index 5d36a0926b4a..f49f45081acb 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -1145,10 +1145,8 @@ static int nfqnl_recv_verdict(struct net *net, struct sock *ctnl,
1145 struct nfnl_queue_net *q = nfnl_queue_pernet(net); 1145 struct nfnl_queue_net *q = nfnl_queue_pernet(net);
1146 int err; 1146 int err;
1147 1147
1148 queue = instance_lookup(q, queue_num); 1148 queue = verdict_instance_lookup(q, queue_num,
1149 if (!queue) 1149 NETLINK_CB(skb).portid);
1150 queue = verdict_instance_lookup(q, queue_num,
1151 NETLINK_CB(skb).portid);
1152 if (IS_ERR(queue)) 1150 if (IS_ERR(queue))
1153 return PTR_ERR(queue); 1151 return PTR_ERR(queue);
1154 1152
diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c
index ba7aed13e174..82c264e40278 100644
--- a/net/netfilter/nft_exthdr.c
+++ b/net/netfilter/nft_exthdr.c
@@ -59,6 +59,7 @@ static int nft_exthdr_init(const struct nft_ctx *ctx,
59 const struct nlattr * const tb[]) 59 const struct nlattr * const tb[])
60{ 60{
61 struct nft_exthdr *priv = nft_expr_priv(expr); 61 struct nft_exthdr *priv = nft_expr_priv(expr);
62 u32 offset, len;
62 63
63 if (tb[NFTA_EXTHDR_DREG] == NULL || 64 if (tb[NFTA_EXTHDR_DREG] == NULL ||
64 tb[NFTA_EXTHDR_TYPE] == NULL || 65 tb[NFTA_EXTHDR_TYPE] == NULL ||
@@ -66,9 +67,15 @@ static int nft_exthdr_init(const struct nft_ctx *ctx,
66 tb[NFTA_EXTHDR_LEN] == NULL) 67 tb[NFTA_EXTHDR_LEN] == NULL)
67 return -EINVAL; 68 return -EINVAL;
68 69
70 offset = ntohl(nla_get_be32(tb[NFTA_EXTHDR_OFFSET]));
71 len = ntohl(nla_get_be32(tb[NFTA_EXTHDR_LEN]));
72
73 if (offset > U8_MAX || len > U8_MAX)
74 return -ERANGE;
75
69 priv->type = nla_get_u8(tb[NFTA_EXTHDR_TYPE]); 76 priv->type = nla_get_u8(tb[NFTA_EXTHDR_TYPE]);
70 priv->offset = ntohl(nla_get_be32(tb[NFTA_EXTHDR_OFFSET])); 77 priv->offset = offset;
71 priv->len = ntohl(nla_get_be32(tb[NFTA_EXTHDR_LEN])); 78 priv->len = len;
72 priv->dreg = nft_parse_register(tb[NFTA_EXTHDR_DREG]); 79 priv->dreg = nft_parse_register(tb[NFTA_EXTHDR_DREG]);
73 80
74 return nft_validate_register_store(ctx, priv->dreg, NULL, 81 return nft_validate_register_store(ctx, priv->dreg, NULL,
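
nft_exthdr_init() now reads the 32-bit netlink attributes into temporaries and rejects values that cannot fit the u8 fields, where the old assignments silently truncated. The shape of the check; the wire_* parameters stand in for the ntohl(nla_get_be32(...)) results:

    #include <stdint.h>
    #include <errno.h>

    struct exthdr_priv { uint8_t type, offset, len; };

    /* Validate the wide wire values before narrowing them. */
    static int exthdr_init(struct exthdr_priv *p,
                           uint32_t wire_offset, uint32_t wire_len)
    {
        if (wire_offset > UINT8_MAX || wire_len > UINT8_MAX)
            return -ERANGE;

        p->offset = (uint8_t)wire_offset;
        p->len    = (uint8_t)wire_len;
        return 0;
    }
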
diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c
index 2863f3493038..8a6bc7630912 100644
--- a/net/netfilter/nft_meta.c
+++ b/net/netfilter/nft_meta.c
@@ -291,10 +291,16 @@ int nft_meta_get_init(const struct nft_ctx *ctx,
291} 291}
292EXPORT_SYMBOL_GPL(nft_meta_get_init); 292EXPORT_SYMBOL_GPL(nft_meta_get_init);
293 293
294static int nft_meta_set_init_pkttype(const struct nft_ctx *ctx) 294int nft_meta_set_validate(const struct nft_ctx *ctx,
295 const struct nft_expr *expr,
296 const struct nft_data **data)
295{ 297{
298 struct nft_meta *priv = nft_expr_priv(expr);
296 unsigned int hooks; 299 unsigned int hooks;
297 300
301 if (priv->key != NFT_META_PKTTYPE)
302 return 0;
303
298 switch (ctx->afi->family) { 304 switch (ctx->afi->family) {
299 case NFPROTO_BRIDGE: 305 case NFPROTO_BRIDGE:
300 hooks = 1 << NF_BR_PRE_ROUTING; 306 hooks = 1 << NF_BR_PRE_ROUTING;
@@ -308,6 +314,7 @@ static int nft_meta_set_init_pkttype(const struct nft_ctx *ctx)
308 314
309 return nft_chain_validate_hooks(ctx->chain, hooks); 315 return nft_chain_validate_hooks(ctx->chain, hooks);
310} 316}
317EXPORT_SYMBOL_GPL(nft_meta_set_validate);
311 318
312int nft_meta_set_init(const struct nft_ctx *ctx, 319int nft_meta_set_init(const struct nft_ctx *ctx,
313 const struct nft_expr *expr, 320 const struct nft_expr *expr,
@@ -327,15 +334,16 @@ int nft_meta_set_init(const struct nft_ctx *ctx,
327 len = sizeof(u8); 334 len = sizeof(u8);
328 break; 335 break;
329 case NFT_META_PKTTYPE: 336 case NFT_META_PKTTYPE:
330 err = nft_meta_set_init_pkttype(ctx);
331 if (err)
332 return err;
333 len = sizeof(u8); 337 len = sizeof(u8);
334 break; 338 break;
335 default: 339 default:
336 return -EOPNOTSUPP; 340 return -EOPNOTSUPP;
337 } 341 }
338 342
343 err = nft_meta_set_validate(ctx, expr, NULL);
344 if (err < 0)
345 return err;
346
339 priv->sreg = nft_parse_register(tb[NFTA_META_SREG]); 347 priv->sreg = nft_parse_register(tb[NFTA_META_SREG]);
340 err = nft_validate_register_load(priv->sreg, len); 348 err = nft_validate_register_load(priv->sreg, len);
341 if (err < 0) 349 if (err < 0)
@@ -407,6 +415,7 @@ static const struct nft_expr_ops nft_meta_set_ops = {
407 .init = nft_meta_set_init, 415 .init = nft_meta_set_init,
408 .destroy = nft_meta_set_destroy, 416 .destroy = nft_meta_set_destroy,
409 .dump = nft_meta_set_dump, 417 .dump = nft_meta_set_dump,
418 .validate = nft_meta_set_validate,
410}; 419};
411 420
412static const struct nft_expr_ops * 421static const struct nft_expr_ops *
diff --git a/net/netfilter/nft_rbtree.c b/net/netfilter/nft_rbtree.c
index 6473936d05c6..ffe9ae062d23 100644
--- a/net/netfilter/nft_rbtree.c
+++ b/net/netfilter/nft_rbtree.c
@@ -70,7 +70,6 @@ static bool nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
70 } else if (d > 0) 70 } else if (d > 0)
71 parent = parent->rb_right; 71 parent = parent->rb_right;
72 else { 72 else {
73found:
74 if (!nft_set_elem_active(&rbe->ext, genmask)) { 73 if (!nft_set_elem_active(&rbe->ext, genmask)) {
75 parent = parent->rb_left; 74 parent = parent->rb_left;
76 continue; 75 continue;
@@ -84,9 +83,12 @@ found:
84 } 83 }
85 } 84 }
86 85
87 if (set->flags & NFT_SET_INTERVAL && interval != NULL) { 86 if (set->flags & NFT_SET_INTERVAL && interval != NULL &&
88 rbe = interval; 87 nft_set_elem_active(&interval->ext, genmask) &&
89 goto found; 88 !nft_rbtree_interval_end(interval)) {
89 spin_unlock_bh(&nft_rbtree_lock);
90 *ext = &interval->ext;
91 return true;
90 } 92 }
91out: 93out:
92 spin_unlock_bh(&nft_rbtree_lock); 94 spin_unlock_bh(&nft_rbtree_lock);
diff --git a/net/netfilter/nft_reject.c b/net/netfilter/nft_reject.c
index 0522fc9bfb0a..c64de3f7379d 100644
--- a/net/netfilter/nft_reject.c
+++ b/net/netfilter/nft_reject.c
@@ -26,11 +26,27 @@ const struct nla_policy nft_reject_policy[NFTA_REJECT_MAX + 1] = {
26}; 26};
27EXPORT_SYMBOL_GPL(nft_reject_policy); 27EXPORT_SYMBOL_GPL(nft_reject_policy);
28 28
29int nft_reject_validate(const struct nft_ctx *ctx,
30 const struct nft_expr *expr,
31 const struct nft_data **data)
32{
33 return nft_chain_validate_hooks(ctx->chain,
34 (1 << NF_INET_LOCAL_IN) |
35 (1 << NF_INET_FORWARD) |
36 (1 << NF_INET_LOCAL_OUT));
37}
38EXPORT_SYMBOL_GPL(nft_reject_validate);
39
29int nft_reject_init(const struct nft_ctx *ctx, 40int nft_reject_init(const struct nft_ctx *ctx,
30 const struct nft_expr *expr, 41 const struct nft_expr *expr,
31 const struct nlattr * const tb[]) 42 const struct nlattr * const tb[])
32{ 43{
33 struct nft_reject *priv = nft_expr_priv(expr); 44 struct nft_reject *priv = nft_expr_priv(expr);
45 int err;
46
47 err = nft_reject_validate(ctx, expr, NULL);
48 if (err < 0)
49 return err;
34 50
35 if (tb[NFTA_REJECT_TYPE] == NULL) 51 if (tb[NFTA_REJECT_TYPE] == NULL)
36 return -EINVAL; 52 return -EINVAL;
diff --git a/net/netfilter/nft_reject_inet.c b/net/netfilter/nft_reject_inet.c
index 759ca5248a3d..e79d9ca2ffee 100644
--- a/net/netfilter/nft_reject_inet.c
+++ b/net/netfilter/nft_reject_inet.c
@@ -66,7 +66,11 @@ static int nft_reject_inet_init(const struct nft_ctx *ctx,
66 const struct nlattr * const tb[]) 66 const struct nlattr * const tb[])
67{ 67{
68 struct nft_reject *priv = nft_expr_priv(expr); 68 struct nft_reject *priv = nft_expr_priv(expr);
69 int icmp_code; 69 int icmp_code, err;
70
71 err = nft_reject_validate(ctx, expr, NULL);
72 if (err < 0)
73 return err;
70 74
71 if (tb[NFTA_REJECT_TYPE] == NULL) 75 if (tb[NFTA_REJECT_TYPE] == NULL)
72 return -EINVAL; 76 return -EINVAL;
@@ -124,6 +128,7 @@ static const struct nft_expr_ops nft_reject_inet_ops = {
124 .eval = nft_reject_inet_eval, 128 .eval = nft_reject_inet_eval,
125 .init = nft_reject_inet_init, 129 .init = nft_reject_inet_init,
126 .dump = nft_reject_inet_dump, 130 .dump = nft_reject_inet_dump,
131 .validate = nft_reject_validate,
127}; 132};
128 133
129static struct nft_expr_type nft_reject_inet_type __read_mostly = { 134static struct nft_expr_type nft_reject_inet_type __read_mostly = {
diff --git a/net/netfilter/xt_TPROXY.c b/net/netfilter/xt_TPROXY.c
index 7f4414d26a66..663c4c3c9072 100644
--- a/net/netfilter/xt_TPROXY.c
+++ b/net/netfilter/xt_TPROXY.c
@@ -127,6 +127,8 @@ nf_tproxy_get_sock_v4(struct net *net, struct sk_buff *skb, void *hp,
127 daddr, dport, 127 daddr, dport,
128 in->ifindex); 128 in->ifindex);
129 129
130 if (sk && !atomic_inc_not_zero(&sk->sk_refcnt))
131 sk = NULL;
130 /* NOTE: we return listeners even if bound to 132 /* NOTE: we return listeners even if bound to
131 * 0.0.0.0, those are filtered out in 133 * 0.0.0.0, those are filtered out in
132 * xt_socket, since xt_TPROXY needs 0 bound 134 * xt_socket, since xt_TPROXY needs 0 bound
@@ -195,6 +197,8 @@ nf_tproxy_get_sock_v6(struct net *net, struct sk_buff *skb, int thoff, void *hp,
195 daddr, ntohs(dport), 197 daddr, ntohs(dport),
196 in->ifindex); 198 in->ifindex);
197 199
200 if (sk && !atomic_inc_not_zero(&sk->sk_refcnt))
201 sk = NULL;
198 /* NOTE: we return listeners even if bound to 202 /* NOTE: we return listeners even if bound to
199 * 0.0.0.0, those are filtered out in 203 * 0.0.0.0, those are filtered out in
200 * xt_socket, since xt_TPROXY needs 0 bound 204 * xt_socket, since xt_TPROXY needs 0 bound
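
Both TPROXY lookup paths now take their socket reference with atomic_inc_not_zero() and map failure to "no socket", since an entry found under RCU may already have dropped its last reference. The kernel supplies inc-not-zero as a primitive; this sketch spells out the underlying CAS loop:

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Take a reference only if the object is still live (count > 0).
     * Returns false when the count already hit zero, i.e. the object
     * is being torn down and must be treated as not found. */
    static bool inc_not_zero(atomic_int *refcnt)
    {
        int v = atomic_load(refcnt);

        while (v != 0) {
            if (atomic_compare_exchange_weak(refcnt, &v, v + 1))
                return true;
            /* v was reloaded by the failed CAS; retry. */
        }
        return false;
    }

On failure the caller simply sets sk = NULL, exactly as the hunks above do.
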
diff --git a/net/netfilter/xt_nfacct.c b/net/netfilter/xt_nfacct.c
index 3048a7e3a90a..cf327593852a 100644
--- a/net/netfilter/xt_nfacct.c
+++ b/net/netfilter/xt_nfacct.c
@@ -26,7 +26,7 @@ static bool nfacct_mt(const struct sk_buff *skb, struct xt_action_param *par)
26 26
27 nfnl_acct_update(skb, info->nfacct); 27 nfnl_acct_update(skb, info->nfacct);
28 28
29 overquota = nfnl_acct_overquota(skb, info->nfacct); 29 overquota = nfnl_acct_overquota(par->net, skb, info->nfacct);
30 30
31 return overquota == NFACCT_UNDERQUOTA ? false : true; 31 return overquota == NFACCT_UNDERQUOTA ? false : true;
32} 32}
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index c644c78ed485..e054a748ff25 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -433,7 +433,6 @@ ovs_ct_find_existing(struct net *net, const struct nf_conntrack_zone *zone,
433 struct nf_conntrack_l4proto *l4proto; 433 struct nf_conntrack_l4proto *l4proto;
434 struct nf_conntrack_tuple tuple; 434 struct nf_conntrack_tuple tuple;
435 struct nf_conntrack_tuple_hash *h; 435 struct nf_conntrack_tuple_hash *h;
436 enum ip_conntrack_info ctinfo;
437 struct nf_conn *ct; 436 struct nf_conn *ct;
438 unsigned int dataoff; 437 unsigned int dataoff;
439 u8 protonum; 438 u8 protonum;
@@ -458,13 +457,8 @@ ovs_ct_find_existing(struct net *net, const struct nf_conntrack_zone *zone,
458 457
459 ct = nf_ct_tuplehash_to_ctrack(h); 458 ct = nf_ct_tuplehash_to_ctrack(h);
460 459
461 ctinfo = ovs_ct_get_info(h);
462 if (ctinfo == IP_CT_NEW) {
463 /* This should not happen. */
464 WARN_ONCE(1, "ovs_ct_find_existing: new packet for %p\n", ct);
465 }
466 skb->nfct = &ct->ct_general; 460 skb->nfct = &ct->ct_general;
467 skb->nfctinfo = ctinfo; 461 skb->nfctinfo = ovs_ct_get_info(h);
468 return ct; 462 return ct;
469} 463}
470 464
diff --git a/net/openvswitch/vport-geneve.c b/net/openvswitch/vport-geneve.c
index 1a1fcec88695..5aaf3babfc3f 100644
--- a/net/openvswitch/vport-geneve.c
+++ b/net/openvswitch/vport-geneve.c
@@ -93,7 +93,14 @@ static struct vport *geneve_tnl_create(const struct vport_parms *parms)
93 return ERR_CAST(dev); 93 return ERR_CAST(dev);
94 } 94 }
95 95
96 dev_change_flags(dev, dev->flags | IFF_UP); 96 err = dev_change_flags(dev, dev->flags | IFF_UP);
97 if (err < 0) {
98 rtnl_delete_link(dev);
99 rtnl_unlock();
100 ovs_vport_free(vport);
101 goto error;
102 }
103
97 rtnl_unlock(); 104 rtnl_unlock();
98 return vport; 105 return vport;
99error: 106error:
diff --git a/net/openvswitch/vport-gre.c b/net/openvswitch/vport-gre.c
index 7f8897f33a67..0e72d95b0e8f 100644
--- a/net/openvswitch/vport-gre.c
+++ b/net/openvswitch/vport-gre.c
@@ -54,6 +54,7 @@ static struct vport *gre_tnl_create(const struct vport_parms *parms)
54 struct net *net = ovs_dp_get_net(parms->dp); 54 struct net *net = ovs_dp_get_net(parms->dp);
55 struct net_device *dev; 55 struct net_device *dev;
56 struct vport *vport; 56 struct vport *vport;
57 int err;
57 58
58 vport = ovs_vport_alloc(0, &ovs_gre_vport_ops, parms); 59 vport = ovs_vport_alloc(0, &ovs_gre_vport_ops, parms);
59 if (IS_ERR(vport)) 60 if (IS_ERR(vport))
@@ -67,9 +68,15 @@ static struct vport *gre_tnl_create(const struct vport_parms *parms)
67 return ERR_CAST(dev); 68 return ERR_CAST(dev);
68 } 69 }
69 70
70 dev_change_flags(dev, dev->flags | IFF_UP); 71 err = dev_change_flags(dev, dev->flags | IFF_UP);
71 rtnl_unlock(); 72 if (err < 0) {
73 rtnl_delete_link(dev);
74 rtnl_unlock();
75 ovs_vport_free(vport);
76 return ERR_PTR(err);
77 }
72 78
79 rtnl_unlock();
73 return vport; 80 return vport;
74} 81}
75 82
diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c
index 434e04c3a189..95c36147a6e1 100644
--- a/net/openvswitch/vport-internal_dev.c
+++ b/net/openvswitch/vport-internal_dev.c
@@ -140,7 +140,7 @@ internal_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
140 140
141static void internal_set_rx_headroom(struct net_device *dev, int new_hr) 141static void internal_set_rx_headroom(struct net_device *dev, int new_hr)
142{ 142{
143 dev->needed_headroom = new_hr; 143 dev->needed_headroom = new_hr < 0 ? 0 : new_hr;
144} 144}
145 145
146static const struct net_device_ops internal_dev_netdev_ops = { 146static const struct net_device_ops internal_dev_netdev_ops = {
diff --git a/net/openvswitch/vport-vxlan.c b/net/openvswitch/vport-vxlan.c
index 5eb7694348b5..7eb955e453e6 100644
--- a/net/openvswitch/vport-vxlan.c
+++ b/net/openvswitch/vport-vxlan.c
@@ -130,7 +130,14 @@ static struct vport *vxlan_tnl_create(const struct vport_parms *parms)
130 return ERR_CAST(dev); 130 return ERR_CAST(dev);
131 } 131 }
132 132
133 dev_change_flags(dev, dev->flags | IFF_UP); 133 err = dev_change_flags(dev, dev->flags | IFF_UP);
134 if (err < 0) {
135 rtnl_delete_link(dev);
136 rtnl_unlock();
137 ovs_vport_free(vport);
138 goto error;
139 }
140
134 rtnl_unlock(); 141 rtnl_unlock();
135 return vport; 142 return vport;
136error: 143error:
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index 1bb9e7ac9e14..ff83fb1ddd47 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -425,6 +425,7 @@ struct rxrpc_call {
425 spinlock_t lock; 425 spinlock_t lock;
426 rwlock_t state_lock; /* lock for state transition */ 426 rwlock_t state_lock; /* lock for state transition */
427 atomic_t usage; 427 atomic_t usage;
428 atomic_t skb_count; /* Outstanding packets on this call */
428 atomic_t sequence; /* Tx data packet sequence counter */ 429 atomic_t sequence; /* Tx data packet sequence counter */
429 u32 local_abort; /* local abort code */ 430 u32 local_abort; /* local abort code */
430 u32 remote_abort; /* remote abort code */ 431 u32 remote_abort; /* remote abort code */
diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c
index 0b2832141bd0..9bae21e66d65 100644
--- a/net/rxrpc/call_accept.c
+++ b/net/rxrpc/call_accept.c
@@ -130,6 +130,7 @@ static int rxrpc_accept_incoming_call(struct rxrpc_local *local,
130 call->state = RXRPC_CALL_SERVER_ACCEPTING; 130 call->state = RXRPC_CALL_SERVER_ACCEPTING;
131 list_add_tail(&call->accept_link, &rx->acceptq); 131 list_add_tail(&call->accept_link, &rx->acceptq);
132 rxrpc_get_call(call); 132 rxrpc_get_call(call);
133 atomic_inc(&call->skb_count);
133 nsp = rxrpc_skb(notification); 134 nsp = rxrpc_skb(notification);
134 nsp->call = call; 135 nsp->call = call;
135 136
diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c
index fc32aa5764a2..e60cf65c2232 100644
--- a/net/rxrpc/call_event.c
+++ b/net/rxrpc/call_event.c
@@ -460,6 +460,7 @@ static void rxrpc_insert_oos_packet(struct rxrpc_call *call,
460 ASSERTCMP(sp->call, ==, NULL); 460 ASSERTCMP(sp->call, ==, NULL);
461 sp->call = call; 461 sp->call = call;
462 rxrpc_get_call(call); 462 rxrpc_get_call(call);
463 atomic_inc(&call->skb_count);
463 464
464 /* insert into the buffer in sequence order */ 465 /* insert into the buffer in sequence order */
465 spin_lock_bh(&call->lock); 466 spin_lock_bh(&call->lock);
@@ -734,6 +735,7 @@ all_acked:
734 skb->mark = RXRPC_SKB_MARK_FINAL_ACK; 735 skb->mark = RXRPC_SKB_MARK_FINAL_ACK;
735 sp->call = call; 736 sp->call = call;
736 rxrpc_get_call(call); 737 rxrpc_get_call(call);
738 atomic_inc(&call->skb_count);
737 spin_lock_bh(&call->lock); 739 spin_lock_bh(&call->lock);
738 if (rxrpc_queue_rcv_skb(call, skb, true, true) < 0) 740 if (rxrpc_queue_rcv_skb(call, skb, true, true) < 0)
739 BUG(); 741 BUG();
@@ -793,6 +795,7 @@ static int rxrpc_post_message(struct rxrpc_call *call, u32 mark, u32 error,
793 sp->error = error; 795 sp->error = error;
794 sp->call = call; 796 sp->call = call;
795 rxrpc_get_call(call); 797 rxrpc_get_call(call);
798 atomic_inc(&call->skb_count);
796 799
797 spin_lock_bh(&call->lock); 800 spin_lock_bh(&call->lock);
798 ret = rxrpc_queue_rcv_skb(call, skb, true, fatal); 801 ret = rxrpc_queue_rcv_skb(call, skb, true, fatal);
@@ -834,6 +837,9 @@ void rxrpc_process_call(struct work_struct *work)
834 return; 837 return;
835 } 838 }
836 839
840 if (!call->conn)
841 goto skip_msg_init;
842
837 /* there's a good chance we're going to have to send a message, so set 843 /* there's a good chance we're going to have to send a message, so set
838 * one up in advance */ 844 * one up in advance */
839 msg.msg_name = &call->conn->params.peer->srx.transport; 845 msg.msg_name = &call->conn->params.peer->srx.transport;
@@ -856,6 +862,7 @@ void rxrpc_process_call(struct work_struct *work)
856 memset(iov, 0, sizeof(iov)); 862 memset(iov, 0, sizeof(iov));
857 iov[0].iov_base = &whdr; 863 iov[0].iov_base = &whdr;
858 iov[0].iov_len = sizeof(whdr); 864 iov[0].iov_len = sizeof(whdr);
865skip_msg_init:
859 866
860 /* deal with events of a final nature */ 867 /* deal with events of a final nature */
861 if (test_bit(RXRPC_CALL_EV_RCVD_ERROR, &call->events)) { 868 if (test_bit(RXRPC_CALL_EV_RCVD_ERROR, &call->events)) {
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
index 91287c9d01bb..ae057e0740f3 100644
--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -275,6 +275,7 @@ error:
275 list_del_init(&call->link); 275 list_del_init(&call->link);
276 write_unlock_bh(&rxrpc_call_lock); 276 write_unlock_bh(&rxrpc_call_lock);
277 277
278 set_bit(RXRPC_CALL_RELEASED, &call->flags);
278 call->state = RXRPC_CALL_DEAD; 279 call->state = RXRPC_CALL_DEAD;
279 rxrpc_put_call(call); 280 rxrpc_put_call(call);
280 _leave(" = %d", ret); 281 _leave(" = %d", ret);
@@ -287,6 +288,7 @@ error:
287 */ 288 */
288found_user_ID_now_present: 289found_user_ID_now_present:
289 write_unlock(&rx->call_lock); 290 write_unlock(&rx->call_lock);
291 set_bit(RXRPC_CALL_RELEASED, &call->flags);
290 call->state = RXRPC_CALL_DEAD; 292 call->state = RXRPC_CALL_DEAD;
291 rxrpc_put_call(call); 293 rxrpc_put_call(call);
292 _leave(" = -EEXIST [%p]", call); 294 _leave(" = -EEXIST [%p]", call);
@@ -491,15 +493,9 @@ void rxrpc_release_call(struct rxrpc_call *call)
491 spin_lock_bh(&call->lock); 493 spin_lock_bh(&call->lock);
492 while ((skb = skb_dequeue(&call->rx_queue)) || 494 while ((skb = skb_dequeue(&call->rx_queue)) ||
493 (skb = skb_dequeue(&call->rx_oos_queue))) { 495 (skb = skb_dequeue(&call->rx_oos_queue))) {
494 sp = rxrpc_skb(skb);
495 if (sp->call) {
496 ASSERTCMP(sp->call, ==, call);
497 rxrpc_put_call(call);
498 sp->call = NULL;
499 }
500 skb->destructor = NULL;
501 spin_unlock_bh(&call->lock); 496 spin_unlock_bh(&call->lock);
502 497
498 sp = rxrpc_skb(skb);
503 _debug("- zap %s %%%u #%u", 499 _debug("- zap %s %%%u #%u",
504 rxrpc_pkts[sp->hdr.type], 500 rxrpc_pkts[sp->hdr.type],
505 sp->hdr.serial, sp->hdr.seq); 501 sp->hdr.serial, sp->hdr.seq);
@@ -605,6 +601,7 @@ void __rxrpc_put_call(struct rxrpc_call *call)
605 601
606 if (atomic_dec_and_test(&call->usage)) { 602 if (atomic_dec_and_test(&call->usage)) {
607 _debug("call %d dead", call->debug_id); 603 _debug("call %d dead", call->debug_id);
604 WARN_ON(atomic_read(&call->skb_count) != 0);
608 ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD); 605 ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
609 rxrpc_queue_work(&call->destroyer); 606 rxrpc_queue_work(&call->destroyer);
610 } 607 }
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
index 991a20d25093..70bb77818dea 100644
--- a/net/rxrpc/input.c
+++ b/net/rxrpc/input.c
@@ -55,9 +55,6 @@ int rxrpc_queue_rcv_skb(struct rxrpc_call *call, struct sk_buff *skb,
55 if (test_bit(RXRPC_CALL_TERMINAL_MSG, &call->flags)) { 55 if (test_bit(RXRPC_CALL_TERMINAL_MSG, &call->flags)) {
56 _debug("already terminated"); 56 _debug("already terminated");
57 ASSERTCMP(call->state, >=, RXRPC_CALL_COMPLETE); 57 ASSERTCMP(call->state, >=, RXRPC_CALL_COMPLETE);
58 skb->destructor = NULL;
59 sp->call = NULL;
60 rxrpc_put_call(call);
61 rxrpc_free_skb(skb); 58 rxrpc_free_skb(skb);
62 return 0; 59 return 0;
63 } 60 }
@@ -111,13 +108,7 @@ int rxrpc_queue_rcv_skb(struct rxrpc_call *call, struct sk_buff *skb,
111 ret = 0; 108 ret = 0;
112 109
113out: 110out:
114 /* release the socket buffer */ 111 rxrpc_free_skb(skb);
115 if (skb) {
116 skb->destructor = NULL;
117 sp->call = NULL;
118 rxrpc_put_call(call);
119 rxrpc_free_skb(skb);
120 }
121 112
122 _leave(" = %d", ret); 113 _leave(" = %d", ret);
123 return ret; 114 return ret;
@@ -133,11 +124,15 @@ static int rxrpc_fast_process_data(struct rxrpc_call *call,
133 struct rxrpc_skb_priv *sp; 124 struct rxrpc_skb_priv *sp;
134 bool terminal; 125 bool terminal;
135 int ret, ackbit, ack; 126 int ret, ackbit, ack;
127 u32 serial;
128 u8 flags;
136 129
137 _enter("{%u,%u},,{%u}", call->rx_data_post, call->rx_first_oos, seq); 130 _enter("{%u,%u},,{%u}", call->rx_data_post, call->rx_first_oos, seq);
138 131
139 sp = rxrpc_skb(skb); 132 sp = rxrpc_skb(skb);
140 ASSERTCMP(sp->call, ==, NULL); 133 ASSERTCMP(sp->call, ==, NULL);
134 flags = sp->hdr.flags;
135 serial = sp->hdr.serial;
141 136
142 spin_lock(&call->lock); 137 spin_lock(&call->lock);
143 138
@@ -200,8 +195,9 @@ static int rxrpc_fast_process_data(struct rxrpc_call *call,
200 195
201 sp->call = call; 196 sp->call = call;
202 rxrpc_get_call(call); 197 rxrpc_get_call(call);
203 terminal = ((sp->hdr.flags & RXRPC_LAST_PACKET) && 198 atomic_inc(&call->skb_count);
204 !(sp->hdr.flags & RXRPC_CLIENT_INITIATED)); 199 terminal = ((flags & RXRPC_LAST_PACKET) &&
200 !(flags & RXRPC_CLIENT_INITIATED));
205 ret = rxrpc_queue_rcv_skb(call, skb, false, terminal); 201 ret = rxrpc_queue_rcv_skb(call, skb, false, terminal);
206 if (ret < 0) { 202 if (ret < 0) {
207 if (ret == -ENOMEM || ret == -ENOBUFS) { 203 if (ret == -ENOMEM || ret == -ENOBUFS) {
@@ -213,12 +209,13 @@ static int rxrpc_fast_process_data(struct rxrpc_call *call,
213 } 209 }
214 210
215 skb = NULL; 211 skb = NULL;
212 sp = NULL;
216 213
217 _debug("post #%u", seq); 214 _debug("post #%u", seq);
218 ASSERTCMP(call->rx_data_post, ==, seq); 215 ASSERTCMP(call->rx_data_post, ==, seq);
219 call->rx_data_post++; 216 call->rx_data_post++;
220 217
221 if (sp->hdr.flags & RXRPC_LAST_PACKET) 218 if (flags & RXRPC_LAST_PACKET)
222 set_bit(RXRPC_CALL_RCVD_LAST, &call->flags); 219 set_bit(RXRPC_CALL_RCVD_LAST, &call->flags);
223 220
224 /* if we've reached an out of sequence packet then we need to drain 221 /* if we've reached an out of sequence packet then we need to drain
@@ -234,7 +231,7 @@ static int rxrpc_fast_process_data(struct rxrpc_call *call,
234 231
235 spin_unlock(&call->lock); 232 spin_unlock(&call->lock);
236 atomic_inc(&call->ackr_not_idle); 233 atomic_inc(&call->ackr_not_idle);
237 rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, sp->hdr.serial, false); 234 rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, serial, false);
238 _leave(" = 0 [posted]"); 235 _leave(" = 0 [posted]");
239 return 0; 236 return 0;
240 237
@@ -247,7 +244,7 @@ out:
247 244
248discard_and_ack: 245discard_and_ack:
249 _debug("discard and ACK packet %p", skb); 246 _debug("discard and ACK packet %p", skb);
250 __rxrpc_propose_ACK(call, ack, sp->hdr.serial, true); 247 __rxrpc_propose_ACK(call, ack, serial, true);
251discard: 248discard:
252 spin_unlock(&call->lock); 249 spin_unlock(&call->lock);
253 rxrpc_free_skb(skb); 250 rxrpc_free_skb(skb);
@@ -255,7 +252,7 @@ discard:
255 return 0; 252 return 0;
256 253
257enqueue_and_ack: 254enqueue_and_ack:
258 __rxrpc_propose_ACK(call, ack, sp->hdr.serial, true); 255 __rxrpc_propose_ACK(call, ack, serial, true);
259enqueue_packet: 256enqueue_packet:
260 _net("defer skb %p", skb); 257 _net("defer skb %p", skb);
261 spin_unlock(&call->lock); 258 spin_unlock(&call->lock);
@@ -575,13 +572,13 @@ done:
575 * post connection-level events to the connection 572 * post connection-level events to the connection
576 * - this includes challenges, responses and some aborts 573 * - this includes challenges, responses and some aborts
577 */ 574 */
578static bool rxrpc_post_packet_to_conn(struct rxrpc_connection *conn, 575static void rxrpc_post_packet_to_conn(struct rxrpc_connection *conn,
579 struct sk_buff *skb) 576 struct sk_buff *skb)
580{ 577{
581 _enter("%p,%p", conn, skb); 578 _enter("%p,%p", conn, skb);
582 579
583 skb_queue_tail(&conn->rx_queue, skb); 580 skb_queue_tail(&conn->rx_queue, skb);
584 return rxrpc_queue_conn(conn); 581 rxrpc_queue_conn(conn);
585} 582}
586 583
587/* 584/*
@@ -702,7 +699,6 @@ void rxrpc_data_ready(struct sock *sk)
702 699
703 rcu_read_lock(); 700 rcu_read_lock();
704 701
705retry_find_conn:
706 conn = rxrpc_find_connection_rcu(local, skb); 702 conn = rxrpc_find_connection_rcu(local, skb);
707 if (!conn) 703 if (!conn)
708 goto cant_route_call; 704 goto cant_route_call;
@@ -710,8 +706,7 @@ retry_find_conn:
710 if (sp->hdr.callNumber == 0) { 706 if (sp->hdr.callNumber == 0) {
711 /* Connection-level packet */ 707 /* Connection-level packet */
712 _debug("CONN %p {%d}", conn, conn->debug_id); 708 _debug("CONN %p {%d}", conn, conn->debug_id);
713 if (!rxrpc_post_packet_to_conn(conn, skb)) 709 rxrpc_post_packet_to_conn(conn, skb);
714 goto retry_find_conn;
715 } else { 710 } else {
716 /* Call-bound packets are routed by connection channel. */ 711 /* Call-bound packets are routed by connection channel. */
717 unsigned int channel = sp->hdr.cid & RXRPC_CHANNELMASK; 712 unsigned int channel = sp->hdr.cid & RXRPC_CHANNELMASK;
@@ -749,6 +744,8 @@ cant_route_call:
749 if (sp->hdr.type != RXRPC_PACKET_TYPE_ABORT) { 744 if (sp->hdr.type != RXRPC_PACKET_TYPE_ABORT) {
750 _debug("reject type %d", sp->hdr.type); 745 _debug("reject type %d", sp->hdr.type);
751 rxrpc_reject_packet(local, skb); 746 rxrpc_reject_packet(local, skb);
747 } else {
748 rxrpc_free_skb(skb);
752 } 749 }
753 _leave(" [no call]"); 750 _leave(" [no call]");
754 return; 751 return;
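
The input.c hunks above share one theme: rxrpc_queue_rcv_skb() may consume and free the skb, so any header fields still needed afterwards (flags, serial) are copied into locals first, and sp is cleared alongside skb. A minimal user-space sketch of that ownership rule, with made-up names (not the kernel API):

/* Copy what you still need out of a buffer's private area *before*
 * handing the buffer to a consumer that may free it. */
#include <stdio.h>
#include <stdlib.h>

struct pkt {
    unsigned char flags;
    unsigned int serial;
};

static void enqueue(struct pkt *p)
{
    free(p);            /* consumer takes ownership; buffer is gone */
}

int main(void)
{
    struct pkt *p = malloc(sizeof(*p));
    if (!p)
        return 1;
    p->flags = 0x01;
    p->serial = 42;

    unsigned char flags = p->flags;   /* cache first ... */
    unsigned int serial = p->serial;

    enqueue(p);                       /* ... then transfer ownership */
    p = NULL;

    printf("flags=%#x serial=%u\n", flags, serial);  /* safe: no *p access */
    return 0;
}
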
diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c
index a3fa2ed85d63..9ed66d533002 100644
--- a/net/rxrpc/recvmsg.c
+++ b/net/rxrpc/recvmsg.c
@@ -203,6 +203,9 @@ int rxrpc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
203 } 203 }
204 204
205 /* we transferred the whole data packet */ 205 /* we transferred the whole data packet */
206 if (!(flags & MSG_PEEK))
207 rxrpc_kernel_data_consumed(call, skb);
208
206 if (sp->hdr.flags & RXRPC_LAST_PACKET) { 209 if (sp->hdr.flags & RXRPC_LAST_PACKET) {
207 _debug("last"); 210 _debug("last");
208 if (rxrpc_conn_is_client(call->conn)) { 211 if (rxrpc_conn_is_client(call->conn)) {
@@ -360,28 +363,6 @@ wait_error:
360} 363}
361 364
362/** 365/**
363 * rxrpc_kernel_data_delivered - Record delivery of data message
364 * @skb: Message holding data
365 *
366 * Record the delivery of a data message. This permits RxRPC to keep its
367 * tracking correct. The socket buffer will be deleted.
368 */
369void rxrpc_kernel_data_delivered(struct sk_buff *skb)
370{
371 struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
372 struct rxrpc_call *call = sp->call;
373
374 ASSERTCMP(sp->hdr.seq, >=, call->rx_data_recv);
375 ASSERTCMP(sp->hdr.seq, <=, call->rx_data_recv + 1);
376 call->rx_data_recv = sp->hdr.seq;
377
378 ASSERTCMP(sp->hdr.seq, >, call->rx_data_eaten);
379 rxrpc_free_skb(skb);
380}
381
382EXPORT_SYMBOL(rxrpc_kernel_data_delivered);
383
384/**
385 * rxrpc_kernel_is_data_last - Determine if data message is last one 366 * rxrpc_kernel_is_data_last - Determine if data message is last one
386 * @skb: Message holding data 367 * @skb: Message holding data
387 * 368 *
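
The recvmsg.c hunk above makes consumption explicit: rxrpc_kernel_data_consumed() runs only when MSG_PEEK is not set, so peeking never advances the call's data cursor. A small illustrative sketch of that peek/consume split (plain C, hypothetical names):

#include <stdio.h>
#include <string.h>

struct queue { const char *data; size_t len, consumed; };

static size_t read_data(struct queue *q, char *buf, size_t n, int peek)
{
    size_t avail = q->len - q->consumed;
    if (n > avail)
        n = avail;
    memcpy(buf, q->data + q->consumed, n);
    if (!peek)
        q->consumed += n;   /* only non-peek reads consume */
    return n;
}

int main(void)
{
    struct queue q = { "abcdef", 6, 0 };
    char buf[4];

    read_data(&q, buf, 3, 1);              /* MSG_PEEK */
    read_data(&q, buf, 3, 0);              /* normal read */
    printf("consumed=%zu\n", q.consumed);  /* 3, not 6 */
    return 0;
}
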
diff --git a/net/rxrpc/skbuff.c b/net/rxrpc/skbuff.c
index eee0cfd9ac8c..06c51d4b622d 100644
--- a/net/rxrpc/skbuff.c
+++ b/net/rxrpc/skbuff.c
@@ -98,11 +98,39 @@ static void rxrpc_hard_ACK_data(struct rxrpc_call *call,
98 spin_unlock_bh(&call->lock); 98 spin_unlock_bh(&call->lock);
99} 99}
100 100
101/**
102 * rxrpc_kernel_data_consumed - Record consumption of data message
103 * @call: The call to which the message pertains.
104 * @skb: Message holding data
105 *
106 * Record the consumption of a data message and generate an ACK if appropriate.
107 * The call state is shifted if this was the final packet. The caller must be
108 * in process context with no spinlocks held.
109 *
110 * TODO: Actually generate the ACK here rather than punting this to the
111 * workqueue.
112 */
113void rxrpc_kernel_data_consumed(struct rxrpc_call *call, struct sk_buff *skb)
114{
115 struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
116
117 _enter("%d,%p{%u}", call->debug_id, skb, sp->hdr.seq);
118
119 ASSERTCMP(sp->call, ==, call);
120 ASSERTCMP(sp->hdr.type, ==, RXRPC_PACKET_TYPE_DATA);
121
122 /* TODO: Fix the sequence number tracking */
123 ASSERTCMP(sp->hdr.seq, >=, call->rx_data_recv);
124 ASSERTCMP(sp->hdr.seq, <=, call->rx_data_recv + 1);
125 ASSERTCMP(sp->hdr.seq, >, call->rx_data_eaten);
126
127 call->rx_data_recv = sp->hdr.seq;
128 rxrpc_hard_ACK_data(call, sp);
129}
130EXPORT_SYMBOL(rxrpc_kernel_data_consumed);
131
101/* 132/*
102 * destroy a packet that has an RxRPC control buffer 133 * Destroy a packet that has an RxRPC control buffer
103 * - advance the hard-ACK state of the parent call (done here in case something
104 * in the kernel bypasses recvmsg() and steals the packet directly off of the
105 * socket receive queue)
106 */ 134 */
107void rxrpc_packet_destructor(struct sk_buff *skb) 135void rxrpc_packet_destructor(struct sk_buff *skb)
108{ 136{
@@ -112,9 +140,8 @@ void rxrpc_packet_destructor(struct sk_buff *skb)
112 _enter("%p{%p}", skb, call); 140 _enter("%p{%p}", skb, call);
113 141
114 if (call) { 142 if (call) {
115 /* send the final ACK on a client call */ 143 if (atomic_dec_return(&call->skb_count) < 0)
116 if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA) 144 BUG();
117 rxrpc_hard_ACK_data(call, sp);
118 rxrpc_put_call(call); 145 rxrpc_put_call(call);
119 sp->call = NULL; 146 sp->call = NULL;
120 } 147 }
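
The skbuff.c hunk moves hard-ACK generation out of the destructor and adds a per-call skb_count that the destructor decrements, letting __rxrpc_put_call() warn if buffers are still charged to a dying call. A rough user-space sketch of that accounting pattern, using C11 atomics and invented names:

#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>

struct call {
    atomic_int usage;      /* reference count */
    atomic_int skb_count;  /* buffers charged to this call */
};

static void buffer_attach(struct call *c)
{
    atomic_fetch_add(&c->usage, 1);
    atomic_fetch_add(&c->skb_count, 1);
}

static void buffer_destructor(struct call *c)
{
    /* Mirrors rxrpc_packet_destructor(): going negative is a bug. */
    assert(atomic_fetch_sub(&c->skb_count, 1) > 0);
    atomic_fetch_sub(&c->usage, 1);
}

int main(void)
{
    struct call c = { 1, 0 };

    buffer_attach(&c);
    buffer_destructor(&c);

    if (atomic_fetch_sub(&c.usage, 1) == 1) {
        /* Final put: every charged buffer must be gone by now. */
        if (atomic_load(&c.skb_count) != 0)
            fprintf(stderr, "leaked buffers!\n");
        else
            puts("call dead, no buffers outstanding");
    }
    return 0;
}
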
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index e4a5f2607ffa..d09d0687594b 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -64,7 +64,6 @@ int __tcf_hash_release(struct tc_action *p, bool bind, bool strict)
64 if (p->tcfa_bindcnt <= 0 && p->tcfa_refcnt <= 0) { 64 if (p->tcfa_bindcnt <= 0 && p->tcfa_refcnt <= 0) {
65 if (p->ops->cleanup) 65 if (p->ops->cleanup)
66 p->ops->cleanup(p, bind); 66 p->ops->cleanup(p, bind);
67 list_del(&p->list);
68 tcf_hash_destroy(p->hinfo, p); 67 tcf_hash_destroy(p->hinfo, p);
69 ret = ACT_P_DELETED; 68 ret = ACT_P_DELETED;
70 } 69 }
@@ -421,18 +420,19 @@ static struct tc_action_ops *tc_lookup_action(struct nlattr *kind)
421 return res; 420 return res;
422} 421}
423 422
424int tcf_action_exec(struct sk_buff *skb, const struct list_head *actions, 423int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions,
425 struct tcf_result *res) 424 int nr_actions, struct tcf_result *res)
426{ 425{
427 const struct tc_action *a; 426 int ret = -1, i;
428 int ret = -1;
429 427
430 if (skb->tc_verd & TC_NCLS) { 428 if (skb->tc_verd & TC_NCLS) {
431 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd); 429 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
432 ret = TC_ACT_OK; 430 ret = TC_ACT_OK;
433 goto exec_done; 431 goto exec_done;
434 } 432 }
435 list_for_each_entry(a, actions, list) { 433 for (i = 0; i < nr_actions; i++) {
434 const struct tc_action *a = actions[i];
435
436repeat: 436repeat:
437 ret = a->ops->act(skb, a, res); 437 ret = a->ops->act(skb, a, res);
438 if (ret == TC_ACT_REPEAT) 438 if (ret == TC_ACT_REPEAT)
@@ -754,16 +754,6 @@ err_out:
754 return ERR_PTR(err); 754 return ERR_PTR(err);
755} 755}
756 756
757static void cleanup_a(struct list_head *actions)
758{
759 struct tc_action *a, *tmp;
760
761 list_for_each_entry_safe(a, tmp, actions, list) {
762 list_del(&a->list);
763 kfree(a);
764 }
765}
766
767static int tca_action_flush(struct net *net, struct nlattr *nla, 757static int tca_action_flush(struct net *net, struct nlattr *nla,
768 struct nlmsghdr *n, u32 portid) 758 struct nlmsghdr *n, u32 portid)
769{ 759{
@@ -905,7 +895,7 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
905 return ret; 895 return ret;
906 } 896 }
907err: 897err:
908 cleanup_a(&actions); 898 tcf_action_destroy(&actions, 0);
909 return ret; 899 return ret;
910} 900}
911 901
@@ -942,15 +932,9 @@ tcf_action_add(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
942 932
943 ret = tcf_action_init(net, nla, NULL, NULL, ovr, 0, &actions); 933 ret = tcf_action_init(net, nla, NULL, NULL, ovr, 0, &actions);
944 if (ret) 934 if (ret)
945 goto done; 935 return ret;
946 936
947 /* dump then free all the actions after update; inserted policy 937 return tcf_add_notify(net, n, &actions, portid);
948 * stays intact
949 */
950 ret = tcf_add_notify(net, n, &actions, portid);
951 cleanup_a(&actions);
952done:
953 return ret;
954} 938}
955 939
956static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n) 940static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n)
diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
index 141a06eeb1e5..e87cd81315e1 100644
--- a/net/sched/act_ife.c
+++ b/net/sched/act_ife.c
@@ -53,7 +53,7 @@ int ife_tlv_meta_encode(void *skbdata, u16 attrtype, u16 dlen, const void *dval)
53 u32 *tlv = (u32 *)(skbdata); 53 u32 *tlv = (u32 *)(skbdata);
54 u16 totlen = nla_total_size(dlen); /*alignment + hdr */ 54 u16 totlen = nla_total_size(dlen); /*alignment + hdr */
55 char *dptr = (char *)tlv + NLA_HDRLEN; 55 char *dptr = (char *)tlv + NLA_HDRLEN;
56 u32 htlv = attrtype << 16 | totlen; 56 u32 htlv = attrtype << 16 | dlen;
57 57
58 *tlv = htonl(htlv); 58 *tlv = htonl(htlv);
59 memset(dptr, 0, totlen - NLA_HDRLEN); 59 memset(dptr, 0, totlen - NLA_HDRLEN);
@@ -135,7 +135,7 @@ EXPORT_SYMBOL_GPL(ife_release_meta_gen);
135 135
136int ife_validate_meta_u32(void *val, int len) 136int ife_validate_meta_u32(void *val, int len)
137{ 137{
138 if (len == 4) 138 if (len == sizeof(u32))
139 return 0; 139 return 0;
140 140
141 return -EINVAL; 141 return -EINVAL;
@@ -144,8 +144,8 @@ EXPORT_SYMBOL_GPL(ife_validate_meta_u32);
144 144
145int ife_validate_meta_u16(void *val, int len) 145int ife_validate_meta_u16(void *val, int len)
146{ 146{
147 /* length will include padding */ 147 /* length will not include padding */
148 if (len == NLA_ALIGN(2)) 148 if (len == sizeof(u16))
149 return 0; 149 return 0;
150 150
151 return -EINVAL; 151 return -EINVAL;
@@ -652,12 +652,14 @@ static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a,
652 u8 *tlvdata = (u8 *)tlv; 652 u8 *tlvdata = (u8 *)tlv;
653 u16 mtype = tlv->type; 653 u16 mtype = tlv->type;
654 u16 mlen = tlv->len; 654 u16 mlen = tlv->len;
655 u16 alen;
655 656
656 mtype = ntohs(mtype); 657 mtype = ntohs(mtype);
657 mlen = ntohs(mlen); 658 mlen = ntohs(mlen);
659 alen = NLA_ALIGN(mlen);
658 660
659 if (find_decode_metaid(skb, ife, mtype, (mlen - 4), 661 if (find_decode_metaid(skb, ife, mtype, (mlen - NLA_HDRLEN),
660 (void *)(tlvdata + 4))) { 662 (void *)(tlvdata + NLA_HDRLEN))) {
661 /* abuse overlimits to count when we receive metadata 663 /* abuse overlimits to count when we receive metadata
662 * but don't have an ops for it 664 * but don't have an ops for it
663 */ 665 */
@@ -666,8 +668,8 @@ static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a,
666 ife->tcf_qstats.overlimits++; 668 ife->tcf_qstats.overlimits++;
667 } 669 }
668 670
669 tlvdata += mlen; 671 tlvdata += alen;
670 ifehdrln -= mlen; 672 ifehdrln -= alen;
671 tlv = (struct meta_tlvhdr *)tlvdata; 673 tlv = (struct meta_tlvhdr *)tlvdata;
672 } 674 }
673 675
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index b3c7e975fc9e..8a3be1d99775 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -63,49 +63,8 @@ static int tcf_act_police_walker(struct net *net, struct sk_buff *skb,
63 const struct tc_action_ops *ops) 63 const struct tc_action_ops *ops)
64{ 64{
65 struct tc_action_net *tn = net_generic(net, police_net_id); 65 struct tc_action_net *tn = net_generic(net, police_net_id);
66 struct tcf_hashinfo *hinfo = tn->hinfo;
67 int err = 0, index = -1, i = 0, s_i = 0, n_i = 0;
68 struct nlattr *nest;
69
70 spin_lock_bh(&hinfo->lock);
71
72 s_i = cb->args[0];
73
74 for (i = 0; i < (POL_TAB_MASK + 1); i++) {
75 struct hlist_head *head;
76 struct tc_action *p;
77
78 head = &hinfo->htab[tcf_hash(i, POL_TAB_MASK)];
79
80 hlist_for_each_entry_rcu(p, head, tcfa_head) {
81 index++;
82 if (index < s_i)
83 continue;
84 nest = nla_nest_start(skb, index);
85 if (nest == NULL)
86 goto nla_put_failure;
87 if (type == RTM_DELACTION)
88 err = tcf_action_dump_1(skb, p, 0, 1);
89 else
90 err = tcf_action_dump_1(skb, p, 0, 0);
91 if (err < 0) {
92 index--;
93 nla_nest_cancel(skb, nest);
94 goto done;
95 }
96 nla_nest_end(skb, nest);
97 n_i++;
98 }
99 }
100done:
101 spin_unlock_bh(&hinfo->lock);
102 if (n_i)
103 cb->args[0] += n_i;
104 return n_i;
105 66
106nla_put_failure: 67 return tcf_generic_walker(tn, skb, cb, type, ops);
107 nla_nest_cancel(skb, nest);
108 goto done;
109} 68}
110 69
111static const struct nla_policy police_policy[TCA_POLICE_MAX + 1] = { 70static const struct nla_policy police_policy[TCA_POLICE_MAX + 1] = {
@@ -125,6 +84,7 @@ static int tcf_act_police_init(struct net *net, struct nlattr *nla,
125 struct tcf_police *police; 84 struct tcf_police *police;
126 struct qdisc_rate_table *R_tab = NULL, *P_tab = NULL; 85 struct qdisc_rate_table *R_tab = NULL, *P_tab = NULL;
127 struct tc_action_net *tn = net_generic(net, police_net_id); 86 struct tc_action_net *tn = net_generic(net, police_net_id);
87 bool exists = false;
128 int size; 88 int size;
129 89
130 if (nla == NULL) 90 if (nla == NULL)
@@ -139,24 +99,24 @@ static int tcf_act_police_init(struct net *net, struct nlattr *nla,
139 size = nla_len(tb[TCA_POLICE_TBF]); 99 size = nla_len(tb[TCA_POLICE_TBF]);
140 if (size != sizeof(*parm) && size != sizeof(struct tc_police_compat)) 100 if (size != sizeof(*parm) && size != sizeof(struct tc_police_compat))
141 return -EINVAL; 101 return -EINVAL;
102
142 parm = nla_data(tb[TCA_POLICE_TBF]); 103 parm = nla_data(tb[TCA_POLICE_TBF]);
104 exists = tcf_hash_check(tn, parm->index, a, bind);
105 if (exists && bind)
106 return 0;
143 107
144 if (parm->index) { 108 if (!exists) {
145 if (tcf_hash_check(tn, parm->index, a, bind)) {
146 if (ovr)
147 goto override;
148 /* not replacing */
149 return -EEXIST;
150 }
151 } else {
152 ret = tcf_hash_create(tn, parm->index, NULL, a, 109 ret = tcf_hash_create(tn, parm->index, NULL, a,
153 &act_police_ops, bind, false); 110 &act_police_ops, bind, false);
154 if (ret) 111 if (ret)
155 return ret; 112 return ret;
156 ret = ACT_P_CREATED; 113 ret = ACT_P_CREATED;
114 } else {
115 tcf_hash_release(*a, bind);
116 if (!ovr)
117 return -EEXIST;
157 } 118 }
158 119
159override:
160 police = to_police(*a); 120 police = to_police(*a);
161 if (parm->rate.rate) { 121 if (parm->rate.rate) {
162 err = -ENOMEM; 122 err = -ENOMEM;
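
The rewritten tcf_act_police_init() above follows the common exists/bind/ovr decision table: an existing entry satisfies a bind immediately, a missing entry is created, and an existing entry is replaced only when override was requested. A compact sketch of that table (stand-in return codes, not the kernel helpers):

#include <stdio.h>

enum { CREATED = 1, EEXIST_ERR = -17 };

static int init_action(int exists, int bind, int ovr)
{
    if (exists && bind)
        return 0;          /* caller only wanted a reference */
    if (!exists)
        return CREATED;    /* fresh entry */
    if (!ovr)
        return EEXIST_ERR; /* present and replace not requested */
    return 0;              /* release old params, then overwrite */
}

int main(void)
{
    printf("exists+bind -> %d\n", init_action(1, 1, 0));
    printf("new         -> %d\n", init_action(0, 0, 0));
    printf("no-ovr      -> %d\n", init_action(1, 0, 0));
    printf("ovr         -> %d\n", init_action(1, 0, 1));
    return 0;
}
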
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 843a716a4303..a7c5645373af 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -541,8 +541,12 @@ out:
541void tcf_exts_destroy(struct tcf_exts *exts) 541void tcf_exts_destroy(struct tcf_exts *exts)
542{ 542{
543#ifdef CONFIG_NET_CLS_ACT 543#ifdef CONFIG_NET_CLS_ACT
544 tcf_action_destroy(&exts->actions, TCA_ACT_UNBIND); 544 LIST_HEAD(actions);
545 INIT_LIST_HEAD(&exts->actions); 545
546 tcf_exts_to_list(exts, &actions);
547 tcf_action_destroy(&actions, TCA_ACT_UNBIND);
548 kfree(exts->actions);
549 exts->nr_actions = 0;
546#endif 550#endif
547} 551}
548EXPORT_SYMBOL(tcf_exts_destroy); 552EXPORT_SYMBOL(tcf_exts_destroy);
@@ -554,7 +558,6 @@ int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
554 { 558 {
555 struct tc_action *act; 559 struct tc_action *act;
556 560
557 INIT_LIST_HEAD(&exts->actions);
558 if (exts->police && tb[exts->police]) { 561 if (exts->police && tb[exts->police]) {
559 act = tcf_action_init_1(net, tb[exts->police], rate_tlv, 562 act = tcf_action_init_1(net, tb[exts->police], rate_tlv,
560 "police", ovr, 563 "police", ovr,
@@ -563,14 +566,20 @@ int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
563 return PTR_ERR(act); 566 return PTR_ERR(act);
564 567
565 act->type = exts->type = TCA_OLD_COMPAT; 568 act->type = exts->type = TCA_OLD_COMPAT;
566 list_add(&act->list, &exts->actions); 569 exts->actions[0] = act;
570 exts->nr_actions = 1;
567 } else if (exts->action && tb[exts->action]) { 571 } else if (exts->action && tb[exts->action]) {
568 int err; 572 LIST_HEAD(actions);
573 int err, i = 0;
574
569 err = tcf_action_init(net, tb[exts->action], rate_tlv, 575 err = tcf_action_init(net, tb[exts->action], rate_tlv,
570 NULL, ovr, 576 NULL, ovr,
571 TCA_ACT_BIND, &exts->actions); 577 TCA_ACT_BIND, &actions);
572 if (err) 578 if (err)
573 return err; 579 return err;
580 list_for_each_entry(act, &actions, list)
581 exts->actions[i++] = act;
582 exts->nr_actions = i;
574 } 583 }
575 } 584 }
576#else 585#else
@@ -587,37 +596,49 @@ void tcf_exts_change(struct tcf_proto *tp, struct tcf_exts *dst,
587 struct tcf_exts *src) 596 struct tcf_exts *src)
588{ 597{
589#ifdef CONFIG_NET_CLS_ACT 598#ifdef CONFIG_NET_CLS_ACT
590 LIST_HEAD(tmp); 599 struct tcf_exts old = *dst;
600
591 tcf_tree_lock(tp); 601 tcf_tree_lock(tp);
592 list_splice_init(&dst->actions, &tmp); 602 dst->nr_actions = src->nr_actions;
593 list_splice(&src->actions, &dst->actions); 603 dst->actions = src->actions;
594 dst->type = src->type; 604 dst->type = src->type;
595 tcf_tree_unlock(tp); 605 tcf_tree_unlock(tp);
596 tcf_action_destroy(&tmp, TCA_ACT_UNBIND); 606
607 tcf_exts_destroy(&old);
597#endif 608#endif
598} 609}
599EXPORT_SYMBOL(tcf_exts_change); 610EXPORT_SYMBOL(tcf_exts_change);
600 611
601#define tcf_exts_first_act(ext) \ 612#ifdef CONFIG_NET_CLS_ACT
602 list_first_entry_or_null(&(exts)->actions, \ 613static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts)
603 struct tc_action, list) 614{
615 if (exts->nr_actions == 0)
616 return NULL;
617 else
618 return exts->actions[0];
619}
620#endif
604 621
605int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts) 622int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
606{ 623{
607#ifdef CONFIG_NET_CLS_ACT 624#ifdef CONFIG_NET_CLS_ACT
608 struct nlattr *nest; 625 struct nlattr *nest;
609 626
610 if (exts->action && !list_empty(&exts->actions)) { 627 if (exts->action && exts->nr_actions) {
611 /* 628 /*
612 * again for backward compatible mode - we want 629 * again for backward compatible mode - we want
613 * to work with both old and new modes of entering 630 * to work with both old and new modes of entering
614 * tc data even if iproute2 was newer - jhs 631 * tc data even if iproute2 was newer - jhs
615 */ 632 */
616 if (exts->type != TCA_OLD_COMPAT) { 633 if (exts->type != TCA_OLD_COMPAT) {
634 LIST_HEAD(actions);
635
617 nest = nla_nest_start(skb, exts->action); 636 nest = nla_nest_start(skb, exts->action);
618 if (nest == NULL) 637 if (nest == NULL)
619 goto nla_put_failure; 638 goto nla_put_failure;
620 if (tcf_action_dump(skb, &exts->actions, 0, 0) < 0) 639
640 tcf_exts_to_list(exts, &actions);
641 if (tcf_action_dump(skb, &actions, 0, 0) < 0)
621 goto nla_put_failure; 642 goto nla_put_failure;
622 nla_nest_end(skb, nest); 643 nla_nest_end(skb, nest);
623 } else if (exts->police) { 644 } else if (exts->police) {
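
tcf_exts_change() above now swaps the action array and count under the tree lock and tears the old set down afterwards, keeping the critical section to a pointer-sized update. An illustrative user-space version of that swap-then-destroy pattern (lock calls elided as comments):

#include <stdio.h>
#include <stdlib.h>

struct exts {
    int nr;
    int *actions;
};

static void exts_change(struct exts *dst, struct exts *src)
{
    struct exts old = *dst;    /* remember what we are replacing */

    /* lock(); -- held only around the swap */
    dst->nr = src->nr;
    dst->actions = src->actions;
    /* unlock(); */

    free(old.actions);         /* teardown happens outside the lock */
}

int main(void)
{
    struct exts dst = { 1, malloc(sizeof(int)) };
    struct exts src = { 2, malloc(2 * sizeof(int)) };

    dst.actions[0] = 7;
    src.actions[0] = 1;
    src.actions[1] = 2;

    exts_change(&dst, &src);
    printf("nr=%d first=%d\n", dst.nr, dst.actions[0]);
    free(dst.actions);
    return 0;
}
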
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index e95b67cd5718..657c13362b19 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -643,18 +643,19 @@ struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
643 struct Qdisc *sch; 643 struct Qdisc *sch;
644 644
645 if (!try_module_get(ops->owner)) 645 if (!try_module_get(ops->owner))
646 goto errout; 646 return NULL;
647 647
648 sch = qdisc_alloc(dev_queue, ops); 648 sch = qdisc_alloc(dev_queue, ops);
649 if (IS_ERR(sch)) 649 if (IS_ERR(sch)) {
650 goto errout; 650 module_put(ops->owner);
651 return NULL;
652 }
651 sch->parent = parentid; 653 sch->parent = parentid;
652 654
653 if (!ops->init || ops->init(sch, NULL) == 0) 655 if (!ops->init || ops->init(sch, NULL) == 0)
654 return sch; 656 return sch;
655 657
656 qdisc_destroy(sch); 658 qdisc_destroy(sch);
657errout:
658 return NULL; 659 return NULL;
659} 660}
660EXPORT_SYMBOL(qdisc_create_dflt); 661EXPORT_SYMBOL(qdisc_create_dflt);
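
The qdisc_create_dflt() fix is a reference-counting rule: once try_module_get() has succeeded, every failure exit must call module_put() before returning. A small sketch of the pattern, with a plain counter standing in for the module refcount (invented names, not the kernel API):

#include <stdio.h>
#include <stdlib.h>

static int refs;

static int get_ref(void)  { refs++; return 1; }
static void put_ref(void) { refs--; }

static void *create(int fail_alloc)
{
    if (!get_ref())
        return NULL;

    void *obj = fail_alloc ? NULL : malloc(16);
    if (!obj) {
        put_ref();     /* the bug was returning here without this */
        return NULL;
    }
    return obj;        /* success: the object now owns the ref */
}

int main(void)
{
    free(create(0));
    put_ref();         /* owner drops the ref when destroying the object */
    create(1);         /* failure path */
    printf("outstanding refs: %d\n", refs);  /* expect 0 after the fix */
    return 0;
}
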
diff --git a/net/sctp/input.c b/net/sctp/input.c
index c182db7d691f..69444d32ecda 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -119,7 +119,13 @@ int sctp_rcv(struct sk_buff *skb)
119 skb_transport_offset(skb)) 119 skb_transport_offset(skb))
120 goto discard_it; 120 goto discard_it;
121 121
122 if (!pskb_may_pull(skb, sizeof(struct sctphdr))) 122 /* If the packet is fragmented and we need to do crc checking,
123 * it's better to just linearize it; otherwise CRC computation
124 * takes longer.
125 */
126 if ((!(skb_shinfo(skb)->gso_type & SKB_GSO_SCTP) &&
127 skb_linearize(skb)) ||
128 !pskb_may_pull(skb, sizeof(struct sctphdr)))
123 goto discard_it; 129 goto discard_it;
124 130
125 /* Pull up the IP header. */ 131 /* Pull up the IP header. */
@@ -1177,9 +1183,6 @@ static struct sctp_association *__sctp_rcv_lookup_harder(struct net *net,
1177 if ((skb_shinfo(skb)->gso_type & SKB_GSO_SCTP) == SKB_GSO_SCTP) 1183 if ((skb_shinfo(skb)->gso_type & SKB_GSO_SCTP) == SKB_GSO_SCTP)
1178 return NULL; 1184 return NULL;
1179 1185
1180 if (skb_linearize(skb))
1181 return NULL;
1182
1183 ch = (sctp_chunkhdr_t *) skb->data; 1186 ch = (sctp_chunkhdr_t *) skb->data;
1184 1187
1185 /* The code below will attempt to walk the chunk and extract 1188 /* The code below will attempt to walk the chunk and extract
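
The sctp_rcv() comment above gives the rationale: for non-GSO packets the CRC must cover the whole packet anyway, so linearizing first keeps the checksum a single pass over contiguous memory. A toy illustration of coalesce-then-sum (simple byte sum, not SCTP's real CRC32c):

#include <stdio.h>
#include <string.h>

static unsigned int sum(const unsigned char *p, size_t len)
{
    unsigned int s = 0;
    while (len--)
        s += *p++;          /* one tight loop over contiguous bytes */
    return s;
}

int main(void)
{
    const char *frags[] = { "he", "llo" };   /* a "fragmented" packet */
    unsigned char linear[8];
    size_t off = 0;

    for (int i = 0; i < 2; i++) {            /* linearize ... */
        memcpy(linear + off, frags[i], strlen(frags[i]));
        off += strlen(frags[i]);
    }
    printf("sum=%u\n", sum(linear, off));    /* ... then a single pass */
    return 0;
}
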
diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
index c30ddb0f3190..6437aa97cfd7 100644
--- a/net/sctp/inqueue.c
+++ b/net/sctp/inqueue.c
@@ -170,19 +170,6 @@ next_chunk:
170 170
171 chunk = list_entry(entry, struct sctp_chunk, list); 171 chunk = list_entry(entry, struct sctp_chunk, list);
172 172
173 /* Linearize if it's not GSO */
174 if ((skb_shinfo(chunk->skb)->gso_type & SKB_GSO_SCTP) != SKB_GSO_SCTP &&
175 skb_is_nonlinear(chunk->skb)) {
176 if (skb_linearize(chunk->skb)) {
177 __SCTP_INC_STATS(dev_net(chunk->skb->dev), SCTP_MIB_IN_PKT_DISCARDS);
178 sctp_chunk_free(chunk);
179 goto next_chunk;
180 }
181
182 /* Update sctp_hdr as it probably changed */
183 chunk->sctp_hdr = sctp_hdr(chunk->skb);
184 }
185
186 if ((skb_shinfo(chunk->skb)->gso_type & SKB_GSO_SCTP) == SKB_GSO_SCTP) { 173 if ((skb_shinfo(chunk->skb)->gso_type & SKB_GSO_SCTP) == SKB_GSO_SCTP) {
187 /* GSO-marked skbs but without frags, handle 174 /* GSO-marked skbs but without frags, handle
188 * them normally 175 * them normally
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 1f1682b9a6a8..31b7bc35895d 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -878,7 +878,7 @@ static sctp_xmit_t sctp_packet_will_fit(struct sctp_packet *packet,
878 struct sctp_chunk *chunk, 878 struct sctp_chunk *chunk,
879 u16 chunk_len) 879 u16 chunk_len)
880{ 880{
881 size_t psize, pmtu; 881 size_t psize, pmtu, maxsize;
882 sctp_xmit_t retval = SCTP_XMIT_OK; 882 sctp_xmit_t retval = SCTP_XMIT_OK;
883 883
884 psize = packet->size; 884 psize = packet->size;
@@ -906,6 +906,17 @@ static sctp_xmit_t sctp_packet_will_fit(struct sctp_packet *packet,
906 goto out; 906 goto out;
907 } 907 }
908 908
909 /* Similarly, if this chunk was built before a PMTU
910 * reduction, we have to fragment it at IP level now. So
911 * if the packet already contains something, we need to
912 * flush.
913 */
914 maxsize = pmtu - packet->overhead;
915 if (packet->auth)
916 maxsize -= WORD_ROUND(packet->auth->skb->len);
917 if (chunk_len > maxsize)
918 retval = SCTP_XMIT_PMTU_FULL;
919
909 /* It is also okay to fragment if the chunk we are 920 /* It is also okay to fragment if the chunk we are
910 * adding is a control chunk, but only if current packet 921 * adding is a control chunk, but only if current packet
911 * is not a GSO one otherwise it causes fragmentation of 922 * is not a GSO one otherwise it causes fragmentation of
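
The new check in sctp_packet_will_fit() compares the chunk against the space actually usable after headers and any AUTH chunk, so a chunk built before a PMTU reduction forces a flush instead of an oversized packet. The arithmetic, with made-up sizes:

#include <stdio.h>

#define WORD_ROUND(s) (((s) + 3) & ~3)

int main(void)
{
    unsigned int pmtu = 1280, overhead = 48, auth_len = 20;
    unsigned int chunk_len = 1400;           /* built when the PMTU was larger */

    unsigned int maxsize = pmtu - overhead - WORD_ROUND(auth_len);
    printf("maxsize=%u chunk=%u -> %s\n", maxsize, chunk_len,
           chunk_len > maxsize ? "flush/fragment (PMTU_FULL)" : "fits");
    return 0;
}
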
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index 4cb5aedfe3ee..ef8ba77a5bea 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -293,6 +293,7 @@ static void *sctp_transport_seq_start(struct seq_file *seq, loff_t *pos)
293 return ERR_PTR(err); 293 return ERR_PTR(err);
294 } 294 }
295 295
296 iter->start_fail = 0;
296 return sctp_transport_get_idx(seq_file_net(seq), &iter->hti, *pos); 297 return sctp_transport_get_idx(seq_file_net(seq), &iter->hti, *pos);
297} 298}
298 299
diff --git a/net/sctp/sctp_diag.c b/net/sctp/sctp_diag.c
index f69edcf219e5..f3508aa75815 100644
--- a/net/sctp/sctp_diag.c
+++ b/net/sctp/sctp_diag.c
@@ -13,6 +13,7 @@ static void inet_diag_msg_sctpasoc_fill(struct inet_diag_msg *r,
13{ 13{
14 union sctp_addr laddr, paddr; 14 union sctp_addr laddr, paddr;
15 struct dst_entry *dst; 15 struct dst_entry *dst;
16 struct timer_list *t3_rtx = &asoc->peer.primary_path->T3_rtx_timer;
16 17
17 laddr = list_entry(asoc->base.bind_addr.address_list.next, 18 laddr = list_entry(asoc->base.bind_addr.address_list.next,
18 struct sctp_sockaddr_entry, list)->a; 19 struct sctp_sockaddr_entry, list)->a;
@@ -40,10 +41,15 @@ static void inet_diag_msg_sctpasoc_fill(struct inet_diag_msg *r,
40 } 41 }
41 42
42 r->idiag_state = asoc->state; 43 r->idiag_state = asoc->state;
43 r->idiag_timer = SCTP_EVENT_TIMEOUT_T3_RTX; 44 if (timer_pending(t3_rtx)) {
44 r->idiag_retrans = asoc->rtx_data_chunks; 45 r->idiag_timer = SCTP_EVENT_TIMEOUT_T3_RTX;
45 r->idiag_expires = jiffies_to_msecs( 46 r->idiag_retrans = asoc->rtx_data_chunks;
46 asoc->timeouts[SCTP_EVENT_TIMEOUT_T3_RTX] - jiffies); 47 r->idiag_expires = jiffies_to_msecs(t3_rtx->expires - jiffies);
48 } else {
49 r->idiag_timer = 0;
50 r->idiag_retrans = 0;
51 r->idiag_expires = 0;
52 }
47} 53}
48 54
49static int inet_diag_msg_sctpladdrs_fill(struct sk_buff *skb, 55static int inet_diag_msg_sctpladdrs_fill(struct sk_buff *skb,
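
inet_diag_msg_sctpasoc_fill() above now reports T3-RTX timer details only while the timer is actually pending, instead of computing an expiry from a possibly stale timeout value. A sketch of that guard (invented field names):

#include <stdio.h>

struct diag { int timer, retrans; long expires_ms; };

static void fill(struct diag *r, int pending, long expires, long now)
{
    if (pending) {
        r->timer = 3;                      /* T3_RTX */
        r->retrans = 1;
        r->expires_ms = expires - now;
    } else {
        r->timer = 0;                      /* zero everything instead of */
        r->retrans = 0;                    /* reporting a bogus expiry   */
        r->expires_ms = 0;
    }
}

int main(void)
{
    struct diag r;

    fill(&r, 1, 1300, 1000);
    printf("pending: timer=%d expires=%ldms\n", r.timer, r.expires_ms);
    fill(&r, 0, 1300, 1000);
    printf("idle:    timer=%d expires=%ldms\n", r.timer, r.expires_ms);
    return 0;
}
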
@@ -350,7 +356,7 @@ static int sctp_ep_dump(struct sctp_endpoint *ep, void *p)
350 if (cb->args[4] < cb->args[1]) 356 if (cb->args[4] < cb->args[1])
351 goto next; 357 goto next;
352 358
353 if ((r->idiag_states & ~TCPF_LISTEN) && !list_empty(&ep->asocs)) 359 if (!(r->idiag_states & TCPF_LISTEN) && !list_empty(&ep->asocs))
354 goto next; 360 goto next;
355 361
356 if (r->sdiag_family != AF_UNSPEC && 362 if (r->sdiag_family != AF_UNSPEC &&
@@ -418,11 +424,13 @@ static int sctp_diag_dump_one(struct sk_buff *in_skb,
418 paddr.v4.sin_family = AF_INET; 424 paddr.v4.sin_family = AF_INET;
419 } else { 425 } else {
420 laddr.v6.sin6_port = req->id.idiag_sport; 426 laddr.v6.sin6_port = req->id.idiag_sport;
421 memcpy(&laddr.v6.sin6_addr, req->id.idiag_src, 64); 427 memcpy(&laddr.v6.sin6_addr, req->id.idiag_src,
428 sizeof(laddr.v6.sin6_addr));
422 laddr.v6.sin6_family = AF_INET6; 429 laddr.v6.sin6_family = AF_INET6;
423 430
424 paddr.v6.sin6_port = req->id.idiag_dport; 431 paddr.v6.sin6_port = req->id.idiag_dport;
425 memcpy(&paddr.v6.sin6_addr, req->id.idiag_dst, 64); 432 memcpy(&paddr.v6.sin6_addr, req->id.idiag_dst,
433 sizeof(paddr.v6.sin6_addr));
426 paddr.v6.sin6_family = AF_INET6; 434 paddr.v6.sin6_family = AF_INET6;
427 } 435 }
428 436
@@ -465,7 +473,7 @@ skip:
465 * 3 : to mark if we have dumped the ep info of the current asoc 473 * 3 : to mark if we have dumped the ep info of the current asoc
466 * 4 : to work as a temporary variable to traversal list 474 * 4 : to work as a temporary variable to traversal list
467 */ 475 */
468 if (!(idiag_states & ~TCPF_LISTEN)) 476 if (!(idiag_states & ~(TCPF_LISTEN | TCPF_CLOSE)))
469 goto done; 477 goto done;
470 sctp_for_each_transport(sctp_tsp_dump, net, cb->args[2], &commp); 478 sctp_for_each_transport(sctp_tsp_dump, net, cb->args[2], &commp);
471done: 479done:
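
The sctp_diag_dump_one() hunk replaces a hard-coded 64-byte copy with sizeof() of the 16-byte IPv6 address field, eliminating an over-read of the request and an overflow of the destination. The safe idiom, in miniature:

#include <stdio.h>
#include <string.h>

struct sin6 { unsigned char addr[16]; };

int main(void)
{
    unsigned char req_src[16] = { 0x20, 0x01 };  /* pretend request buffer */
    struct sin6 laddr;

    /* Copy sizeof(destination field), never a hard-coded constant. */
    memcpy(laddr.addr, req_src, sizeof(laddr.addr));  /* 16, not 64 */
    printf("copied %zu bytes\n", sizeof(laddr.addr));
    return 0;
}
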
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
index 1bc4f71aaba8..d85b803da11d 100644
--- a/net/sctp/ulpevent.c
+++ b/net/sctp/ulpevent.c
@@ -702,14 +702,14 @@ struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc,
702 */ 702 */
703 sctp_ulpevent_init(event, 0, skb->len + sizeof(struct sk_buff)); 703 sctp_ulpevent_init(event, 0, skb->len + sizeof(struct sk_buff));
704 704
705 sctp_ulpevent_receive_data(event, asoc);
706
707 /* And hold the chunk as we need it for getting the IP headers 705 /* And hold the chunk as we need it for getting the IP headers
708 * later in recvmsg 706 * later in recvmsg
709 */ 707 */
710 sctp_chunk_hold(chunk); 708 sctp_chunk_hold(chunk);
711 event->chunk = chunk; 709 event->chunk = chunk;
712 710
711 sctp_ulpevent_receive_data(event, asoc);
712
713 event->stream = ntohs(chunk->subh.data_hdr->stream); 713 event->stream = ntohs(chunk->subh.data_hdr->stream);
714 event->ssn = ntohs(chunk->subh.data_hdr->ssn); 714 event->ssn = ntohs(chunk->subh.data_hdr->ssn);
715 event->ppid = chunk->subh.data_hdr->ppid; 715 event->ppid = chunk->subh.data_hdr->ppid;
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 23c8e7c39656..976c7812bbd5 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -340,12 +340,14 @@ gss_release_msg(struct gss_upcall_msg *gss_msg)
340} 340}
341 341
342static struct gss_upcall_msg * 342static struct gss_upcall_msg *
343__gss_find_upcall(struct rpc_pipe *pipe, kuid_t uid) 343__gss_find_upcall(struct rpc_pipe *pipe, kuid_t uid, const struct gss_auth *auth)
344{ 344{
345 struct gss_upcall_msg *pos; 345 struct gss_upcall_msg *pos;
346 list_for_each_entry(pos, &pipe->in_downcall, list) { 346 list_for_each_entry(pos, &pipe->in_downcall, list) {
347 if (!uid_eq(pos->uid, uid)) 347 if (!uid_eq(pos->uid, uid))
348 continue; 348 continue;
349 if (auth && pos->auth->service != auth->service)
350 continue;
349 atomic_inc(&pos->count); 351 atomic_inc(&pos->count);
350 dprintk("RPC: %s found msg %p\n", __func__, pos); 352 dprintk("RPC: %s found msg %p\n", __func__, pos);
351 return pos; 353 return pos;
@@ -365,7 +367,7 @@ gss_add_msg(struct gss_upcall_msg *gss_msg)
365 struct gss_upcall_msg *old; 367 struct gss_upcall_msg *old;
366 368
367 spin_lock(&pipe->lock); 369 spin_lock(&pipe->lock);
368 old = __gss_find_upcall(pipe, gss_msg->uid); 370 old = __gss_find_upcall(pipe, gss_msg->uid, gss_msg->auth);
369 if (old == NULL) { 371 if (old == NULL) {
370 atomic_inc(&gss_msg->count); 372 atomic_inc(&gss_msg->count);
371 list_add(&gss_msg->list, &pipe->in_downcall); 373 list_add(&gss_msg->list, &pipe->in_downcall);
@@ -714,7 +716,7 @@ gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
714 err = -ENOENT; 716 err = -ENOENT;
715 /* Find a matching upcall */ 717 /* Find a matching upcall */
716 spin_lock(&pipe->lock); 718 spin_lock(&pipe->lock);
717 gss_msg = __gss_find_upcall(pipe, uid); 719 gss_msg = __gss_find_upcall(pipe, uid, NULL);
718 if (gss_msg == NULL) { 720 if (gss_msg == NULL) {
719 spin_unlock(&pipe->lock); 721 spin_unlock(&pipe->lock);
720 goto err_put_ctx; 722 goto err_put_ctx;
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index 1d281816f2bf..d8582028b346 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -569,9 +569,10 @@ gss_svc_searchbyctx(struct cache_detail *cd, struct xdr_netobj *handle)
569 struct rsc *found; 569 struct rsc *found;
570 570
571 memset(&rsci, 0, sizeof(rsci)); 571 memset(&rsci, 0, sizeof(rsci));
572 rsci.handle.data = handle->data; 572 if (dup_to_netobj(&rsci.handle, handle->data, handle->len))
573 rsci.handle.len = handle->len; 573 return NULL;
574 found = rsc_lookup(cd, &rsci); 574 found = rsc_lookup(cd, &rsci);
575 rsc_free(&rsci);
575 if (!found) 576 if (!found)
576 return NULL; 577 return NULL;
577 if (cache_check(cd, &found->h, NULL)) 578 if (cache_check(cd, &found->h, NULL))
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index cb49898a5a58..66f23b376fa0 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -453,7 +453,7 @@ static struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
453 struct rpc_xprt_switch *xps; 453 struct rpc_xprt_switch *xps;
454 454
455 if (args->bc_xprt && args->bc_xprt->xpt_bc_xps) { 455 if (args->bc_xprt && args->bc_xprt->xpt_bc_xps) {
456 WARN_ON(args->protocol != XPRT_TRANSPORT_BC_TCP); 456 WARN_ON_ONCE(!(args->protocol & XPRT_TRANSPORT_BC));
457 xps = args->bc_xprt->xpt_bc_xps; 457 xps = args->bc_xprt->xpt_bc_xps;
458 xprt_switch_get(xps); 458 xprt_switch_get(xps);
459 } else { 459 } else {
@@ -520,7 +520,7 @@ struct rpc_clnt *rpc_create(struct rpc_create_args *args)
520 char servername[48]; 520 char servername[48];
521 521
522 if (args->bc_xprt) { 522 if (args->bc_xprt) {
523 WARN_ON(args->protocol != XPRT_TRANSPORT_BC_TCP); 523 WARN_ON_ONCE(!(args->protocol & XPRT_TRANSPORT_BC));
524 xprt = args->bc_xprt->xpt_bc_xprt; 524 xprt = args->bc_xprt->xpt_bc_xprt;
525 if (xprt) { 525 if (xprt) {
526 xprt_get(xprt); 526 xprt_get(xprt);
@@ -2638,6 +2638,7 @@ int rpc_clnt_add_xprt(struct rpc_clnt *clnt,
2638{ 2638{
2639 struct rpc_xprt_switch *xps; 2639 struct rpc_xprt_switch *xps;
2640 struct rpc_xprt *xprt; 2640 struct rpc_xprt *xprt;
2641 unsigned long reconnect_timeout;
2641 unsigned char resvport; 2642 unsigned char resvport;
2642 int ret = 0; 2643 int ret = 0;
2643 2644
@@ -2649,6 +2650,7 @@ int rpc_clnt_add_xprt(struct rpc_clnt *clnt,
2649 return -EAGAIN; 2650 return -EAGAIN;
2650 } 2651 }
2651 resvport = xprt->resvport; 2652 resvport = xprt->resvport;
2653 reconnect_timeout = xprt->max_reconnect_timeout;
2652 rcu_read_unlock(); 2654 rcu_read_unlock();
2653 2655
2654 xprt = xprt_create_transport(xprtargs); 2656 xprt = xprt_create_transport(xprtargs);
@@ -2657,6 +2659,7 @@ int rpc_clnt_add_xprt(struct rpc_clnt *clnt,
2657 goto out_put_switch; 2659 goto out_put_switch;
2658 } 2660 }
2659 xprt->resvport = resvport; 2661 xprt->resvport = resvport;
2662 xprt->max_reconnect_timeout = reconnect_timeout;
2660 2663
2661 rpc_xprt_switch_set_roundrobin(xps); 2664 rpc_xprt_switch_set_roundrobin(xps);
2662 if (setup) { 2665 if (setup) {
@@ -2673,6 +2676,27 @@ out_put_switch:
2673} 2676}
2674EXPORT_SYMBOL_GPL(rpc_clnt_add_xprt); 2677EXPORT_SYMBOL_GPL(rpc_clnt_add_xprt);
2675 2678
2679static int
2680rpc_xprt_cap_max_reconnect_timeout(struct rpc_clnt *clnt,
2681 struct rpc_xprt *xprt,
2682 void *data)
2683{
2684 unsigned long timeout = *((unsigned long *)data);
2685
2686 if (timeout < xprt->max_reconnect_timeout)
2687 xprt->max_reconnect_timeout = timeout;
2688 return 0;
2689}
2690
2691void
2692rpc_cap_max_reconnect_timeout(struct rpc_clnt *clnt, unsigned long timeo)
2693{
2694 rpc_clnt_iterate_for_each_xprt(clnt,
2695 rpc_xprt_cap_max_reconnect_timeout,
2696 &timeo);
2697}
2698EXPORT_SYMBOL_GPL(rpc_cap_max_reconnect_timeout);
2699
2676#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) 2700#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
2677static void rpc_show_header(void) 2701static void rpc_show_header(void)
2678{ 2702{
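
rpc_cap_max_reconnect_timeout() above lowers each transport's reconnect cap to the requested value but never raises it, via the client's for-each-xprt iterator. A sketch of the capping rule over a plain array:

#include <stdio.h>

struct xprt { unsigned long max_reconnect_timeout; };

static void cap_one(struct xprt *x, unsigned long timeo)
{
    if (timeo < x->max_reconnect_timeout)
        x->max_reconnect_timeout = timeo;   /* lower only, never raise */
}

int main(void)
{
    struct xprt xs[] = { { 300 }, { 60 }, { 900 } };

    for (int i = 0; i < 3; i++)
        cap_one(&xs[i], 120);
    for (int i = 0; i < 3; i++)
        printf("xprt %d cap=%lu\n", i, xs[i].max_reconnect_timeout);
    /* 120, 60, 120: pre-existing smaller caps are preserved. */
    return 0;
}
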
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 8313960cac52..ea244b29138b 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -680,6 +680,20 @@ out:
680 spin_unlock_bh(&xprt->transport_lock); 680 spin_unlock_bh(&xprt->transport_lock);
681} 681}
682 682
683static bool
684xprt_has_timer(const struct rpc_xprt *xprt)
685{
686 return xprt->idle_timeout != 0;
687}
688
689static void
690xprt_schedule_autodisconnect(struct rpc_xprt *xprt)
691 __must_hold(&xprt->transport_lock)
692{
693 if (list_empty(&xprt->recv) && xprt_has_timer(xprt))
694 mod_timer(&xprt->timer, xprt->last_used + xprt->idle_timeout);
695}
696
683static void 697static void
684xprt_init_autodisconnect(unsigned long data) 698xprt_init_autodisconnect(unsigned long data)
685{ 699{
@@ -688,6 +702,8 @@ xprt_init_autodisconnect(unsigned long data)
688 spin_lock(&xprt->transport_lock); 702 spin_lock(&xprt->transport_lock);
689 if (!list_empty(&xprt->recv)) 703 if (!list_empty(&xprt->recv))
690 goto out_abort; 704 goto out_abort;
705 /* Reset xprt->last_used to avoid connect/autodisconnect cycling */
706 xprt->last_used = jiffies;
691 if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) 707 if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
692 goto out_abort; 708 goto out_abort;
693 spin_unlock(&xprt->transport_lock); 709 spin_unlock(&xprt->transport_lock);
@@ -725,6 +741,7 @@ void xprt_unlock_connect(struct rpc_xprt *xprt, void *cookie)
725 goto out; 741 goto out;
726 xprt->snd_task = NULL; 742 xprt->snd_task = NULL;
727 xprt->ops->release_xprt(xprt, NULL); 743 xprt->ops->release_xprt(xprt, NULL);
744 xprt_schedule_autodisconnect(xprt);
728out: 745out:
729 spin_unlock_bh(&xprt->transport_lock); 746 spin_unlock_bh(&xprt->transport_lock);
730 wake_up_bit(&xprt->state, XPRT_LOCKED); 747 wake_up_bit(&xprt->state, XPRT_LOCKED);
@@ -888,11 +905,6 @@ static void xprt_timer(struct rpc_task *task)
888 spin_unlock_bh(&xprt->transport_lock); 905 spin_unlock_bh(&xprt->transport_lock);
889} 906}
890 907
891static inline int xprt_has_timer(struct rpc_xprt *xprt)
892{
893 return xprt->idle_timeout != 0;
894}
895
896/** 908/**
897 * xprt_prepare_transmit - reserve the transport before sending a request 909 * xprt_prepare_transmit - reserve the transport before sending a request
898 * @task: RPC task about to send a request 910 * @task: RPC task about to send a request
@@ -1280,9 +1292,7 @@ void xprt_release(struct rpc_task *task)
1280 if (!list_empty(&req->rq_list)) 1292 if (!list_empty(&req->rq_list))
1281 list_del(&req->rq_list); 1293 list_del(&req->rq_list);
1282 xprt->last_used = jiffies; 1294 xprt->last_used = jiffies;
1283 if (list_empty(&xprt->recv) && xprt_has_timer(xprt)) 1295 xprt_schedule_autodisconnect(xprt);
1284 mod_timer(&xprt->timer,
1285 xprt->last_used + xprt->idle_timeout);
1286 spin_unlock_bh(&xprt->transport_lock); 1296 spin_unlock_bh(&xprt->transport_lock);
1287 if (req->rq_buffer) 1297 if (req->rq_buffer)
1288 xprt->ops->buf_free(req->rq_buffer); 1298 xprt->ops->buf_free(req->rq_buffer);
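
xprt_schedule_autodisconnect() centralizes the idle-timer logic: arm the timer at last_used + idle_timeout only when nothing is on the receive list and the transport actually has an idle timeout; the autodisconnect callback also refreshes last_used so a stale timestamp cannot cause connect/disconnect cycling. A bare sketch of the arming rule (stand-in types, no real timers):

#include <stdio.h>

struct xprt { unsigned long last_used, idle_timeout; int recv_pending; };

static void schedule_autodisconnect(struct xprt *x, unsigned long *timer)
{
    /* Arm only when configured and idle, mirroring the helper above. */
    if (!x->recv_pending && x->idle_timeout != 0)
        *timer = x->last_used + x->idle_timeout;  /* mod_timer() stand-in */
}

int main(void)
{
    struct xprt x = { .last_used = 1000, .idle_timeout = 300 };
    unsigned long timer = 0;

    schedule_autodisconnect(&x, &timer);
    printf("timer fires at %lu\n", timer);  /* 1300 */
    return 0;
}
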
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 536d0be3f61b..799cce6cbe45 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -51,6 +51,7 @@
51#include <linux/slab.h> 51#include <linux/slab.h>
52#include <linux/prefetch.h> 52#include <linux/prefetch.h>
53#include <linux/sunrpc/addr.h> 53#include <linux/sunrpc/addr.h>
54#include <linux/sunrpc/svc_rdma.h>
54#include <asm/bitops.h> 55#include <asm/bitops.h>
55#include <linux/module.h> /* try_module_get()/module_put() */ 56#include <linux/module.h> /* try_module_get()/module_put() */
56 57
@@ -923,7 +924,7 @@ rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
923 } 924 }
924 925
925 INIT_LIST_HEAD(&buf->rb_recv_bufs); 926 INIT_LIST_HEAD(&buf->rb_recv_bufs);
926 for (i = 0; i < buf->rb_max_requests; i++) { 927 for (i = 0; i < buf->rb_max_requests + RPCRDMA_MAX_BC_REQUESTS; i++) {
927 struct rpcrdma_rep *rep; 928 struct rpcrdma_rep *rep;
928 929
929 rep = rpcrdma_create_rep(r_xprt); 930 rep = rpcrdma_create_rep(r_xprt);
@@ -1018,6 +1019,7 @@ rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
1018 rep = rpcrdma_buffer_get_rep_locked(buf); 1019 rep = rpcrdma_buffer_get_rep_locked(buf);
1019 rpcrdma_destroy_rep(ia, rep); 1020 rpcrdma_destroy_rep(ia, rep);
1020 } 1021 }
1022 buf->rb_send_count = 0;
1021 1023
1022 spin_lock(&buf->rb_reqslock); 1024 spin_lock(&buf->rb_reqslock);
1023 while (!list_empty(&buf->rb_allreqs)) { 1025 while (!list_empty(&buf->rb_allreqs)) {
@@ -1032,6 +1034,7 @@ rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
1032 spin_lock(&buf->rb_reqslock); 1034 spin_lock(&buf->rb_reqslock);
1033 } 1035 }
1034 spin_unlock(&buf->rb_reqslock); 1036 spin_unlock(&buf->rb_reqslock);
1037 buf->rb_recv_count = 0;
1035 1038
1036 rpcrdma_destroy_mrs(buf); 1039 rpcrdma_destroy_mrs(buf);
1037} 1040}
@@ -1074,8 +1077,27 @@ rpcrdma_put_mw(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mw *mw)
1074 spin_unlock(&buf->rb_mwlock); 1077 spin_unlock(&buf->rb_mwlock);
1075} 1078}
1076 1079
1080static struct rpcrdma_rep *
1081rpcrdma_buffer_get_rep(struct rpcrdma_buffer *buffers)
1082{
1083 /* If an RPC previously completed without a reply (say, a
1084 * credential problem or a soft timeout occurred), then hold off
1085 * on supplying more Receive buffers until the number of new
1086 * pending RPCs catches up to the number of posted Receives.
1087 */
1088 if (unlikely(buffers->rb_send_count < buffers->rb_recv_count))
1089 return NULL;
1090
1091 if (unlikely(list_empty(&buffers->rb_recv_bufs)))
1092 return NULL;
1093 buffers->rb_recv_count++;
1094 return rpcrdma_buffer_get_rep_locked(buffers);
1095}
1096
1077/* 1097/*
1078 * Get a set of request/reply buffers. 1098 * Get a set of request/reply buffers.
1099 *
1100 * Reply buffer (if available) is attached to send buffer upon return.
1079 */ 1101 */
1080struct rpcrdma_req * 1102struct rpcrdma_req *
1081rpcrdma_buffer_get(struct rpcrdma_buffer *buffers) 1103rpcrdma_buffer_get(struct rpcrdma_buffer *buffers)
@@ -1085,21 +1107,15 @@ rpcrdma_buffer_get(struct rpcrdma_buffer *buffers)
1085 spin_lock(&buffers->rb_lock); 1107 spin_lock(&buffers->rb_lock);
1086 if (list_empty(&buffers->rb_send_bufs)) 1108 if (list_empty(&buffers->rb_send_bufs))
1087 goto out_reqbuf; 1109 goto out_reqbuf;
1110 buffers->rb_send_count++;
1088 req = rpcrdma_buffer_get_req_locked(buffers); 1111 req = rpcrdma_buffer_get_req_locked(buffers);
1089 if (list_empty(&buffers->rb_recv_bufs)) 1112 req->rl_reply = rpcrdma_buffer_get_rep(buffers);
1090 goto out_repbuf;
1091 req->rl_reply = rpcrdma_buffer_get_rep_locked(buffers);
1092 spin_unlock(&buffers->rb_lock); 1113 spin_unlock(&buffers->rb_lock);
1093 return req; 1114 return req;
1094 1115
1095out_reqbuf: 1116out_reqbuf:
1096 spin_unlock(&buffers->rb_lock); 1117 spin_unlock(&buffers->rb_lock);
1097 pr_warn("rpcrdma: out of request buffers (%p)\n", buffers); 1118 pr_warn("RPC: %s: out of request buffers\n", __func__);
1098 return NULL;
1099out_repbuf:
1100 list_add(&req->rl_free, &buffers->rb_send_bufs);
1101 spin_unlock(&buffers->rb_lock);
1102 pr_warn("rpcrdma: out of reply buffers (%p)\n", buffers);
1103 return NULL; 1119 return NULL;
1104} 1120}
1105 1121
@@ -1117,9 +1133,12 @@ rpcrdma_buffer_put(struct rpcrdma_req *req)
1117 req->rl_reply = NULL; 1133 req->rl_reply = NULL;
1118 1134
1119 spin_lock(&buffers->rb_lock); 1135 spin_lock(&buffers->rb_lock);
1136 buffers->rb_send_count--;
1120 list_add_tail(&req->rl_free, &buffers->rb_send_bufs); 1137 list_add_tail(&req->rl_free, &buffers->rb_send_bufs);
1121 if (rep) 1138 if (rep) {
1139 buffers->rb_recv_count--;
1122 list_add_tail(&rep->rr_list, &buffers->rb_recv_bufs); 1140 list_add_tail(&rep->rr_list, &buffers->rb_recv_bufs);
1141 }
1123 spin_unlock(&buffers->rb_lock); 1142 spin_unlock(&buffers->rb_lock);
1124} 1143}
1125 1144
@@ -1133,8 +1152,7 @@ rpcrdma_recv_buffer_get(struct rpcrdma_req *req)
1133 struct rpcrdma_buffer *buffers = req->rl_buffer; 1152 struct rpcrdma_buffer *buffers = req->rl_buffer;
1134 1153
1135 spin_lock(&buffers->rb_lock); 1154 spin_lock(&buffers->rb_lock);
1136 if (!list_empty(&buffers->rb_recv_bufs)) 1155 req->rl_reply = rpcrdma_buffer_get_rep(buffers);
1137 req->rl_reply = rpcrdma_buffer_get_rep_locked(buffers);
1138 spin_unlock(&buffers->rb_lock); 1156 spin_unlock(&buffers->rb_lock);
1139} 1157}
1140 1158
@@ -1148,6 +1166,7 @@ rpcrdma_recv_buffer_put(struct rpcrdma_rep *rep)
1148 struct rpcrdma_buffer *buffers = &rep->rr_rxprt->rx_buf; 1166 struct rpcrdma_buffer *buffers = &rep->rr_rxprt->rx_buf;
1149 1167
1150 spin_lock(&buffers->rb_lock); 1168 spin_lock(&buffers->rb_lock);
1169 buffers->rb_recv_count--;
1151 list_add_tail(&rep->rr_list, &buffers->rb_recv_bufs); 1170 list_add_tail(&rep->rr_list, &buffers->rb_recv_bufs);
1152 spin_unlock(&buffers->rb_lock); 1171 spin_unlock(&buffers->rb_lock);
1153} 1172}
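
The rpcrdma_buffer_get_rep() comment explains the throttle: after RPCs that completed without a reply, posted receives run ahead of sends, so no further receive buffers are handed out until rb_send_count catches up with rb_recv_count. A sketch of that pairing logic (plain ints, no locking):

#include <stdio.h>

struct pool { int send_count, recv_count, recv_avail; };

static int get_rep(struct pool *p)
{
    if (p->send_count < p->recv_count)
        return 0;                 /* hold off: receives already ahead */
    if (p->recv_avail == 0)
        return 0;                 /* pool empty */
    p->recv_avail--;
    p->recv_count++;
    return 1;
}

int main(void)
{
    struct pool p = { .send_count = 1, .recv_count = 2, .recv_avail = 4 };

    printf("got rep? %d\n", get_rep(&p));   /* 0: sends must catch up */
    p.send_count = 3;
    printf("got rep? %d\n", get_rep(&p));   /* 1 */
    return 0;
}
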
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index 670fad57153a..a71b0f5897d8 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -321,6 +321,7 @@ struct rpcrdma_buffer {
321 char *rb_pool; 321 char *rb_pool;
322 322
323 spinlock_t rb_lock; /* protect buf lists */ 323 spinlock_t rb_lock; /* protect buf lists */
324 int rb_send_count, rb_recv_count;
324 struct list_head rb_send_bufs; 325 struct list_head rb_send_bufs;
325 struct list_head rb_recv_bufs; 326 struct list_head rb_recv_bufs;
326 u32 rb_max_requests; 327 u32 rb_max_requests;
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 111767ab124a..bf168838a029 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -177,7 +177,6 @@ static struct ctl_table sunrpc_table[] = {
177 * increase over time if the server is down or not responding. 177 * increase over time if the server is down or not responding.
178 */ 178 */
179#define XS_TCP_INIT_REEST_TO (3U * HZ) 179#define XS_TCP_INIT_REEST_TO (3U * HZ)
180#define XS_TCP_MAX_REEST_TO (5U * 60 * HZ)
181 180
182/* 181/*
183 * TCP idle timeout; client drops the transport socket if it is idle 182 * TCP idle timeout; client drops the transport socket if it is idle
@@ -1075,7 +1074,7 @@ static void xs_udp_data_receive(struct sock_xprt *transport)
1075 skb = skb_recv_datagram(sk, 0, 1, &err); 1074 skb = skb_recv_datagram(sk, 0, 1, &err);
1076 if (skb != NULL) { 1075 if (skb != NULL) {
1077 xs_udp_data_read_skb(&transport->xprt, sk, skb); 1076 xs_udp_data_read_skb(&transport->xprt, sk, skb);
1078 skb_free_datagram(sk, skb); 1077 skb_free_datagram_locked(sk, skb);
1079 continue; 1078 continue;
1080 } 1079 }
1081 if (!test_and_clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state)) 1080 if (!test_and_clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state))
@@ -2173,6 +2172,8 @@ static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
2173 write_unlock_bh(&sk->sk_callback_lock); 2172 write_unlock_bh(&sk->sk_callback_lock);
2174 } 2173 }
2175 xs_udp_do_set_buffer_size(xprt); 2174 xs_udp_do_set_buffer_size(xprt);
2175
2176 xprt->stat.connect_start = jiffies;
2176} 2177}
2177 2178
2178static void xs_udp_setup_socket(struct work_struct *work) 2179static void xs_udp_setup_socket(struct work_struct *work)
@@ -2236,6 +2237,7 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
2236 unsigned int keepcnt = xprt->timeout->to_retries + 1; 2237 unsigned int keepcnt = xprt->timeout->to_retries + 1;
2237 unsigned int opt_on = 1; 2238 unsigned int opt_on = 1;
2238 unsigned int timeo; 2239 unsigned int timeo;
2240 unsigned int addr_pref = IPV6_PREFER_SRC_PUBLIC;
2239 2241
2240 /* TCP Keepalive options */ 2242 /* TCP Keepalive options */
2241 kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE, 2243 kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE,
@@ -2247,6 +2249,16 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
2247 kernel_setsockopt(sock, SOL_TCP, TCP_KEEPCNT, 2249 kernel_setsockopt(sock, SOL_TCP, TCP_KEEPCNT,
2248 (char *)&keepcnt, sizeof(keepcnt)); 2250 (char *)&keepcnt, sizeof(keepcnt));
2249 2251
2252 /* Avoid temporary addresses; they are bad for long-lived
2253 * connections such as NFS mounts.
2254 * RFC4941, section 3.6 suggests that:
2255 * Individual applications, which have specific
2256 * knowledge about the normal duration of connections,
2257 * MAY override this as appropriate.
2258 */
2259 kernel_setsockopt(sock, SOL_IPV6, IPV6_ADDR_PREFERENCES,
2260 (char *)&addr_pref, sizeof(addr_pref));
2261
2250 /* TCP user timeout (see RFC5482) */ 2262 /* TCP user timeout (see RFC5482) */
2251 timeo = jiffies_to_msecs(xprt->timeout->to_initval) * 2263 timeo = jiffies_to_msecs(xprt->timeout->to_initval) *
2252 (xprt->timeout->to_retries + 1); 2264 (xprt->timeout->to_retries + 1);
@@ -2295,6 +2307,10 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
2295 /* SYN_SENT! */ 2307 /* SYN_SENT! */
2296 if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO) 2308 if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
2297 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; 2309 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
2310 break;
2311 case -EADDRNOTAVAIL:
2312 /* Source port number is unavailable. Try a new one! */
2313 transport->srcport = 0;
2298 } 2314 }
2299out: 2315out:
2300 return ret; 2316 return ret;
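
The xs_tcp_finish_connecting() hunk asks the IPv6 stack for a stable source address, since RFC 4941 temporary addresses can disappear under a long-lived NFS connection. A user-space analogue of that setsockopt (the RFC 5014 constants come from the Linux UAPI headers; the fallback defines below are only for older libcs):

#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef IPV6_ADDR_PREFERENCES
# define IPV6_ADDR_PREFERENCES 72       /* from <linux/in6.h> */
# define IPV6_PREFER_SRC_PUBLIC 0x0002
#endif

int main(void)
{
    int fd = socket(AF_INET6, SOCK_STREAM, 0);
    int pref = IPV6_PREFER_SRC_PUBLIC;

    if (fd < 0)
        return 1;
    if (setsockopt(fd, IPPROTO_IPV6, IPV6_ADDR_PREFERENCES,
                   &pref, sizeof(pref)) < 0)
        perror("setsockopt");   /* may be unsupported on older kernels */
    close(fd);
    return 0;
}
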
@@ -2369,6 +2385,25 @@ out:
2369 xprt_wake_pending_tasks(xprt, status); 2385 xprt_wake_pending_tasks(xprt, status);
2370} 2386}
2371 2387
2388static unsigned long xs_reconnect_delay(const struct rpc_xprt *xprt)
2389{
2390 unsigned long start, now = jiffies;
2391
2392 start = xprt->stat.connect_start + xprt->reestablish_timeout;
2393 if (time_after(start, now))
2394 return start - now;
2395 return 0;
2396}
2397
2398static void xs_reconnect_backoff(struct rpc_xprt *xprt)
2399{
2400 xprt->reestablish_timeout <<= 1;
2401 if (xprt->reestablish_timeout > xprt->max_reconnect_timeout)
2402 xprt->reestablish_timeout = xprt->max_reconnect_timeout;
2403 if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
2404 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
2405}
2406
2372/** 2407/**
2373 * xs_connect - connect a socket to a remote endpoint 2408 * xs_connect - connect a socket to a remote endpoint
2374 * @xprt: pointer to transport structure 2409 * @xprt: pointer to transport structure
@@ -2386,6 +2421,7 @@ out:
2386static void xs_connect(struct rpc_xprt *xprt, struct rpc_task *task) 2421static void xs_connect(struct rpc_xprt *xprt, struct rpc_task *task)
2387{ 2422{
2388 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 2423 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2424 unsigned long delay = 0;
2389 2425
2390 WARN_ON_ONCE(!xprt_lock_connect(xprt, task, transport)); 2426 WARN_ON_ONCE(!xprt_lock_connect(xprt, task, transport));
2391 2427
@@ -2397,19 +2433,15 @@ static void xs_connect(struct rpc_xprt *xprt, struct rpc_task *task)
2397 /* Start by resetting any existing state */ 2433 /* Start by resetting any existing state */
2398 xs_reset_transport(transport); 2434 xs_reset_transport(transport);
2399 2435
2400 queue_delayed_work(xprtiod_workqueue, 2436 delay = xs_reconnect_delay(xprt);
2401 &transport->connect_worker, 2437 xs_reconnect_backoff(xprt);
2402 xprt->reestablish_timeout); 2438
2403 xprt->reestablish_timeout <<= 1; 2439 } else
2404 if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
2405 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
2406 if (xprt->reestablish_timeout > XS_TCP_MAX_REEST_TO)
2407 xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO;
2408 } else {
2409 dprintk("RPC: xs_connect scheduled xprt %p\n", xprt); 2440 dprintk("RPC: xs_connect scheduled xprt %p\n", xprt);
2410 queue_delayed_work(xprtiod_workqueue, 2441
2411 &transport->connect_worker, 0); 2442 queue_delayed_work(xprtiod_workqueue,
2412 } 2443 &transport->connect_worker,
2444 delay);
2413} 2445}
2414 2446
2415/** 2447/**
@@ -2961,6 +2993,8 @@ static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args)
2961 xprt->ops = &xs_tcp_ops; 2993 xprt->ops = &xs_tcp_ops;
2962 xprt->timeout = &xs_tcp_default_timeout; 2994 xprt->timeout = &xs_tcp_default_timeout;
2963 2995
2996 xprt->max_reconnect_timeout = xprt->timeout->to_maxval;
2997
2964 INIT_WORK(&transport->recv_worker, xs_tcp_data_receive_workfn); 2998 INIT_WORK(&transport->recv_worker, xs_tcp_data_receive_workfn);
2965 INIT_DELAYED_WORK(&transport->connect_worker, xs_tcp_setup_socket); 2999 INIT_DELAYED_WORK(&transport->connect_worker, xs_tcp_setup_socket);
2966 3000
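
xs_reconnect_delay()/xs_reconnect_backoff() above implement classic bounded exponential backoff: wait out the remainder of the current interval, then double it, clamped between the initial value and the new per-transport max_reconnect_timeout. The core of it, as a stand-alone sketch:

#include <stdio.h>

#define INIT_REEST_TO 3     /* seconds; stand-in for XS_TCP_INIT_REEST_TO */

static unsigned long backoff(unsigned long cur, unsigned long max)
{
    cur <<= 1;              /* double the delay ... */
    if (cur > max)
        cur = max;          /* ... clamp to the per-transport cap ... */
    if (cur < INIT_REEST_TO)
        cur = INIT_REEST_TO;/* ... and never drop below the floor */
    return cur;
}

int main(void)
{
    unsigned long to = INIT_REEST_TO;

    for (int i = 0; i < 6; i++) {
        printf("attempt %d: wait %lus\n", i, to);
        to = backoff(to, 60);   /* capped at max_reconnect_timeout */
    }
    return 0;
}
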
diff --git a/net/tipc/monitor.c b/net/tipc/monitor.c
index b62caa1c770c..ed97a5876ebe 100644
--- a/net/tipc/monitor.c
+++ b/net/tipc/monitor.c
@@ -728,12 +728,13 @@ int tipc_nl_add_monitor_peer(struct net *net, struct tipc_nl_msg *msg,
728 u32 bearer_id, u32 *prev_node) 728 u32 bearer_id, u32 *prev_node)
729{ 729{
730 struct tipc_monitor *mon = tipc_monitor(net, bearer_id); 730 struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
731 struct tipc_peer *peer = mon->self; 731 struct tipc_peer *peer;
732 732
733 if (!mon) 733 if (!mon)
734 return -EINVAL; 734 return -EINVAL;
735 735
736 read_lock_bh(&mon->lock); 736 read_lock_bh(&mon->lock);
737 peer = mon->self;
737 do { 738 do {
738 if (*prev_node) { 739 if (*prev_node) {
739 if (peer->addr == *prev_node) 740 if (peer->addr == *prev_node)
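
The tipc_nl_add_monitor_peer() fix is the usual ordering rule: the mon->self load moves after the NULL check on mon (and under the lock), so the pointer is never dereferenced before it is validated. In miniature:

#include <stdio.h>

struct mon { int self; };

static int add_peer(struct mon *mon)
{
    if (!mon)
        return -22;          /* -EINVAL: check first ... */

    int peer = mon->self;    /* ... dereference second */
    return peer;
}

int main(void)
{
    struct mon m = { 7 };

    printf("%d\n", add_peer(&m));    /* 7 */
    printf("%d\n", add_peer(NULL));  /* -22, no crash */
    return 0;
}
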
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index 6b626a64b517..a04fe9be1c60 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -62,6 +62,8 @@ static void publ_to_item(struct distr_item *i, struct publication *p)
62 62
63/** 63/**
64 * named_prepare_buf - allocate & initialize a publication message 64 * named_prepare_buf - allocate & initialize a publication message
65 *
66 * The buffer returned is of size INT_H_SIZE + payload size
65 */ 67 */
66static struct sk_buff *named_prepare_buf(struct net *net, u32 type, u32 size, 68static struct sk_buff *named_prepare_buf(struct net *net, u32 type, u32 size,
67 u32 dest) 69 u32 dest)
@@ -141,9 +143,9 @@ static void named_distribute(struct net *net, struct sk_buff_head *list,
141 struct publication *publ; 143 struct publication *publ;
142 struct sk_buff *skb = NULL; 144 struct sk_buff *skb = NULL;
143 struct distr_item *item = NULL; 145 struct distr_item *item = NULL;
144 uint msg_dsz = (tipc_node_get_mtu(net, dnode, 0) / ITEM_SIZE) * 146 u32 msg_dsz = ((tipc_node_get_mtu(net, dnode, 0) - INT_H_SIZE) /
145 ITEM_SIZE; 147 ITEM_SIZE) * ITEM_SIZE;
146 uint msg_rem = msg_dsz; 148 u32 msg_rem = msg_dsz;
147 149
148 list_for_each_entry(publ, pls, local_list) { 150 list_for_each_entry(publ, pls, local_list) {
149 /* Prepare next buffer: */ 151 /* Prepare next buffer: */
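
The named_distribute() fix subtracts the INT_H_SIZE message header from the MTU before rounding down to a whole number of items, so header plus payload always fits in one packet. The bucket arithmetic, with illustrative sizes (TIPC's INT_H_SIZE is 40 bytes; the item size here is just an example):

#include <stdio.h>

#define INT_H_SIZE 40   /* TIPC internal header, per the comment above */
#define ITEM_SIZE  20   /* illustrative distr_item size */

int main(void)
{
    unsigned int mtu = 1500;
    unsigned int before = (mtu / ITEM_SIZE) * ITEM_SIZE;
    unsigned int after  = ((mtu - INT_H_SIZE) / ITEM_SIZE) * ITEM_SIZE;

    printf("old: %u payload + %u header = %u (> MTU %u)\n",
           before, INT_H_SIZE, before + INT_H_SIZE, mtu);
    printf("new: %u payload + %u header = %u (fits)\n",
           after, INT_H_SIZE, after + INT_H_SIZE);
    return 0;
}
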
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index c49b8df438cb..f9f5f3c3dab5 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -2180,7 +2180,8 @@ restart:
2180 TIPC_CONN_MSG, SHORT_H_SIZE, 2180 TIPC_CONN_MSG, SHORT_H_SIZE,
2181 0, dnode, onode, dport, oport, 2181 0, dnode, onode, dport, oport,
2182 TIPC_CONN_SHUTDOWN); 2182 TIPC_CONN_SHUTDOWN);
2183 tipc_node_xmit_skb(net, skb, dnode, tsk->portid); 2183 if (skb)
2184 tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
2184 } 2185 }
2185 tsk->connected = 0; 2186 tsk->connected = 0;
2186 sock->state = SS_DISCONNECTING; 2187 sock->state = SS_DISCONNECTING;
diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
index b016c011970b..ae7e14cae085 100644
--- a/net/tipc/udp_media.c
+++ b/net/tipc/udp_media.c
@@ -396,10 +396,13 @@ static int tipc_udp_enable(struct net *net, struct tipc_bearer *b,
396 tuncfg.encap_destroy = NULL; 396 tuncfg.encap_destroy = NULL;
397 setup_udp_tunnel_sock(net, ub->ubsock, &tuncfg); 397 setup_udp_tunnel_sock(net, ub->ubsock, &tuncfg);
398 398
399 if (enable_mcast(ub, remote)) 399 err = enable_mcast(ub, remote);
400 if (err)
400 goto err; 401 goto err;
401 return 0; 402 return 0;
402err: 403err:
404 if (ub->ubsock)
405 udp_tunnel_sock_release(ub->ubsock);
403 kfree(ub); 406 kfree(ub);
404 return err; 407 return err;
405} 408}
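
The error path gains a udp_tunnel_sock_release() before the kfree(), because once the tunnel socket exists it must be torn down too. The usual C idiom is to unwind in reverse order of acquisition; a runnable sketch with hypothetical helpers:

	#include <stdio.h>
	#include <stdlib.h>

	struct bearer { int *sock; };

	static int open_sock(struct bearer *b)
	{
		b->sock = malloc(sizeof(int));
		return b->sock ? 0 : -1;
	}

	static int enable(struct bearer *b)
	{
		(void)b;
		return -1;	/* simulate the enable_mcast() failure */
	}

	static void release_sock(struct bearer *b) { free(b->sock); }

	static int setup(void)
	{
		struct bearer *b = calloc(1, sizeof(*b));
		int err = -1;

		if (!b)
			return err;
		if (open_sock(b))
			goto err_free;
		if (enable(b))
			goto err_sock;	/* the fix: don't leak the socket */
		return 0;

	err_sock:
		release_sock(b);	/* undo open_sock() */
	err_free:
		free(b);
		return err;
	}

	int main(void)
	{
		printf("%d\n", setup());	/* -1, with no leak */
		return 0;
	}
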
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index f1dffe84f0d5..8309687a56b0 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -661,11 +661,11 @@ static int unix_set_peek_off(struct sock *sk, int val)
661{ 661{
662 struct unix_sock *u = unix_sk(sk); 662 struct unix_sock *u = unix_sk(sk);
663 663
664 if (mutex_lock_interruptible(&u->readlock)) 664 if (mutex_lock_interruptible(&u->iolock))
665 return -EINTR; 665 return -EINTR;
666 666
667 sk->sk_peek_off = val; 667 sk->sk_peek_off = val;
668 mutex_unlock(&u->readlock); 668 mutex_unlock(&u->iolock);
669 669
670 return 0; 670 return 0;
671} 671}
@@ -779,7 +779,8 @@ static struct sock *unix_create1(struct net *net, struct socket *sock, int kern)
779 spin_lock_init(&u->lock); 779 spin_lock_init(&u->lock);
780 atomic_long_set(&u->inflight, 0); 780 atomic_long_set(&u->inflight, 0);
781 INIT_LIST_HEAD(&u->link); 781 INIT_LIST_HEAD(&u->link);
782	mutex_init(&u->readlock); /* single task reading lock */		 782	mutex_init(&u->iolock); /* single task I/O lock */
783 mutex_init(&u->bindlock); /* single task binding lock */
783 init_waitqueue_head(&u->peer_wait); 784 init_waitqueue_head(&u->peer_wait);
784 init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay); 785 init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay);
785 unix_insert_socket(unix_sockets_unbound(sk), sk); 786 unix_insert_socket(unix_sockets_unbound(sk), sk);
@@ -848,7 +849,7 @@ static int unix_autobind(struct socket *sock)
848 int err; 849 int err;
849 unsigned int retries = 0; 850 unsigned int retries = 0;
850 851
851 err = mutex_lock_interruptible(&u->readlock); 852 err = mutex_lock_interruptible(&u->bindlock);
852 if (err) 853 if (err)
853 return err; 854 return err;
854 855
@@ -895,7 +896,7 @@ retry:
895 spin_unlock(&unix_table_lock); 896 spin_unlock(&unix_table_lock);
896 err = 0; 897 err = 0;
897 898
898out: mutex_unlock(&u->readlock); 899out: mutex_unlock(&u->bindlock);
899 return err; 900 return err;
900} 901}
901 902
@@ -954,20 +955,32 @@ fail:
954 return NULL; 955 return NULL;
955} 956}
956 957
957static int unix_mknod(struct dentry *dentry, const struct path *path, umode_t mode, 958static int unix_mknod(const char *sun_path, umode_t mode, struct path *res)
958 struct path *res)
959{ 959{
960 int err; 960 struct dentry *dentry;
961 struct path path;
962 int err = 0;
963 /*
964 * Get the parent directory, calculate the hash for last
965 * component.
966 */
967 dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0);
968 err = PTR_ERR(dentry);
969 if (IS_ERR(dentry))
970 return err;
961 971
962 err = security_path_mknod(path, dentry, mode, 0); 972 /*
973 * All right, let's create it.
974 */
975 err = security_path_mknod(&path, dentry, mode, 0);
963 if (!err) { 976 if (!err) {
964 err = vfs_mknod(d_inode(path->dentry), dentry, mode, 0); 977 err = vfs_mknod(d_inode(path.dentry), dentry, mode, 0);
965 if (!err) { 978 if (!err) {
966 res->mnt = mntget(path->mnt); 979 res->mnt = mntget(path.mnt);
967 res->dentry = dget(dentry); 980 res->dentry = dget(dentry);
968 } 981 }
969 } 982 }
970 983 done_path_create(&path, dentry);
971 return err; 984 return err;
972} 985}
973 986
@@ -978,12 +991,10 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
978 struct unix_sock *u = unix_sk(sk); 991 struct unix_sock *u = unix_sk(sk);
979 struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr; 992 struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
980 char *sun_path = sunaddr->sun_path; 993 char *sun_path = sunaddr->sun_path;
981 int err, name_err; 994 int err;
982 unsigned int hash; 995 unsigned int hash;
983 struct unix_address *addr; 996 struct unix_address *addr;
984 struct hlist_head *list; 997 struct hlist_head *list;
985 struct path path;
986 struct dentry *dentry;
987 998
988 err = -EINVAL; 999 err = -EINVAL;
989 if (sunaddr->sun_family != AF_UNIX) 1000 if (sunaddr->sun_family != AF_UNIX)
@@ -999,34 +1010,14 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
999 goto out; 1010 goto out;
1000 addr_len = err; 1011 addr_len = err;
1001 1012
1002 name_err = 0; 1013 err = mutex_lock_interruptible(&u->bindlock);
1003 dentry = NULL;
1004 if (sun_path[0]) {
1005 /* Get the parent directory, calculate the hash for last
1006 * component.
1007 */
1008 dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0);
1009
1010 if (IS_ERR(dentry)) {
1011 /* delay report until after 'already bound' check */
1012 name_err = PTR_ERR(dentry);
1013 dentry = NULL;
1014 }
1015 }
1016
1017 err = mutex_lock_interruptible(&u->readlock);
1018 if (err) 1014 if (err)
1019 goto out_path; 1015 goto out;
1020 1016
1021 err = -EINVAL; 1017 err = -EINVAL;
1022 if (u->addr) 1018 if (u->addr)
1023 goto out_up; 1019 goto out_up;
1024 1020
1025 if (name_err) {
1026 err = name_err == -EEXIST ? -EADDRINUSE : name_err;
1027 goto out_up;
1028 }
1029
1030 err = -ENOMEM; 1021 err = -ENOMEM;
1031 addr = kmalloc(sizeof(*addr)+addr_len, GFP_KERNEL); 1022 addr = kmalloc(sizeof(*addr)+addr_len, GFP_KERNEL);
1032 if (!addr) 1023 if (!addr)
@@ -1037,11 +1028,11 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
1037 addr->hash = hash ^ sk->sk_type; 1028 addr->hash = hash ^ sk->sk_type;
1038 atomic_set(&addr->refcnt, 1); 1029 atomic_set(&addr->refcnt, 1);
1039 1030
1040 if (dentry) { 1031 if (sun_path[0]) {
1041 struct path u_path; 1032 struct path path;
1042 umode_t mode = S_IFSOCK | 1033 umode_t mode = S_IFSOCK |
1043 (SOCK_INODE(sock)->i_mode & ~current_umask()); 1034 (SOCK_INODE(sock)->i_mode & ~current_umask());
1044 err = unix_mknod(dentry, &path, mode, &u_path); 1035 err = unix_mknod(sun_path, mode, &path);
1045 if (err) { 1036 if (err) {
1046 if (err == -EEXIST) 1037 if (err == -EEXIST)
1047 err = -EADDRINUSE; 1038 err = -EADDRINUSE;
@@ -1049,9 +1040,9 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
1049 goto out_up; 1040 goto out_up;
1050 } 1041 }
1051 addr->hash = UNIX_HASH_SIZE; 1042 addr->hash = UNIX_HASH_SIZE;
1052 hash = d_real_inode(dentry)->i_ino & (UNIX_HASH_SIZE - 1); 1043 hash = d_real_inode(path.dentry)->i_ino & (UNIX_HASH_SIZE - 1);
1053 spin_lock(&unix_table_lock); 1044 spin_lock(&unix_table_lock);
1054 u->path = u_path; 1045 u->path = path;
1055 list = &unix_socket_table[hash]; 1046 list = &unix_socket_table[hash];
1056 } else { 1047 } else {
1057 spin_lock(&unix_table_lock); 1048 spin_lock(&unix_table_lock);
@@ -1073,11 +1064,7 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
1073out_unlock: 1064out_unlock:
1074 spin_unlock(&unix_table_lock); 1065 spin_unlock(&unix_table_lock);
1075out_up: 1066out_up:
1076 mutex_unlock(&u->readlock); 1067 mutex_unlock(&u->bindlock);
1077out_path:
1078 if (dentry)
1079 done_path_create(&path, dentry);
1080
1081out: 1068out:
1082 return err; 1069 return err;
1083} 1070}
@@ -1969,17 +1956,17 @@ static ssize_t unix_stream_sendpage(struct socket *socket, struct page *page,
1969 if (false) { 1956 if (false) {
1970alloc_skb: 1957alloc_skb:
1971 unix_state_unlock(other); 1958 unix_state_unlock(other);
1972 mutex_unlock(&unix_sk(other)->readlock); 1959 mutex_unlock(&unix_sk(other)->iolock);
1973 newskb = sock_alloc_send_pskb(sk, 0, 0, flags & MSG_DONTWAIT, 1960 newskb = sock_alloc_send_pskb(sk, 0, 0, flags & MSG_DONTWAIT,
1974 &err, 0); 1961 &err, 0);
1975 if (!newskb) 1962 if (!newskb)
1976 goto err; 1963 goto err;
1977 } 1964 }
1978 1965
1979 /* we must acquire readlock as we modify already present 1966 /* we must acquire iolock as we modify already present
1980 * skbs in the sk_receive_queue and mess with skb->len 1967 * skbs in the sk_receive_queue and mess with skb->len
1981 */ 1968 */
1982 err = mutex_lock_interruptible(&unix_sk(other)->readlock); 1969 err = mutex_lock_interruptible(&unix_sk(other)->iolock);
1983 if (err) { 1970 if (err) {
1984 err = flags & MSG_DONTWAIT ? -EAGAIN : -ERESTARTSYS; 1971 err = flags & MSG_DONTWAIT ? -EAGAIN : -ERESTARTSYS;
1985 goto err; 1972 goto err;
@@ -2046,7 +2033,7 @@ alloc_skb:
2046 } 2033 }
2047 2034
2048 unix_state_unlock(other); 2035 unix_state_unlock(other);
2049 mutex_unlock(&unix_sk(other)->readlock); 2036 mutex_unlock(&unix_sk(other)->iolock);
2050 2037
2051 other->sk_data_ready(other); 2038 other->sk_data_ready(other);
2052 scm_destroy(&scm); 2039 scm_destroy(&scm);
@@ -2055,7 +2042,7 @@ alloc_skb:
2055err_state_unlock: 2042err_state_unlock:
2056 unix_state_unlock(other); 2043 unix_state_unlock(other);
2057err_unlock: 2044err_unlock:
2058 mutex_unlock(&unix_sk(other)->readlock); 2045 mutex_unlock(&unix_sk(other)->iolock);
2059err: 2046err:
2060 kfree_skb(newskb); 2047 kfree_skb(newskb);
2061 if (send_sigpipe && !(flags & MSG_NOSIGNAL)) 2048 if (send_sigpipe && !(flags & MSG_NOSIGNAL))
@@ -2123,7 +2110,7 @@ static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
2123 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); 2110 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
2124 2111
2125 do { 2112 do {
2126 mutex_lock(&u->readlock); 2113 mutex_lock(&u->iolock);
2127 2114
2128 skip = sk_peek_offset(sk, flags); 2115 skip = sk_peek_offset(sk, flags);
2129 skb = __skb_try_recv_datagram(sk, flags, &peeked, &skip, &err, 2116 skb = __skb_try_recv_datagram(sk, flags, &peeked, &skip, &err,
@@ -2131,14 +2118,14 @@ static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
2131 if (skb) 2118 if (skb)
2132 break; 2119 break;
2133 2120
2134 mutex_unlock(&u->readlock); 2121 mutex_unlock(&u->iolock);
2135 2122
2136 if (err != -EAGAIN) 2123 if (err != -EAGAIN)
2137 break; 2124 break;
2138 } while (timeo && 2125 } while (timeo &&
2139 !__skb_wait_for_more_packets(sk, &err, &timeo, last)); 2126 !__skb_wait_for_more_packets(sk, &err, &timeo, last));
2140 2127
2141 if (!skb) { /* implies readlock unlocked */ 2128 if (!skb) { /* implies iolock unlocked */
2142 unix_state_lock(sk); 2129 unix_state_lock(sk);
2143 /* Signal EOF on disconnected non-blocking SEQPACKET socket. */ 2130 /* Signal EOF on disconnected non-blocking SEQPACKET socket. */
2144 if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN && 2131 if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
@@ -2203,7 +2190,7 @@ static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
2203 2190
2204out_free: 2191out_free:
2205 skb_free_datagram(sk, skb); 2192 skb_free_datagram(sk, skb);
2206 mutex_unlock(&u->readlock); 2193 mutex_unlock(&u->iolock);
2207out: 2194out:
2208 return err; 2195 return err;
2209} 2196}
@@ -2298,7 +2285,7 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state)
2298 /* Lock the socket to prevent queue disordering 2285 /* Lock the socket to prevent queue disordering
2299 * while sleeps in memcpy_tomsg 2286 * while sleeps in memcpy_tomsg
2300 */ 2287 */
2301 mutex_lock(&u->readlock); 2288 mutex_lock(&u->iolock);
2302 2289
2303 if (flags & MSG_PEEK) 2290 if (flags & MSG_PEEK)
2304 skip = sk_peek_offset(sk, flags); 2291 skip = sk_peek_offset(sk, flags);
@@ -2340,7 +2327,7 @@ again:
2340 break; 2327 break;
2341 } 2328 }
2342 2329
2343 mutex_unlock(&u->readlock); 2330 mutex_unlock(&u->iolock);
2344 2331
2345 timeo = unix_stream_data_wait(sk, timeo, last, 2332 timeo = unix_stream_data_wait(sk, timeo, last,
2346 last_len); 2333 last_len);
@@ -2351,7 +2338,7 @@ again:
2351 goto out; 2338 goto out;
2352 } 2339 }
2353 2340
2354 mutex_lock(&u->readlock); 2341 mutex_lock(&u->iolock);
2355 goto redo; 2342 goto redo;
2356unlock: 2343unlock:
2357 unix_state_unlock(sk); 2344 unix_state_unlock(sk);
@@ -2454,7 +2441,7 @@ unlock:
2454 } 2441 }
2455 } while (size); 2442 } while (size);
2456 2443
2457 mutex_unlock(&u->readlock); 2444 mutex_unlock(&u->iolock);
2458 if (state->msg) 2445 if (state->msg)
2459 scm_recv(sock, state->msg, &scm, flags); 2446 scm_recv(sock, state->msg, &scm, flags);
2460 else 2447 else
@@ -2495,9 +2482,9 @@ static ssize_t skb_unix_socket_splice(struct sock *sk,
2495 int ret; 2482 int ret;
2496 struct unix_sock *u = unix_sk(sk); 2483 struct unix_sock *u = unix_sk(sk);
2497 2484
2498 mutex_unlock(&u->readlock); 2485 mutex_unlock(&u->iolock);
2499 ret = splice_to_pipe(pipe, spd); 2486 ret = splice_to_pipe(pipe, spd);
2500 mutex_lock(&u->readlock); 2487 mutex_lock(&u->iolock);
2501 2488
2502 return ret; 2489 return ret;
2503} 2490}
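
Taken together, the af_unix hunks split the old u->readlock into two mutexes — iolock for the data path (send/recv/splice) and bindlock for bind()/autobind — and move the kern_path_create() work out from under the bind lock. The deadlock-avoidance shape in runnable miniature (field names follow the patch; everything else is a stand-in):

	#include <pthread.h>
	#include <stdio.h>

	/* Two independent locks instead of one: I/O and bind no longer
	 * serialize against each other, so splice-vs-bind cannot deadlock.
	 */
	struct usock {
		pthread_mutex_t iolock;		/* read/write/splice path */
		pthread_mutex_t bindlock;	/* bind()/autobind path */
	};

	static void usock_init(struct usock *u)
	{
		pthread_mutex_init(&u->iolock, NULL);
		pthread_mutex_init(&u->bindlock, NULL);
	}

	int main(void)
	{
		struct usock u;

		usock_init(&u);
		pthread_mutex_lock(&u.bindlock);	/* binder holds bindlock... */
		pthread_mutex_lock(&u.iolock);		/* ...I/O path unaffected */
		puts("independent");
		pthread_mutex_unlock(&u.iolock);
		pthread_mutex_unlock(&u.bindlock);
		return 0;
	}
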
diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
index 699dfabdbccd..936d7eee62d0 100644
--- a/net/vmw_vsock/virtio_transport.c
+++ b/net/vmw_vsock/virtio_transport.c
@@ -87,9 +87,6 @@ virtio_transport_send_pkt_work(struct work_struct *work)
87 87
88 vq = vsock->vqs[VSOCK_VQ_TX]; 88 vq = vsock->vqs[VSOCK_VQ_TX];
89 89
90 /* Avoid unnecessary interrupts while we're processing the ring */
91 virtqueue_disable_cb(vq);
92
93 for (;;) { 90 for (;;) {
94 struct virtio_vsock_pkt *pkt; 91 struct virtio_vsock_pkt *pkt;
95 struct scatterlist hdr, buf, *sgs[2]; 92 struct scatterlist hdr, buf, *sgs[2];
@@ -99,7 +96,6 @@ virtio_transport_send_pkt_work(struct work_struct *work)
99 spin_lock_bh(&vsock->send_pkt_list_lock); 96 spin_lock_bh(&vsock->send_pkt_list_lock);
100 if (list_empty(&vsock->send_pkt_list)) { 97 if (list_empty(&vsock->send_pkt_list)) {
101 spin_unlock_bh(&vsock->send_pkt_list_lock); 98 spin_unlock_bh(&vsock->send_pkt_list_lock);
102 virtqueue_enable_cb(vq);
103 break; 99 break;
104 } 100 }
105 101
@@ -118,13 +114,13 @@ virtio_transport_send_pkt_work(struct work_struct *work)
118 } 114 }
119 115
120 ret = virtqueue_add_sgs(vq, sgs, out_sg, in_sg, pkt, GFP_KERNEL); 116 ret = virtqueue_add_sgs(vq, sgs, out_sg, in_sg, pkt, GFP_KERNEL);
117 /* Usually this means that there is no more space available in
118 * the vq
119 */
121 if (ret < 0) { 120 if (ret < 0) {
122 spin_lock_bh(&vsock->send_pkt_list_lock); 121 spin_lock_bh(&vsock->send_pkt_list_lock);
123 list_add(&pkt->list, &vsock->send_pkt_list); 122 list_add(&pkt->list, &vsock->send_pkt_list);
124 spin_unlock_bh(&vsock->send_pkt_list_lock); 123 spin_unlock_bh(&vsock->send_pkt_list_lock);
125
126 if (!virtqueue_enable_cb(vq) && ret == -ENOSPC)
127 continue; /* retry now that we have more space */
128 break; 124 break;
129 } 125 }
130 126
diff --git a/net/wireless/chan.c b/net/wireless/chan.c
index b0e11b6dc994..0f506220a3bd 100644
--- a/net/wireless/chan.c
+++ b/net/wireless/chan.c
@@ -513,6 +513,7 @@ static bool cfg80211_chandef_dfs_available(struct wiphy *wiphy,
513 r = cfg80211_get_chans_dfs_available(wiphy, 513 r = cfg80211_get_chans_dfs_available(wiphy,
514 chandef->center_freq2, 514 chandef->center_freq2,
515 width); 515 width);
516 break;
516 default: 517 default:
517 WARN_ON(chandef->center_freq2); 518 WARN_ON(chandef->center_freq2);
518 break; 519 break;
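
The one-line fix adds the break that was missing after the center_freq2 availability check, so execution no longer falls through into the default: arm and its WARN_ON. The bug class in runnable miniature:

	#include <stdio.h>

	/* Without the break, the first case also executes the default arm. */
	static int classify(int width)
	{
		int r = 0;

		switch (width) {
		case 80:
			r = 1;
			break;		/* the fix: stop here */
		default:
			r = -1;		/* previously also reached for width == 80 */
			break;
		}
		return r;
	}

	int main(void)
	{
		printf("%d\n", classify(80));	/* prints 1, not -1 */
		return 0;
	}
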
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 46417f9cce68..f02653a08993 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -5380,6 +5380,7 @@ static int nl80211_parse_mesh_config(struct genl_info *info,
5380{ 5380{
5381 struct nlattr *tb[NL80211_MESHCONF_ATTR_MAX + 1]; 5381 struct nlattr *tb[NL80211_MESHCONF_ATTR_MAX + 1];
5382 u32 mask = 0; 5382 u32 mask = 0;
5383 u16 ht_opmode;
5383 5384
5384#define FILL_IN_MESH_PARAM_IF_SET(tb, cfg, param, min, max, mask, attr, fn) \ 5385#define FILL_IN_MESH_PARAM_IF_SET(tb, cfg, param, min, max, mask, attr, fn) \
5385do { \ 5386do { \
@@ -5471,9 +5472,36 @@ do { \
5471 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, rssi_threshold, -255, 0, 5472 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, rssi_threshold, -255, 0,
5472 mask, NL80211_MESHCONF_RSSI_THRESHOLD, 5473 mask, NL80211_MESHCONF_RSSI_THRESHOLD,
5473 nl80211_check_s32); 5474 nl80211_check_s32);
5474 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, ht_opmode, 0, 16, 5475 /*
5475 mask, NL80211_MESHCONF_HT_OPMODE, 5476 * Check HT operation mode based on
5476 nl80211_check_u16); 5477 * IEEE 802.11 2012 8.4.2.59 HT Operation element.
5478 */
5479 if (tb[NL80211_MESHCONF_HT_OPMODE]) {
5480 ht_opmode = nla_get_u16(tb[NL80211_MESHCONF_HT_OPMODE]);
5481
5482 if (ht_opmode & ~(IEEE80211_HT_OP_MODE_PROTECTION |
5483 IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT |
5484 IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT))
5485 return -EINVAL;
5486
5487 if ((ht_opmode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT) &&
5488 (ht_opmode & IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT))
5489 return -EINVAL;
5490
5491 switch (ht_opmode & IEEE80211_HT_OP_MODE_PROTECTION) {
5492 case IEEE80211_HT_OP_MODE_PROTECTION_NONE:
5493 case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ:
5494 if (ht_opmode & IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT)
5495 return -EINVAL;
5496 break;
5497 case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER:
5498 case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED:
5499 if (!(ht_opmode & IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT))
5500 return -EINVAL;
5501 break;
5502 }
5503 cfg->ht_opmode = ht_opmode;
5504 }
5477 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPactivePathToRootTimeout, 5505 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPactivePathToRootTimeout,
5478 1, 65535, mask, 5506 1, 65535, mask,
5479 NL80211_MESHCONF_HWMP_PATH_TO_ROOT_TIMEOUT, 5507 NL80211_MESHCONF_HWMP_PATH_TO_ROOT_TIMEOUT,
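
The added block validates the HT operation mode against IEEE 802.11-2012 8.4.2.59 before accepting it. A standalone restatement of the same checks, using the standard bit layout (protection field in bits 0-1, 0x0004 for non-greenfield STAs present, 0x0010 for non-HT STAs present):

	#include <stdio.h>

	#define PROTECTION	0x0003
	#define PROT_NONE	0
	#define PROT_NONMEMBER	1
	#define PROT_20MHZ	2
	#define PROT_NONHT_MIX	3
	#define NON_GF_STA	0x0004
	#define NON_HT_STA	0x0010

	static int ht_opmode_valid(unsigned int m)
	{
		if (m & ~(PROTECTION | NON_GF_STA | NON_HT_STA))
			return 0;	/* unknown bits */
		if ((m & NON_GF_STA) && (m & NON_HT_STA))
			return 0;	/* mutually exclusive */
		switch (m & PROTECTION) {
		case PROT_NONE:
		case PROT_20MHZ:
			if (m & NON_HT_STA)
				return 0;
			break;
		case PROT_NONMEMBER:
		case PROT_NONHT_MIX:
			if (!(m & NON_HT_STA))
				return 0;
			break;
		}
		return 1;
	}

	int main(void)
	{
		printf("%d %d\n", ht_opmode_valid(0),
		       ht_opmode_valid(NON_GF_STA | NON_HT_STA)); /* 1 0 */
		return 0;
	}
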
diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
index dbb2738e356a..6250b1cfcde5 100644
--- a/net/wireless/wext-core.c
+++ b/net/wireless/wext-core.c
@@ -958,29 +958,8 @@ static int wireless_process_ioctl(struct net *net, struct ifreq *ifr,
958 return private(dev, iwr, cmd, info, handler); 958 return private(dev, iwr, cmd, info, handler);
959 } 959 }
960 /* Old driver API : call driver ioctl handler */ 960 /* Old driver API : call driver ioctl handler */
961 if (dev->netdev_ops->ndo_do_ioctl) { 961 if (dev->netdev_ops->ndo_do_ioctl)
962#ifdef CONFIG_COMPAT 962 return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd);
963 if (info->flags & IW_REQUEST_FLAG_COMPAT) {
964 int ret = 0;
965 struct iwreq iwr_lcl;
966 struct compat_iw_point *iwp_compat = (void *) &iwr->u.data;
967
968 memcpy(&iwr_lcl, iwr, sizeof(struct iwreq));
969 iwr_lcl.u.data.pointer = compat_ptr(iwp_compat->pointer);
970 iwr_lcl.u.data.length = iwp_compat->length;
971 iwr_lcl.u.data.flags = iwp_compat->flags;
972
973 ret = dev->netdev_ops->ndo_do_ioctl(dev, (void *) &iwr_lcl, cmd);
974
975 iwp_compat->pointer = ptr_to_compat(iwr_lcl.u.data.pointer);
976 iwp_compat->length = iwr_lcl.u.data.length;
977 iwp_compat->flags = iwr_lcl.u.data.flags;
978
979 return ret;
980 } else
981#endif
982 return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd);
983 }
984 return -EOPNOTSUPP; 963 return -EOPNOTSUPP;
985} 964}
986 965
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index 1c4ad477ce93..6e3f0254d8a1 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -207,15 +207,15 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
207 family = XFRM_SPI_SKB_CB(skb)->family; 207 family = XFRM_SPI_SKB_CB(skb)->family;
208 208
209 /* if tunnel is present override skb->mark value with tunnel i_key */ 209 /* if tunnel is present override skb->mark value with tunnel i_key */
210 if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4) { 210 switch (family) {
211 switch (family) { 211 case AF_INET:
212 case AF_INET: 212 if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4)
213 mark = be32_to_cpu(XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4->parms.i_key); 213 mark = be32_to_cpu(XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4->parms.i_key);
214 break; 214 break;
215 case AF_INET6: 215 case AF_INET6:
216 if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6)
216 mark = be32_to_cpu(XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6->parms.i_key); 217 mark = be32_to_cpu(XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6->parms.i_key);
217 break; 218 break;
218 }
219 } 219 }
220 220
221 /* Allocate new secpath or COW existing one. */ 221 /* Allocate new secpath or COW existing one. */
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index b5e665b3cfb0..45f9cf97ea25 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -626,6 +626,10 @@ static void xfrm_hash_rebuild(struct work_struct *work)
626 626
627 /* re-insert all policies by order of creation */ 627 /* re-insert all policies by order of creation */
628 list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) { 628 list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
629 if (xfrm_policy_id2dir(policy->index) >= XFRM_POLICY_MAX) {
630 /* skip socket policies */
631 continue;
632 }
629 newpos = NULL; 633 newpos = NULL;
630 chain = policy_hash_bysel(net, &policy->selector, 634 chain = policy_hash_bysel(net, &policy->selector,
631 policy->family, 635 policy->family,
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index d516845e16e3..cb65d916a345 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -896,7 +896,8 @@ static int xfrm_dump_sa_done(struct netlink_callback *cb)
896 struct sock *sk = cb->skb->sk; 896 struct sock *sk = cb->skb->sk;
897 struct net *net = sock_net(sk); 897 struct net *net = sock_net(sk);
898 898
899 xfrm_state_walk_done(walk, net); 899 if (cb->args[0])
900 xfrm_state_walk_done(walk, net);
900 return 0; 901 return 0;
901} 902}
902 903
@@ -921,8 +922,6 @@ static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb)
921 u8 proto = 0; 922 u8 proto = 0;
922 int err; 923 int err;
923 924
924 cb->args[0] = 1;
925
926 err = nlmsg_parse(cb->nlh, 0, attrs, XFRMA_MAX, 925 err = nlmsg_parse(cb->nlh, 0, attrs, XFRMA_MAX,
927 xfrma_policy); 926 xfrma_policy);
928 if (err < 0) 927 if (err < 0)
@@ -939,6 +938,7 @@ static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb)
939 proto = nla_get_u8(attrs[XFRMA_PROTO]); 938 proto = nla_get_u8(attrs[XFRMA_PROTO]);
940 939
941 xfrm_state_walk_init(walk, proto, filter); 940 xfrm_state_walk_init(walk, proto, filter);
941 cb->args[0] = 1;
942 } 942 }
943 943
944 (void) xfrm_state_walk(net, walk, dump_one_state, &info); 944 (void) xfrm_state_walk(net, walk, dump_one_state, &info);
@@ -2051,9 +2051,6 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
2051 if (up->hard) { 2051 if (up->hard) {
2052 xfrm_policy_delete(xp, p->dir); 2052 xfrm_policy_delete(xp, p->dir);
2053 xfrm_audit_policy_delete(xp, 1, true); 2053 xfrm_audit_policy_delete(xp, 1, true);
2054 } else {
2055 // reset the timers here?
2056 WARN(1, "Don't know what to do with soft policy expire\n");
2057 } 2054 }
2058 km_policy_expired(xp, p->dir, up->hard, nlh->nlmsg_pid); 2055 km_policy_expired(xp, p->dir, up->hard, nlh->nlmsg_pid);
2059 2056
@@ -2117,7 +2114,7 @@ static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh,
2117 2114
2118 err = verify_newpolicy_info(&ua->policy); 2115 err = verify_newpolicy_info(&ua->policy);
2119 if (err) 2116 if (err)
2120 goto bad_policy; 2117 goto free_state;
2121 2118
2122 /* build an XP */ 2119 /* build an XP */
2123 xp = xfrm_policy_construct(net, &ua->policy, attrs, &err); 2120 xp = xfrm_policy_construct(net, &ua->policy, attrs, &err);
@@ -2149,8 +2146,6 @@ static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh,
2149 2146
2150 return 0; 2147 return 0;
2151 2148
2152bad_policy:
2153 WARN(1, "BAD policy passed\n");
2154free_state: 2149free_state:
2155 kfree(x); 2150 kfree(x);
2156nomem: 2151nomem:
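
The xfrm dump fix moves the cb->args[0] = 1 marker to after xfrm_state_walk_init() and makes xfrm_dump_sa_done() bail out when it is unset, so a parse failure before initialization no longer tears down a walk that was never set up. The guard pattern, runnable in isolation:

	#include <stdio.h>

	/* Record initialization only after it happens; unwind only when the
	 * record says so.
	 */
	struct dump_ctx {
		int initialized;	/* mirrors cb->args[0] */
	};

	static void walk_init(struct dump_ctx *c)
	{
		/* ... set up the walker ... */
		c->initialized = 1;	/* set last, after init succeeds */
	}

	static void walk_done(struct dump_ctx *c)
	{
		if (!c->initialized)	/* parse may have failed before init */
			return;
		/* ... tear down the walker ... */
	}

	int main(void)
	{
		struct dump_ctx c = { 0 };

		walk_done(&c);	/* safe: nothing to undo */
		walk_init(&c);
		walk_done(&c);	/* now performs real teardown */
		puts("ok");
		return 0;
	}
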
diff --git a/samples/bpf/bpf_helpers.h b/samples/bpf/bpf_helpers.h
index 217c8d507f2e..7927a090fa0d 100644
--- a/samples/bpf/bpf_helpers.h
+++ b/samples/bpf/bpf_helpers.h
@@ -72,8 +72,8 @@ static int (*bpf_l3_csum_replace)(void *ctx, int off, int from, int to, int flag
72 (void *) BPF_FUNC_l3_csum_replace; 72 (void *) BPF_FUNC_l3_csum_replace;
73static int (*bpf_l4_csum_replace)(void *ctx, int off, int from, int to, int flags) = 73static int (*bpf_l4_csum_replace)(void *ctx, int off, int from, int to, int flags) =
74 (void *) BPF_FUNC_l4_csum_replace; 74 (void *) BPF_FUNC_l4_csum_replace;
75static int (*bpf_skb_in_cgroup)(void *ctx, void *map, int index) = 75static int (*bpf_skb_under_cgroup)(void *ctx, void *map, int index) =
76 (void *) BPF_FUNC_skb_in_cgroup; 76 (void *) BPF_FUNC_skb_under_cgroup;
77 77
78#if defined(__x86_64__) 78#if defined(__x86_64__)
79 79
diff --git a/samples/bpf/test_cgrp2_tc_kern.c b/samples/bpf/test_cgrp2_tc_kern.c
index 2732c37c8d5b..10ff73404e3a 100644
--- a/samples/bpf/test_cgrp2_tc_kern.c
+++ b/samples/bpf/test_cgrp2_tc_kern.c
@@ -57,7 +57,7 @@ int handle_egress(struct __sk_buff *skb)
57 bpf_trace_printk(dont_care_msg, sizeof(dont_care_msg), 57 bpf_trace_printk(dont_care_msg, sizeof(dont_care_msg),
58 eth->h_proto, ip6h->nexthdr); 58 eth->h_proto, ip6h->nexthdr);
59 return TC_ACT_OK; 59 return TC_ACT_OK;
60 } else if (bpf_skb_in_cgroup(skb, &test_cgrp2_array_pin, 0) != 1) { 60 } else if (bpf_skb_under_cgroup(skb, &test_cgrp2_array_pin, 0) != 1) {
61 bpf_trace_printk(pass_msg, sizeof(pass_msg)); 61 bpf_trace_printk(pass_msg, sizeof(pass_msg));
62 return TC_ACT_OK; 62 return TC_ACT_OK;
63 } else { 63 } else {
diff --git a/samples/bpf/test_maps.c b/samples/bpf/test_maps.c
index 47bf0858f9e4..cce2b59751eb 100644
--- a/samples/bpf/test_maps.c
+++ b/samples/bpf/test_maps.c
@@ -68,7 +68,16 @@ static void test_hashmap_sanity(int i, void *data)
68 assert(bpf_update_elem(map_fd, &key, &value, BPF_NOEXIST) == -1 && 68 assert(bpf_update_elem(map_fd, &key, &value, BPF_NOEXIST) == -1 &&
69 errno == E2BIG); 69 errno == E2BIG);
70 70
	 71		/* update existing element, though the map is full */
72 key = 1;
73 assert(bpf_update_elem(map_fd, &key, &value, BPF_EXIST) == 0);
74 key = 2;
75 assert(bpf_update_elem(map_fd, &key, &value, BPF_ANY) == 0);
76 key = 1;
77 assert(bpf_update_elem(map_fd, &key, &value, BPF_ANY) == 0);
78
71 /* check that key = 0 doesn't exist */ 79 /* check that key = 0 doesn't exist */
80 key = 0;
72 assert(bpf_delete_elem(map_fd, &key) == -1 && errno == ENOENT); 81 assert(bpf_delete_elem(map_fd, &key) == -1 && errno == ENOENT);
73 82
74 /* iterate over two elements */ 83 /* iterate over two elements */
@@ -413,10 +422,12 @@ static void do_work(int fn, void *data)
413 422
414 for (i = fn; i < MAP_SIZE; i += TASKS) { 423 for (i = fn; i < MAP_SIZE; i += TASKS) {
415 key = value = i; 424 key = value = i;
416 if (do_update) 425 if (do_update) {
417 assert(bpf_update_elem(map_fd, &key, &value, BPF_NOEXIST) == 0); 426 assert(bpf_update_elem(map_fd, &key, &value, BPF_NOEXIST) == 0);
418 else 427 assert(bpf_update_elem(map_fd, &key, &value, BPF_EXIST) == 0);
428 } else {
419 assert(bpf_delete_elem(map_fd, &key) == 0); 429 assert(bpf_delete_elem(map_fd, &key) == 0);
430 }
420 } 431 }
421} 432}
422 433
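
The new assertions pin down the update-flag contract: on a full map, BPF_EXIST and BPF_ANY may still overwrite an existing key in place, while inserting a new key fails with E2BIG. A userspace stand-in for the flag behaviours — the real semantics live in the kernel's map implementations; this one-slot "map" only mirrors the contract:

	#include <errno.h>
	#include <stdio.h>

	#define ANY	0	/* create or overwrite */
	#define NOEXIST	1	/* create only */
	#define EXIST	2	/* overwrite only */

	static int have_key;	/* one-slot stand-in for a full map */

	static int update(int flags)
	{
		if (flags == NOEXIST && have_key)
			return -EEXIST;	/* new key on a full map: rejected */
		if (flags == EXIST && !have_key)
			return -ENOENT;	/* nothing to overwrite */
		have_key = 1;		/* in-place update needs no new slot */
		return 0;
	}

	int main(void)
	{
		printf("%d\n", update(ANY));		/* 0: inserted */
		printf("%d\n", update(EXIST));		/* 0: overwrite allowed */
		printf("%d\n", update(NOEXIST));	/* -EEXIST */
		return 0;
	}
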
diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include
index 15b196fc2f49..179219845dfc 100644
--- a/scripts/Kbuild.include
+++ b/scripts/Kbuild.include
@@ -108,16 +108,20 @@ as-option = $(call try-run,\
108as-instr = $(call try-run,\ 108as-instr = $(call try-run,\
109 printf "%b\n" "$(1)" | $(CC) $(KBUILD_AFLAGS) -c -x assembler -o "$$TMP" -,$(2),$(3)) 109 printf "%b\n" "$(1)" | $(CC) $(KBUILD_AFLAGS) -c -x assembler -o "$$TMP" -,$(2),$(3))
110 110
111# Do not attempt to build with gcc plugins during cc-option tests.
112# (And this uses delayed resolution so the flags will be up to date.)
113CC_OPTION_CFLAGS = $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
114
111# cc-option 115# cc-option
112# Usage: cflags-y += $(call cc-option,-march=winchip-c6,-march=i586) 116# Usage: cflags-y += $(call cc-option,-march=winchip-c6,-march=i586)
113 117
114cc-option = $(call try-run,\ 118cc-option = $(call try-run,\
115 $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) $(1) -c -x c /dev/null -o "$$TMP",$(1),$(2)) 119 $(CC) $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS) $(1) -c -x c /dev/null -o "$$TMP",$(1),$(2))
116 120
117# cc-option-yn 121# cc-option-yn
118# Usage: flag := $(call cc-option-yn,-march=winchip-c6) 122# Usage: flag := $(call cc-option-yn,-march=winchip-c6)
119cc-option-yn = $(call try-run,\ 123cc-option-yn = $(call try-run,\
120 $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) $(1) -c -x c /dev/null -o "$$TMP",y,n) 124 $(CC) $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS) $(1) -c -x c /dev/null -o "$$TMP",y,n)
121 125
122# cc-option-align 126# cc-option-align
123# Prefix align with either -falign or -malign 127# Prefix align with either -falign or -malign
@@ -127,7 +131,7 @@ cc-option-align = $(subst -functions=0,,\
127# cc-disable-warning 131# cc-disable-warning
128# Usage: cflags-y += $(call cc-disable-warning,unused-but-set-variable) 132# Usage: cflags-y += $(call cc-disable-warning,unused-but-set-variable)
129cc-disable-warning = $(call try-run,\ 133cc-disable-warning = $(call try-run,\
130 $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) -W$(strip $(1)) -c -x c /dev/null -o "$$TMP",-Wno-$(strip $(1))) 134 $(CC) $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS) -W$(strip $(1)) -c -x c /dev/null -o "$$TMP",-Wno-$(strip $(1)))
131 135
132# cc-name 136# cc-name
133# Expands to either gcc or clang 137# Expands to either gcc or clang
diff --git a/scripts/Makefile.gcc-plugins b/scripts/Makefile.gcc-plugins
index 5e22b60589c1..61f0e6db909b 100644
--- a/scripts/Makefile.gcc-plugins
+++ b/scripts/Makefile.gcc-plugins
@@ -19,25 +19,42 @@ ifdef CONFIG_GCC_PLUGINS
19 endif 19 endif
20 endif 20 endif
21 21
22 GCC_PLUGINS_CFLAGS := $(addprefix -fplugin=$(objtree)/scripts/gcc-plugins/, $(gcc-plugin-y)) 22 GCC_PLUGINS_CFLAGS := $(strip $(addprefix -fplugin=$(objtree)/scripts/gcc-plugins/, $(gcc-plugin-y)) $(gcc-plugin-cflags-y))
23 23
24 export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGIN SANCOV_PLUGIN 24 export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGIN GCC_PLUGIN_SUBDIR SANCOV_PLUGIN
25 25
26 ifneq ($(PLUGINCC),)
 27  # SANCOV_PLUGIN must appear only in CFLAGS_KCOV, to avoid duplication.
28 GCC_PLUGINS_CFLAGS := $(filter-out $(SANCOV_PLUGIN), $(GCC_PLUGINS_CFLAGS))
29 endif
30
31 KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
32 GCC_PLUGIN := $(gcc-plugin-y)
33 GCC_PLUGIN_SUBDIR := $(gcc-plugin-subdir-y)
34endif
35
36# If plugins aren't supported, abort the build before hard-to-read compiler
37# errors start getting spewed by the main build.
38PHONY += gcc-plugins-check
39gcc-plugins-check: FORCE
40ifdef CONFIG_GCC_PLUGINS
26 ifeq ($(PLUGINCC),) 41 ifeq ($(PLUGINCC),)
27 ifneq ($(GCC_PLUGINS_CFLAGS),) 42 ifneq ($(GCC_PLUGINS_CFLAGS),)
28 ifeq ($(call cc-ifversion, -ge, 0405, y), y) 43 ifeq ($(call cc-ifversion, -ge, 0405, y), y)
29 PLUGINCC := $(shell $(CONFIG_SHELL) -x $(srctree)/scripts/gcc-plugin.sh "$(__PLUGINCC)" "$(HOSTCXX)" "$(CC)") 44 $(Q)$(srctree)/scripts/gcc-plugin.sh --show-error "$(__PLUGINCC)" "$(HOSTCXX)" "$(CC)" || true
30 $(warning warning: your gcc installation does not support plugins, perhaps the necessary headers are missing?) 45 @echo "Cannot use CONFIG_GCC_PLUGINS: your gcc installation does not support plugins, perhaps the necessary headers are missing?" >&2 && exit 1
31 else 46 else
32 $(warning warning: your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least) 47 @echo "Cannot use CONFIG_GCC_PLUGINS: your gcc version does not support plugins, you should upgrade it to at least gcc 4.5" >&2 && exit 1
33 endif 48 endif
34 endif 49 endif
35 else
36 # SANCOV_PLUGIN can be only in CFLAGS_KCOV because avoid duplication.
37 GCC_PLUGINS_CFLAGS := $(filter-out $(SANCOV_PLUGIN), $(GCC_PLUGINS_CFLAGS))
38 endif 50 endif
51endif
52 @:
39 53
40 KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS) 54# Actually do the build, if requested.
41 GCC_PLUGIN := $(gcc-plugin-y) 55PHONY += gcc-plugins
42 56gcc-plugins: scripts_basic gcc-plugins-check
57ifdef CONFIG_GCC_PLUGINS
58 $(Q)$(MAKE) $(build)=scripts/gcc-plugins
43endif 59endif
60 @:
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 4de3cc42fc50..206a6b346a8d 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -3570,15 +3570,6 @@ sub process {
3570 } 3570 }
3571 } 3571 }
3572 3572
3573# check for uses of DEFINE_PCI_DEVICE_TABLE
3574 if ($line =~ /\bDEFINE_PCI_DEVICE_TABLE\s*\(\s*(\w+)\s*\)\s*=/) {
3575 if (WARN("DEFINE_PCI_DEVICE_TABLE",
3576 "Prefer struct pci_device_id over deprecated DEFINE_PCI_DEVICE_TABLE\n" . $herecurr) &&
3577 $fix) {
3578 $fixed[$fixlinenr] =~ s/\b(?:static\s+|)DEFINE_PCI_DEVICE_TABLE\s*\(\s*(\w+)\s*\)\s*=\s*/static const struct pci_device_id $1\[\] = /;
3579 }
3580 }
3581
3582# check for new typedefs, only function parameters and sparse annotations 3573# check for new typedefs, only function parameters and sparse annotations
3583# make sense. 3574# make sense.
3584 if ($line =~ /\btypedef\s/ && 3575 if ($line =~ /\btypedef\s/ &&
diff --git a/scripts/gcc-plugin.sh b/scripts/gcc-plugin.sh
index fb9207565471..b65224bfb847 100755
--- a/scripts/gcc-plugin.sh
+++ b/scripts/gcc-plugin.sh
@@ -1,5 +1,12 @@
1#!/bin/sh 1#!/bin/sh
2srctree=$(dirname "$0") 2srctree=$(dirname "$0")
3
4SHOW_ERROR=
5if [ "$1" = "--show-error" ] ; then
6 SHOW_ERROR=1
7 shift || true
8fi
9
3gccplugins_dir=$($3 -print-file-name=plugin) 10gccplugins_dir=$($3 -print-file-name=plugin)
4plugincc=$($1 -E -x c++ - -o /dev/null -I"${srctree}"/gcc-plugins -I"${gccplugins_dir}"/include 2>&1 <<EOF 11plugincc=$($1 -E -x c++ - -o /dev/null -I"${srctree}"/gcc-plugins -I"${gccplugins_dir}"/include 2>&1 <<EOF
5#include "gcc-common.h" 12#include "gcc-common.h"
@@ -13,6 +20,9 @@ EOF
13 20
14if [ $? -ne 0 ] 21if [ $? -ne 0 ]
15then 22then
23 if [ -n "$SHOW_ERROR" ] ; then
24 echo "${plugincc}" >&2
25 fi
16 exit 1 26 exit 1
17fi 27fi
18 28
@@ -48,4 +58,8 @@ then
48 echo "$2" 58 echo "$2"
49 exit 0 59 exit 0
50fi 60fi
61
62if [ -n "$SHOW_ERROR" ] ; then
63 echo "${plugincc}" >&2
64fi
51exit 1 65exit 1
diff --git a/scripts/gcc-plugins/Makefile b/scripts/gcc-plugins/Makefile
index 88c8ec47232b..8b29dc17c73c 100644
--- a/scripts/gcc-plugins/Makefile
+++ b/scripts/gcc-plugins/Makefile
@@ -12,16 +12,18 @@ else
12 export HOST_EXTRACXXFLAGS 12 export HOST_EXTRACXXFLAGS
13endif 13endif
14 14
15export GCCPLUGINS_DIR HOSTLIBS
16
17ifneq ($(CFLAGS_KCOV), $(SANCOV_PLUGIN)) 15ifneq ($(CFLAGS_KCOV), $(SANCOV_PLUGIN))
18 GCC_PLUGIN := $(filter-out $(SANCOV_PLUGIN), $(GCC_PLUGIN)) 16 GCC_PLUGIN := $(filter-out $(SANCOV_PLUGIN), $(GCC_PLUGIN))
19endif 17endif
20 18
21$(HOSTLIBS)-y := $(GCC_PLUGIN) 19export HOSTLIBS
20
21$(HOSTLIBS)-y := $(foreach p,$(GCC_PLUGIN),$(if $(findstring /,$(p)),,$(p)))
22always := $($(HOSTLIBS)-y) 22always := $($(HOSTLIBS)-y)
23 23
24cyc_complexity_plugin-objs := cyc_complexity_plugin.o 24$(foreach p,$($(HOSTLIBS)-y:%.so=%),$(eval $(p)-objs := $(p).o))
25sancov_plugin-objs := sancov_plugin.o 25
26subdir-y := $(GCC_PLUGIN_SUBDIR)
27subdir- += $(GCC_PLUGIN_SUBDIR)
26 28
27clean-files += *.so 29clean-files += *.so
diff --git a/scripts/get_maintainer.pl b/scripts/get_maintainer.pl
index 122fcdaf42c8..aed4511f0304 100755
--- a/scripts/get_maintainer.pl
+++ b/scripts/get_maintainer.pl
@@ -432,7 +432,7 @@ foreach my $file (@ARGV) {
432 die "$P: file '${file}' not found\n"; 432 die "$P: file '${file}' not found\n";
433 } 433 }
434 } 434 }
435 if ($from_filename || vcs_file_exists($file)) { 435 if ($from_filename || ($file ne "&STDIN" && vcs_file_exists($file))) {
436 $file =~ s/^\Q${cur_path}\E//; #strip any absolute path 436 $file =~ s/^\Q${cur_path}\E//; #strip any absolute path
437 $file =~ s/^\Q${lk_path}\E//; #or the path to the lk tree 437 $file =~ s/^\Q${lk_path}\E//; #or the path to the lk tree
438 push(@files, $file); 438 push(@files, $file);
@@ -2136,9 +2136,11 @@ sub vcs_file_exists {
2136 2136
2137 my $cmd = $VCS_cmds{"file_exists_cmd"}; 2137 my $cmd = $VCS_cmds{"file_exists_cmd"};
2138 $cmd =~ s/(\$\w+)/$1/eeg; # interpolate $cmd 2138 $cmd =~ s/(\$\w+)/$1/eeg; # interpolate $cmd
2139 2139 $cmd .= " 2>&1";
2140 $exists = &{$VCS_cmds{"execute_cmd"}}($cmd); 2140 $exists = &{$VCS_cmds{"execute_cmd"}}($cmd);
2141 2141
2142 return 0 if ($? != 0);
2143
2142 return $exists; 2144 return $exists;
2143} 2145}
2144 2146
diff --git a/scripts/package/builddeb b/scripts/package/builddeb
index e1c09e2f9be7..8ea9fd2b6573 100755
--- a/scripts/package/builddeb
+++ b/scripts/package/builddeb
@@ -332,7 +332,9 @@ if grep -q '^CONFIG_STACK_VALIDATION=y' $KCONFIG_CONFIG ; then
332 (cd $objtree; find tools/objtool -type f -executable) >> "$objtree/debian/hdrobjfiles" 332 (cd $objtree; find tools/objtool -type f -executable) >> "$objtree/debian/hdrobjfiles"
333fi 333fi
334(cd $objtree; find arch/$SRCARCH/include Module.symvers include scripts -type f) >> "$objtree/debian/hdrobjfiles" 334(cd $objtree; find arch/$SRCARCH/include Module.symvers include scripts -type f) >> "$objtree/debian/hdrobjfiles"
335(cd $objtree; find scripts/gcc-plugins -name \*.so -o -name gcc-common.h) >> "$objtree/debian/hdrobjfiles" 335if grep -q '^CONFIG_GCC_PLUGINS=y' $KCONFIG_CONFIG ; then
336 (cd $objtree; find scripts/gcc-plugins -name \*.so -o -name gcc-common.h) >> "$objtree/debian/hdrobjfiles"
337fi
336destdir=$kernel_headers_dir/usr/src/linux-headers-$version 338destdir=$kernel_headers_dir/usr/src/linux-headers-$version
337mkdir -p "$destdir" 339mkdir -p "$destdir"
338(cd $srctree; tar -c -f - -T -) < "$objtree/debian/hdrsrcfiles" | (cd $destdir; tar -xf -) 340(cd $srctree; tar -c -f - -T -) < "$objtree/debian/hdrsrcfiles" | (cd $destdir; tar -xf -)
diff --git a/scripts/tags.sh b/scripts/tags.sh
index ed7eef24ef89..b3775a9604ea 100755
--- a/scripts/tags.sh
+++ b/scripts/tags.sh
@@ -206,7 +206,6 @@ regex_c=(
206 '/\<DEFINE_PER_CPU_SHARED_ALIGNED([^,]*, *\([[:alnum:]_]*\)/\1/v/' 206 '/\<DEFINE_PER_CPU_SHARED_ALIGNED([^,]*, *\([[:alnum:]_]*\)/\1/v/'
207 '/\<DECLARE_WAIT_QUEUE_HEAD(\([[:alnum:]_]*\)/\1/v/' 207 '/\<DECLARE_WAIT_QUEUE_HEAD(\([[:alnum:]_]*\)/\1/v/'
208 '/\<DECLARE_\(TASKLET\|WORK\|DELAYED_WORK\)(\([[:alnum:]_]*\)/\2/v/' 208 '/\<DECLARE_\(TASKLET\|WORK\|DELAYED_WORK\)(\([[:alnum:]_]*\)/\2/v/'
209 '/\<DEFINE_PCI_DEVICE_TABLE(\([[:alnum:]_]*\)/\1/v/'
210 '/\(^\s\)OFFSET(\([[:alnum:]_]*\)/\2/v/' 209 '/\(^\s\)OFFSET(\([[:alnum:]_]*\)/\2/v/'
211 '/\(^\s\)DEFINE(\([[:alnum:]_]*\)/\2/v/' 210 '/\(^\s\)DEFINE(\([[:alnum:]_]*\)/\2/v/'
212 '/\<DEFINE_HASHTABLE(\([[:alnum:]_]*\)/\1/v/' 211 '/\<DEFINE_HASHTABLE(\([[:alnum:]_]*\)/\1/v/'
diff --git a/security/Kconfig b/security/Kconfig
index 176758cdfa57..118f4549404e 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -118,6 +118,46 @@ config LSM_MMAP_MIN_ADDR
118 this low address space will need the permission specific to the 118 this low address space will need the permission specific to the
119 systems running LSM. 119 systems running LSM.
120 120
121config HAVE_HARDENED_USERCOPY_ALLOCATOR
122 bool
123 help
124 The heap allocator implements __check_heap_object() for
125 validating memory ranges against heap object sizes in
126 support of CONFIG_HARDENED_USERCOPY.
127
128config HAVE_ARCH_HARDENED_USERCOPY
129 bool
130 help
131 The architecture supports CONFIG_HARDENED_USERCOPY by
132 calling check_object_size() just before performing the
133 userspace copies in the low level implementation of
134 copy_to_user() and copy_from_user().
135
136config HARDENED_USERCOPY
137 bool "Harden memory copies between kernel and userspace"
138 depends on HAVE_ARCH_HARDENED_USERCOPY
139 depends on HAVE_HARDENED_USERCOPY_ALLOCATOR
140 select BUG
141 help
142 This option checks for obviously wrong memory regions when
143 copying memory to/from the kernel (via copy_to_user() and
144 copy_from_user() functions) by rejecting memory ranges that
145 are larger than the specified heap object, span multiple
146	  separately allocated pages, are not on the process stack,
147 or are part of the kernel text. This kills entire classes
148 of heap overflow exploits and similar kernel memory exposures.
149
150config HARDENED_USERCOPY_PAGESPAN
151 bool "Refuse to copy allocations that span multiple pages"
152 depends on HARDENED_USERCOPY
153 depends on EXPERT
154 help
155 When a multi-page allocation is done without __GFP_COMP,
156 hardened usercopy will reject attempts to copy it. There are,
157 however, several cases of this in the kernel that have not all
158 been removed. This config is intended to be used only while
159 trying to find such users.
160
121source security/selinux/Kconfig 161source security/selinux/Kconfig
122source security/smack/Kconfig 162source security/smack/Kconfig
123source security/tomoyo/Kconfig 163source security/tomoyo/Kconfig
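
For context on the new options: with HARDENED_USERCOPY enabled, the arch's low-level uaccess routines call check_object_size() on the kernel-side buffer before copying, and the allocator's __check_heap_object() supplies the object bounds. A kernel-style sketch of where the hook sits — not standalone, and the raw-copy helper name is hypothetical:

	/* Kernel-style sketch: validate the kernel buffer before the copy
	 * proceeds. check_object_size() becomes a no-op when
	 * CONFIG_HARDENED_USERCOPY is disabled.
	 */
	static inline unsigned long
	copy_from_user_sketch(void *to, const void __user *from, unsigned long n)
	{
		check_object_size(to, n, false);	  /* false: copying from user */
		return arch_raw_copy_sketch(to, from, n); /* hypothetical raw copy */
	}
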
diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
index 795437b10082..b450a27588c8 100644
--- a/sound/core/rawmidi.c
+++ b/sound/core/rawmidi.c
@@ -1633,11 +1633,13 @@ static int snd_rawmidi_dev_register(struct snd_device *device)
1633 return -EBUSY; 1633 return -EBUSY;
1634 } 1634 }
1635 list_add_tail(&rmidi->list, &snd_rawmidi_devices); 1635 list_add_tail(&rmidi->list, &snd_rawmidi_devices);
1636 mutex_unlock(&register_mutex);
1636 err = snd_register_device(SNDRV_DEVICE_TYPE_RAWMIDI, 1637 err = snd_register_device(SNDRV_DEVICE_TYPE_RAWMIDI,
1637 rmidi->card, rmidi->device, 1638 rmidi->card, rmidi->device,
1638 &snd_rawmidi_f_ops, rmidi, &rmidi->dev); 1639 &snd_rawmidi_f_ops, rmidi, &rmidi->dev);
1639 if (err < 0) { 1640 if (err < 0) {
1640 rmidi_err(rmidi, "unable to register\n"); 1641 rmidi_err(rmidi, "unable to register\n");
1642 mutex_lock(&register_mutex);
1641 list_del(&rmidi->list); 1643 list_del(&rmidi->list);
1642 mutex_unlock(&register_mutex); 1644 mutex_unlock(&register_mutex);
1643 return err; 1645 return err;
@@ -1645,6 +1647,7 @@ static int snd_rawmidi_dev_register(struct snd_device *device)
1645 if (rmidi->ops && rmidi->ops->dev_register && 1647 if (rmidi->ops && rmidi->ops->dev_register &&
1646 (err = rmidi->ops->dev_register(rmidi)) < 0) { 1648 (err = rmidi->ops->dev_register(rmidi)) < 0) {
1647 snd_unregister_device(&rmidi->dev); 1649 snd_unregister_device(&rmidi->dev);
1650 mutex_lock(&register_mutex);
1648 list_del(&rmidi->list); 1651 list_del(&rmidi->list);
1649 mutex_unlock(&register_mutex); 1652 mutex_unlock(&register_mutex);
1650 return err; 1653 return err;
@@ -1677,7 +1680,6 @@ static int snd_rawmidi_dev_register(struct snd_device *device)
1677 } 1680 }
1678 } 1681 }
1679#endif /* CONFIG_SND_OSSEMUL */ 1682#endif /* CONFIG_SND_OSSEMUL */
1680 mutex_unlock(&register_mutex);
1681 sprintf(name, "midi%d", rmidi->device); 1683 sprintf(name, "midi%d", rmidi->device);
1682 entry = snd_info_create_card_entry(rmidi->card, name, rmidi->card->proc_root); 1684 entry = snd_info_create_card_entry(rmidi->card, name, rmidi->card->proc_root);
1683 if (entry) { 1685 if (entry) {
diff --git a/sound/core/timer.c b/sound/core/timer.c
index 9a6157ea6881..fc144f43faa6 100644
--- a/sound/core/timer.c
+++ b/sound/core/timer.c
@@ -35,6 +35,9 @@
35#include <sound/initval.h> 35#include <sound/initval.h>
36#include <linux/kmod.h> 36#include <linux/kmod.h>
37 37
38/* internal flags */
39#define SNDRV_TIMER_IFLG_PAUSED 0x00010000
40
38#if IS_ENABLED(CONFIG_SND_HRTIMER) 41#if IS_ENABLED(CONFIG_SND_HRTIMER)
39#define DEFAULT_TIMER_LIMIT 4 42#define DEFAULT_TIMER_LIMIT 4
40#else 43#else
@@ -294,8 +297,21 @@ int snd_timer_open(struct snd_timer_instance **ti,
294 get_device(&timer->card->card_dev); 297 get_device(&timer->card->card_dev);
295 timeri->slave_class = tid->dev_sclass; 298 timeri->slave_class = tid->dev_sclass;
296 timeri->slave_id = slave_id; 299 timeri->slave_id = slave_id;
297 if (list_empty(&timer->open_list_head) && timer->hw.open) 300
298 timer->hw.open(timer); 301 if (list_empty(&timer->open_list_head) && timer->hw.open) {
302 int err = timer->hw.open(timer);
303 if (err) {
304 kfree(timeri->owner);
305 kfree(timeri);
306
307 if (timer->card)
308 put_device(&timer->card->card_dev);
309 module_put(timer->module);
310 mutex_unlock(&register_mutex);
311 return err;
312 }
313 }
314
299 list_add_tail(&timeri->open_list, &timer->open_list_head); 315 list_add_tail(&timeri->open_list, &timer->open_list_head);
300 snd_timer_check_master(timeri); 316 snd_timer_check_master(timeri);
301 mutex_unlock(&register_mutex); 317 mutex_unlock(&register_mutex);
@@ -526,6 +542,10 @@ static int snd_timer_stop1(struct snd_timer_instance *timeri, bool stop)
526 } 542 }
527 } 543 }
528 timeri->flags &= ~(SNDRV_TIMER_IFLG_RUNNING | SNDRV_TIMER_IFLG_START); 544 timeri->flags &= ~(SNDRV_TIMER_IFLG_RUNNING | SNDRV_TIMER_IFLG_START);
545 if (stop)
546 timeri->flags &= ~SNDRV_TIMER_IFLG_PAUSED;
547 else
548 timeri->flags |= SNDRV_TIMER_IFLG_PAUSED;
529 snd_timer_notify1(timeri, stop ? SNDRV_TIMER_EVENT_STOP : 549 snd_timer_notify1(timeri, stop ? SNDRV_TIMER_EVENT_STOP :
530 SNDRV_TIMER_EVENT_CONTINUE); 550 SNDRV_TIMER_EVENT_CONTINUE);
531 unlock: 551 unlock:
@@ -587,6 +607,10 @@ int snd_timer_stop(struct snd_timer_instance *timeri)
587 */ 607 */
588int snd_timer_continue(struct snd_timer_instance *timeri) 608int snd_timer_continue(struct snd_timer_instance *timeri)
589{ 609{
610 /* timer can continue only after pause */
611 if (!(timeri->flags & SNDRV_TIMER_IFLG_PAUSED))
612 return -EINVAL;
613
590 if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE) 614 if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE)
591 return snd_timer_start_slave(timeri, false); 615 return snd_timer_start_slave(timeri, false);
592 else 616 else
@@ -813,6 +837,7 @@ int snd_timer_new(struct snd_card *card, char *id, struct snd_timer_id *tid,
813 timer->tmr_subdevice = tid->subdevice; 837 timer->tmr_subdevice = tid->subdevice;
814 if (id) 838 if (id)
815 strlcpy(timer->id, id, sizeof(timer->id)); 839 strlcpy(timer->id, id, sizeof(timer->id));
840 timer->sticks = 1;
816 INIT_LIST_HEAD(&timer->device_list); 841 INIT_LIST_HEAD(&timer->device_list);
817 INIT_LIST_HEAD(&timer->open_list_head); 842 INIT_LIST_HEAD(&timer->open_list_head);
818 INIT_LIST_HEAD(&timer->active_list_head); 843 INIT_LIST_HEAD(&timer->active_list_head);
@@ -1817,6 +1842,9 @@ static int snd_timer_user_continue(struct file *file)
1817 tu = file->private_data; 1842 tu = file->private_data;
1818 if (!tu->timeri) 1843 if (!tu->timeri)
1819 return -EBADFD; 1844 return -EBADFD;
1845	/* start the timer instead of continuing if it has not been paused */
1846 if (!(tu->timeri->flags & SNDRV_TIMER_IFLG_PAUSED))
1847 return snd_timer_user_start(file);
1820 tu->timeri->lost = 0; 1848 tu->timeri->lost = 0;
1821 return (err = snd_timer_continue(tu->timeri)) < 0 ? err : 0; 1849 return (err = snd_timer_continue(tu->timeri)) < 0 ? err : 0;
1822} 1850}
@@ -1958,6 +1986,7 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
1958 tu->qused--; 1986 tu->qused--;
1959 spin_unlock_irq(&tu->qlock); 1987 spin_unlock_irq(&tu->qlock);
1960 1988
1989 mutex_lock(&tu->ioctl_lock);
1961 if (tu->tread) { 1990 if (tu->tread) {
1962 if (copy_to_user(buffer, &tu->tqueue[qhead], 1991 if (copy_to_user(buffer, &tu->tqueue[qhead],
1963 sizeof(struct snd_timer_tread))) 1992 sizeof(struct snd_timer_tread)))
@@ -1967,6 +1996,7 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
1967 sizeof(struct snd_timer_read))) 1996 sizeof(struct snd_timer_read)))
1968 err = -EFAULT; 1997 err = -EFAULT;
1969 } 1998 }
1999 mutex_unlock(&tu->ioctl_lock);
1970 2000
1971 spin_lock_irq(&tu->qlock); 2001 spin_lock_irq(&tu->qlock);
1972 if (err < 0) 2002 if (err < 0)
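
The snd_timer_user_read() hunks exist because copy_to_user() can fault and sleep, which is illegal under the qlock spinlock; the fix drops the spinlock and serializes the user copy with the ioctl_lock mutex instead. The resulting discipline, condensed from the hunks above (kernel-style sketch, fields as in the patch):

	spin_unlock_irq(&tu->qlock);		/* drop the atomic lock first */

	mutex_lock(&tu->ioctl_lock);		/* a sleeping lock is fine here */
	if (copy_to_user(buffer, &tu->tqueue[qhead], sizeof(tu->tqueue[0])))
		err = -EFAULT;			/* may fault and sleep */
	mutex_unlock(&tu->ioctl_lock);

	spin_lock_irq(&tu->qlock);		/* re-acquire to touch the queue */
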
diff --git a/sound/firewire/fireworks/fireworks.h b/sound/firewire/fireworks/fireworks.h
index 03ed35237e2b..d73c12b8753d 100644
--- a/sound/firewire/fireworks/fireworks.h
+++ b/sound/firewire/fireworks/fireworks.h
@@ -108,7 +108,6 @@ struct snd_efw {
108 u8 *resp_buf; 108 u8 *resp_buf;
109 u8 *pull_ptr; 109 u8 *pull_ptr;
110 u8 *push_ptr; 110 u8 *push_ptr;
111 unsigned int resp_queues;
112}; 111};
113 112
114int snd_efw_transaction_cmd(struct fw_unit *unit, 113int snd_efw_transaction_cmd(struct fw_unit *unit,
diff --git a/sound/firewire/fireworks/fireworks_hwdep.c b/sound/firewire/fireworks/fireworks_hwdep.c
index 33df8655fe81..2e1d9a23920c 100644
--- a/sound/firewire/fireworks/fireworks_hwdep.c
+++ b/sound/firewire/fireworks/fireworks_hwdep.c
@@ -25,6 +25,7 @@ hwdep_read_resp_buf(struct snd_efw *efw, char __user *buf, long remained,
25{ 25{
26 unsigned int length, till_end, type; 26 unsigned int length, till_end, type;
27 struct snd_efw_transaction *t; 27 struct snd_efw_transaction *t;
28 u8 *pull_ptr;
28 long count = 0; 29 long count = 0;
29 30
30 if (remained < sizeof(type) + sizeof(struct snd_efw_transaction)) 31 if (remained < sizeof(type) + sizeof(struct snd_efw_transaction))
@@ -38,8 +39,17 @@ hwdep_read_resp_buf(struct snd_efw *efw, char __user *buf, long remained,
38 buf += sizeof(type); 39 buf += sizeof(type);
39 40
40 /* write into buffer as many responses as possible */ 41 /* write into buffer as many responses as possible */
41 while (efw->resp_queues > 0) { 42 spin_lock_irq(&efw->lock);
42 t = (struct snd_efw_transaction *)(efw->pull_ptr); 43
44 /*
 45	 * If another task reaches here while this task is accessing user
 46	 * space, it picks up the current buffer position and can read the
 47	 * same series of responses.
48 */
49 pull_ptr = efw->pull_ptr;
50
51 while (efw->push_ptr != pull_ptr) {
52 t = (struct snd_efw_transaction *)(pull_ptr);
43 length = be32_to_cpu(t->length) * sizeof(__be32); 53 length = be32_to_cpu(t->length) * sizeof(__be32);
44 54
45 /* confirm enough space for this response */ 55 /* confirm enough space for this response */
@@ -49,26 +59,39 @@ hwdep_read_resp_buf(struct snd_efw *efw, char __user *buf, long remained,
49 /* copy from ring buffer to user buffer */ 59 /* copy from ring buffer to user buffer */
50 while (length > 0) { 60 while (length > 0) {
51 till_end = snd_efw_resp_buf_size - 61 till_end = snd_efw_resp_buf_size -
52 (unsigned int)(efw->pull_ptr - efw->resp_buf); 62 (unsigned int)(pull_ptr - efw->resp_buf);
53 till_end = min_t(unsigned int, length, till_end); 63 till_end = min_t(unsigned int, length, till_end);
54 64
55 if (copy_to_user(buf, efw->pull_ptr, till_end)) 65 spin_unlock_irq(&efw->lock);
66
67 if (copy_to_user(buf, pull_ptr, till_end))
56 return -EFAULT; 68 return -EFAULT;
57 69
58 efw->pull_ptr += till_end; 70 spin_lock_irq(&efw->lock);
59 if (efw->pull_ptr >= efw->resp_buf + 71
60 snd_efw_resp_buf_size) 72 pull_ptr += till_end;
61 efw->pull_ptr -= snd_efw_resp_buf_size; 73 if (pull_ptr >= efw->resp_buf + snd_efw_resp_buf_size)
74 pull_ptr -= snd_efw_resp_buf_size;
62 75
63 length -= till_end; 76 length -= till_end;
64 buf += till_end; 77 buf += till_end;
65 count += till_end; 78 count += till_end;
66 remained -= till_end; 79 remained -= till_end;
67 } 80 }
68
69 efw->resp_queues--;
70 } 81 }
71 82
83 /*
 84	 * All tasks can read from the buffer nearly simultaneously, but each
 85	 * task's final position depends on the length of its own buffer. For
 86	 * simplicity, the buffer position is published by whichever task runs
 87	 * last, so a listening application should let only one thread read
 88	 * from the buffer; otherwise each task may see a different sequence
 89	 * of responses depending on its buffer length.
90 */
91 efw->pull_ptr = pull_ptr;
92
93 spin_unlock_irq(&efw->lock);
94
72 return count; 95 return count;
73} 96}
74 97
@@ -76,14 +99,17 @@ static long
76hwdep_read_locked(struct snd_efw *efw, char __user *buf, long count, 99hwdep_read_locked(struct snd_efw *efw, char __user *buf, long count,
77 loff_t *offset) 100 loff_t *offset)
78{ 101{
79 union snd_firewire_event event; 102 union snd_firewire_event event = {
103 .lock_status.type = SNDRV_FIREWIRE_EVENT_LOCK_STATUS,
104 };
80 105
81 memset(&event, 0, sizeof(event)); 106 spin_lock_irq(&efw->lock);
82 107
83 event.lock_status.type = SNDRV_FIREWIRE_EVENT_LOCK_STATUS;
84 event.lock_status.status = (efw->dev_lock_count > 0); 108 event.lock_status.status = (efw->dev_lock_count > 0);
85 efw->dev_lock_changed = false; 109 efw->dev_lock_changed = false;
86 110
111 spin_unlock_irq(&efw->lock);
112
87 count = min_t(long, count, sizeof(event.lock_status)); 113 count = min_t(long, count, sizeof(event.lock_status));
88 114
89 if (copy_to_user(buf, &event, count)) 115 if (copy_to_user(buf, &event, count))
@@ -98,10 +124,15 @@ hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count,
98{ 124{
99 struct snd_efw *efw = hwdep->private_data; 125 struct snd_efw *efw = hwdep->private_data;
100 DEFINE_WAIT(wait); 126 DEFINE_WAIT(wait);
127 bool dev_lock_changed;
128 bool queued;
101 129
102 spin_lock_irq(&efw->lock); 130 spin_lock_irq(&efw->lock);
103 131
104 while ((!efw->dev_lock_changed) && (efw->resp_queues == 0)) { 132 dev_lock_changed = efw->dev_lock_changed;
133 queued = efw->push_ptr != efw->pull_ptr;
134
135 while (!dev_lock_changed && !queued) {
105 prepare_to_wait(&efw->hwdep_wait, &wait, TASK_INTERRUPTIBLE); 136 prepare_to_wait(&efw->hwdep_wait, &wait, TASK_INTERRUPTIBLE);
106 spin_unlock_irq(&efw->lock); 137 spin_unlock_irq(&efw->lock);
107 schedule(); 138 schedule();
@@ -109,15 +140,17 @@ hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count,
109 if (signal_pending(current)) 140 if (signal_pending(current))
110 return -ERESTARTSYS; 141 return -ERESTARTSYS;
111 spin_lock_irq(&efw->lock); 142 spin_lock_irq(&efw->lock);
143 dev_lock_changed = efw->dev_lock_changed;
144 queued = efw->push_ptr != efw->pull_ptr;
112 } 145 }
113 146
114 if (efw->dev_lock_changed) 147 spin_unlock_irq(&efw->lock);
148
149 if (dev_lock_changed)
115 count = hwdep_read_locked(efw, buf, count, offset); 150 count = hwdep_read_locked(efw, buf, count, offset);
116 else if (efw->resp_queues > 0) 151 else if (queued)
117 count = hwdep_read_resp_buf(efw, buf, count, offset); 152 count = hwdep_read_resp_buf(efw, buf, count, offset);
118 153
119 spin_unlock_irq(&efw->lock);
120
121 return count; 154 return count;
122} 155}
123 156
@@ -160,7 +193,7 @@ hwdep_poll(struct snd_hwdep *hwdep, struct file *file, poll_table *wait)
160 poll_wait(file, &efw->hwdep_wait, wait); 193 poll_wait(file, &efw->hwdep_wait, wait);
161 194
162 spin_lock_irq(&efw->lock); 195 spin_lock_irq(&efw->lock);
163 if (efw->dev_lock_changed || (efw->resp_queues > 0)) 196 if (efw->dev_lock_changed || efw->pull_ptr != efw->push_ptr)
164 events = POLLIN | POLLRDNORM; 197 events = POLLIN | POLLRDNORM;
165 else 198 else
166 events = 0; 199 events = 0;
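
The fireworks hwdep rewrite replaces the resp_queues counter with push/pull pointer comparison and a snapshot pattern: take the lock, copy pull_ptr into a local, drop the lock around each copy_to_user(), and publish the final position once. Condensed from the hunks above (kernel-style sketch, fields as in the patch):

	spin_lock_irq(&efw->lock);
	pull_ptr = efw->pull_ptr;		/* private snapshot */

	while (efw->push_ptr != pull_ptr) {
		spin_unlock_irq(&efw->lock);	/* copy_to_user() may sleep */
		if (copy_to_user(buf, pull_ptr, chunk))
			return -EFAULT;
		spin_lock_irq(&efw->lock);
		pull_ptr += chunk;		/* advance the snapshot only */
	}

	efw->pull_ptr = pull_ptr;		/* publish the new position once */
	spin_unlock_irq(&efw->lock);
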
diff --git a/sound/firewire/fireworks/fireworks_proc.c b/sound/firewire/fireworks/fireworks_proc.c
index 0639dcb13f7d..beb0a0ffee57 100644
--- a/sound/firewire/fireworks/fireworks_proc.c
+++ b/sound/firewire/fireworks/fireworks_proc.c
@@ -188,8 +188,8 @@ proc_read_queues_state(struct snd_info_entry *entry,
188 else 188 else
189 consumed = (unsigned int)(efw->push_ptr - efw->pull_ptr); 189 consumed = (unsigned int)(efw->push_ptr - efw->pull_ptr);
190 190
191 snd_iprintf(buffer, "%d %d/%d\n", 191 snd_iprintf(buffer, "%d/%d\n",
192 efw->resp_queues, consumed, snd_efw_resp_buf_size); 192 consumed, snd_efw_resp_buf_size);
193} 193}
194 194
195static void 195static void
diff --git a/sound/firewire/fireworks/fireworks_transaction.c b/sound/firewire/fireworks/fireworks_transaction.c
index f550808d1784..36a08ba51ec7 100644
--- a/sound/firewire/fireworks/fireworks_transaction.c
+++ b/sound/firewire/fireworks/fireworks_transaction.c
@@ -121,11 +121,11 @@ copy_resp_to_buf(struct snd_efw *efw, void *data, size_t length, int *rcode)
121 size_t capacity, till_end; 121 size_t capacity, till_end;
122 struct snd_efw_transaction *t; 122 struct snd_efw_transaction *t;
123 123
124 spin_lock_irq(&efw->lock);
125
126 t = (struct snd_efw_transaction *)data; 124 t = (struct snd_efw_transaction *)data;
127 length = min_t(size_t, be32_to_cpu(t->length) * sizeof(u32), length); 125 length = min_t(size_t, be32_to_cpu(t->length) * sizeof(u32), length);
128 126
127 spin_lock_irq(&efw->lock);
128
129 if (efw->push_ptr < efw->pull_ptr) 129 if (efw->push_ptr < efw->pull_ptr)
130 capacity = (unsigned int)(efw->pull_ptr - efw->push_ptr); 130 capacity = (unsigned int)(efw->pull_ptr - efw->push_ptr);
131 else 131 else
@@ -155,7 +155,6 @@ copy_resp_to_buf(struct snd_efw *efw, void *data, size_t length, int *rcode)
155 } 155 }
156 156
157 /* for hwdep */ 157 /* for hwdep */
158 efw->resp_queues++;
159 wake_up(&efw->hwdep_wait); 158 wake_up(&efw->hwdep_wait);
160 159
161 *rcode = RCODE_COMPLETE; 160 *rcode = RCODE_COMPLETE;
diff --git a/sound/firewire/tascam/tascam-hwdep.c b/sound/firewire/tascam/tascam-hwdep.c
index 131267c3a042..106406cbfaa3 100644
--- a/sound/firewire/tascam/tascam-hwdep.c
+++ b/sound/firewire/tascam/tascam-hwdep.c
@@ -16,31 +16,14 @@
16 16
17#include "tascam.h" 17#include "tascam.h"
18 18
19static long hwdep_read_locked(struct snd_tscm *tscm, char __user *buf,
20 long count)
21{
22 union snd_firewire_event event;
23
24 memset(&event, 0, sizeof(event));
25
26 event.lock_status.type = SNDRV_FIREWIRE_EVENT_LOCK_STATUS;
27 event.lock_status.status = (tscm->dev_lock_count > 0);
28 tscm->dev_lock_changed = false;
29
30 count = min_t(long, count, sizeof(event.lock_status));
31
32 if (copy_to_user(buf, &event, count))
33 return -EFAULT;
34
35 return count;
36}
37
38static long hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count, 19static long hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count,
39 loff_t *offset) 20 loff_t *offset)
40{ 21{
41 struct snd_tscm *tscm = hwdep->private_data; 22 struct snd_tscm *tscm = hwdep->private_data;
42 DEFINE_WAIT(wait); 23 DEFINE_WAIT(wait);
43 union snd_firewire_event event; 24 union snd_firewire_event event = {
25 .lock_status.type = SNDRV_FIREWIRE_EVENT_LOCK_STATUS,
26 };
44 27
45 spin_lock_irq(&tscm->lock); 28 spin_lock_irq(&tscm->lock);
46 29
@@ -54,10 +37,16 @@ static long hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count,
54 spin_lock_irq(&tscm->lock); 37 spin_lock_irq(&tscm->lock);
55 } 38 }
56 39
57 memset(&event, 0, sizeof(event)); 40 event.lock_status.status = (tscm->dev_lock_count > 0);
58 count = hwdep_read_locked(tscm, buf, count); 41 tscm->dev_lock_changed = false;
42
59 spin_unlock_irq(&tscm->lock); 43 spin_unlock_irq(&tscm->lock);
60 44
45 count = min_t(long, count, sizeof(event.lock_status));
46
47 if (copy_to_user(buf, &event, count))
48 return -EFAULT;
49
61 return count; 50 return count;
62} 51}
63 52
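
The tascam version of the same cleanup folds hwdep_read_locked() into its only caller so that copy_to_user() runs strictly after spin_unlock_irq(): a user copy can fault and sleep, which is not allowed while holding a spinlock with interrupts disabled. Condensed, with the wait loop elided:

    static long demo_hwdep_read(struct snd_tscm *tscm, char __user *buf,
                                long count)
    {
        union snd_firewire_event event = {
            .lock_status.type = SNDRV_FIREWIRE_EVENT_LOCK_STATUS,
        };

        spin_lock_irq(&tscm->lock);
        event.lock_status.status = (tscm->dev_lock_count > 0);
        tscm->dev_lock_changed = false;
        spin_unlock_irq(&tscm->lock);           /* drop the lock first... */

        count = min_t(long, count, sizeof(event.lock_status));
        if (copy_to_user(buf, &event, count))   /* ...then the faultable copy */
            return -EFAULT;

        return count;
    }
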
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 89dacf9b4e6c..160c7f713722 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -906,20 +906,23 @@ static int azx_resume(struct device *dev)
906 struct snd_card *card = dev_get_drvdata(dev); 906 struct snd_card *card = dev_get_drvdata(dev);
907 struct azx *chip; 907 struct azx *chip;
908 struct hda_intel *hda; 908 struct hda_intel *hda;
909 struct hdac_bus *bus;
909 910
910 if (!card) 911 if (!card)
911 return 0; 912 return 0;
912 913
913 chip = card->private_data; 914 chip = card->private_data;
914 hda = container_of(chip, struct hda_intel, chip); 915 hda = container_of(chip, struct hda_intel, chip);
916 bus = azx_bus(chip);
915 if (chip->disabled || hda->init_failed || !chip->running) 917 if (chip->disabled || hda->init_failed || !chip->running)
916 return 0; 918 return 0;
917 919
918 if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL 920 if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) {
919 && hda->need_i915_power) { 921 snd_hdac_display_power(bus, true);
920 snd_hdac_display_power(azx_bus(chip), true); 922 if (hda->need_i915_power)
921 snd_hdac_i915_set_bclk(azx_bus(chip)); 923 snd_hdac_i915_set_bclk(bus);
922 } 924 }
925
923 if (chip->msi) 926 if (chip->msi)
924 if (pci_enable_msi(pci) < 0) 927 if (pci_enable_msi(pci) < 0)
925 chip->msi = 0; 928 chip->msi = 0;
@@ -929,6 +932,11 @@ static int azx_resume(struct device *dev)
929 932
930 hda_intel_init_chip(chip, true); 933 hda_intel_init_chip(chip, true);
931 934
935 /* power down again for link-controlled chips */
936 if ((chip->driver_caps & AZX_DCAPS_I915_POWERWELL) &&
937 !hda->need_i915_power)
938 snd_hdac_display_power(bus, false);
939
932 snd_power_change_state(card, SNDRV_CTL_POWER_D0); 940 snd_power_change_state(card, SNDRV_CTL_POWER_D0);
933 941
934 trace_azx_resume(chip); 942 trace_azx_resume(chip);
@@ -1008,6 +1016,7 @@ static int azx_runtime_resume(struct device *dev)
1008 1016
1009 chip = card->private_data; 1017 chip = card->private_data;
1010 hda = container_of(chip, struct hda_intel, chip); 1018 hda = container_of(chip, struct hda_intel, chip);
1019 bus = azx_bus(chip);
1011 if (chip->disabled || hda->init_failed) 1020 if (chip->disabled || hda->init_failed)
1012 return 0; 1021 return 0;
1013 1022
@@ -1015,15 +1024,9 @@ static int azx_runtime_resume(struct device *dev)
1015 return 0; 1024 return 0;
1016 1025
1017 if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) { 1026 if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) {
1018 bus = azx_bus(chip); 1027 snd_hdac_display_power(bus, true);
1019 if (hda->need_i915_power) { 1028 if (hda->need_i915_power)
1020 snd_hdac_display_power(bus, true);
1021 snd_hdac_i915_set_bclk(bus); 1029 snd_hdac_i915_set_bclk(bus);
1022 } else {
1023 /* toggle codec wakeup bit for STATESTS read */
1024 snd_hdac_set_codec_wakeup(bus, true);
1025 snd_hdac_set_codec_wakeup(bus, false);
1026 }
1027 } 1030 }
1028 1031
1029 /* Read STATESTS before controller reset */ 1032 /* Read STATESTS before controller reset */
@@ -1043,6 +1046,11 @@ static int azx_runtime_resume(struct device *dev)
1043 azx_writew(chip, WAKEEN, azx_readw(chip, WAKEEN) & 1046 azx_writew(chip, WAKEEN, azx_readw(chip, WAKEEN) &
1044 ~STATESTS_INT_MASK); 1047 ~STATESTS_INT_MASK);
1045 1048
1049 /* power down again for link-controlled chips */
1050 if ((chip->driver_caps & AZX_DCAPS_I915_POWERWELL) &&
1051 !hda->need_i915_power)
1052 snd_hdac_display_power(bus, false);
1053
1046 trace_azx_runtime_resume(chip); 1054 trace_azx_runtime_resume(chip);
1047 return 0; 1055 return 0;
1048} 1056}
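
Both azx resume paths now follow one sequence: take the i915 display-power reference whenever AZX_DCAPS_I915_POWERWELL is set (not only when the codec needs i915 power), program BCLK only in that latter case, and drop the reference again after controller init on link-controlled chips, replacing the removed codec-wakeup toggle. The power-up half, condensed into a hypothetical helper:

    static void demo_resume_display_power(struct azx *chip)
    {
        struct hda_intel *hda = container_of(chip, struct hda_intel, chip);
        struct hdac_bus *bus = azx_bus(chip);

        if (!(chip->driver_caps & AZX_DCAPS_I915_POWERWELL))
            return;

        snd_hdac_display_power(bus, true);  /* required during resume itself */
        if (hda->need_i915_power)
            snd_hdac_i915_set_bclk(bus);    /* BCLK only if codec stays on i915 */
    }
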
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 574b1b48996f..575cefd8cc4a 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -4828,7 +4828,7 @@ enum {
4828 ALC293_FIXUP_DELL1_MIC_NO_PRESENCE, 4828 ALC293_FIXUP_DELL1_MIC_NO_PRESENCE,
4829 ALC292_FIXUP_TPT440_DOCK, 4829 ALC292_FIXUP_TPT440_DOCK,
4830 ALC292_FIXUP_TPT440, 4830 ALC292_FIXUP_TPT440,
4831 ALC283_FIXUP_BXBT2807_MIC, 4831 ALC283_FIXUP_HEADSET_MIC,
4832 ALC255_FIXUP_DELL_WMI_MIC_MUTE_LED, 4832 ALC255_FIXUP_DELL_WMI_MIC_MUTE_LED,
4833 ALC282_FIXUP_ASPIRE_V5_PINS, 4833 ALC282_FIXUP_ASPIRE_V5_PINS,
4834 ALC280_FIXUP_HP_GPIO4, 4834 ALC280_FIXUP_HP_GPIO4,
@@ -4855,6 +4855,7 @@ enum {
4855 ALC221_FIXUP_HP_FRONT_MIC, 4855 ALC221_FIXUP_HP_FRONT_MIC,
4856 ALC292_FIXUP_TPT460, 4856 ALC292_FIXUP_TPT460,
4857 ALC298_FIXUP_SPK_VOLUME, 4857 ALC298_FIXUP_SPK_VOLUME,
4858 ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER,
4858}; 4859};
4859 4860
4860static const struct hda_fixup alc269_fixups[] = { 4861static const struct hda_fixup alc269_fixups[] = {
@@ -5321,7 +5322,7 @@ static const struct hda_fixup alc269_fixups[] = {
5321 .chained = true, 5322 .chained = true,
5322 .chain_id = ALC292_FIXUP_TPT440_DOCK, 5323 .chain_id = ALC292_FIXUP_TPT440_DOCK,
5323 }, 5324 },
5324 [ALC283_FIXUP_BXBT2807_MIC] = { 5325 [ALC283_FIXUP_HEADSET_MIC] = {
5325 .type = HDA_FIXUP_PINS, 5326 .type = HDA_FIXUP_PINS,
5326 .v.pins = (const struct hda_pintbl[]) { 5327 .v.pins = (const struct hda_pintbl[]) {
5327 { 0x19, 0x04a110f0 }, 5328 { 0x19, 0x04a110f0 },
@@ -5516,6 +5517,15 @@ static const struct hda_fixup alc269_fixups[] = {
5516 .chained = true, 5517 .chained = true,
5517 .chain_id = ALC298_FIXUP_DELL1_MIC_NO_PRESENCE, 5518 .chain_id = ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
5518 }, 5519 },
5520 [ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER] = {
5521 .type = HDA_FIXUP_PINS,
5522 .v.pins = (const struct hda_pintbl[]) {
5523 { 0x1b, 0x90170151 },
5524 { }
5525 },
5526 .chained = true,
5527 .chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE
5528 },
5519}; 5529};
5520 5530
5521static const struct snd_pci_quirk alc269_fixup_tbl[] = { 5531static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -5560,6 +5570,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5560 SND_PCI_QUIRK(0x1028, 0x06df, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK), 5570 SND_PCI_QUIRK(0x1028, 0x06df, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
5561 SND_PCI_QUIRK(0x1028, 0x06e0, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK), 5571 SND_PCI_QUIRK(0x1028, 0x06e0, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
5562 SND_PCI_QUIRK(0x1028, 0x0704, "Dell XPS 13 9350", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE), 5572 SND_PCI_QUIRK(0x1028, 0x0704, "Dell XPS 13 9350", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
5573 SND_PCI_QUIRK(0x1028, 0x0706, "Dell Inspiron 7559", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
5563 SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE), 5574 SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE),
5564 SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE), 5575 SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
5565 SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME), 5576 SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME),
@@ -5651,7 +5662,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5651 SND_PCI_QUIRK(0x10cf, 0x1757, "Lifebook E752", ALC269_FIXUP_LIFEBOOK_HP_PIN), 5662 SND_PCI_QUIRK(0x10cf, 0x1757, "Lifebook E752", ALC269_FIXUP_LIFEBOOK_HP_PIN),
5652 SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC), 5663 SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC),
5653 SND_PCI_QUIRK(0x144d, 0xc109, "Samsung Ativ book 9 (NP900X3G)", ALC269_FIXUP_INV_DMIC), 5664 SND_PCI_QUIRK(0x144d, 0xc109, "Samsung Ativ book 9 (NP900X3G)", ALC269_FIXUP_INV_DMIC),
5654 SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_BXBT2807_MIC), 5665 SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC),
5666 SND_PCI_QUIRK(0x1462, 0xb120, "MSI Cubi MS-B120", ALC283_FIXUP_HEADSET_MIC),
5655 SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE), 5667 SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE),
5656 SND_PCI_QUIRK(0x17aa, 0x215e, "Thinkpad L512", ALC269_FIXUP_SKU_IGNORE), 5668 SND_PCI_QUIRK(0x17aa, 0x215e, "Thinkpad L512", ALC269_FIXUP_SKU_IGNORE),
5657 SND_PCI_QUIRK(0x17aa, 0x21b8, "Thinkpad Edge 14", ALC269_FIXUP_SKU_IGNORE), 5669 SND_PCI_QUIRK(0x17aa, 0x21b8, "Thinkpad Edge 14", ALC269_FIXUP_SKU_IGNORE),
@@ -5894,6 +5906,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
5894 {0x12, 0x90a60170}, 5906 {0x12, 0x90a60170},
5895 {0x14, 0x90170120}, 5907 {0x14, 0x90170120},
5896 {0x21, 0x02211030}), 5908 {0x21, 0x02211030}),
5909 SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell Inspiron 5468", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
5910 {0x12, 0x90a60180},
5911 {0x14, 0x90170120},
5912 {0x21, 0x02211030}),
5897 SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, 5913 SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
5898 ALC256_STANDARD_PINS), 5914 ALC256_STANDARD_PINS),
5899 SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC280_FIXUP_HP_GPIO4, 5915 SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC280_FIXUP_HP_GPIO4,
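
The Realtek changes rename ALC283_FIXUP_BXBT2807_MIC to the generic ALC283_FIXUP_HEADSET_MIC (the MSI Cubi now shares it) and add a subwoofer pin fixup plus new quirk entries. SND_PCI_QUIRK() keys a fixup to a PCI subsystem vendor/device pair, and a HDA_FIXUP_PINS fixup overrides pin default configurations by NID, so the Inspiron 7559 fix boils down to:

    /* route pin NID 0x1b as an internal speaker (the subwoofer) */
    [ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER] = {
        .type = HDA_FIXUP_PINS,
        .v.pins = (const struct hda_pintbl[]) {
            { 0x1b, 0x90170151 },   /* NID, pin default-config value */
            { }                     /* sentinel */
        },
        .chained = true,
        .chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE
    },

    /* matched by PCI subsystem IDs 0x1028:0x0706 */
    SND_PCI_QUIRK(0x1028, 0x0706, "Dell Inspiron 7559",
                  ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
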
diff --git a/sound/soc/atmel/atmel_ssc_dai.c b/sound/soc/atmel/atmel_ssc_dai.c
index 54c09acd3fed..16e459aedffe 100644
--- a/sound/soc/atmel/atmel_ssc_dai.c
+++ b/sound/soc/atmel/atmel_ssc_dai.c
@@ -299,8 +299,9 @@ static int atmel_ssc_startup(struct snd_pcm_substream *substream,
299 clk_enable(ssc_p->ssc->clk); 299 clk_enable(ssc_p->ssc->clk);
300 ssc_p->mck_rate = clk_get_rate(ssc_p->ssc->clk); 300 ssc_p->mck_rate = clk_get_rate(ssc_p->ssc->clk);
301 301
302 /* Reset the SSC to keep it at a clean status */ 302 /* Reset the SSC unless initialized to keep it in a clean state */
303 ssc_writel(ssc_p->ssc->regs, CR, SSC_BIT(CR_SWRST)); 303 if (!ssc_p->initialized)
304 ssc_writel(ssc_p->ssc->regs, CR, SSC_BIT(CR_SWRST));
304 305
305 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { 306 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
306 dir = 0; 307 dir = 0;
diff --git a/sound/soc/codecs/da7213.c b/sound/soc/codecs/da7213.c
index e5527bc570ae..bcf1834c5648 100644
--- a/sound/soc/codecs/da7213.c
+++ b/sound/soc/codecs/da7213.c
@@ -1247,8 +1247,8 @@ static int da7213_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
1247 return -EINVAL; 1247 return -EINVAL;
1248 } 1248 }
1249 1249
1250 /* By default only 32 BCLK per WCLK is supported */ 1250 /* By default only 64 BCLK per WCLK is supported */
1251 dai_clk_mode |= DA7213_DAI_BCLKS_PER_WCLK_32; 1251 dai_clk_mode |= DA7213_DAI_BCLKS_PER_WCLK_64;
1252 1252
1253 snd_soc_write(codec, DA7213_DAI_CLK_MODE, dai_clk_mode); 1253 snd_soc_write(codec, DA7213_DAI_CLK_MODE, dai_clk_mode);
1254 snd_soc_update_bits(codec, DA7213_DAI_CTRL, DA7213_DAI_FORMAT_MASK, 1254 snd_soc_update_bits(codec, DA7213_DAI_CTRL, DA7213_DAI_FORMAT_MASK,
diff --git a/sound/soc/codecs/max98371.c b/sound/soc/codecs/max98371.c
index cf0a39bb631a..02352ed8961c 100644
--- a/sound/soc/codecs/max98371.c
+++ b/sound/soc/codecs/max98371.c
@@ -412,6 +412,7 @@ static int max98371_i2c_remove(struct i2c_client *client)
412 412
413static const struct i2c_device_id max98371_i2c_id[] = { 413static const struct i2c_device_id max98371_i2c_id[] = {
414 { "max98371", 0 }, 414 { "max98371", 0 },
415 { }
415}; 416};
416 417
417MODULE_DEVICE_TABLE(i2c, max98371_i2c_id); 418MODULE_DEVICE_TABLE(i2c, max98371_i2c_id);
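
The max98371 fix adds the empty sentinel that the I2C core's table walk stops on; without it, ID matching reads past the end of the array. The required shape for any such table:

    static const struct i2c_device_id demo_i2c_id[] = {
        { "demo-codec", 0 },
        { }                 /* terminating entry, always required */
    };
    MODULE_DEVICE_TABLE(i2c, demo_i2c_id);
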
diff --git a/sound/soc/codecs/nau8825.c b/sound/soc/codecs/nau8825.c
index 5c9707ac4bbf..2e59a85e360b 100644
--- a/sound/soc/codecs/nau8825.c
+++ b/sound/soc/codecs/nau8825.c
@@ -212,31 +212,6 @@ static const unsigned short logtable[256] = {
212 0xfa2f, 0xfaea, 0xfba5, 0xfc60, 0xfd1a, 0xfdd4, 0xfe8e, 0xff47 212 0xfa2f, 0xfaea, 0xfba5, 0xfc60, 0xfd1a, 0xfdd4, 0xfe8e, 0xff47
213}; 213};
214 214
215static struct snd_soc_dai *nau8825_get_codec_dai(struct nau8825 *nau8825)
216{
217 struct snd_soc_codec *codec = snd_soc_dapm_to_codec(nau8825->dapm);
218 struct snd_soc_component *component = &codec->component;
219 struct snd_soc_dai *codec_dai, *_dai;
220
221 list_for_each_entry_safe(codec_dai, _dai, &component->dai_list, list) {
222 if (!strncmp(codec_dai->name, NUVOTON_CODEC_DAI,
223 strlen(NUVOTON_CODEC_DAI)))
224 return codec_dai;
225 }
226 return NULL;
227}
228
229static bool nau8825_dai_is_active(struct nau8825 *nau8825)
230{
231 struct snd_soc_dai *codec_dai = nau8825_get_codec_dai(nau8825);
232
233 if (codec_dai) {
234 if (codec_dai->playback_active || codec_dai->capture_active)
235 return true;
236 }
237 return false;
238}
239
240/** 215/**
241 * nau8825_sema_acquire - acquire the semaphore of nau88l25 216 * nau8825_sema_acquire - acquire the semaphore of nau88l25
242 * @nau8825: component to register the codec private data with 217 * @nau8825: component to register the codec private data with
@@ -250,19 +225,26 @@ static bool nau8825_dai_is_active(struct nau8825 *nau8825)
250 * Acquires the semaphore without jiffies. If no more tasks are allowed 225 * Acquires the semaphore without jiffies. If no more tasks are allowed
251 * to acquire the semaphore, calling this function will put the task to 226 * to acquire the semaphore, calling this function will put the task to
252 * sleep until the semaphore is released. 227 * sleep until the semaphore is released.
253 * It returns if the semaphore was acquired. 228 * If the semaphore is not released within the specified number of jiffies,
229 * this function returns -ETIME.
230 * If the sleep is interrupted by a signal, this function will return -EINTR.
231 * It returns 0 if the semaphore was acquired successfully.
254 */ 232 */
255static void nau8825_sema_acquire(struct nau8825 *nau8825, long timeout) 233static int nau8825_sema_acquire(struct nau8825 *nau8825, long timeout)
256{ 234{
257 int ret; 235 int ret;
258 236
259 if (timeout) 237 if (timeout) {
260 ret = down_timeout(&nau8825->xtalk_sem, timeout); 238 ret = down_timeout(&nau8825->xtalk_sem, timeout);
261 else 239 if (ret < 0)
 240 dev_warn(nau8825->dev, "Acquire semaphore timeout\n");
241 } else {
262 ret = down_interruptible(&nau8825->xtalk_sem); 242 ret = down_interruptible(&nau8825->xtalk_sem);
243 if (ret < 0)
 244 dev_warn(nau8825->dev, "Acquire semaphore fail\n");
245 }
263 246
264 if (ret < 0) 247 return ret;
 265 dev_warn(nau8825->dev, "Acquire semaphore fail\n");
266} 248}
267 249
268/** 250/**
@@ -1205,6 +1187,8 @@ static int nau8825_hw_params(struct snd_pcm_substream *substream,
1205 struct nau8825 *nau8825 = snd_soc_codec_get_drvdata(codec); 1187 struct nau8825 *nau8825 = snd_soc_codec_get_drvdata(codec);
1206 unsigned int val_len = 0; 1188 unsigned int val_len = 0;
1207 1189
1190 nau8825_sema_acquire(nau8825, 2 * HZ);
1191
1208 switch (params_width(params)) { 1192 switch (params_width(params)) {
1209 case 16: 1193 case 16:
1210 val_len |= NAU8825_I2S_DL_16; 1194 val_len |= NAU8825_I2S_DL_16;
@@ -1225,6 +1209,9 @@ static int nau8825_hw_params(struct snd_pcm_substream *substream,
1225 regmap_update_bits(nau8825->regmap, NAU8825_REG_I2S_PCM_CTRL1, 1209 regmap_update_bits(nau8825->regmap, NAU8825_REG_I2S_PCM_CTRL1,
1226 NAU8825_I2S_DL_MASK, val_len); 1210 NAU8825_I2S_DL_MASK, val_len);
1227 1211
 1212 /* Release the semaphore. */
1213 nau8825_sema_release(nau8825);
1214
1228 return 0; 1215 return 0;
1229} 1216}
1230 1217
@@ -1234,6 +1221,8 @@ static int nau8825_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
1234 struct nau8825 *nau8825 = snd_soc_codec_get_drvdata(codec); 1221 struct nau8825 *nau8825 = snd_soc_codec_get_drvdata(codec);
1235 unsigned int ctrl1_val = 0, ctrl2_val = 0; 1222 unsigned int ctrl1_val = 0, ctrl2_val = 0;
1236 1223
1224 nau8825_sema_acquire(nau8825, 2 * HZ);
1225
1237 switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { 1226 switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
1238 case SND_SOC_DAIFMT_CBM_CFM: 1227 case SND_SOC_DAIFMT_CBM_CFM:
1239 ctrl2_val |= NAU8825_I2S_MS_MASTER; 1228 ctrl2_val |= NAU8825_I2S_MS_MASTER;
@@ -1282,6 +1271,9 @@ static int nau8825_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
1282 regmap_update_bits(nau8825->regmap, NAU8825_REG_I2S_PCM_CTRL2, 1271 regmap_update_bits(nau8825->regmap, NAU8825_REG_I2S_PCM_CTRL2,
1283 NAU8825_I2S_MS_MASK, ctrl2_val); 1272 NAU8825_I2S_MS_MASK, ctrl2_val);
1284 1273
 1274 /* Release the semaphore. */
1275 nau8825_sema_release(nau8825);
1276
1285 return 0; 1277 return 0;
1286} 1278}
1287 1279
@@ -1611,8 +1603,11 @@ static irqreturn_t nau8825_interrupt(int irq, void *data)
1611 * cess and restore changes if process 1603 * cess and restore changes if process
1612 * is ongoing when ejection. 1604 * is ongoing when ejection.
1613 */ 1605 */
1606 int ret;
1614 nau8825->xtalk_protect = true; 1607 nau8825->xtalk_protect = true;
1615 nau8825_sema_acquire(nau8825, 0); 1608 ret = nau8825_sema_acquire(nau8825, 0);
1609 if (ret < 0)
1610 nau8825->xtalk_protect = false;
1616 } 1611 }
1617 /* Startup cross talk detection process */ 1612 /* Startup cross talk detection process */
1618 nau8825->xtalk_state = NAU8825_XTALK_PREPARE; 1613 nau8825->xtalk_state = NAU8825_XTALK_PREPARE;
@@ -2238,23 +2233,14 @@ static int __maybe_unused nau8825_suspend(struct snd_soc_codec *codec)
2238static int __maybe_unused nau8825_resume(struct snd_soc_codec *codec) 2233static int __maybe_unused nau8825_resume(struct snd_soc_codec *codec)
2239{ 2234{
2240 struct nau8825 *nau8825 = snd_soc_codec_get_drvdata(codec); 2235 struct nau8825 *nau8825 = snd_soc_codec_get_drvdata(codec);
2236 int ret;
2241 2237
2242 regcache_cache_only(nau8825->regmap, false); 2238 regcache_cache_only(nau8825->regmap, false);
2243 regcache_sync(nau8825->regmap); 2239 regcache_sync(nau8825->regmap);
2244 if (nau8825_is_jack_inserted(nau8825->regmap)) { 2240 nau8825->xtalk_protect = true;
2245 /* If the jack is inserted, we need to check whether the play- 2241 ret = nau8825_sema_acquire(nau8825, 0);
2246 * back is active before suspend. If active, the driver has to 2242 if (ret < 0)
2247 * raise the protection for cross talk function to avoid the 2243 nau8825->xtalk_protect = false;
2248 * playback recovers before cross talk process finish. Other-
2249 * wise, the playback will be interfered by cross talk func-
2250 * tion. It is better to apply hardware related parameters
2251 * before starting playback or record.
2252 */
2253 if (nau8825_dai_is_active(nau8825)) {
2254 nau8825->xtalk_protect = true;
2255 nau8825_sema_acquire(nau8825, 0);
2256 }
2257 }
2258 enable_irq(nau8825->irq); 2244 enable_irq(nau8825->irq);
2259 2245
2260 return 0; 2246 return 0;
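
With nau8825_sema_acquire() now propagating the down_timeout() or down_interruptible() result instead of swallowing it, callers can back out of crosstalk protection when the semaphore was never actually taken, which is exactly what the interrupt handler and this resume path do:

    nau8825->xtalk_protect = true;
    ret = nau8825_sema_acquire(nau8825, 0);
    if (ret < 0)
        nau8825->xtalk_protect = false;   /* interrupted: drop protection */
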
diff --git a/sound/soc/codecs/wm2000.c b/sound/soc/codecs/wm2000.c
index a67ea10f41a1..f2664396be6f 100644
--- a/sound/soc/codecs/wm2000.c
+++ b/sound/soc/codecs/wm2000.c
@@ -581,7 +581,7 @@ static int wm2000_anc_transition(struct wm2000_priv *wm2000,
581 if (anc_transitions[i].dest == ANC_OFF) 581 if (anc_transitions[i].dest == ANC_OFF)
582 clk_disable_unprepare(wm2000->mclk); 582 clk_disable_unprepare(wm2000->mclk);
583 583
584 return ret; 584 return 0;
585} 585}
586 586
587static int wm2000_anc_set_mode(struct wm2000_priv *wm2000) 587static int wm2000_anc_set_mode(struct wm2000_priv *wm2000)
diff --git a/sound/soc/generic/Makefile b/sound/soc/generic/Makefile
index 45602ca8536e..2d53c8d70705 100644
--- a/sound/soc/generic/Makefile
+++ b/sound/soc/generic/Makefile
@@ -1,5 +1,5 @@
1obj-$(CONFIG_SND_SIMPLE_CARD_UTILS) := simple-card-utils.o 1snd-soc-simple-card-utils-objs := simple-card-utils.o
2
3snd-soc-simple-card-objs := simple-card.o 2snd-soc-simple-card-objs := simple-card.o
4 3
5obj-$(CONFIG_SND_SIMPLE_CARD) += snd-soc-simple-card.o 4obj-$(CONFIG_SND_SIMPLE_CARD_UTILS) += snd-soc-simple-card-utils.o
5obj-$(CONFIG_SND_SIMPLE_CARD) += snd-soc-simple-card.o
diff --git a/sound/soc/generic/simple-card-utils.c b/sound/soc/generic/simple-card-utils.c
index d89a9a1b2471..9599de69a880 100644
--- a/sound/soc/generic/simple-card-utils.c
+++ b/sound/soc/generic/simple-card-utils.c
@@ -7,6 +7,7 @@
7 * it under the terms of the GNU General Public License version 2 as 7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation. 8 * published by the Free Software Foundation.
9 */ 9 */
10#include <linux/module.h>
10#include <linux/of.h> 11#include <linux/of.h>
11#include <sound/simple_card_utils.h> 12#include <sound/simple_card_utils.h>
12 13
@@ -95,3 +96,8 @@ int asoc_simple_card_parse_card_name(struct snd_soc_card *card,
95 return 0; 96 return 0;
96} 97}
97EXPORT_SYMBOL_GPL(asoc_simple_card_parse_card_name); 98EXPORT_SYMBOL_GPL(asoc_simple_card_parse_card_name);
99
100/* Module information */
101MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>");
102MODULE_DESCRIPTION("ALSA SoC Simple Card Utils");
103MODULE_LICENSE("GPL v2");
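
The Makefile hunk earlier turns simple-card-utils.o into a module of its own, so the source now needs module metadata: loading a module without MODULE_LICENSE() taints the kernel and cuts it off from GPL-only symbols. The minimal block any new module should carry:

    #include <linux/module.h>

    MODULE_DESCRIPTION("demo helper module");
    MODULE_LICENSE("GPL v2");   /* absent license => kernel taint */
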
diff --git a/sound/soc/intel/skylake/skl-sst-utils.c b/sound/soc/intel/skylake/skl-sst-utils.c
index 25fcb796bd86..ddcb52a51854 100644
--- a/sound/soc/intel/skylake/skl-sst-utils.c
+++ b/sound/soc/intel/skylake/skl-sst-utils.c
@@ -123,6 +123,11 @@ int snd_skl_get_module_info(struct skl_sst *ctx, u8 *uuid,
123 123
124 uuid_mod = (uuid_le *)uuid; 124 uuid_mod = (uuid_le *)uuid;
125 125
126 if (list_empty(&ctx->uuid_list)) {
127 dev_err(ctx->dev, "Module list is empty\n");
128 return -EINVAL;
129 }
130
126 list_for_each_entry(module, &ctx->uuid_list, list) { 131 list_for_each_entry(module, &ctx->uuid_list, list) {
127 if (uuid_le_cmp(*uuid_mod, module->uuid) == 0) { 132 if (uuid_le_cmp(*uuid_mod, module->uuid) == 0) {
128 dfw_config->module_id = module->id; 133 dfw_config->module_id = module->id;
diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c
index cd59536a761d..e3e764167765 100644
--- a/sound/soc/intel/skylake/skl.c
+++ b/sound/soc/intel/skylake/skl.c
@@ -672,8 +672,10 @@ static int skl_probe(struct pci_dev *pci,
672 672
673 skl->nhlt = skl_nhlt_init(bus->dev); 673 skl->nhlt = skl_nhlt_init(bus->dev);
674 674
675 if (skl->nhlt == NULL) 675 if (skl->nhlt == NULL) {
676 err = -ENODEV;
676 goto out_free; 677 goto out_free;
678 }
677 679
678 skl_nhlt_update_topology_bin(skl); 680 skl_nhlt_update_topology_bin(skl);
679 681
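
Without the assignment, the NHLT failure path jumped to out_free with err still holding the value of the last successful call, typically 0, so skl_probe() could report success for a device it never set up. Setting the code immediately before the goto is the standard idiom:

    skl->nhlt = skl_nhlt_init(bus->dev);
    if (skl->nhlt == NULL) {
        err = -ENODEV;      /* a bare goto would return stale err */
        goto out_free;
    }
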
diff --git a/sound/soc/omap/omap-abe-twl6040.c b/sound/soc/omap/omap-abe-twl6040.c
index 0843a68f277c..f61b3b58083b 100644
--- a/sound/soc/omap/omap-abe-twl6040.c
+++ b/sound/soc/omap/omap-abe-twl6040.c
@@ -38,10 +38,10 @@
38struct abe_twl6040 { 38struct abe_twl6040 {
39 int jack_detection; /* board can detect jack events */ 39 int jack_detection; /* board can detect jack events */
40 int mclk_freq; /* MCLK frequency speed for twl6040 */ 40 int mclk_freq; /* MCLK frequency speed for twl6040 */
41
42 struct platform_device *dmic_codec_dev;
43}; 41};
44 42
43struct platform_device *dmic_codec_dev;
44
45static int omap_abe_hw_params(struct snd_pcm_substream *substream, 45static int omap_abe_hw_params(struct snd_pcm_substream *substream,
46 struct snd_pcm_hw_params *params) 46 struct snd_pcm_hw_params *params)
47{ 47{
@@ -258,8 +258,6 @@ static int omap_abe_probe(struct platform_device *pdev)
258 if (priv == NULL) 258 if (priv == NULL)
259 return -ENOMEM; 259 return -ENOMEM;
260 260
261 priv->dmic_codec_dev = ERR_PTR(-EINVAL);
262
263 if (snd_soc_of_parse_card_name(card, "ti,model")) { 261 if (snd_soc_of_parse_card_name(card, "ti,model")) {
264 dev_err(&pdev->dev, "Card name is not provided\n"); 262 dev_err(&pdev->dev, "Card name is not provided\n");
265 return -ENODEV; 263 return -ENODEV;
@@ -284,13 +282,6 @@ static int omap_abe_probe(struct platform_device *pdev)
284 num_links = 2; 282 num_links = 2;
285 abe_twl6040_dai_links[1].cpu_of_node = dai_node; 283 abe_twl6040_dai_links[1].cpu_of_node = dai_node;
286 abe_twl6040_dai_links[1].platform_of_node = dai_node; 284 abe_twl6040_dai_links[1].platform_of_node = dai_node;
287
288 priv->dmic_codec_dev = platform_device_register_simple(
289 "dmic-codec", -1, NULL, 0);
290 if (IS_ERR(priv->dmic_codec_dev)) {
291 dev_err(&pdev->dev, "Can't instantiate dmic-codec\n");
292 return PTR_ERR(priv->dmic_codec_dev);
293 }
294 } else { 285 } else {
295 num_links = 1; 286 num_links = 1;
296 } 287 }
@@ -299,16 +290,14 @@ static int omap_abe_probe(struct platform_device *pdev)
299 of_property_read_u32(node, "ti,mclk-freq", &priv->mclk_freq); 290 of_property_read_u32(node, "ti,mclk-freq", &priv->mclk_freq);
300 if (!priv->mclk_freq) { 291 if (!priv->mclk_freq) {
301 dev_err(&pdev->dev, "MCLK frequency not provided\n"); 292 dev_err(&pdev->dev, "MCLK frequency not provided\n");
302 ret = -EINVAL; 293 return -EINVAL;
303 goto err_unregister;
304 } 294 }
305 295
306 card->fully_routed = 1; 296 card->fully_routed = 1;
307 297
308 if (!priv->mclk_freq) { 298 if (!priv->mclk_freq) {
309 dev_err(&pdev->dev, "MCLK frequency missing\n"); 299 dev_err(&pdev->dev, "MCLK frequency missing\n");
310 ret = -ENODEV; 300 return -ENODEV;
311 goto err_unregister;
312 } 301 }
313 302
314 card->dai_link = abe_twl6040_dai_links; 303 card->dai_link = abe_twl6040_dai_links;
@@ -317,17 +306,9 @@ static int omap_abe_probe(struct platform_device *pdev)
317 snd_soc_card_set_drvdata(card, priv); 306 snd_soc_card_set_drvdata(card, priv);
318 307
319 ret = snd_soc_register_card(card); 308 ret = snd_soc_register_card(card);
320 if (ret) { 309 if (ret)
321 dev_err(&pdev->dev, "snd_soc_register_card() failed: %d\n", 310 dev_err(&pdev->dev, "snd_soc_register_card() failed: %d\n",
322 ret); 311 ret);
323 goto err_unregister;
324 }
325
326 return 0;
327
328err_unregister:
329 if (!IS_ERR(priv->dmic_codec_dev))
330 platform_device_unregister(priv->dmic_codec_dev);
331 312
332 return ret; 313 return ret;
333} 314}
@@ -335,13 +316,9 @@ err_unregister:
335static int omap_abe_remove(struct platform_device *pdev) 316static int omap_abe_remove(struct platform_device *pdev)
336{ 317{
337 struct snd_soc_card *card = platform_get_drvdata(pdev); 318 struct snd_soc_card *card = platform_get_drvdata(pdev);
338 struct abe_twl6040 *priv = snd_soc_card_get_drvdata(card);
339 319
340 snd_soc_unregister_card(card); 320 snd_soc_unregister_card(card);
341 321
342 if (!IS_ERR(priv->dmic_codec_dev))
343 platform_device_unregister(priv->dmic_codec_dev);
344
345 return 0; 322 return 0;
346} 323}
347 324
@@ -361,7 +338,33 @@ static struct platform_driver omap_abe_driver = {
361 .remove = omap_abe_remove, 338 .remove = omap_abe_remove,
362}; 339};
363 340
364module_platform_driver(omap_abe_driver); 341static int __init omap_abe_init(void)
342{
343 int ret;
344
345 dmic_codec_dev = platform_device_register_simple("dmic-codec", -1, NULL,
346 0);
347 if (IS_ERR(dmic_codec_dev)) {
348 pr_err("%s: dmic-codec device registration failed\n", __func__);
349 return PTR_ERR(dmic_codec_dev);
350 }
351
352 ret = platform_driver_register(&omap_abe_driver);
353 if (ret) {
354 pr_err("%s: platform driver registration failed\n", __func__);
355 platform_device_unregister(dmic_codec_dev);
356 }
357
358 return ret;
359}
360module_init(omap_abe_init);
361
362static void __exit omap_abe_exit(void)
363{
364 platform_driver_unregister(&omap_abe_driver);
365 platform_device_unregister(dmic_codec_dev);
366}
367module_exit(omap_abe_exit);
365 368
366MODULE_AUTHOR("Misael Lopez Cruz <misael.lopez@ti.com>"); 369MODULE_AUTHOR("Misael Lopez Cruz <misael.lopez@ti.com>");
367MODULE_DESCRIPTION("ALSA SoC for OMAP boards with ABE and twl6040 codec"); 370MODULE_DESCRIPTION("ALSA SoC for OMAP boards with ABE and twl6040 codec");
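
Registering dmic-codec from module init rather than from omap_abe_probe() creates the auxiliary device exactly once, keeps it across deferred probes of the card, and tears it down symmetrically at module exit; probe's error unwinding collapses to plain returns. The skeleton, as added above:

    static int __init demo_abe_init(void)
    {
        int ret;

        dmic_codec_dev = platform_device_register_simple("dmic-codec", -1,
                                                         NULL, 0);
        if (IS_ERR(dmic_codec_dev))
            return PTR_ERR(dmic_codec_dev);

        ret = platform_driver_register(&omap_abe_driver);
        if (ret)
            platform_device_unregister(dmic_codec_dev);  /* unwind */

        return ret;
    }
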
diff --git a/sound/soc/omap/omap-mcpdm.c b/sound/soc/omap/omap-mcpdm.c
index e7cdc51fd806..64609c77a79d 100644
--- a/sound/soc/omap/omap-mcpdm.c
+++ b/sound/soc/omap/omap-mcpdm.c
@@ -31,7 +31,6 @@
31#include <linux/err.h> 31#include <linux/err.h>
32#include <linux/io.h> 32#include <linux/io.h>
33#include <linux/irq.h> 33#include <linux/irq.h>
34#include <linux/clk.h>
35#include <linux/slab.h> 34#include <linux/slab.h>
36#include <linux/pm_runtime.h> 35#include <linux/pm_runtime.h>
37#include <linux/of_device.h> 36#include <linux/of_device.h>
@@ -55,7 +54,6 @@ struct omap_mcpdm {
55 unsigned long phys_base; 54 unsigned long phys_base;
56 void __iomem *io_base; 55 void __iomem *io_base;
57 int irq; 56 int irq;
58 struct clk *pdmclk;
59 57
60 struct mutex mutex; 58 struct mutex mutex;
61 59
@@ -390,15 +388,14 @@ static int omap_mcpdm_probe(struct snd_soc_dai *dai)
390 struct omap_mcpdm *mcpdm = snd_soc_dai_get_drvdata(dai); 388 struct omap_mcpdm *mcpdm = snd_soc_dai_get_drvdata(dai);
391 int ret; 389 int ret;
392 390
393 clk_prepare_enable(mcpdm->pdmclk);
394 pm_runtime_enable(mcpdm->dev); 391 pm_runtime_enable(mcpdm->dev);
395 392
396 /* Disable lines while request is ongoing */ 393 /* Disable lines while request is ongoing */
397 pm_runtime_get_sync(mcpdm->dev); 394 pm_runtime_get_sync(mcpdm->dev);
398 omap_mcpdm_write(mcpdm, MCPDM_REG_CTRL, 0x00); 395 omap_mcpdm_write(mcpdm, MCPDM_REG_CTRL, 0x00);
399 396
400 ret = devm_request_irq(mcpdm->dev, mcpdm->irq, omap_mcpdm_irq_handler, 397 ret = request_irq(mcpdm->irq, omap_mcpdm_irq_handler, 0, "McPDM",
401 0, "McPDM", (void *)mcpdm); 398 (void *)mcpdm);
402 399
403 pm_runtime_put_sync(mcpdm->dev); 400 pm_runtime_put_sync(mcpdm->dev);
404 401
@@ -423,9 +420,9 @@ static int omap_mcpdm_remove(struct snd_soc_dai *dai)
423{ 420{
424 struct omap_mcpdm *mcpdm = snd_soc_dai_get_drvdata(dai); 421 struct omap_mcpdm *mcpdm = snd_soc_dai_get_drvdata(dai);
425 422
423 free_irq(mcpdm->irq, (void *)mcpdm);
426 pm_runtime_disable(mcpdm->dev); 424 pm_runtime_disable(mcpdm->dev);
427 425
428 clk_disable_unprepare(mcpdm->pdmclk);
429 return 0; 426 return 0;
430} 427}
431 428
@@ -445,8 +442,6 @@ static int omap_mcpdm_suspend(struct snd_soc_dai *dai)
445 mcpdm->pm_active_count++; 442 mcpdm->pm_active_count++;
446 } 443 }
447 444
448 clk_disable_unprepare(mcpdm->pdmclk);
449
450 return 0; 445 return 0;
451} 446}
452 447
@@ -454,8 +449,6 @@ static int omap_mcpdm_resume(struct snd_soc_dai *dai)
454{ 449{
455 struct omap_mcpdm *mcpdm = snd_soc_dai_get_drvdata(dai); 450 struct omap_mcpdm *mcpdm = snd_soc_dai_get_drvdata(dai);
456 451
457 clk_prepare_enable(mcpdm->pdmclk);
458
459 if (mcpdm->pm_active_count) { 452 if (mcpdm->pm_active_count) {
460 while (mcpdm->pm_active_count--) 453 while (mcpdm->pm_active_count--)
461 pm_runtime_get_sync(mcpdm->dev); 454 pm_runtime_get_sync(mcpdm->dev);
@@ -549,15 +542,6 @@ static int asoc_mcpdm_probe(struct platform_device *pdev)
549 542
550 mcpdm->dev = &pdev->dev; 543 mcpdm->dev = &pdev->dev;
551 544
552 mcpdm->pdmclk = devm_clk_get(&pdev->dev, "pdmclk");
553 if (IS_ERR(mcpdm->pdmclk)) {
554 if (PTR_ERR(mcpdm->pdmclk) == -EPROBE_DEFER)
555 return -EPROBE_DEFER;
556 dev_warn(&pdev->dev, "Error getting pdmclk (%ld)!\n",
557 PTR_ERR(mcpdm->pdmclk));
558 mcpdm->pdmclk = NULL;
559 }
560
561 ret = devm_snd_soc_register_component(&pdev->dev, 545 ret = devm_snd_soc_register_component(&pdev->dev,
562 &omap_mcpdm_component, 546 &omap_mcpdm_component,
563 &omap_mcpdm_dai, 1); 547 &omap_mcpdm_dai, 1);
diff --git a/sound/soc/samsung/s3c24xx_uda134x.c b/sound/soc/samsung/s3c24xx_uda134x.c
index 50849e137fc0..92e88bca386e 100644
--- a/sound/soc/samsung/s3c24xx_uda134x.c
+++ b/sound/soc/samsung/s3c24xx_uda134x.c
@@ -58,10 +58,12 @@ static struct platform_device *s3c24xx_uda134x_snd_device;
58 58
59static int s3c24xx_uda134x_startup(struct snd_pcm_substream *substream) 59static int s3c24xx_uda134x_startup(struct snd_pcm_substream *substream)
60{ 60{
61 int ret = 0; 61 struct snd_soc_pcm_runtime *rtd = substream->private_data;
62 struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
62#ifdef ENFORCE_RATES 63#ifdef ENFORCE_RATES
63 struct snd_pcm_runtime *runtime = substream->runtime; 64 struct snd_pcm_runtime *runtime = substream->runtime;
64#endif 65#endif
66 int ret = 0;
65 67
66 mutex_lock(&clk_lock); 68 mutex_lock(&clk_lock);
67 pr_debug("%s %d\n", __func__, clk_users); 69 pr_debug("%s %d\n", __func__, clk_users);
@@ -71,8 +73,7 @@ static int s3c24xx_uda134x_startup(struct snd_pcm_substream *substream)
71 printk(KERN_ERR "%s cannot get xtal\n", __func__); 73 printk(KERN_ERR "%s cannot get xtal\n", __func__);
72 ret = PTR_ERR(xtal); 74 ret = PTR_ERR(xtal);
73 } else { 75 } else {
74 pclk = clk_get(&s3c24xx_uda134x_snd_device->dev, 76 pclk = clk_get(cpu_dai->dev, "iis");
75 "pclk");
76 if (IS_ERR(pclk)) { 77 if (IS_ERR(pclk)) {
77 printk(KERN_ERR "%s cannot get pclk\n", 78 printk(KERN_ERR "%s cannot get pclk\n",
78 __func__); 79 __func__);
diff --git a/sound/soc/sh/rcar/src.c b/sound/soc/sh/rcar/src.c
index e39f916d0f2f..969a5169de25 100644
--- a/sound/soc/sh/rcar/src.c
+++ b/sound/soc/sh/rcar/src.c
@@ -226,8 +226,12 @@ static void rsnd_src_set_convert_rate(struct rsnd_dai_stream *io,
226 ifscr = 0; 226 ifscr = 0;
227 fsrate = 0; 227 fsrate = 0;
228 if (fin != fout) { 228 if (fin != fout) {
229 u64 n;
230
229 ifscr = 1; 231 ifscr = 1;
230 fsrate = 0x0400000 / fout * fin; 232 n = (u64)0x0400000 * fin;
233 do_div(n, fout);
234 fsrate = n;
231 } 235 }
232 236
233 /* 237 /*
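
The old expression 0x0400000 / fout * fin divided first and threw away precision; multiplying first is exact but overflows 32 bits, so the fix widens to u64 and divides with do_div(), the 64-by-32 helper that works on 32-bit kernels where plain 64-bit division is unavailable. As a helper:

    static u32 demo_fsrate(u32 fin, u32 fout)
    {
        u64 n = (u64)0x0400000 * fin;  /* widen: the product exceeds 32 bits */

        do_div(n, fout);               /* n becomes the quotient */
        return n;
    }
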
diff --git a/sound/soc/soc-compress.c b/sound/soc/soc-compress.c
index d2df46c14c68..bf7b52fce597 100644
--- a/sound/soc/soc-compress.c
+++ b/sound/soc/soc-compress.c
@@ -121,7 +121,7 @@ static int soc_compr_open_fe(struct snd_compr_stream *cstream)
121 121
122 dpcm_be_disconnect(fe, stream); 122 dpcm_be_disconnect(fe, stream);
123 fe->dpcm[stream].runtime = NULL; 123 fe->dpcm[stream].runtime = NULL;
124 goto fe_err; 124 goto path_err;
125 } 125 }
126 126
127 dpcm_clear_pending_state(fe, stream); 127 dpcm_clear_pending_state(fe, stream);
@@ -136,6 +136,8 @@ static int soc_compr_open_fe(struct snd_compr_stream *cstream)
136 136
137 return 0; 137 return 0;
138 138
139path_err:
140 dpcm_path_put(&list);
139fe_err: 141fe_err:
140 if (fe->dai_link->compr_ops && fe->dai_link->compr_ops->shutdown) 142 if (fe->dai_link->compr_ops && fe->dai_link->compr_ops->shutdown)
141 fe->dai_link->compr_ops->shutdown(cstream); 143 fe->dai_link->compr_ops->shutdown(cstream);
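
soc_compr_open_fe() takes a DPCM path reference before connecting the back end; the new path_err label releases it on the failure branch that previously leaked it. Cleanup labels sit in reverse order of acquisition so each jump undoes exactly what succeeded; a sketch with hypothetical helpers:

    int demo_path_get(void);
    void demo_path_put(void);
    int demo_connect(void);

    static int demo_open_fe(void)
    {
        int ret = demo_path_get();

        if (ret < 0)
            return ret;

        ret = demo_connect();
        if (ret < 0)
            goto path_err;    /* everything before this point succeeded */

        return 0;

    path_err:
        demo_path_put();      /* undoes demo_path_get() */
        return ret;
    }
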
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 16369cad4803..4afa8dba5e98 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -1056,7 +1056,7 @@ static int soc_bind_dai_link(struct snd_soc_card *card,
1056 if (!rtd->platform) { 1056 if (!rtd->platform) {
1057 dev_err(card->dev, "ASoC: platform %s not registered\n", 1057 dev_err(card->dev, "ASoC: platform %s not registered\n",
1058 dai_link->platform_name); 1058 dai_link->platform_name);
1059 return -EPROBE_DEFER; 1059 goto _err_defer;
1060 } 1060 }
1061 1061
1062 soc_add_pcm_runtime(card, rtd); 1062 soc_add_pcm_runtime(card, rtd);
@@ -2083,14 +2083,13 @@ static int soc_cleanup_card_resources(struct snd_soc_card *card)
2083 /* remove auxiliary devices */ 2083 /* remove auxiliary devices */
2084 soc_remove_aux_devices(card); 2084 soc_remove_aux_devices(card);
2085 2085
2086 snd_soc_dapm_free(&card->dapm);
2086 soc_cleanup_card_debugfs(card); 2087 soc_cleanup_card_debugfs(card);
2087 2088
2088 /* remove the card */ 2089 /* remove the card */
2089 if (card->remove) 2090 if (card->remove)
2090 card->remove(card); 2091 card->remove(card);
2091 2092
2092 snd_soc_dapm_free(&card->dapm);
2093
2094 snd_card_free(card->snd_card); 2093 snd_card_free(card->snd_card);
2095 return 0; 2094 return 0;
2096 2095
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 8698c26773b3..d908ff8f9755 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -3493,6 +3493,7 @@ static int snd_soc_dai_link_event(struct snd_soc_dapm_widget *w,
3493 const struct snd_soc_pcm_stream *config = w->params + w->params_select; 3493 const struct snd_soc_pcm_stream *config = w->params + w->params_select;
3494 struct snd_pcm_substream substream; 3494 struct snd_pcm_substream substream;
3495 struct snd_pcm_hw_params *params = NULL; 3495 struct snd_pcm_hw_params *params = NULL;
3496 struct snd_pcm_runtime *runtime = NULL;
3496 u64 fmt; 3497 u64 fmt;
3497 int ret; 3498 int ret;
3498 3499
@@ -3541,6 +3542,14 @@ static int snd_soc_dai_link_event(struct snd_soc_dapm_widget *w,
3541 3542
3542 memset(&substream, 0, sizeof(substream)); 3543 memset(&substream, 0, sizeof(substream));
3543 3544
3545 /* Allocate a dummy snd_pcm_runtime for startup() and other ops() */
3546 runtime = kzalloc(sizeof(*runtime), GFP_KERNEL);
3547 if (!runtime) {
3548 ret = -ENOMEM;
3549 goto out;
3550 }
3551 substream.runtime = runtime;
3552
3544 switch (event) { 3553 switch (event) {
3545 case SND_SOC_DAPM_PRE_PMU: 3554 case SND_SOC_DAPM_PRE_PMU:
3546 substream.stream = SNDRV_PCM_STREAM_CAPTURE; 3555 substream.stream = SNDRV_PCM_STREAM_CAPTURE;
@@ -3606,6 +3615,7 @@ static int snd_soc_dai_link_event(struct snd_soc_dapm_widget *w,
3606 } 3615 }
3607 3616
3608out: 3617out:
3618 kfree(runtime);
3609 kfree(params); 3619 kfree(params);
3610 return ret; 3620 return ret;
3611} 3621}
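
snd_soc_dai_link_event() builds a dummy on-stack substream, but DAI startup() implementations commonly dereference substream->runtime, so the hunks above attach a kzalloc'ed snd_pcm_runtime for the duration of the event and free it at the shared out: label (kfree(NULL) is a no-op, so the early-error paths are covered too). Condensed excerpt:

    runtime = kzalloc(sizeof(*runtime), GFP_KERNEL);
    if (!runtime) {
        ret = -ENOMEM;
        goto out;               /* "out" frees runtime and params */
    }
    substream.runtime = runtime;
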
diff --git a/sound/usb/line6/pcm.c b/sound/usb/line6/pcm.c
index 204cc074adb9..41aa3355e920 100644
--- a/sound/usb/line6/pcm.c
+++ b/sound/usb/line6/pcm.c
@@ -55,7 +55,6 @@ static int snd_line6_impulse_volume_put(struct snd_kcontrol *kcontrol,
55 err = line6_pcm_acquire(line6pcm, LINE6_STREAM_IMPULSE); 55 err = line6_pcm_acquire(line6pcm, LINE6_STREAM_IMPULSE);
56 if (err < 0) { 56 if (err < 0) {
57 line6pcm->impulse_volume = 0; 57 line6pcm->impulse_volume = 0;
58 line6_pcm_release(line6pcm, LINE6_STREAM_IMPULSE);
59 return err; 58 return err;
60 } 59 }
61 } else { 60 } else {
@@ -211,7 +210,9 @@ static void line6_stream_stop(struct snd_line6_pcm *line6pcm, int direction,
211 spin_lock_irqsave(&pstr->lock, flags); 210 spin_lock_irqsave(&pstr->lock, flags);
212 clear_bit(type, &pstr->running); 211 clear_bit(type, &pstr->running);
213 if (!pstr->running) { 212 if (!pstr->running) {
213 spin_unlock_irqrestore(&pstr->lock, flags);
214 line6_unlink_audio_urbs(line6pcm, pstr); 214 line6_unlink_audio_urbs(line6pcm, pstr);
215 spin_lock_irqsave(&pstr->lock, flags);
215 if (direction == SNDRV_PCM_STREAM_CAPTURE) { 216 if (direction == SNDRV_PCM_STREAM_CAPTURE) {
216 line6pcm->prev_fbuf = NULL; 217 line6pcm->prev_fbuf = NULL;
217 line6pcm->prev_fsize = 0; 218 line6pcm->prev_fsize = 0;
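
line6_stream_stop() now drops pstr->lock around line6_unlink_audio_urbs(), presumably because unlinking synchronously completes URBs whose handlers take the same lock; the impulse-volume hunk above likewise stops releasing a stream that line6_pcm_acquire() never granted. Abridged:

    spin_lock_irqsave(&pstr->lock, flags);
    clear_bit(type, &pstr->running);
    if (!pstr->running) {
        /* assumed: the unlink path can re-enter pstr->lock */
        spin_unlock_irqrestore(&pstr->lock, flags);
        line6_unlink_audio_urbs(line6pcm, pstr);
        spin_lock_irqsave(&pstr->lock, flags);
        /* per-direction cleanup continues under the lock */
    }
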
diff --git a/sound/usb/line6/pod.c b/sound/usb/line6/pod.c
index daf81d169a42..45dd34874f43 100644
--- a/sound/usb/line6/pod.c
+++ b/sound/usb/line6/pod.c
@@ -244,8 +244,8 @@ static int pod_set_system_param_int(struct usb_line6_pod *pod, int value,
244static ssize_t serial_number_show(struct device *dev, 244static ssize_t serial_number_show(struct device *dev,
245 struct device_attribute *attr, char *buf) 245 struct device_attribute *attr, char *buf)
246{ 246{
247 struct usb_interface *interface = to_usb_interface(dev); 247 struct snd_card *card = dev_to_snd_card(dev);
248 struct usb_line6_pod *pod = usb_get_intfdata(interface); 248 struct usb_line6_pod *pod = card->private_data;
249 249
250 return sprintf(buf, "%u\n", pod->serial_number); 250 return sprintf(buf, "%u\n", pod->serial_number);
251} 251}
@@ -256,8 +256,8 @@ static ssize_t serial_number_show(struct device *dev,
256static ssize_t firmware_version_show(struct device *dev, 256static ssize_t firmware_version_show(struct device *dev,
257 struct device_attribute *attr, char *buf) 257 struct device_attribute *attr, char *buf)
258{ 258{
259 struct usb_interface *interface = to_usb_interface(dev); 259 struct snd_card *card = dev_to_snd_card(dev);
260 struct usb_line6_pod *pod = usb_get_intfdata(interface); 260 struct usb_line6_pod *pod = card->private_data;
261 261
262 return sprintf(buf, "%d.%02d\n", pod->firmware_version / 100, 262 return sprintf(buf, "%d.%02d\n", pod->firmware_version / 100,
263 pod->firmware_version % 100); 263 pod->firmware_version % 100);
@@ -269,8 +269,8 @@ static ssize_t firmware_version_show(struct device *dev,
269static ssize_t device_id_show(struct device *dev, 269static ssize_t device_id_show(struct device *dev,
270 struct device_attribute *attr, char *buf) 270 struct device_attribute *attr, char *buf)
271{ 271{
272 struct usb_interface *interface = to_usb_interface(dev); 272 struct snd_card *card = dev_to_snd_card(dev);
273 struct usb_line6_pod *pod = usb_get_intfdata(interface); 273 struct usb_line6_pod *pod = card->private_data;
274 274
275 return sprintf(buf, "%d\n", pod->device_id); 275 return sprintf(buf, "%d\n", pod->device_id);
276} 276}
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 6adde457b602..152292e5ee2b 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1128,6 +1128,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
1128{ 1128{
1129 /* devices which do not support reading the sample rate. */ 1129 /* devices which do not support reading the sample rate. */
1130 switch (chip->usb_id) { 1130 switch (chip->usb_id) {
1131 case USB_ID(0x041E, 0x4080): /* Creative Live Cam VF0610 */
1131 case USB_ID(0x045E, 0x075D): /* MS Lifecam Cinema */ 1132 case USB_ID(0x045E, 0x075D): /* MS Lifecam Cinema */
1132 case USB_ID(0x045E, 0x076D): /* MS Lifecam HD-5000 */ 1133 case USB_ID(0x045E, 0x076D): /* MS Lifecam HD-5000 */
1133 case USB_ID(0x045E, 0x076E): /* MS Lifecam HD-5001 */ 1134 case USB_ID(0x045E, 0x076E): /* MS Lifecam HD-5001 */
@@ -1138,7 +1139,9 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
1138 case USB_ID(0x047F, 0xAA05): /* Plantronics DA45 */ 1139 case USB_ID(0x047F, 0xAA05): /* Plantronics DA45 */
1139 case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */ 1140 case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */
1140 case USB_ID(0x0556, 0x0014): /* Phoenix Audio TMX320VC */ 1141 case USB_ID(0x0556, 0x0014): /* Phoenix Audio TMX320VC */
1142 case USB_ID(0x05A3, 0x9420): /* ELP HD USB Camera */
1141 case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */ 1143 case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */
1144 case USB_ID(0x1901, 0x0191): /* GE B850V3 CP2114 audio interface */
1142 case USB_ID(0x1de7, 0x0013): /* Phoenix Audio MT202exe */ 1145 case USB_ID(0x1de7, 0x0013): /* Phoenix Audio MT202exe */
1143 case USB_ID(0x1de7, 0x0014): /* Phoenix Audio TMX320 */ 1146 case USB_ID(0x1de7, 0x0014): /* Phoenix Audio TMX320 */
1144 case USB_ID(0x1de7, 0x0114): /* Phoenix Audio MT202pcs */ 1147 case USB_ID(0x1de7, 0x0114): /* Phoenix Audio MT202pcs */
diff --git a/tools/arch/arm64/include/uapi/asm/kvm.h b/tools/arch/arm64/include/uapi/asm/kvm.h
index f209ea151dca..3051f86a9b5f 100644
--- a/tools/arch/arm64/include/uapi/asm/kvm.h
+++ b/tools/arch/arm64/include/uapi/asm/kvm.h
@@ -87,9 +87,11 @@ struct kvm_regs {
87/* Supported VGICv3 address types */ 87/* Supported VGICv3 address types */
88#define KVM_VGIC_V3_ADDR_TYPE_DIST 2 88#define KVM_VGIC_V3_ADDR_TYPE_DIST 2
89#define KVM_VGIC_V3_ADDR_TYPE_REDIST 3 89#define KVM_VGIC_V3_ADDR_TYPE_REDIST 3
90#define KVM_VGIC_ITS_ADDR_TYPE 4
90 91
91#define KVM_VGIC_V3_DIST_SIZE SZ_64K 92#define KVM_VGIC_V3_DIST_SIZE SZ_64K
92#define KVM_VGIC_V3_REDIST_SIZE (2 * SZ_64K) 93#define KVM_VGIC_V3_REDIST_SIZE (2 * SZ_64K)
94#define KVM_VGIC_V3_ITS_SIZE (2 * SZ_64K)
93 95
94#define KVM_ARM_VCPU_POWER_OFF 0 /* CPU is started in OFF state */ 96#define KVM_ARM_VCPU_POWER_OFF 0 /* CPU is started in OFF state */
95#define KVM_ARM_VCPU_EL1_32BIT 1 /* CPU running a 32bit VM */ 97#define KVM_ARM_VCPU_EL1_32BIT 1 /* CPU running a 32bit VM */
diff --git a/tools/arch/s390/include/uapi/asm/kvm.h b/tools/arch/s390/include/uapi/asm/kvm.h
index 3b8e99ef9d58..a2ffec4139ad 100644
--- a/tools/arch/s390/include/uapi/asm/kvm.h
+++ b/tools/arch/s390/include/uapi/asm/kvm.h
@@ -93,6 +93,47 @@ struct kvm_s390_vm_cpu_machine {
93 __u64 fac_list[256]; 93 __u64 fac_list[256];
94}; 94};
95 95
96#define KVM_S390_VM_CPU_PROCESSOR_FEAT 2
97#define KVM_S390_VM_CPU_MACHINE_FEAT 3
98
99#define KVM_S390_VM_CPU_FEAT_NR_BITS 1024
100#define KVM_S390_VM_CPU_FEAT_ESOP 0
101#define KVM_S390_VM_CPU_FEAT_SIEF2 1
102#define KVM_S390_VM_CPU_FEAT_64BSCAO 2
103#define KVM_S390_VM_CPU_FEAT_SIIF 3
104#define KVM_S390_VM_CPU_FEAT_GPERE 4
105#define KVM_S390_VM_CPU_FEAT_GSLS 5
106#define KVM_S390_VM_CPU_FEAT_IB 6
107#define KVM_S390_VM_CPU_FEAT_CEI 7
108#define KVM_S390_VM_CPU_FEAT_IBS 8
109#define KVM_S390_VM_CPU_FEAT_SKEY 9
110#define KVM_S390_VM_CPU_FEAT_CMMA 10
111#define KVM_S390_VM_CPU_FEAT_PFMFI 11
112#define KVM_S390_VM_CPU_FEAT_SIGPIF 12
113struct kvm_s390_vm_cpu_feat {
114 __u64 feat[16];
115};
116
117#define KVM_S390_VM_CPU_PROCESSOR_SUBFUNC 4
118#define KVM_S390_VM_CPU_MACHINE_SUBFUNC 5
119/* for "test bit" instructions MSB 0 bit ordering, for "query" raw blocks */
120struct kvm_s390_vm_cpu_subfunc {
121 __u8 plo[32]; /* always */
122 __u8 ptff[16]; /* with TOD-clock steering */
123 __u8 kmac[16]; /* with MSA */
124 __u8 kmc[16]; /* with MSA */
125 __u8 km[16]; /* with MSA */
126 __u8 kimd[16]; /* with MSA */
127 __u8 klmd[16]; /* with MSA */
128 __u8 pckmo[16]; /* with MSA3 */
129 __u8 kmctr[16]; /* with MSA4 */
130 __u8 kmf[16]; /* with MSA4 */
131 __u8 kmo[16]; /* with MSA4 */
132 __u8 pcc[16]; /* with MSA4 */
133 __u8 ppno[16]; /* with MSA5 */
134 __u8 reserved[1824];
135};
136
96/* kvm attributes for crypto */ 137/* kvm attributes for crypto */
97#define KVM_S390_VM_CRYPTO_ENABLE_AES_KW 0 138#define KVM_S390_VM_CRYPTO_ENABLE_AES_KW 0
98#define KVM_S390_VM_CRYPTO_ENABLE_DEA_KW 1 139#define KVM_S390_VM_CRYPTO_ENABLE_DEA_KW 1
diff --git a/tools/arch/s390/include/uapi/asm/sie.h b/tools/arch/s390/include/uapi/asm/sie.h
index 8fb5d4a6dd25..3ac634368939 100644
--- a/tools/arch/s390/include/uapi/asm/sie.h
+++ b/tools/arch/s390/include/uapi/asm/sie.h
@@ -140,6 +140,7 @@
140 exit_code_ipa0(0xB2, 0x4c, "TAR"), \ 140 exit_code_ipa0(0xB2, 0x4c, "TAR"), \
141 exit_code_ipa0(0xB2, 0x50, "CSP"), \ 141 exit_code_ipa0(0xB2, 0x50, "CSP"), \
142 exit_code_ipa0(0xB2, 0x54, "MVPG"), \ 142 exit_code_ipa0(0xB2, 0x54, "MVPG"), \
143 exit_code_ipa0(0xB2, 0x56, "STHYI"), \
143 exit_code_ipa0(0xB2, 0x58, "BSG"), \ 144 exit_code_ipa0(0xB2, 0x58, "BSG"), \
144 exit_code_ipa0(0xB2, 0x5a, "BSA"), \ 145 exit_code_ipa0(0xB2, 0x5a, "BSA"), \
145 exit_code_ipa0(0xB2, 0x5f, "CHSC"), \ 146 exit_code_ipa0(0xB2, 0x5f, "CHSC"), \
diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h
index 4a413485f9eb..92a8308b96f6 100644
--- a/tools/arch/x86/include/asm/cpufeatures.h
+++ b/tools/arch/x86/include/asm/cpufeatures.h
@@ -225,7 +225,6 @@
225#define X86_FEATURE_RDSEED ( 9*32+18) /* The RDSEED instruction */ 225#define X86_FEATURE_RDSEED ( 9*32+18) /* The RDSEED instruction */
226#define X86_FEATURE_ADX ( 9*32+19) /* The ADCX and ADOX instructions */ 226#define X86_FEATURE_ADX ( 9*32+19) /* The ADCX and ADOX instructions */
227#define X86_FEATURE_SMAP ( 9*32+20) /* Supervisor Mode Access Prevention */ 227#define X86_FEATURE_SMAP ( 9*32+20) /* Supervisor Mode Access Prevention */
228#define X86_FEATURE_PCOMMIT ( 9*32+22) /* PCOMMIT instruction */
229#define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */ 228#define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */
230#define X86_FEATURE_CLWB ( 9*32+24) /* CLWB instruction */ 229#define X86_FEATURE_CLWB ( 9*32+24) /* CLWB instruction */
231#define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */ 230#define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */
@@ -301,10 +300,6 @@
301#define X86_BUG_FXSAVE_LEAK X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */ 300#define X86_BUG_FXSAVE_LEAK X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */
302#define X86_BUG_CLFLUSH_MONITOR X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */ 301#define X86_BUG_CLFLUSH_MONITOR X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */
303#define X86_BUG_SYSRET_SS_ATTRS X86_BUG(8) /* SYSRET doesn't fix up SS attrs */ 302#define X86_BUG_SYSRET_SS_ATTRS X86_BUG(8) /* SYSRET doesn't fix up SS attrs */
304#define X86_BUG_NULL_SEG X86_BUG(9) /* Nulling a selector preserves the base */
305#define X86_BUG_SWAPGS_FENCE X86_BUG(10) /* SWAPGS without input dep on GS */
306
307
308#ifdef CONFIG_X86_32 303#ifdef CONFIG_X86_32
309/* 304/*
310 * 64-bit kernels don't use X86_BUG_ESPFIX. Make the define conditional 305 * 64-bit kernels don't use X86_BUG_ESPFIX. Make the define conditional
@@ -312,5 +307,7 @@
312 */ 307 */
313#define X86_BUG_ESPFIX X86_BUG(9) /* "" IRET to 16-bit SS corrupts ESP/RSP high bits */ 308#define X86_BUG_ESPFIX X86_BUG(9) /* "" IRET to 16-bit SS corrupts ESP/RSP high bits */
314#endif 309#endif
315 310#define X86_BUG_NULL_SEG X86_BUG(10) /* Nulling a selector preserves the base */
311#define X86_BUG_SWAPGS_FENCE X86_BUG(11) /* SWAPGS without input dep on GS */
312#define X86_BUG_MONITOR X86_BUG(12) /* IPI required to wake up remote CPU */
316#endif /* _ASM_X86_CPUFEATURES_H */ 313#endif /* _ASM_X86_CPUFEATURES_H */
diff --git a/tools/arch/x86/include/asm/disabled-features.h b/tools/arch/x86/include/asm/disabled-features.h
index 911e9358ceb1..85599ad4d024 100644
--- a/tools/arch/x86/include/asm/disabled-features.h
+++ b/tools/arch/x86/include/asm/disabled-features.h
@@ -56,5 +56,7 @@
56#define DISABLED_MASK14 0 56#define DISABLED_MASK14 0
57#define DISABLED_MASK15 0 57#define DISABLED_MASK15 0
58#define DISABLED_MASK16 (DISABLE_PKU|DISABLE_OSPKE) 58#define DISABLED_MASK16 (DISABLE_PKU|DISABLE_OSPKE)
59#define DISABLED_MASK17 0
60#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18)
59 61
60#endif /* _ASM_X86_DISABLED_FEATURES_H */ 62#endif /* _ASM_X86_DISABLED_FEATURES_H */
diff --git a/tools/arch/x86/include/asm/required-features.h b/tools/arch/x86/include/asm/required-features.h
index 4916144e3c42..fac9a5c0abe9 100644
--- a/tools/arch/x86/include/asm/required-features.h
+++ b/tools/arch/x86/include/asm/required-features.h
@@ -99,5 +99,7 @@
99#define REQUIRED_MASK14 0 99#define REQUIRED_MASK14 0
100#define REQUIRED_MASK15 0 100#define REQUIRED_MASK15 0
101#define REQUIRED_MASK16 0 101#define REQUIRED_MASK16 0
102#define REQUIRED_MASK17 0
103#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18)
102 104
103#endif /* _ASM_X86_REQUIRED_FEATURES_H */ 105#endif /* _ASM_X86_REQUIRED_FEATURES_H */
diff --git a/tools/arch/x86/include/uapi/asm/vmx.h b/tools/arch/x86/include/uapi/asm/vmx.h
index 5b15d94a33f8..37fee272618f 100644
--- a/tools/arch/x86/include/uapi/asm/vmx.h
+++ b/tools/arch/x86/include/uapi/asm/vmx.h
@@ -78,7 +78,6 @@
78#define EXIT_REASON_PML_FULL 62 78#define EXIT_REASON_PML_FULL 62
79#define EXIT_REASON_XSAVES 63 79#define EXIT_REASON_XSAVES 63
80#define EXIT_REASON_XRSTORS 64 80#define EXIT_REASON_XRSTORS 64
81#define EXIT_REASON_PCOMMIT 65
82 81
83#define VMX_EXIT_REASONS \ 82#define VMX_EXIT_REASONS \
84 { EXIT_REASON_EXCEPTION_NMI, "EXCEPTION_NMI" }, \ 83 { EXIT_REASON_EXCEPTION_NMI, "EXCEPTION_NMI" }, \
@@ -127,8 +126,7 @@
127 { EXIT_REASON_INVVPID, "INVVPID" }, \ 126 { EXIT_REASON_INVVPID, "INVVPID" }, \
128 { EXIT_REASON_INVPCID, "INVPCID" }, \ 127 { EXIT_REASON_INVPCID, "INVPCID" }, \
129 { EXIT_REASON_XSAVES, "XSAVES" }, \ 128 { EXIT_REASON_XSAVES, "XSAVES" }, \
130 { EXIT_REASON_XRSTORS, "XRSTORS" }, \ 129 { EXIT_REASON_XRSTORS, "XRSTORS" }
131 { EXIT_REASON_PCOMMIT, "PCOMMIT" }
132 130
133#define VMX_ABORT_SAVE_GUEST_MSR_FAIL 1 131#define VMX_ABORT_SAVE_GUEST_MSR_FAIL 1
134#define VMX_ABORT_LOAD_HOST_MSR_FAIL 4 132#define VMX_ABORT_LOAD_HOST_MSR_FAIL 4
diff --git a/tools/gpio/gpio-event-mon.c b/tools/gpio/gpio-event-mon.c
index 448ed96b3b4f..1c14c2595158 100644
--- a/tools/gpio/gpio-event-mon.c
+++ b/tools/gpio/gpio-event-mon.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * gpio-hammer - example swiss army knife to shake GPIO lines on a system 2 * gpio-event-mon - monitor GPIO line events from userspace
3 * 3 *
4 * Copyright (C) 2016 Linus Walleij 4 * Copyright (C) 2016 Linus Walleij
5 * 5 *
diff --git a/tools/iio/iio_generic_buffer.c b/tools/iio/iio_generic_buffer.c
index 0e8a1f7a292d..f39c0e9c0d5c 100644
--- a/tools/iio/iio_generic_buffer.c
+++ b/tools/iio/iio_generic_buffer.c
@@ -348,7 +348,7 @@ int main(int argc, char **argv)
348 int notrigger = 0; 348 int notrigger = 0;
349 char *dummy; 349 char *dummy;
350 350
351 struct iio_channel_info *channels; 351 struct iio_channel_info *channels = NULL;
352 352
353 register_cleanup(); 353 register_cleanup();
354 354
@@ -456,7 +456,7 @@ int main(int argc, char **argv)
456 456
457 if (notrigger) { 457 if (notrigger) {
458 printf("trigger-less mode selected\n"); 458 printf("trigger-less mode selected\n");
459 } if (trig_num >= 0) { 459 } else if (trig_num >= 0) {
460 char *trig_dev_name; 460 char *trig_dev_name;
461 ret = asprintf(&trig_dev_name, "%strigger%d", iio_dir, trig_num); 461 ret = asprintf(&trig_dev_name, "%strigger%d", iio_dir, trig_num);
462 if (ret < 0) { 462 if (ret < 0) {
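
Two userspace fixes here: "} if (trig_num >= 0) {" began a brand-new statement, so the trigger lookup also ran after "trigger-less mode" had been announced; "else if" restores the intended exclusive branches. Initializing channels to NULL likewise keeps the registered cleanup handler away from an indeterminate pointer on early exits. The first bug, distilled:

    #include <stdio.h>

    int main(void)
    {
        int notrigger = 1, trig_num = 0;

        if (notrigger) {
            printf("trigger-less mode selected\n");
        } else if (trig_num >= 0) {   /* without "else", both bodies ran */
            printf("using trigger%d\n", trig_num);
        }
        return 0;
    }
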
diff --git a/tools/include/linux/string.h b/tools/include/linux/string.h
index b96879477311..f436d2420a18 100644
--- a/tools/include/linux/string.h
+++ b/tools/include/linux/string.h
@@ -8,7 +8,11 @@ void *memdup(const void *src, size_t len);
8 8
9int strtobool(const char *s, bool *res); 9int strtobool(const char *s, bool *res);
10 10
11#ifdef __GLIBC__ 11/*
 12 * glibc-based builds need the extern while uClibc doesn't.
13 * However uClibc headers also define __GLIBC__ hence the hack below
14 */
15#if defined(__GLIBC__) && !defined(__UCLIBC__)
12extern size_t strlcpy(char *dest, const char *src, size_t size); 16extern size_t strlcpy(char *dest, const char *src, size_t size);
13#endif 17#endif
14 18
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 406459b935a2..da218fec6056 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -84,6 +84,7 @@ enum bpf_map_type {
84 BPF_MAP_TYPE_PERCPU_HASH, 84 BPF_MAP_TYPE_PERCPU_HASH,
85 BPF_MAP_TYPE_PERCPU_ARRAY, 85 BPF_MAP_TYPE_PERCPU_ARRAY,
86 BPF_MAP_TYPE_STACK_TRACE, 86 BPF_MAP_TYPE_STACK_TRACE,
87 BPF_MAP_TYPE_CGROUP_ARRAY,
87}; 88};
88 89
89enum bpf_prog_type { 90enum bpf_prog_type {
@@ -93,6 +94,7 @@ enum bpf_prog_type {
93 BPF_PROG_TYPE_SCHED_CLS, 94 BPF_PROG_TYPE_SCHED_CLS,
94 BPF_PROG_TYPE_SCHED_ACT, 95 BPF_PROG_TYPE_SCHED_ACT,
95 BPF_PROG_TYPE_TRACEPOINT, 96 BPF_PROG_TYPE_TRACEPOINT,
97 BPF_PROG_TYPE_XDP,
96}; 98};
97 99
98#define BPF_PSEUDO_MAP_FD 1 100#define BPF_PSEUDO_MAP_FD 1
@@ -313,6 +315,66 @@ enum bpf_func_id {
 	 */
 	BPF_FUNC_skb_get_tunnel_opt,
 	BPF_FUNC_skb_set_tunnel_opt,
+
+	/**
+	 * bpf_skb_change_proto(skb, proto, flags)
+	 * Change protocol of the skb. Currently supported is
+	 * v4 -> v6, v6 -> v4 transitions. The helper will also
+	 * resize the skb. eBPF program is expected to fill the
+	 * new headers via skb_store_bytes and lX_csum_replace.
+	 * @skb: pointer to skb
+	 * @proto: new skb->protocol type
+	 * @flags: reserved
+	 * Return: 0 on success or negative error
+	 */
+	BPF_FUNC_skb_change_proto,
+
+	/**
+	 * bpf_skb_change_type(skb, type)
+	 * Change packet type of skb.
+	 * @skb: pointer to skb
+	 * @type: new skb->pkt_type type
+	 * Return: 0 on success or negative error
+	 */
+	BPF_FUNC_skb_change_type,
+
+	/**
+	 * bpf_skb_in_cgroup(skb, map, index) - Check cgroup2 membership of skb
+	 * @skb: pointer to skb
+	 * @map: pointer to bpf_map in BPF_MAP_TYPE_CGROUP_ARRAY type
+	 * @index: index of the cgroup in the bpf_map
+	 * Return:
+	 * == 0 skb failed the cgroup2 descendant test
+	 * == 1 skb succeeded the cgroup2 descendant test
+	 *  < 0 error
+	 */
+	BPF_FUNC_skb_in_cgroup,
+
+	/**
+	 * bpf_get_hash_recalc(skb)
+	 * Retrieve and possibly recalculate skb->hash.
+	 * @skb: pointer to skb
+	 * Return: hash
+	 */
+	BPF_FUNC_get_hash_recalc,
+
+	/**
+	 * u64 bpf_get_current_task(void)
+	 * Returns current task_struct
+	 * Return: current
+	 */
+	BPF_FUNC_get_current_task,
+
+	/**
+	 * bpf_probe_write_user(void *dst, void *src, int len)
+	 * safely attempt to write to a location
+	 * @dst: destination address in userspace
+	 * @src: source address on stack
+	 * @len: number of bytes to copy
+	 * Return: 0 on success or negative error
+	 */
+	BPF_FUNC_probe_write_user,
+
 	__BPF_FUNC_MAX_ID,
 };
 
@@ -347,9 +409,11 @@ enum bpf_func_id {
 #define BPF_F_ZERO_CSUM_TX		(1ULL << 1)
 #define BPF_F_DONT_FRAGMENT		(1ULL << 2)
 
-/* BPF_FUNC_perf_event_output flags. */
+/* BPF_FUNC_perf_event_output and BPF_FUNC_perf_event_read flags. */
 #define BPF_F_INDEX_MASK		0xffffffffULL
 #define BPF_F_CURRENT_CPU		BPF_F_INDEX_MASK
+/* BPF_FUNC_perf_event_output for sk_buff input context. */
+#define BPF_F_CTXLEN_MASK		(0xfffffULL << 32)
 
 /* user accessible mirror of in-kernel sk_buff.
  * new fields can only be added to the end of this structure
@@ -386,4 +450,24 @@ struct bpf_tunnel_key {
 	__u32 tunnel_label;
 };
 
+/* User return codes for XDP prog type.
+ * A valid XDP program must return one of these defined values. All other
+ * return codes are reserved for future use. Unknown return codes will result
+ * in packet drop.
+ */
+enum xdp_action {
+	XDP_ABORTED = 0,
+	XDP_DROP,
+	XDP_PASS,
+	XDP_TX,
+};
+
+/* user accessible metadata for XDP packet hook
+ * new fields must be added to the end of this structure
+ */
+struct xdp_md {
+	__u32 data;
+	__u32 data_end;
+};
+
 #endif /* _UAPI__LINUX_BPF_H__ */
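
The XDP additions above define the whole user-visible contract: a program is handed a struct xdp_md and must return one of the xdp_action codes, with unknown codes treated as a drop. A hedged sketch of such a program in the restricted C compiled for BPF; the SEC() definition below mimics the usual loader convention and is an assumption of this example, not part of the patch:

	#include <linux/bpf.h>
	#include <linux/if_ether.h>

	#define SEC(name) __attribute__((section(name), used))

	SEC("xdp")
	int xdp_min_len(struct xdp_md *ctx)
	{
		void *data = (void *)(long)ctx->data;
		void *data_end = (void *)(long)ctx->data_end;

		/* The verifier requires an explicit bounds check before access. */
		if (data + sizeof(struct ethhdr) > data_end)
			return XDP_DROP;

		return XDP_PASS;
	}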
diff --git a/tools/lguest/lguest.c b/tools/lguest/lguest.c
index d9836c5eb694..11c8d9bc762e 100644
--- a/tools/lguest/lguest.c
+++ b/tools/lguest/lguest.c
@@ -3266,6 +3266,9 @@ int main(int argc, char *argv[])
 		}
 	}
 
+	/* If we exit via err(), this kills all the threads, restores tty. */
+	atexit(cleanup_devices);
+
 	/* We always have a console device, and it's always device 1. */
 	setup_console();
 
@@ -3369,9 +3372,6 @@ int main(int argc, char *argv[])
 	/* Ensure that we terminate if a device-servicing child dies. */
 	signal(SIGCHLD, kill_launcher);
 
-	/* If we exit via err(), this kills all the threads, restores tty. */
-	atexit(cleanup_devices);
-
 	/* If requested, chroot to a directory */
 	if (chroot_path) {
 		if (chroot(chroot_path) != 0)
diff --git a/tools/perf/Documentation/perf-probe.txt b/tools/perf/Documentation/perf-probe.txt
index 736da44596e4..b303bcdd8ed1 100644
--- a/tools/perf/Documentation/perf-probe.txt
+++ b/tools/perf/Documentation/perf-probe.txt
@@ -176,10 +176,18 @@ Each probe argument follows below syntax.
 
 'NAME' specifies the name of this argument (optional). You can use the name of local variable, local data structure member (e.g. var->field, var.field2), local array with fixed index (e.g. array[1], var->array[0], var->pointer[2]), or kprobe-tracer argument format (e.g. $retval, %ax, etc). Note that the name of this argument will be set as the last member name if you specify a local data structure member (e.g. field2 for 'var->field1.field2'.)
 '$vars' and '$params' special arguments are also available for NAME, '$vars' is expanded to the local variables (including function parameters) which can access at given probe point. '$params' is expanded to only the function parameters.
-'TYPE' casts the type of this argument (optional). If omitted, perf probe automatically set the type based on debuginfo. You can specify 'string' type only for the local variable or structure member which is an array of or a pointer to 'char' or 'unsigned char' type.
+'TYPE' casts the type of this argument (optional). If omitted, perf probe automatically set the type based on debuginfo. Currently, basic types (u8/u16/u32/u64/s8/s16/s32/s64), signedness casting (u/s), "string" and bitfield are supported. (see TYPES for detail)
 
 On x86 systems %REG is always the short form of the register: for example %AX. %RAX or %EAX is not valid.
 
+TYPES
+-----
+Basic types (u8/u16/u32/u64/s8/s16/s32/s64) are integer types. Prefix 's' and 'u' means those types are signed and unsigned respectively. Traced arguments are shown in decimal (signed) or hex (unsigned). You can also use 's' or 'u' to specify only signedness and leave its size auto-detected by perf probe.
+String type is a special type, which fetches a "null-terminated" string from kernel space. This means it will fail and store NULL if the string container has been paged out. You can specify 'string' type only for the local variable or structure member which is an array of or a pointer to 'char' or 'unsigned char' type.
+Bitfield is another special type, which takes 3 parameters, bit-width, bit-offset, and container-size (usually 32). The syntax is;
+
+ b<bit-width>@<bit-offset>/<container-size>
+
 LINE SYNTAX
 -----------
 Line range is described by following syntax.
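
As a hedged illustration of the casting syntax documented above (the probe point and variable below are hypothetical, chosen only to show the two new forms):

	perf probe 'vfs_read count:u'
	perf probe 'vfs_read fl=file->f_flags:b4@0/32'

The first leaves the size auto-detected and forces an unsigned display; the second fetches a 4-bit field at bit offset 0 of a 32-bit container.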
diff --git a/tools/perf/Documentation/perf-script.txt b/tools/perf/Documentation/perf-script.txt
index 1f6c70594f0f..053bbbd84ece 100644
--- a/tools/perf/Documentation/perf-script.txt
+++ b/tools/perf/Documentation/perf-script.txt
@@ -116,8 +116,8 @@ OPTIONS
 --fields::
         Comma separated list of fields to print. Options are:
         comm, tid, pid, time, cpu, event, trace, ip, sym, dso, addr, symoff,
-        srcline, period, iregs, brstack, brstacksym, flags.
-        Field list can be prepended with the type, trace, sw or hw,
+        srcline, period, iregs, brstack, brstacksym, flags, bpf-output,
+        callindent. Field list can be prepended with the type, trace, sw or hw,
         to indicate to which event type the field list applies.
         e.g., -F sw:comm,tid,time,ip,sym and -F trace:time,cpu,trace
 
diff --git a/tools/perf/arch/powerpc/util/sym-handling.c b/tools/perf/arch/powerpc/util/sym-handling.c
index c6d0f91731a1..35745a733100 100644
--- a/tools/perf/arch/powerpc/util/sym-handling.c
+++ b/tools/perf/arch/powerpc/util/sym-handling.c
@@ -54,10 +54,6 @@ int arch__compare_symbol_names(const char *namea, const char *nameb)
 #endif
 
 #if defined(_CALL_ELF) && _CALL_ELF == 2
-bool arch__prefers_symtab(void)
-{
-	return true;
-}
 
 #ifdef HAVE_LIBELF_SUPPORT
 void arch__sym_update(struct symbol *s, GElf_Sym *sym)
@@ -100,4 +96,29 @@ void arch__fix_tev_from_maps(struct perf_probe_event *pev,
 		tev->point.offset += lep_offset;
 	}
 }
+
+#ifdef HAVE_LIBELF_SUPPORT
+void arch__post_process_probe_trace_events(struct perf_probe_event *pev,
+					   int ntevs)
+{
+	struct probe_trace_event *tev;
+	struct map *map;
+	struct symbol *sym = NULL;
+	struct rb_node *tmp;
+	int i = 0;
+
+	map = get_target_map(pev->target, pev->uprobes);
+	if (!map || map__load(map, NULL) < 0)
+		return;
+
+	for (i = 0; i < ntevs; i++) {
+		tev = &pev->tevs[i];
+		map__for_each_symbol(map, sym, tmp) {
+			if (map->unmap_ip(map, sym->start) == tev->point.address)
+				arch__fix_tev_from_maps(pev, tev, map, sym);
+		}
+	}
+}
+#endif /* HAVE_LIBELF_SUPPORT */
+
 #endif
diff --git a/tools/perf/arch/x86/util/intel-pt.c b/tools/perf/arch/x86/util/intel-pt.c
index fb51457ba338..a2412e9d883b 100644
--- a/tools/perf/arch/x86/util/intel-pt.c
+++ b/tools/perf/arch/x86/util/intel-pt.c
@@ -501,7 +501,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
 	struct intel_pt_recording *ptr =
 			container_of(itr, struct intel_pt_recording, itr);
 	struct perf_pmu *intel_pt_pmu = ptr->intel_pt_pmu;
-	bool have_timing_info;
+	bool have_timing_info, need_immediate = false;
 	struct perf_evsel *evsel, *intel_pt_evsel = NULL;
 	const struct cpu_map *cpus = evlist->cpus;
 	bool privileged = geteuid() == 0 || perf_event_paranoid() < 0;
@@ -655,6 +655,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
 			ptr->have_sched_switch = 3;
 		} else {
 			opts->record_switch_events = true;
+			need_immediate = true;
 			if (cpu_wide)
 				ptr->have_sched_switch = 3;
 			else
@@ -700,6 +701,9 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
 		tracking_evsel->attr.freq = 0;
 		tracking_evsel->attr.sample_period = 1;
 
+		if (need_immediate)
+			tracking_evsel->immediate = true;
+
 		/* In per-cpu case, always need the time of mmap events etc */
 		if (!cpu_map__empty(cpus)) {
 			perf_evsel__set_sample_bit(tracking_evsel, TIME);
diff --git a/tools/perf/builtin-mem.c b/tools/perf/builtin-mem.c
index d608a2c9e48c..d1ce29be560e 100644
--- a/tools/perf/builtin-mem.c
+++ b/tools/perf/builtin-mem.c
@@ -88,6 +88,9 @@ static int __cmd_record(int argc, const char **argv, struct perf_mem *mem)
 	if (mem->operation & MEM_OPERATION_LOAD)
 		perf_mem_events[PERF_MEM_EVENTS__LOAD].record = true;
 
+	if (mem->operation & MEM_OPERATION_STORE)
+		perf_mem_events[PERF_MEM_EVENTS__STORE].record = true;
+
 	if (perf_mem_events[PERF_MEM_EVENTS__LOAD].record)
 		rec_argv[i++] = "-W";
 
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index 971ff91b16cb..c859e59dfe3e 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -371,14 +371,16 @@ static int perf_session__check_output_opt(struct perf_session *session)
 
 	if (!no_callchain) {
 		bool use_callchain = false;
+		bool not_pipe = false;
 
 		evlist__for_each_entry(session->evlist, evsel) {
+			not_pipe = true;
 			if (evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
 				use_callchain = true;
 				break;
 			}
 		}
-		if (!use_callchain)
+		if (not_pipe && !use_callchain)
 			symbol_conf.use_callchain = false;
 	}
 
@@ -1690,8 +1692,13 @@ static int list_available_scripts(const struct option *opt __maybe_unused,
 	snprintf(scripts_path, MAXPATHLEN, "%s/scripts", get_argv_exec_path());
 
 	scripts_dir = opendir(scripts_path);
-	if (!scripts_dir)
-		return -1;
+	if (!scripts_dir) {
+		fprintf(stdout,
+			"open(%s) failed.\n"
+			"Check \"PERF_EXEC_PATH\" env to set scripts dir.\n",
+			scripts_path);
+		exit(-1);
+	}
 
 	for_each_lang(scripts_path, scripts_dir, lang_dirent) {
 		snprintf(lang_path, MAXPATHLEN, "%s/%s/bin", scripts_path,
@@ -2116,7 +2123,7 @@ int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused)
 		     "Valid types: hw,sw,trace,raw. "
 		     "Fields: comm,tid,pid,time,cpu,event,trace,ip,sym,dso,"
 		     "addr,symoff,period,iregs,brstack,brstacksym,flags,"
-		     "callindent", parse_output_fields),
+		     "bpf-output,callindent", parse_output_fields),
 	OPT_BOOLEAN('a', "all-cpus", &system_wide,
 		    "system-wide collection from all CPUs"),
 	OPT_STRING('S', "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]",
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 0c16d20d7e32..3c7452b39f57 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -331,7 +331,7 @@ static int read_counter(struct perf_evsel *counter)
 		return 0;
 }
 
-static void read_counters(bool close_counters)
+static void read_counters(void)
 {
 	struct perf_evsel *counter;
 
@@ -341,11 +341,6 @@ static void read_counters(bool close_counters)
 
 		if (perf_stat_process_counter(&stat_config, counter))
 			pr_warning("failed to process counter %s\n", counter->name);
-
-		if (close_counters) {
-			perf_evsel__close_fd(counter, perf_evsel__nr_cpus(counter),
-					     thread_map__nr(evsel_list->threads));
-		}
 	}
 }
351 346
@@ -353,7 +348,7 @@ static void process_interval(void)
 {
 	struct timespec ts, rs;
 
-	read_counters(false);
+	read_counters();
 
 	clock_gettime(CLOCK_MONOTONIC, &ts);
 	diff_timespec(&rs, &ts, &ref_time);
@@ -380,6 +375,17 @@ static void enable_counters(void)
 		perf_evlist__enable(evsel_list);
 }
 
+static void disable_counters(void)
+{
+	/*
+	 * If we don't have tracee (attaching to task or cpu), counters may
+	 * still be running. To get accurate group ratios, we must stop groups
+	 * from counting before reading their constituent counters.
+	 */
+	if (!target__none(&target))
+		perf_evlist__disable(evsel_list);
+}
+
 static volatile int workload_exec_errno;
 
 /*
@@ -657,11 +663,20 @@ try_again:
 		}
 	}
 
+	disable_counters();
+
 	t1 = rdclock();
 
 	update_stats(&walltime_nsecs_stats, t1 - t0);
 
-	read_counters(true);
+	/*
+	 * Closing a group leader splits the group, and as we only disable
+	 * group leaders, results in remaining events becoming enabled. To
+	 * avoid arbitrary skew, we must read all counters before closing any
+	 * group leaders.
+	 */
+	read_counters();
+	perf_evlist__close(evsel_list);
 
 	return WEXITSTATUS(status);
 }
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index d9b80ef881cd..21fd573106ed 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -507,17 +507,17 @@ static int __perf_evsel__hw_cache_name(u64 config, char *bf, size_t size)
 	u8 op, result, type = (config >> 0) & 0xff;
 	const char *err = "unknown-ext-hardware-cache-type";
 
-	if (type > PERF_COUNT_HW_CACHE_MAX)
+	if (type >= PERF_COUNT_HW_CACHE_MAX)
 		goto out_err;
 
 	op = (config >> 8) & 0xff;
 	err = "unknown-ext-hardware-cache-op";
-	if (op > PERF_COUNT_HW_CACHE_OP_MAX)
+	if (op >= PERF_COUNT_HW_CACHE_OP_MAX)
 		goto out_err;
 
 	result = (config >> 16) & 0xff;
 	err = "unknown-ext-hardware-cache-result";
-	if (result > PERF_COUNT_HW_CACHE_RESULT_MAX)
+	if (result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
 		goto out_err;
 
 	err = "invalid-cache";
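
The evsel.c change above corrects three off-by-one bounds checks: an index into a table of PERF_COUNT_HW_CACHE_MAX entries is valid only when strictly below the maximum, so the guard must reject equality as well. A standalone sketch of the corrected pattern, with hypothetical names:

	#include <stddef.h>

	/* Valid indexes into table[max] satisfy i < max, hence the >= guard. */
	static const char *lookup(const char * const *table, size_t max, size_t i)
	{
		return (i >= max) ? NULL : table[i];
	}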
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
index 9c8f15da86ce..8ff6c6a61291 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
@@ -123,8 +123,6 @@ struct intel_pt_decoder {
 	bool have_calc_cyc_to_tsc;
 	int exec_mode;
 	unsigned int insn_bytes;
-	uint64_t sign_bit;
-	uint64_t sign_bits;
 	uint64_t period;
 	enum intel_pt_period_type period_type;
 	uint64_t tot_insn_cnt;
@@ -191,9 +189,6 @@ struct intel_pt_decoder *intel_pt_decoder_new(struct intel_pt_params *params)
 	decoder->data = params->data;
 	decoder->return_compression = params->return_compression;
 
-	decoder->sign_bit = (uint64_t)1 << 47;
-	decoder->sign_bits = ~(((uint64_t)1 << 48) - 1);
-
 	decoder->period = params->period;
 	decoder->period_type = params->period_type;
 
@@ -362,21 +357,30 @@ int intel_pt__strerror(int code, char *buf, size_t buflen)
 	return 0;
 }
 
-static uint64_t intel_pt_calc_ip(struct intel_pt_decoder *decoder,
-				 const struct intel_pt_pkt *packet,
+static uint64_t intel_pt_calc_ip(const struct intel_pt_pkt *packet,
 				 uint64_t last_ip)
 {
 	uint64_t ip;
 
 	switch (packet->count) {
-	case 2:
+	case 1:
 		ip = (last_ip & (uint64_t)0xffffffffffff0000ULL) |
 		     packet->payload;
 		break;
-	case 4:
+	case 2:
 		ip = (last_ip & (uint64_t)0xffffffff00000000ULL) |
 		     packet->payload;
 		break;
+	case 3:
+		ip = packet->payload;
+		/* Sign-extend 6-byte ip */
+		if (ip & (uint64_t)0x800000000000ULL)
+			ip |= (uint64_t)0xffff000000000000ULL;
+		break;
+	case 4:
+		ip = (last_ip & (uint64_t)0xffff000000000000ULL) |
+		     packet->payload;
+		break;
 	case 6:
 		ip = packet->payload;
 		break;
@@ -384,16 +388,12 @@ static uint64_t intel_pt_calc_ip(struct intel_pt_decoder *decoder,
 		return 0;
 	}
 
-	if (ip & decoder->sign_bit)
-		return ip | decoder->sign_bits;
-
 	return ip;
 }
 
 static inline void intel_pt_set_last_ip(struct intel_pt_decoder *decoder)
 {
-	decoder->last_ip = intel_pt_calc_ip(decoder, &decoder->packet,
-					    decoder->last_ip);
+	decoder->last_ip = intel_pt_calc_ip(&decoder->packet, decoder->last_ip);
 }
 
 static inline void intel_pt_set_ip(struct intel_pt_decoder *decoder)
@@ -1657,6 +1657,12 @@ next:
 	}
 }
 
+static inline bool intel_pt_have_ip(struct intel_pt_decoder *decoder)
+{
+	return decoder->last_ip || decoder->packet.count == 0 ||
+	       decoder->packet.count == 3 || decoder->packet.count == 6;
+}
+
 /* Walk PSB+ packets to get in sync. */
 static int intel_pt_walk_psb(struct intel_pt_decoder *decoder)
 {
@@ -1677,8 +1683,7 @@ static int intel_pt_walk_psb(struct intel_pt_decoder *decoder)
 
 	case INTEL_PT_FUP:
 		decoder->pge = true;
-		if (decoder->last_ip || decoder->packet.count == 6 ||
-		    decoder->packet.count == 0) {
+		if (intel_pt_have_ip(decoder)) {
 			uint64_t current_ip = decoder->ip;
 
 			intel_pt_set_ip(decoder);
@@ -1767,8 +1772,7 @@ static int intel_pt_walk_to_ip(struct intel_pt_decoder *decoder)
 	case INTEL_PT_TIP_PGE:
 	case INTEL_PT_TIP:
 		decoder->pge = decoder->packet.type != INTEL_PT_TIP_PGD;
-		if (decoder->last_ip || decoder->packet.count == 6 ||
-		    decoder->packet.count == 0)
+		if (intel_pt_have_ip(decoder))
 			intel_pt_set_ip(decoder);
 		if (decoder->ip)
 			return 0;
@@ -1776,9 +1780,7 @@ static int intel_pt_walk_to_ip(struct intel_pt_decoder *decoder)
 
 	case INTEL_PT_FUP:
 		if (decoder->overflow) {
-			if (decoder->last_ip ||
-			    decoder->packet.count == 6 ||
-			    decoder->packet.count == 0)
+			if (intel_pt_have_ip(decoder))
 				intel_pt_set_ip(decoder);
 			if (decoder->ip)
 				return 0;
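
The new case 3 path above sign-extends a 6-byte IP payload into a canonical 64-bit address. The same operation in isolation, as a standalone sketch:

	#include <stdint.h>

	/* Sign-extend a 48-bit value to 64 bits; sext48(0x800000000000ULL)
	 * yields 0xffff800000000000ULL, mirroring the case 3 branch above. */
	static uint64_t sext48(uint64_t ip)
	{
		if (ip & 0x800000000000ULL)
			ip |= 0xffff000000000000ULL;
		return ip;
	}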
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c
index b1257c816310..4f7b32020487 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c
@@ -292,36 +292,46 @@ static int intel_pt_get_ip(enum intel_pt_pkt_type type, unsigned int byte,
 				const unsigned char *buf, size_t len,
 				struct intel_pt_pkt *packet)
 {
-	switch (byte >> 5) {
+	int ip_len;
+
+	packet->count = byte >> 5;
+
+	switch (packet->count) {
 	case 0:
-		packet->count = 0;
+		ip_len = 0;
 		break;
 	case 1:
 		if (len < 3)
 			return INTEL_PT_NEED_MORE_BYTES;
-		packet->count = 2;
+		ip_len = 2;
 		packet->payload = le16_to_cpu(*(uint16_t *)(buf + 1));
 		break;
 	case 2:
 		if (len < 5)
 			return INTEL_PT_NEED_MORE_BYTES;
-		packet->count = 4;
+		ip_len = 4;
 		packet->payload = le32_to_cpu(*(uint32_t *)(buf + 1));
 		break;
 	case 3:
-	case 6:
+	case 4:
 		if (len < 7)
 			return INTEL_PT_NEED_MORE_BYTES;
-		packet->count = 6;
+		ip_len = 6;
 		memcpy_le64(&packet->payload, buf + 1, 6);
 		break;
+	case 6:
+		if (len < 9)
+			return INTEL_PT_NEED_MORE_BYTES;
+		ip_len = 8;
+		packet->payload = le64_to_cpu(*(uint64_t *)(buf + 1));
+		break;
 	default:
 		return INTEL_PT_BAD_PACKET;
 	}
 
 	packet->type = type;
 
-	return packet->count + 1;
+	return ip_len + 1;
 }
 
 static int intel_pt_get_mode(const unsigned char *buf, size_t len,
diff --git a/tools/perf/util/jitdump.c b/tools/perf/util/jitdump.c
index 9f3305f6b6d5..95f0884aae02 100644
--- a/tools/perf/util/jitdump.c
+++ b/tools/perf/util/jitdump.c
@@ -1,3 +1,4 @@
+#include <sys/sysmacros.h>
 #include <sys/types.h>
 #include <stdio.h>
 #include <stdlib.h>
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index 953dc1ab2ed7..28733962cd80 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -170,15 +170,17 @@ static struct map *kernel_get_module_map(const char *module)
 		module = "kernel";
 
 	for (pos = maps__first(maps); pos; pos = map__next(pos)) {
+		/* short_name is "[module]" */
 		if (strncmp(pos->dso->short_name + 1, module,
-			    pos->dso->short_name_len - 2) == 0) {
+			    pos->dso->short_name_len - 2) == 0 &&
+		    module[pos->dso->short_name_len - 2] == '\0') {
 			return pos;
 		}
 	}
 	return NULL;
 }
 
-static struct map *get_target_map(const char *target, bool user)
+struct map *get_target_map(const char *target, bool user)
 {
 	/* Init maps of given executable or kernel */
 	if (user)
@@ -385,7 +387,7 @@ static int find_alternative_probe_point(struct debuginfo *dinfo,
 		if (uprobes)
 			address = sym->start;
 		else
-			address = map->unmap_ip(map, sym->start);
+			address = map->unmap_ip(map, sym->start) - map->reloc;
 		break;
 	}
 	if (!address) {
@@ -664,22 +666,14 @@ static int add_module_to_probe_trace_events(struct probe_trace_event *tevs,
 	return ret;
 }
 
-/* Post processing the probe events */
-static int post_process_probe_trace_events(struct probe_trace_event *tevs,
-					   int ntevs, const char *module,
-					   bool uprobe)
+static int
+post_process_kernel_probe_trace_events(struct probe_trace_event *tevs,
+				       int ntevs)
 {
 	struct ref_reloc_sym *reloc_sym;
 	char *tmp;
 	int i, skipped = 0;
 
-	if (uprobe)
-		return add_exec_to_probe_trace_events(tevs, ntevs, module);
-
-	/* Note that currently ref_reloc_sym based probe is not for drivers */
-	if (module)
-		return add_module_to_probe_trace_events(tevs, ntevs, module);
-
 	reloc_sym = kernel_get_ref_reloc_sym();
 	if (!reloc_sym) {
 		pr_warning("Relocated base symbol is not found!\n");
@@ -711,6 +705,34 @@ static int post_process_probe_trace_events(struct probe_trace_event *tevs,
 	return skipped;
 }
 
+void __weak
+arch__post_process_probe_trace_events(struct perf_probe_event *pev __maybe_unused,
+				      int ntevs __maybe_unused)
+{
+}
+
+/* Post processing the probe events */
+static int post_process_probe_trace_events(struct perf_probe_event *pev,
+					   struct probe_trace_event *tevs,
+					   int ntevs, const char *module,
+					   bool uprobe)
+{
+	int ret;
+
+	if (uprobe)
+		ret = add_exec_to_probe_trace_events(tevs, ntevs, module);
+	else if (module)
+		/* Currently ref_reloc_sym based probe is not for drivers */
+		ret = add_module_to_probe_trace_events(tevs, ntevs, module);
+	else
+		ret = post_process_kernel_probe_trace_events(tevs, ntevs);
+
+	if (ret >= 0)
+		arch__post_process_probe_trace_events(pev, ntevs);
+
+	return ret;
+}
+
 /* Try to find perf_probe_event with debuginfo */
 static int try_to_find_probe_trace_events(struct perf_probe_event *pev,
 					  struct probe_trace_event **tevs)
@@ -749,7 +771,7 @@ static int try_to_find_probe_trace_events(struct perf_probe_event *pev,
 
 	if (ntevs > 0) { /* Succeeded to find trace events */
 		pr_debug("Found %d probe_trace_events.\n", ntevs);
-		ret = post_process_probe_trace_events(*tevs, ntevs,
+		ret = post_process_probe_trace_events(pev, *tevs, ntevs,
 						      pev->target, pev->uprobes);
 		if (ret < 0 || ret == ntevs) {
 			clear_probe_trace_events(*tevs, ntevs);
@@ -2936,8 +2958,6 @@ errout:
 	return err;
 }
 
-bool __weak arch__prefers_symtab(void) { return false; }
-
 /* Concatinate two arrays */
 static void *memcat(void *a, size_t sz_a, void *b, size_t sz_b)
 {
@@ -3158,12 +3178,6 @@ static int convert_to_probe_trace_events(struct perf_probe_event *pev,
 	if (ret > 0 || pev->sdt) /* SDT can be found only in the cache */
 		return ret == 0 ? -ENOENT : ret; /* Found in probe cache */
 
-	if (arch__prefers_symtab() && !perf_probe_event_need_dwarf(pev)) {
-		ret = find_probe_trace_events_from_map(pev, tevs);
-		if (ret > 0)
-			return ret; /* Found in symbol table */
-	}
-
 	/* Convert perf_probe_event with debuginfo */
 	ret = try_to_find_probe_trace_events(pev, tevs);
 	if (ret != 0)
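
probe-event.c now routes post processing through arch__post_process_probe_trace_events(), declared __weak so that the empty default is replaced at link time wherever an architecture (here powerpc) supplies a strong definition. A generic sketch of the pattern, with hypothetical names:

	/* common.c: weak default, a no-op unless overridden. */
	void __attribute__((weak)) arch_fixup(int nevents)
	{
	}

	/* arch/foo.c: a strong definition of the same symbol wins at link time. */
	void arch_fixup(int nevents)
	{
		/* architecture-specific adjustment of the nevents entries */
	}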
diff --git a/tools/perf/util/probe-event.h b/tools/perf/util/probe-event.h
index e18ea9fe6385..f4f45db77c1c 100644
--- a/tools/perf/util/probe-event.h
+++ b/tools/perf/util/probe-event.h
@@ -158,7 +158,6 @@ int show_line_range(struct line_range *lr, const char *module, bool user);
 int show_available_vars(struct perf_probe_event *pevs, int npevs,
 			struct strfilter *filter);
 int show_available_funcs(const char *module, struct strfilter *filter, bool user);
-bool arch__prefers_symtab(void);
 void arch__fix_tev_from_maps(struct perf_probe_event *pev,
 			     struct probe_trace_event *tev, struct map *map,
 			     struct symbol *sym);
@@ -173,4 +172,9 @@ int e_snprintf(char *str, size_t size, const char *format, ...)
 int copy_to_probe_trace_arg(struct probe_trace_arg *tvar,
 			    struct perf_probe_arg *pvar);
 
+struct map *get_target_map(const char *target, bool user);
+
+void arch__post_process_probe_trace_events(struct perf_probe_event *pev,
+					   int ntevs);
+
 #endif /*_PROBE_EVENT_H */
diff --git a/tools/perf/util/probe-file.c b/tools/perf/util/probe-file.c
index 9aed9c332da6..9c3b9ed5b3c3 100644
--- a/tools/perf/util/probe-file.c
+++ b/tools/perf/util/probe-file.c
@@ -133,7 +133,7 @@ int probe_file__open_both(int *kfd, int *ufd, int flag)
 /* Get raw string list of current kprobe_events or uprobe_events */
 struct strlist *probe_file__get_rawlist(int fd)
 {
-	int ret, idx;
+	int ret, idx, fddup;
 	FILE *fp;
 	char buf[MAX_CMDLEN];
 	char *p;
@@ -143,8 +143,17 @@ struct strlist *probe_file__get_rawlist(int fd)
 		return NULL;
 
 	sl = strlist__new(NULL, NULL);
+	if (sl == NULL)
+		return NULL;
+
+	fddup = dup(fd);
+	if (fddup < 0)
+		goto out_free_sl;
+
+	fp = fdopen(fddup, "r");
+	if (!fp)
+		goto out_close_fddup;
 
-	fp = fdopen(dup(fd), "r");
 	while (!feof(fp)) {
 		p = fgets(buf, MAX_CMDLEN, fp);
 		if (!p)
@@ -156,13 +165,21 @@ struct strlist *probe_file__get_rawlist(int fd)
 		ret = strlist__add(sl, buf);
 		if (ret < 0) {
 			pr_debug("strlist__add failed (%d)\n", ret);
-			strlist__delete(sl);
-			return NULL;
+			goto out_close_fp;
 		}
 	}
 	fclose(fp);
 
 	return sl;
+
+out_close_fp:
+	fclose(fp);
+	goto out_free_sl;
+out_close_fddup:
+	close(fddup);
+out_free_sl:
+	strlist__delete(sl);
+	return NULL;
 }
 
 static struct strlist *__probe_file__get_namelist(int fd, bool include_group)
@@ -447,12 +464,17 @@ static int probe_cache__load(struct probe_cache *pcache)
 {
 	struct probe_cache_entry *entry = NULL;
 	char buf[MAX_CMDLEN], *p;
-	int ret = 0;
+	int ret = 0, fddup;
 	FILE *fp;
 
-	fp = fdopen(dup(pcache->fd), "r");
-	if (!fp)
+	fddup = dup(pcache->fd);
+	if (fddup < 0)
+		return -errno;
+	fp = fdopen(fddup, "r");
+	if (!fp) {
+		close(fddup);
 		return -EINVAL;
+	}
 
 	while (!feof(fp)) {
 		if (!fgets(buf, MAX_CMDLEN, fp))
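
Both probe-file.c fixes follow the same shape: fclose() closes the underlying descriptor, so the code wraps a dup() of the caller's fd, and checking dup() and fdopen() separately avoids handing fdopen() a -1 descriptor and leaking the duplicate on failure. A standalone sketch of that pattern (the helper name is hypothetical):

	#include <stdio.h>
	#include <unistd.h>

	/* Wrap fd in a FILE * without giving up ownership of fd itself. */
	static FILE *fdopen_dup(int fd, const char *mode)
	{
		int fddup = dup(fd);
		FILE *fp;

		if (fddup < 0)
			return NULL;
		fp = fdopen(fddup, mode);
		if (!fp)
			close(fddup); /* don't leak the duplicate */
		return fp;
	}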
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index f2d9ff064e2d..5c290c682afe 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -297,10 +297,13 @@ static int convert_variable_type(Dwarf_Die *vr_die,
 	char sbuf[STRERR_BUFSIZE];
 	int bsize, boffs, total;
 	int ret;
+	char sign;
 
 	/* TODO: check all types */
-	if (cast && strcmp(cast, "string") != 0) {
+	if (cast && strcmp(cast, "string") != 0 &&
+	    strcmp(cast, "s") != 0 && strcmp(cast, "u") != 0) {
 		/* Non string type is OK */
+		/* and respect signedness cast */
 		tvar->type = strdup(cast);
 		return (tvar->type == NULL) ? -ENOMEM : 0;
 	}
@@ -361,6 +364,13 @@ static int convert_variable_type(Dwarf_Die *vr_die,
 		return (tvar->type == NULL) ? -ENOMEM : 0;
 	}
 
+	if (cast && (strcmp(cast, "u") == 0))
+		sign = 'u';
+	else if (cast && (strcmp(cast, "s") == 0))
+		sign = 's';
+	else
+		sign = die_is_signed_type(&type) ? 's' : 'u';
+
 	ret = dwarf_bytesize(&type);
 	if (ret <= 0)
 		/* No size ... try to use default type */
@@ -373,8 +383,7 @@ static int convert_variable_type(Dwarf_Die *vr_die,
 			dwarf_diename(&type), MAX_BASIC_TYPE_BITS);
 		ret = MAX_BASIC_TYPE_BITS;
 	}
-	ret = snprintf(buf, 16, "%c%d",
-		       die_is_signed_type(&type) ? 's' : 'u', ret);
+	ret = snprintf(buf, 16, "%c%d", sign, ret);
 
 formatted:
 	if (ret < 0 || ret >= 16) {
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index 947d21f38398..3d3cb8392c86 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -588,7 +588,11 @@ static char *get_trace_output(struct hist_entry *he)
 	} else {
 		pevent_event_info(&seq, evsel->tp_format, &rec);
 	}
-	return seq.buffer;
+	/*
+	 * Trim the buffer, it starts at 4KB and we're not going to
+	 * add anything more to this buffer.
+	 */
+	return realloc(seq.buffer, seq.len + 1);
 }
 
 static int64_t
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index a34321e9b44d..a811c13a74d6 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -837,7 +837,8 @@ int dso__load_sym(struct dso *dso, struct map *map,
837 sec = syms_ss->symtab; 837 sec = syms_ss->symtab;
838 shdr = syms_ss->symshdr; 838 shdr = syms_ss->symshdr;
839 839
840 if (elf_section_by_name(elf, &ehdr, &tshdr, ".text", NULL)) 840 if (elf_section_by_name(runtime_ss->elf, &runtime_ss->ehdr, &tshdr,
841 ".text", NULL))
841 dso->text_offset = tshdr.sh_addr - tshdr.sh_offset; 842 dso->text_offset = tshdr.sh_addr - tshdr.sh_offset;
842 843
843 if (runtime_ss->opdsec) 844 if (runtime_ss->opdsec)
diff --git a/tools/perf/util/unwind-libdw.c b/tools/perf/util/unwind-libdw.c
index cf5e250bc78e..783a53fb7a4e 100644
--- a/tools/perf/util/unwind-libdw.c
+++ b/tools/perf/util/unwind-libdw.c
@@ -66,7 +66,7 @@ static int entry(u64 ip, struct unwind_info *ui)
 	if (__report_module(&al, ip, ui))
 		return -1;
 
-	e->ip = ip;
+	e->ip = al.addr;
 	e->map = al.map;
 	e->sym = al.sym;
 
diff --git a/tools/perf/util/unwind-libunwind-local.c b/tools/perf/util/unwind-libunwind-local.c
index 97c0f8fc5561..20c2e5743903 100644
--- a/tools/perf/util/unwind-libunwind-local.c
+++ b/tools/perf/util/unwind-libunwind-local.c
@@ -542,7 +542,7 @@ static int entry(u64 ip, struct thread *thread,
 	thread__find_addr_location(thread, PERF_RECORD_MISC_USER,
 				   MAP__FUNCTION, ip, &al);
 
-	e.ip = ip;
+	e.ip = al.addr;
 	e.map = al.map;
 	e.sym = al.sym;
 
diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c
index 5404efa578a3..dd48f421844c 100644
--- a/tools/testing/nvdimm/test/nfit.c
+++ b/tools/testing/nvdimm/test/nfit.c
@@ -13,6 +13,7 @@
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 #include <linux/platform_device.h>
 #include <linux/dma-mapping.h>
+#include <linux/workqueue.h>
 #include <linux/libnvdimm.h>
 #include <linux/vmalloc.h>
 #include <linux/device.h>
@@ -1474,6 +1475,7 @@ static int nfit_test_probe(struct platform_device *pdev)
 	if (nfit_test->setup != nfit_test0_setup)
 		return 0;
 
+	flush_work(&acpi_desc->work);
 	nfit_test->setup_hotplug = 1;
 	nfit_test->setup(nfit_test);
 
diff --git a/tools/testing/selftests/powerpc/Makefile b/tools/testing/selftests/powerpc/Makefile
index 3c40c9d0e6c7..1cc6d64c39b7 100644
--- a/tools/testing/selftests/powerpc/Makefile
+++ b/tools/testing/selftests/powerpc/Makefile
@@ -8,7 +8,7 @@ ifeq ($(ARCH),powerpc)
 
 GIT_VERSION = $(shell git describe --always --long --dirty || echo "unknown")
 
-CFLAGS := -Wall -O2 -Wall -Werror -DGIT_VERSION='"$(GIT_VERSION)"' -I$(CURDIR) $(CFLAGS)
+CFLAGS := -std=gnu99 -Wall -O2 -Wall -Werror -DGIT_VERSION='"$(GIT_VERSION)"' -I$(CURDIR) $(CFLAGS)
 
 export CFLAGS
 
diff --git a/tools/virtio/linux/dma-mapping.h b/tools/virtio/linux/dma-mapping.h
index 4f93af89ae16..18601f6689b9 100644
--- a/tools/virtio/linux/dma-mapping.h
+++ b/tools/virtio/linux/dma-mapping.h
@@ -14,4 +14,20 @@ enum dma_data_direction {
 	DMA_NONE = 3,
 };
 
+#define dma_alloc_coherent(d, s, hp, f) ({ \
+	void *__dma_alloc_coherent_p = kmalloc((s), (f)); \
+	*(hp) = (unsigned long)__dma_alloc_coherent_p; \
+	__dma_alloc_coherent_p; \
+})
+
+#define dma_free_coherent(d, s, p, h) kfree(p)
+
+#define dma_map_page(d, p, o, s, dir) (page_to_phys(p) + (o))
+
+#define dma_map_single(d, p, s, dir) (virt_to_phys(p))
+#define dma_mapping_error(...) (0)
+
+#define dma_unmap_single(...) do { } while (0)
+#define dma_unmap_page(...) do { } while (0)
+
 #endif
diff --git a/tools/virtio/linux/kernel.h b/tools/virtio/linux/kernel.h
index 033849948215..d9554fc3f340 100644
--- a/tools/virtio/linux/kernel.h
+++ b/tools/virtio/linux/kernel.h
@@ -20,7 +20,9 @@
 
 #define PAGE_SIZE getpagesize()
 #define PAGE_MASK (~(PAGE_SIZE-1))
+#define PAGE_ALIGN(x) ((x + PAGE_SIZE - 1) & PAGE_MASK)
 
+typedef unsigned long long phys_addr_t;
 typedef unsigned long long dma_addr_t;
 typedef size_t __kernel_size_t;
 typedef unsigned int __wsum;
@@ -57,6 +59,11 @@ static inline void *kzalloc(size_t s, gfp_t gfp)
 	return p;
 }
 
+static inline void *alloc_pages_exact(size_t s, gfp_t gfp)
+{
+	return kmalloc(s, gfp);
+}
+
 static inline void kfree(void *p)
 {
 	if (p >= __kfree_ignore_start && p < __kfree_ignore_end)
@@ -64,6 +71,11 @@ static inline void kfree(void *p)
 	free(p);
 }
 
+static inline void free_pages_exact(void *p, size_t s)
+{
+	kfree(p);
+}
+
 static inline void *krealloc(void *p, size_t s, gfp_t gfp)
 {
 	return realloc(p, s);
@@ -105,6 +117,8 @@ static inline void free_page(unsigned long addr)
 #define dev_err(dev, format, ...) fprintf (stderr, format, ## __VA_ARGS__)
 #define dev_warn(dev, format, ...) fprintf (stderr, format, ## __VA_ARGS__)
 
+#define WARN_ON_ONCE(cond) ((cond) && fprintf (stderr, "WARNING\n"))
+
 #define min(x, y) ({ \
 	typeof(x) _min1 = (x); \
 	typeof(y) _min2 = (y); \
diff --git a/tools/virtio/linux/slab.h b/tools/virtio/linux/slab.h
index 81baeac8ae40..7e1c1197d439 100644
--- a/tools/virtio/linux/slab.h
+++ b/tools/virtio/linux/slab.h
@@ -1,2 +1,6 @@
 #ifndef LINUX_SLAB_H
+#define GFP_KERNEL 0
+#define GFP_ATOMIC 0
+#define __GFP_NOWARN 0
+#define __GFP_ZERO 0
 #endif
diff --git a/tools/virtio/linux/virtio.h b/tools/virtio/linux/virtio.h
index ee125e714053..9377c8b4ac16 100644
--- a/tools/virtio/linux/virtio.h
+++ b/tools/virtio/linux/virtio.h
@@ -3,8 +3,12 @@
 #include <linux/scatterlist.h>
 #include <linux/kernel.h>
 
+struct device {
+	void *parent;
+};
+
 struct virtio_device {
-	void *dev;
+	struct device dev;
 	u64 features;
 };
 
diff --git a/tools/virtio/linux/virtio_config.h b/tools/virtio/linux/virtio_config.h
index 57a6964a1e35..9ba11815e0a1 100644
--- a/tools/virtio/linux/virtio_config.h
+++ b/tools/virtio/linux/virtio_config.h
@@ -40,6 +40,19 @@ static inline void __virtio_clear_bit(struct virtio_device *vdev,
 #define virtio_has_feature(dev, feature) \
 	(__virtio_test_bit((dev), feature))
 
+/**
+ * virtio_has_iommu_quirk - determine whether this device has the iommu quirk
+ * @vdev: the device
+ */
+static inline bool virtio_has_iommu_quirk(const struct virtio_device *vdev)
+{
+	/*
+	 * Note the reverse polarity of the quirk feature (compared to most
+	 * other features), this is for compatibility with legacy systems.
+	 */
+	return !virtio_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM);
+}
+
 static inline bool virtio_is_little_endian(struct virtio_device *vdev)
 {
 	return virtio_has_feature(vdev, VIRTIO_F_VERSION_1) ||
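
A hedged sketch of how a caller might consume the helper above; the function name is illustrative, and the snippet assumes only the header fragment shown:

	#include <stdbool.h>

	/* Use the DMA API unless the device carries the legacy iommu quirk. */
	static bool vring_use_dma_api(struct virtio_device *vdev)
	{
		return !virtio_has_iommu_quirk(vdev);
	}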
diff --git a/tools/virtio/ringtest/ptr_ring.c b/tools/virtio/ringtest/ptr_ring.c
index 68e4f9f0da3a..bd2ad1d3b7a9 100644
--- a/tools/virtio/ringtest/ptr_ring.c
+++ b/tools/virtio/ringtest/ptr_ring.c
@@ -13,6 +13,7 @@
 #define cache_line_size() SMP_CACHE_BYTES
 #define ____cacheline_aligned_in_smp __attribute__ ((aligned (SMP_CACHE_BYTES)))
 #define unlikely(x) (__builtin_expect(!!(x), 0))
+#define likely(x) (__builtin_expect(!!(x), 1))
 #define ALIGN(x, a) (((x) + (a) - 1) / (a) * (a))
 typedef pthread_spinlock_t spinlock_t;
 
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index 4fde8c7dfcfe..77e6ccf14901 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -33,6 +33,7 @@
 static struct timecounter *timecounter;
 static struct workqueue_struct *wqueue;
 static unsigned int host_vtimer_irq;
+static u32 host_vtimer_irq_flags;
 
 void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
 {
@@ -365,7 +366,7 @@ void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
 
 static void kvm_timer_init_interrupt(void *info)
 {
-	enable_percpu_irq(host_vtimer_irq, 0);
+	enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
 }
 
 int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
@@ -432,6 +433,14 @@ int kvm_timer_hyp_init(void)
 	}
 	host_vtimer_irq = info->virtual_irq;
 
+	host_vtimer_irq_flags = irq_get_trigger_type(host_vtimer_irq);
+	if (host_vtimer_irq_flags != IRQF_TRIGGER_HIGH &&
+	    host_vtimer_irq_flags != IRQF_TRIGGER_LOW) {
+		kvm_err("Invalid trigger for IRQ%d, assuming level low\n",
+			host_vtimer_irq);
+		host_vtimer_irq_flags = IRQF_TRIGGER_LOW;
+	}
+
 	err = request_percpu_irq(host_vtimer_irq, kvm_arch_timer_handler,
 				 "kvm guest timer", kvm_get_running_vcpus());
 	if (err) {
diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c
index fb4b0a79a950..83777c1cbae0 100644
--- a/virt/kvm/arm/vgic/vgic-init.c
+++ b/virt/kvm/arm/vgic/vgic-init.c
@@ -73,12 +73,8 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
 	int i, vcpu_lock_idx = -1, ret;
 	struct kvm_vcpu *vcpu;
 
-	mutex_lock(&kvm->lock);
-
-	if (irqchip_in_kernel(kvm)) {
-		ret = -EEXIST;
-		goto out;
-	}
+	if (irqchip_in_kernel(kvm))
+		return -EEXIST;
 
 	/*
 	 * This function is also called by the KVM_CREATE_IRQCHIP handler,
@@ -87,10 +83,8 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
 	 * the proper checks already.
 	 */
 	if (type == KVM_DEV_TYPE_ARM_VGIC_V2 &&
-	    !kvm_vgic_global_state.can_emulate_gicv2) {
-		ret = -ENODEV;
-		goto out;
-	}
+	    !kvm_vgic_global_state.can_emulate_gicv2)
+		return -ENODEV;
 
 	/*
 	 * Any time a vcpu is run, vcpu_load is called which tries to grab the
@@ -138,9 +132,6 @@ out_unlock:
 		vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
 		mutex_unlock(&vcpu->mutex);
 	}
-
-out:
-	mutex_unlock(&kvm->lock);
 	return ret;
 }
 
diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
index 07411cf967b9..4660a7d04eea 100644
--- a/virt/kvm/arm/vgic/vgic-its.c
+++ b/virt/kvm/arm/vgic/vgic-its.c
@@ -51,7 +51,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid)
 
 	irq = kzalloc(sizeof(struct vgic_irq), GFP_KERNEL);
 	if (!irq)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 
 	INIT_LIST_HEAD(&irq->lpi_list);
 	INIT_LIST_HEAD(&irq->ap_list);
@@ -441,39 +441,63 @@ static unsigned long vgic_mmio_read_its_idregs(struct kvm *kvm,
  * Find the target VCPU and the LPI number for a given devid/eventid pair
  * and make this IRQ pending, possibly injecting it.
  * Must be called with the its_lock mutex held.
+ * Returns 0 on success, a positive error value for any ITS mapping
+ * related errors and negative error values for generic errors.
  */
-static void vgic_its_trigger_msi(struct kvm *kvm, struct vgic_its *its,
+static int vgic_its_trigger_msi(struct kvm *kvm, struct vgic_its *its,
 				 u32 devid, u32 eventid)
 {
+	struct kvm_vcpu *vcpu;
 	struct its_itte *itte;
 
 	if (!its->enabled)
-		return;
+		return -EBUSY;
 
 	itte = find_itte(its, devid, eventid);
-	/* Triggering an unmapped IRQ gets silently dropped. */
-	if (itte && its_is_collection_mapped(itte->collection)) {
-		struct kvm_vcpu *vcpu;
-
-		vcpu = kvm_get_vcpu(kvm, itte->collection->target_addr);
-		if (vcpu && vcpu->arch.vgic_cpu.lpis_enabled) {
-			spin_lock(&itte->irq->irq_lock);
-			itte->irq->pending = true;
-			vgic_queue_irq_unlock(kvm, itte->irq);
-		}
-	}
+	if (!itte || !its_is_collection_mapped(itte->collection))
+		return E_ITS_INT_UNMAPPED_INTERRUPT;
+
+	vcpu = kvm_get_vcpu(kvm, itte->collection->target_addr);
+	if (!vcpu)
+		return E_ITS_INT_UNMAPPED_INTERRUPT;
+
+	if (!vcpu->arch.vgic_cpu.lpis_enabled)
+		return -EBUSY;
+
+	spin_lock(&itte->irq->irq_lock);
+	itte->irq->pending = true;
+	vgic_queue_irq_unlock(kvm, itte->irq);
+
+	return 0;
+}
+
+static struct vgic_io_device *vgic_get_its_iodev(struct kvm_io_device *dev)
+{
+	struct vgic_io_device *iodev;
+
+	if (dev->ops != &kvm_io_gic_ops)
+		return NULL;
+
+	iodev = container_of(dev, struct vgic_io_device, dev);
+
+	if (iodev->iodev_type != IODEV_ITS)
+		return NULL;
+
+	return iodev;
 }
 
 /*
  * Queries the KVM IO bus framework to get the ITS pointer from the given
  * doorbell address.
  * We then call vgic_its_trigger_msi() with the decoded data.
+ * According to the KVM_SIGNAL_MSI API description returns 1 on success.
  */
 int vgic_its_inject_msi(struct kvm *kvm, struct kvm_msi *msi)
 {
 	u64 address;
 	struct kvm_io_device *kvm_io_dev;
 	struct vgic_io_device *iodev;
+	int ret;
 
 	if (!vgic_has_its(kvm))
 		return -ENODEV;
@@ -485,15 +509,28 @@ int vgic_its_inject_msi(struct kvm *kvm, struct kvm_msi *msi)
 
 	kvm_io_dev = kvm_io_bus_get_dev(kvm, KVM_MMIO_BUS, address);
 	if (!kvm_io_dev)
-		return -ENODEV;
+		return -EINVAL;
 
-	iodev = container_of(kvm_io_dev, struct vgic_io_device, dev);
+	iodev = vgic_get_its_iodev(kvm_io_dev);
+	if (!iodev)
+		return -EINVAL;
 
 	mutex_lock(&iodev->its->its_lock);
-	vgic_its_trigger_msi(kvm, iodev->its, msi->devid, msi->data);
+	ret = vgic_its_trigger_msi(kvm, iodev->its, msi->devid, msi->data);
 	mutex_unlock(&iodev->its->its_lock);
 
-	return 0;
+	if (ret < 0)
+		return ret;
+
+	/*
+	 * KVM_SIGNAL_MSI demands a return value > 0 for success and 0
+	 * if the guest has blocked the MSI. So we map any LPI mapping
+	 * related error to that.
+	 */
+	if (ret)
+		return 0;
+	else
+		return 1;
 }
 
 /* Requires the its_lock to be held. */
@@ -502,7 +539,8 @@ static void its_free_itte(struct kvm *kvm, struct its_itte *itte)
 	list_del(&itte->itte_list);
 
 	/* This put matches the get in vgic_add_lpi. */
-	vgic_put_irq(kvm, itte->irq);
+	if (itte->irq)
+		vgic_put_irq(kvm, itte->irq);
 
 	kfree(itte);
 }
@@ -697,6 +735,7 @@ static int vgic_its_cmd_handle_mapi(struct kvm *kvm, struct vgic_its *its,
 	struct its_device *device;
 	struct its_collection *collection, *new_coll = NULL;
 	int lpi_nr;
+	struct vgic_irq *irq;
 
 	device = find_its_device(its, device_id);
 	if (!device)
@@ -710,6 +749,10 @@ static int vgic_its_cmd_handle_mapi(struct kvm *kvm, struct vgic_its *its,
 	    lpi_nr >= max_lpis_propbaser(kvm->arch.vgic.propbaser))
 		return E_ITS_MAPTI_PHYSICALID_OOR;
 
+	/* If there is an existing mapping, behavior is UNPREDICTABLE. */
+	if (find_itte(its, device_id, event_id))
+		return 0;
+
 	collection = find_collection(its, coll_id);
 	if (!collection) {
 		int ret = vgic_its_alloc_collection(its, &collection, coll_id);
@@ -718,22 +761,28 @@ static int vgic_its_cmd_handle_mapi(struct kvm *kvm, struct vgic_its *its,
 		new_coll = collection;
 	}
 
-	itte = find_itte(its, device_id, event_id);
+	itte = kzalloc(sizeof(struct its_itte), GFP_KERNEL);
 	if (!itte) {
-		itte = kzalloc(sizeof(struct its_itte), GFP_KERNEL);
-		if (!itte) {
-			if (new_coll)
-				vgic_its_free_collection(its, coll_id);
-			return -ENOMEM;
-		}
-
-		itte->event_id = event_id;
-		list_add_tail(&itte->itte_list, &device->itt_head);
+		if (new_coll)
+			vgic_its_free_collection(its, coll_id);
+		return -ENOMEM;
 	}
 
+	itte->event_id = event_id;
+	list_add_tail(&itte->itte_list, &device->itt_head);
+
 	itte->collection = collection;
 	itte->lpi = lpi_nr;
-	itte->irq = vgic_add_lpi(kvm, lpi_nr);
+
+	irq = vgic_add_lpi(kvm, lpi_nr);
+	if (IS_ERR(irq)) {
+		if (new_coll)
+			vgic_its_free_collection(its, coll_id);
+		its_free_itte(kvm, itte);
+		return PTR_ERR(irq);
+	}
+	itte->irq = irq;
 
 	update_affinity_itte(kvm, itte);
 
 	/*
@@ -981,9 +1030,7 @@ static int vgic_its_cmd_handle_int(struct kvm *kvm, struct vgic_its *its,
 	u32 msi_data = its_cmd_get_id(its_cmd);
 	u64 msi_devid = its_cmd_get_deviceid(its_cmd);
 
-	vgic_its_trigger_msi(kvm, its, msi_devid, msi_data);
-
-	return 0;
+	return vgic_its_trigger_msi(kvm, its, msi_devid, msi_data);
 }
 
 /*
@@ -1288,13 +1335,13 @@ void vgic_enable_lpis(struct kvm_vcpu *vcpu)
 	its_sync_lpi_pending_table(vcpu);
 }
 
-static int vgic_its_init_its(struct kvm *kvm, struct vgic_its *its)
+static int vgic_register_its_iodev(struct kvm *kvm, struct vgic_its *its)
 {
 	struct vgic_io_device *iodev = &its->iodev;
 	int ret;
 
-	if (its->initialized)
-		return 0;
+	if (!its->initialized)
+		return -EBUSY;
 
 	if (IS_VGIC_ADDR_UNDEF(its->vgic_its_base))
 		return -ENXIO;
@@ -1311,9 +1358,6 @@ static int vgic_its_init_its(struct kvm *kvm, struct vgic_its *its)
 				      KVM_VGIC_V3_ITS_SIZE, &iodev->dev);
 	mutex_unlock(&kvm->slots_lock);
 
-	if (!ret)
-		its->initialized = true;
-
 	return ret;
 }
 
@@ -1435,9 +1479,6 @@ static int vgic_its_set_attr(struct kvm_device *dev,
 		if (type != KVM_VGIC_ITS_ADDR_TYPE)
 			return -ENODEV;
 
-		if (its->initialized)
-			return -EBUSY;
-
 		if (copy_from_user(&addr, uaddr, sizeof(addr)))
 			return -EFAULT;
 
@@ -1453,7 +1494,9 @@ static int vgic_its_set_attr(struct kvm_device *dev,
 	case KVM_DEV_ARM_VGIC_GRP_CTRL:
 		switch (attr->attr) {
 		case KVM_DEV_ARM_VGIC_CTRL_INIT:
-			return vgic_its_init_its(dev->kvm, its);
+			its->initialized = true;
+
+			return 0;
 		}
 		break;
 	}
@@ -1498,3 +1541,30 @@ int kvm_vgic_register_its_device(void)
 	return kvm_register_device_ops(&kvm_arm_vgic_its_ops,
 				       KVM_DEV_TYPE_ARM_VGIC_ITS);
 }
+
+/*
+ * Registers all ITSes with the kvm_io_bus framework.
+ * To follow the existing VGIC initialization sequence, this has to be
+ * done as late as possible, just before the first VCPU runs.
+ */
+int vgic_register_its_iodevs(struct kvm *kvm)
+{
+	struct kvm_device *dev;
+	int ret = 0;
+
+	list_for_each_entry(dev, &kvm->devices, vm_node) {
+		if (dev->ops != &kvm_arm_vgic_its_ops)
+			continue;
+
+		ret = vgic_register_its_iodev(kvm, dev->private);
+		if (ret)
+			return ret;
+		/*
+		 * We don't need to care about tearing down previously
+		 * registered ITSes, as the kvm_io_bus framework removes
+		 * them for us if the VM gets destroyed.
+		 */
+	}
+
+	return ret;
+}
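The MAPTI hunk above switches vgic_add_lpi() callers to the kernel's ERR_PTR convention: the function now returns either a valid struct vgic_irq pointer or an errno encoded into the pointer, never NULL-with-ambiguity. A minimal sketch of consuming such a value; the helper name is hypothetical:

	/*
	 * Illustrative only (helper name is not part of the patch).
	 * IS_ERR()/PTR_ERR() come from <linux/err.h>: the pointer either
	 * is valid or encodes a negative errno that must be decoded and
	 * propagated before any dereference.
	 */
	static int attach_lpi_sketch(struct kvm *kvm, struct its_itte *itte,
				     u32 lpi_nr)
	{
		struct vgic_irq *irq = vgic_add_lpi(kvm, lpi_nr);

		if (IS_ERR(irq))		/* encoded errno, not a pointer */
			return PTR_ERR(irq);	/* decode and propagate */

		itte->irq = irq;
		return 0;
	}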
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v3.c b/virt/kvm/arm/vgic/vgic-mmio-v3.c
index ff668e0dd586..90d81811fdda 100644
--- a/virt/kvm/arm/vgic/vgic-mmio-v3.c
+++ b/virt/kvm/arm/vgic/vgic-mmio-v3.c
@@ -306,16 +306,19 @@ static void vgic_mmio_write_propbase(struct kvm_vcpu *vcpu,
 {
 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
-	u64 propbaser = dist->propbaser;
+	u64 old_propbaser, propbaser;
 
 	/* Storing a value with LPIs already enabled is undefined */
 	if (vgic_cpu->lpis_enabled)
 		return;
 
-	propbaser = update_64bit_reg(propbaser, addr & 4, len, val);
-	propbaser = vgic_sanitise_propbaser(propbaser);
-
-	dist->propbaser = propbaser;
+	do {
+		old_propbaser = dist->propbaser;
+		propbaser = old_propbaser;
+		propbaser = update_64bit_reg(propbaser, addr & 4, len, val);
+		propbaser = vgic_sanitise_propbaser(propbaser);
+	} while (cmpxchg64(&dist->propbaser, old_propbaser,
+			   propbaser) != old_propbaser);
 }
 
 static unsigned long vgic_mmio_read_pendbase(struct kvm_vcpu *vcpu,
@@ -331,16 +334,19 @@ static void vgic_mmio_write_pendbase(struct kvm_vcpu *vcpu,
 				     unsigned long val)
 {
 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
-	u64 pendbaser = vgic_cpu->pendbaser;
+	u64 old_pendbaser, pendbaser;
 
 	/* Storing a value with LPIs already enabled is undefined */
 	if (vgic_cpu->lpis_enabled)
 		return;
 
-	pendbaser = update_64bit_reg(pendbaser, addr & 4, len, val);
-	pendbaser = vgic_sanitise_pendbaser(pendbaser);
-
-	vgic_cpu->pendbaser = pendbaser;
+	do {
+		old_pendbaser = vgic_cpu->pendbaser;
+		pendbaser = old_pendbaser;
+		pendbaser = update_64bit_reg(pendbaser, addr & 4, len, val);
+		pendbaser = vgic_sanitise_pendbaser(pendbaser);
+	} while (cmpxchg64(&vgic_cpu->pendbaser, old_pendbaser,
+			   pendbaser) != old_pendbaser);
 }
 
 /*
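Both BASER writers above share one idiom: snapshot the register, modify and sanitise the copy, then publish it with cmpxchg64(), retrying whenever another CPU raced in between so no concurrent update is silently overwritten. A standalone sketch of the loop, with a hypothetical sanitise_reg() standing in for vgic_sanitise_propbaser()/vgic_sanitise_pendbaser():

	/*
	 * Illustrative sketch of the lock-free read-modify-write above;
	 * sanitise_reg() is a hypothetical stand-in. Re-reading the old
	 * value on every iteration is what makes the retry correct.
	 */
	static void update_reg_atomically(u64 *reg, unsigned int offset,
					  unsigned int len, unsigned long val)
	{
		u64 old, new;

		do {
			old = *reg;			/* snapshot current value */
			new = update_64bit_reg(old, offset, len, val);
			new = sanitise_reg(new);	/* hypothetical sanitiser */
		} while (cmpxchg64(reg, old, new) != old);	/* retry on race */
	}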
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
index 0506543df38a..9f0dae397d9c 100644
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -289,6 +289,14 @@ int vgic_v3_map_resources(struct kvm *kvm)
 		goto out;
 	}
 
+	if (vgic_has_its(kvm)) {
+		ret = vgic_register_its_iodevs(kvm);
+		if (ret) {
+			kvm_err("Unable to register VGIC ITS MMIO regions\n");
+			goto out;
+		}
+	}
+
 	dist->ready = true;
 
 out:
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
index e7aeac719e09..e83b7fe4baae 100644
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -117,17 +117,17 @@ static void vgic_irq_release(struct kref *ref)
 
 void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
 {
-	struct vgic_dist *dist;
+	struct vgic_dist *dist = &kvm->arch.vgic;
 
 	if (irq->intid < VGIC_MIN_LPI)
 		return;
 
-	if (!kref_put(&irq->refcount, vgic_irq_release))
+	spin_lock(&dist->lpi_list_lock);
+	if (!kref_put(&irq->refcount, vgic_irq_release)) {
+		spin_unlock(&dist->lpi_list_lock);
 		return;
+	}
 
-	dist = &kvm->arch.vgic;
-
-	spin_lock(&dist->lpi_list_lock);
 	list_del(&irq->lpi_list);
 	dist->lpi_list_count--;
 	spin_unlock(&dist->lpi_list_lock);
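The vgic_put_irq() hunk above closes a lookup-versus-free race: when the lock was taken only after the final kref_put(), another CPU could still find the IRQ on the LPI list and take a fresh reference to an object whose refcount had already hit zero. A generic sketch of the corrected pattern, with illustrative types and a hypothetical obj_list_lock:

	/* Illustrative types and names; not part of the patch. */
	struct obj {
		struct kref refcount;
		struct list_head node;
	};

	static DEFINE_SPINLOCK(obj_list_lock);

	static void obj_release(struct kref *ref)
	{
		/* empty: unlinking and freeing happen under the lock below */
	}

	static void put_obj(struct obj *o)
	{
		spin_lock(&obj_list_lock);
		if (!kref_put(&o->refcount, obj_release)) {
			spin_unlock(&obj_list_lock);
			return;		/* somebody still holds a reference */
		}
		list_del(&o->node);	/* last reference gone: unlink... */
		spin_unlock(&obj_list_lock);
		kfree(o);		/* ...and free outside the spinlock */
	}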
diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h
index 1d8e21d5c13f..6c4625c46368 100644
--- a/virt/kvm/arm/vgic/vgic.h
+++ b/virt/kvm/arm/vgic/vgic.h
@@ -84,6 +84,7 @@ void vgic_v3_enable(struct kvm_vcpu *vcpu);
 int vgic_v3_probe(const struct gic_kvm_info *info);
 int vgic_v3_map_resources(struct kvm *kvm);
 int vgic_register_redist_iodevs(struct kvm *kvm, gpa_t dist_base_address);
+int vgic_register_its_iodevs(struct kvm *kvm);
 bool vgic_has_its(struct kvm *kvm);
 int kvm_vgic_register_its_device(void);
 void vgic_enable_lpis(struct kvm_vcpu *vcpu);
@@ -140,6 +141,11 @@ static inline int vgic_register_redist_iodevs(struct kvm *kvm,
 	return -ENODEV;
 }
 
+static inline int vgic_register_its_iodevs(struct kvm *kvm)
+{
+	return -ENODEV;
+}
+
 static inline bool vgic_has_its(struct kvm *kvm)
 {
 	return false;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index cc081ccfcaa3..195078225aa5 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -696,6 +696,11 @@ static void kvm_destroy_devices(struct kvm *kvm)
 {
 	struct kvm_device *dev, *tmp;
 
+	/*
+	 * We do not need to take the kvm->lock here, because nobody else
+	 * has a reference to the struct kvm at this point and therefore
+	 * cannot access the devices list anyhow.
+	 */
 	list_for_each_entry_safe(dev, tmp, &kvm->devices, vm_node) {
 		list_del(&dev->vm_node);
 		dev->ops->destroy(dev);
@@ -2832,19 +2837,28 @@ static int kvm_ioctl_create_device(struct kvm *kvm,
 	dev->ops = ops;
 	dev->kvm = kvm;
 
+	mutex_lock(&kvm->lock);
 	ret = ops->create(dev, cd->type);
 	if (ret < 0) {
+		mutex_unlock(&kvm->lock);
 		kfree(dev);
 		return ret;
 	}
+	list_add(&dev->vm_node, &kvm->devices);
+	mutex_unlock(&kvm->lock);
+
+	if (ops->init)
+		ops->init(dev);
 
 	ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC);
 	if (ret < 0) {
 		ops->destroy(dev);
+		mutex_lock(&kvm->lock);
+		list_del(&dev->vm_node);
+		mutex_unlock(&kvm->lock);
 		return ret;
 	}
 
-	list_add(&dev->vm_node, &kvm->devices);
 	kvm_get_kvm(kvm);
 	cd->fd = ret;
 	return 0;
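The kvm_ioctl_create_device() hunk makes creation and list insertion atomic with respect to kvm->lock, and publishes the device on kvm->devices before any file descriptor for it can exist, so code such as vgic_register_its_iodevs() walking the list cannot miss a device that user space already holds. A condensed, illustrative sketch of the new ordering (the fd step and its unwind path are omitted):

	/* Illustrative condensation of the create path above. */
	static int create_device_sketch(struct kvm *kvm, struct kvm_device *dev,
					u32 type)
	{
		int ret;

		mutex_lock(&kvm->lock);
		ret = dev->ops->create(dev, type);	/* may inspect kvm->devices */
		if (ret < 0) {
			mutex_unlock(&kvm->lock);
			return ret;
		}
		/* publish before any fd can reference the device */
		list_add(&dev->vm_node, &kvm->devices);
		mutex_unlock(&kvm->lock);
		return 0;
	}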